9210 remove KMDB branch debugging support
9211 ::crregs could do with cr2/cr3 support
9209 ::ttrace should be able to filter by thread
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
--- old/usr/src/cmd/mdb/common/mdb/mdb_target.c
+++ new/usr/src/cmd/mdb/common/mdb/mdb_target.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
(13 lines elided)
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + *
25 + * Copyright 2018 Joyent, Inc.
24 26 */
25 27
26 28 /*
27 29 * MDB Target Layer
28 30 *
29 31 * The *target* is the program being inspected by the debugger. The MDB target
30 32 * layer provides a set of functions that insulate common debugger code,
31 33 * including the MDB Module API, from the implementation details of how the
32 34 * debugger accesses information from a given target. Each target exports a
33 35 * standard set of properties, including one or more address spaces, one or
34 36 * more symbol tables, a set of load objects, and a set of threads that can be
35 37 * examined using the interfaces in <mdb/mdb_target.h>. This technique has
36 38 * been employed successfully in other debuggers, including [1], primarily
37 39 * to improve portability, although the term "target" often refers to the
38 40 * encapsulation of architectural or operating system-specific details. The
39 41 * target abstraction is useful for MDB because it allows us to easily extend
40 42 * the debugger to examine a variety of different program forms. Primarily,
41 43 * the target functions validate input arguments and then call an appropriate
42 44 * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
43 45 * However, this interface layer provides a very high level of flexibility for
44 46 * separating the debugger interface from instrumentation details. Experience
45 47 * has shown this kind of design can facilitate separating out debugger
46 48 * instrumentation into an external agent [2] and enable the development of
47 49 * advanced instrumentation frameworks [3]. We want MDB to be an ideal
48 50 * extensible framework for the development of such applications.
49 51 *
50 52 * Aside from a set of wrapper functions, the target layer also provides event
51 53 * management for targets that represent live executing programs. Our model of
52 54 * events is also extensible, and is based upon work in [3] and [4]. We define
53 55 * a *software event* as a state transition in the target program (for example,
54 56 * the transition of the program counter to a location of interest) that is
55 57 * observed by the debugger or its agent. A *software event specifier* is a
56 58 * description of a class of software events that is used by the debugger to
57 59 * instrument the target so that the corresponding software events can be
58 60 * observed. In MDB, software event specifiers are represented by the
59 61 * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>. As the user,
60 62 * the internal debugger code, and MDB modules may all wish to observe software
61 63 * events and receive appropriate notification and callbacks, we do not expose
62 64 * software event specifiers directly as part of the user interface. Instead,
63 65 * clients of the target layer request that events be observed by creating
64 66 * new *virtual event specifiers*. Each virtual specifier is named by a unique
65 67 * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
66 68 * One or more virtual specifiers are then associated with each underlying
67 69 * software event specifier. This design enforces the constraint that the
68 70 * target must only insert one set of instrumentation, regardless of how many
69 71 * times the target layer was asked to trace a given event. For example, if
70 72 * multiple clients request a breakpoint at a particular address, the virtual
71 73 * specifiers will map to the same sespec, ensuring that only one breakpoint
72 74 * trap instruction is actually planted at the given target address. When no
73 75 * virtual specifiers refer to an sespec, it is no longer needed and can be
74 76 * removed, along with the corresponding instrumentation.
75 77 *
76 78 * The following state transition diagram illustrates the life cycle of a
77 79 * software event specifier and example transitions:
78 80 *
79 81 * cont/
80 82 * +--------+ delete +--------+ stop +-------+
81 83 * (|( DEAD )|) <------- ( ACTIVE ) <------> ( ARMED )
82 84 * +--------+ +--------+ +-------+
83 85 * ^ load/unload ^ ^ failure/ |
84 86 * delete | object / \ reset | failure
85 87 * | v v |
86 88 * | +--------+ +-------+ |
87 89 * +---- ( IDLE ) ( ERR ) <----+
88 90 * | +--------+ +-------+
89 91 * | |
90 92 * +------------------------------+
91 93 *
92 94 * The MDB execution control model is based upon the synchronous debugging
93 95 * model exported by Solaris proc(4). A target program is set running or the
94 96 * debugger is attached to a running target. On ISTOP (stop on event of
95 97 * interest), one target thread is selected as the representative. The
96 98 * algorithm for selecting the representative is target-specific, but we assume
97 99 * that if an observed software event has occurred, the target will select the
98 100 * thread that triggered the state transition of interest. The other threads
99 101 * are stopped in sympathy with the representative as soon as possible. Prior
100 102 * to continuing the target, we plant our instrumentation, transitioning event
101 103 * specifiers from the ACTIVE to the ARMED state, and then back again when the
102 104 * target stops. We then query each active event specifier to learn which ones
103 105 * are matched, and then invoke the callbacks associated with their vespecs.
104 106 * If an OS error occurs while attempting to arm or disarm a specifier, the
105 107 * specifier is transitioned to the ERROR state; we will attempt to arm it
106 108 * again at the next continue. If no target process is under our control or
107 109 * if an event is not currently applicable (e.g. a deferred breakpoint on an
108 110 * object that is not yet loaded), it remains in the IDLE state. The target
109 111 * implementation should intercept object load events and then transition the
110 112 * specifier to the ACTIVE state when the corresponding object is loaded.
111 113 *
112 114 * To simplify the debugger implementation and allow targets to easily provide
113 115 * new types of observable events, most of the event specifier management is
114 116 * done by the target layer. Each software event specifier provides an ops
115 117 * vector of subroutines that the target layer can call to perform the
116 118 * various state transitions described above. The target maintains two lists
117 119 * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
118 120 * (ACTIVE, ARMED, and ERROR states). Each mdb_sespec_t maintains a list of
119 121 * associated mdb_vespec_t's. If an sespec is IDLE or ERROR, its se_errno
120 122 * field will have an errno value specifying the reason for its inactivity.
121 123 * The vespec stores the client's callback function and private data, and the
122 124 * arguments used to construct the sespec. All objects are reference counted
123 125 * so we can destroy an object when it is no longer needed. The mdb_sespec_t
124 126 * invariants for the respective states are as follows:
125 127 *
126 128 * IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
127 129 * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
128 130 * ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
129 131 * ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
130 132 *
131 133 * Additional commentary on specific state transitions and issues involving
132 134 * event management can be found below near the target layer functions.
133 135 *
134 136 * References
135 137 *
136 138 * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
137 139 * 1.84 edition, 1994.
138 140 *
139 141 * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
140 142 * Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996).
141 143 *
142 144 * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
143 145 * Technical Report CS-97-12, Department of Computer Science,
144 146 * Brown University.
145 147 *
146 148 * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
147 149 * Report CS-98-05, Department of Computer Science, Brown University.
148 150 */
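
[Reviewer note, not part of this change.] For readers less familiar with the client side of the layer described above, the model roughly reduces to the sketch below: a client plants a virtual event specifier (here a virtual breakpoint) and is later called back with that vespec's VID when the event is observed. This is a minimal illustration only. The mdb_tgt_add_vbrkpt() call and its argument order are taken from its use later in this file, and the callback shape is inferred from how ve_callback is invoked in tgt_continue(); the function names my_brkpt_cb and plant_example_brkpt are hypothetical, and autostop/limit behaviour is not shown.

/* Sketch only -- illustrative, not part of this change. */
static void
my_brkpt_cb(mdb_tgt_t *t, int vid, void *data)
{
	mdb_tgt_status_t st;

	/* Invoked once the matched software event has been observed. */
	if (mdb_tgt_status(t, &st) == 0)
		mdb_printf("vespec %d hit at %p\n", vid, (void *)st.st_pc);
}

static void
plant_example_brkpt(mdb_tgt_t *t, uintptr_t addr)
{
	/* A non-zero VID is returned on success; 0 with errno set on failure. */
	int vid = mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_TEMPORARY,
	    my_brkpt_cb, NULL);

	if (vid == 0)
		mdb_warn("failed to plant breakpoint at %p", (void *)addr);
}

If several clients plant a breakpoint at the same address this way, each receives its own VID, but the vespecs all map onto a single underlying sespec, so only one trap instruction is inserted.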
149 151
150 152 #include <mdb/mdb_target_impl.h>
151 153 #include <mdb/mdb_debug.h>
152 154 #include <mdb/mdb_modapi.h>
153 155 #include <mdb/mdb_err.h>
154 156 #include <mdb/mdb_callb.h>
155 157 #include <mdb/mdb_gelf.h>
156 158 #include <mdb/mdb_io_impl.h>
157 159 #include <mdb/mdb_string.h>
158 160 #include <mdb/mdb_signal.h>
159 161 #include <mdb/mdb_frame.h>
160 162 #include <mdb/mdb.h>
161 163
162 164 #include <sys/stat.h>
163 165 #include <sys/param.h>
164 166 #include <sys/signal.h>
165 167 #include <strings.h>
166 168 #include <stdlib.h>
167 169 #include <errno.h>
168 170
169 171 /*
170 172 * Define convenience macros for referencing the set of vespec flag bits that
171 173 * are preserved by the target implementation, and the set of bits that
172 174 * determine automatic ve_hits == ve_limit behavior.
173 175 */
174 176 #define T_IMPL_BITS \
(141 lines elided)
175 177 (MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
176 178 MDB_TGT_SPEC_DELETED)
177 179
178 180 #define T_AUTO_BITS \
179 181 (MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)
180 182
181 183 /*
182 184 * Define convenience macro for referencing target flag pending continue bits.
183 185 */
184 186 #define T_CONT_BITS \
185 - (MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_STEP_BRANCH | \
186 - MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
187 + (MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
187 188
188 189 mdb_tgt_t *
189 190 mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
190 191 {
191 192 mdb_module_t *mp;
192 193 mdb_tgt_t *t;
193 194
194 195 if (flags & ~MDB_TGT_F_ALL) {
195 196 (void) set_errno(EINVAL);
196 197 return (NULL);
197 198 }
198 199
199 200 t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
200 201 mdb_list_append(&mdb.m_tgtlist, t);
201 202
202 203 t->t_module = &mdb.m_rmod;
203 204 t->t_matched = T_SE_END;
204 205 t->t_flags = flags;
205 206 t->t_vepos = 1;
206 207 t->t_veneg = 1;
207 208
208 209 for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
209 210 if (ctor == mp->mod_tgt_ctor) {
210 211 t->t_module = mp;
211 212 break;
212 213 }
213 214 }
214 215
215 216 if (ctor(t, argc, argv) != 0) {
216 217 mdb_list_delete(&mdb.m_tgtlist, t);
217 218 mdb_free(t, sizeof (mdb_tgt_t));
218 219 return (NULL);
219 220 }
220 221
221 222 mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
222 223 t->t_module->mod_name, (void *)t);
223 224
224 225 (void) t->t_ops->t_status(t, &t->t_status);
225 226 return (t);
226 227 }
227 228
228 229 int
229 230 mdb_tgt_getflags(mdb_tgt_t *t)
230 231 {
231 232 return (t->t_flags);
232 233 }
233 234
234 235 int
235 236 mdb_tgt_setflags(mdb_tgt_t *t, int flags)
236 237 {
237 238 if (flags & ~MDB_TGT_F_ALL)
238 239 return (set_errno(EINVAL));
239 240
240 241 return (t->t_ops->t_setflags(t, flags));
241 242 }
242 243
243 244 int
244 245 mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
245 246 {
246 247 return (t->t_ops->t_setcontext(t, context));
247 248 }
248 249
249 250 /*ARGSUSED*/
250 251 static int
251 252 tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
252 253 {
253 254 (void) mdb_tgt_vespec_delete(t, vid);
254 255 return (0);
255 256 }
256 257
257 258 void
258 259 mdb_tgt_destroy(mdb_tgt_t *t)
259 260 {
260 261 mdb_xdata_t *xdp, *nxdp;
261 262
262 263 if (mdb.m_target == t) {
263 264 mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
264 265 t->t_module->mod_name, (void *)t);
265 266 t->t_ops->t_deactivate(t);
266 267 mdb.m_target = NULL;
267 268 }
268 269
269 270 mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
270 271 t->t_module->mod_name, (void *)t);
271 272
272 273 for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
273 274 nxdp = mdb_list_next(xdp);
274 275 mdb_list_delete(&t->t_xdlist, xdp);
275 276 mdb_free(xdp, sizeof (mdb_xdata_t));
276 277 }
277 278
278 279 mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
279 280 (void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
280 281 t->t_ops->t_destroy(t);
281 282
282 283 mdb_list_delete(&mdb.m_tgtlist, t);
283 284 mdb_free(t, sizeof (mdb_tgt_t));
284 285
285 286 if (mdb.m_target == NULL)
286 287 mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
287 288 }
288 289
289 290 void
290 291 mdb_tgt_activate(mdb_tgt_t *t)
291 292 {
292 293 mdb_tgt_t *otgt = mdb.m_target;
293 294
294 295 if (mdb.m_target != NULL) {
295 296 mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
296 297 mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
297 298 mdb.m_target->t_ops->t_deactivate(mdb.m_target);
298 299 }
299 300
300 301 if ((mdb.m_target = t) != NULL) {
301 302 const char *v = strstr(mdb.m_root, "%V");
302 303
303 304 mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
304 305 t->t_module->mod_name, (void *)t);
305 306
306 307 /*
307 308 * If the root was explicitly set with -R and contains %V,
308 309 * expand it like a path. If the resulting directory is
309 310 * not present, then replace %V with "latest" and re-evaluate.
310 311 */
311 312 if (v != NULL) {
312 313 char old_root[MAXPATHLEN];
313 314 const char **p;
314 315 #ifndef _KMDB
315 316 struct stat s;
316 317 #endif
317 318 size_t len;
318 319
319 320 p = mdb_path_alloc(mdb.m_root, &len);
320 321 (void) strcpy(old_root, mdb.m_root);
321 322 (void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
322 323 mdb.m_root[MAXPATHLEN - 1] = '\0';
323 324 mdb_path_free(p, len);
324 325
325 326 #ifndef _KMDB
326 327 if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
327 328 mdb.m_flags |= MDB_FL_LATEST;
328 329 p = mdb_path_alloc(old_root, &len);
329 330 (void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
330 331 mdb.m_root[MAXPATHLEN - 1] = '\0';
331 332 mdb_path_free(p, len);
332 333 }
333 334 #endif
334 335 }
335 336
336 337 /*
337 338 * Re-evaluate the macro and dmod paths now that we have the
338 339 * new target set and m_root figured out.
339 340 */
340 341 if (otgt == NULL) {
341 342 mdb_set_ipath(mdb.m_ipathstr);
342 343 mdb_set_lpath(mdb.m_lpathstr);
343 344 }
344 345
345 346 t->t_ops->t_activate(t);
346 347 }
347 348 }
348 349
349 350 void
350 351 mdb_tgt_periodic(mdb_tgt_t *t)
351 352 {
352 353 t->t_ops->t_periodic(t);
353 354 }
354 355
355 356 const char *
356 357 mdb_tgt_name(mdb_tgt_t *t)
357 358 {
358 359 return (t->t_ops->t_name(t));
359 360 }
360 361
361 362 const char *
362 363 mdb_tgt_isa(mdb_tgt_t *t)
363 364 {
364 365 return (t->t_ops->t_isa(t));
365 366 }
366 367
367 368 const char *
368 369 mdb_tgt_platform(mdb_tgt_t *t)
369 370 {
370 371 return (t->t_ops->t_platform(t));
371 372 }
372 373
373 374 int
374 375 mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
375 376 {
376 377 return (t->t_ops->t_uname(t, utsp));
377 378 }
378 379
379 380 int
380 381 mdb_tgt_dmodel(mdb_tgt_t *t)
381 382 {
382 383 return (t->t_ops->t_dmodel(t));
(186 lines elided)
383 384 }
384 385
385 386 int
386 387 mdb_tgt_auxv(mdb_tgt_t *t, const auxv_t **auxvp)
387 388 {
388 389 return (t->t_ops->t_auxv(t, auxvp));
389 390 }
390 391
391 392 ssize_t
392 393 mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
393 - void *buf, size_t n, mdb_tgt_addr_t addr)
394 + void *buf, size_t n, mdb_tgt_addr_t addr)
394 395 {
395 396 if (t->t_flags & MDB_TGT_F_ASIO)
396 397 return (t->t_ops->t_aread(t, as, buf, n, addr));
397 398
398 399 switch ((uintptr_t)as) {
399 400 case (uintptr_t)MDB_TGT_AS_VIRT:
400 401 return (t->t_ops->t_vread(t, buf, n, addr));
401 402 case (uintptr_t)MDB_TGT_AS_PHYS:
402 403 return (t->t_ops->t_pread(t, buf, n, addr));
403 404 case (uintptr_t)MDB_TGT_AS_FILE:
404 405 return (t->t_ops->t_fread(t, buf, n, addr));
405 406 case (uintptr_t)MDB_TGT_AS_IO:
406 407 return (t->t_ops->t_ioread(t, buf, n, addr));
407 408 }
408 409 return (t->t_ops->t_aread(t, as, buf, n, addr));
409 410 }
410 411
411 412 ssize_t
412 413 mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
413 - const void *buf, size_t n, mdb_tgt_addr_t addr)
414 + const void *buf, size_t n, mdb_tgt_addr_t addr)
414 415 {
415 416 if (!(t->t_flags & MDB_TGT_F_RDWR))
416 417 return (set_errno(EMDB_TGTRDONLY));
417 418
418 419 if (t->t_flags & MDB_TGT_F_ASIO)
419 420 return (t->t_ops->t_awrite(t, as, buf, n, addr));
420 421
421 422 switch ((uintptr_t)as) {
422 423 case (uintptr_t)MDB_TGT_AS_VIRT:
423 424 return (t->t_ops->t_vwrite(t, buf, n, addr));
424 425 case (uintptr_t)MDB_TGT_AS_PHYS:
425 426 return (t->t_ops->t_pwrite(t, buf, n, addr));
426 427 case (uintptr_t)MDB_TGT_AS_FILE:
427 428 return (t->t_ops->t_fwrite(t, buf, n, addr));
428 429 case (uintptr_t)MDB_TGT_AS_IO:
429 430 return (t->t_ops->t_iowrite(t, buf, n, addr));
430 431 }
431 432 return (t->t_ops->t_awrite(t, as, buf, n, addr));
432 433 }
433 434
434 435 ssize_t
435 436 mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
436 437 {
437 438 return (t->t_ops->t_vread(t, buf, n, addr));
438 439 }
439 440
440 441 ssize_t
441 442 mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
442 443 {
443 444 if (t->t_flags & MDB_TGT_F_RDWR)
444 445 return (t->t_ops->t_vwrite(t, buf, n, addr));
445 446
446 447 return (set_errno(EMDB_TGTRDONLY));
447 448 }
448 449
449 450 ssize_t
450 451 mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
451 452 {
452 453 return (t->t_ops->t_pread(t, buf, n, addr));
453 454 }
454 455
455 456 ssize_t
456 457 mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
457 458 {
458 459 if (t->t_flags & MDB_TGT_F_RDWR)
459 460 return (t->t_ops->t_pwrite(t, buf, n, addr));
460 461
461 462 return (set_errno(EMDB_TGTRDONLY));
462 463 }
463 464
464 465 ssize_t
465 466 mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
466 467 {
467 468 return (t->t_ops->t_fread(t, buf, n, addr));
468 469 }
469 470
470 471 ssize_t
471 472 mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
472 473 {
473 474 if (t->t_flags & MDB_TGT_F_RDWR)
474 475 return (t->t_ops->t_fwrite(t, buf, n, addr));
475 476
476 477 return (set_errno(EMDB_TGTRDONLY));
477 478 }
478 479
479 480 ssize_t
480 481 mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
481 482 {
482 483 return (t->t_ops->t_ioread(t, buf, n, addr));
483 484 }
484 485
485 486 ssize_t
486 487 mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
487 488 {
488 489 if (t->t_flags & MDB_TGT_F_RDWR)
489 490 return (t->t_ops->t_iowrite(t, buf, n, addr));
490 491
491 492 return (set_errno(EMDB_TGTRDONLY));
(68 lines elided)
492 493 }
493 494
494 495 int
495 496 mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
496 497 {
497 498 return (t->t_ops->t_vtop(t, as, va, pap));
498 499 }
499 500
500 501 ssize_t
501 502 mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
502 - size_t nbytes, mdb_tgt_addr_t addr)
503 + size_t nbytes, mdb_tgt_addr_t addr)
503 504 {
504 505 ssize_t n, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
505 506 char *p;
506 507
507 508 if (nread >= 0) {
508 509 if ((p = memchr(buf, '\0', nread)) != NULL)
509 510 nread = (size_t)(p - buf);
510 511 goto done;
511 512 }
512 513
513 514 nread = 0;
514 515 p = &buf[0];
515 516
516 517 while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
517 518 if (*p == '\0')
518 519 return (nread);
519 520 nread++;
520 521 addr++;
521 522 p++;
522 523 }
523 524
524 525 if (nread == 0 && n == -1)
525 526 return (-1); /* If we can't even read a byte, return -1 */
(13 lines elided)
526 527
527 528 done:
528 529 if (nbytes != 0)
529 530 buf[MIN(nread, nbytes - 1)] = '\0';
530 531
531 532 return (nread);
532 533 }
533 534
534 535 ssize_t
535 536 mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
536 - const char *buf, mdb_tgt_addr_t addr)
537 + const char *buf, mdb_tgt_addr_t addr)
537 538 {
538 539 ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
539 540 return (nwritten > 0 ? nwritten - 1 : nwritten);
540 541 }
541 542
542 543 int
543 544 mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
544 - const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
545 + const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
545 546 {
546 547 mdb_syminfo_t info;
547 548 GElf_Sym sym;
548 549 uint_t id;
549 550
550 551 if (name == NULL || t == NULL)
551 552 return (set_errno(EINVAL));
552 553
553 554 if (obj == MDB_TGT_OBJ_EVERY &&
554 555 mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
555 556 info.sym_table = MDB_TGT_PRVSYM;
556 557 info.sym_id = id;
557 558 goto found;
558 559 }
559 560
560 561 if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
561 562 goto found;
562 563
563 564 return (-1);
564 565
(10 lines elided)
565 566 found:
566 567 if (symp != NULL)
567 568 *symp = sym;
568 569 if (sip != NULL)
569 570 *sip = info;
570 571 return (0);
571 572 }
572 573
573 574 int
574 575 mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
575 - char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
576 + char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
576 577 {
577 578 mdb_syminfo_t info;
578 579 GElf_Sym sym;
579 580
580 581 if (t == NULL)
581 582 return (set_errno(EINVAL));
582 583
583 584 if (t->t_ops->t_lookup_by_addr(t, addr, flags,
584 585 buf, len, &sym, &info) == 0) {
585 586 if (symp != NULL)
586 587 *symp = sym;
587 588 if (sip != NULL)
588 589 *sip = info;
589 590 return (0);
590 591 }
591 592
592 593 return (-1);
593 594 }
594 595
595 596 /*
(10 lines elided)
596 597 * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
597 598 * wants to look up a scoped symbol name such as "object`symbol". It is
598 599 * implemented as a simple wrapper around mdb_tgt_lookup_by_name. Note that
599 600 * we split on the *last* occurrence of "`", so the object name itself may
600 601 * contain additional scopes whose evaluation is left to the target. This
601 602 * allows targets to implement additional scopes, such as source files,
602 603 * function names, link map identifiers, etc.
603 604 */
604 605 int
605 606 mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
606 - mdb_syminfo_t *sip)
607 + mdb_syminfo_t *sip)
607 608 {
608 609 const char *object = MDB_TGT_OBJ_EVERY;
609 610 const char *name = s;
610 611 char buf[MDB_TGT_SYM_NAMLEN];
611 612
612 613 if (t == NULL)
613 614 return (set_errno(EINVAL));
614 615
615 616 if (strchr(name, '`') != NULL) {
616 617
617 618 (void) strncpy(buf, s, sizeof (buf));
618 619 buf[sizeof (buf) - 1] = '\0';
619 620 name = buf;
620 621
621 622 if ((s = strrsplit(buf, '`')) != NULL) {
622 623 object = buf;
623 624 name = s;
624 625 if (*object == '\0')
625 626 return (set_errno(EMDB_NOOBJ));
(9 lines elided)
626 627 if (*name == '\0')
627 628 return (set_errno(EMDB_NOSYM));
628 629 }
629 630 }
630 631
631 632 return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
632 633 }
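
[Reviewer note, not part of this change.] A short usage sketch of the scoped lookup above, assuming a caller that already holds a target pointer; the "libc.so.1`malloc" string and the lookup_example wrapper are hypothetical, and error reporting via mdb_warn() is illustrative only. The split happens on the last '`', so the object part may itself contain further scopes that the target evaluates.

/* Sketch only -- illustrative, not part of this change. */
static void
lookup_example(mdb_tgt_t *t)
{
	GElf_Sym sym;
	mdb_syminfo_t si;

	/*
	 * "libc.so.1`malloc" looks up "malloc" within the libc.so.1 object;
	 * a plain "malloc" would search every object (MDB_TGT_OBJ_EVERY).
	 */
	if (mdb_tgt_lookup_by_scope(t, "libc.so.1`malloc", &sym, &si) == 0)
		mdb_printf("malloc is at %p\n", (void *)(uintptr_t)sym.st_value);
	else
		mdb_warn("scoped lookup failed");
}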
633 634
634 635 int
635 636 mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
636 - uint_t type, mdb_tgt_sym_f *cb, void *p)
637 + uint_t type, mdb_tgt_sym_f *cb, void *p)
637 638 {
638 639 if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
639 640 (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
640 641 return (set_errno(EINVAL));
641 642
642 643 return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
643 644 }
644 645
645 646 ssize_t
646 647 mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
647 - const char *obj, const char *name)
648 + const char *obj, const char *name)
648 649 {
649 650 GElf_Sym sym;
650 651
651 652 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
652 653 return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));
653 654
654 655 return (-1);
655 656 }
656 657
657 658 ssize_t
658 659 mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
659 - size_t nbytes, const char *obj, const char *name)
660 + size_t nbytes, const char *obj, const char *name)
660 661 {
661 662 GElf_Sym sym;
662 663
663 664 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
664 665 return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));
665 666
666 667 return (-1);
667 668 }
668 669
669 670 int
670 671 mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
671 672 {
672 673 return (t->t_ops->t_mapping_iter(t, cb, p));
673 674 }
674 675
675 676 int
676 677 mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
677 678 {
678 679 return (t->t_ops->t_object_iter(t, cb, p));
679 680 }
680 681
681 682 const mdb_map_t *
682 683 mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
683 684 {
684 685 return (t->t_ops->t_addr_to_map(t, addr));
685 686 }
686 687
687 688 const mdb_map_t *
688 689 mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
689 690 {
690 691 return (t->t_ops->t_name_to_map(t, name));
691 692 }
692 693
693 694 struct ctf_file *
694 695 mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
695 696 {
696 697 return (t->t_ops->t_addr_to_ctf(t, addr));
697 698 }
698 699
699 700 struct ctf_file *
700 701 mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
701 702 {
702 703 return (t->t_ops->t_name_to_ctf(t, name));
703 704 }
704 705
705 706 /*
706 707 * Return the latest target status. We just copy out our cached copy. The
707 708 * status only needs to change when the target is run, stepped, or continued.
708 709 */
709 710 int
710 711 mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
711 712 {
712 713 uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
713 714 uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
714 715 uint_t state = t->t_status.st_state;
715 716
716 717 if (tsp == NULL)
717 718 return (set_errno(EINVAL));
718 719
719 720 /*
720 721 * If we're called with the address of the target's internal status,
721 722 * then call down to update it; otherwise copy out the saved status.
722 723 */
723 724 if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
724 725 return (-1); /* errno is set for us */
725 726
726 727 /*
727 728 * Assert that our state is valid before returning it. The state must
728 729 * be valid, and DSTOP and ISTOP cannot be set simultaneously. ISTOP
729 730 * is only valid when stopped. DSTOP is only valid when running or
730 731 * stopped. If any test fails, abort the debugger.
731 732 */
732 733 if (state > MDB_TGT_LOST)
733 734 fail("invalid target state (%u)\n", state);
734 735 if (state != MDB_TGT_STOPPED && istop)
735 736 fail("target state is (%u) and ISTOP is set\n", state);
736 737 if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
737 738 fail("target state is (%u) and DSTOP is set\n", state);
738 739 if (istop && dstop)
739 740 fail("target has ISTOP and DSTOP set simultaneously\n");
740 741
741 742 if (tsp != &t->t_status)
742 743 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
743 744
744 745 return (0);
745 746 }
746 747
747 748 /*
748 749 * For the given sespec, scan its list of vespecs for ones that are marked
749 750 * temporary and delete them. We use the same method as vespec_delete below.
750 751 */
751 752 /*ARGSUSED*/
752 753 void
753 754 mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
754 755 {
755 756 mdb_vespec_t *vep, *nvep;
756 757
757 758 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
758 759 nvep = mdb_list_next(vep);
759 760
760 761 if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
761 762 MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
762 763 vep->ve_flags |= MDB_TGT_SPEC_DELETED;
763 764 mdb_tgt_vespec_rele(t, vep);
764 765 }
765 766 }
766 767 }
767 768
768 769 /*
769 770 * Prune each sespec on the active list of temporary vespecs. This function
770 771 * is called, for example, after the target finishes a continue operation.
771 772 */
772 773 void
773 774 mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
774 775 {
775 776 mdb_sespec_t *sep, *nsep;
776 777
777 778 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
778 779 nsep = mdb_list_next(sep);
779 780 mdb_tgt_sespec_prune_one(t, sep);
780 781 }
781 782 }
782 783
783 784 /*
784 785 * Transition the given sespec to the IDLE state. We invoke the destructor,
785 786 * and then move the sespec from the active list to the idle list.
786 787 */
787 788 void
788 789 mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
789 790 {
790 791 ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
791 792
792 793 if (sep->se_state == MDB_TGT_SPEC_ARMED)
793 794 (void) sep->se_ops->se_disarm(t, sep);
794 795
795 796 sep->se_ops->se_dtor(t, sep);
796 797 sep->se_data = NULL;
797 798
798 799 sep->se_state = MDB_TGT_SPEC_IDLE;
799 800 sep->se_errno = reason;
800 801
801 802 mdb_list_delete(&t->t_active, sep);
802 803 mdb_list_append(&t->t_idle, sep);
803 804
804 805 mdb_tgt_sespec_prune_one(t, sep);
805 806 }
806 807
807 808 /*
808 809 * Transition each sespec on the active list to the IDLE state. This function
809 810 * is called, for example, after the target terminates execution.
810 811 */
811 812 void
812 813 mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
813 814 {
814 815 mdb_sespec_t *sep, *nsep;
815 816 mdb_vespec_t *vep;
816 817
817 818 while ((sep = t->t_matched) != T_SE_END && clear_matched) {
818 819 for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
819 820 vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
820 821 vep = mdb_list_next(vep);
821 822 }
822 823
823 824 t->t_matched = sep->se_matched;
824 825 sep->se_matched = NULL;
825 826 mdb_tgt_sespec_rele(t, sep);
826 827 }
827 828
828 829 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
829 830 nsep = mdb_list_next(sep);
830 831 mdb_tgt_sespec_idle_one(t, sep, reason);
831 832 }
832 833 }
833 834
834 835 /*
835 836 * Attempt to transition the given sespec from the IDLE to ACTIVE state. We
836 837 * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
837 838 * and return -1 with errno set. One strange case we need to deal with here is
838 839 * the possibility that a given vespec is sitting on the idle list with its
839 840 * corresponding sespec, but it is actually a duplicate of another sespec on the
840 841 * active list. This can happen if the sespec is associated with a
841 842 * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
842 843 * activated. A more interesting reason this situation might arise is the case
843 844 * where a virtual address breakpoint is set at an address just mmap'ed by
844 845 * dlmopen. Since no symbol table information is available for this mapping
845 846 * yet, a pre-existing deferred symbolic breakpoint may already exist for this
846 847 * address, but it is on the idle list. When the symbol table is ready and the
847 848 * DLACTIVITY event occurs, we now discover that the virtual address obtained by
848 849 * evaluating the symbolic breakpoint matches the explicit virtual address of
849 850 * the active virtual breakpoint. To resolve this conflict in either case, we
850 851 * destroy the idle sespec, and attach its list of vespecs to the existing
851 852 * active sespec.
852 853 */
853 854 int
854 855 mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
855 856 {
856 857 mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);
857 858
858 859 mdb_vespec_t *nvep;
859 860 mdb_sespec_t *dup;
860 861
861 862 ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
862 863 ASSERT(vep != NULL);
863 864
864 865 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
865 866 return (0); /* cannot be activated while disabled bit set */
866 867
867 868 /*
868 869 * First search the active list for an existing, duplicate sespec to
869 870 * handle the special case described above.
870 871 */
871 872 for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
872 873 if (dup->se_ops == sep->se_ops &&
873 874 dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
874 875 ASSERT(dup != sep);
875 876 break;
876 877 }
877 878 }
878 879
879 880 /*
880 881 * If a duplicate is found, destroy the existing, idle sespec, and
881 882 * attach all of its vespecs to the duplicate sespec.
882 883 */
883 884 if (dup != NULL) {
884 885 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
885 886 mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
886 887 vep->ve_id, (void *)dup);
887 888
888 889 if (dup->se_matched != NULL)
889 890 vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
890 891
891 892 nvep = mdb_list_next(vep);
892 893 vep->ve_hits = 0;
893 894
894 895 mdb_list_delete(&sep->se_velist, vep);
895 896 mdb_tgt_sespec_rele(t, sep);
896 897
897 898 mdb_list_append(&dup->se_velist, vep);
898 899 mdb_tgt_sespec_hold(t, dup);
899 900 vep->ve_se = dup;
900 901 }
901 902
902 903 mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
903 904 (void *)sep, (void *)dup);
904 905 return (0);
905 906 }
906 907
907 908 /*
908 909 * If no duplicate is found, call the sespec's constructor. If this
909 910 * is successful, move the sespec to the active list.
910 911 */
911 912 if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
912 913 sep->se_errno = errno;
913 914 sep->se_data = NULL;
914 915
915 916 return (-1);
916 917 }
917 918
918 919 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
919 920 nvep = mdb_list_next(vep);
920 921 vep->ve_hits = 0;
921 922 }
922 923 mdb_list_delete(&t->t_idle, sep);
923 924 mdb_list_append(&t->t_active, sep);
924 925 sep->se_state = MDB_TGT_SPEC_ACTIVE;
925 926 sep->se_errno = 0;
926 927
927 928 return (0);
928 929 }
929 930
930 931 /*
931 932 * Transition each sespec on the idle list to the ACTIVE state. This function
932 933 * is called, for example, after the target's t_run() function returns. If
933 934 * the se_ctor() function fails, the specifier is not yet applicable; it will
934 935 * remain on the idle list and can be activated later.
935 936 *
936 937 * Returns 1 if there weren't any unexpected activation failures; 0 if there
937 938 * were.
938 939 */
939 940 int
940 941 mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
941 942 {
942 943 mdb_sespec_t *sep, *nsep;
943 944 int rc = 1;
944 945
945 946 for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
946 947 nsep = mdb_list_next(sep);
947 948
948 949 if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
949 950 sep->se_errno != EMDB_NOOBJ)
950 951 rc = 0;
951 952 }
952 953
953 954 return (rc);
954 955 }
955 956
956 957 /*
957 958 * Transition the given sespec to the ARMED state. Note that we attempt to
958 959 * re-arm sespecs previously in the ERROR state. If se_arm() fails the sespec
959 960 * transitions to the ERROR state but stays on the active list.
960 961 */
961 962 void
962 963 mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
963 964 {
964 965 ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
965 966
966 967 if (sep->se_state == MDB_TGT_SPEC_ARMED)
967 968 return; /* do not arm sespecs more than once */
968 969
969 970 if (sep->se_ops->se_arm(t, sep) == -1) {
970 971 sep->se_state = MDB_TGT_SPEC_ERROR;
971 972 sep->se_errno = errno;
972 973 } else {
973 974 sep->se_state = MDB_TGT_SPEC_ARMED;
974 975 sep->se_errno = 0;
975 976 }
976 977 }
977 978
978 979 /*
979 980 * Transition each sespec on the active list (except matched specs) to the
980 981 * ARMED state. This function is called prior to continuing the target.
981 982 */
982 983 void
983 984 mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
984 985 {
985 986 mdb_sespec_t *sep, *nsep;
986 987
987 988 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
988 989 nsep = mdb_list_next(sep);
989 990 if (sep->se_matched == NULL)
990 991 mdb_tgt_sespec_arm_one(t, sep);
991 992 }
992 993 }
993 994
994 995 /*
995 996 * Transition each sespec on the active list that is in the ARMED state to
996 997 * the ACTIVE state. If se_disarm() fails, the sespec is transitioned to
997 998 * the ERROR state instead, but left on the active list.
998 999 */
999 1000 static void
1000 1001 tgt_disarm_sespecs(mdb_tgt_t *t)
1001 1002 {
1002 1003 mdb_sespec_t *sep;
1003 1004
1004 1005 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1005 1006 if (sep->se_state != MDB_TGT_SPEC_ARMED)
1006 1007 continue; /* do not disarm if in ERROR state */
1007 1008
1008 1009 if (sep->se_ops->se_disarm(t, sep) == -1) {
1009 1010 sep->se_state = MDB_TGT_SPEC_ERROR;
1010 1011 sep->se_errno = errno;
1011 1012 } else {
1012 1013 sep->se_state = MDB_TGT_SPEC_ACTIVE;
1013 1014 sep->se_errno = 0;
1014 1015 }
1015 1016 }
1016 1017 }
1017 1018
1018 1019 /*
1019 1020 * Determine if the software event that triggered the most recent stop matches
1020 1021 * any of the active event specifiers. If 'all' is TRUE, we consider all
1021 1022 * sespecs in our search. If 'all' is FALSE, we only consider ARMED sespecs.
1022 1023 * If we successfully match an event, we add it to the t_matched list and
1023 1024 * place an additional hold on it.
1024 1025 */
1025 1026 static mdb_sespec_t *
1026 1027 tgt_match_sespecs(mdb_tgt_t *t, int all)
1027 1028 {
1028 1029 mdb_sespec_t *sep;
1029 1030
1030 1031 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1031 1032 if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED)
1032 1033 continue; /* restrict search to ARMED sespecs */
1033 1034
1034 1035 if (sep->se_state != MDB_TGT_SPEC_ERROR &&
1035 1036 sep->se_ops->se_match(t, sep, &t->t_status)) {
1036 1037 mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep);
1037 1038 mdb_tgt_sespec_hold(t, sep);
1038 1039 sep->se_matched = t->t_matched;
1039 1040 t->t_matched = sep;
1040 1041 }
1041 1042 }
1042 1043
1043 1044 return (t->t_matched);
1044 1045 }
1045 1046
1046 1047 /*
1047 1048 * This function provides the low-level target continue algorithm. We proceed
1048 1049 * in three phases: (1) we arm the active sespecs, except the specs matched at
1049 1050 * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1050 1051 * step over these event transitions, and then arm the corresponding sespecs,
1051 1052 * and (3) we call the appropriate low-level continue routine. Once the
1052 1053 * target stops again, we determine which sespecs were matched, and invoke the
1053 1054 * appropriate vespec callbacks and perform other vespec maintenance.
1054 1055 */
1055 1056 static int
1056 1057 tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
1057 1058 int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
1058 1059 {
1059 1060 mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
1060 1061 uintptr_t pc = t->t_status.st_pc;
1061 1062 int error = 0;
1062 1063
1063 1064 mdb_sespec_t *sep, *nsep, *matched;
1064 1065 mdb_vespec_t *vep, *nvep;
1065 1066 uintptr_t addr;
1066 1067
1067 1068 uint_t cbits = 0; /* union of pending continue bits */
1068 1069 uint_t ncont = 0; /* # of callbacks that requested cont */
1069 1070 uint_t n = 0; /* # of callbacks */
1070 1071
1071 1072 /*
1072 1073 * If the target is undead, dead, or lost, we no longer allow continue.
1073 1074 * This effectively forces the user to use ::kill or ::run after death.
1074 1075 */
1075 1076 if (t->t_status.st_state == MDB_TGT_UNDEAD)
1076 1077 return (set_errno(EMDB_TGTZOMB));
1077 1078 if (t->t_status.st_state == MDB_TGT_DEAD)
1078 1079 return (set_errno(EMDB_TGTCORE));
1079 1080 if (t->t_status.st_state == MDB_TGT_LOST)
1080 1081 return (set_errno(EMDB_TGTLOST));
(411 lines elided)
1081 1082
1082 1083 /*
1083 1084 * If any of single-step, step-over, or step-out is pending, it takes
1084 1085 * precedence over an explicit or pending continue, because these are
1085 1086 * all different specialized forms of continue.
1086 1087 */
1087 1088 if (t->t_flags & MDB_TGT_F_STEP)
1088 1089 t_cont = t->t_ops->t_step;
1089 1090 else if (t->t_flags & MDB_TGT_F_NEXT)
1090 1091 t_cont = t->t_ops->t_step;
1091 - else if (t->t_flags & MDB_TGT_F_STEP_BRANCH)
1092 - t_cont = t->t_ops->t_cont;
1093 1092 else if (t->t_flags & MDB_TGT_F_STEP_OUT)
1094 1093 t_cont = t->t_ops->t_cont;
1095 1094
1096 1095 /*
1097 1096 * To handle step-over, we ask the target to find the address past the
1098 1097 * next control transfer instruction. If an address is found, we plant
1099 1098 * a temporary breakpoint there and continue; otherwise just step.
1100 1099 */
1101 1100 if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
1102 1101 if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
1103 1102 addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
1104 1103 no_se_f, NULL) == 0) {
1105 1104 mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
1106 1105 "%s\n", mdb_strerror(errno));
1107 1106 } else
1108 1107 t_cont = t->t_ops->t_cont;
1109 1108 }
1110 1109
1111 1110 /*
1112 1111 * To handle step-out, we ask the target to find the return address of
1113 1112 * the current frame, plant a temporary breakpoint there, and continue.
(11 lines elided)
1114 1113 */
1115 1114 if (t->t_flags & MDB_TGT_F_STEP_OUT) {
1116 1115 if (t->t_ops->t_step_out(t, &addr) == -1)
1117 1116 return (-1); /* errno is set for us */
1118 1117
1119 1118 if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
1120 1119 MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
1121 1120 return (-1); /* errno is set for us */
1122 1121 }
1123 1122
1124 - /*
1125 - * To handle step-branch, we ask the target to enable it for the coming
1126 - * continue. Step-branch is incompatible with step, so don't enable it
1127 - * if we're going to be stepping.
1128 - */
1129 - if (t->t_flags & MDB_TGT_F_STEP_BRANCH && t_cont == t->t_ops->t_cont) {
1130 - if (t->t_ops->t_step_branch(t) == -1)
1131 - return (-1); /* errno is set for us */
1132 - }
1133 -
1134 1123 (void) mdb_signal_block(SIGHUP);
1135 1124 (void) mdb_signal_block(SIGTERM);
1136 1125 mdb_intr_disable();
1137 1126
1138 1127 t->t_flags &= ~T_CONT_BITS;
1139 1128 t->t_flags |= MDB_TGT_F_BUSY;
1140 1129 mdb_tgt_sespec_arm_all(t);
1141 1130
1142 1131 ASSERT(t->t_matched != NULL);
1143 1132 matched = t->t_matched;
1144 1133 t->t_matched = T_SE_END;
1145 1134
1146 1135 if (mdb.m_term != NULL)
1147 1136 IOP_SUSPEND(mdb.m_term);
1148 1137
1149 1138 /*
1150 1139 * Iterate over the matched sespec list, performing autostop processing
1151 1140 * and clearing the matched bit for each associated vespec. We then
1152 1141 * invoke each sespec's se_cont callback in order to continue past
1153 1142 * the corresponding event. If the matched list has more than one
1154 1143 * sespec, we assume that the se_cont callbacks are non-interfering.
1155 1144 */
1156 1145 for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
1157 1146 for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
1158 1147 if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
1159 1148 (vep->ve_limit && vep->ve_hits == vep->ve_limit))
1160 1149 vep->ve_hits = 0;
1161 1150
1162 1151 vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
1163 1152 vep = mdb_list_next(vep);
1164 1153 }
1165 1154
1166 1155 if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
1167 1156 error = errno ? errno : -1;
1168 1157 tgt_disarm_sespecs(t);
1169 1158 break;
1170 1159 }
1171 1160
1172 1161 if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
1173 1162 tgt_disarm_sespecs(t);
1174 1163 if (t->t_status.st_state == MDB_TGT_UNDEAD)
1175 1164 mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
1176 1165 else if (t->t_status.st_state == MDB_TGT_LOST)
1177 1166 mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
1178 1167 break;
1179 1168 }
1180 1169 }
1181 1170
1182 1171 /*
1183 1172 * Clear the se_matched field for each matched sespec, and drop the
1184 1173 * reference count since the sespec is no longer on the matched list.
1185 1174 */
1186 1175 for (sep = matched; sep != T_SE_END; sep = nsep) {
1187 1176 nsep = sep->se_matched;
1188 1177 sep->se_matched = NULL;
1189 1178 mdb_tgt_sespec_rele(t, sep);
1190 1179 }
1191 1180
1192 1181 /*
1193 1182 * If the matched list was non-empty, see if we hit another event while
1194 1183 * performing se_cont() processing. If so, don't bother continuing any
1195 1184 * further. If not, arm the sespecs on the old matched list by calling
1196 1185 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
1197 1186 */
1198 1187 if (matched != T_SE_END) {
1199 1188 if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
1200 1189 goto out; /* abort now if se_cont() failed */
1201 1190
1202 1191 if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
1203 1192 tgt_disarm_sespecs(t);
1204 1193 goto out;
1205 1194 }
1206 1195
1207 1196 mdb_tgt_sespec_arm_all(t);
1208 1197 }
1209 1198
1210 1199 if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
1211 1200 if (t_cont(t, &t->t_status) != 0)
1212 1201 error = errno ? errno : -1;
1213 1202 }
1214 1203
1215 1204 tgt_disarm_sespecs(t);
1216 1205
1217 1206 if (t->t_flags & MDB_TGT_F_UNLOAD)
1218 1207 longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);
1219 1208
1220 1209 if (t->t_status.st_state == MDB_TGT_UNDEAD)
1221 1210 mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
1222 1211 else if (t->t_status.st_state == MDB_TGT_LOST)
1223 1212 mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
1224 1213 else if (t->t_status.st_flags & MDB_TGT_ISTOP)
1225 1214 t->t_matched = tgt_match_sespecs(t, TRUE);
1226 1215 out:
1227 1216 if (mdb.m_term != NULL)
1228 1217 IOP_RESUME(mdb.m_term);
1229 1218
1230 1219 (void) mdb_signal_unblock(SIGTERM);
1231 1220 (void) mdb_signal_unblock(SIGHUP);
1232 1221 mdb_intr_enable();
1233 1222
1234 1223 for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
1235 1224 /*
1236 1225 * When we invoke a ve_callback, it may in turn request that the
1237 1226 * target continue immediately after callback processing is
1238 1227 * complete. We only allow this to occur if *all* callbacks
1239 1228 * agree to continue. To implement this behavior, we keep a
1240 1229 * count (ncont) of such requests, and only apply the cumulative
1241 1230 * continue bits (cbits) to the target if ncont is equal to the
1242 1231 * total number of callbacks that are invoked (n).
1243 1232 */
1244 1233 for (vep = mdb_list_next(&sep->se_velist);
1245 1234 vep != NULL; vep = nvep, n++) {
1246 1235 /*
1247 1236 * Place an extra hold on the current vespec and pick
1248 1237 * up the next pointer before invoking the callback: we
1249 1238 * must be prepared for the vespec to be deleted or
1250 1239 * moved to a different list by the callback.
1251 1240 */
1252 1241 mdb_tgt_vespec_hold(t, vep);
1253 1242 nvep = mdb_list_next(vep);
1254 1243
1255 1244 vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
1256 1245 vep->ve_hits++;
1257 1246
1258 1247 mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
1259 1248 mdb_nv_set_value(hitv, vep->ve_hits);
1260 1249
1261 1250 ASSERT((t->t_flags & T_CONT_BITS) == 0);
1262 1251 vep->ve_callback(t, vep->ve_id, vep->ve_data);
1263 1252
1264 1253 ncont += (t->t_flags & T_CONT_BITS) != 0;
1265 1254 cbits |= (t->t_flags & T_CONT_BITS);
1266 1255 t->t_flags &= ~T_CONT_BITS;
1267 1256
1268 1257 if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
1269 1258 if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
1270 1259 (void) mdb_tgt_vespec_delete(t,
1271 1260 vep->ve_id);
1272 1261 else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
1273 1262 (void) mdb_tgt_vespec_disable(t,
1274 1263 vep->ve_id);
1275 1264 }
1276 1265
1277 1266 if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
1278 1267 if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
1279 1268 (void) mdb_tgt_continue(t, NULL);
1280 1269 }
1281 1270
1282 1271 mdb_tgt_vespec_rele(t, vep);
1283 1272 }
1284 1273 }
1285 1274
1286 1275 if (t->t_matched != T_SE_END && ncont == n)
1287 1276 t->t_flags |= cbits; /* apply continues (see above) */
1288 1277
1289 1278 mdb_tgt_sespec_prune_all(t);
1290 1279
1291 1280 t->t_status.st_flags &= ~MDB_TGT_BUSY;
1292 1281 t->t_flags &= ~MDB_TGT_F_BUSY;
1293 1282
1294 1283 if (tsp != NULL)
1295 1284 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
1296 1285
1297 1286 if (error != 0)
1298 1287 return (set_errno(error));
1299 1288
1300 1289 return (0);
1301 1290 }
1302 1291
1303 1292 /*
1304 1293 * This function is the common glue that connects the high-level target layer
1305 1294 * continue functions (e.g. step and cont below) with the low-level
1306 1295 * tgt_continue() function above. Since vespec callbacks may perform any
1307 1296 * actions, including attempting to continue the target itself, we must be
1308 1297 * prepared to be called while the target is still marked F_BUSY. In this
1309 1298 * case, we just set a pending bit and return. When we return from the call
1310 1299 * to tgt_continue() that made us busy into the tgt_request_continue() call
1311 1300 * that is still on the stack, we will loop around and call tgt_continue()
1312 1301 * again. This allows vespecs to continue the target without recursion.
1313 1302 */
1314 1303 static int
1315 1304 tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
1316 1305 int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
1317 1306 {
1318 1307 mdb_tgt_spec_desc_t desc;
1319 1308 mdb_sespec_t *sep;
1320 1309 char buf[BUFSIZ];
1321 1310 int status;
1322 1311
1323 1312 if (t->t_flags & MDB_TGT_F_BUSY) {
1324 1313 t->t_flags |= tflag;
1325 1314 return (0);
1326 1315 }
1327 1316
1328 1317 do {
1329 1318 status = tgt_continue(t, tsp, t_cont);
1330 1319 } while (status == 0 && (t->t_flags & T_CONT_BITS));
1331 1320
1332 1321 if (status == 0) {
1333 1322 for (sep = t->t_matched; sep != T_SE_END;
1334 1323 sep = sep->se_matched) {
1335 1324 mdb_vespec_t *vep;
1336 1325
1337 1326 for (vep = mdb_list_next(&sep->se_velist); vep;
1338 1327 vep = mdb_list_next(vep)) {
1339 1328 if (vep->ve_flags & MDB_TGT_SPEC_SILENT)
1340 1329 continue;
1341 1330 warn("%s\n", sep->se_ops->se_info(t, sep,
1342 1331 vep, &desc, buf, sizeof (buf)));
1343 1332 }
1344 1333 }
1345 1334
1346 1335 mdb_callb_fire(MDB_CALLB_STCHG);
1347 1336 }
1348 1337
1349 1338 t->t_flags &= ~T_CONT_BITS;
1350 1339 return (status);
1351 1340 }
1352 1341
1353 1342 /*
1354 1343 * Restart target execution: we rely upon the underlying target implementation
1355 1344 * to do most of the work for us. In particular, we assume it will properly
1356 1345 * preserve the state of our event lists if the run fails for some reason,
1357 1346 * and that it will reset all events to the IDLE state if the run succeeds.
1358 1347 * If it is successful, we attempt to activate all of the idle sespecs. The
1359 1348 * t_run() operation is defined to leave the target stopped at the earliest
1360 1349 * possible point in execution, and then return control to the debugger,
1361 1350 * awaiting a step or continue operation to set it running again.
1362 1351 */
1363 1352 int
1364 1353 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv)
1365 1354 {
1366 1355 int i;
1367 1356
1368 1357 for (i = 0; i < argc; i++) {
1369 1358 if (argv->a_type != MDB_TYPE_STRING)
1370 1359 return (set_errno(EINVAL));
1371 1360 }
1372 1361
1373 1362 if (t->t_ops->t_run(t, argc, argv) == -1)
1374 1363 return (-1); /* errno is set for us */
1375 1364
1376 1365 t->t_flags &= ~T_CONT_BITS;
1377 1366 (void) mdb_tgt_sespec_activate_all(t);
1378 1367
1379 1368 if (mdb.m_term != NULL)
1380 1369 IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL);
1381 1370
1382 1371 return (0);
1383 1372 }
1384 1373
1385 1374 int
1386 1375 mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1387 1376 {
1388 1377 return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step));
(245 lines elided)
1389 1378 }
1390 1379
1391 1380 int
1392 1381 mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1393 1382 {
1394 1383 t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */
1395 1384 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1396 1385 }
1397 1386
1398 1387 int
1399 -mdb_tgt_step_branch(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1400 -{
1401 - t->t_flags |= MDB_TGT_F_STEP_BRANCH; /* set flag even if tgt not busy */
1402 - return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1403 -}
1404 -
1405 -int
1406 1388 mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1407 1389 {
1408 1390 t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */
1409 1391 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step));
1410 1392 }
1411 1393
1412 1394 int
1413 1395 mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1414 1396 {
1415 1397 return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont));
1416 1398 }
1417 1399
1418 1400 int
1419 1401 mdb_tgt_signal(mdb_tgt_t *t, int sig)
1420 1402 {
1421 1403 return (t->t_ops->t_signal(t, sig));
1422 1404 }
1423 1405
1424 1406 void *
1425 1407 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid)
1426 1408 {
1427 1409 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1428 1410
1429 1411 if (vep == NULL) {
1430 1412 (void) set_errno(EMDB_NOSESPEC);
1431 1413 return (NULL);
1432 1414 }
1433 1415
1434 1416 return (vep->ve_data);
1435 1417 }
1436 1418
1437 1419 /*
1438 1420 * Return a structured description and comment string for the given vespec.
1439 1421 * We fill in the common information from the vespec, and then call down to
1440 1422 * the underlying sespec to provide the comment string and modify any
1441 1423 * event type-specific information.
1442 1424 */
1443 1425 char *
1444 1426 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp,
1445 1427 char *buf, size_t nbytes)
1446 1428 {
1447 1429 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1448 1430
1449 1431 mdb_tgt_spec_desc_t desc;
1450 1432 mdb_sespec_t *sep;
1451 1433
1452 1434 if (vep == NULL) {
1453 1435 if (sp != NULL)
1454 1436 bzero(sp, sizeof (mdb_tgt_spec_desc_t));
1455 1437 (void) set_errno(EMDB_NOSESPEC);
1456 1438 return (NULL);
1457 1439 }
1458 1440
1459 1441 if (sp == NULL)
1460 1442 sp = &desc;
1461 1443
1462 1444 sep = vep->ve_se;
1463 1445
1464 1446 sp->spec_id = vep->ve_id;
1465 1447 sp->spec_flags = vep->ve_flags;
1466 1448 sp->spec_hits = vep->ve_hits;
1467 1449 sp->spec_limit = vep->ve_limit;
1468 1450 sp->spec_state = sep->se_state;
1469 1451 sp->spec_errno = sep->se_errno;
1470 1452 sp->spec_base = NULL;
1471 1453 sp->spec_size = 0;
1472 1454 sp->spec_data = vep->ve_data;
1473 1455
1474 1456 return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes));
1475 1457 }
1476 1458
1477 1459 /*
1478 1460 * Qsort callback for sorting vespecs by VID, used below.
1479 1461 */
1480 1462 static int
1481 1463 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp)
1482 1464 {
1483 1465 return ((*lp)->ve_id - (*rp)->ve_id);
1484 1466 }
1485 1467
1486 1468 /*
1487 1469 * Iterate over all vespecs and call the specified callback function with the
1488 1470 * corresponding VID and caller data pointer. We want the callback function
1489 1471 * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1490 1472 * to take actions such as deleting the vespec itself, so we cannot simply
1491 1473 * iterate over the lists. Instead, we pre-allocate an array of vespec
1492 1474 * pointers, fill it in and place an additional hold on each vespec, and then
1493 1475 * sort it. After the callback has been executed on each vespec in the
1494 1476 * sorted array, we remove our hold and free the temporary array.
1495 1477 */
1496 1478 int
1497 1479 mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p)
1498 1480 {
1499 1481 mdb_vespec_t **veps, **vepp, **vend;
1500 1482 mdb_vespec_t *vep, *nvep;
1501 1483 mdb_sespec_t *sep;
1502 1484
1503 1485 uint_t vecnt = t->t_vecnt;
1504 1486
1505 1487 veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP);
1506 1488 vend = veps + vecnt;
1507 1489 vepp = veps;
1508 1490
1509 1491 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1510 1492 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1511 1493 mdb_tgt_vespec_hold(t, vep);
1512 1494 nvep = mdb_list_next(vep);
1513 1495 *vepp++ = vep;
1514 1496 }
1515 1497 }
1516 1498
1517 1499 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
1518 1500 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1519 1501 mdb_tgt_vespec_hold(t, vep);
1520 1502 nvep = mdb_list_next(vep);
1521 1503 *vepp++ = vep;
1522 1504 }
1523 1505 }
1524 1506
1525 1507 if (vepp != vend) {
1526 1508 fail("target has %u vespecs on list but vecnt shows %u\n",
1527 1509 (uint_t)(vepp - veps), vecnt);
1528 1510 }
1529 1511
1530 1512 qsort(veps, vecnt, sizeof (mdb_vespec_t *),
1531 1513 (int (*)(const void *, const void *))tgt_vespec_compare);
1532 1514
1533 1515 for (vepp = veps; vepp < vend; vepp++) {
1534 1516 if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0)
1535 1517 break;
1536 1518 }
1537 1519
1538 1520 for (vepp = veps; vepp < vend; vepp++)
1539 1521 mdb_tgt_vespec_rele(t, *vepp);
1540 1522
1541 1523 mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt);
1542 1524 return (0);
1543 1525 }
1544 1526
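Because the iterator places a hold on every vespec before running the callback, a callback may safely delete the vespec it is handed. A minimal caller sketch, assuming <mdb/mdb_target.h> is in scope; the helper names and the pruning policy are hypothetical, but the callback signature mirrors the func(t, p, ve_id, ve_data) invocation above:

static int
prune_cb(mdb_tgt_t *t, void *arg, int vid, void *data)
{
	uint_t *countp = arg;

	(*countp)++;				/* tally every vespec seen */
	if (data == NULL)			/* hypothetical policy */
		(void) mdb_tgt_vespec_delete(t, vid);	/* safe: held above */

	return (0);				/* nonzero stops the walk */
}

static uint_t
prune_vespecs(mdb_tgt_t *t)
{
	uint_t count = 0;

	(void) mdb_tgt_vespec_iter(t, prune_cb, &count);
	return (count);
}
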
1545 1527 /*
1546 1528 * Reset the vespec flags, match limit, and callback data to the specified
1547 1529 * values. We silently correct invalid parameters, except for the VID.
1548 1530 * The caller is required to query the existing properties and pass back
1549 1531 * the existing values for any properties that should not be modified.
1550 1532 * If the callback data is modified, the caller is responsible for cleaning
1551 1533 * up any state associated with the previous value.
1552 1534 */
1553 1535 int
1554 1536 mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags,
1555 1537 uint_t limit, void *data)
1556 1538 {
1557 1539 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1558 1540
1559 1541 if (vep == NULL)
1560 1542 return (set_errno(EMDB_NOSESPEC));
1561 1543
1562 1544 /*
1563 1545 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
1564 1546 * appropriate vespec function to do the enable/disable work.
1565 1547 */
1566 1548 if ((flags & MDB_TGT_SPEC_DISABLED) !=
1567 1549 (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) {
1568 1550 if (flags & MDB_TGT_SPEC_DISABLED)
1569 1551 (void) mdb_tgt_vespec_disable(t, id);
1570 1552 else
1571 1553 (void) mdb_tgt_vespec_enable(t, id);
1572 1554 }
1573 1555
1574 1556 /*
1575 1557 	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
1576 1558 * value: extra bits are cleared according to order of precedence.
1577 1559 */
1578 1560 if (flags & MDB_TGT_SPEC_AUTOSTOP)
1579 1561 flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
1580 1562 else if (flags & MDB_TGT_SPEC_AUTODEL)
1581 1563 flags &= ~MDB_TGT_SPEC_AUTODIS;
1582 1564
1583 1565 /*
1584 1566 * The TEMPORARY property always takes precedence over STICKY.
1585 1567 */
1586 1568 if (flags & MDB_TGT_SPEC_TEMPORARY)
1587 1569 flags &= ~MDB_TGT_SPEC_STICKY;
1588 1570
1589 1571 /*
1590 1572 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
1591 1573 * back to zero and clear all of the old auto bits.
1592 1574 */
1593 1575 if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) {
1594 1576 vep->ve_flags &= ~T_AUTO_BITS;
1595 1577 vep->ve_hits = 0;
1596 1578 }
1597 1579
1598 1580 vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS);
1599 1581 vep->ve_data = data;
1600 1582
1601 1583 /*
1602 1584 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
1603 1585 * least one. If none are set, reset it back to zero.
1604 1586 */
1605 1587 if (vep->ve_flags & T_AUTO_BITS)
1606 1588 vep->ve_limit = MAX(limit, 1);
1607 1589 else
1608 1590 vep->ve_limit = 0;
1609 1591
1610 1592 /*
1611 1593 * As a convenience, we allow the caller to specify SPEC_DELETED in
1612 1594 * the flags field as indication that the event should be deleted.
1613 1595 */
1614 1596 if (flags & MDB_TGT_SPEC_DELETED)
1615 1597 (void) mdb_tgt_vespec_delete(t, id);
1616 1598
1617 1599 return (0);
1618 1600 }
1619 1601
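Since unspecified properties are not preserved automatically, callers follow a query-then-modify pattern with mdb_tgt_vespec_info. A hedged illustration; the helper name, buffer size, and the choice to add the TEMPORARY flag are assumptions for the example:

static int
make_temporary(mdb_tgt_t *t, int id)
{
	mdb_tgt_spec_desc_t desc;
	char buf[256];

	if (mdb_tgt_vespec_info(t, id, &desc, buf, sizeof (buf)) == NULL)
		return (-1);	/* errno is set for us */

	return (mdb_tgt_vespec_modify(t, id,
	    desc.spec_flags | MDB_TGT_SPEC_TEMPORARY,
	    desc.spec_limit, desc.spec_data));
}
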
1620 1602 /*
1621 1603 * Remove the user disabled bit from the specified vespec, and attempt to
1622 1604 * activate the underlying sespec and move it to the active list if possible.
1623 1605 */
1624 1606 int
1625 1607 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id)
1626 1608 {
1627 1609 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1628 1610
1629 1611 if (vep == NULL)
1630 1612 return (set_errno(EMDB_NOSESPEC));
1631 1613
1632 1614 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) {
1633 1615 ASSERT(mdb_list_next(vep) == NULL);
1634 1616 vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED;
1635 1617 if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0)
1636 1618 return (-1); /* errno is set for us */
1637 1619 }
1638 1620
1639 1621 return (0);
1640 1622 }
1641 1623
1642 1624 /*
1643 1625 * Set the user disabled bit on the specified vespec, and move it to the idle
1644 1626 * list. If the vespec is not alone with its sespec or if it is a currently
1645 1627 * matched event, we must always create a new idle sespec and move the vespec
1646 1628 * there. If the vespec was alone and active, we can simply idle the sespec.
1647 1629 */
1648 1630 int
1649 1631 mdb_tgt_vespec_disable(mdb_tgt_t *t, int id)
1650 1632 {
1651 1633 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1652 1634 mdb_sespec_t *sep;
1653 1635
1654 1636 if (vep == NULL)
1655 1637 return (set_errno(EMDB_NOSESPEC));
1656 1638
1657 1639 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
1658 1640 return (0); /* already disabled */
1659 1641
1660 1642 if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL ||
1661 1643 vep->ve_se->se_matched != NULL) {
1662 1644
1663 1645 sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle);
1664 1646
1665 1647 mdb_list_delete(&vep->ve_se->se_velist, vep);
1666 1648 mdb_tgt_sespec_rele(t, vep->ve_se);
1667 1649
1668 1650 mdb_list_append(&sep->se_velist, vep);
1669 1651 mdb_tgt_sespec_hold(t, sep);
1670 1652
1671 1653 vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
1672 1654 vep->ve_se = sep;
1673 1655
1674 1656 } else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE)
1675 1657 mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS);
1676 1658
1677 1659 vep->ve_flags |= MDB_TGT_SPEC_DISABLED;
1678 1660 return (0);
1679 1661 }
1680 1662
1681 1663 /*
1682 1664 * Delete the given vespec. We use the MDB_TGT_SPEC_DELETED flag to ensure that
1683 1665  * multiple calls to mdb_tgt_vespec_delete do not attempt to decrement the
1684 1666 * reference count on the vespec more than once. This is because the vespec
1685 1667 * may remain referenced if it is currently held by another routine (e.g.
1686 1668 * vespec_iter), and so the user could attempt to delete it more than once
1687 1669  * since its reference count will be >= 2 prior to the first delete call.
1688 1670 */
1689 1671 int
1690 1672 mdb_tgt_vespec_delete(mdb_tgt_t *t, int id)
1691 1673 {
1692 1674 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1693 1675
1694 1676 if (vep == NULL)
1695 1677 return (set_errno(EMDB_NOSESPEC));
1696 1678
1697 1679 if (vep->ve_flags & MDB_TGT_SPEC_DELETED)
1698 1680 return (set_errno(EBUSY));
1699 1681
1700 1682 vep->ve_flags |= MDB_TGT_SPEC_DELETED;
1701 1683 mdb_tgt_vespec_rele(t, vep);
1702 1684 return (0);
1703 1685 }
1704 1686
1705 1687 int
1706 1688 mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
1707 1689 int spec_flags, mdb_tgt_se_f *func, void *p)
1708 1690 {
1709 1691 return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p));
1710 1692 }
1711 1693
1712 1694 int
1713 1695 mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol,
1714 1696 int spec_flags, mdb_tgt_se_f *func, void *p)
1715 1697 {
1716 1698 return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p));
1717 1699 }
1718 1700
1719 1701 int
1720 1702 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags,
1721 1703 int spec_flags, mdb_tgt_se_f *func, void *p)
1722 1704 {
1723 1705 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1724 1706 (void) set_errno(EINVAL);
1725 1707 return (0);
1726 1708 }
1727 1709
1728 1710 if (pa + n < pa) {
1729 1711 (void) set_errno(EMDB_WPRANGE);
1730 1712 return (0);
1731 1713 }
1732 1714
1733 1715 return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p));
1734 1716 }
1735 1717
1736 1718 int
1737 1719 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags,
1738 1720 int spec_flags, mdb_tgt_se_f *func, void *p)
1739 1721 {
1740 1722 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1741 1723 (void) set_errno(EINVAL);
1742 1724 return (0);
1743 1725 }
1744 1726
1745 1727 if (va + n < va) {
1746 1728 (void) set_errno(EMDB_WPRANGE);
1747 1729 return (0);
1748 1730 }
1749 1731
1750 1732 return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p));
1751 1733 }
1752 1734
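Note that the argument checks in these watchpoint wrappers fail by returning 0, which is never a valid VID, with errno set. A caller sketch for a write watchpoint; MDB_TGT_WA_W is assumed to be one of the MDB_TGT_WA_RWX access bits, and no_se_f is the no-op callback defined later in this file:

static int
watch_word(mdb_tgt_t *t, uintptr_t va)
{
	int vid;

	/* watch one pointer-sized word for write access */
	vid = mdb_tgt_add_vwapt(t, va, sizeof (uintptr_t), MDB_TGT_WA_W,
	    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL);

	if (vid == 0)
		warn("failed to add watchpoint at %p", (void *)va);

	return (vid);
}
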
1753 1735 int
1754 1736 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags,
1755 1737 int spec_flags, mdb_tgt_se_f *func, void *p)
1756 1738 {
1757 1739 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1758 1740 (void) set_errno(EINVAL);
1759 1741 return (0);
1760 1742 }
1761 1743
1762 1744 if (addr + n < addr) {
1763 1745 (void) set_errno(EMDB_WPRANGE);
1764 1746 return (0);
1765 1747 }
1766 1748
1767 1749 return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p));
1768 1750 }
1769 1751
1770 1752 int
1771 1753 mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum,
1772 1754 int spec_flags, mdb_tgt_se_f *func, void *p)
1773 1755 {
1774 1756 return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p));
1775 1757 }
1776 1758
1777 1759 int
1778 1760 mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum,
1779 1761 int spec_flags, mdb_tgt_se_f *func, void *p)
1780 1762 {
1781 1763 return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p));
1782 1764 }
1783 1765
1784 1766 int
1785 1767 mdb_tgt_add_signal(mdb_tgt_t *t, int sig,
1786 1768 int spec_flags, mdb_tgt_se_f *func, void *p)
1787 1769 {
1788 1770 return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p));
1789 1771 }
1790 1772
1791 1773 int
1792 1774 mdb_tgt_add_fault(mdb_tgt_t *t, int flt,
1793 1775 int spec_flags, mdb_tgt_se_f *func, void *p)
1794 1776 {
1795 1777 return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p));
1796 1778 }
1797 1779
1798 1780 int
1799 1781 mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1800 1782 const char *rname, mdb_tgt_reg_t *rp)
1801 1783 {
1802 1784 return (t->t_ops->t_getareg(t, tid, rname, rp));
1803 1785 }
1804 1786
1805 1787 int
1806 1788 mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1807 1789 const char *rname, mdb_tgt_reg_t r)
1808 1790 {
1809 1791 return (t->t_ops->t_putareg(t, tid, rname, r));
1810 1792 }
1811 1793
1812 1794 int
1813 1795 mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs,
1814 1796 mdb_tgt_stack_f *cb, void *p)
1815 1797 {
1816 1798 return (t->t_ops->t_stack_iter(t, gregs, cb, p));
1817 1799 }
1818 1800
1819 1801 int
1820 1802 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private)
1821 1803 {
1822 1804 mdb_xdata_t *xdp;
1823 1805
1824 1806 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1825 1807 if (func(private, xdp->xd_name, xdp->xd_desc,
1826 1808 xdp->xd_copy(t, NULL, 0)) != 0)
1827 1809 break;
1828 1810 }
1829 1811
1830 1812 return (0);
1831 1813 }
1832 1814
1833 1815 ssize_t
1834 1816 mdb_tgt_getxdata(mdb_tgt_t *t, const char *name, void *buf, size_t nbytes)
1835 1817 {
1836 1818 mdb_xdata_t *xdp;
1837 1819
1838 1820 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1839 1821 if (strcmp(xdp->xd_name, name) == 0)
1840 1822 return (xdp->xd_copy(t, buf, nbytes));
1841 1823 }
1842 1824
1843 1825 return (set_errno(ENODATA));
1844 1826 }
1845 1827
1846 1828 long
1847 1829 mdb_tgt_notsup()
1848 1830 {
1849 1831 return (set_errno(EMDB_TGTNOTSUP));
1850 1832 }
1851 1833
1852 1834 void *
1853 1835 mdb_tgt_null()
1854 1836 {
1855 1837 (void) set_errno(EMDB_TGTNOTSUP);
1856 1838 return (NULL);
1857 1839 }
1858 1840
1859 1841 long
1860 1842 mdb_tgt_nop()
1861 1843 {
1862 1844 return (0L);
1863 1845 }
1864 1846
1865 1847 int
1866 1848 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc,
1867 - ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1849 + ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1868 1850 {
1869 1851 mdb_xdata_t *xdp;
1870 1852
1871 1853 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1872 1854 if (strcmp(xdp->xd_name, name) == 0)
1873 1855 return (set_errno(EMDB_XDEXISTS));
1874 1856 }
1875 1857
1876 1858 xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP);
1877 1859 mdb_list_append(&t->t_xdlist, xdp);
1878 1860
1879 1861 xdp->xd_name = name;
1880 1862 xdp->xd_desc = desc;
1881 1863 xdp->xd_copy = copy;
1882 1864
1883 1865 return (0);
1884 1866 }
1885 1867
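A copy routine registered here is expected to follow the sizing convention relied upon by mdb_tgt_xdata_iter above: when invoked with a NULL buffer it reports the full size of the export, and otherwise it copies up to nbytes. A hypothetical provider, with the export name and payload invented for illustration:

/*ARGSUSED*/
static ssize_t
example_xd_copy(mdb_tgt_t *t, void *buf, size_t nbytes)
{
	static const char payload[] = "example payload";
	size_t n = MIN(nbytes, sizeof (payload));

	if (buf == NULL)
		return (sizeof (payload));	/* size query */

	bcopy(payload, buf, n);
	return (n);
}

static void
example_xd_register(mdb_tgt_t *t)
{
	(void) mdb_tgt_xdata_insert(t, "example",
	    "illustrative export", example_xd_copy);
}
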
1886 1868 int
1887 1869 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name)
1888 1870 {
1889 1871 mdb_xdata_t *xdp;
1890 1872
1891 1873 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1892 1874 if (strcmp(xdp->xd_name, name) == 0) {
1893 1875 mdb_list_delete(&t->t_xdlist, xdp);
1894 1876 mdb_free(xdp, sizeof (mdb_xdata_t));
1895 1877 return (0);
1896 1878 }
1897 1879 }
1898 1880
1899 1881 return (set_errno(EMDB_NOXD));
1900 1882 }
1901 1883
1902 1884 int
1903 1885 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask)
1904 1886 {
1905 1887 #if STT_NUM != (STT_TLS + 1)
1906 1888 #error "STT_NUM has grown. update mdb_tgt_sym_match()"
1907 1889 #endif
1908 1890
1909 1891 uchar_t s_bind = GELF_ST_BIND(sym->st_info);
1910 1892 uchar_t s_type = GELF_ST_TYPE(sym->st_info);
1911 1893
1912 1894 /*
1913 1895 * In case you haven't already guessed, this relies on the bitmask
1914 1896 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol
1915 1897 * type and binding matching the order of STB and STT constants
1916 1898 * in <sys/elf.h>. Changes to ELF must maintain binary
1917 1899 * compatibility, so I think this is reasonably fair game.
1918 1900 */
1919 1901 if (s_bind < STB_NUM && s_type < STT_NUM) {
1920 1902 uint_t type = (1 << (s_type + 8)) | (1 << s_bind);
1921 1903 return ((type & ~mask) == 0);
1922 1904 }
1923 1905
1924 1906 return (0); /* Unknown binding or type; fail to match */
1925 1907 }
1926 1908
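As a worked example of the encoding: a global function symbol has STB_GLOBAL (1) and STT_FUNC (2) in <sys/elf.h>, so the computed value is (1 << (2 + 8)) | (1 << 1) = 0x402, and the symbol matches exactly those masks that include both of those bits, since (type & ~mask) must be zero.
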
1927 1909 void
1928 1910 mdb_tgt_elf_export(mdb_gelf_file_t *gf)
1929 1911 {
1930 1912 GElf_Xword d = 0, t = 0;
1931 1913 GElf_Addr b = 0, e = 0;
1932 1914 uint32_t m = 0;
1933 1915 mdb_var_t *v;
1934 1916
1935 1917 /*
1936 1918 * Reset legacy adb variables based on the specified ELF object file
1937 1919 * provided by the target. We define these variables:
1938 1920 *
1939 1921 * b - the address of the data segment (first writeable Phdr)
1940 1922 * d - the size of the data segment
1941 1923 * e - the address of the entry point
1942 1924 * m - the magic number identifying the file
1943 1925 	 * t - the size of the text segment (first executable Phdr)
1944 1926 */
1945 1927 if (gf != NULL) {
1946 1928 const GElf_Phdr *text = NULL, *data = NULL;
1947 1929 size_t i;
1948 1930
1949 1931 e = gf->gf_ehdr.e_entry;
1950 1932 bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m));
1951 1933
1952 1934 for (i = 0; i < gf->gf_npload; i++) {
1953 1935 if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X))
1954 1936 text = &gf->gf_phdrs[i];
1955 1937 if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W))
1956 1938 data = &gf->gf_phdrs[i];
1957 1939 }
1958 1940
1959 1941 if (text != NULL)
1960 1942 t = text->p_memsz;
1961 1943 if (data != NULL) {
1962 1944 b = data->p_vaddr;
1963 1945 d = data->p_memsz;
1964 1946 }
1965 1947 }
1966 1948
1967 1949 if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL)
1968 1950 mdb_nv_set_value(v, b);
1969 1951 if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != NULL)
1970 1952 mdb_nv_set_value(v, d);
1971 1953 if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL)
1972 1954 mdb_nv_set_value(v, e);
1973 1955 if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL)
1974 1956 mdb_nv_set_value(v, m);
1975 1957 if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL)
1976 1958 mdb_nv_set_value(v, t);
1977 1959 }
1978 1960
1979 1961 /*ARGSUSED*/
1980 1962 void
1981 1963 mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep)
1982 1964 {
1983 1965 sep->se_refs++;
1984 1966 ASSERT(sep->se_refs != 0);
1985 1967 }
1986 1968
1987 1969 void
1988 1970 mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep)
1989 1971 {
1990 1972 ASSERT(sep->se_refs != 0);
1991 1973
1992 1974 if (--sep->se_refs == 0) {
1993 1975 mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep);
1994 1976 ASSERT(mdb_list_next(&sep->se_velist) == NULL);
1995 1977
1996 1978 if (sep->se_state != MDB_TGT_SPEC_IDLE) {
1997 1979 sep->se_ops->se_dtor(t, sep);
1998 1980 mdb_list_delete(&t->t_active, sep);
1999 1981 } else
2000 1982 mdb_list_delete(&t->t_idle, sep);
2001 1983
2002 1984 mdb_free(sep, sizeof (mdb_sespec_t));
2003 1985 }
2004 1986 }
2005 1987
2006 1988 mdb_sespec_t *
2007 1989 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list)
2008 1990 {
2009 1991 mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP);
2010 1992
2011 1993 if (list == &t->t_active)
2012 1994 sep->se_state = MDB_TGT_SPEC_ACTIVE;
2013 1995 else
2014 1996 sep->se_state = MDB_TGT_SPEC_IDLE;
2015 1997
2016 1998 mdb_list_append(list, sep);
2017 1999 sep->se_ops = ops;
2018 2000 return (sep);
2019 2001 }
2020 2002
2021 2003 mdb_sespec_t *
2022 2004 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2023 2005 {
2024 2006 mdb_sespec_t *sep;
2025 2007
2026 2008 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2027 2009 if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args))
2028 2010 break;
2029 2011 }
2030 2012
2031 2013 return (sep);
2032 2014 }
2033 2015
2034 2016 mdb_sespec_t *
2035 2017 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2036 2018 {
2037 2019 mdb_sespec_t *sep;
2038 2020
2039 2021 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2040 2022 if (sep->se_ops == ops && sep->se_ops->se_vecmp(t,
2041 2023 mdb_list_next(&sep->se_velist), args))
2042 2024 break;
2043 2025 }
2044 2026
2045 2027 return (sep);
2046 2028 }
2047 2029
2048 2030 /*ARGSUSED*/
2049 2031 void
2050 2032 mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep)
2051 2033 {
2052 2034 vep->ve_refs++;
2053 2035 ASSERT(vep->ve_refs != 0);
2054 2036 }
2055 2037
2056 2038 void
2057 2039 mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep)
2058 2040 {
2059 2041 ASSERT(vep->ve_refs != 0);
2060 2042
2061 2043 if (--vep->ve_refs == 0) {
2062 2044 /*
2063 2045 * Remove this vespec from the sespec's velist and decrement
2064 2046 * the reference count on the sespec.
2065 2047 */
2066 2048 mdb_list_delete(&vep->ve_se->se_velist, vep);
2067 2049 mdb_tgt_sespec_rele(t, vep->ve_se);
2068 2050
2069 2051 /*
2070 2052 * If we are deleting the most recently assigned VID, reset
2071 2053 * t_vepos or t_veneg as appropriate to re-use that number.
2072 2054 * This could be enhanced to re-use any free number by
2073 2055 * maintaining a bitmap or hash of the allocated IDs.
2074 2056 */
2075 2057 if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1)
2076 2058 t->t_vepos = vep->ve_id;
2077 2059 else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1)
2078 2060 t->t_veneg = -vep->ve_id;
2079 2061
2080 2062 /*
2081 2063 * Call the destructor to clean up ve_args, and then free
2082 2064 * the actual vespec structure.
2083 2065 */
2084 2066 vep->ve_dtor(vep);
2085 2067 mdb_free(vep, sizeof (mdb_vespec_t));
2086 2068
2087 2069 ASSERT(t->t_vecnt != 0);
2088 2070 t->t_vecnt--;
2089 2071 }
2090 2072 }
2091 2073
2092 2074 int
2093 2075 mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags,
2094 2076 mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *))
2095 2077 {
2096 2078 mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP);
2097 2079
2098 2080 int id, mult, *seqp;
2099 2081 mdb_sespec_t *sep;
2100 2082
2101 2083 /*
2102 2084 	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
2103 2085 * value: extra bits are cleared according to order of precedence.
2104 2086 */
2105 2087 if (flags & MDB_TGT_SPEC_AUTOSTOP)
2106 2088 flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
2107 2089 else if (flags & MDB_TGT_SPEC_AUTODEL)
2108 2090 flags &= ~MDB_TGT_SPEC_AUTODIS;
2109 2091
2110 2092 /*
2111 2093 * The TEMPORARY property always takes precedence over STICKY.
2112 2094 */
2113 2095 if (flags & MDB_TGT_SPEC_TEMPORARY)
2114 2096 flags &= ~MDB_TGT_SPEC_STICKY;
2115 2097
2116 2098 /*
2117 2099 * Find a matching sespec or create a new one on the appropriate list.
2118 2100 * We always create a new sespec if the vespec is created disabled.
2119 2101 */
2120 2102 if (flags & MDB_TGT_SPEC_DISABLED)
2121 2103 sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle);
2122 2104 else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL &&
2123 2105 (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL)
2124 2106 sep = mdb_tgt_sespec_insert(t, ops, &t->t_active);
2125 2107
2126 2108 /*
2127 2109 * Generate a new ID for the vespec. Increasing positive integers are
2128 2110 * assigned to visible vespecs; decreasing negative integers are
2129 2111 * assigned to hidden vespecs. The target saves our most recent choice.
2130 2112 */
2131 2113 if (flags & MDB_TGT_SPEC_INTERNAL) {
2132 2114 seqp = &t->t_veneg;
2133 2115 mult = -1;
2134 2116 } else {
2135 2117 seqp = &t->t_vepos;
2136 2118 mult = 1;
2137 2119 }
2138 2120
2139 2121 id = *seqp;
2140 2122
2141 2123 while (mdb_tgt_vespec_lookup(t, id * mult) != NULL)
2142 2124 id = MAX(id + 1, 1);
2143 2125
2144 2126 *seqp = MAX(id + 1, 1);
2145 2127
2146 2128 vep->ve_id = id * mult;
2147 2129 vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED);
2148 2130 vep->ve_se = sep;
2149 2131 vep->ve_callback = func;
2150 2132 vep->ve_data = data;
2151 2133 vep->ve_args = args;
2152 2134 vep->ve_dtor = dtor;
2153 2135
2154 2136 mdb_list_append(&sep->se_velist, vep);
2155 2137 mdb_tgt_sespec_hold(t, sep);
2156 2138
2157 2139 mdb_tgt_vespec_hold(t, vep);
2158 2140 t->t_vecnt++;
2159 2141
2160 2142 /*
2161 2143 * If this vespec is the first reference to the sespec and it's active,
2162 2144 * then it is newly created and we should attempt to initialize it.
2163 2145 * If se_ctor fails, then move the sespec back to the idle list.
2164 2146 */
2165 2147 if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2166 2148 sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) {
2167 2149
2168 2150 mdb_list_delete(&t->t_active, sep);
2169 2151 mdb_list_append(&t->t_idle, sep);
2170 2152
2171 2153 sep->se_state = MDB_TGT_SPEC_IDLE;
2172 2154 sep->se_errno = errno;
2173 2155 sep->se_data = NULL;
2174 2156 }
2175 2157
2176 2158 /*
2177 2159 * If the sespec is active and the target is currently running (because
2178 2160 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
2179 2161 * the sespec so it will take effect immediately.
2180 2162 */
2181 2163 if (sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2182 2164 t->t_status.st_state == MDB_TGT_RUNNING)
2183 2165 mdb_tgt_sespec_arm_one(t, sep);
2184 2166
2185 2167 mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n",
2186 2168 vep->ve_id, (void *)sep, sep->se_refs, sep->se_state);
2187 2169
2188 2170 return (vep->ve_id);
2189 2171 }
2190 2172
2191 2173 /*
2192 2174 * Search the target's active, idle, and disabled lists for the vespec matching
2193 2175 * the specified VID, and return a pointer to it, or NULL if no match is found.
2194 2176 */
2195 2177 mdb_vespec_t *
2196 2178 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid)
2197 2179 {
2198 2180 mdb_sespec_t *sep;
2199 2181 mdb_vespec_t *vep;
2200 2182
2201 2183 if (vid == 0)
2202 2184 return (NULL); /* 0 is never a valid VID */
2203 2185
2204 2186 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2205 2187 for (vep = mdb_list_next(&sep->se_velist); vep;
2206 2188 vep = mdb_list_next(vep)) {
2207 2189 if (vep->ve_id == vid)
2208 2190 return (vep);
2209 2191 }
2210 2192 }
2211 2193
2212 2194 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2213 2195 for (vep = mdb_list_next(&sep->se_velist); vep;
2214 2196 vep = mdb_list_next(vep)) {
2215 2197 if (vep->ve_id == vid)
2216 2198 return (vep);
2217 2199 }
2218 2200 }
2219 2201
2220 2202 return (NULL);
2221 2203 }
2222 2204
2223 2205 /*ARGSUSED*/
2224 2206 void
2225 2207 no_ve_dtor(mdb_vespec_t *vep)
2226 2208 {
2227 2209 /* default destructor does nothing */
2228 2210 }
2229 2211
2230 2212 /*ARGSUSED*/
2231 2213 void
2232 2214 no_se_f(mdb_tgt_t *t, int vid, void *data)
2233 2215 {
2234 2216 /* default callback does nothing */
2235 2217 }
2236 2218
2237 2219 /*ARGSUSED*/
2238 2220 void
2239 2221 no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
2240 2222 {
2241 2223 /* default destructor does nothing */
2242 2224 }
2243 2225
2244 2226 /*ARGSUSED*/
2245 2227 int
2246 2228 no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
2247 2229 {
2248 2230 return (sep->se_data == args);
2249 2231 }
2250 2232
2251 2233 /*ARGSUSED*/
2252 2234 int
2253 2235 no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
2254 2236 {
2255 2237 return (vep->ve_args == args);
2256 2238 }
2257 2239
2258 2240 /*ARGSUSED*/
2259 2241 int
2260 2242 no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
2261 2243 {
2262 2244 return (0); /* return success */
2263 2245 }
2264 2246
2265 2247 /*ARGSUSED*/
2266 2248 int
2267 2249 no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
2268 2250 {
2269 2251 return (0); /* return success */
2270 2252 }
2271 2253
2272 2254 /*ARGSUSED*/
2273 2255 int
2274 2256 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
2275 2257 {
2276 2258 if (tsp != &t->t_status)
2277 2259 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
2278 2260
2279 2261 return (0); /* return success */
2280 2262 }
2281 2263
2282 2264 int
2283 2265 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags)
2284 2266 {
2285 2267 int fail = 0;
2286 2268
2287 2269 for (; dcp->dc_name != NULL; dcp++) {
2288 2270 if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) {
2289 2271 warn("failed to add dcmd %s", dcp->dc_name);
2290 2272 fail++;
2291 2273 }
2292 2274 }
2293 2275
2294 2276 return (fail > 0 ? -1 : 0);
2295 2277 }
2296 2278
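The loop above walks the dcmd table until it reaches an entry whose dc_name is NULL. A sketch of how a target might define and register such a table; the dcmd itself, its registration point, and the use of MDB_MOD_FORCE are assumptions for illustration and rely on the usual module API declarations:

/*ARGSUSED*/
static int
example_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	mdb_printf("%p\n", addr);
	return (DCMD_OK);
}

static const mdb_dcmd_t example_dcmds[] = {
	{ "example", NULL, "echo the given address", example_dcmd },
	{ NULL }
};

static void
example_activate(mdb_tgt_t *t)
{
	(void) mdb_tgt_register_dcmds(t, example_dcmds, MDB_MOD_FORCE);
}
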
2297 2279 int
2298 2280 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags)
2299 2281 {
2300 2282 int fail = 0;
2301 2283
2302 2284 for (; wp->walk_name != NULL; wp++) {
2303 2285 if (mdb_module_add_walker(t->t_module, wp, flags) == -1) {
2304 2286 warn("failed to add walk %s", wp->walk_name);
2305 2287 fail++;
2306 2288 }
2307 2289 }
2308 2290
2309 2291 return (fail > 0 ? -1 : 0);
2310 2292 }
2311 2293
2312 2294 void
2313 2295 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp,
2314 2296 const mdb_nv_disc_t *disc, int flags)
2315 2297 {
2316 2298 for (; rdp->rd_name != NULL; rdp++) {
2317 2299 if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
2318 2300 continue; /* Don't export register as a variable */
2319 2301
2320 2302 if (rdp->rd_flags & MDB_TGT_R_RDONLY)
2321 2303 flags |= MDB_NV_RDONLY;
2322 2304
2323 2305 (void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc,
2324 2306 (uintptr_t)t, MDB_NV_PERSIST | flags);
2325 2307 }
2326 2308 }