XXX AVX procfs
--- old/usr/src/lib/libc_db/common/thread_db.c
+++ new/usr/src/lib/libc_db/common/thread_db.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <stdio.h>
28 28 #include <stdlib.h>
29 29 #include <stddef.h>
30 30 #include <unistd.h>
31 31 #include <thr_uberdata.h>
32 32 #include <thread_db.h>
33 33 #include <libc_int.h>
34 34
35 35 /*
36 36 * Private structures.
37 37 */
38 38
39 39 typedef union {
40 40 mutex_t lock;
41 41 rwlock_t rwlock;
42 42 sema_t semaphore;
43 43 cond_t condition;
44 44 } td_so_un_t;
45 45
46 46 struct td_thragent {
47 47 rwlock_t rwlock;
48 48 struct ps_prochandle *ph_p;
49 49 int initialized;
50 50 int sync_tracking;
51 51 int model;
52 52 int primary_map;
53 53 psaddr_t bootstrap_addr;
54 54 psaddr_t uberdata_addr;
55 55 psaddr_t tdb_eventmask_addr;
56 56 psaddr_t tdb_register_sync_addr;
57 57 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
58 58 psaddr_t hash_table_addr;
59 59 int hash_size;
60 60 lwpid_t single_lwpid;
61 61 psaddr_t single_ulwp_addr;
62 62 };
63 63
64 64 /*
65 65 * This is the name of the variable in libc that contains
66 66 * the uberdata address that we will need.
67 67 */
68 68 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
69 69 /*
70 70 * This is the actual name of uberdata, used in the event
71 71 * that tdb_bootstrap has not yet been initialized.
72 72 */
73 73 #define TD_UBERDATA_NAME "_uberdata"
74 74 /*
75 75 * The library name should end with ".so.1", but older versions of
76 76 * dbx expect the unadorned name and malfunction if ".1" is specified.
77 77 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
78 78 * is applied to another instance of itself (due to the presence of
79 79 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
80 80 */
81 81 #define TD_LIBRARY_NAME "libc.so"
82 82 #define TD_LIBRARY_NAME_1 "libc.so.1"
83 83
84 84 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
85 85
86 86 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
87 87 void *cbdata_p, td_thr_state_e state, int ti_pri,
88 88 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
89 89
90 90 /*
91 91 * Initialize threads debugging interface.
92 92 */
93 93 #pragma weak td_init = __td_init
94 94 td_err_e
95 95 __td_init()
96 96 {
97 97 return (TD_OK);
98 98 }
99 99
100 100 /*
101 101 * This function does nothing, and never did.
102 102 * But the symbol is in the ABI, so we can't delete it.
103 103 */
104 104 #pragma weak td_log = __td_log
105 105 void
106 106 __td_log()
107 107 {
108 108 }
109 109
110 110 /*
111 111 * Short-cut to read just the hash table size from the process,
112 112 * to avoid repeatedly reading the full uberdata structure when
113 113 * dealing with a single-threaded process.
114 114 */
115 115 static uint_t
116 116 td_read_hash_size(td_thragent_t *ta_p)
117 117 {
118 118 psaddr_t addr;
119 119 uint_t hash_size;
120 120
121 121 switch (ta_p->initialized) {
122 122 default: /* uninitialized */
123 123 return (0);
124 124 case 1: /* partially initialized */
125 125 break;
126 126 case 2: /* fully initialized */
127 127 return (ta_p->hash_size);
128 128 }
129 129
130 130 if (ta_p->model == PR_MODEL_NATIVE) {
131 131 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
132 132 } else {
133 133 #if defined(_LP64) && defined(_SYSCALL32)
134 134 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
135 135 #else
136 136 addr = 0;
137 137 #endif
138 138 }
139 139 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
140 140 != PS_OK)
141 141 return (0);
142 142 return (hash_size);
143 143 }
144 144
145 145 static td_err_e
146 146 td_read_uberdata(td_thragent_t *ta_p)
147 147 {
148 148 struct ps_prochandle *ph_p = ta_p->ph_p;
149 149
150 150 if (ta_p->model == PR_MODEL_NATIVE) {
151 151 uberdata_t uberdata;
152 152
153 153 if (ps_pdread(ph_p, ta_p->uberdata_addr,
154 154 &uberdata, sizeof (uberdata)) != PS_OK)
155 155 return (TD_DBERR);
156 156 ta_p->primary_map = uberdata.primary_map;
157 157 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
158 158 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
159 159 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
160 160 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
161 161 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
162 162 ta_p->hash_size = uberdata.hash_size;
163 163 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
164 164 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
165 165 return (TD_DBERR);
166 166
167 167 } else {
168 168 #if defined(_LP64) && defined(_SYSCALL32)
169 169 uberdata32_t uberdata;
170 170 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
171 171 int i;
172 172
173 173 if (ps_pdread(ph_p, ta_p->uberdata_addr,
174 174 &uberdata, sizeof (uberdata)) != PS_OK)
175 175 return (TD_DBERR);
176 176 ta_p->primary_map = uberdata.primary_map;
177 177 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
178 178 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
179 179 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
180 180 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
181 181 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
182 182 ta_p->hash_size = uberdata.hash_size;
183 183 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
184 184 tdb_events, sizeof (tdb_events)) != PS_OK)
185 185 return (TD_DBERR);
186 186 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
187 187 ta_p->tdb_events[i] = tdb_events[i];
188 188 #else
189 189 return (TD_DBERR);
190 190 #endif
191 191 }
192 192 if (ta_p->hash_size != 1) { /* multi-threaded */
193 193 ta_p->initialized = 2;
194 194 ta_p->single_lwpid = 0;
195 195 ta_p->single_ulwp_addr = NULL;
196 196 } else { /* single-threaded */
197 197 ta_p->initialized = 1;
198 198 /*
199 199 * Get the address and lwpid of the single thread/LWP.
200 200 * It may not be ulwp_one if this is a child of fork1().
201 201 */
202 202 if (ta_p->model == PR_MODEL_NATIVE) {
203 203 thr_hash_table_t head;
204 204 lwpid_t lwpid = 0;
205 205
206 206 if (ps_pdread(ph_p, ta_p->hash_table_addr,
207 207 &head, sizeof (head)) != PS_OK)
208 208 return (TD_DBERR);
209 209 if ((psaddr_t)head.hash_bucket == NULL)
210 210 ta_p->initialized = 0;
211 211 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
212 212 offsetof(ulwp_t, ul_lwpid),
213 213 &lwpid, sizeof (lwpid)) != PS_OK)
214 214 return (TD_DBERR);
215 215 ta_p->single_lwpid = lwpid;
216 216 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
217 217 } else {
218 218 #if defined(_LP64) && defined(_SYSCALL32)
219 219 thr_hash_table32_t head;
220 220 lwpid_t lwpid = 0;
221 221
222 222 if (ps_pdread(ph_p, ta_p->hash_table_addr,
223 223 &head, sizeof (head)) != PS_OK)
224 224 return (TD_DBERR);
225 225 if ((psaddr_t)head.hash_bucket == NULL)
226 226 ta_p->initialized = 0;
227 227 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
228 228 offsetof(ulwp32_t, ul_lwpid),
229 229 &lwpid, sizeof (lwpid)) != PS_OK)
230 230 return (TD_DBERR);
231 231 ta_p->single_lwpid = lwpid;
232 232 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
233 233 #else
234 234 return (TD_DBERR);
235 235 #endif
236 236 }
237 237 }
238 238 if (!ta_p->primary_map)
239 239 ta_p->initialized = 0;
240 240 return (TD_OK);
241 241 }
242 242
243 243 static td_err_e
244 244 td_read_bootstrap_data(td_thragent_t *ta_p)
245 245 {
246 246 struct ps_prochandle *ph_p = ta_p->ph_p;
247 247 psaddr_t bootstrap_addr;
248 248 psaddr_t uberdata_addr;
249 249 ps_err_e db_return;
250 250 td_err_e return_val;
251 251 int do_1;
252 252
253 253 switch (ta_p->initialized) {
254 254 case 2: /* fully initialized */
255 255 return (TD_OK);
256 256 case 1: /* partially initialized */
257 257 if (td_read_hash_size(ta_p) == 1)
258 258 return (TD_OK);
259 259 return (td_read_uberdata(ta_p));
260 260 }
261 261
262 262 /*
263 263 * Uninitialized -- do the startup work.
264 264 * We set ta_p->initialized to -1 to cut off recursive calls
265 265 * into libc_db by code in the provider of ps_pglobal_lookup().
266 266 */
267 267 do_1 = 0;
268 268 ta_p->initialized = -1;
269 269 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
270 270 TD_BOOTSTRAP_NAME, &bootstrap_addr);
271 271 if (db_return == PS_NOSYM) {
272 272 do_1 = 1;
273 273 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
274 274 TD_BOOTSTRAP_NAME, &bootstrap_addr);
275 275 }
276 276 if (db_return == PS_NOSYM) /* libc is not linked yet */
277 277 return (TD_NOLIBTHREAD);
278 278 if (db_return != PS_OK)
279 279 return (TD_ERR);
280 280 db_return = ps_pglobal_lookup(ph_p,
281 281 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
282 282 TD_UBERDATA_NAME, &uberdata_addr);
283 283 if (db_return == PS_NOSYM) /* libc is not linked yet */
284 284 return (TD_NOLIBTHREAD);
285 285 if (db_return != PS_OK)
286 286 return (TD_ERR);
287 287
288 288 /*
289 289 * Read the uberdata address into the thread agent structure.
290 290 */
291 291 if (ta_p->model == PR_MODEL_NATIVE) {
292 292 psaddr_t psaddr;
293 293 if (ps_pdread(ph_p, bootstrap_addr,
294 294 &psaddr, sizeof (psaddr)) != PS_OK)
295 295 return (TD_DBERR);
296 296 if ((ta_p->bootstrap_addr = psaddr) == NULL)
297 297 psaddr = uberdata_addr;
298 298 else if (ps_pdread(ph_p, psaddr,
299 299 &psaddr, sizeof (psaddr)) != PS_OK)
300 300 return (TD_DBERR);
301 301 if (psaddr == NULL) {
302 302 /* primary linkmap in the tgt is not initialized */
303 303 ta_p->bootstrap_addr = NULL;
304 304 psaddr = uberdata_addr;
305 305 }
306 306 ta_p->uberdata_addr = psaddr;
307 307 } else {
308 308 #if defined(_LP64) && defined(_SYSCALL32)
309 309 caddr32_t psaddr;
310 310 if (ps_pdread(ph_p, bootstrap_addr,
311 311 &psaddr, sizeof (psaddr)) != PS_OK)
312 312 return (TD_DBERR);
313 313 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
314 314 psaddr = (caddr32_t)uberdata_addr;
315 315 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
316 316 &psaddr, sizeof (psaddr)) != PS_OK)
317 317 return (TD_DBERR);
318 318 if (psaddr == NULL) {
319 319 /* primary linkmap in the tgt is not initialized */
320 320 ta_p->bootstrap_addr = NULL;
321 321 psaddr = (caddr32_t)uberdata_addr;
322 322 }
323 323 ta_p->uberdata_addr = (psaddr_t)psaddr;
324 324 #else
325 325 return (TD_DBERR);
326 326 #endif /* _SYSCALL32 */
327 327 }
328 328
329 329 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
330 330 return (return_val);
331 331 if (ta_p->bootstrap_addr == NULL)
332 332 ta_p->initialized = 0;
333 333 return (TD_OK);
334 334 }
335 335
336 336 #pragma weak ps_kill
337 337 #pragma weak ps_lrolltoaddr
338 338
339 339 /*
340 340 * Allocate a new agent process handle ("thread agent").
341 341 */
342 342 #pragma weak td_ta_new = __td_ta_new
343 343 td_err_e
344 344 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
345 345 {
346 346 td_thragent_t *ta_p;
347 347 int model;
348 348 td_err_e return_val = TD_OK;
349 349
350 350 if (ph_p == NULL)
351 351 return (TD_BADPH);
352 352 if (ta_pp == NULL)
353 353 return (TD_ERR);
354 354 *ta_pp = NULL;
355 355 if (ps_pstop(ph_p) != PS_OK)
356 356 return (TD_DBERR);
357 357 /*
358 358 * ps_pdmodel might not be defined if this is an older client.
359 359 * Make it a weak symbol and test if it exists before calling.
360 360 */
361 361 #pragma weak ps_pdmodel
362 362 if (ps_pdmodel == NULL) {
363 363 model = PR_MODEL_NATIVE;
364 364 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
365 365 (void) ps_pcontinue(ph_p);
366 366 return (TD_ERR);
367 367 }
368 368 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
369 369 (void) ps_pcontinue(ph_p);
370 370 return (TD_MALLOC);
371 371 }
372 372
373 373 /*
374 374 * Initialize the agent process handle.
375 375 * Pick up the symbol value we need from the target process.
376 376 */
377 377 (void) memset(ta_p, 0, sizeof (*ta_p));
378 378 ta_p->ph_p = ph_p;
379 379 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
380 380 ta_p->model = model;
381 381 return_val = td_read_bootstrap_data(ta_p);
382 382
383 383 /*
384 384 * Because the old libthread_db enabled lock tracking by default,
385 385 * we must also do it. However, we do it only if the application
386 386 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
387 387 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
388 388 */
389 389 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
390 390 register_sync_t oldenable;
391 391 register_sync_t enable = REGISTER_SYNC_ENABLE;
392 392 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
393 393
394 394 if (ps_pdread(ph_p, psaddr,
395 395 &oldenable, sizeof (oldenable)) != PS_OK)
396 396 return_val = TD_DBERR;
397 397 else if (oldenable != REGISTER_SYNC_OFF ||
398 398 ps_pdwrite(ph_p, psaddr,
399 399 &enable, sizeof (enable)) != PS_OK) {
400 400 /*
401 401 * Lock tracking was already enabled or we
402 402 * failed to enable it, probably because we
403 403 * are examining a core file. In either case
404 404 * set the sync_tracking flag non-zero to
405 405 * indicate that we should not attempt to
406 406 * disable lock tracking when we delete the
407 407 * agent process handle in td_ta_delete().
408 408 */
409 409 ta_p->sync_tracking = 1;
410 410 }
411 411 }
412 412
413 413 if (return_val == TD_OK)
414 414 *ta_pp = ta_p;
415 415 else
416 416 free(ta_p);
417 417
418 418 (void) ps_pcontinue(ph_p);
419 419 return (return_val);
420 420 }
421 421
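/*
 * Consumer-side sketch (not part of this file): one plausible way a
 * debugger client might obtain and later release a thread agent.  It
 * assumes the ps_* callbacks required by libc_db (ps_pdread() and
 * friends) are already supplied -- on Solaris that is normally done by
 * linking with libproc -- and that the caller already holds a
 * struct ps_prochandle for the target.
 */
#include <thread_db.h>

static td_thragent_t *
attach_thread_agent(struct ps_prochandle *ph)
{
	td_thragent_t *ta = NULL;

	if (td_init() != TD_OK)
		return (NULL);
	/*
	 * td_ta_new() returns TD_NOLIBTHREAD until libc is linked into
	 * the target, so a debugger typically retries after startup.
	 */
	if (td_ta_new(ph, &ta) != TD_OK)
		return (NULL);
	return (ta);
}

static void
detach_thread_agent(td_thragent_t *ta)
{
	(void) td_ta_delete(ta);	/* see td_ta_delete() below */
}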
422 422 /*
423 423 * Utility function to grab the readers lock and return the prochandle,
424 424 * given an agent process handle. Performs standard error checking.
425 425 * Returns non-NULL with the lock held, or NULL with the lock not held.
426 426 */
427 427 static struct ps_prochandle *
428 428 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
429 429 {
430 430 struct ps_prochandle *ph_p = NULL;
431 431 td_err_e error;
432 432
433 433 if (ta_p == NULL || ta_p->initialized == -1) {
434 434 *err = TD_BADTA;
435 435 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
436 436 *err = TD_BADTA;
437 437 } else if ((ph_p = ta_p->ph_p) == NULL) {
438 438 (void) rw_unlock(&ta_p->rwlock);
439 439 *err = TD_BADPH;
440 440 } else if (ta_p->initialized != 2 &&
441 441 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
442 442 (void) rw_unlock(&ta_p->rwlock);
443 443 ph_p = NULL;
444 444 *err = error;
445 445 } else {
446 446 *err = TD_OK;
447 447 }
448 448
449 449 return (ph_p);
450 450 }
451 451
452 452 /*
453 453 * Utility function to grab the readers lock and return the prochandle,
454 454 * given an agent thread handle. Performs standard error checking.
455 455 * Returns non-NULL with the lock held, or NULL with the lock not held.
456 456 */
457 457 static struct ps_prochandle *
458 458 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
459 459 {
460 460 if (th_p == NULL || th_p->th_unique == NULL) {
461 461 *err = TD_BADTH;
462 462 return (NULL);
463 463 }
464 464 return (ph_lock_ta(th_p->th_ta_p, err));
465 465 }
466 466
467 467 /*
468 468 * Utility function to grab the readers lock and return the prochandle,
469 469 * given a synchronization object handle. Performs standard error checking.
470 470 * Returns non-NULL with the lock held, or NULL with the lock not held.
471 471 */
472 472 static struct ps_prochandle *
473 473 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
474 474 {
475 475 if (sh_p == NULL || sh_p->sh_unique == NULL) {
476 476 *err = TD_BADSH;
477 477 return (NULL);
478 478 }
479 479 return (ph_lock_ta(sh_p->sh_ta_p, err));
480 480 }
481 481
482 482 /*
483 483 * Unlock the agent process handle obtained from ph_lock_*().
484 484 */
485 485 static void
486 486 ph_unlock(td_thragent_t *ta_p)
487 487 {
488 488 (void) rw_unlock(&ta_p->rwlock);
489 489 }
490 490
491 491 /*
492 492 * De-allocate an agent process handle,
493 493 * releasing all related resources.
494 494 *
495 495 * XXX -- This is hopelessly broken ---
496 496 * Storage for thread agent is not deallocated. The prochandle
497 497 * in the thread agent is set to NULL so that future uses of
498 498 * the thread agent can be detected and an error value returned.
499 499 * All functions in the external user interface that make
500 500 * use of the thread agent are expected
501 501 * to check for a NULL prochandle in the thread agent.
502 502 * All such functions are also expected to obtain a
503 503 * reader lock on the thread agent while it is using it.
504 504 */
505 505 #pragma weak td_ta_delete = __td_ta_delete
506 506 td_err_e
507 507 __td_ta_delete(td_thragent_t *ta_p)
508 508 {
509 509 struct ps_prochandle *ph_p;
510 510
511 511 /*
512 512 * This is the only place we grab the writer lock.
513 513 * We are going to NULL out the prochandle.
514 514 */
515 515 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
516 516 return (TD_BADTA);
517 517 if ((ph_p = ta_p->ph_p) == NULL) {
518 518 (void) rw_unlock(&ta_p->rwlock);
519 519 return (TD_BADPH);
520 520 }
521 521 /*
522 522 * If synch. tracking was disabled when td_ta_new() was called and
523 523 * if td_ta_sync_tracking_enable() was never called, then disable
524 524 * synch. tracking (it was enabled by default in td_ta_new()).
525 525 */
526 526 if (ta_p->sync_tracking == 0 &&
527 527 ps_kill != NULL && ps_lrolltoaddr != NULL) {
528 528 register_sync_t enable = REGISTER_SYNC_DISABLE;
529 529
530 530 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
531 531 &enable, sizeof (enable));
532 532 }
533 533 ta_p->ph_p = NULL;
534 534 (void) rw_unlock(&ta_p->rwlock);
535 535 return (TD_OK);
536 536 }
537 537
538 538 /*
539 539 * Map an agent process handle to a client prochandle.
540 540 * Currently unused by dbx.
541 541 */
542 542 #pragma weak td_ta_get_ph = __td_ta_get_ph
543 543 td_err_e
544 544 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
545 545 {
546 546 td_err_e return_val;
547 547
548 548 if (ph_pp != NULL) /* protect stupid callers */
549 549 *ph_pp = NULL;
550 550 if (ph_pp == NULL)
551 551 return (TD_ERR);
552 552 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
553 553 return (return_val);
554 554 ph_unlock(ta_p);
555 555 return (TD_OK);
556 556 }
557 557
558 558 /*
559 559 * Set the process's suggested concurrency level.
560 560 * This is a no-op in a one-level model.
561 561 * Currently unused by dbx.
562 562 */
563 563 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
564 564 /* ARGSUSED1 */
565 565 td_err_e
566 566 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
567 567 {
568 568 if (ta_p == NULL)
569 569 return (TD_BADTA);
570 570 if (ta_p->ph_p == NULL)
571 571 return (TD_BADPH);
572 572 return (TD_OK);
573 573 }
574 574
575 575 /*
576 576 * Get the number of threads in the process.
577 577 */
578 578 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
579 579 td_err_e
580 580 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
581 581 {
582 582 struct ps_prochandle *ph_p;
583 583 td_err_e return_val;
584 584 int nthreads;
585 585 int nzombies;
586 586 psaddr_t nthreads_addr;
587 587 psaddr_t nzombies_addr;
588 588
589 589 if (ta_p->model == PR_MODEL_NATIVE) {
590 590 nthreads_addr = ta_p->uberdata_addr +
591 591 offsetof(uberdata_t, nthreads);
592 592 nzombies_addr = ta_p->uberdata_addr +
593 593 offsetof(uberdata_t, nzombies);
594 594 } else {
595 595 #if defined(_LP64) && defined(_SYSCALL32)
596 596 nthreads_addr = ta_p->uberdata_addr +
597 597 offsetof(uberdata32_t, nthreads);
598 598 nzombies_addr = ta_p->uberdata_addr +
599 599 offsetof(uberdata32_t, nzombies);
600 600 #else
601 601 nthreads_addr = 0;
602 602 nzombies_addr = 0;
603 603 #endif /* _SYSCALL32 */
604 604 }
605 605
606 606 if (nthread_p == NULL)
607 607 return (TD_ERR);
608 608 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
609 609 return (return_val);
610 610 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
611 611 return_val = TD_DBERR;
612 612 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
613 613 return_val = TD_DBERR;
614 614 ph_unlock(ta_p);
615 615 if (return_val == TD_OK)
616 616 *nthread_p = nthreads + nzombies;
617 617 return (return_val);
618 618 }
619 619
620 620 typedef struct {
621 621 thread_t tid;
622 622 int found;
623 623 td_thrhandle_t th;
624 624 } td_mapper_param_t;
625 625
626 626 /*
627 627 * Check the value in data against the thread id.
628 628 * If it matches, return 1 to terminate iterations.
629 629 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
630 630 */
631 631 static int
632 632 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
633 633 {
634 634 td_thrinfo_t ti;
635 635
636 636 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
637 637 data->tid == ti.ti_tid) {
638 638 data->found = 1;
639 639 data->th = *th_p;
640 640 return (1);
641 641 }
642 642 return (0);
643 643 }
644 644
645 645 /*
646 646 * Given a thread identifier, return the corresponding thread handle.
647 647 */
648 648 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
649 649 td_err_e
650 650 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
651 651 td_thrhandle_t *th_p)
652 652 {
653 653 td_err_e return_val;
654 654 td_mapper_param_t data;
655 655
656 656 if (th_p != NULL && /* optimize for a single thread */
657 657 ta_p != NULL &&
658 658 ta_p->initialized == 1 &&
659 659 (td_read_hash_size(ta_p) == 1 ||
660 660 td_read_uberdata(ta_p) == TD_OK) &&
661 661 ta_p->initialized == 1 &&
662 662 ta_p->single_lwpid == tid) {
663 663 th_p->th_ta_p = ta_p;
664 664 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
665 665 return (TD_NOTHR);
666 666 return (TD_OK);
667 667 }
668 668
669 669 /*
670 670 * LOCKING EXCEPTION - Locking is not required here because
671 671 * the locking and checking will be done in __td_ta_thr_iter.
672 672 */
673 673
674 674 if (ta_p == NULL)
675 675 return (TD_BADTA);
676 676 if (th_p == NULL)
677 677 return (TD_BADTH);
678 678 if (tid == 0)
679 679 return (TD_NOTHR);
680 680
681 681 data.tid = tid;
682 682 data.found = 0;
683 683 return_val = __td_ta_thr_iter(ta_p,
684 684 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
685 685 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
686 686 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
687 687 if (return_val == TD_OK) {
688 688 if (data.found == 0)
689 689 return_val = TD_NOTHR;
690 690 else
691 691 *th_p = data.th;
692 692 }
693 693
694 694 return (return_val);
695 695 }
696 696
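/*
 * Consumer-side sketch (not part of this file): mapping a thread id to a
 * thread handle and reading its state, roughly what a debugger command
 * such as "print thread <tid>" might do.  The agent is assumed to come
 * from td_ta_new() as in the earlier sketch.
 */
#include <stdio.h>
#include <thread_db.h>

static td_err_e
print_thread_state(td_thragent_t *ta, thread_t tid)
{
	td_thrhandle_t th;
	td_thrinfo_t ti;
	td_err_e err;

	if ((err = td_ta_map_id2thr(ta, tid, &th)) != TD_OK)
		return (err);		/* TD_NOTHR if no such thread */
	if ((err = td_thr_get_info(&th, &ti)) != TD_OK)
		return (err);
	(void) printf("thread %d: lwp %d, state %d, stack size %ld\n",
	    (int)ti.ti_tid, (int)ti.ti_lid, (int)ti.ti_state,
	    (long)ti.ti_stksize);
	return (TD_OK);
}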
697 697 /*
698 698 * Map the address of a synchronization object to a sync. object handle.
699 699 */
700 700 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
701 701 td_err_e
702 702 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
703 703 {
704 704 struct ps_prochandle *ph_p;
705 705 td_err_e return_val;
706 706 uint16_t sync_magic;
707 707
708 708 if (sh_p == NULL)
709 709 return (TD_BADSH);
710 710 if (addr == NULL)
711 711 return (TD_ERR);
712 712 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
713 713 return (return_val);
714 714 /*
715 715 * Check the magic number of the sync. object to make sure it's valid.
716 716 * The magic number is at the same offset for all sync. objects.
717 717 */
718 718 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
719 719 &sync_magic, sizeof (sync_magic)) != PS_OK) {
720 720 ph_unlock(ta_p);
721 721 return (TD_BADSH);
722 722 }
723 723 ph_unlock(ta_p);
724 724 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
725 725 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
726 726 return (TD_BADSH);
727 727 /*
728 728 * Just fill in the appropriate fields of the sync. handle.
729 729 */
730 730 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
731 731 sh_p->sh_unique = addr;
732 732 return (TD_OK);
733 733 }
734 734
735 735 /*
736 736 * Iterate over the set of global TSD keys.
737 737 * The call back function is called with three arguments,
738 738 * a key, a pointer to the destructor function, and the cbdata pointer.
739 739 * Currently unused by dbx.
740 740 */
741 741 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
742 742 td_err_e
743 743 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
744 744 {
745 745 struct ps_prochandle *ph_p;
746 746 td_err_e return_val;
747 747 int key;
748 748 int numkeys;
749 749 psaddr_t dest_addr;
750 750 psaddr_t *destructors = NULL;
751 751 PFrV destructor;
752 752
753 753 if (cb == NULL)
754 754 return (TD_ERR);
755 755 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
756 756 return (return_val);
757 757 if (ps_pstop(ph_p) != PS_OK) {
758 758 ph_unlock(ta_p);
759 759 return (TD_DBERR);
760 760 }
761 761
762 762 if (ta_p->model == PR_MODEL_NATIVE) {
763 763 tsd_metadata_t tsdm;
764 764
765 765 if (ps_pdread(ph_p,
766 766 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
767 767 &tsdm, sizeof (tsdm)) != PS_OK)
768 768 return_val = TD_DBERR;
769 769 else {
770 770 numkeys = tsdm.tsdm_nused;
771 771 dest_addr = (psaddr_t)tsdm.tsdm_destro;
772 772 if (numkeys > 0)
773 773 destructors =
774 774 malloc(numkeys * sizeof (psaddr_t));
775 775 }
776 776 } else {
777 777 #if defined(_LP64) && defined(_SYSCALL32)
778 778 tsd_metadata32_t tsdm;
779 779
780 780 if (ps_pdread(ph_p,
781 781 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
782 782 &tsdm, sizeof (tsdm)) != PS_OK)
783 783 return_val = TD_DBERR;
784 784 else {
785 785 numkeys = tsdm.tsdm_nused;
786 786 dest_addr = (psaddr_t)tsdm.tsdm_destro;
787 787 if (numkeys > 0)
788 788 destructors =
789 789 malloc(numkeys * sizeof (caddr32_t));
790 790 }
791 791 #else
792 792 return_val = TD_DBERR;
793 793 #endif /* _SYSCALL32 */
794 794 }
795 795
796 796 if (return_val != TD_OK || numkeys <= 0) {
797 797 (void) ps_pcontinue(ph_p);
798 798 ph_unlock(ta_p);
799 799 return (return_val);
800 800 }
801 801
802 802 if (destructors == NULL)
803 803 return_val = TD_MALLOC;
804 804 else if (ta_p->model == PR_MODEL_NATIVE) {
805 805 if (ps_pdread(ph_p, dest_addr,
806 806 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
807 807 return_val = TD_DBERR;
808 808 else {
809 809 for (key = 1; key < numkeys; key++) {
810 810 destructor = (PFrV)destructors[key];
811 811 if (destructor != TSD_UNALLOCATED &&
812 812 (*cb)(key, destructor, cbdata_p))
813 813 break;
814 814 }
815 815 }
816 816 #if defined(_LP64) && defined(_SYSCALL32)
817 817 } else {
818 818 caddr32_t *destructors32 = (caddr32_t *)destructors;
819 819 caddr32_t destruct32;
820 820
821 821 if (ps_pdread(ph_p, dest_addr,
822 822 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
823 823 return_val = TD_DBERR;
824 824 else {
825 825 for (key = 1; key < numkeys; key++) {
826 826 destruct32 = destructors32[key];
827 827 if ((destruct32 !=
828 828 (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
829 829 (*cb)(key, (PFrV)(uintptr_t)destruct32,
830 830 cbdata_p))
831 831 break;
832 832 }
833 833 }
834 834 #endif /* _SYSCALL32 */
835 835 }
836 836
837 837 if (destructors)
838 838 free(destructors);
839 839 (void) ps_pcontinue(ph_p);
840 840 ph_unlock(ta_p);
841 841 return (return_val);
842 842 }
843 843
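/*
 * Consumer-side sketch (not part of this file): a callback for
 * td_ta_tsd_iter() that lists each allocated TSD key and its destructor.
 * The signature follows the td_key_iter_f callback as it is invoked by
 * __td_ta_tsd_iter() above; returning 0 continues the iteration.
 */
#include <stdio.h>
#include <thread_db.h>

/* ARGSUSED */
static int
list_tsd_key(thread_key_t key, void (*destructor)(void *), void *cbdata)
{
	(void) printf("TSD key %d, destructor %p\n", (int)key,
	    (void *)destructor);
	return (0);
}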
844 844 int
845 845 sigequalset(const sigset_t *s1, const sigset_t *s2)
846 846 {
847 847 return (
848 848 s1->__sigbits[0] == s2->__sigbits[0] &&
849 849 s1->__sigbits[1] == s2->__sigbits[1] &&
850 850 s1->__sigbits[2] == s2->__sigbits[2] &&
851 851 s1->__sigbits[3] == s2->__sigbits[3]);
852 852 }
853 853
854 854 /*
855 855 * Description:
856 856 * Iterate over all threads. For each thread call
857 857 * the function pointed to by "cb" with a pointer
858 858 * to a thread handle, and a pointer to data which
859 859 * can be NULL. Only call td_thr_iter_f() on threads
860 860 * which match the properties of state, ti_pri,
861 861 * ti_sigmask_p, and ti_user_flags. If cb returns
862 862 * a non-zero value, terminate iterations.
863 863 *
864 864 * Input:
865 865 * *ta_p - thread agent
866 866 * *cb - call back function defined by user.
867 867 * td_thr_iter_f() takes a thread handle and
868 868 * cbdata_p as a parameter.
869 869 * cbdata_p - parameter for td_thr_iter_f().
870 870 *
871 871 * state - state of threads of interest. A value of
872 872 * TD_THR_ANY_STATE from enum td_thr_state_e
873 873 * does not restrict iterations by state.
874 874 * ti_pri - lower bound of priorities of threads of
875 875 * interest. A value of TD_THR_LOWEST_PRIORITY
876 876 * defined in thread_db.h does not restrict
877 877 * iterations by priority. A thread with priority
878 878 * less than ti_pri will NOT be passed to the callback
879 879 * function.
880 880 * ti_sigmask_p - signal mask of threads of interest.
881 881 * A value of TD_SIGNO_MASK defined in thread_db.h
882 882 * does not restrict iterations by signal mask.
883 883 * ti_user_flags - user flags of threads of interest. A
884 884 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
885 885 * does not restrict iterations by user flags.
886 886 */
887 887 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
888 888 td_err_e
889 889 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
890 890 void *cbdata_p, td_thr_state_e state, int ti_pri,
891 891 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
892 892 {
893 893 struct ps_prochandle *ph_p;
894 894 psaddr_t first_lwp_addr;
895 895 psaddr_t first_zombie_addr;
896 896 psaddr_t curr_lwp_addr;
897 897 psaddr_t next_lwp_addr;
898 898 td_thrhandle_t th;
899 899 ps_err_e db_return;
900 900 ps_err_e db_return2;
901 901 td_err_e return_val;
902 902
903 903 if (cb == NULL)
904 904 return (TD_ERR);
905 905 /*
906 906 * If state is not within bound, short circuit.
907 907 */
908 908 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
909 909 return (TD_OK);
910 910
911 911 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
912 912 return (return_val);
913 913 if (ps_pstop(ph_p) != PS_OK) {
914 914 ph_unlock(ta_p);
915 915 return (TD_DBERR);
916 916 }
917 917
918 918 /*
919 919 * For each ulwp_t in the circular linked lists pointed
920 920 * to by "all_lwps" and "all_zombies":
921 921 * (1) Filter each thread.
922 922 * (2) Create the thread_object for each thread that passes.
923 923 * (3) Call the call back function on each thread.
924 924 */
925 925
926 926 if (ta_p->model == PR_MODEL_NATIVE) {
927 927 db_return = ps_pdread(ph_p,
928 928 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
929 929 &first_lwp_addr, sizeof (first_lwp_addr));
930 930 db_return2 = ps_pdread(ph_p,
931 931 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
932 932 &first_zombie_addr, sizeof (first_zombie_addr));
933 933 } else {
934 934 #if defined(_LP64) && defined(_SYSCALL32)
935 935 caddr32_t addr32;
936 936
937 937 db_return = ps_pdread(ph_p,
938 938 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
939 939 &addr32, sizeof (addr32));
940 940 first_lwp_addr = addr32;
941 941 db_return2 = ps_pdread(ph_p,
942 942 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
943 943 &addr32, sizeof (addr32));
944 944 first_zombie_addr = addr32;
945 945 #else /* _SYSCALL32 */
946 946 db_return = PS_ERR;
947 947 db_return2 = PS_ERR;
948 948 #endif /* _SYSCALL32 */
949 949 }
950 950 if (db_return == PS_OK)
951 951 db_return = db_return2;
952 952
953 953 /*
954 954 * If first_lwp_addr and first_zombie_addr are both NULL,
955 955 * libc must not yet be initialized or all threads have
956 956 * exited. Return TD_NOTHR and all will be well.
957 957 */
958 958 if (db_return == PS_OK &&
959 959 first_lwp_addr == NULL && first_zombie_addr == NULL) {
960 960 (void) ps_pcontinue(ph_p);
961 961 ph_unlock(ta_p);
962 962 return (TD_NOTHR);
963 963 }
964 964 if (db_return != PS_OK) {
965 965 (void) ps_pcontinue(ph_p);
966 966 ph_unlock(ta_p);
967 967 return (TD_DBERR);
968 968 }
969 969
970 970 /*
971 971 * Run down the lists of all living and dead lwps.
972 972 */
973 973 if (first_lwp_addr == NULL)
974 974 first_lwp_addr = first_zombie_addr;
975 975 curr_lwp_addr = first_lwp_addr;
976 976 for (;;) {
977 977 td_thr_state_e ts_state;
978 978 int userpri;
979 979 unsigned userflags;
980 980 sigset_t mask;
981 981
982 982 /*
983 983 * Read the ulwp struct.
984 984 */
985 985 if (ta_p->model == PR_MODEL_NATIVE) {
986 986 ulwp_t ulwp;
987 987
988 988 if (ps_pdread(ph_p, curr_lwp_addr,
989 989 &ulwp, sizeof (ulwp)) != PS_OK &&
990 990 ((void) memset(&ulwp, 0, sizeof (ulwp)),
991 991 ps_pdread(ph_p, curr_lwp_addr,
992 992 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
993 993 return_val = TD_DBERR;
994 994 break;
995 995 }
996 996 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
997 997
998 998 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
999 999 ulwp.ul_stop? TD_THR_STOPPED :
1000 1000 ulwp.ul_wchan? TD_THR_SLEEP :
1001 1001 TD_THR_ACTIVE;
1002 1002 userpri = ulwp.ul_pri;
1003 1003 userflags = ulwp.ul_usropts;
1004 1004 if (ulwp.ul_dead)
1005 1005 (void) sigemptyset(&mask);
1006 1006 else
1007 1007 mask = *(sigset_t *)&ulwp.ul_sigmask;
1008 1008 } else {
1009 1009 #if defined(_LP64) && defined(_SYSCALL32)
1010 1010 ulwp32_t ulwp;
1011 1011
1012 1012 if (ps_pdread(ph_p, curr_lwp_addr,
1013 1013 &ulwp, sizeof (ulwp)) != PS_OK &&
1014 1014 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1015 1015 ps_pdread(ph_p, curr_lwp_addr,
1016 1016 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1017 1017 return_val = TD_DBERR;
1018 1018 break;
1019 1019 }
1020 1020 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1021 1021
1022 1022 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1023 1023 ulwp.ul_stop? TD_THR_STOPPED :
1024 1024 ulwp.ul_wchan? TD_THR_SLEEP :
1025 1025 TD_THR_ACTIVE;
1026 1026 userpri = ulwp.ul_pri;
1027 1027 userflags = ulwp.ul_usropts;
1028 1028 if (ulwp.ul_dead)
1029 1029 (void) sigemptyset(&mask);
1030 1030 else
1031 1031 mask = *(sigset_t *)&ulwp.ul_sigmask;
1032 1032 #else /* _SYSCALL32 */
1033 1033 return_val = TD_ERR;
1034 1034 break;
1035 1035 #endif /* _SYSCALL32 */
1036 1036 }
1037 1037
1038 1038 /*
1039 1039 * Filter on state, priority, sigmask, and user flags.
1040 1040 */
1041 1041
1042 1042 if ((state != ts_state) &&
1043 1043 (state != TD_THR_ANY_STATE))
1044 1044 goto advance;
1045 1045
1046 1046 if (ti_pri > userpri)
1047 1047 goto advance;
1048 1048
1049 1049 if (ti_sigmask_p != TD_SIGNO_MASK &&
1050 1050 !sigequalset(ti_sigmask_p, &mask))
1051 1051 goto advance;
1052 1052
1053 1053 if (ti_user_flags != userflags &&
1054 1054 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1055 1055 goto advance;
1056 1056
1057 1057 /*
1058 1058 * Call back - break if the return
1059 1059 * from the call back is non-zero.
1060 1060 */
1061 1061 th.th_ta_p = (td_thragent_t *)ta_p;
1062 1062 th.th_unique = curr_lwp_addr;
1063 1063 if ((*cb)(&th, cbdata_p))
1064 1064 break;
1065 1065
1066 1066 advance:
1067 1067 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1068 1068 /*
1069 1069 * Switch to the zombie list, unless it is NULL
1070 1070 * or we have already been doing the zombie list,
1071 1071 * in which case terminate the loop.
1072 1072 */
1073 1073 if (first_zombie_addr == NULL ||
1074 1074 first_lwp_addr == first_zombie_addr)
1075 1075 break;
1076 1076 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1077 1077 }
1078 1078 }
1079 1079
1080 1080 (void) ps_pcontinue(ph_p);
1081 1081 ph_unlock(ta_p);
1082 1082 return (return_val);
1083 1083 }
1084 1084
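/*
 * Consumer-side sketch (not part of this file): counting every thread in
 * the target with td_ta_thr_iter().  The wildcard filter values are the
 * ones described in the comment above __td_ta_thr_iter(): TD_THR_ANY_STATE,
 * TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK and TD_THR_ANY_USER_FLAGS leave
 * the iteration unrestricted.
 */
#include <thread_db.h>

/* ARGSUSED */
static int
count_thread(const td_thrhandle_t *th, void *cbdata)
{
	(*(int *)cbdata)++;
	return (0);			/* non-zero would stop the walk */
}

static td_err_e
count_all_threads(td_thragent_t *ta, int *countp)
{
	*countp = 0;
	return (td_ta_thr_iter(ta, count_thread, countp,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS));
}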
1085 1085 /*
1086 1086 * Enable or disable process synchronization object tracking.
1087 1087 * Currently unused by dbx.
1088 1088 */
1089 1089 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1090 1090 td_err_e
1091 1091 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1092 1092 {
1093 1093 struct ps_prochandle *ph_p;
1094 1094 td_err_e return_val;
1095 1095 register_sync_t enable;
1096 1096
1097 1097 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1098 1098 return (return_val);
1099 1099 /*
1100 1100 * Values of tdb_register_sync in the victim process:
1101 1101 * REGISTER_SYNC_ENABLE enables registration of synch objects
1102 1102 * REGISTER_SYNC_DISABLE disables registration of synch objects
1103 1103 * These cause the table to be cleared and tdb_register_sync set to:
1104 1104 * REGISTER_SYNC_ON registration in effect
1105 1105 * REGISTER_SYNC_OFF registration not in effect
1106 1106 */
1107 1107 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1108 1108 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1109 1109 &enable, sizeof (enable)) != PS_OK)
1110 1110 return_val = TD_DBERR;
1111 1111 /*
1112 1112 * Remember that this interface was called (see td_ta_delete()).
1113 1113 */
1114 1114 ta_p->sync_tracking = 1;
1115 1115 ph_unlock(ta_p);
1116 1116 return (return_val);
1117 1117 }
1118 1118
1119 1119 /*
1120 1120 * Iterate over all known synchronization variables.
1121 1121 * It is very possible that the list generated is incomplete,
1122 1122 * because the iterator can only find synchronization variables
1123 1123 * that have been registered by the process since synchronization
1124 1124 * object registration was enabled.
1125 1125 * The call back function cb is called for each synchronization
1126 1126 * variable with two arguments: a pointer to the synchronization
1127 1127 * handle and the passed-in argument cbdata.
1128 1128 * If cb returns a non-zero value, iterations are terminated.
1129 1129 */
1130 1130 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1131 1131 td_err_e
1132 1132 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1133 1133 {
1134 1134 struct ps_prochandle *ph_p;
1135 1135 td_err_e return_val;
1136 1136 int i;
1137 1137 register_sync_t enable;
1138 1138 psaddr_t next_desc;
1139 1139 tdb_sync_stats_t sync_stats;
1140 1140 td_synchandle_t synchandle;
1141 1141 psaddr_t psaddr;
1142 1142 void *vaddr;
1143 1143 uint64_t *sync_addr_hash = NULL;
1144 1144
1145 1145 if (cb == NULL)
1146 1146 return (TD_ERR);
1147 1147 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1148 1148 return (return_val);
1149 1149 if (ps_pstop(ph_p) != PS_OK) {
1150 1150 ph_unlock(ta_p);
1151 1151 return (TD_DBERR);
1152 1152 }
1153 1153 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1154 1154 &enable, sizeof (enable)) != PS_OK) {
1155 1155 return_val = TD_DBERR;
1156 1156 goto out;
1157 1157 }
1158 1158 if (enable != REGISTER_SYNC_ON)
1159 1159 goto out;
1160 1160
1161 1161 /*
1162 1162 * First read the hash table.
1163 1163 * The hash table is large; allocate with mmap().
1164 1164 */
1165 1165 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1166 1166 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1167 1167 == MAP_FAILED) {
1168 1168 return_val = TD_MALLOC;
1169 1169 goto out;
1170 1170 }
1171 1171 sync_addr_hash = vaddr;
1172 1172
1173 1173 if (ta_p->model == PR_MODEL_NATIVE) {
1174 1174 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1175 1175 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1176 1176 &psaddr, sizeof (&psaddr)) != PS_OK) {
1177 1177 return_val = TD_DBERR;
1178 1178 goto out;
1179 1179 }
1180 1180 } else {
1181 1181 #ifdef _SYSCALL32
1182 1182 caddr32_t addr;
1183 1183
1184 1184 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1185 1185 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1186 1186 &addr, sizeof (addr)) != PS_OK) {
1187 1187 return_val = TD_DBERR;
1188 1188 goto out;
1189 1189 }
1190 1190 psaddr = addr;
1191 1191 #else
1192 1192 return_val = TD_ERR;
1193 1193 goto out;
1194 1194 #endif /* _SYSCALL32 */
1195 1195 }
1196 1196
1197 1197 if (psaddr == NULL)
1198 1198 goto out;
1199 1199 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1200 1200 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1201 1201 return_val = TD_DBERR;
1202 1202 goto out;
1203 1203 }
1204 1204
1205 1205 /*
1206 1206 * Now scan the hash table.
1207 1207 */
1208 1208 for (i = 0; i < TDB_HASH_SIZE; i++) {
1209 1209 for (next_desc = (psaddr_t)sync_addr_hash[i];
1210 1210 next_desc != NULL;
1211 1211 next_desc = (psaddr_t)sync_stats.next) {
1212 1212 if (ps_pdread(ph_p, next_desc,
1213 1213 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1214 1214 return_val = TD_DBERR;
1215 1215 goto out;
1216 1216 }
1217 1217 if (sync_stats.un.type == TDB_NONE) {
1218 1218 /* not registered since registration enabled */
1219 1219 continue;
1220 1220 }
1221 1221 synchandle.sh_ta_p = ta_p;
1222 1222 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1223 1223 if ((*cb)(&synchandle, cbdata) != 0)
1224 1224 goto out;
1225 1225 }
1226 1226 }
1227 1227
1228 1228 out:
1229 1229 if (sync_addr_hash != NULL)
1230 1230 (void) munmap((void *)sync_addr_hash,
1231 1231 TDB_HASH_SIZE * sizeof (uint64_t));
1232 1232 (void) ps_pcontinue(ph_p);
1233 1233 ph_unlock(ta_p);
1234 1234 return (return_val);
1235 1235 }
1236 1236
1237 1237 /*
1238 1238 * Enable process statistics collection.
1239 1239 */
1240 1240 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1241 1241 /* ARGSUSED */
1242 1242 td_err_e
1243 1243 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1244 1244 {
1245 1245 return (TD_NOCAPAB);
1246 1246 }
1247 1247
1248 1248 /*
1249 1249 * Reset process statistics.
1250 1250 */
1251 1251 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1252 1252 /* ARGSUSED */
1253 1253 td_err_e
1254 1254 __td_ta_reset_stats(const td_thragent_t *ta_p)
1255 1255 {
1256 1256 return (TD_NOCAPAB);
1257 1257 }
1258 1258
1259 1259 /*
1260 1260 * Read process statistics.
1261 1261 */
1262 1262 #pragma weak td_ta_get_stats = __td_ta_get_stats
1263 1263 /* ARGSUSED */
1264 1264 td_err_e
1265 1265 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1266 1266 {
1267 1267 return (TD_NOCAPAB);
1268 1268 }
1269 1269
1270 1270 /*
1271 1271 * Transfer information from lwp struct to thread information struct.
1272 1272 * XXX -- lots of this needs cleaning up.
1273 1273 */
1274 1274 static void
1275 1275 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1276 1276 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1277 1277 {
1278 1278 lwpid_t lwpid;
1279 1279
1280 1280 if ((lwpid = ulwp->ul_lwpid) == 0)
1281 1281 lwpid = 1;
1282 1282 (void) memset(ti_p, 0, sizeof (*ti_p));
1283 1283 ti_p->ti_ta_p = ta_p;
1284 1284 ti_p->ti_user_flags = ulwp->ul_usropts;
1285 1285 ti_p->ti_tid = lwpid;
1286 1286 ti_p->ti_exitval = ulwp->ul_rval;
1287 1287 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1288 1288 if (!ulwp->ul_dead) {
1289 1289 /*
1290 1290 * The bloody fools got this backwards!
1291 1291 */
1292 1292 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1293 1293 ti_p->ti_stksize = ulwp->ul_stksiz;
1294 1294 }
1295 1295 ti_p->ti_ro_area = ts_addr;
1296 1296 ti_p->ti_ro_size = ulwp->ul_replace?
1297 1297 REPLACEMENT_SIZE : sizeof (ulwp_t);
1298 1298 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1299 1299 ulwp->ul_stop? TD_THR_STOPPED :
1300 1300 ulwp->ul_wchan? TD_THR_SLEEP :
1301 1301 TD_THR_ACTIVE;
1302 1302 ti_p->ti_db_suspended = 0;
1303 1303 ti_p->ti_type = TD_THR_USER;
1304 1304 ti_p->ti_sp = ulwp->ul_sp;
1305 1305 ti_p->ti_flags = 0;
1306 1306 ti_p->ti_pri = ulwp->ul_pri;
1307 1307 ti_p->ti_lid = lwpid;
1308 1308 if (!ulwp->ul_dead)
1309 1309 ti_p->ti_sigmask = ulwp->ul_sigmask;
1310 1310 ti_p->ti_traceme = 0;
1311 1311 ti_p->ti_preemptflag = 0;
1312 1312 ti_p->ti_pirecflag = 0;
1313 1313 (void) sigemptyset(&ti_p->ti_pending);
1314 1314 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1315 1315 }
1316 1316
1317 1317 #if defined(_LP64) && defined(_SYSCALL32)
1318 1318 static void
1319 1319 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1320 1320 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1321 1321 {
1322 1322 lwpid_t lwpid;
1323 1323
1324 1324 if ((lwpid = ulwp->ul_lwpid) == 0)
1325 1325 lwpid = 1;
1326 1326 (void) memset(ti_p, 0, sizeof (*ti_p));
1327 1327 ti_p->ti_ta_p = ta_p;
1328 1328 ti_p->ti_user_flags = ulwp->ul_usropts;
1329 1329 ti_p->ti_tid = lwpid;
1330 1330 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1331 1331 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1332 1332 if (!ulwp->ul_dead) {
1333 1333 /*
1334 1334 * The bloody fools got this backwards!
1335 1335 */
1336 1336 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1337 1337 ti_p->ti_stksize = ulwp->ul_stksiz;
1338 1338 }
1339 1339 ti_p->ti_ro_area = ts_addr;
1340 1340 ti_p->ti_ro_size = ulwp->ul_replace?
1341 1341 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1342 1342 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1343 1343 ulwp->ul_stop? TD_THR_STOPPED :
1344 1344 ulwp->ul_wchan? TD_THR_SLEEP :
1345 1345 TD_THR_ACTIVE;
1346 1346 ti_p->ti_db_suspended = 0;
1347 1347 ti_p->ti_type = TD_THR_USER;
1348 1348 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1349 1349 ti_p->ti_flags = 0;
1350 1350 ti_p->ti_pri = ulwp->ul_pri;
1351 1351 ti_p->ti_lid = lwpid;
1352 1352 if (!ulwp->ul_dead)
1353 1353 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1354 1354 ti_p->ti_traceme = 0;
1355 1355 ti_p->ti_preemptflag = 0;
1356 1356 ti_p->ti_pirecflag = 0;
1357 1357 (void) sigemptyset(&ti_p->ti_pending);
1358 1358 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1359 1359 }
1360 1360 #endif /* _SYSCALL32 */
1361 1361
1362 1362 /*
1363 1363 * Get thread information.
1364 1364 */
1365 1365 #pragma weak td_thr_get_info = __td_thr_get_info
1366 1366 td_err_e
1367 1367 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1368 1368 {
1369 1369 struct ps_prochandle *ph_p;
1370 1370 td_thragent_t *ta_p;
1371 1371 td_err_e return_val;
1372 1372 psaddr_t psaddr;
1373 1373
1374 1374 if (ti_p == NULL)
1375 1375 return (TD_ERR);
1376 1376 (void) memset(ti_p, NULL, sizeof (*ti_p));
1377 1377
1378 1378 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1379 1379 return (return_val);
1380 1380 ta_p = th_p->th_ta_p;
1381 1381 if (ps_pstop(ph_p) != PS_OK) {
1382 1382 ph_unlock(ta_p);
1383 1383 return (TD_DBERR);
1384 1384 }
1385 1385
1386 1386 /*
1387 1387 * Read the ulwp struct from the process.
1388 1388 * Transfer the ulwp struct to the thread information struct.
1389 1389 */
1390 1390 psaddr = th_p->th_unique;
1391 1391 if (ta_p->model == PR_MODEL_NATIVE) {
1392 1392 ulwp_t ulwp;
1393 1393
1394 1394 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1395 1395 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1396 1396 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1397 1397 return_val = TD_DBERR;
1398 1398 else
1399 1399 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1400 1400 } else {
1401 1401 #if defined(_LP64) && defined(_SYSCALL32)
1402 1402 ulwp32_t ulwp;
1403 1403
1404 1404 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1405 1405 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1406 1406 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1407 1407 PS_OK)
1408 1408 return_val = TD_DBERR;
1409 1409 else
1410 1410 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1411 1411 #else
1412 1412 return_val = TD_ERR;
1413 1413 #endif /* _SYSCALL32 */
1414 1414 }
1415 1415
1416 1416 (void) ps_pcontinue(ph_p);
1417 1417 ph_unlock(ta_p);
1418 1418 return (return_val);
1419 1419 }
1420 1420
1421 1421 /*
1422 1422 * Given a process and an event number, return information about
1423 1423 * an address in the process or at which a breakpoint can be set
1424 1424 * to monitor the event.
1425 1425 */
1426 1426 #pragma weak td_ta_event_addr = __td_ta_event_addr
1427 1427 td_err_e
1428 1428 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1429 1429 {
1430 1430 if (ta_p == NULL)
1431 1431 return (TD_BADTA);
1432 1432 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1433 1433 return (TD_NOEVENT);
1434 1434 if (notify_p == NULL)
1435 1435 return (TD_ERR);
1436 1436
1437 1437 notify_p->type = NOTIFY_BPT;
1438 1438 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1439 1439
1440 1440 return (TD_OK);
1441 1441 }
1442 1442
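/*
 * Consumer-side sketch (not part of this file): using td_ta_event_addr()
 * to find where a debugger would plant breakpoints for thread-creation
 * and thread-exit events.  Actually arming the events also requires
 * td_ta_set_event()/td_thr_set_event() and td_thr_event_enable(); this
 * only shows the address lookup.
 */
#include <stdio.h>
#include <thread_db.h>

static void
show_event_addrs(td_thragent_t *ta)
{
	td_notify_t notify;

	if (td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
	    notify.type == NOTIFY_BPT)
		(void) printf("TD_CREATE event address %p\n",
		    (void *)notify.u.bptaddr);
	if (td_ta_event_addr(ta, TD_DEATH, &notify) == TD_OK &&
	    notify.type == NOTIFY_BPT)
		(void) printf("TD_DEATH event address %p\n",
		    (void *)notify.u.bptaddr);
}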
1443 1443 /*
1444 1444 * Add the events in eventset 2 to eventset 1.
1445 1445 */
1446 1446 static void
1447 1447 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1448 1448 {
1449 1449 int i;
1450 1450
1451 1451 for (i = 0; i < TD_EVENTSIZE; i++)
1452 1452 event1_p->event_bits[i] |= event2_p->event_bits[i];
1453 1453 }
1454 1454
1455 1455 /*
1456 1456 * Delete the events in eventset 2 from eventset 1.
1457 1457 */
1458 1458 static void
1459 1459 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1460 1460 {
1461 1461 int i;
1462 1462
1463 1463 for (i = 0; i < TD_EVENTSIZE; i++)
1464 1464 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1465 1465 }
1466 1466
1467 1467 /*
1468 1468 * Either add or delete the given event set from a thread's event mask.
1469 1469 */
1470 1470 static td_err_e
1471 1471 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1472 1472 {
1473 1473 struct ps_prochandle *ph_p;
1474 1474 td_err_e return_val = TD_OK;
1475 1475 char enable;
1476 1476 td_thr_events_t evset;
1477 1477 psaddr_t psaddr_evset;
1478 1478 psaddr_t psaddr_enab;
1479 1479
1480 1480 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1481 1481 return (return_val);
1482 1482 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1483 1483 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1484 1484 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1485 1485 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1486 1486 } else {
1487 1487 #if defined(_LP64) && defined(_SYSCALL32)
1488 1488 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1489 1489 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1490 1490 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1491 1491 #else
1492 1492 ph_unlock(th_p->th_ta_p);
1493 1493 return (TD_ERR);
1494 1494 #endif /* _SYSCALL32 */
1495 1495 }
1496 1496 if (ps_pstop(ph_p) != PS_OK) {
1497 1497 ph_unlock(th_p->th_ta_p);
1498 1498 return (TD_DBERR);
1499 1499 }
1500 1500
1501 1501 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1502 1502 return_val = TD_DBERR;
1503 1503 else {
1504 1504 if (onoff)
1505 1505 eventsetaddset(&evset, events);
1506 1506 else
1507 1507 eventsetdelset(&evset, events);
1508 1508 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1509 1509 != PS_OK)
1510 1510 return_val = TD_DBERR;
1511 1511 else {
1512 1512 enable = 0;
1513 1513 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1514 1514 enable = 1;
1515 1515 if (ps_pdwrite(ph_p, psaddr_enab,
1516 1516 &enable, sizeof (enable)) != PS_OK)
1517 1517 return_val = TD_DBERR;
1518 1518 }
1519 1519 }
1520 1520
1521 1521 (void) ps_pcontinue(ph_p);
1522 1522 ph_unlock(th_p->th_ta_p);
1523 1523 return (return_val);
1524 1524 }
1525 1525
1526 1526 /*
1527 1527 * Enable or disable tracing for a given thread. Tracing
1528 1528 * is filtered based on the event mask of each thread. Tracing
1529 1529 * can be turned on/off for the thread without changing thread
1530 1530 * event mask.
1531 1531 * Currently unused by dbx.
1532 1532 */
1533 1533 #pragma weak td_thr_event_enable = __td_thr_event_enable
1534 1534 td_err_e
1535 1535 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1536 1536 {
1537 1537 td_thr_events_t evset;
1538 1538
1539 1539 td_event_emptyset(&evset);
1540 1540 td_event_addset(&evset, TD_EVENTS_ENABLE);
1541 1541 return (mod_eventset(th_p, &evset, onoff));
1542 1542 }
1543 1543
1544 1544 /*
1545 1545 * Set event mask to enable event. event is turned on in
1546 1546 * event mask for thread. If a thread encounters an event
1547 1547 * for which its event mask is on, notification will be sent
1548 1548 * to the debugger.
1549 1549 * Addresses for each event are provided to the
1550 1550 * debugger. It is assumed that a breakpoint of some type will
1551 1551 * be placed at that address. If the event mask for the thread
1552 1552 * is on, the instruction at the address will be executed.
1553 1553 * Otherwise, the instruction will be skipped.
1554 1554 */
1555 1555 #pragma weak td_thr_set_event = __td_thr_set_event
1556 1556 td_err_e
1557 1557 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1558 1558 {
1559 1559 return (mod_eventset(th_p, events, 1));
1560 1560 }
1561 1561
1562 1562 /*
1563 1563 * Enable or disable a set of events in the process-global event mask,
1564 1564 * depending on the value of onoff.
1565 1565 */
1566 1566 static td_err_e
1567 1567 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1568 1568 {
1569 1569 struct ps_prochandle *ph_p;
1570 1570 td_thr_events_t targ_eventset;
1571 1571 td_err_e return_val;
1572 1572
1573 1573 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1574 1574 return (return_val);
1575 1575 if (ps_pstop(ph_p) != PS_OK) {
1576 1576 ph_unlock(ta_p);
1577 1577 return (TD_DBERR);
1578 1578 }
1579 1579 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1580 1580 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1581 1581 return_val = TD_DBERR;
1582 1582 else {
1583 1583 if (onoff)
1584 1584 eventsetaddset(&targ_eventset, events);
1585 1585 else
1586 1586 eventsetdelset(&targ_eventset, events);
1587 1587 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1588 1588 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1589 1589 return_val = TD_DBERR;
1590 1590 }
1591 1591 (void) ps_pcontinue(ph_p);
1592 1592 ph_unlock(ta_p);
1593 1593 return (return_val);
1594 1594 }
1595 1595
1596 1596 /*
1597 1597 * Enable a set of events in the process-global event mask.
1598 1598 */
1599 1599 #pragma weak td_ta_set_event = __td_ta_set_event
1600 1600 td_err_e
1601 1601 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1602 1602 {
1603 1603 return (td_ta_mod_event(ta_p, events, 1));
1604 1604 }
1605 1605
1606 1606 /*
1607 1607 * Set event mask to disable the given event set; these events are cleared
1608 1608 * from the event mask of the thread. Events that occur for a thread
1609 1609 * with the event masked off will not cause notification to be
1610 1610 * sent to the debugger (see td_thr_set_event for fuller description).
1611 1611 */
1612 1612 #pragma weak td_thr_clear_event = __td_thr_clear_event
1613 1613 td_err_e
1614 1614 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1615 1615 {
1616 1616 return (mod_eventset(th_p, events, 0));
1617 1617 }
1618 1618
1619 1619 /*
1620 1620 * Disable a set of events in the process-global event mask.
1621 1621 */
1622 1622 #pragma weak td_ta_clear_event = __td_ta_clear_event
1623 1623 td_err_e
1624 1624 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1625 1625 {
1626 1626 return (td_ta_mod_event(ta_p, events, 0));
1627 1627 }
1628 1628
1629 1629 /*
1630 1630 * This function returns the most recent event message, if any,
1631 1631 * associated with a thread. Given a thread handle, return the message
1632 1632 * corresponding to the event encountered by the thread. Only one
1633 1633 * message per thread is saved. Messages from earlier events are lost
1634 1634 * when later events occur.
1635 1635 */
1636 1636 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1637 1637 td_err_e
1638 1638 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1639 1639 {
1640 1640 struct ps_prochandle *ph_p;
1641 1641 td_err_e return_val = TD_OK;
1642 1642 psaddr_t psaddr;
1643 1643
1644 1644 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1645 1645 return (return_val);
1646 1646 if (ps_pstop(ph_p) != PS_OK) {
1647 1647 ph_unlock(th_p->th_ta_p);
1648 1648 return (TD_BADTA);
1649 1649 }
1650 1650 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1651 1651 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1652 1652 td_evbuf_t evbuf;
1653 1653
1654 1654 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1655 1655 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1656 1656 return_val = TD_DBERR;
1657 1657 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1658 1658 return_val = TD_NOEVENT;
1659 1659 } else {
1660 1660 msg->event = evbuf.eventnum;
1661 1661 msg->th_p = (td_thrhandle_t *)th_p;
1662 1662 msg->msg.data = (uintptr_t)evbuf.eventdata;
1663 1663 /* "Consume" the message */
1664 1664 evbuf.eventnum = TD_EVENT_NONE;
1665 1665 evbuf.eventdata = NULL;
1666 1666 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1667 1667 != PS_OK)
1668 1668 return_val = TD_DBERR;
1669 1669 }
1670 1670 } else {
1671 1671 #if defined(_LP64) && defined(_SYSCALL32)
1672 1672 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1673 1673 td_evbuf32_t evbuf;
1674 1674
1675 1675 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1676 1676 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1677 1677 return_val = TD_DBERR;
1678 1678 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1679 1679 return_val = TD_NOEVENT;
1680 1680 } else {
1681 1681 msg->event = evbuf.eventnum;
1682 1682 msg->th_p = (td_thrhandle_t *)th_p;
1683 1683 msg->msg.data = (uintptr_t)evbuf.eventdata;
1684 1684 /* "Consume" the message */
1685 1685 evbuf.eventnum = TD_EVENT_NONE;
1686 1686 evbuf.eventdata = NULL;
1687 1687 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1688 1688 != PS_OK)
1689 1689 return_val = TD_DBERR;
1690 1690 }
1691 1691 #else
1692 1692 return_val = TD_ERR;
1693 1693 #endif /* _SYSCALL32 */
1694 1694 }
1695 1695
1696 1696 (void) ps_pcontinue(ph_p);
1697 1697 ph_unlock(th_p->th_ta_p);
1698 1698 return (return_val);
1699 1699 }
1700 1700
1701 1701 /*
1702 1702 * The callback function td_ta_event_getmsg uses when looking for
1703 1703 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1704 1704 */
1705 1705 static int
1706 1706 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1707 1707 {
1708 1708 static td_thrhandle_t th;
1709 1709 td_event_msg_t *msg = arg;
1710 1710
1711 1711 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1712 1712 /*
1713 1713 * Got an event, stop iterating.
1714 1714 *
1715 1715 * Because of past mistakes in interface definition,
1716 1716 * we are forced to pass back a static local variable
1717 1717 * for the thread handle because th_p is a pointer
1718 1718 * to a local variable in __td_ta_thr_iter().
1719 1719 * Grr...
1720 1720 */
1721 1721 th = *th_p;
1722 1722 msg->th_p = &th;
1723 1723 return (1);
1724 1724 }
1725 1725 return (0);
1726 1726 }
1727 1727
1728 1728 /*
1729 1729 * This function is just like td_thr_event_getmsg, except that it is
1730 1730 * passed a process handle rather than a thread handle, and returns
1731 1731 * an event message for some thread in the process that has an event
1732 1732 * message pending. If no thread has an event message pending, this
1733 1733 * routine returns TD_NOEVENT. Thus, all pending event messages may
1734 1734 * be collected from a process by repeatedly calling this routine
1735 1735 * until it returns TD_NOEVENT.
1736 1736 */
1737 1737 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1738 1738 td_err_e
1739 1739 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1740 1740 {
1741 1741 td_err_e return_val;
1742 1742
1743 1743 if (ta_p == NULL)
1744 1744 return (TD_BADTA);
1745 1745 if (ta_p->ph_p == NULL)
1746 1746 return (TD_BADPH);
1747 1747 if (msg == NULL)
1748 1748 return (TD_ERR);
1749 1749 msg->event = TD_EVENT_NONE;
1750 1750 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1751 1751 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1752 1752 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1753 1753 return (return_val);
1754 1754 if (msg->event == TD_EVENT_NONE)
1755 1755 return (TD_NOEVENT);
1756 1756 return (TD_OK);
1757 1757 }
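
As the comment above notes, a debugger can collect every pending event message by calling this routine until it reports TD_NOEVENT; a minimal sketch, assuming a valid agent "ta":

/*
 * Drain all pending event messages from the target process.
 */
#include <stdio.h>
#include <thread_db.h>

static void
drain_event_messages(td_thragent_t *ta)
{
        td_event_msg_t msg;
        td_err_e err;

        while ((err = td_ta_event_getmsg(ta, &msg)) == TD_OK) {
                (void) printf("event %d, data 0x%lx\n",
                    (int)msg.event, (unsigned long)msg.msg.data);
        }
        if (err != TD_NOEVENT)
                (void) printf("td_ta_event_getmsg() failed: %d\n", (int)err);
}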
1758 1758
1759 1759 static lwpid_t
1760 1760 thr_to_lwpid(const td_thrhandle_t *th_p)
1761 1761 {
1762 1762 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1763 1763 lwpid_t lwpid;
1764 1764
1765 1765 /*
1766 1766 * The caller holds the prochandle lock
1767 1767 * and has already verfied everything.
1768 1768 * and has already verified everything.
1769 1769 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1770 1770 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1771 1771
1772 1772 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1773 1773 &lwpid, sizeof (lwpid)) != PS_OK)
1774 1774 lwpid = 0;
1775 1775 else if (lwpid == 0)
1776 1776 lwpid = 1;
1777 1777 } else {
1778 1778 #if defined(_LP64) && defined(_SYSCALL32)
1779 1779 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1780 1780
1781 1781 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1782 1782 &lwpid, sizeof (lwpid)) != PS_OK)
1783 1783 lwpid = 0;
1784 1784 else if (lwpid == 0)
1785 1785 lwpid = 1;
1786 1786 #else
1787 1787 lwpid = 0;
1788 1788 #endif /* _SYSCALL32 */
1789 1789 }
1790 1790
1791 1791 return (lwpid);
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * Suspend a thread.
1796 1796 * XXX: What does this mean in a one-level model?
1797 1797 */
1798 1798 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1799 1799 td_err_e
1800 1800 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1801 1801 {
1802 1802 struct ps_prochandle *ph_p;
1803 1803 td_err_e return_val;
1804 1804
1805 1805 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1806 1806 return (return_val);
1807 1807 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1808 1808 return_val = TD_DBERR;
1809 1809 ph_unlock(th_p->th_ta_p);
1810 1810 return (return_val);
1811 1811 }
1812 1812
1813 1813 /*
1814 1814 * Resume a suspended thread.
1815 1815 * XXX: What does this mean in a one-level model?
1816 1816 */
1817 1817 #pragma weak td_thr_dbresume = __td_thr_dbresume
1818 1818 td_err_e
1819 1819 __td_thr_dbresume(const td_thrhandle_t *th_p)
1820 1820 {
1821 1821 struct ps_prochandle *ph_p;
1822 1822 td_err_e return_val;
1823 1823
1824 1824 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1825 1825 return (return_val);
1826 1826 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1827 1827 return_val = TD_DBERR;
1828 1828 ph_unlock(th_p->th_ta_p);
1829 1829 return (return_val);
1830 1830 }
1831 1831
1832 1832 /*
1833 1833 * Set a thread's signal mask.
1834 1834 * Currently unused by dbx.
1835 1835 */
1836 1836 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1837 1837 /* ARGSUSED */
1838 1838 td_err_e
1839 1839 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1840 1840 {
1841 1841 return (TD_NOCAPAB);
1842 1842 }
1843 1843
1844 1844 /*
1845 1845 * Set a thread's "signals-pending" set.
1846 1846 * Currently unused by dbx.
1847 1847 */
1848 1848 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1849 1849 /* ARGSUSED */
1850 1850 td_err_e
1851 1851 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1852 1852 uchar_t ti_pending_flag, const sigset_t ti_pending)
1853 1853 {
1854 1854 return (TD_NOCAPAB);
1855 1855 }
1856 1856
1857 1857 /*
1858 1858 * Get a thread's general register set.
1859 1859 */
1860 1860 #pragma weak td_thr_getgregs = __td_thr_getgregs
1861 1861 td_err_e
1862 1862 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1863 1863 {
1864 1864 struct ps_prochandle *ph_p;
1865 1865 td_err_e return_val;
1866 1866
1867 1867 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1868 1868 return (return_val);
1869 1869 if (ps_pstop(ph_p) != PS_OK) {
1870 1870 ph_unlock(th_p->th_ta_p);
1871 1871 return (TD_DBERR);
1872 1872 }
1873 1873
1874 1874 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1875 1875 return_val = TD_DBERR;
1876 1876
1877 1877 (void) ps_pcontinue(ph_p);
1878 1878 ph_unlock(th_p->th_ta_p);
1879 1879 return (return_val);
1880 1880 }
1881 1881
1882 1882 /*
1883 1883 * Set a thread's general register set.
1884 1884 */
1885 1885 #pragma weak td_thr_setgregs = __td_thr_setgregs
1886 1886 td_err_e
1887 1887 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1888 1888 {
1889 1889 struct ps_prochandle *ph_p;
1890 1890 td_err_e return_val;
1891 1891
1892 1892 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1893 1893 return (return_val);
1894 1894 if (ps_pstop(ph_p) != PS_OK) {
1895 1895 ph_unlock(th_p->th_ta_p);
1896 1896 return (TD_DBERR);
1897 1897 }
1898 1898
1899 1899 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1900 1900 return_val = TD_DBERR;
1901 1901
1902 1902 (void) ps_pcontinue(ph_p);
1903 1903 ph_unlock(th_p->th_ta_p);
1904 1904 return (return_val);
1905 1905 }
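
A minimal sketch of reading the general registers from the debugger side; prgregset_t and NPRGREG are assumed to come from the procfs headers (here pulled in via <procfs.h>), and the register layout itself is architecture-specific:

/*
 * Dump a stopped thread's general registers.
 */
#include <stdio.h>
#include <procfs.h>
#include <thread_db.h>

static void
dump_gregs(td_thrhandle_t *th)
{
        prgregset_t regs;
        int i;

        if (td_thr_getgregs(th, regs) != TD_OK) {
                (void) printf("cannot read registers\n");
                return;
        }
        for (i = 0; i < NPRGREG; i++)
                (void) printf("r[%d] = 0x%lx\n", i, (unsigned long)regs[i]);
}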
1906 1906
1907 1907 /*
1908 1908 * Get a thread's floating-point register set.
1909 1909 */
1910 1910 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1911 1911 td_err_e
1912 1912 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1913 1913 {
1914 1914 struct ps_prochandle *ph_p;
1915 1915 td_err_e return_val;
1916 1916
1917 1917 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1918 1918 return (return_val);
1919 1919 if (ps_pstop(ph_p) != PS_OK) {
1920 1920 ph_unlock(th_p->th_ta_p);
1921 1921 return (TD_DBERR);
1922 1922 }
1923 1923
1924 1924 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1925 1925 return_val = TD_DBERR;
1926 1926
1927 1927 (void) ps_pcontinue(ph_p);
1928 1928 ph_unlock(th_p->th_ta_p);
1929 1929 return (return_val);
1930 1930 }
1931 1931
1932 1932 /*
1933 1933 * Set a thread's floating-point register set.
1934 1934 */
1935 1935 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1936 1936 td_err_e
1937 1937 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1938 1938 {
1939 1939 struct ps_prochandle *ph_p;
1940 1940 td_err_e return_val;
1941 1941
1942 1942 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1943 1943 return (return_val);
1944 1944 if (ps_pstop(ph_p) != PS_OK) {
1945 1945 ph_unlock(th_p->th_ta_p);
1946 1946 return (TD_DBERR);
1947 1947 }
1948 1948
1949 1949 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1950 1950 return_val = TD_DBERR;
1951 1951
1952 1952 (void) ps_pcontinue(ph_p);
1953 1953 ph_unlock(th_p->th_ta_p);
1954 1954 return (return_val);
1955 1955 }
1956 1956
1957 1957 /*
1958 1958 * Get the size of the extra state register set for this architecture.
1959 1959 * Currently unused by dbx.
1960 1960 */
1961 1961 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1962 1962 /* ARGSUSED */
1963 1963 td_err_e
1964 1964 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1965 1965 {
1966 -#if defined(__sparc)
1967 1966 struct ps_prochandle *ph_p;
1968 1967 td_err_e return_val;
1969 1968
1970 1969 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1971 1970 return (return_val);
1972 1971 if (ps_pstop(ph_p) != PS_OK) {
1973 1972 ph_unlock(th_p->th_ta_p);
1974 1973 return (TD_DBERR);
1975 1974 }
1976 1975
1977 1976 if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
1978 1977 return_val = TD_DBERR;
1979 1978
1980 1979 (void) ps_pcontinue(ph_p);
1981 1980 ph_unlock(th_p->th_ta_p);
1982 1981 return (return_val);
1983 -#else /* __sparc */
1984 - return (TD_NOXREGS);
1985 -#endif /* __sparc */
1986 1982 }
1987 1983
1988 1984 /*
1989 1985 * Get a thread's extra state register set.
1990 1986 */
1991 1987 #pragma weak td_thr_getxregs = __td_thr_getxregs
1992 1988 /* ARGSUSED */
1993 1989 td_err_e
1994 1990 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
1995 1991 {
1996 -#if defined(__sparc)
1997 1992 struct ps_prochandle *ph_p;
1998 1993 td_err_e return_val;
1999 1994
2000 1995 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2001 1996 return (return_val);
2002 1997 if (ps_pstop(ph_p) != PS_OK) {
2003 1998 ph_unlock(th_p->th_ta_p);
2004 1999 return (TD_DBERR);
2005 2000 }
2006 2001
2007 2002 if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2008 2003 return_val = TD_DBERR;
2009 2004
2010 2005 (void) ps_pcontinue(ph_p);
2011 2006 ph_unlock(th_p->th_ta_p);
2012 2007 return (return_val);
2013 -#else /* __sparc */
2014 - return (TD_NOXREGS);
2015 -#endif /* __sparc */
2016 2008 }
2017 2009
2018 2010 /*
2019 2011 * Set a thread's extra state register set.
2020 2012 */
2021 2013 #pragma weak td_thr_setxregs = __td_thr_setxregs
2022 2014 /* ARGSUSED */
2023 2015 td_err_e
2024 2016 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2025 2017 {
2026 -#if defined(__sparc)
2027 2018 struct ps_prochandle *ph_p;
2028 2019 td_err_e return_val;
2029 2020
2030 2021 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2031 2022 return (return_val);
2032 2023 if (ps_pstop(ph_p) != PS_OK) {
2033 2024 ph_unlock(th_p->th_ta_p);
2034 2025 return (TD_DBERR);
2035 2026 }
2036 2027
2037 2028 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2038 2029 return_val = TD_DBERR;
2039 2030
2040 2031 (void) ps_pcontinue(ph_p);
2041 2032 ph_unlock(th_p->th_ta_p);
2042 2033 return (return_val);
2043 -#else /* __sparc */
2044 - return (TD_NOXREGS);
2045 -#endif /* __sparc */
2046 2034 }
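
With the sparc-only guards removed by this change, the usual xregs calling sequence applies on all architectures: query the size, allocate a buffer, then fetch the raw architecture-specific contents. A hedged sketch:

/*
 * Read a thread's extra-state (xregs) block; the caller frees the buffer.
 */
#include <stdlib.h>
#include <thread_db.h>

static void *
read_xregs(td_thrhandle_t *th, int *sizep)
{
        int size = 0;
        void *buf;

        if (td_thr_getxregsize(th, &size) != TD_OK || size <= 0)
                return (NULL);
        if ((buf = malloc(size)) == NULL)
                return (NULL);
        if (td_thr_getxregs(th, buf) != TD_OK) {
                free(buf);
                return (NULL);
        }
        *sizep = size;
        return (buf);
}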
2047 2035
2048 2036 struct searcher {
2049 2037 psaddr_t addr;
2050 2038 int status;
2051 2039 };
2052 2040
2053 2041 /*
2054 2042 * Check the struct thread address in *th_p against the first
2055 2043 * value in "data". If the value is found, set the second value
2056 2044 * in "data" to 1 and return 1 to terminate iterations.
2057 2045 * This function is used by td_thr_validate() to verify that
2058 2046 * a thread handle is valid.
2059 2047 */
2060 2048 static int
2061 2049 td_searcher(const td_thrhandle_t *th_p, void *data)
2062 2050 {
2063 2051 struct searcher *searcher_data = (struct searcher *)data;
2064 2052
2065 2053 if (searcher_data->addr == th_p->th_unique) {
2066 2054 searcher_data->status = 1;
2067 2055 return (1);
2068 2056 }
2069 2057 return (0);
2070 2058 }
2071 2059
2072 2060 /*
2073 2061 * Validate the thread handle. Check that
2074 2062 * a thread exists in the thread agent/process that
2075 2063 * corresponds to the thread with handle *th_p.
2076 2064 * Currently unused by dbx.
2077 2065 */
2078 2066 #pragma weak td_thr_validate = __td_thr_validate
2079 2067 td_err_e
2080 2068 __td_thr_validate(const td_thrhandle_t *th_p)
2081 2069 {
2082 2070 td_err_e return_val;
2083 2071 struct searcher searcher_data = {0, 0};
2084 2072
2085 2073 if (th_p == NULL)
2086 2074 return (TD_BADTH);
2087 2075 if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2088 2076 return (TD_BADTH);
2089 2077
2090 2078 /*
2091 2079 * LOCKING EXCEPTION - Locking is not required
2092 2080 * here because no use of the thread agent is made (other
2093 2081 * than the sanity check) and checking of the thread
2094 2082 * agent will be done in __td_ta_thr_iter.
2095 2083 */
2096 2084
2097 2085 searcher_data.addr = th_p->th_unique;
2098 2086 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2099 2087 td_searcher, &searcher_data,
2100 2088 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2101 2089 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2102 2090
2103 2091 if (return_val == TD_OK && searcher_data.status == 0)
2104 2092 return_val = TD_NOTHR;
2105 2093
2106 2094 return (return_val);
2107 2095 }
2108 2096
2109 2097 /*
2110 2098 * Get a thread's private binding to a given thread specific
2111 2099 * data (TSD) key (see thr_getspecific(3T)). If the thread doesn't
2112 2100 * have a binding for a particular key, then NULL is returned.
2113 2101 */
2114 2102 #pragma weak td_thr_tsd = __td_thr_tsd
2115 2103 td_err_e
2116 2104 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2117 2105 {
2118 2106 struct ps_prochandle *ph_p;
2119 2107 td_thragent_t *ta_p;
2120 2108 td_err_e return_val;
2121 2109 int maxkey;
2122 2110 int nkey;
2123 2111 psaddr_t tsd_paddr;
2124 2112
2125 2113 if (data_pp == NULL)
2126 2114 return (TD_ERR);
2127 2115 *data_pp = NULL;
2128 2116 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2129 2117 return (return_val);
2130 2118 ta_p = th_p->th_ta_p;
2131 2119 if (ps_pstop(ph_p) != PS_OK) {
2132 2120 ph_unlock(ta_p);
2133 2121 return (TD_DBERR);
2134 2122 }
2135 2123
2136 2124 if (ta_p->model == PR_MODEL_NATIVE) {
2137 2125 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2138 2126 tsd_metadata_t tsdm;
2139 2127 tsd_t stsd;
2140 2128
2141 2129 if (ps_pdread(ph_p,
2142 2130 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2143 2131 &tsdm, sizeof (tsdm)) != PS_OK)
2144 2132 return_val = TD_DBERR;
2145 2133 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2146 2134 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2147 2135 return_val = TD_DBERR;
2148 2136 else if (tsd_paddr != NULL &&
2149 2137 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2150 2138 return_val = TD_DBERR;
2151 2139 else {
2152 2140 maxkey = tsdm.tsdm_nused;
2153 2141 nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2154 2142
2155 2143 if (key < TSD_NFAST)
2156 2144 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2157 2145 }
2158 2146 } else {
2159 2147 #if defined(_LP64) && defined(_SYSCALL32)
2160 2148 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2161 2149 tsd_metadata32_t tsdm;
2162 2150 tsd32_t stsd;
2163 2151 caddr32_t addr;
2164 2152
2165 2153 if (ps_pdread(ph_p,
2166 2154 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2167 2155 &tsdm, sizeof (tsdm)) != PS_OK)
2168 2156 return_val = TD_DBERR;
2169 2157 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2170 2158 &addr, sizeof (addr)) != PS_OK)
2171 2159 return_val = TD_DBERR;
2172 2160 else if (addr != NULL &&
2173 2161 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2174 2162 return_val = TD_DBERR;
2175 2163 else {
2176 2164 maxkey = tsdm.tsdm_nused;
2177 2165 nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2178 2166
2179 2167 if (key < TSD_NFAST) {
2180 2168 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2181 2169 } else {
2182 2170 tsd_paddr = addr;
2183 2171 }
2184 2172 }
2185 2173 #else
2186 2174 return_val = TD_ERR;
2187 2175 #endif /* _SYSCALL32 */
2188 2176 }
2189 2177
2190 2178 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2191 2179 return_val = TD_NOTSD;
2192 2180 if (return_val != TD_OK || key >= nkey) {
2193 2181 /* NULL has already been stored in data_pp */
2194 2182 (void) ps_pcontinue(ph_p);
2195 2183 ph_unlock(ta_p);
2196 2184 return (return_val);
2197 2185 }
2198 2186
2199 2187 /*
2200 2188 * Read the value from the thread's tsd array.
2201 2189 */
2202 2190 if (ta_p->model == PR_MODEL_NATIVE) {
2203 2191 void *value;
2204 2192
2205 2193 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2206 2194 &value, sizeof (value)) != PS_OK)
2207 2195 return_val = TD_DBERR;
2208 2196 else
2209 2197 *data_pp = value;
2210 2198 #if defined(_LP64) && defined(_SYSCALL32)
2211 2199 } else {
2212 2200 caddr32_t value32;
2213 2201
2214 2202 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2215 2203 &value32, sizeof (value32)) != PS_OK)
2216 2204 return_val = TD_DBERR;
2217 2205 else
2218 2206 *data_pp = (void *)(uintptr_t)value32;
2219 2207 #endif /* _SYSCALL32 */
2220 2208 }
2221 2209
2222 2210 (void) ps_pcontinue(ph_p);
2223 2211 ph_unlock(ta_p);
2224 2212 return (return_val);
2225 2213 }
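
A minimal sketch of the lookup from the debugger side; "key" is assumed to be a thread_key_t the target process created with thr_keycreate(), and a NULL result means no binding:

/*
 * Report a thread's binding for one TSD key.
 */
#include <stdio.h>
#include <thread_db.h>

static void
show_tsd_binding(td_thrhandle_t *th, thread_key_t key)
{
        void *value = NULL;
        td_err_e err = td_thr_tsd(th, key, &value);

        if (err == TD_NOTSD)
                (void) printf("key %u is not in use\n", (unsigned int)key);
        else if (err != TD_OK)
                (void) printf("td_thr_tsd() failed: %d\n", (int)err);
        else
                (void) printf("key %u -> %p\n", (unsigned int)key, value);
}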
2226 2214
2227 2215 /*
2228 2216 * Get the base address of a thread's thread local storage (TLS) block
2229 2217 * for the module (executable or shared object) identified by 'moduleid'.
2230 2218 */
2231 2219 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2232 2220 td_err_e
2233 2221 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2234 2222 {
2235 2223 struct ps_prochandle *ph_p;
2236 2224 td_thragent_t *ta_p;
2237 2225 td_err_e return_val;
2238 2226
2239 2227 if (base == NULL)
2240 2228 return (TD_ERR);
2241 2229 *base = NULL;
2242 2230 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2243 2231 return (return_val);
2244 2232 ta_p = th_p->th_ta_p;
2245 2233 if (ps_pstop(ph_p) != PS_OK) {
2246 2234 ph_unlock(ta_p);
2247 2235 return (TD_DBERR);
2248 2236 }
2249 2237
2250 2238 if (ta_p->model == PR_MODEL_NATIVE) {
2251 2239 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2252 2240 tls_metadata_t tls_metadata;
2253 2241 TLS_modinfo tlsmod;
2254 2242 tls_t tls;
2255 2243
2256 2244 if (ps_pdread(ph_p,
2257 2245 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2258 2246 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2259 2247 return_val = TD_DBERR;
2260 2248 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2261 2249 return_val = TD_NOTLS;
2262 2250 else if (ps_pdread(ph_p,
2263 2251 (psaddr_t)((TLS_modinfo *)
2264 2252 tls_metadata.tls_modinfo.tls_data + moduleid),
2265 2253 &tlsmod, sizeof (tlsmod)) != PS_OK)
2266 2254 return_val = TD_DBERR;
2267 2255 else if (tlsmod.tm_memsz == 0)
2268 2256 return_val = TD_NOTLS;
2269 2257 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2270 2258 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2271 2259 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2272 2260 &tls, sizeof (tls)) != PS_OK)
2273 2261 return_val = TD_DBERR;
2274 2262 else if (moduleid >= tls.tls_size)
2275 2263 return_val = TD_TLSDEFER;
2276 2264 else if (ps_pdread(ph_p,
2277 2265 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2278 2266 &tls, sizeof (tls)) != PS_OK)
2279 2267 return_val = TD_DBERR;
2280 2268 else if (tls.tls_size == 0)
2281 2269 return_val = TD_TLSDEFER;
2282 2270 else
2283 2271 *base = (psaddr_t)tls.tls_data;
2284 2272 } else {
2285 2273 #if defined(_LP64) && defined(_SYSCALL32)
2286 2274 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2287 2275 tls_metadata32_t tls_metadata;
2288 2276 TLS_modinfo32 tlsmod;
2289 2277 tls32_t tls;
2290 2278
2291 2279 if (ps_pdread(ph_p,
2292 2280 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2293 2281 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2294 2282 return_val = TD_DBERR;
2295 2283 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2296 2284 return_val = TD_NOTLS;
2297 2285 else if (ps_pdread(ph_p,
2298 2286 (psaddr_t)((TLS_modinfo32 *)
2299 2287 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2300 2288 &tlsmod, sizeof (tlsmod)) != PS_OK)
2301 2289 return_val = TD_DBERR;
2302 2290 else if (tlsmod.tm_memsz == 0)
2303 2291 return_val = TD_NOTLS;
2304 2292 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2305 2293 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2306 2294 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2307 2295 &tls, sizeof (tls)) != PS_OK)
2308 2296 return_val = TD_DBERR;
2309 2297 else if (moduleid >= tls.tls_size)
2310 2298 return_val = TD_TLSDEFER;
2311 2299 else if (ps_pdread(ph_p,
2312 2300 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2313 2301 &tls, sizeof (tls)) != PS_OK)
2314 2302 return_val = TD_DBERR;
2315 2303 else if (tls.tls_size == 0)
2316 2304 return_val = TD_TLSDEFER;
2317 2305 else
2318 2306 *base = (psaddr_t)tls.tls_data;
2319 2307 #else
2320 2308 return_val = TD_ERR;
2321 2309 #endif /* _SYSCALL32 */
2322 2310 }
2323 2311
2324 2312 (void) ps_pcontinue(ph_p);
2325 2313 ph_unlock(ta_p);
2326 2314 return (return_val);
2327 2315 }
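
A hedged sketch of how a debugger might use the TLS base: the module id and the variable's offset within that module's block are assumed to come from the debugger's symbol and rtld information, and TD_TLSDEFER means the block has not been allocated for this thread yet:

/*
 * Compute the address of a __thread variable in the target.
 */
#include <thread_db.h>

static td_err_e
tls_var_addr(td_thrhandle_t *th, ulong_t moduleid, psaddr_t offset,
    psaddr_t *addrp)
{
        psaddr_t base;
        td_err_e err;

        if ((err = td_thr_tlsbase(th, moduleid, &base)) != TD_OK)
                return (err);
        *addrp = base + offset;
        return (TD_OK);
}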
2328 2316
2329 2317 /*
2330 2318 * Change a thread's priority to the value specified by ti_pri.
2331 2319 * Currently unused by dbx.
2332 2320 */
2333 2321 #pragma weak td_thr_setprio = __td_thr_setprio
2334 2322 /* ARGSUSED */
2335 2323 td_err_e
2336 2324 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2337 2325 {
2338 2326 return (TD_NOCAPAB);
2339 2327 }
2340 2328
2341 2329 /*
2342 2330 * This structure links td_thr_lockowner and the lowner_cb callback function.
2343 2331 */
2344 2332 typedef struct {
2345 2333 td_sync_iter_f *owner_cb;
2346 2334 void *owner_cb_arg;
2347 2335 td_thrhandle_t *th_p;
2348 2336 } lowner_cb_ctl_t;
2349 2337
2350 2338 static int
2351 2339 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2352 2340 {
2353 2341 lowner_cb_ctl_t *ocb = arg;
2354 2342 int trunc = 0;
2355 2343 union {
2356 2344 rwlock_t rwl;
2357 2345 mutex_t mx;
2358 2346 } rw_m;
2359 2347
2360 2348 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2361 2349 &rw_m, sizeof (rw_m)) != PS_OK) {
2362 2350 trunc = 1;
2363 2351 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2364 2352 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2365 2353 return (0);
2366 2354 }
2367 2355 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2368 2356 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2369 2357 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2370 2358 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2371 2359 mutex_t *rwlock = &rw_m.rwl.mutex;
2372 2360 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2373 2361 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2374 2362 }
2375 2363 return (0);
2376 2364 }
2377 2365
2378 2366 /*
2379 2367 * Iterate over the set of locks owned by a specified thread.
2380 2368 * If cb returns a non-zero value, terminate iterations.
2381 2369 */
2382 2370 #pragma weak td_thr_lockowner = __td_thr_lockowner
2383 2371 td_err_e
2384 2372 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2385 2373 void *cb_data)
2386 2374 {
2387 2375 td_thragent_t *ta_p;
2388 2376 td_err_e return_val;
2389 2377 lowner_cb_ctl_t lcb;
2390 2378
2391 2379 /*
2392 2380 * Just sanity checks.
2393 2381 */
2394 2382 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2395 2383 return (return_val);
2396 2384 ta_p = th_p->th_ta_p;
2397 2385 ph_unlock(ta_p);
2398 2386
2399 2387 lcb.owner_cb = cb;
2400 2388 lcb.owner_cb_arg = cb_data;
2401 2389 lcb.th_p = (td_thrhandle_t *)th_p;
2402 2390 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2403 2391 }
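
A small sketch of the iteration: the callback matches td_sync_iter_f, and returning 0 continues the walk while non-zero stops it:

/*
 * List every synchronization object the given thread currently owns.
 */
#include <stdio.h>
#include <thread_db.h>

/* ARGSUSED */
static int
print_owned_lock(const td_synchandle_t *sh, void *arg)
{
        (void) printf("owned sync object at 0x%lx\n",
            (unsigned long)sh->sh_unique);
        return (0);
}

static void
show_owned_locks(const td_thrhandle_t *th)
{
        (void) td_thr_lockowner(th, print_owned_lock, NULL);
}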
2404 2392
2405 2393 /*
2406 2394 * If a thread is asleep on a synchronization variable,
2407 2395 * then get the synchronization handle.
2408 2396 */
2409 2397 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2410 2398 td_err_e
2411 2399 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2412 2400 {
2413 2401 struct ps_prochandle *ph_p;
2414 2402 td_err_e return_val = TD_OK;
2415 2403 uintptr_t wchan;
2416 2404
2417 2405 if (sh_p == NULL)
2418 2406 return (TD_ERR);
2419 2407 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2420 2408 return (return_val);
2421 2409
2422 2410 /*
2423 2411 * No need to stop the process for a simple read.
2424 2412 */
2425 2413 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2426 2414 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2427 2415
2428 2416 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2429 2417 &wchan, sizeof (wchan)) != PS_OK)
2430 2418 return_val = TD_DBERR;
2431 2419 } else {
2432 2420 #if defined(_LP64) && defined(_SYSCALL32)
2433 2421 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2434 2422 caddr32_t wchan32;
2435 2423
2436 2424 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2437 2425 &wchan32, sizeof (wchan32)) != PS_OK)
2438 2426 return_val = TD_DBERR;
2439 2427 wchan = wchan32;
2440 2428 #else
2441 2429 return_val = TD_ERR;
2442 2430 #endif /* _SYSCALL32 */
2443 2431 }
2444 2432
2445 2433 if (return_val != TD_OK || wchan == NULL) {
2446 2434 sh_p->sh_ta_p = NULL;
2447 2435 sh_p->sh_unique = NULL;
2448 2436 if (return_val == TD_OK)
2449 2437 return_val = TD_ERR;
2450 2438 } else {
2451 2439 sh_p->sh_ta_p = th_p->th_ta_p;
2452 2440 sh_p->sh_unique = (psaddr_t)wchan;
2453 2441 }
2454 2442
2455 2443 ph_unlock(th_p->th_ta_p);
2456 2444 return (return_val);
2457 2445 }
2458 2446
2459 2447 /*
2460 2448 * Which thread is running on an lwp?
2461 2449 */
2462 2450 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2463 2451 td_err_e
2464 2452 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2465 2453 td_thrhandle_t *th_p)
2466 2454 {
2467 2455 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2468 2456 }
2469 2457
2470 2458 /*
2471 2459 * Common code for td_sync_get_info() and td_sync_get_stats()
2472 2460 */
2473 2461 static td_err_e
2474 2462 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2475 2463 td_syncinfo_t *si_p)
2476 2464 {
2477 2465 int trunc = 0;
2478 2466 td_so_un_t generic_so;
2479 2467
2480 2468 /*
2481 2469 * Determine the sync. object type; a little type fudgery here.
2482 2470 * First attempt to read the whole union. If that fails, attempt
2483 2471 * to read just the condvar. A condvar is the smallest sync. object.
2484 2472 */
2485 2473 if (ps_pdread(ph_p, sh_p->sh_unique,
2486 2474 &generic_so, sizeof (generic_so)) != PS_OK) {
2487 2475 trunc = 1;
2488 2476 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2489 2477 sizeof (generic_so.condition)) != PS_OK)
2490 2478 return (TD_DBERR);
2491 2479 }
2492 2480
2493 2481 switch (generic_so.condition.cond_magic) {
2494 2482 case MUTEX_MAGIC:
2495 2483 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2496 2484 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2497 2485 return (TD_DBERR);
2498 2486 si_p->si_type = TD_SYNC_MUTEX;
2499 2487 si_p->si_shared_type =
2500 2488 (generic_so.lock.mutex_type & USYNC_PROCESS);
2501 2489 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2502 2490 sizeof (generic_so.lock.mutex_flag));
2503 2491 si_p->si_state.mutex_locked =
2504 2492 (generic_so.lock.mutex_lockw != 0);
2505 2493 si_p->si_size = sizeof (generic_so.lock);
2506 2494 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2507 2495 si_p->si_rcount = generic_so.lock.mutex_rcount;
2508 2496 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2509 2497 if (si_p->si_state.mutex_locked) {
2510 2498 if (si_p->si_shared_type & USYNC_PROCESS)
2511 2499 si_p->si_ownerpid =
2512 2500 generic_so.lock.mutex_ownerpid;
2513 2501 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2514 2502 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2515 2503 }
2516 2504 break;
2517 2505 case COND_MAGIC:
2518 2506 si_p->si_type = TD_SYNC_COND;
2519 2507 si_p->si_shared_type =
2520 2508 (generic_so.condition.cond_type & USYNC_PROCESS);
2521 2509 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2522 2510 sizeof (generic_so.condition.flags.flag));
2523 2511 si_p->si_size = sizeof (generic_so.condition);
2524 2512 si_p->si_has_waiters =
2525 2513 (generic_so.condition.cond_waiters_user |
2526 2514 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2527 2515 break;
2528 2516 case SEMA_MAGIC:
2529 2517 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2530 2518 &generic_so.semaphore, sizeof (generic_so.semaphore))
2531 2519 != PS_OK)
2532 2520 return (TD_DBERR);
2533 2521 si_p->si_type = TD_SYNC_SEMA;
2534 2522 si_p->si_shared_type =
2535 2523 (generic_so.semaphore.type & USYNC_PROCESS);
2536 2524 si_p->si_state.sem_count = generic_so.semaphore.count;
2537 2525 si_p->si_size = sizeof (generic_so.semaphore);
2538 2526 si_p->si_has_waiters =
2539 2527 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2540 2528 /* this is useless but the old interface provided it */
2541 2529 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2542 2530 break;
2543 2531 case RWL_MAGIC:
2544 2532 {
2545 2533 uint32_t rwstate;
2546 2534
2547 2535 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2548 2536 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2549 2537 return (TD_DBERR);
2550 2538 si_p->si_type = TD_SYNC_RWLOCK;
2551 2539 si_p->si_shared_type =
2552 2540 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2553 2541 si_p->si_size = sizeof (generic_so.rwlock);
2554 2542
2555 2543 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2556 2544 if (rwstate & URW_WRITE_LOCKED) {
2557 2545 si_p->si_state.nreaders = -1;
2558 2546 si_p->si_is_wlock = 1;
2559 2547 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2560 2548 si_p->si_owner.th_unique =
2561 2549 generic_so.rwlock.rwlock_owner;
2562 2550 if (si_p->si_shared_type & USYNC_PROCESS)
2563 2551 si_p->si_ownerpid =
2564 2552 generic_so.rwlock.rwlock_ownerpid;
2565 2553 } else {
2566 2554 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2567 2555 }
2568 2556 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2569 2557
2570 2558 /* this is useless but the old interface provided it */
2571 2559 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2572 2560 break;
2573 2561 }
2574 2562 default:
2575 2563 return (TD_BADSH);
2576 2564 }
2577 2565
2578 2566 si_p->si_ta_p = sh_p->sh_ta_p;
2579 2567 si_p->si_sv_addr = sh_p->sh_unique;
2580 2568 return (TD_OK);
2581 2569 }
2582 2570
2583 2571 /*
2584 2572 * Given a synchronization handle, fill in the
2585 2573 * information for the synchronization variable into *si_p.
2586 2574 */
2587 2575 #pragma weak td_sync_get_info = __td_sync_get_info
2588 2576 td_err_e
2589 2577 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2590 2578 {
2591 2579 struct ps_prochandle *ph_p;
2592 2580 td_err_e return_val;
2593 2581
2594 2582 if (si_p == NULL)
2595 2583 return (TD_ERR);
2596 2584 (void) memset(si_p, 0, sizeof (*si_p));
2597 2585 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2598 2586 return (return_val);
2599 2587 if (ps_pstop(ph_p) != PS_OK) {
2600 2588 ph_unlock(sh_p->sh_ta_p);
2601 2589 return (TD_DBERR);
2602 2590 }
2603 2591
2604 2592 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2605 2593
2606 2594 (void) ps_pcontinue(ph_p);
2607 2595 ph_unlock(sh_p->sh_ta_p);
2608 2596 return (return_val);
2609 2597 }
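
A minimal sketch that classifies a synchronization object using only the fields sync_get_info_common() fills in above:

/*
 * Describe a sync object: type, lock state and waiters.
 */
#include <stdio.h>
#include <thread_db.h>

static void
describe_sync(const td_synchandle_t *sh)
{
        td_syncinfo_t si;

        if (td_sync_get_info(sh, &si) != TD_OK) {
                (void) printf("unrecognized sync object\n");
                return;
        }
        switch (si.si_type) {
        case TD_SYNC_MUTEX:
                (void) printf("mutex, %slocked, waiters=%d\n",
                    si.si_state.mutex_locked ? "" : "un",
                    (int)si.si_has_waiters);
                break;
        case TD_SYNC_COND:
                (void) printf("condvar, waiters=%d\n", (int)si.si_has_waiters);
                break;
        case TD_SYNC_SEMA:
                (void) printf("semaphore, count=%d\n",
                    (int)si.si_state.sem_count);
                break;
        case TD_SYNC_RWLOCK:
                (void) printf("rwlock, readers=%d\n",
                    (int)si.si_state.nreaders);
                break;
        default:
                break;
        }
}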
2610 2598
2611 2599 static uint_t
2612 2600 tdb_addr_hash64(uint64_t addr)
2613 2601 {
2614 2602 uint64_t value60 = (addr >> 4);
2615 2603 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2616 2604 return ((value30 >> 15) ^ (value30 & 0x7fff));
2617 2605 }
2618 2606
2619 2607 static uint_t
2620 2608 tdb_addr_hash32(uint64_t addr)
2621 2609 {
2622 2610 uint32_t value30 = (addr >> 2); /* 30 bits */
2623 2611 return ((value30 >> 15) ^ (value30 & 0x7fff));
2624 2612 }
2625 2613
2626 2614 static td_err_e
2627 2615 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2628 2616 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2629 2617 {
2630 2618 psaddr_t next_desc;
2631 2619 uint64_t first;
2632 2620 uint_t ix;
2633 2621
2634 2622 /*
2635 2623 * Compute the hash table index from the synch object's address.
2636 2624 */
2637 2625 if (ta_p->model == PR_MODEL_LP64)
2638 2626 ix = tdb_addr_hash64(sync_obj_addr);
2639 2627 else
2640 2628 ix = tdb_addr_hash32(sync_obj_addr);
2641 2629
2642 2630 /*
2643 2631 * Get the address of the first element in the linked list.
2644 2632 */
2645 2633 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2646 2634 &first, sizeof (first)) != PS_OK)
2647 2635 return (TD_DBERR);
2648 2636
2649 2637 /*
2650 2638 * Search the linked list for an entry for the synch object.
2651 2639 */
2652 2640 for (next_desc = (psaddr_t)first; next_desc != NULL;
2653 2641 next_desc = (psaddr_t)sync_stats->next) {
2654 2642 if (ps_pdread(ta_p->ph_p, next_desc,
2655 2643 sync_stats, sizeof (*sync_stats)) != PS_OK)
2656 2644 return (TD_DBERR);
2657 2645 if (sync_stats->sync_addr == sync_obj_addr)
2658 2646 return (TD_OK);
2659 2647 }
2660 2648
2661 2649 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2662 2650 return (TD_OK);
2663 2651 }
2664 2652
2665 2653 /*
2666 2654 * Given a synchronization handle, fill in the
2667 2655 * statistics for the synchronization variable into *ss_p.
2668 2656 */
2669 2657 #pragma weak td_sync_get_stats = __td_sync_get_stats
2670 2658 td_err_e
2671 2659 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2672 2660 {
2673 2661 struct ps_prochandle *ph_p;
2674 2662 td_thragent_t *ta_p;
2675 2663 td_err_e return_val;
2676 2664 register_sync_t enable;
2677 2665 psaddr_t hashaddr;
2678 2666 tdb_sync_stats_t sync_stats;
2679 2667 size_t ix;
2680 2668
2681 2669 if (ss_p == NULL)
2682 2670 return (TD_ERR);
2683 2671 (void) memset(ss_p, 0, sizeof (*ss_p));
2684 2672 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2685 2673 return (return_val);
2686 2674 ta_p = sh_p->sh_ta_p;
2687 2675 if (ps_pstop(ph_p) != PS_OK) {
2688 2676 ph_unlock(ta_p);
2689 2677 return (TD_DBERR);
2690 2678 }
2691 2679
2692 2680 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2693 2681 != TD_OK) {
2694 2682 if (return_val != TD_BADSH)
2695 2683 goto out;
2696 2684 /* we can correct TD_BADSH */
2697 2685 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2698 2686 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2699 2687 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2700 2688 /* we correct si_type and si_size below */
2701 2689 return_val = TD_OK;
2702 2690 }
2703 2691 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2704 2692 &enable, sizeof (enable)) != PS_OK) {
2705 2693 return_val = TD_DBERR;
2706 2694 goto out;
2707 2695 }
2708 2696 if (enable != REGISTER_SYNC_ON)
2709 2697 goto out;
2710 2698
2711 2699 /*
2712 2700 * Get the address of the hash table in the target process.
2713 2701 */
2714 2702 if (ta_p->model == PR_MODEL_NATIVE) {
2715 2703 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2716 2704 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2717 2705 &hashaddr, sizeof (&hashaddr)) != PS_OK) {
2718 2706 return_val = TD_DBERR;
2719 2707 goto out;
2720 2708 }
2721 2709 } else {
2722 2710 #if defined(_LP64) && defined(_SYSCALL32)
2723 2711 caddr32_t addr;
2724 2712
2725 2713 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2726 2714 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2727 2715 &addr, sizeof (addr)) != PS_OK) {
2728 2716 return_val = TD_DBERR;
2729 2717 goto out;
2730 2718 }
2731 2719 hashaddr = addr;
2732 2720 #else
2733 2721 return_val = TD_ERR;
2734 2722 goto out;
2735 2723 #endif /* _SYSCALL32 */
2736 2724 }
2737 2725
2738 2726 if (hashaddr == 0)
2739 2727 return_val = TD_BADSH;
2740 2728 else
2741 2729 return_val = read_sync_stats(ta_p, hashaddr,
2742 2730 sh_p->sh_unique, &sync_stats);
2743 2731 if (return_val != TD_OK)
2744 2732 goto out;
2745 2733
2746 2734 /*
2747 2735 * We have the hash table entry. Transfer the data to
2748 2736 * the td_syncstats_t structure provided by the caller.
2749 2737 */
2750 2738 switch (sync_stats.un.type) {
2751 2739 case TDB_MUTEX:
2752 2740 {
2753 2741 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2754 2742
2755 2743 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2756 2744 ss_p->ss_info.si_size = sizeof (mutex_t);
2757 2745 msp->mutex_lock =
2758 2746 sync_stats.un.mutex.mutex_lock;
2759 2747 msp->mutex_sleep =
2760 2748 sync_stats.un.mutex.mutex_sleep;
2761 2749 msp->mutex_sleep_time =
2762 2750 sync_stats.un.mutex.mutex_sleep_time;
2763 2751 msp->mutex_hold_time =
2764 2752 sync_stats.un.mutex.mutex_hold_time;
2765 2753 msp->mutex_try =
2766 2754 sync_stats.un.mutex.mutex_try;
2767 2755 msp->mutex_try_fail =
2768 2756 sync_stats.un.mutex.mutex_try_fail;
2769 2757 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2770 2758 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2771 2759 < ta_p->hash_size * sizeof (thr_hash_table_t))
2772 2760 msp->mutex_internal =
2773 2761 ix / sizeof (thr_hash_table_t) + 1;
2774 2762 break;
2775 2763 }
2776 2764 case TDB_COND:
2777 2765 {
2778 2766 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2779 2767
2780 2768 ss_p->ss_info.si_type = TD_SYNC_COND;
2781 2769 ss_p->ss_info.si_size = sizeof (cond_t);
2782 2770 csp->cond_wait =
2783 2771 sync_stats.un.cond.cond_wait;
2784 2772 csp->cond_timedwait =
2785 2773 sync_stats.un.cond.cond_timedwait;
2786 2774 csp->cond_wait_sleep_time =
2787 2775 sync_stats.un.cond.cond_wait_sleep_time;
2788 2776 csp->cond_timedwait_sleep_time =
2789 2777 sync_stats.un.cond.cond_timedwait_sleep_time;
2790 2778 csp->cond_timedwait_timeout =
2791 2779 sync_stats.un.cond.cond_timedwait_timeout;
2792 2780 csp->cond_signal =
2793 2781 sync_stats.un.cond.cond_signal;
2794 2782 csp->cond_broadcast =
2795 2783 sync_stats.un.cond.cond_broadcast;
2796 2784 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2797 2785 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2798 2786 < ta_p->hash_size * sizeof (thr_hash_table_t))
2799 2787 csp->cond_internal =
2800 2788 ix / sizeof (thr_hash_table_t) + 1;
2801 2789 break;
2802 2790 }
2803 2791 case TDB_RWLOCK:
2804 2792 {
2805 2793 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2806 2794
2807 2795 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2808 2796 ss_p->ss_info.si_size = sizeof (rwlock_t);
2809 2797 rwsp->rw_rdlock =
2810 2798 sync_stats.un.rwlock.rw_rdlock;
2811 2799 rwsp->rw_rdlock_try =
2812 2800 sync_stats.un.rwlock.rw_rdlock_try;
2813 2801 rwsp->rw_rdlock_try_fail =
2814 2802 sync_stats.un.rwlock.rw_rdlock_try_fail;
2815 2803 rwsp->rw_wrlock =
2816 2804 sync_stats.un.rwlock.rw_wrlock;
2817 2805 rwsp->rw_wrlock_hold_time =
2818 2806 sync_stats.un.rwlock.rw_wrlock_hold_time;
2819 2807 rwsp->rw_wrlock_try =
2820 2808 sync_stats.un.rwlock.rw_wrlock_try;
2821 2809 rwsp->rw_wrlock_try_fail =
2822 2810 sync_stats.un.rwlock.rw_wrlock_try_fail;
2823 2811 break;
2824 2812 }
2825 2813 case TDB_SEMA:
2826 2814 {
2827 2815 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2828 2816
2829 2817 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2830 2818 ss_p->ss_info.si_size = sizeof (sema_t);
2831 2819 ssp->sema_wait =
2832 2820 sync_stats.un.sema.sema_wait;
2833 2821 ssp->sema_wait_sleep =
2834 2822 sync_stats.un.sema.sema_wait_sleep;
2835 2823 ssp->sema_wait_sleep_time =
2836 2824 sync_stats.un.sema.sema_wait_sleep_time;
2837 2825 ssp->sema_trywait =
2838 2826 sync_stats.un.sema.sema_trywait;
2839 2827 ssp->sema_trywait_fail =
2840 2828 sync_stats.un.sema.sema_trywait_fail;
2841 2829 ssp->sema_post =
2842 2830 sync_stats.un.sema.sema_post;
2843 2831 ssp->sema_max_count =
2844 2832 sync_stats.un.sema.sema_max_count;
2845 2833 ssp->sema_min_count =
2846 2834 sync_stats.un.sema.sema_min_count;
2847 2835 break;
2848 2836 }
2849 2837 default:
2850 2838 return_val = TD_BADSH;
2851 2839 break;
2852 2840 }
2853 2841
2854 2842 out:
2855 2843 (void) ps_pcontinue(ph_p);
2856 2844 ph_unlock(ta_p);
2857 2845 return (return_val);
2858 2846 }
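
A hedged sketch of reading mutex contention statistics; this only yields non-zero counts if statistics gathering was switched on beforehand (for example via td_ta_enable_stats()), otherwise the counts read back as zeros:

/*
 * Print lock/sleep counters for one mutex.
 */
#include <stdio.h>
#include <thread_db.h>

static void
show_mutex_stats(const td_synchandle_t *sh)
{
        td_syncstats_t ss;

        if (td_sync_get_stats(sh, &ss) != TD_OK ||
            ss.ss_info.si_type != TD_SYNC_MUTEX)
                return;
        (void) printf("lock %llu, sleep %llu, sleep time %llu\n",
            (unsigned long long)ss.ss_un.mutex.mutex_lock,
            (unsigned long long)ss.ss_un.mutex.mutex_sleep,
            (unsigned long long)ss.ss_un.mutex.mutex_sleep_time);
}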
2859 2847
2860 2848 /*
2861 2849 * Change the state of a synchronization variable.
2862 2850 * 1) mutex lock state set to value
2863 2851 * 2) semaphore's count set to value
2864 2852 * 3) writer's lock set by value < 0
2865 2853 * 4) reader's lock number of readers set to value >= 0
2866 2854 * Currently unused by dbx.
2867 2855 */
2868 2856 #pragma weak td_sync_setstate = __td_sync_setstate
2869 2857 td_err_e
2870 2858 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2871 2859 {
2872 2860 struct ps_prochandle *ph_p;
2873 2861 int trunc = 0;
2874 2862 td_err_e return_val;
2875 2863 td_so_un_t generic_so;
2876 2864 uint32_t *rwstate;
2877 2865 int value = (int)lvalue;
2878 2866
2879 2867 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2880 2868 return (return_val);
2881 2869 if (ps_pstop(ph_p) != PS_OK) {
2882 2870 ph_unlock(sh_p->sh_ta_p);
2883 2871 return (TD_DBERR);
2884 2872 }
2885 2873
2886 2874 /*
2887 2875 * Read the synch. variable information.
2888 2876 * First attempt to read the whole union and if that fails
2889 2877 * fall back to reading only the smallest member, the condvar.
2890 2878 */
2891 2879 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2892 2880 sizeof (generic_so)) != PS_OK) {
2893 2881 trunc = 1;
2894 2882 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2895 2883 sizeof (generic_so.condition)) != PS_OK) {
2896 2884 (void) ps_pcontinue(ph_p);
2897 2885 ph_unlock(sh_p->sh_ta_p);
2898 2886 return (TD_DBERR);
2899 2887 }
2900 2888 }
2901 2889
2902 2890 /*
2903 2891 * Read the synch. variable information from the process, set the
2904 2892 * new value in it, and write the updated object back.
2905 2893 */
2906 2894 switch (generic_so.condition.mutex_magic) {
2907 2895 case MUTEX_MAGIC:
2908 2896 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2909 2897 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2910 2898 return_val = TD_DBERR;
2911 2899 break;
2912 2900 }
2913 2901 generic_so.lock.mutex_lockw = (uint8_t)value;
2914 2902 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2915 2903 sizeof (generic_so.lock)) != PS_OK)
2916 2904 return_val = TD_DBERR;
2917 2905 break;
2918 2906 case SEMA_MAGIC:
2919 2907 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2920 2908 &generic_so.semaphore, sizeof (generic_so.semaphore))
2921 2909 != PS_OK) {
2922 2910 return_val = TD_DBERR;
2923 2911 break;
2924 2912 }
2925 2913 generic_so.semaphore.count = value;
2926 2914 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2927 2915 sizeof (generic_so.semaphore)) != PS_OK)
2928 2916 return_val = TD_DBERR;
2929 2917 break;
2930 2918 case COND_MAGIC:
2931 2919 /* Operation not supported on a condition variable */
2932 2920 return_val = TD_ERR;
2933 2921 break;
2934 2922 case RWL_MAGIC:
2935 2923 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2936 2924 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2937 2925 return_val = TD_DBERR;
2938 2926 break;
2939 2927 }
2940 2928 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2941 2929 *rwstate &= URW_HAS_WAITERS;
2942 2930 if (value < 0)
2943 2931 *rwstate |= URW_WRITE_LOCKED;
2944 2932 else
2945 2933 *rwstate |= (value & URW_READERS_MASK);
2946 2934 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2947 2935 sizeof (generic_so.rwlock)) != PS_OK)
2948 2936 return_val = TD_DBERR;
2949 2937 break;
2950 2938 default:
2951 2939 /* Bad sync. object type */
2952 2940 return_val = TD_BADSH;
2953 2941 break;
2954 2942 }
2955 2943
2956 2944 (void) ps_pcontinue(ph_p);
2957 2945 ph_unlock(sh_p->sh_ta_p);
2958 2946 return (return_val);
2959 2947 }
2960 2948
2961 2949 typedef struct {
2962 2950 td_thr_iter_f *waiter_cb;
2963 2951 psaddr_t sync_obj_addr;
2964 2952 uint16_t sync_magic;
2965 2953 void *waiter_cb_arg;
2966 2954 td_err_e errcode;
2967 2955 } waiter_cb_ctl_t;
2968 2956
2969 2957 static int
2970 2958 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2971 2959 {
2972 2960 td_thragent_t *ta_p = th_p->th_ta_p;
2973 2961 struct ps_prochandle *ph_p = ta_p->ph_p;
2974 2962 waiter_cb_ctl_t *wcb = arg;
2975 2963 caddr_t wchan;
2976 2964
2977 2965 if (ta_p->model == PR_MODEL_NATIVE) {
2978 2966 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2979 2967
2980 2968 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2981 2969 &wchan, sizeof (wchan)) != PS_OK) {
2982 2970 wcb->errcode = TD_DBERR;
2983 2971 return (1);
2984 2972 }
2985 2973 } else {
2986 2974 #if defined(_LP64) && defined(_SYSCALL32)
2987 2975 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2988 2976 caddr32_t wchan32;
2989 2977
2990 2978 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2991 2979 &wchan32, sizeof (wchan32)) != PS_OK) {
2992 2980 wcb->errcode = TD_DBERR;
2993 2981 return (1);
2994 2982 }
2995 2983 wchan = (caddr_t)(uintptr_t)wchan32;
2996 2984 #else
2997 2985 wcb->errcode = TD_ERR;
2998 2986 return (1);
2999 2987 #endif /* _SYSCALL32 */
3000 2988 }
3001 2989
3002 2990 if (wchan == NULL)
3003 2991 return (0);
3004 2992
3005 2993 if (wchan == (caddr_t)wcb->sync_obj_addr)
3006 2994 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3007 2995
3008 2996 return (0);
3009 2997 }
3010 2998
3011 2999 /*
3012 3000 * For a given synchronization variable, iterate over the
3013 3001 * set of waiting threads. The callback function is passed
3014 3002 * two parameters: a pointer to a thread handle and a pointer
3015 3003 * to extra callback data.
3016 3004 */
3017 3005 #pragma weak td_sync_waiters = __td_sync_waiters
3018 3006 td_err_e
3019 3007 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3020 3008 {
3021 3009 struct ps_prochandle *ph_p;
3022 3010 waiter_cb_ctl_t wcb;
3023 3011 td_err_e return_val;
3024 3012
3025 3013 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3026 3014 return (return_val);
3027 3015 if (ps_pdread(ph_p,
3028 3016 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3029 3017 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3030 3018 ph_unlock(sh_p->sh_ta_p);
3031 3019 return (TD_DBERR);
3032 3020 }
3033 3021 ph_unlock(sh_p->sh_ta_p);
3034 3022
3035 3023 switch (wcb.sync_magic) {
3036 3024 case MUTEX_MAGIC:
3037 3025 case COND_MAGIC:
3038 3026 case SEMA_MAGIC:
3039 3027 case RWL_MAGIC:
3040 3028 break;
3041 3029 default:
3042 3030 return (TD_BADSH);
3043 3031 }
3044 3032
3045 3033 wcb.waiter_cb = cb;
3046 3034 wcb.sync_obj_addr = sh_p->sh_unique;
3047 3035 wcb.waiter_cb_arg = cb_data;
3048 3036 wcb.errcode = TD_OK;
3049 3037 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3050 3038 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3051 3039 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3052 3040
3053 3041 if (return_val != TD_OK)
3054 3042 return (return_val);
3055 3043
3056 3044 return (wcb.errcode);
3057 3045 }
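
A small sketch of the waiter iteration; the callback has the td_thr_iter_f shape used elsewhere in this file, and returning 0 keeps the iteration going:

/*
 * Count (and print) the threads sleeping on one synchronization object.
 */
#include <stdio.h>
#include <thread_db.h>

static int
print_waiter(const td_thrhandle_t *th, void *arg)
{
        int *countp = arg;

        (*countp)++;
        (void) printf("waiter: ulwp at 0x%lx\n",
            (unsigned long)th->th_unique);
        return (0);
}

static int
count_waiters(const td_synchandle_t *sh)
{
        int count = 0;

        (void) td_sync_waiters(sh, print_waiter, &count);
        return (count);
}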