/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <thr_uberdata.h>
#include <thread_db.h>
#include <libc_int.h>

/*
 * Private structures.
 */

typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;

struct td_thragent {
	rwlock_t	rwlock;
	struct ps_prochandle *ph_p;
	int		initialized;
	int		sync_tracking;
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
}

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
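
/*
 * A minimal sketch (hypothetical client code, not compiled here) of the
 * typical agent life cycle: the debugger supplies its own ps_prochandle
 * and proc_service callbacks, then brackets all libc_db work with
 * td_ta_new() and td_ta_delete():
 *
 *	struct ps_prochandle *ph = ...;	(the client's own handle)
 *	td_thragent_t *ta;
 *
 *	if (td_init() == TD_OK && td_ta_new(ph, &ta) == TD_OK) {
 *		... use the td_ta_*() and td_thr_*() interfaces ...
 *		(void) td_ta_delete(ta);
 *	}
 */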

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == NULL) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == NULL) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}

/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}

/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/* ARGSUSED1 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}

/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
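
/*
 * A minimal sketch (hypothetical client code) of mapping a thread id
 * to a handle and then fetching its information:
 *
 *	td_thrhandle_t th;
 *	td_thrinfo_t ti;
 *
 *	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
 *	    td_thr_get_info(&th, &ti) == TD_OK)
 *		(void) printf("tid %d is lwpid %d\n",
 *		    (int)ti.ti_tid, (int)ti.ti_lid);
 */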

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;
	psaddr_t	dest_addr;
	psaddr_t	*destructors = NULL;
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if ((destruct32 !=
				    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
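
/*
 * A minimal sketch (hypothetical client code) of a TSD key iteration
 * callback for td_ta_tsd_iter(); it reports each allocated key and the
 * address of its destructor, and returns 0 to continue the iteration:
 *
 *	static int
 *	print_key(thread_key_t key, void (*destructor)(void *), void *arg)
 *	{
 *		(void) printf("key %d destructor %p\n",
 *		    (int)key, (void *)destructor);
 *		return (0);
 *	}
 *
 *	... td_ta_tsd_iter(ta, print_key, NULL) ...
 */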

int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (
	    s1->__sigbits[0] == s2->__sigbits[0] &&
	    s1->__sigbits[1] == s2->__sigbits[1] &&
	    s1->__sigbits[2] == s2->__sigbits[2] &&
	    s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Description:
 *   Iterate over all threads. For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL. Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by user.
 * td_thr_iter_f() takes a thread handle and
 * cbdata_p as a parameter.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest.  A value of
 * TD_THR_ANY_STATE from enum td_thr_state_e
 * does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 * interest.  A value of TD_THR_LOWEST_PRIORITY
 * defined in thread_db.h does not restrict
 * iterations by priority.  A thread with priority
 * less than ti_pri will NOT be passed to the callback
 * function.
 *   ti_sigmask_p - signal mask of threads of interest.
 * A value of TD_SIGNO_MASK defined in thread_db.h
 * does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest.  A
 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 * does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
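
/*
 * A minimal sketch (hypothetical client code) of a td_ta_thr_iter()
 * callback that counts the active threads in the target process; the
 * wild-card filter arguments ask for every thread:
 *
 *	static int
 *	count_active(const td_thrhandle_t *th, void *arg)
 *	{
 *		td_thrinfo_t ti;
 *
 *		if (td_thr_get_info((td_thrhandle_t *)th, &ti) == TD_OK &&
 *		    ti.ti_state == TD_THR_ACTIVE)
 *			(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nactive = 0;
 *	... td_ta_thr_iter(ta, count_active, &nactive, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS) ...
 */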

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		i;
	register_sync_t	enable;
	psaddr_t	next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t	synchandle;
	psaddr_t	psaddr;
	void		*vaddr;
	uint64_t	*sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#ifdef	_SYSCALL32
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif /* _SYSCALL32 */
	}

	if (psaddr == NULL)
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Now scan the hash table.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != NULL;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
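
/*
 * A minimal sketch (hypothetical client code) of a td_ta_sync_iter()
 * callback; it reports the target-process address of each registered
 * synchronization object:
 *
 *	static int
 *	print_sync(const td_synchandle_t *sh, void *arg)
 *	{
 *		(void) printf("sync object at %lx\n", (ulong_t)sh->sh_unique);
 *		return (0);
 *	}
 *
 *	... td_ta_sync_iter(ta, print_sync, NULL) ...
 */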

/*
 * Enable process statistics collection.
 */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}

/*
 * Reset process statistics.
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}

/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}

#if defined(_LP64) && defined(_SYSCALL32)
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */

/*
 * Get thread information.
 */
#pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	psaddr_t	psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the process.
	 * Transfer the ulwp struct to the thread information struct.
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
1420 
1421 /*
 * Given a process and an event number, return information about
 * an address in the process at which a breakpoint can be set
 * to monitor the event.
1425  */
1426 #pragma weak td_ta_event_addr = __td_ta_event_addr
1427 td_err_e
1428 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1429 {
1430         if (ta_p == NULL)
1431                 return (TD_BADTA);
1432         if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1433                 return (TD_NOEVENT);
1434         if (notify_p == NULL)
1435                 return (TD_ERR);
1436 
1437         notify_p->type = NOTIFY_BPT;
1438         notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1439 
1440         return (TD_OK);
1441 }
1442 
1443 /*
1444  * Add the events in eventset 2 to eventset 1.
1445  */
1446 static void
1447 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1448 {
1449         int     i;
1450 
1451         for (i = 0; i < TD_EVENTSIZE; i++)
1452                 event1_p->event_bits[i] |= event2_p->event_bits[i];
1453 }
1454 
1455 /*
1456  * Delete the events in eventset 2 from eventset 1.
1457  */
1458 static void
1459 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1460 {
1461         int     i;
1462 
1463         for (i = 0; i < TD_EVENTSIZE; i++)
1464                 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1465 }
1466 
1467 /*
1468  * Either add or delete the given event set from a thread's event mask.
1469  */
1470 static td_err_e
1471 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1472 {
1473         struct ps_prochandle *ph_p;
1474         td_err_e        return_val = TD_OK;
1475         char            enable;
1476         td_thr_events_t evset;
1477         psaddr_t        psaddr_evset;
1478         psaddr_t        psaddr_enab;
1479 
1480         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1481                 return (return_val);
1482         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1483                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1484                 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1485                 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1486         } else {
1487 #if defined(_LP64) && defined(_SYSCALL32)
1488                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1489                 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1490                 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1491 #else
1492                 ph_unlock(th_p->th_ta_p);
1493                 return (TD_ERR);
1494 #endif  /* _SYSCALL32 */
1495         }
1496         if (ps_pstop(ph_p) != PS_OK) {
1497                 ph_unlock(th_p->th_ta_p);
1498                 return (TD_DBERR);
1499         }
1500 
1501         if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1502                 return_val = TD_DBERR;
1503         else {
1504                 if (onoff)
1505                         eventsetaddset(&evset, events);
1506                 else
1507                         eventsetdelset(&evset, events);
1508                 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1509                     != PS_OK)
1510                         return_val = TD_DBERR;
1511                 else {
1512                         enable = 0;
1513                         if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1514                                 enable = 1;
1515                         if (ps_pdwrite(ph_p, psaddr_enab,
1516                             &enable, sizeof (enable)) != PS_OK)
1517                                 return_val = TD_DBERR;
1518                 }
1519         }
1520 
1521         (void) ps_pcontinue(ph_p);
1522         ph_unlock(th_p->th_ta_p);
1523         return (return_val);
1524 }
1525 
1526 /*
1527  * Enable or disable tracing for a given thread.  Tracing
1528  * is filtered based on the event mask of each thread.  Tracing
 * can be turned on/off for the thread without changing the
 * thread's event mask.
1531  * Currently unused by dbx.
1532  */
1533 #pragma weak td_thr_event_enable = __td_thr_event_enable
1534 td_err_e
1535 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1536 {
1537         td_thr_events_t evset;
1538 
1539         td_event_emptyset(&evset);
1540         td_event_addset(&evset, TD_EVENTS_ENABLE);
1541         return (mod_eventset(th_p, &evset, onoff));
1542 }
1543 
1544 /*
 * Turn the given events on in the thread's event mask.  If a
 * thread encounters an event that is enabled in its event mask,
 * notification is sent to the debugger.
 * The address associated with each event is provided to the
 * debugger, and it is assumed that a breakpoint of some type is
 * placed at that address.  If the event mask for the thread is
 * on, the instruction at that address is executed; otherwise it
 * is skipped.
1554  */
1555 #pragma weak td_thr_set_event = __td_thr_set_event
1556 td_err_e
1557 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1558 {
1559         return (mod_eventset(th_p, events, 1));
1560 }
1561 
1562 /*
1563  * Enable or disable a set of events in the process-global event mask,
1564  * depending on the value of onoff.
1565  */
1566 static td_err_e
1567 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1568 {
1569         struct ps_prochandle *ph_p;
1570         td_thr_events_t targ_eventset;
1571         td_err_e        return_val;
1572 
1573         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1574                 return (return_val);
1575         if (ps_pstop(ph_p) != PS_OK) {
1576                 ph_unlock(ta_p);
1577                 return (TD_DBERR);
1578         }
1579         if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1580             &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1581                 return_val = TD_DBERR;
1582         else {
1583                 if (onoff)
1584                         eventsetaddset(&targ_eventset, events);
1585                 else
1586                         eventsetdelset(&targ_eventset, events);
1587                 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1588                     &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1589                         return_val = TD_DBERR;
1590         }
1591         (void) ps_pcontinue(ph_p);
1592         ph_unlock(ta_p);
1593         return (return_val);
1594 }
1595 
1596 /*
1597  * Enable a set of events in the process-global event mask.
1598  */
1599 #pragma weak td_ta_set_event = __td_ta_set_event
1600 td_err_e
1601 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1602 {
1603         return (td_ta_mod_event(ta_p, events, 1));
1604 }
1605 
1606 /*
1607  * Set event mask to disable the given event set; these events are cleared
1608  * from the event mask of the thread.  Events that occur for a thread
1609  * with the event masked off will not cause notification to be
1610  * sent to the debugger (see td_thr_set_event for fuller description).
1611  */
1612 #pragma weak td_thr_clear_event = __td_thr_clear_event
1613 td_err_e
1614 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1615 {
1616         return (mod_eventset(th_p, events, 0));
1617 }
1618 
1619 /*
1620  * Disable a set of events in the process-global event mask.
1621  */
1622 #pragma weak td_ta_clear_event = __td_ta_clear_event
1623 td_err_e
1624 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1625 {
1626         return (td_ta_mod_event(ta_p, events, 0));
1627 }
1628 
1629 /*
1630  * This function returns the most recent event message, if any,
1631  * associated with a thread.  Given a thread handle, return the message
1632  * corresponding to the event encountered by the thread.  Only one
1633  * message per thread is saved.  Messages from earlier events are lost
1634  * when later events occur.
1635  */
1636 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1637 td_err_e
1638 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1639 {
1640         struct ps_prochandle *ph_p;
1641         td_err_e        return_val = TD_OK;
1642         psaddr_t        psaddr;
1643 
1644         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1645                 return (return_val);
1646         if (ps_pstop(ph_p) != PS_OK) {
1647                 ph_unlock(th_p->th_ta_p);
                return (TD_DBERR);
1649         }
1650         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1651                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1652                 td_evbuf_t evbuf;
1653 
1654                 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1655                 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1656                         return_val = TD_DBERR;
1657                 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1658                         return_val = TD_NOEVENT;
1659                 } else {
1660                         msg->event = evbuf.eventnum;
1661                         msg->th_p = (td_thrhandle_t *)th_p;
1662                         msg->msg.data = (uintptr_t)evbuf.eventdata;
1663                         /* "Consume" the message */
1664                         evbuf.eventnum = TD_EVENT_NONE;
1665                         evbuf.eventdata = NULL;
1666                         if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1667                             != PS_OK)
1668                                 return_val = TD_DBERR;
1669                 }
1670         } else {
1671 #if defined(_LP64) && defined(_SYSCALL32)
1672                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1673                 td_evbuf32_t evbuf;
1674 
1675                 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1676                 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1677                         return_val = TD_DBERR;
1678                 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1679                         return_val = TD_NOEVENT;
1680                 } else {
1681                         msg->event = evbuf.eventnum;
1682                         msg->th_p = (td_thrhandle_t *)th_p;
1683                         msg->msg.data = (uintptr_t)evbuf.eventdata;
1684                         /* "Consume" the message */
1685                         evbuf.eventnum = TD_EVENT_NONE;
1686                         evbuf.eventdata = NULL;
1687                         if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1688                             != PS_OK)
1689                                 return_val = TD_DBERR;
1690                 }
1691 #else
1692                 return_val = TD_ERR;
1693 #endif  /* _SYSCALL32 */
1694         }
1695 
1696         (void) ps_pcontinue(ph_p);
1697         ph_unlock(th_p->th_ta_p);
1698         return (return_val);
1699 }
1700 
1701 /*
1702  * The callback function td_ta_event_getmsg uses when looking for
1703  * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
1704  */
1705 static int
1706 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1707 {
1708         static td_thrhandle_t th;
1709         td_event_msg_t *msg = arg;
1710 
1711         if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1712                 /*
1713                  * Got an event, stop iterating.
1714                  *
1715                  * Because of past mistakes in interface definition,
1716                  * we are forced to pass back a static local variable
1717                  * for the thread handle because th_p is a pointer
1718                  * to a local variable in __td_ta_thr_iter().
1719                  * Grr...
1720                  */
1721                 th = *th_p;
1722                 msg->th_p = &th;
1723                 return (1);
1724         }
1725         return (0);
1726 }
1727 
1728 /*
1729  * This function is just like td_thr_event_getmsg, except that it is
1730  * passed a process handle rather than a thread handle, and returns
1731  * an event message for some thread in the process that has an event
1732  * message pending.  If no thread has an event message pending, this
1733  * routine returns TD_NOEVENT.  Thus, all pending event messages may
1734  * be collected from a process by repeatedly calling this routine
1735  * until it returns TD_NOEVENT.
1736  */
1737 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1738 td_err_e
1739 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1740 {
1741         td_err_e return_val;
1742 
1743         if (ta_p == NULL)
1744                 return (TD_BADTA);
1745         if (ta_p->ph_p == NULL)
1746                 return (TD_BADPH);
1747         if (msg == NULL)
1748                 return (TD_ERR);
1749         msg->event = TD_EVENT_NONE;
1750         if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1751             TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1752             TD_THR_ANY_USER_FLAGS)) != TD_OK)
1753                 return (return_val);
1754         if (msg->event == TD_EVENT_NONE)
1755                 return (TD_NOEVENT);
1756         return (TD_OK);
1757 }
1758 
1759 static lwpid_t
1760 thr_to_lwpid(const td_thrhandle_t *th_p)
1761 {
1762         struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1763         lwpid_t lwpid;
1764 
1765         /*
         * The caller holds the prochandle lock
         * and has already verified everything.
1768          */
1769         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1770                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1771 
1772                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1773                     &lwpid, sizeof (lwpid)) != PS_OK)
1774                         lwpid = 0;
1775                 else if (lwpid == 0)
1776                         lwpid = 1;
1777         } else {
1778 #if defined(_LP64) && defined(_SYSCALL32)
1779                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1780 
1781                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1782                     &lwpid, sizeof (lwpid)) != PS_OK)
1783                         lwpid = 0;
1784                 else if (lwpid == 0)
1785                         lwpid = 1;
1786 #else
1787                 lwpid = 0;
1788 #endif  /* _SYSCALL32 */
1789         }
1790 
1791         return (lwpid);
1792 }
1793 
1794 /*
1795  * Suspend a thread.
1796  * XXX: What does this mean in a one-level model?
1797  */
1798 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1799 td_err_e
1800 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1801 {
1802         struct ps_prochandle *ph_p;
1803         td_err_e return_val;
1804 
1805         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1806                 return (return_val);
1807         if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1808                 return_val = TD_DBERR;
1809         ph_unlock(th_p->th_ta_p);
1810         return (return_val);
1811 }
1812 
1813 /*
1814  * Resume a suspended thread.
1815  * XXX: What does this mean in a one-level model?
1816  */
1817 #pragma weak td_thr_dbresume = __td_thr_dbresume
1818 td_err_e
1819 __td_thr_dbresume(const td_thrhandle_t *th_p)
1820 {
1821         struct ps_prochandle *ph_p;
1822         td_err_e return_val;
1823 
1824         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1825                 return (return_val);
1826         if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1827                 return_val = TD_DBERR;
1828         ph_unlock(th_p->th_ta_p);
1829         return (return_val);
1830 }
1831 
1832 /*
1833  * Set a thread's signal mask.
1834  * Currently unused by dbx.
1835  */
1836 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1837 /* ARGSUSED */
1838 td_err_e
1839 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1840 {
1841         return (TD_NOCAPAB);
1842 }
1843 
1844 /*
1845  * Set a thread's "signals-pending" set.
1846  * Currently unused by dbx.
1847  */
1848 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1849 /* ARGSUSED */
1850 td_err_e
1851 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1852         uchar_t ti_pending_flag, const sigset_t ti_pending)
1853 {
1854         return (TD_NOCAPAB);
1855 }
1856 
1857 /*
1858  * Get a thread's general register set.
1859  */
1860 #pragma weak td_thr_getgregs = __td_thr_getgregs
1861 td_err_e
1862 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1863 {
1864         struct ps_prochandle *ph_p;
1865         td_err_e return_val;
1866 
1867         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1868                 return (return_val);
1869         if (ps_pstop(ph_p) != PS_OK) {
1870                 ph_unlock(th_p->th_ta_p);
1871                 return (TD_DBERR);
1872         }
1873 
1874         if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1875                 return_val = TD_DBERR;
1876 
1877         (void) ps_pcontinue(ph_p);
1878         ph_unlock(th_p->th_ta_p);
1879         return (return_val);
1880 }
1881 
1882 /*
1883  * Set a thread's general register set.
1884  */
1885 #pragma weak td_thr_setgregs = __td_thr_setgregs
1886 td_err_e
1887 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1888 {
1889         struct ps_prochandle *ph_p;
1890         td_err_e return_val;
1891 
1892         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1893                 return (return_val);
1894         if (ps_pstop(ph_p) != PS_OK) {
1895                 ph_unlock(th_p->th_ta_p);
1896                 return (TD_DBERR);
1897         }
1898 
1899         if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1900                 return_val = TD_DBERR;
1901 
1902         (void) ps_pcontinue(ph_p);
1903         ph_unlock(th_p->th_ta_p);
1904         return (return_val);
1905 }
1906 
1907 /*
1908  * Get a thread's floating-point register set.
1909  */
1910 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1911 td_err_e
1912 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1913 {
1914         struct ps_prochandle *ph_p;
1915         td_err_e return_val;
1916 
1917         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1918                 return (return_val);
1919         if (ps_pstop(ph_p) != PS_OK) {
1920                 ph_unlock(th_p->th_ta_p);
1921                 return (TD_DBERR);
1922         }
1923 
1924         if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1925                 return_val = TD_DBERR;
1926 
1927         (void) ps_pcontinue(ph_p);
1928         ph_unlock(th_p->th_ta_p);
1929         return (return_val);
1930 }
1931 
1932 /*
1933  * Set a thread's floating-point register set.
1934  */
1935 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1936 td_err_e
1937 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1938 {
1939         struct ps_prochandle *ph_p;
1940         td_err_e return_val;
1941 
1942         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1943                 return (return_val);
1944         if (ps_pstop(ph_p) != PS_OK) {
1945                 ph_unlock(th_p->th_ta_p);
1946                 return (TD_DBERR);
1947         }
1948 
1949         if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1950                 return_val = TD_DBERR;
1951 
1952         (void) ps_pcontinue(ph_p);
1953         ph_unlock(th_p->th_ta_p);
1954         return (return_val);
1955 }
1956 
1957 /*
1958  * Get the size of the extra state register set for this architecture.
1959  * Currently unused by dbx.
1960  */
1961 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1962 /* ARGSUSED */
1963 td_err_e
1964 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1965 {
1966         struct ps_prochandle *ph_p;
1967         td_err_e return_val;
1968 
1969         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1970                 return (return_val);
1971         if (ps_pstop(ph_p) != PS_OK) {
1972                 ph_unlock(th_p->th_ta_p);
1973                 return (TD_DBERR);
1974         }
1975 
1976         if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
1977                 return_val = TD_DBERR;
1978 
1979         (void) ps_pcontinue(ph_p);
1980         ph_unlock(th_p->th_ta_p);
1981         return (return_val);
1982 }
1983 
1984 /*
1985  * Get a thread's extra state register set.
1986  */
1987 #pragma weak td_thr_getxregs = __td_thr_getxregs
1988 /* ARGSUSED */
1989 td_err_e
1990 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
1991 {
1992         struct ps_prochandle *ph_p;
1993         td_err_e return_val;
1994 
1995         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1996                 return (return_val);
1997         if (ps_pstop(ph_p) != PS_OK) {
1998                 ph_unlock(th_p->th_ta_p);
1999                 return (TD_DBERR);
2000         }
2001 
2002         if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2003                 return_val = TD_DBERR;
2004 
2005         (void) ps_pcontinue(ph_p);
2006         ph_unlock(th_p->th_ta_p);
2007         return (return_val);
2008 }
2009 
2010 /*
2011  * Set a thread's extra state register set.
2012  */
2013 #pragma weak td_thr_setxregs = __td_thr_setxregs
2014 /* ARGSUSED */
2015 td_err_e
2016 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2017 {
2018         struct ps_prochandle *ph_p;
2019         td_err_e return_val;
2020 
2021         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2022                 return (return_val);
2023         if (ps_pstop(ph_p) != PS_OK) {
2024                 ph_unlock(th_p->th_ta_p);
2025                 return (TD_DBERR);
2026         }
2027 
2028         if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2029                 return_val = TD_DBERR;
2030 
2031         (void) ps_pcontinue(ph_p);
2032         ph_unlock(th_p->th_ta_p);
2033         return (return_val);
2034 }
2035 
2036 struct searcher {
2037         psaddr_t        addr;
2038         int             status;
2039 };
2040 
2041 /*
 * Check the struct thread address in *th_p against the first
 * value in "data".  If it matches, set the second value in
 * "data" to 1 and return 1 to terminate the iteration.
2045  * This function is used by td_thr_validate() to verify that
2046  * a thread handle is valid.
2047  */
2048 static int
2049 td_searcher(const td_thrhandle_t *th_p, void *data)
2050 {
2051         struct searcher *searcher_data = (struct searcher *)data;
2052 
2053         if (searcher_data->addr == th_p->th_unique) {
2054                 searcher_data->status = 1;
2055                 return (1);
2056         }
2057         return (0);
2058 }
2059 
2060 /*
2061  * Validate the thread handle.  Check that
 * a thread exists in the thread agent/process that
 * corresponds to the thread with handle *th_p.
2064  * Currently unused by dbx.
2065  */
2066 #pragma weak td_thr_validate = __td_thr_validate
2067 td_err_e
2068 __td_thr_validate(const td_thrhandle_t *th_p)
2069 {
2070         td_err_e return_val;
2071         struct searcher searcher_data = {0, 0};
2072 
2073         if (th_p == NULL)
2074                 return (TD_BADTH);
2075         if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2076                 return (TD_BADTH);
2077 
2078         /*
2079          * LOCKING EXCEPTION - Locking is not required
2080          * here because no use of the thread agent is made (other
2081          * than the sanity check) and checking of the thread
2082          * agent will be done in __td_ta_thr_iter.
2083          */
2084 
2085         searcher_data.addr = th_p->th_unique;
2086         return_val = __td_ta_thr_iter(th_p->th_ta_p,
2087             td_searcher, &searcher_data,
2088             TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2089             TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2090 
2091         if (return_val == TD_OK && searcher_data.status == 0)
2092                 return_val = TD_NOTHR;
2093 
2094         return (return_val);
2095 }
2096 
2097 /*
 * Get a thread's private binding to a given thread-specific
 * data (TSD) key (see thr_getspecific(3T)).  If the thread
 * doesn't have a binding for the key, NULL is returned.
2101  */
2102 #pragma weak td_thr_tsd = __td_thr_tsd
2103 td_err_e
2104 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2105 {
2106         struct ps_prochandle *ph_p;
2107         td_thragent_t   *ta_p;
2108         td_err_e        return_val;
2109         int             maxkey;
2110         int             nkey;
2111         psaddr_t        tsd_paddr;
2112 
2113         if (data_pp == NULL)
2114                 return (TD_ERR);
2115         *data_pp = NULL;
2116         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2117                 return (return_val);
2118         ta_p = th_p->th_ta_p;
2119         if (ps_pstop(ph_p) != PS_OK) {
2120                 ph_unlock(ta_p);
2121                 return (TD_DBERR);
2122         }
2123 
2124         if (ta_p->model == PR_MODEL_NATIVE) {
2125                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2126                 tsd_metadata_t tsdm;
2127                 tsd_t stsd;
2128 
2129                 if (ps_pdread(ph_p,
2130                     ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2131                     &tsdm, sizeof (tsdm)) != PS_OK)
2132                         return_val = TD_DBERR;
2133                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2134                     &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2135                         return_val = TD_DBERR;
2136                 else if (tsd_paddr != NULL &&
2137                     ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2138                         return_val = TD_DBERR;
2139                 else {
2140                         maxkey = tsdm.tsdm_nused;
2141                         nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2142 
2143                         if (key < TSD_NFAST)
2144                                 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2145                 }
2146         } else {
2147 #if defined(_LP64) && defined(_SYSCALL32)
2148                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2149                 tsd_metadata32_t tsdm;
2150                 tsd32_t stsd;
2151                 caddr32_t addr;
2152 
2153                 if (ps_pdread(ph_p,
2154                     ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2155                     &tsdm, sizeof (tsdm)) != PS_OK)
2156                         return_val = TD_DBERR;
2157                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2158                     &addr, sizeof (addr)) != PS_OK)
2159                         return_val = TD_DBERR;
2160                 else if (addr != NULL &&
2161                     ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2162                         return_val = TD_DBERR;
2163                 else {
2164                         maxkey = tsdm.tsdm_nused;
2165                         nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2166 
                        if (key < TSD_NFAST)
                                tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
                        else
                                tsd_paddr = addr;
2172                 }
2173 #else
2174                 return_val = TD_ERR;
2175 #endif  /* _SYSCALL32 */
2176         }
2177 
2178         if (return_val == TD_OK && (key < 1 || key >= maxkey))
2179                 return_val = TD_NOTSD;
2180         if (return_val != TD_OK || key >= nkey) {
2181                 /* NULL has already been stored in data_pp */
2182                 (void) ps_pcontinue(ph_p);
2183                 ph_unlock(ta_p);
2184                 return (return_val);
2185         }
2186 
2187         /*
2188          * Read the value from the thread's tsd array.
2189          */
2190         if (ta_p->model == PR_MODEL_NATIVE) {
2191                 void *value;
2192 
2193                 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2194                     &value, sizeof (value)) != PS_OK)
2195                         return_val = TD_DBERR;
2196                 else
2197                         *data_pp = value;
2198 #if defined(_LP64) && defined(_SYSCALL32)
2199         } else {
2200                 caddr32_t value32;
2201 
2202                 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2203                     &value32, sizeof (value32)) != PS_OK)
2204                         return_val = TD_DBERR;
2205                 else
2206                         *data_pp = (void *)(uintptr_t)value32;
2207 #endif  /* _SYSCALL32 */
2208         }
2209 
2210         (void) ps_pcontinue(ph_p);
2211         ph_unlock(ta_p);
2212         return (return_val);
2213 }
2214 
2215 /*
2216  * Get the base address of a thread's thread local storage (TLS) block
2217  * for the module (executable or shared object) identified by 'moduleid'.
2218  */
2219 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2220 td_err_e
2221 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2222 {
2223         struct ps_prochandle *ph_p;
2224         td_thragent_t   *ta_p;
2225         td_err_e        return_val;
2226 
2227         if (base == NULL)
2228                 return (TD_ERR);
2229         *base = NULL;
2230         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2231                 return (return_val);
2232         ta_p = th_p->th_ta_p;
2233         if (ps_pstop(ph_p) != PS_OK) {
2234                 ph_unlock(ta_p);
2235                 return (TD_DBERR);
2236         }
2237 
2238         if (ta_p->model == PR_MODEL_NATIVE) {
2239                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2240                 tls_metadata_t tls_metadata;
2241                 TLS_modinfo tlsmod;
2242                 tls_t tls;
2243 
2244                 if (ps_pdread(ph_p,
2245                     ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2246                     &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2247                         return_val = TD_DBERR;
2248                 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2249                         return_val = TD_NOTLS;
2250                 else if (ps_pdread(ph_p,
2251                     (psaddr_t)((TLS_modinfo *)
2252                     tls_metadata.tls_modinfo.tls_data + moduleid),
2253                     &tlsmod, sizeof (tlsmod)) != PS_OK)
2254                         return_val = TD_DBERR;
2255                 else if (tlsmod.tm_memsz == 0)
2256                         return_val = TD_NOTLS;
2257                 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2258                         *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2259                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2260                     &tls, sizeof (tls)) != PS_OK)
2261                         return_val = TD_DBERR;
2262                 else if (moduleid >= tls.tls_size)
2263                         return_val = TD_TLSDEFER;
2264                 else if (ps_pdread(ph_p,
2265                     (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2266                     &tls, sizeof (tls)) != PS_OK)
2267                         return_val = TD_DBERR;
2268                 else if (tls.tls_size == 0)
2269                         return_val = TD_TLSDEFER;
2270                 else
2271                         *base = (psaddr_t)tls.tls_data;
2272         } else {
2273 #if defined(_LP64) && defined(_SYSCALL32)
2274                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2275                 tls_metadata32_t tls_metadata;
2276                 TLS_modinfo32 tlsmod;
2277                 tls32_t tls;
2278 
2279                 if (ps_pdread(ph_p,
2280                     ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2281                     &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2282                         return_val = TD_DBERR;
2283                 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2284                         return_val = TD_NOTLS;
2285                 else if (ps_pdread(ph_p,
2286                     (psaddr_t)((TLS_modinfo32 *)
2287                     (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2288                     &tlsmod, sizeof (tlsmod)) != PS_OK)
2289                         return_val = TD_DBERR;
2290                 else if (tlsmod.tm_memsz == 0)
2291                         return_val = TD_NOTLS;
2292                 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2293                         *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2294                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2295                     &tls, sizeof (tls)) != PS_OK)
2296                         return_val = TD_DBERR;
2297                 else if (moduleid >= tls.tls_size)
2298                         return_val = TD_TLSDEFER;
2299                 else if (ps_pdread(ph_p,
2300                     (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2301                     &tls, sizeof (tls)) != PS_OK)
2302                         return_val = TD_DBERR;
2303                 else if (tls.tls_size == 0)
2304                         return_val = TD_TLSDEFER;
2305                 else
2306                         *base = (psaddr_t)tls.tls_data;
2307 #else
2308                 return_val = TD_ERR;
2309 #endif  /* _SYSCALL32 */
2310         }
2311 
2312         (void) ps_pcontinue(ph_p);
2313         ph_unlock(ta_p);
2314         return (return_val);
2315 }
2316 
2317 /*
2318  * Change a thread's priority to the value specified by ti_pri.
2319  * Currently unused by dbx.
2320  */
2321 #pragma weak td_thr_setprio = __td_thr_setprio
2322 /* ARGSUSED */
2323 td_err_e
2324 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2325 {
2326         return (TD_NOCAPAB);
2327 }
2328 
2329 /*
2330  * This structure links td_thr_lockowner and the lowner_cb callback function.
2331  */
2332 typedef struct {
2333         td_sync_iter_f  *owner_cb;
2334         void            *owner_cb_arg;
2335         td_thrhandle_t  *th_p;
2336 } lowner_cb_ctl_t;
2337 
2338 static int
2339 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2340 {
2341         lowner_cb_ctl_t *ocb = arg;
2342         int trunc = 0;
2343         union {
2344                 rwlock_t rwl;
2345                 mutex_t mx;
2346         } rw_m;
2347 
2348         if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2349             &rw_m, sizeof (rw_m)) != PS_OK) {
2350                 trunc = 1;
2351                 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2352                     &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2353                         return (0);
2354         }
2355         if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2356             rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2357                 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2358         if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2359                 mutex_t *rwlock = &rw_m.rwl.mutex;
2360                 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2361                         return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2362         }
2363         return (0);
2364 }
2365 
2366 /*
2367  * Iterate over the set of locks owned by a specified thread.
2368  * If cb returns a non-zero value, terminate iterations.
2369  */
2370 #pragma weak td_thr_lockowner = __td_thr_lockowner
2371 td_err_e
2372 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2373         void *cb_data)
2374 {
2375         td_thragent_t   *ta_p;
2376         td_err_e        return_val;
2377         lowner_cb_ctl_t lcb;
2378 
2379         /*
2380          * Just sanity checks.
2381          */
2382         if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2383                 return (return_val);
2384         ta_p = th_p->th_ta_p;
2385         ph_unlock(ta_p);
2386 
2387         lcb.owner_cb = cb;
2388         lcb.owner_cb_arg = cb_data;
2389         lcb.th_p = (td_thrhandle_t *)th_p;
2390         return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2391 }
2392 
2393 /*
2394  * If a thread is asleep on a synchronization variable,
2395  * then get the synchronization handle.
2396  */
2397 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2398 td_err_e
2399 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2400 {
2401         struct ps_prochandle *ph_p;
2402         td_err_e        return_val = TD_OK;
2403         uintptr_t       wchan;
2404 
2405         if (sh_p == NULL)
2406                 return (TD_ERR);
2407         if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2408                 return (return_val);
2409 
2410         /*
2411          * No need to stop the process for a simple read.
2412          */
2413         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2414                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2415 
2416                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2417                     &wchan, sizeof (wchan)) != PS_OK)
2418                         return_val = TD_DBERR;
2419         } else {
2420 #if defined(_LP64) && defined(_SYSCALL32)
2421                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2422                 caddr32_t wchan32;
2423 
2424                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2425                     &wchan32, sizeof (wchan32)) != PS_OK)
2426                         return_val = TD_DBERR;
2427                 wchan = wchan32;
2428 #else
2429                 return_val = TD_ERR;
2430 #endif  /* _SYSCALL32 */
2431         }
2432 
2433         if (return_val != TD_OK || wchan == NULL) {
2434                 sh_p->sh_ta_p = NULL;
2435                 sh_p->sh_unique = NULL;
2436                 if (return_val == TD_OK)
2437                         return_val = TD_ERR;
2438         } else {
2439                 sh_p->sh_ta_p = th_p->th_ta_p;
2440                 sh_p->sh_unique = (psaddr_t)wchan;
2441         }
2442 
2443         ph_unlock(th_p->th_ta_p);
2444         return (return_val);
2445 }
2446 
2447 /*
2448  * Which thread is running on an lwp?
2449  */
2450 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2451 td_err_e
2452 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2453         td_thrhandle_t *th_p)
2454 {
2455         return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2456 }
2457 
2458 /*
2459  * Common code for td_sync_get_info() and td_sync_get_stats()
2460  */
2461 static td_err_e
2462 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2463         td_syncinfo_t *si_p)
2464 {
2465         int trunc = 0;
2466         td_so_un_t generic_so;
2467 
2468         /*
2469          * Determine the sync. object type; a little type fudgery here.
2470          * First attempt to read the whole union.  If that fails, attempt
2471          * to read just the condvar.  A condvar is the smallest sync. object.
2472          */
2473         if (ps_pdread(ph_p, sh_p->sh_unique,
2474             &generic_so, sizeof (generic_so)) != PS_OK) {
2475                 trunc = 1;
2476                 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2477                     sizeof (generic_so.condition)) != PS_OK)
2478                         return (TD_DBERR);
2479         }
2480 
2481         switch (generic_so.condition.cond_magic) {
2482         case MUTEX_MAGIC:
2483                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2484                     &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2485                         return (TD_DBERR);
2486                 si_p->si_type = TD_SYNC_MUTEX;
2487                 si_p->si_shared_type =
2488                     (generic_so.lock.mutex_type & USYNC_PROCESS);
2489                 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2490                     sizeof (generic_so.lock.mutex_flag));
2491                 si_p->si_state.mutex_locked =
2492                     (generic_so.lock.mutex_lockw != 0);
2493                 si_p->si_size = sizeof (generic_so.lock);
2494                 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2495                 si_p->si_rcount = generic_so.lock.mutex_rcount;
2496                 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2497                 if (si_p->si_state.mutex_locked) {
2498                         if (si_p->si_shared_type & USYNC_PROCESS)
2499                                 si_p->si_ownerpid =
2500                                     generic_so.lock.mutex_ownerpid;
2501                         si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2502                         si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2503                 }
2504                 break;
2505         case COND_MAGIC:
2506                 si_p->si_type = TD_SYNC_COND;
2507                 si_p->si_shared_type =
2508                     (generic_so.condition.cond_type & USYNC_PROCESS);
2509                 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2510                     sizeof (generic_so.condition.flags.flag));
2511                 si_p->si_size = sizeof (generic_so.condition);
2512                 si_p->si_has_waiters =
2513                     (generic_so.condition.cond_waiters_user |
2514                     generic_so.condition.cond_waiters_kernel)? 1 : 0;
2515                 break;
2516         case SEMA_MAGIC:
2517                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2518                     &generic_so.semaphore, sizeof (generic_so.semaphore))
2519                     != PS_OK)
2520                         return (TD_DBERR);
2521                 si_p->si_type = TD_SYNC_SEMA;
2522                 si_p->si_shared_type =
2523                     (generic_so.semaphore.type & USYNC_PROCESS);
2524                 si_p->si_state.sem_count = generic_so.semaphore.count;
2525                 si_p->si_size = sizeof (generic_so.semaphore);
2526                 si_p->si_has_waiters =
2527                     ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2528                 /* this is useless but the old interface provided it */
2529                 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2530                 break;
2531         case RWL_MAGIC:
2532         {
2533                 uint32_t rwstate;
2534 
2535                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2536                     &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2537                         return (TD_DBERR);
2538                 si_p->si_type = TD_SYNC_RWLOCK;
2539                 si_p->si_shared_type =
2540                     (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2541                 si_p->si_size = sizeof (generic_so.rwlock);
2542 
2543                 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2544                 if (rwstate & URW_WRITE_LOCKED) {
2545                         si_p->si_state.nreaders = -1;
2546                         si_p->si_is_wlock = 1;
2547                         si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2548                         si_p->si_owner.th_unique =
2549                             generic_so.rwlock.rwlock_owner;
2550                         if (si_p->si_shared_type & USYNC_PROCESS)
2551                                 si_p->si_ownerpid =
2552                                     generic_so.rwlock.rwlock_ownerpid;
2553                 } else {
2554                         si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2555                 }
2556                 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2557 
2558                 /* this is useless but the old interface provided it */
2559                 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2560                 break;
2561         }
2562         default:
2563                 return (TD_BADSH);
2564         }
2565 
2566         si_p->si_ta_p = sh_p->sh_ta_p;
2567         si_p->si_sv_addr = sh_p->sh_unique;
2568         return (TD_OK);
2569 }
2570 
2571 /*
2572  * Given a synchronization handle, fill in the
2573  * information for the synchronization variable into *si_p.
2574  */
2575 #pragma weak td_sync_get_info = __td_sync_get_info
2576 td_err_e
2577 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2578 {
2579         struct ps_prochandle *ph_p;
2580         td_err_e return_val;
2581 
2582         if (si_p == NULL)
2583                 return (TD_ERR);
2584         (void) memset(si_p, 0, sizeof (*si_p));
2585         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2586                 return (return_val);
2587         if (ps_pstop(ph_p) != PS_OK) {
2588                 ph_unlock(sh_p->sh_ta_p);
2589                 return (TD_DBERR);
2590         }
2591 
2592         return_val = sync_get_info_common(sh_p, ph_p, si_p);
2593 
2594         (void) ps_pcontinue(ph_p);
2595         ph_unlock(sh_p->sh_ta_p);
2596         return (return_val);
2597 }
2598 
2599 static uint_t
2600 tdb_addr_hash64(uint64_t addr)
2601 {
2602         uint64_t value60 = (addr >> 4);
2603         uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2604         return ((value30 >> 15) ^ (value30 & 0x7fff));
2605 }
2606 
2607 static uint_t
2608 tdb_addr_hash32(uint64_t addr)
2609 {
2610         uint32_t value30 = (addr >> 2);           /* 30 bits */
2611         return ((value30 >> 15) ^ (value30 & 0x7fff));
2612 }
2613 
2614 static td_err_e
2615 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2616         psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2617 {
2618         psaddr_t next_desc;
2619         uint64_t first;
2620         uint_t ix;
2621 
2622         /*
2623          * Compute the hash table index from the synch object's address.
2624          */
2625         if (ta_p->model == PR_MODEL_LP64)
2626                 ix = tdb_addr_hash64(sync_obj_addr);
2627         else
2628                 ix = tdb_addr_hash32(sync_obj_addr);
2629 
2630         /*
2631          * Get the address of the first element in the linked list.
2632          */
2633         if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2634             &first, sizeof (first)) != PS_OK)
2635                 return (TD_DBERR);
2636 
2637         /*
         * Search the linked list for an entry for the synch object.
2639          */
2640         for (next_desc = (psaddr_t)first; next_desc != NULL;
2641             next_desc = (psaddr_t)sync_stats->next) {
2642                 if (ps_pdread(ta_p->ph_p, next_desc,
2643                     sync_stats, sizeof (*sync_stats)) != PS_OK)
2644                         return (TD_DBERR);
2645                 if (sync_stats->sync_addr == sync_obj_addr)
2646                         return (TD_OK);
2647         }
2648 
2649         (void) memset(sync_stats, 0, sizeof (*sync_stats));
2650         return (TD_OK);
2651 }
2652 
2653 /*
2654  * Given a synchronization handle, fill in the
2655  * statistics for the synchronization variable into *ss_p.
2656  */
2657 #pragma weak td_sync_get_stats = __td_sync_get_stats
2658 td_err_e
2659 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2660 {
2661         struct ps_prochandle *ph_p;
2662         td_thragent_t *ta_p;
2663         td_err_e return_val;
2664         register_sync_t enable;
2665         psaddr_t hashaddr;
2666         tdb_sync_stats_t sync_stats;
2667         size_t ix;
2668 
2669         if (ss_p == NULL)
2670                 return (TD_ERR);
2671         (void) memset(ss_p, 0, sizeof (*ss_p));
2672         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2673                 return (return_val);
2674         ta_p = sh_p->sh_ta_p;
2675         if (ps_pstop(ph_p) != PS_OK) {
2676                 ph_unlock(ta_p);
2677                 return (TD_DBERR);
2678         }
2679 
2680         if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2681             != TD_OK) {
2682                 if (return_val != TD_BADSH)
2683                         goto out;
2684                 /* we can correct TD_BADSH */
2685                 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2686                 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2687                 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2688                 /* we correct si_type and si_size below */
2689                 return_val = TD_OK;
2690         }
2691         if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2692             &enable, sizeof (enable)) != PS_OK) {
2693                 return_val = TD_DBERR;
2694                 goto out;
2695         }
2696         if (enable != REGISTER_SYNC_ON)
2697                 goto out;
2698 
2699         /*
2700          * Get the address of the hash table in the target process.
2701          */
2702         if (ta_p->model == PR_MODEL_NATIVE) {
2703                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2704                     offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
                    &hashaddr, sizeof (hashaddr)) != PS_OK) {
2706                         return_val = TD_DBERR;
2707                         goto out;
2708                 }
2709         } else {
2710 #if defined(_LP64) && defined(_SYSCALL32)
2711                 caddr32_t addr;
2712 
2713                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2714                     offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2715                     &addr, sizeof (addr)) != PS_OK) {
2716                         return_val = TD_DBERR;
2717                         goto out;
2718                 }
2719                 hashaddr = addr;
2720 #else
2721                 return_val = TD_ERR;
2722                 goto out;
2723 #endif  /* _SYSCALL32 */
2724         }
2725 
2726         if (hashaddr == 0)
2727                 return_val = TD_BADSH;
2728         else
2729                 return_val = read_sync_stats(ta_p, hashaddr,
2730                     sh_p->sh_unique, &sync_stats);
2731         if (return_val != TD_OK)
2732                 goto out;
2733 
2734         /*
2735          * We have the hash table entry.  Transfer the data to
2736          * the td_syncstats_t structure provided by the caller.
2737          */
2738         switch (sync_stats.un.type) {
2739         case TDB_MUTEX:
2740         {
2741                 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2742 
2743                 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2744                 ss_p->ss_info.si_size = sizeof (mutex_t);
2745                 msp->mutex_lock =
2746                     sync_stats.un.mutex.mutex_lock;
2747                 msp->mutex_sleep =
2748                     sync_stats.un.mutex.mutex_sleep;
2749                 msp->mutex_sleep_time =
2750                     sync_stats.un.mutex.mutex_sleep_time;
2751                 msp->mutex_hold_time =
2752                     sync_stats.un.mutex.mutex_hold_time;
2753                 msp->mutex_try =
2754                     sync_stats.un.mutex.mutex_try;
2755                 msp->mutex_try_fail =
2756                     sync_stats.un.mutex.mutex_try_fail;
2757                 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2758                     (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2759                     < ta_p->hash_size * sizeof (thr_hash_table_t))
2760                         msp->mutex_internal =
2761                             ix / sizeof (thr_hash_table_t) + 1;
2762                 break;
2763         }
2764         case TDB_COND:
2765         {
2766                 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2767 
2768                 ss_p->ss_info.si_type = TD_SYNC_COND;
2769                 ss_p->ss_info.si_size = sizeof (cond_t);
2770                 csp->cond_wait =
2771                     sync_stats.un.cond.cond_wait;
2772                 csp->cond_timedwait =
2773                     sync_stats.un.cond.cond_timedwait;
2774                 csp->cond_wait_sleep_time =
2775                     sync_stats.un.cond.cond_wait_sleep_time;
2776                 csp->cond_timedwait_sleep_time =
2777                     sync_stats.un.cond.cond_timedwait_sleep_time;
2778                 csp->cond_timedwait_timeout =
2779                     sync_stats.un.cond.cond_timedwait_timeout;
2780                 csp->cond_signal =
2781                     sync_stats.un.cond.cond_signal;
2782                 csp->cond_broadcast =
2783                     sync_stats.un.cond.cond_broadcast;
2784                 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2785                     (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2786                     < ta_p->hash_size * sizeof (thr_hash_table_t))
2787                         csp->cond_internal =
2788                             ix / sizeof (thr_hash_table_t) + 1;
2789                 break;
2790         }
2791         case TDB_RWLOCK:
2792         {
2793                 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2794 
2795                 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2796                 ss_p->ss_info.si_size = sizeof (rwlock_t);
2797                 rwsp->rw_rdlock =
2798                     sync_stats.un.rwlock.rw_rdlock;
2799                 rwsp->rw_rdlock_try =
2800                     sync_stats.un.rwlock.rw_rdlock_try;
2801                 rwsp->rw_rdlock_try_fail =
2802                     sync_stats.un.rwlock.rw_rdlock_try_fail;
2803                 rwsp->rw_wrlock =
2804                     sync_stats.un.rwlock.rw_wrlock;
2805                 rwsp->rw_wrlock_hold_time =
2806                     sync_stats.un.rwlock.rw_wrlock_hold_time;
2807                 rwsp->rw_wrlock_try =
2808                     sync_stats.un.rwlock.rw_wrlock_try;
2809                 rwsp->rw_wrlock_try_fail =
2810                     sync_stats.un.rwlock.rw_wrlock_try_fail;
2811                 break;
2812         }
2813         case TDB_SEMA:
2814         {
2815                 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2816 
2817                 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2818                 ss_p->ss_info.si_size = sizeof (sema_t);
2819                 ssp->sema_wait =
2820                     sync_stats.un.sema.sema_wait;
2821                 ssp->sema_wait_sleep =
2822                     sync_stats.un.sema.sema_wait_sleep;
2823                 ssp->sema_wait_sleep_time =
2824                     sync_stats.un.sema.sema_wait_sleep_time;
2825                 ssp->sema_trywait =
2826                     sync_stats.un.sema.sema_trywait;
2827                 ssp->sema_trywait_fail =
2828                     sync_stats.un.sema.sema_trywait_fail;
2829                 ssp->sema_post =
2830                     sync_stats.un.sema.sema_post;
2831                 ssp->sema_max_count =
2832                     sync_stats.un.sema.sema_max_count;
2833                 ssp->sema_min_count =
2834                     sync_stats.un.sema.sema_min_count;
2835                 break;
2836         }
2837         default:
2838                 return_val = TD_BADSH;
2839                 break;
2840         }
2841 
2842 out:
2843         (void) ps_pcontinue(ph_p);
2844         ph_unlock(ta_p);
2845         return (return_val);
2846 }
2847 
2848 /*
2849  * Change the state of a synchronization variable.
2850  *      1) mutex lock state set to value
2851  *      2) semaphore's count set to value
 *      3) rwlock write-locked if value < 0
 *      4) rwlock's reader count set to value >= 0
2854  * Currently unused by dbx.
2855  */
2856 #pragma weak td_sync_setstate = __td_sync_setstate
2857 td_err_e
2858 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2859 {
2860         struct ps_prochandle *ph_p;
2861         int             trunc = 0;
2862         td_err_e        return_val;
2863         td_so_un_t      generic_so;
2864         uint32_t        *rwstate;
2865         int             value = (int)lvalue;
2866 
2867         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2868                 return (return_val);
2869         if (ps_pstop(ph_p) != PS_OK) {
2870                 ph_unlock(sh_p->sh_ta_p);
2871                 return (TD_DBERR);
2872         }
2873 
	/*
	 * Read the synchronization variable information.
	 * First attempt to read the whole union; if that fails,
	 * fall back to reading only the smallest member, the condvar.
	 */
2879         if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2880             sizeof (generic_so)) != PS_OK) {
2881                 trunc = 1;
2882                 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2883                     sizeof (generic_so.condition)) != PS_OK) {
2884                         (void) ps_pcontinue(ph_p);
2885                         ph_unlock(sh_p->sh_ta_p);
2886                         return (TD_DBERR);
2887                 }
2888         }
2889 
	/*
	 * Dispatch on the magic number to find the type of the sync.
	 * variable.  For each type, finish reading the variable from
	 * the process if the first read was truncated, set the new
	 * value, and write the variable back to the process.
	 */
	switch (generic_so.condition.cond_magic) {
2895         case MUTEX_MAGIC:
2896                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2897                     &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2898                         return_val = TD_DBERR;
2899                         break;
2900                 }
2901                 generic_so.lock.mutex_lockw = (uint8_t)value;
2902                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2903                     sizeof (generic_so.lock)) != PS_OK)
2904                         return_val = TD_DBERR;
2905                 break;
2906         case SEMA_MAGIC:
2907                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2908                     &generic_so.semaphore, sizeof (generic_so.semaphore))
2909                     != PS_OK) {
2910                         return_val = TD_DBERR;
2911                         break;
2912                 }
2913                 generic_so.semaphore.count = value;
2914                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2915                     sizeof (generic_so.semaphore)) != PS_OK)
2916                         return_val = TD_DBERR;
2917                 break;
2918         case COND_MAGIC:
2919                 /* Operation not supported on a condition variable */
2920                 return_val = TD_ERR;
2921                 break;
2922         case RWL_MAGIC:
2923                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2924                     &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2925                         return_val = TD_DBERR;
2926                         break;
2927                 }
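		/*
		 * Keep only the waiters bit, then install either the
		 * writer-locked bit or the new reader count.
		 */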
2928                 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2929                 *rwstate &= URW_HAS_WAITERS;
2930                 if (value < 0)
2931                         *rwstate |= URW_WRITE_LOCKED;
2932                 else
2933                         *rwstate |= (value & URW_READERS_MASK);
2934                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2935                     sizeof (generic_so.rwlock)) != PS_OK)
2936                         return_val = TD_DBERR;
2937                 break;
2938         default:
2939                 /* Bad sync. object type */
2940                 return_val = TD_BADSH;
2941                 break;
2942         }
2943 
2944         (void) ps_pcontinue(ph_p);
2945         ph_unlock(sh_p->sh_ta_p);
2946         return (return_val);
2947 }
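
/*
 * Illustrative sketch (not part of the library): given a stopped
 * process and a valid synchandle sync_h (a hypothetical name), a
 * debugger could reset a sync. object's state as described above:
 *
 *	(void) td_sync_setstate(&sync_h, 0);	force mutex unlocked
 *	(void) td_sync_setstate(&sync_h, 3);	set semaphore count to 3
 *	(void) td_sync_setstate(&sync_h, -1);	mark rwlock writer-locked
 */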
2948 
2949 typedef struct {
2950         td_thr_iter_f   *waiter_cb;
2951         psaddr_t        sync_obj_addr;
2952         uint16_t        sync_magic;
2953         void            *waiter_cb_arg;
2954         td_err_e        errcode;
2955 } waiter_cb_ctl_t;
2956 
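/*
 * Iterator callback used by td_sync_waiters() below.  Read the
 * thread's wait channel (ul_wchan) from the target process; if it
 * matches the address of the synchronization object, pass the thread
 * handle to the user's callback.  A nonzero return value terminates
 * the iteration.
 */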
2957 static int
2958 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2959 {
2960         td_thragent_t   *ta_p = th_p->th_ta_p;
2961         struct ps_prochandle *ph_p = ta_p->ph_p;
2962         waiter_cb_ctl_t *wcb = arg;
2963         caddr_t         wchan;
2964 
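	/*
	 * The ulwp_t layout differs between 32-bit and 64-bit targets,
	 * so read ul_wchan using the layout that matches the target's
	 * data model.
	 */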
2965         if (ta_p->model == PR_MODEL_NATIVE) {
2966                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2967 
2968                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2969                     &wchan, sizeof (wchan)) != PS_OK) {
2970                         wcb->errcode = TD_DBERR;
2971                         return (1);
2972                 }
2973         } else {
2974 #if defined(_LP64) && defined(_SYSCALL32)
2975                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2976                 caddr32_t wchan32;
2977 
2978                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2979                     &wchan32, sizeof (wchan32)) != PS_OK) {
2980                         wcb->errcode = TD_DBERR;
2981                         return (1);
2982                 }
2983                 wchan = (caddr_t)(uintptr_t)wchan32;
2984 #else
2985                 wcb->errcode = TD_ERR;
2986                 return (1);
#endif	/* _LP64 && _SYSCALL32 */
2988         }
2989 
2990         if (wchan == NULL)
2991                 return (0);
2992 
2993         if (wchan == (caddr_t)wcb->sync_obj_addr)
2994                 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
2995 
2996         return (0);
2997 }
2998 
/*
 * For a given synchronization variable, iterate over the
 * set of waiting threads.  The callback function is passed
 * two parameters: a pointer to a thread handle and a pointer
 * to extra callback data.
 */
3005 #pragma weak td_sync_waiters = __td_sync_waiters
3006 td_err_e
3007 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3008 {
3009         struct ps_prochandle *ph_p;
3010         waiter_cb_ctl_t wcb;
3011         td_err_e        return_val;
3012 
3013         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3014                 return (return_val);
3015         if (ps_pdread(ph_p,
3016             (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3017             (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3018                 ph_unlock(sh_p->sh_ta_p);
3019                 return (TD_DBERR);
3020         }
3021         ph_unlock(sh_p->sh_ta_p);
3022 
3023         switch (wcb.sync_magic) {
3024         case MUTEX_MAGIC:
3025         case COND_MAGIC:
3026         case SEMA_MAGIC:
3027         case RWL_MAGIC:
3028                 break;
3029         default:
3030                 return (TD_BADSH);
3031         }
3032 
3033         wcb.waiter_cb = cb;
3034         wcb.sync_obj_addr = sh_p->sh_unique;
3035         wcb.waiter_cb_arg = cb_data;
3036         wcb.errcode = TD_OK;
3037         return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3038             TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3039             TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3040 
3041         if (return_val != TD_OK)
3042                 return (return_val);
3043 
3044         return (wcb.errcode);
3045 }
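
/*
 * Illustrative sketch (not part of the library): counting the threads
 * blocked on a synchronization object.  The synchandle sync_h and the
 * callback count_waiter() are hypothetical names.
 *
 *	static int
 *	count_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);		zero continues the iteration
 *	}
 *
 *	int nwaiters = 0;
 *
 *	if (td_sync_waiters(&sync_h, count_waiter, &nwaiters) == TD_OK)
 *		(void) printf("%d waiters\n", nwaiters);
 */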