/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright (c) 2017 by The MathWorks, Inc. All rights reserved.
 */
/*
 * Copyright 2016 Joyent, Inc.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <pthread.h>
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>
#include "libc.h"

/*
 * These symbols should not be exported from libc, but
 * /lib/libm.so.2 references _thr_main. libm needs to be fixed.
 * Also, some older versions of the Studio compiler/debugger
 * components reference them. These need to be fixed, too.
 */
#pragma weak _thr_main = thr_main
#pragma weak _thr_create = thr_create
#pragma weak _thr_join = thr_join
#pragma weak _thr_self = thr_self

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread". The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc. The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	((tid) & (udp)->hash_mask)

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* ld_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },	/* atexit_root */
	{ RECURSIVEMUTEX, NULL },	/* quickexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },	/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* robustlist */
	NULL,			/* progname */
	NULL,			/* ub_comm_page */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int thread_queue_fifo = 4;
int thread_queue_dump = 0;
int thread_cond_wait_defer = 0;
int thread_error_detection = 0;
int thread_async_safe = 0;
int thread_stack_cache = 10;
int thread_door_noreserve = 0;
int thread_locks_misaligned = 0;

static ulwp_t *ulwp_alloc(void);
static void ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}

/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}
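
/*
 * Aside on the idiom used by hash_out_unlocked() and the free-list scans
 * below: removing a node from a singly linked list through a pointer to
 * the link field (ulwp_t **) handles the head and interior cases
 * identically.  A minimal stand-alone sketch of the same idiom, using a
 * hypothetical node_t type that is not part of libc:
 *
 *	typedef struct node {
 *		struct node *next;
 *	} node_t;
 *
 *	void
 *	unlink_node(node_t **headp, node_t *target)
 *	{
 *		node_t **npp;
 *
 *		for (npp = headp; *npp != target; npp = &(*npp)->next)
 *			;
 *		*npp = target->next;
 *		target->next = NULL;
 *	}
 *
 * Like hash_out_unlocked(), this assumes the target is known to be on
 * the list; a defensive version would also test *npp against NULL.
 */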

/*
 * Retain stack information for thread structures that are being recycled for
 * new threads. All other members of the thread structure should be zeroed.
 */
static void
ulwp_clean(ulwp_t *ulwp)
{
	caddr_t stk = ulwp->ul_stk;
	size_t mapsiz = ulwp->ul_mapsiz;
	size_t guardsize = ulwp->ul_guardsize;
	uintptr_t stktop = ulwp->ul_stktop;
	size_t stksiz = ulwp->ul_stksiz;

	(void) memset(ulwp, 0, sizeof (*ulwp));

	ulwp->ul_stk = stk;
	ulwp->ul_mapsiz = mapsiz;
	ulwp->ul_guardsize = guardsize;
	ulwp->ul_stktop = stktop;
	ulwp->ul_stksiz = stksiz;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    _lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}

/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
static ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack. Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			if (guardsize)	/* protect the extra red zone */
				(void) mprotect(stk, guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}
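
/*
 * For reference, the stksize and guardsize values that reach find_stack()
 * originate with the application.  An illustrative sketch (not part of
 * libc) that exercises them through the standard attribute interfaces;
 * the sizes chosen here are arbitrary examples:
 *
 *	#include <pthread.h>
 *
 *	static void *
 *	start(void *arg)
 *	{
 *		return (arg);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		pthread_attr_t attr;
 *		pthread_t tid;
 *
 *		(void) pthread_attr_init(&attr);
 *		(void) pthread_attr_setstacksize(&attr, 512 * 1024);
 *		(void) pthread_attr_setguardsize(&attr, 8192);
 *		if (pthread_create(&tid, &attr, start, NULL) == 0)
 *			(void) pthread_join(tid, NULL);
 *		(void) pthread_attr_destroy(&attr);
 *		return (0);
 *	}
 *
 * Threads created with identical mapping and guard sizes can reuse each
 * other's cached stacks via the lwp_stacks list maintained above.
 */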

/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}

/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}

int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
    long flags, thread_t *new_thread, size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if ((stk || stksize) && stksize < MINSTACK)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
	}
	/* ulwp is not in the hash table; make sure hash_out() doesn't fail */
	ulwp->ul_ix = -1;
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;
	ulwp->ul_misaligned = self->ul_misaligned;

	/* new thread inherits creating thread's scheduling parameters */
	ulwp->ul_policy = self->ul_policy;
	ulwp->ul_pri = (self->ul_epri? self->ul_epri : self->ul_pri);
	ulwp->ul_cid = self->ul_cid;
	ulwp->ul_rtclassid = self->ul_rtclassid;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider. When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called. _thrp_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	error = setup_context(&uc, _thrp_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize);
	if (error != 0 && stk != NULL)	/* inaccessible stack */
		error = EFAULT;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

int
thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
    long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread, 0));
}
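
/*
 * Illustrative use of the native interface (a sketch, not libc-internal
 * code): create a detached thread in the suspended state and then let
 * it run.  start() and arg stand in for application code.
 *
 *	#include <thread.h>
 *
 *	thread_t tid;
 *	int err;
 *
 *	err = thr_create(NULL, 0, start, arg,
 *	    THR_SUSPENDED | THR_DETACHED, &tid);
 *	if (err == 0)
 *		err = thr_continue(tid);
 *
 * THR_SUSPENDED corresponds to skipping the _thrp_continue() call at the
 * end of _thrp_create(); THR_DETACHED and THR_DAEMON translate to the
 * LWP_DETACHED and LWP_DAEMON lwp_flags shown above.
 */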

/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}
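
/*
 * A hedged sketch of how a DCE-style runtime might install the hook;
 * the dce_unwind_handler name is hypothetical and the interface is
 * private to the library:
 *
 *	extern int _pthread_setcleanupinit(void (*)(void));
 *
 *	static void
 *	dce_unwind_handler(void)
 *	{
 *		... raise the runtime's cancel exception; this may
 *		... longjmp() and never return to the caller ...
 *	}
 *
 *	(void) _pthread_setcleanupinit(dce_unwind_handler);
 *
 * The hook fires in _thrp_exit_common(), below, only when the thread is
 * terminating because it was cancelled.
 */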

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process. We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	/*
	 * tsd_exit() may run destructors that call free(), which depends
	 * on tmem; therefore tmem_exit() must be called after tsd_exit()
	 * and tls_exit().
	 */
	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	tmem_exit();		/* deallocate tmem allocations */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
#if defined(THREAD_DEBUG)
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
#endif
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure. Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

#if defined(THREAD_DEBUG)
void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}
#endif

static void __NORETURN
_thrp_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled. Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	set_cancel_pending_flag(self, 1);
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask. We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior. Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag. It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thrp_exit_common(): _thrp_unwind() returned");

	for (;;)	/* to shut the compiler up about __NORETURN */
		continue;
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thrp_terminate(void *status)
{
	_thrp_exit_common(status, 0);
}

#pragma weak pthread_exit = thr_exit
#pragma weak _thr_exit = thr_exit
void
thr_exit(void *status)
{
	_thrp_exit_common(status, 1);
}
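
/*
 * Because ul_rval merely records the status pointer for a later joiner,
 * the data it points to must outlive the exiting thread; a pointer into
 * the thread's own stack becomes dangling once find_stack() recycles
 * that stack.  A minimal sketch (illustrative only):
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		static int result;
 *
 *		result = compute(arg);
 *		thr_exit(&result);
 *	}
 *
 * compute() is a hypothetical helper.  Returning the address of an
 * automatic variable instead of &result would hand the joiner a pointer
 * into a reclaimed stack.
 */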

int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about. It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

int
thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}

/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak _pthread_join = pthread_join
int
pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}
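
/*
 * Unlike pthread_join(), thr_join() also accepts tid == 0, meaning "join
 * any joinable thread", and reports which one via the departed argument.
 * An illustrative reaping loop (process_result() is a hypothetical
 * consumer):
 *
 *	#include <thread.h>
 *
 *	thread_t who;
 *	void *status;
 *
 *	while (thr_join(0, &who, &status) == 0)
 *		process_result(who, status);
 *
 * The loop terminates with a nonzero error once no joinable threads
 * remain.
 */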

int
pthread_detach(pthread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}
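
/*
 * Note that detaching an already-terminated thread reaps it through
 * _thrp_join() above, releasing its zombie state.  Typical usage
 * (sketch; start() is application code):
 *
 *	pthread_t tid;
 *
 *	if (pthread_create(&tid, NULL, start, NULL) == 0)
 *		(void) pthread_detach(tid);
 *
 * Once detached, the thread's resources are reclaimed automatically
 * when it exits, and a subsequent join attempt fails.
 */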

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
		thread_adaptive_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
#endif
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
	if ((value = envvar(ev, "LOCKS_MISALIGNED", 1)) >= 0)
		thread_locks_misaligned = value;
}

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && strncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && strncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}
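
/*
 * Example: the tunables recognized by etest() above can be set from the
 * shell before starting a program (./myprog is a placeholder):
 *
 *	$ _THREAD_STACK_CACHE=20 _THREAD_QUEUE_SPIN=5000 ./myprog
 *	$ LIBTHREAD_ERROR_DETECTION=2 ./myprog
 *
 * Values are clamped to each variable's limit as coded in etest(), and
 * a non-numeric value is ignored because envvar() returns -1 for it.
 */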

/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __proc64id(void);
#endif

static void
init_auxv_data(uberdata_t *udp)
{
	Dl_argsinfo_t args;

	udp->ub_comm_page = NULL;
	if (dlinfo(RTLD_SELF, RTLD_DI_ARGSINFO, &args) < 0)
		return;

	while (args.dla_auxv->a_type != AT_NULL) {
		if (args.dla_auxv->a_type == AT_SUN_COMMPAGE) {
			udp->ub_comm_page = args.dla_auxv->a_un.a_ptr;
		}
		args.dla_auxv++;
	}
}

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD and Intel assembler strfoo() and memfoo() functions.
	 */
	__proc64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * Every libc, regardless of link map, needs to go through and check
	 * its aux vectors. Doing so will indicate whether or not this
	 * process has been given a comm page (to optimize certain system
	 * actions).
	 */
	init_auxv_data(udp);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map. We switch to the primary link map
	 * and stay there once we see it. All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		init_progname();
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]) |
	    (self->ul_sigmask.__sigbits[2] ^ uc.uc_sigmask.__sigbits[2]) |
	    (self->ul_sigmask.__sigbits[3] ^ uc.uc_sigmask.__sigbits[3]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider. When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* _lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_policy = -1;	/* initialize only when needed */
	self->ul_pri = 0;
	self->ul_cid = 0;
	self->ul_rtclassid = -1;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc. We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) memcpy(udp, oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory. When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata. This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here. Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_misaligned = (char)thread_locks_misaligned;
	self->ul_max_spinners = (uint8_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_queue_spin = thread_queue_spin;

#if defined(__sparc) && !defined(_LP64)
	if (self->ul_misaligned) {
		/*
		 * Tell the kernel to fix up ldx/stx instructions that
		 * refer to non-8-byte aligned data instead of giving
		 * the process an alignment trap and generating SIGBUS.
		 *
		 * Programs compiled for 32-bit sparc with the Studio SS12
		 * compiler get this done for them automatically (in _init()).
		 * We do it here for the benefit of programs compiled with
		 * other compilers, like gcc.
		 *
		 * This is necessary for the _THREAD_LOCKS_MISALIGNED=1
		 * environment variable horrible hack to work.
		 */
		extern void _do_fix_align(void);
		_do_fix_align();
	}
#endif

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 * Set up our pointer to the program name.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);
	init_progname();

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/*
	 * Make private copies of __xpg4 and __xpg6 so libc can test
	 * them after this point without invoking the dynamic linker.
	 */
	libc__xpg4 = __xpg4;
	libc__xpg6 = __xpg6;

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime: a reference
	 * to __threaded may bind to a definition of __threaded outside
	 * libc, and that definition may not have the initial value of 1
	 * (there may be no copy relocation in a.out to carry it over).
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread(). We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
	 */
	update_sched(self);

	/*
	 * Allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(grab_assert_lock);
#if defined(THREAD_DEBUG)
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(collect_queue_statistics);
#endif
}

/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = _lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Some thread in the parent might have been suspended
	 * while holding udp->callout_lock or udp->ld_lock.
	 * Reinitialize the child's copies.
	 */
	(void) mutex_init(&udp->callout_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	(void) mutex_init(&udp->ld_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);

	/* no one in the child is on a sleep queue; reinitialize */
	if ((qp = udp->queue_head) != NULL) {
		(void) memset(qp, 0, 2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
			qp->qh_type = (i < QHASHSIZE)? MX : CV;
			qp->qh_lock.mutex_flag = LOCK_INITED;
			qp->qh_lock.mutex_magic = MUTEX_MAGIC;
			qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
			qp->qh_hlen = 1;
			qp->qh_hmax = 1;
#endif
		}
	}

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 * We need to do this before unmapping all of the abandoned
	 * threads' stacks, below, because the post-fork1 actions
	 * might require access to those stacks.
	 */
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
	/*
	 * The above subsystems use thread pools, so this action
	 * must be performed after those actions.
	 */
	postfork1_child_tpool();

	/*
	 * All lwps except ourself are gone. Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);
}
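
/*
 * The practical consequence for applications: after fork1(), the child
 * is single-threaded but libc's thread machinery has been reset, so the
 * child is not limited to calling exec().  A hedged sketch (child_work
 * is a hypothetical start routine; application-level locks held by
 * other threads in the parent remain a hazard):
 *
 *	#include <thread.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		thread_t tid;
 *		(void) thr_create(NULL, 0, child_work, NULL, 0, &tid);
 *	}
 *
 * Threads that existed in the parent are simply gone in the child, as
 * marked by mark_dead_and_buried() above.
 */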

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = thr_self
#pragma weak pthread_self = thr_self
thread_t
thr_self()
{
	return (curthread->ul_lwpid);
}

int
thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}
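
/*
 * thr_main() returns 1 in the main thread, 0 in any other thread, and
 * -1 if curthread has not been established yet.  Illustrative check
 * (sketch only):
 *
 *	if (thr_main() == 1) {
 *		... running on the primordial thread ...
 *	}
 */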

int
_thrp_cancelled(void)
{
	return (curthread->ul_rval == PTHREAD_CANCELED);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak _thr_stksegment = thr_stksegment
int
thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}
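
/*
 * Note that ss_sp reported here is the stack's top (its upper bound),
 * not its base.  Illustrative use, deriving the base address (sketch
 * only):
 *
 *	#include <thread.h>
 *
 *	stack_t ss;
 *
 *	if (thr_stksegment(&ss) == 0) {
 *		caddr_t base = (caddr_t)ss.ss_sp - ss.ss_size;
 *		... the stack spans the range [base, ss.ss_sp) ...
 *	}
 */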
1802
1803 void
1804 force_continue(ulwp_t *ulwp)
1805 {
1806 #if defined(THREAD_DEBUG)
1807 ulwp_t *self = curthread;
1808 uberdata_t *udp = self->ul_uberdata;
1809 #endif
1810 int error;
1811 timespec_t ts;
1812
1813 ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1814 ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
1815
1816 for (;;) {
1817 error = _lwp_continue(ulwp->ul_lwpid);
1818 if (error != 0 && error != EINTR)
1819 break;
1820 error = 0;
1821 if (ulwp->ul_stopping) { /* it is stopping itsself */
			ts.tv_sec = 0;		/* give it a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) __nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* it is running now */
			break;			/* so we are done */
		/*
		 * It is marked as being in the process of stopping
		 * itself.  Loop around and continue it again.
		 * It may not have been stopped the first time.
		 */
	}
}

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe point,
 * that is, to a point where ul_critical and ul_rtld are both zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if ((ulwp->ul_critical == 0 && ulwp->ul_rtld == 0) ||
	    ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
		 */
		ulwp->ul_pleasestop |= whystopped;
		force_continue(ulwp);
		if (link_dropped != NULL) {
			*link_dropped = 1;
			lmutex_unlock(&udp->link_lock);
			/* be sure to drop link_lock only once */
			link_dropped = NULL;
		}

		/*
		 * The thread may disappear by calling thr_exit() so we
		 * cannot rely on the ulwp pointer after dropping the lock.
		 * Instead, we search the hash table to find it again.
		 * When we return, we may find that the thread has been
		 * continued by some other thread.  The suspend/continue
		 * interfaces are prone to such race conditions by design.
		 */
		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
		    (ulwp->ul_pleasestop & whystopped)) {
			(void) __cond_wait(cvp, mp);
			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
			    ulwp != NULL; ulwp = ulwp->ul_hash) {
				if (ulwp->ul_lwpid == tid)
					break;
			}
		}

		if (ulwp == NULL || ulwp->ul_dead)
			error = ESRCH;
		else {
			/*
			 * Do another lwp_suspend() to make sure we don't
			 * return until the target thread is fully stopped
			 * in the kernel.  Don't apply lwp_suspend() until
			 * we know that the target is not holding any
			 * queue locks, that is, that it has completed
			 * ulwp_unlock(self) and has, or at least is
			 * about to, call lwp_suspend() on itself.  We do
			 * this by grabbing the target's spin lock.
			 */
			ASSERT(ulwp->ul_lwpid == tid);
			spin_lock_set(&ulwp->ul_spinlock);
			(void) ___lwp_suspend(tid);
			spin_lock_clear(&ulwp->ul_spinlock);
			/*
			 * If some other thread did a thr_continue()
			 * on the target thread we have to start over.
			 */
			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
				goto top;
		}
	}

	(void) cond_broadcast(cvp);
	lmutex_unlock(mp);
	return (error);
}

int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while
	 * some other thread is performing a fork.
	 * This also allows only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
		 */
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it, in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Suspend the lwp and move it to a safe point,
			 * out of any critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}

int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

int
thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

int
thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}
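
/*
 * Usage sketch (illustrative only): thr_suspend() and thr_continue()
 * pair naturally with THR_SUSPENDED creation, which starts the new
 * thread already stopped.  start_func and arg are placeholders.
 *
 *	thread_t tid;
 *
 *	if (thr_create(NULL, 0, start_func, arg, THR_SUSPENDED, &tid) == 0) {
 *		... publish any state the new thread will read ...
 *		(void) thr_continue(tid);
 *	}
 */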

void
thr_yield()
{
	yield();
}

#pragma weak pthread_kill = thr_kill
#pragma weak _thr_kill = thr_kill
int
thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (_lwp_kill(tid, sig));
}
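
/*
 * Usage sketch (illustrative only): thr_kill() directs a signal at a
 * single thread and returns an error number rather than setting errno.
 * A signal of 0 performs error checking without delivering anything,
 * so it can probe whether a thread ID is still live.
 *
 *	if (thr_kill(tid, 0) == ESRCH)
 *		... tid no longer names a thread in this process ...
 */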

/*
 * Exit a critical section and take deferred actions, if necessary.
 * Called from exit_critical() and from sigon().
 */
void
do_exit_critical()
{
	ulwp_t *self = curthread;
	int sig;

	ASSERT(self->ul_critical == 0);

	/*
	 * Don't suspend ourself or take a deferred signal while dying
	 * or while executing inside the dynamic linker (ld.so.1).
	 */
	if (self->ul_dead || self->ul_rtld)
		return;

	while (self->ul_pleasestop ||
	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
		/*
		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
		 * by keeping self->ul_critical == 1 here.
		 */
		self->ul_critical++;
		while (self->ul_pleasestop) {
			/*
			 * Guard against suspending ourself while on a sleep
			 * queue.  See the comments in call_user_handler().
			 */
			unsleep_self();
			set_parking_flag(self, 0);
			(void) _thrp_suspend(self->ul_lwpid,
			    self->ul_pleasestop);
		}
		self->ul_critical--;

		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
			/*
			 * Clear ul_cursig before proceeding.
			 * This protects us from the dynamic linker's
			 * calls to bind_guard()/bind_clear() in the
			 * event that it is invoked to resolve a symbol
			 * like take_deferred_signal() below.
			 */
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
	ASSERT(self->ul_critical == 0);
}

/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
 * (ld.so.1) when it has to do something, like resolve a symbol to be called
 * by the application or one of its libraries.  _ti_bind_guard() is called
 * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
 * application.  The dynamic linker gets special dispensation from libc to
 * run in a critical region (all signals deferred and no thread suspension
 * or forking allowed), and to be immune from cancellation for the duration.
 */
int
_ti_bind_guard(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	self->ul_bindflags |= bindflag;
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		sigoff(self);	/* see no signals while holding ld_lock */
		self->ul_rtld++;	/* don't suspend while in ld.so.1 */
		(void) mutex_lock(&udp->ld_lock);
	}
	enter_critical(self);
	self->ul_save_state = self->ul_cancel_disabled;
	self->ul_cancel_disabled = 1;
	set_cancel_pending_flag(self, 0);
	return (1);
}

int
_ti_bind_clear(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	self->ul_cancel_disabled = self->ul_save_state;
	set_cancel_pending_flag(self, 0);
	exit_critical(self);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		if (MUTEX_OWNED(&udp->ld_lock, self)) {
			(void) mutex_unlock(&udp->ld_lock);
			self->ul_rtld--;
			sigon(self);	/* reenable signals */
		}
	}
	return (self->ul_bindflags);
}
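
/*
 * Sketch of the expected pairing from ld.so.1 (illustrative only; the
 * real callers live in the runtime linker, not here):
 *
 *	int entered = _ti_bind_guard(flags);
 *	... resolve the symbol, run rtld internals ...
 *	if (entered)
 *		(void) _ti_bind_clear(flags);
 *
 * _ti_bind_guard() returns 0 on a recursive entry (the THR_FLG_RTLD
 * bits are already set), so only the outermost entry performs the
 * matching _ti_bind_clear().
 */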

/*
 * Tell the dynamic linker (ld.so.1) whether or not it was entered from
 * a critical region in libc.  Return zero if not, else return non-zero.
 */
int
_ti_critical(void)
{
	ulwp_t *self = curthread;
	int level = self->ul_critical;

	if ((self->ul_bindflags & THR_FLG_RTLD) == 0 || level == 0)
		/* ld.so.1 hasn't (yet) called enter_critical() */
		return (level);
	return (level - 1);
}

/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thrp_setup() calls sigon().
 *
 * _sigoff() and _sigon() are external consolidation-private interfaces to
 * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	ulwp_t *self = curthread;

	sigoff(self);
}

void
_sigon(void)
{
	ulwp_t *self = curthread;

	ASSERT(self->ul_sigdefer > 0);
	sigon(self);
}
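
/*
 * Pairing sketch for these consolidation-private interfaces
 * (illustrative only): defer signals across a short critical section.
 * The calls are expected to nest and pair; a deferred signal is taken
 * once the matching _sigon() brings ul_sigdefer back to zero.
 *
 *	_sigoff();
 *	... manipulate state that must not see an async signal ...
 *	_sigon();
 */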

int
thr_getconcurrency()
{
	return (thr_concurrency);
}

int
pthread_getconcurrency()
{
	return (pthread_concurrency);
}

int
thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}
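
/*
 * Usage sketch (illustrative only; level is a placeholder variable):
 * the set call validates and records the hint, and the get call simply
 * reports it back.
 *
 *	(void) pthread_setconcurrency(8);
 *	level = pthread_getconcurrency();	(level is now 8)
 */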

size_t
thr_min_stack(void)
{
	return (MINSTACK);
}
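
/*
 * Usage sketch (illustrative only): thr_min_stack() is a floor, not a
 * recommendation; a thread that calls into much of libc needs headroom
 * beyond it.  start_func, arg and tid are placeholders.
 *
 *	size_t sz = thr_min_stack() + 8192;	(headroom is arbitrary)
 *	void *stk = malloc(sz);
 *
 *	if (stk != NULL)
 *		(void) thr_create(stk, sz, start_func, arg, 0, &tid);
 */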

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}

/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least not by java 1.2
 * and later.  It can all go away once all old JVMs have disappeared.
 */

int suspendingallmutators;	/* when non-zero, suspending all mutators. */
int suspendedallmutators;	/* when non-zero, all mutators suspended. */
int mutatorsbarrier;		/* when non-zero, mutators barrier imposed. */
mutex_t mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak _thr_getstate = thr_getstate
int
thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}
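
/*
 * Usage sketch (illustrative only): a collector inspecting a mutator
 * it has already stopped.  A TRS_NONVOLATILE result says the
 * non-volatile registers in rs are valid and may be scanned as roots.
 *
 *	int flag;
 *	lwpid_t lwp;
 *	stack_t ss;
 *	gregset_t rs;
 *
 *	if (thr_getstate(tid, &flag, &lwp, &ss, rs) == 0 &&
 *	    flag == TRS_NONVOLATILE) {
 *		... scan rs and the stack described by ss ...
 *	}
 */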

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak _thr_setstate = thr_setstate
int
thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = __open(buf, O_RDONLY, 0)) >= 0) {
		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) __close(fd);
				return (0);
			}
			yield();	/* give it a chance to stop */
		}
		(void) __close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (__writev(fd, iov, 3) >= 0) {
			(void) __close(fd);
			return (0);
		}
		(void) __close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
void
thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
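
/*
 * Usage sketch (illustrative only; pc is a placeholder): a stack
 * walker testing whether a saved pc lies within the signal handler
 * trampoline; if so, the ucontext argument can be recovered from that
 * frame.
 *
 *	void (*func)();
 *	int size;
 *
 *	thr_sighndlrinfo(&func, &size);
 *	if (pc >= (uintptr_t)func && pc < (uintptr_t)func + size)
 *		... the frame belongs to __sighndlr ...
 */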

/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak _thr_setmutator = thr_setmutator
int
thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;
	int cancel_state;

	enabled = enabled? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
			    &cancel_state);
			while (mutatorsbarrier)
				(void) cond_wait(&mutatorscv, &mutatorslock);
			(void) pthread_setcancelstate(cancel_state, NULL);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak _thr_mutators_barrier = thr_mutators_barrier
void
thr_mutators_barrier(int enabled)
{
	int oldvalue;
	int cancel_state;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (mutatorsbarrier && enabled)
		(void) cond_wait(&mutatorscv, &mutatorslock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}

/*
 * Suspend the set of all mutators except for the caller.  The list
 * of known threads is searched and only the mutators on it are
 * suspended; actively running non-mutators remain running.
 */
#pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
int
thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Suspend the lwp and move it to a safe point,
			 * out of any critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak _thr_suspend_mutator = thr_suspend_mutator
int
thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak _thr_continue_allmutators = thr_continue_allmutators
int
thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}
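
/*
 * Putting the mutator interfaces together (illustrative only): the
 * classic stop-the-world cycle an old JVM would drive from its
 * collector thread.
 *
 *	thr_mutators_barrier(1);		(no new mutators)
 *	(void) thr_suspend_allmutators();	(stop the world)
 *	... mark/sweep, move objects, etc ...
 *	(void) thr_continue_allmutators();	(restart the world)
 *	thr_mutators_barrier(0);		(drop the barrier)
 */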

/*
 * Resume a suspended mutator.
 */
#pragma weak _thr_continue_mutator = thr_continue_mutator
int
thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak _thr_wait_mutator = thr_wait_mutator
int
thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int cancel_state;
	int error = 0;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
top:
	if ((ulwp = find_lwp(tid)) == NULL) {
		(void) pthread_setcancelstate(cancel_state, NULL);
		return (ESRCH);
	}

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	(void) pthread_setcancelstate(cancel_state, NULL);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */