OS-4470 lxbrand unblocking signals in new threads must be atomic
--- old/usr/src/lib/libc/port/threads/sigaction.c
+++ new/usr/src/lib/libc/port/threads/sigaction.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 * Copyright 2015 Joyent, Inc.
26 26 */
27 27
28 28 #include "lint.h"
29 29 #include <sys/feature_tests.h>
30 30 /*
31 31 * setcontext() really can return, if UC_CPU is not specified.
32 32 * Make the compiler shut up about it.
33 33 */
34 34 #if defined(__NORETURN)
35 35 #undef __NORETURN
36 36 #endif
37 37 #define __NORETURN
38 38 #include "thr_uberdata.h"
39 39 #include "asyncio.h"
40 40 #include <signal.h>
41 41 #include <siginfo.h>
42 42 #include <sys/systm.h>
43 43
44 44 /* maskable signals */
45 45 const sigset_t maskset = {MASKSET0, MASKSET1, MASKSET2, MASKSET3};
46 46
47 47 /*
48 48 * Return true if the valid signal bits in both sets are the same.
49 49 */
50 50 int
51 51 sigequalset(const sigset_t *s1, const sigset_t *s2)
52 52 {
53 53 /*
54 54 * We only test valid signal bits, not rubbish following MAXSIG
55 55 * (for speed). Algorithm:
56 56 * if (s1 & fillset) == (s2 & fillset) then (s1 ^ s2) & fillset == 0
57 57 */
58 58 /* see lib/libc/inc/thr_uberdata.h for why this must be true */
59 59 #if (MAXSIG > (2 * 32) && MAXSIG <= (3 * 32))
60 60 return (!((s1->__sigbits[0] ^ s2->__sigbits[0]) |
61 61 (s1->__sigbits[1] ^ s2->__sigbits[1]) |
62 62 ((s1->__sigbits[2] ^ s2->__sigbits[2]) & FILLSET2)));
63 63 #else
64 64 #error "fix me: MAXSIG out of bounds"
65 65 #endif
66 66 }
67 67
68 68 /*
69 69 * Common code for calling the user-specified signal handler.
70 70 */
71 71 void
72 72 call_user_handler(int sig, siginfo_t *sip, ucontext_t *ucp)
73 73 {
74 74 ulwp_t *self = curthread;
75 75 uberdata_t *udp = self->ul_uberdata;
76 76 struct sigaction uact;
77 77 volatile struct sigaction *sap;
78 78
79 79 /*
80 80 * If we are taking a signal while parked or about to be parked
81 81 * on __lwp_park() then remove ourself from the sleep queue so
82 82 * that we can grab locks. The code in mutex_lock_queue() and
83 83 * cond_wait_common() will detect this and deal with it when
84 84 * __lwp_park() returns.
85 85 */
86 86 unsleep_self();
87 87 set_parking_flag(self, 0);
88 88
89 89 if (__td_event_report(self, TD_CATCHSIG, udp)) {
90 90 self->ul_td_evbuf.eventnum = TD_CATCHSIG;
91 91 self->ul_td_evbuf.eventdata = (void *)(intptr_t)sig;
92 92 tdb_event(TD_CATCHSIG, udp);
93 93 }
94 94
95 95 /*
96 96 * Get a self-consistent set of flags, handler, and mask
97 97 * while holding the sig's sig_lock for the least possible time.
98 98 * We must acquire the sig's sig_lock because some thread running
99 99 * in sigaction() might be establishing a new signal handler.
100 100 * The code in sigaction() acquires the writer lock; here
101 101 * we acquire the readers lock to enhance concurrency in the
102 102 * face of heavy signal traffic, such as generated by java.
103 103 *
104 104 * Locking exceptions:
105 105 * No locking for a child of vfork().
106 106 * If the signal is SIGPROF with an si_code of PROF_SIG,
107 107 * then we assume that this signal was generated by
108 108 * setitimer(ITIMER_REALPROF) set up by the dbx collector.
109 109 * If the signal is SIGEMT with an si_code of EMT_CPCOVF,
110 110 * then we assume that the signal was generated by
111 111 * a hardware performance counter overflow.
112 112 * In these cases, assume that we need no locking. It is the
113 113 * monitoring program's responsibility to ensure correctness.
114 114 */
115 115 sap = &udp->siguaction[sig].sig_uaction;
116 116 if (self->ul_vfork ||
117 117 (sip != NULL &&
118 118 ((sig == SIGPROF && sip->si_code == PROF_SIG) ||
119 119 (sig == SIGEMT && sip->si_code == EMT_CPCOVF)))) {
120 120 /* we wish this assignment could be atomic */
121 121 (void) memcpy(&uact, (void *)sap, sizeof (uact));
122 122 } else {
123 123 rwlock_t *rwlp = &udp->siguaction[sig].sig_lock;
124 124 lrw_rdlock(rwlp);
125 125 (void) memcpy(&uact, (void *)sap, sizeof (uact));
126 126 if ((sig == SIGCANCEL || sig == SIGAIOCANCEL) &&
127 127 (sap->sa_flags & SA_RESETHAND))
128 128 sap->sa_sigaction = SIG_DFL;
129 129 lrw_unlock(rwlp);
130 130 }
131 131
132 132 /*
133 133 * Set the proper signal mask and call the user's signal handler.
134 134 * (We overrode the user-requested signal mask with maskset
135 135 * so we currently have all blockable signals blocked.)
136 136 *
137 137 * We would like to ASSERT() that the signal is not a member of the
138 138 * signal mask at the previous level (ucp->uc_sigmask) or the specified
139 139 * signal mask for sigsuspend() or pollsys() (self->ul_tmpmask) but
140 140 * /proc can override this via PCSSIG, so we don't bother.
141 141 *
142 142 * We would also like to ASSERT() that the signal mask at the previous
143 143 * level equals self->ul_sigmask (maskset for sigsuspend() / pollsys()),
144 144 * but /proc can change the thread's signal mask via PCSHOLD, so we
145 145 * don't bother with that either.
146 146 */
147 147 ASSERT(ucp->uc_flags & UC_SIGMASK);
148 148 if (self->ul_sigsuspend) {
149 149 ucp->uc_sigmask = self->ul_sigmask;
150 150 self->ul_sigsuspend = 0;
151 151 /* the sigsuspend() or pollsys() signal mask */
152 152 sigorset(&uact.sa_mask, &self->ul_tmpmask);
153 153 } else {
154 154 /* the signal mask at the previous level */
155 155 sigorset(&uact.sa_mask, &ucp->uc_sigmask);
156 156 }
157 157 if (!(uact.sa_flags & SA_NODEFER)) /* add current signal */
158 158 (void) sigaddset(&uact.sa_mask, sig);
159 159 self->ul_sigmask = uact.sa_mask;
160 160 self->ul_siglink = ucp;
161 161 (void) __lwp_sigmask(SIG_SETMASK, &uact.sa_mask);
162 162
163 163 /*
164 164 * If this thread has been sent SIGCANCEL from the kernel
165 165 * or from pthread_cancel(), it is being asked to exit.
166 166 * The kernel may send SIGCANCEL without a siginfo struct.
167 167 * If the SIGCANCEL is process-directed (from kill() or
168 168 * sigqueue()), treat it as an ordinary signal.
169 169 */
170 170 if (sig == SIGCANCEL) {
171 171 if (sip == NULL || SI_FROMKERNEL(sip) ||
172 172 sip->si_code == SI_LWP) {
173 173 do_sigcancel();
174 174 goto out;
175 175 }
176 176 /* SIGCANCEL is ignored by default */
177 177 if (uact.sa_sigaction == SIG_DFL ||
178 178 uact.sa_sigaction == SIG_IGN)
179 179 goto out;
180 180 }
181 181
182 182 /*
183 183 * If this thread has been sent SIGAIOCANCEL (SIGLWP) and
184 184 * we are an aio worker thread, cancel the aio request.
185 185 */
186 186 if (sig == SIGAIOCANCEL) {
187 187 aio_worker_t *aiowp = pthread_getspecific(_aio_key);
188 188
189 189 if (sip != NULL && sip->si_code == SI_LWP && aiowp != NULL)
190 190 siglongjmp(aiowp->work_jmp_buf, 1);
191 191 /* SIGLWP is ignored by default */
192 192 if (uact.sa_sigaction == SIG_DFL ||
193 193 uact.sa_sigaction == SIG_IGN)
194 194 goto out;
195 195 }
196 196
197 197 if (!(uact.sa_flags & SA_SIGINFO))
198 198 sip = NULL;
199 199 __sighndlr(sig, sip, ucp, uact.sa_sigaction);
200 200
201 201 #if defined(sparc) || defined(__sparc)
202 202 /*
203 203 * If this is a floating point exception and the queue
204 204 * is non-empty, pop the top entry from the queue. This
205 205 * is to maintain expected behavior.
206 206 */
207 207 if (sig == SIGFPE && ucp->uc_mcontext.fpregs.fpu_qcnt) {
208 208 fpregset_t *fp = &ucp->uc_mcontext.fpregs;
209 209
210 210 if (--fp->fpu_qcnt > 0) {
211 211 unsigned char i;
212 212 struct fq *fqp;
213 213
214 214 fqp = fp->fpu_q;
215 215 for (i = 0; i < fp->fpu_qcnt; i++)
216 216 fqp[i] = fqp[i+1];
217 217 }
218 218 }
219 219 #endif /* sparc */
220 220
221 221 out:
222 222 (void) setcontext(ucp);
223 223 thr_panic("call_user_handler(): setcontext() returned");
224 224 }
225 225
226 226 /*
227 227 * take_deferred_signal() is called when ul_critical and ul_sigdefer become
228 228 * zero and a deferred signal has been recorded on the current thread.
229 229 * We are out of the critical region and are ready to take a signal.
230 230 * The kernel has all signals blocked on this lwp, but our value of
231 231 * ul_sigmask is the correct signal mask for the previous context.
232 232 *
233 233 * We call __sigresend() to atomically restore the signal mask and
234 234 * cause the signal to be sent again with the remembered siginfo.
235 235 * We will not return successfully from __sigresend() until the
236 236 * application's signal handler has been run via sigacthandler().
237 237 */
238 238 void
239 239 take_deferred_signal(int sig)
240 240 {
241 241 extern int __sigresend(int, siginfo_t *, sigset_t *);
242 242 ulwp_t *self = curthread;
243 243 siguaction_t *suap = &self->ul_uberdata->siguaction[sig];
244 244 siginfo_t *sip;
245 245 int error;
246 246
247 247 ASSERT((self->ul_critical | self->ul_sigdefer | self->ul_cursig) == 0);
248 248
249 249 /*
250 250 * If the signal handler was established with SA_RESETHAND,
251 251 * the kernel has reset the handler to SIG_DFL, so we have
252 252 * to reestablish the handler now so that it will be entered
253 253 * again when we call __sigresend(), below.
254 254 *
255 255 * Logically, we should acquire and release the signal's
256 256 * sig_lock around this operation to protect the integrity
257 257 * of the signal action while we copy it, as is done below
258 258 * in _libc_sigaction(). However, we may be on a user-level
259 259 * sleep queue at this point and lrw_wrlock(&suap->sig_lock)
260 260 * might attempt to sleep on a different sleep queue and
261 261 * that would corrupt the entire sleep queue mechanism.
262 262 *
263 263 * If we are on a sleep queue we will remove ourself from
264 264 * it in call_user_handler(), called from sigacthandler(),
265 265 * before entering the application's signal handler.
266 266 * In the meantime, we must not acquire any locks.
267 267 */
268 268 if (suap->sig_uaction.sa_flags & SA_RESETHAND) {
269 269 struct sigaction tact = suap->sig_uaction;
270 270 tact.sa_flags &= ~SA_NODEFER;
271 271 tact.sa_sigaction = self->ul_uberdata->sigacthandler;
272 272 tact.sa_mask = maskset;
273 273 (void) __sigaction(sig, &tact, NULL);
274 274 }
275 275
276 276 if (self->ul_siginfo.si_signo == 0)
277 277 sip = NULL;
278 278 else
279 279 sip = &self->ul_siginfo;
280 280
281 281 /* EAGAIN can happen only for a pending SIGSTOP signal */
282 282 while ((error = __sigresend(sig, sip, &self->ul_sigmask)) == EAGAIN)
283 283 continue;
284 284 if (error)
285 285 thr_panic("take_deferred_signal(): __sigresend() failed");
286 286 }
287 287
288 288 /*
289 289 * sigacthandler() attempts to clean up dangling uc_link pointers in
290 290 * signal handling contexts when libc believes us to have escaped
291 291 * a signal handler incorrectly in the past.
292 292 *
293 293 * Branded processes have a legitimate use for a chain including contexts
294 294 * other than those used for signal handling when tracking emulation
295 295 * requests from the kernel. We allow them to disable this cleanup
296 296 * behaviour.
297 297 */
298 298 static int escaped_context_cleanup = 1;
299 299
300 300 void
301 301 set_escaped_context_cleanup(int on)
302 302 {
303 303 escaped_context_cleanup = on;
304 304 }
305 305
306 306 void
307 307 sigacthandler(int sig, siginfo_t *sip, void *uvp)
308 308 {
309 309 ucontext_t *ucp = uvp;
310 310 ulwp_t *self = curthread;
311 311
312 312 /*
313 313 * Do this in case we took a signal while in a cancelable system call.
314 314 * It does no harm if we were not in such a system call.
315 315 */
316 316 self->ul_sp = 0;
317 317 if (sig != SIGCANCEL)
318 318 self->ul_cancel_async = self->ul_save_async;
319 319
320 320 /*
321 321 * If this thread has performed a longjmp() from a signal handler
322 322 * back to main level some time in the past, it has left the kernel
323 323 * thinking that it is still in the signal context. We repair this
324 324 * possible damage by setting ucp->uc_link to NULL if we know that
325 325 * we are actually executing at main level (self->ul_siglink == NULL).
326 326 * See the code for setjmp()/longjmp() for more details.
327 327 */
328 328 if (escaped_context_cleanup && self->ul_siglink == NULL)
329 329 ucp->uc_link = NULL;
330 330
331 331 /*
332 332 * If we are not in a critical region and are
333 333 * not deferring signals, take the signal now.
334 334 */
335 335 if ((self->ul_critical + self->ul_sigdefer) == 0) {
336 336 call_user_handler(sig, sip, ucp);
337 337 /*
338 338 * On the surface, the following call seems redundant
339 339 * because call_user_handler() cannot return. However,
340 340 * we don't want to return from here because the compiler
341 341 * might recycle our frame. We want to keep it on the
342 342 * stack to assist debuggers such as pstack in identifying
343 343 * signal frames. The call to thr_panic() serves to prevent
344 344 * tail-call optimisation here.
345 345 */
346 346 thr_panic("sigacthandler(): call_user_handler() returned");
347 347 }
348 348
349 349 /*
350 350 * We are in a critical region or we are deferring signals. When
351 351 * we emerge from the region we will call take_deferred_signal().
352 352 */
353 353 ASSERT(self->ul_cursig == 0);
354 354 self->ul_cursig = (char)sig;
355 355 if (sip != NULL)
356 356 (void) memcpy(&self->ul_siginfo,
357 357 sip, sizeof (siginfo_t));
358 358 else
359 359 self->ul_siginfo.si_signo = 0;
360 360
361 361 /*
362 362 * Make sure that if we return to a call to __lwp_park()
363 363 * or ___lwp_cond_wait() that it returns right away
364 364 * (giving us a spurious wakeup but not a deadlock).
365 365 */
366 366 set_parking_flag(self, 0);
367 367
368 368 /*
369 369 * Return to the previous context with all signals blocked.
370 370 * We will restore the signal mask in take_deferred_signal().
371 371 * Note that we are calling the system call trap here, not
372 372 * the setcontext() wrapper. We don't want to change the
373 373 * thread's ul_sigmask by this operation.
374 374 */
375 375 ucp->uc_sigmask = maskset;
376 376 (void) __setcontext(ucp);
377 377 thr_panic("sigacthandler(): __setcontext() returned");
378 378 }
379 379
380 380 #pragma weak _sigaction = sigaction
381 381 int
382 382 sigaction(int sig, const struct sigaction *nact, struct sigaction *oact)
383 383 {
384 384 ulwp_t *self = curthread;
385 385 uberdata_t *udp = self->ul_uberdata;
386 386 struct sigaction oaction;
387 387 struct sigaction tact;
388 388 struct sigaction *tactp = NULL;
389 389 int rv;
390 390
391 391 if (sig <= 0 || sig >= NSIG) {
392 392 errno = EINVAL;
393 393 return (-1);
394 394 }
395 395
396 396 if (!self->ul_vfork)
397 397 lrw_wrlock(&udp->siguaction[sig].sig_lock);
398 398
399 399 oaction = udp->siguaction[sig].sig_uaction;
400 400
401 401 if (nact != NULL) {
402 402 tact = *nact; /* make a copy so we can modify it */
403 403 tactp = &tact;
404 404 delete_reserved_signals(&tact.sa_mask);
405 405
406 406 #if !defined(_LP64)
407 407 tact.sa_resv[0] = tact.sa_resv[1] = 0; /* cleanliness */
408 408 #endif
409 409 /*
410 410 * To be compatible with the behavior of SunOS 4.x:
411 411 * If the new signal handler is SIG_IGN or SIG_DFL, do
412 412 * not change the signal's entry in the siguaction array.
413 413 * This allows a child of vfork(2) to set signal handlers
414 414 * to SIG_IGN or SIG_DFL without affecting the parent.
415 415 *
416 416 * This also covers a race condition with some thread
417 417 * setting the signal action to SIG_DFL or SIG_IGN
418 418 * when the thread has also received and deferred
419 419 * that signal. When the thread takes the deferred
420 420 * signal, even though it has set the action to SIG_DFL
421 421 * or SIG_IGN, it will execute the old signal handler
422 422 * anyway. This is an inherent signaling race condition
423 423 * and is not a bug.
424 424 *
425 425 * A child of vfork() is not allowed to change signal
426 426 * handlers to anything other than SIG_DFL or SIG_IGN.
427 427 */
428 428 if (self->ul_vfork) {
429 429 if (tact.sa_sigaction != SIG_IGN)
430 430 tact.sa_sigaction = SIG_DFL;
431 431 } else if (sig == SIGCANCEL || sig == SIGAIOCANCEL) {
432 432 /*
433 433 * Always catch these signals.
434 434 * We need SIGCANCEL for pthread_cancel() to work.
435 435 * We need SIGAIOCANCEL for aio_cancel() to work.
436 436 */
437 437 udp->siguaction[sig].sig_uaction = tact;
438 438 if (tact.sa_sigaction == SIG_DFL ||
439 439 tact.sa_sigaction == SIG_IGN)
440 440 tact.sa_flags = SA_SIGINFO;
441 441 else {
442 442 tact.sa_flags |= SA_SIGINFO;
443 443 tact.sa_flags &=
444 444 ~(SA_NODEFER | SA_RESETHAND | SA_RESTART);
445 445 }
446 446 tact.sa_sigaction = udp->sigacthandler;
447 447 tact.sa_mask = maskset;
448 448 } else if (tact.sa_sigaction != SIG_DFL &&
449 449 tact.sa_sigaction != SIG_IGN) {
450 450 udp->siguaction[sig].sig_uaction = tact;
451 451 tact.sa_flags &= ~SA_NODEFER;
452 452 tact.sa_sigaction = udp->sigacthandler;
453 453 tact.sa_mask = maskset;
454 454 }
455 455 }
456 456
457 457 if ((rv = __sigaction(sig, tactp, oact)) != 0)
458 458 udp->siguaction[sig].sig_uaction = oaction;
459 459 else if (oact != NULL &&
460 460 oact->sa_sigaction != SIG_DFL &&
461 461 oact->sa_sigaction != SIG_IGN)
462 462 *oact = oaction;
463 463
464 464 /*
465 465 * We detect setting the disposition of SIGIO just to set the
466 466 * _sigio_enabled flag for the asynchronous i/o (aio) code.
467 467 */
468 468 if (sig == SIGIO && rv == 0 && tactp != NULL) {
469 469 _sigio_enabled =
470 470 (tactp->sa_handler != SIG_DFL &&
471 471 tactp->sa_handler != SIG_IGN);
472 472 }
473 473
474 474 if (!self->ul_vfork)
475 475 lrw_unlock(&udp->siguaction[sig].sig_lock);
476 476 return (rv);
477 477 }
478 478
479 479 /*
480 480 * This is a private interface for the lx brand.
481 481 */
482 482 void
483 483 setsigacthandler(void (*nsigacthandler)(int, siginfo_t *, void *),
484 484 void (**osigacthandler)(int, siginfo_t *, void *),
485 485 int (*brsetctxt)(const ucontext_t *))
486 486 {
487 487 ulwp_t *self = curthread;
488 488 uberdata_t *udp = self->ul_uberdata;
489 489
490 490 if (osigacthandler != NULL)
491 491 *osigacthandler = udp->sigacthandler;
492 492
493 493 udp->sigacthandler = nsigacthandler;
494 494
495 495 if (brsetctxt != NULL)
496 496 udp->setctxt = brsetctxt;
497 497 }
498 498
499 499 /*
500 500 * Tell the kernel to block all signals.
501 501 * Use the schedctl interface, or failing that, use __lwp_sigmask().
502 502 * This action can be rescinded only by making a system call that
503 503 * sets the signal mask:
504 504 * __lwp_sigmask(), __sigprocmask(), __setcontext(),
505 505 * __sigsuspend() or __pollsys().
506 506 * In particular, this action cannot be reversed by assigning
507 507 * scp->sc_sigblock = 0. That would be a way to lose signals.
508 508 * See the definition of restore_signals(self).
509 509 */
510 510 void
511 511 block_all_signals(ulwp_t *self)
512 512 {
513 513 volatile sc_shared_t *scp;
514 514
515 515 enter_critical(self);
516 516 if ((scp = self->ul_schedctl) != NULL ||
517 517 (scp = setup_schedctl()) != NULL)
518 518 scp->sc_sigblock = 1;
519 519 else
520 520 (void) __lwp_sigmask(SIG_SETMASK, &maskset);
521 521 exit_critical(self);
522 522 }
523 523
524 524 /*
525 525 * setcontext() has code that forcibly restores the curthread
526 526 * pointer in a context passed to the setcontext(2) syscall.
527 527 *
528 528 * Certain processes may need to disable this feature, so these routines
529 529 * provide the mechanism to do so.
530 530 *
531 531 * (As an example, branded 32-bit x86 processes may use %gs for their own
532 532 * purposes, so they need to be able to specify a %gs value to be restored
533 533 * on return from a signal handler via the passed ucontext_t.)
534 534 */
535 535 static int setcontext_enforcement = 1;
536 536
537 537 void
538 538 set_setcontext_enforcement(int on)
539 539 {
540 540 setcontext_enforcement = on;
541 541 }
542 542
543 +/*
544 + * The LX brand emulation library implements an operation that is analogous to
545 + * setcontext(), but takes a different path in to the kernel. So that it can
546 + * correctly restore a signal mask, we expose just the signal mask handling
547 + * part of the regular setcontext() routine as a private interface.
548 + */
549 +void
550 +setcontext_sigmask(ucontext_t *ucp)
551 +{
552 + ulwp_t *self = curthread;
553 +
554 + if (ucp->uc_flags & UC_SIGMASK) {
555 + block_all_signals(self);
556 + delete_reserved_signals(&ucp->uc_sigmask);
557 + self->ul_sigmask = ucp->uc_sigmask;
558 + if (self->ul_cursig) {
559 + /*
560 + * We have a deferred signal present.
561 + * The signal mask will be set when the
562 + * signal is taken in take_deferred_signal().
563 + */
564 + ASSERT(self->ul_critical + self->ul_sigdefer != 0);
565 + ucp->uc_flags &= ~UC_SIGMASK;
566 + }
567 + }
568 +}
569 +
543 570 #pragma weak _setcontext = setcontext
544 571 int
545 572 setcontext(const ucontext_t *ucp)
546 573 {
547 574 ulwp_t *self = curthread;
548 575 uberdata_t *udp = self->ul_uberdata;
549 576 int ret;
550 577 ucontext_t uc;
551 578
552 579 /*
553 580 * Returning from the main context (uc_link == NULL) causes
554 581 * the thread to exit. See setcontext(2) and makecontext(3C).
555 582 */
556 583 if (ucp == NULL)
557 584 thr_exit(NULL);
558 585 (void) memcpy(&uc, ucp, sizeof (uc));
559 586
560 587 /*
561 588 * Restore previous signal mask and context link.
562 589 */
563 - if (uc.uc_flags & UC_SIGMASK) {
564 - block_all_signals(self);
565 - delete_reserved_signals(&uc.uc_sigmask);
566 - self->ul_sigmask = uc.uc_sigmask;
567 - if (self->ul_cursig) {
568 - /*
569 - * We have a deferred signal present.
570 - * The signal mask will be set when the
571 - * signal is taken in take_deferred_signal().
572 - */
573 - ASSERT(self->ul_critical + self->ul_sigdefer != 0);
574 - uc.uc_flags &= ~UC_SIGMASK;
575 - }
576 - }
590 + setcontext_sigmask(&uc);
577 591 self->ul_siglink = uc.uc_link;
578 592
579 593 /*
580 594 * We don't know where this context structure has been.
581 595 * Preserve the curthread pointer, at least.
582 596 *
583 597 * Allow this feature to be disabled if a particular process
584 598 * requests it.
585 599 */
586 600 if (setcontext_enforcement) {
587 601 #if defined(__sparc)
588 602 uc.uc_mcontext.gregs[REG_G7] = (greg_t)self;
589 603 #elif defined(__amd64)
590 604 uc.uc_mcontext.gregs[REG_FS] = (greg_t)0; /* null for fsbase */
591 605 #elif defined(__i386)
592 606 uc.uc_mcontext.gregs[GS] = (greg_t)LWPGS_SEL;
593 607 #else
594 608 #error "none of __sparc, __amd64, __i386 defined"
595 609 #endif
596 610 }
597 611
598 612 /*
599 613 * Make sure that if we return to a call to __lwp_park()
600 614 * or ___lwp_cond_wait() that it returns right away
601 615 * (giving us a spurious wakeup but not a deadlock).
602 616 */
603 617 set_parking_flag(self, 0);
604 618 self->ul_sp = 0;
605 619 ret = udp->setctxt(&uc);
606 620
607 621 /*
608 622 * It is OK for setcontext() to return if the user has not specified
609 623 * UC_CPU.
610 624 */
611 625 if (uc.uc_flags & UC_CPU)
612 626 thr_panic("setcontext(): __setcontext() returned");
613 627 return (ret);
614 628 }
615 629
616 630 #pragma weak _thr_sigsetmask = thr_sigsetmask
617 631 int
618 632 thr_sigsetmask(int how, const sigset_t *set, sigset_t *oset)
619 633 {
620 634 ulwp_t *self = curthread;
621 635 sigset_t saveset;
622 636
623 637 if (set == NULL) {
624 638 enter_critical(self);
625 639 if (oset != NULL)
626 640 *oset = self->ul_sigmask;
627 641 exit_critical(self);
628 642 } else {
629 643 switch (how) {
630 644 case SIG_BLOCK:
631 645 case SIG_UNBLOCK:
632 646 case SIG_SETMASK:
633 647 break;
634 648 default:
635 649 return (EINVAL);
636 650 }
637 651
638 652 /*
639 653 * The assignments to self->ul_sigmask must be protected from
640 654 * signals. The nuances of this code are subtle. Be careful.
641 655 */
642 656 block_all_signals(self);
643 657 if (oset != NULL)
644 658 saveset = self->ul_sigmask;
645 659 switch (how) {
646 660 case SIG_BLOCK:
647 661 self->ul_sigmask.__sigbits[0] |= set->__sigbits[0];
648 662 self->ul_sigmask.__sigbits[1] |= set->__sigbits[1];
649 663 self->ul_sigmask.__sigbits[2] |= set->__sigbits[2];
650 664 self->ul_sigmask.__sigbits[3] |= set->__sigbits[3];
651 665 break;
652 666 case SIG_UNBLOCK:
653 667 self->ul_sigmask.__sigbits[0] &= ~set->__sigbits[0];
654 668 self->ul_sigmask.__sigbits[1] &= ~set->__sigbits[1];
655 669 self->ul_sigmask.__sigbits[2] &= ~set->__sigbits[2];
656 670 self->ul_sigmask.__sigbits[3] &= ~set->__sigbits[3];
657 671 break;
658 672 case SIG_SETMASK:
659 673 self->ul_sigmask.__sigbits[0] = set->__sigbits[0];
660 674 self->ul_sigmask.__sigbits[1] = set->__sigbits[1];
661 675 self->ul_sigmask.__sigbits[2] = set->__sigbits[2];
662 676 self->ul_sigmask.__sigbits[3] = set->__sigbits[3];
663 677 break;
664 678 }
665 679 delete_reserved_signals(&self->ul_sigmask);
666 680 if (oset != NULL)
667 681 *oset = saveset;
668 682 restore_signals(self);
669 683 }
670 684
671 685 return (0);
672 686 }
673 687
674 688 #pragma weak _pthread_sigmask = pthread_sigmask
675 689 int
676 690 pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
677 691 {
678 692 return (thr_sigsetmask(how, set, oset));
679 693 }
680 694
681 695 #pragma weak _sigprocmask = sigprocmask
682 696 int
683 697 sigprocmask(int how, const sigset_t *set, sigset_t *oset)
684 698 {
685 699 int error;
686 700
687 701 /*
688 702 * Guard against children of vfork().
689 703 */
690 704 if (curthread->ul_vfork)
691 705 return (__sigprocmask(how, set, oset));
692 706
693 707 if ((error = thr_sigsetmask(how, set, oset)) != 0) {
694 708 errno = error;
695 709 return (-1);
696 710 }
697 711
698 712 return (0);
699 713 }
700 714
701 715 /*
702 716 * Called at library initialization to set up signal handling.
703 717 * All we really do is initialize the sig_lock rwlocks.
704 718 * All signal handlers are either SIG_DFL or SIG_IGN on exec().
705 719 * However, if any signal handlers were established on alternate
706 720 * link maps before the primary link map has been initialized,
707 721 * then inform the kernel of the new sigacthandler.
708 722 */
709 723 void
710 724 signal_init()
711 725 {
712 726 uberdata_t *udp = curthread->ul_uberdata;
713 727 struct sigaction *sap;
714 728 struct sigaction act;
715 729 rwlock_t *rwlp;
716 730 int sig;
717 731
718 732 for (sig = 0; sig < NSIG; sig++) {
719 733 rwlp = &udp->siguaction[sig].sig_lock;
720 734 rwlp->rwlock_magic = RWL_MAGIC;
721 735 rwlp->mutex.mutex_flag = LOCK_INITED;
722 736 rwlp->mutex.mutex_magic = MUTEX_MAGIC;
723 737 sap = &udp->siguaction[sig].sig_uaction;
724 738 if (sap->sa_sigaction != SIG_DFL &&
725 739 sap->sa_sigaction != SIG_IGN &&
726 740 __sigaction(sig, NULL, &act) == 0 &&
727 741 act.sa_sigaction != SIG_DFL &&
728 742 act.sa_sigaction != SIG_IGN) {
729 743 act = *sap;
730 744 act.sa_flags &= ~SA_NODEFER;
731 745 act.sa_sigaction = udp->sigacthandler;
732 746 act.sa_mask = maskset;
733 747 (void) __sigaction(sig, &act, NULL);
734 748 }
735 749 }
736 750 }
737 751
738 752 /*
739 753 * Common code for cancelling self in _sigcancel() and pthread_cancel().
740 754 * First record the fact that a cancellation is pending.
741 755 * Then, if cancellation is disabled or if we are holding unprotected
742 756 * libc locks, just return to defer the cancellation.
743 757 * Then, if we are at a cancellation point (ul_cancelable) just
744 758 * return and let _canceloff() do the exit.
745 759 * Else exit immediately if async mode is in effect.
746 760 */
747 761 void
748 762 do_sigcancel(void)
749 763 {
750 764 ulwp_t *self = curthread;
751 765
752 766 ASSERT(self->ul_critical == 0);
753 767 ASSERT(self->ul_sigdefer == 0);
754 768 self->ul_cancel_pending = 1;
755 769 if (self->ul_cancel_async &&
756 770 !self->ul_cancel_disabled &&
757 771 self->ul_libc_locks == 0 &&
758 772 !self->ul_cancelable)
759 773 pthread_exit(PTHREAD_CANCELED);
760 774 set_cancel_pending_flag(self, 0);
761 775 }
762 776
763 777 /*
764 778 * Set up the SIGCANCEL handler for threads cancellation,
765 779 * needed only when we have more than one thread,
766 780 * or the SIGAIOCANCEL handler for aio cancellation,
767 781 * called when aio is initialized, in __uaio_init().
768 782 */
769 783 void
770 784 setup_cancelsig(int sig)
771 785 {
772 786 uberdata_t *udp = curthread->ul_uberdata;
773 787 rwlock_t *rwlp = &udp->siguaction[sig].sig_lock;
774 788 struct sigaction act;
775 789
776 790 ASSERT(sig == SIGCANCEL || sig == SIGAIOCANCEL);
777 791 lrw_rdlock(rwlp);
778 792 act = udp->siguaction[sig].sig_uaction;
779 793 lrw_unlock(rwlp);
780 794 if (act.sa_sigaction == SIG_DFL ||
781 795 act.sa_sigaction == SIG_IGN)
782 796 act.sa_flags = SA_SIGINFO;
783 797 else {
784 798 act.sa_flags |= SA_SIGINFO;
785 799 act.sa_flags &= ~(SA_NODEFER | SA_RESETHAND | SA_RESTART);
786 800 }
787 801 act.sa_sigaction = udp->sigacthandler;
788 802 act.sa_mask = maskset;
789 803 (void) __sigaction(sig, &act, NULL);
790 804 }
... 204 lines elided ...
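
For context, a minimal usage sketch of the two private interfaces this change exposes: setsigacthandler() (now taking a brand setcontext callback) and setcontext_sigmask(). This is not taken from the lx brand sources; every brand_* name below is a hypothetical placeholder, and only the two libc declarations come from the diff above.

/*
 * Hedged sketch only: how a brand emulation library might consume the
 * private libc interfaces added above.  All brand_* names are hypothetical.
 */
#include <ucontext.h>
#include <signal.h>

/* Private libc interfaces (see sigaction.c above). */
extern void setsigacthandler(void (*)(int, siginfo_t *, void *),
    void (**)(int, siginfo_t *, void *), int (*)(const ucontext_t *));
extern void setcontext_sigmask(ucontext_t *);

/* Hypothetical brand entry points. */
extern void brand_sigacthandler(int, siginfo_t *, void *);
extern int brand_setcontext_syscall(const ucontext_t *);

static void (*saved_sigacthandler)(int, siginfo_t *, void *);

/*
 * A setcontext()-analogue that enters the kernel by the brand's own path
 * but reuses libc's signal-mask handling, so ul_sigmask and any
 * deferred-signal state stay consistent with what libc expects.
 */
int
brand_restore_context(const ucontext_t *ucp)
{
        ucontext_t uc = *ucp;   /* setcontext_sigmask() may clear UC_SIGMASK */

        setcontext_sigmask(&uc);
        return (brand_setcontext_syscall(&uc));
}

void
brand_signal_init(void)
{
        /*
         * Interpose on libc's signal delivery, and route libc's own
         * setcontext() through the brand's kernel entry point.
         */
        setsigacthandler(brand_sigacthandler, &saved_sigacthandler,
            brand_setcontext_syscall);
}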