Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/sig.c
+++ new/usr/src/uts/common/os/sig.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 29 /* All Rights Reserved */
30 30
31 31 #include <sys/param.h>
32 32 #include <sys/types.h>
33 33 #include <sys/bitmap.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/systm.h>
36 36 #include <sys/cred.h>
37 37 #include <sys/user.h>
38 38 #include <sys/errno.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 41 #include <sys/signal.h>
42 42 #include <sys/siginfo.h>
43 43 #include <sys/fault.h>
44 44 #include <sys/ucontext.h>
45 45 #include <sys/procfs.h>
46 46 #include <sys/wait.h>
47 47 #include <sys/class.h>
48 48 #include <sys/mman.h>
49 49 #include <sys/procset.h>
50 50 #include <sys/kmem.h>
51 51 #include <sys/cpuvar.h>
52 52 #include <sys/prsystm.h>
53 53 #include <sys/debug.h>
54 54 #include <vm/as.h>
↓ open down ↓ |
54 lines elided |
↑ open up ↑ |
55 55 #include <sys/bitmap.h>
56 56 #include <c2/audit.h>
57 57 #include <sys/core.h>
58 58 #include <sys/schedctl.h>
59 59 #include <sys/contract/process_impl.h>
60 60 #include <sys/cyclic.h>
61 61 #include <sys/dtrace.h>
62 62 #include <sys/sdt.h>
63 63 #include <sys/signalfd.h>
64 64
65 -const k_sigset_t nullsmask = {0, 0, 0};
65 +const k_sigset_t nullsmask = {{0, 0, 0}};
66 66
67 -const k_sigset_t fillset = /* MUST be contiguous */
68 - {FILLSET0, FILLSET1, FILLSET2};
67 +const k_sigset_t fillset = { /* MUST be contiguous */
68 + {FILLSET0, FILLSET1, FILLSET2}};
69 69
70 -const k_sigset_t cantmask =
71 - {CANTMASK0, CANTMASK1, CANTMASK2};
70 +const k_sigset_t cantmask = {
71 + {CANTMASK0, CANTMASK1, CANTMASK2}};
72 72
73 -const k_sigset_t cantreset =
74 - {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};
73 +const k_sigset_t cantreset = {
74 + {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0}};
75 75
76 -const k_sigset_t ignoredefault =
76 +const k_sigset_t ignoredefault = {
77 77 {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
78 78 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
79 79 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
80 80 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
81 - |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};
81 + |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0}};
82 82
83 -const k_sigset_t stopdefault =
83 +const k_sigset_t stopdefault = {
84 84 {(sigmask(SIGSTOP)|sigmask(SIGTSTP)|sigmask(SIGTTOU)|sigmask(SIGTTIN)),
85 - 0, 0};
85 + 0, 0}};
86 86
87 -const k_sigset_t coredefault =
87 +const k_sigset_t coredefault = {
88 88 {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGIOT)
89 89 |sigmask(SIGEMT)|sigmask(SIGFPE)|sigmask(SIGBUS)|sigmask(SIGSEGV)
90 - |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0};
90 + |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0}};
91 91
92 -const k_sigset_t holdvfork =
93 - {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0};
92 +const k_sigset_t holdvfork = {
93 + {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0}};
94 94
95 95 static int isjobstop(int);
96 96 static void post_sigcld(proc_t *, sigqueue_t *);
97 97
98 98
99 99 /*
100 100 * signalfd helper function which is set when the signalfd driver loads.
101 101 */
102 102 void (*sigfd_exit_helper)();
103 103
104 104 /*
105 105 * Internal variables for counting number of user thread stop requests posted.
106 106 * They may not be accurate at some special situation such as that a virtually
107 107 * stopped thread starts to run.
108 108 */
109 109 static int num_utstop;
110 110 /*
111 111 * Internal variables for broadcasting an event when all thread stop requests
112 112 * are processed.
113 113 */
114 114 static kcondvar_t utstop_cv;
115 115
116 116 static kmutex_t thread_stop_lock;
117 117 void del_one_utstop(void);
118 118
119 119 /*
120 120 * Send the specified signal to the specified process.
121 121 */
122 122 void
123 123 psignal(proc_t *p, int sig)
124 124 {
125 125 mutex_enter(&p->p_lock);
126 126 sigtoproc(p, NULL, sig);
127 127 mutex_exit(&p->p_lock);
128 128 }
129 129
130 130 /*
131 131 * Send the specified signal to the specified thread.
132 132 */
133 133 void
134 134 tsignal(kthread_t *t, int sig)
135 135 {
136 136 proc_t *p = ttoproc(t);
137 137
138 138 mutex_enter(&p->p_lock);
139 139 sigtoproc(p, t, sig);
140 140 mutex_exit(&p->p_lock);
141 141 }
142 142
143 143 int
144 144 signal_is_blocked(kthread_t *t, int sig)
145 145 {
146 146 return (sigismember(&t->t_hold, sig) ||
147 147 (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
148 148 }
149 149
150 150 /*
151 151 * Return true if the signal can safely be discarded on generation.
152 152 * That is, if there is no need for the signal on the receiving end.
153 153 * The answer is true if the process is a zombie or
154 154 * if all of these conditions are true:
155 155 * the signal is being ignored
156 156 * the process is single-threaded
157 157 * the signal is not being traced by /proc
158 158 * the signal is not blocked by the process
159 159 * the signal is not being accepted via sigwait()
160 160 */
161 161 static int
162 162 sig_discardable(proc_t *p, int sig)
163 163 {
164 164 kthread_t *t = p->p_tlist;
165 165
166 166 return (t == NULL || /* if zombie or ... */
167 167 (sigismember(&p->p_ignore, sig) && /* signal is ignored */
168 168 t->t_forw == t && /* and single-threaded */
169 169 !tracing(p, sig) && /* and no /proc tracing */
170 170 !signal_is_blocked(t, sig) && /* and signal not blocked */
171 171 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
172 172 }
173 173
174 174 /*
175 175 * Return true if this thread is going to eat this signal soon.
176 176 * Note that, if the signal is SIGKILL, we force stopped threads to be
177 177 * set running (to make SIGKILL be a sure kill), but only if the process
178 178 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
179 179 * relies on the fact that a process will not change shape while P_PR_LOCK
180 180 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
181 181 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
182 182 * ensure that the process is not locked by /proc, but prbarrier() drops
183 183 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
184 184 */
185 185 int
186 186 eat_signal(kthread_t *t, int sig)
187 187 {
188 188 int rval = 0;
189 189 ASSERT(THREAD_LOCK_HELD(t));
190 190
191 191 /*
192 192 * Do not do anything if the target thread has the signal blocked.
193 193 */
194 194 if (!signal_is_blocked(t, sig)) {
195 195 t->t_sig_check = 1; /* have thread do an issig */
196 196 if (ISWAKEABLE(t) || ISWAITING(t)) {
197 197 setrun_locked(t);
198 198 rval = 1;
199 199 } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
200 200 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
201 201 ttoproc(t)->p_stopsig = 0;
202 202 t->t_dtrace_stop = 0;
203 203 t->t_schedflag |= TS_XSTART | TS_PSTART;
204 204 setrun_locked(t);
205 205 } else if (t != curthread && t->t_state == TS_ONPROC) {
206 206 aston(t); /* make it do issig promptly */
207 207 if (t->t_cpu != CPU)
208 208 poke_cpu(t->t_cpu->cpu_id);
209 209 rval = 1;
210 210 } else if (t->t_state == TS_RUN) {
211 211 rval = 1;
212 212 }
213 213 }
214 214
215 215 return (rval);
216 216 }
217 217
218 218 /*
219 219 * Post a signal.
220 220 * If a non-null thread pointer is passed, then post the signal
221 221 * to the thread/lwp, otherwise post the signal to the process.
222 222 */
223 223 void
224 224 sigtoproc(proc_t *p, kthread_t *t, int sig)
225 225 {
226 226 kthread_t *tt;
227 227 int ext = !(curproc->p_flag & SSYS) &&
228 228 (curproc->p_ct_process != p->p_ct_process);
229 229
230 230 ASSERT(MUTEX_HELD(&p->p_lock));
231 231
232 232 /* System processes don't get signals */
233 233 if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
234 234 return;
235 235
236 236 /*
237 237 * Regardless of origin or directedness,
238 238 * SIGKILL kills all lwps in the process immediately
239 239 * and jobcontrol signals affect all lwps in the process.
240 240 */
241 241 if (sig == SIGKILL) {
242 242 p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
243 243 t = NULL;
244 244 } else if (sig == SIGCONT) {
245 245 /*
246 246 * The SSCONT flag will remain set until a stopping
247 247 * signal comes in (below). This is harmless.
248 248 */
249 249 p->p_flag |= SSCONT;
250 250 sigdelq(p, NULL, SIGSTOP);
251 251 sigdelq(p, NULL, SIGTSTP);
252 252 sigdelq(p, NULL, SIGTTOU);
253 253 sigdelq(p, NULL, SIGTTIN);
254 254 sigdiffset(&p->p_sig, &stopdefault);
255 255 sigdiffset(&p->p_extsig, &stopdefault);
256 256 p->p_stopsig = 0;
257 257 if ((tt = p->p_tlist) != NULL) {
258 258 do {
259 259 sigdelq(p, tt, SIGSTOP);
260 260 sigdelq(p, tt, SIGTSTP);
261 261 sigdelq(p, tt, SIGTTOU);
262 262 sigdelq(p, tt, SIGTTIN);
263 263 sigdiffset(&tt->t_sig, &stopdefault);
264 264 sigdiffset(&tt->t_extsig, &stopdefault);
265 265 } while ((tt = tt->t_forw) != p->p_tlist);
266 266 }
267 267 if ((tt = p->p_tlist) != NULL) {
268 268 do {
269 269 thread_lock(tt);
270 270 if (tt->t_state == TS_STOPPED &&
271 271 tt->t_whystop == PR_JOBCONTROL) {
272 272 tt->t_schedflag |= TS_XSTART;
273 273 setrun_locked(tt);
274 274 }
275 275 thread_unlock(tt);
276 276 } while ((tt = tt->t_forw) != p->p_tlist);
277 277 }
278 278 } else if (sigismember(&stopdefault, sig)) {
279 279 /*
280 280 * This test has a race condition which we can't fix:
281 281 * By the time the stopping signal is received by
282 282 * the target process/thread, the signal handler
283 283 * and/or the detached state might have changed.
284 284 */
285 285 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
286 286 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
287 287 p->p_flag &= ~SSCONT;
288 288 sigdelq(p, NULL, SIGCONT);
289 289 sigdelset(&p->p_sig, SIGCONT);
290 290 sigdelset(&p->p_extsig, SIGCONT);
291 291 if ((tt = p->p_tlist) != NULL) {
292 292 do {
293 293 sigdelq(p, tt, SIGCONT);
294 294 sigdelset(&tt->t_sig, SIGCONT);
295 295 sigdelset(&tt->t_extsig, SIGCONT);
296 296 } while ((tt = tt->t_forw) != p->p_tlist);
297 297 }
298 298 }
299 299
300 300 if (sig_discardable(p, sig)) {
301 301 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
302 302 proc_t *, p, int, sig);
303 303 return;
304 304 }
305 305
306 306 if (t != NULL) {
307 307 /*
308 308 * This is a directed signal, wake up the lwp.
309 309 */
310 310 sigaddset(&t->t_sig, sig);
311 311 if (ext)
312 312 sigaddset(&t->t_extsig, sig);
313 313 thread_lock(t);
314 314 (void) eat_signal(t, sig);
315 315 thread_unlock(t);
316 316 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
317 317 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
318 318 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
319 319 (*((sigfd_proc_state_t *)(p->p_sigfd))->
320 320 sigfd_pollwake_cb)(p, sig);
321 321
322 322 } else if ((tt = p->p_tlist) != NULL) {
323 323 /*
324 324 * Make sure that some lwp that already exists
325 325 * in the process fields the signal soon.
326 326 * Wake up an interruptibly sleeping lwp if necessary.
327 327 * For SIGKILL make all of the lwps see the signal;
328 328 * This is needed to guarantee a sure kill for processes
329 329 * with a mix of realtime and non-realtime threads.
330 330 */
331 331 int su = 0;
332 332
333 333 sigaddset(&p->p_sig, sig);
334 334 if (ext)
335 335 sigaddset(&p->p_extsig, sig);
336 336 do {
337 337 thread_lock(tt);
338 338 if (eat_signal(tt, sig) && sig != SIGKILL) {
339 339 thread_unlock(tt);
340 340 break;
341 341 }
342 342 if (SUSPENDED(tt))
343 343 su++;
344 344 thread_unlock(tt);
345 345 } while ((tt = tt->t_forw) != p->p_tlist);
346 346 /*
347 347 * If the process is deadlocked, make somebody run and die.
348 348 */
349 349 if (sig == SIGKILL && p->p_stat != SIDL &&
350 350 p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
351 351 !(p->p_proc_flag & P_PR_LOCK)) {
352 352 thread_lock(tt);
353 353 p->p_lwprcnt++;
354 354 tt->t_schedflag |= TS_CSTART;
355 355 setrun_locked(tt);
356 356 thread_unlock(tt);
357 357 }
358 358
359 359 DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
360 360 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
361 361 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
362 362 (*((sigfd_proc_state_t *)(p->p_sigfd))->
363 363 sigfd_pollwake_cb)(p, sig);
364 364 }
365 365 }
366 366
367 367 static int
368 368 isjobstop(int sig)
369 369 {
370 370 proc_t *p = ttoproc(curthread);
371 371
372 372 ASSERT(MUTEX_HELD(&p->p_lock));
373 373
374 374 if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
375 375 sigismember(&stopdefault, sig)) {
376 376 /*
377 377 * If SIGCONT has been posted since we promoted this signal
378 378 * from pending to current, then don't do a jobcontrol stop.
379 379 */
380 380 if (!(p->p_flag & SSCONT) &&
381 381 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
382 382 curthread != p->p_agenttp) {
383 383 sigqueue_t *sqp;
384 384
385 385 stop(PR_JOBCONTROL, sig);
386 386 mutex_exit(&p->p_lock);
387 387 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
388 388 mutex_enter(&pidlock);
389 389 /*
390 390 * Only the first lwp to continue notifies the parent.
391 391 */
392 392 if (p->p_pidflag & CLDCONT)
393 393 siginfofree(sqp);
394 394 else {
395 395 p->p_pidflag |= CLDCONT;
396 396 p->p_wcode = CLD_CONTINUED;
397 397 p->p_wdata = SIGCONT;
398 398 sigcld(p, sqp);
399 399 }
400 400 mutex_exit(&pidlock);
401 401 mutex_enter(&p->p_lock);
402 402 }
403 403 return (1);
404 404 }
405 405 return (0);
406 406 }
407 407
408 408 /*
409 409 * Returns true if the current process has a signal to process, and
410 410 * the signal is not held. The signal to process is put in p_cursig.
411 411 * This is asked at least once each time a process enters the system
412 412 * (though this can usually be done without actually calling issig by
413 413 * checking the pending signal masks). A signal does not do anything
414 414 * directly to a process; it sets a flag that asks the process to do
415 415 * something to itself.
416 416 *
417 417 * The "why" argument indicates the allowable side-effects of the call:
418 418 *
419 419 * FORREAL: Extract the next pending signal from p_sig into p_cursig;
420 420 * stop the process if a stop has been requested or if a traced signal
421 421 * is pending.
422 422 *
423 423 * JUSTLOOKING: Don't stop the process, just indicate whether or not
424 424 * a signal might be pending (FORREAL is needed to tell for sure).
425 425 *
426 426 * XXX: Changes to the logic in these routines should be propagated
427 427 * to lm_sigispending(). See bug 1201594.
428 428 */
429 429
430 430 static int issig_forreal(void);
431 431 static int issig_justlooking(void);
432 432
433 433 int
434 434 issig(int why)
435 435 {
436 436 ASSERT(why == FORREAL || why == JUSTLOOKING);
437 437
438 438 return ((why == FORREAL)? issig_forreal() : issig_justlooking());
439 439 }
440 440
441 441
442 442 static int
443 443 issig_justlooking(void)
444 444 {
445 445 kthread_t *t = curthread;
446 446 klwp_t *lwp = ttolwp(t);
447 447 proc_t *p = ttoproc(t);
448 448 k_sigset_t set;
449 449
450 450 /*
451 451 * This function answers the question:
452 452 * "Is there any reason to call issig_forreal()?"
453 453 *
454 454 * We have to answer the question w/o grabbing any locks
455 455 * because we are (most likely) being called after we
456 456 * put ourselves on the sleep queue.
457 457 */
458 458
459 459 if (t->t_dtrace_stop | t->t_dtrace_sig)
460 460 return (1);
461 461
462 462 /*
463 463 * Another piece of complexity in this process. When single-stepping a
464 464 * process, we don't want an intervening signal or TP_PAUSE request to
465 465 * suspend the current thread. Otherwise, the controlling process will
466 466 * hang because we will be stopped with TS_PSTART set in t_schedflag.
467 467 * We will trigger any remaining signals when we re-enter the kernel on
468 468 * the single step trap.
469 469 */
470 470 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
471 471 return (0);
472 472
473 473 if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
474 474 (p->p_flag & (SEXITLWPS|SKILLED)) ||
475 475 (lwp->lwp_nostop == 0 &&
476 476 (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
477 477 (t->t_proc_flag &
478 478 (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
479 479 lwp->lwp_cursig)
480 480 return (1);
481 481
482 482 if (p->p_flag & SVFWAIT)
483 483 return (0);
484 484 set = p->p_sig;
485 485 sigorset(&set, &t->t_sig);
486 486 if (schedctl_sigblock(t)) /* all blockable signals blocked */
487 487 sigandset(&set, &cantmask);
488 488 else
489 489 sigdiffset(&set, &t->t_hold);
490 490 if (p->p_flag & SVFORK)
491 491 sigdiffset(&set, &holdvfork);
492 492
493 493 if (!sigisempty(&set)) {
494 494 int sig;
495 495
496 496 for (sig = 1; sig < NSIG; sig++) {
497 497 if (sigismember(&set, sig) &&
498 498 (tracing(p, sig) ||
499 499 sigismember(&t->t_sigwait, sig) ||
500 500 !sigismember(&p->p_ignore, sig))) {
501 501 /*
502 502 * Don't promote a signal that will stop
503 503 * the process when lwp_nostop is set.
504 504 */
505 505 if (!lwp->lwp_nostop ||
506 506 PTOU(p)->u_signal[sig-1] != SIG_DFL ||
507 507 !sigismember(&stopdefault, sig))
508 508 return (1);
509 509 }
510 510 }
511 511 }
512 512
513 513 return (0);
514 514 }
515 515
516 516 static int
517 517 issig_forreal(void)
518 518 {
519 519 int sig = 0, ext = 0;
520 520 kthread_t *t = curthread;
521 521 klwp_t *lwp = ttolwp(t);
522 522 proc_t *p = ttoproc(t);
523 523 int toproc = 0;
524 524 int sigcld_found = 0;
525 525 int nostop_break = 0;
526 526
527 527 ASSERT(t->t_state == TS_ONPROC);
528 528
529 529 mutex_enter(&p->p_lock);
530 530 schedctl_finish_sigblock(t);
531 531
532 532 if (t->t_dtrace_stop | t->t_dtrace_sig) {
533 533 if (t->t_dtrace_stop) {
534 534 /*
535 535 * If DTrace's "stop" action has been invoked on us,
536 536 * set TP_PRSTOP.
537 537 */
538 538 t->t_proc_flag |= TP_PRSTOP;
539 539 }
540 540
541 541 if (t->t_dtrace_sig != 0) {
542 542 k_siginfo_t info;
543 543
544 544 /*
545 545 * Post the signal generated as the result of
546 546 * DTrace's "raise" action as a normal signal before
547 547 * the full-fledged signal checking begins.
548 548 */
549 549 bzero(&info, sizeof (info));
550 550 info.si_signo = t->t_dtrace_sig;
551 551 info.si_code = SI_DTRACE;
552 552
553 553 sigaddq(p, NULL, &info, KM_NOSLEEP);
554 554
555 555 t->t_dtrace_sig = 0;
556 556 }
557 557 }
558 558
559 559 for (;;) {
560 560 if (p->p_flag & (SEXITLWPS|SKILLED)) {
561 561 lwp->lwp_cursig = sig = SIGKILL;
562 562 lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
563 563 t->t_sig_check = 1;
564 564 break;
565 565 }
566 566
567 567 /*
568 568 * Another piece of complexity in this process. When
569 569 * single-stepping a process, we don't want an intervening
570 570 * signal or TP_PAUSE request to suspend the current thread.
571 571 * Otherwise, the controlling process will hang because we will
572 572 * be stopped with TS_PSTART set in t_schedflag. We will
573 573 * trigger any remaining signals when we re-enter the kernel on
574 574 * the single step trap.
575 575 */
576 576 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
577 577 sig = 0;
578 578 break;
579 579 }
580 580
581 581 /*
582 582 * Hold the lwp here for watchpoint manipulation.
583 583 */
584 584 if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
585 585 stop(PR_SUSPENDED, SUSPEND_PAUSE);
586 586 continue;
587 587 }
588 588
589 589 if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
590 590 if ((sig = lwp->lwp_cursig) != 0) {
591 591 /*
592 592 * Make sure we call ISSIG() in post_syscall()
593 593 * to re-validate this current signal.
594 594 */
595 595 t->t_sig_check = 1;
596 596 }
597 597 break;
598 598 }
599 599
600 600 /*
601 601 * If the request is PR_CHECKPOINT, ignore the rest of signals
602 602 * or requests. Honor other stop requests or signals later.
603 603 * Go back to top of loop here to check if an exit or hold
604 604 * event has occurred while stopped.
605 605 */
606 606 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
607 607 stop(PR_CHECKPOINT, 0);
608 608 continue;
609 609 }
610 610
611 611 /*
612 612 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
613 613 * with signals or /proc. Another lwp is executing fork1(),
614 614 * or is undergoing watchpoint activity (remapping a page),
615 615 * or is executing lwp_suspend() on this lwp.
616 616 * Again, go back to top of loop to check if an exit
617 617 * or hold event has occurred while stopped.
618 618 */
619 619 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
620 620 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
621 621 stop(PR_SUSPENDED, SUSPEND_NORMAL);
622 622 continue;
623 623 }
624 624
625 625 /*
626 626 * Honor requested stop before dealing with the
627 627 * current signal; a debugger may change it.
628 628 * Do not want to go back to loop here since this is a special
629 629 * stop that means: make incremental progress before the next
630 630 * stop. The danger is that returning to top of loop would most
631 631 * likely drop the thread right back here to stop soon after it
632 632 * was continued, violating the incremental progress request.
633 633 */
634 634 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
635 635 stop(PR_REQUESTED, 0);
636 636
637 637 /*
638 638 * If a debugger wants us to take a signal it will have
639 639 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
640 640 * or if it's being ignored, we continue on looking for another
641 641 * signal. Otherwise we return the specified signal, provided
642 642 * it's not a signal that causes a job control stop.
643 643 *
644 644 * When stopped on PR_JOBCONTROL, there is no current
645 645 * signal; we cancel lwp->lwp_cursig temporarily before
646 646 * calling isjobstop(). The current signal may be reset
647 647 * by a debugger while we are stopped in isjobstop().
648 648 *
649 649 * If the current thread is accepting the signal
650 650 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
651 651 * we allow the signal to be accepted, even if it is
652 652 * being ignored, and without causing a job control stop.
653 653 */
654 654 if ((sig = lwp->lwp_cursig) != 0) {
655 655 ext = lwp->lwp_extsig;
656 656 lwp->lwp_cursig = 0;
657 657 lwp->lwp_extsig = 0;
658 658 if (sigismember(&t->t_sigwait, sig) ||
659 659 (!sigismember(&p->p_ignore, sig) &&
660 660 !isjobstop(sig))) {
661 661 if (p->p_flag & (SEXITLWPS|SKILLED)) {
662 662 sig = SIGKILL;
663 663 ext = (p->p_flag & SEXTKILLED) != 0;
664 664 }
665 665 lwp->lwp_cursig = (uchar_t)sig;
666 666 lwp->lwp_extsig = (uchar_t)ext;
667 667 break;
668 668 }
669 669 /*
670 670 * The signal is being ignored or it caused a
671 671 * job-control stop. If another current signal
672 672 * has not been established, return the current
673 673 * siginfo, if any, to the memory manager.
674 674 */
675 675 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
676 676 siginfofree(lwp->lwp_curinfo);
677 677 lwp->lwp_curinfo = NULL;
678 678 }
679 679 /*
680 680 * Loop around again in case we were stopped
681 681 * on a job control signal and a /proc stop
682 682 * request was posted or another current signal
683 683 * was established while we were stopped.
684 684 */
685 685 continue;
686 686 }
687 687
688 688 if (p->p_stopsig && !lwp->lwp_nostop &&
689 689 curthread != p->p_agenttp) {
690 690 /*
691 691 * Some lwp in the process has already stopped
692 692 * showing PR_JOBCONTROL. This is a stop in
693 693 * sympathy with the other lwp, even if this
694 694 * lwp is blocking the stopping signal.
695 695 */
696 696 stop(PR_JOBCONTROL, p->p_stopsig);
697 697 continue;
698 698 }
699 699
700 700 /*
701 701 * Loop on the pending signals until we find a
702 702 * non-held signal that is traced or not ignored.
703 703 * First check the signals pending for the lwp,
704 704 * then the signals pending for the process as a whole.
705 705 */
706 706 for (;;) {
707 707 if ((sig = fsig(&t->t_sig, t)) != 0) {
708 708 toproc = 0;
709 709 if (tracing(p, sig) ||
710 710 sigismember(&t->t_sigwait, sig) ||
711 711 !sigismember(&p->p_ignore, sig)) {
712 712 if (sigismember(&t->t_extsig, sig))
713 713 ext = 1;
714 714 break;
715 715 }
716 716 sigdelset(&t->t_sig, sig);
717 717 sigdelset(&t->t_extsig, sig);
718 718 sigdelq(p, t, sig);
719 719 } else if ((sig = fsig(&p->p_sig, t)) != 0) {
720 720 if (sig == SIGCLD)
721 721 sigcld_found = 1;
722 722 toproc = 1;
723 723 if (tracing(p, sig) ||
724 724 sigismember(&t->t_sigwait, sig) ||
725 725 !sigismember(&p->p_ignore, sig)) {
726 726 if (sigismember(&p->p_extsig, sig))
727 727 ext = 1;
728 728 break;
729 729 }
730 730 sigdelset(&p->p_sig, sig);
731 731 sigdelset(&p->p_extsig, sig);
732 732 sigdelq(p, NULL, sig);
733 733 } else {
734 734 /* no signal was found */
735 735 break;
736 736 }
737 737 }
738 738
739 739 if (sig == 0) { /* no signal was found */
740 740 if (p->p_flag & (SEXITLWPS|SKILLED)) {
741 741 lwp->lwp_cursig = SIGKILL;
742 742 sig = SIGKILL;
743 743 ext = (p->p_flag & SEXTKILLED) != 0;
744 744 }
745 745 break;
746 746 }
747 747
748 748 /*
749 749 * If we have been informed not to stop (i.e., we are being
750 750 * called from within a network operation), then don't promote
751 751 * the signal at this time, just return the signal number.
752 752 * We will call issig() again later when it is safe.
753 753 *
754 754 * fsig() does not return a jobcontrol stopping signal
755 755 * with a default action of stopping the process if
756 756 * lwp_nostop is set, so we won't be causing a bogus
757 757 * EINTR by this action. (Such a signal is eaten by
758 758 * isjobstop() when we loop around to do final checks.)
759 759 */
760 760 if (lwp->lwp_nostop) {
761 761 nostop_break = 1;
762 762 break;
763 763 }
764 764
765 765 /*
766 766 * Promote the signal from pending to current.
767 767 *
768 768 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
769 769 * if no siginfo_t exists for this signal.
770 770 */
771 771 lwp->lwp_cursig = (uchar_t)sig;
772 772 lwp->lwp_extsig = (uchar_t)ext;
773 773 t->t_sig_check = 1; /* so post_syscall will see signal */
774 774 ASSERT(lwp->lwp_curinfo == NULL);
775 775 sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);
776 776
777 777 if (tracing(p, sig))
778 778 stop(PR_SIGNALLED, sig);
779 779
780 780 /*
781 781 * Loop around to check for requested stop before
782 782 * performing the usual current-signal actions.
783 783 */
784 784 }
785 785
786 786 mutex_exit(&p->p_lock);
787 787
788 788 /*
789 789 * If SIGCLD was dequeued from the process's signal queue,
790 790 * search for other pending SIGCLD's from the list of children.
791 791 */
792 792 if (sigcld_found)
793 793 sigcld_repost();
794 794
795 795 if (sig != 0)
796 796 (void) undo_watch_step(NULL);
797 797
798 798 /*
799 799 * If we have been blocked since the p_lock was dropped off
800 800 * above, then this promoted signal might have been handled
801 801 * already when we were on the way back from sleep queue, so
802 802 * just ignore it.
803 803 * If we have been informed not to stop, just return the signal
804 804 * number. Also see comments above.
805 805 */
806 806 if (!nostop_break) {
807 807 sig = lwp->lwp_cursig;
808 808 }
809 809
810 810 return (sig != 0);
811 811 }
812 812
813 813 /*
814 814 * Return true if the process is currently stopped showing PR_JOBCONTROL.
815 815 * This is true only if all of the process's lwp's are so stopped.
816 816 * If this is asked by one of the lwps in the process, exclude that lwp.
817 817 */
818 818 int
819 819 jobstopped(proc_t *p)
820 820 {
821 821 kthread_t *t;
822 822
823 823 ASSERT(MUTEX_HELD(&p->p_lock));
824 824
825 825 if ((t = p->p_tlist) == NULL)
826 826 return (0);
827 827
828 828 do {
829 829 thread_lock(t);
830 830 /* ignore current, zombie and suspended lwps in the test */
831 831 if (!(t == curthread || t->t_state == TS_ZOMB ||
832 832 SUSPENDED(t)) &&
833 833 (t->t_state != TS_STOPPED ||
834 834 t->t_whystop != PR_JOBCONTROL)) {
835 835 thread_unlock(t);
836 836 return (0);
837 837 }
838 838 thread_unlock(t);
839 839 } while ((t = t->t_forw) != p->p_tlist);
840 840
841 841 return (1);
842 842 }
843 843
844 844 /*
845 845 * Put ourself (curthread) into the stopped state and notify tracers.
846 846 */
847 847 void
848 848 stop(int why, int what)
849 849 {
850 850 kthread_t *t = curthread;
851 851 proc_t *p = ttoproc(t);
852 852 klwp_t *lwp = ttolwp(t);
853 853 kthread_t *tx;
854 854 lwpent_t *lep;
855 855 int procstop;
856 856 int flags = TS_ALLSTART;
857 857 hrtime_t stoptime;
858 858
859 859 /*
860 860 * Can't stop a system process.
861 861 */
862 862 if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
863 863 return;
864 864
865 865 ASSERT(MUTEX_HELD(&p->p_lock));
866 866
867 867 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
868 868 /*
869 869 * Don't stop an lwp with SIGKILL pending.
870 870 * Don't stop if the process or lwp is exiting.
871 871 */
872 872 if (lwp->lwp_cursig == SIGKILL ||
873 873 sigismember(&t->t_sig, SIGKILL) ||
874 874 sigismember(&p->p_sig, SIGKILL) ||
875 875 (t->t_proc_flag & TP_LWPEXIT) ||
876 876 (p->p_flag & (SEXITLWPS|SKILLED))) {
877 877 p->p_stopsig = 0;
878 878 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
879 879 return;
880 880 }
881 881 }
882 882
883 883 /*
884 884 * Make sure we don't deadlock on a recursive call to prstop().
885 885 * prstop() sets the lwp_nostop flag.
886 886 */
887 887 if (lwp->lwp_nostop)
888 888 return;
889 889
890 890 /*
891 891 * Make sure the lwp is in an orderly state for inspection
892 892 * by a debugger through /proc or for dumping via core().
893 893 */
894 894 schedctl_finish_sigblock(t);
895 895 t->t_proc_flag |= TP_STOPPING; /* must set before dropping p_lock */
896 896 mutex_exit(&p->p_lock);
897 897 stoptime = gethrtime();
898 898 prstop(why, what);
899 899 (void) undo_watch_step(NULL);
900 900 mutex_enter(&p->p_lock);
901 901 ASSERT(t->t_state == TS_ONPROC);
902 902
903 903 switch (why) {
904 904 case PR_CHECKPOINT:
905 905 /*
906 906 * The situation may have changed since we dropped
907 907 * and reacquired p->p_lock. Double-check now
908 908 * whether we should stop or not.
909 909 */
910 910 if (!(t->t_proc_flag & TP_CHKPT)) {
911 911 t->t_proc_flag &= ~TP_STOPPING;
912 912 return;
913 913 }
914 914 t->t_proc_flag &= ~TP_CHKPT;
915 915 flags &= ~TS_RESUME;
916 916 break;
917 917
918 918 case PR_JOBCONTROL:
919 919 ASSERT(what == SIGSTOP || what == SIGTSTP ||
920 920 what == SIGTTIN || what == SIGTTOU);
921 921 flags &= ~TS_XSTART;
922 922 break;
923 923
924 924 case PR_SUSPENDED:
925 925 ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
926 926 /*
927 927 * The situation may have changed since we dropped
928 928 * and reacquired p->p_lock. Double-check now
929 929 * whether we should stop or not.
930 930 */
931 931 if (what == SUSPEND_PAUSE) {
932 932 if (!(t->t_proc_flag & TP_PAUSE)) {
933 933 t->t_proc_flag &= ~TP_STOPPING;
934 934 return;
935 935 }
936 936 flags &= ~TS_UNPAUSE;
937 937 } else {
938 938 if (!((t->t_proc_flag & TP_HOLDLWP) ||
939 939 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
940 940 t->t_proc_flag &= ~TP_STOPPING;
941 941 return;
942 942 }
943 943 /*
944 944 * If SHOLDFORK is in effect and we are stopping
945 945 * while asleep (not at the top of the stack),
946 946 * we return now to allow the hold to take effect
947 947 * when we reach the top of the kernel stack.
948 948 */
949 949 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
950 950 t->t_proc_flag &= ~TP_STOPPING;
951 951 return;
952 952 }
953 953 flags &= ~TS_CSTART;
954 954 }
955 955 break;
956 956
957 957 default: /* /proc stop */
958 958 flags &= ~TS_PSTART;
959 959 /*
960 960 * Do synchronous stop unless the async-stop flag is set.
961 961 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
962 962 * then no debugger is present and we also do synchronous stop.
963 963 */
964 964 if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
965 965 !(p->p_proc_flag & P_PR_ASYNC)) {
966 966 int notify;
967 967
968 968 for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
969 969 notify = 0;
970 970 thread_lock(tx);
971 971 if (ISTOPPED(tx) ||
972 972 (tx->t_proc_flag & TP_PRSTOP)) {
973 973 thread_unlock(tx);
974 974 continue;
975 975 }
976 976 tx->t_proc_flag |= TP_PRSTOP;
977 977 tx->t_sig_check = 1;
978 978 if (tx->t_state == TS_SLEEP &&
979 979 (tx->t_flag & T_WAKEABLE)) {
980 980 /*
981 981 * Don't actually wake it up if it's
982 982 * in one of the lwp_*() syscalls.
983 983 * Mark it virtually stopped and
984 984 * notify /proc waiters (below).
985 985 */
986 986 if (tx->t_wchan0 == NULL)
987 987 setrun_locked(tx);
988 988 else {
989 989 tx->t_proc_flag |= TP_PRVSTOP;
990 990 tx->t_stoptime = stoptime;
991 991 notify = 1;
992 992 }
993 993 }
994 994
995 995 /* Move waiting thread to run queue */
996 996 if (ISWAITING(tx))
997 997 setrun_locked(tx);
998 998
999 999 /*
1000 1000 * force the thread into the kernel
1001 1001 * if it is not already there.
1002 1002 */
1003 1003 if (tx->t_state == TS_ONPROC &&
1004 1004 tx->t_cpu != CPU)
1005 1005 poke_cpu(tx->t_cpu->cpu_id);
1006 1006 thread_unlock(tx);
1007 1007 lep = p->p_lwpdir[tx->t_dslot].ld_entry;
1008 1008 if (notify && lep->le_trace)
1009 1009 prnotify(lep->le_trace);
1010 1010 }
1011 1011 /*
1012 1012 * We do this just in case one of the threads we asked
1013 1013 * to stop is in holdlwps() (called from cfork()) or
1014 1014 * lwp_suspend().
1015 1015 */
1016 1016 cv_broadcast(&p->p_holdlwps);
1017 1017 }
1018 1018 break;
1019 1019 }
1020 1020
1021 1021 t->t_stoptime = stoptime;
1022 1022
1023 1023 if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
1024 1024 /*
1025 1025 * Determine if the whole process is jobstopped.
1026 1026 */
1027 1027 if (jobstopped(p)) {
1028 1028 sigqueue_t *sqp;
1029 1029 int sig;
1030 1030
1031 1031 if ((sig = p->p_stopsig) == 0)
1032 1032 p->p_stopsig = (uchar_t)(sig = what);
1033 1033 mutex_exit(&p->p_lock);
1034 1034 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1035 1035 mutex_enter(&pidlock);
1036 1036 /*
1037 1037 * The last lwp to stop notifies the parent.
1038 1038 * Turn off the CLDCONT flag now so the first
1039 1039 * lwp to continue knows what to do.
1040 1040 */
1041 1041 p->p_pidflag &= ~CLDCONT;
1042 1042 p->p_wcode = CLD_STOPPED;
1043 1043 p->p_wdata = sig;
1044 1044 sigcld(p, sqp);
1045 1045 /*
1046 1046 * Grab p->p_lock before releasing pidlock so the
1047 1047 * parent and the child don't have a race condition.
1048 1048 */
1049 1049 mutex_enter(&p->p_lock);
1050 1050 mutex_exit(&pidlock);
1051 1051 p->p_stopsig = 0;
1052 1052 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
1053 1053 /*
1054 1054 * Set p->p_stopsig and wake up sleeping lwps
1055 1055 * so they will stop in sympathy with this lwp.
1056 1056 */
1057 1057 p->p_stopsig = (uchar_t)what;
1058 1058 pokelwps(p);
1059 1059 /*
1060 1060 * We do this just in case one of the threads we asked
1061 1061 * to stop is in holdlwps() (called from cfork()) or
1062 1062 * lwp_suspend().
1063 1063 */
1064 1064 cv_broadcast(&p->p_holdlwps);
1065 1065 }
1066 1066 }
1067 1067
1068 1068 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
1069 1069 /*
1070 1070 * Do process-level notification when all lwps are
1071 1071 * either stopped on events of interest to /proc
1072 1072 * or are stopped showing PR_SUSPENDED or are zombies.
1073 1073 */
1074 1074 procstop = 1;
1075 1075 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
1076 1076 if (VSTOPPED(tx))
1077 1077 continue;
1078 1078 thread_lock(tx);
1079 1079 switch (tx->t_state) {
1080 1080 case TS_ZOMB:
1081 1081 break;
1082 1082 case TS_STOPPED:
1083 1083 /* neither ISTOPPED nor SUSPENDED? */
1084 1084 if ((tx->t_schedflag &
1085 1085 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
1086 1086 (TS_CSTART | TS_UNPAUSE | TS_PSTART))
1087 1087 procstop = 0;
1088 1088 break;
1089 1089 case TS_SLEEP:
1090 1090 /* not paused for watchpoints? */
1091 1091 if (!(tx->t_flag & T_WAKEABLE) ||
1092 1092 tx->t_wchan0 == NULL ||
1093 1093 !(tx->t_proc_flag & TP_PAUSE))
1094 1094 procstop = 0;
1095 1095 break;
1096 1096 default:
1097 1097 procstop = 0;
1098 1098 break;
1099 1099 }
1100 1100 thread_unlock(tx);
1101 1101 }
1102 1102 if (procstop) {
1103 1103 /* there must not be any remapped watched pages now */
1104 1104 ASSERT(p->p_mapcnt == 0);
1105 1105 if (p->p_proc_flag & P_PR_PTRACE) {
1106 1106 /* ptrace() compatibility */
1107 1107 mutex_exit(&p->p_lock);
1108 1108 mutex_enter(&pidlock);
1109 1109 p->p_wcode = CLD_TRAPPED;
1110 1110 p->p_wdata = (why == PR_SIGNALLED)?
1111 1111 what : SIGTRAP;
1112 1112 cv_broadcast(&p->p_parent->p_cv);
1113 1113 /*
1114 1114 * Grab p->p_lock before releasing pidlock so
1115 1115 * parent and child don't have a race condition.
1116 1116 */
1117 1117 mutex_enter(&p->p_lock);
1118 1118 mutex_exit(&pidlock);
1119 1119 }
1120 1120 if (p->p_trace) /* /proc */
1121 1121 prnotify(p->p_trace);
1122 1122 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
1123 1123 cv_broadcast(&p->p_holdlwps); /* holdwatch() */
1124 1124 }
1125 1125 if (why != PR_SUSPENDED) {
1126 1126 lep = p->p_lwpdir[t->t_dslot].ld_entry;
1127 1127 if (lep->le_trace) /* /proc */
1128 1128 prnotify(lep->le_trace);
1129 1129 /*
1130 1130 * Special notification for creation of the agent lwp.
1131 1131 */
1132 1132 if (t == p->p_agenttp &&
1133 1133 (t->t_proc_flag & TP_PRSTOP) &&
1134 1134 p->p_trace)
1135 1135 prnotify(p->p_trace);
1136 1136 /*
1137 1137 * The situation may have changed since we dropped
1138 1138 * and reacquired p->p_lock. Double-check now
1139 1139 * whether we should stop or not.
1140 1140 */
1141 1141 if (!(t->t_proc_flag & TP_STOPPING)) {
1142 1142 if (t->t_proc_flag & TP_PRSTOP)
1143 1143 t->t_proc_flag |= TP_STOPPING;
1144 1144 }
1145 1145 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
1146 1146 prnostep(lwp);
1147 1147 }
1148 1148 }
1149 1149
1150 1150 if (why == PR_SUSPENDED) {
1151 1151
1152 1152 /*
1153 1153 * We always broadcast in the case of SUSPEND_PAUSE. This is
1154 1154 * because checks for TP_PAUSE take precedence over checks for
1155 1155 * SHOLDWATCH. If a thread is trying to stop because of
1156 1156 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1157 1157 * waiting for the rest of the threads to enter a stopped state.
1158 1158 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1159 1159 * lwp and not know it, so broadcast just in case.
1160 1160 */
1161 1161 if (what == SUSPEND_PAUSE ||
1162 1162 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
1163 1163 cv_broadcast(&p->p_holdlwps);
1164 1164
1165 1165 }
1166 1166
1167 1167 /*
1168 1168 * Need to do this here (rather than after the thread is officially
1169 1169 * stopped) because we can't call mutex_enter from a stopped thread.
1170 1170 */
1171 1171 if (why == PR_CHECKPOINT)
1172 1172 del_one_utstop();
1173 1173
1174 1174 thread_lock(t);
1175 1175 ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
1176 1176 t->t_schedflag |= flags;
1177 1177 t->t_whystop = (short)why;
1178 1178 t->t_whatstop = (short)what;
1179 1179 CL_STOP(t, why, what);
1180 1180 (void) new_mstate(t, LMS_STOPPED);
1181 1181 thread_stop(t); /* set stop state and drop lock */
1182 1182
1183 1183 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
1184 1184 /*
1185 1185 * We may have gotten a SIGKILL or a SIGCONT when
1186 1186 * we released p->p_lock; make one last check.
1187 1187 * Also check for a /proc run-on-last-close.
1188 1188 */
1189 1189 if (sigismember(&t->t_sig, SIGKILL) ||
1190 1190 sigismember(&p->p_sig, SIGKILL) ||
1191 1191 (t->t_proc_flag & TP_LWPEXIT) ||
1192 1192 (p->p_flag & (SEXITLWPS|SKILLED))) {
1193 1193 p->p_stopsig = 0;
1194 1194 thread_lock(t);
1195 1195 t->t_schedflag |= TS_XSTART | TS_PSTART;
1196 1196 setrun_locked(t);
1197 1197 thread_unlock_nopreempt(t);
1198 1198 } else if (why == PR_JOBCONTROL) {
1199 1199 if (p->p_flag & SSCONT) {
1200 1200 /*
1201 1201 * This resulted from a SIGCONT posted
1202 1202 * while we were not holding p->p_lock.
1203 1203 */
1204 1204 p->p_stopsig = 0;
1205 1205 thread_lock(t);
1206 1206 t->t_schedflag |= TS_XSTART;
1207 1207 setrun_locked(t);
1208 1208 thread_unlock_nopreempt(t);
1209 1209 }
1210 1210 } else if (!(t->t_proc_flag & TP_STOPPING)) {
1211 1211 /*
1212 1212 * This resulted from a /proc run-on-last-close.
1213 1213 */
1214 1214 thread_lock(t);
1215 1215 t->t_schedflag |= TS_PSTART;
1216 1216 setrun_locked(t);
1217 1217 thread_unlock_nopreempt(t);
1218 1218 }
1219 1219 }
1220 1220
1221 1221 t->t_proc_flag &= ~TP_STOPPING;
1222 1222 mutex_exit(&p->p_lock);
1223 1223
1224 1224 swtch();
1225 1225 setallwatch(); /* reestablish any watchpoints set while stopped */
1226 1226 mutex_enter(&p->p_lock);
1227 1227 prbarrier(p); /* barrier against /proc locking */
1228 1228 }
1229 1229
/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
	/*
	 * num_utstop counts outstanding user-thread stop requests
	 * (see add_one_utstop()/del_one_utstop()); it is protected,
	 * together with utstop_cv, by thread_stop_lock.
	 */
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;
	mutex_exit(&thread_stop_lock);
}
1238 1238
/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
	/* Bump the outstanding-stop count under thread_stop_lock. */
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}
1247 1247
/* Interface for cancelling a user thread stop request */
void
del_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop--;
	/*
	 * When the last outstanding stop request is retired, wake
	 * anyone blocked in utstop_timedwait().
	 */
	if (num_utstop == 0)
		cv_broadcast(&utstop_cv);
	mutex_exit(&thread_stop_lock);
}
1258 1258
/*
 * Interface to wait for all user threads to be stopped.
 *
 * Performs at most one timed wait of up to 'ticks' on utstop_cv; the
 * return value of cv_reltimedwait() is deliberately discarded, so the
 * caller cannot distinguish timeout from wakeup and must tolerate
 * returning while num_utstop is still nonzero.
 */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
		    TR_CLOCK_TICK);
	mutex_exit(&thread_stop_lock);
}
1269 1269
/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *	if (issig())
 *		psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 *
 * Does not return if the disposition leads to process exit (lwp_exit()
 * or exit() at the bottom); otherwise returns after the handler has been
 * arranged via sendsig()/sendsig32() or the signal has been discarded.
 */
void
psig(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	void (*func)();
	int sig, rc, code, ext;
	pid_t pid = -1;
	id_t ctid = 0;
	zoneid_t zoneid = -1;
	sigqueue_t *sqp = NULL;
	uint32_t auditing = AU_AUDITING();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(t);
	code = CLD_KILLED;

	/* If another lwp has already initiated process exit, join it now. */
	if (p->p_flag & SEXITLWPS) {
		lwp_exit();
		return;			/* not reached */
	}
	sig = lwp->lwp_cursig;
	ext = lwp->lwp_extsig;

	ASSERT(sig < NSIG);

	/*
	 * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
	 * dropped between issig() and psig(), a debugger may have cleared
	 * lwp_cursig via /proc in the intervening window.
	 */
	if (sig == 0) {
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}
	func = PTOU(curproc)->u_signal[sig-1];

	/*
	 * The signal disposition could have changed since we promoted
	 * this signal from pending to current (we dropped p->p_lock).
	 * This can happen only in a multi-threaded process.
	 */
	if (sigismember(&p->p_ignore, sig) ||
	    (func == SIG_DFL && sigismember(&stopdefault, sig))) {
		/* Now ignored (or default-stop): discard it and restore mask. */
		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		if (t->t_flag & T_TOMASK) {	/* sigsuspend or pollsys */
			t->t_flag &= ~T_TOMASK;
			t->t_hold = lwp->lwp_sigoldmask;
		}
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * We check lwp_curinfo first since pr_setsig can actually
	 * stuff a sigqueue_t there for SIGKILL.
	 */
	if (lwp->lwp_curinfo) {
		sqp = lwp->lwp_curinfo;
	} else if (sig == SIGKILL && p->p_killsqp) {
		sqp = p->p_killsqp;
	}

	if (sqp != NULL) {
		if (SI_FROMUSER(&sqp->sq_info)) {
			/* Sender identity, used for the contract event below. */
			pid = sqp->sq_info.si_pid;
			ctid = sqp->sq_info.si_ctid;
			zoneid = sqp->sq_info.si_zoneid;
		}
		/*
		 * If we have a sigqueue_t, its sq_external value
		 * trumps the lwp_extsig value.  It is theoretically
		 * possible to make lwp_extsig reflect reality, but it
		 * would unnecessarily complicate things elsewhere.
		 */
		ext = sqp->sq_external;
	}

	if (func == SIG_DFL) {
		mutex_exit(&p->p_lock);
		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    NULL, void (*)(void), func);
	} else {
		k_siginfo_t *sip = NULL;

		/*
		 * If DTrace user-land tracing is active, give DTrace a
		 * chance to defer the signal until after tracing is
		 * complete.
		 */
		if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
			mutex_exit(&p->p_lock);
			return;
		}

		/*
		 * save siginfo pointer here, in case the
		 * the signal's reset bit is on
		 *
		 * The presence of a current signal prevents paging
		 * from succeeding over a network.  We copy the current
		 * signal information to the side and cancel the current
		 * signal so that sendsig() will succeed.
		 */
		if (sigismember(&p->p_siginfo, sig)) {
			sip = &lwp->lwp_siginfo;
			if (sqp) {
				bcopy(&sqp->sq_info, sip, sizeof (*sip));
				/*
				 * If we were interrupted out of a system call
				 * due to pthread_cancel(), inform libc.
				 */
				if (sig == SIGCANCEL &&
				    sip->si_code == SI_LWP &&
				    t->t_sysnum != 0)
					schedctl_cancel_eintr();
			} else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
			    t->t_rprof != NULL && t->t_rprof->rp_anystate) {
				/* lwp_siginfo already holds profiling state */
				/* EMPTY */;
			} else {
				bzero(sip, sizeof (*sip));
				sip->si_signo = sig;
				sip->si_code = SI_NOINFO;
			}
		}

		/* Save old mask unless sigsuspend/pollsys already did. */
		if (t->t_flag & T_TOMASK)
			t->t_flag &= ~T_TOMASK;
		else
			lwp->lwp_sigoldmask = t->t_hold;
		sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
		if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
			sigaddset(&t->t_hold, sig);
		if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
			setsigact(sig, SIG_DFL, &nullsmask, 0);

		DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
		    sip, void (*)(void), func);

		lwp->lwp_cursig = 0;
		lwp->lwp_extsig = 0;
		if (lwp->lwp_curinfo) {
			/* p->p_killsqp is freed by freeproc */
			siginfofree(lwp->lwp_curinfo);
			lwp->lwp_curinfo = NULL;
		}
		mutex_exit(&p->p_lock);
		lwp->lwp_ru.nsignals++;

		if (p->p_model == DATAMODEL_NATIVE)
			rc = sendsig(sig, sip, func);
#ifdef _SYSCALL32_IMPL
		else
			rc = sendsig32(sig, sip, func);
#endif	/* _SYSCALL32_IMPL */
		if (rc)
			return;
		/*
		 * sendsig() failed (e.g. could not build the signal frame);
		 * fall through to default-disposition processing of SIGSEGV.
		 */
		sig = lwp->lwp_cursig = SIGSEGV;
		ext = 0;	/* lwp_extsig was set above */
		pid = -1;
		ctid = 0;
	}

	if (sigismember(&coredefault, sig)) {
		/*
		 * Terminate all LWPs but don't discard them.
		 * If another lwp beat us to the punch by calling exit(),
		 * evaporate now.
		 */
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		/* if we got a SIGKILL from anywhere, no core dump */
		if (p->p_flag & SKILLED) {
			sig = SIGKILL;
			ext = (p->p_flag & SEXTKILLED) != 0;
		} else {
			if (auditing)		/* audit core dump */
				audit_core_start(sig);
			if (core(sig, ext) == 0)
				code = CLD_DUMPED;
			if (auditing)		/* audit core dump */
				audit_core_finish(code);
		}
	}

	/*
	 * Generate a contract event once if the process is killed
	 * by a signal.
	 */
	if (ext) {
		proc_is_exiting(p);
		if (exitlwps(0) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
		    zoneid);
	}

	exit(code, sig);
}
1497 1497
/*
 * Find next unheld signal in ssp for thread t.
 *
 * Returns the number of the signal to promote, or 0 if no signal in
 * *ssp is eligible.  Caller must hold p->p_lock.
 */
int
fsig(k_sigset_t *ssp, kthread_t *t)
{
	proc_t *p = ttoproc(t);
	user_t *up = PTOU(p);
	int i;
	k_sigset_t temp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Don't promote any signals for the parent of a vfork()d
	 * child that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFWAIT)
		return (0);

	/* Start from the candidate set minus this thread's held signals. */
	temp = *ssp;
	sigdiffset(&temp, &t->t_hold);

	/*
	 * Don't promote stopping signals (except SIGSTOP) for a child
	 * of vfork() that hasn't yet released the parent's memory.
	 */
	if (p->p_flag & SVFORK)
		sigdiffset(&temp, &holdvfork);

	/*
	 * Don't promote a signal that will stop
	 * the process when lwp_nostop is set.
	 */
	if (ttolwp(t)->lwp_nostop) {
		sigdelset(&temp, SIGSTOP);
		if (!p->p_pgidp->pid_pgorphaned) {
			/* Job-control stops only apply at default disposition. */
			if (up->u_signal[SIGTSTP-1] == SIG_DFL)
				sigdelset(&temp, SIGTSTP);
			if (up->u_signal[SIGTTIN-1] == SIG_DFL)
				sigdelset(&temp, SIGTTIN);
			if (up->u_signal[SIGTTOU-1] == SIG_DFL)
				sigdelset(&temp, SIGTTOU);
		}
	}

	/*
	 * Choose SIGKILL and SIGPROF before all other pending signals.
	 * The rest are promoted in signal number order.
	 */
	if (sigismember(&temp, SIGKILL))
		return (SIGKILL);
	if (sigismember(&temp, SIGPROF))
		return (SIGPROF);

	/* Scan the sigset words; lowbit() yields the lowest pending signal. */
	for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
		if (temp.__sigbits[i])
			return ((i * NBBY * sizeof (temp.__sigbits[0])) +
			    lowbit(temp.__sigbits[i]));
	}

	return (0);
}
1561 1561
/*
 * Establish the disposition 'disp', handler mask 'mask', and SA_* 'flags'
 * for signal 'sig' in the current process (the sigaction() back end).
 * Caller must hold p->p_lock.  NOTE: p->p_lock is dropped and reacquired
 * in the SIGCLD/SNOWAIT zombie-reaping path at the bottom.
 */
void
setsigact(int sig, void (*disp)(), const k_sigset_t *mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	PTOU(curproc)->u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		/* Signal is being caught: record per-signal SA_* flags. */
		sigdelset(&p->p_ignore, sig);
		PTOU(curproc)->u_sigmask[sig - 1] = *mask;
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&PTOU(curproc)->u_sigresethand, sig);
			else
				sigdelset(&PTOU(curproc)->u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&PTOU(curproc)->u_signodefer, sig);
		else
			sigdelset(&PTOU(curproc)->u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&PTOU(curproc)->u_sigrestart, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigonstack, sig);
	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		/* Clear the signal from every thread in the process. */
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);
	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		/*
		 * With SA_NOCLDWAIT or SIGCLD ignored, existing zombie
		 * children are reaped immediately.  pidlock nests outside
		 * p_lock, hence the drop/reacquire dance here.
		 */
		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}
1658 1658
/*
 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
 * Called from exec_common() for a process undergoing execve()
 * and from cfork() for a newly-created child of vfork().
 * In the vfork() case, 'p' is not the current process.
 * In both cases, there is only one thread in the process.
 * Caller must hold p->p_lock.
 */
void
sigdefault(proc_t *p)
{
	kthread_t *t = p->p_tlist;
	struct user *up = PTOU(p);
	int sig;

	ASSERT(MUTEX_HELD(&p->p_lock));

	for (sig = 1; sig < NSIG; sig++) {
		if (up->u_signal[sig - 1] != SIG_DFL &&
		    up->u_signal[sig - 1] != SIG_IGN) {
			/* Caught signal: reset disposition and handler mask. */
			up->u_signal[sig - 1] = SIG_DFL;
			sigemptyset(&up->u_sigmask[sig - 1]);
			if (sigismember(&ignoredefault, sig)) {
				/* Default is ignore: flush queued instances. */
				sigdelq(p, NULL, sig);
				sigdelq(p, t, sig);
			}
			if (sig == SIGCLD)
				p->p_flag &= ~(SNOWAIT|SJCTL);
		}
	}
	/* Rebuild the ignore/siginfo sets and drop now-ignored pendings. */
	sigorset(&p->p_ignore, &ignoredefault);
	sigfillset(&p->p_siginfo);
	sigdiffset(&p->p_siginfo, &cantmask);
	sigdiffset(&p->p_sig, &ignoredefault);
	sigdiffset(&p->p_extsig, &ignoredefault);
	sigdiffset(&t->t_sig, &ignoredefault);
	sigdiffset(&t->t_extsig, &ignoredefault);
}
1696 1696
/*
 * Notify the parent of child 'cp' about a state change (exit, stop,
 * continue) recorded in cp->p_wcode.  'sqp' is a pre-allocated sigqueue_t
 * used to post SIGCLD; it is consumed by post_sigcld() or freed here.
 * Caller must hold pidlock.
 */
void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			/* Parent doesn't want to wait: reap immediately. */
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;	/* consumed by post_sigcld() */
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		/* SIGCLD for job-control changes only if SJCTL is set. */
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;	/* consumed by post_sigcld() */
		}
		break;
	}

	/* If nobody consumed the pre-allocated sigqueue_t, release it. */
	if (sqp)
		siginfofree(sqp);
}
1744 1744
/*
 * Common code called from sigcld() and from
 * waitid() and issig_forreal() via sigcld_repost().
 * Give the parent process a SIGCLD if it does not have one pending,
 * else mark the child process so a SIGCLD can be posted later.
 * Caller must hold pidlock; 'sqp' (if non-NULL) is consumed or freed.
 */
static void
post_sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	k_siginfo_t info;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);

	/*
	 * If a SIGCLD is pending, then just mark the child process
	 * so that its SIGCLD will be posted later, when the first
	 * SIGCLD is taken off the queue or when the parent is ready
	 * to receive it or accept it, if ever.
	 */
	if (sigismember(&pp->p_sig, SIGCLD)) {
		cp->p_pidflag |= CLDPEND;
	} else {
		cp->p_pidflag &= ~CLDPEND;
		if (sqp == NULL) {
			/*
			 * This can only happen when the parent is init.
			 * (See call to sigcld(q, NULL) in exit().)
			 * Use KM_NOSLEEP to avoid deadlock.
			 */
			ASSERT(pp == proc_init);
			winfo(cp, &info, 0);
			sigaddq(pp, NULL, &info, KM_NOSLEEP);
		} else {
			winfo(cp, &sqp->sq_info, 0);
			sigaddqa(pp, NULL, sqp);
			sqp = NULL;	/* ownership passed to sigaddqa() */
		}
	}

	mutex_exit(&pp->p_lock);

	if (sqp)
		siginfofree(sqp);
}
1791 1791
1792 1792 /*
1793 1793 * Search for a child that has a pending SIGCLD for us, the parent.
1794 1794 * The queue of SIGCLD signals is implied by the list of children.
1795 1795 * We post the SIGCLD signals one at a time so they don't get lost.
1796 1796 * When one is dequeued, another is enqueued, until there are no more.
1797 1797 */
1798 1798 void
1799 1799 sigcld_repost()
1800 1800 {
1801 1801 proc_t *pp = curproc;
1802 1802 proc_t *cp;
1803 1803 sigqueue_t *sqp;
1804 1804
1805 1805 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1806 1806 mutex_enter(&pidlock);
1807 1807 for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1808 1808 if (cp->p_pidflag & CLDPEND) {
1809 1809 post_sigcld(cp, sqp);
1810 1810 mutex_exit(&pidlock);
1811 1811 return;
1812 1812 }
1813 1813 }
1814 1814 mutex_exit(&pidlock);
1815 1815 kmem_free(sqp, sizeof (sigqueue_t));
1816 1816 }
1817 1817
/*
 * count number of sigqueue send by sigaddqa()
 *
 * Bumps the sent counter in the sigqueue header backing 'sigqp', then
 * either posts the entry to (p, t) via sigaddqa() when cmd == SN_SEND
 * or releases it via siginfofree() otherwise.
 */
void
sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqhdr_t *sqh;

	sqh = (sigqhdr_t *)sigqp->sq_backptr;
	ASSERT(sqh);

	/* sqb_sent is protected by the header's own lock. */
	mutex_enter(&sqh->sqb_lock);
	sqh->sqb_sent++;
	mutex_exit(&sqh->sqb_lock);

	if (cmd == SN_SEND)
		sigaddqa(p, t, sigqp);
	else
		siginfofree(sigqp);
}
1838 1838
/*
 * Send the signal described by 'pv' to process 'p', performing the
 * permission checks for sigsend()/sigsendset().  Returns 0 on success,
 * EPERM for a privileged-signal-to-init violation, or EAGAIN when no
 * sigqueue_t can be allocated.  pv->perm is incremented when permission
 * is granted (used by sigsendset() to detect all-denied).
 * Caller must hold pidlock.
 */
int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	/* Never allow an unblockable signal to be sent to init (pid 1). */
	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	/*
	 * Permission: explicit skip, SIGCONT within the same session,
	 * or the standard process-to-process permission check.
	 */
	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}
1899 1899
1900 1900 int
1901 1901 sigsendset(procset_t *psp, sigsend_t *pv)
1902 1902 {
1903 1903 int error;
1904 1904
1905 1905 error = dotoprocs(psp, sigsendproc, (char *)pv);
1906 1906 if (error == 0 && pv->perm == 0)
1907 1907 return (EPERM);
1908 1908
1909 1909 return (error);
1910 1910 }
1911 1911
/*
 * Dequeue a queued siginfo structure.
 * If a non-null thread pointer is passed then dequeue from
 * the thread queue, otherwise dequeue from the process queue.
 *
 * On return, *qpp points at the first queued sigqueue_t whose signal
 * number matches sig (the caller takes ownership and must dispose of
 * it via siginfofree()), or NULL if no such entry is queued.  The
 * pending bit for sig is cleared up front and re-asserted only when
 * another instance of the same signal remains on the queue.
 */
void
sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
{
	sigqueue_t **psqp, *sqp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	*qpp = NULL;

	/* Clear the pending bit; re-set below if duplicates remain. */
	if (t != NULL) {
		sigdelset(&t->t_sig, sig);
		sigdelset(&t->t_extsig, sig);
		psqp = &t->t_sigqueue;
	} else {
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		psqp = &p->p_sigqueue;
	}

	/* Find the first queued entry carrying sig. */
	for (;;) {
		if ((sqp = *psqp) == NULL)
			return;
		if (sqp->sq_info.si_signo == sig)
			break;
		else
			psqp = &sqp->sq_next;
	}
	/* Unlink the entry and hand it to the caller. */
	*qpp = sqp;
	*psqp = sqp->sq_next;
	/*
	 * If another instance of sig is still queued behind the one we
	 * removed, re-assert the pending bit so it will be delivered.
	 */
	for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == sig) {
			if (t != (kthread_t *)NULL) {
				sigaddset(&t->t_sig, sig);
				t->t_sig_check = 1;
			} else {
				sigaddset(&p->p_sig, sig);
				set_proc_ast(p);
			}
			break;
		}
	}
}
1959 1959
/*
 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
 *
 * Removes the queued entry from the calling process whose child pid,
 * code and status match *ip exactly, and clears the pending SIGCLD
 * bits only when no other SIGCLD entry remains on the queue.
 */
void
sigcld_delete(k_siginfo_t *ip)
{
	proc_t *p = curproc;
	int another_sigcld = 0;
	sigqueue_t **psqp, *sqp;

	ASSERT(ip->si_signo == SIGCLD);

	mutex_enter(&p->p_lock);

	/* Nothing to do if SIGCLD isn't pending at all. */
	if (!sigismember(&p->p_sig, SIGCLD)) {
		mutex_exit(&p->p_lock);
		return;
	}

	/*
	 * Walk the queue looking for an exact match; remember whether any
	 * other SIGCLD entry precedes it.
	 */
	psqp = &p->p_sigqueue;
	for (;;) {
		if ((sqp = *psqp) == NULL) {
			mutex_exit(&p->p_lock);
			return;
		}
		if (sqp->sq_info.si_signo == SIGCLD) {
			if (sqp->sq_info.si_pid == ip->si_pid &&
			    sqp->sq_info.si_code == ip->si_code &&
			    sqp->sq_info.si_status == ip->si_status)
				break;
			another_sigcld = 1;
		}
		psqp = &sqp->sq_next;
	}
	/* Unlink and free the matching entry. */
	*psqp = sqp->sq_next;

	siginfofree(sqp);

	/* Scan the remainder of the queue for other SIGCLD entries. */
	for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
		if (sqp->sq_info.si_signo == SIGCLD)
			another_sigcld = 1;
	}

	/* Last SIGCLD gone: clear the pending bits. */
	if (!another_sigcld) {
		sigdelset(&p->p_sig, SIGCLD);
		sigdelset(&p->p_extsig, SIGCLD);
	}

	mutex_exit(&p->p_lock);
}
2010 2010
2011 2011 /*
2012 2012 * Delete queued siginfo structures.
2013 2013 * If a non-null thread pointer is passed then delete from
2014 2014 * the thread queue, otherwise delete from the process queue.
2015 2015 */
2016 2016 void
2017 2017 sigdelq(proc_t *p, kthread_t *t, int sig)
2018 2018 {
2019 2019 sigqueue_t **psqp, *sqp;
2020 2020
2021 2021 /*
2022 2022 * We must be holding p->p_lock unless the process is
2023 2023 * being reaped or has failed to get started on fork.
2024 2024 */
2025 2025 ASSERT(MUTEX_HELD(&p->p_lock) ||
2026 2026 p->p_stat == SIDL || p->p_stat == SZOMB);
2027 2027
2028 2028 if (t != (kthread_t *)NULL)
2029 2029 psqp = &t->t_sigqueue;
2030 2030 else
2031 2031 psqp = &p->p_sigqueue;
2032 2032
2033 2033 while (*psqp) {
2034 2034 sqp = *psqp;
2035 2035 if (sig == 0 || sqp->sq_info.si_signo == sig) {
2036 2036 *psqp = sqp->sq_next;
2037 2037 siginfofree(sqp);
2038 2038 } else
2039 2039 psqp = &sqp->sq_next;
2040 2040 }
2041 2041 }
2042 2042
/*
 * Insert a siginfo structure into a queue.
 * If a non-null thread pointer is passed then add to the thread queue,
 * otherwise add to the process queue.
 *
 * The function sigaddqins() is called with sigqueue already allocated.
 * It is called from sigaddqa() and sigaddq() below.
 *
 * The value of si_code implicitly indicates whether sigp is to be
 * explicitly queued, or to be queued to depth one.
 */
static void
sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
{
	sigqueue_t **psqp;
	int sig = sigqp->sq_info.si_signo;

	/* Mark the entry external if the sender is outside p's contract. */
	sigqp->sq_external = (curproc != &p0) &&
	    (curproc->p_ct_process != p->p_ct_process);

	/*
	 * issig_forreal() doesn't bother dequeueing signals if SKILLED
	 * is set, and even if it did, we would want to avoid situation
	 * (which would be unique to SIGKILL) where one thread dequeued
	 * the sigqueue_t and another executed psig(). So we create a
	 * separate stash for SIGKILL's sigqueue_t. Because a second
	 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
	 * if (and only if) it was non-extracontractual.
	 */
	if (sig == SIGKILL) {
		if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
			if (p->p_killsqp != NULL)
				siginfofree(p->p_killsqp);
			p->p_killsqp = sigqp;
			sigqp->sq_next = NULL;
		} else {
			siginfofree(sigqp);
		}
		return;
	}

	ASSERT(sig >= 1 && sig < NSIG);
	if (t != NULL)	/* directed to a thread */
		psqp = &t->t_sigqueue;
	else	/* directed to a process */
		psqp = &p->p_sigqueue;
	if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
	    sigismember(&p->p_siginfo, sig)) {
		/* Queueable signal: append at the tail, duplicates allowed. */
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
			;
	} else {
		/* Depth one: discard the new entry if sig is already queued. */
		for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
			if ((*psqp)->sq_info.si_signo == sig) {
				siginfofree(sigqp);
				return;
			}
		}
	}
	*psqp = sigqp;
	sigqp->sq_next = NULL;
}
2104 2104
2105 2105 /*
2106 2106 * The function sigaddqa() is called with sigqueue already allocated.
2107 2107 * If signal is ignored, discard but guarantee KILL and generation semantics.
2108 2108 * It is called from sigqueue() and other places.
2109 2109 */
2110 2110 void
2111 2111 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2112 2112 {
2113 2113 int sig = sigqp->sq_info.si_signo;
2114 2114
2115 2115 ASSERT(MUTEX_HELD(&p->p_lock));
2116 2116 ASSERT(sig >= 1 && sig < NSIG);
2117 2117
2118 2118 if (sig_discardable(p, sig))
2119 2119 siginfofree(sigqp);
2120 2120 else
2121 2121 sigaddqins(p, t, sigqp);
2122 2122
2123 2123 sigtoproc(p, t, sig);
2124 2124 }
2125 2125
2126 2126 /*
2127 2127 * Allocate the sigqueue_t structure and call sigaddqins().
2128 2128 */
2129 2129 void
2130 2130 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2131 2131 {
2132 2132 sigqueue_t *sqp;
2133 2133 int sig = infop->si_signo;
2134 2134
2135 2135 ASSERT(MUTEX_HELD(&p->p_lock));
2136 2136 ASSERT(sig >= 1 && sig < NSIG);
2137 2137
2138 2138 /*
2139 2139 * If the signal will be discarded by sigtoproc() or
2140 2140 * if the process isn't requesting siginfo and it isn't
2141 2141 * blocking the signal (it *could* change it's mind while
2142 2142 * the signal is pending) then don't bother creating one.
2143 2143 */
2144 2144 if (!sig_discardable(p, sig) &&
2145 2145 (sigismember(&p->p_siginfo, sig) ||
2146 2146 (curproc->p_ct_process != p->p_ct_process) ||
2147 2147 (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2148 2148 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2149 2149 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2150 2150 sqp->sq_func = NULL;
2151 2151 sqp->sq_next = NULL;
2152 2152 sigaddqins(p, t, sqp);
2153 2153 }
2154 2154 sigtoproc(p, t, sig);
2155 2155 }
2156 2156
/*
 * Handle stop-on-fault processing for the debugger. Returns 0
 * if the fault is cleared during the stop, nonzero if it isn't.
 */
int
stop_on_fault(uint_t fault, k_siginfo_t *sip)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(prismember(&p->p_fltmask, fault));

	/*
	 * Record current fault and siginfo structure so debugger can
	 * find it.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_curflt = (uchar_t)fault;
	lwp->lwp_siginfo = *sip;

	/* Stop this lwp; lwp_curflt may be cleared while we are stopped. */
	stop(PR_FAULTED, fault);

	/* Whatever remains in lwp_curflt after the stop is the result. */
	fault = lwp->lwp_curflt;
	lwp->lwp_curflt = 0;
	mutex_exit(&p->p_lock);
	return (fault);
}
2184 2184
2185 2185 void
2186 2186 sigorset(k_sigset_t *s1, const k_sigset_t *s2)
2187 2187 {
2188 2188 s1->__sigbits[0] |= s2->__sigbits[0];
2189 2189 s1->__sigbits[1] |= s2->__sigbits[1];
2190 2190 s1->__sigbits[2] |= s2->__sigbits[2];
2191 2191 }
2192 2192
2193 2193 void
2194 2194 sigandset(k_sigset_t *s1, const k_sigset_t *s2)
2195 2195 {
2196 2196 s1->__sigbits[0] &= s2->__sigbits[0];
2197 2197 s1->__sigbits[1] &= s2->__sigbits[1];
2198 2198 s1->__sigbits[2] &= s2->__sigbits[2];
2199 2199 }
2200 2200
2201 2201 void
2202 2202 sigdiffset(k_sigset_t *s1, const k_sigset_t *s2)
2203 2203 {
2204 2204 s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2205 2205 s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2206 2206 s1->__sigbits[2] &= ~(s2->__sigbits[2]);
2207 2207 }
2208 2208
/*
 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
 * if there are any signals the thread might take on return from the kernel.
 * If ksigset_t's were a single word, we would do:
 * return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
 */
int
sigcheck(proc_t *p, kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	/*
	 * If signals are blocked via the schedctl interface
	 * then we only check for the unmaskable signals.
	 * The unmaskable signal numbers should all be contained
	 * in __sigbits[0] and we assume this for speed.
	 */
#if (CANTMASK1 == 0 && CANTMASK2 == 0)
	if (tdp != NULL && tdp->sc_sigblock)
		return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
		    CANTMASK0);
#else
#error "fix me: CANTMASK1 and CANTMASK2 are not zero"
#endif

	/* see uts/common/sys/signal.h for why this must be true */
#if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
	/*
	 * Word-by-word expansion of ((p_sig | t_sig) & ~t_hold) over the
	 * three-word set; the last word is masked with FILLSET2 to ignore
	 * bits beyond MAXSIG.
	 */
	return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
	    ~t->t_hold.__sigbits[0]) |
	    ((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
	    ~t->t_hold.__sigbits[1]) |
	    (((p->p_sig.__sigbits[2] | t->t_sig.__sigbits[2]) &
	    ~t->t_hold.__sigbits[2]) & FILLSET2));
#else
#error "fix me: MAXSIG out of bounds"
#endif
}
2246 2246
/*
 * Mask out all of the current lwp's signals except SIGHUP, SIGINT,
 * SIGQUIT and SIGTERM, saving the previous hold mask in *smask so a
 * later sigunintr(smask) can restore it.  SIGINT is left deliverable
 * only when 'intable' is non-zero.  Also marks the lwp non-stoppable
 * (lwp_nostop) for the duration.
 */
void
sigintr(k_sigset_t *smask, int intable)
{
	proc_t *p;
	int owned;
	k_sigset_t lmask;		/* local copy of cantmask */
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
	 * and SIGTERM. (Preserving the existing masks).
	 * This function supports the -intr nfs and ufs mount option.
	 */

	/*
	 * don't do kernel threads
	 */
	if (lwp == NULL)
		return;

	/*
	 * get access to signal mask
	 */
	p = ttoproc(curthread);
	owned = mutex_owned(&p->p_lock);	/* this is filthy */
	if (!owned)
		mutex_enter(&p->p_lock);

	/*
	 * remember the current mask
	 */
	schedctl_finish_sigblock(curthread);
	*smask = curthread->t_hold;

	/*
	 * mask out all signals
	 */
	sigfillset(&curthread->t_hold);

	/*
	 * Unmask the non-maskable signals (e.g., KILL), as long as
	 * they aren't already masked (which could happen at exit).
	 * The first sigdiffset sets lmask to (cantmask & ~curhold). The
	 * second sets the current hold mask to (~0 & ~lmask), which reduces
	 * to (~cantmask | curhold).
	 */
	lmask = cantmask;
	sigdiffset(&lmask, smask);
	sigdiffset(&curthread->t_hold, &lmask);

	/*
	 * Re-enable HUP, QUIT, and TERM iff they were originally enabled
	 * Re-enable INT if it's originally enabled and the NFS mount option
	 * nointr is not set.
	 */
	if (!sigismember(smask, SIGHUP))
		sigdelset(&curthread->t_hold, SIGHUP);
	if (!sigismember(smask, SIGINT) && intable)
		sigdelset(&curthread->t_hold, SIGINT);
	if (!sigismember(smask, SIGQUIT))
		sigdelset(&curthread->t_hold, SIGQUIT);
	if (!sigismember(smask, SIGTERM))
		sigdelset(&curthread->t_hold, SIGTERM);

	/*
	 * release access to signal mask
	 */
	if (!owned)
		mutex_exit(&p->p_lock);

	/*
	 * Indicate that this lwp is not to be stopped.
	 */
	lwp->lwp_nostop++;

}
2323 2323
2324 2324 void
2325 2325 sigunintr(k_sigset_t *smask)
2326 2326 {
2327 2327 proc_t *p;
2328 2328 int owned;
2329 2329 klwp_t *lwp = ttolwp(curthread);
2330 2330
2331 2331 /*
2332 2332 * Reset previous mask (See sigintr() above)
2333 2333 */
2334 2334 if (lwp != NULL) {
2335 2335 lwp->lwp_nostop--; /* restore lwp stoppability */
2336 2336 p = ttoproc(curthread);
2337 2337 owned = mutex_owned(&p->p_lock); /* this is filthy */
2338 2338 if (!owned)
2339 2339 mutex_enter(&p->p_lock);
2340 2340 curthread->t_hold = *smask;
2341 2341 /* so unmasked signals will be seen */
2342 2342 curthread->t_sig_check = 1;
2343 2343 if (!owned)
2344 2344 mutex_exit(&p->p_lock);
2345 2345 }
2346 2346 }
2347 2347
2348 2348 void
2349 2349 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2350 2350 {
2351 2351 proc_t *p;
2352 2352 int owned;
2353 2353 /*
2354 2354 * Save current signal mask in oldmask, then
2355 2355 * set it to newmask.
2356 2356 */
2357 2357 if (ttolwp(curthread) != NULL) {
2358 2358 p = ttoproc(curthread);
2359 2359 owned = mutex_owned(&p->p_lock); /* this is filthy */
2360 2360 if (!owned)
2361 2361 mutex_enter(&p->p_lock);
2362 2362 schedctl_finish_sigblock(curthread);
2363 2363 if (oldmask != NULL)
2364 2364 *oldmask = curthread->t_hold;
2365 2365 curthread->t_hold = *newmask;
2366 2366 curthread->t_sig_check = 1;
2367 2367 if (!owned)
2368 2368 mutex_exit(&p->p_lock);
2369 2369 }
2370 2370 }
2371 2371
2372 2372 /*
2373 2373 * Return true if the signal number is in range
2374 2374 * and the signal code specifies signal queueing.
2375 2375 */
2376 2376 int
2377 2377 sigwillqueue(int sig, int code)
2378 2378 {
2379 2379 if (sig >= 0 && sig < NSIG) {
2380 2380 switch (code) {
2381 2381 case SI_QUEUE:
2382 2382 case SI_TIMER:
2383 2383 case SI_ASYNCIO:
2384 2384 case SI_MESGQ:
2385 2385 return (1);
2386 2386 }
2387 2387 }
2388 2388 return (0);
2389 2389 }
2390 2390
/*
 * The pre-allocated pool (with _SIGQUEUE_PREALLOC entries) is
 * allocated at the first sigqueue/signotify call.
 *
 * 'size' is the per-entry size and 'maxcount' is the allocation budget
 * enforced by sigqalloc() via sqb_count.  Returns the initialized pool
 * header; the entries live in the same allocation, just past the header.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
	size_t i;
	sigqueue_t *sq, *next;
	sigqhdr_t *sqh;

	/*
	 * Before the introduction of process.max-sigqueue-size
	 * _SC_SIGQUEUE_MAX had this static value.
	 */
#define	_SIGQUEUE_PREALLOC	32

	/* One allocation holds the header plus all pool entries. */
	i = (_SIGQUEUE_PREALLOC * size) + sizeof (sigqhdr_t);
	ASSERT(maxcount <= INT_MAX);
	sqh = kmem_alloc(i, KM_SLEEP);
	sqh->sqb_count = maxcount;	/* remaining allocation budget */
	sqh->sqb_maxcount = maxcount;
	sqh->sqb_size = i;	/* remembered for the eventual kmem_free() */
	sqh->sqb_pexited = 0;
	sqh->sqb_sent = 0;
	/* Thread the entries after the header into the free list. */
	sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
	for (i = _SIGQUEUE_PREALLOC - 1; i != 0; i--) {
		next = (sigqueue_t *)((uintptr_t)sq + size);
		sq->sq_next = next;
		sq = next;
	}
	sq->sq_next = NULL;
	cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
	return (sqh);
}
2427 2427
2428 2428 static void sigqrel(sigqueue_t *);
2429 2429
/*
 * Allocate a sigqueue/signotify structure from the per process
 * pre-allocated pool or allocate a new sigqueue/signotify structure
 * if the pre-allocated pool is exhausted.
 *
 * Returns NULL when sqh is NULL or the sqb_count budget is used up.
 * Pool-backed entries get sq_func = sigqrel so siginfofree() returns
 * them to the pool; overflow entries are kmem_alloc'd with
 * sq_func == NULL so siginfofree() simply frees them.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
	sigqueue_t *sq = NULL;

	ASSERT(MUTEX_HELD(&curproc->p_lock));

	if (sqh != NULL) {
		mutex_enter(&sqh->sqb_lock);
		if (sqh->sqb_count > 0) {
			sqh->sqb_count--;
			if (sqh->sqb_free == NULL) {
				/*
				 * The pre-allocated pool is exhausted.
				 */
				sq = kmem_alloc(sizeof (sigqueue_t), KM_SLEEP);
				sq->sq_func = NULL;
			} else {
				sq = sqh->sqb_free;
				sq->sq_func = sigqrel;
				sqh->sqb_free = sq->sq_next;
			}
			mutex_exit(&sqh->sqb_lock);
			/* Initialize the entry outside the pool lock. */
			bzero(&sq->sq_info, sizeof (k_siginfo_t));
			sq->sq_backptr = sqh;
			sq->sq_next = NULL;
			sq->sq_external = 0;
		} else {
			mutex_exit(&sqh->sqb_lock);
		}
	}
	return (sq);
}
2468 2468
/*
 * Return a sigqueue structure back to the pre-allocated pool.
 * Installed as sq_func on pool-backed entries and invoked through
 * siginfofree().  If the owning process has exited (sqb_pexited) and
 * this is the last outstanding entry (sqb_sent == 1), the whole pool
 * is destroyed here, completing the teardown sigqhdrfree() deferred.
 */
static void
sigqrel(sigqueue_t *sq)
{
	sigqhdr_t *sqh;

	/* make sure that p_lock of the affected process is held */

	sqh = (sigqhdr_t *)sq->sq_backptr;
	mutex_enter(&sqh->sqb_lock);
	if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
		/* Last reference after process exit: free the entire pool. */
		mutex_exit(&sqh->sqb_lock);
		cv_destroy(&sqh->sqb_cv);
		mutex_destroy(&sqh->sqb_lock);
		kmem_free(sqh, sqh->sqb_size);
	} else {
		/* Put the entry back on the free list, replenish the budget. */
		sqh->sqb_count++;
		sqh->sqb_sent--;
		sq->sq_next = sqh->sqb_free;
		sq->sq_backptr = NULL;
		sqh->sqb_free = sq;
		cv_signal(&sqh->sqb_cv);
		mutex_exit(&sqh->sqb_lock);
	}
}
2496 2496
2497 2497 /*
2498 2498 * Free up the pre-allocated sigqueue headers of sigqueue pool
2499 2499 * and signotify pool, if possible.
2500 2500 * Called only by the owning process during exec() and exit().
2501 2501 */
2502 2502 void
2503 2503 sigqfree(proc_t *p)
2504 2504 {
2505 2505 ASSERT(MUTEX_HELD(&p->p_lock));
2506 2506
2507 2507 if (p->p_sigqhdr != NULL) { /* sigqueue pool */
2508 2508 sigqhdrfree(p->p_sigqhdr);
2509 2509 p->p_sigqhdr = NULL;
2510 2510 }
2511 2511 if (p->p_signhdr != NULL) { /* signotify pool */
2512 2512 sigqhdrfree(p->p_signhdr);
2513 2513 p->p_signhdr = NULL;
2514 2514 }
2515 2515 }
2516 2516
2517 2517 /*
2518 2518 * Free up the pre-allocated header and sigq pool if possible.
2519 2519 */
2520 2520 void
2521 2521 sigqhdrfree(sigqhdr_t *sqh)
2522 2522 {
2523 2523 mutex_enter(&sqh->sqb_lock);
2524 2524 if (sqh->sqb_sent == 0) {
2525 2525 mutex_exit(&sqh->sqb_lock);
2526 2526 cv_destroy(&sqh->sqb_cv);
2527 2527 mutex_destroy(&sqh->sqb_lock);
2528 2528 kmem_free(sqh, sqh->sqb_size);
2529 2529 } else {
2530 2530 sqh->sqb_pexited = 1;
2531 2531 mutex_exit(&sqh->sqb_lock);
2532 2532 }
2533 2533 }
2534 2534
2535 2535 /*
2536 2536 * Free up a single sigqueue structure.
2537 2537 * No other code should free a sigqueue directly.
2538 2538 */
2539 2539 void
2540 2540 siginfofree(sigqueue_t *sqp)
2541 2541 {
2542 2542 if (sqp != NULL) {
2543 2543 if (sqp->sq_func != NULL)
2544 2544 (sqp->sq_func)(sqp);
2545 2545 else
2546 2546 kmem_free(sqp, sizeof (sigqueue_t));
2547 2547 }
2548 2548 }
2549 2549
/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp. Called from trap().
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
	proc_t *p = ttoproc(curthread);
	int sig = ip->si_signo;
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	ASSERT(sig > 0 && sig < NSIG);

	/* Give DTrace a chance to react to the synchronous signal. */
	if (curthread->t_dtrace_on)
		dtrace_safe_synchronous_signal();

	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
	/*
	 * Avoid a possible infinite loop if the lwp is holding the
	 * signal generated by a trap of a restartable instruction or
	 * if the signal so generated is being ignored by the process.
	 */
	if (restartable &&
	    (sigismember(&curthread->t_hold, sig) ||
	    p->p_user.u_signal[sig-1] == SIG_IGN)) {
		/* Force default disposition so the trap can't loop forever. */
		sigdelset(&curthread->t_hold, sig);
		p->p_user.u_signal[sig-1] = SIG_DFL;
		sigdelset(&p->p_ignore, sig);
	}
	bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
	sigaddqa(p, curthread, sqp);
	mutex_exit(&p->p_lock);
}
2584 2584
/*
 * Dispatch the real time profiling signal in the traditional way,
 * honoring all of the /proc tracing mechanism built into issig().
 */
static void
realsigprof_slow(int sysnum, int nsysarg, int error)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	k_siginfo_t *sip = &lwp->lwp_siginfo;
	void (*func)();

	mutex_enter(&p->p_lock);
	func = PTOU(p)->u_signal[SIGPROF - 1];
	/* Profiling torn down or SIGPROF not handled: clear state and bail. */
	if (p->p_rprof_cyclic == CYCLIC_NONE ||
	    func == SIG_DFL || func == SIG_IGN) {
		bzero(t->t_rprof, sizeof (*t->t_rprof));
		mutex_exit(&p->p_lock);
		return;
	}
	/* SIGPROF is currently blocked by this thread: skip delivery. */
	if (sigismember(&t->t_hold, SIGPROF)) {
		mutex_exit(&p->p_lock);
		return;
	}
	/* Fill in the profiling siginfo the handler will see. */
	sip->si_signo = SIGPROF;
	sip->si_code = PROF_SIG;
	sip->si_errno = error;
	hrt2ts(gethrtime(), &sip->si_tstamp);
	sip->si_syscall = sysnum;
	sip->si_nsysarg = nsysarg;
	sip->si_fault = lwp->lwp_lastfault;
	sip->si_faddr = lwp->lwp_lastfaddr;
	lwp->lwp_lastfault = 0;
	lwp->lwp_lastfaddr = NULL;
	/* Post the signal, then deliver it via the normal issig()/psig() path. */
	sigtoproc(p, t, SIGPROF);
	mutex_exit(&p->p_lock);
	ASSERT(lwp->lwp_cursig == 0);
	if (issig(FORREAL))
		psig();
	sip->si_signo = 0;
	bzero(t->t_rprof, sizeof (*t->t_rprof));
}
2628 2628
/*
 * We are not tracing the SIGPROF signal, or doing any other unnatural
 * acts, like watchpoints, so dispatch the real time profiling signal
 * directly, bypassing all of the overhead built into issig().
 */
static void
realsigprof_fast(int sysnum, int nsysarg, int error)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	k_siginfo_t *sip = &lwp->lwp_siginfo;
	void (*func)();
	int rc;
	int code;

	/*
	 * We don't need to acquire p->p_lock here;
	 * we are manipulating thread-private data.
	 */
	func = PTOU(p)->u_signal[SIGPROF - 1];
	/* Profiling torn down or SIGPROF not handled: clear state and bail. */
	if (p->p_rprof_cyclic == CYCLIC_NONE ||
	    func == SIG_DFL || func == SIG_IGN) {
		bzero(t->t_rprof, sizeof (*t->t_rprof));
		return;
	}
	/* A signal is already in flight, or SIGPROF is blocked: skip. */
	if (lwp->lwp_cursig != 0 ||
	    lwp->lwp_curinfo != NULL ||
	    sigismember(&t->t_hold, SIGPROF)) {
		return;
	}
	/* Fill in the profiling siginfo the handler will see. */
	sip->si_signo = SIGPROF;
	sip->si_code = PROF_SIG;
	sip->si_errno = error;
	hrt2ts(gethrtime(), &sip->si_tstamp);
	sip->si_syscall = sysnum;
	sip->si_nsysarg = nsysarg;
	sip->si_fault = lwp->lwp_lastfault;
	sip->si_faddr = lwp->lwp_lastfaddr;
	lwp->lwp_lastfault = 0;
	lwp->lwp_lastfaddr = NULL;
	/* Save the old mask (unless already saved) and block per sigaction. */
	if (t->t_flag & T_TOMASK)
		t->t_flag &= ~T_TOMASK;
	else
		lwp->lwp_sigoldmask = t->t_hold;
	sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
	if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
		sigaddset(&t->t_hold, SIGPROF);
	lwp->lwp_extsig = 0;
	lwp->lwp_ru.nsignals++;
	/* Arrange the user handler invocation for the process data model. */
	if (p->p_model == DATAMODEL_NATIVE)
		rc = sendsig(SIGPROF, sip, func);
#ifdef _SYSCALL32_IMPL
	else
		rc = sendsig32(SIGPROF, sip, func);
#endif	/* _SYSCALL32_IMPL */
	sip->si_signo = 0;
	bzero(t->t_rprof, sizeof (*t->t_rprof));
	if (rc == 0) {
		/*
		 * sendsig() failed; we must dump core with a SIGSEGV.
		 * See psig(). This code is copied from there.
		 */
		lwp->lwp_cursig = SIGSEGV;
		code = CLD_KILLED;
		proc_is_exiting(p);
		if (exitlwps(1) != 0) {
			mutex_enter(&p->p_lock);
			lwp_exit();
		}
		if (audit_active == C2AUDIT_LOADED)
			audit_core_start(SIGSEGV);
		if (core(SIGSEGV, 0) == 0)
			code = CLD_DUMPED;
		if (audit_active == C2AUDIT_LOADED)
			audit_core_finish(code);
		exit(code, SIGSEGV);
	}
}
2708 2708
2709 2709 /*
2710 2710 * Arrange for the real time profiling signal to be dispatched.
2711 2711 */
2712 2712 void
2713 2713 realsigprof(int sysnum, int nsysarg, int error)
2714 2714 {
2715 2715 kthread_t *t = curthread;
2716 2716 proc_t *p = ttoproc(t);
2717 2717
2718 2718 if (t->t_rprof->rp_anystate == 0)
2719 2719 return;
2720 2720
2721 2721 schedctl_finish_sigblock(t);
2722 2722
2723 2723 /* test for any activity that requires p->p_lock */
2724 2724 if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2725 2725 sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2726 2726 /* do it the classic slow way */
2727 2727 realsigprof_slow(sysnum, nsysarg, error);
2728 2728 } else {
2729 2729 /* do it the cheating-a-little fast way */
2730 2730 realsigprof_fast(sysnum, nsysarg, error);
2731 2731 }
2732 2732 }
2733 2733
2734 2734 #ifdef _SYSCALL32_IMPL
2735 2735
/*
 * It's tricky to transmit a sigval between 32-bit and 64-bit
 * process, since in the 64-bit world, a pointer and an integer
 * are different sizes. Since we're constrained by the standards
 * world not to change the types, and it's unclear how useful it is
 * to send pointers between address spaces this way, we preserve
 * the 'int' interpretation for 32-bit processes interoperating
 * with 64-bit processes. The full semantics (pointers or integers)
 * are available for N-bit processes interoperating with N-bit
 * processes.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int32_t)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	/* Kernel-generated: the remaining fields depend on the signal. */
	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		/* Fault signals: narrow kernel pointers to caddr32_t. */
		dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
2817 2817
/*
 * Inverse of siginfo_kto32(): widen a 32-bit siginfo into the kernel's
 * native k_siginfo_t.  The same caveats about sigval interpretation
 * apply (see the comment above siginfo_kto32()).
 */
void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
	bzero(dest, sizeof (*dest));

	/*
	 * The absolute minimum content is si_signo and si_code.
	 */
	dest->si_signo = src->si_signo;
	if ((dest->si_code = src->si_code) == SI_NOINFO)
		return;

	/*
	 * A siginfo generated by user level is structured
	 * differently from one generated by the kernel.
	 */
	if (SI_FROMUSER(src)) {
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		if (SI_CANQUEUE(src->si_code))
			dest->si_value.sival_int =
			    (int)src->si_value.sival_int;
		return;
	}

	dest->si_errno = src->si_errno;

	/* Kernel-generated: the remaining fields depend on the signal. */
	switch (src->si_signo) {
	default:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_uid = src->si_uid;
		dest->si_value.sival_int = (int)src->si_value.sival_int;
		break;
	case SIGCLD:
		dest->si_pid = src->si_pid;
		dest->si_ctid = src->si_ctid;
		dest->si_zoneid = src->si_zoneid;
		dest->si_status = src->si_status;
		dest->si_stime = src->si_stime;
		dest->si_utime = src->si_utime;
		break;
	case SIGSEGV:
	case SIGBUS:
	case SIGILL:
	case SIGTRAP:
	case SIGFPE:
	case SIGEMT:
		/* Fault signals: widen 32-bit addresses to native pointers. */
		dest->si_addr = (void *)(uintptr_t)src->si_addr;
		dest->si_trapno = src->si_trapno;
		dest->si_pc = (void *)(uintptr_t)src->si_pc;
		break;
	case SIGPOLL:
	case SIGXFSZ:
		dest->si_fd = src->si_fd;
		dest->si_band = src->si_band;
		break;
	case SIGPROF:
		dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
		dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
		dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
		dest->si_syscall = src->si_syscall;
		dest->si_nsysarg = src->si_nsysarg;
		dest->si_fault = src->si_fault;
		break;
	}
}
2888 2888
2889 2889 #endif /* _SYSCALL32_IMPL */
↓ open down ↓ |
2786 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX