/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/param.h>
#include <sys/vmparam.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/user.h>
#include <sys/privregs.h>
#include <sys/psw.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/modctl.h>
#include <sys/var.h>
#include <sys/inline.h>
#include <sys/syscall.h>
#include <sys/ucontext.h>
#include <sys/cpuvar.h>
#include <sys/siginfo.h>
#include <sys/trap.h>
#include <sys/vtrace.h>
#include <sys/sysinfo.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <c2/audit.h>
#include <sys/aio_impl.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/copyops.h>
#include <sys/priv.h>
#include <sys/msacct.h>

int syscalltrace = 0;
#ifdef SYSCALLTRACE
static kmutex_t	systrace_lock;		/* syscall tracing lock */
#else
#define	syscalltrace 0
#endif /* SYSCALLTRACE */
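
/*
 * Note: when SYSCALLTRACE is not defined, syscalltrace is a compile-time
 * constant zero, so expressions such as (t->t_pre_sys | syscalltrace)
 * below reduce to a plain t_pre_sys test at no runtime cost.
 */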

typedef	int64_t (*llfcn_t)();	/* function returning long long */

int pre_syscall(void);
void post_syscall(long rval1, long rval2);
static krwlock_t *lock_syscall(struct sysent *, uint_t);
void deferred_singlestep_trap(caddr_t);

#ifdef _SYSCALL32_IMPL
#define	LWP_GETSYSENT(lwp)	\
	(lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ? sysent : sysent32)
#else
#define	LWP_GETSYSENT(lwp)	(sysent)
#endif
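
/*
 * Note: LWP_GETSYSENT() selects the syscall table that matches the lwp's
 * data model: 64-bit lwps use sysent, 32-bit lwps use sysent32 (only when
 * the kernel is built with _SYSCALL32_IMPL).
 */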

/*
 * If watchpoints are active, don't make copying in of
 * system call arguments take a read watchpoint trap.
 */
static int
copyin_args(struct regs *rp, long *ap, uint_t nargs)
{
	greg_t *sp = 1 + (greg_t *)rp->r_sp;	/* skip ret addr */

	ASSERT(nargs <= MAXSYSARGS);

	return (copyin_nowatch(sp, ap, nargs * sizeof (*sp)));
}

#if defined(_SYSCALL32_IMPL)
static int
copyin_args32(struct regs *rp, long *ap, uint_t nargs)
{
	greg32_t *sp = 1 + (greg32_t *)rp->r_sp;	/* skip ret addr */
	uint32_t a32[MAXSYSARGS];
	int rc;

	ASSERT(nargs <= MAXSYSARGS);

	if ((rc = copyin_nowatch(sp, a32, nargs * sizeof (*sp))) == 0) {
		uint32_t *a32p = &a32[0];

		while (nargs--)
			*ap++ = (ulong_t)*a32p++;
	}
	return (rc);
}
#define	COPYIN_ARGS32	copyin_args32
#else
#define	COPYIN_ARGS32	copyin_args
#endif
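
/*
 * Note: copyin_args32() widens each 32-bit user argument to a long via
 * zero-extension, so syscall handlers always see clean 64-bit values
 * regardless of the caller's data model.
 */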

/*
 * Error handler for system calls where arg copy gets fault.
 */
static longlong_t
syscall_err()
{
	return (0);
}

/*
 * Corresponding sysent entry to allow syscall_entry caller
 * to invoke syscall_err.
 */
static struct sysent sysent_err = {
	0, SE_32RVAL1, NULL, NULL, (llfcn_t)syscall_err
};
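
/*
 * Note: sysent_err has sy_narg == 0, so no argument copyin is attempted
 * for it; syscall_err() simply returns 0, and the errno already posted
 * via set_errno() is delivered to the user by post_syscall().
 */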

/*
 * Called from syscall() when a non-trivial 32-bit system call occurs.
 * Sets up the args and returns a pointer to the handler.
 */
struct sysent *
syscall_entry(kthread_t *t, long *argp)
{
	klwp_t *lwp = ttolwp(t);
	struct regs *rp = lwptoregs(lwp);
	unsigned int code;
	struct sysent *callp;
	struct sysent *se = LWP_GETSYSENT(lwp);
	int error = 0;
	uint_t nargs;

	ASSERT(t == curthread && curthread->t_schedflag & TS_DONT_SWAP);

	lwp->lwp_ru.sysc++;
	lwp->lwp_eosys = NORMALRETURN;	/* assume this will be normal */

	/*
	 * Set lwp_ap to point to the args, even if none are needed for this
	 * system call. This is for the loadable-syscall case where the
	 * number of args won't be known until the system call is loaded, and
	 * also maintains a non-NULL lwp_ap setup for get_syscall_args(). Note
	 * that lwp_ap MUST be set to a non-NULL value _BEFORE_ t_sysnum is
	 * set to non-zero; otherwise get_syscall_args(), seeing a non-zero
	 * t_sysnum for this thread, will charge ahead and dereference lwp_ap.
	 */
	lwp->lwp_ap = argp;		/* for get_syscall_args */

	code = rp->r_r0;
	t->t_sysnum = (short)code;
	callp = code >= NSYSCALL ? &nosys_ent : se + code;

	if ((t->t_pre_sys | syscalltrace) != 0) {
		error = pre_syscall();

		/*
		 * pre_syscall() has taken care so that lwp_ap is current;
		 * it either points to syscall-entry-saved amd64 regs,
		 * or it points to lwp_arg[], which has been re-copied from
		 * the ia32 ustack, but either way, it's a current copy after
		 * /proc has possibly mucked with the syscall args.
		 */

		if (error)
			return (&sysent_err);	/* use dummy handler */
	}

	/*
	 * Fetch the system call arguments to the kernel stack copy used
	 * for syscall handling.
	 * Note: for loadable system calls the number of arguments required
	 * may not be known at this point, and will be zero if the system call
	 * was never loaded. Once the system call has been loaded, the number
	 * of args is not allowed to be changed.
	 */
	if ((nargs = (uint_t)callp->sy_narg) != 0 &&
	    COPYIN_ARGS32(rp, argp, nargs)) {
		(void) set_errno(EFAULT);
		return (&sysent_err);	/* use dummy handler */
	}

	return (callp);		/* return sysent entry for caller */
}
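
/*
 * Note: the caller is expected to invoke the returned sysent's sy_callc
 * handler with the arguments now staged in argp; see dosyscall() below
 * for an example of this contract.
 */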

void
syscall_exit(kthread_t *t, long rval1, long rval2)
{
	/*
	 * Handle signals and other post-call events if necessary.
	 */
	if ((t->t_post_sys_ast | syscalltrace) == 0) {
		klwp_t *lwp = ttolwp(t);
		struct regs *rp = lwptoregs(lwp);

		/*
		 * Normal return.
		 * Clear error indication and set return values.
		 */
		rp->r_ps &= ~PS_C;	/* reset carry bit */
		rp->r_r0 = rval1;
		rp->r_r1 = rval2;
		lwp->lwp_state = LWP_USER;
	} else {
		post_syscall(rval1, rval2);
	}
	t->t_sysnum = 0;	/* invalidate args */
}

/*
 * Perform pre-system-call processing, including stopping for tracing,
 * auditing, etc.
 *
 * This routine is called only if the t_pre_sys flag is set. Any condition
 * requiring pre-syscall handling must set the t_pre_sys flag. If the
 * condition is persistent, this routine will repost t_pre_sys.
 */
int
pre_syscall()
{
	kthread_t *t = curthread;
	unsigned code = t->t_sysnum;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int repost;

	t->t_pre_sys = repost = 0;	/* clear pre-syscall processing flag */

	ASSERT(t->t_schedflag & TS_DONT_SWAP);

#if defined(DEBUG)
	/*
	 * On the i386 kernel, lwp_ap points at the piece of the thread
	 * stack that we copy the user's arguments into.
	 *
	 * On the amd64 kernel, the syscall arguments in the rdi..r9
	 * registers should be pointed at by lwp_ap. If the args need to
	 * be copied so that those registers can be changed without losing
	 * the ability to get the args for /proc, they can be saved by
	 * save_syscall_args(), and lwp_ap will be restored by post_syscall().
	 */
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		ASSERT(lwp->lwp_ap == (long *)&lwptoregs(lwp)->r_rdi);
	} else {
#endif
		ASSERT((caddr_t)lwp->lwp_ap > t->t_stkbase &&
		    (caddr_t)lwp->lwp_ap < t->t_stk);
	}
#endif /* DEBUG */

	/*
	 * Make sure the thread is holding the latest credentials for the
	 * process. The credentials in the process right now apply to this
	 * thread for the entire system call.
	 */
	if (t->t_cred != p->p_cred) {
		cred_t *oldcred = t->t_cred;
		/*
		 * DTrace accesses t_cred in probe context. t_cred must
		 * always be either NULL, or point to a valid, allocated cred
		 * structure.
		 */
		t->t_cred = crgetcred();
		crfree(oldcred);
	}

	/*
	 * From the proc(4) manual page:
	 * When entry to a system call is being traced, the traced process
	 * stops after having begun the call to the system but before the
	 * system call arguments have been fetched from the process.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_entrymask, code)) {
			mutex_enter(&p->p_lock);
			/*
			 * Recheck stop condition, now that lock is held.
			 */
			if (PTOU(p)->u_systrap &&
			    prismember(&PTOU(p)->u_entrymask, code)) {
				stop(PR_SYSENTRY, code);

				/*
				 * /proc may have modified syscall args,
				 * either in regs for amd64 or on ustack
				 * for ia32. Either way, arrange to
				 * copy them again, both for the syscall
				 * handler and for other consumers in
				 * post_syscall (like audit). Here, we
				 * only do amd64, and just set lwp_ap
				 * back to the kernel-entry stack copy;
				 * the syscall ml code redoes
				 * move-from-regs to set up for the
				 * syscall handler after we return. For
				 * ia32, save_syscall_args() below makes
				 * an lwp_ap-accessible copy.
				 */
#if defined(_LP64)
				if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
					lwp->lwp_argsaved = 0;
					lwp->lwp_ap =
					    (long *)&lwptoregs(lwp)->r_rdi;
				}
#endif
			}
			mutex_exit(&p->p_lock);
		}
		repost = 1;
	}

	/*
	 * ia32 kernel, or ia32 proc on amd64 kernel: keep args in
	 * lwp_arg for post-syscall processing, regardless of whether
	 * they might have been changed in /proc above.
	 */
#if defined(_LP64)
	if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE)
#endif
		(void) save_syscall_args();

	if (lwp->lwp_sysabort) {
		/*
		 * lwp_sysabort may have been set via /proc while the process
		 * was stopped on PR_SYSENTRY. If so, abort the system call.
		 * Override any error from the copyin() of the arguments.
		 */
		lwp->lwp_sysabort = 0;
		(void) set_errno(EINTR);	/* forces post_sys */
		t->t_pre_sys = 1;	/* repost anyway */
		return (1);	/* don't do system call, return EINTR */
	}

	/*
	 * begin auditing for this syscall if the c2audit module is loaded
	 * and auditing is enabled
	 */
	if (audit_active == C2AUDIT_LOADED) {
		uint32_t auditing = au_zone_getstate(NULL);

		if (auditing & AU_AUDIT_MASK) {
			int error;
			if (error = audit_start(T_SYSCALL, code, auditing, \
			    0, lwp)) {
				t->t_pre_sys = 1;	/* repost anyway */
				(void) set_errno(error);
				return (1);
			}
			repost = 1;
		}
	}

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_1(syscall_start, "syscall thread", /* CSTYLED */,
		    tnf_sysnum, sysnum, t->t_sysnum);
		t->t_post_sys = 1;	/* make sure post_syscall runs */
		repost = 1;
	}
#endif /* NPROBE */

#ifdef SYSCALLTRACE
	if (syscalltrace) {
		int i;
		long *ap;
		char *cp;
		char *sysname;
		struct sysent *callp;

		if (code >= NSYSCALL)
			callp = &nosys_ent;	/* nosys has no args */
		else
			callp = LWP_GETSYSENT(lwp) + code;
		(void) save_syscall_args();
		mutex_enter(&systrace_lock);
		printf("%d: ", p->p_pid);
		if (code >= NSYSCALL) {
			printf("0x%x", code);
		} else {
			sysname = mod_getsysname(code);
			printf("%s[0x%x/0x%p]", sysname == NULL ? "NULL" :
			    sysname, code, callp->sy_callc);
		}
		cp = "(";
		for (i = 0, ap = lwp->lwp_ap; i < callp->sy_narg; i++, ap++) {
			printf("%s%lx", cp, *ap);
			cp = ", ";
		}
		if (i)
			printf(")");
		printf(" %s id=0x%p\n", PTOU(p)->u_comm, curthread);
		mutex_exit(&systrace_lock);
	}
#endif /* SYSCALLTRACE */

	/*
	 * If there was a continuing reason for pre-syscall processing,
	 * set the t_pre_sys flag for the next system call.
	 */
	if (repost)
		t->t_pre_sys = 1;
	lwp->lwp_error = 0;	/* for old drivers */
	lwp->lwp_badpriv = PRIV_NONE;
	return (0);
}


/*
 * Post-syscall processing. Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag is set.
 * Any condition requiring post-syscall handling must set one of these.
 * If the condition is persistent, this routine will repost t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t error;
	uint_t code = t->t_sysnum;
	int repost = 0;
	int proc_stop = 0;	/* non-zero if stopping */
	int sigprof = 0;	/* non-zero if sending SIGPROF */

	t->t_post_sys = 0;

	error = lwp->lwp_errno;

	/*
	 * Code can be zero if this is a new LWP returning after a forkall(),
	 * other than the LWP that matches the one in the parent which called
	 * forkall(). In these LWPs, skip most of post-syscall activity.
	 */
	if (code == 0)
		goto sig_check;
	/*
	 * If the trace flag is set, mark the lwp to take a single-step trap
	 * on return to user level (below). The x86 lcall interface and
	 * sysenter have already done this, and turned off the flag, but
	 * the amd64 syscall interface has not.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		aston(curthread);
	}
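
	/*
	 * Note: DEBUG_PENDING defers the single-step report until just
	 * before the return to user level; it is consumed further below,
	 * where deferred_singlestep_trap() is called. PS_T is the x86
	 * trap flag in the saved flags register.
	 */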

	/* put out audit record for this syscall */
	if (AU_AUDITING()) {
		rval_t rval;

		/* XX64 -- truncation of 64-bit return values? */
		rval.r_val1 = (int)rval1;
		rval.r_val2 = (int)rval2;
		audit_finish(T_SYSCALL, code, error, &rval);
		repost = 1;
	}

	if (curthread->t_pdmsg != NULL) {
		char *m = curthread->t_pdmsg;

		uprintf("%s", m);
		kmem_free(m, strlen(m) + 1);
		curthread->t_pdmsg = NULL;
	}

	/*
	 * If we're going to stop for /proc tracing, set the flag and
	 * save the arguments so that the return values don't smash them.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_exitmask, code)) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
				(void) save_syscall_args();
			proc_stop = 1;
		}
		repost = 1;
	}

	/*
	 * Similarly check to see if SIGPROF might be sent.
	 */
	if (curthread->t_rprof != NULL &&
	    curthread->t_rprof->rp_anystate != 0) {
		if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
			(void) save_syscall_args();
		sigprof = 1;
	}

	if (lwp->lwp_eosys == NORMALRETURN) {
		if (error == 0) {
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf(
				    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
				    p->p_pid, rval1, rval2, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			rp->r_ps &= ~PS_C;
			rp->r_r0 = rval1;
			rp->r_r1 = rval2;
		} else {
			int sig;
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf("%d: error=%d, id 0x%p\n",
				    p->p_pid, error, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			if (error == EINTR && t->t_activefd.a_stale)
				error = EBADF;
			if (error == EINTR &&
			    (sig = lwp->lwp_cursig) != 0 &&
			    sigismember(&PTOU(p)->u_sigrestart, sig) &&
			    PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
			    PTOU(p)->u_signal[sig - 1] != SIG_IGN)
				error = ERESTART;
			rp->r_r0 = error;
			rp->r_ps |= PS_C;
		}
	}
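
	/*
	 * Note: this is the error-return convention for x86 system calls:
	 * the carry flag (PS_C) is set in the saved flags and r0 holds the
	 * errno value; the user-level syscall stubs (e.g., in libc) test
	 * carry to decide whether r0 holds a result or an errno.
	 */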

	/*
	 * From the proc(4) manual page:
	 * When exit from a system call is being traced, the traced process
	 * stops on completion of the system call just prior to checking for
	 * signals and returning to user level. At this point all return
	 * values have been stored into the traced process's saved registers.
	 */
	if (proc_stop) {
		mutex_enter(&p->p_lock);
		if (PTOU(p)->u_systrap &&
		    prismember(&PTOU(p)->u_exitmask, code))
			stop(PR_SYSEXIT, code);
		mutex_exit(&p->p_lock);
	}

	/*
	 * If we are the parent returning from a successful
	 * vfork, wait for the child to exec or exit.
	 * This code must be here and not in the bowels of the system
	 * so that /proc can intercept exit from vfork in a timely way.
	 */
	if (t->t_flag & T_VFPARENT) {
		ASSERT(code == SYS_vfork || code == SYS_forksys);
		ASSERT(rp->r_r1 == 0 && error == 0);
		vfwait((pid_t)rval1);
		t->t_flag &= ~T_VFPARENT;
	}

	/*
	 * If profiling is active, bill the current PC in user-land
	 * and keep reposting until profiling is disabled.
	 */
	if (p->p_prof.pr_scale) {
		if (lwp->lwp_oweupc)
			profil_tick(rp->r_pc);
		repost = 1;
	}

sig_check:
	/*
	 * Reset flag for next time.
	 * We must do this after stopping on PR_SYSEXIT
	 * because /proc uses the information in lwp_eosys.
	 */
	lwp->lwp_eosys = NORMALRETURN;
	clear_stale_fd();
	t->t_flag &= ~T_FORKALL;

	if (t->t_astflag | t->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST. This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(t);
		/*
		 * If a single-step trap occurred on a syscall (see trap())
		 * recognize it now. Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		t->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 * Note we will still take the lock and check the binding
		 * if the condition was true without the lock held. This
		 * prevents lock contention among threads owned by the
		 * same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests on the special kaio poll queue,
		 * copyout their results to user memory.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop. holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt(). That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG_PENDING
		 * evaluate true must set t_sig_check afterwards.
		 */
		if (ISSIG_PENDING(t, lwp, p)) {
			if (issig(FORREAL))
				psig();
			t->t_sig_check = 1;	/* recheck next time */
		}

		if (sigprof) {
			int nargs = (code > 0 && code < NSYSCALL)?
			    LWP_GETSYSENT(lwp)[code].sy_narg : 0;
			realsigprof(code, nargs, error);
			t->t_sig_check = 1;	/* recheck next time */
		}

		/*
		 * If a performance counter overflow interrupt was
		 * delivered *during* the syscall, then re-enable the
		 * AST so that we take a trip through trap() to cause
		 * the SIGEMT to be delivered.
		 */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
			aston(t);

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

	lwp->lwp_errno = 0;	/* clear error for next time */

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
		    tnf_long, rval1, rval1,
		    tnf_long, rval2, rval2,
		    tnf_long, errno, (long)error);
		repost = 1;
	}
#endif /* NPROBE */

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point. Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 *
	 * XXX Sampled times past this point are charged to the user.
	 */
	lwp->lwp_state = LWP_USER;

	if (t->t_trapret) {
		t->t_trapret = 0;
		thread_lock(t);
		CL_TRAPRET(t);
		thread_unlock(t);
	}
	if (CPU->cpu_runrun || t->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();

	lwp->lwp_errno = 0;	/* clear error for next time */

	/*
	 * The thread lock must be held in order to clear sysnum and reset
	 * lwp_ap atomically with respect to other threads in the system that
	 * may be looking at the args via lwp_ap from get_syscall_args().
	 */

	thread_lock(t);
	t->t_sysnum = 0;	/* no longer in a system call */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		/*
		 * In case the args were copied to the lwp, reset the
		 * pointer so the next syscall will have the right
		 * lwp_ap pointer.
		 */
		lwp->lwp_ap = (long *)&rp->r_rdi;
	} else {
#endif
		lwp->lwp_ap = NULL;	/* reset on every syscall entry */
	}
	thread_unlock(t);

	lwp->lwp_argsaved = 0;

	/*
	 * If there was a continuing reason for post-syscall processing,
	 * set the t_post_sys flag for the next system call.
	 */
	if (repost)
		t->t_post_sys = 1;

	/*
	 * If there is a ustack registered for this lwp, and the stack rlimit
	 * has been altered, read in the ustack. If the saved stack rlimit
	 * matches the bounds of the ustack, update the ustack to reflect
	 * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
	 * stack checking by setting the size to 0.
	 */
	if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
		rlim64_t new_size;
		caddr_t top;
		stack_t stk;
		struct rlimit64 rl;

		mutex_enter(&p->p_lock);
		new_size = p->p_stk_ctl;
		top = p->p_usrstack;
		(void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
		mutex_exit(&p->p_lock);

		if (rl.rlim_cur == RLIM64_INFINITY)
			new_size = 0;

		if (copyin((stack_t *)lwp->lwp_ustack, &stk,
		    sizeof (stack_t)) == 0 &&
		    (stk.ss_size == lwp->lwp_old_stk_ctl ||
		    stk.ss_size == 0) &&
		    stk.ss_sp == top - stk.ss_size) {
			stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
			    stk.ss_size - (uintptr_t)new_size);
			stk.ss_size = new_size;

			(void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		lwp->lwp_old_stk_ctl = 0;
	}
}

/*
 * Called from post_syscall() when a deferred singlestep is to be taken.
 */
void
deferred_singlestep_trap(caddr_t pc)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	pcb_t *pcb = &lwp->lwp_pcb;
	uint_t fault = 0;
	k_siginfo_t siginfo;

	bzero(&siginfo, sizeof (siginfo));

	/*
	 * If both NORMAL_STEP and WATCH_STEP are in
	 * effect, give precedence to WATCH_STEP.
	 * If neither is set, user must have set the
	 * PS_T bit in %efl; treat this as NORMAL_STEP.
	 */
	if ((fault = undo_watch_step(&siginfo)) == 0 &&
	    ((pcb->pcb_flags & NORMAL_STEP) ||
	    !(pcb->pcb_flags & WATCH_STEP))) {
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_TRACE;
		siginfo.si_addr = pc;
		fault = FLTTRACE;
	}
	pcb->pcb_flags &= ~(DEBUG_PENDING|NORMAL_STEP|WATCH_STEP);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = siginfo.si_addr;
		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp. Otherwise just
		 * deliver the associated signal.
		 */
		if (prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, &siginfo) == 0)
			siginfo.si_signo = 0;
	}

	if (siginfo.si_signo)
		trapsig(&siginfo, 1);
}

/*
 * nonexistent system call-- signal lwp (may want to handle it)
 * flag error if lwp won't see signal immediately
 */
int64_t
nosys(void)
{
	tsignal(curthread, SIGSYS);
	return (set_errno(ENOSYS));
}

int
nosys32(void)
{
	return (nosys());
}

/*
 * Execute a 32-bit system call on behalf of the current thread.
 */
void
dosyscall(void)
{
	/*
	 * Need space on the stack to store syscall arguments.
	 */
	long syscall_args[MAXSYSARGS];
	struct sysent *se;
	int64_t ret;

	syscall_mstate(LMS_TRAP, LMS_SYSTEM);

	ASSERT(curproc->p_model == DATAMODEL_ILP32);

	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(CPU, sys, syscall, 1);
	CPU_STATS_EXIT_K();

	se = syscall_entry(curthread, syscall_args);

	/*
	 * syscall_entry() copied all 8 arguments into syscall_args.
	 */
	ret = se->sy_callc(syscall_args[0], syscall_args[1], syscall_args[2],
	    syscall_args[3], syscall_args[4], syscall_args[5], syscall_args[6],
	    syscall_args[7]);

	syscall_exit(curthread, (int)ret & 0xffffffffu, (int)(ret >> 32));
	syscall_mstate(LMS_SYSTEM, LMS_TRAP);
}
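
/*
 * Note: the 64-bit result is split across the two return registers per
 * the 32-bit convention: the low 32 bits go out in r0 (%eax) as rval1
 * and the high 32 bits in r1 (%edx) as rval2.
 */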

/*
 * Get the arguments to the current system call. See comment atop
 * save_syscall_args() regarding lwp_ap usage.
 */

uint_t
get_syscall_args(klwp_t *lwp, long *argp, int *nargsp)
{
	kthread_t *t = lwptot(lwp);
	ulong_t mask = 0xfffffffful;
	uint_t code;
	long *ap;
	int nargs;

#if defined(_LP64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
		mask = 0xfffffffffffffffful;
#endif

	/*
	 * The thread lock must be held while looking at the arguments to ensure
	 * they don't go away via post_syscall().
	 * get_syscall_args() is the only routine to read them which is callable
	 * outside the LWP in question and hence the only one that must be
	 * synchronized in this manner.
	 */
	thread_lock(t);

	code = t->t_sysnum;
	ap = lwp->lwp_ap;

	thread_unlock(t);

	if (code != 0 && code < NSYSCALL) {
		nargs = LWP_GETSYSENT(lwp)[code].sy_narg;

		ASSERT(nargs <= MAXSYSARGS);

		*nargsp = nargs;
		while (nargs-- > 0)
			*argp++ = *ap++ & mask;
	} else {
		*nargsp = 0;
	}

	return (code);
}

#ifdef _SYSCALL32_IMPL
/*
 * Get the arguments to the current 32-bit system call.
 */
uint_t
get_syscall32_args(klwp_t *lwp, int *argp, int *nargsp)
{
	long args[MAXSYSARGS];
	uint_t i, code;

	code = get_syscall_args(lwp, args, nargsp);

	for (i = 0; i != *nargsp; i++)
		*argp++ = (int)args[i];
	return (code);
}
#endif

/*
 * Save the system call arguments in a safe place.
 *
 * On the i386 kernel:
 *
 * Copy the user's args prior to changing the stack or stack pointer.
 * This is so /proc will be able to get a valid copy of the
 * args from the user stack even after the user stack has been changed.
 * Note that the kernel stack copy of the args may also have been
 * changed by a system call handler which takes C-style arguments.
 *
 * Note that this may be called by stop() from trap(). In that case
 * t_sysnum will be zero (syscall_exit clears it), so no args will be
 * copied.
 *
 * On the amd64 kernel:
 *
 * For 64-bit applications, lwp->lwp_ap normally points to %rdi..%r9
 * in the reg structure. If the user is going to change the argument
 * registers, rax, or the stack and might want to get the args (for
 * /proc tracing), it must copy the args elsewhere via save_syscall_args().
 *
 * For 32-bit applications, lwp->lwp_ap normally points to a copy of
 * the system call arguments on the kernel stack made from the user
 * stack. Copy the args prior to changing the stack or stack pointer.
 * This is so /proc will be able to get a valid copy of the args
 * from the user stack even after that stack has been changed.
 *
 * This may be called from stop() even when we're not in a system call.
 * Since there's no easy way to tell, this must be safe (not panic).
 * If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	uint_t code = t->t_sysnum;
	uint_t nargs;

	if (lwp->lwp_argsaved || code == 0)
		return (0);	/* args already saved or not needed */

	if (code >= NSYSCALL) {
		nargs = 0;	/* illegal syscall */
	} else {
		struct sysent *se = LWP_GETSYSENT(lwp);
		struct sysent *callp = se + code;

		nargs = callp->sy_narg;
		if (LOADABLE_SYSCALL(callp) && nargs == 0) {
			krwlock_t *module_lock;

			/*
			 * Find out how many arguments the system
			 * call uses.
			 *
			 * We have the property that loaded syscalls
			 * never change the number of arguments they
			 * use after they've been loaded once. This
			 * allows us to stop for /proc tracing without
			 * holding the module lock.
			 * /proc is assured that sy_narg is valid.
			 */
			module_lock = lock_syscall(se, code);
			nargs = callp->sy_narg;
			rw_exit(module_lock);
		}
	}

	/*
	 * Fetch the system call arguments.
	 */
	if (nargs == 0)
		goto out;

	ASSERT(nargs <= MAXSYSARGS);

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		struct regs *rp = lwptoregs(lwp);

		lwp->lwp_arg[0] = rp->r_rdi;
		lwp->lwp_arg[1] = rp->r_rsi;
		lwp->lwp_arg[2] = rp->r_rdx;
		lwp->lwp_arg[3] = rp->r_rcx;
		lwp->lwp_arg[4] = rp->r_r8;
		lwp->lwp_arg[5] = rp->r_r9;
		if (nargs > 6 && copyin_args(rp, &lwp->lwp_arg[6], nargs - 6))
			return (-1);
	} else {
#endif
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_arg, nargs))
			return (-1);
	}
out:
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_argsaved = 1;
	t->t_post_sys = 1;	/* so lwp_ap will be reset */
	return (0);
}

void
reset_syscall_args(void)
{
	ttolwp(curthread)->lwp_argsaved = 0;
}

/*
 * Call a system call which takes a pointer to the user args struct and
 * a pointer to the return values. This is a bit slower than the standard
 * C arg-passing method in some cases.
 */
int64_t
syscall_ap(void)
{
	uint_t error;
	struct sysent *callp;
	rval_t rval;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	struct regs *rp = lwptoregs(lwp);

	callp = LWP_GETSYSENT(lwp) + t->t_sysnum;

#if defined(__amd64)
	/*
	 * If the arguments don't fit in registers %rdi-%r9, make sure they
	 * have been copied to the lwp_arg array.
	 */
	if (callp->sy_narg > 6 && save_syscall_args())
		return ((int64_t)set_errno(EFAULT));
#endif

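	/*
	 * Note: r_val1 is cleared but r_val2 is seeded from the current r1,
	 * so handlers that only set r_val1 leave the second return register
	 * unchanged.
	 */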
	rval.r_val1 = 0;
	rval.r_val2 = rp->r_r1;
	lwp->lwp_error = 0;	/* for old drivers */
	error = (*(callp->sy_call))(lwp->lwp_ap, &rval);
	if (error)
		return ((longlong_t)set_errno(error));
	return (rval.r_vals);
}

/*
 * Load system call module.
 * Returns with pointer to held read lock for module.
 */
static krwlock_t *
lock_syscall(struct sysent *table, uint_t code)
{
	krwlock_t *module_lock;
	struct modctl *modp;
	int id;
	struct sysent *callp;

	callp = table + code;
	module_lock = callp->sy_lock;

	/*
	 * Optimization to only call modload if we don't have a loaded
	 * syscall.
	 */
	rw_enter(module_lock, RW_READER);
	if (LOADED_SYSCALL(callp))
		return (module_lock);
	rw_exit(module_lock);

	for (;;) {
		if ((id = modload("sys", syscallnames[code])) == -1)
			break;

		/*
		 * If we loaded successfully at least once, the modctl
		 * will still be valid, so we try to grab it by filename.
		 * If this call fails, it's because the mod_filename
		 * was changed after the call to modload() (mod_hold_by_name()
		 * is the likely culprit). We can safely just take
		 * another lap if this is the case; the modload() will
		 * change the mod_filename back to one by which we can
		 * find the modctl.
		 */
		modp = mod_find_by_filename("sys", syscallnames[code]);

		if (modp == NULL)
			continue;

		mutex_enter(&mod_lock);

		if (!modp->mod_installed) {
			mutex_exit(&mod_lock);
			continue;
		}
		break;
	}
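
	/*
	 * Note: on the success path we still hold mod_lock here; the read
	 * lock is taken before mod_lock is dropped so that the module
	 * cannot be unloaded between the mod_installed check above and
	 * the acquisition of module_lock.
	 */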
	rw_enter(module_lock, RW_READER);

	if (id != -1)
		mutex_exit(&mod_lock);

	return (module_lock);
}

/*
 * Loadable syscall support.
 * If needed, load the module, then reserve it by holding a read
 * lock for the duration of the call.
 * Later, if the syscall is not unloadable, it could patch the vector.
 */
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
	klwp_t *lwp = ttolwp(curthread);
	int64_t rval;
	struct sysent *callp;
	struct sysent *se = LWP_GETSYSENT(lwp);
	krwlock_t *module_lock;
	int code, error = 0;

	code = curthread->t_sysnum;
	callp = se + code;

	/*
	 * Try to autoload the system call if necessary
	 */
	module_lock = lock_syscall(se, code);

	/*
	 * we've locked either the loaded syscall or nosys
	 */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		if (callp->sy_flags & SE_ARGC) {
			rval = (int64_t)(*callp->sy_call)(a0, a1, a2, a3,
			    a4, a5);
		} else {
			rval = syscall_ap();
		}
	} else {
#endif
		/*
		 * Now that it's loaded, make sure enough args were copied.
		 */
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_ap, callp->sy_narg))
			error = EFAULT;
		if (error) {
			rval = set_errno(error);
		} else if (callp->sy_flags & SE_ARGC) {
			rval = (int64_t)(*callp->sy_call)(lwp->lwp_ap[0],
			    lwp->lwp_ap[1], lwp->lwp_ap[2], lwp->lwp_ap[3],
			    lwp->lwp_ap[4], lwp->lwp_ap[5]);
		} else {
			rval = syscall_ap();
		}
	}

	rw_exit(module_lock);
	return (rval);
}

/*
 * Indirect syscall handled in libc on x86 architectures
 */
int64_t
indir()
{
	return (nosys());
}

/*
 * set_errno - set an error return from the current system call.
 * This could be a macro.
 * This returns the value it is passed, so that the caller can
 * use tail-recursion-elimination and do return (set_errno(ERRNO));
 */
uint_t
set_errno(uint_t error)
{
	ASSERT(error != 0);	/* must not be used to clear errno */

	curthread->t_post_sys = 1;	/* have post_syscall do error return */
	return (ttolwp(curthread)->lwp_errno = error);
}

/*
 * set_proc_pre_sys - Set pre-syscall processing for entire process.
 */
void
set_proc_pre_sys(proc_t *p)
{
	kthread_t *t;
	kthread_t *first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_pre_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_proc_post_sys - Set post-syscall processing for entire process.
 */
void
set_proc_post_sys(proc_t *p)
{
	kthread_t *t;
	kthread_t *first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_post_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_proc_sys - Set pre- and post-syscall processing for entire process.
 */
void
set_proc_sys(proc_t *p)
{
	kthread_t *t;
	kthread_t *first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_pre_sys = 1;
		t->t_post_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_all_proc_sys - set pre- and post-syscall processing flags for all
 * user processes.
 *
 * This is needed when auditing, tracing, or other facilities which affect
 * all processes are turned on.
 */
void
set_all_proc_sys()
{
	kthread_t *t;
	kthread_t *first;

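	/*
	 * Note: this walks the circular list of all threads in the system
	 * (linked by t_next), starting from curthread; pidlock protects
	 * the thread list for the duration of the walk.
	 */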
	mutex_enter(&pidlock);
	t = first = curthread;
	do {
		t->t_pre_sys = 1;
		t->t_post_sys = 1;
	} while ((t = t->t_next) != first);
	mutex_exit(&pidlock);
}

/*
 * set_all_zone_usr_proc_sys - set pre- and post-syscall processing flags for
 * all user processes running in the given zone (or in all zones, for
 * ALL_ZONES)
 *
 * This is needed when auditing, tracing, or other facilities which affect
 * all processes are turned on.
 */
void
set_all_zone_usr_proc_sys(zoneid_t zoneid)
{
	proc_t *p;
	kthread_t *t;

	mutex_enter(&pidlock);
	for (p = practive; p != NULL; p = p->p_next) {
		/* skip kernel and incomplete processes */
		if (p->p_exec == NULLVP || p->p_as == &kas ||
		    p->p_stat == SIDL || p->p_stat == SZOMB ||
		    (p->p_flag & (SSYS | SEXITING | SEXITLWPS)))
			continue;
		/*
		 * Only processes in the given zone (eventually in
		 * all zones) are taken into account
		 */
		if (zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid) {
			mutex_enter(&p->p_lock);
			if ((t = p->p_tlist) == NULL) {
				mutex_exit(&p->p_lock);
				continue;
			}
			/*
			 * Set pre- and post-syscall processing flags
			 * for all threads of the process
			 */
			do {
				t->t_pre_sys = 1;
				t->t_post_sys = 1;
			} while (p->p_tlist != (t = t->t_forw));
			mutex_exit(&p->p_lock);
		}
	}
	mutex_exit(&pidlock);
}

/*
 * set_proc_ast - Set asynchronous service trap (AST) flag for all
 * threads in process.
 */
void
set_proc_ast(proc_t *p)
{
	kthread_t *t;
	kthread_t *first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		aston(t);
	} while ((t = t->t_forw) != first);
}