5798 fexecve() needed per POSIX 2008
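This webrev adds /dev/fd/<N> handling to the kernel's exec path so that a userland fexecve() can be layered on top of it: exec_common() now recognizes a pathname beginning with "/dev/fd/", parses the descriptor number, and resolves the vnode with fgetstartvp() instead of a lookuppn() path lookup. A minimal userland sketch of how fexecve() could sit on this support follows (illustrative only; my_fexecve and the fixed-size path buffer are assumptions, not the actual libc change):

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/*
 * Sketch of a userland fexecve() built on the /dev/fd exec support added
 * below.  my_fexecve() is a hypothetical helper name, not the libc entry
 * point committed with this change.
 */
static int
my_fexecve(int fd, char *const argv[], char *const envp[])
{
	char path[32];

	if (fd < 0) {
		errno = EBADF;
		return (-1);
	}
	(void) snprintf(path, sizeof (path), "/dev/fd/%d", fd);
	return (execve(path, argv, envp));	/* returns only on error */
}

Because the kernel resolves the vnode directly from the open descriptor (fgetstartvp), no lookup of the file's original path name is required for the exec to succeed.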
--- old/usr/src/uts/common/os/exec.c
+++ new/usr/src/uts/common/os/exec.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 + * Copyright 2015 Garrett D'Amore <garrett@damore.org>
23 24 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24 25 */
25 26
26 27 /* Copyright (c) 1988 AT&T */
27 28 /* All Rights Reserved */
28 29 /*
29 30 * Copyright 2014, Joyent, Inc. All rights reserved.
30 31 */
31 32
32 33 #include <sys/types.h>
33 34 #include <sys/param.h>
34 35 #include <sys/sysmacros.h>
35 36 #include <sys/systm.h>
36 37 #include <sys/signal.h>
37 38 #include <sys/cred_impl.h>
38 39 #include <sys/policy.h>
39 40 #include <sys/user.h>
40 41 #include <sys/errno.h>
41 42 #include <sys/file.h>
42 43 #include <sys/vfs.h>
43 44 #include <sys/vnode.h>
44 45 #include <sys/mman.h>
45 46 #include <sys/acct.h>
46 47 #include <sys/cpuvar.h>
47 48 #include <sys/proc.h>
48 49 #include <sys/cmn_err.h>
49 50 #include <sys/debug.h>
50 51 #include <sys/pathname.h>
51 52 #include <sys/vm.h>
52 53 #include <sys/lgrp.h>
53 54 #include <sys/vtrace.h>
54 55 #include <sys/exec.h>
55 56 #include <sys/exechdr.h>
56 57 #include <sys/kmem.h>
57 58 #include <sys/prsystm.h>
58 59 #include <sys/modctl.h>
59 60 #include <sys/vmparam.h>
60 61 #include <sys/door.h>
61 62 #include <sys/schedctl.h>
62 63 #include <sys/utrap.h>
63 64 #include <sys/systeminfo.h>
64 65 #include <sys/stack.h>
65 66 #include <sys/rctl.h>
66 67 #include <sys/dtrace.h>
67 68 #include <sys/lwpchan_impl.h>
68 69 #include <sys/pool.h>
69 70 #include <sys/sdt.h>
70 71 #include <sys/brand.h>
71 72 #include <sys/klpd.h>
72 73
73 74 #include <c2/audit.h>
74 75
75 76 #include <vm/hat.h>
76 77 #include <vm/anon.h>
77 78 #include <vm/as.h>
78 79 #include <vm/seg.h>
79 80 #include <vm/seg_vn.h>
80 81
81 82 #define PRIV_RESET 0x01 /* needs to reset privs */
82 83 #define PRIV_SETID 0x02 /* needs to change uids */
83 84 #define PRIV_SETUGID 0x04 /* is setuid/setgid/forced privs */
84 85 #define PRIV_INCREASE 0x08 /* child runs with more privs */
85 86 #define MAC_FLAGS 0x10 /* need to adjust MAC flags */
86 87 #define PRIV_FORCED 0x20 /* has forced privileges */
87 88
88 89 static int execsetid(struct vnode *, struct vattr *, uid_t *, uid_t *,
89 90 priv_set_t *, cred_t *, const char *);
90 91 static int hold_execsw(struct execsw *);
91 92
92 93 uint_t auxv_hwcap = 0; /* auxv AT_SUN_HWCAP value; determined on the fly */
93 94 uint_t auxv_hwcap_2 = 0; /* AT_SUN_HWCAP2 */
94 95 #if defined(_SYSCALL32_IMPL)
95 96 uint_t auxv_hwcap32 = 0; /* 32-bit version of auxv_hwcap */
96 97 uint_t auxv_hwcap32_2 = 0; /* 32-bit version of auxv_hwcap2 */
97 98 #endif
98 99
99 100 #define PSUIDFLAGS (SNOCD|SUGID)
100 101
102 +#define DEVFD "/dev/fd/"
103 +
101 104 /*
102 105 * exece() - system call wrapper around exec_common()
103 106 */
104 107 int
105 108 exece(const char *fname, const char **argp, const char **envp)
106 109 {
107 110 int error;
108 111
109 112 error = exec_common(fname, argp, envp, EBA_NONE);
110 113 return (error ? (set_errno(error)) : 0);
111 114 }
112 115
113 116 int
114 117 exec_common(const char *fname, const char **argp, const char **envp,
115 118 int brand_action)
116 119 {
117 120 vnode_t *vp = NULL, *dir = NULL, *tmpvp = NULL;
118 121 proc_t *p = ttoproc(curthread);
119 122 klwp_t *lwp = ttolwp(curthread);
120 123 struct user *up = PTOU(p);
121 124 long execsz; /* temporary count of exec size */
122 125 int i;
123 126 int error;
124 127 char exec_file[MAXCOMLEN+1];
125 128 struct pathname pn;
126 129 struct pathname resolvepn;
127 130 struct uarg args;
128 131 struct execa ua;
129 132 k_sigset_t savedmask;
130 133 lwpdir_t *lwpdir = NULL;
131 134 tidhash_t *tidhash;
132 135 lwpdir_t *old_lwpdir = NULL;
133 136 uint_t old_lwpdir_sz;
134 137 tidhash_t *old_tidhash;
135 138 uint_t old_tidhash_sz;
136 139 ret_tidhash_t *ret_tidhash;
137 140 lwpent_t *lep;
138 141 boolean_t brandme = B_FALSE;
139 142
140 143 /*
141 144 * exec() is not supported for the /proc agent lwp.
142 145 */
143 146 if (curthread == p->p_agenttp)
144 147 return (ENOTSUP);
145 148
146 149 if (brand_action != EBA_NONE) {
147 150 /*
148 151 * Brand actions are not supported for processes that are not
149 152 * running in a branded zone.
150 153 */
151 154 if (!ZONE_IS_BRANDED(p->p_zone))
152 155 return (ENOTSUP);
153 156
154 157 if (brand_action == EBA_NATIVE) {
155 158 /* Only branded processes can be unbranded */
156 159 if (!PROC_IS_BRANDED(p))
157 160 return (ENOTSUP);
158 161 } else {
159 162 /* Only unbranded processes can be branded */
160 163 if (PROC_IS_BRANDED(p))
161 164 return (ENOTSUP);
162 165 brandme = B_TRUE;
163 166 }
164 167 } else {
165 168 /*
166 169 * If this is a native zone, or if the process is already
167 170 * branded, then we don't need to do anything. If this is
168 171 * a native process in a branded zone, we need to brand the
169 172 * process as it exec()s the new binary.
170 173 */
171 174 if (ZONE_IS_BRANDED(p->p_zone) && !PROC_IS_BRANDED(p))
172 175 brandme = B_TRUE;
173 176 }
174 177
175 178 /*
176 179 * Inform /proc that an exec() has started.
177 180 * Hold signals that are ignored by default so that we will
178 181 * not be interrupted by a signal that will be ignored after
179 182 * successful completion of gexec().
180 183 */
181 184 mutex_enter(&p->p_lock);
182 185 prexecstart();
183 186 schedctl_finish_sigblock(curthread);
184 187 savedmask = curthread->t_hold;
185 188 sigorset(&curthread->t_hold, &ignoredefault);
186 189 mutex_exit(&p->p_lock);
187 190
188 191 /*
189 192 * Look up path name and remember last component for later.
190 193 * To help coreadm expand its %d token, we attempt to save
191 194 * the directory containing the executable in p_execdir. The
192 195 * first call to lookuppn() may fail and return EINVAL because
193 196 * dirvpp is non-NULL. In that case, we make a second call to
194 197 * lookuppn() with dirvpp set to NULL; p_execdir will be NULL,
195 198 * but coreadm is allowed to expand %d to the empty string and
196 199 * there are other cases in which that failure may occur.
197 200 */
198 201 if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
199 202 goto out;
200 203 pn_alloc(&resolvepn);
201 - if ((error = lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
204 +
205 + if (strncmp(pn.pn_path, DEVFD, strlen(DEVFD)) == 0) {
206 + /* looks like a /dev/fd node */
207 + char *p = pn.pn_path + strlen(DEVFD);
208 + int fd = stoi(&p);
209 + if ((fd < 0) || (*p != 0) || (p == pn.pn_path)) {
210 + error = EBADF;
211 + goto out;
212 + }
213 + if ((error = fgetstartvp(fd, NULL, &vp)) != 0) {
214 + goto out; /* error will be EBADF */
215 + }
216 + (void) pn_set(&resolvepn, pn.pn_path);
217 +
218 + } else if ((error =
219 + lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
202 220 pn_free(&resolvepn);
203 221 pn_free(&pn);
204 222 if (error != EINVAL)
205 223 goto out;
206 224
207 225 dir = NULL;
208 226 if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
209 227 goto out;
210 228 pn_alloc(&resolvepn);
211 229 if ((error = lookuppn(&pn, &resolvepn, FOLLOW, NULLVPP,
212 230 &vp)) != 0) {
213 231 pn_free(&resolvepn);
214 232 pn_free(&pn);
215 233 goto out;
216 234 }
217 235 }
218 236 if (vp == NULL) {
219 237 if (dir != NULL)
220 238 VN_RELE(dir);
221 239 error = ENOENT;
222 240 pn_free(&resolvepn);
223 241 pn_free(&pn);
224 242 goto out;
225 243 }
226 244
227 245 if ((error = secpolicy_basic_exec(CRED(), vp)) != 0) {
228 246 if (dir != NULL)
229 247 VN_RELE(dir);
230 248 pn_free(&resolvepn);
231 249 pn_free(&pn);
232 250 VN_RELE(vp);
233 251 goto out;
234 252 }
235 253
236 254 /*
237 255 * We do not allow executing files in attribute directories.
238 256 * We test this by determining whether the resolved path
239 257 * contains a "/" when we're in an attribute directory;
240 258 * only if the pathname does not contain a "/" the resolved path
241 259 * points to a file in the current working (attribute) directory.
242 260 */
243 261 if ((p->p_user.u_cdir->v_flag & V_XATTRDIR) != 0 &&
244 262 strchr(resolvepn.pn_path, '/') == NULL) {
245 263 if (dir != NULL)
246 264 VN_RELE(dir);
247 265 error = EACCES;
248 266 pn_free(&resolvepn);
249 267 pn_free(&pn);
250 268 VN_RELE(vp);
251 269 goto out;
252 270 }
253 271
254 272 bzero(exec_file, MAXCOMLEN+1);
255 273 (void) strncpy(exec_file, pn.pn_path, MAXCOMLEN);
256 274 bzero(&args, sizeof (args));
257 275 args.pathname = resolvepn.pn_path;
258 276 /* don't free resolvepn until we are done with args */
259 277 pn_free(&pn);
260 278
261 279 /*
262 280 * If we're running in a profile shell, then call pfexecd.
263 281 */
264 282 if ((CR_FLAGS(p->p_cred) & PRIV_PFEXEC) != 0) {
265 283 error = pfexec_call(p->p_cred, &resolvepn, &args.pfcred,
266 284 &args.scrubenv);
267 285
268 286 /* Returning errno in case we're not allowed to execute. */
269 287 if (error > 0) {
270 288 if (dir != NULL)
271 289 VN_RELE(dir);
272 290 pn_free(&resolvepn);
273 291 VN_RELE(vp);
274 292 goto out;
275 293 }
276 294
277 295 /* Don't change the credentials when using old ptrace. */
278 296 if (args.pfcred != NULL &&
279 297 (p->p_proc_flag & P_PR_PTRACE) != 0) {
280 298 crfree(args.pfcred);
281 299 args.pfcred = NULL;
282 300 args.scrubenv = B_FALSE;
283 301 }
284 302 }
285 303
286 304 /*
287 305 * Specific exec handlers, or policies determined via
288 306 * /etc/system may override the historical default.
289 307 */
290 308 args.stk_prot = PROT_ZFOD;
291 309 args.dat_prot = PROT_ZFOD;
292 310
293 311 CPU_STATS_ADD_K(sys, sysexec, 1);
294 312 DTRACE_PROC1(exec, char *, args.pathname);
295 313
296 314 ua.fname = fname;
297 315 ua.argp = argp;
298 316 ua.envp = envp;
299 317
300 318 /* If necessary, brand this process before we start the exec. */
301 319 if (brandme)
302 320 brand_setbrand(p);
303 321
304 322 if ((error = gexec(&vp, &ua, &args, NULL, 0, &execsz,
305 323 exec_file, p->p_cred, brand_action)) != 0) {
306 324 if (brandme)
307 325 brand_clearbrand(p, B_FALSE);
308 326 VN_RELE(vp);
309 327 if (dir != NULL)
310 328 VN_RELE(dir);
311 329 pn_free(&resolvepn);
312 330 goto fail;
313 331 }
314 332
315 333 /*
316 334 * Free floating point registers (sun4u only)
317 335 */
318 336 ASSERT(lwp != NULL);
319 337 lwp_freeregs(lwp, 1);
320 338
321 339 /*
322 340 * Free thread and process context ops.
323 341 */
324 342 if (curthread->t_ctx)
325 343 freectx(curthread, 1);
326 344 if (p->p_pctx)
327 345 freepctx(p, 1);
328 346
329 347 /*
330 348 * Remember file name for accounting; clear any cached DTrace predicate.
331 349 */
332 350 up->u_acflag &= ~AFORK;
333 351 bcopy(exec_file, up->u_comm, MAXCOMLEN+1);
334 352 curthread->t_predcache = NULL;
335 353
336 354 /*
337 355 * Clear contract template state
338 356 */
339 357 lwp_ctmpl_clear(lwp);
340 358
341 359 /*
342 360 * Save the directory in which we found the executable for expanding
343 361 * the %d token used in core file patterns.
344 362 */
345 363 mutex_enter(&p->p_lock);
346 364 tmpvp = p->p_execdir;
347 365 p->p_execdir = dir;
348 366 if (p->p_execdir != NULL)
349 367 VN_HOLD(p->p_execdir);
350 368 mutex_exit(&p->p_lock);
351 369
352 370 if (tmpvp != NULL)
353 371 VN_RELE(tmpvp);
354 372
355 373 /*
356 374 * Reset stack state to the user stack, clear set of signals
357 375 * caught on the signal stack, and reset list of signals that
358 376 * restart system calls; the new program's environment should
359 377 * not be affected by detritus from the old program. Any
360 378 * pending held signals remain held, so don't clear t_hold.
361 379 */
362 380 mutex_enter(&p->p_lock);
363 381 lwp->lwp_oldcontext = 0;
364 382 lwp->lwp_ustack = 0;
365 383 lwp->lwp_old_stk_ctl = 0;
366 384 sigemptyset(&up->u_signodefer);
367 385 sigemptyset(&up->u_sigonstack);
368 386 sigemptyset(&up->u_sigresethand);
369 387 lwp->lwp_sigaltstack.ss_sp = 0;
370 388 lwp->lwp_sigaltstack.ss_size = 0;
371 389 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
372 390
373 391 /*
374 392 * Make saved resource limit == current resource limit.
375 393 */
376 394 for (i = 0; i < RLIM_NLIMITS; i++) {
377 395 /*CONSTCOND*/
378 396 if (RLIM_SAVED(i)) {
379 397 (void) rctl_rlimit_get(rctlproc_legacy[i], p,
380 398 &up->u_saved_rlimit[i]);
381 399 }
382 400 }
383 401
384 402 /*
385 403 * If the action was to catch the signal, then the action
386 404 * must be reset to SIG_DFL.
387 405 */
388 406 sigdefault(p);
389 407 p->p_flag &= ~(SNOWAIT|SJCTL);
390 408 p->p_flag |= (SEXECED|SMSACCT|SMSFORK);
391 409 up->u_signal[SIGCLD - 1] = SIG_DFL;
392 410
393 411 /*
394 412 * Delete the dot4 sigqueues/signotifies.
395 413 */
396 414 sigqfree(p);
397 415
398 416 mutex_exit(&p->p_lock);
399 417
400 418 mutex_enter(&p->p_pflock);
401 419 p->p_prof.pr_base = NULL;
402 420 p->p_prof.pr_size = 0;
403 421 p->p_prof.pr_off = 0;
404 422 p->p_prof.pr_scale = 0;
405 423 p->p_prof.pr_samples = 0;
406 424 mutex_exit(&p->p_pflock);
407 425
408 426 ASSERT(curthread->t_schedctl == NULL);
409 427
410 428 #if defined(__sparc)
411 429 if (p->p_utraps != NULL)
412 430 utrap_free(p);
413 431 #endif /* __sparc */
414 432
415 433 /*
416 434 * Close all close-on-exec files.
417 435 */
418 436 close_exec(P_FINFO(p));
419 437 TRACE_2(TR_FAC_PROC, TR_PROC_EXEC, "proc_exec:p %p up %p", p, up);
420 438
421 439 /* Unbrand ourself if necessary. */
422 440 if (PROC_IS_BRANDED(p) && (brand_action == EBA_NATIVE))
423 441 brand_clearbrand(p, B_FALSE);
424 442
425 443 setregs(&args);
426 444
427 445 /* Mark this as an executable vnode */
428 446 mutex_enter(&vp->v_lock);
429 447 vp->v_flag |= VVMEXEC;
430 448 mutex_exit(&vp->v_lock);
431 449
432 450 VN_RELE(vp);
433 451 if (dir != NULL)
434 452 VN_RELE(dir);
435 453 pn_free(&resolvepn);
436 454
437 455 /*
438 456 * Allocate a new lwp directory and lwpid hash table if necessary.
439 457 */
440 458 if (curthread->t_tid != 1 || p->p_lwpdir_sz != 2) {
441 459 lwpdir = kmem_zalloc(2 * sizeof (lwpdir_t), KM_SLEEP);
442 460 lwpdir->ld_next = lwpdir + 1;
443 461 tidhash = kmem_zalloc(2 * sizeof (tidhash_t), KM_SLEEP);
444 462 if (p->p_lwpdir != NULL)
445 463 lep = p->p_lwpdir[curthread->t_dslot].ld_entry;
446 464 else
447 465 lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
448 466 }
449 467
450 468 if (PROC_IS_BRANDED(p))
451 469 BROP(p)->b_exec();
452 470
453 471 mutex_enter(&p->p_lock);
454 472 prbarrier(p);
455 473
456 474 /*
457 475 * Reset lwp id to the default value of 1.
458 476 * This is a single-threaded process now
459 477 * and lwp #1 is lwp_wait()able by default.
460 478 * The t_unpark flag should not be inherited.
461 479 */
462 480 ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
463 481 curthread->t_tid = 1;
464 482 kpreempt_disable();
465 483 ASSERT(curthread->t_lpl != NULL);
466 484 p->p_t1_lgrpid = curthread->t_lpl->lpl_lgrpid;
467 485 kpreempt_enable();
468 486 if (p->p_tr_lgrpid != LGRP_NONE && p->p_tr_lgrpid != p->p_t1_lgrpid) {
469 487 lgrp_update_trthr_migrations(1);
470 488 }
471 489 curthread->t_unpark = 0;
472 490 curthread->t_proc_flag |= TP_TWAIT;
473 491 curthread->t_proc_flag &= ~TP_DAEMON; /* daemons shouldn't exec */
474 492 p->p_lwpdaemon = 0; /* but oh well ... */
475 493 p->p_lwpid = 1;
476 494
477 495 /*
478 496 * Install the newly-allocated lwp directory and lwpid hash table
479 497 * and insert the current thread into the new hash table.
480 498 */
481 499 if (lwpdir != NULL) {
482 500 old_lwpdir = p->p_lwpdir;
483 501 old_lwpdir_sz = p->p_lwpdir_sz;
484 502 old_tidhash = p->p_tidhash;
485 503 old_tidhash_sz = p->p_tidhash_sz;
486 504 p->p_lwpdir = p->p_lwpfree = lwpdir;
487 505 p->p_lwpdir_sz = 2;
488 506 lep->le_thread = curthread;
489 507 lep->le_lwpid = curthread->t_tid;
490 508 lep->le_start = curthread->t_start;
491 509 lwp_hash_in(p, lep, tidhash, 2, 0);
492 510 p->p_tidhash = tidhash;
493 511 p->p_tidhash_sz = 2;
494 512 }
495 513 ret_tidhash = p->p_ret_tidhash;
496 514 p->p_ret_tidhash = NULL;
497 515
498 516 /*
499 517 * Restore the saved signal mask and
500 518 * inform /proc that the exec() has finished.
501 519 */
502 520 curthread->t_hold = savedmask;
503 521 prexecend();
504 522 mutex_exit(&p->p_lock);
505 523 if (old_lwpdir) {
506 524 kmem_free(old_lwpdir, old_lwpdir_sz * sizeof (lwpdir_t));
507 525 kmem_free(old_tidhash, old_tidhash_sz * sizeof (tidhash_t));
508 526 }
509 527 while (ret_tidhash != NULL) {
510 528 ret_tidhash_t *next = ret_tidhash->rth_next;
511 529 kmem_free(ret_tidhash->rth_tidhash,
512 530 ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
513 531 kmem_free(ret_tidhash, sizeof (*ret_tidhash));
514 532 ret_tidhash = next;
515 533 }
516 534
517 535 ASSERT(error == 0);
518 536 DTRACE_PROC(exec__success);
519 537 return (0);
520 538
521 539 fail:
522 540 DTRACE_PROC1(exec__failure, int, error);
523 541 out: /* error return */
524 542 mutex_enter(&p->p_lock);
525 543 curthread->t_hold = savedmask;
526 544 prexecend();
527 545 mutex_exit(&p->p_lock);
528 546 ASSERT(error != 0);
529 547 return (error);
530 548 }
531 549
532 550
533 551 /*
534 552 * Perform generic exec duties and switchout to object-file specific
535 553 * handler.
536 554 */
537 555 int
538 556 gexec(
539 557 struct vnode **vpp,
540 558 struct execa *uap,
541 559 struct uarg *args,
542 560 struct intpdata *idatap,
543 561 int level,
544 562 long *execsz,
545 563 caddr_t exec_file,
546 564 struct cred *cred,
547 565 int brand_action)
548 566 {
549 567 struct vnode *vp, *execvp = NULL;
550 568 proc_t *pp = ttoproc(curthread);
551 569 struct execsw *eswp;
552 570 int error = 0;
553 571 int suidflags = 0;
554 572 ssize_t resid;
555 573 uid_t uid, gid;
556 574 struct vattr vattr;
557 575 char magbuf[MAGIC_BYTES];
558 576 int setid;
559 577 cred_t *oldcred, *newcred = NULL;
560 578 int privflags = 0;
561 579 int setidfl;
562 580 priv_set_t fset;
563 581
564 582 /*
565 583 * If the SNOCD or SUGID flag is set, turn it off and remember the
566 584 * previous setting so we can restore it if we encounter an error.
567 585 */
568 586 if (level == 0 && (pp->p_flag & PSUIDFLAGS)) {
569 587 mutex_enter(&pp->p_lock);
570 588 suidflags = pp->p_flag & PSUIDFLAGS;
571 589 pp->p_flag &= ~PSUIDFLAGS;
572 590 mutex_exit(&pp->p_lock);
573 591 }
574 592
575 593 if ((error = execpermissions(*vpp, &vattr, args)) != 0)
576 594 goto bad_noclose;
577 595
578 596 /* need to open vnode for stateful file systems */
579 597 if ((error = VOP_OPEN(vpp, FREAD, CRED(), NULL)) != 0)
580 598 goto bad_noclose;
581 599 vp = *vpp;
582 600
583 601 /*
584 602 * Note: to support binary compatibility with SunOS a.out
585 603 * executables, we read in the first four bytes, as the
586 604 * magic number is in bytes 2-3.
587 605 */
588 606 if (error = vn_rdwr(UIO_READ, vp, magbuf, sizeof (magbuf),
589 607 (offset_t)0, UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid))
590 608 goto bad;
591 609 if (resid != 0)
592 610 goto bad;
593 611
594 612 if ((eswp = findexec_by_hdr(magbuf)) == NULL)
595 613 goto bad;
596 614
597 615 if (level == 0 &&
598 616 (privflags = execsetid(vp, &vattr, &uid, &gid, &fset,
599 617 args->pfcred == NULL ? cred : args->pfcred, args->pathname)) != 0) {
600 618
601 619 /* Pfcred is a credential with a ref count of 1 */
602 620
603 621 if (args->pfcred != NULL) {
604 622 privflags |= PRIV_INCREASE|PRIV_RESET;
605 623 newcred = cred = args->pfcred;
606 624 } else {
607 625 newcred = cred = crdup(cred);
608 626 }
609 627
610 628 /* If we can, drop the PA bit */
611 629 if ((privflags & PRIV_RESET) != 0)
612 630 priv_adjust_PA(cred);
613 631
614 632 if (privflags & PRIV_SETID) {
615 633 cred->cr_uid = uid;
616 634 cred->cr_gid = gid;
617 635 cred->cr_suid = uid;
618 636 cred->cr_sgid = gid;
619 637 }
620 638
621 639 if (privflags & MAC_FLAGS) {
622 640 if (!(CR_FLAGS(cred) & NET_MAC_AWARE_INHERIT))
623 641 CR_FLAGS(cred) &= ~NET_MAC_AWARE;
624 642 CR_FLAGS(cred) &= ~NET_MAC_AWARE_INHERIT;
625 643 }
626 644
627 645 /*
628 646 * Implement the privilege updates:
629 647 *
630 648 * Restrict with L:
631 649 *
632 650 * I' = I & L
633 651 *
634 652 * E' = P' = (I' + F) & A
635 653 *
636 654 * But if running under ptrace, we cap I and F with P.
637 655 */
638 656 if ((privflags & (PRIV_RESET|PRIV_FORCED)) != 0) {
639 657 if ((privflags & PRIV_INCREASE) != 0 &&
640 658 (pp->p_proc_flag & P_PR_PTRACE) != 0) {
641 659 priv_intersect(&CR_OPPRIV(cred),
642 660 &CR_IPRIV(cred));
643 661 priv_intersect(&CR_OPPRIV(cred), &fset);
644 662 }
645 663 priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
646 664 CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
647 665 if (privflags & PRIV_FORCED) {
648 666 priv_set_PA(cred);
649 667 priv_union(&fset, &CR_EPRIV(cred));
650 668 priv_union(&fset, &CR_PPRIV(cred));
651 669 }
652 670 priv_adjust_PA(cred);
653 671 }
654 672 } else if (level == 0 && args->pfcred != NULL) {
655 673 newcred = cred = args->pfcred;
656 674 privflags |= PRIV_INCREASE;
657 675 /* pfcred is not forced to adhere to these settings */
658 676 priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
659 677 CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
660 678 priv_adjust_PA(cred);
661 679 }
662 680
663 681 /* SunOS 4.x buy-back */
664 682 if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
665 683 (vattr.va_mode & (VSUID|VSGID))) {
666 684 char path[MAXNAMELEN];
667 685 refstr_t *mntpt = NULL;
668 686 int ret = -1;
669 687
670 688 bzero(path, sizeof (path));
671 689 zone_hold(pp->p_zone);
672 690
673 691 ret = vnodetopath(pp->p_zone->zone_rootvp, vp, path,
674 692 sizeof (path), cred);
675 693
676 694 /* fallback to mountpoint if a path can't be found */
677 695 if ((ret != 0) || (ret == 0 && path[0] == '\0'))
678 696 mntpt = vfs_getmntpoint(vp->v_vfsp);
679 697
680 698 if (mntpt == NULL)
681 699 zcmn_err(pp->p_zone->zone_id, CE_NOTE,
682 700 "!uid %d: setuid execution not allowed, "
683 701 "file=%s", cred->cr_uid, path);
684 702 else
685 703 zcmn_err(pp->p_zone->zone_id, CE_NOTE,
686 704 "!uid %d: setuid execution not allowed, "
687 705 "fs=%s, file=%s", cred->cr_uid,
688 706 ZONE_PATH_TRANSLATE(refstr_value(mntpt),
689 707 pp->p_zone), exec_file);
690 708
691 709 if (!INGLOBALZONE(pp)) {
692 710 /* zone_rootpath always has trailing / */
693 711 if (mntpt == NULL)
694 712 cmn_err(CE_NOTE, "!zone: %s, uid: %d "
695 713 "setuid execution not allowed, file=%s%s",
696 714 pp->p_zone->zone_name, cred->cr_uid,
697 715 pp->p_zone->zone_rootpath, path + 1);
698 716 else
699 717 cmn_err(CE_NOTE, "!zone: %s, uid: %d "
700 718 "setuid execution not allowed, fs=%s, "
701 719 "file=%s", pp->p_zone->zone_name,
702 720 cred->cr_uid, refstr_value(mntpt),
703 721 exec_file);
704 722 }
705 723
706 724 if (mntpt != NULL)
707 725 refstr_rele(mntpt);
708 726
709 727 zone_rele(pp->p_zone);
710 728 }
711 729
712 730 /*
713 731 * execsetid() told us whether or not we had to change the
714 732 * credentials of the process. In privflags, it told us
715 733 * whether we gained any privileges or executed a set-uid executable.
716 734 */
717 735 setid = (privflags & (PRIV_SETUGID|PRIV_INCREASE|PRIV_FORCED));
718 736
719 737 /*
720 738 * Use /etc/system variable to determine if the stack
721 739 * should be marked as executable by default.
722 740 */
723 741 if (noexec_user_stack)
724 742 args->stk_prot &= ~PROT_EXEC;
725 743
726 744 args->execswp = eswp; /* Save execsw pointer in uarg for exec_func */
727 745 args->ex_vp = vp;
728 746
729 747 /*
730 748 * Traditionally, the setid flags told the sub processes whether
731 749 * the file just executed was set-uid or set-gid; this caused
732 750 * some confusion as the 'setid' flag did not match the SUGID
733 751 * process flag which is only set when the uids/gids do not match.
734 752 * A script set-gid/set-uid to the real uid/gid would start with
735 753 * /dev/fd/X but an executable would happily trust LD_LIBRARY_PATH.
736 754 * Now we flag those cases where the calling process cannot
737 755 * be trusted to influence the newly exec'ed process, either
738 756 * because it runs with more privileges or when the uids/gids
739 757 * do in fact not match.
740 758 * This also makes the runtime linker agree with the on exec
741 759 * values of SNOCD and SUGID.
742 760 */
743 761 setidfl = 0;
744 762 if (cred->cr_uid != cred->cr_ruid || (cred->cr_rgid != cred->cr_gid &&
745 763 !supgroupmember(cred->cr_gid, cred))) {
746 764 setidfl |= EXECSETID_UGIDS;
747 765 }
748 766 if (setid & PRIV_SETUGID)
749 767 setidfl |= EXECSETID_SETID;
750 768 if (setid & PRIV_FORCED)
751 769 setidfl |= EXECSETID_PRIVS;
752 770
753 771 execvp = pp->p_exec;
754 772 if (execvp)
755 773 VN_HOLD(execvp);
756 774
757 775 error = (*eswp->exec_func)(vp, uap, args, idatap, level, execsz,
758 776 setidfl, exec_file, cred, brand_action);
759 777 rw_exit(eswp->exec_lock);
760 778 if (error != 0) {
761 779 if (execvp)
762 780 VN_RELE(execvp);
763 781 /*
764 782 * If this process's p_exec has been set to the vp of
765 783 * the executable by exec_func, we will return without
766 784 * calling VOP_CLOSE because proc_exit will close it
767 785 * on exit.
768 786 */
769 787 if (pp->p_exec == vp)
770 788 goto bad_noclose;
771 789 else
772 790 goto bad;
773 791 }
774 792
775 793 if (level == 0) {
776 794 uid_t oruid;
777 795
778 796 if (execvp != NULL) {
779 797 /*
780 798 * Close the previous executable only if we are
781 799 * at level 0.
782 800 */
783 801 (void) VOP_CLOSE(execvp, FREAD, 1, (offset_t)0,
784 802 cred, NULL);
785 803 }
786 804
787 805 mutex_enter(&pp->p_crlock);
788 806
789 807 oruid = pp->p_cred->cr_ruid;
790 808
791 809 if (newcred != NULL) {
792 810 /*
793 811 * Free the old credentials, and set the new ones.
794 812 * Do this for both the process and the (single) thread.
795 813 */
796 814 crfree(pp->p_cred);
797 815 pp->p_cred = cred; /* cred already held for proc */
798 816 crhold(cred); /* hold new cred for thread */
799 817 /*
800 818 * DTrace accesses t_cred in probe context. t_cred
801 819 * must always be either NULL, or point to a valid,
802 820 * allocated cred structure.
803 821 */
804 822 oldcred = curthread->t_cred;
805 823 curthread->t_cred = cred;
806 824 crfree(oldcred);
807 825
808 826 if (priv_basic_test >= 0 &&
809 827 !PRIV_ISASSERT(&CR_IPRIV(newcred),
810 828 priv_basic_test)) {
811 829 pid_t pid = pp->p_pid;
812 830 char *fn = PTOU(pp)->u_comm;
813 831
814 832 cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
815 833 "privilege removed from E/I", fn, pid);
816 834 }
817 835 }
818 836 /*
819 837 * On emerging from a successful exec(), the saved
820 838 * uid and gid equal the effective uid and gid.
821 839 */
822 840 cred->cr_suid = cred->cr_uid;
823 841 cred->cr_sgid = cred->cr_gid;
824 842
825 843 /*
826 844 * If the real and effective ids do not match, this
827 845 * is a setuid process that should not dump core.
828 846 * The group comparison is tricky; we prevent the code
829 847 * from flagging SNOCD when executing with an effective gid
830 848 * which is a supplementary group.
831 849 */
832 850 if (cred->cr_ruid != cred->cr_uid ||
833 851 (cred->cr_rgid != cred->cr_gid &&
834 852 !supgroupmember(cred->cr_gid, cred)) ||
835 853 (privflags & PRIV_INCREASE) != 0)
836 854 suidflags = PSUIDFLAGS;
837 855 else
838 856 suidflags = 0;
839 857
840 858 mutex_exit(&pp->p_crlock);
841 859 if (newcred != NULL && oruid != newcred->cr_ruid) {
842 860 /* Note that the process remains in the same zone. */
843 861 mutex_enter(&pidlock);
844 862 upcount_dec(oruid, crgetzoneid(newcred));
845 863 upcount_inc(newcred->cr_ruid, crgetzoneid(newcred));
846 864 mutex_exit(&pidlock);
847 865 }
848 866 if (suidflags) {
849 867 mutex_enter(&pp->p_lock);
850 868 pp->p_flag |= suidflags;
851 869 mutex_exit(&pp->p_lock);
852 870 }
853 871 if (setid && (pp->p_proc_flag & P_PR_PTRACE) == 0) {
854 872 /*
855 873 * If process is traced via /proc, arrange to
856 874 * invalidate the associated /proc vnode.
857 875 */
858 876 if (pp->p_plist || (pp->p_proc_flag & P_PR_TRACE))
859 877 args->traceinval = 1;
860 878 }
861 879 if (pp->p_proc_flag & P_PR_PTRACE)
862 880 psignal(pp, SIGTRAP);
863 881 if (args->traceinval)
864 882 prinvalidate(&pp->p_user);
865 883 }
866 884 if (execvp)
867 885 VN_RELE(execvp);
868 886 return (0);
869 887
870 888 bad:
871 889 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, cred, NULL);
872 890
873 891 bad_noclose:
874 892 if (newcred != NULL)
875 893 crfree(newcred);
876 894 if (error == 0)
877 895 error = ENOEXEC;
878 896
879 897 if (suidflags) {
880 898 mutex_enter(&pp->p_lock);
881 899 pp->p_flag |= suidflags;
882 900 mutex_exit(&pp->p_lock);
883 901 }
884 902 return (error);
885 903 }
886 904
887 905 extern char *execswnames[];
888 906
889 907 struct execsw *
890 908 allocate_execsw(char *name, char *magic, size_t magic_size)
891 909 {
892 910 int i, j;
893 911 char *ename;
894 912 char *magicp;
895 913
896 914 mutex_enter(&execsw_lock);
897 915 for (i = 0; i < nexectype; i++) {
898 916 if (execswnames[i] == NULL) {
899 917 ename = kmem_alloc(strlen(name) + 1, KM_SLEEP);
900 918 (void) strcpy(ename, name);
901 919 execswnames[i] = ename;
902 920 /*
903 921 * Set the magic number last so that we
904 922 * don't need to hold the execsw_lock in
905 923 * findexectype().
906 924 */
907 925 magicp = kmem_alloc(magic_size, KM_SLEEP);
908 926 for (j = 0; j < magic_size; j++)
909 927 magicp[j] = magic[j];
910 928 execsw[i].exec_magic = magicp;
911 929 mutex_exit(&execsw_lock);
912 930 return (&execsw[i]);
913 931 }
914 932 }
915 933 mutex_exit(&execsw_lock);
916 934 return (NULL);
917 935 }
918 936
919 937 /*
920 938 * Find the exec switch table entry with the corresponding magic string.
921 939 */
922 940 struct execsw *
923 941 findexecsw(char *magic)
924 942 {
925 943 struct execsw *eswp;
926 944
927 945 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
928 946 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
929 947 if (magic && eswp->exec_maglen != 0 &&
930 948 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0)
931 949 return (eswp);
932 950 }
933 951 return (NULL);
934 952 }
935 953
936 954 /*
937 955 * Find the execsw[] index for the given exec header string by looking for the
938 956 * magic string at a specified offset and length for each kind of executable
939 957 * file format until one matches. If no execsw[] entry is found, try to
940 958 * autoload a module for this magic string.
941 959 */
942 960 struct execsw *
943 961 findexec_by_hdr(char *header)
944 962 {
945 963 struct execsw *eswp;
946 964
947 965 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
948 966 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
949 967 if (header && eswp->exec_maglen != 0 &&
950 968 bcmp(&header[eswp->exec_magoff], eswp->exec_magic,
951 969 eswp->exec_maglen) == 0) {
952 970 if (hold_execsw(eswp) != 0)
953 971 return (NULL);
954 972 return (eswp);
955 973 }
956 974 }
957 975 return (NULL); /* couldn't find the type */
958 976 }
959 977
960 978 /*
961 979 * Find the execsw[] index for the given magic string. If no execsw[] entry
962 980 * is found, try to autoload a module for this magic string.
963 981 */
964 982 struct execsw *
965 983 findexec_by_magic(char *magic)
966 984 {
967 985 struct execsw *eswp;
968 986
969 987 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
970 988 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
971 989 if (magic && eswp->exec_maglen != 0 &&
972 990 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) {
973 991 if (hold_execsw(eswp) != 0)
974 992 return (NULL);
975 993 return (eswp);
976 994 }
977 995 }
978 996 return (NULL); /* couldn't find the type */
979 997 }
980 998
981 999 static int
982 1000 hold_execsw(struct execsw *eswp)
983 1001 {
984 1002 char *name;
985 1003
986 1004 rw_enter(eswp->exec_lock, RW_READER);
987 1005 while (!LOADED_EXEC(eswp)) {
988 1006 rw_exit(eswp->exec_lock);
989 1007 name = execswnames[eswp-execsw];
990 1008 ASSERT(name);
991 1009 if (modload("exec", name) == -1)
992 1010 return (-1);
993 1011 rw_enter(eswp->exec_lock, RW_READER);
994 1012 }
995 1013 return (0);
996 1014 }
997 1015
998 1016 static int
999 1017 execsetid(struct vnode *vp, struct vattr *vattrp, uid_t *uidp, uid_t *gidp,
1000 1018 priv_set_t *fset, cred_t *cr, const char *pathname)
1001 1019 {
1002 1020 proc_t *pp = ttoproc(curthread);
1003 1021 uid_t uid, gid;
1004 1022 int privflags = 0;
1005 1023
1006 1024 /*
1007 1025 * Remember credentials.
1008 1026 */
1009 1027 uid = cr->cr_uid;
1010 1028 gid = cr->cr_gid;
1011 1029
1012 1030 /* Will try to reset the PRIV_AWARE bit later. */
1013 1031 if ((CR_FLAGS(cr) & (PRIV_AWARE|PRIV_AWARE_INHERIT)) == PRIV_AWARE)
1014 1032 privflags |= PRIV_RESET;
1015 1033
1016 1034 if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) == 0) {
1017 1035 /*
1018 1036 * If it's a set-uid root program we perform the
1019 1037 * forced privilege look-aside. This has three possible
1020 1038 * outcomes:
1021 1039 * no look aside information -> treat as before
1022 1040 * look aside in Limit set -> apply forced privs
1023 1041 * look aside not in Limit set -> ignore set-uid root
1024 1042 *
1025 1043 * Ordinary set-uid root execution only allowed if the limit
1026 1044 * set holds all unsafe privileges.
1027 1045 */
1028 1046 if (vattrp->va_mode & VSUID) {
1029 1047 if (vattrp->va_uid == 0) {
1030 1048 int res = get_forced_privs(cr, pathname, fset);
1031 1049
1032 1050 switch (res) {
1033 1051 case -1:
1034 1052 if (priv_issubset(&priv_unsafe,
1035 1053 &CR_LPRIV(cr))) {
1036 1054 uid = vattrp->va_uid;
1037 1055 privflags |= PRIV_SETUGID;
1038 1056 }
1039 1057 break;
1040 1058 case 0:
1041 1059 privflags |= PRIV_FORCED|PRIV_INCREASE;
1042 1060 break;
1043 1061 default:
1044 1062 break;
1045 1063 }
1046 1064 } else {
1047 1065 uid = vattrp->va_uid;
1048 1066 privflags |= PRIV_SETUGID;
1049 1067 }
1050 1068 }
1051 1069 if (vattrp->va_mode & VSGID) {
1052 1070 gid = vattrp->va_gid;
1053 1071 privflags |= PRIV_SETUGID;
1054 1072 }
1055 1073 }
1056 1074
1057 1075 /*
1058 1076 * Do we need to change our credential anyway?
1059 1077 * This is the case when E != I or P != I, as
1060 1078 * we need to do the assignments (with F empty and A full)
1061 1079 * Or when I is not a subset of L; in that case we need to
1062 1080 * enforce L.
1063 1081 *
1064 1082 * I' = L & I
1065 1083 *
1066 1084 * E' = P' = (I' + F) & A
1067 1085 * or
1068 1086 * E' = P' = I'
1069 1087 */
1070 1088 if (!priv_isequalset(&CR_EPRIV(cr), &CR_IPRIV(cr)) ||
1071 1089 !priv_issubset(&CR_IPRIV(cr), &CR_LPRIV(cr)) ||
1072 1090 !priv_isequalset(&CR_PPRIV(cr), &CR_IPRIV(cr)))
1073 1091 privflags |= PRIV_RESET;
1074 1092
1075 1093 /* Child has more privileges than parent */
1076 1094 if (!priv_issubset(&CR_IPRIV(cr), &CR_PPRIV(cr)))
1077 1095 privflags |= PRIV_INCREASE;
1078 1096
1079 1097 /* If MAC-aware flag(s) are on, need to update cred to remove. */
1080 1098 if ((CR_FLAGS(cr) & NET_MAC_AWARE) ||
1081 1099 (CR_FLAGS(cr) & NET_MAC_AWARE_INHERIT))
1082 1100 privflags |= MAC_FLAGS;
1083 1101 /*
1084 1102 * Set setuid/setgid protections if no ptrace() compatibility.
1085 1103 * For privileged processes, honor setuid/setgid even in
1086 1104 * the presence of ptrace() compatibility.
1087 1105 */
1088 1106 if (((pp->p_proc_flag & P_PR_PTRACE) == 0 ||
1089 1107 PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, (uid == 0))) &&
1090 1108 (cr->cr_uid != uid ||
1091 1109 cr->cr_gid != gid ||
1092 1110 cr->cr_suid != uid ||
1093 1111 cr->cr_sgid != gid)) {
1094 1112 *uidp = uid;
1095 1113 *gidp = gid;
1096 1114 privflags |= PRIV_SETID;
1097 1115 }
1098 1116 return (privflags);
1099 1117 }
1100 1118
1101 1119 int
1102 1120 execpermissions(struct vnode *vp, struct vattr *vattrp, struct uarg *args)
1103 1121 {
1104 1122 int error;
1105 1123 proc_t *p = ttoproc(curthread);
1106 1124
1107 1125 vattrp->va_mask = AT_MODE | AT_UID | AT_GID | AT_SIZE;
1108 1126 if (error = VOP_GETATTR(vp, vattrp, ATTR_EXEC, p->p_cred, NULL))
1109 1127 return (error);
1110 1128 /*
1111 1129 * Check the access mode.
1112 1130 * If VPROC, ask /proc if the file is an object file.
1113 1131 */
1114 1132 if ((error = VOP_ACCESS(vp, VEXEC, 0, p->p_cred, NULL)) != 0 ||
1115 1133 !(vp->v_type == VREG || (vp->v_type == VPROC && pr_isobject(vp))) ||
1116 1134 (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0 ||
1117 1135 (vattrp->va_mode & (VEXEC|(VEXEC>>3)|(VEXEC>>6))) == 0) {
1118 1136 if (error == 0)
1119 1137 error = EACCES;
1120 1138 return (error);
1121 1139 }
1122 1140
1123 1141 if ((p->p_plist || (p->p_proc_flag & (P_PR_PTRACE|P_PR_TRACE))) &&
1124 1142 (error = VOP_ACCESS(vp, VREAD, 0, p->p_cred, NULL))) {
1125 1143 /*
1126 1144 * If process is under ptrace(2) compatibility,
1127 1145 * fail the exec(2).
1128 1146 */
1129 1147 if (p->p_proc_flag & P_PR_PTRACE)
1130 1148 goto bad;
1131 1149 /*
1132 1150 * Process is traced via /proc.
1133 1151 * Arrange to invalidate the /proc vnode.
1134 1152 */
1135 1153 args->traceinval = 1;
1136 1154 }
1137 1155 return (0);
1138 1156 bad:
1139 1157 if (error == 0)
1140 1158 error = ENOEXEC;
1141 1159 return (error);
1142 1160 }
1143 1161
1144 1162 /*
1145 1163 * Map a section of an executable file into the user's
1146 1164 * address space.
1147 1165 */
1148 1166 int
1149 1167 execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
1150 1168 off_t offset, int prot, int page, uint_t szc)
1151 1169 {
1152 1170 int error = 0;
1153 1171 off_t oldoffset;
1154 1172 caddr_t zfodbase, oldaddr;
1155 1173 size_t end, oldlen;
1156 1174 size_t zfoddiff;
1157 1175 label_t ljb;
1158 1176 proc_t *p = ttoproc(curthread);
1159 1177
1160 1178 oldaddr = addr;
1161 1179 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1162 1180 if (len) {
1163 1181 oldlen = len;
1164 1182 len += ((size_t)oldaddr - (size_t)addr);
1165 1183 oldoffset = offset;
1166 1184 offset = (off_t)((uintptr_t)offset & PAGEMASK);
1167 1185 if (page) {
1168 1186 spgcnt_t prefltmem, availm, npages;
1169 1187 int preread;
1170 1188 uint_t mflag = MAP_PRIVATE | MAP_FIXED;
1171 1189
1172 1190 if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
1173 1191 mflag |= MAP_TEXT;
1174 1192 } else {
1175 1193 mflag |= MAP_INITDATA;
1176 1194 }
1177 1195
1178 1196 if (valid_usr_range(addr, len, prot, p->p_as,
1179 1197 p->p_as->a_userlimit) != RANGE_OKAY) {
1180 1198 error = ENOMEM;
1181 1199 goto bad;
1182 1200 }
1183 1201 if (error = VOP_MAP(vp, (offset_t)offset,
1184 1202 p->p_as, &addr, len, prot, PROT_ALL,
1185 1203 mflag, CRED(), NULL))
1186 1204 goto bad;
1187 1205
1188 1206 /*
1189 1207 * If the segment can fit, then we prefault
1190 1208 * the entire segment in. This is based on the
1191 1209 * model that says the best working set of a
1192 1210 * small program is all of its pages.
1193 1211 */
1194 1212 npages = (spgcnt_t)btopr(len);
1195 1213 prefltmem = freemem - desfree;
1196 1214 preread =
1197 1215 (npages < prefltmem && len < PGTHRESH) ? 1 : 0;
1198 1216
1199 1217 /*
1200 1218 * If we aren't prefaulting the segment,
1201 1219 * increment "deficit", if necessary to ensure
1202 1220 * that pages will become available when this
1203 1221 * process starts executing.
1204 1222 */
1205 1223 availm = freemem - lotsfree;
1206 1224 if (preread == 0 && npages > availm &&
1207 1225 deficit < lotsfree) {
1208 1226 deficit += MIN((pgcnt_t)(npages - availm),
1209 1227 lotsfree - deficit);
1210 1228 }
1211 1229
1212 1230 if (preread) {
1213 1231 TRACE_2(TR_FAC_PROC, TR_EXECMAP_PREREAD,
1214 1232 "execmap preread:freemem %d size %lu",
1215 1233 freemem, len);
1216 1234 (void) as_fault(p->p_as->a_hat, p->p_as,
1217 1235 (caddr_t)addr, len, F_INVAL, S_READ);
1218 1236 }
1219 1237 } else {
1220 1238 if (valid_usr_range(addr, len, prot, p->p_as,
1221 1239 p->p_as->a_userlimit) != RANGE_OKAY) {
1222 1240 error = ENOMEM;
1223 1241 goto bad;
1224 1242 }
1225 1243
1226 1244 if (error = as_map(p->p_as, addr, len,
1227 1245 segvn_create, zfod_argsp))
1228 1246 goto bad;
1229 1247 /*
1230 1248 * Read in the segment in one big chunk.
1231 1249 */
1232 1250 if (error = vn_rdwr(UIO_READ, vp, (caddr_t)oldaddr,
1233 1251 oldlen, (offset_t)oldoffset, UIO_USERSPACE, 0,
1234 1252 (rlim64_t)0, CRED(), (ssize_t *)0))
1235 1253 goto bad;
1236 1254 /*
1237 1255 * Now set protections.
1238 1256 */
1239 1257 if (prot != PROT_ZFOD) {
1240 1258 (void) as_setprot(p->p_as, (caddr_t)addr,
1241 1259 len, prot);
1242 1260 }
1243 1261 }
1244 1262 }
1245 1263
1246 1264 if (zfodlen) {
1247 1265 struct as *as = curproc->p_as;
1248 1266 struct seg *seg;
1249 1267 uint_t zprot = 0;
1250 1268
1251 1269 end = (size_t)addr + len;
1252 1270 zfodbase = (caddr_t)roundup(end, PAGESIZE);
1253 1271 zfoddiff = (uintptr_t)zfodbase - end;
1254 1272 if (zfoddiff) {
1255 1273 /*
1256 1274 * Before we go to zero the remaining space on the last
1257 1275 * page, make sure we have write permission.
1258 1276 *
1259 1277 * Normal illumos binaries don't even hit the case
1260 1278 * where we have to change permission on the last page
1261 1279 * since their protection is typically either
1262 1280 * PROT_USER | PROT_WRITE | PROT_READ
1263 1281 * or
1264 1282 * PROT_ZFOD (same as PROT_ALL).
1265 1283 *
1266 1284 * We need to be careful how we zero-fill the last page
1267 1285 * if the segment protection does not include
1268 1286 * PROT_WRITE. Using as_setprot() can cause the VM
1269 1287 * segment code to call segvn_vpage(), which must
1270 1288 * allocate a page struct for each page in the segment.
1271 1289 * If we have a very large segment, this may fail, so
1272 1290 * we have to check for that, even though we ignore
1273 1291 * other return values from as_setprot.
1274 1292 */
1275 1293
1276 1294 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1277 1295 seg = as_segat(curproc->p_as, (caddr_t)end);
1278 1296 if (seg != NULL)
1279 1297 SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1,
1280 1298 &zprot);
1281 1299 AS_LOCK_EXIT(as, &as->a_lock);
1282 1300
1283 1301 if (seg != NULL && (zprot & PROT_WRITE) == 0) {
1284 1302 if (as_setprot(as, (caddr_t)end, zfoddiff - 1,
1285 1303 zprot | PROT_WRITE) == ENOMEM) {
1286 1304 error = ENOMEM;
1287 1305 goto bad;
1288 1306 }
1289 1307 }
1290 1308
1291 1309 if (on_fault(&ljb)) {
1292 1310 no_fault();
1293 1311 if (seg != NULL && (zprot & PROT_WRITE) == 0)
1294 1312 (void) as_setprot(as, (caddr_t)end,
1295 1313 zfoddiff - 1, zprot);
1296 1314 error = EFAULT;
1297 1315 goto bad;
1298 1316 }
1299 1317 uzero((void *)end, zfoddiff);
1300 1318 no_fault();
1301 1319 if (seg != NULL && (zprot & PROT_WRITE) == 0)
1302 1320 (void) as_setprot(as, (caddr_t)end,
1303 1321 zfoddiff - 1, zprot);
1304 1322 }
1305 1323 if (zfodlen > zfoddiff) {
1306 1324 struct segvn_crargs crargs =
1307 1325 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
1308 1326
1309 1327 zfodlen -= zfoddiff;
1310 1328 if (valid_usr_range(zfodbase, zfodlen, prot, p->p_as,
1311 1329 p->p_as->a_userlimit) != RANGE_OKAY) {
1312 1330 error = ENOMEM;
1313 1331 goto bad;
1314 1332 }
1315 1333 if (szc > 0) {
1316 1334 /*
1317 1335 * ASSERT alignment because the mapelfexec()
1318 1336 * caller for the szc > 0 case extended zfod
1319 1337 * so its end is pgsz aligned.
1320 1338 */
1321 1339 size_t pgsz = page_get_pagesize(szc);
1322 1340 ASSERT(IS_P2ALIGNED(zfodbase + zfodlen, pgsz));
1323 1341
1324 1342 if (IS_P2ALIGNED(zfodbase, pgsz)) {
1325 1343 crargs.szc = szc;
1326 1344 } else {
1327 1345 crargs.szc = AS_MAP_HEAP;
1328 1346 }
1329 1347 } else {
1330 1348 crargs.szc = AS_MAP_NO_LPOOB;
1331 1349 }
1332 1350 if (error = as_map(p->p_as, (caddr_t)zfodbase,
1333 1351 zfodlen, segvn_create, &crargs))
1334 1352 goto bad;
1335 1353 if (prot != PROT_ZFOD) {
1336 1354 (void) as_setprot(p->p_as, (caddr_t)zfodbase,
1337 1355 zfodlen, prot);
1338 1356 }
1339 1357 }
1340 1358 }
1341 1359 return (0);
1342 1360 bad:
1343 1361 return (error);
1344 1362 }
1345 1363
1346 1364 void
1347 1365 setexecenv(struct execenv *ep)
1348 1366 {
1349 1367 proc_t *p = ttoproc(curthread);
1350 1368 klwp_t *lwp = ttolwp(curthread);
1351 1369 struct vnode *vp;
1352 1370
1353 1371 p->p_bssbase = ep->ex_bssbase;
1354 1372 p->p_brkbase = ep->ex_brkbase;
1355 1373 p->p_brksize = ep->ex_brksize;
1356 1374 if (p->p_exec)
1357 1375 VN_RELE(p->p_exec); /* out with the old */
1358 1376 vp = p->p_exec = ep->ex_vp;
1359 1377 if (vp != NULL)
1360 1378 VN_HOLD(vp); /* in with the new */
1361 1379
1362 1380 lwp->lwp_sigaltstack.ss_sp = 0;
1363 1381 lwp->lwp_sigaltstack.ss_size = 0;
1364 1382 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
1365 1383 }
1366 1384
1367 1385 int
1368 1386 execopen(struct vnode **vpp, int *fdp)
1369 1387 {
1370 1388 struct vnode *vp = *vpp;
1371 1389 file_t *fp;
1372 1390 int error = 0;
1373 1391 int filemode = FREAD;
1374 1392
1375 1393 VN_HOLD(vp); /* open reference */
1376 1394 if (error = falloc(NULL, filemode, &fp, fdp)) {
1377 1395 VN_RELE(vp);
1378 1396 *fdp = -1; /* just in case falloc changed value */
1379 1397 return (error);
1380 1398 }
1381 1399 if (error = VOP_OPEN(&vp, filemode, CRED(), NULL)) {
1382 1400 VN_RELE(vp);
1383 1401 setf(*fdp, NULL);
1384 1402 unfalloc(fp);
1385 1403 *fdp = -1;
1386 1404 return (error);
1387 1405 }
1388 1406 *vpp = vp; /* vnode should not have changed */
1389 1407 fp->f_vnode = vp;
1390 1408 mutex_exit(&fp->f_tlock);
1391 1409 setf(*fdp, fp);
1392 1410 return (0);
1393 1411 }
1394 1412
1395 1413 int
1396 1414 execclose(int fd)
1397 1415 {
1398 1416 return (closeandsetf(fd, NULL));
1399 1417 }
1400 1418
1401 1419
1402 1420 /*
1403 1421 * noexec stub function.
1404 1422 */
1405 1423 /*ARGSUSED*/
1406 1424 int
1407 1425 noexec(
1408 1426 struct vnode *vp,
1409 1427 struct execa *uap,
1410 1428 struct uarg *args,
1411 1429 struct intpdata *idatap,
1412 1430 int level,
1413 1431 long *execsz,
1414 1432 int setid,
1415 1433 caddr_t exec_file,
1416 1434 struct cred *cred)
1417 1435 {
1418 1436 cmn_err(CE_WARN, "missing exec capability for %s", uap->fname);
1419 1437 return (ENOEXEC);
1420 1438 }
1421 1439
1422 1440 /*
1423 1441 * Support routines for building a user stack.
1424 1442 *
1425 1443 * execve(path, argv, envp) must construct a new stack with the specified
1426 1444 * arguments and environment variables (see exec_args() for a description
1427 1445 * of the user stack layout). To do this, we copy the arguments and
1428 1446 * environment variables from the old user address space into the kernel,
1429 1447 * free the old as, create the new as, and copy our buffered information
1430 1448 * to the new stack. Our kernel buffer has the following structure:
1431 1449 *
1432 1450 * +-----------------------+ <--- stk_base + stk_size
1433 1451 * | string offsets |
1434 1452 * +-----------------------+ <--- stk_offp
1435 1453 * | |
1436 1454 * | STK_AVAIL() space |
1437 1455 * | |
1438 1456 * +-----------------------+ <--- stk_strp
1439 1457 * | strings |
1440 1458 * +-----------------------+ <--- stk_base
1441 1459 *
1442 1460 * When we add a string, we store the string's contents (including the null
1443 1461 * terminator) at stk_strp, and we store the offset of the string relative to
1444 1462 * stk_base at --stk_offp. As strings are added, stk_strp increases and
1445 1463 * stk_offp decreases. The amount of space remaining, STK_AVAIL(), is just
1446 1464 * the difference between these pointers. If we run out of space, we return
1447 1465 * an error and exec_args() starts all over again with a buffer twice as large.
1448 1466 * When we're all done, the kernel buffer looks like this:
1449 1467 *
1450 1468 * +-----------------------+ <--- stk_base + stk_size
1451 1469 * | argv[0] offset |
1452 1470 * +-----------------------+
1453 1471 * | ... |
1454 1472 * +-----------------------+
1455 1473 * | argv[argc-1] offset |
1456 1474 * +-----------------------+
1457 1475 * | envp[0] offset |
1458 1476 * +-----------------------+
1459 1477 * | ... |
1460 1478 * +-----------------------+
1461 1479 * | envp[envc-1] offset |
1462 1480 * +-----------------------+
1463 1481 * | AT_SUN_PLATFORM offset|
1464 1482 * +-----------------------+
1465 1483 * | AT_SUN_EXECNAME offset|
1466 1484 * +-----------------------+ <--- stk_offp
1467 1485 * | |
1468 1486 * | STK_AVAIL() space |
1469 1487 * | |
1470 1488 * +-----------------------+ <--- stk_strp
1471 1489 * | AT_SUN_EXECNAME offset|
1472 1490 * +-----------------------+
1473 1491 * | AT_SUN_PLATFORM offset|
1474 1492 * +-----------------------+
1475 1493 * | envp[envc-1] string |
1476 1494 * +-----------------------+
1477 1495 * | ... |
1478 1496 * +-----------------------+
1479 1497 * | envp[0] string |
1480 1498 * +-----------------------+
1481 1499 * | argv[argc-1] string |
1482 1500 * +-----------------------+
1483 1501 * | ... |
1484 1502 * +-----------------------+
1485 1503 * | argv[0] string |
1486 1504 * +-----------------------+ <--- stk_base
1487 1505 */
1488 1506
1489 1507 #define STK_AVAIL(args) ((char *)(args)->stk_offp - (args)->stk_strp)
1490 1508
1491 1509 /*
1492 1510 * Add a string to the stack.
1493 1511 */
1494 1512 static int
1495 1513 stk_add(uarg_t *args, const char *sp, enum uio_seg segflg)
1496 1514 {
1497 1515 int error;
1498 1516 size_t len;
1499 1517
1500 1518 if (STK_AVAIL(args) < sizeof (int))
1501 1519 return (E2BIG);
1502 1520 *--args->stk_offp = args->stk_strp - args->stk_base;
1503 1521
1504 1522 if (segflg == UIO_USERSPACE) {
1505 1523 error = copyinstr(sp, args->stk_strp, STK_AVAIL(args), &len);
1506 1524 if (error != 0)
1507 1525 return (error);
1508 1526 } else {
1509 1527 len = strlen(sp) + 1;
1510 1528 if (len > STK_AVAIL(args))
1511 1529 return (E2BIG);
1512 1530 bcopy(sp, args->stk_strp, len);
1513 1531 }
1514 1532
1515 1533 args->stk_strp += len;
1516 1534
1517 1535 return (0);
1518 1536 }
1519 1537
1520 1538 static int
1521 1539 stk_getptr(uarg_t *args, char *src, char **dst)
1522 1540 {
1523 1541 int error;
1524 1542
1525 1543 if (args->from_model == DATAMODEL_NATIVE) {
1526 1544 ulong_t ptr;
1527 1545 error = fulword(src, &ptr);
1528 1546 *dst = (caddr_t)ptr;
1529 1547 } else {
1530 1548 uint32_t ptr;
1531 1549 error = fuword32(src, &ptr);
1532 1550 *dst = (caddr_t)(uintptr_t)ptr;
1533 1551 }
1534 1552 return (error);
1535 1553 }
1536 1554
1537 1555 static int
1538 1556 stk_putptr(uarg_t *args, char *addr, char *value)
1539 1557 {
1540 1558 if (args->to_model == DATAMODEL_NATIVE)
1541 1559 return (sulword(addr, (ulong_t)value));
1542 1560 else
1543 1561 return (suword32(addr, (uint32_t)(uintptr_t)value));
1544 1562 }
1545 1563
1546 1564 static int
1547 1565 stk_copyin(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
1548 1566 {
1549 1567 char *sp;
1550 1568 int argc, error;
1551 1569 int argv_empty = 0;
1552 1570 size_t ptrsize = args->from_ptrsize;
1553 1571 size_t size, pad;
1554 1572 char *argv = (char *)uap->argp;
1555 1573 char *envp = (char *)uap->envp;
1556 1574
1557 1575 /*
1558 1576 * Copy interpreter's name and argument to argv[0] and argv[1].
1559 1577 */
1560 1578 if (intp != NULL && intp->intp_name != NULL) {
1561 1579 if ((error = stk_add(args, intp->intp_name, UIO_SYSSPACE)) != 0)
1562 1580 return (error);
1563 1581 if (intp->intp_arg != NULL &&
1564 1582 (error = stk_add(args, intp->intp_arg, UIO_SYSSPACE)) != 0)
1565 1583 return (error);
1566 1584 if (args->fname != NULL)
1567 1585 error = stk_add(args, args->fname, UIO_SYSSPACE);
1568 1586 else
1569 1587 error = stk_add(args, uap->fname, UIO_USERSPACE);
1570 1588 if (error)
1571 1589 return (error);
1572 1590
1573 1591 /*
1574 1592 * Check for an empty argv[].
1575 1593 */
1576 1594 if (stk_getptr(args, argv, &sp))
1577 1595 return (EFAULT);
1578 1596 if (sp == NULL)
1579 1597 argv_empty = 1;
1580 1598
1581 1599 argv += ptrsize; /* ignore original argv[0] */
1582 1600 }
1583 1601
1584 1602 if (argv_empty == 0) {
1585 1603 /*
1586 1604 * Add argv[] strings to the stack.
1587 1605 */
1588 1606 for (;;) {
1589 1607 if (stk_getptr(args, argv, &sp))
1590 1608 return (EFAULT);
1591 1609 if (sp == NULL)
1592 1610 break;
1593 1611 if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
1594 1612 return (error);
1595 1613 argv += ptrsize;
1596 1614 }
1597 1615 }
1598 1616 argc = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
1599 1617 args->arglen = args->stk_strp - args->stk_base;
1600 1618
1601 1619 /*
1602 1620 * Add environ[] strings to the stack.
1603 1621 */
1604 1622 if (envp != NULL) {
1605 1623 for (;;) {
1606 1624 char *tmp = args->stk_strp;
1607 1625 if (stk_getptr(args, envp, &sp))
1608 1626 return (EFAULT);
1609 1627 if (sp == NULL)
1610 1628 break;
1611 1629 if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
1612 1630 return (error);
1613 1631 if (args->scrubenv && strncmp(tmp, "LD_", 3) == 0) {
1614 1632 /* Undo the copied string */
1615 1633 args->stk_strp = tmp;
1616 1634 *(args->stk_offp++) = NULL;
1617 1635 }
1618 1636 envp += ptrsize;
1619 1637 }
1620 1638 }
1621 1639 args->na = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
1622 1640 args->ne = args->na - argc;
1623 1641
1624 1642 /*
1625 1643 * Add AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME, and
1626 1644 * AT_SUN_EMULATOR strings to the stack.
1627 1645 */
1628 1646 if (auxvpp != NULL && *auxvpp != NULL) {
1629 1647 if ((error = stk_add(args, platform, UIO_SYSSPACE)) != 0)
1630 1648 return (error);
1631 1649 if ((error = stk_add(args, args->pathname, UIO_SYSSPACE)) != 0)
1632 1650 return (error);
1633 1651 if (args->brandname != NULL &&
1634 1652 (error = stk_add(args, args->brandname, UIO_SYSSPACE)) != 0)
1635 1653 return (error);
1636 1654 if (args->emulator != NULL &&
1637 1655 (error = stk_add(args, args->emulator, UIO_SYSSPACE)) != 0)
1638 1656 return (error);
1639 1657 }
1640 1658
1641 1659 /*
1642 1660 * Compute the size of the stack. This includes all the pointers,
1643 1661 * the space reserved for the aux vector, and all the strings.
1644 1662 * The total number of pointers is args->na (which is argc + envc)
1645 1663 * plus 4 more: (1) a pointer's worth of space for argc; (2) the NULL
1646 1664 * after the last argument (i.e. argv[argc]); (3) the NULL after the
1647 1665 * last environment variable (i.e. envp[envc]); and (4) the NULL after
1648 1666 * all the strings, at the very top of the stack.
1649 1667 */
1650 1668 size = (args->na + 4) * args->to_ptrsize + args->auxsize +
1651 1669 (args->stk_strp - args->stk_base);
1652 1670
1653 1671 /*
1654 1672 * Pad the string section with zeroes to align the stack size.
1655 1673 */
1656 1674 pad = P2NPHASE(size, args->stk_align);
1657 1675
1658 1676 if (STK_AVAIL(args) < pad)
1659 1677 return (E2BIG);
1660 1678
1661 1679 args->usrstack_size = size + pad;
1662 1680
1663 1681 while (pad-- != 0)
1664 1682 *args->stk_strp++ = 0;
1665 1683
1666 1684 args->nc = args->stk_strp - args->stk_base;
1667 1685
1668 1686 return (0);
1669 1687 }
1670 1688
1671 1689 static int
1672 1690 stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
1673 1691 {
1674 1692 size_t ptrsize = args->to_ptrsize;
1675 1693 ssize_t pslen;
1676 1694 char *kstrp = args->stk_base;
1677 1695 char *ustrp = usrstack - args->nc - ptrsize;
1678 1696 char *usp = usrstack - args->usrstack_size;
1679 1697 int *offp = (int *)(args->stk_base + args->stk_size);
1680 1698 int envc = args->ne;
1681 1699 int argc = args->na - envc;
1682 1700 int i;
1683 1701
1684 1702 /*
1685 1703 * Record argc for /proc.
1686 1704 */
1687 1705 up->u_argc = argc;
1688 1706
1689 1707 /*
1690 1708 * Put argc on the stack. Note that even though it's an int,
1691 1709 * it always consumes ptrsize bytes (for alignment).
1692 1710 */
1693 1711 if (stk_putptr(args, usp, (char *)(uintptr_t)argc))
1694 1712 return (-1);
1695 1713
1696 1714 /*
1697 1715 * Add argc space (ptrsize) to usp and record argv for /proc.
1698 1716 */
1699 1717 up->u_argv = (uintptr_t)(usp += ptrsize);
1700 1718
1701 1719 /*
1702 1720 * Put the argv[] pointers on the stack.
1703 1721 */
1704 1722 for (i = 0; i < argc; i++, usp += ptrsize)
1705 1723 if (stk_putptr(args, usp, &ustrp[*--offp]))
1706 1724 return (-1);
1707 1725
1708 1726 /*
1709 1727 * Copy arguments to u_psargs.
1710 1728 */
1711 1729 pslen = MIN(args->arglen, PSARGSZ) - 1;
1712 1730 for (i = 0; i < pslen; i++)
1713 1731 up->u_psargs[i] = (kstrp[i] == '\0' ? ' ' : kstrp[i]);
1714 1732 while (i < PSARGSZ)
1715 1733 up->u_psargs[i++] = '\0';
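	/*
	 * (Illustrative example: for argv {"ls", "-l", "/tmp"} the
	 * argument strings sit at the start of stk_base as
	 * "ls\0-l\0/tmp\0", so the NUL-to-space conversion above yields
	 * u_psargs == "ls -l /tmp", the string later reported by ps(1).)
	 */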
1716 1734
1717 1735 /*
1718 1736 * Add space for argv[]'s NULL terminator (ptrsize) to usp and
1719 1737 * record envp for /proc.
1720 1738 */
1721 1739 up->u_envp = (uintptr_t)(usp += ptrsize);
1722 1740
1723 1741 /*
1724 1742 * Put the envp[] pointers on the stack.
1725 1743 */
1726 1744 for (i = 0; i < envc; i++, usp += ptrsize)
1727 1745 if (stk_putptr(args, usp, &ustrp[*--offp]))
1728 1746 return (-1);
1729 1747
1730 1748 /*
1731 1749 * Add space for envp[]'s NULL terminator (ptrsize) to usp and
1732 1750 * remember where the stack ends, which is also where auxv begins.
1733 1751 */
1734 1752 args->stackend = usp += ptrsize;
1735 1753
1736 1754 /*
1737 1755 * Put all the argv[], envp[], and auxv strings on the stack.
1738 1756 */
1739 1757 if (copyout(args->stk_base, ustrp, args->nc))
1740 1758 return (-1);
1741 1759
1742 1760 /*
1743 1761 * Fill in the aux vector now that we know the user stack addresses
1744 1762 * for the AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME and
1745 1763 * AT_SUN_EMULATOR strings.
1746 1764 */
1747 1765 if (auxvpp != NULL && *auxvpp != NULL) {
1748 1766 if (args->to_model == DATAMODEL_NATIVE) {
1749 1767 auxv_t **a = (auxv_t **)auxvpp;
1750 1768 ADDAUX(*a, AT_SUN_PLATFORM, (long)&ustrp[*--offp])
1751 1769 ADDAUX(*a, AT_SUN_EXECNAME, (long)&ustrp[*--offp])
1752 1770 if (args->brandname != NULL)
1753 1771 ADDAUX(*a,
1754 1772 AT_SUN_BRANDNAME, (long)&ustrp[*--offp])
1755 1773 if (args->emulator != NULL)
1756 1774 ADDAUX(*a,
1757 1775 AT_SUN_EMULATOR, (long)&ustrp[*--offp])
1758 1776 } else {
1759 1777 auxv32_t **a = (auxv32_t **)auxvpp;
1760 1778 ADDAUX(*a,
1761 1779 AT_SUN_PLATFORM, (int)(uintptr_t)&ustrp[*--offp])
1762 1780 ADDAUX(*a,
1763 1781 AT_SUN_EXECNAME, (int)(uintptr_t)&ustrp[*--offp])
1764 1782 if (args->brandname != NULL)
1765 1783 ADDAUX(*a, AT_SUN_BRANDNAME,
1766 1784 (int)(uintptr_t)&ustrp[*--offp])
1767 1785 if (args->emulator != NULL)
1768 1786 ADDAUX(*a, AT_SUN_EMULATOR,
1769 1787 (int)(uintptr_t)&ustrp[*--offp])
1770 1788 }
1771 1789 }
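	/*
	 * (Descriptive note: ADDAUX() fills in the next aux vector slot
	 * and advances the pointer, roughly
	 *	(p)->a_type = type; ((p)++)->a_un.a_val = val;
	 * so each string placed on the stack above gets a matching auxv
	 * entry pointing at its user address.)
	 */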
1772 1790
1773 1791 return (0);
1774 1792 }
1775 1793
1776 1794 /*
1777 1795 * Initialize a new user stack with the specified arguments and environment.
1778 1796 * The initial user stack layout is as follows:
1779 1797 *
1780 1798 * User Stack
1781 1799 * +---------------+ <--- curproc->p_usrstack
1782 1800 * | |
1783 1801 * | slew |
1784 1802 * | |
1785 1803 * +---------------+
1786 1804 * | NULL |
1787 1805 * +---------------+
1788 1806 * | |
1789 1807 * | auxv strings |
1790 1808 * | |
1791 1809 * +---------------+
1792 1810 * | |
1793 1811 * | envp strings |
1794 1812 * | |
1795 1813 * +---------------+
1796 1814 * | |
1797 1815 * | argv strings |
1798 1816 * | |
1799 1817 * +---------------+ <--- ustrp
1800 1818 * | |
1801 1819 * | aux vector |
1802 1820 * | |
1803 1821 * +---------------+ <--- auxv
1804 1822 * | NULL |
1805 1823 * +---------------+
1806 1824 * | envp[envc-1] |
1807 1825 * +---------------+
1808 1826 * | ... |
1809 1827 * +---------------+
1810 1828 * | envp[0] |
1811 1829 * +---------------+ <--- envp[]
1812 1830 * | NULL |
1813 1831 * +---------------+
1814 1832 * | argv[argc-1] |
1815 1833 * +---------------+
1816 1834 * | ... |
1817 1835 * +---------------+
1818 1836 * | argv[0] |
1819 1837 * +---------------+ <--- argv[]
1820 1838 * | argc |
1821 1839 * +---------------+ <--- stack base
1822 1840 */
1823 1841 int
1824 1842 exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
1825 1843 {
1826 1844 size_t size;
1827 1845 int error;
1828 1846 proc_t *p = ttoproc(curthread);
1829 1847 user_t *up = PTOU(p);
1830 1848 char *usrstack;
1831 1849 rctl_entity_p_t e;
1832 1850 struct as *as;
1833 1851 extern int use_stk_lpg;
1834 1852 size_t sp_slew;
1835 1853
1836 1854 args->from_model = p->p_model;
1837 1855 if (p->p_model == DATAMODEL_NATIVE) {
1838 1856 args->from_ptrsize = sizeof (long);
1839 1857 } else {
1840 1858 args->from_ptrsize = sizeof (int32_t);
1841 1859 }
1842 1860
1843 1861 if (args->to_model == DATAMODEL_NATIVE) {
1844 1862 args->to_ptrsize = sizeof (long);
1845 1863 args->ncargs = NCARGS;
1846 1864 args->stk_align = STACK_ALIGN;
1847 1865 if (args->addr32)
1848 1866 usrstack = (char *)USRSTACK64_32;
1849 1867 else
1850 1868 usrstack = (char *)USRSTACK;
1851 1869 } else {
1852 1870 args->to_ptrsize = sizeof (int32_t);
1853 1871 args->ncargs = NCARGS32;
1854 1872 args->stk_align = STACK_ALIGN32;
1855 1873 usrstack = (char *)USRSTACK32;
1856 1874 }
1857 1875
1858 1876 ASSERT(P2PHASE((uintptr_t)usrstack, args->stk_align) == 0);
1859 1877
1860 1878 #if defined(__sparc)
1861 1879 /*
1862 1880 * Make sure user register windows are empty before
1863 1881 * attempting to make a new stack.
1864 1882 */
1865 1883 (void) flush_user_windows_to_stack(NULL);
1866 1884 #endif
1867 1885
1868 1886 for (size = PAGESIZE; ; size *= 2) {
1869 1887 args->stk_size = size;
1870 1888 args->stk_base = kmem_alloc(size, KM_SLEEP);
1871 1889 args->stk_strp = args->stk_base;
1872 1890 args->stk_offp = (int *)(args->stk_base + size);
1873 1891 error = stk_copyin(uap, args, intp, auxvpp);
1874 1892 if (error == 0)
1875 1893 break;
1876 1894 kmem_free(args->stk_base, size);
1877 1895 if (error != E2BIG && error != ENAMETOOLONG)
1878 1896 return (error);
1879 1897 if (size >= args->ncargs)
1880 1898 return (E2BIG);
1881 1899 }
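	/*
	 * (Descriptive note: the buffer grows geometrically, PAGESIZE,
	 * 2 * PAGESIZE, 4 * PAGESIZE, ..., retrying stk_copyin() after
	 * E2BIG or ENAMETOOLONG, and gives up with E2BIG once the size
	 * reaches args->ncargs, i.e. NCARGS or NCARGS32.)
	 */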
1882 1900
1883 1901 size = args->usrstack_size;
1884 1902
1885 1903 ASSERT(error == 0);
1886 1904 ASSERT(P2PHASE(size, args->stk_align) == 0);
1887 1905 ASSERT((ssize_t)STK_AVAIL(args) >= 0);
1888 1906
1889 1907 if (size > args->ncargs) {
1890 1908 kmem_free(args->stk_base, args->stk_size);
1891 1909 return (E2BIG);
1892 1910 }
1893 1911
1894 1912 /*
1895 1913 * Leave only the current lwp and force the other lwps to exit.
1896 1914 * If another lwp beat us to the punch by calling exit(), bail out.
1897 1915 */
1898 1916 if ((error = exitlwps(0)) != 0) {
1899 1917 kmem_free(args->stk_base, args->stk_size);
1900 1918 return (error);
1901 1919 }
1902 1920
1903 1921 /*
1904 1922 * Revoke any doors created by the process.
1905 1923 */
1906 1924 if (p->p_door_list)
1907 1925 door_exit();
1908 1926
1909 1927 /*
1910 1928 * Release schedctl data structures.
1911 1929 */
1912 1930 if (p->p_pagep)
1913 1931 schedctl_proc_cleanup();
1914 1932
1915 1933 /*
1916 1934 * Clean up any DTrace helpers for the process.
1917 1935 */
1918 1936 if (p->p_dtrace_helpers != NULL) {
1919 1937 ASSERT(dtrace_helpers_cleanup != NULL);
1920 1938 (*dtrace_helpers_cleanup)();
1921 1939 }
1922 1940
1923 1941 mutex_enter(&p->p_lock);
1924 1942 /*
1925 1943 * Cleanup the DTrace provider associated with this process.
1926 1944 */
1927 1945 if (p->p_dtrace_probes) {
1928 1946 ASSERT(dtrace_fasttrap_exec_ptr != NULL);
1929 1947 dtrace_fasttrap_exec_ptr(p);
1930 1948 }
1931 1949 mutex_exit(&p->p_lock);
1932 1950
1933 1951 /*
1934 1952      	 * Discard the lwpchan cache.
1935 1953 */
1936 1954 if (p->p_lcp != NULL)
1937 1955 lwpchan_destroy_cache(1);
1938 1956
1939 1957 /*
1940 1958 * Delete the POSIX timers.
1941 1959 */
1942 1960 if (p->p_itimer != NULL)
1943 1961 timer_exit();
1944 1962
1945 1963 /*
1946 1964 * Delete the ITIMER_REALPROF interval timer.
1947 1965 * The other ITIMER_* interval timers are specified
1948 1966 * to be inherited across exec().
1949 1967 */
1950 1968 delete_itimer_realprof();
1951 1969
1952 1970 if (AU_AUDITING())
1953 1971 audit_exec(args->stk_base, args->stk_base + args->arglen,
1954 1972 args->na - args->ne, args->ne, args->pfcred);
1955 1973
1956 1974 /*
1957 1975 * Ensure that we don't change resource associations while we
1958 1976 * change address spaces.
1959 1977 */
1960 1978 mutex_enter(&p->p_lock);
1961 1979 pool_barrier_enter();
1962 1980 mutex_exit(&p->p_lock);
1963 1981
1964 1982 /*
1965 1983 * Destroy the old address space and create a new one.
1966 1984 * From here on, any errors are fatal to the exec()ing process.
1967 1985 * On error we return -1, which means the caller must SIGKILL
1968 1986 * the process.
1969 1987 */
1970 1988 relvm();
1971 1989
1972 1990 mutex_enter(&p->p_lock);
1973 1991 pool_barrier_exit();
1974 1992 mutex_exit(&p->p_lock);
1975 1993
1976 1994 up->u_execsw = args->execswp;
1977 1995
1978 1996 p->p_brkbase = NULL;
1979 1997 p->p_brksize = 0;
1980 1998 p->p_brkpageszc = 0;
1981 1999 p->p_stksize = 0;
1982 2000 p->p_stkpageszc = 0;
1983 2001 p->p_model = args->to_model;
1984 2002 p->p_usrstack = usrstack;
1985 2003 p->p_stkprot = args->stk_prot;
1986 2004 p->p_datprot = args->dat_prot;
1987 2005
1988 2006 /*
1989 2007 * Reset resource controls such that all controls are again active as
1990 2008 * well as appropriate to the potentially new address model for the
1991 2009 * process.
1992 2010 */
1993 2011 e.rcep_p.proc = p;
1994 2012 e.rcep_t = RCENTITY_PROCESS;
1995 2013 rctl_set_reset(p->p_rctls, p, &e);
1996 2014
1997 2015 /* Too early to call map_pgsz for the heap */
1998 2016 if (use_stk_lpg) {
1999 2017 p->p_stkpageszc = page_szc(map_pgsz(MAPPGSZ_STK, p, 0, 0, 0));
2000 2018 }
2001 2019
2002 2020 mutex_enter(&p->p_lock);
2003 2021 p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
2004 2022 mutex_exit(&p->p_lock);
2005 2023
2006 2024 /*
2007 2025 * Some platforms may choose to randomize real stack start by adding a
2008 2026 * small slew (not more than a few hundred bytes) to the top of the
2009 2027 * stack. This helps avoid cache thrashing when identical processes
2010 2028 * simultaneously share caches that don't provide enough associativity
2011 2029      	 * (e.g. sun4v systems). In this case stack slewing causes the same hot
2012 2030      	 * stack variables in different processes to live in different cache
2013 2031      	 * sets, increasing effective associativity.
2014 2032 */
2015 2033 sp_slew = exec_get_spslew();
2016 2034 ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
2017 2035 exec_set_sp(size + sp_slew);
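	/*
	 * (Descriptive note: exec_get_spslew() returns 0 on platforms
	 * that do not slew the stack; where slewing is implemented it
	 * returns a small offset that preserves stack alignment, as the
	 * ASSERT above verifies.)
	 */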
2018 2036
2019 2037 as = as_alloc();
2020 2038 p->p_as = as;
2021 2039 as->a_proc = p;
2022 2040 if (p->p_model == DATAMODEL_ILP32 || args->addr32)
2023 2041 as->a_userlimit = (caddr_t)USERLIMIT32;
2024 2042 (void) hat_setup(as->a_hat, HAT_ALLOC);
2025 2043 hat_join_srd(as->a_hat, args->ex_vp);
2026 2044
2027 2045 /*
2028 2046 * Finally, write out the contents of the new stack.
2029 2047 */
2030 2048 error = stk_copyout(args, usrstack - sp_slew, auxvpp, up);
2031 2049 kmem_free(args->stk_base, args->stk_size);
2032 2050 return (error);
2033 2051 }
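/*
 * Illustrative user-level sketch (editorial addition, not part of this
 * change): the layout that exec_args() builds is what a freshly exec'd
 * process sees.  Assuming the usual <sys/auxv.h> definitions, the aux
 * vector can be reached by walking past the envp[] NULL terminator:
 */
#include <stdio.h>
#include <sys/auxv.h>

int
main(int argc, char *argv[], char *envp[])
{
	char **p = envp;
	auxv_t *av;

	while (*p != NULL)		/* skip the environment pointers */
		p++;

	/* the aux vector begins just past envp[]'s NULL terminator */
	for (av = (auxv_t *)(p + 1); av->a_type != AT_NULL; av++) {
		if (av->a_type == AT_SUN_EXECNAME)
			(void) printf("AT_SUN_EXECNAME: %s\n",
			    (char *)av->a_un.a_ptr);
	}
	return (0);
}
/* In practice getexecname(3C) returns this same AT_SUN_EXECNAME value. */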
↓ open down ↓ |
1822 lines elided |
↑ open up ↑ |