uts: give privilege macros more sensible names
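The hunk in gexec() below renames the privilege-set membership test from PRIV_ISASSERT() to PRIV_ISMEMBER(). A rough sketch of what the renamed macro looks like at a call site follows; the macro arguments and priv_basic_test are taken from the hunk itself, while the wrapper function is purely illustrative and is not part of this change:

    /*
     * Illustrative only: PRIV_ISMEMBER() is the new name for
     * PRIV_ISASSERT(); both test whether a single privilege is a
     * member of a priv_set_t.  This mirrors the call in gexec().
     */
    static boolean_t
    cred_retains_basic_test(cred_t *cr)
    {
            if (priv_basic_test < 0)
                    return (B_TRUE);        /* no basic_test privilege defined */
            return (PRIV_ISMEMBER(&CR_IPRIV(cr), priv_basic_test) ?
                B_TRUE : B_FALSE);
    }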
--- old/usr/src/uts/common/os/exec.c
+++ new/usr/src/uts/common/os/exec.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /* Copyright (c) 1988 AT&T */
27 27 /* All Rights Reserved */
28 28 /*
29 29 * Copyright 2014, Joyent, Inc. All rights reserved.
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/param.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/systm.h>
36 36 #include <sys/signal.h>
37 37 #include <sys/cred_impl.h>
38 38 #include <sys/policy.h>
39 39 #include <sys/user.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/file.h>
42 42 #include <sys/vfs.h>
43 43 #include <sys/vnode.h>
44 44 #include <sys/mman.h>
45 45 #include <sys/acct.h>
46 46 #include <sys/cpuvar.h>
47 47 #include <sys/proc.h>
48 48 #include <sys/cmn_err.h>
49 49 #include <sys/debug.h>
50 50 #include <sys/pathname.h>
51 51 #include <sys/vm.h>
52 52 #include <sys/lgrp.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/exec.h>
55 55 #include <sys/exechdr.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/prsystm.h>
58 58 #include <sys/modctl.h>
59 59 #include <sys/vmparam.h>
60 60 #include <sys/door.h>
61 61 #include <sys/schedctl.h>
62 62 #include <sys/utrap.h>
63 63 #include <sys/systeminfo.h>
64 64 #include <sys/stack.h>
65 65 #include <sys/rctl.h>
66 66 #include <sys/dtrace.h>
67 67 #include <sys/lwpchan_impl.h>
68 68 #include <sys/pool.h>
69 69 #include <sys/sdt.h>
70 70 #include <sys/brand.h>
71 71 #include <sys/klpd.h>
72 72
73 73 #include <c2/audit.h>
74 74
75 75 #include <vm/hat.h>
76 76 #include <vm/anon.h>
77 77 #include <vm/as.h>
78 78 #include <vm/seg.h>
79 79 #include <vm/seg_vn.h>
80 80
81 81 #define PRIV_RESET 0x01 /* needs to reset privs */
82 82 #define PRIV_SETID 0x02 /* needs to change uids */
83 83 #define PRIV_SETUGID 0x04 /* is setuid/setgid/forced privs */
84 84 #define PRIV_INCREASE 0x08 /* child runs with more privs */
85 85 #define MAC_FLAGS 0x10 /* need to adjust MAC flags */
86 86 #define PRIV_FORCED 0x20 /* has forced privileges */
87 87
88 88 static int execsetid(struct vnode *, struct vattr *, uid_t *, uid_t *,
89 89 priv_set_t *, cred_t *, const char *);
90 90 static int hold_execsw(struct execsw *);
91 91
92 92 uint_t auxv_hwcap = 0; /* auxv AT_SUN_HWCAP value; determined on the fly */
93 93 uint_t auxv_hwcap_2 = 0; /* AT_SUN_HWCAP2 */
94 94 #if defined(_SYSCALL32_IMPL)
95 95 uint_t auxv_hwcap32 = 0; /* 32-bit version of auxv_hwcap */
96 96 uint_t auxv_hwcap32_2 = 0; /* 32-bit version of auxv_hwcap2 */
97 97 #endif
98 98
99 99 #define PSUIDFLAGS (SNOCD|SUGID)
100 100
101 101 /*
102 102 * exece() - system call wrapper around exec_common()
103 103 */
104 104 int
105 105 exece(const char *fname, const char **argp, const char **envp)
106 106 {
107 107 int error;
108 108
109 109 error = exec_common(fname, argp, envp, EBA_NONE);
110 110 return (error ? (set_errno(error)) : 0);
111 111 }
112 112
113 113 int
114 114 exec_common(const char *fname, const char **argp, const char **envp,
115 115 int brand_action)
116 116 {
117 117 vnode_t *vp = NULL, *dir = NULL, *tmpvp = NULL;
118 118 proc_t *p = ttoproc(curthread);
119 119 klwp_t *lwp = ttolwp(curthread);
120 120 struct user *up = PTOU(p);
121 121 long execsz; /* temporary count of exec size */
122 122 int i;
123 123 int error;
124 124 char exec_file[MAXCOMLEN+1];
125 125 struct pathname pn;
126 126 struct pathname resolvepn;
127 127 struct uarg args;
128 128 struct execa ua;
129 129 k_sigset_t savedmask;
130 130 lwpdir_t *lwpdir = NULL;
131 131 tidhash_t *tidhash;
132 132 lwpdir_t *old_lwpdir = NULL;
133 133 uint_t old_lwpdir_sz;
134 134 tidhash_t *old_tidhash;
135 135 uint_t old_tidhash_sz;
136 136 ret_tidhash_t *ret_tidhash;
137 137 lwpent_t *lep;
138 138 boolean_t brandme = B_FALSE;
139 139
140 140 /*
141 141 * exec() is not supported for the /proc agent lwp.
142 142 */
143 143 if (curthread == p->p_agenttp)
144 144 return (ENOTSUP);
145 145
146 146 if (brand_action != EBA_NONE) {
147 147 /*
148 148 * Brand actions are not supported for processes that are not
149 149 * running in a branded zone.
150 150 */
151 151 if (!ZONE_IS_BRANDED(p->p_zone))
152 152 return (ENOTSUP);
153 153
154 154 if (brand_action == EBA_NATIVE) {
155 155 /* Only branded processes can be unbranded */
156 156 if (!PROC_IS_BRANDED(p))
157 157 return (ENOTSUP);
158 158 } else {
159 159 /* Only unbranded processes can be branded */
160 160 if (PROC_IS_BRANDED(p))
161 161 return (ENOTSUP);
162 162 brandme = B_TRUE;
163 163 }
164 164 } else {
165 165 /*
166 166 * If this is a native zone, or if the process is already
167 167 * branded, then we don't need to do anything. If this is
168 168 * a native process in a branded zone, we need to brand the
169 169 * process as it exec()s the new binary.
170 170 */
171 171 if (ZONE_IS_BRANDED(p->p_zone) && !PROC_IS_BRANDED(p))
172 172 brandme = B_TRUE;
173 173 }
174 174
175 175 /*
176 176 * Inform /proc that an exec() has started.
177 177 * Hold signals that are ignored by default so that we will
178 178 * not be interrupted by a signal that will be ignored after
179 179 * successful completion of gexec().
180 180 */
181 181 mutex_enter(&p->p_lock);
182 182 prexecstart();
183 183 schedctl_finish_sigblock(curthread);
184 184 savedmask = curthread->t_hold;
185 185 sigorset(&curthread->t_hold, &ignoredefault);
186 186 mutex_exit(&p->p_lock);
187 187
188 188 /*
189 189 * Look up path name and remember last component for later.
190 190 * To help coreadm expand its %d token, we attempt to save
191 191 * the directory containing the executable in p_execdir. The
192 192 * first call to lookuppn() may fail and return EINVAL because
193 193 * dirvpp is non-NULL. In that case, we make a second call to
194 194 * lookuppn() with dirvpp set to NULL; p_execdir will be NULL,
195 195 * but coreadm is allowed to expand %d to the empty string and
196 196 * there are other cases in which that failure may occur.
197 197 */
198 198 if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
199 199 goto out;
200 200 pn_alloc(&resolvepn);
201 201 if ((error = lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
202 202 pn_free(&resolvepn);
203 203 pn_free(&pn);
204 204 if (error != EINVAL)
205 205 goto out;
206 206
207 207 dir = NULL;
208 208 if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
209 209 goto out;
210 210 pn_alloc(&resolvepn);
211 211 if ((error = lookuppn(&pn, &resolvepn, FOLLOW, NULLVPP,
212 212 &vp)) != 0) {
213 213 pn_free(&resolvepn);
214 214 pn_free(&pn);
215 215 goto out;
216 216 }
217 217 }
218 218 if (vp == NULL) {
219 219 if (dir != NULL)
220 220 VN_RELE(dir);
221 221 error = ENOENT;
222 222 pn_free(&resolvepn);
223 223 pn_free(&pn);
224 224 goto out;
225 225 }
226 226
227 227 if ((error = secpolicy_basic_exec(CRED(), vp)) != 0) {
228 228 if (dir != NULL)
229 229 VN_RELE(dir);
230 230 pn_free(&resolvepn);
231 231 pn_free(&pn);
232 232 VN_RELE(vp);
233 233 goto out;
234 234 }
235 235
236 236 /*
237 237 * We do not allow executing files in attribute directories.
238 238 * We test this by determining whether the resolved path
239 239 * contains a "/" when we're in an attribute directory;
240 240 * only if the pathname does not contain a "/" does the resolved
241 241 * path point to a file in the current working (attribute) directory.
242 242 */
243 243 if ((p->p_user.u_cdir->v_flag & V_XATTRDIR) != 0 &&
244 244 strchr(resolvepn.pn_path, '/') == NULL) {
245 245 if (dir != NULL)
246 246 VN_RELE(dir);
247 247 error = EACCES;
248 248 pn_free(&resolvepn);
249 249 pn_free(&pn);
250 250 VN_RELE(vp);
251 251 goto out;
252 252 }
253 253
254 254 bzero(exec_file, MAXCOMLEN+1);
255 255 (void) strncpy(exec_file, pn.pn_path, MAXCOMLEN);
256 256 bzero(&args, sizeof (args));
257 257 args.pathname = resolvepn.pn_path;
258 258 /* don't free resolvepn until we are done with args */
259 259 pn_free(&pn);
260 260
261 261 /*
262 262 * If we're running in a profile shell, then call pfexecd.
263 263 */
264 264 if ((CR_FLAGS(p->p_cred) & PRIV_PFEXEC) != 0) {
265 265 error = pfexec_call(p->p_cred, &resolvepn, &args.pfcred,
266 266 &args.scrubenv);
267 267
268 268 /* Returning errno in case we're not allowed to execute. */
269 269 if (error > 0) {
270 270 if (dir != NULL)
271 271 VN_RELE(dir);
272 272 pn_free(&resolvepn);
273 273 VN_RELE(vp);
274 274 goto out;
275 275 }
276 276
277 277 /* Don't change the credentials when using old ptrace. */
278 278 if (args.pfcred != NULL &&
279 279 (p->p_proc_flag & P_PR_PTRACE) != 0) {
280 280 crfree(args.pfcred);
281 281 args.pfcred = NULL;
282 282 args.scrubenv = B_FALSE;
283 283 }
284 284 }
285 285
286 286 /*
287 287 * Specific exec handlers, or policies determined via
288 288 * /etc/system may override the historical default.
289 289 */
290 290 args.stk_prot = PROT_ZFOD;
291 291 args.dat_prot = PROT_ZFOD;
292 292
293 293 CPU_STATS_ADD_K(sys, sysexec, 1);
294 294 DTRACE_PROC1(exec, char *, args.pathname);
295 295
296 296 ua.fname = fname;
297 297 ua.argp = argp;
298 298 ua.envp = envp;
299 299
300 300 /* If necessary, brand this process before we start the exec. */
301 301 if (brandme)
302 302 brand_setbrand(p);
303 303
304 304 if ((error = gexec(&vp, &ua, &args, NULL, 0, &execsz,
305 305 exec_file, p->p_cred, brand_action)) != 0) {
306 306 if (brandme)
307 307 brand_clearbrand(p, B_FALSE);
308 308 VN_RELE(vp);
309 309 if (dir != NULL)
310 310 VN_RELE(dir);
311 311 pn_free(&resolvepn);
312 312 goto fail;
313 313 }
314 314
315 315 /*
316 316 * Free floating point registers (sun4u only)
317 317 */
318 318 ASSERT(lwp != NULL);
319 319 lwp_freeregs(lwp, 1);
320 320
321 321 /*
322 322 * Free thread and process context ops.
323 323 */
324 324 if (curthread->t_ctx)
325 325 freectx(curthread, 1);
326 326 if (p->p_pctx)
327 327 freepctx(p, 1);
328 328
329 329 /*
330 330 * Remember file name for accounting; clear any cached DTrace predicate.
331 331 */
332 332 up->u_acflag &= ~AFORK;
333 333 bcopy(exec_file, up->u_comm, MAXCOMLEN+1);
334 334 curthread->t_predcache = NULL;
335 335
336 336 /*
337 337 * Clear contract template state
338 338 */
339 339 lwp_ctmpl_clear(lwp);
340 340
341 341 /*
342 342 * Save the directory in which we found the executable for expanding
343 343 * the %d token used in core file patterns.
344 344 */
345 345 mutex_enter(&p->p_lock);
346 346 tmpvp = p->p_execdir;
347 347 p->p_execdir = dir;
348 348 if (p->p_execdir != NULL)
349 349 VN_HOLD(p->p_execdir);
350 350 mutex_exit(&p->p_lock);
351 351
352 352 if (tmpvp != NULL)
353 353 VN_RELE(tmpvp);
354 354
355 355 /*
356 356 * Reset stack state to the user stack, clear set of signals
357 357 * caught on the signal stack, and reset list of signals that
358 358 * restart system calls; the new program's environment should
359 359 * not be affected by detritus from the old program. Any
360 360 * pending held signals remain held, so don't clear t_hold.
361 361 */
362 362 mutex_enter(&p->p_lock);
363 363 lwp->lwp_oldcontext = 0;
364 364 lwp->lwp_ustack = 0;
365 365 lwp->lwp_old_stk_ctl = 0;
366 366 sigemptyset(&up->u_signodefer);
367 367 sigemptyset(&up->u_sigonstack);
368 368 sigemptyset(&up->u_sigresethand);
369 369 lwp->lwp_sigaltstack.ss_sp = 0;
370 370 lwp->lwp_sigaltstack.ss_size = 0;
371 371 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
372 372
373 373 /*
374 374 * Make saved resource limit == current resource limit.
375 375 */
376 376 for (i = 0; i < RLIM_NLIMITS; i++) {
377 377 /*CONSTCOND*/
378 378 if (RLIM_SAVED(i)) {
379 379 (void) rctl_rlimit_get(rctlproc_legacy[i], p,
380 380 &up->u_saved_rlimit[i]);
381 381 }
382 382 }
383 383
384 384 /*
385 385 * If the action was to catch the signal, then the action
386 386 * must be reset to SIG_DFL.
387 387 */
388 388 sigdefault(p);
389 389 p->p_flag &= ~(SNOWAIT|SJCTL);
390 390 p->p_flag |= (SEXECED|SMSACCT|SMSFORK);
391 391 up->u_signal[SIGCLD - 1] = SIG_DFL;
392 392
393 393 /*
394 394 * Delete the dot4 sigqueues/signotifies.
395 395 */
396 396 sigqfree(p);
397 397
398 398 mutex_exit(&p->p_lock);
399 399
400 400 mutex_enter(&p->p_pflock);
401 401 p->p_prof.pr_base = NULL;
402 402 p->p_prof.pr_size = 0;
403 403 p->p_prof.pr_off = 0;
404 404 p->p_prof.pr_scale = 0;
405 405 p->p_prof.pr_samples = 0;
406 406 mutex_exit(&p->p_pflock);
407 407
408 408 ASSERT(curthread->t_schedctl == NULL);
409 409
410 410 #if defined(__sparc)
411 411 if (p->p_utraps != NULL)
412 412 utrap_free(p);
413 413 #endif /* __sparc */
414 414
415 415 /*
416 416 * Close all close-on-exec files.
417 417 */
418 418 close_exec(P_FINFO(p));
419 419 TRACE_2(TR_FAC_PROC, TR_PROC_EXEC, "proc_exec:p %p up %p", p, up);
420 420
421 421 /* Unbrand ourself if necessary. */
422 422 if (PROC_IS_BRANDED(p) && (brand_action == EBA_NATIVE))
423 423 brand_clearbrand(p, B_FALSE);
424 424
425 425 setregs(&args);
426 426
427 427 /* Mark this as an executable vnode */
428 428 mutex_enter(&vp->v_lock);
429 429 vp->v_flag |= VVMEXEC;
430 430 mutex_exit(&vp->v_lock);
431 431
432 432 VN_RELE(vp);
433 433 if (dir != NULL)
434 434 VN_RELE(dir);
435 435 pn_free(&resolvepn);
436 436
437 437 /*
438 438 * Allocate a new lwp directory and lwpid hash table if necessary.
439 439 */
440 440 if (curthread->t_tid != 1 || p->p_lwpdir_sz != 2) {
441 441 lwpdir = kmem_zalloc(2 * sizeof (lwpdir_t), KM_SLEEP);
442 442 lwpdir->ld_next = lwpdir + 1;
443 443 tidhash = kmem_zalloc(2 * sizeof (tidhash_t), KM_SLEEP);
444 444 if (p->p_lwpdir != NULL)
445 445 lep = p->p_lwpdir[curthread->t_dslot].ld_entry;
446 446 else
447 447 lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
448 448 }
449 449
450 450 if (PROC_IS_BRANDED(p))
451 451 BROP(p)->b_exec();
452 452
453 453 mutex_enter(&p->p_lock);
454 454 prbarrier(p);
455 455
456 456 /*
457 457 * Reset lwp id to the default value of 1.
458 458 * This is a single-threaded process now
459 459 * and lwp #1 is lwp_wait()able by default.
460 460 * The t_unpark flag should not be inherited.
461 461 */
462 462 ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
463 463 curthread->t_tid = 1;
464 464 kpreempt_disable();
465 465 ASSERT(curthread->t_lpl != NULL);
466 466 p->p_t1_lgrpid = curthread->t_lpl->lpl_lgrpid;
467 467 kpreempt_enable();
468 468 if (p->p_tr_lgrpid != LGRP_NONE && p->p_tr_lgrpid != p->p_t1_lgrpid) {
469 469 lgrp_update_trthr_migrations(1);
470 470 }
471 471 curthread->t_unpark = 0;
472 472 curthread->t_proc_flag |= TP_TWAIT;
473 473 curthread->t_proc_flag &= ~TP_DAEMON; /* daemons shouldn't exec */
474 474 p->p_lwpdaemon = 0; /* but oh well ... */
475 475 p->p_lwpid = 1;
476 476
477 477 /*
478 478 * Install the newly-allocated lwp directory and lwpid hash table
479 479 * and insert the current thread into the new hash table.
480 480 */
481 481 if (lwpdir != NULL) {
482 482 old_lwpdir = p->p_lwpdir;
483 483 old_lwpdir_sz = p->p_lwpdir_sz;
484 484 old_tidhash = p->p_tidhash;
485 485 old_tidhash_sz = p->p_tidhash_sz;
486 486 p->p_lwpdir = p->p_lwpfree = lwpdir;
487 487 p->p_lwpdir_sz = 2;
488 488 lep->le_thread = curthread;
489 489 lep->le_lwpid = curthread->t_tid;
490 490 lep->le_start = curthread->t_start;
491 491 lwp_hash_in(p, lep, tidhash, 2, 0);
492 492 p->p_tidhash = tidhash;
493 493 p->p_tidhash_sz = 2;
494 494 }
495 495 ret_tidhash = p->p_ret_tidhash;
496 496 p->p_ret_tidhash = NULL;
497 497
498 498 /*
499 499 * Restore the saved signal mask and
500 500 * inform /proc that the exec() has finished.
501 501 */
502 502 curthread->t_hold = savedmask;
503 503 prexecend();
504 504 mutex_exit(&p->p_lock);
505 505 if (old_lwpdir) {
506 506 kmem_free(old_lwpdir, old_lwpdir_sz * sizeof (lwpdir_t));
507 507 kmem_free(old_tidhash, old_tidhash_sz * sizeof (tidhash_t));
508 508 }
509 509 while (ret_tidhash != NULL) {
510 510 ret_tidhash_t *next = ret_tidhash->rth_next;
511 511 kmem_free(ret_tidhash->rth_tidhash,
512 512 ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
513 513 kmem_free(ret_tidhash, sizeof (*ret_tidhash));
514 514 ret_tidhash = next;
515 515 }
516 516
517 517 ASSERT(error == 0);
518 518 DTRACE_PROC(exec__success);
519 519 return (0);
520 520
521 521 fail:
522 522 DTRACE_PROC1(exec__failure, int, error);
523 523 out: /* error return */
524 524 mutex_enter(&p->p_lock);
525 525 curthread->t_hold = savedmask;
526 526 prexecend();
527 527 mutex_exit(&p->p_lock);
528 528 ASSERT(error != 0);
529 529 return (error);
530 530 }
531 531
532 532
533 533 /*
534 534 * Perform generic exec duties and switchout to object-file specific
535 535 * handler.
536 536 */
537 537 int
538 538 gexec(
539 539 struct vnode **vpp,
540 540 struct execa *uap,
541 541 struct uarg *args,
542 542 struct intpdata *idatap,
543 543 int level,
544 544 long *execsz,
545 545 caddr_t exec_file,
546 546 struct cred *cred,
547 547 int brand_action)
548 548 {
549 549 struct vnode *vp, *execvp = NULL;
550 550 proc_t *pp = ttoproc(curthread);
551 551 struct execsw *eswp;
552 552 int error = 0;
553 553 int suidflags = 0;
554 554 ssize_t resid;
555 555 uid_t uid, gid;
556 556 struct vattr vattr;
557 557 char magbuf[MAGIC_BYTES];
558 558 int setid;
559 559 cred_t *oldcred, *newcred = NULL;
560 560 int privflags = 0;
561 561 int setidfl;
562 562 priv_set_t fset;
563 563
564 564 /*
565 565 * If the SNOCD or SUGID flag is set, turn it off and remember the
566 566 * previous setting so we can restore it if we encounter an error.
567 567 */
568 568 if (level == 0 && (pp->p_flag & PSUIDFLAGS)) {
569 569 mutex_enter(&pp->p_lock);
570 570 suidflags = pp->p_flag & PSUIDFLAGS;
571 571 pp->p_flag &= ~PSUIDFLAGS;
572 572 mutex_exit(&pp->p_lock);
573 573 }
574 574
575 575 if ((error = execpermissions(*vpp, &vattr, args)) != 0)
576 576 goto bad_noclose;
577 577
578 578 /* need to open vnode for stateful file systems */
579 579 if ((error = VOP_OPEN(vpp, FREAD, CRED(), NULL)) != 0)
580 580 goto bad_noclose;
581 581 vp = *vpp;
582 582
583 583 /*
584 584 * Note: to support binary compatibility with SunOS a.out
585 585 * executables, we read in the first four bytes, as the
586 586 * magic number is in bytes 2-3.
587 587 */
588 588 if (error = vn_rdwr(UIO_READ, vp, magbuf, sizeof (magbuf),
589 589 (offset_t)0, UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid))
590 590 goto bad;
591 591 if (resid != 0)
592 592 goto bad;
593 593
594 594 if ((eswp = findexec_by_hdr(magbuf)) == NULL)
595 595 goto bad;
596 596
597 597 if (level == 0 &&
598 598 (privflags = execsetid(vp, &vattr, &uid, &gid, &fset,
599 599 args->pfcred == NULL ? cred : args->pfcred, args->pathname)) != 0) {
600 600
601 601 /* Pfcred is a credential with a ref count of 1 */
602 602
603 603 if (args->pfcred != NULL) {
604 604 privflags |= PRIV_INCREASE|PRIV_RESET;
605 605 newcred = cred = args->pfcred;
606 606 } else {
607 607 newcred = cred = crdup(cred);
608 608 }
609 609
610 610 /* If we can, drop the PA bit */
611 611 if ((privflags & PRIV_RESET) != 0)
612 612 priv_adjust_PA(cred);
613 613
614 614 if (privflags & PRIV_SETID) {
615 615 cred->cr_uid = uid;
616 616 cred->cr_gid = gid;
617 617 cred->cr_suid = uid;
618 618 cred->cr_sgid = gid;
619 619 }
620 620
621 621 if (privflags & MAC_FLAGS) {
622 622 if (!(CR_FLAGS(cred) & NET_MAC_AWARE_INHERIT))
623 623 CR_FLAGS(cred) &= ~NET_MAC_AWARE;
624 624 CR_FLAGS(cred) &= ~NET_MAC_AWARE_INHERIT;
625 625 }
626 626
627 627 /*
628 628 * Implement the privilege updates:
629 629 *
630 630 * Restrict with L:
631 631 *
632 632 * I' = I & L
633 633 *
634 634 * E' = P' = (I' + F) & A
635 635 *
636 636 * But if running under ptrace, we cap I and F with P.
637 637 */
638 638 if ((privflags & (PRIV_RESET|PRIV_FORCED)) != 0) {
639 639 if ((privflags & PRIV_INCREASE) != 0 &&
640 640 (pp->p_proc_flag & P_PR_PTRACE) != 0) {
641 641 priv_intersect(&CR_OPPRIV(cred),
642 642 &CR_IPRIV(cred));
643 643 priv_intersect(&CR_OPPRIV(cred), &fset);
644 644 }
645 645 priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
646 646 CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
647 647 if (privflags & PRIV_FORCED) {
648 648 priv_set_PA(cred);
649 649 priv_union(&fset, &CR_EPRIV(cred));
650 650 priv_union(&fset, &CR_PPRIV(cred));
651 651 }
652 652 priv_adjust_PA(cred);
653 653 }
654 654 } else if (level == 0 && args->pfcred != NULL) {
655 655 newcred = cred = args->pfcred;
656 656 privflags |= PRIV_INCREASE;
657 657 /* pfcred is not forced to adhere to these settings */
658 658 priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
659 659 CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
660 660 priv_adjust_PA(cred);
661 661 }
662 662
663 663 /* SunOS 4.x buy-back */
664 664 if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
665 665 (vattr.va_mode & (VSUID|VSGID))) {
666 666 char path[MAXNAMELEN];
667 667 refstr_t *mntpt = NULL;
668 668 int ret = -1;
669 669
670 670 bzero(path, sizeof (path));
671 671 zone_hold(pp->p_zone);
672 672
673 673 ret = vnodetopath(pp->p_zone->zone_rootvp, vp, path,
674 674 sizeof (path), cred);
675 675
676 676 /* fallback to mountpoint if a path can't be found */
677 677 if ((ret != 0) || (ret == 0 && path[0] == '\0'))
678 678 mntpt = vfs_getmntpoint(vp->v_vfsp);
679 679
680 680 if (mntpt == NULL)
681 681 zcmn_err(pp->p_zone->zone_id, CE_NOTE,
682 682 "!uid %d: setuid execution not allowed, "
683 683 "file=%s", cred->cr_uid, path);
684 684 else
685 685 zcmn_err(pp->p_zone->zone_id, CE_NOTE,
686 686 "!uid %d: setuid execution not allowed, "
687 687 "fs=%s, file=%s", cred->cr_uid,
688 688 ZONE_PATH_TRANSLATE(refstr_value(mntpt),
689 689 pp->p_zone), exec_file);
690 690
691 691 if (!INGLOBALZONE(pp)) {
692 692 /* zone_rootpath always has trailing / */
693 693 if (mntpt == NULL)
694 694 cmn_err(CE_NOTE, "!zone: %s, uid: %d "
695 695 "setuid execution not allowed, file=%s%s",
696 696 pp->p_zone->zone_name, cred->cr_uid,
697 697 pp->p_zone->zone_rootpath, path + 1);
698 698 else
699 699 cmn_err(CE_NOTE, "!zone: %s, uid: %d "
700 700 "setuid execution not allowed, fs=%s, "
701 701 "file=%s", pp->p_zone->zone_name,
702 702 cred->cr_uid, refstr_value(mntpt),
703 703 exec_file);
704 704 }
705 705
706 706 if (mntpt != NULL)
707 707 refstr_rele(mntpt);
708 708
709 709 zone_rele(pp->p_zone);
710 710 }
711 711
712 712 /*
713 713 * execsetid() told us whether or not we had to change the
714 714 * credentials of the process. In privflags, it told us
715 715 * whether we gained any privileges or executed a set-uid executable.
716 716 */
717 717 setid = (privflags & (PRIV_SETUGID|PRIV_INCREASE|PRIV_FORCED));
718 718
719 719 /*
720 720 * Use /etc/system variable to determine if the stack
721 721 * should be marked as executable by default.
722 722 */
723 723 if (noexec_user_stack)
724 724 args->stk_prot &= ~PROT_EXEC;
725 725
726 726 args->execswp = eswp; /* Save execsw pointer in uarg for exec_func */
727 727 args->ex_vp = vp;
728 728
729 729 /*
730 730 * Traditionally, the setid flags told the sub processes whether
731 731 * the file just executed was set-uid or set-gid; this caused
732 732 * some confusion as the 'setid' flag did not match the SUGID
733 733 * process flag which is only set when the uids/gids do not match.
734 734 * A script set-gid/set-uid to the real uid/gid would start with
735 735 * /dev/fd/X but an executable would happily trust LD_LIBRARY_PATH.
736 736 * Now we flag those cases where the calling process cannot
737 737 * be trusted to influence the newly exec'ed process, either
738 738 * because it runs with more privileges or when the uids/gids
739 739 * do in fact not match.
740 740 * This also makes the runtime linker agree with the on exec
741 741 * values of SNOCD and SUGID.
742 742 */
743 743 setidfl = 0;
744 744 if (cred->cr_uid != cred->cr_ruid || (cred->cr_rgid != cred->cr_gid &&
745 745 !supgroupmember(cred->cr_gid, cred))) {
746 746 setidfl |= EXECSETID_UGIDS;
747 747 }
748 748 if (setid & PRIV_SETUGID)
749 749 setidfl |= EXECSETID_SETID;
750 750 if (setid & PRIV_FORCED)
751 751 setidfl |= EXECSETID_PRIVS;
752 752
753 753 execvp = pp->p_exec;
754 754 if (execvp)
755 755 VN_HOLD(execvp);
756 756
757 757 error = (*eswp->exec_func)(vp, uap, args, idatap, level, execsz,
758 758 setidfl, exec_file, cred, brand_action);
759 759 rw_exit(eswp->exec_lock);
760 760 if (error != 0) {
761 761 if (execvp)
762 762 VN_RELE(execvp);
763 763 /*
764 764 * If this process's p_exec has been set to the vp of
765 765 * the executable by exec_func, we will return without
766 766 * calling VOP_CLOSE because proc_exit will close it
767 767 * on exit.
768 768 */
769 769 if (pp->p_exec == vp)
770 770 goto bad_noclose;
771 771 else
772 772 goto bad;
773 773 }
774 774
775 775 if (level == 0) {
776 776 uid_t oruid;
777 777
778 778 if (execvp != NULL) {
779 779 /*
780 780 * Close the previous executable only if we are
781 781 * at level 0.
782 782 */
783 783 (void) VOP_CLOSE(execvp, FREAD, 1, (offset_t)0,
784 784 cred, NULL);
785 785 }
786 786
787 787 mutex_enter(&pp->p_crlock);
788 788
789 789 oruid = pp->p_cred->cr_ruid;
790 790
791 791 if (newcred != NULL) {
792 792 /*
793 793 * Free the old credentials, and set the new ones.
794 794 * Do this for both the process and the (single) thread.
795 795 */
796 796 crfree(pp->p_cred);
797 797 pp->p_cred = cred; /* cred already held for proc */
798 798 crhold(cred); /* hold new cred for thread */
799 799 /*
800 800 * DTrace accesses t_cred in probe context. t_cred
801 801 * must always be either NULL, or point to a valid,
802 802 * allocated cred structure.
803 803 */
804 804 oldcred = curthread->t_cred;
805 805 curthread->t_cred = cred;
806 806 crfree(oldcred);
807 807
808 808 if (priv_basic_test >= 0 &&
809 - !PRIV_ISASSERT(&CR_IPRIV(newcred),
809 + !PRIV_ISMEMBER(&CR_IPRIV(newcred),
810 810 priv_basic_test)) {
811 811 pid_t pid = pp->p_pid;
812 812 char *fn = PTOU(pp)->u_comm;
813 813
814 814 cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
815 815 "privilege removed from E/I", fn, pid);
816 816 }
817 817 }
818 818 /*
819 819 * On emerging from a successful exec(), the saved
820 820 * uid and gid equal the effective uid and gid.
821 821 */
822 822 cred->cr_suid = cred->cr_uid;
823 823 cred->cr_sgid = cred->cr_gid;
824 824
825 825 /*
826 826 * If the real and effective ids do not match, this
827 827 * is a setuid process that should not dump core.
828 828 * The group comparison is tricky; we prevent the code
829 829 * from flagging SNOCD when executing with an effective gid
830 830 * which is a supplementary group.
831 831 */
832 832 if (cred->cr_ruid != cred->cr_uid ||
833 833 (cred->cr_rgid != cred->cr_gid &&
834 834 !supgroupmember(cred->cr_gid, cred)) ||
835 835 (privflags & PRIV_INCREASE) != 0)
836 836 suidflags = PSUIDFLAGS;
837 837 else
838 838 suidflags = 0;
839 839
840 840 mutex_exit(&pp->p_crlock);
841 841 if (newcred != NULL && oruid != newcred->cr_ruid) {
842 842 /* Note that the process remains in the same zone. */
843 843 mutex_enter(&pidlock);
844 844 upcount_dec(oruid, crgetzoneid(newcred));
845 845 upcount_inc(newcred->cr_ruid, crgetzoneid(newcred));
846 846 mutex_exit(&pidlock);
847 847 }
848 848 if (suidflags) {
849 849 mutex_enter(&pp->p_lock);
850 850 pp->p_flag |= suidflags;
851 851 mutex_exit(&pp->p_lock);
852 852 }
853 853 if (setid && (pp->p_proc_flag & P_PR_PTRACE) == 0) {
854 854 /*
855 855 * If process is traced via /proc, arrange to
856 856 * invalidate the associated /proc vnode.
857 857 */
858 858 if (pp->p_plist || (pp->p_proc_flag & P_PR_TRACE))
859 859 args->traceinval = 1;
860 860 }
861 861 if (pp->p_proc_flag & P_PR_PTRACE)
862 862 psignal(pp, SIGTRAP);
863 863 if (args->traceinval)
864 864 prinvalidate(&pp->p_user);
865 865 }
866 866 if (execvp)
867 867 VN_RELE(execvp);
868 868 return (0);
869 869
870 870 bad:
871 871 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, cred, NULL);
872 872
873 873 bad_noclose:
874 874 if (newcred != NULL)
875 875 crfree(newcred);
876 876 if (error == 0)
877 877 error = ENOEXEC;
878 878
879 879 if (suidflags) {
880 880 mutex_enter(&pp->p_lock);
881 881 pp->p_flag |= suidflags;
882 882 mutex_exit(&pp->p_lock);
883 883 }
884 884 return (error);
885 885 }
886 886
887 887 extern char *execswnames[];
888 888
889 889 struct execsw *
890 890 allocate_execsw(char *name, char *magic, size_t magic_size)
891 891 {
892 892 int i, j;
893 893 char *ename;
894 894 char *magicp;
895 895
896 896 mutex_enter(&execsw_lock);
897 897 for (i = 0; i < nexectype; i++) {
898 898 if (execswnames[i] == NULL) {
899 899 ename = kmem_alloc(strlen(name) + 1, KM_SLEEP);
900 900 (void) strcpy(ename, name);
901 901 execswnames[i] = ename;
902 902 /*
903 903 * Set the magic number last so that we
904 904 * don't need to hold the execsw_lock in
905 905 * findexectype().
906 906 */
907 907 magicp = kmem_alloc(magic_size, KM_SLEEP);
908 908 for (j = 0; j < magic_size; j++)
909 909 magicp[j] = magic[j];
910 910 execsw[i].exec_magic = magicp;
911 911 mutex_exit(&execsw_lock);
912 912 return (&execsw[i]);
913 913 }
914 914 }
915 915 mutex_exit(&execsw_lock);
916 916 return (NULL);
917 917 }
918 918
919 919 /*
920 920 * Find the exec switch table entry with the corresponding magic string.
921 921 */
922 922 struct execsw *
923 923 findexecsw(char *magic)
924 924 {
925 925 struct execsw *eswp;
926 926
927 927 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
928 928 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
929 929 if (magic && eswp->exec_maglen != 0 &&
930 930 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0)
931 931 return (eswp);
932 932 }
933 933 return (NULL);
934 934 }
935 935
936 936 /*
937 937 * Find the execsw[] index for the given exec header string by looking for the
938 938 * magic string at a specified offset and length for each kind of executable
939 939 * file format until one matches. If no execsw[] entry is found, try to
940 940 * autoload a module for this magic string.
941 941 */
942 942 struct execsw *
943 943 findexec_by_hdr(char *header)
944 944 {
945 945 struct execsw *eswp;
946 946
947 947 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
948 948 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
949 949 if (header && eswp->exec_maglen != 0 &&
950 950 bcmp(&header[eswp->exec_magoff], eswp->exec_magic,
951 951 eswp->exec_maglen) == 0) {
952 952 if (hold_execsw(eswp) != 0)
953 953 return (NULL);
954 954 return (eswp);
955 955 }
956 956 }
957 957 return (NULL); /* couldn't find the type */
958 958 }
959 959
960 960 /*
961 961 * Find the execsw[] index for the given magic string. If no execsw[] entry
962 962 * is found, try to autoload a module for this magic string.
963 963 */
964 964 struct execsw *
965 965 findexec_by_magic(char *magic)
966 966 {
967 967 struct execsw *eswp;
968 968
969 969 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
970 970 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
971 971 if (magic && eswp->exec_maglen != 0 &&
972 972 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) {
973 973 if (hold_execsw(eswp) != 0)
974 974 return (NULL);
975 975 return (eswp);
976 976 }
977 977 }
978 978 return (NULL); /* couldn't find the type */
979 979 }
980 980
981 981 static int
982 982 hold_execsw(struct execsw *eswp)
983 983 {
984 984 char *name;
985 985
986 986 rw_enter(eswp->exec_lock, RW_READER);
987 987 while (!LOADED_EXEC(eswp)) {
988 988 rw_exit(eswp->exec_lock);
989 989 name = execswnames[eswp-execsw];
990 990 ASSERT(name);
991 991 if (modload("exec", name) == -1)
992 992 return (-1);
993 993 rw_enter(eswp->exec_lock, RW_READER);
994 994 }
995 995 return (0);
996 996 }
997 997
998 998 static int
999 999 execsetid(struct vnode *vp, struct vattr *vattrp, uid_t *uidp, uid_t *gidp,
1000 1000 priv_set_t *fset, cred_t *cr, const char *pathname)
1001 1001 {
1002 1002 proc_t *pp = ttoproc(curthread);
1003 1003 uid_t uid, gid;
1004 1004 int privflags = 0;
1005 1005
1006 1006 /*
1007 1007 * Remember credentials.
1008 1008 */
1009 1009 uid = cr->cr_uid;
1010 1010 gid = cr->cr_gid;
1011 1011
1012 1012 /* Will try to reset the PRIV_AWARE bit later. */
1013 1013 if ((CR_FLAGS(cr) & (PRIV_AWARE|PRIV_AWARE_INHERIT)) == PRIV_AWARE)
1014 1014 privflags |= PRIV_RESET;
1015 1015
1016 1016 if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) == 0) {
1017 1017 /*
1018 1018 * If it's a set-uid root program we perform the
1019 1019 * forced privilege look-aside. This has three possible
1020 1020 * outcomes:
1021 1021 * no look aside information -> treat as before
1022 1022 * look aside in Limit set -> apply forced privs
1023 1023 * look aside not in Limit set -> ignore set-uid root
1024 1024 *
1025 1025 * Ordinary set-uid root execution only allowed if the limit
1026 1026 * set holds all unsafe privileges.
1027 1027 */
1028 1028 if (vattrp->va_mode & VSUID) {
1029 1029 if (vattrp->va_uid == 0) {
1030 1030 int res = get_forced_privs(cr, pathname, fset);
1031 1031
1032 1032 switch (res) {
1033 1033 case -1:
1034 1034 if (priv_issubset(&priv_unsafe,
1035 1035 &CR_LPRIV(cr))) {
1036 1036 uid = vattrp->va_uid;
1037 1037 privflags |= PRIV_SETUGID;
1038 1038 }
1039 1039 break;
1040 1040 case 0:
1041 1041 privflags |= PRIV_FORCED|PRIV_INCREASE;
1042 1042 break;
1043 1043 default:
1044 1044 break;
1045 1045 }
1046 1046 } else {
1047 1047 uid = vattrp->va_uid;
1048 1048 privflags |= PRIV_SETUGID;
1049 1049 }
1050 1050 }
1051 1051 if (vattrp->va_mode & VSGID) {
1052 1052 gid = vattrp->va_gid;
1053 1053 privflags |= PRIV_SETUGID;
1054 1054 }
1055 1055 }
1056 1056
1057 1057 /*
1058 1058 * Do we need to change our credential anyway?
1059 1059 * This is the case when E != I or P != I, as
1060 1060 * we need to do the assignments (with F empty and A full)
1061 1061 * Or when I is not a subset of L; in that case we need to
1062 1062 * enforce L.
1063 1063 *
1064 1064 * I' = L & I
1065 1065 *
1066 1066 * E' = P' = (I' + F) & A
1067 1067 * or
1068 1068 * E' = P' = I'
1069 1069 */
1070 1070 if (!priv_isequalset(&CR_EPRIV(cr), &CR_IPRIV(cr)) ||
1071 1071 !priv_issubset(&CR_IPRIV(cr), &CR_LPRIV(cr)) ||
1072 1072 !priv_isequalset(&CR_PPRIV(cr), &CR_IPRIV(cr)))
1073 1073 privflags |= PRIV_RESET;
1074 1074
1075 1075 /* Child has more privileges than parent */
1076 1076 if (!priv_issubset(&CR_IPRIV(cr), &CR_PPRIV(cr)))
1077 1077 privflags |= PRIV_INCREASE;
1078 1078
1079 1079 /* If MAC-aware flag(s) are on, need to update cred to remove. */
1080 1080 if ((CR_FLAGS(cr) & NET_MAC_AWARE) ||
1081 1081 (CR_FLAGS(cr) & NET_MAC_AWARE_INHERIT))
1082 1082 privflags |= MAC_FLAGS;
1083 1083 /*
1084 1084 * Set setuid/setgid protections if no ptrace() compatibility.
1085 1085 * For privileged processes, honor setuid/setgid even in
1086 1086 * the presence of ptrace() compatibility.
1087 1087 */
1088 1088 if (((pp->p_proc_flag & P_PR_PTRACE) == 0 ||
1089 1089 PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, (uid == 0))) &&
1090 1090 (cr->cr_uid != uid ||
1091 1091 cr->cr_gid != gid ||
1092 1092 cr->cr_suid != uid ||
1093 1093 cr->cr_sgid != gid)) {
1094 1094 *uidp = uid;
1095 1095 *gidp = gid;
1096 1096 privflags |= PRIV_SETID;
1097 1097 }
1098 1098 return (privflags);
1099 1099 }
1100 1100
1101 1101 int
1102 1102 execpermissions(struct vnode *vp, struct vattr *vattrp, struct uarg *args)
1103 1103 {
1104 1104 int error;
1105 1105 proc_t *p = ttoproc(curthread);
1106 1106
1107 1107 vattrp->va_mask = AT_MODE | AT_UID | AT_GID | AT_SIZE;
1108 1108 if (error = VOP_GETATTR(vp, vattrp, ATTR_EXEC, p->p_cred, NULL))
1109 1109 return (error);
1110 1110 /*
1111 1111 * Check the access mode.
1112 1112 * If VPROC, ask /proc if the file is an object file.
1113 1113 */
1114 1114 if ((error = VOP_ACCESS(vp, VEXEC, 0, p->p_cred, NULL)) != 0 ||
1115 1115 !(vp->v_type == VREG || (vp->v_type == VPROC && pr_isobject(vp))) ||
1116 1116 (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0 ||
1117 1117 (vattrp->va_mode & (VEXEC|(VEXEC>>3)|(VEXEC>>6))) == 0) {
1118 1118 if (error == 0)
1119 1119 error = EACCES;
1120 1120 return (error);
1121 1121 }
1122 1122
1123 1123 if ((p->p_plist || (p->p_proc_flag & (P_PR_PTRACE|P_PR_TRACE))) &&
1124 1124 (error = VOP_ACCESS(vp, VREAD, 0, p->p_cred, NULL))) {
1125 1125 /*
1126 1126 * If process is under ptrace(2) compatibility,
1127 1127 * fail the exec(2).
1128 1128 */
1129 1129 if (p->p_proc_flag & P_PR_PTRACE)
1130 1130 goto bad;
1131 1131 /*
1132 1132 * Process is traced via /proc.
1133 1133 * Arrange to invalidate the /proc vnode.
1134 1134 */
1135 1135 args->traceinval = 1;
1136 1136 }
1137 1137 return (0);
1138 1138 bad:
1139 1139 if (error == 0)
1140 1140 error = ENOEXEC;
1141 1141 return (error);
1142 1142 }
1143 1143
1144 1144 /*
1145 1145 * Map a section of an executable file into the user's
1146 1146 * address space.
1147 1147 */
1148 1148 int
1149 1149 execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
1150 1150 off_t offset, int prot, int page, uint_t szc)
1151 1151 {
1152 1152 int error = 0;
1153 1153 off_t oldoffset;
1154 1154 caddr_t zfodbase, oldaddr;
1155 1155 size_t end, oldlen;
1156 1156 size_t zfoddiff;
1157 1157 label_t ljb;
1158 1158 proc_t *p = ttoproc(curthread);
1159 1159
1160 1160 oldaddr = addr;
1161 1161 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1162 1162 if (len) {
1163 1163 oldlen = len;
1164 1164 len += ((size_t)oldaddr - (size_t)addr);
1165 1165 oldoffset = offset;
1166 1166 offset = (off_t)((uintptr_t)offset & PAGEMASK);
1167 1167 if (page) {
1168 1168 spgcnt_t prefltmem, availm, npages;
1169 1169 int preread;
1170 1170 uint_t mflag = MAP_PRIVATE | MAP_FIXED;
1171 1171
1172 1172 if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
1173 1173 mflag |= MAP_TEXT;
1174 1174 } else {
1175 1175 mflag |= MAP_INITDATA;
1176 1176 }
1177 1177
1178 1178 if (valid_usr_range(addr, len, prot, p->p_as,
1179 1179 p->p_as->a_userlimit) != RANGE_OKAY) {
1180 1180 error = ENOMEM;
1181 1181 goto bad;
1182 1182 }
1183 1183 if (error = VOP_MAP(vp, (offset_t)offset,
1184 1184 p->p_as, &addr, len, prot, PROT_ALL,
1185 1185 mflag, CRED(), NULL))
1186 1186 goto bad;
1187 1187
1188 1188 /*
1189 1189 * If the segment can fit, then we prefault
1190 1190 * the entire segment in. This is based on the
1191 1191 * model that says the best working set of a
1192 1192 * small program is all of its pages.
1193 1193 */
1194 1194 npages = (spgcnt_t)btopr(len);
1195 1195 prefltmem = freemem - desfree;
1196 1196 preread =
1197 1197 (npages < prefltmem && len < PGTHRESH) ? 1 : 0;
1198 1198
1199 1199 /*
1200 1200 * If we aren't prefaulting the segment,
1201 1201 * increment "deficit", if necessary to ensure
1202 1202 * that pages will become available when this
1203 1203 * process starts executing.
1204 1204 */
1205 1205 availm = freemem - lotsfree;
1206 1206 if (preread == 0 && npages > availm &&
1207 1207 deficit < lotsfree) {
1208 1208 deficit += MIN((pgcnt_t)(npages - availm),
1209 1209 lotsfree - deficit);
1210 1210 }
1211 1211
1212 1212 if (preread) {
1213 1213 TRACE_2(TR_FAC_PROC, TR_EXECMAP_PREREAD,
1214 1214 "execmap preread:freemem %d size %lu",
1215 1215 freemem, len);
1216 1216 (void) as_fault(p->p_as->a_hat, p->p_as,
1217 1217 (caddr_t)addr, len, F_INVAL, S_READ);
1218 1218 }
1219 1219 } else {
1220 1220 if (valid_usr_range(addr, len, prot, p->p_as,
1221 1221 p->p_as->a_userlimit) != RANGE_OKAY) {
1222 1222 error = ENOMEM;
1223 1223 goto bad;
1224 1224 }
1225 1225
1226 1226 if (error = as_map(p->p_as, addr, len,
1227 1227 segvn_create, zfod_argsp))
1228 1228 goto bad;
1229 1229 /*
1230 1230 * Read in the segment in one big chunk.
1231 1231 */
1232 1232 if (error = vn_rdwr(UIO_READ, vp, (caddr_t)oldaddr,
1233 1233 oldlen, (offset_t)oldoffset, UIO_USERSPACE, 0,
1234 1234 (rlim64_t)0, CRED(), (ssize_t *)0))
1235 1235 goto bad;
1236 1236 /*
1237 1237 * Now set protections.
1238 1238 */
1239 1239 if (prot != PROT_ZFOD) {
1240 1240 (void) as_setprot(p->p_as, (caddr_t)addr,
1241 1241 len, prot);
1242 1242 }
1243 1243 }
1244 1244 }
1245 1245
1246 1246 if (zfodlen) {
1247 1247 struct as *as = curproc->p_as;
1248 1248 struct seg *seg;
1249 1249 uint_t zprot = 0;
1250 1250
1251 1251 end = (size_t)addr + len;
1252 1252 zfodbase = (caddr_t)roundup(end, PAGESIZE);
1253 1253 zfoddiff = (uintptr_t)zfodbase - end;
1254 1254 if (zfoddiff) {
1255 1255 /*
1256 1256 * Before we go to zero the remaining space on the last
1257 1257 * page, make sure we have write permission.
1258 1258 *
1259 1259 * Normal illumos binaries don't even hit the case
1260 1260 * where we have to change permission on the last page
1261 1261 * since their protection is typically either
1262 1262 * PROT_USER | PROT_WRITE | PROT_READ
1263 1263 * or
1264 1264 * PROT_ZFOD (same as PROT_ALL).
1265 1265 *
1266 1266 * We need to be careful how we zero-fill the last page
1267 1267 * if the segment protection does not include
1268 1268 * PROT_WRITE. Using as_setprot() can cause the VM
1269 1269 * segment code to call segvn_vpage(), which must
1270 1270 * allocate a page struct for each page in the segment.
1271 1271 * If we have a very large segment, this may fail, so
1272 1272 * we have to check for that, even though we ignore
1273 1273 * other return values from as_setprot.
1274 1274 */
1275 1275
1276 1276 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1277 1277 seg = as_segat(curproc->p_as, (caddr_t)end);
1278 1278 if (seg != NULL)
1279 1279 SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1,
1280 1280 &zprot);
1281 1281 AS_LOCK_EXIT(as, &as->a_lock);
1282 1282
1283 1283 if (seg != NULL && (zprot & PROT_WRITE) == 0) {
1284 1284 if (as_setprot(as, (caddr_t)end, zfoddiff - 1,
1285 1285 zprot | PROT_WRITE) == ENOMEM) {
1286 1286 error = ENOMEM;
1287 1287 goto bad;
1288 1288 }
1289 1289 }
1290 1290
1291 1291 if (on_fault(&ljb)) {
1292 1292 no_fault();
1293 1293 if (seg != NULL && (zprot & PROT_WRITE) == 0)
1294 1294 (void) as_setprot(as, (caddr_t)end,
1295 1295 zfoddiff - 1, zprot);
1296 1296 error = EFAULT;
1297 1297 goto bad;
1298 1298 }
1299 1299 uzero((void *)end, zfoddiff);
1300 1300 no_fault();
1301 1301 if (seg != NULL && (zprot & PROT_WRITE) == 0)
1302 1302 (void) as_setprot(as, (caddr_t)end,
1303 1303 zfoddiff - 1, zprot);
1304 1304 }
1305 1305 if (zfodlen > zfoddiff) {
1306 1306 struct segvn_crargs crargs =
1307 1307 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
1308 1308
1309 1309 zfodlen -= zfoddiff;
1310 1310 if (valid_usr_range(zfodbase, zfodlen, prot, p->p_as,
1311 1311 p->p_as->a_userlimit) != RANGE_OKAY) {
1312 1312 error = ENOMEM;
1313 1313 goto bad;
1314 1314 }
1315 1315 if (szc > 0) {
1316 1316 /*
1317 1317 * ASSERT alignment because the mapelfexec()
1318 1318 * caller for the szc > 0 case extended zfod
1319 1319 * so its end is pgsz aligned.
1320 1320 */
1321 1321 size_t pgsz = page_get_pagesize(szc);
1322 1322 ASSERT(IS_P2ALIGNED(zfodbase + zfodlen, pgsz));
1323 1323
1324 1324 if (IS_P2ALIGNED(zfodbase, pgsz)) {
1325 1325 crargs.szc = szc;
1326 1326 } else {
1327 1327 crargs.szc = AS_MAP_HEAP;
1328 1328 }
1329 1329 } else {
1330 1330 crargs.szc = AS_MAP_NO_LPOOB;
1331 1331 }
1332 1332 if (error = as_map(p->p_as, (caddr_t)zfodbase,
1333 1333 zfodlen, segvn_create, &crargs))
1334 1334 goto bad;
1335 1335 if (prot != PROT_ZFOD) {
1336 1336 (void) as_setprot(p->p_as, (caddr_t)zfodbase,
1337 1337 zfodlen, prot);
1338 1338 }
1339 1339 }
1340 1340 }
1341 1341 return (0);
1342 1342 bad:
1343 1343 return (error);
1344 1344 }
1345 1345
1346 1346 void
1347 1347 setexecenv(struct execenv *ep)
1348 1348 {
1349 1349 proc_t *p = ttoproc(curthread);
1350 1350 klwp_t *lwp = ttolwp(curthread);
1351 1351 struct vnode *vp;
1352 1352
1353 1353 p->p_bssbase = ep->ex_bssbase;
1354 1354 p->p_brkbase = ep->ex_brkbase;
1355 1355 p->p_brksize = ep->ex_brksize;
1356 1356 if (p->p_exec)
1357 1357 VN_RELE(p->p_exec); /* out with the old */
1358 1358 vp = p->p_exec = ep->ex_vp;
1359 1359 if (vp != NULL)
1360 1360 VN_HOLD(vp); /* in with the new */
1361 1361
1362 1362 lwp->lwp_sigaltstack.ss_sp = 0;
1363 1363 lwp->lwp_sigaltstack.ss_size = 0;
1364 1364 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
1365 1365 }
1366 1366
1367 1367 int
1368 1368 execopen(struct vnode **vpp, int *fdp)
1369 1369 {
1370 1370 struct vnode *vp = *vpp;
1371 1371 file_t *fp;
1372 1372 int error = 0;
1373 1373 int filemode = FREAD;
1374 1374
1375 1375 VN_HOLD(vp); /* open reference */
1376 1376 if (error = falloc(NULL, filemode, &fp, fdp)) {
1377 1377 VN_RELE(vp);
1378 1378 *fdp = -1; /* just in case falloc changed value */
1379 1379 return (error);
1380 1380 }
1381 1381 if (error = VOP_OPEN(&vp, filemode, CRED(), NULL)) {
1382 1382 VN_RELE(vp);
1383 1383 setf(*fdp, NULL);
1384 1384 unfalloc(fp);
1385 1385 *fdp = -1;
1386 1386 return (error);
1387 1387 }
1388 1388 *vpp = vp; /* vnode should not have changed */
1389 1389 fp->f_vnode = vp;
1390 1390 mutex_exit(&fp->f_tlock);
1391 1391 setf(*fdp, fp);
1392 1392 return (0);
1393 1393 }
1394 1394
1395 1395 int
1396 1396 execclose(int fd)
1397 1397 {
1398 1398 return (closeandsetf(fd, NULL));
1399 1399 }
1400 1400
1401 1401
1402 1402 /*
1403 1403 * noexec stub function.
1404 1404 */
1405 1405 /*ARGSUSED*/
1406 1406 int
1407 1407 noexec(
1408 1408 struct vnode *vp,
1409 1409 struct execa *uap,
1410 1410 struct uarg *args,
1411 1411 struct intpdata *idatap,
1412 1412 int level,
1413 1413 long *execsz,
1414 1414 int setid,
1415 1415 caddr_t exec_file,
1416 1416 struct cred *cred)
1417 1417 {
1418 1418 cmn_err(CE_WARN, "missing exec capability for %s", uap->fname);
1419 1419 return (ENOEXEC);
1420 1420 }
1421 1421
1422 1422 /*
1423 1423 * Support routines for building a user stack.
1424 1424 *
1425 1425 * execve(path, argv, envp) must construct a new stack with the specified
1426 1426 * arguments and environment variables (see exec_args() for a description
1427 1427 * of the user stack layout). To do this, we copy the arguments and
1428 1428 * environment variables from the old user address space into the kernel,
1429 1429 * free the old as, create the new as, and copy our buffered information
1430 1430 * to the new stack. Our kernel buffer has the following structure:
1431 1431 *
1432 1432 * +-----------------------+ <--- stk_base + stk_size
1433 1433 * | string offsets |
1434 1434 * +-----------------------+ <--- stk_offp
1435 1435 * | |
1436 1436 * | STK_AVAIL() space |
1437 1437 * | |
1438 1438 * +-----------------------+ <--- stk_strp
1439 1439 * | strings |
1440 1440 * +-----------------------+ <--- stk_base
1441 1441 *
1442 1442 * When we add a string, we store the string's contents (including the null
1443 1443 * terminator) at stk_strp, and we store the offset of the string relative to
1444 1444 * stk_base at --stk_offp. As strings are added, stk_strp increases and
1445 1445 * stk_offp decreases. The amount of space remaining, STK_AVAIL(), is just
1446 1446 * the difference between these pointers. If we run out of space, we return
1447 1447 * an error and exec_args() starts all over again with a buffer twice as large.
1448 1448 * When we're all done, the kernel buffer looks like this:
1449 1449 *
1450 1450 * +-----------------------+ <--- stk_base + stk_size
1451 1451 * | argv[0] offset |
1452 1452 * +-----------------------+
1453 1453 * | ... |
1454 1454 * +-----------------------+
1455 1455 * | argv[argc-1] offset |
1456 1456 * +-----------------------+
1457 1457 * | envp[0] offset |
1458 1458 * +-----------------------+
1459 1459 * | ... |
1460 1460 * +-----------------------+
1461 1461 * | envp[envc-1] offset |
1462 1462 * +-----------------------+
1463 1463 * | AT_SUN_PLATFORM offset|
1464 1464 * +-----------------------+
1465 1465 * | AT_SUN_EXECNAME offset|
1466 1466 * +-----------------------+ <--- stk_offp
1467 1467 * | |
1468 1468 * | STK_AVAIL() space |
1469 1469 * | |
1470 1470 * +-----------------------+ <--- stk_strp
1471 1471 * | AT_SUN_EXECNAME offset|
1472 1472 * +-----------------------+
1473 1473 * | AT_SUN_PLATFORM offset|
1474 1474 * +-----------------------+
1475 1475 * | envp[envc-1] string |
1476 1476 * +-----------------------+
1477 1477 * | ... |
1478 1478 * +-----------------------+
1479 1479 * | envp[0] string |
1480 1480 * +-----------------------+
1481 1481 * | argv[argc-1] string |
1482 1482 * +-----------------------+
1483 1483 * | ... |
1484 1484 * +-----------------------+
1485 1485 * | argv[0] string |
1486 1486 * +-----------------------+ <--- stk_base
1487 1487 */
1488 1488
1489 1489 #define STK_AVAIL(args) ((char *)(args)->stk_offp - (args)->stk_strp)
1490 1490
1491 1491 /*
1492 1492 * Add a string to the stack.
1493 1493 */
1494 1494 static int
1495 1495 stk_add(uarg_t *args, const char *sp, enum uio_seg segflg)
1496 1496 {
1497 1497 int error;
1498 1498 size_t len;
1499 1499
1500 1500 if (STK_AVAIL(args) < sizeof (int))
1501 1501 return (E2BIG);
1502 1502 *--args->stk_offp = args->stk_strp - args->stk_base;
1503 1503
1504 1504 if (segflg == UIO_USERSPACE) {
1505 1505 error = copyinstr(sp, args->stk_strp, STK_AVAIL(args), &len);
1506 1506 if (error != 0)
1507 1507 return (error);
1508 1508 } else {
1509 1509 len = strlen(sp) + 1;
1510 1510 if (len > STK_AVAIL(args))
1511 1511 return (E2BIG);
1512 1512 bcopy(sp, args->stk_strp, len);
1513 1513 }
1514 1514
1515 1515 args->stk_strp += len;
1516 1516
1517 1517 return (0);
1518 1518 }
1519 1519
1520 1520 static int
1521 1521 stk_getptr(uarg_t *args, char *src, char **dst)
1522 1522 {
1523 1523 int error;
1524 1524
1525 1525 if (args->from_model == DATAMODEL_NATIVE) {
1526 1526 ulong_t ptr;
1527 1527 error = fulword(src, &ptr);
1528 1528 *dst = (caddr_t)ptr;
1529 1529 } else {
1530 1530 uint32_t ptr;
1531 1531 error = fuword32(src, &ptr);
1532 1532 *dst = (caddr_t)(uintptr_t)ptr;
1533 1533 }
1534 1534 return (error);
1535 1535 }
1536 1536
1537 1537 static int
1538 1538 stk_putptr(uarg_t *args, char *addr, char *value)
1539 1539 {
1540 1540 if (args->to_model == DATAMODEL_NATIVE)
1541 1541 return (sulword(addr, (ulong_t)value));
1542 1542 else
1543 1543 return (suword32(addr, (uint32_t)(uintptr_t)value));
1544 1544 }
1545 1545
1546 1546 static int
1547 1547 stk_copyin(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
1548 1548 {
1549 1549 char *sp;
1550 1550 int argc, error;
1551 1551 int argv_empty = 0;
1552 1552 size_t ptrsize = args->from_ptrsize;
1553 1553 size_t size, pad;
1554 1554 char *argv = (char *)uap->argp;
1555 1555 char *envp = (char *)uap->envp;
1556 1556
1557 1557 /*
1558 1558 * Copy interpreter's name and argument to argv[0] and argv[1].
1559 1559 */
1560 1560 if (intp != NULL && intp->intp_name != NULL) {
1561 1561 if ((error = stk_add(args, intp->intp_name, UIO_SYSSPACE)) != 0)
1562 1562 return (error);
1563 1563 if (intp->intp_arg != NULL &&
1564 1564 (error = stk_add(args, intp->intp_arg, UIO_SYSSPACE)) != 0)
1565 1565 return (error);
1566 1566 if (args->fname != NULL)
1567 1567 error = stk_add(args, args->fname, UIO_SYSSPACE);
1568 1568 else
1569 1569 error = stk_add(args, uap->fname, UIO_USERSPACE);
1570 1570 if (error)
1571 1571 return (error);
1572 1572
1573 1573 /*
1574 1574 * Check for an empty argv[].
1575 1575 */
1576 1576 if (stk_getptr(args, argv, &sp))
1577 1577 return (EFAULT);
1578 1578 if (sp == NULL)
1579 1579 argv_empty = 1;
1580 1580
1581 1581 argv += ptrsize; /* ignore original argv[0] */
1582 1582 }
1583 1583
1584 1584 if (argv_empty == 0) {
1585 1585 /*
1586 1586 * Add argv[] strings to the stack.
1587 1587 */
1588 1588 for (;;) {
1589 1589 if (stk_getptr(args, argv, &sp))
1590 1590 return (EFAULT);
1591 1591 if (sp == NULL)
1592 1592 break;
1593 1593 if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
1594 1594 return (error);
1595 1595 argv += ptrsize;
1596 1596 }
1597 1597 }
1598 1598 argc = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
1599 1599 args->arglen = args->stk_strp - args->stk_base;
1600 1600
1601 1601 /*
1602 1602 * Add environ[] strings to the stack.
1603 1603 */
1604 1604 if (envp != NULL) {
1605 1605 for (;;) {
1606 1606 char *tmp = args->stk_strp;
1607 1607 if (stk_getptr(args, envp, &sp))
1608 1608 return (EFAULT);
1609 1609 if (sp == NULL)
1610 1610 break;
1611 1611 if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
1612 1612 return (error);
1613 1613 if (args->scrubenv && strncmp(tmp, "LD_", 3) == 0) {
1614 1614 /* Undo the copied string */
1615 1615 args->stk_strp = tmp;
1616 1616 *(args->stk_offp++) = NULL;
1617 1617 }
1618 1618 envp += ptrsize;
1619 1619 }
1620 1620 }
1621 1621 args->na = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
1622 1622 args->ne = args->na - argc;
1623 1623
1624 1624 /*
1625 1625 * Add AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME, and
1626 1626 * AT_SUN_EMULATOR strings to the stack.
1627 1627 */
1628 1628 if (auxvpp != NULL && *auxvpp != NULL) {
1629 1629 if ((error = stk_add(args, platform, UIO_SYSSPACE)) != 0)
1630 1630 return (error);
1631 1631 if ((error = stk_add(args, args->pathname, UIO_SYSSPACE)) != 0)
1632 1632 return (error);
1633 1633 if (args->brandname != NULL &&
1634 1634 (error = stk_add(args, args->brandname, UIO_SYSSPACE)) != 0)
1635 1635 return (error);
1636 1636 if (args->emulator != NULL &&
1637 1637 (error = stk_add(args, args->emulator, UIO_SYSSPACE)) != 0)
1638 1638 return (error);
1639 1639 }
1640 1640
1641 1641 /*
1642 1642 * Compute the size of the stack. This includes all the pointers,
1643 1643 * the space reserved for the aux vector, and all the strings.
1644 1644 * The total number of pointers is args->na (which is argc + envc)
1645 1645 * plus 4 more: (1) a pointer's worth of space for argc; (2) the NULL
1646 1646 * after the last argument (i.e. argv[argc]); (3) the NULL after the
1647 1647 * last environment variable (i.e. envp[envc]); and (4) the NULL after
1648 1648 * all the strings, at the very top of the stack.
1649 1649 */
1650 1650 size = (args->na + 4) * args->to_ptrsize + args->auxsize +
1651 1651 (args->stk_strp - args->stk_base);
1652 1652
1653 1653 /*
1654 1654 * Pad the string section with zeroes to align the stack size.
1655 1655 */
1656 1656 pad = P2NPHASE(size, args->stk_align);
1657 1657
1658 1658 if (STK_AVAIL(args) < pad)
1659 1659 return (E2BIG);
1660 1660
1661 1661 args->usrstack_size = size + pad;
1662 1662
1663 1663 while (pad-- != 0)
1664 1664 *args->stk_strp++ = 0;
1665 1665
1666 1666 args->nc = args->stk_strp - args->stk_base;
1667 1667
1668 1668 return (0);
1669 1669 }
1670 1670
1671 1671 static int
1672 1672 stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
1673 1673 {
1674 1674 size_t ptrsize = args->to_ptrsize;
1675 1675 ssize_t pslen;
1676 1676 char *kstrp = args->stk_base;
1677 1677 char *ustrp = usrstack - args->nc - ptrsize;
1678 1678 char *usp = usrstack - args->usrstack_size;
1679 1679 int *offp = (int *)(args->stk_base + args->stk_size);
1680 1680 int envc = args->ne;
1681 1681 int argc = args->na - envc;
1682 1682 int i;
1683 1683
1684 1684 /*
1685 1685 * Record argc for /proc.
1686 1686 */
1687 1687 up->u_argc = argc;
1688 1688
1689 1689 /*
1690 1690 * Put argc on the stack. Note that even though it's an int,
1691 1691 * it always consumes ptrsize bytes (for alignment).
1692 1692 */
1693 1693 if (stk_putptr(args, usp, (char *)(uintptr_t)argc))
1694 1694 return (-1);
1695 1695
1696 1696 /*
1697 1697 * Add argc space (ptrsize) to usp and record argv for /proc.
1698 1698 */
1699 1699 up->u_argv = (uintptr_t)(usp += ptrsize);
1700 1700
1701 1701 /*
1702 1702 * Put the argv[] pointers on the stack.
1703 1703 */
1704 1704 for (i = 0; i < argc; i++, usp += ptrsize)
1705 1705 if (stk_putptr(args, usp, &ustrp[*--offp]))
1706 1706 return (-1);
1707 1707
1708 1708 /*
1709 1709 * Copy arguments to u_psargs.
1710 1710 */
1711 1711 pslen = MIN(args->arglen, PSARGSZ) - 1;
1712 1712 for (i = 0; i < pslen; i++)
1713 1713 up->u_psargs[i] = (kstrp[i] == '\0' ? ' ' : kstrp[i]);
1714 1714 while (i < PSARGSZ)
1715 1715 up->u_psargs[i++] = '\0';
1716 1716
1717 1717 /*
1718 1718 * Add space for argv[]'s NULL terminator (ptrsize) to usp and
1719 1719 * record envp for /proc.
1720 1720 */
1721 1721 up->u_envp = (uintptr_t)(usp += ptrsize);
1722 1722
1723 1723 /*
1724 1724 * Put the envp[] pointers on the stack.
1725 1725 */
1726 1726 for (i = 0; i < envc; i++, usp += ptrsize)
1727 1727 if (stk_putptr(args, usp, &ustrp[*--offp]))
1728 1728 return (-1);
1729 1729
1730 1730 /*
1731 1731 * Add space for envp[]'s NULL terminator (ptrsize) to usp and
1732 1732 * remember where the stack ends, which is also where auxv begins.
1733 1733 */
1734 1734 args->stackend = usp += ptrsize;
1735 1735
1736 1736 /*
1737 1737 * Put all the argv[], envp[], and auxv strings on the stack.
1738 1738 */
1739 1739 if (copyout(args->stk_base, ustrp, args->nc))
1740 1740 return (-1);
1741 1741
1742 1742 /*
1743 1743 * Fill in the aux vector now that we know the user stack addresses
1744 1744 * for the AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME and
1745 1745 * AT_SUN_EMULATOR strings.
1746 1746 */
1747 1747 if (auxvpp != NULL && *auxvpp != NULL) {
1748 1748 if (args->to_model == DATAMODEL_NATIVE) {
1749 1749 auxv_t **a = (auxv_t **)auxvpp;
1750 1750 ADDAUX(*a, AT_SUN_PLATFORM, (long)&ustrp[*--offp])
1751 1751 ADDAUX(*a, AT_SUN_EXECNAME, (long)&ustrp[*--offp])
1752 1752 if (args->brandname != NULL)
1753 1753 ADDAUX(*a,
1754 1754 AT_SUN_BRANDNAME, (long)&ustrp[*--offp])
1755 1755 if (args->emulator != NULL)
1756 1756 ADDAUX(*a,
1757 1757 AT_SUN_EMULATOR, (long)&ustrp[*--offp])
1758 1758 } else {
1759 1759 auxv32_t **a = (auxv32_t **)auxvpp;
1760 1760 ADDAUX(*a,
1761 1761 AT_SUN_PLATFORM, (int)(uintptr_t)&ustrp[*--offp])
1762 1762 ADDAUX(*a,
1763 1763 AT_SUN_EXECNAME, (int)(uintptr_t)&ustrp[*--offp])
1764 1764 if (args->brandname != NULL)
1765 1765 ADDAUX(*a, AT_SUN_BRANDNAME,
1766 1766 (int)(uintptr_t)&ustrp[*--offp])
1767 1767 if (args->emulator != NULL)
1768 1768 ADDAUX(*a, AT_SUN_EMULATOR,
1769 1769 (int)(uintptr_t)&ustrp[*--offp])
1770 1770 }
1771 1771 }
1772 1772
1773 1773 return (0);
1774 1774 }
1775 1775
1776 1776 /*
1777 1777 * Initialize a new user stack with the specified arguments and environment.
1778 1778 * The initial user stack layout is as follows:
1779 1779 *
1780 1780 * User Stack
1781 1781 * +---------------+ <--- curproc->p_usrstack
1782 1782 * | |
1783 1783 * | slew |
1784 1784 * | |
1785 1785 * +---------------+
1786 1786 * | NULL |
1787 1787 * +---------------+
1788 1788 * | |
1789 1789 * | auxv strings |
1790 1790 * | |
1791 1791 * +---------------+
1792 1792 * | |
1793 1793 * | envp strings |
1794 1794 * | |
1795 1795 * +---------------+
1796 1796 * | |
1797 1797 * | argv strings |
1798 1798 * | |
1799 1799 * +---------------+ <--- ustrp
1800 1800 * | |
1801 1801 * | aux vector |
1802 1802 * | |
1803 1803 * +---------------+ <--- auxv
1804 1804 * | NULL |
1805 1805 * +---------------+
1806 1806 * | envp[envc-1] |
1807 1807 * +---------------+
1808 1808 * | ... |
1809 1809 * +---------------+
1810 1810 * | envp[0] |
1811 1811 * +---------------+ <--- envp[]
1812 1812 * | NULL |
1813 1813 * +---------------+
1814 1814 * | argv[argc-1] |
1815 1815 * +---------------+
1816 1816 * | ... |
1817 1817 * +---------------+
1818 1818 * | argv[0] |
1819 1819 * +---------------+ <--- argv[]
1820 1820 * | argc |
1821 1821 * +---------------+ <--- stack base
1822 1822 */
1823 1823 int
1824 1824 exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
1825 1825 {
1826 1826 size_t size;
1827 1827 int error;
1828 1828 proc_t *p = ttoproc(curthread);
1829 1829 user_t *up = PTOU(p);
1830 1830 char *usrstack;
1831 1831 rctl_entity_p_t e;
1832 1832 struct as *as;
1833 1833 extern int use_stk_lpg;
1834 1834 size_t sp_slew;
1835 1835
1836 1836 args->from_model = p->p_model;
1837 1837 if (p->p_model == DATAMODEL_NATIVE) {
1838 1838 args->from_ptrsize = sizeof (long);
1839 1839 } else {
1840 1840 args->from_ptrsize = sizeof (int32_t);
1841 1841 }
1842 1842
1843 1843 if (args->to_model == DATAMODEL_NATIVE) {
1844 1844 args->to_ptrsize = sizeof (long);
1845 1845 args->ncargs = NCARGS;
1846 1846 args->stk_align = STACK_ALIGN;
1847 1847 if (args->addr32)
1848 1848 usrstack = (char *)USRSTACK64_32;
1849 1849 else
1850 1850 usrstack = (char *)USRSTACK;
1851 1851 } else {
1852 1852 args->to_ptrsize = sizeof (int32_t);
1853 1853 args->ncargs = NCARGS32;
1854 1854 args->stk_align = STACK_ALIGN32;
1855 1855 usrstack = (char *)USRSTACK32;
1856 1856 }
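	/*
	 * Illustrative note (not in the original source): the two data models
	 * need not match.  A 64-bit parent exec'ing a 32-bit binary, for
	 * example, copies the arguments in with from_ptrsize = 8 but lays the
	 * new stack out with to_ptrsize = 4, the NCARGS32 limit, STACK_ALIGN32
	 * alignment, and a stack top of USRSTACK32.
	 */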
1857 1857
1858 1858 ASSERT(P2PHASE((uintptr_t)usrstack, args->stk_align) == 0);
1859 1859
1860 1860 #if defined(__sparc)
1861 1861 /*
1862 1862 * Make sure user register windows are empty before
1863 1863 * attempting to make a new stack.
1864 1864 */
1865 1865 (void) flush_user_windows_to_stack(NULL);
1866 1866 #endif
1867 1867
1868 1868 for (size = PAGESIZE; ; size *= 2) {
1869 1869 args->stk_size = size;
1870 1870 args->stk_base = kmem_alloc(size, KM_SLEEP);
1871 1871 args->stk_strp = args->stk_base;
1872 1872 args->stk_offp = (int *)(args->stk_base + size);
1873 1873 error = stk_copyin(uap, args, intp, auxvpp);
1874 1874 if (error == 0)
1875 1875 break;
1876 1876 kmem_free(args->stk_base, size);
1877 1877 if (error != E2BIG && error != ENAMETOOLONG)
1878 1878 return (error);
1879 1879 if (size >= args->ncargs)
1880 1880 return (E2BIG);
1881 1881 }
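	/*
	 * Illustrative note (not in the original source): the scratch buffer
	 * starts at PAGESIZE and doubles whenever stk_copyin() runs out of
	 * room (E2BIG or ENAMETOOLONG); once a buffer at least as large as
	 * the target's argument limit (args->ncargs) has still failed, the
	 * arguments simply cannot fit and E2BIG is returned.
	 */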
1882 1882
1883 1883 size = args->usrstack_size;
1884 1884
1885 1885 ASSERT(error == 0);
1886 1886 ASSERT(P2PHASE(size, args->stk_align) == 0);
1887 1887 ASSERT((ssize_t)STK_AVAIL(args) >= 0);
1888 1888
1889 1889 if (size > args->ncargs) {
1890 1890 kmem_free(args->stk_base, args->stk_size);
1891 1891 return (E2BIG);
1892 1892 }
1893 1893
1894 1894 /*
1895 1895 * Leave only the current lwp and force the other lwps to exit.
1896 1896 * If another lwp beat us to the punch by calling exit(), bail out.
1897 1897 */
1898 1898 if ((error = exitlwps(0)) != 0) {
1899 1899 kmem_free(args->stk_base, args->stk_size);
1900 1900 return (error);
1901 1901 }
1902 1902
1903 1903 /*
1904 1904 * Revoke any doors created by the process.
1905 1905 */
1906 1906 if (p->p_door_list)
1907 1907 door_exit();
1908 1908
1909 1909 /*
1910 1910 * Release schedctl data structures.
1911 1911 */
1912 1912 if (p->p_pagep)
1913 1913 schedctl_proc_cleanup();
1914 1914
1915 1915 /*
1916 1916 * Clean up any DTrace helpers for the process.
1917 1917 */
1918 1918 if (p->p_dtrace_helpers != NULL) {
1919 1919 ASSERT(dtrace_helpers_cleanup != NULL);
1920 1920 (*dtrace_helpers_cleanup)();
1921 1921 }
1922 1922
1923 1923 mutex_enter(&p->p_lock);
1924 1924 /*
1925 1925 	 * Clean up the DTrace provider associated with this process.
1926 1926 */
1927 1927 if (p->p_dtrace_probes) {
1928 1928 ASSERT(dtrace_fasttrap_exec_ptr != NULL);
1929 1929 dtrace_fasttrap_exec_ptr(p);
1930 1930 }
1931 1931 mutex_exit(&p->p_lock);
1932 1932
1933 1933 /*
1934 1934 	 * Discard the lwpchan cache.
1935 1935 */
1936 1936 if (p->p_lcp != NULL)
1937 1937 lwpchan_destroy_cache(1);
1938 1938
1939 1939 /*
1940 1940 * Delete the POSIX timers.
1941 1941 */
1942 1942 if (p->p_itimer != NULL)
1943 1943 timer_exit();
1944 1944
1945 1945 /*
1946 1946 * Delete the ITIMER_REALPROF interval timer.
1947 1947 * The other ITIMER_* interval timers are specified
1948 1948 * to be inherited across exec().
1949 1949 */
1950 1950 delete_itimer_realprof();
1951 1951
1952 1952 if (AU_AUDITING())
1953 1953 audit_exec(args->stk_base, args->stk_base + args->arglen,
1954 1954 args->na - args->ne, args->ne, args->pfcred);
1955 1955
1956 1956 /*
1957 1957 * Ensure that we don't change resource associations while we
1958 1958 * change address spaces.
1959 1959 */
1960 1960 mutex_enter(&p->p_lock);
1961 1961 pool_barrier_enter();
1962 1962 mutex_exit(&p->p_lock);
1963 1963
1964 1964 /*
1965 1965 * Destroy the old address space and create a new one.
1966 1966 * From here on, any errors are fatal to the exec()ing process.
1967 1967 * On error we return -1, which means the caller must SIGKILL
1968 1968 * the process.
1969 1969 */
1970 1970 relvm();
1971 1971
1972 1972 mutex_enter(&p->p_lock);
1973 1973 pool_barrier_exit();
1974 1974 mutex_exit(&p->p_lock);
1975 1975
1976 1976 up->u_execsw = args->execswp;
1977 1977
1978 1978 p->p_brkbase = NULL;
1979 1979 p->p_brksize = 0;
1980 1980 p->p_brkpageszc = 0;
1981 1981 p->p_stksize = 0;
1982 1982 p->p_stkpageszc = 0;
1983 1983 p->p_model = args->to_model;
1984 1984 p->p_usrstack = usrstack;
1985 1985 p->p_stkprot = args->stk_prot;
1986 1986 p->p_datprot = args->dat_prot;
1987 1987
1988 1988 /*
1989 1989 	 * Reset resource controls so that all controls are again active
1990 1990 	 * and appropriate to the potentially new address model for the
1991 1991 	 * process.
1992 1992 */
1993 1993 e.rcep_p.proc = p;
1994 1994 e.rcep_t = RCENTITY_PROCESS;
1995 1995 rctl_set_reset(p->p_rctls, p, &e);
1996 1996
1997 1997 /* Too early to call map_pgsz for the heap */
1998 1998 if (use_stk_lpg) {
1999 1999 p->p_stkpageszc = page_szc(map_pgsz(MAPPGSZ_STK, p, 0, 0, 0));
2000 2000 }
2001 2001
2002 2002 mutex_enter(&p->p_lock);
2003 2003 p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
2004 2004 mutex_exit(&p->p_lock);
2005 2005
2006 2006 /*
2007 2007 * Some platforms may choose to randomize real stack start by adding a
2008 2008 * small slew (not more than a few hundred bytes) to the top of the
2009 2009 * stack. This helps avoid cache thrashing when identical processes
2010 2010 * simultaneously share caches that don't provide enough associativity
2011 2011 	 * (e.g. sun4v systems). In this case stack slewing makes the same hot
2012 2012 	 * stack variables in different processes live in different cache sets,
2013 2013 	 * increasing effective associativity.
2014 2014 */
2015 2015 sp_slew = exec_get_spslew();
2016 2016 ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
2017 2017 exec_set_sp(size + sp_slew);
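	/*
	 * Illustrative note (not in the original source): sp_slew is small,
	 * stack-aligned (see the ASSERT above), and folded into the reserved
	 * stack size here; the stack image itself is copied out starting at
	 * usrstack - sp_slew in the stk_copyout() call at the end of this
	 * function.
	 */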
2018 2018
2019 2019 as = as_alloc();
2020 2020 p->p_as = as;
2021 2021 as->a_proc = p;
2022 2022 if (p->p_model == DATAMODEL_ILP32 || args->addr32)
2023 2023 as->a_userlimit = (caddr_t)USERLIMIT32;
2024 2024 (void) hat_setup(as->a_hat, HAT_ALLOC);
2025 2025 hat_join_srd(as->a_hat, args->ex_vp);
2026 2026
2027 2027 /*
2028 2028 * Finally, write out the contents of the new stack.
2029 2029 */
2030 2030 error = stk_copyout(args, usrstack - sp_slew, auxvpp, up);
2031 2031 kmem_free(args->stk_base, args->stk_size);
2032 2032 return (error);
2033 2033 }
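
As a rough illustration of the initial stack layout that exec_args() and
stk_copyout() build (see the diagram above), the small user-space sketch below
walks from envp past its NULL terminator to the aux vector at process startup.
It is not part of this change and assumes an illumos-style environment where
<sys/auxv.h> supplies auxv_t and AT_NULL.

#include <stdio.h>
#include <sys/auxv.h>	/* auxv_t, AT_NULL and friends */

int
main(int argc, char *argv[], char *envp[])
{
	char **p = envp;
	auxv_t *av;

	(void) printf("argc = %d, argv[0] = %s\n", argc, argv[0]);

	/* Step over the environment pointers up to their NULL terminator. */
	while (*p != NULL)
		p++;

	/* The aux vector starts immediately after envp's NULL entry. */
	for (av = (auxv_t *)(p + 1); av->a_type != AT_NULL; av++)
		(void) printf("auxv type %d, value %p\n",
		    av->a_type, av->a_un.a_ptr);

	return (0);
}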