Print this page
uts: Allow for address space randomisation.
Randomise the base addresses of shared objects, non-fixed mappings, the
stack and the heap. Introduce a service, svc:/system/process-security,
and a tool psecflags(1) to control and observe it.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/exec.c
+++ new/usr/src/uts/common/os/exec.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /* Copyright (c) 1988 AT&T */
27 27 /* All Rights Reserved */
28 28 /*
29 29 * Copyright 2014, Joyent, Inc. All rights reserved.
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/param.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/systm.h>
36 36 #include <sys/signal.h>
37 37 #include <sys/cred_impl.h>
38 38 #include <sys/policy.h>
39 39 #include <sys/user.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/file.h>
42 42 #include <sys/vfs.h>
43 43 #include <sys/vnode.h>
44 44 #include <sys/mman.h>
45 45 #include <sys/acct.h>
46 46 #include <sys/cpuvar.h>
47 47 #include <sys/proc.h>
48 48 #include <sys/cmn_err.h>
49 49 #include <sys/debug.h>
50 50 #include <sys/pathname.h>
51 51 #include <sys/vm.h>
52 52 #include <sys/lgrp.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/exec.h>
55 55 #include <sys/exechdr.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/prsystm.h>
58 58 #include <sys/modctl.h>
59 59 #include <sys/vmparam.h>
60 60 #include <sys/door.h>
61 61 #include <sys/schedctl.h>
↓ open down ↓ |
61 lines elided |
↑ open up ↑ |
62 62 #include <sys/utrap.h>
63 63 #include <sys/systeminfo.h>
64 64 #include <sys/stack.h>
65 65 #include <sys/rctl.h>
66 66 #include <sys/dtrace.h>
67 67 #include <sys/lwpchan_impl.h>
68 68 #include <sys/pool.h>
69 69 #include <sys/sdt.h>
70 70 #include <sys/brand.h>
71 71 #include <sys/klpd.h>
72 +#include <sys/random.h>
72 73
73 74 #include <c2/audit.h>
74 75
75 76 #include <vm/hat.h>
76 77 #include <vm/anon.h>
77 78 #include <vm/as.h>
78 79 #include <vm/seg.h>
79 80 #include <vm/seg_vn.h>
80 81
/*
 * Flag bits returned by execsetid(), describing how the credentials and
 * privilege sets of the process must change across this exec.
 */
#define	PRIV_RESET	0x01	/* needs to reset privs */
#define	PRIV_SETID	0x02	/* needs to change uids */
#define	PRIV_SETUGID	0x04	/* is setuid/setgid/forced privs */
#define	PRIV_INCREASE	0x08	/* child runs with more privs */
#define	MAC_FLAGS	0x10	/* need to adjust MAC flags */
#define	PRIV_FORCED	0x20	/* has forced privileges */

static int execsetid(struct vnode *, struct vattr *, uid_t *, uid_t *,
    priv_set_t *, cred_t *, const char *);
static int hold_execsw(struct execsw *);

uint_t auxv_hwcap = 0;	/* auxv AT_SUN_HWCAP value; determined on the fly */
uint_t auxv_hwcap_2 = 0;	/* AT_SUN_HWCAP2 */
#if defined(_SYSCALL32_IMPL)
uint_t auxv_hwcap32 = 0;	/* 32-bit version of auxv_hwcap */
uint_t auxv_hwcap32_2 = 0;	/* 32-bit version of auxv_hwcap2 */
#endif

/* p_flag bits that are set/cleared together across a set-id exec */
#define	PSUIDFLAGS		(SNOCD|SUGID)
100 101
/*
 * ASLR tunables: maximum randomization applied to the heap (brk) base and
 * to the stack base.  These are consumed within the specific exec modules,
 * but are defined here because:
 *
 * 1) The exec modules are unloadable, which would make this near useless.
 *
 * 2) We want them to be common across all of them, should more than ELF come
 *    to support them.
 *
 * All must be powers of 2.
 */
volatile size_t aslr_max_brk_skew = 16 * 1024 * 1024; /* 16MB */
#pragma weak exec_stackgap = aslr_max_stack_skew /* Old, compatible name */
volatile size_t aslr_max_stack_skew = 64 * 1024; /* 64KB */
115 +
116 +/*
102 117 * exece() - system call wrapper around exec_common()
103 118 */
104 119 int
105 120 exece(const char *fname, const char **argp, const char **envp)
106 121 {
107 122 int error;
108 123
109 124 error = exec_common(fname, argp, envp, EBA_NONE);
110 125 return (error ? (set_errno(error)) : 0);
111 126 }
112 127
/*
 * exec_common() - common implementation underlying exec(2).
 *
 * Resolves fname, applies exec policy checks (secpolicy_basic_exec(), no
 * execs from attribute directories), optionally consults pfexecd when the
 * process runs with PRIV_PFEXEC, and then calls gexec() to run the
 * object-file specific exec handler.  On success the process is left
 * single-threaded (lwp id reset to 1) with signal, profiling, contract
 * template and lwp-directory state reinitialized.
 *
 * brand_action: EBA_NONE for a plain exec; EBA_NATIVE unbrands a branded
 * process; any other value requests branding (only valid in a branded
 * zone).
 *
 * Returns 0 on success or an errno value on failure.
 */
int
exec_common(const char *fname, const char **argp, const char **envp,
    int brand_action)
{
	vnode_t *vp = NULL, *dir = NULL, *tmpvp = NULL;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct user *up = PTOU(p);
	long execsz;		/* temporary count of exec size */
	int i;
	int error;
	char exec_file[MAXCOMLEN+1];	/* truncated name, for accounting */
	struct pathname pn;
	struct pathname resolvepn;
	struct uarg args;
	struct execa ua;
	k_sigset_t savedmask;
	lwpdir_t *lwpdir = NULL;
	tidhash_t *tidhash;
	lwpdir_t *old_lwpdir = NULL;
	uint_t old_lwpdir_sz;
	tidhash_t *old_tidhash;
	uint_t old_tidhash_sz;
	ret_tidhash_t *ret_tidhash;
	lwpent_t *lep;
	boolean_t brandme = B_FALSE;

	/*
	 * exec() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (ENOTSUP);

	if (brand_action != EBA_NONE) {
		/*
		 * Brand actions are not supported for processes that are not
		 * running in a branded zone.
		 */
		if (!ZONE_IS_BRANDED(p->p_zone))
			return (ENOTSUP);

		if (brand_action == EBA_NATIVE) {
			/* Only branded processes can be unbranded */
			if (!PROC_IS_BRANDED(p))
				return (ENOTSUP);
		} else {
			/* Only unbranded processes can be branded */
			if (PROC_IS_BRANDED(p))
				return (ENOTSUP);
			brandme = B_TRUE;
		}
	} else {
		/*
		 * If this is a native zone, or if the process is already
		 * branded, then we don't need to do anything. If this is
		 * a native process in a branded zone, we need to brand the
		 * process as it exec()s the new binary.
		 */
		if (ZONE_IS_BRANDED(p->p_zone) && !PROC_IS_BRANDED(p))
			brandme = B_TRUE;
	}

	/*
	 * Inform /proc that an exec() has started.
	 * Hold signals that are ignored by default so that we will
	 * not be interrupted by a signal that will be ignored after
	 * successful completion of gexec().
	 */
	mutex_enter(&p->p_lock);
	prexecstart();
	schedctl_finish_sigblock(curthread);
	savedmask = curthread->t_hold;
	sigorset(&curthread->t_hold, &ignoredefault);
	mutex_exit(&p->p_lock);

	/*
	 * Look up path name and remember last component for later.
	 * To help coreadm expand its %d token, we attempt to save
	 * the directory containing the executable in p_execdir. The
	 * first call to lookuppn() may fail and return EINVAL because
	 * dirvpp is non-NULL. In that case, we make a second call to
	 * lookuppn() with dirvpp set to NULL; p_execdir will be NULL,
	 * but coreadm is allowed to expand %d to the empty string and
	 * there are other cases in which that failure may occur.
	 */
	if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
		goto out;
	pn_alloc(&resolvepn);
	if ((error = lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
		pn_free(&resolvepn);
		pn_free(&pn);
		if (error != EINVAL)
			goto out;

		dir = NULL;
		if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
			goto out;
		pn_alloc(&resolvepn);
		if ((error = lookuppn(&pn, &resolvepn, FOLLOW, NULLVPP,
		    &vp)) != 0) {
			pn_free(&resolvepn);
			pn_free(&pn);
			goto out;
		}
	}
	if (vp == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = ENOENT;
		pn_free(&resolvepn);
		pn_free(&pn);
		goto out;
	}

	if ((error = secpolicy_basic_exec(CRED(), vp)) != 0) {
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	/*
	 * We do not allow executing files in attribute directories.
	 * We test this by determining whether the resolved path
	 * contains a "/" when we're in an attribute directory;
	 * only if the pathname does not contain a "/" the resolved path
	 * points to a file in the current working (attribute) directory.
	 */
	if ((p->p_user.u_cdir->v_flag & V_XATTRDIR) != 0 &&
	    strchr(resolvepn.pn_path, '/') == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = EACCES;
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	/* Save the (truncated) file name; it becomes u_comm on success. */
	bzero(exec_file, MAXCOMLEN+1);
	(void) strncpy(exec_file, pn.pn_path, MAXCOMLEN);
	bzero(&args, sizeof (args));
	args.pathname = resolvepn.pn_path;
	/* don't free resolvepn until we are done with args */
	pn_free(&pn);

	/*
	 * If we're running in a profile shell, then call pfexecd.
	 */
	if ((CR_FLAGS(p->p_cred) & PRIV_PFEXEC) != 0) {
		error = pfexec_call(p->p_cred, &resolvepn, &args.pfcred,
		    &args.scrubenv);

		/* Returning errno in case we're not allowed to execute. */
		if (error > 0) {
			if (dir != NULL)
				VN_RELE(dir);
			pn_free(&resolvepn);
			VN_RELE(vp);
			goto out;
		}

		/* Don't change the credentials when using old ptrace. */
		if (args.pfcred != NULL &&
		    (p->p_proc_flag & P_PR_PTRACE) != 0) {
			crfree(args.pfcred);
			args.pfcred = NULL;
			args.scrubenv = B_FALSE;
		}
	}

	/*
	 * Specific exec handlers, or policies determined via
	 * /etc/system may override the historical default.
	 */
	args.stk_prot = PROT_ZFOD;
	args.dat_prot = PROT_ZFOD;

	CPU_STATS_ADD_K(sys, sysexec, 1);
	DTRACE_PROC1(exec, char *, args.pathname);

	/* Pass the user's original arguments through to the exec handler. */
	ua.fname = fname;
	ua.argp = argp;
	ua.envp = envp;

	/* If necessary, brand this process before we start the exec. */
	if (brandme)
		brand_setbrand(p);

	if ((error = gexec(&vp, &ua, &args, NULL, 0, &execsz,
	    exec_file, p->p_cred, brand_action)) != 0) {
		if (brandme)
			brand_clearbrand(p, B_FALSE);
		VN_RELE(vp);
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		goto fail;
	}

	/*
	 * Free floating point registers (sun4u only)
	 */
	ASSERT(lwp != NULL);
	lwp_freeregs(lwp, 1);

	/*
	 * Free thread and process context ops.
	 */
	if (curthread->t_ctx)
		freectx(curthread, 1);
	if (p->p_pctx)
		freepctx(p, 1);

	/*
	 * Remember file name for accounting; clear any cached DTrace predicate.
	 */
	up->u_acflag &= ~AFORK;
	bcopy(exec_file, up->u_comm, MAXCOMLEN+1);
	curthread->t_predcache = NULL;

	/*
	 * Clear contract template state
	 */
	lwp_ctmpl_clear(lwp);

	/*
	 * Save the directory in which we found the executable for expanding
	 * the %d token used in core file patterns.
	 */
	mutex_enter(&p->p_lock);
	tmpvp = p->p_execdir;
	p->p_execdir = dir;
	if (p->p_execdir != NULL)
		VN_HOLD(p->p_execdir);
	mutex_exit(&p->p_lock);

	if (tmpvp != NULL)
		VN_RELE(tmpvp);

	/*
	 * Reset stack state to the user stack, clear set of signals
	 * caught on the signal stack, and reset list of signals that
	 * restart system calls; the new program's environment should
	 * not be affected by detritus from the old program. Any
	 * pending held signals remain held, so don't clear t_hold.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_oldcontext = 0;
	lwp->lwp_ustack = 0;
	lwp->lwp_old_stk_ctl = 0;
	sigemptyset(&up->u_signodefer);
	sigemptyset(&up->u_sigonstack);
	sigemptyset(&up->u_sigresethand);
	lwp->lwp_sigaltstack.ss_sp = 0;
	lwp->lwp_sigaltstack.ss_size = 0;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;

	/*
	 * Make saved resource limit == current resource limit.
	 */
	for (i = 0; i < RLIM_NLIMITS; i++) {
		/*CONSTCOND*/
		if (RLIM_SAVED(i)) {
			(void) rctl_rlimit_get(rctlproc_legacy[i], p,
			    &up->u_saved_rlimit[i]);
		}
	}

	/*
	 * If the action was to catch the signal, then the action
	 * must be reset to SIG_DFL.
	 */
	sigdefault(p);
	p->p_flag &= ~(SNOWAIT|SJCTL);
	p->p_flag |= (SEXECED|SMSACCT|SMSFORK);
	up->u_signal[SIGCLD - 1] = SIG_DFL;

	/*
	 * Delete the dot4 sigqueues/signotifies.
	 */
	sigqfree(p);

	mutex_exit(&p->p_lock);

	/* Reset profil(2) state; the new image starts unprofiled. */
	mutex_enter(&p->p_pflock);
	p->p_prof.pr_base = NULL;
	p->p_prof.pr_size = 0;
	p->p_prof.pr_off = 0;
	p->p_prof.pr_scale = 0;
	p->p_prof.pr_samples = 0;
	mutex_exit(&p->p_pflock);

	ASSERT(curthread->t_schedctl == NULL);

#if defined(__sparc)
	if (p->p_utraps != NULL)
		utrap_free(p);
#endif	/* __sparc */

	/*
	 * Close all close-on-exec files.
	 */
	close_exec(P_FINFO(p));
	TRACE_2(TR_FAC_PROC, TR_PROC_EXEC, "proc_exec:p %p up %p", p, up);

	/* Unbrand ourself if necessary. */
	if (PROC_IS_BRANDED(p) && (brand_action == EBA_NATIVE))
		brand_clearbrand(p, B_FALSE);

	setregs(&args);

	/* Mark this as an executable vnode */
	mutex_enter(&vp->v_lock);
	vp->v_flag |= VVMEXEC;
	mutex_exit(&vp->v_lock);

	VN_RELE(vp);
	if (dir != NULL)
		VN_RELE(dir);
	pn_free(&resolvepn);

	/*
	 * Allocate a new lwp directory and lwpid hash table if necessary.
	 */
	if (curthread->t_tid != 1 || p->p_lwpdir_sz != 2) {
		lwpdir = kmem_zalloc(2 * sizeof (lwpdir_t), KM_SLEEP);
		lwpdir->ld_next = lwpdir + 1;
		tidhash = kmem_zalloc(2 * sizeof (tidhash_t), KM_SLEEP);
		if (p->p_lwpdir != NULL)
			lep = p->p_lwpdir[curthread->t_dslot].ld_entry;
		else
			lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
	}

	if (PROC_IS_BRANDED(p))
		BROP(p)->b_exec();

	mutex_enter(&p->p_lock);
	prbarrier(p);

	/*
	 * Reset lwp id to the default value of 1.
	 * This is a single-threaded process now
	 * and lwp #1 is lwp_wait()able by default.
	 * The t_unpark flag should not be inherited.
	 */
	ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
	curthread->t_tid = 1;
	kpreempt_disable();
	ASSERT(curthread->t_lpl != NULL);
	p->p_t1_lgrpid = curthread->t_lpl->lpl_lgrpid;
	kpreempt_enable();
	if (p->p_tr_lgrpid != LGRP_NONE && p->p_tr_lgrpid != p->p_t1_lgrpid) {
		lgrp_update_trthr_migrations(1);
	}
	curthread->t_unpark = 0;
	curthread->t_proc_flag |= TP_TWAIT;
	curthread->t_proc_flag &= ~TP_DAEMON;	/* daemons shouldn't exec */
	p->p_lwpdaemon = 0;			/* but oh well ... */
	p->p_lwpid = 1;

	/*
	 * Install the newly-allocated lwp directory and lwpid hash table
	 * and insert the current thread into the new hash table.
	 */
	if (lwpdir != NULL) {
		old_lwpdir = p->p_lwpdir;
		old_lwpdir_sz = p->p_lwpdir_sz;
		old_tidhash = p->p_tidhash;
		old_tidhash_sz = p->p_tidhash_sz;
		p->p_lwpdir = p->p_lwpfree = lwpdir;
		p->p_lwpdir_sz = 2;
		lep->le_thread = curthread;
		lep->le_lwpid = curthread->t_tid;
		lep->le_start = curthread->t_start;
		lwp_hash_in(p, lep, tidhash, 2, 0);
		p->p_tidhash = tidhash;
		p->p_tidhash_sz = 2;
	}
	ret_tidhash = p->p_ret_tidhash;
	p->p_ret_tidhash = NULL;

	/*
	 * Restore the saved signal mask and
	 * inform /proc that the exec() has finished.
	 */
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);
	/* Free the displaced lwp directory/hash state outside p_lock. */
	if (old_lwpdir) {
		kmem_free(old_lwpdir, old_lwpdir_sz * sizeof (lwpdir_t));
		kmem_free(old_tidhash, old_tidhash_sz * sizeof (tidhash_t));
	}
	while (ret_tidhash != NULL) {
		ret_tidhash_t *next = ret_tidhash->rth_next;
		kmem_free(ret_tidhash->rth_tidhash,
		    ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
		kmem_free(ret_tidhash, sizeof (*ret_tidhash));
		ret_tidhash = next;
	}

	ASSERT(error == 0);
	DTRACE_PROC(exec__success);
	return (0);

fail:
	DTRACE_PROC1(exec__failure, int, error);
out:		/* error return */
	mutex_enter(&p->p_lock);
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);
	ASSERT(error != 0);
	return (error);
}
531 546
532 547
/*
 * Perform generic exec duties and switchout to object-file specific
 * handler.
 *
 * 'level' is 0 for the initial exec; nonzero levels are nested execs
 * (idatap is of type struct intpdata, so presumably interpreter data --
 * confirm against callers).  At level 0 this also performs the set-id
 * credential changes computed by execsetid(), promotes the inheritable
 * secflags, and notifies /proc tracers.
 *
 * Returns 0 on success, otherwise an errno; ENOEXEC if no exec switch
 * entry recognizes the file's magic number.
 */
int
gexec(
	struct vnode **vpp,
	struct execa *uap,
	struct uarg *args,
	struct intpdata *idatap,
	int level,
	long *execsz,
	caddr_t exec_file,
	struct cred *cred,
	int brand_action)
{
	struct vnode *vp, *execvp = NULL;
	proc_t *pp = ttoproc(curthread);
	struct execsw *eswp;
	int error = 0;
	int suidflags = 0;
	ssize_t resid;
	uid_t uid, gid;
	struct vattr vattr;
	char magbuf[MAGIC_BYTES];
	int setid;
	cred_t *oldcred, *newcred = NULL;
	int privflags = 0;
	int setidfl;
	priv_set_t fset;

	/*
	 * If the SNOCD or SUGID flag is set, turn it off and remember the
	 * previous setting so we can restore it if we encounter an error.
	 */
	if (level == 0 && (pp->p_flag & PSUIDFLAGS)) {
		mutex_enter(&pp->p_lock);
		suidflags = pp->p_flag & PSUIDFLAGS;
		pp->p_flag &= ~PSUIDFLAGS;
		mutex_exit(&pp->p_lock);
	}

	if ((error = execpermissions(*vpp, &vattr, args)) != 0)
		goto bad_noclose;

	/* need to open vnode for stateful file systems */
	if ((error = VOP_OPEN(vpp, FREAD, CRED(), NULL)) != 0)
		goto bad_noclose;
	vp = *vpp;

	/*
	 * Note: to support binary compatibility with SunOS a.out
	 * executables, we read in the first four bytes, as the
	 * magic number is in bytes 2-3.
	 */
	if (error = vn_rdwr(UIO_READ, vp, magbuf, sizeof (magbuf),
	    (offset_t)0, UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid))
		goto bad;
	if (resid != 0)
		goto bad;

	if ((eswp = findexec_by_hdr(magbuf)) == NULL)
		goto bad;

	if (level == 0 &&
	    (privflags = execsetid(vp, &vattr, &uid, &gid, &fset,
	    args->pfcred == NULL ? cred : args->pfcred, args->pathname)) != 0) {

		/* Pfcred is a credential with a ref count of 1 */

		if (args->pfcred != NULL) {
			privflags |= PRIV_INCREASE|PRIV_RESET;
			newcred = cred = args->pfcred;
		} else {
			newcred = cred = crdup(cred);
		}

		/* If we can, drop the PA bit */
		if ((privflags & PRIV_RESET) != 0)
			priv_adjust_PA(cred);

		if (privflags & PRIV_SETID) {
			cred->cr_uid = uid;
			cred->cr_gid = gid;
			cred->cr_suid = uid;
			cred->cr_sgid = gid;
		}

		if (privflags & MAC_FLAGS) {
			if (!(CR_FLAGS(cred) & NET_MAC_AWARE_INHERIT))
				CR_FLAGS(cred) &= ~NET_MAC_AWARE;
			CR_FLAGS(cred) &= ~NET_MAC_AWARE_INHERIT;
		}

		/*
		 * Implement the privilege updates:
		 *
		 * Restrict with L:
		 *
		 *	I' = I & L
		 *
		 *	E' = P' = (I' + F) & A
		 *
		 * But if running under ptrace, we cap I and F with P.
		 */
		if ((privflags & (PRIV_RESET|PRIV_FORCED)) != 0) {
			if ((privflags & PRIV_INCREASE) != 0 &&
			    (pp->p_proc_flag & P_PR_PTRACE) != 0) {
				priv_intersect(&CR_OPPRIV(cred),
				    &CR_IPRIV(cred));
				priv_intersect(&CR_OPPRIV(cred), &fset);
			}
			priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
			CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
			if (privflags & PRIV_FORCED) {
				priv_set_PA(cred);
				priv_union(&fset, &CR_EPRIV(cred));
				priv_union(&fset, &CR_PPRIV(cred));
			}
			priv_adjust_PA(cred);
		}
	} else if (level == 0 && args->pfcred != NULL) {
		newcred = cred = args->pfcred;
		privflags |= PRIV_INCREASE;
		/* pfcred is not forced to adhere to these settings */
		priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
		CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
		priv_adjust_PA(cred);
	}

	/*
	 * The new image gets the inheritable secflags as its secflags.
	 * XXX: This probably means we have the wrong secflags when exec
	 * fails.
	 */
	secflag_promote(pp);

	/* SunOS 4.x buy-back */
	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
	    (vattr.va_mode & (VSUID|VSGID))) {
		char path[MAXNAMELEN];
		refstr_t *mntpt = NULL;
		int ret = -1;

		bzero(path, sizeof (path));
		zone_hold(pp->p_zone);

		ret = vnodetopath(pp->p_zone->zone_rootvp, vp, path,
		    sizeof (path), cred);

		/* fallback to mountpoint if a path can't be found */
		if ((ret != 0) || (ret == 0 && path[0] == '\0'))
			mntpt = vfs_getmntpoint(vp->v_vfsp);

		if (mntpt == NULL)
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "file=%s", cred->cr_uid, path);
		else
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "fs=%s, file=%s", cred->cr_uid,
			    ZONE_PATH_TRANSLATE(refstr_value(mntpt),
			    pp->p_zone), exec_file);

		if (!INGLOBALZONE(pp)) {
			/* zone_rootpath always has trailing / */
			if (mntpt == NULL)
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, file=%s%s",
				    pp->p_zone->zone_name, cred->cr_uid,
				    pp->p_zone->zone_rootpath, path + 1);
			else
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, fs=%s, "
				    "file=%s", pp->p_zone->zone_name,
				    cred->cr_uid, refstr_value(mntpt),
				    exec_file);
		}

		if (mntpt != NULL)
			refstr_rele(mntpt);

		zone_rele(pp->p_zone);
	}

	/*
	 * execsetid() told us whether or not we had to change the
	 * credentials of the process. In privflags, it told us
	 * whether we gained any privileges or executed a set-uid executable.
	 */
	setid = (privflags & (PRIV_SETUGID|PRIV_INCREASE|PRIV_FORCED));

	/*
	 * Use /etc/system variable to determine if the stack
	 * should be marked as executable by default.
	 */
	if (noexec_user_stack)
		args->stk_prot &= ~PROT_EXEC;

	args->execswp = eswp;	/* Save execsw pointer in uarg for exec_func */
	args->ex_vp = vp;

	/*
	 * Traditionally, the setid flags told the sub processes whether
	 * the file just executed was set-uid or set-gid; this caused
	 * some confusion as the 'setid' flag did not match the SUGID
	 * process flag which is only set when the uids/gids do not match.
	 * A script set-gid/set-uid to the real uid/gid would start with
	 * /dev/fd/X but an executable would happily trust LD_LIBRARY_PATH.
	 * Now we flag those cases where the calling process cannot
	 * be trusted to influence the newly exec'ed process, either
	 * because it runs with more privileges or when the uids/gids
	 * do in fact not match.
	 * This also makes the runtime linker agree with the on exec
	 * values of SNOCD and SUGID.
	 */
	setidfl = 0;
	if (cred->cr_uid != cred->cr_ruid || (cred->cr_rgid != cred->cr_gid &&
	    !supgroupmember(cred->cr_gid, cred))) {
		setidfl |= EXECSETID_UGIDS;
	}
	if (setid & PRIV_SETUGID)
		setidfl |= EXECSETID_SETID;
	if (setid & PRIV_FORCED)
		setidfl |= EXECSETID_PRIVS;

	execvp = pp->p_exec;
	if (execvp)
		VN_HOLD(execvp);

	/* Hand off to the object-file specific handler (e.g. elfexec). */
	error = (*eswp->exec_func)(vp, uap, args, idatap, level, execsz,
	    setidfl, exec_file, cred, brand_action);
	rw_exit(eswp->exec_lock);
	if (error != 0) {
		if (execvp)
			VN_RELE(execvp);
		/*
		 * If this process's p_exec has been set to the vp of
		 * the executable by exec_func, we will return without
		 * calling VOP_CLOSE because proc_exit will close it
		 * on exit.
		 */
		if (pp->p_exec == vp)
			goto bad_noclose;
		else
			goto bad;
	}

	if (level == 0) {
		uid_t oruid;

		if (execvp != NULL) {
			/*
			 * Close the previous executable only if we are
			 * at level 0.
			 */
			(void) VOP_CLOSE(execvp, FREAD, 1, (offset_t)0,
			    cred, NULL);
		}

		mutex_enter(&pp->p_crlock);

		oruid = pp->p_cred->cr_ruid;

		if (newcred != NULL) {
			/*
			 * Free the old credentials, and set the new ones.
			 * Do this for both the process and the (single) thread.
			 */
			crfree(pp->p_cred);
			pp->p_cred = cred;	/* cred already held for proc */
			crhold(cred);		/* hold new cred for thread */
			/*
			 * DTrace accesses t_cred in probe context. t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			oldcred = curthread->t_cred;
			curthread->t_cred = cred;
			crfree(oldcred);

			if (priv_basic_test >= 0 &&
			    !PRIV_ISASSERT(&CR_IPRIV(newcred),
			    priv_basic_test)) {
				pid_t pid = pp->p_pid;
				char *fn = PTOU(pp)->u_comm;

				cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
				    "privilege removed from E/I", fn, pid);
			}
		}
		/*
		 * On emerging from a successful exec(), the saved
		 * uid and gid equal the effective uid and gid.
		 */
		cred->cr_suid = cred->cr_uid;
		cred->cr_sgid = cred->cr_gid;

		/*
		 * If the real and effective ids do not match, this
		 * is a setuid process that should not dump core.
		 * The group comparison is tricky; we prevent the code
		 * from flagging SNOCD when executing with an effective gid
		 * which is a supplementary group.
		 */
		if (cred->cr_ruid != cred->cr_uid ||
		    (cred->cr_rgid != cred->cr_gid &&
		    !supgroupmember(cred->cr_gid, cred)) ||
		    (privflags & PRIV_INCREASE) != 0)
			suidflags = PSUIDFLAGS;
		else
			suidflags = 0;

		mutex_exit(&pp->p_crlock);
		if (newcred != NULL && oruid != newcred->cr_ruid) {
			/* Note that the process remains in the same zone. */
			mutex_enter(&pidlock);
			upcount_dec(oruid, crgetzoneid(newcred));
			upcount_inc(newcred->cr_ruid, crgetzoneid(newcred));
			mutex_exit(&pidlock);
		}
		if (suidflags) {
			mutex_enter(&pp->p_lock);
			pp->p_flag |= suidflags;
			mutex_exit(&pp->p_lock);
		}
		if (setid && (pp->p_proc_flag & P_PR_PTRACE) == 0) {
			/*
			 * If process is traced via /proc, arrange to
			 * invalidate the associated /proc vnode.
			 */
			if (pp->p_plist || (pp->p_proc_flag & P_PR_TRACE))
				args->traceinval = 1;
		}
		if (pp->p_proc_flag & P_PR_PTRACE)
			psignal(pp, SIGTRAP);
		if (args->traceinval)
			prinvalidate(&pp->p_user);
	}
	if (execvp)
		VN_RELE(execvp);
	return (0);

bad:
	(void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, cred, NULL);

bad_noclose:
	if (newcred != NULL)
		crfree(newcred);
	if (error == 0)
		error = ENOEXEC;

	/* Restore the SNOCD/SUGID bits we cleared on entry. */
	if (suidflags) {
		mutex_enter(&pp->p_lock);
		pp->p_flag |= suidflags;
		mutex_exit(&pp->p_lock);
	}
	return (error);
}
886 905
887 906 extern char *execswnames[];
888 907
889 908 struct execsw *
890 909 allocate_execsw(char *name, char *magic, size_t magic_size)
891 910 {
892 911 int i, j;
893 912 char *ename;
894 913 char *magicp;
895 914
896 915 mutex_enter(&execsw_lock);
897 916 for (i = 0; i < nexectype; i++) {
898 917 if (execswnames[i] == NULL) {
899 918 ename = kmem_alloc(strlen(name) + 1, KM_SLEEP);
900 919 (void) strcpy(ename, name);
901 920 execswnames[i] = ename;
902 921 /*
903 922 * Set the magic number last so that we
904 923 * don't need to hold the execsw_lock in
905 924 * findexectype().
906 925 */
907 926 magicp = kmem_alloc(magic_size, KM_SLEEP);
908 927 for (j = 0; j < magic_size; j++)
909 928 magicp[j] = magic[j];
910 929 execsw[i].exec_magic = magicp;
911 930 mutex_exit(&execsw_lock);
912 931 return (&execsw[i]);
913 932 }
914 933 }
915 934 mutex_exit(&execsw_lock);
916 935 return (NULL);
917 936 }
918 937
919 938 /*
920 939 * Find the exec switch table entry with the corresponding magic string.
921 940 */
922 941 struct execsw *
923 942 findexecsw(char *magic)
924 943 {
925 944 struct execsw *eswp;
926 945
927 946 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
928 947 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
929 948 if (magic && eswp->exec_maglen != 0 &&
930 949 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0)
931 950 return (eswp);
932 951 }
933 952 return (NULL);
934 953 }
935 954
936 955 /*
937 956 * Find the execsw[] index for the given exec header string by looking for the
938 957 * magic string at a specified offset and length for each kind of executable
939 958 * file format until one matches. If no execsw[] entry is found, try to
940 959 * autoload a module for this magic string.
941 960 */
942 961 struct execsw *
943 962 findexec_by_hdr(char *header)
944 963 {
945 964 struct execsw *eswp;
946 965
947 966 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
948 967 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
949 968 if (header && eswp->exec_maglen != 0 &&
950 969 bcmp(&header[eswp->exec_magoff], eswp->exec_magic,
951 970 eswp->exec_maglen) == 0) {
952 971 if (hold_execsw(eswp) != 0)
953 972 return (NULL);
954 973 return (eswp);
955 974 }
956 975 }
957 976 return (NULL); /* couldn't find the type */
958 977 }
959 978
960 979 /*
961 980 * Find the execsw[] index for the given magic string. If no execsw[] entry
962 981 * is found, try to autoload a module for this magic string.
963 982 */
964 983 struct execsw *
965 984 findexec_by_magic(char *magic)
966 985 {
967 986 struct execsw *eswp;
968 987
969 988 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
970 989 ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
971 990 if (magic && eswp->exec_maglen != 0 &&
972 991 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) {
973 992 if (hold_execsw(eswp) != 0)
974 993 return (NULL);
975 994 return (eswp);
976 995 }
977 996 }
978 997 return (NULL); /* couldn't find the type */
979 998 }
980 999
981 1000 static int
982 1001 hold_execsw(struct execsw *eswp)
983 1002 {
984 1003 char *name;
985 1004
986 1005 rw_enter(eswp->exec_lock, RW_READER);
987 1006 while (!LOADED_EXEC(eswp)) {
988 1007 rw_exit(eswp->exec_lock);
989 1008 name = execswnames[eswp-execsw];
990 1009 ASSERT(name);
991 1010 if (modload("exec", name) == -1)
992 1011 return (-1);
993 1012 rw_enter(eswp->exec_lock, RW_READER);
994 1013 }
995 1014 return (0);
996 1015 }
997 1016
/*
 * Compute the uid/gid and privilege transitions implied by exec'ing the
 * vnode 'vp' (attributes in 'vattrp') with credential 'cr'.
 *
 * Returns a bitmask of actions (PRIV_RESET, PRIV_SETUGID, PRIV_FORCED,
 * PRIV_INCREASE, PRIV_SETID, MAC_FLAGS) for the caller to apply.  When
 * PRIV_SETID is set, *uidp/*gidp hold the ids to switch to; when
 * PRIV_FORCED is set, 'fset' has been filled in by get_forced_privs().
 */
static int
execsetid(struct vnode *vp, struct vattr *vattrp, uid_t *uidp, uid_t *gidp,
    priv_set_t *fset, cred_t *cr, const char *pathname)
{
	proc_t *pp = ttoproc(curthread);
	uid_t uid, gid;
	int privflags = 0;

	/*
	 * Remember credentials.
	 */
	uid = cr->cr_uid;
	gid = cr->cr_gid;

	/* Will try to reset the PRIV_AWARE bit later. */
	if ((CR_FLAGS(cr) & (PRIV_AWARE|PRIV_AWARE_INHERIT)) == PRIV_AWARE)
		privflags |= PRIV_RESET;

	/* set-id bits are honored only on filesystems that allow them */
	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) == 0) {
		/*
		 * If it's a set-uid root program we perform the
		 * forced privilege look-aside. This has three possible
		 * outcomes:
		 *	no look aside information -> treat as before
		 *	look aside in Limit set -> apply forced privs
		 *	look aside not in Limit set -> ignore set-uid root
		 *
		 * Ordinary set-uid root execution only allowed if the limit
		 * set holds all unsafe privileges.
		 */
		if (vattrp->va_mode & VSUID) {
			if (vattrp->va_uid == 0) {
				int res = get_forced_privs(cr, pathname, fset);

				switch (res) {
				case -1:
					/* no look-aside info found */
					if (priv_issubset(&priv_unsafe,
					    &CR_LPRIV(cr))) {
						uid = vattrp->va_uid;
						privflags |= PRIV_SETUGID;
					}
					break;
				case 0:
					/* forced privileges apply */
					privflags |= PRIV_FORCED|PRIV_INCREASE;
					break;
				default:
					break;
				}
			} else {
				uid = vattrp->va_uid;
				privflags |= PRIV_SETUGID;
			}
		}
		if (vattrp->va_mode & VSGID) {
			gid = vattrp->va_gid;
			privflags |= PRIV_SETUGID;
		}
	}

	/*
	 * Do we need to change our credential anyway?
	 * This is the case when E != I or P != I, as
	 * we need to do the assignments (with F empty and A full)
	 * Or when I is not a subset of L; in that case we need to
	 * enforce L.
	 *
	 *	I' = L & I
	 *
	 *	E' = P' = (I' + F) & A
	 * or
	 *	E' = P' = I'
	 */
	if (!priv_isequalset(&CR_EPRIV(cr), &CR_IPRIV(cr)) ||
	    !priv_issubset(&CR_IPRIV(cr), &CR_LPRIV(cr)) ||
	    !priv_isequalset(&CR_PPRIV(cr), &CR_IPRIV(cr)))
		privflags |= PRIV_RESET;

	/* Child has more privileges than parent */
	if (!priv_issubset(&CR_IPRIV(cr), &CR_PPRIV(cr)))
		privflags |= PRIV_INCREASE;

	/* If MAC-aware flag(s) are on, need to update cred to remove. */
	if ((CR_FLAGS(cr) & NET_MAC_AWARE) ||
	    (CR_FLAGS(cr) & NET_MAC_AWARE_INHERIT))
		privflags |= MAC_FLAGS;
	/*
	 * Set setuid/setgid protections if no ptrace() compatibility.
	 * For privileged processes, honor setuid/setgid even in
	 * the presence of ptrace() compatibility.
	 */
	if (((pp->p_proc_flag & P_PR_PTRACE) == 0 ||
	    PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, (uid == 0))) &&
	    (cr->cr_uid != uid ||
	    cr->cr_gid != gid ||
	    cr->cr_suid != uid ||
	    cr->cr_sgid != gid)) {
		*uidp = uid;
		*gidp = gid;
		privflags |= PRIV_SETID;
	}
	return (privflags);
}
1100 1119
/*
 * Verify that the current process may execute vnode 'vp': fetch the
 * attributes exec needs into 'vattrp' and check execute access, file
 * type, the VFS_NOEXEC mount flag, and that at least one execute bit is
 * set.  If the process is traced, additionally require read access; a
 * /proc-traced process that lacks it gets its /proc vnode invalidated
 * (args->traceinval), while a ptrace(2)-compat process fails outright.
 * Returns 0 on success or an errno value.
 */
int
execpermissions(struct vnode *vp, struct vattr *vattrp, struct uarg *args)
{
	int error;
	proc_t *p = ttoproc(curthread);

	vattrp->va_mask = AT_MODE | AT_UID | AT_GID | AT_SIZE;
	if (error = VOP_GETATTR(vp, vattrp, ATTR_EXEC, p->p_cred, NULL))
		return (error);
	/*
	 * Check the access mode.
	 * If VPROC, ask /proc if the file is an object file.
	 */
	if ((error = VOP_ACCESS(vp, VEXEC, 0, p->p_cred, NULL)) != 0 ||
	    !(vp->v_type == VREG || (vp->v_type == VPROC && pr_isobject(vp))) ||
	    (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0 ||
	    (vattrp->va_mode & (VEXEC|(VEXEC>>3)|(VEXEC>>6))) == 0) {
		if (error == 0)
			error = EACCES;
		return (error);
	}

	if ((p->p_plist || (p->p_proc_flag & (P_PR_PTRACE|P_PR_TRACE))) &&
	    (error = VOP_ACCESS(vp, VREAD, 0, p->p_cred, NULL))) {
		/*
		 * If process is under ptrace(2) compatibility,
		 * fail the exec(2).
		 */
		if (p->p_proc_flag & P_PR_PTRACE)
			goto bad;
		/*
		 * Process is traced via /proc.
		 * Arrange to invalidate the /proc vnode.
		 */
		args->traceinval = 1;
	}
	return (0);
bad:
	if (error == 0)
		error = ENOEXEC;
	return (error);
}
1143 1162
/*
 * Map a section of an executable file into the user's
 * address space.
 *
 * vp/offset/len describe the file section, addr the (possibly unaligned)
 * user address it must occupy, and zfodlen the amount of zero-fill data
 * (e.g. BSS) that follows it.  When 'page' is set the section is mapped
 * for demand paging via VOP_MAP(); otherwise a zero-fill segment is
 * created and the section is read in with vn_rdwr().  'szc' is the
 * preferred page size code for the zero-fill segment.  Returns 0 or an
 * errno value.
 */
int
execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, int page, uint_t szc)
{
	int error = 0;
	off_t oldoffset;
	caddr_t zfodbase, oldaddr;
	size_t end, oldlen;
	size_t zfoddiff;
	label_t ljb;
	proc_t *p = ttoproc(curthread);

	oldaddr = addr;
	/* Page-align the address; len grows to cover the same bytes. */
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if (len) {
		oldlen = len;
		len += ((size_t)oldaddr - (size_t)addr);
		oldoffset = offset;
		offset = (off_t)((uintptr_t)offset & PAGEMASK);
		if (page) {
			spgcnt_t  prefltmem, availm, npages;
			int preread;
			uint_t mflag = MAP_PRIVATE | MAP_FIXED;

			/* Execute-only sections are text; the rest is data. */
			if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
				mflag |= MAP_TEXT;
			} else {
				mflag |= MAP_INITDATA;
			}

			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (error = VOP_MAP(vp, (offset_t)offset,
			    p->p_as, &addr, len, prot, PROT_ALL,
			    mflag, CRED(), NULL))
				goto bad;

			/*
			 * If the segment can fit, then we prefault
			 * the entire segment in. This is based on the
			 * model that says the best working set of a
			 * small program is all of its pages.
			 */
			npages = (spgcnt_t)btopr(len);
			prefltmem = freemem - desfree;
			preread =
			    (npages < prefltmem && len < PGTHRESH) ? 1 : 0;

			/*
			 * If we aren't prefaulting the segment,
			 * increment "deficit", if necessary to ensure
			 * that pages will become available when this
			 * process starts executing.
			 */
			availm = freemem - lotsfree;
			if (preread == 0 && npages > availm &&
			    deficit < lotsfree) {
				deficit += MIN((pgcnt_t)(npages - availm),
				    lotsfree - deficit);
			}

			if (preread) {
				TRACE_2(TR_FAC_PROC, TR_EXECMAP_PREREAD,
				    "execmap preread:freemem %d size %lu",
				    freemem, len);
				(void) as_fault(p->p_as->a_hat, p->p_as,
				    (caddr_t)addr, len, F_INVAL, S_READ);
			}
		} else {
			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}

			if (error = as_map(p->p_as, addr, len,
			    segvn_create, zfod_argsp))
				goto bad;
			/*
			 * Read in the segment in one big chunk.
			 */
			if (error = vn_rdwr(UIO_READ, vp, (caddr_t)oldaddr,
			    oldlen, (offset_t)oldoffset, UIO_USERSPACE, 0,
			    (rlim64_t)0, CRED(), (ssize_t *)0))
				goto bad;
			/*
			 * Now set protections.
			 */
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)addr,
				    len, prot);
			}
		}
	}

	if (zfodlen) {
		struct as *as = curproc->p_as;
		struct seg *seg;
		uint_t zprot = 0;

		end = (size_t)addr + len;
		zfodbase = (caddr_t)roundup(end, PAGESIZE);
		zfoddiff = (uintptr_t)zfodbase - end;
		if (zfoddiff) {
			/*
			 * Before we go to zero the remaining space on the last
			 * page, make sure we have write permission.
			 *
			 * Normal illumos binaries don't even hit the case
			 * where we have to change permission on the last page
			 * since their protection is typically either
			 *    PROT_USER | PROT_WRITE | PROT_READ
			 * or
			 *    PROT_ZFOD (same as PROT_ALL).
			 *
			 * We need to be careful how we zero-fill the last page
			 * if the segment protection does not include
			 * PROT_WRITE. Using as_setprot() can cause the VM
			 * segment code to call segvn_vpage(), which must
			 * allocate a page struct for each page in the segment.
			 * If we have a very large segment, this may fail, so
			 * we have to check for that, even though we ignore
			 * other return values from as_setprot.
			 */

			AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			seg = as_segat(curproc->p_as, (caddr_t)end);
			if (seg != NULL)
				SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1,
				    &zprot);
			AS_LOCK_EXIT(as, &as->a_lock);

			if (seg != NULL && (zprot & PROT_WRITE) == 0) {
				if (as_setprot(as, (caddr_t)end, zfoddiff - 1,
				    zprot | PROT_WRITE) == ENOMEM) {
					error = ENOMEM;
					goto bad;
				}
			}

			/* uzero() may fault; restore protections either way */
			if (on_fault(&ljb)) {
				no_fault();
				if (seg != NULL && (zprot & PROT_WRITE) == 0)
					(void) as_setprot(as, (caddr_t)end,
					    zfoddiff - 1, zprot);
				error = EFAULT;
				goto bad;
			}
			uzero((void *)end, zfoddiff);
			no_fault();
			if (seg != NULL && (zprot & PROT_WRITE) == 0)
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff - 1, zprot);
		}
		if (zfodlen > zfoddiff) {
			struct segvn_crargs crargs =
			    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

			zfodlen -= zfoddiff;
			if (valid_usr_range(zfodbase, zfodlen, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (szc > 0) {
				/*
				 * ASSERT alignment because the mapelfexec()
				 * caller for the szc > 0 case extended zfod
				 * so it's end is pgsz aligned.
				 */
				size_t pgsz = page_get_pagesize(szc);
				ASSERT(IS_P2ALIGNED(zfodbase + zfodlen, pgsz));

				if (IS_P2ALIGNED(zfodbase, pgsz)) {
					crargs.szc = szc;
				} else {
					crargs.szc = AS_MAP_HEAP;
				}
			} else {
				crargs.szc = AS_MAP_NO_LPOOB;
			}
			if (error = as_map(p->p_as, (caddr_t)zfodbase,
			    zfodlen, segvn_create, &crargs))
				goto bad;
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)zfodbase,
				    zfodlen, prot);
			}
		}
	}
	return (0);
bad:
	return (error);
}
1345 1364
1346 1365 void
1347 1366 setexecenv(struct execenv *ep)
1348 1367 {
1349 1368 proc_t *p = ttoproc(curthread);
1350 1369 klwp_t *lwp = ttolwp(curthread);
1351 1370 struct vnode *vp;
1352 1371
1353 1372 p->p_bssbase = ep->ex_bssbase;
1354 1373 p->p_brkbase = ep->ex_brkbase;
1355 1374 p->p_brksize = ep->ex_brksize;
1356 1375 if (p->p_exec)
1357 1376 VN_RELE(p->p_exec); /* out with the old */
1358 1377 vp = p->p_exec = ep->ex_vp;
1359 1378 if (vp != NULL)
1360 1379 VN_HOLD(vp); /* in with the new */
1361 1380
1362 1381 lwp->lwp_sigaltstack.ss_sp = 0;
1363 1382 lwp->lwp_sigaltstack.ss_size = 0;
1364 1383 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
1365 1384 }
1366 1385
/*
 * Open the executable *vpp, allocating a new file descriptor for it.
 * On success, *fdp holds the descriptor and the (held, open) vnode is
 * returned through *vpp; on failure all allocations are undone, *fdp is
 * set to -1, and an errno value is returned.
 */
int
execopen(struct vnode **vpp, int *fdp)
{
	struct vnode *vp = *vpp;
	file_t *fp;
	int error = 0;
	int filemode = FREAD;

	VN_HOLD(vp);		/* open reference */
	if (error = falloc(NULL, filemode, &fp, fdp)) {
		VN_RELE(vp);
		*fdp = -1;	/* just in case falloc changed value */
		return (error);
	}
	if (error = VOP_OPEN(&vp, filemode, CRED(), NULL)) {
		/* undo the hold, the fd reservation, and the file_t */
		VN_RELE(vp);
		setf(*fdp, NULL);
		unfalloc(fp);
		*fdp = -1;
		return (error);
	}
	*vpp = vp;		/* vnode should not have changed */
	fp->f_vnode = vp;

	/* f_tlock is held from falloc() — drop it before publishing the fp */
	mutex_exit(&fp->f_tlock);
	setf(*fdp, fp);
	return (0);
}
1394 1413
/*
 * Close file descriptor 'fd' (as opened by execopen()), clearing its slot.
 */
int
execclose(int fd)
{
	return (closeandsetf(fd, NULL));
}
1400 1419
1401 1420
/*
 * noexec stub function.
 *
 * Stub exec handler: warns that the exec capability is missing for the
 * named file and fails the exec with ENOEXEC.  All arguments other than
 * uap->fname are ignored.
 */
/*ARGSUSED*/
int
noexec(
	struct vnode *vp,
	struct execa *uap,
	struct uarg *args,
	struct intpdata *idatap,
	int level,
	long *execsz,
	int setid,
	caddr_t exec_file,
	struct cred *cred)
{
	cmn_err(CE_WARN, "missing exec capability for %s", uap->fname);
	return (ENOEXEC);
}
1421 1440
1422 1441 /*
1423 1442 * Support routines for building a user stack.
1424 1443 *
1425 1444 * execve(path, argv, envp) must construct a new stack with the specified
1426 1445 * arguments and environment variables (see exec_args() for a description
1427 1446 * of the user stack layout). To do this, we copy the arguments and
1428 1447 * environment variables from the old user address space into the kernel,
1429 1448 * free the old as, create the new as, and copy our buffered information
1430 1449 * to the new stack. Our kernel buffer has the following structure:
1431 1450 *
1432 1451 * +-----------------------+ <--- stk_base + stk_size
1433 1452 * | string offsets |
1434 1453 * +-----------------------+ <--- stk_offp
1435 1454 * | |
1436 1455 * | STK_AVAIL() space |
1437 1456 * | |
1438 1457 * +-----------------------+ <--- stk_strp
1439 1458 * | strings |
1440 1459 * +-----------------------+ <--- stk_base
1441 1460 *
1442 1461 * When we add a string, we store the string's contents (including the null
1443 1462 * terminator) at stk_strp, and we store the offset of the string relative to
1444 1463  * stk_base at --stk_offp. As strings are added, stk_strp increases and
1445 1464 * stk_offp decreases. The amount of space remaining, STK_AVAIL(), is just
1446 1465 * the difference between these pointers. If we run out of space, we return
1447 1466 * an error and exec_args() starts all over again with a buffer twice as large.
1448 1467 * When we're all done, the kernel buffer looks like this:
1449 1468 *
1450 1469 * +-----------------------+ <--- stk_base + stk_size
1451 1470 * | argv[0] offset |
1452 1471 * +-----------------------+
1453 1472 * | ... |
1454 1473 * +-----------------------+
1455 1474 * | argv[argc-1] offset |
1456 1475 * +-----------------------+
1457 1476 * | envp[0] offset |
1458 1477 * +-----------------------+
1459 1478 * | ... |
1460 1479 * +-----------------------+
1461 1480 * | envp[envc-1] offset |
1462 1481 * +-----------------------+
1463 1482 * | AT_SUN_PLATFORM offset|
1464 1483 * +-----------------------+
1465 1484 * | AT_SUN_EXECNAME offset|
1466 1485 * +-----------------------+ <--- stk_offp
1467 1486 * | |
1468 1487 * | STK_AVAIL() space |
1469 1488 * | |
1470 1489 * +-----------------------+ <--- stk_strp
1471 1490 * | AT_SUN_EXECNAME offset|
1472 1491 * +-----------------------+
1473 1492 * | AT_SUN_PLATFORM offset|
1474 1493 * +-----------------------+
1475 1494 * | envp[envc-1] string |
1476 1495 * +-----------------------+
1477 1496 * | ... |
1478 1497 * +-----------------------+
1479 1498 * | envp[0] string |
1480 1499 * +-----------------------+
1481 1500 * | argv[argc-1] string |
1482 1501 * +-----------------------+
1483 1502 * | ... |
1484 1503 * +-----------------------+
1485 1504 * | argv[0] string |
1486 1505 * +-----------------------+ <--- stk_base
1487 1506 */
1488 1507
1489 1508 #define STK_AVAIL(args) ((char *)(args)->stk_offp - (args)->stk_strp)
1490 1509
1491 1510 /*
1492 1511 * Add a string to the stack.
1493 1512 */
1494 1513 static int
1495 1514 stk_add(uarg_t *args, const char *sp, enum uio_seg segflg)
1496 1515 {
1497 1516 int error;
1498 1517 size_t len;
1499 1518
1500 1519 if (STK_AVAIL(args) < sizeof (int))
1501 1520 return (E2BIG);
1502 1521 *--args->stk_offp = args->stk_strp - args->stk_base;
1503 1522
1504 1523 if (segflg == UIO_USERSPACE) {
1505 1524 error = copyinstr(sp, args->stk_strp, STK_AVAIL(args), &len);
1506 1525 if (error != 0)
1507 1526 return (error);
1508 1527 } else {
1509 1528 len = strlen(sp) + 1;
1510 1529 if (len > STK_AVAIL(args))
1511 1530 return (E2BIG);
1512 1531 bcopy(sp, args->stk_strp, len);
1513 1532 }
1514 1533
1515 1534 args->stk_strp += len;
1516 1535
1517 1536 return (0);
1518 1537 }
1519 1538
1520 1539 static int
1521 1540 stk_getptr(uarg_t *args, char *src, char **dst)
1522 1541 {
1523 1542 int error;
1524 1543
1525 1544 if (args->from_model == DATAMODEL_NATIVE) {
1526 1545 ulong_t ptr;
1527 1546 error = fulword(src, &ptr);
1528 1547 *dst = (caddr_t)ptr;
1529 1548 } else {
1530 1549 uint32_t ptr;
1531 1550 error = fuword32(src, &ptr);
1532 1551 *dst = (caddr_t)(uintptr_t)ptr;
1533 1552 }
1534 1553 return (error);
1535 1554 }
1536 1555
1537 1556 static int
1538 1557 stk_putptr(uarg_t *args, char *addr, char *value)
1539 1558 {
1540 1559 if (args->to_model == DATAMODEL_NATIVE)
1541 1560 return (sulword(addr, (ulong_t)value));
1542 1561 else
1543 1562 return (suword32(addr, (uint32_t)(uintptr_t)value));
1544 1563 }
1545 1564
/*
 * Gather the new process image's argv[], envp[], interpreter, and AT_SUN_*
 * strings into the kernel stack buffer (see the layout diagram above),
 * and compute the eventual user stack size (args->usrstack_size) and the
 * bookkeeping counts args->na/ne/nc/arglen.  Returns 0, EFAULT on a bad
 * user pointer, E2BIG/ENAMETOOLONG when the buffer is too small (the
 * caller retries with a larger buffer), or another copyin error.
 */
static int
stk_copyin(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
{
	char *sp;
	int argc, error;
	int argv_empty = 0;
	size_t ptrsize = args->from_ptrsize;
	size_t size, pad;
	char *argv = (char *)uap->argp;
	char *envp = (char *)uap->envp;

	/*
	 * Copy interpreter's name and argument to argv[0] and argv[1].
	 */
	if (intp != NULL && intp->intp_name != NULL) {
		if ((error = stk_add(args, intp->intp_name, UIO_SYSSPACE)) != 0)
			return (error);
		if (intp->intp_arg != NULL &&
		    (error = stk_add(args, intp->intp_arg, UIO_SYSSPACE)) != 0)
			return (error);
		/* The script path itself becomes the interpreter's argument. */
		if (args->fname != NULL)
			error = stk_add(args, args->fname, UIO_SYSSPACE);
		else
			error = stk_add(args, uap->fname, UIO_USERSPACE);
		if (error)
			return (error);

		/*
		 * Check for an empty argv[].
		 */
		if (stk_getptr(args, argv, &sp))
			return (EFAULT);
		if (sp == NULL)
			argv_empty = 1;

		argv += ptrsize;		/* ignore original argv[0] */
	}

	if (argv_empty == 0) {
		/*
		 * Add argv[] strings to the stack.
		 */
		for (;;) {
			if (stk_getptr(args, argv, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			argv += ptrsize;
		}
	}
	/* argc = number of offsets recorded so far */
	argc = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->arglen = args->stk_strp - args->stk_base;

	/*
	 * Add environ[] strings to the stack.
	 */
	if (envp != NULL) {
		for (;;) {
			char *tmp = args->stk_strp;
			if (stk_getptr(args, envp, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			/* Drop LD_* variables when scrubbing is requested. */
			if (args->scrubenv && strncmp(tmp, "LD_", 3) == 0) {
				/* Undo the copied string */
				args->stk_strp = tmp;
				*(args->stk_offp++) = NULL;
			}
			envp += ptrsize;
		}
	}
	args->na = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->ne = args->na - argc;

	/*
	 * Add AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME, and
	 * AT_SUN_EMULATOR strings to the stack.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if ((error = stk_add(args, platform, UIO_SYSSPACE)) != 0)
			return (error);
		if ((error = stk_add(args, args->pathname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->brandname != NULL &&
		    (error = stk_add(args, args->brandname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->emulator != NULL &&
		    (error = stk_add(args, args->emulator, UIO_SYSSPACE)) != 0)
			return (error);
	}

	/*
	 * Compute the size of the stack. This includes all the pointers,
	 * the space reserved for the aux vector, and all the strings.
	 * The total number of pointers is args->na (which is argc + envc)
	 * plus 4 more: (1) a pointer's worth of space for argc; (2) the NULL
	 * after the last argument (i.e. argv[argc]); (3) the NULL after the
	 * last environment variable (i.e. envp[envc]); and (4) the NULL after
	 * all the strings, at the very top of the stack.
	 */
	size = (args->na + 4) * args->to_ptrsize + args->auxsize +
	    (args->stk_strp - args->stk_base);

	/*
	 * Pad the string section with zeroes to align the stack size.
	 */
	pad = P2NPHASE(size, args->stk_align);

	if (STK_AVAIL(args) < pad)
		return (E2BIG);

	args->usrstack_size = size + pad;

	while (pad-- != 0)
		*args->stk_strp++ = 0;

	args->nc = args->stk_strp - args->stk_base;

	return (0);
}
1670 1689
/*
 * Copy the assembled stack image from the kernel buffer onto the new user
 * stack at 'usrstack': argc, the argv[] and envp[] pointer arrays, and
 * all the strings.  Records argc/argv/envp in the user area for /proc,
 * fills in u_psargs, and completes the AT_SUN_* aux vector entries now
 * that the strings' final user addresses are known.  Returns 0 on
 * success, -1 on a fault while writing the user stack.
 */
static int
stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
{
	size_t ptrsize = args->to_ptrsize;
	ssize_t pslen;
	char *kstrp = args->stk_base;
	char *ustrp = usrstack - args->nc - ptrsize;
	char *usp = usrstack - args->usrstack_size;
	int *offp = (int *)(args->stk_base + args->stk_size);
	int envc = args->ne;
	int argc = args->na - envc;
	int i;

	/*
	 * Record argc for /proc.
	 */
	up->u_argc = argc;

	/*
	 * Put argc on the stack.  Note that even though it's an int,
	 * it always consumes ptrsize bytes (for alignment).
	 */
	if (stk_putptr(args, usp, (char *)(uintptr_t)argc))
		return (-1);

	/*
	 * Add argc space (ptrsize) to usp and record argv for /proc.
	 */
	up->u_argv = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the argv[] pointers on the stack.
	 */
	for (i = 0; i < argc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Copy arguments to u_psargs.
	 */
	pslen = MIN(args->arglen, PSARGSZ) - 1;
	for (i = 0; i < pslen; i++)
		up->u_psargs[i] = (kstrp[i] == '\0' ? ' ' : kstrp[i]);
	while (i < PSARGSZ)
		up->u_psargs[i++] = '\0';

	/*
	 * Add space for argv[]'s NULL terminator (ptrsize) to usp and
	 * record envp for /proc.
	 */
	up->u_envp = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the envp[] pointers on the stack.
	 */
	for (i = 0; i < envc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Add space for envp[]'s NULL terminator (ptrsize) to usp and
	 * remember where the stack ends, which is also where auxv begins.
	 */
	args->stackend = usp += ptrsize;

	/*
	 * Put all the argv[], envp[], and auxv strings on the stack.
	 */
	if (copyout(args->stk_base, ustrp, args->nc))
		return (-1);

	/*
	 * Fill in the aux vector now that we know the user stack addresses
	 * for the AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME and
	 * AT_SUN_EMULATOR strings.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if (args->to_model == DATAMODEL_NATIVE) {
			auxv_t **a = (auxv_t **)auxvpp;
			ADDAUX(*a, AT_SUN_PLATFORM, (long)&ustrp[*--offp])
			ADDAUX(*a, AT_SUN_EXECNAME, (long)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a,
				    AT_SUN_BRANDNAME, (long)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a,
				    AT_SUN_EMULATOR, (long)&ustrp[*--offp])
		} else {
			auxv32_t **a = (auxv32_t **)auxvpp;
			ADDAUX(*a,
			    AT_SUN_PLATFORM, (int)(uintptr_t)&ustrp[*--offp])
			ADDAUX(*a,
			    AT_SUN_EXECNAME, (int)(uintptr_t)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a, AT_SUN_BRANDNAME,
				    (int)(uintptr_t)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a, AT_SUN_EMULATOR,
				    (int)(uintptr_t)&ustrp[*--offp])
		}
	}

	return (0);
}
1775 1794
1776 1795 /*
1796 + * Though the actual stack base is constant, slew the %sp by a random aligned
1797 + * amount in [0,aslr_max_stack_skew). Mostly, this makes life slightly more
1798 + * complicated for buffer overflows hoping to overwrite the return address.
1799 + *
1800 + * On some platforms this helps avoid cache thrashing when identical processes
1801 + * simultaneously share caches that don't provide enough associativity
1802 + * (e.g. sun4v systems). In this case stack slewing makes the same hot stack
1803 + * variables in different processes live in different cache sets increasing
1804 + * effective associativity.
1805 + */
1806 +size_t
1807 +exec_get_spslew(void)
1808 +{
1809 +#ifdef sun4v
1810 + static uint_t sp_color_stride = 16;
1811 + static uint_t sp_color_mask = 0x1f;
1812 + static uint_t sp_current_color = (uint_t)-1;
1813 +#endif
1814 + size_t off;
1815 +
1816 + ASSERT(ISP2(aslr_max_stack_skew));
1817 +
1818 + if ((aslr_max_stack_skew == 0) ||
1819 + !secflag_enabled(curproc, PROC_SEC_ASLR)) {
1820 +#ifdef sun4v
1821 + uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
1822 + return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
1823 +#else
1824 + return (0);
1825 +#endif
1826 + }
1827 +
1828 + (void) random_get_pseudo_bytes((uint8_t *)&off, sizeof (off));
1829 + return SA(P2PHASE(off, aslr_max_stack_skew));
1830 +}
1831 +
1832 +/*
1777 1833 * Initialize a new user stack with the specified arguments and environment.
1778 1834 * The initial user stack layout is as follows:
1779 1835 *
1780 1836 * User Stack
1781 1837 * +---------------+ <--- curproc->p_usrstack
1782 1838 * | |
1783 1839 * | slew |
1784 1840 * | |
1785 1841 * +---------------+
1786 1842 * | NULL |
1787 1843 * +---------------+
1788 1844 * | |
1789 1845 * | auxv strings |
1790 1846 * | |
1791 1847 * +---------------+
1792 1848 * | |
1793 1849 * | envp strings |
1794 1850 * | |
1795 1851 * +---------------+
1796 1852 * | |
1797 1853 * | argv strings |
1798 1854 * | |
1799 1855 * +---------------+ <--- ustrp
1800 1856 * | |
1801 1857 * | aux vector |
1802 1858 * | |
1803 1859 * +---------------+ <--- auxv
1804 1860 * | NULL |
1805 1861 * +---------------+
1806 1862 * | envp[envc-1] |
1807 1863 * +---------------+
1808 1864 * | ... |
1809 1865 * +---------------+
1810 1866 * | envp[0] |
1811 1867 * +---------------+ <--- envp[]
1812 1868 * | NULL |
1813 1869 * +---------------+
1814 1870 * | argv[argc-1] |
1815 1871 * +---------------+
1816 1872 * | ... |
1817 1873 * +---------------+
1818 1874 * | argv[0] |
1819 1875 * +---------------+ <--- argv[]
1820 1876 * | argc |
1821 1877 * +---------------+ <--- stack base
1822 1878 */
/*
 * exec_args(): build the new user stack image for an exec.
 * Copies the argument/environment strings into a kernel buffer
 * (stk_copyin), forces all other lwps to exit, tears down the old
 * address space (relvm), constructs a fresh address space, and
 * finally writes the stack image out to user space (stk_copyout).
 * Returns 0 on success or an errno; per the in-body comment, once
 * relvm() has run any error is fatal to the exec()ing process.
 */
1823 1879 int
1824 1880 exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
1825 1881 {
1826 1882 size_t size;
1827 1883 int error;
1828 1884 proc_t *p = ttoproc(curthread);
1829 1885 user_t *up = PTOU(p);
1830 1886 char *usrstack;
1831 1887 rctl_entity_p_t e;
1832 1888 struct as *as;
1833 1889 extern int use_stk_lpg;
1834 1890 size_t sp_slew;
1835 1891
1836 1892 args->from_model = p->p_model;
1837 1893 if (p->p_model == DATAMODEL_NATIVE) {
1838 1894 args->from_ptrsize = sizeof (long);
1839 1895 } else {
1840 1896 args->from_ptrsize = sizeof (int32_t);
1841 1897 }
1842 1898
1843 1899 if (args->to_model == DATAMODEL_NATIVE) {
1844 1900 args->to_ptrsize = sizeof (long);
1845 1901 args->ncargs = NCARGS;
1846 1902 args->stk_align = STACK_ALIGN;
1847 1903 if (args->addr32)
1848 1904 usrstack = (char *)USRSTACK64_32;
1849 1905 else
1850 1906 usrstack = (char *)USRSTACK;
1851 1907 } else {
1852 1908 args->to_ptrsize = sizeof (int32_t);
1853 1909 args->ncargs = NCARGS32;
1854 1910 args->stk_align = STACK_ALIGN32;
1855 1911 usrstack = (char *)USRSTACK32;
1856 1912 }
1857 1913
1858 1914 ASSERT(P2PHASE((uintptr_t)usrstack, args->stk_align) == 0);
1859 1915
1860 1916 #if defined(__sparc)
1861 1917 /*
1862 1918 * Make sure user register windows are empty before
1863 1919 * attempting to make a new stack.
1864 1920 */
1865 1921 (void) flush_user_windows_to_stack(NULL);
1866 1922 #endif
1867 1923
/*
 * Copy the args into a kernel-side staging buffer, doubling the
 * buffer on E2BIG/ENAMETOOLONG until everything fits; give up once
 * the buffer would exceed ncargs.  Offsets grow down from the top
 * of the buffer (stk_offp) while strings grow up (stk_strp).
 */
1868 1924 for (size = PAGESIZE; ; size *= 2) {
1869 1925 args->stk_size = size;
1870 1926 args->stk_base = kmem_alloc(size, KM_SLEEP);
1871 1927 args->stk_strp = args->stk_base;
1872 1928 args->stk_offp = (int *)(args->stk_base + size);
1873 1929 error = stk_copyin(uap, args, intp, auxvpp);
1874 1930 if (error == 0)
1875 1931 break;
1876 1932 kmem_free(args->stk_base, size);
1877 1933 if (error != E2BIG && error != ENAMETOOLONG)
1878 1934 return (error);
1879 1935 if (size >= args->ncargs)
1880 1936 return (E2BIG);
1881 1937 }
1882 1938
1883 1939 size = args->usrstack_size;
1884 1940
1885 1941 ASSERT(error == 0);
1886 1942 ASSERT(P2PHASE(size, args->stk_align) == 0);
1887 1943 ASSERT((ssize_t)STK_AVAIL(args) >= 0);
1888 1944
1889 1945 if (size > args->ncargs) {
1890 1946 kmem_free(args->stk_base, args->stk_size);
1891 1947 return (E2BIG);
1892 1948 }
1893 1949
1894 1950 /*
1895 1951 * Leave only the current lwp and force the other lwps to exit.
1896 1952 * If another lwp beat us to the punch by calling exit(), bail out.
1897 1953 */
1898 1954 if ((error = exitlwps(0)) != 0) {
1899 1955 kmem_free(args->stk_base, args->stk_size);
1900 1956 return (error);
1901 1957 }
1902 1958
1903 1959 /*
1904 1960 * Revoke any doors created by the process.
1905 1961 */
1906 1962 if (p->p_door_list)
1907 1963 door_exit();
1908 1964
1909 1965 /*
1910 1966 * Release schedctl data structures.
1911 1967 */
1912 1968 if (p->p_pagep)
1913 1969 schedctl_proc_cleanup();
1914 1970
1915 1971 /*
1916 1972 * Clean up any DTrace helpers for the process.
1917 1973 */
1918 1974 if (p->p_dtrace_helpers != NULL) {
1919 1975 ASSERT(dtrace_helpers_cleanup != NULL);
1920 1976 (*dtrace_helpers_cleanup)();
1921 1977 }
1922 1978
1923 1979 mutex_enter(&p->p_lock);
1924 1980 /*
1925 1981 * Cleanup the DTrace provider associated with this process.
1926 1982 */
1927 1983 if (p->p_dtrace_probes) {
1928 1984 ASSERT(dtrace_fasttrap_exec_ptr != NULL);
1929 1985 dtrace_fasttrap_exec_ptr(p);
1930 1986 }
1931 1987 mutex_exit(&p->p_lock);
1932 1988
1933 1989 /*
1934 1990 * discard the lwpchan cache.
1935 1991 */
1936 1992 if (p->p_lcp != NULL)
1937 1993 lwpchan_destroy_cache(1);
1938 1994
1939 1995 /*
1940 1996 * Delete the POSIX timers.
1941 1997 */
1942 1998 if (p->p_itimer != NULL)
1943 1999 timer_exit();
1944 2000
1945 2001 /*
1946 2002 * Delete the ITIMER_REALPROF interval timer.
1947 2003 * The other ITIMER_* interval timers are specified
1948 2004 * to be inherited across exec().
1949 2005 */
1950 2006 delete_itimer_realprof();
1951 2007
1952 2008 if (AU_AUDITING())
1953 2009 audit_exec(args->stk_base, args->stk_base + args->arglen,
1954 2010 args->na - args->ne, args->ne, args->pfcred);
1955 2011
1956 2012 /*
1957 2013 * Ensure that we don't change resource associations while we
1958 2014 * change address spaces.
1959 2015 */
1960 2016 mutex_enter(&p->p_lock);
1961 2017 pool_barrier_enter();
1962 2018 mutex_exit(&p->p_lock);
1963 2019
1964 2020 /*
1965 2021 * Destroy the old address space and create a new one.
1966 2022 * From here on, any errors are fatal to the exec()ing process.
1967 2023 * On error we return -1, which means the caller must SIGKILL
1968 2024 * the process.
1969 2025 */
1970 2026 relvm();
1971 2027
1972 2028 mutex_enter(&p->p_lock);
1973 2029 pool_barrier_exit();
1974 2030 mutex_exit(&p->p_lock);
1975 2031
1976 2032 up->u_execsw = args->execswp;
1977 2033
1978 2034 p->p_brkbase = NULL;
1979 2035 p->p_brksize = 0;
1980 2036 p->p_brkpageszc = 0;
1981 2037 p->p_stksize = 0;
1982 2038 p->p_stkpageszc = 0;
1983 2039 p->p_model = args->to_model;
1984 2040 p->p_usrstack = usrstack;
1985 2041 p->p_stkprot = args->stk_prot;
1986 2042 p->p_datprot = args->dat_prot;
1987 2043
1988 2044 /*
1989 2045 * Reset resource controls such that all controls are again active as
1990 2046 * well as appropriate to the potentially new address model for the
1991 2047 * process.
1992 2048 */
1993 2049 e.rcep_p.proc = p;
1994 2050 e.rcep_t = RCENTITY_PROCESS;
1995 2051 rctl_set_reset(p->p_rctls, p, &e);
↓ open down ↓ |
209 lines elided |
↑ open up ↑ |
1996 2052
1997 2053 /* Too early to call map_pgsz for the heap */
1998 2054 if (use_stk_lpg) {
1999 2055 p->p_stkpageszc = page_szc(map_pgsz(MAPPGSZ_STK, p, 0, 0, 0));
2000 2056 }
2001 2057
2002 2058 mutex_enter(&p->p_lock);
2003 2059 p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
2004 2060 mutex_exit(&p->p_lock);
2005 2061
2006 - /*
2007 - * Some platforms may choose to randomize real stack start by adding a
2008 - * small slew (not more than a few hundred bytes) to the top of the
2009 - * stack. This helps avoid cache thrashing when identical processes
2010 - * simultaneously share caches that don't provide enough associativity
2011 - * (e.g. sun4v systems). In this case stack slewing makes the same hot
2012 - * stack variables in different processes to live in different cache
2013 - * sets increasing effective associativity.
2014 - */
2015 2062 sp_slew = exec_get_spslew();
2016 2063 ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
/*
 * NOTE(review): the new VERIFY below has three issues to confirm:
 * (1) it uses strict '<', so it also fires in the benign case where
 *     size + sp_slew == 0 (result equal, not less) — presumably size
 *     is always nonzero here, but that is not established locally;
 * (2) p_usrstack is a char *, and computing an out-of-range pointer
 *     is undefined behavior in C before the comparison ever runs —
 *     an unsigned (uintptr_t) comparison would be well-defined;
 * (3) it says 'curproc' where every other line of this function uses
 *     the local 'p' (p == ttoproc(curthread)) — same process, but
 *     inconsistent style.
 */
2064 + /* Be certain we don't underflow */
2065 + VERIFY((curproc->p_usrstack - (size + sp_slew)) < curproc->p_usrstack);
2017 2066 exec_set_sp(size + sp_slew);
2018 2067
2019 2068 as = as_alloc();
2020 2069 p->p_as = as;
2021 2070 as->a_proc = p;
2022 2071 if (p->p_model == DATAMODEL_ILP32 || args->addr32)
2023 2072 as->a_userlimit = (caddr_t)USERLIMIT32;
2024 2073 (void) hat_setup(as->a_hat, HAT_ALLOC);
2025 2074 hat_join_srd(as->a_hat, args->ex_vp);
2026 2075
2027 2076 /*
2028 2077 * Finally, write out the contents of the new stack.
2029 2078 */
2030 2079 error = stk_copyout(args, usrstack - sp_slew, auxvpp, up);
2031 2080 kmem_free(args->stk_base, args->stk_size);
2032 2081 return (error);
2033 2082 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX