7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.
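The four issues above make up the per-process security-flags (secflags) work: each mitigation (ASLR, a non-executable user stack, refusing mappings at or near NULL) is carried as a flag set on the process and consulted wherever the mitigation takes effect. The stand-alone C sketch below illustrates only that flag-set model; the names used (PSF_ASLR, fakeproc_t, secflag_set) are invented for illustration and are not the identifiers introduced by this changeset.

	/*
	 * Illustrative sketch only (not part of this webrev): secflags are
	 * essentially a per-process bitmask checked at the points where a
	 * mitigation applies.  All names here are hypothetical.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define	PSF_ASLR		0x01	/* randomize mapping bases */
	#define	PSF_FORBIDNULLMAP	0x02	/* refuse mappings near NULL */
	#define	PSF_NOEXECSTACK		0x04	/* non-executable user stack */

	typedef struct fakeproc {
		uint32_t p_secflags;		/* effective per-process flags */
	} fakeproc_t;

	static int
	secflag_set(const fakeproc_t *p, uint32_t flag)
	{
		return ((p->p_secflags & flag) != 0);
	}

	int
	main(void)
	{
		fakeproc_t p = { PSF_ASLR | PSF_NOEXECSTACK };

		printf("aslr: %d\n", secflag_set(&p, PSF_ASLR));
		printf("forbid NULL map: %d\n", secflag_set(&p, PSF_FORBIDNULLMAP));
		printf("noexec stack: %d\n", secflag_set(&p, PSF_NOEXECSTACK));
		return (0);
	}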
--- old/usr/src/uts/common/fs/proc/prsubr.c
+++ new/usr/src/uts/common/fs/proc/prsubr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/t_lock.h>
32 32 #include <sys/param.h>
33 33 #include <sys/cmn_err.h>
34 34 #include <sys/cred.h>
35 35 #include <sys/priv.h>
36 36 #include <sys/debug.h>
37 37 #include <sys/errno.h>
38 38 #include <sys/inline.h>
39 39 #include <sys/kmem.h>
40 40 #include <sys/mman.h>
41 41 #include <sys/proc.h>
42 42 #include <sys/brand.h>
43 43 #include <sys/sobject.h>
44 44 #include <sys/sysmacros.h>
45 45 #include <sys/systm.h>
46 46 #include <sys/uio.h>
47 47 #include <sys/var.h>
48 48 #include <sys/vfs.h>
49 49 #include <sys/vnode.h>
50 50 #include <sys/session.h>
51 51 #include <sys/pcb.h>
52 52 #include <sys/signal.h>
53 53 #include <sys/user.h>
54 54 #include <sys/disp.h>
55 55 #include <sys/class.h>
56 56 #include <sys/ts.h>
57 57 #include <sys/bitmap.h>
58 58 #include <sys/poll.h>
59 59 #include <sys/shm_impl.h>
60 60 #include <sys/fault.h>
61 61 #include <sys/syscall.h>
62 62 #include <sys/procfs.h>
63 63 #include <sys/processor.h>
64 64 #include <sys/cpuvar.h>
65 65 #include <sys/copyops.h>
66 66 #include <sys/time.h>
67 67 #include <sys/msacct.h>
68 68 #include <vm/as.h>
69 69 #include <vm/rm.h>
70 70 #include <vm/seg.h>
71 71 #include <vm/seg_vn.h>
72 72 #include <vm/seg_dev.h>
73 73 #include <vm/seg_spt.h>
74 74 #include <vm/page.h>
75 75 #include <sys/vmparam.h>
76 76 #include <sys/swap.h>
77 77 #include <fs/proc/prdata.h>
78 78 #include <sys/task.h>
79 79 #include <sys/project.h>
80 80 #include <sys/contract_impl.h>
81 81 #include <sys/contract/process.h>
82 82 #include <sys/contract/process_impl.h>
83 83 #include <sys/schedctl.h>
84 84 #include <sys/pool.h>
85 85 #include <sys/zone.h>
86 86 #include <sys/atomic.h>
87 87 #include <sys/sdt.h>
88 88
89 89 #define MAX_ITERS_SPIN 5
90 90
91 91 typedef struct prpagev {
92 92 uint_t *pg_protv; /* vector of page permissions */
93 93 char *pg_incore; /* vector of incore flags */
94 94 size_t pg_npages; /* number of pages in protv and incore */
95 95 ulong_t pg_pnbase; /* pn within segment of first protv element */
96 96 } prpagev_t;
97 97
98 98 size_t pagev_lim = 256 * 1024; /* limit on number of pages in prpagev_t */
99 99
100 100 extern struct seg_ops segdev_ops; /* needs a header file */
101 101 extern struct seg_ops segspt_shmops; /* needs a header file */
102 102
103 103 static int set_watched_page(proc_t *, caddr_t, caddr_t, ulong_t, ulong_t);
104 104 static void clear_watched_page(proc_t *, caddr_t, caddr_t, ulong_t);
105 105
106 106 /*
107 107 * Choose an lwp from the complete set of lwps for the process.
108 108 * This is called for any operation applied to the process
109 109 * file descriptor that requires an lwp to operate upon.
110 110 *
111 111 * Returns a pointer to the thread for the selected LWP,
112 112 * and with the dispatcher lock held for the thread.
113 113 *
114 114 * The algorithm for choosing an lwp is critical for /proc semantics;
115 115 * don't touch this code unless you know all of the implications.
116 116 */
117 117 kthread_t *
118 118 prchoose(proc_t *p)
119 119 {
120 120 kthread_t *t;
121 121 kthread_t *t_onproc = NULL; /* running on processor */
122 122 kthread_t *t_run = NULL; /* runnable, on disp queue */
123 123 kthread_t *t_sleep = NULL; /* sleeping */
124 124 kthread_t *t_hold = NULL; /* sleeping, performing hold */
125 125 kthread_t *t_susp = NULL; /* suspended stop */
126 126 kthread_t *t_jstop = NULL; /* jobcontrol stop, w/o directed stop */
127 127 kthread_t *t_jdstop = NULL; /* jobcontrol stop with directed stop */
128 128 kthread_t *t_req = NULL; /* requested stop */
129 129 kthread_t *t_istop = NULL; /* event-of-interest stop */
130 130 kthread_t *t_dtrace = NULL; /* DTrace stop */
131 131
132 132 ASSERT(MUTEX_HELD(&p->p_lock));
133 133
134 134 /*
135 135 * If the agent lwp exists, it takes precedence over all others.
136 136 */
137 137 if ((t = p->p_agenttp) != NULL) {
138 138 thread_lock(t);
139 139 return (t);
140 140 }
141 141
142 142 if ((t = p->p_tlist) == NULL) /* start at the head of the list */
143 143 return (t);
144 144 	do {	/* for each lwp in the process */
145 145 if (VSTOPPED(t)) { /* virtually stopped */
146 146 if (t_req == NULL)
147 147 t_req = t;
148 148 continue;
149 149 }
150 150
151 151 thread_lock(t); /* make sure thread is in good state */
152 152 switch (t->t_state) {
153 153 default:
154 154 panic("prchoose: bad thread state %d, thread 0x%p",
155 155 t->t_state, (void *)t);
156 156 /*NOTREACHED*/
157 157 case TS_SLEEP:
158 158 /* this is filthy */
159 159 if (t->t_wchan == (caddr_t)&p->p_holdlwps &&
160 160 t->t_wchan0 == NULL) {
161 161 if (t_hold == NULL)
162 162 t_hold = t;
163 163 } else {
164 164 if (t_sleep == NULL)
165 165 t_sleep = t;
166 166 }
167 167 break;
168 168 case TS_RUN:
169 169 case TS_WAIT:
170 170 if (t_run == NULL)
171 171 t_run = t;
172 172 break;
173 173 case TS_ONPROC:
174 174 if (t_onproc == NULL)
175 175 t_onproc = t;
176 176 break;
177 177 case TS_ZOMB: /* last possible choice */
178 178 break;
179 179 case TS_STOPPED:
180 180 switch (t->t_whystop) {
181 181 case PR_SUSPENDED:
182 182 if (t_susp == NULL)
183 183 t_susp = t;
184 184 break;
185 185 case PR_JOBCONTROL:
186 186 if (t->t_proc_flag & TP_PRSTOP) {
187 187 if (t_jdstop == NULL)
188 188 t_jdstop = t;
189 189 } else {
190 190 if (t_jstop == NULL)
191 191 t_jstop = t;
192 192 }
193 193 break;
194 194 case PR_REQUESTED:
195 195 if (t->t_dtrace_stop && t_dtrace == NULL)
196 196 t_dtrace = t;
197 197 else if (t_req == NULL)
198 198 t_req = t;
199 199 break;
200 200 case PR_SYSENTRY:
201 201 case PR_SYSEXIT:
202 202 case PR_SIGNALLED:
203 203 case PR_FAULTED:
204 204 /*
205 205 * Make an lwp calling exit() be the
206 206 * last lwp seen in the process.
207 207 */
208 208 if (t_istop == NULL ||
209 209 (t_istop->t_whystop == PR_SYSENTRY &&
210 210 t_istop->t_whatstop == SYS_exit))
211 211 t_istop = t;
212 212 break;
213 213 case PR_CHECKPOINT: /* can't happen? */
214 214 break;
215 215 default:
216 216 panic("prchoose: bad t_whystop %d, thread 0x%p",
217 217 t->t_whystop, (void *)t);
218 218 /*NOTREACHED*/
219 219 }
220 220 break;
221 221 }
222 222 thread_unlock(t);
223 223 } while ((t = t->t_forw) != p->p_tlist);
224 224
225 225 if (t_onproc)
226 226 t = t_onproc;
227 227 else if (t_run)
228 228 t = t_run;
229 229 else if (t_sleep)
230 230 t = t_sleep;
231 231 else if (t_jstop)
232 232 t = t_jstop;
233 233 else if (t_jdstop)
234 234 t = t_jdstop;
235 235 else if (t_istop)
236 236 t = t_istop;
237 237 else if (t_dtrace)
238 238 t = t_dtrace;
239 239 else if (t_req)
240 240 t = t_req;
241 241 else if (t_hold)
242 242 t = t_hold;
243 243 else if (t_susp)
244 244 t = t_susp;
245 245 else /* TS_ZOMB */
246 246 t = p->p_tlist;
247 247
248 248 if (t != NULL)
249 249 thread_lock(t);
250 250 return (t);
251 251 }
252 252
253 253 /*
254 254 * Wakeup anyone sleeping on the /proc vnode for the process/lwp to stop.
255 255 * Also call pollwakeup() if any lwps are waiting in poll() for POLLPRI
256 256 * on the /proc file descriptor. Called from stop() when a traced
257 257 * process stops on an event of interest. Also called from exit()
258 258 * and prinvalidate() to indicate POLLHUP and POLLERR respectively.
259 259 */
260 260 void
261 261 prnotify(struct vnode *vp)
262 262 {
263 263 prcommon_t *pcp = VTOP(vp)->pr_common;
264 264
265 265 mutex_enter(&pcp->prc_mutex);
266 266 cv_broadcast(&pcp->prc_wait);
267 267 mutex_exit(&pcp->prc_mutex);
268 268 if (pcp->prc_flags & PRC_POLL) {
269 269 /*
270 270 * We call pollwakeup() with POLLHUP to ensure that
271 271 * the pollers are awakened even if they are polling
272 272 * for nothing (i.e., waiting for the process to exit).
273 273 * This enables the use of the PRC_POLL flag for optimization
274 274 * (we can turn off PRC_POLL only if we know no pollers remain).
275 275 */
276 276 pcp->prc_flags &= ~PRC_POLL;
277 277 pollwakeup(&pcp->prc_pollhead, POLLHUP);
278 278 }
279 279 }
280 280
281 281 /* called immediately below, in prfree() */
282 282 static void
283 283 prfreenotify(vnode_t *vp)
284 284 {
285 285 prnode_t *pnp;
286 286 prcommon_t *pcp;
287 287
288 288 while (vp != NULL) {
289 289 pnp = VTOP(vp);
290 290 pcp = pnp->pr_common;
291 291 ASSERT(pcp->prc_thread == NULL);
292 292 pcp->prc_proc = NULL;
293 293 /*
294 294 * We can't call prnotify() here because we are holding
295 295 * pidlock. We assert that there is no need to.
296 296 */
297 297 mutex_enter(&pcp->prc_mutex);
298 298 cv_broadcast(&pcp->prc_wait);
299 299 mutex_exit(&pcp->prc_mutex);
300 300 ASSERT(!(pcp->prc_flags & PRC_POLL));
301 301
302 302 vp = pnp->pr_next;
303 303 pnp->pr_next = NULL;
304 304 }
305 305 }
306 306
307 307 /*
308 308 * Called from a hook in freeproc() when a traced process is removed
309 309 * from the process table. The proc-table pointers of all associated
310 310 * /proc vnodes are cleared to indicate that the process has gone away.
311 311 */
312 312 void
313 313 prfree(proc_t *p)
314 314 {
315 315 uint_t slot = p->p_slot;
316 316
317 317 ASSERT(MUTEX_HELD(&pidlock));
318 318
319 319 /*
320 320 * Block the process against /proc so it can be freed.
321 321 * It cannot be freed while locked by some controlling process.
322 322 * Lock ordering:
323 323 * pidlock -> pr_pidlock -> p->p_lock -> pcp->prc_mutex
324 324 */
325 325 mutex_enter(&pr_pidlock); /* protects pcp->prc_proc */
326 326 mutex_enter(&p->p_lock);
327 327 while (p->p_proc_flag & P_PR_LOCK) {
328 328 mutex_exit(&pr_pidlock);
329 329 cv_wait(&pr_pid_cv[slot], &p->p_lock);
330 330 mutex_exit(&p->p_lock);
331 331 mutex_enter(&pr_pidlock);
332 332 mutex_enter(&p->p_lock);
333 333 }
334 334
335 335 ASSERT(p->p_tlist == NULL);
336 336
337 337 prfreenotify(p->p_plist);
338 338 p->p_plist = NULL;
339 339
340 340 prfreenotify(p->p_trace);
341 341 p->p_trace = NULL;
342 342
343 343 /*
344 344 * We broadcast to wake up everyone waiting for this process.
345 345 * No one can reach this process from this point on.
346 346 */
347 347 cv_broadcast(&pr_pid_cv[slot]);
348 348
349 349 mutex_exit(&p->p_lock);
350 350 mutex_exit(&pr_pidlock);
351 351 }
352 352
353 353 /*
354 354 * Called from a hook in exit() when a traced process is becoming a zombie.
355 355 */
356 356 void
357 357 prexit(proc_t *p)
358 358 {
359 359 ASSERT(MUTEX_HELD(&p->p_lock));
360 360
361 361 if (pr_watch_active(p)) {
362 362 pr_free_watchpoints(p);
363 363 watch_disable(curthread);
364 364 }
365 365 /* pr_free_watched_pages() is called in exit(), after dropping p_lock */
366 366 if (p->p_trace) {
367 367 VTOP(p->p_trace)->pr_common->prc_flags |= PRC_DESTROY;
368 368 prnotify(p->p_trace);
369 369 }
370 370 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
371 371 }
372 372
373 373 /*
374 374 * Called when a thread calls lwp_exit().
375 375 */
376 376 void
377 377 prlwpexit(kthread_t *t)
378 378 {
379 379 vnode_t *vp;
380 380 prnode_t *pnp;
381 381 prcommon_t *pcp;
382 382 proc_t *p = ttoproc(t);
383 383 lwpent_t *lep = p->p_lwpdir[t->t_dslot].ld_entry;
384 384
385 385 ASSERT(t == curthread);
386 386 ASSERT(MUTEX_HELD(&p->p_lock));
387 387
388 388 /*
389 389 * The process must be blocked against /proc to do this safely.
390 390 * The lwp must not disappear while the process is marked P_PR_LOCK.
391 391 * It is the caller's responsibility to have called prbarrier(p).
392 392 */
393 393 ASSERT(!(p->p_proc_flag & P_PR_LOCK));
394 394
395 395 for (vp = p->p_plist; vp != NULL; vp = pnp->pr_next) {
396 396 pnp = VTOP(vp);
397 397 pcp = pnp->pr_common;
398 398 if (pcp->prc_thread == t) {
399 399 pcp->prc_thread = NULL;
400 400 pcp->prc_flags |= PRC_DESTROY;
401 401 }
402 402 }
403 403
404 404 for (vp = lep->le_trace; vp != NULL; vp = pnp->pr_next) {
405 405 pnp = VTOP(vp);
406 406 pcp = pnp->pr_common;
407 407 pcp->prc_thread = NULL;
408 408 pcp->prc_flags |= PRC_DESTROY;
409 409 prnotify(vp);
410 410 }
411 411
412 412 if (p->p_trace)
413 413 prnotify(p->p_trace);
414 414 }
415 415
416 416 /*
417 417 * Called when a zombie thread is joined or when a
418 418 * detached lwp exits. Called from lwp_hash_out().
419 419 */
420 420 void
421 421 prlwpfree(proc_t *p, lwpent_t *lep)
422 422 {
423 423 vnode_t *vp;
424 424 prnode_t *pnp;
425 425 prcommon_t *pcp;
426 426
427 427 ASSERT(MUTEX_HELD(&p->p_lock));
428 428
429 429 /*
430 430 * The process must be blocked against /proc to do this safely.
431 431 * The lwp must not disappear while the process is marked P_PR_LOCK.
432 432 * It is the caller's responsibility to have called prbarrier(p).
433 433 */
434 434 ASSERT(!(p->p_proc_flag & P_PR_LOCK));
435 435
436 436 vp = lep->le_trace;
437 437 lep->le_trace = NULL;
438 438 while (vp) {
439 439 prnotify(vp);
440 440 pnp = VTOP(vp);
441 441 pcp = pnp->pr_common;
442 442 ASSERT(pcp->prc_thread == NULL &&
443 443 (pcp->prc_flags & PRC_DESTROY));
444 444 pcp->prc_tslot = -1;
445 445 vp = pnp->pr_next;
446 446 pnp->pr_next = NULL;
447 447 }
448 448
449 449 if (p->p_trace)
450 450 prnotify(p->p_trace);
451 451 }
452 452
453 453 /*
454 454 * Called from a hook in exec() when a thread starts exec().
455 455 */
456 456 void
457 457 prexecstart(void)
458 458 {
459 459 proc_t *p = ttoproc(curthread);
460 460 klwp_t *lwp = ttolwp(curthread);
461 461
462 462 /*
463 463 * The P_PR_EXEC flag blocks /proc operations for
464 464 * the duration of the exec().
465 465 * We can't start exec() while the process is
466 466 * locked by /proc, so we call prbarrier().
467 467 * lwp_nostop keeps the process from being stopped
468 468 * via job control for the duration of the exec().
469 469 */
470 470
471 471 ASSERT(MUTEX_HELD(&p->p_lock));
472 472 prbarrier(p);
473 473 lwp->lwp_nostop++;
474 474 p->p_proc_flag |= P_PR_EXEC;
475 475 }
476 476
477 477 /*
478 478 * Called from a hook in exec() when a thread finishes exec().
479 479 * The thread may or may not have succeeded. Some other thread
480 480 * may have beat it to the punch.
481 481 */
482 482 void
483 483 prexecend(void)
484 484 {
485 485 proc_t *p = ttoproc(curthread);
486 486 klwp_t *lwp = ttolwp(curthread);
487 487 vnode_t *vp;
488 488 prnode_t *pnp;
489 489 prcommon_t *pcp;
490 490 model_t model = p->p_model;
491 491 id_t tid = curthread->t_tid;
492 492 int tslot = curthread->t_dslot;
493 493
494 494 ASSERT(MUTEX_HELD(&p->p_lock));
495 495
496 496 lwp->lwp_nostop--;
497 497 if (p->p_flag & SEXITLWPS) {
498 498 /*
499 499 * We are on our way to exiting because some
500 500 * other thread beat us in the race to exec().
501 501 * Don't clear the P_PR_EXEC flag in this case.
502 502 */
503 503 return;
504 504 }
505 505
506 506 /*
507 507 * Wake up anyone waiting in /proc for the process to complete exec().
508 508 */
509 509 p->p_proc_flag &= ~P_PR_EXEC;
510 510 if ((vp = p->p_trace) != NULL) {
511 511 pcp = VTOP(vp)->pr_common;
512 512 mutex_enter(&pcp->prc_mutex);
513 513 cv_broadcast(&pcp->prc_wait);
514 514 mutex_exit(&pcp->prc_mutex);
515 515 for (; vp != NULL; vp = pnp->pr_next) {
516 516 pnp = VTOP(vp);
517 517 pnp->pr_common->prc_datamodel = model;
518 518 }
519 519 }
520 520 if ((vp = p->p_lwpdir[tslot].ld_entry->le_trace) != NULL) {
521 521 /*
522 522 * We dealt with the process common above.
523 523 */
524 524 ASSERT(p->p_trace != NULL);
525 525 pcp = VTOP(vp)->pr_common;
526 526 mutex_enter(&pcp->prc_mutex);
527 527 cv_broadcast(&pcp->prc_wait);
528 528 mutex_exit(&pcp->prc_mutex);
529 529 for (; vp != NULL; vp = pnp->pr_next) {
530 530 pnp = VTOP(vp);
531 531 pcp = pnp->pr_common;
532 532 pcp->prc_datamodel = model;
533 533 pcp->prc_tid = tid;
534 534 pcp->prc_tslot = tslot;
535 535 }
536 536 }
537 537 }
538 538
539 539 /*
540 540 * Called from a hook in relvm() just before freeing the address space.
541 541 * We free all the watched areas now.
542 542 */
543 543 void
544 544 prrelvm(void)
545 545 {
546 546 proc_t *p = ttoproc(curthread);
547 547
548 548 mutex_enter(&p->p_lock);
549 549 prbarrier(p); /* block all other /proc operations */
550 550 if (pr_watch_active(p)) {
551 551 pr_free_watchpoints(p);
552 552 watch_disable(curthread);
553 553 }
554 554 mutex_exit(&p->p_lock);
555 555 pr_free_watched_pages(p);
556 556 }
557 557
558 558 /*
559 559 * Called from hooks in exec-related code when a traced process
560 560 * attempts to exec(2) a setuid/setgid program or an unreadable
561 561 * file. Rather than fail the exec we invalidate the associated
562 562 * /proc vnodes so that subsequent attempts to use them will fail.
563 563 *
564 564 * All /proc vnodes, except directory vnodes, are retained on a linked
565 565 * list (rooted at p_plist in the process structure) until last close.
566 566 *
567 567 * A controlling process must re-open the /proc files in order to
568 568 * regain control.
569 569 */
570 570 void
571 571 prinvalidate(struct user *up)
572 572 {
573 573 kthread_t *t = curthread;
574 574 proc_t *p = ttoproc(t);
575 575 vnode_t *vp;
576 576 prnode_t *pnp;
577 577 int writers = 0;
578 578
579 579 mutex_enter(&p->p_lock);
580 580 prbarrier(p); /* block all other /proc operations */
581 581
582 582 /*
583 583 * At this moment, there can be only one lwp in the process.
584 584 */
585 585 ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
586 586
587 587 /*
588 588 * Invalidate any currently active /proc vnodes.
589 589 */
590 590 for (vp = p->p_plist; vp != NULL; vp = pnp->pr_next) {
591 591 pnp = VTOP(vp);
592 592 switch (pnp->pr_type) {
593 593 	case PR_PSINFO:		/* these files can be read by anyone */
594 594 case PR_LPSINFO:
595 595 case PR_LWPSINFO:
596 596 case PR_LWPDIR:
597 597 case PR_LWPIDDIR:
598 598 case PR_USAGE:
599 599 case PR_LUSAGE:
600 600 case PR_LWPUSAGE:
601 601 break;
602 602 default:
603 603 pnp->pr_flags |= PR_INVAL;
604 604 break;
605 605 }
606 606 }
607 607 /*
608 608 * Wake up anyone waiting for the process or lwp.
609 609 * p->p_trace is guaranteed to be non-NULL if there
610 610 * are any open /proc files for this process.
611 611 */
612 612 if ((vp = p->p_trace) != NULL) {
613 613 prcommon_t *pcp = VTOP(vp)->pr_pcommon;
614 614
615 615 prnotify(vp);
616 616 /*
617 617 * Are there any writers?
618 618 */
619 619 if ((writers = pcp->prc_writers) != 0) {
620 620 /*
621 621 * Clear the exclusive open flag (old /proc interface).
622 622 * Set prc_selfopens equal to prc_writers so that
623 623 * the next O_EXCL|O_WRITE open will succeed
624 624 * even with existing (though invalid) writers.
625 625 * prclose() must decrement prc_selfopens when
626 626 * the invalid files are closed.
627 627 */
628 628 pcp->prc_flags &= ~PRC_EXCL;
629 629 ASSERT(pcp->prc_selfopens <= writers);
630 630 pcp->prc_selfopens = writers;
631 631 }
632 632 }
633 633 vp = p->p_lwpdir[t->t_dslot].ld_entry->le_trace;
634 634 while (vp != NULL) {
635 635 /*
636 636 * We should not invalidate the lwpiddir vnodes,
637 637 * but the necessities of maintaining the old
638 638 * ioctl()-based version of /proc require it.
639 639 */
640 640 pnp = VTOP(vp);
641 641 pnp->pr_flags |= PR_INVAL;
642 642 prnotify(vp);
643 643 vp = pnp->pr_next;
644 644 }
645 645
646 646 /*
647 647 * If any tracing flags are in effect and any vnodes are open for
648 648 * writing then set the requested-stop and run-on-last-close flags.
649 649 * Otherwise, clear all tracing flags.
650 650 */
651 651 t->t_proc_flag &= ~TP_PAUSE;
652 652 if ((p->p_proc_flag & P_PR_TRACE) && writers) {
653 653 t->t_proc_flag |= TP_PRSTOP;
654 654 aston(t); /* so ISSIG will see the flag */
655 655 p->p_proc_flag |= P_PR_RUNLCL;
656 656 } else {
657 657 premptyset(&up->u_entrymask); /* syscalls */
658 658 premptyset(&up->u_exitmask);
659 659 up->u_systrap = 0;
660 660 premptyset(&p->p_sigmask); /* signals */
661 661 premptyset(&p->p_fltmask); /* faults */
662 662 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
663 663 p->p_proc_flag &= ~(P_PR_RUNLCL|P_PR_KILLCL|P_PR_TRACE);
664 664 prnostep(ttolwp(t));
665 665 }
666 666
667 667 mutex_exit(&p->p_lock);
668 668 }
669 669
670 670 /*
671 671 * Acquire the controlled process's p_lock and mark it P_PR_LOCK.
672 672 * Return with pr_pidlock held in all cases.
673 673 * Return with p_lock held if the process still exists.
674 674 * Return value is the process pointer if the process still exists, else NULL.
675 675 * If we lock the process, give ourself kernel priority to avoid deadlocks;
676 676 * this is undone in prunlock().
677 677 */
678 678 proc_t *
679 679 pr_p_lock(prnode_t *pnp)
680 680 {
681 681 proc_t *p;
682 682 prcommon_t *pcp;
683 683
684 684 mutex_enter(&pr_pidlock);
685 685 if ((pcp = pnp->pr_pcommon) == NULL || (p = pcp->prc_proc) == NULL)
686 686 return (NULL);
687 687 mutex_enter(&p->p_lock);
688 688 while (p->p_proc_flag & P_PR_LOCK) {
689 689 /*
690 690 * This cv/mutex pair is persistent even if
691 691 * the process disappears while we sleep.
692 692 */
693 693 kcondvar_t *cv = &pr_pid_cv[p->p_slot];
694 694 kmutex_t *mp = &p->p_lock;
695 695
696 696 mutex_exit(&pr_pidlock);
697 697 cv_wait(cv, mp);
698 698 mutex_exit(mp);
699 699 mutex_enter(&pr_pidlock);
700 700 if (pcp->prc_proc == NULL)
701 701 return (NULL);
702 702 ASSERT(p == pcp->prc_proc);
703 703 mutex_enter(&p->p_lock);
704 704 }
705 705 p->p_proc_flag |= P_PR_LOCK;
706 706 THREAD_KPRI_REQUEST();
707 707 return (p);
708 708 }
709 709
710 710 /*
711 711 * Lock the target process by setting P_PR_LOCK and grabbing p->p_lock.
712 712 * This prevents any lwp of the process from disappearing and
713 713 * blocks most operations that a process can perform on itself.
714 714 * Returns 0 on success, a non-zero error number on failure.
715 715 *
716 716 * 'zdisp' is ZYES or ZNO to indicate whether prlock() should succeed when
717 717 * the subject process is a zombie (ZYES) or fail for zombies (ZNO).
718 718 *
719 719 * error returns:
720 720 * ENOENT: process or lwp has disappeared or process is exiting
721 721 * (or has become a zombie and zdisp == ZNO).
722 722 * EAGAIN: procfs vnode has become invalid.
723 723 * EINTR: signal arrived while waiting for exec to complete.
724 724 */
725 725 int
726 726 prlock(prnode_t *pnp, int zdisp)
727 727 {
728 728 prcommon_t *pcp;
729 729 proc_t *p;
730 730
731 731 again:
732 732 pcp = pnp->pr_common;
733 733 p = pr_p_lock(pnp);
734 734 mutex_exit(&pr_pidlock);
735 735
736 736 /*
737 737 * Return ENOENT immediately if there is no process.
738 738 */
739 739 if (p == NULL)
740 740 return (ENOENT);
741 741
742 742 ASSERT(p == pcp->prc_proc && p->p_stat != 0 && p->p_stat != SIDL);
743 743
744 744 /*
745 745 * Return ENOENT if process entered zombie state or is exiting
746 746 * and the 'zdisp' flag is set to ZNO indicating not to lock zombies.
747 747 */
748 748 if (zdisp == ZNO &&
749 749 ((pcp->prc_flags & PRC_DESTROY) || (p->p_flag & SEXITING))) {
750 750 prunlock(pnp);
751 751 return (ENOENT);
752 752 }
753 753
754 754 /*
755 755 * If lwp-specific, check to see if lwp has disappeared.
756 756 */
757 757 if (pcp->prc_flags & PRC_LWP) {
758 758 if ((zdisp == ZNO && (pcp->prc_flags & PRC_DESTROY)) ||
759 759 pcp->prc_tslot == -1) {
760 760 prunlock(pnp);
761 761 return (ENOENT);
762 762 }
763 763 }
764 764
765 765 /*
766 766 * Return EAGAIN if we have encountered a security violation.
767 767 * (The process exec'd a set-id or unreadable executable file.)
768 768 */
769 769 if (pnp->pr_flags & PR_INVAL) {
770 770 prunlock(pnp);
771 771 return (EAGAIN);
772 772 }
773 773
774 774 /*
775 775 * If process is undergoing an exec(), wait for
776 776 * completion and then start all over again.
777 777 */
778 778 if (p->p_proc_flag & P_PR_EXEC) {
779 779 pcp = pnp->pr_pcommon; /* Put on the correct sleep queue */
780 780 mutex_enter(&pcp->prc_mutex);
781 781 prunlock(pnp);
782 782 if (!cv_wait_sig(&pcp->prc_wait, &pcp->prc_mutex)) {
783 783 mutex_exit(&pcp->prc_mutex);
784 784 return (EINTR);
785 785 }
786 786 mutex_exit(&pcp->prc_mutex);
787 787 goto again;
788 788 }
789 789
790 790 /*
791 791 * We return holding p->p_lock.
792 792 */
793 793 return (0);
794 794 }
795 795
796 796 /*
797 797 * Undo prlock() and pr_p_lock().
798 798 * p->p_lock is still held; pr_pidlock is no longer held.
799 799 *
800 800 * prunmark() drops the P_PR_LOCK flag and wakes up another thread,
801 801 * if any, waiting for the flag to be dropped; it retains p->p_lock.
802 802 *
803 803 * prunlock() calls prunmark() and then drops p->p_lock.
804 804 */
805 805 void
806 806 prunmark(proc_t *p)
807 807 {
808 808 ASSERT(p->p_proc_flag & P_PR_LOCK);
809 809 ASSERT(MUTEX_HELD(&p->p_lock));
810 810
811 811 cv_signal(&pr_pid_cv[p->p_slot]);
812 812 p->p_proc_flag &= ~P_PR_LOCK;
813 813 THREAD_KPRI_RELEASE();
814 814 }
815 815
816 816 void
817 817 prunlock(prnode_t *pnp)
818 818 {
819 819 prcommon_t *pcp = pnp->pr_common;
820 820 proc_t *p = pcp->prc_proc;
821 821
822 822 /*
823 823 * If we (or someone) gave it a SIGKILL, and it is not
824 824 * already a zombie, set it running unconditionally.
825 825 */
826 826 if ((p->p_flag & SKILLED) &&
827 827 !(p->p_flag & SEXITING) &&
828 828 !(pcp->prc_flags & PRC_DESTROY) &&
829 829 !((pcp->prc_flags & PRC_LWP) && pcp->prc_tslot == -1))
830 830 (void) pr_setrun(pnp, 0);
831 831 prunmark(p);
832 832 mutex_exit(&p->p_lock);
833 833 }
834 834
835 835 /*
836 836 * Called while holding p->p_lock to delay until the process is unlocked.
837 837 * We enter holding p->p_lock; p->p_lock is dropped and reacquired.
838 838 * The process cannot become locked again until p->p_lock is dropped.
839 839 */
840 840 void
841 841 prbarrier(proc_t *p)
842 842 {
843 843 ASSERT(MUTEX_HELD(&p->p_lock));
844 844
845 845 if (p->p_proc_flag & P_PR_LOCK) {
846 846 /* The process is locked; delay until not locked */
847 847 uint_t slot = p->p_slot;
848 848
849 849 while (p->p_proc_flag & P_PR_LOCK)
850 850 cv_wait(&pr_pid_cv[slot], &p->p_lock);
851 851 cv_signal(&pr_pid_cv[slot]);
852 852 }
853 853 }
854 854
855 855 /*
856 856 * Return process/lwp status.
857 857 * The u-block is mapped in by this routine and unmapped at the end.
858 858 */
859 859 void
860 860 prgetstatus(proc_t *p, pstatus_t *sp, zone_t *zp)
861 861 {
862 862 kthread_t *t;
863 863
864 864 ASSERT(MUTEX_HELD(&p->p_lock));
865 865
866 866 t = prchoose(p); /* returns locked thread */
867 867 ASSERT(t != NULL);
868 868 thread_unlock(t);
869 869
870 870 /* just bzero the process part, prgetlwpstatus() does the rest */
871 871 bzero(sp, sizeof (pstatus_t) - sizeof (lwpstatus_t));
872 872 sp->pr_nlwp = p->p_lwpcnt;
873 873 sp->pr_nzomb = p->p_zombcnt;
874 874 prassignset(&sp->pr_sigpend, &p->p_sig);
875 875 sp->pr_brkbase = (uintptr_t)p->p_brkbase;
876 876 sp->pr_brksize = p->p_brksize;
877 877 sp->pr_stkbase = (uintptr_t)prgetstackbase(p);
878 878 sp->pr_stksize = p->p_stksize;
879 879 sp->pr_pid = p->p_pid;
880 880 if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
881 881 (p->p_flag & SZONETOP)) {
882 882 ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
883 883 /*
884 884 * Inside local zones, fake zsched's pid as parent pids for
885 885 * processes which reference processes outside of the zone.
886 886 */
887 887 sp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
888 888 } else {
889 889 sp->pr_ppid = p->p_ppid;
890 890 }
891 891 sp->pr_pgid = p->p_pgrp;
892 892 sp->pr_sid = p->p_sessp->s_sid;
893 893 sp->pr_taskid = p->p_task->tk_tkid;
894 894 sp->pr_projid = p->p_task->tk_proj->kpj_id;
895 895 sp->pr_zoneid = p->p_zone->zone_id;
896 896 hrt2ts(mstate_aggr_state(p, LMS_USER), &sp->pr_utime);
897 897 hrt2ts(mstate_aggr_state(p, LMS_SYSTEM), &sp->pr_stime);
898 898 TICK_TO_TIMESTRUC(p->p_cutime, &sp->pr_cutime);
899 899 TICK_TO_TIMESTRUC(p->p_cstime, &sp->pr_cstime);
900 900 prassignset(&sp->pr_sigtrace, &p->p_sigmask);
901 901 prassignset(&sp->pr_flttrace, &p->p_fltmask);
902 902 prassignset(&sp->pr_sysentry, &PTOU(p)->u_entrymask);
903 903 prassignset(&sp->pr_sysexit, &PTOU(p)->u_exitmask);
904 904 switch (p->p_model) {
905 905 case DATAMODEL_ILP32:
906 906 sp->pr_dmodel = PR_MODEL_ILP32;
907 907 break;
908 908 case DATAMODEL_LP64:
909 909 sp->pr_dmodel = PR_MODEL_LP64;
910 910 break;
911 911 }
912 912 if (p->p_agenttp)
913 913 sp->pr_agentid = p->p_agenttp->t_tid;
914 914
915 915 /* get the chosen lwp's status */
916 916 prgetlwpstatus(t, &sp->pr_lwp, zp);
917 917
918 918 /* replicate the flags */
919 919 sp->pr_flags = sp->pr_lwp.pr_flags;
920 920 }
921 921
922 922 #ifdef _SYSCALL32_IMPL
923 923 void
924 924 prgetlwpstatus32(kthread_t *t, lwpstatus32_t *sp, zone_t *zp)
925 925 {
926 926 proc_t *p = ttoproc(t);
927 927 klwp_t *lwp = ttolwp(t);
928 928 struct mstate *ms = &lwp->lwp_mstate;
929 929 hrtime_t usr, sys;
930 930 int flags;
931 931 ulong_t instr;
932 932
933 933 ASSERT(MUTEX_HELD(&p->p_lock));
934 934
935 935 bzero(sp, sizeof (*sp));
936 936 flags = 0L;
937 937 if (t->t_state == TS_STOPPED) {
938 938 flags |= PR_STOPPED;
939 939 if ((t->t_schedflag & TS_PSTART) == 0)
940 940 flags |= PR_ISTOP;
941 941 } else if (VSTOPPED(t)) {
942 942 flags |= PR_STOPPED|PR_ISTOP;
943 943 }
944 944 if (!(flags & PR_ISTOP) && (t->t_proc_flag & TP_PRSTOP))
945 945 flags |= PR_DSTOP;
946 946 if (lwp->lwp_asleep)
947 947 flags |= PR_ASLEEP;
948 948 if (t == p->p_agenttp)
949 949 flags |= PR_AGENT;
950 950 if (!(t->t_proc_flag & TP_TWAIT))
951 951 flags |= PR_DETACH;
952 952 if (t->t_proc_flag & TP_DAEMON)
953 953 flags |= PR_DAEMON;
954 954 if (p->p_proc_flag & P_PR_FORK)
955 955 flags |= PR_FORK;
956 956 if (p->p_proc_flag & P_PR_RUNLCL)
957 957 flags |= PR_RLC;
958 958 if (p->p_proc_flag & P_PR_KILLCL)
959 959 flags |= PR_KLC;
960 960 if (p->p_proc_flag & P_PR_ASYNC)
961 961 flags |= PR_ASYNC;
962 962 if (p->p_proc_flag & P_PR_BPTADJ)
963 963 flags |= PR_BPTADJ;
964 964 if (p->p_proc_flag & P_PR_PTRACE)
965 965 flags |= PR_PTRACE;
966 966 if (p->p_flag & SMSACCT)
967 967 flags |= PR_MSACCT;
968 968 if (p->p_flag & SMSFORK)
969 969 flags |= PR_MSFORK;
970 970 if (p->p_flag & SVFWAIT)
971 971 flags |= PR_VFORKP;
972 972 sp->pr_flags = flags;
973 973 if (VSTOPPED(t)) {
974 974 sp->pr_why = PR_REQUESTED;
975 975 sp->pr_what = 0;
976 976 } else {
977 977 sp->pr_why = t->t_whystop;
978 978 sp->pr_what = t->t_whatstop;
979 979 }
980 980 sp->pr_lwpid = t->t_tid;
981 981 sp->pr_cursig = lwp->lwp_cursig;
982 982 prassignset(&sp->pr_lwppend, &t->t_sig);
983 983 schedctl_finish_sigblock(t);
984 984 prassignset(&sp->pr_lwphold, &t->t_hold);
985 985 if (t->t_whystop == PR_FAULTED) {
986 986 siginfo_kto32(&lwp->lwp_siginfo, &sp->pr_info);
987 987 if (t->t_whatstop == FLTPAGE)
988 988 sp->pr_info.si_addr =
989 989 (caddr32_t)(uintptr_t)lwp->lwp_siginfo.si_addr;
990 990 } else if (lwp->lwp_curinfo)
991 991 siginfo_kto32(&lwp->lwp_curinfo->sq_info, &sp->pr_info);
992 992 if (SI_FROMUSER(&lwp->lwp_siginfo) && zp->zone_id != GLOBAL_ZONEID &&
993 993 sp->pr_info.si_zoneid != zp->zone_id) {
994 994 sp->pr_info.si_pid = zp->zone_zsched->p_pid;
995 995 sp->pr_info.si_uid = 0;
996 996 sp->pr_info.si_ctid = -1;
997 997 sp->pr_info.si_zoneid = zp->zone_id;
998 998 }
999 999 sp->pr_altstack.ss_sp =
1000 1000 (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
1001 1001 sp->pr_altstack.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
1002 1002 sp->pr_altstack.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;
1003 1003 prgetaction32(p, PTOU(p), lwp->lwp_cursig, &sp->pr_action);
1004 1004 sp->pr_oldcontext = (caddr32_t)lwp->lwp_oldcontext;
1005 1005 sp->pr_ustack = (caddr32_t)lwp->lwp_ustack;
1006 1006 (void) strncpy(sp->pr_clname, sclass[t->t_cid].cl_name,
1007 1007 sizeof (sp->pr_clname) - 1);
1008 1008 if (flags & PR_STOPPED)
1009 1009 hrt2ts32(t->t_stoptime, &sp->pr_tstamp);
1010 1010 usr = ms->ms_acct[LMS_USER];
1011 1011 sys = ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];
1012 1012 scalehrtime(&usr);
1013 1013 scalehrtime(&sys);
1014 1014 hrt2ts32(usr, &sp->pr_utime);
1015 1015 hrt2ts32(sys, &sp->pr_stime);
1016 1016
1017 1017 /*
1018 1018 * Fetch the current instruction, if not a system process.
1019 1019 * We don't attempt this unless the lwp is stopped.
1020 1020 */
1021 1021 if ((p->p_flag & SSYS) || p->p_as == &kas)
1022 1022 sp->pr_flags |= (PR_ISSYS|PR_PCINVAL);
1023 1023 else if (!(flags & PR_STOPPED))
1024 1024 sp->pr_flags |= PR_PCINVAL;
1025 1025 else if (!prfetchinstr(lwp, &instr))
1026 1026 sp->pr_flags |= PR_PCINVAL;
1027 1027 else
1028 1028 sp->pr_instr = (uint32_t)instr;
1029 1029
1030 1030 /*
1031 1031 * Drop p_lock while touching the lwp's stack.
1032 1032 */
1033 1033 mutex_exit(&p->p_lock);
1034 1034 if (prisstep(lwp))
1035 1035 sp->pr_flags |= PR_STEP;
1036 1036 if ((flags & (PR_STOPPED|PR_ASLEEP)) && t->t_sysnum) {
1037 1037 int i;
1038 1038
1039 1039 sp->pr_syscall = get_syscall32_args(lwp,
1040 1040 (int *)sp->pr_sysarg, &i);
1041 1041 sp->pr_nsysarg = (ushort_t)i;
1042 1042 }
1043 1043 if ((flags & PR_STOPPED) || t == curthread)
1044 1044 prgetprregs32(lwp, sp->pr_reg);
1045 1045 if ((t->t_state == TS_STOPPED && t->t_whystop == PR_SYSEXIT) ||
1046 1046 (flags & PR_VFORKP)) {
1047 1047 long r1, r2;
1048 1048 user_t *up;
1049 1049 auxv_t *auxp;
1050 1050 int i;
1051 1051
1052 1052 sp->pr_errno = prgetrvals(lwp, &r1, &r2);
1053 1053 if (sp->pr_errno == 0) {
1054 1054 sp->pr_rval1 = (int32_t)r1;
1055 1055 sp->pr_rval2 = (int32_t)r2;
1056 1056 sp->pr_errpriv = PRIV_NONE;
1057 1057 } else
1058 1058 sp->pr_errpriv = lwp->lwp_badpriv;
1059 1059
1060 1060 if (t->t_sysnum == SYS_execve) {
1061 1061 up = PTOU(p);
1062 1062 sp->pr_sysarg[0] = 0;
1063 1063 sp->pr_sysarg[1] = (caddr32_t)up->u_argv;
1064 1064 sp->pr_sysarg[2] = (caddr32_t)up->u_envp;
1065 1065 for (i = 0, auxp = up->u_auxv;
1066 1066 i < sizeof (up->u_auxv) / sizeof (up->u_auxv[0]);
1067 1067 i++, auxp++) {
1068 1068 if (auxp->a_type == AT_SUN_EXECNAME) {
1069 1069 sp->pr_sysarg[0] =
1070 1070 (caddr32_t)
1071 1071 (uintptr_t)auxp->a_un.a_ptr;
1072 1072 break;
1073 1073 }
1074 1074 }
1075 1075 }
1076 1076 }
1077 1077 if (prhasfp())
1078 1078 prgetprfpregs32(lwp, &sp->pr_fpreg);
1079 1079 mutex_enter(&p->p_lock);
1080 1080 }
1081 1081
1082 1082 void
1083 1083 prgetstatus32(proc_t *p, pstatus32_t *sp, zone_t *zp)
1084 1084 {
1085 1085 kthread_t *t;
1086 1086
1087 1087 ASSERT(MUTEX_HELD(&p->p_lock));
1088 1088
1089 1089 t = prchoose(p); /* returns locked thread */
1090 1090 ASSERT(t != NULL);
1091 1091 thread_unlock(t);
1092 1092
1093 1093 /* just bzero the process part, prgetlwpstatus32() does the rest */
1094 1094 bzero(sp, sizeof (pstatus32_t) - sizeof (lwpstatus32_t));
1095 1095 sp->pr_nlwp = p->p_lwpcnt;
1096 1096 sp->pr_nzomb = p->p_zombcnt;
1097 1097 prassignset(&sp->pr_sigpend, &p->p_sig);
1098 1098 sp->pr_brkbase = (uint32_t)(uintptr_t)p->p_brkbase;
1099 1099 sp->pr_brksize = (uint32_t)p->p_brksize;
1100 1100 sp->pr_stkbase = (uint32_t)(uintptr_t)prgetstackbase(p);
1101 1101 sp->pr_stksize = (uint32_t)p->p_stksize;
1102 1102 sp->pr_pid = p->p_pid;
1103 1103 if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
1104 1104 (p->p_flag & SZONETOP)) {
1105 1105 ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
1106 1106 /*
1107 1107 * Inside local zones, fake zsched's pid as parent pids for
1108 1108 * processes which reference processes outside of the zone.
1109 1109 */
1110 1110 sp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
1111 1111 } else {
1112 1112 sp->pr_ppid = p->p_ppid;
1113 1113 }
1114 1114 sp->pr_pgid = p->p_pgrp;
1115 1115 sp->pr_sid = p->p_sessp->s_sid;
1116 1116 sp->pr_taskid = p->p_task->tk_tkid;
1117 1117 sp->pr_projid = p->p_task->tk_proj->kpj_id;
1118 1118 sp->pr_zoneid = p->p_zone->zone_id;
1119 1119 hrt2ts32(mstate_aggr_state(p, LMS_USER), &sp->pr_utime);
1120 1120 hrt2ts32(mstate_aggr_state(p, LMS_SYSTEM), &sp->pr_stime);
1121 1121 TICK_TO_TIMESTRUC32(p->p_cutime, &sp->pr_cutime);
1122 1122 TICK_TO_TIMESTRUC32(p->p_cstime, &sp->pr_cstime);
1123 1123 prassignset(&sp->pr_sigtrace, &p->p_sigmask);
1124 1124 prassignset(&sp->pr_flttrace, &p->p_fltmask);
1125 1125 prassignset(&sp->pr_sysentry, &PTOU(p)->u_entrymask);
1126 1126 prassignset(&sp->pr_sysexit, &PTOU(p)->u_exitmask);
1127 1127 switch (p->p_model) {
1128 1128 case DATAMODEL_ILP32:
1129 1129 sp->pr_dmodel = PR_MODEL_ILP32;
1130 1130 break;
1131 1131 case DATAMODEL_LP64:
1132 1132 sp->pr_dmodel = PR_MODEL_LP64;
1133 1133 break;
1134 1134 }
1135 1135 if (p->p_agenttp)
1136 1136 sp->pr_agentid = p->p_agenttp->t_tid;
1137 1137
1138 1138 /* get the chosen lwp's status */
1139 1139 prgetlwpstatus32(t, &sp->pr_lwp, zp);
1140 1140
1141 1141 /* replicate the flags */
1142 1142 sp->pr_flags = sp->pr_lwp.pr_flags;
1143 1143 }
1144 1144 #endif /* _SYSCALL32_IMPL */
1145 1145
1146 1146 /*
1147 1147 * Return lwp status.
1148 1148 */
1149 1149 void
1150 1150 prgetlwpstatus(kthread_t *t, lwpstatus_t *sp, zone_t *zp)
1151 1151 {
1152 1152 proc_t *p = ttoproc(t);
1153 1153 klwp_t *lwp = ttolwp(t);
1154 1154 struct mstate *ms = &lwp->lwp_mstate;
1155 1155 hrtime_t usr, sys;
1156 1156 int flags;
1157 1157 ulong_t instr;
1158 1158
1159 1159 ASSERT(MUTEX_HELD(&p->p_lock));
1160 1160
1161 1161 bzero(sp, sizeof (*sp));
1162 1162 flags = 0L;
1163 1163 if (t->t_state == TS_STOPPED) {
1164 1164 flags |= PR_STOPPED;
1165 1165 if ((t->t_schedflag & TS_PSTART) == 0)
1166 1166 flags |= PR_ISTOP;
1167 1167 } else if (VSTOPPED(t)) {
1168 1168 flags |= PR_STOPPED|PR_ISTOP;
1169 1169 }
1170 1170 if (!(flags & PR_ISTOP) && (t->t_proc_flag & TP_PRSTOP))
1171 1171 flags |= PR_DSTOP;
1172 1172 if (lwp->lwp_asleep)
1173 1173 flags |= PR_ASLEEP;
1174 1174 if (t == p->p_agenttp)
1175 1175 flags |= PR_AGENT;
1176 1176 if (!(t->t_proc_flag & TP_TWAIT))
1177 1177 flags |= PR_DETACH;
1178 1178 if (t->t_proc_flag & TP_DAEMON)
1179 1179 flags |= PR_DAEMON;
1180 1180 if (p->p_proc_flag & P_PR_FORK)
1181 1181 flags |= PR_FORK;
1182 1182 if (p->p_proc_flag & P_PR_RUNLCL)
1183 1183 flags |= PR_RLC;
1184 1184 if (p->p_proc_flag & P_PR_KILLCL)
1185 1185 flags |= PR_KLC;
1186 1186 if (p->p_proc_flag & P_PR_ASYNC)
1187 1187 flags |= PR_ASYNC;
1188 1188 if (p->p_proc_flag & P_PR_BPTADJ)
1189 1189 flags |= PR_BPTADJ;
1190 1190 if (p->p_proc_flag & P_PR_PTRACE)
1191 1191 flags |= PR_PTRACE;
1192 1192 if (p->p_flag & SMSACCT)
1193 1193 flags |= PR_MSACCT;
1194 1194 if (p->p_flag & SMSFORK)
1195 1195 flags |= PR_MSFORK;
1196 1196 if (p->p_flag & SVFWAIT)
1197 1197 flags |= PR_VFORKP;
1198 1198 if (p->p_pgidp->pid_pgorphaned)
1199 1199 flags |= PR_ORPHAN;
1200 1200 if (p->p_pidflag & CLDNOSIGCHLD)
1201 1201 flags |= PR_NOSIGCHLD;
1202 1202 if (p->p_pidflag & CLDWAITPID)
1203 1203 flags |= PR_WAITPID;
1204 1204 sp->pr_flags = flags;
1205 1205 if (VSTOPPED(t)) {
1206 1206 sp->pr_why = PR_REQUESTED;
1207 1207 sp->pr_what = 0;
1208 1208 } else {
1209 1209 sp->pr_why = t->t_whystop;
1210 1210 sp->pr_what = t->t_whatstop;
1211 1211 }
1212 1212 sp->pr_lwpid = t->t_tid;
1213 1213 sp->pr_cursig = lwp->lwp_cursig;
1214 1214 prassignset(&sp->pr_lwppend, &t->t_sig);
1215 1215 schedctl_finish_sigblock(t);
1216 1216 prassignset(&sp->pr_lwphold, &t->t_hold);
1217 1217 if (t->t_whystop == PR_FAULTED)
1218 1218 bcopy(&lwp->lwp_siginfo,
1219 1219 &sp->pr_info, sizeof (k_siginfo_t));
1220 1220 else if (lwp->lwp_curinfo)
1221 1221 bcopy(&lwp->lwp_curinfo->sq_info,
1222 1222 &sp->pr_info, sizeof (k_siginfo_t));
1223 1223 if (SI_FROMUSER(&lwp->lwp_siginfo) && zp->zone_id != GLOBAL_ZONEID &&
1224 1224 sp->pr_info.si_zoneid != zp->zone_id) {
1225 1225 sp->pr_info.si_pid = zp->zone_zsched->p_pid;
1226 1226 sp->pr_info.si_uid = 0;
1227 1227 sp->pr_info.si_ctid = -1;
1228 1228 sp->pr_info.si_zoneid = zp->zone_id;
1229 1229 }
1230 1230 sp->pr_altstack = lwp->lwp_sigaltstack;
1231 1231 prgetaction(p, PTOU(p), lwp->lwp_cursig, &sp->pr_action);
1232 1232 sp->pr_oldcontext = (uintptr_t)lwp->lwp_oldcontext;
1233 1233 sp->pr_ustack = lwp->lwp_ustack;
1234 1234 (void) strncpy(sp->pr_clname, sclass[t->t_cid].cl_name,
1235 1235 sizeof (sp->pr_clname) - 1);
1236 1236 if (flags & PR_STOPPED)
1237 1237 hrt2ts(t->t_stoptime, &sp->pr_tstamp);
1238 1238 usr = ms->ms_acct[LMS_USER];
1239 1239 sys = ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];
1240 1240 scalehrtime(&usr);
1241 1241 scalehrtime(&sys);
1242 1242 hrt2ts(usr, &sp->pr_utime);
1243 1243 hrt2ts(sys, &sp->pr_stime);
1244 1244
1245 1245 /*
1246 1246 * Fetch the current instruction, if not a system process.
1247 1247 * We don't attempt this unless the lwp is stopped.
1248 1248 */
1249 1249 if ((p->p_flag & SSYS) || p->p_as == &kas)
1250 1250 sp->pr_flags |= (PR_ISSYS|PR_PCINVAL);
1251 1251 else if (!(flags & PR_STOPPED))
1252 1252 sp->pr_flags |= PR_PCINVAL;
1253 1253 else if (!prfetchinstr(lwp, &instr))
1254 1254 sp->pr_flags |= PR_PCINVAL;
1255 1255 else
1256 1256 sp->pr_instr = instr;
1257 1257
1258 1258 /*
1259 1259 * Drop p_lock while touching the lwp's stack.
1260 1260 */
1261 1261 mutex_exit(&p->p_lock);
1262 1262 if (prisstep(lwp))
1263 1263 sp->pr_flags |= PR_STEP;
1264 1264 if ((flags & (PR_STOPPED|PR_ASLEEP)) && t->t_sysnum) {
1265 1265 int i;
1266 1266
1267 1267 sp->pr_syscall = get_syscall_args(lwp,
1268 1268 (long *)sp->pr_sysarg, &i);
1269 1269 sp->pr_nsysarg = (ushort_t)i;
1270 1270 }
1271 1271 if ((flags & PR_STOPPED) || t == curthread)
1272 1272 prgetprregs(lwp, sp->pr_reg);
1273 1273 if ((t->t_state == TS_STOPPED && t->t_whystop == PR_SYSEXIT) ||
1274 1274 (flags & PR_VFORKP)) {
1275 1275 user_t *up;
1276 1276 auxv_t *auxp;
1277 1277 int i;
1278 1278
1279 1279 sp->pr_errno = prgetrvals(lwp, &sp->pr_rval1, &sp->pr_rval2);
1280 1280 if (sp->pr_errno == 0)
1281 1281 sp->pr_errpriv = PRIV_NONE;
1282 1282 else
1283 1283 sp->pr_errpriv = lwp->lwp_badpriv;
1284 1284
1285 1285 if (t->t_sysnum == SYS_execve) {
1286 1286 up = PTOU(p);
1287 1287 sp->pr_sysarg[0] = 0;
1288 1288 sp->pr_sysarg[1] = (uintptr_t)up->u_argv;
1289 1289 sp->pr_sysarg[2] = (uintptr_t)up->u_envp;
1290 1290 for (i = 0, auxp = up->u_auxv;
1291 1291 i < sizeof (up->u_auxv) / sizeof (up->u_auxv[0]);
1292 1292 i++, auxp++) {
1293 1293 if (auxp->a_type == AT_SUN_EXECNAME) {
1294 1294 sp->pr_sysarg[0] =
1295 1295 (uintptr_t)auxp->a_un.a_ptr;
1296 1296 break;
1297 1297 }
1298 1298 }
1299 1299 }
1300 1300 }
1301 1301 if (prhasfp())
1302 1302 prgetprfpregs(lwp, &sp->pr_fpreg);
1303 1303 mutex_enter(&p->p_lock);
1304 1304 }
1305 1305
1306 1306 /*
1307 1307 * Get the sigaction structure for the specified signal. The u-block
1308 1308 * must already have been mapped in by the caller.
1309 1309 */
1310 1310 void
1311 1311 prgetaction(proc_t *p, user_t *up, uint_t sig, struct sigaction *sp)
1312 1312 {
1313 1313 int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;
1314 1314
1315 1315 bzero(sp, sizeof (*sp));
1316 1316
1317 1317 if (sig != 0 && (unsigned)sig < nsig) {
1318 1318 sp->sa_handler = up->u_signal[sig-1];
1319 1319 prassignset(&sp->sa_mask, &up->u_sigmask[sig-1]);
1320 1320 if (sigismember(&up->u_sigonstack, sig))
1321 1321 sp->sa_flags |= SA_ONSTACK;
1322 1322 if (sigismember(&up->u_sigresethand, sig))
1323 1323 sp->sa_flags |= SA_RESETHAND;
1324 1324 if (sigismember(&up->u_sigrestart, sig))
1325 1325 sp->sa_flags |= SA_RESTART;
1326 1326 if (sigismember(&p->p_siginfo, sig))
1327 1327 sp->sa_flags |= SA_SIGINFO;
1328 1328 if (sigismember(&up->u_signodefer, sig))
1329 1329 sp->sa_flags |= SA_NODEFER;
1330 1330 if (sig == SIGCLD) {
1331 1331 if (p->p_flag & SNOWAIT)
1332 1332 sp->sa_flags |= SA_NOCLDWAIT;
1333 1333 if ((p->p_flag & SJCTL) == 0)
1334 1334 sp->sa_flags |= SA_NOCLDSTOP;
1335 1335 }
1336 1336 }
1337 1337 }
1338 1338
1339 1339 #ifdef _SYSCALL32_IMPL
1340 1340 void
1341 1341 prgetaction32(proc_t *p, user_t *up, uint_t sig, struct sigaction32 *sp)
1342 1342 {
1343 1343 int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;
1344 1344
1345 1345 bzero(sp, sizeof (*sp));
1346 1346
1347 1347 if (sig != 0 && (unsigned)sig < nsig) {
1348 1348 sp->sa_handler = (caddr32_t)(uintptr_t)up->u_signal[sig-1];
1349 1349 prassignset(&sp->sa_mask, &up->u_sigmask[sig-1]);
1350 1350 if (sigismember(&up->u_sigonstack, sig))
1351 1351 sp->sa_flags |= SA_ONSTACK;
1352 1352 if (sigismember(&up->u_sigresethand, sig))
1353 1353 sp->sa_flags |= SA_RESETHAND;
1354 1354 if (sigismember(&up->u_sigrestart, sig))
1355 1355 sp->sa_flags |= SA_RESTART;
1356 1356 if (sigismember(&p->p_siginfo, sig))
1357 1357 sp->sa_flags |= SA_SIGINFO;
1358 1358 if (sigismember(&up->u_signodefer, sig))
1359 1359 sp->sa_flags |= SA_NODEFER;
1360 1360 if (sig == SIGCLD) {
1361 1361 if (p->p_flag & SNOWAIT)
1362 1362 sp->sa_flags |= SA_NOCLDWAIT;
1363 1363 if ((p->p_flag & SJCTL) == 0)
1364 1364 sp->sa_flags |= SA_NOCLDSTOP;
1365 1365 }
1366 1366 }
1367 1367 }
1368 1368 #endif /* _SYSCALL32_IMPL */
1369 1369
1370 1370 /*
1371 1371 * Count the number of segments in this process's address space.
1372 1372 */
1373 1373 int
1374 1374 prnsegs(struct as *as, int reserved)
1375 1375 {
1376 1376 int n = 0;
1377 1377 struct seg *seg;
1378 1378
1379 1379 ASSERT(as != &kas && AS_WRITE_HELD(as));
1380 1380
1381 1381 for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
1382 1382 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
1383 1383 caddr_t saddr, naddr;
1384 1384 void *tmp = NULL;
1385 1385
1386 1386 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1387 1387 (void) pr_getprot(seg, reserved, &tmp,
1388 1388 &saddr, &naddr, eaddr);
1389 1389 if (saddr != naddr)
1390 1390 n++;
1391 1391 }
1392 1392
1393 1393 ASSERT(tmp == NULL);
1394 1394 }
1395 1395
1396 1396 return (n);
1397 1397 }
1398 1398
1399 1399 /*
1400 1400 * Convert uint32_t to decimal string w/o leading zeros.
1401 1401 * Add trailing null characters if 'len' is greater than string length.
1402 1402 * Return the string length.
1403 1403 */
1404 1404 int
1405 1405 pr_u32tos(uint32_t n, char *s, int len)
1406 1406 {
1407 1407 char cbuf[11]; /* 32-bit unsigned integer fits in 10 digits */
1408 1408 char *cp = cbuf;
1409 1409 char *end = s + len;
1410 1410
1411 1411 do {
1412 1412 *cp++ = (char)(n % 10 + '0');
1413 1413 n /= 10;
1414 1414 } while (n);
1415 1415
1416 1416 len = (int)(cp - cbuf);
1417 1417
1418 1418 do {
1419 1419 *s++ = *--cp;
1420 1420 } while (cp > cbuf);
1421 1421
1422 1422 while (s < end) /* optional pad */
1423 1423 *s++ = '\0';
1424 1424
1425 1425 return (len);
1426 1426 }
1427 1427
1428 1428 /*
1429 1429 * Convert uint64_t to decimal string w/o leading zeros.
1430 1430 * Return the string length.
1431 1431 */
1432 1432 static int
1433 1433 pr_u64tos(uint64_t n, char *s)
1434 1434 {
1435 1435 char cbuf[21]; /* 64-bit unsigned integer fits in 20 digits */
1436 1436 char *cp = cbuf;
1437 1437 int len;
1438 1438
1439 1439 do {
1440 1440 *cp++ = (char)(n % 10 + '0');
1441 1441 n /= 10;
1442 1442 } while (n);
1443 1443
1444 1444 len = (int)(cp - cbuf);
1445 1445
1446 1446 do {
1447 1447 *s++ = *--cp;
1448 1448 } while (cp > cbuf);
1449 1449
1450 1450 return (len);
1451 1451 }
1452 1452
1453 1453 void
1454 1454 pr_object_name(char *name, vnode_t *vp, struct vattr *vattr)
1455 1455 {
1456 1456 char *s = name;
1457 1457 struct vfs *vfsp;
1458 1458 struct vfssw *vfsswp;
1459 1459
1460 1460 if ((vfsp = vp->v_vfsp) != NULL &&
1461 1461 ((vfsswp = vfssw + vfsp->vfs_fstype), vfsswp->vsw_name) &&
1462 1462 *vfsswp->vsw_name) {
1463 1463 (void) strcpy(s, vfsswp->vsw_name);
1464 1464 s += strlen(s);
1465 1465 *s++ = '.';
1466 1466 }
1467 1467 s += pr_u32tos(getmajor(vattr->va_fsid), s, 0);
1468 1468 *s++ = '.';
1469 1469 s += pr_u32tos(getminor(vattr->va_fsid), s, 0);
1470 1470 *s++ = '.';
1471 1471 s += pr_u64tos(vattr->va_nodeid, s);
1472 1472 *s++ = '\0';
1473 1473 }
1474 1474
1475 1475 struct seg *
1476 1476 break_seg(proc_t *p)
1477 1477 {
1478 1478 caddr_t addr = p->p_brkbase;
1479 1479 struct seg *seg;
1480 1480 struct vnode *vp;
1481 1481
1482 1482 if (p->p_brksize != 0)
1483 1483 addr += p->p_brksize - 1;
1484 1484 seg = as_segat(p->p_as, addr);
1485 1485 if (seg != NULL && seg->s_ops == &segvn_ops &&
1486 1486 (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL))
1487 1487 return (seg);
1488 1488 return (NULL);
1489 1489 }
1490 1490
1491 1491 /*
1492 1492 * Implementation of service functions to handle procfs generic chained
1493 1493 * copyout buffers.
1494 1494 */
1495 1495 typedef struct pr_iobuf_list {
1496 1496 list_node_t piol_link; /* buffer linkage */
1497 1497 size_t piol_size; /* total size (header + data) */
1498 1498 size_t piol_usedsize; /* amount to copy out from this buf */
1499 1499 } piol_t;
1500 1500
1501 1501 #define MAPSIZE (64 * 1024)
1502 1502 #define PIOL_DATABUF(iol) ((void *)(&(iol)[1]))
1503 1503
1504 1504 void
1505 1505 pr_iol_initlist(list_t *iolhead, size_t itemsize, int n)
1506 1506 {
1507 1507 piol_t *iol;
1508 1508 size_t initial_size = MIN(1, n) * itemsize;
1509 1509
1510 1510 list_create(iolhead, sizeof (piol_t), offsetof(piol_t, piol_link));
1511 1511
1512 1512 ASSERT(list_head(iolhead) == NULL);
1513 1513 ASSERT(itemsize < MAPSIZE - sizeof (*iol));
1514 1514 ASSERT(initial_size > 0);
1515 1515
1516 1516 /*
1517 1517 * Someone creating chained copyout buffers may ask for less than
1518 1518 * MAPSIZE if the amount of data to be buffered is known to be
1519 1519 * smaller than that.
1520 1520 * But in order to prevent involuntary self-denial of service,
1521 1521 * the requested input size is clamped at MAPSIZE.
1522 1522 */
1523 1523 initial_size = MIN(MAPSIZE, initial_size + sizeof (*iol));
1524 1524 iol = kmem_alloc(initial_size, KM_SLEEP);
1525 1525 list_insert_head(iolhead, iol);
1526 1526 iol->piol_usedsize = 0;
1527 1527 iol->piol_size = initial_size;
1528 1528 }
1529 1529
1530 1530 void *
1531 1531 pr_iol_newbuf(list_t *iolhead, size_t itemsize)
1532 1532 {
1533 1533 piol_t *iol;
1534 1534 char *new;
1535 1535
1536 1536 ASSERT(itemsize < MAPSIZE - sizeof (*iol));
1537 1537 ASSERT(list_head(iolhead) != NULL);
1538 1538
1539 1539 iol = (piol_t *)list_tail(iolhead);
1540 1540
1541 1541 if (iol->piol_size <
1542 1542 iol->piol_usedsize + sizeof (*iol) + itemsize) {
1543 1543 /*
1544 1544 * Out of space in the current buffer. Allocate more.
1545 1545 */
1546 1546 piol_t *newiol;
1547 1547
1548 1548 newiol = kmem_alloc(MAPSIZE, KM_SLEEP);
1549 1549 newiol->piol_size = MAPSIZE;
1550 1550 newiol->piol_usedsize = 0;
1551 1551
1552 1552 list_insert_after(iolhead, iol, newiol);
1553 1553 iol = list_next(iolhead, iol);
1554 1554 ASSERT(iol == newiol);
1555 1555 }
1556 1556 new = (char *)PIOL_DATABUF(iol) + iol->piol_usedsize;
1557 1557 iol->piol_usedsize += itemsize;
1558 1558 bzero(new, itemsize);
1559 1559 return (new);
1560 1560 }
1561 1561
1562 1562 int
1563 1563 pr_iol_copyout_and_free(list_t *iolhead, caddr_t *tgt, int errin)
1564 1564 {
1565 1565 int error = errin;
1566 1566 piol_t *iol;
1567 1567
1568 1568 while ((iol = list_head(iolhead)) != NULL) {
1569 1569 list_remove(iolhead, iol);
1570 1570 if (!error) {
1571 1571 if (copyout(PIOL_DATABUF(iol), *tgt,
1572 1572 iol->piol_usedsize))
1573 1573 error = EFAULT;
1574 1574 *tgt += iol->piol_usedsize;
1575 1575 }
1576 1576 kmem_free(iol, iol->piol_size);
1577 1577 }
1578 1578 list_destroy(iolhead);
1579 1579
1580 1580 return (error);
1581 1581 }
1582 1582
1583 1583 int
1584 1584 pr_iol_uiomove_and_free(list_t *iolhead, uio_t *uiop, int errin)
1585 1585 {
1586 1586 offset_t off = uiop->uio_offset;
1587 1587 char *base;
1588 1588 size_t size;
1589 1589 piol_t *iol;
1590 1590 int error = errin;
1591 1591
1592 1592 while ((iol = list_head(iolhead)) != NULL) {
1593 1593 list_remove(iolhead, iol);
1594 1594 base = PIOL_DATABUF(iol);
1595 1595 size = iol->piol_usedsize;
1596 1596 if (off <= size && error == 0 && uiop->uio_resid > 0)
1597 1597 error = uiomove(base + off, size - off,
1598 1598 UIO_READ, uiop);
1599 1599 off = MAX(0, off - (offset_t)size);
1600 1600 kmem_free(iol, iol->piol_size);
1601 1601 }
1602 1602 list_destroy(iolhead);
1603 1603
1604 1604 return (error);
1605 1605 }
1606 1606
1607 1607 /*
1608 1608 * Return an array of structures with memory map information.
1609 1609 * We allocate here; the caller must deallocate.
1610 1610 */
1611 1611 int
1612 1612 prgetmap(proc_t *p, int reserved, list_t *iolhead)
1613 1613 {
1614 1614 struct as *as = p->p_as;
1615 1615 prmap_t *mp;
1616 1616 struct seg *seg;
1617 1617 struct seg *brkseg, *stkseg;
1618 1618 struct vnode *vp;
1619 1619 struct vattr vattr;
1620 1620 uint_t prot;
1621 1621
1622 1622 ASSERT(as != &kas && AS_WRITE_HELD(as));
1623 1623
1624 1624 /*
1625 1625 * Request an initial buffer size that doesn't waste memory
1626 1626 * if the address space has only a small number of segments.
1627 1627 */
1628 1628 pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
1629 1629
1630 1630 if ((seg = AS_SEGFIRST(as)) == NULL)
1631 1631 return (0);
1632 1632
1633 1633 brkseg = break_seg(p);
1634 1634 stkseg = as_segat(as, prgetstackbase(p));
1635 1635
1636 1636 do {
1637 1637 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
1638 1638 caddr_t saddr, naddr;
1639 1639 void *tmp = NULL;
1640 1640
1641 1641 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1642 1642 prot = pr_getprot(seg, reserved, &tmp,
1643 1643 &saddr, &naddr, eaddr);
1644 1644 if (saddr == naddr)
1645 1645 continue;
1646 1646
1647 1647 mp = pr_iol_newbuf(iolhead, sizeof (*mp));
1648 1648
1649 1649 mp->pr_vaddr = (uintptr_t)saddr;
1650 1650 mp->pr_size = naddr - saddr;
1651 1651 mp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
1652 1652 mp->pr_mflags = 0;
1653 1653 if (prot & PROT_READ)
1654 1654 mp->pr_mflags |= MA_READ;
1655 1655 if (prot & PROT_WRITE)
1656 1656 mp->pr_mflags |= MA_WRITE;
1657 1657 if (prot & PROT_EXEC)
1658 1658 mp->pr_mflags |= MA_EXEC;
1659 1659 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
1660 1660 mp->pr_mflags |= MA_SHARED;
1661 1661 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
1662 1662 mp->pr_mflags |= MA_NORESERVE;
1663 1663 if (seg->s_ops == &segspt_shmops ||
1664 1664 (seg->s_ops == &segvn_ops &&
1665 1665 (SEGOP_GETVP(seg, saddr, &vp) != 0 || vp == NULL)))
1666 1666 mp->pr_mflags |= MA_ANON;
1667 1667 if (seg == brkseg)
1668 1668 mp->pr_mflags |= MA_BREAK;
1669 1669 else if (seg == stkseg) {
1670 1670 mp->pr_mflags |= MA_STACK;
1671 1671 if (reserved) {
1672 1672 size_t maxstack =
1673 1673 ((size_t)p->p_stk_ctl +
1674 1674 PAGEOFFSET) & PAGEMASK;
1675 1675 mp->pr_vaddr =
1676 1676 (uintptr_t)prgetstackbase(p) +
1677 1677 p->p_stksize - maxstack;
1678 1678 mp->pr_size = (uintptr_t)naddr -
1679 1679 mp->pr_vaddr;
1680 1680 }
1681 1681 }
1682 1682 if (seg->s_ops == &segspt_shmops)
1683 1683 mp->pr_mflags |= MA_ISM | MA_SHM;
1684 1684 mp->pr_pagesize = PAGESIZE;
1685 1685
1686 1686 /*
1687 1687 * Manufacture a filename for the "object" directory.
1688 1688 */
1689 1689 vattr.va_mask = AT_FSID|AT_NODEID;
1690 1690 if (seg->s_ops == &segvn_ops &&
1691 1691 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
1692 1692 vp != NULL && vp->v_type == VREG &&
1693 1693 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
1694 1694 if (vp == p->p_exec)
1695 1695 (void) strcpy(mp->pr_mapname, "a.out");
1696 1696 else
1697 1697 pr_object_name(mp->pr_mapname,
1698 1698 vp, &vattr);
1699 1699 }
1700 1700
1701 1701 /*
1702 1702 * Get the SysV shared memory id, if any.
1703 1703 */
1704 1704 if ((mp->pr_mflags & MA_SHARED) && p->p_segacct &&
1705 1705 (mp->pr_shmid = shmgetid(p, seg->s_base)) !=
1706 1706 SHMID_NONE) {
1707 1707 if (mp->pr_shmid == SHMID_FREE)
1708 1708 mp->pr_shmid = -1;
1709 1709
1710 1710 mp->pr_mflags |= MA_SHM;
1711 1711 } else {
1712 1712 mp->pr_shmid = -1;
1713 1713 }
1714 1714 }
1715 1715 ASSERT(tmp == NULL);
1716 1716 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
1717 1717
1718 1718 return (0);
1719 1719 }
1720 1720
1721 1721 #ifdef _SYSCALL32_IMPL
1722 1722 int
1723 1723 prgetmap32(proc_t *p, int reserved, list_t *iolhead)
1724 1724 {
1725 1725 struct as *as = p->p_as;
1726 1726 prmap32_t *mp;
1727 1727 struct seg *seg;
1728 1728 struct seg *brkseg, *stkseg;
1729 1729 struct vnode *vp;
1730 1730 struct vattr vattr;
1731 1731 uint_t prot;
1732 1732
1733 1733 ASSERT(as != &kas && AS_WRITE_HELD(as));
1734 1734
1735 1735 /*
1736 1736 * Request an initial buffer size that doesn't waste memory
1737 1737 * if the address space has only a small number of segments.
1738 1738 */
1739 1739 pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
1740 1740
1741 1741 if ((seg = AS_SEGFIRST(as)) == NULL)
1742 1742 return (0);
1743 1743
1744 1744 brkseg = break_seg(p);
1745 1745 stkseg = as_segat(as, prgetstackbase(p));
1746 1746
1747 1747 do {
1748 1748 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
1749 1749 caddr_t saddr, naddr;
1750 1750 void *tmp = NULL;
1751 1751
1752 1752 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1753 1753 prot = pr_getprot(seg, reserved, &tmp,
1754 1754 &saddr, &naddr, eaddr);
1755 1755 if (saddr == naddr)
1756 1756 continue;
1757 1757
1758 1758 mp = pr_iol_newbuf(iolhead, sizeof (*mp));
1759 1759
1760 1760 mp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
1761 1761 mp->pr_size = (size32_t)(naddr - saddr);
1762 1762 mp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
1763 1763 mp->pr_mflags = 0;
1764 1764 if (prot & PROT_READ)
1765 1765 mp->pr_mflags |= MA_READ;
1766 1766 if (prot & PROT_WRITE)
1767 1767 mp->pr_mflags |= MA_WRITE;
1768 1768 if (prot & PROT_EXEC)
1769 1769 mp->pr_mflags |= MA_EXEC;
1770 1770 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
1771 1771 mp->pr_mflags |= MA_SHARED;
1772 1772 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
1773 1773 mp->pr_mflags |= MA_NORESERVE;
1774 1774 if (seg->s_ops == &segspt_shmops ||
1775 1775 (seg->s_ops == &segvn_ops &&
1776 1776 (SEGOP_GETVP(seg, saddr, &vp) != 0 || vp == NULL)))
1777 1777 mp->pr_mflags |= MA_ANON;
1778 1778 if (seg == brkseg)
1779 1779 mp->pr_mflags |= MA_BREAK;
1780 1780 else if (seg == stkseg) {
1781 1781 mp->pr_mflags |= MA_STACK;
1782 1782 if (reserved) {
1783 1783 size_t maxstack =
1784 1784 ((size_t)p->p_stk_ctl +
1785 1785 PAGEOFFSET) & PAGEMASK;
1786 1786 uintptr_t vaddr =
1787 1787 (uintptr_t)prgetstackbase(p) +
1788 1788 p->p_stksize - maxstack;
1789 1789 mp->pr_vaddr = (caddr32_t)vaddr;
1790 1790 mp->pr_size = (size32_t)
1791 1791 ((uintptr_t)naddr - vaddr);
1792 1792 }
1793 1793 }
1794 1794 if (seg->s_ops == &segspt_shmops)
1795 1795 mp->pr_mflags |= MA_ISM | MA_SHM;
1796 1796 mp->pr_pagesize = PAGESIZE;
1797 1797
1798 1798 /*
1799 1799 * Manufacture a filename for the "object" directory.
1800 1800 */
1801 1801 vattr.va_mask = AT_FSID|AT_NODEID;
1802 1802 if (seg->s_ops == &segvn_ops &&
1803 1803 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
1804 1804 vp != NULL && vp->v_type == VREG &&
1805 1805 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
1806 1806 if (vp == p->p_exec)
1807 1807 (void) strcpy(mp->pr_mapname, "a.out");
1808 1808 else
1809 1809 pr_object_name(mp->pr_mapname,
1810 1810 vp, &vattr);
1811 1811 }
1812 1812
1813 1813 /*
1814 1814 * Get the SysV shared memory id, if any.
1815 1815 */
1816 1816 if ((mp->pr_mflags & MA_SHARED) && p->p_segacct &&
1817 1817 (mp->pr_shmid = shmgetid(p, seg->s_base)) !=
1818 1818 SHMID_NONE) {
1819 1819 if (mp->pr_shmid == SHMID_FREE)
1820 1820 mp->pr_shmid = -1;
1821 1821
1822 1822 mp->pr_mflags |= MA_SHM;
1823 1823 } else {
1824 1824 mp->pr_shmid = -1;
1825 1825 }
1826 1826 }
1827 1827 ASSERT(tmp == NULL);
1828 1828 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
1829 1829
1830 1830 return (0);
1831 1831 }
1832 1832 #endif /* _SYSCALL32_IMPL */
1833 1833
1834 1834 /*
1835 1835 * Return the size of the /proc page data file.
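 * The file consists of a prpageheader_t followed, for each mapping, by a
 * prasmap_t plus one byte of data per page, with each per-page array
 * rounded up to an 8-byte boundary (hence the round8() below).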
1836 1836 */
1837 1837 size_t
1838 1838 prpdsize(struct as *as)
1839 1839 {
1840 1840 struct seg *seg;
1841 1841 size_t size;
1842 1842
1843 1843 ASSERT(as != &kas && AS_WRITE_HELD(as));
1844 1844
1845 1845 if ((seg = AS_SEGFIRST(as)) == NULL)
1846 1846 return (0);
1847 1847
1848 1848 size = sizeof (prpageheader_t);
1849 1849 do {
1850 1850 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1851 1851 caddr_t saddr, naddr;
1852 1852 void *tmp = NULL;
1853 1853 size_t npage;
1854 1854
1855 1855 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1856 1856 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
1857 1857 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
1858 1858 size += sizeof (prasmap_t) + round8(npage);
1859 1859 }
1860 1860 ASSERT(tmp == NULL);
1861 1861 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
1862 1862
1863 1863 return (size);
1864 1864 }
1865 1865
1866 1866 #ifdef _SYSCALL32_IMPL
1867 1867 size_t
1868 1868 prpdsize32(struct as *as)
1869 1869 {
1870 1870 struct seg *seg;
1871 1871 size_t size;
1872 1872
1873 1873 ASSERT(as != &kas && AS_WRITE_HELD(as));
1874 1874
1875 1875 if ((seg = AS_SEGFIRST(as)) == NULL)
1876 1876 return (0);
1877 1877
1878 1878 size = sizeof (prpageheader32_t);
1879 1879 do {
1880 1880 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1881 1881 caddr_t saddr, naddr;
1882 1882 void *tmp = NULL;
1883 1883 size_t npage;
1884 1884
1885 1885 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1886 1886 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
1887 1887 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
1888 1888 size += sizeof (prasmap32_t) + round8(npage);
1889 1889 }
1890 1890 ASSERT(tmp == NULL);
1891 1891 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
1892 1892
1893 1893 return (size);
1894 1894 }
1895 1895 #endif /* _SYSCALL32_IMPL */
1896 1896
1897 1897 /*
1898 1898 * Read page data information.
1899 1899 */
1900 1900 int
1901 1901 prpdread(proc_t *p, uint_t hatid, struct uio *uiop)
1902 1902 {
1903 1903 struct as *as = p->p_as;
1904 1904 caddr_t buf;
1905 1905 size_t size;
1906 1906 prpageheader_t *php;
1907 1907 prasmap_t *pmp;
1908 1908 struct seg *seg;
1909 1909 int error;
1910 1910
1911 1911 again:
1912 1912 AS_LOCK_ENTER(as, RW_WRITER);
1913 1913
1914 1914 if ((seg = AS_SEGFIRST(as)) == NULL) {
1915 1915 AS_LOCK_EXIT(as);
1916 1916 return (0);
1917 1917 }
1918 1918 size = prpdsize(as);
1919 1919 if (uiop->uio_resid < size) {
1920 1920 AS_LOCK_EXIT(as);
1921 1921 return (E2BIG);
1922 1922 }
1923 1923
1924 1924 buf = kmem_zalloc(size, KM_SLEEP);
1925 1925 php = (prpageheader_t *)buf;
1926 1926 pmp = (prasmap_t *)(buf + sizeof (prpageheader_t));
1927 1927
1928 1928 hrt2ts(gethrtime(), &php->pr_tstamp);
1929 1929 php->pr_nmap = 0;
1930 1930 php->pr_npage = 0;
1931 1931 do {
1932 1932 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1933 1933 caddr_t saddr, naddr;
1934 1934 void *tmp = NULL;
1935 1935
1936 1936 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1937 1937 struct vnode *vp;
1938 1938 struct vattr vattr;
1939 1939 size_t len;
1940 1940 size_t npage;
1941 1941 uint_t prot;
1942 1942 uintptr_t next;
1943 1943
1944 1944 prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
1945 1945 if ((len = (size_t)(naddr - saddr)) == 0)
1946 1946 continue;
1947 1947 npage = len / PAGESIZE;
1948 1948 next = (uintptr_t)(pmp + 1) + round8(npage);
1949 1949 /*
1950 1950 * It's possible that the address space can change
1951 1951 * subtly even though we're holding as->a_lock
1952 1952 * due to the nondeterminism of page_exists() in
1953 1953 * the presence of asynchronously flushed pages or
1954 1954 * mapped files whose sizes are changing.
1955 1955 * page_exists() may be called indirectly from
1956 1956 * pr_getprot() by a SEGOP_INCORE() routine.
1957 1957 * If this happens we need to make sure we don't
1958 1958 * overrun the buffer whose size we computed based
1959 1959 * on the initial iteration through the segments.
1960 1960 * Once we've detected an overflow, we need to clean
1961 1961 * up the temporary memory allocated in pr_getprot()
1962 1962 * and retry. If there's a pending signal, we return
1963 1963 * EINTR so that this thread can be dislodged if
1964 1964 * a latent bug causes us to spin indefinitely.
1965 1965 */
1966 1966 if (next > (uintptr_t)buf + size) {
1967 1967 pr_getprot_done(&tmp);
1968 1968 AS_LOCK_EXIT(as);
1969 1969
1970 1970 kmem_free(buf, size);
1971 1971
1972 1972 if (ISSIG(curthread, JUSTLOOKING))
1973 1973 return (EINTR);
1974 1974
1975 1975 goto again;
1976 1976 }
1977 1977
1978 1978 php->pr_nmap++;
1979 1979 php->pr_npage += npage;
1980 1980 pmp->pr_vaddr = (uintptr_t)saddr;
1981 1981 pmp->pr_npage = npage;
1982 1982 pmp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
1983 1983 pmp->pr_mflags = 0;
1984 1984 if (prot & PROT_READ)
1985 1985 pmp->pr_mflags |= MA_READ;
1986 1986 if (prot & PROT_WRITE)
1987 1987 pmp->pr_mflags |= MA_WRITE;
1988 1988 if (prot & PROT_EXEC)
1989 1989 pmp->pr_mflags |= MA_EXEC;
1990 1990 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
1991 1991 pmp->pr_mflags |= MA_SHARED;
1992 1992 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
1993 1993 pmp->pr_mflags |= MA_NORESERVE;
1994 1994 if (seg->s_ops == &segspt_shmops ||
1995 1995 (seg->s_ops == &segvn_ops &&
1996 1996 (SEGOP_GETVP(seg, saddr, &vp) != 0 || vp == NULL)))
1997 1997 pmp->pr_mflags |= MA_ANON;
1998 1998 if (seg->s_ops == &segspt_shmops)
1999 1999 pmp->pr_mflags |= MA_ISM | MA_SHM;
2000 2000 pmp->pr_pagesize = PAGESIZE;
2001 2001 /*
2002 2002 * Manufacture a filename for the "object" directory.
2003 2003 */
2004 2004 vattr.va_mask = AT_FSID|AT_NODEID;
2005 2005 if (seg->s_ops == &segvn_ops &&
2006 2006 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
2007 2007 vp != NULL && vp->v_type == VREG &&
2008 2008 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
2009 2009 if (vp == p->p_exec)
2010 2010 (void) strcpy(pmp->pr_mapname, "a.out");
2011 2011 else
2012 2012 pr_object_name(pmp->pr_mapname,
2013 2013 vp, &vattr);
2014 2014 }
2015 2015
2016 2016 /*
2017 2017 * Get the SysV shared memory id, if any.
2018 2018 */
2019 2019 if ((pmp->pr_mflags & MA_SHARED) && p->p_segacct &&
2020 2020 (pmp->pr_shmid = shmgetid(p, seg->s_base)) !=
2021 2021 SHMID_NONE) {
2022 2022 if (pmp->pr_shmid == SHMID_FREE)
2023 2023 pmp->pr_shmid = -1;
2024 2024
2025 2025 pmp->pr_mflags |= MA_SHM;
2026 2026 } else {
2027 2027 pmp->pr_shmid = -1;
2028 2028 }
2029 2029
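			/*
			 * Harvest the per-page reference/modify bits into the
			 * byte array that follows this prasmap_t.  With
			 * HAT_SYNC_ZERORM the state is cleared as it is
			 * collected, so each read of the page data file
			 * reports activity since the previous read.
			 */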
2030 2030 hat_getstat(as, saddr, len, hatid,
2031 2031 (char *)(pmp + 1), HAT_SYNC_ZERORM);
2032 2032 pmp = (prasmap_t *)next;
2033 2033 }
2034 2034 ASSERT(tmp == NULL);
2035 2035 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2036 2036
2037 2037 AS_LOCK_EXIT(as);
2038 2038
2039 2039 ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
2040 2040 error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
2041 2041 kmem_free(buf, size);
2042 2042
2043 2043 return (error);
2044 2044 }
2045 2045
2046 2046 #ifdef _SYSCALL32_IMPL
2047 2047 int
2048 2048 prpdread32(proc_t *p, uint_t hatid, struct uio *uiop)
2049 2049 {
2050 2050 struct as *as = p->p_as;
2051 2051 caddr_t buf;
2052 2052 size_t size;
2053 2053 prpageheader32_t *php;
2054 2054 prasmap32_t *pmp;
2055 2055 struct seg *seg;
2056 2056 int error;
2057 2057
2058 2058 again:
2059 2059 AS_LOCK_ENTER(as, RW_WRITER);
2060 2060
2061 2061 if ((seg = AS_SEGFIRST(as)) == NULL) {
2062 2062 AS_LOCK_EXIT(as);
2063 2063 return (0);
2064 2064 }
2065 2065 size = prpdsize32(as);
2066 2066 if (uiop->uio_resid < size) {
2067 2067 AS_LOCK_EXIT(as);
2068 2068 return (E2BIG);
2069 2069 }
2070 2070
2071 2071 buf = kmem_zalloc(size, KM_SLEEP);
2072 2072 php = (prpageheader32_t *)buf;
2073 2073 pmp = (prasmap32_t *)(buf + sizeof (prpageheader32_t));
2074 2074
2075 2075 hrt2ts32(gethrtime(), &php->pr_tstamp);
2076 2076 php->pr_nmap = 0;
2077 2077 php->pr_npage = 0;
2078 2078 do {
2079 2079 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
2080 2080 caddr_t saddr, naddr;
2081 2081 void *tmp = NULL;
2082 2082
2083 2083 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
2084 2084 struct vnode *vp;
2085 2085 struct vattr vattr;
2086 2086 size_t len;
2087 2087 size_t npage;
2088 2088 uint_t prot;
2089 2089 uintptr_t next;
2090 2090
2091 2091 prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
2092 2092 if ((len = (size_t)(naddr - saddr)) == 0)
2093 2093 continue;
2094 2094 npage = len / PAGESIZE;
2095 2095 next = (uintptr_t)(pmp + 1) + round8(npage);
2096 2096 /*
2097 2097 * It's possible that the address space can change
2098 2098 * subtly even though we're holding as->a_lock
2099 2099 * due to the nondeterminism of page_exists() in
2100 2100 * the presence of asynchronously flushed pages or
2101 2101 * mapped files whose sizes are changing.
2102 2102 * page_exists() may be called indirectly from
2103 2103 * pr_getprot() by a SEGOP_INCORE() routine.
2104 2104 * If this happens we need to make sure we don't
2105 2105 * overrun the buffer whose size we computed based
2106 2106 * on the initial iteration through the segments.
2107 2107 * Once we've detected an overflow, we need to clean
2108 2108 * up the temporary memory allocated in pr_getprot()
2109 2109 * and retry. If there's a pending signal, we return
2110 2110 * EINTR so that this thread can be dislodged if
2111 2111 * a latent bug causes us to spin indefinitely.
2112 2112 */
2113 2113 if (next > (uintptr_t)buf + size) {
2114 2114 pr_getprot_done(&tmp);
2115 2115 AS_LOCK_EXIT(as);
2116 2116
2117 2117 kmem_free(buf, size);
2118 2118
2119 2119 if (ISSIG(curthread, JUSTLOOKING))
2120 2120 return (EINTR);
2121 2121
2122 2122 goto again;
2123 2123 }
2124 2124
2125 2125 php->pr_nmap++;
2126 2126 php->pr_npage += npage;
2127 2127 pmp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
2128 2128 pmp->pr_npage = (size32_t)npage;
2129 2129 pmp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
2130 2130 pmp->pr_mflags = 0;
2131 2131 if (prot & PROT_READ)
2132 2132 pmp->pr_mflags |= MA_READ;
2133 2133 if (prot & PROT_WRITE)
2134 2134 pmp->pr_mflags |= MA_WRITE;
2135 2135 if (prot & PROT_EXEC)
2136 2136 pmp->pr_mflags |= MA_EXEC;
2137 2137 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
2138 2138 pmp->pr_mflags |= MA_SHARED;
2139 2139 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
2140 2140 pmp->pr_mflags |= MA_NORESERVE;
2141 2141 if (seg->s_ops == &segspt_shmops ||
2142 2142 (seg->s_ops == &segvn_ops &&
2143 2143 (SEGOP_GETVP(seg, saddr, &vp) != 0 || vp == NULL)))
2144 2144 pmp->pr_mflags |= MA_ANON;
2145 2145 if (seg->s_ops == &segspt_shmops)
2146 2146 pmp->pr_mflags |= MA_ISM | MA_SHM;
2147 2147 pmp->pr_pagesize = PAGESIZE;
2148 2148 /*
2149 2149 * Manufacture a filename for the "object" directory.
2150 2150 */
2151 2151 vattr.va_mask = AT_FSID|AT_NODEID;
2152 2152 if (seg->s_ops == &segvn_ops &&
2153 2153 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
2154 2154 vp != NULL && vp->v_type == VREG &&
2155 2155 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
2156 2156 if (vp == p->p_exec)
2157 2157 (void) strcpy(pmp->pr_mapname, "a.out");
2158 2158 else
2159 2159 pr_object_name(pmp->pr_mapname,
2160 2160 vp, &vattr);
2161 2161 }
2162 2162
2163 2163 /*
2164 2164 * Get the SysV shared memory id, if any.
2165 2165 */
2166 2166 if ((pmp->pr_mflags & MA_SHARED) && p->p_segacct &&
2167 2167 (pmp->pr_shmid = shmgetid(p, seg->s_base)) !=
2168 2168 SHMID_NONE) {
2169 2169 if (pmp->pr_shmid == SHMID_FREE)
2170 2170 pmp->pr_shmid = -1;
2171 2171
2172 2172 pmp->pr_mflags |= MA_SHM;
2173 2173 } else {
2174 2174 pmp->pr_shmid = -1;
2175 2175 }
2176 2176
2177 2177 hat_getstat(as, saddr, len, hatid,
2178 2178 (char *)(pmp + 1), HAT_SYNC_ZERORM);
2179 2179 pmp = (prasmap32_t *)next;
2180 2180 }
2181 2181 ASSERT(tmp == NULL);
2182 2182 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2183 2183
2184 2184 AS_LOCK_EXIT(as);
2185 2185
2186 2186 ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
2187 2187 error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
2188 2188 kmem_free(buf, size);
2189 2189
2190 2190 return (error);
2191 2191 }
2192 2192 #endif /* _SYSCALL32_IMPL */
2193 2193
2194 2194 ushort_t
2195 2195 prgetpctcpu(uint64_t pct)
2196 2196 {
2197 2197 /*
2198 2198 * The value returned will be relevant in the zone of the examiner,
2199 2199 * which may not be the same as the zone which performed the procfs
2200 2200 * mount.
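	 *
	 * The result is a 16-bit binary fraction in which 0x8000 represents
	 * 100%; a consumer can convert it to a percentage as, roughly,
	 * pct * 100 / 0x8000 (compare the pr_cpu computation in
	 * prgetlwpsinfo()).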
2201 2201 */
2202 2202 int nonline = zone_ncpus_online_get(curproc->p_zone);
2203 2203
2204 2204 /*
2205 2205 * Prorate over online cpus so we don't exceed 100%
2206 2206 */
2207 2207 if (nonline > 1)
2208 2208 pct /= nonline;
2209 2209 pct >>= 16; /* convert to 16-bit scaled integer */
2210 2210 if (pct > 0x8000) /* might happen, due to rounding */
2211 2211 pct = 0x8000;
2212 2212 return ((ushort_t)pct);
2213 2213 }
2214 2214
2215 2215 /*
2216 2216 * Return information used by ps(1).
2217 2217 */
2218 2218 void
2219 2219 prgetpsinfo(proc_t *p, psinfo_t *psp)
2220 2220 {
2221 2221 kthread_t *t;
2222 2222 struct cred *cred;
2223 2223 hrtime_t hrutime, hrstime;
2224 2224
2225 2225 ASSERT(MUTEX_HELD(&p->p_lock));
2226 2226
2227 2227 if ((t = prchoose(p)) == NULL) /* returns locked thread */
2228 2228 bzero(psp, sizeof (*psp));
2229 2229 else {
2230 2230 thread_unlock(t);
2231 2231 bzero(psp, sizeof (*psp) - sizeof (psp->pr_lwp));
2232 2232 }
2233 2233
2234 2234 /*
2235 2235 * only export SSYS and SMSACCT; everything else is off-limits to
2236 2236 * userland apps.
2237 2237 */
2238 2238 psp->pr_flag = p->p_flag & (SSYS | SMSACCT);
2239 2239 psp->pr_nlwp = p->p_lwpcnt;
2240 2240 psp->pr_nzomb = p->p_zombcnt;
2241 2241 mutex_enter(&p->p_crlock);
2242 2242 cred = p->p_cred;
2243 2243 psp->pr_uid = crgetruid(cred);
2244 2244 psp->pr_euid = crgetuid(cred);
2245 2245 psp->pr_gid = crgetrgid(cred);
2246 2246 psp->pr_egid = crgetgid(cred);
2247 2247 mutex_exit(&p->p_crlock);
2248 2248 psp->pr_pid = p->p_pid;
2249 2249 if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
2250 2250 (p->p_flag & SZONETOP)) {
2251 2251 ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
2252 2252 /*
2253 2253 * Inside local zones, fake zsched's pid as parent pids for
2254 2254 * processes which reference processes outside of the zone.
2255 2255 */
2256 2256 psp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
2257 2257 } else {
2258 2258 psp->pr_ppid = p->p_ppid;
2259 2259 }
2260 2260 psp->pr_pgid = p->p_pgrp;
2261 2261 psp->pr_sid = p->p_sessp->s_sid;
2262 2262 psp->pr_taskid = p->p_task->tk_tkid;
2263 2263 psp->pr_projid = p->p_task->tk_proj->kpj_id;
2264 2264 psp->pr_poolid = p->p_pool->pool_id;
2265 2265 psp->pr_zoneid = p->p_zone->zone_id;
2266 2266 if ((psp->pr_contract = PRCTID(p)) == 0)
2267 2267 psp->pr_contract = -1;
2268 2268 psp->pr_addr = (uintptr_t)prgetpsaddr(p);
2269 2269 switch (p->p_model) {
2270 2270 case DATAMODEL_ILP32:
2271 2271 psp->pr_dmodel = PR_MODEL_ILP32;
2272 2272 break;
2273 2273 case DATAMODEL_LP64:
2274 2274 psp->pr_dmodel = PR_MODEL_LP64;
2275 2275 break;
2276 2276 }
2277 2277 hrutime = mstate_aggr_state(p, LMS_USER);
2278 2278 hrstime = mstate_aggr_state(p, LMS_SYSTEM);
2279 2279 hrt2ts((hrutime + hrstime), &psp->pr_time);
2280 2280 TICK_TO_TIMESTRUC(p->p_cutime + p->p_cstime, &psp->pr_ctime);
2281 2281
2282 2282 if (t == NULL) {
2283 2283 int wcode = p->p_wcode; /* must be atomic read */
2284 2284
2285 2285 if (wcode)
2286 2286 psp->pr_wstat = wstat(wcode, p->p_wdata);
2287 2287 psp->pr_ttydev = PRNODEV;
2288 2288 psp->pr_lwp.pr_state = SZOMB;
2289 2289 psp->pr_lwp.pr_sname = 'Z';
2290 2290 psp->pr_lwp.pr_bindpro = PBIND_NONE;
2291 2291 psp->pr_lwp.pr_bindpset = PS_NONE;
2292 2292 } else {
2293 2293 user_t *up = PTOU(p);
2294 2294 struct as *as;
2295 2295 dev_t d;
2296 2296 extern dev_t rwsconsdev, rconsdev, uconsdev;
2297 2297
2298 2298 d = cttydev(p);
2299 2299 /*
2300 2300 * If the controlling terminal is the real
2301 2301 * or workstation console device, map to what the
2302 2302 * user thinks is the console device. Handle case when
2303 2303 * rwsconsdev or rconsdev is set to NODEV for Starfire.
2304 2304 */
2305 2305 if ((d == rwsconsdev || d == rconsdev) && d != NODEV)
2306 2306 d = uconsdev;
2307 2307 psp->pr_ttydev = (d == NODEV) ? PRNODEV : d;
2308 2308 psp->pr_start = up->u_start;
2309 2309 bcopy(up->u_comm, psp->pr_fname,
2310 2310 MIN(sizeof (up->u_comm), sizeof (psp->pr_fname)-1));
2311 2311 bcopy(up->u_psargs, psp->pr_psargs,
2312 2312 MIN(PRARGSZ-1, PSARGSZ));
2313 2313 psp->pr_argc = up->u_argc;
2314 2314 psp->pr_argv = up->u_argv;
2315 2315 psp->pr_envp = up->u_envp;
2316 2316
2317 2317 /* get the chosen lwp's lwpsinfo */
2318 2318 prgetlwpsinfo(t, &psp->pr_lwp);
2319 2319
2320 2320 /* compute %cpu for the process */
2321 2321 if (p->p_lwpcnt == 1)
2322 2322 psp->pr_pctcpu = psp->pr_lwp.pr_pctcpu;
2323 2323 else {
2324 2324 uint64_t pct = 0;
2325 2325 hrtime_t cur_time = gethrtime_unscaled();
2326 2326
2327 2327 t = p->p_tlist;
2328 2328 do {
2329 2329 pct += cpu_update_pct(t, cur_time);
2330 2330 } while ((t = t->t_forw) != p->p_tlist);
2331 2331
2332 2332 psp->pr_pctcpu = prgetpctcpu(pct);
2333 2333 }
2334 2334 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
2335 2335 psp->pr_size = 0;
2336 2336 psp->pr_rssize = 0;
2337 2337 } else {
2338 2338 mutex_exit(&p->p_lock);
2339 2339 AS_LOCK_ENTER(as, RW_READER);
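			/*
			 * pr_size and pr_rssize are reported in kilobytes:
			 * btopr() and rm_asrss() yield page counts, so scale
			 * them by PAGESIZE / 1024.
			 */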
2340 2340 psp->pr_size = btopr(as->a_resvsize) *
2341 2341 (PAGESIZE / 1024);
2342 2342 psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
2343 2343 psp->pr_pctmem = rm_pctmemory(as);
2344 2344 AS_LOCK_EXIT(as);
2345 2345 mutex_enter(&p->p_lock);
2346 2346 }
2347 2347 }
2348 2348 }
2349 2349
2350 2350 #ifdef _SYSCALL32_IMPL
2351 2351 void
2352 2352 prgetpsinfo32(proc_t *p, psinfo32_t *psp)
2353 2353 {
2354 2354 kthread_t *t;
2355 2355 struct cred *cred;
2356 2356 hrtime_t hrutime, hrstime;
2357 2357
2358 2358 ASSERT(MUTEX_HELD(&p->p_lock));
2359 2359
2360 2360 if ((t = prchoose(p)) == NULL) /* returns locked thread */
2361 2361 bzero(psp, sizeof (*psp));
2362 2362 else {
2363 2363 thread_unlock(t);
2364 2364 bzero(psp, sizeof (*psp) - sizeof (psp->pr_lwp));
2365 2365 }
2366 2366
2367 2367 /*
2368 2368 * only export SSYS and SMSACCT; everything else is off-limits to
2369 2369 * userland apps.
2370 2370 */
2371 2371 psp->pr_flag = p->p_flag & (SSYS | SMSACCT);
2372 2372 psp->pr_nlwp = p->p_lwpcnt;
2373 2373 psp->pr_nzomb = p->p_zombcnt;
2374 2374 mutex_enter(&p->p_crlock);
2375 2375 cred = p->p_cred;
2376 2376 psp->pr_uid = crgetruid(cred);
2377 2377 psp->pr_euid = crgetuid(cred);
2378 2378 psp->pr_gid = crgetrgid(cred);
2379 2379 psp->pr_egid = crgetgid(cred);
2380 2380 mutex_exit(&p->p_crlock);
2381 2381 psp->pr_pid = p->p_pid;
2382 2382 if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
2383 2383 (p->p_flag & SZONETOP)) {
2384 2384 ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
2385 2385 /*
2386 2386 * Inside local zones, fake zsched's pid as parent pids for
2387 2387 * processes which reference processes outside of the zone.
2388 2388 */
2389 2389 psp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
2390 2390 } else {
2391 2391 psp->pr_ppid = p->p_ppid;
2392 2392 }
2393 2393 psp->pr_pgid = p->p_pgrp;
2394 2394 psp->pr_sid = p->p_sessp->s_sid;
2395 2395 psp->pr_taskid = p->p_task->tk_tkid;
2396 2396 psp->pr_projid = p->p_task->tk_proj->kpj_id;
2397 2397 psp->pr_poolid = p->p_pool->pool_id;
2398 2398 psp->pr_zoneid = p->p_zone->zone_id;
2399 2399 if ((psp->pr_contract = PRCTID(p)) == 0)
2400 2400 psp->pr_contract = -1;
2401 2401 psp->pr_addr = 0; /* cannot represent 64-bit addr in 32 bits */
2402 2402 switch (p->p_model) {
2403 2403 case DATAMODEL_ILP32:
2404 2404 psp->pr_dmodel = PR_MODEL_ILP32;
2405 2405 break;
2406 2406 case DATAMODEL_LP64:
2407 2407 psp->pr_dmodel = PR_MODEL_LP64;
2408 2408 break;
2409 2409 }
2410 2410 hrutime = mstate_aggr_state(p, LMS_USER);
2411 2411 hrstime = mstate_aggr_state(p, LMS_SYSTEM);
2412 2412 hrt2ts32(hrutime + hrstime, &psp->pr_time);
2413 2413 TICK_TO_TIMESTRUC32(p->p_cutime + p->p_cstime, &psp->pr_ctime);
2414 2414
2415 2415 if (t == NULL) {
2416 2416 extern int wstat(int, int); /* needs a header file */
2417 2417 int wcode = p->p_wcode; /* must be atomic read */
2418 2418
2419 2419 if (wcode)
2420 2420 psp->pr_wstat = wstat(wcode, p->p_wdata);
2421 2421 psp->pr_ttydev = PRNODEV32;
2422 2422 psp->pr_lwp.pr_state = SZOMB;
2423 2423 psp->pr_lwp.pr_sname = 'Z';
2424 2424 } else {
2425 2425 user_t *up = PTOU(p);
2426 2426 struct as *as;
2427 2427 dev_t d;
2428 2428 extern dev_t rwsconsdev, rconsdev, uconsdev;
2429 2429
2430 2430 d = cttydev(p);
2431 2431 /*
2432 2432 * If the controlling terminal is the real
2433 2433 * or workstation console device, map to what the
2434 2434 * user thinks is the console device. Handle case when
2435 2435 * rwsconsdev or rconsdev is set to NODEV for Starfire.
2436 2436 */
2437 2437 if ((d == rwsconsdev || d == rconsdev) && d != NODEV)
2438 2438 d = uconsdev;
2439 2439 (void) cmpldev(&psp->pr_ttydev, d);
2440 2440 TIMESPEC_TO_TIMESPEC32(&psp->pr_start, &up->u_start);
2441 2441 bcopy(up->u_comm, psp->pr_fname,
2442 2442 MIN(sizeof (up->u_comm), sizeof (psp->pr_fname)-1));
2443 2443 bcopy(up->u_psargs, psp->pr_psargs,
2444 2444 MIN(PRARGSZ-1, PSARGSZ));
2445 2445 psp->pr_argc = up->u_argc;
2446 2446 psp->pr_argv = (caddr32_t)up->u_argv;
2447 2447 psp->pr_envp = (caddr32_t)up->u_envp;
2448 2448
2449 2449 /* get the chosen lwp's lwpsinfo */
2450 2450 prgetlwpsinfo32(t, &psp->pr_lwp);
2451 2451
2452 2452 /* compute %cpu for the process */
2453 2453 if (p->p_lwpcnt == 1)
2454 2454 psp->pr_pctcpu = psp->pr_lwp.pr_pctcpu;
2455 2455 else {
2456 2456 uint64_t pct = 0;
2457 2457 hrtime_t cur_time;
2458 2458
2459 2459 t = p->p_tlist;
2460 2460 cur_time = gethrtime_unscaled();
2461 2461 do {
2462 2462 pct += cpu_update_pct(t, cur_time);
2463 2463 } while ((t = t->t_forw) != p->p_tlist);
2464 2464
2465 2465 psp->pr_pctcpu = prgetpctcpu(pct);
2466 2466 }
2467 2467 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
2468 2468 psp->pr_size = 0;
2469 2469 psp->pr_rssize = 0;
2470 2470 } else {
2471 2471 mutex_exit(&p->p_lock);
2472 2472 AS_LOCK_ENTER(as, RW_READER);
2473 2473 psp->pr_size = (size32_t)
2474 2474 (btopr(as->a_resvsize) * (PAGESIZE / 1024));
2475 2475 psp->pr_rssize = (size32_t)
2476 2476 (rm_asrss(as) * (PAGESIZE / 1024));
2477 2477 psp->pr_pctmem = rm_pctmemory(as);
2478 2478 AS_LOCK_EXIT(as);
2479 2479 mutex_enter(&p->p_lock);
2480 2480 }
2481 2481 }
2482 2482
2483 2483 /*
2484 2484 * If we are looking at an LP64 process, zero out
2485 2485 * the fields that cannot be represented in ILP32.
2486 2486 */
2487 2487 if (p->p_model != DATAMODEL_ILP32) {
2488 2488 psp->pr_size = 0;
2489 2489 psp->pr_rssize = 0;
2490 2490 psp->pr_argv = 0;
2491 2491 psp->pr_envp = 0;
2492 2492 }
2493 2493 }
2494 2494
2495 2495 #endif /* _SYSCALL32_IMPL */
2496 2496
2497 2497 void
2498 2498 prgetlwpsinfo(kthread_t *t, lwpsinfo_t *psp)
2499 2499 {
2500 2500 klwp_t *lwp = ttolwp(t);
2501 2501 sobj_ops_t *sobj;
2502 2502 char c, state;
2503 2503 uint64_t pct;
2504 2504 int retval, niceval;
2505 2505 hrtime_t hrutime, hrstime;
2506 2506
2507 2507 ASSERT(MUTEX_HELD(&ttoproc(t)->p_lock));
2508 2508
2509 2509 bzero(psp, sizeof (*psp));
2510 2510
2511 2511 psp->pr_flag = 0; /* lwpsinfo_t.pr_flag is deprecated */
2512 2512 psp->pr_lwpid = t->t_tid;
2513 2513 psp->pr_addr = (uintptr_t)t;
2514 2514 psp->pr_wchan = (uintptr_t)t->t_wchan;
2515 2515
2516 2516 /* map the thread state enum into a process state enum */
2517 2517 state = VSTOPPED(t) ? TS_STOPPED : t->t_state;
2518 2518 switch (state) {
2519 2519 case TS_SLEEP: state = SSLEEP; c = 'S'; break;
2520 2520 case TS_RUN: state = SRUN; c = 'R'; break;
2521 2521 case TS_ONPROC: state = SONPROC; c = 'O'; break;
2522 2522 case TS_ZOMB: state = SZOMB; c = 'Z'; break;
2523 2523 case TS_STOPPED: state = SSTOP; c = 'T'; break;
2524 2524 case TS_WAIT: state = SWAIT; c = 'W'; break;
2525 2525 default: state = 0; c = '?'; break;
2526 2526 }
2527 2527 psp->pr_state = state;
2528 2528 psp->pr_sname = c;
2529 2529 if ((sobj = t->t_sobj_ops) != NULL)
2530 2530 psp->pr_stype = SOBJ_TYPE(sobj);
2531 2531 retval = CL_DONICE(t, NULL, 0, &niceval);
2532 2532 if (retval == 0) {
2533 2533 psp->pr_oldpri = v.v_maxsyspri - t->t_pri;
2534 2534 psp->pr_nice = niceval + NZERO;
2535 2535 }
2536 2536 psp->pr_syscall = t->t_sysnum;
2537 2537 psp->pr_pri = t->t_pri;
2538 2538 psp->pr_start.tv_sec = t->t_start;
2539 2539 psp->pr_start.tv_nsec = 0L;
2540 2540 hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];
2541 2541 scalehrtime(&hrutime);
2542 2542 hrstime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
2543 2543 lwp->lwp_mstate.ms_acct[LMS_TRAP];
2544 2544 scalehrtime(&hrstime);
2545 2545 hrt2ts(hrutime + hrstime, &psp->pr_time);
2546 2546 /* compute %cpu for the lwp */
2547 2547 pct = cpu_update_pct(t, gethrtime_unscaled());
2548 2548 psp->pr_pctcpu = prgetpctcpu(pct);
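	/*
	 * pr_cpu is the same fraction expressed as an integer percentage:
	 * multiply by 100 and divide by 0x8000 (the >> 15 below); the 0x6000
	 * addend is 0.75 of the divisor, biasing the truncation upward.
	 */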
2549 2549 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
2550 2550 if (psp->pr_cpu > 99)
2551 2551 psp->pr_cpu = 99;
2552 2552
2553 2553 (void) strncpy(psp->pr_clname, sclass[t->t_cid].cl_name,
2554 2554 sizeof (psp->pr_clname) - 1);
2555 2555 bzero(psp->pr_name, sizeof (psp->pr_name)); /* XXX ??? */
2556 2556 psp->pr_onpro = t->t_cpu->cpu_id;
2557 2557 psp->pr_bindpro = t->t_bind_cpu;
2558 2558 psp->pr_bindpset = t->t_bind_pset;
2559 2559 psp->pr_lgrp = t->t_lpl->lpl_lgrpid;
2560 2560 }
2561 2561
2562 2562 #ifdef _SYSCALL32_IMPL
2563 2563 void
2564 2564 prgetlwpsinfo32(kthread_t *t, lwpsinfo32_t *psp)
2565 2565 {
2566 2566 proc_t *p = ttoproc(t);
2567 2567 klwp_t *lwp = ttolwp(t);
2568 2568 sobj_ops_t *sobj;
2569 2569 char c, state;
2570 2570 uint64_t pct;
2571 2571 int retval, niceval;
2572 2572 hrtime_t hrutime, hrstime;
2573 2573
2574 2574 ASSERT(MUTEX_HELD(&p->p_lock));
2575 2575
2576 2576 bzero(psp, sizeof (*psp));
2577 2577
2578 2578 psp->pr_flag = 0; /* lwpsinfo_t.pr_flag is deprecated */
2579 2579 psp->pr_lwpid = t->t_tid;
2580 2580 psp->pr_addr = 0; /* cannot represent 64-bit addr in 32 bits */
2581 2581 psp->pr_wchan = 0; /* cannot represent 64-bit addr in 32 bits */
2582 2582
2583 2583 /* map the thread state enum into a process state enum */
2584 2584 state = VSTOPPED(t) ? TS_STOPPED : t->t_state;
2585 2585 switch (state) {
2586 2586 case TS_SLEEP: state = SSLEEP; c = 'S'; break;
2587 2587 case TS_RUN: state = SRUN; c = 'R'; break;
2588 2588 case TS_ONPROC: state = SONPROC; c = 'O'; break;
2589 2589 case TS_ZOMB: state = SZOMB; c = 'Z'; break;
2590 2590 case TS_STOPPED: state = SSTOP; c = 'T'; break;
2591 2591 case TS_WAIT: state = SWAIT; c = 'W'; break;
2592 2592 default: state = 0; c = '?'; break;
2593 2593 }
2594 2594 psp->pr_state = state;
2595 2595 psp->pr_sname = c;
2596 2596 if ((sobj = t->t_sobj_ops) != NULL)
2597 2597 psp->pr_stype = SOBJ_TYPE(sobj);
2598 2598 retval = CL_DONICE(t, NULL, 0, &niceval);
2599 2599 if (retval == 0) {
2600 2600 psp->pr_oldpri = v.v_maxsyspri - t->t_pri;
2601 2601 psp->pr_nice = niceval + NZERO;
2602 2602 } else {
2603 2603 psp->pr_oldpri = 0;
2604 2604 psp->pr_nice = 0;
2605 2605 }
2606 2606 psp->pr_syscall = t->t_sysnum;
2607 2607 psp->pr_pri = t->t_pri;
2608 2608 psp->pr_start.tv_sec = (time32_t)t->t_start;
2609 2609 psp->pr_start.tv_nsec = 0L;
2610 2610 hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];
2611 2611 scalehrtime(&hrutime);
2612 2612 hrstime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
2613 2613 lwp->lwp_mstate.ms_acct[LMS_TRAP];
2614 2614 scalehrtime(&hrstime);
2615 2615 hrt2ts32(hrutime + hrstime, &psp->pr_time);
2616 2616 /* compute %cpu for the lwp */
2617 2617 pct = cpu_update_pct(t, gethrtime_unscaled());
2618 2618 psp->pr_pctcpu = prgetpctcpu(pct);
2619 2619 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
2620 2620 if (psp->pr_cpu > 99)
2621 2621 psp->pr_cpu = 99;
2622 2622
2623 2623 (void) strncpy(psp->pr_clname, sclass[t->t_cid].cl_name,
2624 2624 sizeof (psp->pr_clname) - 1);
2625 2625 bzero(psp->pr_name, sizeof (psp->pr_name)); /* XXX ??? */
2626 2626 psp->pr_onpro = t->t_cpu->cpu_id;
2627 2627 psp->pr_bindpro = t->t_bind_cpu;
2628 2628 psp->pr_bindpset = t->t_bind_pset;
2629 2629 psp->pr_lgrp = t->t_lpl->lpl_lgrpid;
2630 2630 }
2631 2631 #endif /* _SYSCALL32_IMPL */
2632 2632
2633 2633 #ifdef _SYSCALL32_IMPL
2634 2634
2635 2635 #define PR_COPY_FIELD(s, d, field) d->field = s->field
2636 2636
2637 2637 #define PR_COPY_FIELD_ILP32(s, d, field) \
2638 2638 if (s->pr_dmodel == PR_MODEL_ILP32) { \
2639 2639 d->field = s->field; \
2640 2640 }
2641 2641
2642 2642 #define PR_COPY_TIMESPEC(s, d, field) \
2643 2643 TIMESPEC_TO_TIMESPEC32(&d->field, &s->field);
2644 2644
2645 2645 #define PR_COPY_BUF(s, d, field) \
2646 2646 bcopy(s->field, d->field, sizeof (d->field));
2647 2647
2648 2648 #define PR_IGNORE_FIELD(s, d, field)
2649 2649
2650 2650 void
2651 2651 lwpsinfo_kto32(const struct lwpsinfo *src, struct lwpsinfo32 *dest)
2652 2652 {
2653 2653 bzero(dest, sizeof (*dest));
2654 2654
2655 2655 PR_COPY_FIELD(src, dest, pr_flag);
2656 2656 PR_COPY_FIELD(src, dest, pr_lwpid);
2657 2657 PR_IGNORE_FIELD(src, dest, pr_addr);
2658 2658 PR_IGNORE_FIELD(src, dest, pr_wchan);
2659 2659 PR_COPY_FIELD(src, dest, pr_stype);
2660 2660 PR_COPY_FIELD(src, dest, pr_state);
2661 2661 PR_COPY_FIELD(src, dest, pr_sname);
2662 2662 PR_COPY_FIELD(src, dest, pr_nice);
2663 2663 PR_COPY_FIELD(src, dest, pr_syscall);
2664 2664 PR_COPY_FIELD(src, dest, pr_oldpri);
2665 2665 PR_COPY_FIELD(src, dest, pr_cpu);
2666 2666 PR_COPY_FIELD(src, dest, pr_pri);
2667 2667 PR_COPY_FIELD(src, dest, pr_pctcpu);
2668 2668 PR_COPY_TIMESPEC(src, dest, pr_start);
2669 2669 PR_COPY_BUF(src, dest, pr_clname);
2670 2670 PR_COPY_BUF(src, dest, pr_name);
2671 2671 PR_COPY_FIELD(src, dest, pr_onpro);
2672 2672 PR_COPY_FIELD(src, dest, pr_bindpro);
2673 2673 PR_COPY_FIELD(src, dest, pr_bindpset);
2674 2674 PR_COPY_FIELD(src, dest, pr_lgrp);
2675 2675 }
2676 2676
2677 2677 void
2678 2678 psinfo_kto32(const struct psinfo *src, struct psinfo32 *dest)
2679 2679 {
2680 2680 bzero(dest, sizeof (*dest));
2681 2681
2682 2682 PR_COPY_FIELD(src, dest, pr_flag);
2683 2683 PR_COPY_FIELD(src, dest, pr_nlwp);
2684 2684 PR_COPY_FIELD(src, dest, pr_pid);
2685 2685 PR_COPY_FIELD(src, dest, pr_ppid);
2686 2686 PR_COPY_FIELD(src, dest, pr_pgid);
2687 2687 PR_COPY_FIELD(src, dest, pr_sid);
2688 2688 PR_COPY_FIELD(src, dest, pr_uid);
2689 2689 PR_COPY_FIELD(src, dest, pr_euid);
2690 2690 PR_COPY_FIELD(src, dest, pr_gid);
2691 2691 PR_COPY_FIELD(src, dest, pr_egid);
2692 2692 PR_IGNORE_FIELD(src, dest, pr_addr);
2693 2693 PR_COPY_FIELD_ILP32(src, dest, pr_size);
2694 2694 PR_COPY_FIELD_ILP32(src, dest, pr_rssize);
2695 2695 PR_COPY_FIELD(src, dest, pr_ttydev);
2696 2696 PR_COPY_FIELD(src, dest, pr_pctcpu);
2697 2697 PR_COPY_FIELD(src, dest, pr_pctmem);
2698 2698 PR_COPY_TIMESPEC(src, dest, pr_start);
2699 2699 PR_COPY_TIMESPEC(src, dest, pr_time);
2700 2700 PR_COPY_TIMESPEC(src, dest, pr_ctime);
2701 2701 PR_COPY_BUF(src, dest, pr_fname);
2702 2702 PR_COPY_BUF(src, dest, pr_psargs);
2703 2703 PR_COPY_FIELD(src, dest, pr_wstat);
2704 2704 PR_COPY_FIELD(src, dest, pr_argc);
2705 2705 PR_COPY_FIELD_ILP32(src, dest, pr_argv);
2706 2706 PR_COPY_FIELD_ILP32(src, dest, pr_envp);
2707 2707 PR_COPY_FIELD(src, dest, pr_dmodel);
2708 2708 PR_COPY_FIELD(src, dest, pr_taskid);
2709 2709 PR_COPY_FIELD(src, dest, pr_projid);
2710 2710 PR_COPY_FIELD(src, dest, pr_nzomb);
2711 2711 PR_COPY_FIELD(src, dest, pr_poolid);
2712 2712 PR_COPY_FIELD(src, dest, pr_contract);
2713 2713 PR_COPY_FIELD(src, dest, pr_poolid);
2714 2714 PR_COPY_FIELD(src, dest, pr_poolid);
2715 2715
2716 2716 lwpsinfo_kto32(&src->pr_lwp, &dest->pr_lwp);
2717 2717 }
2718 2718
2719 2719 #undef PR_COPY_FIELD
2720 2720 #undef PR_COPY_FIELD_ILP32
2721 2721 #undef PR_COPY_TIMESPEC
2722 2722 #undef PR_COPY_BUF
2723 2723 #undef PR_IGNORE_FIELD
2724 2724
2725 2725 #endif /* _SYSCALL32_IMPL */
2726 2726
2727 2727 /*
2728 2728 * This used to get called when microstate accounting was disabled but
2729 2729 * microstate information was requested. Since microstate accounting is on
2730 2730 * regardless of the proc flags, this simply makes it appear to procfs that
2731 2731 * microstate accounting is on. This is relatively meaningless since you
2732 2732 * can't turn it off, but this is here for the sake of appearances.
2733 2733 */
2734 2734
2735 2735 /*ARGSUSED*/
2736 2736 void
2737 2737 estimate_msacct(kthread_t *t, hrtime_t curtime)
2738 2738 {
2739 2739 proc_t *p;
2740 2740
2741 2741 if (t == NULL)
2742 2742 return;
2743 2743
2744 2744 p = ttoproc(t);
2745 2745 ASSERT(MUTEX_HELD(&p->p_lock));
2746 2746
2747 2747 /*
2748 2748 * A system process (p0) could be referenced if the thread is
2749 2749 * in the process of exiting. Don't turn on microstate accounting
2750 2750 * in that case.
2751 2751 */
2752 2752 if (p->p_flag & SSYS)
2753 2753 return;
2754 2754
2755 2755 /*
2756 2756 * Loop through all the LWPs (kernel threads) in the process.
2757 2757 */
2758 2758 t = p->p_tlist;
2759 2759 do {
2760 2760 t->t_proc_flag |= TP_MSACCT;
2761 2761 } while ((t = t->t_forw) != p->p_tlist);
2762 2762
2763 2763 p->p_flag |= SMSACCT; /* set process-wide MSACCT */
2764 2764 }
2765 2765
2766 2766 /*
2767 2767 * It's not really possible to disable microstate accounting anymore.
2768 2768 * However, this routine simply turns off the ms accounting flags in a process.
2769 2769 * This way procfs can still pretend to turn microstate accounting on and
2770 2770 * off for a process, but it actually doesn't do anything. This is
2771 2771 * a neutered form of preemptive idiot-proofing.
2772 2772 */
2773 2773 void
2774 2774 disable_msacct(proc_t *p)
2775 2775 {
2776 2776 kthread_t *t;
2777 2777
2778 2778 ASSERT(MUTEX_HELD(&p->p_lock));
2779 2779
2780 2780 p->p_flag &= ~SMSACCT; /* clear process-wide MSACCT */
2781 2781 /*
2782 2782 * Loop through all the LWPs (kernel threads) in the process.
2783 2783 */
2784 2784 if ((t = p->p_tlist) != NULL) {
2785 2785 do {
2786 2786 /* clear per-thread flag */
2787 2787 t->t_proc_flag &= ~TP_MSACCT;
2788 2788 } while ((t = t->t_forw) != p->p_tlist);
2789 2789 }
2790 2790 }
2791 2791
2792 2792 /*
2793 2793 * Return resource usage information.
2794 2794 */
2795 2795 void
2796 2796 prgetusage(kthread_t *t, prhusage_t *pup)
2797 2797 {
2798 2798 klwp_t *lwp = ttolwp(t);
2799 2799 hrtime_t *mstimep;
2800 2800 struct mstate *ms = &lwp->lwp_mstate;
2801 2801 int state;
2802 2802 int i;
2803 2803 hrtime_t curtime;
2804 2804 hrtime_t waitrq;
2805 2805 hrtime_t tmp1;
2806 2806
2807 2807 curtime = gethrtime_unscaled();
2808 2808
2809 2809 pup->pr_lwpid = t->t_tid;
2810 2810 pup->pr_count = 1;
2811 2811 pup->pr_create = ms->ms_start;
2812 2812 pup->pr_term = ms->ms_term;
2813 2813 scalehrtime(&pup->pr_create);
2814 2814 scalehrtime(&pup->pr_term);
2815 2815 if (ms->ms_term == 0) {
2816 2816 pup->pr_rtime = curtime - ms->ms_start;
2817 2817 scalehrtime(&pup->pr_rtime);
2818 2818 } else {
2819 2819 pup->pr_rtime = ms->ms_term - ms->ms_start;
2820 2820 scalehrtime(&pup->pr_rtime);
2821 2821 }
2822 2822
2823 2823
2824 2824 pup->pr_utime = ms->ms_acct[LMS_USER];
2825 2825 pup->pr_stime = ms->ms_acct[LMS_SYSTEM];
2826 2826 pup->pr_ttime = ms->ms_acct[LMS_TRAP];
2827 2827 pup->pr_tftime = ms->ms_acct[LMS_TFAULT];
2828 2828 pup->pr_dftime = ms->ms_acct[LMS_DFAULT];
2829 2829 pup->pr_kftime = ms->ms_acct[LMS_KFAULT];
2830 2830 pup->pr_ltime = ms->ms_acct[LMS_USER_LOCK];
2831 2831 pup->pr_slptime = ms->ms_acct[LMS_SLEEP];
2832 2832 pup->pr_wtime = ms->ms_acct[LMS_WAIT_CPU];
2833 2833 pup->pr_stoptime = ms->ms_acct[LMS_STOPPED];
2834 2834
2835 2835 prscaleusage(pup);
2836 2836
2837 2837 /*
2838 2838 * Adjust for time waiting in the dispatcher queue.
2839 2839 */
2840 2840 waitrq = t->t_waitrq; /* hopefully atomic */
2841 2841 if (waitrq != 0) {
2842 2842 if (waitrq > curtime) {
2843 2843 curtime = gethrtime_unscaled();
2844 2844 }
2845 2845 tmp1 = curtime - waitrq;
2846 2846 scalehrtime(&tmp1);
2847 2847 pup->pr_wtime += tmp1;
2848 2848 curtime = waitrq;
2849 2849 }
2850 2850
2851 2851 /*
2852 2852 * Adjust for time spent in current microstate.
2853 2853 */
2854 2854 if (ms->ms_state_start > curtime) {
2855 2855 curtime = gethrtime_unscaled();
2856 2856 }
2857 2857
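	/*
	 * The thread can change microstate while we are looking (nothing we
	 * hold prevents it), so ms_state_start may move past our curtime
	 * snapshot and make the elapsed time negative.  If so, re-read the
	 * clock and retry, giving up after MAX_ITERS_SPIN attempts.
	 */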
2858 2858 i = 0;
2859 2859 do {
2860 2860 switch (state = t->t_mstate) {
2861 2861 case LMS_SLEEP:
2862 2862 /*
2863 2863 * Update the timer for the current sleep state.
2864 2864 */
2865 2865 switch (state = ms->ms_prev) {
2866 2866 case LMS_TFAULT:
2867 2867 case LMS_DFAULT:
2868 2868 case LMS_KFAULT:
2869 2869 case LMS_USER_LOCK:
2870 2870 break;
2871 2871 default:
2872 2872 state = LMS_SLEEP;
2873 2873 break;
2874 2874 }
2875 2875 break;
2876 2876 case LMS_TFAULT:
2877 2877 case LMS_DFAULT:
2878 2878 case LMS_KFAULT:
2879 2879 case LMS_USER_LOCK:
2880 2880 state = LMS_SYSTEM;
2881 2881 break;
2882 2882 }
2883 2883 switch (state) {
2884 2884 case LMS_USER: mstimep = &pup->pr_utime; break;
2885 2885 case LMS_SYSTEM: mstimep = &pup->pr_stime; break;
2886 2886 case LMS_TRAP: mstimep = &pup->pr_ttime; break;
2887 2887 case LMS_TFAULT: mstimep = &pup->pr_tftime; break;
2888 2888 case LMS_DFAULT: mstimep = &pup->pr_dftime; break;
2889 2889 case LMS_KFAULT: mstimep = &pup->pr_kftime; break;
2890 2890 case LMS_USER_LOCK: mstimep = &pup->pr_ltime; break;
2891 2891 case LMS_SLEEP: mstimep = &pup->pr_slptime; break;
2892 2892 case LMS_WAIT_CPU: mstimep = &pup->pr_wtime; break;
2893 2893 case LMS_STOPPED: mstimep = &pup->pr_stoptime; break;
2894 2894 default: panic("prgetusage: unknown microstate");
2895 2895 }
2896 2896 tmp1 = curtime - ms->ms_state_start;
2897 2897 if (tmp1 < 0) {
2898 2898 curtime = gethrtime_unscaled();
2899 2899 i++;
2900 2900 continue;
2901 2901 }
2902 2902 scalehrtime(&tmp1);
2903 2903 } while (tmp1 < 0 && i < MAX_ITERS_SPIN);
2904 2904
2905 2905 *mstimep += tmp1;
2906 2906
2907 2907 /* update pup timestamp */
2908 2908 pup->pr_tstamp = curtime;
2909 2909 scalehrtime(&pup->pr_tstamp);
2910 2910
2911 2911 /*
2912 2912 * Resource usage counters.
2913 2913 */
2914 2914 pup->pr_minf = lwp->lwp_ru.minflt;
2915 2915 pup->pr_majf = lwp->lwp_ru.majflt;
2916 2916 pup->pr_nswap = lwp->lwp_ru.nswap;
2917 2917 pup->pr_inblk = lwp->lwp_ru.inblock;
2918 2918 pup->pr_oublk = lwp->lwp_ru.oublock;
2919 2919 pup->pr_msnd = lwp->lwp_ru.msgsnd;
2920 2920 pup->pr_mrcv = lwp->lwp_ru.msgrcv;
2921 2921 pup->pr_sigs = lwp->lwp_ru.nsignals;
2922 2922 pup->pr_vctx = lwp->lwp_ru.nvcsw;
2923 2923 pup->pr_ictx = lwp->lwp_ru.nivcsw;
2924 2924 pup->pr_sysc = lwp->lwp_ru.sysc;
2925 2925 pup->pr_ioch = lwp->lwp_ru.ioch;
2926 2926 }
2927 2927
2928 2928 /*
2929 2929 * Convert ms_acct stats from unscaled high-res time to nanoseconds
2930 2930 */
2931 2931 void
2932 2932 prscaleusage(prhusage_t *usg)
2933 2933 {
2934 2934 scalehrtime(&usg->pr_utime);
2935 2935 scalehrtime(&usg->pr_stime);
2936 2936 scalehrtime(&usg->pr_ttime);
2937 2937 scalehrtime(&usg->pr_tftime);
2938 2938 scalehrtime(&usg->pr_dftime);
2939 2939 scalehrtime(&usg->pr_kftime);
2940 2940 scalehrtime(&usg->pr_ltime);
2941 2941 scalehrtime(&usg->pr_slptime);
2942 2942 scalehrtime(&usg->pr_wtime);
2943 2943 scalehrtime(&usg->pr_stoptime);
2944 2944 }
2945 2945
2946 2946
2947 2947 /*
2948 2948 * Sum resource usage information.
2949 2949 */
2950 2950 void
2951 2951 praddusage(kthread_t *t, prhusage_t *pup)
2952 2952 {
2953 2953 klwp_t *lwp = ttolwp(t);
2954 2954 hrtime_t *mstimep;
2955 2955 struct mstate *ms = &lwp->lwp_mstate;
2956 2956 int state;
2957 2957 int i;
2958 2958 hrtime_t curtime;
2959 2959 hrtime_t waitrq;
2960 2960 hrtime_t tmp;
2961 2961 prhusage_t conv;
2962 2962
2963 2963 curtime = gethrtime_unscaled();
2964 2964
2965 2965 if (ms->ms_term == 0) {
2966 2966 tmp = curtime - ms->ms_start;
2967 2967 scalehrtime(&tmp);
2968 2968 pup->pr_rtime += tmp;
2969 2969 } else {
2970 2970 tmp = ms->ms_term - ms->ms_start;
2971 2971 scalehrtime(&tmp);
2972 2972 pup->pr_rtime += tmp;
2973 2973 }
2974 2974
2975 2975 conv.pr_utime = ms->ms_acct[LMS_USER];
2976 2976 conv.pr_stime = ms->ms_acct[LMS_SYSTEM];
2977 2977 conv.pr_ttime = ms->ms_acct[LMS_TRAP];
2978 2978 conv.pr_tftime = ms->ms_acct[LMS_TFAULT];
2979 2979 conv.pr_dftime = ms->ms_acct[LMS_DFAULT];
2980 2980 conv.pr_kftime = ms->ms_acct[LMS_KFAULT];
2981 2981 conv.pr_ltime = ms->ms_acct[LMS_USER_LOCK];
2982 2982 conv.pr_slptime = ms->ms_acct[LMS_SLEEP];
2983 2983 conv.pr_wtime = ms->ms_acct[LMS_WAIT_CPU];
2984 2984 conv.pr_stoptime = ms->ms_acct[LMS_STOPPED];
2985 2985
2986 2986 prscaleusage(&conv);
2987 2987
2988 2988 pup->pr_utime += conv.pr_utime;
2989 2989 pup->pr_stime += conv.pr_stime;
2990 2990 pup->pr_ttime += conv.pr_ttime;
2991 2991 pup->pr_tftime += conv.pr_tftime;
2992 2992 pup->pr_dftime += conv.pr_dftime;
2993 2993 pup->pr_kftime += conv.pr_kftime;
2994 2994 pup->pr_ltime += conv.pr_ltime;
2995 2995 pup->pr_slptime += conv.pr_slptime;
2996 2996 pup->pr_wtime += conv.pr_wtime;
2997 2997 pup->pr_stoptime += conv.pr_stoptime;
2998 2998
2999 2999 /*
3000 3000 * Adjust for time waiting in the dispatcher queue.
3001 3001 */
3002 3002 waitrq = t->t_waitrq; /* hopefully atomic */
3003 3003 if (waitrq != 0) {
3004 3004 if (waitrq > curtime) {
3005 3005 curtime = gethrtime_unscaled();
3006 3006 }
3007 3007 tmp = curtime - waitrq;
3008 3008 scalehrtime(&tmp);
3009 3009 pup->pr_wtime += tmp;
3010 3010 curtime = waitrq;
3011 3011 }
3012 3012
3013 3013 /*
3014 3014 * Adjust for time spent in current microstate.
3015 3015 */
3016 3016 if (ms->ms_state_start > curtime) {
3017 3017 curtime = gethrtime_unscaled();
3018 3018 }
3019 3019
3020 3020 i = 0;
3021 3021 do {
3022 3022 switch (state = t->t_mstate) {
3023 3023 case LMS_SLEEP:
3024 3024 /*
3025 3025 * Update the timer for the current sleep state.
3026 3026 */
3027 3027 switch (state = ms->ms_prev) {
3028 3028 case LMS_TFAULT:
3029 3029 case LMS_DFAULT:
3030 3030 case LMS_KFAULT:
3031 3031 case LMS_USER_LOCK:
3032 3032 break;
3033 3033 default:
3034 3034 state = LMS_SLEEP;
3035 3035 break;
3036 3036 }
3037 3037 break;
3038 3038 case LMS_TFAULT:
3039 3039 case LMS_DFAULT:
3040 3040 case LMS_KFAULT:
3041 3041 case LMS_USER_LOCK:
3042 3042 state = LMS_SYSTEM;
3043 3043 break;
3044 3044 }
3045 3045 switch (state) {
3046 3046 case LMS_USER: mstimep = &pup->pr_utime; break;
3047 3047 case LMS_SYSTEM: mstimep = &pup->pr_stime; break;
3048 3048 case LMS_TRAP: mstimep = &pup->pr_ttime; break;
3049 3049 case LMS_TFAULT: mstimep = &pup->pr_tftime; break;
3050 3050 case LMS_DFAULT: mstimep = &pup->pr_dftime; break;
3051 3051 case LMS_KFAULT: mstimep = &pup->pr_kftime; break;
3052 3052 case LMS_USER_LOCK: mstimep = &pup->pr_ltime; break;
3053 3053 case LMS_SLEEP: mstimep = &pup->pr_slptime; break;
3054 3054 case LMS_WAIT_CPU: mstimep = &pup->pr_wtime; break;
3055 3055 case LMS_STOPPED: mstimep = &pup->pr_stoptime; break;
3056 3056 default: panic("praddusage: unknown microstate");
3057 3057 }
3058 3058 tmp = curtime - ms->ms_state_start;
3059 3059 if (tmp < 0) {
3060 3060 curtime = gethrtime_unscaled();
3061 3061 i++;
3062 3062 continue;
3063 3063 }
3064 3064 scalehrtime(&tmp);
3065 3065 } while (tmp < 0 && i < MAX_ITERS_SPIN);
3066 3066
3067 3067 *mstimep += tmp;
3068 3068
3069 3069 /* update pup timestamp */
3070 3070 pup->pr_tstamp = curtime;
3071 3071 scalehrtime(&pup->pr_tstamp);
3072 3072
3073 3073 /*
3074 3074 * Resource usage counters.
3075 3075 */
3076 3076 pup->pr_minf += lwp->lwp_ru.minflt;
3077 3077 pup->pr_majf += lwp->lwp_ru.majflt;
3078 3078 pup->pr_nswap += lwp->lwp_ru.nswap;
3079 3079 pup->pr_inblk += lwp->lwp_ru.inblock;
3080 3080 pup->pr_oublk += lwp->lwp_ru.oublock;
3081 3081 pup->pr_msnd += lwp->lwp_ru.msgsnd;
3082 3082 pup->pr_mrcv += lwp->lwp_ru.msgrcv;
3083 3083 pup->pr_sigs += lwp->lwp_ru.nsignals;
3084 3084 pup->pr_vctx += lwp->lwp_ru.nvcsw;
3085 3085 pup->pr_ictx += lwp->lwp_ru.nivcsw;
3086 3086 pup->pr_sysc += lwp->lwp_ru.sysc;
3087 3087 pup->pr_ioch += lwp->lwp_ru.ioch;
3088 3088 }
3089 3089
3090 3090 /*
3091 3091 * Convert a prhusage_t to a prusage_t.
3092 3092 * This means convert each hrtime_t to a timestruc_t
3093 3093 * and copy the count fields uint64_t => ulong_t.
3094 3094 */
3095 3095 void
3096 3096 prcvtusage(prhusage_t *pup, prusage_t *upup)
3097 3097 {
3098 3098 uint64_t *ullp;
3099 3099 ulong_t *ulp;
3100 3100 int i;
3101 3101
3102 3102 upup->pr_lwpid = pup->pr_lwpid;
3103 3103 upup->pr_count = pup->pr_count;
3104 3104
3105 3105 hrt2ts(pup->pr_tstamp, &upup->pr_tstamp);
3106 3106 hrt2ts(pup->pr_create, &upup->pr_create);
3107 3107 hrt2ts(pup->pr_term, &upup->pr_term);
3108 3108 hrt2ts(pup->pr_rtime, &upup->pr_rtime);
3109 3109 hrt2ts(pup->pr_utime, &upup->pr_utime);
3110 3110 hrt2ts(pup->pr_stime, &upup->pr_stime);
3111 3111 hrt2ts(pup->pr_ttime, &upup->pr_ttime);
3112 3112 hrt2ts(pup->pr_tftime, &upup->pr_tftime);
3113 3113 hrt2ts(pup->pr_dftime, &upup->pr_dftime);
3114 3114 hrt2ts(pup->pr_kftime, &upup->pr_kftime);
3115 3115 hrt2ts(pup->pr_ltime, &upup->pr_ltime);
3116 3116 hrt2ts(pup->pr_slptime, &upup->pr_slptime);
3117 3117 hrt2ts(pup->pr_wtime, &upup->pr_wtime);
3118 3118 hrt2ts(pup->pr_stoptime, &upup->pr_stoptime);
3119 3119 bzero(upup->filltime, sizeof (upup->filltime));
3120 3120
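	/*
	 * The remaining members are counters.  Copy them as a flat array:
	 * pr_minf through the trailing filler are 22 consecutive uint64_t
	 * fields of prhusage_t with corresponding ulong_t fields in
	 * prusage_t.
	 */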
3121 3121 ullp = &pup->pr_minf;
3122 3122 ulp = &upup->pr_minf;
3123 3123 for (i = 0; i < 22; i++)
3124 3124 *ulp++ = (ulong_t)*ullp++;
3125 3125 }
3126 3126
3127 3127 #ifdef _SYSCALL32_IMPL
3128 3128 void
3129 3129 prcvtusage32(prhusage_t *pup, prusage32_t *upup)
3130 3130 {
3131 3131 uint64_t *ullp;
3132 3132 uint32_t *ulp;
3133 3133 int i;
3134 3134
3135 3135 upup->pr_lwpid = pup->pr_lwpid;
3136 3136 upup->pr_count = pup->pr_count;
3137 3137
3138 3138 hrt2ts32(pup->pr_tstamp, &upup->pr_tstamp);
3139 3139 hrt2ts32(pup->pr_create, &upup->pr_create);
3140 3140 hrt2ts32(pup->pr_term, &upup->pr_term);
3141 3141 hrt2ts32(pup->pr_rtime, &upup->pr_rtime);
3142 3142 hrt2ts32(pup->pr_utime, &upup->pr_utime);
3143 3143 hrt2ts32(pup->pr_stime, &upup->pr_stime);
3144 3144 hrt2ts32(pup->pr_ttime, &upup->pr_ttime);
3145 3145 hrt2ts32(pup->pr_tftime, &upup->pr_tftime);
3146 3146 hrt2ts32(pup->pr_dftime, &upup->pr_dftime);
3147 3147 hrt2ts32(pup->pr_kftime, &upup->pr_kftime);
3148 3148 hrt2ts32(pup->pr_ltime, &upup->pr_ltime);
3149 3149 hrt2ts32(pup->pr_slptime, &upup->pr_slptime);
3150 3150 hrt2ts32(pup->pr_wtime, &upup->pr_wtime);
3151 3151 hrt2ts32(pup->pr_stoptime, &upup->pr_stoptime);
3152 3152 bzero(upup->filltime, sizeof (upup->filltime));
3153 3153
3154 3154 ullp = &pup->pr_minf;
3155 3155 ulp = &upup->pr_minf;
3156 3156 for (i = 0; i < 22; i++)
3157 3157 *ulp++ = (uint32_t)*ullp++;
3158 3158 }
3159 3159 #endif /* _SYSCALL32_IMPL */
3160 3160
3161 3161 /*
3162 3162 * Determine whether a set is empty.
3163 3163 */
3164 3164 int
3165 3165 setisempty(uint32_t *sp, uint_t n)
3166 3166 {
3167 3167 while (n--)
3168 3168 if (*sp++)
3169 3169 return (0);
3170 3170 return (1);
3171 3171 }
3172 3172
3173 3173 /*
3174 3174 * Utility routine for establishing a watched area in the process.
3175 3175 * Keep the list of watched areas sorted by virtual address.
3176 3176 */
3177 3177 int
3178 3178 set_watched_area(proc_t *p, struct watched_area *pwa)
3179 3179 {
3180 3180 caddr_t vaddr = pwa->wa_vaddr;
3181 3181 caddr_t eaddr = pwa->wa_eaddr;
3182 3182 ulong_t flags = pwa->wa_flags;
3183 3183 struct watched_area *target;
3184 3184 avl_index_t where;
3185 3185 int error = 0;
3186 3186
3187 3187 /* we must not be holding p->p_lock, but the process must be locked */
3188 3188 ASSERT(MUTEX_NOT_HELD(&p->p_lock));
3189 3189 ASSERT(p->p_proc_flag & P_PR_LOCK);
3190 3190
3191 3191 /*
3192 3192 * If this is our first watchpoint, enable watchpoints for the process.
3193 3193 */
3194 3194 if (!pr_watch_active(p)) {
3195 3195 kthread_t *t;
3196 3196
3197 3197 mutex_enter(&p->p_lock);
3198 3198 if ((t = p->p_tlist) != NULL) {
3199 3199 do {
3200 3200 watch_enable(t);
3201 3201 } while ((t = t->t_forw) != p->p_tlist);
3202 3202 }
3203 3203 mutex_exit(&p->p_lock);
3204 3204 }
3205 3205
3206 3206 target = pr_find_watched_area(p, pwa, &where);
3207 3207 if (target != NULL) {
3208 3208 /*
3209 3209 * We discovered an existing, overlapping watched area.
3210 3210 * Allow it only if it is an exact match.
3211 3211 */
3212 3212 if (target->wa_vaddr != vaddr ||
3213 3213 target->wa_eaddr != eaddr)
3214 3214 error = EINVAL;
3215 3215 else if (target->wa_flags != flags) {
3216 3216 error = set_watched_page(p, vaddr, eaddr,
3217 3217 flags, target->wa_flags);
3218 3218 target->wa_flags = flags;
3219 3219 }
3220 3220 kmem_free(pwa, sizeof (struct watched_area));
3221 3221 } else {
3222 3222 avl_insert(&p->p_warea, pwa, where);
3223 3223 error = set_watched_page(p, vaddr, eaddr, flags, 0);
3224 3224 }
3225 3225
3226 3226 return (error);
3227 3227 }
3228 3228
3229 3229 /*
3230 3230 * Utility routine for clearing a watched area in the process.
3231 3231 * Must be an exact match of the virtual address.
3232 3232 * size and flags don't matter.
3233 3233 */
3234 3234 int
3235 3235 clear_watched_area(proc_t *p, struct watched_area *pwa)
3236 3236 {
3237 3237 struct watched_area *found;
3238 3238
3239 3239 /* we must not be holding p->p_lock, but the process must be locked */
3240 3240 ASSERT(MUTEX_NOT_HELD(&p->p_lock));
3241 3241 ASSERT(p->p_proc_flag & P_PR_LOCK);
3242 3242
3243 3243
3244 3244 if (!pr_watch_active(p)) {
3245 3245 kmem_free(pwa, sizeof (struct watched_area));
3246 3246 return (0);
3247 3247 }
3248 3248
3249 3249 /*
3250 3250 * Look for a matching address in the watched areas. If a match is
3251 3251 * found, clear the old watched area and adjust the watched page(s). It
3252 3252 * is not an error if there is no match.
3253 3253 */
3254 3254 if ((found = pr_find_watched_area(p, pwa, NULL)) != NULL &&
3255 3255 found->wa_vaddr == pwa->wa_vaddr) {
3256 3256 clear_watched_page(p, found->wa_vaddr, found->wa_eaddr,
3257 3257 found->wa_flags);
3258 3258 avl_remove(&p->p_warea, found);
3259 3259 kmem_free(found, sizeof (struct watched_area));
3260 3260 }
3261 3261
3262 3262 kmem_free(pwa, sizeof (struct watched_area));
3263 3263
3264 3264 /*
3265 3265 * If we removed the last watched area from the process, disable
3266 3266 * watchpoints.
3267 3267 */
3268 3268 if (!pr_watch_active(p)) {
3269 3269 kthread_t *t;
3270 3270
3271 3271 mutex_enter(&p->p_lock);
3272 3272 if ((t = p->p_tlist) != NULL) {
3273 3273 do {
3274 3274 watch_disable(t);
3275 3275 } while ((t = t->t_forw) != p->p_tlist);
3276 3276 }
3277 3277 mutex_exit(&p->p_lock);
3278 3278 }
3279 3279
3280 3280 return (0);
3281 3281 }
3282 3282
3283 3283 /*
3284 3284 * Frees all the watched_area structures
3285 3285 */
3286 3286 void
3287 3287 pr_free_watchpoints(proc_t *p)
3288 3288 {
3289 3289 struct watched_area *delp;
3290 3290 void *cookie;
3291 3291
3292 3292 cookie = NULL;
3293 3293 while ((delp = avl_destroy_nodes(&p->p_warea, &cookie)) != NULL)
3294 3294 kmem_free(delp, sizeof (struct watched_area));
3295 3295
3296 3296 avl_destroy(&p->p_warea);
3297 3297 }
3298 3298
3299 3299 /*
3300 3300 * This one is called by the traced process to unwatch all the
3301 3301 * pages while deallocating the list of watched_page structs.
3302 3302 */
3303 3303 void
3304 3304 pr_free_watched_pages(proc_t *p)
3305 3305 {
3306 3306 struct as *as = p->p_as;
3307 3307 struct watched_page *pwp;
3308 3308 uint_t prot;
3309 3309 int retrycnt, err;
3310 3310 void *cookie;
3311 3311
3312 3312 if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
3313 3313 return;
3314 3314
3315 3315 ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
3316 3316 AS_LOCK_ENTER(as, RW_WRITER);
3317 3317
3318 3318 pwp = avl_first(&as->a_wpage);
3319 3319
3320 3320 cookie = NULL;
3321 3321 while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {
3322 3322 retrycnt = 0;
3323 3323 if ((prot = pwp->wp_oprot) != 0) {
3324 3324 caddr_t addr = pwp->wp_vaddr;
3325 3325 struct seg *seg;
3326 3326 retry:
3327 3327
3328 3328 if ((pwp->wp_prot != prot ||
3329 3329 (pwp->wp_flags & WP_NOWATCH)) &&
3330 3330 (seg = as_segat(as, addr)) != NULL) {
3331 3331 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
3332 3332 if (err == IE_RETRY) {
3333 3333 ASSERT(retrycnt == 0);
3334 3334 retrycnt++;
3335 3335 goto retry;
3336 3336 }
3337 3337 }
3338 3338 }
3339 3339 kmem_free(pwp, sizeof (struct watched_page));
3340 3340 }
3341 3341
3342 3342 avl_destroy(&as->a_wpage);
3343 3343 p->p_wprot = NULL;
3344 3344
3345 3345 AS_LOCK_EXIT(as);
3346 3346 }
3347 3347
3348 3348 /*
3349 3349 * Insert a watched area into the list of watched pages.
3350 3350 * If oflags is zero then we are adding a new watched area.
3351 3351 * Otherwise we are changing the flags of an existing watched area.
3352 3352 */
3353 3353 static int
3354 3354 set_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr,
3355 - ulong_t flags, ulong_t oflags)
3355 + ulong_t flags, ulong_t oflags)
3356 3356 {
3357 3357 struct as *as = p->p_as;
3358 3358 avl_tree_t *pwp_tree;
3359 3359 struct watched_page *pwp, *newpwp;
3360 3360 struct watched_page tpw;
3361 3361 avl_index_t where;
3362 3362 struct seg *seg;
3363 3363 uint_t prot;
3364 3364 caddr_t addr;
3365 3365
3366 3366 /*
3367 3367 * We need to pre-allocate a list of structures before we grab the
3368 3368 * address space lock to avoid calling kmem_alloc(KM_SLEEP) with locks
3369 3369 * held.
3370 3370 */
3371 3371 newpwp = NULL;
3372 3372 for (addr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3373 3373 addr < eaddr; addr += PAGESIZE) {
3374 3374 pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
3375 3375 pwp->wp_list = newpwp;
3376 3376 newpwp = pwp;
3377 3377 }
3378 3378
3379 3379 AS_LOCK_ENTER(as, RW_WRITER);
3380 3380
3381 3381 /*
3382 3382 * Search for an existing watched page to contain the watched area.
3383 3383 * If none is found, grab a new one from the available list
3384 3384 * and insert it in the active list, keeping the list sorted
3385 3385 * by user-level virtual address.
3386 3386 */
3387 3387 if (p->p_flag & SVFWAIT)
3388 3388 pwp_tree = &p->p_wpage;
3389 3389 else
3390 3390 pwp_tree = &as->a_wpage;
3391 3391
3392 3392 again:
3393 3393 if (avl_numnodes(pwp_tree) > prnwatch) {
3394 3394 AS_LOCK_EXIT(as);
3395 3395 while (newpwp != NULL) {
3396 3396 pwp = newpwp->wp_list;
3397 3397 kmem_free(newpwp, sizeof (struct watched_page));
3398 3398 newpwp = pwp;
3399 3399 }
3400 3400 return (E2BIG);
3401 3401 }
3402 3402
3403 3403 tpw.wp_vaddr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3404 3404 if ((pwp = avl_find(pwp_tree, &tpw, &where)) == NULL) {
3405 3405 pwp = newpwp;
3406 3406 newpwp = newpwp->wp_list;
3407 3407 pwp->wp_list = NULL;
3408 3408 pwp->wp_vaddr = (caddr_t)((uintptr_t)vaddr &
3409 3409 (uintptr_t)PAGEMASK);
3410 3410 avl_insert(pwp_tree, pwp, where);
3411 3411 }
3412 3412
3413 3413 ASSERT(vaddr >= pwp->wp_vaddr && vaddr < pwp->wp_vaddr + PAGESIZE);
3414 3414
3415 3415 if (oflags & WA_READ)
3416 3416 pwp->wp_read--;
3417 3417 if (oflags & WA_WRITE)
3418 3418 pwp->wp_write--;
3419 3419 if (oflags & WA_EXEC)
3420 3420 pwp->wp_exec--;
3421 3421
3422 3422 ASSERT(pwp->wp_read >= 0);
3423 3423 ASSERT(pwp->wp_write >= 0);
3424 3424 ASSERT(pwp->wp_exec >= 0);
3425 3425
3426 3426 if (flags & WA_READ)
3427 3427 pwp->wp_read++;
3428 3428 if (flags & WA_WRITE)
3429 3429 pwp->wp_write++;
3430 3430 if (flags & WA_EXEC)
3431 3431 pwp->wp_exec++;
3432 3432
3433 3433 if (!(p->p_flag & SVFWAIT)) {
3434 3434 vaddr = pwp->wp_vaddr;
3435 3435 if (pwp->wp_oprot == 0 &&
3436 3436 (seg = as_segat(as, vaddr)) != NULL) {
3437 3437 SEGOP_GETPROT(seg, vaddr, 0, &prot);
3438 3438 pwp->wp_oprot = (uchar_t)prot;
3439 3439 pwp->wp_prot = (uchar_t)prot;
3440 3440 }
3441 3441 if (pwp->wp_oprot != 0) {
3442 3442 prot = pwp->wp_oprot;
3443 3443 if (pwp->wp_read)
3444 3444 prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3445 3445 if (pwp->wp_write)
3446 3446 prot &= ~PROT_WRITE;
3447 3447 if (pwp->wp_exec)
3448 3448 prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3449 3449 if (!(pwp->wp_flags & WP_NOWATCH) &&
3450 3450 pwp->wp_prot != prot &&
3451 3451 (pwp->wp_flags & WP_SETPROT) == 0) {
3452 3452 pwp->wp_flags |= WP_SETPROT;
3453 3453 pwp->wp_list = p->p_wprot;
3454 3454 p->p_wprot = pwp;
3455 3455 }
3456 3456 pwp->wp_prot = (uchar_t)prot;
3457 3457 }
3458 3458 }
3459 3459
3460 3460 /*
3461 3461 * If the watched area extends into the next page then do
3462 3462 * it over again with the virtual address of the next page.
3463 3463 */
3464 3464 if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
3465 3465 goto again;
3466 3466
3467 3467 AS_LOCK_EXIT(as);
3468 3468
3469 3469 /*
3470 3470 * Free any pages we may have over-allocated
3471 3471 */
3472 3472 while (newpwp != NULL) {
3473 3473 pwp = newpwp->wp_list;
3474 3474 kmem_free(newpwp, sizeof (struct watched_page));
3475 3475 newpwp = pwp;
3476 3476 }
3477 3477
3478 3478 return (0);
3479 3479 }
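Editor's illustration (not part of the change under review): the protection arithmetic that set_watched_page() and clear_watched_page() share can be modelled in isolation. A read or exec watchpoint has to fault on every access, so it strips all permissions from the page; a write watchpoint only needs to revoke PROT_WRITE. The sketch below is standalone user-space C under those assumptions, not kernel code.

#include <stdio.h>
#include <sys/mman.h>           /* PROT_READ, PROT_WRITE, PROT_EXEC */

/*
 * Model of the temporary protections applied to a watched page, given the
 * page's original protections and its per-flavour watchpoint counts.
 */
static unsigned int
watched_page_prot(unsigned int oprot, int nread, int nwrite, int nexec)
{
        unsigned int prot = oprot;

        if (nread > 0)          /* read watchpoints must trap every access */
                prot &= ~(PROT_READ | PROT_WRITE | PROT_EXEC);
        if (nwrite > 0)         /* write watchpoints only trap stores */
                prot &= ~PROT_WRITE;
        if (nexec > 0)          /* exec watchpoints must trap every access */
                prot &= ~(PROT_READ | PROT_WRITE | PROT_EXEC);

        return (prot);
}

int
main(void)
{
        /* a read|write|exec page with one write watchpoint keeps read|exec */
        (void) printf("%#x\n", watched_page_prot(
            PROT_READ | PROT_WRITE | PROT_EXEC, 0, 1, 0));
        return (0);
}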
3480 3480
3481 3481 /*
3482 3482 * Remove a watched area from the list of watched pages.
3483 3483 * A watched area may extend over more than one page.
3484 3484 */
3485 3485 static void
3486 3486 clear_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, ulong_t flags)
3487 3487 {
3488 3488 struct as *as = p->p_as;
3489 3489 struct watched_page *pwp;
3490 3490 struct watched_page tpw;
3491 3491 avl_tree_t *tree;
3492 3492 avl_index_t where;
3493 3493
3494 3494 AS_LOCK_ENTER(as, RW_WRITER);
3495 3495
3496 3496 if (p->p_flag & SVFWAIT)
3497 3497 tree = &p->p_wpage;
3498 3498 else
3499 3499 tree = &as->a_wpage;
3500 3500
3501 3501 tpw.wp_vaddr = vaddr =
3502 3502 (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3503 3503 pwp = avl_find(tree, &tpw, &where);
3504 3504 if (pwp == NULL)
3505 3505 pwp = avl_nearest(tree, where, AVL_AFTER);
3506 3506
3507 3507 while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3508 3508 ASSERT(vaddr <= pwp->wp_vaddr);
3509 3509
3510 3510 if (flags & WA_READ)
3511 3511 pwp->wp_read--;
3512 3512 if (flags & WA_WRITE)
3513 3513 pwp->wp_write--;
3514 3514 if (flags & WA_EXEC)
3515 3515 pwp->wp_exec--;
3516 3516
3517 3517 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec != 0) {
3518 3518 /*
3519 3519 * Reset the hat layer's protections on this page.
3520 3520 */
3521 3521 if (pwp->wp_oprot != 0) {
3522 3522 uint_t prot = pwp->wp_oprot;
3523 3523
3524 3524 if (pwp->wp_read)
3525 3525 prot &=
3526 3526 ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3527 3527 if (pwp->wp_write)
3528 3528 prot &= ~PROT_WRITE;
3529 3529 if (pwp->wp_exec)
3530 3530 prot &=
3531 3531 ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3532 3532 if (!(pwp->wp_flags & WP_NOWATCH) &&
3533 3533 pwp->wp_prot != prot &&
3534 3534 (pwp->wp_flags & WP_SETPROT) == 0) {
3535 3535 pwp->wp_flags |= WP_SETPROT;
3536 3536 pwp->wp_list = p->p_wprot;
3537 3537 p->p_wprot = pwp;
3538 3538 }
3539 3539 pwp->wp_prot = (uchar_t)prot;
3540 3540 }
3541 3541 } else {
3542 3542 /*
3543 3543 * No watched areas remain in this page.
3544 3544 * Reset everything to normal.
3545 3545 */
3546 3546 if (pwp->wp_oprot != 0) {
3547 3547 pwp->wp_prot = pwp->wp_oprot;
3548 3548 if ((pwp->wp_flags & WP_SETPROT) == 0) {
3549 3549 pwp->wp_flags |= WP_SETPROT;
3550 3550 pwp->wp_list = p->p_wprot;
3551 3551 p->p_wprot = pwp;
3552 3552 }
3553 3553 }
3554 3554 }
3555 3555
3556 3556 pwp = AVL_NEXT(tree, pwp);
3557 3557 }
3558 3558
3559 3559 AS_LOCK_EXIT(as);
3560 3560 }
3561 3561
3562 3562 /*
3563 3563 * Return the original protections for the specified page.
3564 3564 */
3565 3565 static void
3566 3566 getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
3567 3567 {
3568 3568 struct watched_page *pwp;
3569 3569 struct watched_page tpw;
3570 3570
3571 3571 ASSERT(AS_LOCK_HELD(as));
3572 3572
3573 3573 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3574 3574 if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
3575 3575 *prot = pwp->wp_oprot;
3576 3576 }
3577 3577
3578 3578 static prpagev_t *
3579 3579 pr_pagev_create(struct seg *seg, int check_noreserve)
3580 3580 {
3581 3581 prpagev_t *pagev = kmem_alloc(sizeof (prpagev_t), KM_SLEEP);
3582 3582 size_t total_pages = seg_pages(seg);
3583 3583
3584 3584 /*
3585 3585 * Limit the size of our vectors to pagev_lim pages at a time. We need
3586 3586 	 * 4 or 5 bytes of storage per page, so this means we limit ourselves
3587 3587 * to about a megabyte of kernel heap by default.
3588 3588 */
3589 3589 pagev->pg_npages = MIN(total_pages, pagev_lim);
3590 3590 pagev->pg_pnbase = 0;
3591 3591
3592 3592 pagev->pg_protv =
3593 3593 kmem_alloc(pagev->pg_npages * sizeof (uint_t), KM_SLEEP);
3594 3594
3595 3595 if (check_noreserve)
3596 3596 pagev->pg_incore =
3597 3597 kmem_alloc(pagev->pg_npages * sizeof (char), KM_SLEEP);
3598 3598 else
3599 3599 pagev->pg_incore = NULL;
3600 3600
3601 3601 return (pagev);
3602 3602 }
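A quick sanity check of the sizing comment in pr_pagev_create() above (editor's arithmetic only; the actual pagev_lim default is defined earlier in this file, and the 256K-entry figure below is assumed purely for illustration): each page costs a uint_t protection entry plus, when MAP_NORESERVE is being checked, a one-byte incore entry, i.e. the "4 or 5 bytes of storage per page".

#include <stdio.h>

int
main(void)
{
        size_t pagev_lim = 256 * 1024;                  /* assumed default */
        size_t protv = pagev_lim * sizeof (unsigned int);   /* 4 bytes/page */
        size_t incore = pagev_lim * sizeof (char);           /* 1 byte/page */

        /* prints roughly 1 MB without incore, 1.25 MB with it */
        (void) printf("%zu bytes protv, %zu bytes with incore\n",
            protv, protv + incore);
        return (0);
}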
3603 3603
3604 3604 static void
3605 3605 pr_pagev_destroy(prpagev_t *pagev)
3606 3606 {
3607 3607 if (pagev->pg_incore != NULL)
3608 3608 kmem_free(pagev->pg_incore, pagev->pg_npages * sizeof (char));
3609 3609
3610 3610 kmem_free(pagev->pg_protv, pagev->pg_npages * sizeof (uint_t));
3611 3611 kmem_free(pagev, sizeof (prpagev_t));
3612 3612 }
3613 3613
3614 3614 static caddr_t
3615 3615 pr_pagev_fill(prpagev_t *pagev, struct seg *seg, caddr_t addr, caddr_t eaddr)
3616 3616 {
3617 3617 ulong_t lastpg = seg_page(seg, eaddr - 1);
3618 3618 ulong_t pn, pnlim;
3619 3619 caddr_t saddr;
3620 3620 size_t len;
3621 3621
3622 3622 ASSERT(addr >= seg->s_base && addr <= eaddr);
3623 3623
3624 3624 if (addr == eaddr)
3625 3625 return (eaddr);
3626 3626
3627 3627 refill:
3628 3628 ASSERT(addr < eaddr);
3629 3629 pagev->pg_pnbase = seg_page(seg, addr);
3630 3630 pnlim = pagev->pg_pnbase + pagev->pg_npages;
3631 3631 saddr = addr;
3632 3632
3633 3633 if (lastpg < pnlim)
3634 3634 len = (size_t)(eaddr - addr);
3635 3635 else
3636 3636 len = pagev->pg_npages * PAGESIZE;
3637 3637
3638 3638 if (pagev->pg_incore != NULL) {
3639 3639 /*
3640 3640 * INCORE cleverly has different semantics than GETPROT:
3641 3641 * it returns info on pages up to but NOT including addr + len.
3642 3642 */
3643 3643 SEGOP_INCORE(seg, addr, len, pagev->pg_incore);
3644 3644 pn = pagev->pg_pnbase;
3645 3645
3646 3646 do {
3647 3647 /*
3648 3648 * Guilty knowledge here: We know that segvn_incore
3649 3649 * returns more than just the low-order bit that
3650 3650 * indicates the page is actually in memory. If any
3651 3651 * bits are set, then the page has backing store.
3652 3652 */
3653 3653 if (pagev->pg_incore[pn++ - pagev->pg_pnbase])
3654 3654 goto out;
3655 3655
3656 3656 } while ((addr += PAGESIZE) < eaddr && pn < pnlim);
3657 3657
3658 3658 /*
3659 3659 * If we examined all the pages in the vector but we're not
3660 3660 * at the end of the segment, take another lap.
3661 3661 */
3662 3662 if (addr < eaddr)
3663 3663 goto refill;
3664 3664 }
3665 3665
3666 3666 /*
3667 3667 * Need to take len - 1 because addr + len is the address of the
3668 3668 * first byte of the page just past the end of what we want.
3669 3669 */
3670 3670 out:
3671 3671 SEGOP_GETPROT(seg, saddr, len - 1, pagev->pg_protv);
3672 3672 return (addr);
3673 3673 }
3674 3674
3675 3675 static caddr_t
3676 3676 pr_pagev_nextprot(prpagev_t *pagev, struct seg *seg,
3677 3677 caddr_t *saddrp, caddr_t eaddr, uint_t *protp)
3678 3678 {
3679 3679 /*
3680 3680 * Our starting address is either the specified address, or the base
3681 3681 * address from the start of the pagev. If the latter is greater,
3682 3682 * this means a previous call to pr_pagev_fill has already scanned
3683 3683 * further than the end of the previous mapping.
3684 3684 */
3685 3685 caddr_t base = seg->s_base + pagev->pg_pnbase * PAGESIZE;
3686 3686 caddr_t addr = MAX(*saddrp, base);
3687 3687 ulong_t pn = seg_page(seg, addr);
3688 3688 uint_t prot, nprot;
3689 3689
3690 3690 /*
3691 3691 * If we're dealing with noreserve pages, then advance addr to
3692 3692 * the address of the next page which has backing store.
3693 3693 */
3694 3694 if (pagev->pg_incore != NULL) {
3695 3695 while (pagev->pg_incore[pn - pagev->pg_pnbase] == 0) {
3696 3696 if ((addr += PAGESIZE) == eaddr) {
3697 3697 *saddrp = addr;
3698 3698 prot = 0;
3699 3699 goto out;
3700 3700 }
3701 3701 if (++pn == pagev->pg_pnbase + pagev->pg_npages) {
3702 3702 addr = pr_pagev_fill(pagev, seg, addr, eaddr);
3703 3703 if (addr == eaddr) {
3704 3704 *saddrp = addr;
3705 3705 prot = 0;
3706 3706 goto out;
3707 3707 }
3708 3708 pn = seg_page(seg, addr);
3709 3709 }
3710 3710 }
3711 3711 }
3712 3712
3713 3713 /*
3714 3714 * Get the protections on the page corresponding to addr.
3715 3715 */
3716 3716 pn = seg_page(seg, addr);
3717 3717 ASSERT(pn >= pagev->pg_pnbase);
3718 3718 ASSERT(pn < (pagev->pg_pnbase + pagev->pg_npages));
3719 3719
3720 3720 prot = pagev->pg_protv[pn - pagev->pg_pnbase];
3721 3721 getwatchprot(seg->s_as, addr, &prot);
3722 3722 *saddrp = addr;
3723 3723
3724 3724 /*
3725 3725 * Now loop until we find a backed page with different protections
3726 3726 * or we reach the end of this segment.
3727 3727 */
3728 3728 while ((addr += PAGESIZE) < eaddr) {
3729 3729 /*
3730 3730 * If pn has advanced to the page number following what we
3731 3731 * have information on, refill the page vector and reset
3732 3732 * addr and pn. If pr_pagev_fill does not return the
3733 3733 * address of the next page, we have a discontiguity and
3734 3734 * thus have reached the end of the current mapping.
3735 3735 */
3736 3736 if (++pn == pagev->pg_pnbase + pagev->pg_npages) {
3737 3737 caddr_t naddr = pr_pagev_fill(pagev, seg, addr, eaddr);
3738 3738 if (naddr != addr)
3739 3739 goto out;
3740 3740 pn = seg_page(seg, addr);
3741 3741 }
3742 3742
3743 3743 /*
3744 3744 * The previous page's protections are in prot, and it has
3745 3745 * backing. If this page is MAP_NORESERVE and has no backing,
3746 3746 * then end this mapping and return the previous protections.
3747 3747 */
3748 3748 if (pagev->pg_incore != NULL &&
3749 3749 pagev->pg_incore[pn - pagev->pg_pnbase] == 0)
3750 3750 break;
3751 3751
3752 3752 /*
3753 3753 * Otherwise end the mapping if this page's protections (nprot)
3754 3754 * are different than those in the previous page (prot).
3755 3755 */
3756 3756 nprot = pagev->pg_protv[pn - pagev->pg_pnbase];
3757 3757 getwatchprot(seg->s_as, addr, &nprot);
3758 3758
3759 3759 if (nprot != prot)
3760 3760 break;
3761 3761 }
3762 3762
3763 3763 out:
3764 3764 *protp = prot;
3765 3765 return (addr);
3766 3766 }
3767 3767
3768 3768 size_t
3769 3769 pr_getsegsize(struct seg *seg, int reserved)
3770 3770 {
3771 3771 size_t size = seg->s_size;
3772 3772
3773 3773 /*
3774 3774 * If we're interested in the reserved space, return the size of the
3775 3775 * segment itself. Everything else in this function is a special case
3776 3776 * to determine the actual underlying size of various segment types.
3777 3777 */
3778 3778 if (reserved)
3779 3779 return (size);
3780 3780
3781 3781 /*
3782 3782 * If this is a segvn mapping of a regular file, return the smaller
3783 3783 * of the segment size and the remaining size of the file beyond
3784 3784 * the file offset corresponding to seg->s_base.
3785 3785 */
3786 3786 if (seg->s_ops == &segvn_ops) {
3787 3787 vattr_t vattr;
3788 3788 vnode_t *vp;
3789 3789
3790 3790 vattr.va_mask = AT_SIZE;
3791 3791
3792 3792 if (SEGOP_GETVP(seg, seg->s_base, &vp) == 0 &&
3793 3793 vp != NULL && vp->v_type == VREG &&
3794 3794 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
3795 3795
3796 3796 u_offset_t fsize = vattr.va_size;
3797 3797 u_offset_t offset = SEGOP_GETOFFSET(seg, seg->s_base);
3798 3798
3799 3799 if (fsize < offset)
3800 3800 fsize = 0;
3801 3801 else
3802 3802 fsize -= offset;
3803 3803
3804 3804 fsize = roundup(fsize, (u_offset_t)PAGESIZE);
3805 3805
3806 3806 if (fsize < (u_offset_t)size)
3807 3807 size = (size_t)fsize;
3808 3808 }
3809 3809
3810 3810 return (size);
3811 3811 }
3812 3812
3813 3813 /*
3814 3814 * If this is an ISM shared segment, don't include pages that are
3815 3815 * beyond the real size of the spt segment that backs it.
3816 3816 */
3817 3817 if (seg->s_ops == &segspt_shmops)
3818 3818 return (MIN(spt_realsize(seg), size));
3819 3819
3820 3820 /*
3821 3821 	 * If this segment is a mapping from /dev/null, then this is a
3822 3822 * reservation of virtual address space and has no actual size.
3823 3823 * Such segments are backed by segdev and have type set to neither
3824 3824 * MAP_SHARED nor MAP_PRIVATE.
3825 3825 */
3826 3826 if (seg->s_ops == &segdev_ops &&
3827 3827 ((SEGOP_GETTYPE(seg, seg->s_base) &
3828 3828 (MAP_SHARED | MAP_PRIVATE)) == 0))
3829 3829 return (0);
3830 3830
3831 3831 /*
3832 3832 * If this segment doesn't match one of the special types we handle,
3833 3833 * just return the size of the segment itself.
3834 3834 */
3835 3835 return (size);
3836 3836 }
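Editor's illustration (standalone sketch, not from the change): for a regular-file segvn mapping, pr_getsegsize() above clamps the reported size to the page-rounded remainder of the file beyond the mapped offset. The helper below models just that clamping with plain integers; names and values are illustrative.

#include <stdio.h>

static size_t
clamp_to_file(size_t segsize, unsigned long long fsize,
    unsigned long long offset, size_t pagesize)
{
        unsigned long long rem = (fsize < offset) ? 0 : fsize - offset;

        /* round the remainder up to a whole page, as the kernel code does */
        rem = ((rem + pagesize - 1) / pagesize) * pagesize;
        return (rem < segsize ? (size_t)rem : segsize);
}

int
main(void)
{
        /* a 64K segment mapping a 100000-byte file at offset 40960 */
        (void) printf("%zu\n", clamp_to_file(65536, 100000, 40960, 4096));
        return (0);
}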
3837 3837
3838 3838 uint_t
3839 3839 pr_getprot(struct seg *seg, int reserved, void **tmp,
3840 - caddr_t *saddrp, caddr_t *naddrp, caddr_t eaddr)
3840 + caddr_t *saddrp, caddr_t *naddrp, caddr_t eaddr)
3841 3841 {
3842 3842 struct as *as = seg->s_as;
3843 3843
3844 3844 caddr_t saddr = *saddrp;
3845 3845 caddr_t naddr;
3846 3846
3847 3847 int check_noreserve;
3848 3848 uint_t prot;
3849 3849
3850 3850 union {
3851 3851 struct segvn_data *svd;
3852 3852 struct segdev_data *sdp;
3853 3853 void *data;
3854 3854 } s;
3855 3855
3856 3856 s.data = seg->s_data;
3857 3857
3858 3858 ASSERT(AS_WRITE_HELD(as));
3859 3859 ASSERT(saddr >= seg->s_base && saddr < eaddr);
3860 3860 ASSERT(eaddr <= seg->s_base + seg->s_size);
3861 3861
3862 3862 /*
3863 3863 * Don't include MAP_NORESERVE pages in the address range
3864 3864 * unless their mappings have actually materialized.
3865 3865 * We cheat by knowing that segvn is the only segment
3866 3866 * driver that supports MAP_NORESERVE.
3867 3867 */
3868 3868 check_noreserve =
3869 3869 (!reserved && seg->s_ops == &segvn_ops && s.svd != NULL &&
3870 3870 (s.svd->vp == NULL || s.svd->vp->v_type != VREG) &&
3871 3871 (s.svd->flags & MAP_NORESERVE));
3872 3872
3873 3873 /*
3874 3874 * Examine every page only as a last resort. We use guilty knowledge
3875 3875 * of segvn and segdev to avoid this: if there are no per-page
3876 3876 * protections present in the segment and we don't care about
3877 3877 * MAP_NORESERVE, then s_data->prot is the prot for the whole segment.
3878 3878 */
3879 3879 if (!check_noreserve && saddr == seg->s_base &&
3880 3880 seg->s_ops == &segvn_ops && s.svd != NULL && s.svd->pageprot == 0) {
3881 3881 prot = s.svd->prot;
3882 3882 getwatchprot(as, saddr, &prot);
3883 3883 naddr = eaddr;
3884 3884
3885 3885 } else if (saddr == seg->s_base && seg->s_ops == &segdev_ops &&
3886 3886 s.sdp != NULL && s.sdp->pageprot == 0) {
3887 3887 prot = s.sdp->prot;
3888 3888 getwatchprot(as, saddr, &prot);
3889 3889 naddr = eaddr;
3890 3890
3891 3891 } else {
3892 3892 prpagev_t *pagev;
3893 3893
3894 3894 /*
3895 3895 * If addr is sitting at the start of the segment, then
3896 3896 * create a page vector to store protection and incore
3897 3897 * information for pages in the segment, and fill it.
3898 3898 * Otherwise, we expect *tmp to address the prpagev_t
3899 3899 * allocated by a previous call to this function.
3900 3900 */
3901 3901 if (saddr == seg->s_base) {
3902 3902 pagev = pr_pagev_create(seg, check_noreserve);
3903 3903 saddr = pr_pagev_fill(pagev, seg, saddr, eaddr);
3904 3904
3905 3905 ASSERT(*tmp == NULL);
3906 3906 *tmp = pagev;
3907 3907
3908 3908 ASSERT(saddr <= eaddr);
3909 3909 *saddrp = saddr;
3910 3910
3911 3911 if (saddr == eaddr) {
3912 3912 naddr = saddr;
3913 3913 prot = 0;
3914 3914 goto out;
3915 3915 }
3916 3916
3917 3917 } else {
3918 3918 ASSERT(*tmp != NULL);
3919 3919 pagev = (prpagev_t *)*tmp;
3920 3920 }
3921 3921
3922 3922 naddr = pr_pagev_nextprot(pagev, seg, saddrp, eaddr, &prot);
3923 3923 ASSERT(naddr <= eaddr);
3924 3924 }
3925 3925
3926 3926 out:
3927 3927 if (naddr == eaddr)
3928 3928 pr_getprot_done(tmp);
3929 3929 *naddrp = naddr;
3930 3930 return (prot);
3931 3931 }
3932 3932
3933 3933 void
3934 3934 pr_getprot_done(void **tmp)
3935 3935 {
3936 3936 if (*tmp != NULL) {
3937 3937 pr_pagev_destroy((prpagev_t *)*tmp);
3938 3938 *tmp = NULL;
3939 3939 }
3940 3940 }
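The callers further down (prgetxmap() and, under _SYSCALL32_IMPL, prgetxmap32()) drive pr_getprot() as a run iterator: each call returns one range [saddr, naddr) of pages whose protections match, and the opaque *tmp cookie carries the page vector between calls until the run reaches eaddr, at which point pr_getprot_done() frees it. The standalone model below is an editor's sketch of that run-coalescing idea over a plain array; it omits the cookie and all segment details and is illustrative only.

#include <stdio.h>

/* Return the prot of the run starting at *saddrp; set *naddrp to its end. */
static unsigned char
next_run(const unsigned char *protv, size_t *saddrp, size_t *naddrp,
    size_t eaddr)
{
        size_t i = *saddrp;
        unsigned char prot = protv[i];

        while (++i < eaddr && protv[i] == prot)
                ;
        *naddrp = i;
        return (prot);
}

int
main(void)
{
        unsigned char protv[] = { 5, 5, 5, 3, 3, 7, 7, 7, 7 };
        size_t saddr, naddr, eaddr = sizeof (protv);

        for (saddr = 0; saddr < eaddr; saddr = naddr) {
                unsigned char prot = next_run(protv, &saddr, &naddr, eaddr);
                (void) printf("pages [%zu, %zu): prot %u\n",
                    saddr, naddr, prot);
        }
        return (0);
}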
3941 3941
3942 3942 /*
3943 3943 * Return true iff the vnode is a /proc file from the object directory.
3944 3944 */
3945 3945 int
3946 3946 pr_isobject(vnode_t *vp)
3947 3947 {
3948 3948 return (vn_matchops(vp, prvnodeops) && VTOP(vp)->pr_type == PR_OBJECT);
3949 3949 }
3950 3950
3951 3951 /*
3952 3952 * Return true iff the vnode is a /proc file opened by the process itself.
3953 3953 */
3954 3954 int
3955 3955 pr_isself(vnode_t *vp)
3956 3956 {
3957 3957 /*
3958 3958 * XXX: To retain binary compatibility with the old
3959 3959 * ioctl()-based version of /proc, we exempt self-opens
3960 3960 * of /proc/<pid> from being marked close-on-exec.
3961 3961 */
3962 3962 return (vn_matchops(vp, prvnodeops) &&
3963 3963 (VTOP(vp)->pr_flags & PR_ISSELF) &&
3964 3964 VTOP(vp)->pr_type != PR_PIDDIR);
3965 3965 }
3966 3966
3967 3967 static ssize_t
3968 3968 pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
3969 3969 {
3970 3970 ssize_t pagesize, hatsize;
3971 3971
3972 3972 ASSERT(AS_WRITE_HELD(seg->s_as));
3973 3973 ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
3974 3974 ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
3975 3975 ASSERT(saddr < eaddr);
3976 3976
3977 3977 pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);
3978 3978 ASSERT(pagesize == -1 || IS_P2ALIGNED(pagesize, pagesize));
3979 3979 ASSERT(pagesize != 0);
3980 3980
3981 3981 if (pagesize == -1)
3982 3982 pagesize = PAGESIZE;
3983 3983
3984 3984 saddr += P2NPHASE((uintptr_t)saddr, pagesize);
3985 3985
3986 3986 while (saddr < eaddr) {
3987 3987 if (hatsize != hat_getpagesize(seg->s_as->a_hat, saddr))
3988 3988 break;
3989 3989 ASSERT(IS_P2ALIGNED(saddr, pagesize));
3990 3990 saddr += pagesize;
3991 3991 }
3992 3992
3993 3993 *naddrp = ((saddr < eaddr) ? saddr : eaddr);
3994 3994 return (hatsize);
3995 3995 }
3996 3996
3997 3997 /*
3998 3998 * Return an array of structures with extended memory map information.
3999 3999 * We allocate here; the caller must deallocate.
4000 4000 */
4001 4001 int
4002 4002 prgetxmap(proc_t *p, list_t *iolhead)
4003 4003 {
4004 4004 struct as *as = p->p_as;
4005 4005 prxmap_t *mp;
4006 4006 struct seg *seg;
4007 4007 struct seg *brkseg, *stkseg;
4008 4008 struct vnode *vp;
4009 4009 struct vattr vattr;
4010 4010 uint_t prot;
4011 4011
4012 4012 ASSERT(as != &kas && AS_WRITE_HELD(as));
4013 4013
4014 4014 /*
4015 4015 * Request an initial buffer size that doesn't waste memory
4016 4016 * if the address space has only a small number of segments.
4017 4017 */
4018 4018 pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
4019 4019
4020 4020 if ((seg = AS_SEGFIRST(as)) == NULL)
4021 4021 return (0);
4022 4022
4023 4023 brkseg = break_seg(p);
4024 4024 stkseg = as_segat(as, prgetstackbase(p));
4025 4025
4026 4026 do {
4027 4027 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
4028 4028 caddr_t saddr, naddr, baddr;
4029 4029 void *tmp = NULL;
4030 4030 ssize_t psz;
4031 4031 char *parr;
4032 4032 uint64_t npages;
4033 4033 uint64_t pagenum;
4034 4034
4035 4035 /*
4036 4036 * Segment loop part one: iterate from the base of the segment
4037 4037 * to its end, pausing at each address boundary (baddr) between
4038 4038 * ranges that have different virtual memory protections.
4039 4039 */
4040 4040 for (saddr = seg->s_base; saddr < eaddr; saddr = baddr) {
4041 4041 prot = pr_getprot(seg, 0, &tmp, &saddr, &baddr, eaddr);
4042 4042 ASSERT(baddr >= saddr && baddr <= eaddr);
4043 4043
4044 4044 /*
4045 4045 * Segment loop part two: iterate from the current
4046 4046 * position to the end of the protection boundary,
4047 4047 * pausing at each address boundary (naddr) between
4048 4048 * ranges that have different underlying page sizes.
4049 4049 */
4050 4050 for (; saddr < baddr; saddr = naddr) {
4051 4051 psz = pr_getpagesize(seg, saddr, &naddr, baddr);
4052 4052 ASSERT(naddr >= saddr && naddr <= baddr);
4053 4053
4054 4054 mp = pr_iol_newbuf(iolhead, sizeof (*mp));
4055 4055
4056 4056 mp->pr_vaddr = (uintptr_t)saddr;
4057 4057 mp->pr_size = naddr - saddr;
4058 4058 mp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
4059 4059 mp->pr_mflags = 0;
4060 4060 if (prot & PROT_READ)
4061 4061 mp->pr_mflags |= MA_READ;
4062 4062 if (prot & PROT_WRITE)
4063 4063 mp->pr_mflags |= MA_WRITE;
4064 4064 if (prot & PROT_EXEC)
4065 4065 mp->pr_mflags |= MA_EXEC;
4066 4066 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
4067 4067 mp->pr_mflags |= MA_SHARED;
4068 4068 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
4069 4069 mp->pr_mflags |= MA_NORESERVE;
4070 4070 if (seg->s_ops == &segspt_shmops ||
4071 4071 (seg->s_ops == &segvn_ops &&
4072 4072 (SEGOP_GETVP(seg, saddr, &vp) != 0 ||
4073 4073 vp == NULL)))
4074 4074 mp->pr_mflags |= MA_ANON;
4075 4075 if (seg == brkseg)
4076 4076 mp->pr_mflags |= MA_BREAK;
4077 4077 else if (seg == stkseg)
4078 4078 mp->pr_mflags |= MA_STACK;
4079 4079 if (seg->s_ops == &segspt_shmops)
4080 4080 mp->pr_mflags |= MA_ISM | MA_SHM;
4081 4081
4082 4082 mp->pr_pagesize = PAGESIZE;
4083 4083 if (psz == -1) {
4084 4084 mp->pr_hatpagesize = 0;
4085 4085 } else {
4086 4086 mp->pr_hatpagesize = psz;
4087 4087 }
4088 4088
4089 4089 /*
4090 4090 * Manufacture a filename for the "object" dir.
4091 4091 */
4092 4092 mp->pr_dev = PRNODEV;
4093 4093 vattr.va_mask = AT_FSID|AT_NODEID;
4094 4094 if (seg->s_ops == &segvn_ops &&
4095 4095 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
4096 4096 vp != NULL && vp->v_type == VREG &&
4097 4097 VOP_GETATTR(vp, &vattr, 0, CRED(),
4098 4098 NULL) == 0) {
4099 4099 mp->pr_dev = vattr.va_fsid;
4100 4100 mp->pr_ino = vattr.va_nodeid;
4101 4101 if (vp == p->p_exec)
4102 4102 (void) strcpy(mp->pr_mapname,
4103 4103 "a.out");
4104 4104 else
4105 4105 pr_object_name(mp->pr_mapname,
4106 4106 vp, &vattr);
4107 4107 }
4108 4108
4109 4109 /*
4110 4110 * Get the SysV shared memory id, if any.
4111 4111 */
4112 4112 if ((mp->pr_mflags & MA_SHARED) &&
4113 4113 p->p_segacct && (mp->pr_shmid = shmgetid(p,
4114 4114 seg->s_base)) != SHMID_NONE) {
4115 4115 if (mp->pr_shmid == SHMID_FREE)
4116 4116 mp->pr_shmid = -1;
4117 4117
4118 4118 mp->pr_mflags |= MA_SHM;
4119 4119 } else {
4120 4120 mp->pr_shmid = -1;
4121 4121 }
4122 4122
4123 4123 npages = ((uintptr_t)(naddr - saddr)) >>
4124 4124 PAGESHIFT;
4125 4125 parr = kmem_zalloc(npages, KM_SLEEP);
4126 4126
4127 4127 SEGOP_INCORE(seg, saddr, naddr - saddr, parr);
4128 4128
4129 4129 for (pagenum = 0; pagenum < npages; pagenum++) {
4130 4130 if (parr[pagenum] & SEG_PAGE_INCORE)
4131 4131 mp->pr_rss++;
4132 4132 if (parr[pagenum] & SEG_PAGE_ANON)
4133 4133 mp->pr_anon++;
4134 4134 if (parr[pagenum] & SEG_PAGE_LOCKED)
4135 4135 mp->pr_locked++;
4136 4136 }
4137 4137 kmem_free(parr, npages);
4138 4138 }
4139 4139 }
4140 4140 ASSERT(tmp == NULL);
4141 4141 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
4142 4142
4143 4143 return (0);
4144 4144 }
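prgetxmap() produces the prxmap_t records a user program sees when it reads the /proc(4) xmap file. The reader below is an editor's sketch of such a consumer, assuming the file is a flat array of prxmap_t and that stat reports its size; field names are taken from the code above, but verify them against <procfs.h> on the target system.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <procfs.h>             /* prxmap_t */

int
main(void)
{
        int fd;
        struct stat st;
        prxmap_t *maps;
        size_t i, nmaps;

        if ((fd = open("/proc/self/xmap", O_RDONLY)) == -1) {
                perror("open");
                return (1);
        }
        if (fstat(fd, &st) == -1 || (maps = malloc(st.st_size)) == NULL ||
            read(fd, maps, st.st_size) != st.st_size) {
                perror("read");
                return (1);
        }
        nmaps = st.st_size / sizeof (prxmap_t);

        for (i = 0; i < nmaps; i++) {
                (void) printf("%lx %lu rss=%llu %s\n",
                    (unsigned long)maps[i].pr_vaddr,
                    (unsigned long)maps[i].pr_size,
                    (unsigned long long)maps[i].pr_rss,
                    maps[i].pr_mapname);
        }
        free(maps);
        (void) close(fd);
        return (0);
}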
4145 4145
4146 4146 /*
4147 4147 * Return the process's credentials. We don't need a 32-bit equivalent of
4148 4148 * this function because prcred_t and prcred32_t are actually the same.
4149 4149 */
4150 4150 void
4151 4151 prgetcred(proc_t *p, prcred_t *pcrp)
4152 4152 {
4153 4153 mutex_enter(&p->p_crlock);
4154 4154 cred2prcred(p->p_cred, pcrp);
4155 4155 mutex_exit(&p->p_crlock);
4156 4156 }
4157 4157
4158 +void
4159 +prgetsecflags(proc_t *p, prsecflags_t *psfp)
4160 +{
4161 + ASSERT(psfp != NULL);
4162 +
4163 + psfp->pr_version = PRSECFLAGS_VERSION_CURRENT;
4164 + psfp->pr_lower = p->p_secflags.psf_lower;
4165 + psfp->pr_upper = p->p_secflags.psf_upper;
4166 + psfp->pr_effective = p->p_secflags.psf_effective;
4167 + psfp->pr_inherit = p->p_secflags.psf_inherit;
4168 +}
4169 +
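prgetsecflags() is the new hook this change adds; presumably the rest of the secflags series exposes the structure it fills through a per-process /proc secflags file. The reader below is an editor's sketch under that assumption: the /proc/<pid>/secflags path and the presence of prsecflags_t in <procfs.h> are assumptions to verify against the final headers, and the field names simply mirror the kernel code above (pr_version, pr_effective, pr_inherit, pr_lower, pr_upper).

#include <stdio.h>
#include <procfs.h>             /* prsecflags_t -- assumed to be declared here */

int
main(int argc, char **argv)
{
        char path[64];
        prsecflags_t psf;
        FILE *fp;

        /* path is an assumption; pass a pid or default to the caller */
        (void) snprintf(path, sizeof (path), "/proc/%s/secflags",
            argc > 1 ? argv[1] : "self");

        if ((fp = fopen(path, "r")) == NULL) {
                perror(path);
                return (1);
        }
        if (fread(&psf, sizeof (psf), 1, fp) != 1) {
                perror("fread");
                (void) fclose(fp);
                return (1);
        }
        (void) fclose(fp);

        (void) printf("version   %u\n", (unsigned int)psf.pr_version);
        (void) printf("effective %#llx\n", (unsigned long long)psf.pr_effective);
        (void) printf("inherit   %#llx\n", (unsigned long long)psf.pr_inherit);
        (void) printf("lower     %#llx\n", (unsigned long long)psf.pr_lower);
        (void) printf("upper     %#llx\n", (unsigned long long)psf.pr_upper);
        return (0);
}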
4158 4170 /*
4159 4171 * Compute actual size of the prpriv_t structure.
4160 4172 */
4161 4173
4162 4174 size_t
4163 4175 prgetprivsize(void)
4164 4176 {
4165 4177 return (priv_prgetprivsize(NULL));
4166 4178 }
4167 4179
4168 4180 /*
4169 4181 * Return the process's privileges. We don't need a 32-bit equivalent of
4170 4182 * this function because prpriv_t and prpriv32_t are actually the same.
4171 4183 */
4172 4184 void
4173 4185 prgetpriv(proc_t *p, prpriv_t *pprp)
4174 4186 {
4175 4187 mutex_enter(&p->p_crlock);
4176 4188 cred2prpriv(p->p_cred, pprp);
4177 4189 mutex_exit(&p->p_crlock);
4178 4190 }
4179 4191
4180 4192 #ifdef _SYSCALL32_IMPL
4181 4193 /*
4182 4194 * Return an array of structures with HAT memory map information.
4183 4195 * We allocate here; the caller must deallocate.
4184 4196 */
4185 4197 int
4186 4198 prgetxmap32(proc_t *p, list_t *iolhead)
4187 4199 {
4188 4200 struct as *as = p->p_as;
4189 4201 prxmap32_t *mp;
4190 4202 struct seg *seg;
4191 4203 struct seg *brkseg, *stkseg;
4192 4204 struct vnode *vp;
4193 4205 struct vattr vattr;
4194 4206 uint_t prot;
4195 4207
4196 4208 ASSERT(as != &kas && AS_WRITE_HELD(as));
4197 4209
4198 4210 /*
4199 4211 * Request an initial buffer size that doesn't waste memory
4200 4212 * if the address space has only a small number of segments.
4201 4213 */
4202 4214 pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
4203 4215
4204 4216 if ((seg = AS_SEGFIRST(as)) == NULL)
4205 4217 return (0);
4206 4218
4207 4219 brkseg = break_seg(p);
4208 4220 stkseg = as_segat(as, prgetstackbase(p));
4209 4221
4210 4222 do {
4211 4223 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
4212 4224 caddr_t saddr, naddr, baddr;
4213 4225 void *tmp = NULL;
4214 4226 ssize_t psz;
4215 4227 char *parr;
4216 4228 uint64_t npages;
4217 4229 uint64_t pagenum;
4218 4230
4219 4231 /*
4220 4232 * Segment loop part one: iterate from the base of the segment
4221 4233 * to its end, pausing at each address boundary (baddr) between
4222 4234 * ranges that have different virtual memory protections.
4223 4235 */
4224 4236 for (saddr = seg->s_base; saddr < eaddr; saddr = baddr) {
4225 4237 prot = pr_getprot(seg, 0, &tmp, &saddr, &baddr, eaddr);
4226 4238 ASSERT(baddr >= saddr && baddr <= eaddr);
4227 4239
4228 4240 /*
4229 4241 * Segment loop part two: iterate from the current
4230 4242 * position to the end of the protection boundary,
4231 4243 * pausing at each address boundary (naddr) between
4232 4244 * ranges that have different underlying page sizes.
4233 4245 */
4234 4246 for (; saddr < baddr; saddr = naddr) {
4235 4247 psz = pr_getpagesize(seg, saddr, &naddr, baddr);
4236 4248 ASSERT(naddr >= saddr && naddr <= baddr);
4237 4249
4238 4250 mp = pr_iol_newbuf(iolhead, sizeof (*mp));
4239 4251
4240 4252 mp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
4241 4253 mp->pr_size = (size32_t)(naddr - saddr);
4242 4254 mp->pr_offset = SEGOP_GETOFFSET(seg, saddr);
4243 4255 mp->pr_mflags = 0;
4244 4256 if (prot & PROT_READ)
4245 4257 mp->pr_mflags |= MA_READ;
4246 4258 if (prot & PROT_WRITE)
4247 4259 mp->pr_mflags |= MA_WRITE;
4248 4260 if (prot & PROT_EXEC)
4249 4261 mp->pr_mflags |= MA_EXEC;
4250 4262 if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
4251 4263 mp->pr_mflags |= MA_SHARED;
4252 4264 if (SEGOP_GETTYPE(seg, saddr) & MAP_NORESERVE)
4253 4265 mp->pr_mflags |= MA_NORESERVE;
4254 4266 if (seg->s_ops == &segspt_shmops ||
4255 4267 (seg->s_ops == &segvn_ops &&
4256 4268 (SEGOP_GETVP(seg, saddr, &vp) != 0 ||
4257 4269 vp == NULL)))
4258 4270 mp->pr_mflags |= MA_ANON;
4259 4271 if (seg == brkseg)
4260 4272 mp->pr_mflags |= MA_BREAK;
4261 4273 else if (seg == stkseg)
4262 4274 mp->pr_mflags |= MA_STACK;
4263 4275 if (seg->s_ops == &segspt_shmops)
4264 4276 mp->pr_mflags |= MA_ISM | MA_SHM;
4265 4277
4266 4278 mp->pr_pagesize = PAGESIZE;
4267 4279 if (psz == -1) {
4268 4280 mp->pr_hatpagesize = 0;
4269 4281 } else {
4270 4282 mp->pr_hatpagesize = psz;
4271 4283 }
4272 4284
4273 4285 /*
4274 4286 * Manufacture a filename for the "object" dir.
4275 4287 */
4276 4288 mp->pr_dev = PRNODEV32;
4277 4289 vattr.va_mask = AT_FSID|AT_NODEID;
4278 4290 if (seg->s_ops == &segvn_ops &&
4279 4291 SEGOP_GETVP(seg, saddr, &vp) == 0 &&
4280 4292 vp != NULL && vp->v_type == VREG &&
4281 4293 VOP_GETATTR(vp, &vattr, 0, CRED(),
4282 4294 NULL) == 0) {
4283 4295 (void) cmpldev(&mp->pr_dev,
4284 4296 vattr.va_fsid);
4285 4297 mp->pr_ino = vattr.va_nodeid;
4286 4298 if (vp == p->p_exec)
4287 4299 (void) strcpy(mp->pr_mapname,
4288 4300 "a.out");
4289 4301 else
4290 4302 pr_object_name(mp->pr_mapname,
4291 4303 vp, &vattr);
4292 4304 }
4293 4305
4294 4306 /*
4295 4307 * Get the SysV shared memory id, if any.
4296 4308 */
4297 4309 if ((mp->pr_mflags & MA_SHARED) &&
4298 4310 p->p_segacct && (mp->pr_shmid = shmgetid(p,
4299 4311 seg->s_base)) != SHMID_NONE) {
4300 4312 if (mp->pr_shmid == SHMID_FREE)
4301 4313 mp->pr_shmid = -1;
4302 4314
4303 4315 mp->pr_mflags |= MA_SHM;
4304 4316 } else {
4305 4317 mp->pr_shmid = -1;
4306 4318 }
4307 4319
4308 4320 npages = ((uintptr_t)(naddr - saddr)) >>
4309 4321 PAGESHIFT;
4310 4322 parr = kmem_zalloc(npages, KM_SLEEP);
4311 4323
4312 4324 SEGOP_INCORE(seg, saddr, naddr - saddr, parr);
4313 4325
4314 4326 for (pagenum = 0; pagenum < npages; pagenum++) {
4315 4327 if (parr[pagenum] & SEG_PAGE_INCORE)
4316 4328 mp->pr_rss++;
4317 4329 if (parr[pagenum] & SEG_PAGE_ANON)
4318 4330 mp->pr_anon++;
4319 4331 if (parr[pagenum] & SEG_PAGE_LOCKED)
4320 4332 mp->pr_locked++;
4321 4333 }
4322 4334 kmem_free(parr, npages);
4323 4335 }
4324 4336 }
4325 4337 ASSERT(tmp == NULL);
4326 4338 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
4327 4339
4328 4340 return (0);
4329 4341 }
4330 4342 #endif /* _SYSCALL32_IMPL */