Bring back LX zones.
--- old/usr/src/uts/common/os/pid.c
+++ new/usr/src/uts/common/os/pid.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 #include <sys/types.h>
30 30 #include <sys/param.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/proc.h>
33 33 #include <sys/kmem.h>
34 34 #include <sys/tuneable.h>
35 35 #include <sys/var.h>
36 36 #include <sys/cred.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/prsystm.h>
39 39 #include <sys/vnode.h>
40 40 #include <sys/session.h>
41 41 #include <sys/cpuvar.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/bitmap.h>
44 44 #include <sys/debug.h>
45 45 #include <c2/audit.h>
46 46 #include <sys/project.h>
47 47 #include <sys/task.h>
48 48 #include <sys/zone.h>
49 49
50 50 /* directory entries for /proc */
51 51 union procent {
52 52 proc_t *pe_proc;
53 53 union procent *pe_next;
54 54 };
55 55
56 56 struct pid pid0 = {
57 57 0, /* pid_prinactive */
58 58 1, /* pid_pgorphaned */
59 59 0, /* pid_padding */
60 60 0, /* pid_prslot */
61 61 0, /* pid_id */
62 62 NULL, /* pid_pglink */
63 63 NULL, /* pid_pgtail */
64 64 NULL, /* pid_link */
65 65 3 /* pid_ref */
66 66 };
67 67
68 68 static int pid_hashlen = 4; /* desired average hash chain length */
69 69 static int pid_hashsz; /* number of buckets in the hash table */
70 70
71 71 #define HASHPID(pid) (pidhash[((pid)&(pid_hashsz-1))])
72 72
73 73 extern uint_t nproc;
74 74 extern struct kmem_cache *process_cache;
75 75 static void upcount_init(void);
76 76
77 77 kmutex_t pidlock; /* global process lock */
78 78 kmutex_t pr_pidlock; /* /proc global process lock */
79 79 kcondvar_t *pr_pid_cv; /* for /proc, one per process slot */
80 80 struct plock *proc_lock; /* persistent array of p_lock's */
81 81
82 82 /*
83 83 * See the comment above pid_getlockslot() for a detailed explanation of this
84 84 * constant. Note that a PLOCK_SHIFT of 3 implies 64-byte coherence
85 85 * granularity; if the coherence granularity is ever changed, this constant
86 86 * should be modified to reflect the change to minimize proc_lock false
87 87 * sharing (correctness, however, is guaranteed regardless of the coherence
88 88 * granularity).
89 89 */
90 90 #define PLOCK_SHIFT 3
91 91
92 92 static kmutex_t pidlinklock;
93 93 static struct pid **pidhash;
94 94 static pid_t minpid;
95 95 static pid_t mpid = FAMOUS_PIDS; /* one more than the last famous pid */
96 96 static union procent *procdir;
97 97 static union procent *procentfree;
98 98
99 99 static struct pid *
100 100 pid_lookup(pid_t pid)
101 101 {
102 102 struct pid *pidp;
103 103
104 104 ASSERT(MUTEX_HELD(&pidlinklock));
105 105
106 106 for (pidp = HASHPID(pid); pidp; pidp = pidp->pid_link) {
107 107 if (pidp->pid_id == pid) {
108 108 ASSERT(pidp->pid_ref > 0);
109 109 break;
110 110 }
111 111 }
112 112 return (pidp);
113 113 }
114 114
115 +struct pid *
116 +pid_find(pid_t pid)
117 +{
118 + struct pid *pidp;
119 +
120 + mutex_enter(&pidlinklock);
121 + pidp = pid_lookup(pid);
122 + mutex_exit(&pidlinklock);
123 +
124 + return (pidp);
125 +}
126 +
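The pid_find() wrapper added above is just pid_lookup() bracketed by pidlinklock, and it returns the struct pid without taking a hold on it. A hedged caller sketch follows (hypothetical, not part of this change, and not compilable on its own): the result is only safe to dereference while the caller holds pidlock or otherwise knows the pid cannot be freed, since PID_RELE() runs with pidlock held.

        /*
         * Hypothetical caller sketch: pid_find() takes no hold, so the
         * result is only stable while pidlock (or some other guarantee)
         * keeps the pid from being freed out from under us.
         */
        struct pid *pidp;
        int slot;

        mutex_enter(&pidlock);
        if ((pidp = pid_find(pid)) != NULL && pidp->pid_prinactive == 0) {
                /* e.g. map the pid to its /proc slot */
                slot = pidp->pid_prslot;
        }
        mutex_exit(&pidlock);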
115 127 void
116 128 pid_setmin(void)
117 129 {
118 130 if (jump_pid && jump_pid > mpid)
119 131 minpid = mpid = jump_pid;
120 132 else
121 133 minpid = mpid;
122 134 }
123 135
124 136 /*
125 137 * When prslots are simply used as an index to determine a process' p_lock,
126 138 * adjacent prslots share adjacent p_locks. On machines where the size
127 139 * of a mutex is smaller than that of a cache line (which, as of this writing,
128 140 * is true for all machines on which Solaris runs), this can potentially
129 141 * induce false sharing. The standard solution for false sharing is to pad
130 142 * out one's data structures (in this case, struct plock). However,
131 143 * given the size and (generally) sparse use of the proc_lock array, this
132 144 * is suboptimal. We therefore stride through the proc_lock array with
133 145 * a stride of PLOCK_SHIFT. PLOCK_SHIFT should be defined as:
134 146 *
135 147 * log_2 (coherence_granularity / sizeof (kmutex_t))
136 148 *
137 149 * Under this scheme, false sharing is still possible -- but only when
138 150 * the number of active processes is very large. Note that the one-to-one
139 151 * mapping between prslots and lockslots is maintained.
140 152 */
141 153 static int
142 154 pid_getlockslot(int prslot)
143 155 {
144 156 int even = (v.v_proc >> PLOCK_SHIFT) << PLOCK_SHIFT;
145 157 int perlap = even >> PLOCK_SHIFT;
146 158
147 159 if (prslot >= even)
148 160 return (prslot);
149 161
150 162 return (((prslot % perlap) << PLOCK_SHIFT) + (prslot / perlap));
151 163 }
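The striding formula above is easiest to see with numbers: with 64-byte cache lines and 8-byte mutexes, PLOCK_SHIFT = log2(64 / 8) = 3, so adjacent prslots land eight lockslots (one cache line) apart while the mapping stays one-to-one. A minimal userland sketch of that arithmetic, using illustrative names (getlockslot(), v_proc as a parameter) rather than the kernel symbols:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PLOCK_SHIFT     3       /* log2(64-byte line / 8-byte mutex) */

/* Same arithmetic as pid_getlockslot(), with v.v_proc passed in. */
static int
getlockslot(int prslot, int v_proc)
{
        int even = (v_proc >> PLOCK_SHIFT) << PLOCK_SHIFT;
        int perlap = even >> PLOCK_SHIFT;

        if (prslot >= even)
                return (prslot);

        return (((prslot % perlap) << PLOCK_SHIFT) + (prslot / perlap));
}

int
main(void)
{
        int v_proc = 100;               /* arbitrary demo table size */
        char *seen = calloc(v_proc, 1);
        int i;

        for (i = 0; i < v_proc; i++) {
                int slot = getlockslot(i, v_proc);

                assert(slot >= 0 && slot < v_proc);
                assert(seen[slot] == 0);        /* mapping is one-to-one */
                seen[slot] = 1;
                if (i < 4)
                        printf("prslot %3d -> lockslot %3d\n", i, slot);
        }
        free(seen);
        return (0);
}

For v_proc = 100 this prints prslot 0 -> lockslot 0, prslot 1 -> lockslot 8, prslot 2 -> lockslot 16, and so on; the asserts confirm that no two prslots ever share a lockslot.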
152 164
153 165 /*
154 166 * This function allocates a pid structure, a free pid, and optionally a
155 167 * slot in the proc table for it.
156 168 *
157 169 * pid_allocate() returns the new pid on success, -1 on failure.
158 170 */
159 171 pid_t
160 172 pid_allocate(proc_t *prp, pid_t pid, int flags)
161 173 {
162 174 struct pid *pidp;
163 175 union procent *pep;
164 176 pid_t newpid, startpid;
165 177
166 178 pidp = kmem_zalloc(sizeof (struct pid), KM_SLEEP);
167 179
168 180 mutex_enter(&pidlinklock);
169 181 if ((flags & PID_ALLOC_PROC) && (pep = procentfree) == NULL) {
170 182 /*
171 183 * ran out of /proc directory entries
172 184 */
173 185 goto failed;
174 186 }
175 187
176 188 if (pid != 0) {
177 189 VERIFY(minpid == 0);
178 190 VERIFY3P(pid, <, mpid);
179 191 VERIFY3P(pid_lookup(pid), ==, NULL);
180 192 newpid = pid;
181 193 } else {
182 194 /*
183 195 * Allocate a pid
184 196 */
185 197 ASSERT(minpid <= mpid && mpid < maxpid);
186 198
187 199 startpid = mpid;
188 200 for (;;) {
189 201 newpid = mpid;
190 202 if (++mpid == maxpid)
191 203 mpid = minpid;
192 204
193 205 if (pid_lookup(newpid) == NULL)
194 206 break;
195 207
196 208 if (mpid == startpid)
197 209 goto failed;
198 210 }
199 211 }
200 212
201 213 /*
202 214 * Put pid into the pid hash table.
203 215 */
204 216 pidp->pid_link = HASHPID(newpid);
205 217 HASHPID(newpid) = pidp;
206 218 pidp->pid_ref = 1;
207 219 pidp->pid_id = newpid;
208 220
209 221 if (flags & PID_ALLOC_PROC) {
210 222 procentfree = pep->pe_next;
211 223 pidp->pid_prslot = pep - procdir;
212 224 pep->pe_proc = prp;
213 225 prp->p_pidp = pidp;
214 226 prp->p_lockp = &proc_lock[pid_getlockslot(pidp->pid_prslot)];
215 227 } else {
216 228 pidp->pid_prslot = 0;
217 229 }
218 230
219 231 mutex_exit(&pidlinklock);
220 232
221 233 return (newpid);
222 234
223 235 failed:
224 236 mutex_exit(&pidlinklock);
225 237 kmem_free(pidp, sizeof (struct pid));
226 238 return (-1);
227 239 }
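For the common pid == 0 case, the loop above is a cursor-based wrap-around search: start at mpid, advance the cursor (wrapping from maxpid back to minpid), and fail only after coming all the way back to the starting point. A self-contained sketch of the same loop against a toy "in use" bitmap (the names and sizes are illustrative):

#include <stdio.h>

#define MINPID  2
#define MAXPID  10              /* toy id space: [MINPID, MAXPID) */

static int inuse[MAXPID];
static int mpid = MINPID;       /* next id to try, like the kernel's mpid */

/* Allocate the next free id, wrapping around; -1 when the space is full. */
static int
id_allocate(void)
{
        int startpid = mpid;
        int newpid;

        for (;;) {
                newpid = mpid;
                if (++mpid == MAXPID)
                        mpid = MINPID;

                if (!inuse[newpid]) {
                        inuse[newpid] = 1;
                        return (newpid);
                }
                if (mpid == startpid)
                        return (-1);    /* a full lap: no free ids left */
        }
}

int
main(void)
{
        int i;

        for (i = 0; i < 10; i++)
                printf("allocation %d -> %d\n", i, id_allocate());
        return (0);
}

The first eight calls hand out ids 2 through 9; once the space is exhausted the cursor completes a full lap and the call returns -1, matching the goto failed path above.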
228 240
229 241 /*
230 242 * decrement the reference count for pid
231 243 */
232 244 int
233 245 pid_rele(struct pid *pidp)
234 246 {
235 247 struct pid **pidpp;
236 248
237 249 mutex_enter(&pidlinklock);
238 250 ASSERT(pidp != &pid0);
239 251
240 252 pidpp = &HASHPID(pidp->pid_id);
241 253 for (;;) {
242 254 ASSERT(*pidpp != NULL);
243 255 if (*pidpp == pidp)
244 256 break;
245 257 pidpp = &(*pidpp)->pid_link;
246 258 }
247 259
248 260 *pidpp = pidp->pid_link;
249 261 mutex_exit(&pidlinklock);
250 262
251 263 kmem_free(pidp, sizeof (*pidp));
252 264 return (0);
253 265 }
254 266
255 267 void
256 268 proc_entry_free(struct pid *pidp)
257 269 {
258 270 mutex_enter(&pidlinklock);
259 271 pidp->pid_prinactive = 1;
260 272 procdir[pidp->pid_prslot].pe_next = procentfree;
261 273 procentfree = &procdir[pidp->pid_prslot];
262 274 mutex_exit(&pidlinklock);
263 275 }
264 276
265 277 /*
266 278 * The original task needs to be passed in since the process has already been
267 279 * detached from the task at this point in time.
268 280 */
269 281 void
270 282 pid_exit(proc_t *prp, struct task *tk)
271 283 {
272 284 struct pid *pidp;
273 285 zone_t *zone = prp->p_zone;
274 286
275 287 ASSERT(MUTEX_HELD(&pidlock));
276 288
277 289 /*
278 290 * Exit process group. If it is NULL, it's because fork failed
279 291 * before calling pgjoin().
280 292 */
281 293 ASSERT(prp->p_pgidp != NULL || prp->p_stat == SIDL);
282 294 if (prp->p_pgidp != NULL)
283 295 pgexit(prp);
284 296
285 297 sess_rele(prp->p_sessp, B_TRUE);
286 298
287 299 pidp = prp->p_pidp;
288 300
289 301 proc_entry_free(pidp);
290 302
291 303 if (audit_active)
292 304 audit_pfree(prp);
293 305
294 306 if (practive == prp) {
295 307 practive = prp->p_next;
296 308 }
297 309
298 310 if (prp->p_next) {
299 311 prp->p_next->p_prev = prp->p_prev;
300 312 }
301 313 if (prp->p_prev) {
302 314 prp->p_prev->p_next = prp->p_next;
303 315 }
304 316
305 317 PID_RELE(pidp);
306 318
307 319 mutex_destroy(&prp->p_crlock);
308 320 kmem_cache_free(process_cache, prp);
309 321 nproc--;
310 322
311 323 /*
312 324 * Decrement the process counts of the original task, project and zone.
313 325 */
314 326 mutex_enter(&zone->zone_nlwps_lock);
315 327 tk->tk_nprocs--;
316 328 tk->tk_proj->kpj_nprocs--;
317 329 zone->zone_nprocs--;
318 330 mutex_exit(&zone->zone_nlwps_lock);
319 331 }
320 332
321 333 /*
322 334 * Find a process visible from the specified zone given its process ID.
323 335 */
324 336 proc_t *
325 337 prfind_zone(pid_t pid, zoneid_t zoneid)
326 338 {
327 339 struct pid *pidp;
328 340 proc_t *p;
329 341
330 342 ASSERT(MUTEX_HELD(&pidlock));
331 343
332 344 mutex_enter(&pidlinklock);
333 345 pidp = pid_lookup(pid);
334 346 mutex_exit(&pidlinklock);
335 347 if (pidp != NULL && pidp->pid_prinactive == 0) {
336 348 p = procdir[pidp->pid_prslot].pe_proc;
337 349 if (zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid)
338 350 return (p);
339 351 }
340 352 return (NULL);
341 353 }
342 354
343 355 /*
344 356 * Find a process given its process ID. This obeys zone restrictions,
345 357 * so if the caller is in a non-global zone it won't find processes
346 358 * associated with other zones. Use prfind_zone(pid, ALL_ZONES) to
347 359 * bypass this restriction.
348 360 */
349 361 proc_t *
350 362 prfind(pid_t pid)
351 363 {
352 364 zoneid_t zoneid;
353 365
354 366 if (INGLOBALZONE(curproc))
355 367 zoneid = ALL_ZONES;
356 368 else
357 369 zoneid = getzoneid();
358 370 return (prfind_zone(pid, zoneid));
359 371 }
360 372
361 373 proc_t *
362 374 pgfind_zone(pid_t pgid, zoneid_t zoneid)
363 375 {
364 376 struct pid *pidp;
365 377
366 378 ASSERT(MUTEX_HELD(&pidlock));
367 379
368 380 mutex_enter(&pidlinklock);
369 381 pidp = pid_lookup(pgid);
370 382 mutex_exit(&pidlinklock);
371 383 if (pidp != NULL) {
372 384 proc_t *p = pidp->pid_pglink;
373 385
374 386 if (zoneid == ALL_ZONES || pgid == 0 || p == NULL ||
375 387 p->p_zone->zone_id == zoneid)
376 388 return (p);
377 389 }
378 390 return (NULL);
379 391 }
380 392
381 393 /*
382 394 * return the head of the list of processes whose process group ID is 'pgid',
383 395 * or NULL, if no such process group
384 396 */
385 397 proc_t *
386 398 pgfind(pid_t pgid)
387 399 {
388 400 zoneid_t zoneid;
389 401
390 402 if (INGLOBALZONE(curproc))
391 403 zoneid = ALL_ZONES;
392 404 else
393 405 zoneid = getzoneid();
394 406 return (pgfind_zone(pgid, zoneid));
395 407 }
396 408
397 409 /*
398 410 * Sets P_PR_LOCK on a non-system process. Process must be fully created
399 411 * and not exiting to succeed.
400 412 *
401 413 * Returns 0 on success.
402 414 * Returns 1 if P_PR_LOCK is set.
403 415 * Returns -1 if proc is in invalid state.
404 416 */
405 417 int
406 418 sprtrylock_proc(proc_t *p)
407 419 {
408 420 ASSERT(MUTEX_HELD(&p->p_lock));
409 421
410 422 /* skip system and incomplete processes */
411 423 if (p->p_stat == SIDL || p->p_stat == SZOMB ||
412 424 (p->p_flag & (SSYS | SEXITING | SEXITLWPS))) {
413 425 return (-1);
414 426 }
415 427
416 428 if (p->p_proc_flag & P_PR_LOCK)
417 429 return (1);
418 430
419 431 p->p_proc_flag |= P_PR_LOCK;
420 432 THREAD_KPRI_REQUEST();
421 433
422 434 return (0);
423 435 }
424 436
425 437 /*
426 438 * Wait for P_PR_LOCK to become clear. Returns with p_lock dropped,
427 439 * and the proc pointer no longer valid, as the proc may have exited.
428 440 */
429 441 void
430 442 sprwaitlock_proc(proc_t *p)
431 443 {
432 444 kmutex_t *mp;
433 445
434 446 ASSERT(MUTEX_HELD(&p->p_lock));
435 447 ASSERT(p->p_proc_flag & P_PR_LOCK);
436 448
437 449 /*
438 450 * p_lock is persistent, but p itself is not -- it could
439 451 * vanish during cv_wait(). Load p->p_lock now so we can
440 452 * drop it after cv_wait() without referencing p.
441 453 */
442 454 mp = &p->p_lock;
443 455 cv_wait(&pr_pid_cv[p->p_slot], mp);
444 456 mutex_exit(mp);
445 457 }
446 458
447 459 /*
448 460 * If pid exists, find its proc, acquire its p_lock and mark it P_PR_LOCK.
449 461 * Returns the proc pointer on success, NULL on failure. sprlock() is
450 462 * really just a stripped-down version of pr_p_lock() to allow practive
451 463 * walkers like dofusers() and dumpsys() to synchronize with /proc.
452 464 */
453 465 proc_t *
454 466 sprlock_zone(pid_t pid, zoneid_t zoneid)
455 467 {
456 468 proc_t *p;
457 469 int ret;
458 470
459 471 for (;;) {
460 472 mutex_enter(&pidlock);
461 473 if ((p = prfind_zone(pid, zoneid)) == NULL) {
462 474 mutex_exit(&pidlock);
463 475 return (NULL);
464 476 }
465 477 mutex_enter(&p->p_lock);
466 478 mutex_exit(&pidlock);
467 479
468 480 if (panicstr)
469 481 return (p);
470 482
471 483 ret = sprtrylock_proc(p);
472 484 if (ret == -1) {
473 485 mutex_exit(&p->p_lock);
474 486 return (NULL);
475 487 } else if (ret == 0) {
476 488 break;
477 489 }
478 490 sprwaitlock_proc(p);
479 491 }
480 492 return (p);
481 493 }
482 494
483 495 proc_t *
484 496 sprlock(pid_t pid)
485 497 {
486 498 zoneid_t zoneid;
487 499
488 500 if (INGLOBALZONE(curproc))
489 501 zoneid = ALL_ZONES;
490 502 else
491 503 zoneid = getzoneid();
492 504 return (sprlock_zone(pid, zoneid));
493 505 }
494 506
495 507 void
496 508 sprlock_proc(proc_t *p)
497 509 {
498 510 ASSERT(MUTEX_HELD(&p->p_lock));
499 511
500 512 while (p->p_proc_flag & P_PR_LOCK) {
501 513 cv_wait(&pr_pid_cv[p->p_slot], &p->p_lock);
502 514 }
503 515
504 516 p->p_proc_flag |= P_PR_LOCK;
505 517 THREAD_KPRI_REQUEST();
506 518 }
507 519
508 520 void
509 521 sprunlock(proc_t *p)
510 522 {
511 523 if (panicstr) {
512 524 mutex_exit(&p->p_lock);
513 525 return;
514 526 }
515 527
516 528 ASSERT(p->p_proc_flag & P_PR_LOCK);
517 529 ASSERT(MUTEX_HELD(&p->p_lock));
518 530
519 531 cv_signal(&pr_pid_cv[p->p_slot]);
520 532 p->p_proc_flag &= ~P_PR_LOCK;
521 533 mutex_exit(&p->p_lock);
522 534 THREAD_KPRI_RELEASE();
523 535 }
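As the comment above sprlock_zone() notes, these routines exist so practive walkers can synchronize with /proc. A hedged fragment (not compilable on its own) of the usual pairing: sprlock() returns with p_lock held and P_PR_LOCK set, and sprunlock() clears the flag, wakes any waiters, and drops p_lock.

        /*
         * Sketch only: hold a process stable against other P_PR_LOCK
         * users for the duration of an inspection.
         */
        proc_t *p;

        if ((p = sprlock(pid)) == NULL)
                return (ESRCH);         /* no such process visible here */

        /* ... examine p while P_PR_LOCK excludes /proc and other lockers ... */

        sprunlock(p);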
524 536
525 537 void
526 538 pid_init(void)
527 539 {
528 540 int i;
529 541
530 542 pid_hashsz = 1 << highbit(v.v_proc / pid_hashlen);
531 543
532 544 pidhash = kmem_zalloc(sizeof (struct pid *) * pid_hashsz, KM_SLEEP);
533 545 procdir = kmem_alloc(sizeof (union procent) * v.v_proc, KM_SLEEP);
534 546 pr_pid_cv = kmem_zalloc(sizeof (kcondvar_t) * v.v_proc, KM_SLEEP);
535 547 proc_lock = kmem_zalloc(sizeof (struct plock) * v.v_proc, KM_SLEEP);
536 548
537 549 nproc = 1;
538 550 practive = proc_sched;
539 551 proc_sched->p_next = NULL;
540 552 procdir[0].pe_proc = proc_sched;
541 553
542 554 procentfree = &procdir[1];
543 555 for (i = 1; i < v.v_proc - 1; i++)
544 556 procdir[i].pe_next = &procdir[i+1];
545 557 procdir[i].pe_next = NULL;
546 558
547 559 HASHPID(0) = &pid0;
548 560
549 561 upcount_init();
550 562 }
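HASHPID() indexes with (pid) & (pid_hashsz - 1), so pid_init() must make pid_hashsz a power of two; it does that by rounding v.v_proc / pid_hashlen (the bucket count that gives an average chain length of pid_hashlen) up to a power of two via highbit(). A small userland sketch of that sizing, assuming illumos highbit() semantics (position of the highest set bit, counted from 1); highbit_demo() below is a stand-in, not the kernel routine:

#include <stdio.h>

/* Stand-in for the kernel's highbit(): highest set bit, counted from 1. */
static int
highbit_demo(unsigned long v)
{
        int bit = 0;

        while (v != 0) {
                bit++;
                v >>= 1;
        }
        return (bit);
}

int
main(void)
{
        int pid_hashlen = 4;                    /* desired average chain length */
        int v_proc[] = { 100, 1000, 30000 };    /* example process table sizes */
        int i;

        for (i = 0; i < 3; i++) {
                int hashsz = 1 << highbit_demo(v_proc[i] / pid_hashlen);

                printf("v_proc %6d -> pid_hashsz %6d (mask 0x%x)\n",
                    v_proc[i], hashsz, hashsz - 1);
        }
        return (0);
}

With v.v_proc at 30000 this gives 8192 buckets, keeping the expected chain length a little under the desired 4.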
551 563
552 564 proc_t *
553 565 pid_entry(int slot)
554 566 {
555 567 union procent *pep;
556 568 proc_t *prp;
557 569
558 570 ASSERT(MUTEX_HELD(&pidlock));
559 571 ASSERT(slot >= 0 && slot < v.v_proc);
560 572
561 573 pep = procdir[slot].pe_next;
562 574 if (pep >= procdir && pep < &procdir[v.v_proc])
563 575 return (NULL);
564 576 prp = procdir[slot].pe_proc;
565 577 if (prp != 0 && prp->p_stat == SIDL)
566 578 return (NULL);
567 579 return (prp);
568 580 }
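pid_entry() relies on procdir being a union free list: a free slot's pe_next points back into procdir itself (pid_init() threads the list that way), while an in-use slot's pe_proc points at a proc_t outside the array, so a pointer-range check distinguishes the two without any extra flag. A minimal userland sketch of that trick with illustrative names (entry_t, pool, objs); like the kernel check, it assumes a flat address space where such pointer comparisons behave:

#include <stdio.h>

typedef struct obj { int id; } obj_t;

/* Free entries link through pe_next; used entries point at an obj_t. */
typedef union entry {
        obj_t           *pe_obj;
        union entry     *pe_next;
} entry_t;

#define NSLOT   8

static entry_t  pool[NSLOT];
static entry_t  *freelist;
static obj_t    objs[NSLOT];

/*
 * A slot is free if its pointer lands inside the pool itself.  (The tail
 * of the free list is NULL; pid_entry() catches that case with its
 * separate pe_proc check.)
 */
static int
slot_is_free(int slot)
{
        entry_t *p = pool[slot].pe_next;

        return (p >= pool && p < &pool[NSLOT]);
}

int
main(void)
{
        int i;

        /* Thread the free list through the pool, like pid_init() does. */
        freelist = &pool[0];
        for (i = 0; i < NSLOT - 1; i++)
                pool[i].pe_next = &pool[i + 1];
        pool[i].pe_next = NULL;

        /* Take two slots off the free list and point them at objects. */
        for (i = 0; i < 2; i++) {
                entry_t *e = freelist;

                freelist = e->pe_next;
                objs[i].id = i;
                e->pe_obj = &objs[i];
        }

        for (i = 0; i < 4; i++)
                printf("slot %d: %s\n", i, slot_is_free(i) ? "free" : "in use");
        return (0);
}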
569 581
570 582 /*
571 583 * Send the specified signal to all processes whose process group ID is
572 584 * equal to 'pgid'
573 585 */
574 586
575 587 void
576 588 signal(pid_t pgid, int sig)
577 589 {
578 590 struct pid *pidp;
579 591 proc_t *prp;
580 592
581 593 mutex_enter(&pidlock);
582 594 mutex_enter(&pidlinklock);
583 595 if (pgid == 0 || (pidp = pid_lookup(pgid)) == NULL) {
584 596 mutex_exit(&pidlinklock);
585 597 mutex_exit(&pidlock);
586 598 return;
587 599 }
588 600 mutex_exit(&pidlinklock);
589 601 for (prp = pidp->pid_pglink; prp; prp = prp->p_pglink) {
590 602 mutex_enter(&prp->p_lock);
591 603 sigtoproc(prp, NULL, sig);
592 604 mutex_exit(&prp->p_lock);
593 605 }
594 606 mutex_exit(&pidlock);
595 607 }
596 608
597 609 /*
598 610 * Send the specified signal to the specified process
599 611 */
600 612
601 613 void
602 614 prsignal(struct pid *pidp, int sig)
603 615 {
604 616 if (!(pidp->pid_prinactive))
605 617 psignal(procdir[pidp->pid_prslot].pe_proc, sig);
606 618 }
607 619
608 620 #include <sys/sunddi.h>
609 621
610 622 /*
611 623 * DDI/DKI interfaces for drivers to send signals to processes
612 624 */
613 625
614 626 /*
615 627 * obtain an opaque reference to a process for signaling
616 628 */
617 629 void *
618 630 proc_ref(void)
619 631 {
620 632 struct pid *pidp;
621 633
622 634 mutex_enter(&pidlock);
623 635 pidp = curproc->p_pidp;
624 636 PID_HOLD(pidp);
625 637 mutex_exit(&pidlock);
626 638
627 639 return (pidp);
628 640 }
629 641
630 642 /*
631 643 * release a reference to a process
632 644 * - a process can exit even if a driver has a reference to it
633 645 * - one proc_unref for every proc_ref
634 646 */
635 647 void
636 648 proc_unref(void *pref)
637 649 {
638 650 mutex_enter(&pidlock);
639 651 PID_RELE((struct pid *)pref);
640 652 mutex_exit(&pidlock);
641 653 }
642 654
643 655 /*
644 656 * send a signal to a process
645 657 *
646 658 * - send the process the signal
647 659 * - if the process went away, return a -1
648 660 * - if the process is still there return 0
649 661 */
650 662 int
651 663 proc_signal(void *pref, int sig)
652 664 {
653 665 struct pid *pidp = pref;
654 666
655 667 prsignal(pidp, sig);
656 668 return (pidp->pid_prinactive ? -1 : 0);
657 669 }
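Taken together, proc_ref(), proc_signal() and proc_unref() form the DDI pattern for a driver that wants to signal a process later: the reference must be taken in the context of the process to be signalled (proc_ref() holds curproc's pid), the signal can then be posted from another context, and every reference must eventually be released. A hedged fragment with hypothetical driver state (struct mystate, sp) rather than a complete driver:

        /*
         * Sketch only: stash an opaque process reference when the process
         * asks for async notification, signal it later, release on close.
         */
        struct mystate {
                void    *ms_sigproc;    /* from proc_ref() */
        };

        /* in the requesting process's context, e.g. an ioctl handler */
        sp->ms_sigproc = proc_ref();

        /* later, when data arrives */
        if (proc_signal(sp->ms_sigproc, SIGPOLL) == -1) {
                /* the process has exited; stop signalling it */
        }

        /* on close */
        proc_unref(sp->ms_sigproc);
        sp->ms_sigproc = NULL;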
658 670
659 671
660 672 static struct upcount **upc_hash; /* a boot time allocated array */
661 673 static ulong_t upc_hashmask;
662 674 #define UPC_HASH(x, y) ((ulong_t)(x ^ y) & upc_hashmask)
663 675
664 676 /*
665 677 * Get us off the ground. Called once at boot.
666 678 */
667 679 void
668 680 upcount_init(void)
669 681 {
670 682 ulong_t upc_hashsize;
671 683
672 684 /*
673 685 * An entry per MB of memory is our current guess
674 686 */
675 687 /*
676 688 * 2^20 is a meg, so shifting right by 20 - PAGESHIFT
677 689 * converts pages to megs (without overflowing a u_int
678 690 * if you have more than 4G of memory, like ptob(physmem)/1M
679 691 * would).
680 692 */
681 693 upc_hashsize = (1 << highbit(physmem >> (20 - PAGESHIFT)));
682 694 upc_hashmask = upc_hashsize - 1;
683 695 upc_hash = kmem_zalloc(upc_hashsize * sizeof (struct upcount *),
684 696 KM_SLEEP);
685 697 }
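The sizing comment above works out as follows, assuming 4 KiB pages (PAGESHIFT = 12): shifting the page count right by 20 - 12 = 8 divides by the 256 pages per megabyte while staying in page-count units, so it cannot overflow the way ptob(physmem) / (1024 * 1024) could. A tiny sketch of the arithmetic with an assumed memory size:

#include <stdio.h>

int
main(void)
{
        unsigned long pageshift = 12;                   /* assume 4 KiB pages */
        unsigned long physmem = 4UL * 1024 * 1024;      /* pages: 16 GiB of RAM */
        unsigned long megs = physmem >> (20 - pageshift);

        /* One upcount hash entry per MB, before highbit() rounds it up. */
        printf("%lu pages -> %lu MB -> ~%lu hash entries\n",
            physmem, megs, megs);
        return (0);
}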
686 698
687 699 /*
688 700 * Increment the number of processes associated with a given uid and zoneid.
689 701 */
690 702 void
691 703 upcount_inc(uid_t uid, zoneid_t zoneid)
692 704 {
693 705 struct upcount **upc, **hupc;
694 706 struct upcount *new;
695 707
696 708 ASSERT(MUTEX_HELD(&pidlock));
697 709 new = NULL;
698 710 hupc = &upc_hash[UPC_HASH(uid, zoneid)];
699 711 top:
700 712 upc = hupc;
701 713 while ((*upc) != NULL) {
702 714 if ((*upc)->up_uid == uid && (*upc)->up_zoneid == zoneid) {
703 715 (*upc)->up_count++;
704 716 if (new) {
705 717 /*
706 718 * did not need `new' afterall.
707 719 */
708 720 kmem_free(new, sizeof (*new));
709 721 }
710 722 return;
711 723 }
712 724 upc = &(*upc)->up_next;
713 725 }
714 726
715 727 /*
716 728 * There is no entry for this <uid,zoneid> pair.
717 729 * Allocate one. If we have to drop pidlock, check
718 730 * again.
719 731 */
720 732 if (new == NULL) {
721 733 new = (struct upcount *)kmem_alloc(sizeof (*new), KM_NOSLEEP);
722 734 if (new == NULL) {
723 735 mutex_exit(&pidlock);
724 736 new = (struct upcount *)kmem_alloc(sizeof (*new),
725 737 KM_SLEEP);
726 738 mutex_enter(&pidlock);
727 739 goto top;
728 740 }
729 741 }
730 742
731 743
732 744 /*
733 745 * On the assumption that a new user is going to do some
734 746 * more forks, put the new upcount structure on the front.
735 747 */
736 748 upc = hupc;
737 749
738 750 new->up_uid = uid;
739 751 new->up_zoneid = zoneid;
740 752 new->up_count = 1;
741 753 new->up_next = *upc;
742 754
743 755 *upc = new;
744 756 }
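The allocation dance in upcount_inc() is the standard shape for allocating while holding a lock you must not sleep under: try a no-sleep allocation first, and if that fails, drop the lock, do a sleeping allocation, retake the lock, and re-run the search because the table may have changed in the meantime. A self-contained pthread sketch of the same shape (the simulated first-attempt failure and all names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
        int             key;
        int             count;
        struct node     *next;
} node_t;

static pthread_mutex_t  lock = PTHREAD_MUTEX_INITIALIZER;
static node_t           *head;

/* Simulated KM_NOSLEEP allocation: fail the first call to show the retry. */
static node_t *
alloc_nosleep(void)
{
        static int calls;

        return (calls++ == 0 ? NULL : malloc(sizeof (node_t)));
}

/* Increment the count for key, allocating a node if needed.  Called locked. */
static void
count_inc(int key)
{
        node_t *new = NULL;
        node_t *np;

top:
        for (np = head; np != NULL; np = np->next) {
                if (np->key == key) {
                        np->count++;
                        if (new != NULL)
                                free(new);      /* did not need it after all */
                        return;
                }
        }

        if (new == NULL) {
                if ((new = alloc_nosleep()) == NULL) {
                        /* Drop the lock for a blocking (KM_SLEEP-like) alloc. */
                        pthread_mutex_unlock(&lock);
                        new = malloc(sizeof (node_t));
                        pthread_mutex_lock(&lock);
                        goto top;       /* the list may have changed */
                }
        }

        new->key = key;
        new->count = 1;
        new->next = head;
        head = new;
}

int
main(void)
{
        pthread_mutex_lock(&lock);
        count_inc(42);
        count_inc(42);
        pthread_mutex_unlock(&lock);

        printf("key 42 count %d\n", head->count);
        return (0);
}

Re-running the search after reacquiring the lock is what makes the "did not need `new' after all" free above necessary: another thread may have inserted the same <uid, zoneid> entry while the lock was dropped.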
745 757
746 758 /*
747 759 * Decrement the number of processes a given uid and zoneid has.
748 760 */
749 761 void
750 762 upcount_dec(uid_t uid, zoneid_t zoneid)
751 763 {
752 764 struct upcount **upc;
753 765 struct upcount *done;
754 766
755 767 ASSERT(MUTEX_HELD(&pidlock));
756 768
757 769 upc = &upc_hash[UPC_HASH(uid, zoneid)];
758 770 while ((*upc) != NULL) {
759 771 if ((*upc)->up_uid == uid && (*upc)->up_zoneid == zoneid) {
760 772 (*upc)->up_count--;
761 773 if ((*upc)->up_count == 0) {
762 774 done = *upc;
763 775 *upc = (*upc)->up_next;
764 776 kmem_free(done, sizeof (*done));
765 777 }
766 778 return;
767 779 }
768 780 upc = &(*upc)->up_next;
769 781 }
770 782 cmn_err(CE_PANIC, "decr_upcount-off the end");
771 783 }
772 784
773 785 /*
774 786 * Returns the number of processes a uid has.
775 787 * Non-existent uid's are assumed to have no processes.
776 788 */
777 789 int
778 790 upcount_get(uid_t uid, zoneid_t zoneid)
779 791 {
780 792 struct upcount *upc;
781 793
782 794 ASSERT(MUTEX_HELD(&pidlock));
783 795
784 796 upc = upc_hash[UPC_HASH(uid, zoneid)];
785 797 while (upc != NULL) {
786 798 if (upc->up_uid == uid && upc->up_zoneid == zoneid) {
787 799 return (upc->up_count);
788 800 }
789 801 upc = upc->up_next;
790 802 }
791 803 return (0);
792 804 }