static pid_t mpid = FAMOUS_PIDS;	/* one more than the last famous pid */
static union procent *procdir;
static union procent *procentfree;

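/*
 * Find the struct pid for the given pid in the pid hash chain.
 * The caller must hold pidlinklock; returns NULL if the pid is not
 * currently in use.
 */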
static struct pid *
pid_lookup(pid_t pid)
{
	struct pid *pidp;

	ASSERT(MUTEX_HELD(&pidlinklock));

	for (pidp = HASHPID(pid); pidp; pidp = pidp->pid_link) {
		if (pidp->pid_id == pid) {
			ASSERT(pidp->pid_ref > 0);
			break;
		}
	}
	return (pidp);
}

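/*
 * Locked wrapper around pid_lookup() for callers that do not already
 * hold pidlinklock.
 */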
struct pid *
pid_find(pid_t pid)
{
	struct pid *pidp;

	mutex_enter(&pidlinklock);
	pidp = pid_lookup(pid);
	mutex_exit(&pidlinklock);

	return (pidp);
}

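/*
 * Set minpid, the lowest pid value that pid allocation will use:
 * the current mpid, or jump_pid if it is set and larger.
 */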
void
pid_setmin(void)
{
	if (jump_pid && jump_pid > mpid)
		minpid = mpid = jump_pid;
	else
		minpid = mpid;
}

/*
 * When prslots are simply used as an index to determine a process' p_lock,
 * adjacent prslots share adjacent p_locks.  On machines where the size
 * of a mutex is smaller than that of a cache line (which, as of this writing,
 * is true for all machines on which Solaris runs), this can potentially
 * induce false sharing.  The standard solution for false sharing is to pad
 * out one's data structures (in this case, struct plock).  However,
 * given the size and (generally) sparse use of the proc_lock array, this
 * is suboptimal.  We therefore stride through the proc_lock array with
 * a stride of PLOCK_SHIFT.  PLOCK_SHIFT should be defined as:
 *
 *   log_2 (coherence_granularity / sizeof (kmutex_t))
 */
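
/*
 * A minimal sketch (not necessarily the actual implementation) of how a
 * prslot could be mapped to a lock slot under the striding scheme
 * described above.  The function name is illustrative; proc_lock,
 * v.v_proc (the number of prslots) and PLOCK_SHIFT are assumed to be
 * provided by the surrounding source.
 */
static kmutex_t *
pid_getlockslot(int prslot)
{
	/* Prefix of the prslot space that fills whole stride groups. */
	int even = (v.v_proc >> PLOCK_SHIFT) << PLOCK_SHIFT;
	int perlap = even >> PLOCK_SHIFT;	/* prslots per lap */

	/* The leftover tail keeps the identity mapping. */
	if (prslot >= even)
		return (&proc_lock[prslot]);

	/*
	 * Interleave the rest: adjacent prslots are spread far apart in
	 * the lock array (so their mutexes do not share a cache line),
	 * while the prslot-to-lockslot mapping remains one-to-one.
	 */
	return (&proc_lock[(perlap * (prslot % (1 << PLOCK_SHIFT))) +
	    (prslot / (1 << PLOCK_SHIFT))]);
}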