Print this page
XXXX adding PID information to netstat output
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/sys/strsubr.h
+++ new/usr/src/uts/common/sys/strsubr.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 #ifndef _SYS_STRSUBR_H
31 31 #define _SYS_STRSUBR_H
32 32
33 33 /*
34 34 * WARNING:
35 35 * Everything in this file is private, belonging to the
36 36 * STREAMS subsystem. The only guarantee made about the
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
37 37 * contents of this file is that if you include it, your
38 38 * code will not port to the next release.
39 39 */
40 40 #include <sys/stream.h>
41 41 #include <sys/stropts.h>
42 42 #include <sys/kstat.h>
43 43 #include <sys/uio.h>
44 44 #include <sys/proc.h>
45 45 #include <sys/netstack.h>
46 46 #include <sys/modhash.h>
47 +#include <sys/pidnode.h>
47 48
48 49 #ifdef __cplusplus
49 50 extern "C" {
50 51 #endif
51 52
52 53 /*
53 54 * In general, the STREAMS locks are disjoint; they are only held
54 55 * locally, and not simultaneously by a thread. However, module
55 56 * code, including at the stream head, requires some locks to be
56 57 * acquired in order for its safety.
57 58 * 1. Stream level claim. This prevents the value of q_next
58 59 * from changing while module code is executing.
59 60 * 2. Queue level claim. This prevents the value of q_ptr
60 61 * from changing while put or service code is executing.
61 62 * In addition, it provides for queue single-threading
62 63 * for QPAIR and PERQ MT-safe modules.
63 64 * 3. Stream head lock. May be held by the stream head module
64 65 * to implement a read/write/open/close monitor.
65 66 * Note: that the only types of twisted stream supported are
66 67 * the pipe and transports which have read and write service
67 68 * procedures on both sides of the twist.
68 69 * 4. Queue lock. May be acquired by utility routines on
69 70 * behalf of a module.
70 71 */
71 72
72 73 /*
73 74 * In general, sd_lock protects the consistency of the stdata
74 75 * structure. Additionally, it is used with sd_monitor
75 76 * to implement an open/close monitor. In particular, it protects
76 77 * the following fields:
77 78 * sd_iocblk
78 79 * sd_flag
79 80 * sd_copyflag
80 81 * sd_iocid
81 82 * sd_iocwait
82 83 * sd_sidp
83 84 * sd_pgidp
84 85 * sd_wroff
85 86 * sd_tail
86 87 * sd_rerror
87 88 * sd_werror
88 89 * sd_pushcnt
89 90 * sd_sigflags
90 91 * sd_siglist
91 92 * sd_pollist
92 93 * sd_mark
93 94 * sd_closetime
94 95 * sd_wakeq
95 96 * sd_maxblk
96 97 *
97 98 * The following fields are modified only by the allocator, which
98 99 * has exclusive access to them at that time:
99 100 * sd_wrq
100 101 * sd_strtab
101 102 *
102 103 * The following field is protected by the overlying file system
103 104 * code, guaranteeing single-threading of opens:
104 105 * sd_vnode
105 106 *
106 107 * Stream-level locks should be acquired before any queue-level locks
107 108 * are acquired.
108 109 *
109 110 * The stream head write queue lock(sd_wrq) is used to protect the
110 111 * fields qn_maxpsz and qn_minpsz because freezestr() which is
111 112 * necessary for strqset() only gets the queue lock.
112 113 */
113 114
114 115 /*
115 116 * Function types for the parameterized stream head.
116 117 * The msgfunc_t takes the parameters:
117 118 * msgfunc(vnode_t *vp, mblk_t *mp, strwakeup_t *wakeups,
118 119 * strsigset_t *firstmsgsigs, strsigset_t *allmsgsigs,
119 120 * strpollset_t *pollwakeups);
120 121 * It returns an optional message to be processed by the stream head.
121 122 *
122 123 * The parameters for errfunc_t are:
123 124 * errfunc(vnode *vp, int ispeek, int *clearerr);
124 125 * It returns an errno and zero if there was no pending error.
125 126 */
126 127 typedef uint_t strwakeup_t;
127 128 typedef uint_t strsigset_t;
128 129 typedef short strpollset_t;
129 130 typedef uintptr_t callbparams_id_t;
130 131 typedef mblk_t *(*msgfunc_t)(vnode_t *, mblk_t *, strwakeup_t *,
131 132 strsigset_t *, strsigset_t *, strpollset_t *);
132 133 typedef int (*errfunc_t)(vnode_t *, int, int *);
133 134
134 135 /*
135 136 * Per stream sd_lock in putnext may be replaced by per cpu stream_putlocks
136 137 * each living in a separate cache line. putnext/canputnext grabs only one of
137 138 * stream_putlocks while strlock() (called on behalf of insertq()/removeq())
138 139 * acquires all stream_putlocks. Normally stream_putlocks are only employed
139 140 * for highly contended streams that have SQ_CIPUT queues in the critical path
140 141 * (e.g. NFS/UDP stream).
141 142 *
142 143 * stream_putlocks are dynamically assigned to stdata structure through
143 144 * sd_ciputctrl pointer possibly when a stream is already in use. Since
144 145 * strlock() uses stream_putlocks only under sd_lock acquiring sd_lock when
145 146 * assigning stream_putlocks to the stream ensures synchronization with
146 147 * strlock().
147 148 *
148 149 * For lock ordering purposes stream_putlocks are treated as the extension of
149 150 * sd_lock and are always grabbed right after grabbing sd_lock and released
150 151 * right before releasing sd_lock except putnext/canputnext where only one of
151 152 * stream_putlocks locks is used and where it is the first lock to grab.
152 153 */
153 154
154 155 typedef struct ciputctrl_str {
155 156 union _ciput_un {
156 157 uchar_t pad[64];
157 158 struct _ciput_str {
158 159 kmutex_t ciput_lck;
159 160 ushort_t ciput_cnt;
160 161 } ciput_str;
161 162 } ciput_un;
162 163 } ciputctrl_t;
163 164
164 165 #define ciputctrl_lock ciput_un.ciput_str.ciput_lck
165 166 #define ciputctrl_count ciput_un.ciput_str.ciput_cnt
166 167
167 168 /*
168 169 * Header for a stream: interface to rest of system.
169 170 *
170 171 * NOTE: While this is a consolidation-private structure, some unbundled and
171 172 * third-party products inappropriately make use of some of the fields.
172 173 * As such, please take care to not gratuitously change any offsets of
173 174 * existing members.
174 175 */
175 176 typedef struct stdata {
176 177 struct queue *sd_wrq; /* write queue */
177 178 struct msgb *sd_iocblk; /* return block for ioctl */
178 179 struct vnode *sd_vnode; /* pointer to associated vnode */
179 180 struct streamtab *sd_strtab; /* pointer to streamtab for stream */
180 181 uint_t sd_flag; /* state/flags */
181 182 uint_t sd_iocid; /* ioctl id */
182 183 struct pid *sd_sidp; /* controlling session info */
183 184 struct pid *sd_pgidp; /* controlling process group info */
184 185 ushort_t sd_tail; /* reserved space in written mblks */
185 186 ushort_t sd_wroff; /* write offset */
186 187 int sd_rerror; /* error to return on read ops */
187 188 int sd_werror; /* error to return on write ops */
188 189 int sd_pushcnt; /* number of pushes done on stream */
189 190 int sd_sigflags; /* logical OR of all siglist events */
190 191 struct strsig *sd_siglist; /* pid linked list to rcv SIGPOLL sig */
191 192 struct pollhead sd_pollist; /* list of all pollers to wake up */
192 193 struct msgb *sd_mark; /* "marked" message on read queue */
193 194 clock_t sd_closetime; /* time to wait to drain q in close */
194 195 kmutex_t sd_lock; /* protect head consistency */
195 196 kcondvar_t sd_monitor; /* open/close/push/pop monitor */
196 197 kcondvar_t sd_iocmonitor; /* ioctl single-threading */
197 198 kcondvar_t sd_refmonitor; /* sd_refcnt monitor */
198 199 	ssize_t sd_qn_minpsz;	/* These two fields are performance */
199 200 ssize_t sd_qn_maxpsz; /* enhancements, cache the values in */
200 201 /* the stream head so we don't have */
201 202 /* to ask the module below the stream */
202 203 /* head to get this information. */
203 204 struct stdata *sd_mate; /* pointer to twisted stream mate */
204 205 kthread_id_t sd_freezer; /* thread that froze stream */
205 206 kmutex_t sd_reflock; /* Protects sd_refcnt */
206 207 int sd_refcnt; /* number of claimstr */
207 208 uint_t sd_wakeq; /* strwakeq()'s copy of sd_flag */
208 209 struct queue *sd_struiordq; /* sync barrier struio() read queue */
209 210 struct queue *sd_struiowrq; /* sync barrier struio() write queue */
210 211 char sd_struiodnak; /* defer NAK of M_IOCTL by rput() */
211 212 struct msgb *sd_struionak; /* pointer M_IOCTL mblk(s) to NAK */
212 213 caddr_t sd_t_audit_data; /* For audit purposes only */
213 214 ssize_t sd_maxblk; /* maximum message block size */
214 215 uint_t sd_rput_opt; /* options/flags for strrput */
215 216 uint_t sd_wput_opt; /* options/flags for write/putmsg */
216 217 uint_t sd_read_opt; /* options/flags for strread */
217 218 msgfunc_t sd_rprotofunc; /* rput M_*PROTO routine */
218 219 msgfunc_t sd_rputdatafunc; /* read M_DATA routine */
219 220 msgfunc_t sd_rmiscfunc; /* rput routine (non-data/proto) */
220 221 msgfunc_t sd_wputdatafunc; /* wput M_DATA routine */
221 222 errfunc_t sd_rderrfunc; /* read side error callback */
222 223 errfunc_t sd_wrerrfunc; /* write side error callback */
223 224 /*
224 225 * support for low contention concurrent putnext.
225 226 */
226 227 ciputctrl_t *sd_ciputctrl;
227 228 uint_t sd_nciputctrl;
228 229
229 230 int sd_anchor; /* position of anchor in stream */
230 231 /*
231 232 * Service scheduling at the stream head.
232 233 */
233 234 kmutex_t sd_qlock;
↓ open down ↓ |
177 lines elided |
↑ open up ↑ |
234 235 struct queue *sd_qhead; /* Head of queues to be serviced. */
235 236 struct queue *sd_qtail; /* Tail of queues to be serviced. */
236 237 void *sd_servid; /* Service ID for bckgrnd schedule */
237 238 ushort_t sd_svcflags; /* Servicing flags */
238 239 short sd_nqueues; /* Number of queues in the list */
239 240 kcondvar_t sd_qcv; /* Waiters for qhead to become empty */
240 241 kcondvar_t sd_zcopy_wait;
241 242 uint_t sd_copyflag; /* copy-related flags */
242 243 zoneid_t sd_anchorzone; /* Allow removal from same zone only */
243 244 struct msgb *sd_cmdblk; /* reply from _I_CMD */
245 + /*
246 + * pids associated with this stream head.
247 + */
248 + avl_tree_t sd_pid_tree;
249 + kmutex_t sd_pid_tree_lock;
244 250 } stdata_t;
245 251
246 252 /*
247 253 * stdata servicing flags.
248 254 */
249 255 #define STRS_WILLSERVICE 0x01
250 256 #define STRS_SCHEDULED 0x02
251 257
252 258 #define STREAM_NEEDSERVICE(stp) ((stp)->sd_qhead != NULL)
253 259
254 260 /*
255 261 * stdata flag field defines
256 262 */
257 263 #define IOCWAIT 0x00000001 /* Someone is doing an ioctl */
258 264 #define RSLEEP 0x00000002 /* Someone wants to read/recv msg */
259 265 #define WSLEEP 0x00000004 /* Someone wants to write */
260 266 #define STRPRI 0x00000008 /* An M_PCPROTO is at stream head */
261 267 #define STRHUP 0x00000010 /* Device has vanished */
262 268 #define STWOPEN 0x00000020 /* waiting for 1st open */
263 269 #define STPLEX 0x00000040 /* stream is being multiplexed */
264 270 #define STRISTTY 0x00000080 /* stream is a terminal */
265 271 #define STRGETINPROG 0x00000100 /* (k)strgetmsg is running */
266 272 #define IOCWAITNE 0x00000200 /* STR_NOERROR ioctl running */
267 273 #define STRDERR 0x00000400 /* fatal read error from M_ERROR */
268 274 #define STWRERR 0x00000800 /* fatal write error from M_ERROR */
269 275 #define STRDERRNONPERSIST 0x00001000 /* nonpersistent read errors */
270 276 #define STWRERRNONPERSIST 0x00002000 /* nonpersistent write errors */
271 277 #define STRCLOSE 0x00004000 /* wait for a close to complete */
272 278 #define SNDMREAD 0x00008000 /* used for read notification */
273 279 #define OLDNDELAY 0x00010000 /* use old TTY semantics for */
274 280 /* NDELAY reads and writes */
275 281 /* 0x00020000 unused */
276 282 /* 0x00040000 unused */
277 283 #define STRTOSTOP 0x00080000 /* block background writes */
278 284 #define STRCMDWAIT 0x00100000 /* someone is doing an _I_CMD */
279 285 /* 0x00200000 unused */
280 286 #define STRMOUNT 0x00400000 /* stream is mounted */
281 287 #define STRNOTATMARK 0x00800000 /* Not at mark (when empty read q) */
282 288 #define STRDELIM 0x01000000 /* generate delimited messages */
283 289 #define STRATMARK 0x02000000 /* At mark (due to MSGMARKNEXT) */
284 290 #define STZCNOTIFY 0x04000000 /* wait for zerocopy mblk to be acked */
285 291 #define STRPLUMB 0x08000000 /* push/pop pending */
286 292 #define STREOF 0x10000000 /* End-of-file indication */
287 293 #define STREOPENFAIL 0x20000000 /* indicates if re-open has failed */
288 294 #define STRMATE 0x40000000 /* this stream is a mate */
289 295 #define STRHASLINKS 0x80000000 /* I_LINKs under this stream */
290 296
291 297 /*
292 298 * Copy-related flags (sd_copyflag), set by SO_COPYOPT.
293 299 */
294 300 #define STZCVMSAFE 0x00000001 /* safe to borrow file (segmapped) */
295 301 /* pages instead of bcopy */
296 302 #define STZCVMUNSAFE 0x00000002 /* unsafe to borrow file pages */
297 303 #define STRCOPYCACHED 0x00000004 /* copy should NOT bypass cache */
298 304
299 305 /*
300 306 * Options and flags for strrput (sd_rput_opt)
301 307 */
302 308 #define SR_POLLIN 0x00000001 /* pollwakeup needed for band0 data */
303 309 #define SR_SIGALLDATA 0x00000002 /* Send SIGPOLL for all M_DATA */
304 310 #define SR_CONSOL_DATA 0x00000004 /* Consolidate M_DATA onto q_last */
305 311 #define SR_IGN_ZEROLEN 0x00000008 /* Ignore zero-length M_DATA */
306 312
307 313 /*
308 314 * Options and flags for strwrite/strputmsg (sd_wput_opt)
309 315 */
310 316 #define SW_SIGPIPE 0x00000001 /* Send SIGPIPE for write error */
311 317 #define SW_RECHECK_ERR 0x00000002 /* Recheck errors in strwrite loop */
312 318 #define SW_SNDZERO 0x00000004 /* send 0-length msg down pipe/FIFO */
313 319
314 320 /*
315 321 * Options and flags for strread (sd_read_opt)
316 322 */
317 323 #define RD_MSGDIS 0x00000001 /* read msg discard */
318 324 #define RD_MSGNODIS 0x00000002 /* read msg no discard */
319 325 #define RD_PROTDAT 0x00000004 /* read M_[PC]PROTO contents as data */
320 326 #define RD_PROTDIS 0x00000008 /* discard M_[PC]PROTO blocks and */
321 327 /* retain data blocks */
322 328 /*
323 329 * Flags parameter for strsetrputhooks() and strsetwputhooks().
324 330 * These flags define the interface for setting the above internal
325 331 * flags in sd_rput_opt and sd_wput_opt.
326 332 */
327 333 #define SH_CONSOL_DATA 0x00000001 /* Consolidate M_DATA onto q_last */
328 334 #define SH_SIGALLDATA 0x00000002 /* Send SIGPOLL for all M_DATA */
329 335 #define SH_IGN_ZEROLEN 0x00000004 /* Drop zero-length M_DATA */
330 336
331 337 #define SH_SIGPIPE 0x00000100 /* Send SIGPIPE for write error */
332 338 #define SH_RECHECK_ERR 0x00000200 /* Recheck errors in strwrite loop */
333 339
334 340 /*
335 341 * Each queue points to a sync queue (the inner perimeter) which keeps
336 342 * track of the number of threads that are inside a given queue (sq_count)
337 343 * and also is used to implement the asynchronous putnext
338 344 * (by queuing messages if the queue can not be entered.)
339 345 *
340 346 * Messages are queued on sq_head/sq_tail including deferred qwriter(INNER)
341 347 * messages. The sq_head/sq_tail list is a singly-linked list with
342 348 * b_queue recording the queue and b_prev recording the function to
343 349 * be called (either the put procedure or a qwriter callback function.)
344 350 *
345 351 * The sq_count counter tracks the number of threads that are
346 352 * executing inside the perimeter or (in the case of outer perimeters)
347 353 * have some work queued for them relating to the perimeter. The sq_rmqcount
348 354 * counter tracks the subset which are in removeq() (usually invoked from
349 355 * qprocsoff(9F)).
350 356 *
351 357 * In addition a module writer can declare that the module has an outer
352 358 * perimeter (by setting D_MTOUTPERIM) in which case all inner perimeter
353 359 * syncq's for the module point (through sq_outer) to an outer perimeter
354 360 * syncq. The outer perimeter consists of the doubly linked list (sq_onext and
355 361  * sq_oprev) linking all the inner perimeter syncq's with the outer perimeter
356 362 * syncq. This is used to implement qwriter(OUTER) (an asynchronous way of
357 363 * getting exclusive access at the outer perimeter) and outer_enter/exit
358 364 * which are used by the framework to acquire exclusive access to the outer
359 365 * perimeter during open and close of modules that have set D_MTOUTPERIM.
360 366 *
361 367 * In the inner perimeter case sq_save is available for use by machine
362 368 * dependent code. sq_head/sq_tail are used to queue deferred messages on
363 369 * the inner perimeter syncqs and to queue become_writer requests on the
364 370 * outer perimeter syncqs.
365 371 *
366 372 * Note: machine dependent optimized versions of putnext may depend
367 373 * on the order of sq_flags and sq_count (so that they can e.g.
368 374 * read these two fields in a single load instruction.)
369 375 *
370 376 * Per perimeter SQLOCK/sq_count in putnext/put may be replaced by per cpu
371 377 * sq_putlocks/sq_putcounts each living in a separate cache line. Obviously
372 378 * sq_putlock[x] protects sq_putcount[x]. putnext/put routine will grab only 1
373 379 * of sq_putlocks and update only 1 of sq_putcounts. strlock() and many
374 380 * other routines in strsubr.c and ddi.c will grab all sq_putlocks (as well as
375 381 * SQLOCK) and figure out the count value as the sum of sq_count and all of
376 382 * sq_putcounts. The idea is to make critical fast path -- putnext -- much
377 383 * faster at the expense of much less often used slower path like
378 384 * strlock(). One known case where entersq/strlock is executed pretty often is
379 385 * SpecWeb but since IP is SQ_CIOC and socket TCP/IP stream is nextless
380 386 * there's no need to grab multiple sq_putlocks and look at sq_putcounts. See
381 387 * strsubr.c for more comments.
382 388 *
383 389 * Note regular SQLOCK and sq_count are still used in many routines
384 390 * (e.g. entersq(), rwnext()) in the same way as before sq_putlocks were
385 391 * introduced.
386 392 *
387 393 * To understand when all sq_putlocks need to be held and all sq_putcounts
388 394 * need to be added up one needs to look closely at putnext code. Basically if
389 395 * a routine like e.g. wait_syncq() needs to be sure that perimeter is empty
390 396 * all sq_putlocks/sq_putcounts need to be held/added up. On the other hand
391 397 * there's no need to hold all sq_putlocks and count all sq_putcounts in
392 398  * routines like leavesq()/dropsq() etc., since they are usually exit
393 399 * counterparts of entersq/outer_enter() and etc. which have already either
394 400  * prevented put entry points from executing or did not care about put
395 401 * entrypoints. entersq() doesn't need to care about sq_putlocks/sq_putcounts
396 402 * if the entry point has a shared access since put has the highest degree of
397 403 * concurrency and such entersq() does not intend to block out put
398 404 * entrypoints.
399 405 *
400 406 * Before sq_putcounts were introduced the standard way to wait for perimeter
401 407 * to become empty was:
402 408 *
403 409 * mutex_enter(SQLOCK(sq));
404 410 * while (sq->sq_count > 0) {
405 411 * sq->sq_flags |= SQ_WANTWAKEUP;
406 412 * cv_wait(&sq->sq_wait, SQLOCK(sq));
407 413 * }
408 414 * mutex_exit(SQLOCK(sq));
409 415 *
410 416 * The new way is:
411 417 *
412 418 * mutex_enter(SQLOCK(sq));
413 419 * count = sq->sq_count;
414 420 * SQ_PUTLOCKS_ENTER(sq);
415 421 * SUM_SQ_PUTCOUNTS(sq, count);
416 422 * while (count != 0) {
417 423 * sq->sq_flags |= SQ_WANTWAKEUP;
418 424 * SQ_PUTLOCKS_EXIT(sq);
419 425 * cv_wait(&sq->sq_wait, SQLOCK(sq));
420 426 * count = sq->sq_count;
421 427 * SQ_PUTLOCKS_ENTER(sq);
422 428 * SUM_SQ_PUTCOUNTS(sq, count);
423 429 * }
424 430 * SQ_PUTLOCKS_EXIT(sq);
425 431 * mutex_exit(SQLOCK(sq));
426 432 *
427 433 * Note that SQ_WANTWAKEUP is set before dropping SQ_PUTLOCKS. This makes sure
428 434 * putnext won't skip a wakeup.
429 435 *
430 436 * sq_putlocks are treated as the extension of SQLOCK for lock ordering
431 437 * purposes and are always grabbed right after grabbing SQLOCK and released
432 438 * right before releasing SQLOCK. This also allows dynamic creation of
433 439 * sq_putlocks while holding SQLOCK (by making sq_ciputctrl non null even when
434 440 * the stream is already in use). Only in putnext one of sq_putlocks
435 441 * is grabbed instead of SQLOCK. putnext return path remembers what counter it
436 442 * incremented and decrements the right counter on its way out.
437 443 */
438 444
439 445 struct syncq {
440 446 kmutex_t sq_lock; /* atomic access to syncq */
441 447 uint16_t sq_count; /* # threads inside */
442 448 uint16_t sq_flags; /* state and some type info */
443 449 /*
444 450 * Distributed syncq scheduling
445 451 * The list of queue's is handled by sq_head and
446 452 * sq_tail fields.
447 453 *
448 454 * The list of events is handled by the sq_evhead and sq_evtail
449 455 * fields.
450 456 */
451 457 queue_t *sq_head; /* queue of deferred messages */
452 458 queue_t *sq_tail; /* queue of deferred messages */
453 459 mblk_t *sq_evhead; /* Event message on the syncq */
454 460 mblk_t *sq_evtail;
455 461 uint_t sq_nqueues; /* # of queues on this sq */
456 462 /*
457 463 * Concurrency and condition variables
458 464 */
459 465 uint16_t sq_type; /* type (concurrency) of syncq */
460 466 uint16_t sq_rmqcount; /* # threads inside removeq() */
461 467 kcondvar_t sq_wait; /* block on this sync queue */
462 468 kcondvar_t sq_exitwait; /* waiting for thread to leave the */
463 469 /* inner perimeter */
464 470 /*
465 471 * Handling synchronous callbacks such as qtimeout and qbufcall
466 472 */
467 473 ushort_t sq_callbflags; /* flags for callback synchronization */
468 474 callbparams_id_t sq_cancelid; /* id of callback being cancelled */
469 475 struct callbparams *sq_callbpend; /* Pending callbacks */
470 476
471 477 /*
472 478 * Links forming an outer perimeter from one outer syncq and
473 479 * a set of inner sync queues.
474 480 */
475 481 struct syncq *sq_outer; /* Pointer to outer perimeter */
476 482 struct syncq *sq_onext; /* Linked list of syncq's making */
477 483 struct syncq *sq_oprev; /* up the outer perimeter. */
478 484 /*
479 485 * support for low contention concurrent putnext.
480 486 */
481 487 ciputctrl_t *sq_ciputctrl;
482 488 uint_t sq_nciputctrl;
483 489 /*
484 490 * Counter for the number of threads wanting to become exclusive.
485 491 */
486 492 uint_t sq_needexcl;
487 493 /*
488 494 * These two fields are used for scheduling a syncq for
489 495 * background processing. The sq_svcflag is protected by
490 496 * SQLOCK lock.
491 497 */
492 498 struct syncq *sq_next; /* for syncq scheduling */
493 499 void * sq_servid;
494 500 uint_t sq_servcount; /* # pending background threads */
495 501 uint_t sq_svcflags; /* Scheduling flags */
496 502 clock_t sq_tstamp; /* Time when was enabled */
497 503 /*
498 504 * Maximum priority of the queues on this syncq.
499 505 */
500 506 pri_t sq_pri;
501 507 };
502 508 typedef struct syncq syncq_t;
503 509
504 510 /*
505 511 * sync queue scheduling flags (for sq_svcflags).
506 512 */
507 513 #define SQ_SERVICE 0x1 /* being serviced */
508 514 #define SQ_BGTHREAD 0x2 /* awaiting service by bg thread */
509 515 #define SQ_DISABLED 0x4 /* don't put syncq in service list */
510 516
511 517 /*
512 518 * FASTPUT bit in sd_count/putcount.
513 519 */
514 520 #define SQ_FASTPUT 0x8000
515 521 #define SQ_FASTMASK 0x7FFF
516 522
517 523 /*
518 524 * sync queue state flags
519 525 */
520 526 #define SQ_EXCL 0x0001 /* exclusive access to inner */
521 527 /* perimeter */
522 528 #define SQ_BLOCKED 0x0002 /* qprocsoff */
523 529 #define SQ_FROZEN 0x0004 /* freezestr */
524 530 #define SQ_WRITER 0x0008 /* qwriter(OUTER) pending or running */
525 531 #define SQ_MESSAGES 0x0010 /* messages on syncq */
526 532 #define SQ_WANTWAKEUP 0x0020 /* do cv_broadcast on sq_wait */
527 533 #define SQ_WANTEXWAKEUP 0x0040 /* do cv_broadcast on sq_exitwait */
528 534 #define SQ_EVENTS 0x0080 /* Events pending */
529 535 #define SQ_QUEUED (SQ_MESSAGES | SQ_EVENTS)
530 536 #define SQ_FLAGMASK 0x00FF
531 537
532 538 /*
533 539 * Test a queue to see if inner perimeter is exclusive.
534 540 */
535 541 #define PERIM_EXCL(q) ((q)->q_syncq->sq_flags & SQ_EXCL)
536 542
537 543 /*
538 544 * If any of these flags are set it is not possible for a thread to
539 545 * enter a put or service procedure. Instead it must either block
540 546 * or put the message on the syncq.
541 547 */
542 548 #define SQ_GOAWAY (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN|SQ_WRITER|\
543 549 SQ_QUEUED)
544 550 /*
545 551  * If any of these flags are set it is not possible to drain the syncq
546 552 */
547 553 #define SQ_STAYAWAY (SQ_BLOCKED|SQ_FROZEN|SQ_WRITER)
548 554
549 555 /*
550 556 * Flags to trigger syncq tail processing.
551 557 */
552 558 #define SQ_TAIL (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)
553 559
554 560 /*
555 561 * Syncq types (stored in sq_type)
556 562 * The SQ_TYPES_IN_FLAGS (ciput) are also stored in sq_flags
557 563 * for performance reasons. Thus these type values have to be in the low
558 564 * 16 bits and not conflict with the sq_flags values above.
559 565 *
560 566 * Notes:
561 567 * - putnext() and put() assume that the put procedures have the highest
562 568 * degree of concurrency. Thus if any of the SQ_CI* are set then SQ_CIPUT
563 569 * has to be set. This restriction can be lifted by adding code to putnext
564 570 * and put that check that sq_count == 0 like entersq does.
565 571  *  - putnext() and put() do not currently handle !SQ_COPUT
566 572 * - In order to implement !SQ_COCB outer_enter has to be fixed so that
567 573 * the callback can be cancelled while cv_waiting in outer_enter.
568 574 * - If SQ_CISVC needs to be implemented, qprocsoff() needs to wait
569 575 * for the currently running services to stop (wait for QINSERVICE
570 576  *    to go off). disable_svc called from qprocsoff disables only
571 577 * services that will be run in future.
572 578 *
573 579 * All the SQ_CO flags are set when there is no outer perimeter.
574 580 */
575 581 #define SQ_CIPUT 0x0100 /* Concurrent inner put proc */
576 582 #define SQ_CISVC 0x0200 /* Concurrent inner svc proc */
577 583 #define SQ_CIOC 0x0400 /* Concurrent inner open/close */
578 584 #define SQ_CICB 0x0800 /* Concurrent inner callback */
579 585 #define SQ_COPUT 0x1000 /* Concurrent outer put proc */
580 586 #define SQ_COSVC 0x2000 /* Concurrent outer svc proc */
581 587 #define SQ_COOC 0x4000 /* Concurrent outer open/close */
582 588 #define SQ_COCB 0x8000 /* Concurrent outer callback */
583 589
584 590 /* Types also kept in sq_flags for performance */
585 591 #define SQ_TYPES_IN_FLAGS (SQ_CIPUT)
586 592
587 593 #define SQ_CI (SQ_CIPUT|SQ_CISVC|SQ_CIOC|SQ_CICB)
588 594 #define SQ_CO (SQ_COPUT|SQ_COSVC|SQ_COOC|SQ_COCB)
589 595 #define SQ_TYPEMASK (SQ_CI|SQ_CO)
590 596
591 597 /*
592 598 * Flag combinations passed to entersq and leavesq to specify the type
593 599 * of entry point.
594 600 */
595 601 #define SQ_PUT (SQ_CIPUT|SQ_COPUT)
596 602 #define SQ_SVC (SQ_CISVC|SQ_COSVC)
597 603 #define SQ_OPENCLOSE (SQ_CIOC|SQ_COOC)
598 604 #define SQ_CALLBACK (SQ_CICB|SQ_COCB)
599 605
600 606 /*
601 607 * Other syncq types which are not copied into flags.
602 608 */
603 609 #define SQ_PERMOD 0x01 /* Syncq is PERMOD */
604 610
605 611 /*
606 612 * Asynchronous callback qun*** flag.
607 613 * The mechanism these flags are used in is one where callbacks enter
608 614 * the perimeter thanks to framework support. To use this mechanism
609 615 * the q* and qun* flavors of the callback routines must be used.
610 616 * e.g. qtimeout and quntimeout. The synchronization provided by the flags
611 617 * avoids deadlocks between blocking qun* routines and the perimeter
612 618 * lock.
613 619 */
614 620 #define SQ_CALLB_BYPASSED 0x01 /* bypassed callback fn */
615 621
616 622 /*
617 623 * Cancel callback mask.
618 624 * The mask expands as the number of cancelable callback types grows
619 625 * Note - separate callback flag because different callbacks have
620 626 * overlapping id space.
621 627 */
622 628 #define SQ_CALLB_CANCEL_MASK (SQ_CANCEL_TOUT|SQ_CANCEL_BUFCALL)
623 629
624 630 #define SQ_CANCEL_TOUT 0x02 /* cancel timeout request */
625 631 #define SQ_CANCEL_BUFCALL 0x04 /* cancel bufcall request */
626 632
627 633 typedef struct callbparams {
628 634 syncq_t *cbp_sq;
629 635 void (*cbp_func)(void *);
630 636 void *cbp_arg;
631 637 callbparams_id_t cbp_id;
632 638 uint_t cbp_flags;
633 639 struct callbparams *cbp_next;
634 640 size_t cbp_size;
635 641 } callbparams_t;
636 642
637 643 typedef struct strbufcall {
638 644 void (*bc_func)(void *);
639 645 void *bc_arg;
640 646 size_t bc_size;
641 647 bufcall_id_t bc_id;
642 648 struct strbufcall *bc_next;
643 649 kthread_id_t bc_executor;
644 650 } strbufcall_t;
645 651
646 652 /*
647 653 * Structure of list of processes to be sent SIGPOLL/SIGURG signal
648 654 * on request. The valid S_* events are defined in stropts.h.
649 655 */
650 656 typedef struct strsig {
651 657 struct pid *ss_pidp; /* pid/pgrp pointer */
652 658 pid_t ss_pid; /* positive pid, negative pgrp */
653 659 int ss_events; /* S_* events */
654 660 struct strsig *ss_next;
655 661 } strsig_t;
656 662
657 663 /*
658 664 * bufcall list
659 665 */
660 666 struct bclist {
661 667 strbufcall_t *bc_head;
662 668 strbufcall_t *bc_tail;
663 669 };
664 670
665 671 /*
666 672 * Structure used to track mux links and unlinks.
667 673 */
668 674 struct mux_node {
669 675 major_t mn_imaj; /* internal major device number */
670 676 uint16_t mn_indegree; /* number of incoming edges */
671 677 struct mux_node *mn_originp; /* where we came from during search */
672 678 struct mux_edge *mn_startp; /* where search left off in mn_outp */
673 679 struct mux_edge *mn_outp; /* list of outgoing edges */
674 680 uint_t mn_flags; /* see below */
675 681 };
676 682
677 683 /*
678 684 * Flags for mux_nodes.
679 685 */
680 686 #define VISITED 1
681 687
682 688 /*
683 689 * Edge structure - a list of these is hung off the
684 690 * mux_node to represent the outgoing edges.
685 691 */
686 692 struct mux_edge {
687 693 struct mux_node *me_nodep; /* edge leads to this node */
688 694 struct mux_edge *me_nextp; /* next edge */
689 695 int me_muxid; /* id of link */
690 696 dev_t me_dev; /* dev_t - used for kernel PUNLINK */
691 697 };
692 698
693 699 /*
694 700 * Queue info
695 701 *
696 702 * The syncq is included here to reduce memory fragmentation
697 703 * for kernel memory allocators that only allocate in sizes that are
698 704 * powers of two. If the kernel memory allocator changes this should
699 705 * be revisited.
700 706 */
701 707 typedef struct queinfo {
702 708 struct queue qu_rqueue; /* read queue - must be first */
703 709 struct queue qu_wqueue; /* write queue - must be second */
704 710 struct syncq qu_syncq; /* syncq - must be third */
705 711 } queinfo_t;
706 712
/*
 * Multiplexed streams info
 *
 * One linkinfo_t exists per mux link (see alloclink()/lbfree());
 * entries are kept on a doubly-linked list via li_next/li_prev.
 */
typedef struct linkinfo {
	struct linkblk li_lblk;		/* must be first */
	struct file *li_fpdown;		/* file pointer for lower stream */
	struct linkinfo *li_next;	/* next in list */
	struct linkinfo *li_prev;	/* previous in list */
} linkinfo_t;
716 722
/*
 * List of syncq's used by freezestr/unfreezestr
 */
typedef struct syncql {
	struct syncql *sql_next;	/* next entry in the list */
	syncq_t *sql_sq;		/* syncq referenced by this entry */
} syncql_t;
724 730
/*
 * Growable collection of syncql_t entries.  sqlist_size is the total
 * allocation size in bytes and sqlist_index the next unused slot of
 * sqlist_array, which is over-allocated past its declared 4 entries.
 */
typedef struct sqlist {
	syncql_t *sqlist_head;		/* head of the syncql list */
	size_t sqlist_size;		/* structure size in bytes */
	size_t sqlist_index;		/* next free entry in array */
	syncql_t sqlist_array[4];	/* 4 or more entries */
} sqlist_t;
731 737
/*
 * Per-module/driver syncq state: ties a streamtab to the shared syncq
 * used for its perimeter.  Entries are reference counted through
 * dm_ref (see hold_dm()/rele_dm()).
 */
typedef struct perdm {
	struct perdm *dm_next;		/* next entry in the list */
	syncq_t *dm_sq;			/* the shared syncq */
	struct streamtab *dm_str;	/* owning streamtab */
	uint_t dm_ref;			/* reference count */
} perdm_t;

/* True when qflag requires a perdm but none has been set up yet */
#define	NEED_DM(dmp, qflag) \
	(dmp == NULL && (qflag & (QPERMOD | QMTOUTPERIM)))
741 747
/*
 * fmodsw_impl_t is used within the kernel. fmodsw is used by
 * the modules/drivers. The information is copied from fmodsw
 * defined in the module/driver into the fmodsw_impl_t structure
 * during the module/driver initialization.
 */
typedef struct fmodsw_impl fmodsw_impl_t;

struct fmodsw_impl {
	fmodsw_impl_t *f_next;		/* next entry in the fmodsw list */
	char f_name[FMNAMESZ + 1];	/* module name (NUL-terminated) */
	struct streamtab *f_str;	/* module's streamtab */
	uint32_t f_qflag;		/* queue flags */
	uint32_t f_sqtype;		/* syncq type */
	perdm_t *f_dmp;			/* shared perimeter state, if any */
	uint32_t f_ref;			/* reference count */
	uint32_t f_hits;		/* lookup hit counter */
};
760 766
/* Flags for fmodsw_find(): take a hold and/or load the module */
typedef enum {
	FMODSW_HOLD = 0x00000001,
	FMODSW_LOAD = 0x00000002
} fmodsw_flags_t;
765 771
/*
 * Per-driver analogue of fmodsw_impl (see the global devimpl array
 * declared below).
 */
typedef struct cdevsw_impl {
	struct streamtab *d_str;	/* driver's streamtab */
	uint32_t d_qflag;		/* queue flags */
	uint32_t d_sqtype;		/* syncq type */
	perdm_t *d_dmp;			/* shared perimeter state, if any */
} cdevsw_impl_t;
772 778
/*
 * Enumeration of the types of access that can be requested for a
 * controlling terminal under job control (see straccess()).
 */
enum jcaccess {
	JCREAD,			/* read data on a ctty */
	JCWRITE,		/* write data to a ctty */
	JCSETP,			/* set ctty parameters */
	JCGETP			/* get ctty parameters */
};
783 789
/*
 * Per-netstack STREAMS state: the autopush (sad) configuration and
 * the mux link graph used for cycle checking.
 */
struct str_stack {
	netstack_t *ss_netstack;	/* Common netstack */

	kmutex_t ss_sad_lock;		/* autopush lock */
	mod_hash_t *ss_sad_hash;	/* autopush configuration hash */
	size_t ss_sad_hash_nchains;	/* number of hash chains */
	struct saddev *ss_saddev;	/* sad device array */
	int ss_sadcnt;			/* number of sad devices */

	int ss_devcnt;			/* number of mux_nodes */
	struct mux_node *ss_mux_nodes;	/* mux info for cycle checking */
};
typedef struct str_stack str_stack_t;
797 803
/*
 * Finding related queues
 */
#define	STREAM(q)	((q)->q_stream)
/*
 * The syncq of a queue pair sits two queue_t's past the read queue
 * (see the queinfo layout below).
 */
#define	SQ(rq)		((syncq_t *)((rq) + 2))

/*
 * Get the module/driver name for a queue. Since some queues don't have
 * q_info structures (e.g., see log_makeq()), fall back to "?".
 */
#define	Q2NAME(q) \
	(((q)->q_qinfo != NULL && (q)->q_qinfo->qi_minfo->mi_idname != NULL) ? \
	(q)->q_qinfo->qi_minfo->mi_idname : "?")
811 817
/*
 * Locking macros: addresses of the per-queue and per-syncq mutexes.
 */
#define	QLOCK(q)	(&(q)->q_lock)
#define	SQLOCK(sq)	(&(sq)->sq_lock)
817 823
/*
 * Acquire every ciputctrl lock of the stream head, if it has any.
 * sd_nciputctrl holds the highest valid index (hence the inclusive
 * <= bound).  Caller must already hold sd_lock.
 */
#define STREAM_PUTLOCKS_ENTER(stp) { \
	ASSERT(MUTEX_HELD(&(stp)->sd_lock)); \
	if ((stp)->sd_ciputctrl != NULL) { \
		int i; \
		int nlocks = (stp)->sd_nciputctrl; \
		ciputctrl_t *cip = (stp)->sd_ciputctrl; \
		for (i = 0; i <= nlocks; i++) { \
			mutex_enter(&cip[i].ciputctrl_lock); \
		} \
	} \
}
829 835
/*
 * Release every ciputctrl lock of the stream head; counterpart of
 * STREAM_PUTLOCKS_ENTER.  Caller must hold sd_lock.
 */
#define STREAM_PUTLOCKS_EXIT(stp) { \
	ASSERT(MUTEX_HELD(&(stp)->sd_lock)); \
	if ((stp)->sd_ciputctrl != NULL) { \
		int i; \
		int nlocks = (stp)->sd_nciputctrl; \
		ciputctrl_t *cip = (stp)->sd_ciputctrl; \
		for (i = 0; i <= nlocks; i++) { \
			mutex_exit(&cip[i].ciputctrl_lock); \
		} \
	} \
}
841 847
/*
 * Acquire every ciputctrl lock of the syncq (indices 0..sq_nciputctrl
 * inclusive).  Only SQ_CIPUT syncqs have ciputctrl arrays; caller must
 * hold SQLOCK.
 */
#define SQ_PUTLOCKS_ENTER(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			mutex_enter(&cip[i].ciputctrl_lock); \
		} \
	} \
}
854 860
/*
 * Release every ciputctrl lock of the syncq; counterpart of
 * SQ_PUTLOCKS_ENTER.  Caller must hold SQLOCK.
 */
#define SQ_PUTLOCKS_EXIT(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			mutex_exit(&cip[i].ciputctrl_lock); \
		} \
	} \
}
867 873
/*
 * Set the SQ_FASTPUT bit in every ciputctrl count of the syncq, taking
 * and dropping each ciputctrl lock in turn.  Caller must hold SQLOCK.
 */
#define SQ_PUTCOUNT_SETFAST(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			mutex_enter(&cip[i].ciputctrl_lock); \
			cip[i].ciputctrl_count |= SQ_FASTPUT; \
			mutex_exit(&cip[i].ciputctrl_lock); \
		} \
	} \
}
882 888
/*
 * Clear the SQ_FASTPUT bit in every ciputctrl count of the syncq;
 * counterpart of SQ_PUTCOUNT_SETFAST.  Caller must hold SQLOCK.
 */
#define SQ_PUTCOUNT_CLRFAST(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			mutex_enter(&cip[i].ciputctrl_lock); \
			cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
			mutex_exit(&cip[i].ciputctrl_lock); \
		} \
	} \
}
897 903
898 904
#ifdef DEBUG

/*
 * Assert that SQLOCK and every ciputctrl lock of the syncq are held
 * (DEBUG kernels only; compiles away otherwise).
 */
#define SQ_PUTLOCKS_HELD(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
		} \
	} \
}

/*
 * Assert that the SQ_FASTMASK portions of the syncq's ciputctrl counts
 * sum to exactly countcheck.
 */
#define SUMCHECK_SQ_PUTCOUNTS(sq, countcheck) { \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		uint_t count = 0; \
		int ncounts = (sq)->sq_nciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= ncounts; i++) { \
			count += \
			    (((sq)->sq_ciputctrl[i].ciputctrl_count) & \
			    SQ_FASTMASK); \
		} \
		ASSERT(count == (countcheck)); \
	} \
}

/*
 * Same check against a raw ciputctrl array of (nciput + 1) entries.
 */
#define SUMCHECK_CIPUTCTRL_COUNTS(ciput, nciput, countcheck) { \
	int i; \
	uint_t count = 0; \
	ASSERT((ciput) != NULL); \
	for (i = 0; i <= (nciput); i++) { \
		count += (((ciput)[i].ciputctrl_count) & \
		    SQ_FASTMASK); \
	} \
	ASSERT(count == (countcheck)); \
}

#else /* DEBUG */

#define	SQ_PUTLOCKS_HELD(sq)
#define	SUMCHECK_SQ_PUTCOUNTS(sq, countcheck)
#define	SUMCHECK_CIPUTCTRL_COUNTS(sq, nciput, countcheck)

#endif /* DEBUG */
947 953
/*
 * Accumulate the SQ_FASTMASK portion of every ciputctrl count of the
 * syncq into (count).  Only SQ_CIPUT syncqs carry ciputctrl arrays.
 */
#define SUM_SQ_PUTCOUNTS(sq, count) { \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int ncounts = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= ncounts; i++) { \
			(count) += ((cip[i].ciputctrl_count) & \
			    SQ_FASTMASK); \
		} \
	} \
}

/* The stream head's sd_lock doubles as the lock stabilizing q_next */
#define	CLAIM_QNEXT_LOCK(stp)	mutex_enter(&(stp)->sd_lock)
#define	RELEASE_QNEXT_LOCK(stp)	mutex_exit(&(stp)->sd_lock)
963 969
/*
 * syncq message manipulation macros.
 */
/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 *
 * Messages are chained through b_next between q_sqhead and q_sqtail;
 * q_syncqmsgs counts them and set_qfull() updates flow-control state.
 */
#define SQPUT_MP(qp, mp)						\
	{								\
		qp->q_syncqmsgs++;					\
		if (qp->q_sqhead == NULL) {				\
			qp->q_sqhead = qp->q_sqtail = mp;		\
		} else {						\
			qp->q_sqtail->b_next = mp;			\
			qp->q_sqtail = mp;				\
		}							\
		set_qfull(qp);						\
	}
982 988
/*
 * Miscellaneous parameters and flags.
 */

/*
 * Default timeout in milliseconds for ioctls and close
 */
#define	STRTIMOUT	15000

/*
 * Flag values for stream io
 */
#define	WRITEWAIT	0x1	/* waiting for write event */
#define	READWAIT	0x2	/* waiting for read event */
#define	NOINTR		0x4	/* error is not to be set for signal */
#define	GETWAIT		0x8	/* waiting for getmsg event */

/*
 * These flags need to be unique for stream io name space
 * and copy modes name space (i.e. they must not collide with the
 * WRITEWAIT..GETWAIT values above or U_TO_K/K_TO_K below).
 * These flags allow strwaitq
 * and strdoioctl to proceed as if signals or errors on the stream
 * head have not occurred; i.e. they will be detected by some other
 * means.
 * STR_NOSIG does not allow signals to interrupt the call
 * STR_NOERROR does not allow stream head read, write or hup errors to
 * affect the call. When used with strdoioctl(), if a previous ioctl
 * is pending and times out, STR_NOERROR will cause strdoioctl() to not
 * return ETIME. If, however, the requested ioctl times out, ETIME
 * will be returned (use ic_timout instead)
 * STR_PEEK is used to inform strwaitq that the reader is peeking at data
 * and that a non-persistent error should not be cleared.
 * STR_DELAYERR is used to inform strwaitq that it should not check errors
 * after being awoken since, in addition to an error, there might also be
 * data queued on the stream head read queue.
 */
#define	STR_NOSIG	0x10	/* Ignore signals during strdoioctl/strwaitq */
#define	STR_NOERROR	0x20	/* Ignore errors during strdoioctl/strwaitq */
#define	STR_PEEK	0x40	/* Peeking behavior on non-persistent errors */
#define	STR_DELAYERR	0x80	/* Do not check errors on return */

/*
 * Copy modes for tty and I_STR ioctls
 */
#define	U_TO_K	01		/* User to Kernel */
#define	K_TO_K	02		/* Kernel to Kernel */

/*
 * Mux defines.
 */
#define	LINKNORMAL	0x01	/* normal mux link */
#define	LINKPERSIST	0x02	/* persistent mux link */
#define	LINKTYPEMASK	0x03	/* bitmask of all link types */
#define	LINKCLOSE	0x04	/* unlink from strclose */
/*
 * Definitions of Streams macros and function interfaces.
 */

/*
 * Obsolete queue scheduling macros. They are not used anymore, but still kept
 * here for 3rd-party modules and drivers who might still use them.
 */
#define	setqsched()
#define	qready()	1

#ifdef _KERNEL
#define	runqueues()
#define	queuerun()
#endif

/* compatibility module for style 2 drivers with DR race condition */
#define	DRMODNAME	"drcompat"
1055 1061
/*
 * Macros dealing with mux_nodes (see the cycle-search fields above).
 */
#define	MUX_VISIT(X)	((X)->mn_flags |= VISITED)
/* Clears both the VISITED flag and the search back-pointer */
#define	MUX_CLEAR(X)	((X)->mn_flags &= (~VISITED)); \
			((X)->mn_originp = NULL)
#define	MUX_DIDVISIT(X)	((X)->mn_flags & VISITED)
1063 1069
1064 1070
/*
 * Twisted stream macros
 *
 * STRLOCKMATES acquires the sd_lock of both mated stream heads in a
 * fixed (address) order so that two threads locking the pair from
 * opposite sides cannot deadlock.
 */
#define	STRMATED(X)	((X)->sd_flag & STRMATE)
#define	STRLOCKMATES(X)	if (&((X)->sd_lock) > &(((X)->sd_mate)->sd_lock)) { \
				mutex_enter(&((X)->sd_lock)); \
				mutex_enter(&(((X)->sd_mate)->sd_lock)); \
			} else { \
				mutex_enter(&(((X)->sd_mate)->sd_lock)); \
				mutex_enter(&((X)->sd_lock)); \
			}
#define	STRUNLOCKMATES(X)	mutex_exit(&((X)->sd_lock)); \
			mutex_exit(&(((X)->sd_mate)->sd_lock))
1078 1084
1079 1085 #ifdef _KERNEL
1080 1086
1081 1087 extern void strinit(void);
1082 1088 extern int strdoioctl(struct stdata *, struct strioctl *, int, int,
1083 1089 cred_t *, int *);
1084 1090 extern void strsendsig(struct strsig *, int, uchar_t, int);
1085 1091 extern void str_sendsig(vnode_t *, int, uchar_t, int);
1086 1092 extern void strhup(struct stdata *);
1087 1093 extern int qattach(queue_t *, dev_t *, int, cred_t *, fmodsw_impl_t *,
1088 1094 boolean_t);
1089 1095 extern int qreopen(queue_t *, dev_t *, int, cred_t *);
1090 1096 extern void qdetach(queue_t *, int, int, cred_t *, boolean_t);
1091 1097 extern void enterq(queue_t *);
1092 1098 extern void leaveq(queue_t *);
1093 1099 extern int putiocd(mblk_t *, caddr_t, int, cred_t *);
1094 1100 extern int getiocd(mblk_t *, caddr_t, int);
1095 1101 extern struct linkinfo *alloclink(queue_t *, queue_t *, struct file *);
1096 1102 extern void lbfree(struct linkinfo *);
1097 1103 extern int linkcycle(stdata_t *, stdata_t *, str_stack_t *);
1098 1104 extern struct linkinfo *findlinks(stdata_t *, int, int, str_stack_t *);
1099 1105 extern queue_t *getendq(queue_t *);
1100 1106 extern int mlink(vnode_t *, int, int, cred_t *, int *, int);
1101 1107 extern int mlink_file(vnode_t *, int, struct file *, cred_t *, int *, int);
1102 1108 extern int munlink(struct stdata *, struct linkinfo *, int, cred_t *, int *,
1103 1109 str_stack_t *);
1104 1110 extern int munlinkall(struct stdata *, int, cred_t *, int *, str_stack_t *);
1105 1111 extern void mux_addedge(stdata_t *, stdata_t *, int, str_stack_t *);
1106 1112 extern void mux_rmvedge(stdata_t *, int, str_stack_t *);
1107 1113 extern int devflg_to_qflag(struct streamtab *, uint32_t, uint32_t *,
1108 1114 uint32_t *);
1109 1115 extern void setq(queue_t *, struct qinit *, struct qinit *, perdm_t *,
1110 1116 uint32_t, uint32_t, boolean_t);
1111 1117 extern perdm_t *hold_dm(struct streamtab *, uint32_t, uint32_t);
1112 1118 extern void rele_dm(perdm_t *);
1113 1119 extern int strmakectl(struct strbuf *, int32_t, int32_t, mblk_t **);
1114 1120 extern int strmakedata(ssize_t *, struct uio *, stdata_t *, int32_t, mblk_t **);
1115 1121 extern int strmakemsg(struct strbuf *, ssize_t *, struct uio *,
1116 1122 struct stdata *, int32_t, mblk_t **);
1117 1123 extern int strgetmsg(vnode_t *, struct strbuf *, struct strbuf *, uchar_t *,
1118 1124 int *, int, rval_t *);
1119 1125 extern int strputmsg(vnode_t *, struct strbuf *, struct strbuf *, uchar_t,
1120 1126 int flag, int fmode);
1121 1127 extern int strstartplumb(struct stdata *, int, int);
1122 1128 extern void strendplumb(struct stdata *);
1123 1129 extern int stropen(struct vnode *, dev_t *, int, cred_t *);
1124 1130 extern int strclose(struct vnode *, int, cred_t *);
1125 1131 extern int strpoll(register struct stdata *, short, int, short *,
1126 1132 struct pollhead **);
1127 1133 extern void strclean(struct vnode *);
1128 1134 extern void str_cn_clean(); /* XXX hook for consoles signal cleanup */
1129 1135 extern int strwrite(struct vnode *, struct uio *, cred_t *);
1130 1136 extern int strwrite_common(struct vnode *, struct uio *, cred_t *, int);
1131 1137 extern int strread(struct vnode *, struct uio *, cred_t *);
1132 1138 extern int strioctl(struct vnode *, int, intptr_t, int, int, cred_t *, int *);
↓ open down ↓ |
879 lines elided |
↑ open up ↑ |
1133 1139 extern int strrput(queue_t *, mblk_t *);
1134 1140 extern int strrput_nondata(queue_t *, mblk_t *);
1135 1141 extern mblk_t *strrput_proto(vnode_t *, mblk_t *,
1136 1142 strwakeup_t *, strsigset_t *, strsigset_t *, strpollset_t *);
1137 1143 extern mblk_t *strrput_misc(vnode_t *, mblk_t *,
1138 1144 strwakeup_t *, strsigset_t *, strsigset_t *, strpollset_t *);
1139 1145 extern int getiocseqno(void);
1140 1146 extern int strwaitbuf(size_t, int);
1141 1147 extern int strwaitq(stdata_t *, int, ssize_t, int, clock_t, int *);
extern struct stdata *shalloc(queue_t *);
/*
 * Track the set of processes that have this stream open, so that a
 * stream can be mapped back to its users (this change's purpose is
 * PID reporting in netstat).  NOTE(review): semantics inferred from
 * the names and the new <sys/pidnode.h> include — confirm against the
 * sh_*_pid() implementations before relying on details (e.g. whether
 * sh_get_pid_mblk() allocates a fresh mblk per call).
 */
extern void sh_insert_pid(struct stdata *, pid_t);
extern void sh_remove_pid(struct stdata *, pid_t);
extern mblk_t *sh_get_pid_mblk(struct stdata *);
extern void shfree(struct stdata *s);
1144 1153 extern queue_t *allocq(void);
1145 1154 extern void freeq(queue_t *);
1146 1155 extern qband_t *allocband(void);
1147 1156 extern void freeband(qband_t *);
1148 1157 extern void freebs_enqueue(mblk_t *, dblk_t *);
1149 1158 extern void setqback(queue_t *, unsigned char);
1150 1159 extern int strcopyin(void *, void *, size_t, int);
1151 1160 extern int strcopyout(void *, void *, size_t, int);
1152 1161 extern void strsignal(struct stdata *, int, int32_t);
1153 1162 extern clock_t str_cv_wait(kcondvar_t *, kmutex_t *, clock_t, int);
1154 1163 extern void disable_svc(queue_t *);
1155 1164 extern void enable_svc(queue_t *);
1156 1165 extern void remove_runlist(queue_t *);
1157 1166 extern void wait_svc(queue_t *);
1158 1167 extern void backenable(queue_t *, uchar_t);
1159 1168 extern void set_qend(queue_t *);
1160 1169 extern int strgeterr(stdata_t *, int32_t, int);
1161 1170 extern void qenable_locked(queue_t *);
1162 1171 extern mblk_t *getq_noenab(queue_t *, ssize_t);
1163 1172 extern void rmvq_noenab(queue_t *, mblk_t *);
1164 1173 extern void qbackenable(queue_t *, uchar_t);
1165 1174 extern void set_qfull(queue_t *);
1166 1175
1167 1176 extern void strblock(queue_t *);
1168 1177 extern void strunblock(queue_t *);
1169 1178 extern int qclaimed(queue_t *);
1170 1179 extern int straccess(struct stdata *, enum jcaccess);
1171 1180
1172 1181 extern void entersq(syncq_t *, int);
1173 1182 extern void leavesq(syncq_t *, int);
1174 1183 extern void claimq(queue_t *);
1175 1184 extern void releaseq(queue_t *);
1176 1185 extern void claimstr(queue_t *);
1177 1186 extern void releasestr(queue_t *);
1178 1187 extern void removeq(queue_t *);
1179 1188 extern void insertq(struct stdata *, queue_t *);
1180 1189 extern void drain_syncq(syncq_t *);
1181 1190 extern void qfill_syncq(syncq_t *, queue_t *, mblk_t *);
1182 1191 extern void qdrain_syncq(syncq_t *, queue_t *);
1183 1192 extern int flush_syncq(syncq_t *, queue_t *);
1184 1193 extern void wait_sq_svc(syncq_t *);
1185 1194
1186 1195 extern void outer_enter(syncq_t *, uint16_t);
1187 1196 extern void outer_exit(syncq_t *);
1188 1197 extern void qwriter_inner(queue_t *, mblk_t *, void (*)());
1189 1198 extern void qwriter_outer(queue_t *, mblk_t *, void (*)());
1190 1199
1191 1200 extern callbparams_t *callbparams_alloc(syncq_t *, void (*)(void *),
1192 1201 void *, int);
1193 1202 extern void callbparams_free(syncq_t *, callbparams_t *);
1194 1203 extern void callbparams_free_id(syncq_t *, callbparams_id_t, int32_t);
1195 1204 extern void qcallbwrapper(void *);
1196 1205
1197 1206 extern mblk_t *esballoc_wait(unsigned char *, size_t, uint_t, frtn_t *);
1198 1207 extern mblk_t *esballoca(unsigned char *, size_t, uint_t, frtn_t *);
1199 1208 extern mblk_t *desballoca(unsigned char *, size_t, uint_t, frtn_t *);
1200 1209 extern int do_sendfp(struct stdata *, struct file *, struct cred *);
1201 1210 extern int frozenstr(queue_t *);
1202 1211 extern size_t xmsgsize(mblk_t *);
1203 1212
1204 1213 extern void putnext_tail(syncq_t *, queue_t *, uint32_t);
1205 1214 extern void stream_willservice(stdata_t *);
1206 1215 extern void stream_runservice(stdata_t *);
1207 1216
1208 1217 extern void strmate(vnode_t *, vnode_t *);
1209 1218 extern queue_t *strvp2wq(vnode_t *);
1210 1219 extern vnode_t *strq2vp(queue_t *);
1211 1220 extern mblk_t *allocb_wait(size_t, uint_t, uint_t, int *);
1212 1221 extern mblk_t *allocb_cred(size_t, cred_t *, pid_t);
1213 1222 extern mblk_t *allocb_cred_wait(size_t, uint_t, int *, cred_t *, pid_t);
1214 1223 extern mblk_t *allocb_tmpl(size_t, const mblk_t *);
1215 1224 extern mblk_t *allocb_tryhard(size_t);
1216 1225 extern void mblk_copycred(mblk_t *, const mblk_t *);
1217 1226 extern void mblk_setcred(mblk_t *, cred_t *, pid_t);
1218 1227 extern cred_t *msg_getcred(const mblk_t *, pid_t *);
1219 1228 extern struct ts_label_s *msg_getlabel(const mblk_t *);
1220 1229 extern cred_t *msg_extractcred(mblk_t *, pid_t *);
1221 1230 extern void strpollwakeup(vnode_t *, short);
1222 1231 extern int putnextctl_wait(queue_t *, int);
1223 1232
1224 1233 extern int kstrputmsg(struct vnode *, mblk_t *, struct uio *, ssize_t,
1225 1234 unsigned char, int, int);
1226 1235 extern int kstrgetmsg(struct vnode *, mblk_t **, struct uio *,
1227 1236 unsigned char *, int *, clock_t, rval_t *);
1228 1237
1229 1238 extern void strsetrerror(vnode_t *, int, int, errfunc_t);
1230 1239 extern void strsetwerror(vnode_t *, int, int, errfunc_t);
1231 1240 extern void strseteof(vnode_t *, int);
1232 1241 extern void strflushrq(vnode_t *, int);
1233 1242 extern void strsetrputhooks(vnode_t *, uint_t, msgfunc_t, msgfunc_t);
1234 1243 extern void strsetwputhooks(vnode_t *, uint_t, clock_t);
1235 1244 extern void strsetrwputdatahooks(vnode_t *, msgfunc_t, msgfunc_t);
1236 1245 extern int strwaitmark(vnode_t *);
1237 1246 extern void strsignal_nolock(stdata_t *, int, uchar_t);
1238 1247
1239 1248 struct multidata_s;
1240 1249 struct pdesc_s;
1241 1250 extern int hcksum_assoc(mblk_t *, struct multidata_s *, struct pdesc_s *,
1242 1251 uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, int);
1243 1252 extern void hcksum_retrieve(mblk_t *, struct multidata_s *, struct pdesc_s *,
1244 1253 uint32_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
1245 1254 extern void lso_info_set(mblk_t *, uint32_t, uint32_t);
1246 1255 extern void lso_info_cleanup(mblk_t *);
1247 1256 extern unsigned int bcksum(uchar_t *, int, unsigned int);
1248 1257 extern boolean_t is_vmloaned_mblk(mblk_t *, struct multidata_s *,
1249 1258 struct pdesc_s *);
1250 1259
1251 1260 extern int fmodsw_register(const char *, struct streamtab *, int);
1252 1261 extern int fmodsw_unregister(const char *);
1253 1262 extern fmodsw_impl_t *fmodsw_find(const char *, fmodsw_flags_t);
1254 1263 extern void fmodsw_rele(fmodsw_impl_t *);
1255 1264
1256 1265 extern void freemsgchain(mblk_t *);
1257 1266 extern mblk_t *copymsgchain(mblk_t *);
1258 1267
1259 1268 extern mblk_t *mcopyinuio(struct stdata *, uio_t *, ssize_t, ssize_t, int *);
1260 1269
1261 1270 /*
1262 1271 * shared or externally configured data structures
1263 1272 */
1264 1273 extern ssize_t strmsgsz; /* maximum stream message size */
1265 1274 extern ssize_t strctlsz; /* maximum size of ctl message */
1266 1275 extern int nstrpush; /* maximum number of pushes allowed */
1267 1276
1268 1277 /*
1269 1278 * Bufcalls related variables.
1270 1279 */
1271 1280 extern struct bclist strbcalls; /* List of bufcalls */
1272 1281 extern kmutex_t strbcall_lock; /* Protects the list of bufcalls */
1273 1282 extern kcondvar_t strbcall_cv; /* Signaling when a bufcall is added */
1274 1283 extern kcondvar_t bcall_cv; /* wait of executing bufcall completes */
1275 1284
1276 1285 extern frtn_t frnop;
1277 1286
1278 1287 extern struct kmem_cache *ciputctrl_cache;
1279 1288 extern int n_ciputctrl;
1280 1289 extern int max_n_ciputctrl;
1281 1290 extern int min_n_ciputctrl;
1282 1291
1283 1292 extern cdevsw_impl_t *devimpl;
1284 1293
/*
 * esballoc queue for throttling
 *
 * FIFO of esballoc'ed messages awaiting processing; eq_flags holds the
 * ESBQ_* state bits defined below.
 */
typedef struct esb_queue {
	kmutex_t eq_lock;	/* protects all fields below */
	uint_t eq_len;		/* number of queued messages */
	mblk_t *eq_head;	/* head of queue */
	mblk_t *eq_tail;	/* tail of queue */
	uint_t eq_flags;	/* esballoc queue flags */
} esb_queue_t;
1295 1304
/*
 * esballoc flags for queue processing (stored in eq_flags).
 */
#define	ESBQ_PROCESSING	0x01	/* queue is being processed */
#define	ESBQ_TIMER	0x02	/* timer is active */

extern void esballoc_queue_init(void);
1303 1312
1304 1313 #endif /* _KERNEL */
1305 1314
/*
 * Note: Use of these macros are restricted to kernel/unix and
 * intended for the STREAMS framework.
 * All modules/drivers should include sys/ddi.h.
 *
 * Finding related queues: queue pairs are allocated read queue first,
 * write queue second (see queinfo above), so the partner queue sits at
 * a +1/-1 offset depending on the QREADR flag.
 */
#define	_OTHERQ(q)	((q)->q_flag&QREADR? (q)+1: (q)-1)
#define	_WR(q)		((q)->q_flag&QREADR? (q)+1: (q))
#define	_RD(q)		((q)->q_flag&QREADR? (q): (q)-1)
#define	_SAMESTR(q)	(!((q)->q_flag & QEND))

/*
 * These are also declared here for modules/drivers that erroneously
 * include strsubr.h after ddi.h or fail to include ddi.h at all.
 */
extern struct queue *OTHERQ(queue_t *); /* stream.h */
extern struct queue *RD(queue_t *);
extern struct queue *WR(queue_t *);
extern int SAMESTR(queue_t *);
1326 1335
/*
 * The following hardware checksum related macros are private
 * interfaces that are subject to change without notice.
 *
 * Note that DB_LSOFLAGS expands to the same db_struioun.cksum.flags
 * field as DB_CKSUMFLAGS; the LSO names are aliases for LSO use.
 */
#ifdef _KERNEL
#define	DB_CKSUMSTART(mp)	((mp)->b_datap->db_cksumstart)
#define	DB_CKSUMEND(mp)		((mp)->b_datap->db_cksumend)
#define	DB_CKSUMSTUFF(mp)	((mp)->b_datap->db_cksumstuff)
#define	DB_CKSUMFLAGS(mp)	((mp)->b_datap->db_struioun.cksum.flags)
#define	DB_CKSUM16(mp)		((mp)->b_datap->db_cksum16)
#define	DB_CKSUM32(mp)		((mp)->b_datap->db_cksum32)
#define	DB_LSOFLAGS(mp)		((mp)->b_datap->db_struioun.cksum.flags)
#define	DB_LSOMSS(mp)		((mp)->b_datap->db_struioun.cksum.pad)
#endif /* _KERNEL */
1341 1350
1342 1351 #ifdef __cplusplus
1343 1352 }
1344 1353 #endif
1345 1354
1346 1355
1347 1356 #endif /* _SYS_STRSUBR_H */
↓ open down ↓ |
195 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX