Print this page
XXXX adding PID information to netstat output
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/sys/strsubr.h
+++ new/usr/src/uts/common/sys/strsubr.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 #ifndef _SYS_STRSUBR_H
31 31 #define _SYS_STRSUBR_H
32 32
33 33 /*
34 34 * WARNING:
35 35 * Everything in this file is private, belonging to the
36 36 * STREAMS subsystem. The only guarantee made about the
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
37 37 * contents of this file is that if you include it, your
38 38 * code will not port to the next release.
39 39 */
40 40 #include <sys/stream.h>
41 41 #include <sys/stropts.h>
42 42 #include <sys/kstat.h>
43 43 #include <sys/uio.h>
44 44 #include <sys/proc.h>
45 45 #include <sys/netstack.h>
46 46 #include <sys/modhash.h>
47 +#include <sys/pidnode.h>
47 48
48 49 #ifdef __cplusplus
49 50 extern "C" {
50 51 #endif
51 52
52 53 /*
53 54 * In general, the STREAMS locks are disjoint; they are only held
54 55 * locally, and not simultaneously by a thread. However, module
55 56 * code, including at the stream head, requires some locks to be
56 57 * acquired in order for its safety.
57 58 * 1. Stream level claim. This prevents the value of q_next
58 59 * from changing while module code is executing.
59 60 * 2. Queue level claim. This prevents the value of q_ptr
60 61 * from changing while put or service code is executing.
61 62 * In addition, it provides for queue single-threading
62 63 * for QPAIR and PERQ MT-safe modules.
63 64 * 3. Stream head lock. May be held by the stream head module
64 65 * to implement a read/write/open/close monitor.
65 66 * Note: that the only types of twisted stream supported are
66 67 * the pipe and transports which have read and write service
67 68 * procedures on both sides of the twist.
68 69 * 4. Queue lock. May be acquired by utility routines on
69 70 * behalf of a module.
70 71 */
71 72
72 73 /*
73 74 * In general, sd_lock protects the consistency of the stdata
74 75 * structure. Additionally, it is used with sd_monitor
75 76 * to implement an open/close monitor. In particular, it protects
76 77 * the following fields:
77 78 * sd_iocblk
78 79 * sd_flag
79 80 * sd_copyflag
80 81 * sd_iocid
81 82 * sd_iocwait
82 83 * sd_sidp
83 84 * sd_pgidp
84 85 * sd_wroff
85 86 * sd_tail
86 87 * sd_rerror
87 88 * sd_werror
88 89 * sd_pushcnt
89 90 * sd_sigflags
90 91 * sd_siglist
91 92 * sd_pollist
92 93 * sd_mark
93 94 * sd_closetime
94 95 * sd_wakeq
95 96 * sd_maxblk
96 97 *
97 98 * The following fields are modified only by the allocator, which
98 99 * has exclusive access to them at that time:
99 100 * sd_wrq
100 101 * sd_strtab
101 102 *
102 103 * The following field is protected by the overlying file system
103 104 * code, guaranteeing single-threading of opens:
104 105 * sd_vnode
105 106 *
106 107 * Stream-level locks should be acquired before any queue-level locks
107 108 * are acquired.
108 109 *
109 110 * The stream head write queue lock(sd_wrq) is used to protect the
110 111 * fields qn_maxpsz and qn_minpsz because freezestr() which is
111 112 * necessary for strqset() only gets the queue lock.
112 113 */
113 114
114 115 /*
115 116 * Function types for the parameterized stream head.
116 117 * The msgfunc_t takes the parameters:
117 118 * msgfunc(vnode_t *vp, mblk_t *mp, strwakeup_t *wakeups,
118 119 * strsigset_t *firstmsgsigs, strsigset_t *allmsgsigs,
119 120 * strpollset_t *pollwakeups);
120 121 * It returns an optional message to be processed by the stream head.
121 122 *
122 123 * The parameters for errfunc_t are:
123 124 * errfunc(vnode *vp, int ispeek, int *clearerr);
124 125 * It returns an errno and zero if there was no pending error.
125 126 */
126 127 typedef uint_t strwakeup_t;
127 128 typedef uint_t strsigset_t;
128 129 typedef short strpollset_t;
129 130 typedef uintptr_t callbparams_id_t;
130 131 typedef mblk_t *(*msgfunc_t)(vnode_t *, mblk_t *, strwakeup_t *,
131 132 strsigset_t *, strsigset_t *, strpollset_t *);
132 133 typedef int (*errfunc_t)(vnode_t *, int, int *);
133 134
134 135 /*
135 136 * Per stream sd_lock in putnext may be replaced by per cpu stream_putlocks
136 137 * each living in a separate cache line. putnext/canputnext grabs only one of
137 138 * stream_putlocks while strlock() (called on behalf of insertq()/removeq())
138 139 * acquires all stream_putlocks. Normally stream_putlocks are only employed
139 140 * for highly contended streams that have SQ_CIPUT queues in the critical path
140 141 * (e.g. NFS/UDP stream).
141 142 *
142 143 * stream_putlocks are dynamically assigned to stdata structure through
143 144 * sd_ciputctrl pointer possibly when a stream is already in use. Since
 144 145 * strlock() uses stream_putlocks only under sd_lock, acquiring sd_lock when
145 146 * assigning stream_putlocks to the stream ensures synchronization with
146 147 * strlock().
147 148 *
148 149 * For lock ordering purposes stream_putlocks are treated as the extension of
149 150 * sd_lock and are always grabbed right after grabbing sd_lock and released
150 151 * right before releasing sd_lock except putnext/canputnext where only one of
151 152 * stream_putlocks locks is used and where it is the first lock to grab.
152 153 */
153 154
154 155 typedef struct ciputctrl_str {
155 156 union _ciput_un {
156 157 uchar_t pad[64];
157 158 struct _ciput_str {
158 159 kmutex_t ciput_lck;
159 160 ushort_t ciput_cnt;
160 161 } ciput_str;
161 162 } ciput_un;
162 163 } ciputctrl_t;
163 164
164 165 #define ciputctrl_lock ciput_un.ciput_str.ciput_lck
165 166 #define ciputctrl_count ciput_un.ciput_str.ciput_cnt
166 167
167 168 /*
168 169 * Header for a stream: interface to rest of system.
169 170 *
170 171 * NOTE: While this is a consolidation-private structure, some unbundled and
171 172 * third-party products inappropriately make use of some of the fields.
172 173 * As such, please take care to not gratuitously change any offsets of
173 174 * existing members.
174 175 */
175 176 typedef struct stdata {
176 177 struct queue *sd_wrq; /* write queue */
177 178 struct msgb *sd_iocblk; /* return block for ioctl */
178 179 struct vnode *sd_vnode; /* pointer to associated vnode */
179 180 struct streamtab *sd_strtab; /* pointer to streamtab for stream */
180 181 uint_t sd_flag; /* state/flags */
181 182 uint_t sd_iocid; /* ioctl id */
182 183 struct pid *sd_sidp; /* controlling session info */
183 184 struct pid *sd_pgidp; /* controlling process group info */
184 185 ushort_t sd_tail; /* reserved space in written mblks */
185 186 ushort_t sd_wroff; /* write offset */
186 187 int sd_rerror; /* error to return on read ops */
187 188 int sd_werror; /* error to return on write ops */
188 189 int sd_pushcnt; /* number of pushes done on stream */
189 190 int sd_sigflags; /* logical OR of all siglist events */
190 191 struct strsig *sd_siglist; /* pid linked list to rcv SIGPOLL sig */
191 192 struct pollhead sd_pollist; /* list of all pollers to wake up */
192 193 struct msgb *sd_mark; /* "marked" message on read queue */
193 194 clock_t sd_closetime; /* time to wait to drain q in close */
194 195 kmutex_t sd_lock; /* protect head consistency */
195 196 kcondvar_t sd_monitor; /* open/close/push/pop monitor */
196 197 kcondvar_t sd_iocmonitor; /* ioctl single-threading */
197 198 kcondvar_t sd_refmonitor; /* sd_refcnt monitor */
198 199 ssize_t sd_qn_minpsz; /* These two fields are a performance */
 199 200 	ssize_t sd_qn_maxpsz;	/* enhancement; cache the values in */
200 201 /* the stream head so we don't have */
201 202 /* to ask the module below the stream */
202 203 /* head to get this information. */
203 204 struct stdata *sd_mate; /* pointer to twisted stream mate */
204 205 kthread_id_t sd_freezer; /* thread that froze stream */
205 206 kmutex_t sd_reflock; /* Protects sd_refcnt */
206 207 int sd_refcnt; /* number of claimstr */
207 208 uint_t sd_wakeq; /* strwakeq()'s copy of sd_flag */
208 209 struct queue *sd_struiordq; /* sync barrier struio() read queue */
209 210 struct queue *sd_struiowrq; /* sync barrier struio() write queue */
210 211 char sd_struiodnak; /* defer NAK of M_IOCTL by rput() */
211 212 struct msgb *sd_struionak; /* pointer M_IOCTL mblk(s) to NAK */
212 213 caddr_t sd_t_audit_data; /* For audit purposes only */
213 214 ssize_t sd_maxblk; /* maximum message block size */
214 215 uint_t sd_rput_opt; /* options/flags for strrput */
215 216 uint_t sd_wput_opt; /* options/flags for write/putmsg */
216 217 uint_t sd_read_opt; /* options/flags for strread */
217 218 msgfunc_t sd_rprotofunc; /* rput M_*PROTO routine */
218 219 msgfunc_t sd_rputdatafunc; /* read M_DATA routine */
219 220 msgfunc_t sd_rmiscfunc; /* rput routine (non-data/proto) */
220 221 msgfunc_t sd_wputdatafunc; /* wput M_DATA routine */
221 222 errfunc_t sd_rderrfunc; /* read side error callback */
222 223 errfunc_t sd_wrerrfunc; /* write side error callback */
223 224 /*
224 225 * support for low contention concurrent putnext.
225 226 */
226 227 ciputctrl_t *sd_ciputctrl;
227 228 uint_t sd_nciputctrl;
228 229
229 230 int sd_anchor; /* position of anchor in stream */
230 231 /*
231 232 * Service scheduling at the stream head.
232 233 */
233 234 kmutex_t sd_qlock;
↓ open down ↓ |
177 lines elided |
↑ open up ↑ |
234 235 struct queue *sd_qhead; /* Head of queues to be serviced. */
235 236 struct queue *sd_qtail; /* Tail of queues to be serviced. */
236 237 void *sd_servid; /* Service ID for bckgrnd schedule */
237 238 ushort_t sd_svcflags; /* Servicing flags */
238 239 short sd_nqueues; /* Number of queues in the list */
239 240 kcondvar_t sd_qcv; /* Waiters for qhead to become empty */
240 241 kcondvar_t sd_zcopy_wait;
241 242 uint_t sd_copyflag; /* copy-related flags */
242 243 zoneid_t sd_anchorzone; /* Allow removal from same zone only */
243 244 struct msgb *sd_cmdblk; /* reply from _I_CMD */
245 + list_t sd_pid_list;
246 + kmutex_t sd_pid_list_lock;
244 247 } stdata_t;
245 248
246 249 /*
247 250 * stdata servicing flags.
248 251 */
249 252 #define STRS_WILLSERVICE 0x01
250 253 #define STRS_SCHEDULED 0x02
251 254
252 255 #define STREAM_NEEDSERVICE(stp) ((stp)->sd_qhead != NULL)
253 256
254 257 /*
255 258 * stdata flag field defines
256 259 */
257 260 #define IOCWAIT 0x00000001 /* Someone is doing an ioctl */
258 261 #define RSLEEP 0x00000002 /* Someone wants to read/recv msg */
259 262 #define WSLEEP 0x00000004 /* Someone wants to write */
260 263 #define STRPRI 0x00000008 /* An M_PCPROTO is at stream head */
261 264 #define STRHUP 0x00000010 /* Device has vanished */
262 265 #define STWOPEN 0x00000020 /* waiting for 1st open */
263 266 #define STPLEX 0x00000040 /* stream is being multiplexed */
264 267 #define STRISTTY 0x00000080 /* stream is a terminal */
265 268 #define STRGETINPROG 0x00000100 /* (k)strgetmsg is running */
266 269 #define IOCWAITNE 0x00000200 /* STR_NOERROR ioctl running */
267 270 #define STRDERR 0x00000400 /* fatal read error from M_ERROR */
268 271 #define STWRERR 0x00000800 /* fatal write error from M_ERROR */
269 272 #define STRDERRNONPERSIST 0x00001000 /* nonpersistent read errors */
270 273 #define STWRERRNONPERSIST 0x00002000 /* nonpersistent write errors */
271 274 #define STRCLOSE 0x00004000 /* wait for a close to complete */
272 275 #define SNDMREAD 0x00008000 /* used for read notification */
273 276 #define OLDNDELAY 0x00010000 /* use old TTY semantics for */
274 277 /* NDELAY reads and writes */
275 278 /* 0x00020000 unused */
276 279 /* 0x00040000 unused */
277 280 #define STRTOSTOP 0x00080000 /* block background writes */
278 281 #define STRCMDWAIT 0x00100000 /* someone is doing an _I_CMD */
279 282 /* 0x00200000 unused */
280 283 #define STRMOUNT 0x00400000 /* stream is mounted */
281 284 #define STRNOTATMARK 0x00800000 /* Not at mark (when empty read q) */
282 285 #define STRDELIM 0x01000000 /* generate delimited messages */
283 286 #define STRATMARK 0x02000000 /* At mark (due to MSGMARKNEXT) */
284 287 #define STZCNOTIFY 0x04000000 /* wait for zerocopy mblk to be acked */
285 288 #define STRPLUMB 0x08000000 /* push/pop pending */
286 289 #define STREOF 0x10000000 /* End-of-file indication */
287 290 #define STREOPENFAIL 0x20000000 /* indicates if re-open has failed */
288 291 #define STRMATE 0x40000000 /* this stream is a mate */
289 292 #define STRHASLINKS 0x80000000 /* I_LINKs under this stream */
290 293
291 294 /*
292 295 * Copy-related flags (sd_copyflag), set by SO_COPYOPT.
293 296 */
294 297 #define STZCVMSAFE 0x00000001 /* safe to borrow file (segmapped) */
295 298 /* pages instead of bcopy */
296 299 #define STZCVMUNSAFE 0x00000002 /* unsafe to borrow file pages */
297 300 #define STRCOPYCACHED 0x00000004 /* copy should NOT bypass cache */
298 301
299 302 /*
300 303 * Options and flags for strrput (sd_rput_opt)
301 304 */
302 305 #define SR_POLLIN 0x00000001 /* pollwakeup needed for band0 data */
303 306 #define SR_SIGALLDATA 0x00000002 /* Send SIGPOLL for all M_DATA */
304 307 #define SR_CONSOL_DATA 0x00000004 /* Consolidate M_DATA onto q_last */
305 308 #define SR_IGN_ZEROLEN 0x00000008 /* Ignore zero-length M_DATA */
306 309
307 310 /*
308 311 * Options and flags for strwrite/strputmsg (sd_wput_opt)
309 312 */
310 313 #define SW_SIGPIPE 0x00000001 /* Send SIGPIPE for write error */
311 314 #define SW_RECHECK_ERR 0x00000002 /* Recheck errors in strwrite loop */
312 315 #define SW_SNDZERO 0x00000004 /* send 0-length msg down pipe/FIFO */
313 316
314 317 /*
315 318 * Options and flags for strread (sd_read_opt)
316 319 */
317 320 #define RD_MSGDIS 0x00000001 /* read msg discard */
318 321 #define RD_MSGNODIS 0x00000002 /* read msg no discard */
319 322 #define RD_PROTDAT 0x00000004 /* read M_[PC]PROTO contents as data */
320 323 #define RD_PROTDIS 0x00000008 /* discard M_[PC]PROTO blocks and */
321 324 /* retain data blocks */
322 325 /*
323 326 * Flags parameter for strsetrputhooks() and strsetwputhooks().
324 327 * These flags define the interface for setting the above internal
325 328 * flags in sd_rput_opt and sd_wput_opt.
326 329 */
327 330 #define SH_CONSOL_DATA 0x00000001 /* Consolidate M_DATA onto q_last */
328 331 #define SH_SIGALLDATA 0x00000002 /* Send SIGPOLL for all M_DATA */
329 332 #define SH_IGN_ZEROLEN 0x00000004 /* Drop zero-length M_DATA */
330 333
331 334 #define SH_SIGPIPE 0x00000100 /* Send SIGPIPE for write error */
332 335 #define SH_RECHECK_ERR 0x00000200 /* Recheck errors in strwrite loop */
333 336
334 337 /*
335 338 * Each queue points to a sync queue (the inner perimeter) which keeps
336 339 * track of the number of threads that are inside a given queue (sq_count)
337 340 * and also is used to implement the asynchronous putnext
338 341 * (by queuing messages if the queue can not be entered.)
339 342 *
340 343 * Messages are queued on sq_head/sq_tail including deferred qwriter(INNER)
341 344 * messages. The sq_head/sq_tail list is a singly-linked list with
342 345 * b_queue recording the queue and b_prev recording the function to
343 346 * be called (either the put procedure or a qwriter callback function.)
344 347 *
345 348 * The sq_count counter tracks the number of threads that are
346 349 * executing inside the perimeter or (in the case of outer perimeters)
347 350 * have some work queued for them relating to the perimeter. The sq_rmqcount
348 351 * counter tracks the subset which are in removeq() (usually invoked from
349 352 * qprocsoff(9F)).
350 353 *
351 354 * In addition a module writer can declare that the module has an outer
352 355 * perimeter (by setting D_MTOUTPERIM) in which case all inner perimeter
353 356 * syncq's for the module point (through sq_outer) to an outer perimeter
354 357 * syncq. The outer perimeter consists of the doubly linked list (sq_onext and
 355 358 * sq_oprev) linking all the inner perimeter syncq's with one outer perimeter
356 359 * syncq. This is used to implement qwriter(OUTER) (an asynchronous way of
357 360 * getting exclusive access at the outer perimeter) and outer_enter/exit
358 361 * which are used by the framework to acquire exclusive access to the outer
359 362 * perimeter during open and close of modules that have set D_MTOUTPERIM.
360 363 *
361 364 * In the inner perimeter case sq_save is available for use by machine
362 365 * dependent code. sq_head/sq_tail are used to queue deferred messages on
363 366 * the inner perimeter syncqs and to queue become_writer requests on the
364 367 * outer perimeter syncqs.
365 368 *
366 369 * Note: machine dependent optimized versions of putnext may depend
367 370 * on the order of sq_flags and sq_count (so that they can e.g.
368 371 * read these two fields in a single load instruction.)
369 372 *
370 373 * Per perimeter SQLOCK/sq_count in putnext/put may be replaced by per cpu
371 374 * sq_putlocks/sq_putcounts each living in a separate cache line. Obviously
372 375 * sq_putlock[x] protects sq_putcount[x]. putnext/put routine will grab only 1
373 376 * of sq_putlocks and update only 1 of sq_putcounts. strlock() and many
374 377 * other routines in strsubr.c and ddi.c will grab all sq_putlocks (as well as
375 378 * SQLOCK) and figure out the count value as the sum of sq_count and all of
376 379 * sq_putcounts. The idea is to make critical fast path -- putnext -- much
377 380 * faster at the expense of much less often used slower path like
378 381 * strlock(). One known case where entersq/strlock is executed pretty often is
379 382 * SpecWeb but since IP is SQ_CIOC and socket TCP/IP stream is nextless
380 383 * there's no need to grab multiple sq_putlocks and look at sq_putcounts. See
381 384 * strsubr.c for more comments.
382 385 *
383 386 * Note regular SQLOCK and sq_count are still used in many routines
384 387 * (e.g. entersq(), rwnext()) in the same way as before sq_putlocks were
385 388 * introduced.
386 389 *
387 390 * To understand when all sq_putlocks need to be held and all sq_putcounts
388 391 * need to be added up one needs to look closely at putnext code. Basically if
389 392 * a routine like e.g. wait_syncq() needs to be sure that perimeter is empty
390 393 * all sq_putlocks/sq_putcounts need to be held/added up. On the other hand
391 394 * there's no need to hold all sq_putlocks and count all sq_putcounts in
 392 395 * routines like leavesq()/dropsq() and etc. since they are usually exit
393 396 * counterparts of entersq/outer_enter() and etc. which have already either
 394 397 * prevented put entry points from executing or did not care about put
395 398 * entrypoints. entersq() doesn't need to care about sq_putlocks/sq_putcounts
396 399 * if the entry point has a shared access since put has the highest degree of
397 400 * concurrency and such entersq() does not intend to block out put
398 401 * entrypoints.
399 402 *
400 403 * Before sq_putcounts were introduced the standard way to wait for perimeter
401 404 * to become empty was:
402 405 *
403 406 * mutex_enter(SQLOCK(sq));
404 407 * while (sq->sq_count > 0) {
405 408 * sq->sq_flags |= SQ_WANTWAKEUP;
406 409 * cv_wait(&sq->sq_wait, SQLOCK(sq));
407 410 * }
408 411 * mutex_exit(SQLOCK(sq));
409 412 *
410 413 * The new way is:
411 414 *
412 415 * mutex_enter(SQLOCK(sq));
413 416 * count = sq->sq_count;
414 417 * SQ_PUTLOCKS_ENTER(sq);
415 418 * SUM_SQ_PUTCOUNTS(sq, count);
416 419 * while (count != 0) {
417 420 * sq->sq_flags |= SQ_WANTWAKEUP;
418 421 * SQ_PUTLOCKS_EXIT(sq);
419 422 * cv_wait(&sq->sq_wait, SQLOCK(sq));
420 423 * count = sq->sq_count;
421 424 * SQ_PUTLOCKS_ENTER(sq);
422 425 * SUM_SQ_PUTCOUNTS(sq, count);
423 426 * }
424 427 * SQ_PUTLOCKS_EXIT(sq);
425 428 * mutex_exit(SQLOCK(sq));
426 429 *
427 430 * Note that SQ_WANTWAKEUP is set before dropping SQ_PUTLOCKS. This makes sure
428 431 * putnext won't skip a wakeup.
429 432 *
430 433 * sq_putlocks are treated as the extension of SQLOCK for lock ordering
431 434 * purposes and are always grabbed right after grabbing SQLOCK and released
432 435 * right before releasing SQLOCK. This also allows dynamic creation of
433 436 * sq_putlocks while holding SQLOCK (by making sq_ciputctrl non null even when
434 437 * the stream is already in use). Only in putnext one of sq_putlocks
435 438 * is grabbed instead of SQLOCK. putnext return path remembers what counter it
436 439 * incremented and decrements the right counter on its way out.
437 440 */
438 441
439 442 struct syncq {
440 443 kmutex_t sq_lock; /* atomic access to syncq */
441 444 uint16_t sq_count; /* # threads inside */
442 445 uint16_t sq_flags; /* state and some type info */
443 446 /*
444 447 * Distributed syncq scheduling
445 448 * The list of queue's is handled by sq_head and
446 449 * sq_tail fields.
447 450 *
448 451 * The list of events is handled by the sq_evhead and sq_evtail
449 452 * fields.
450 453 */
451 454 queue_t *sq_head; /* queue of deferred messages */
452 455 queue_t *sq_tail; /* queue of deferred messages */
453 456 mblk_t *sq_evhead; /* Event message on the syncq */
454 457 mblk_t *sq_evtail;
455 458 uint_t sq_nqueues; /* # of queues on this sq */
456 459 /*
457 460 * Concurrency and condition variables
458 461 */
459 462 uint16_t sq_type; /* type (concurrency) of syncq */
460 463 uint16_t sq_rmqcount; /* # threads inside removeq() */
461 464 kcondvar_t sq_wait; /* block on this sync queue */
462 465 kcondvar_t sq_exitwait; /* waiting for thread to leave the */
463 466 /* inner perimeter */
464 467 /*
465 468 * Handling synchronous callbacks such as qtimeout and qbufcall
466 469 */
467 470 ushort_t sq_callbflags; /* flags for callback synchronization */
468 471 callbparams_id_t sq_cancelid; /* id of callback being cancelled */
469 472 struct callbparams *sq_callbpend; /* Pending callbacks */
470 473
471 474 /*
472 475 * Links forming an outer perimeter from one outer syncq and
473 476 * a set of inner sync queues.
474 477 */
475 478 struct syncq *sq_outer; /* Pointer to outer perimeter */
476 479 struct syncq *sq_onext; /* Linked list of syncq's making */
477 480 struct syncq *sq_oprev; /* up the outer perimeter. */
478 481 /*
479 482 * support for low contention concurrent putnext.
480 483 */
481 484 ciputctrl_t *sq_ciputctrl;
482 485 uint_t sq_nciputctrl;
483 486 /*
484 487 * Counter for the number of threads wanting to become exclusive.
485 488 */
486 489 uint_t sq_needexcl;
487 490 /*
488 491 * These two fields are used for scheduling a syncq for
489 492 * background processing. The sq_svcflag is protected by
490 493 * SQLOCK lock.
491 494 */
492 495 struct syncq *sq_next; /* for syncq scheduling */
493 496 void * sq_servid;
494 497 uint_t sq_servcount; /* # pending background threads */
495 498 uint_t sq_svcflags; /* Scheduling flags */
496 499 clock_t sq_tstamp; /* Time when was enabled */
497 500 /*
498 501 * Maximum priority of the queues on this syncq.
499 502 */
500 503 pri_t sq_pri;
501 504 };
502 505 typedef struct syncq syncq_t;
503 506
504 507 /*
505 508 * sync queue scheduling flags (for sq_svcflags).
506 509 */
507 510 #define SQ_SERVICE 0x1 /* being serviced */
508 511 #define SQ_BGTHREAD 0x2 /* awaiting service by bg thread */
509 512 #define SQ_DISABLED 0x4 /* don't put syncq in service list */
510 513
511 514 /*
512 515 * FASTPUT bit in sd_count/putcount.
513 516 */
514 517 #define SQ_FASTPUT 0x8000
515 518 #define SQ_FASTMASK 0x7FFF
516 519
517 520 /*
518 521 * sync queue state flags
519 522 */
520 523 #define SQ_EXCL 0x0001 /* exclusive access to inner */
521 524 /* perimeter */
522 525 #define SQ_BLOCKED 0x0002 /* qprocsoff */
523 526 #define SQ_FROZEN 0x0004 /* freezestr */
524 527 #define SQ_WRITER 0x0008 /* qwriter(OUTER) pending or running */
525 528 #define SQ_MESSAGES 0x0010 /* messages on syncq */
526 529 #define SQ_WANTWAKEUP 0x0020 /* do cv_broadcast on sq_wait */
527 530 #define SQ_WANTEXWAKEUP 0x0040 /* do cv_broadcast on sq_exitwait */
528 531 #define SQ_EVENTS 0x0080 /* Events pending */
529 532 #define SQ_QUEUED (SQ_MESSAGES | SQ_EVENTS)
530 533 #define SQ_FLAGMASK 0x00FF
531 534
532 535 /*
533 536 * Test a queue to see if inner perimeter is exclusive.
534 537 */
535 538 #define PERIM_EXCL(q) ((q)->q_syncq->sq_flags & SQ_EXCL)
536 539
537 540 /*
538 541 * If any of these flags are set it is not possible for a thread to
539 542 * enter a put or service procedure. Instead it must either block
540 543 * or put the message on the syncq.
541 544 */
542 545 #define SQ_GOAWAY (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN|SQ_WRITER|\
543 546 SQ_QUEUED)
544 547 /*
545 548 * If any of these flags are set it not possible to drain the syncq
546 549 */
547 550 #define SQ_STAYAWAY (SQ_BLOCKED|SQ_FROZEN|SQ_WRITER)
548 551
549 552 /*
550 553 * Flags to trigger syncq tail processing.
551 554 */
552 555 #define SQ_TAIL (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)
553 556
554 557 /*
555 558 * Syncq types (stored in sq_type)
556 559 * The SQ_TYPES_IN_FLAGS (ciput) are also stored in sq_flags
557 560 * for performance reasons. Thus these type values have to be in the low
558 561 * 16 bits and not conflict with the sq_flags values above.
559 562 *
560 563 * Notes:
561 564 * - putnext() and put() assume that the put procedures have the highest
562 565 * degree of concurrency. Thus if any of the SQ_CI* are set then SQ_CIPUT
563 566 * has to be set. This restriction can be lifted by adding code to putnext
564 567 * and put that check that sq_count == 0 like entersq does.
 565 568 * - putnext() and put() do not currently handle !SQ_COPUT
566 569 * - In order to implement !SQ_COCB outer_enter has to be fixed so that
567 570 * the callback can be cancelled while cv_waiting in outer_enter.
568 571 * - If SQ_CISVC needs to be implemented, qprocsoff() needs to wait
569 572 * for the currently running services to stop (wait for QINSERVICE
570 573 * to go off). disable_svc called from qprcosoff disables only
571 574 * services that will be run in future.
572 575 *
573 576 * All the SQ_CO flags are set when there is no outer perimeter.
574 577 */
575 578 #define SQ_CIPUT 0x0100 /* Concurrent inner put proc */
576 579 #define SQ_CISVC 0x0200 /* Concurrent inner svc proc */
577 580 #define SQ_CIOC 0x0400 /* Concurrent inner open/close */
578 581 #define SQ_CICB 0x0800 /* Concurrent inner callback */
579 582 #define SQ_COPUT 0x1000 /* Concurrent outer put proc */
580 583 #define SQ_COSVC 0x2000 /* Concurrent outer svc proc */
581 584 #define SQ_COOC 0x4000 /* Concurrent outer open/close */
582 585 #define SQ_COCB 0x8000 /* Concurrent outer callback */
583 586
584 587 /* Types also kept in sq_flags for performance */
585 588 #define SQ_TYPES_IN_FLAGS (SQ_CIPUT)
586 589
587 590 #define SQ_CI (SQ_CIPUT|SQ_CISVC|SQ_CIOC|SQ_CICB)
588 591 #define SQ_CO (SQ_COPUT|SQ_COSVC|SQ_COOC|SQ_COCB)
589 592 #define SQ_TYPEMASK (SQ_CI|SQ_CO)
590 593
591 594 /*
592 595 * Flag combinations passed to entersq and leavesq to specify the type
593 596 * of entry point.
594 597 */
595 598 #define SQ_PUT (SQ_CIPUT|SQ_COPUT)
596 599 #define SQ_SVC (SQ_CISVC|SQ_COSVC)
597 600 #define SQ_OPENCLOSE (SQ_CIOC|SQ_COOC)
598 601 #define SQ_CALLBACK (SQ_CICB|SQ_COCB)
599 602
600 603 /*
601 604 * Other syncq types which are not copied into flags.
602 605 */
603 606 #define SQ_PERMOD 0x01 /* Syncq is PERMOD */
604 607
605 608 /*
606 609 * Asynchronous callback qun*** flag.
607 610 * The mechanism these flags are used in is one where callbacks enter
608 611 * the perimeter thanks to framework support. To use this mechanism
609 612 * the q* and qun* flavors of the callback routines must be used.
610 613 * e.g. qtimeout and quntimeout. The synchronization provided by the flags
611 614 * avoids deadlocks between blocking qun* routines and the perimeter
612 615 * lock.
613 616 */
614 617 #define SQ_CALLB_BYPASSED 0x01 /* bypassed callback fn */
615 618
616 619 /*
617 620 * Cancel callback mask.
618 621 * The mask expands as the number of cancelable callback types grows
619 622 * Note - separate callback flag because different callbacks have
620 623 * overlapping id space.
621 624 */
622 625 #define SQ_CALLB_CANCEL_MASK (SQ_CANCEL_TOUT|SQ_CANCEL_BUFCALL)
623 626
624 627 #define SQ_CANCEL_TOUT 0x02 /* cancel timeout request */
625 628 #define SQ_CANCEL_BUFCALL 0x04 /* cancel bufcall request */
626 629
627 630 typedef struct callbparams {
628 631 syncq_t *cbp_sq;
629 632 void (*cbp_func)(void *);
630 633 void *cbp_arg;
631 634 callbparams_id_t cbp_id;
632 635 uint_t cbp_flags;
633 636 struct callbparams *cbp_next;
634 637 size_t cbp_size;
635 638 } callbparams_t;
636 639
637 640 typedef struct strbufcall {
638 641 void (*bc_func)(void *);
639 642 void *bc_arg;
640 643 size_t bc_size;
641 644 bufcall_id_t bc_id;
642 645 struct strbufcall *bc_next;
643 646 kthread_id_t bc_executor;
644 647 } strbufcall_t;
645 648
646 649 /*
647 650 * Structure of list of processes to be sent SIGPOLL/SIGURG signal
648 651 * on request. The valid S_* events are defined in stropts.h.
649 652 */
650 653 typedef struct strsig {
651 654 struct pid *ss_pidp; /* pid/pgrp pointer */
652 655 pid_t ss_pid; /* positive pid, negative pgrp */
653 656 int ss_events; /* S_* events */
654 657 struct strsig *ss_next;
655 658 } strsig_t;
656 659
657 660 /*
658 661 * bufcall list
659 662 */
660 663 struct bclist {
661 664 strbufcall_t *bc_head;
662 665 strbufcall_t *bc_tail;
663 666 };
664 667
665 668 /*
666 669 * Structure used to track mux links and unlinks.
667 670 */
668 671 struct mux_node {
669 672 major_t mn_imaj; /* internal major device number */
670 673 uint16_t mn_indegree; /* number of incoming edges */
671 674 struct mux_node *mn_originp; /* where we came from during search */
672 675 struct mux_edge *mn_startp; /* where search left off in mn_outp */
673 676 struct mux_edge *mn_outp; /* list of outgoing edges */
674 677 uint_t mn_flags; /* see below */
675 678 };
676 679
677 680 /*
678 681 * Flags for mux_nodes.
679 682 */
680 683 #define VISITED 1
681 684
682 685 /*
683 686 * Edge structure - a list of these is hung off the
684 687 * mux_node to represent the outgoing edges.
685 688 */
686 689 struct mux_edge {
687 690 struct mux_node *me_nodep; /* edge leads to this node */
688 691 struct mux_edge *me_nextp; /* next edge */
689 692 int me_muxid; /* id of link */
690 693 dev_t me_dev; /* dev_t - used for kernel PUNLINK */
691 694 };
692 695
693 696 /*
694 697 * Queue info
695 698 *
696 699 * The syncq is included here to reduce memory fragmentation
697 700 * for kernel memory allocators that only allocate in sizes that are
698 701 * powers of two. If the kernel memory allocator changes this should
699 702 * be revisited.
700 703 */
701 704 typedef struct queinfo {
702 705 struct queue qu_rqueue; /* read queue - must be first */
703 706 struct queue qu_wqueue; /* write queue - must be second */
704 707 struct syncq qu_syncq; /* syncq - must be third */
705 708 } queinfo_t;
706 709
707 710 /*
708 711 * Multiplexed streams info
709 712 */
710 713 typedef struct linkinfo {
711 714 struct linkblk li_lblk; /* must be first */
712 715 struct file *li_fpdown; /* file pointer for lower stream */
713 716 struct linkinfo *li_next; /* next in list */
714 717 struct linkinfo *li_prev; /* previous in list */
715 718 } linkinfo_t;
716 719 
717 720 /*
718 721 * List of syncq's used by freezestr/unfreezestr
719 722 */
720 723 typedef struct syncql {
721 724 struct syncql *sql_next; /* next list element */
722 725 syncq_t *sql_sq; /* syncq being tracked */
723 726 } syncql_t;
724 727 
725 728 typedef struct sqlist {
726 729 syncql_t *sqlist_head; /* head of the in-use list */
727 730 size_t sqlist_size; /* structure size in bytes */
728 731 size_t sqlist_index; /* next free entry in array */
729 732 syncql_t sqlist_array[4]; /* 4 or more entries */
730 733 } sqlist_t;
731 734
/*
 * Per-module/driver syncq bookkeeping; entries are obtained and
 * released via hold_dm()/rele_dm() declared below.
 */
732 735 typedef struct perdm {
733 736 struct perdm *dm_next; /* next perdm in the list */
734 737 syncq_t *dm_sq; /* shared syncq */
735 738 struct streamtab *dm_str; /* streamtab this entry belongs to */
736 739 uint_t dm_ref; /* reference count (hold_dm/rele_dm) */
737 740 } perdm_t;
738 741 
/* True when a perdm_t is required (QPERMOD/QMTOUTPERIM) but not yet set. */
739 742 #define NEED_DM(dmp, qflag) \
740 743 (dmp == NULL && (qflag & (QPERMOD | QMTOUTPERIM)))
741 744
742 745 /*
743 746 * fmodsw_impl_t is used within the kernel. fmodsw is used by
744 747 * the modules/drivers. The information is copied from fmodsw
745 748 * defined in the module/driver into the fmodsw_impl_t structure
746 749 * during the module/driver initialization.
747 750 */
748 751 typedef struct fmodsw_impl fmodsw_impl_t;
749 752 
750 753 struct fmodsw_impl {
751 754 fmodsw_impl_t *f_next; /* next entry in the chain */
752 755 char f_name[FMNAMESZ + 1]; /* NUL-terminated module name */
753 756 struct streamtab *f_str; /* module's streamtab */
754 757 uint32_t f_qflag; /* queue flags */
755 758 uint32_t f_sqtype; /* syncq type */
756 759 perdm_t *f_dmp; /* per-module syncq info, if any */
757 760 uint32_t f_ref; /* reference count */
758 761 uint32_t f_hits; /* lookup statistics */
759 762 };
760 763 
/* Flags for fmodsw_find(), declared below. */
761 764 typedef enum {
762 765 FMODSW_HOLD = 0x00000001, /* take a hold on the found entry */
763 766 FMODSW_LOAD = 0x00000002 /* load module if not found — confirm in fmodsw.c */
764 767 } fmodsw_flags_t;
765 768 
/* Per-major STREAMS driver information (see the devimpl extern below). */
766 769 typedef struct cdevsw_impl {
767 770 struct streamtab *d_str; /* driver's streamtab */
768 771 uint32_t d_qflag; /* queue flags */
769 772 uint32_t d_sqtype; /* syncq type */
770 773 perdm_t *d_dmp; /* per-driver syncq info, if any */
771 774 } cdevsw_impl_t;
772 775
773 776 /*
774 777 * Enumeration of the types of access that can be requested for a
775 778 * controlling terminal under job control.
776 779 */
777 780 enum jcaccess {
778 781 JCREAD, /* read data on a ctty */
779 782 JCWRITE, /* write data to a ctty */
780 783 JCSETP, /* set ctty parameters */
781 784 JCGETP /* get ctty parameters */
782 785 };
783 786 
/* Per-netstack STREAMS state (autopush/sad and mux-link tracking). */
784 787 struct str_stack {
785 788 netstack_t *ss_netstack; /* Common netstack */
786 789 
787 790 kmutex_t ss_sad_lock; /* autopush lock */
788 791 mod_hash_t *ss_sad_hash; /* autopush hash table */
789 792 size_t ss_sad_hash_nchains; /* chains in ss_sad_hash */
790 793 struct saddev *ss_saddev; /* sad device array */
791 794 int ss_sadcnt; /* number of sad devices */
792 795 
793 796 int ss_devcnt; /* number of mux_nodes */
794 797 struct mux_node *ss_mux_nodes; /* mux info for cycle checking */
795 798 };
796 799 typedef struct str_stack str_stack_t;
797 800
798 801 /*
799 802 * Finding related queues
800 803 */
801 804 #define STREAM(q) ((q)->q_stream)
/* Read queue -> its syncq; relies on queinfo_t layout (rq, wq, syncq). */
802 805 #define SQ(rq) ((syncq_t *)((rq) + 2))
803 806 
804 807 /*
805 808 * Get the module/driver name for a queue. Since some queues don't have
806 809 * q_info structures (e.g., see log_makeq()), fall back to "?".
807 810 */
808 811 #define Q2NAME(q) \
809 812 (((q)->q_qinfo != NULL && (q)->q_qinfo->qi_minfo->mi_idname != NULL) ? \
810 813 (q)->q_qinfo->qi_minfo->mi_idname : "?")
811 814 
812 815 /*
813 816 * Locking macros
814 817 */
815 818 #define QLOCK(q) (&(q)->q_lock)
816 819 #define SQLOCK(sq) (&(sq)->sq_lock)
817 820
/*
 * Acquire/release every ciputctrl lock of a stream head; caller must
 * already hold sd_lock (asserted).
 * NOTE(review): the loops use an inclusive bound (i <= nlocks), so
 * sd_nciputctrl appears to hold the highest valid index rather than
 * the element count — confirm against the allocation code.
 */
818 821 #define STREAM_PUTLOCKS_ENTER(stp) { \
819 822 ASSERT(MUTEX_HELD(&(stp)->sd_lock)); \
820 823 if ((stp)->sd_ciputctrl != NULL) { \
821 824 int i; \
822 825 int nlocks = (stp)->sd_nciputctrl; \
823 826 ciputctrl_t *cip = (stp)->sd_ciputctrl; \
824 827 for (i = 0; i <= nlocks; i++) { \
825 828 mutex_enter(&cip[i].ciputctrl_lock); \
826 829 } \
827 830 } \
828 831 }
829 832 
830 833 #define STREAM_PUTLOCKS_EXIT(stp) { \
831 834 ASSERT(MUTEX_HELD(&(stp)->sd_lock)); \
832 835 if ((stp)->sd_ciputctrl != NULL) { \
833 836 int i; \
834 837 int nlocks = (stp)->sd_nciputctrl; \
835 838 ciputctrl_t *cip = (stp)->sd_ciputctrl; \
836 839 for (i = 0; i <= nlocks; i++) { \
837 840 mutex_exit(&cip[i].ciputctrl_lock); \
838 841 } \
839 842 } \
840 843 }
841 844
/*
 * Same as the stream-head variants above, but for a syncq's ciputctrl
 * array; only valid on SQ_CIPUT syncqs (asserted). SQLOCK must be held.
 */
842 845 #define SQ_PUTLOCKS_ENTER(sq) { \
843 846 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
844 847 if ((sq)->sq_ciputctrl != NULL) { \
845 848 int i; \
846 849 int nlocks = (sq)->sq_nciputctrl; \
847 850 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
848 851 ASSERT((sq)->sq_type & SQ_CIPUT); \
849 852 for (i = 0; i <= nlocks; i++) { \
850 853 mutex_enter(&cip[i].ciputctrl_lock); \
851 854 } \
852 855 } \
853 856 }
854 857 
855 858 #define SQ_PUTLOCKS_EXIT(sq) { \
856 859 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
857 860 if ((sq)->sq_ciputctrl != NULL) { \
858 861 int i; \
859 862 int nlocks = (sq)->sq_nciputctrl; \
860 863 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
861 864 ASSERT((sq)->sq_type & SQ_CIPUT); \
862 865 for (i = 0; i <= nlocks; i++) { \
863 866 mutex_exit(&cip[i].ciputctrl_lock); \
864 867 } \
865 868 } \
866 869 }
867 870
/*
 * Set/clear the SQ_FASTPUT bit in every ciputctrl counter of a syncq,
 * taking each ciputctrl_lock around the update. SQLOCK must be held.
 */
868 871 #define SQ_PUTCOUNT_SETFAST(sq) { \
869 872 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
870 873 if ((sq)->sq_ciputctrl != NULL) { \
871 874 int i; \
872 875 int nlocks = (sq)->sq_nciputctrl; \
873 876 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
874 877 ASSERT((sq)->sq_type & SQ_CIPUT); \
875 878 for (i = 0; i <= nlocks; i++) { \
876 879 mutex_enter(&cip[i].ciputctrl_lock); \
877 880 cip[i].ciputctrl_count |= SQ_FASTPUT; \
878 881 mutex_exit(&cip[i].ciputctrl_lock); \
879 882 } \
880 883 } \
881 884 }
882 885 
883 886 #define SQ_PUTCOUNT_CLRFAST(sq) { \
884 887 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
885 888 if ((sq)->sq_ciputctrl != NULL) { \
886 889 int i; \
887 890 int nlocks = (sq)->sq_nciputctrl; \
888 891 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
889 892 ASSERT((sq)->sq_type & SQ_CIPUT); \
890 893 for (i = 0; i <= nlocks; i++) { \
891 894 mutex_enter(&cip[i].ciputctrl_lock); \
892 895 cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
893 896 mutex_exit(&cip[i].ciputctrl_lock); \
894 897 } \
895 898 } \
896 899 }
897 900
898 901 
/*
 * Debug-only consistency checks: verify that all ciputctrl locks are
 * held, and that the SQ_FASTMASK portions of the counters sum to the
 * expected value. These compile to nothing when !DEBUG.
 */
899 902 #ifdef DEBUG
900 903 
901 904 #define SQ_PUTLOCKS_HELD(sq) { \
902 905 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
903 906 if ((sq)->sq_ciputctrl != NULL) { \
904 907 int i; \
905 908 int nlocks = (sq)->sq_nciputctrl; \
906 909 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
907 910 ASSERT((sq)->sq_type & SQ_CIPUT); \
908 911 for (i = 0; i <= nlocks; i++) { \
909 912 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
910 913 } \
911 914 } \
912 915 }
913 916 
914 917 #define SUMCHECK_SQ_PUTCOUNTS(sq, countcheck) { \
915 918 if ((sq)->sq_ciputctrl != NULL) { \
916 919 int i; \
917 920 uint_t count = 0; \
918 921 int ncounts = (sq)->sq_nciputctrl; \
919 922 ASSERT((sq)->sq_type & SQ_CIPUT); \
920 923 for (i = 0; i <= ncounts; i++) { \
921 924 count += \
922 925 (((sq)->sq_ciputctrl[i].ciputctrl_count) & \
923 926 SQ_FASTMASK); \
924 927 } \
925 928 ASSERT(count == (countcheck)); \
926 929 } \
927 930 }
928 931 
929 932 #define SUMCHECK_CIPUTCTRL_COUNTS(ciput, nciput, countcheck) { \
930 933 int i; \
931 934 uint_t count = 0; \
932 935 ASSERT((ciput) != NULL); \
933 936 for (i = 0; i <= (nciput); i++) { \
934 937 count += (((ciput)[i].ciputctrl_count) & \
935 938 SQ_FASTMASK); \
936 939 } \
937 940 ASSERT(count == (countcheck)); \
938 941 }
939 942 
940 943 #else /* DEBUG */
941 944 
942 945 #define SQ_PUTLOCKS_HELD(sq)
943 946 #define SUMCHECK_SQ_PUTCOUNTS(sq, countcheck)
944 947 #define SUMCHECK_CIPUTCTRL_COUNTS(sq, nciput, countcheck)
945 948 
946 949 #endif /* DEBUG */
947 950
/* Accumulate the SQ_FASTMASK counter total of a syncq into (count). */
948 951 #define SUM_SQ_PUTCOUNTS(sq, count) { \
949 952 if ((sq)->sq_ciputctrl != NULL) { \
950 953 int i; \
951 954 int ncounts = (sq)->sq_nciputctrl; \
952 955 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
953 956 ASSERT((sq)->sq_type & SQ_CIPUT); \
954 957 for (i = 0; i <= ncounts; i++) { \
955 958 (count) += ((cip[i].ciputctrl_count) & \
956 959 SQ_FASTMASK); \
957 960 } \
958 961 } \
959 962 }
960 963 
/* The q_next protection lock is implemented with the stream head's sd_lock. */
961 964 #define CLAIM_QNEXT_LOCK(stp) mutex_enter(&(stp)->sd_lock)
962 965 #define RELEASE_QNEXT_LOCK(stp) mutex_exit(&(stp)->sd_lock)
963 966
964 967 /*
965 968 * syncq message manipulation macros.
966 969 */
967 970 /*
968 971 * Put a message on the queue syncq.
969 972 * Assumes QLOCK held.
* Appends mp to the q_sqhead/q_sqtail chain (linked via b_next),
* bumps q_syncqmsgs, then re-evaluates flow control via set_qfull().
970 973 */
971 974 #define SQPUT_MP(qp, mp) \
972 975 { \
973 976 qp->q_syncqmsgs++; \
974 977 if (qp->q_sqhead == NULL) { \
975 978 qp->q_sqhead = qp->q_sqtail = mp; \
976 979 } else { \
977 980 qp->q_sqtail->b_next = mp; \
978 981 qp->q_sqtail = mp; \
979 982 } \
980 983 set_qfull(qp); \
981 984 }
982 985
983 986 /*
984 987 * Miscellaneous parameters and flags.
985 988 */
986 989 
987 990 /*
988 991 * Default timeout in milliseconds for ioctls and close
989 992 */
990 993 #define STRTIMOUT 15000 /* 15 seconds */
991 994 
992 995 /*
993 996 * Flag values for stream io
994 997 */
995 998 #define WRITEWAIT 0x1 /* waiting for write event */
996 999 #define READWAIT 0x2 /* waiting for read event */
997 1000 #define NOINTR 0x4 /* error is not to be set for signal */
998 1001 #define GETWAIT 0x8 /* waiting for getmsg event */
999 1002 
1000 1003 /*
1001 1004 * These flags need to be unique for stream io name space
1002 1005 * and copy modes name space. These flags allow strwaitq
1003 1006 * and strdoioctl to proceed as if signals or errors on the stream
1004 1007 * head have not occurred; i.e. they will be detected by some other
1005 1008 * means.
1006 1009 * STR_NOSIG does not allow signals to interrupt the call
1007 1010 * STR_NOERROR does not allow stream head read, write or hup errors to
1008 1011 * affect the call. When used with strdoioctl(), if a previous ioctl
1009 1012 * is pending and times out, STR_NOERROR will cause strdoioctl() to not
1010 1013 * return ETIME. If, however, the requested ioctl times out, ETIME
1011 1014 * will be returned (use ic_timout instead)
1012 1015 * STR_PEEK is used to inform strwaitq that the reader is peeking at data
1013 1016 * and that a non-persistent error should not be cleared.
1014 1017 * STR_DELAYERR is used to inform strwaitq that it should not check errors
1015 1018 * after being awoken since, in addition to an error, there might also be
1016 1019 * data queued on the stream head read queue.
1017 1020 */
1018 1021 #define STR_NOSIG 0x10 /* Ignore signals during strdoioctl/strwaitq */
1019 1022 #define STR_NOERROR 0x20 /* Ignore errors during strdoioctl/strwaitq */
1020 1023 #define STR_PEEK 0x40 /* Peeking behavior on non-persistent errors */
1021 1024 #define STR_DELAYERR 0x80 /* Do not check errors on return */
1022 1025 
1023 1026 /*
1024 1027 * Copy modes for tty and I_STR ioctls
1025 1028 */
1026 1029 #define U_TO_K 01 /* User to Kernel */
1027 1030 #define K_TO_K 02 /* Kernel to Kernel */
1028 1031 
1029 1032 /*
1030 1033 * Mux defines.
1031 1034 */
1032 1035 #define LINKNORMAL 0x01 /* normal mux link */
1033 1036 #define LINKPERSIST 0x02 /* persistent mux link */
1034 1037 #define LINKTYPEMASK 0x03 /* bitmask of all link types */
1035 1038 #define LINKCLOSE 0x04 /* unlink from strclose */
1036 1039
1037 1040 /*
1038 1041 * Definitions of Streams macros and function interfaces.
1039 1042 */
1040 1043 
1041 1044 /*
1042 1045 * Obsolete queue scheduling macros. They are not used anymore, but still kept
1043 1046 * here for third-party modules and drivers who might still use them.
1044 1047 */
1045 1048 #define setqsched()
1046 1049 #define qready() 1
1047 1050 
1048 1051 #ifdef _KERNEL
1049 1052 #define runqueues()
1050 1053 #define queuerun()
1051 1054 #endif
1052 1055 
1053 1056 /* compatibility module for style 2 drivers with DR race condition */
1054 1057 #define DRMODNAME "drcompat"
1055 1058
1056 1059 /*
1057 1060 * Macros dealing with mux_nodes (see struct mux_node above).
 * MUX_CLEAR is a single comma expression so the macro expands to
 * exactly one statement; the previous two-statement form would
 * silently misbehave in an unbraced "if" (CERT PRE10-C).
1058 1061 */
1059 1062 #define MUX_VISIT(X) ((X)->mn_flags |= VISITED)
1060 1063 #define MUX_CLEAR(X) ((X)->mn_flags &= (~VISITED), \
1061 1064 (X)->mn_originp = NULL)
1062 1065 #define MUX_DIDVISIT(X) ((X)->mn_flags & VISITED)
1063 1066
1064 1067
1065 1068 /*
1066 1069 * Twisted stream macros
1067 1070 */
1068 1071 #define STRMATED(X) ((X)->sd_flag & STRMATE)
1069 1072 #define STRLOCKMATES(X) if (&((X)->sd_lock) > &(((X)->sd_mate)->sd_lock)) { \
1070 1073 mutex_enter(&((X)->sd_lock)); \
1071 1074 mutex_enter(&(((X)->sd_mate)->sd_lock)); \
1072 1075 } else { \
1073 1076 mutex_enter(&(((X)->sd_mate)->sd_lock)); \
1074 1077 mutex_enter(&((X)->sd_lock)); \
1075 1078 }
1076 1079 #define STRUNLOCKMATES(X) mutex_exit(&((X)->sd_lock)); \
1077 1080 mutex_exit(&(((X)->sd_mate)->sd_lock))
1078 1081
1079 1082 #ifdef _KERNEL
1080 1083 
/* Private STREAMS framework entry points: plumbing, ioctl, open/close. */
1081 1084 extern void strinit(void);
1082 1085 extern int strdoioctl(struct stdata *, struct strioctl *, int, int,
1083 1086 cred_t *, int *);
1084 1087 extern void strsendsig(struct strsig *, int, uchar_t, int);
1085 1088 extern void str_sendsig(vnode_t *, int, uchar_t, int);
1086 1089 extern void strhup(struct stdata *);
1087 1090 extern int qattach(queue_t *, dev_t *, int, cred_t *, fmodsw_impl_t *,
1088 1091 boolean_t);
1089 1092 extern int qreopen(queue_t *, dev_t *, int, cred_t *);
1090 1093 extern void qdetach(queue_t *, int, int, cred_t *, boolean_t);
1091 1094 extern void enterq(queue_t *);
1092 1095 extern void leaveq(queue_t *);
1093 1096 extern int putiocd(mblk_t *, caddr_t, int, cred_t *);
1094 1097 extern int getiocd(mblk_t *, caddr_t, int);
/* Mux link/unlink management (linkinfo_t and mux_node defined above). */
1095 1098 extern struct linkinfo *alloclink(queue_t *, queue_t *, struct file *);
1096 1099 extern void lbfree(struct linkinfo *);
1097 1100 extern int linkcycle(stdata_t *, stdata_t *, str_stack_t *);
1098 1101 extern struct linkinfo *findlinks(stdata_t *, int, int, str_stack_t *);
1099 1102 extern queue_t *getendq(queue_t *);
1100 1103 extern int mlink(vnode_t *, int, int, cred_t *, int *, int);
1101 1104 extern int mlink_file(vnode_t *, int, struct file *, cred_t *, int *, int);
1102 1105 extern int munlink(struct stdata *, struct linkinfo *, int, cred_t *, int *,
1103 1106 str_stack_t *);
1104 1107 extern int munlinkall(struct stdata *, int, cred_t *, int *, str_stack_t *);
1105 1108 extern void mux_addedge(stdata_t *, stdata_t *, int, str_stack_t *);
1106 1109 extern void mux_rmvedge(stdata_t *, int, str_stack_t *);
1107 1110 extern int devflg_to_qflag(struct streamtab *, uint32_t, uint32_t *,
1108 1111 uint32_t *);
1109 1112 extern void setq(queue_t *, struct qinit *, struct qinit *, perdm_t *,
1110 1113 uint32_t, uint32_t, boolean_t);
1111 1114 extern perdm_t *hold_dm(struct streamtab *, uint32_t, uint32_t);
1112 1115 extern void rele_dm(perdm_t *);
/* Message construction and stream head read/write paths. */
1113 1116 extern int strmakectl(struct strbuf *, int32_t, int32_t, mblk_t **);
1114 1117 extern int strmakedata(ssize_t *, struct uio *, stdata_t *, int32_t, mblk_t **);
1115 1118 extern int strmakemsg(struct strbuf *, ssize_t *, struct uio *,
1116 1119 struct stdata *, int32_t, mblk_t **);
1117 1120 extern int strgetmsg(vnode_t *, struct strbuf *, struct strbuf *, uchar_t *,
1118 1121 int *, int, rval_t *);
1119 1122 extern int strputmsg(vnode_t *, struct strbuf *, struct strbuf *, uchar_t,
1120 1123 int flag, int fmode);
1121 1124 extern int strstartplumb(struct stdata *, int, int);
1122 1125 extern void strendplumb(struct stdata *);
1123 1126 extern int stropen(struct vnode *, dev_t *, int, cred_t *);
1124 1127 extern int strclose(struct vnode *, int, cred_t *);
1125 1128 extern int strpoll(register struct stdata *, short, int, short *,
1126 1129 struct pollhead **);
1127 1130 extern void strclean(struct vnode *);
1128 1131 extern void str_cn_clean(); /* XXX hook for consoles signal cleanup */
1129 1132 extern int strwrite(struct vnode *, struct uio *, cred_t *);
1130 1133 extern int strwrite_common(struct vnode *, struct uio *, cred_t *, int);
1131 1134 extern int strread(struct vnode *, struct uio *, cred_t *);
1132 1135 extern int strioctl(struct vnode *, int, intptr_t, int, int, cred_t *, int *);
↓ open down ↓ |
879 lines elided |
↑ open up ↑ |
1133 1136 extern int strrput(queue_t *, mblk_t *);
1134 1137 extern int strrput_nondata(queue_t *, mblk_t *);
1135 1138 extern mblk_t *strrput_proto(vnode_t *, mblk_t *,
1136 1139 strwakeup_t *, strsigset_t *, strsigset_t *, strpollset_t *);
1137 1140 extern mblk_t *strrput_misc(vnode_t *, mblk_t *,
1138 1141 strwakeup_t *, strsigset_t *, strsigset_t *, strpollset_t *);
1139 1142 extern int getiocseqno(void);
1140 1143 extern int strwaitbuf(size_t, int);
1141 1144 extern int strwaitq(stdata_t *, int, ssize_t, int, clock_t, int *);
1142 1145 extern struct stdata *shalloc(queue_t *);
/*
 * Stream-head PID tracking, added so process IDs can be reported in
 * netstat output (types come from <sys/pidnode.h>).
 * NOTE(review): ownership of the list returned by sh_get_pid_list()
 * is not documented here — confirm who frees it.
 */
1146 +extern void sh_insert_pid(struct stdata *, proc_t *);
1147 +extern void sh_remove_pid(struct stdata *, proc_t *);
1148 +extern conn_pid_node_list_hdr_t *sh_get_pid_list(struct stdata *);
1143 1149 extern void shfree(struct stdata *s);
1144 1150 extern queue_t *allocq(void);
1145 1151 extern void freeq(queue_t *);
1146 1152 extern qband_t *allocband(void);
1147 1153 extern void freeband(qband_t *);
1148 1154 extern void freebs_enqueue(mblk_t *, dblk_t *);
1149 1155 extern void setqback(queue_t *, unsigned char);
1150 1156 extern int strcopyin(void *, void *, size_t, int);
1151 1157 extern int strcopyout(void *, void *, size_t, int);
1152 1158 extern void strsignal(struct stdata *, int, int32_t);
1153 1159 extern clock_t str_cv_wait(kcondvar_t *, kmutex_t *, clock_t, int);
1154 1160 extern void disable_svc(queue_t *);
1155 1161 extern void enable_svc(queue_t *);
1156 1162 extern void remove_runlist(queue_t *);
1157 1163 extern void wait_svc(queue_t *);
1158 1164 extern void backenable(queue_t *, uchar_t);
1159 1165 extern void set_qend(queue_t *);
1160 1166 extern int strgeterr(stdata_t *, int32_t, int);
1161 1167 extern void qenable_locked(queue_t *);
1162 1168 extern mblk_t *getq_noenab(queue_t *, ssize_t);
1163 1169 extern void rmvq_noenab(queue_t *, mblk_t *);
1164 1170 extern void qbackenable(queue_t *, uchar_t);
1165 1171 extern void set_qfull(queue_t *);
1166 1172 
1167 1173 extern void strblock(queue_t *);
1168 1174 extern void strunblock(queue_t *);
1169 1175 extern int qclaimed(queue_t *);
1170 1176 extern int straccess(struct stdata *, enum jcaccess);
1171 1177 
/* Perimeter (syncq) entry/exit and queue claim/release. */
1172 1178 extern void entersq(syncq_t *, int);
1173 1179 extern void leavesq(syncq_t *, int);
1174 1180 extern void claimq(queue_t *);
1175 1181 extern void releaseq(queue_t *);
1176 1182 extern void claimstr(queue_t *);
1177 1183 extern void releasestr(queue_t *);
1178 1184 extern void removeq(queue_t *);
1179 1185 extern void insertq(struct stdata *, queue_t *);
1180 1186 extern void drain_syncq(syncq_t *);
1181 1187 extern void qfill_syncq(syncq_t *, queue_t *, mblk_t *);
1182 1188 extern void qdrain_syncq(syncq_t *, queue_t *);
1183 1189 extern int flush_syncq(syncq_t *, queue_t *);
1184 1190 extern void wait_sq_svc(syncq_t *);
1185 1191 
1186 1192 extern void outer_enter(syncq_t *, uint16_t);
1187 1193 extern void outer_exit(syncq_t *);
1188 1194 extern void qwriter_inner(queue_t *, mblk_t *, void (*)());
1189 1195 extern void qwriter_outer(queue_t *, mblk_t *, void (*)());
1190 1196 
1191 1197 extern callbparams_t *callbparams_alloc(syncq_t *, void (*)(void *),
1192 1198 void *, int);
1193 1199 extern void callbparams_free(syncq_t *, callbparams_t *);
1194 1200 extern void callbparams_free_id(syncq_t *, callbparams_id_t, int32_t);
1195 1201 extern void qcallbwrapper(void *);
1196 1202 
/* mblk allocation and credential/label handling. */
1197 1203 extern mblk_t *esballoc_wait(unsigned char *, size_t, uint_t, frtn_t *);
1198 1204 extern mblk_t *esballoca(unsigned char *, size_t, uint_t, frtn_t *);
1199 1205 extern mblk_t *desballoca(unsigned char *, size_t, uint_t, frtn_t *);
1200 1206 extern int do_sendfp(struct stdata *, struct file *, struct cred *);
1201 1207 extern int frozenstr(queue_t *);
1202 1208 extern size_t xmsgsize(mblk_t *);
1203 1209 
1204 1210 extern void putnext_tail(syncq_t *, queue_t *, uint32_t);
1205 1211 extern void stream_willservice(stdata_t *);
1206 1212 extern void stream_runservice(stdata_t *);
1207 1213 
1208 1214 extern void strmate(vnode_t *, vnode_t *);
1209 1215 extern queue_t *strvp2wq(vnode_t *);
1210 1216 extern vnode_t *strq2vp(queue_t *);
1211 1217 extern mblk_t *allocb_wait(size_t, uint_t, uint_t, int *);
1212 1218 extern mblk_t *allocb_cred(size_t, cred_t *, pid_t);
1213 1219 extern mblk_t *allocb_cred_wait(size_t, uint_t, int *, cred_t *, pid_t);
1214 1220 extern mblk_t *allocb_tmpl(size_t, const mblk_t *);
1215 1221 extern mblk_t *allocb_tryhard(size_t);
1216 1222 extern void mblk_copycred(mblk_t *, const mblk_t *);
1217 1223 extern void mblk_setcred(mblk_t *, cred_t *, pid_t);
1218 1224 extern cred_t *msg_getcred(const mblk_t *, pid_t *);
1219 1225 extern struct ts_label_s *msg_getlabel(const mblk_t *);
1220 1226 extern cred_t *msg_extractcred(mblk_t *, pid_t *);
1221 1227 extern void strpollwakeup(vnode_t *, short);
1222 1228 extern int putnextctl_wait(queue_t *, int);
1223 1229 
1224 1230 extern int kstrputmsg(struct vnode *, mblk_t *, struct uio *, ssize_t,
1225 1231 unsigned char, int, int);
1226 1232 extern int kstrgetmsg(struct vnode *, mblk_t **, struct uio *,
1227 1233 unsigned char *, int *, clock_t, rval_t *);
1228 1234 
1229 1235 extern void strsetrerror(vnode_t *, int, int, errfunc_t);
1230 1236 extern void strsetwerror(vnode_t *, int, int, errfunc_t);
1231 1237 extern void strseteof(vnode_t *, int);
1232 1238 extern void strflushrq(vnode_t *, int);
1233 1239 extern void strsetrputhooks(vnode_t *, uint_t, msgfunc_t, msgfunc_t);
1234 1240 extern void strsetwputhooks(vnode_t *, uint_t, clock_t);
1235 1241 extern void strsetrwputdatahooks(vnode_t *, msgfunc_t, msgfunc_t);
1236 1242 extern int strwaitmark(vnode_t *);
1237 1243 extern void strsignal_nolock(stdata_t *, int, uchar_t);
1238 1244 
/* Hardware checksum / LSO association helpers (multidata). */
1239 1245 struct multidata_s;
1240 1246 struct pdesc_s;
1241 1247 extern int hcksum_assoc(mblk_t *, struct multidata_s *, struct pdesc_s *,
1242 1248 uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, int);
1243 1249 extern void hcksum_retrieve(mblk_t *, struct multidata_s *, struct pdesc_s *,
1244 1250 uint32_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
1245 1251 extern void lso_info_set(mblk_t *, uint32_t, uint32_t);
1246 1252 extern void lso_info_cleanup(mblk_t *);
1247 1253 extern unsigned int bcksum(uchar_t *, int, unsigned int);
1248 1254 extern boolean_t is_vmloaned_mblk(mblk_t *, struct multidata_s *,
1249 1255 struct pdesc_s *);
1250 1256 
1251 1257 extern int fmodsw_register(const char *, struct streamtab *, int);
1252 1258 extern int fmodsw_unregister(const char *);
1253 1259 extern fmodsw_impl_t *fmodsw_find(const char *, fmodsw_flags_t);
1254 1260 extern void fmodsw_rele(fmodsw_impl_t *);
1255 1261 
1256 1262 extern void freemsgchain(mblk_t *);
1257 1263 extern mblk_t *copymsgchain(mblk_t *);
1258 1264 
1259 1265 extern mblk_t *mcopyinuio(struct stdata *, uio_t *, ssize_t, ssize_t, int *);
1261 1267 /*
1262 1268 * shared or externally configured data structures
1263 1269 */
1264 1270 extern ssize_t strmsgsz; /* maximum stream message size */
1265 1271 extern ssize_t strctlsz; /* maximum size of ctl message */
1266 1272 extern int nstrpush; /* maximum number of pushes allowed */
1267 1273 
1268 1274 /*
1269 1275 * Bufcalls related variables.
1270 1276 */
1271 1277 extern struct bclist strbcalls; /* List of bufcalls */
1272 1278 extern kmutex_t strbcall_lock; /* Protects the list of bufcalls */
1273 1279 extern kcondvar_t strbcall_cv; /* Signaling when a bufcall is added */
1274 1280 extern kcondvar_t bcall_cv; /* wait of executing bufcall completes */
1275 1281 
1276 1282 extern frtn_t frnop;
1277 1283 
/* ciputctrl array allocation: kmem cache and sizing tunables. */
1278 1284 extern struct kmem_cache *ciputctrl_cache;
1279 1285 extern int n_ciputctrl;
1280 1286 extern int max_n_ciputctrl;
1281 1287 extern int min_n_ciputctrl;
1282 1288 
/* Per-major STREAMS driver table (cdevsw_impl_t, defined above). */
1283 1289 extern cdevsw_impl_t *devimpl;
1285 1291 
1286 1292 /*
1287 1293 * esballoc queue for throttling
1288 1294 */
1289 1295 typedef struct esb_queue {
1290 1296 kmutex_t eq_lock; /* protects the fields below */
1291 1297 uint_t eq_len; /* number of queued messages */
1292 1298 mblk_t *eq_head; /* head of queue */
1293 1299 mblk_t *eq_tail; /* tail of queue */
1294 1300 uint_t eq_flags; /* esballoc queue flags */
1295 1301 } esb_queue_t;
1296 1302 
1297 1303 /*
1298 1304 * esballoc flags for queue processing.
1299 1305 */
1300 1306 #define ESBQ_PROCESSING 0x01 /* queue is being processed */
1301 1307 #define ESBQ_TIMER 0x02 /* timer is active */
1302 1308 
1303 1309 extern void esballoc_queue_init(void);
1304 1310 
#endif /* _KERNEL */
1305 1311
1306 1312 /*
1307 1313 * Note: Use of these macros are restricted to kernel/unix and
1308 1314 * intended for the STREAMS framework.
1309 1315 * All modules/drivers should include sys/ddi.h.
1310 1316 *
1311 1317 * Finding related queues
* (these rely on the read/write queues being allocated adjacently,
* as laid out in queinfo_t above)
1312 1318 */
1313 1319 #define _OTHERQ(q) ((q)->q_flag&QREADR? (q)+1: (q)-1)
1314 1320 #define _WR(q) ((q)->q_flag&QREADR? (q)+1: (q))
1315 1321 #define _RD(q) ((q)->q_flag&QREADR? (q): (q)-1)
1316 1322 #define _SAMESTR(q) (!((q)->q_flag & QEND))
1317 1323 
1318 1324 /*
1319 1325 * These are also declared here for modules/drivers that erroneously
1320 1326 * include strsubr.h after ddi.h or fail to include ddi.h at all.
1321 1327 */
1322 1328 extern struct queue *OTHERQ(queue_t *); /* stream.h */
1323 1329 extern struct queue *RD(queue_t *);
1324 1330 extern struct queue *WR(queue_t *);
1325 1331 extern int SAMESTR(queue_t *);
1326 1332
1327 1333 /*
1328 1334 * The following hardware checksum related macros are private
1329 1335 * interfaces that are subject to change without notice.
* They are accessors into the dblk (b_datap) checksum/LSO fields.
1330 1336 */
1331 1337 #ifdef _KERNEL
1332 1338 #define DB_CKSUMSTART(mp) ((mp)->b_datap->db_cksumstart)
1333 1339 #define DB_CKSUMEND(mp) ((mp)->b_datap->db_cksumend)
1334 1340 #define DB_CKSUMSTUFF(mp) ((mp)->b_datap->db_cksumstuff)
1335 1341 #define DB_CKSUMFLAGS(mp) ((mp)->b_datap->db_struioun.cksum.flags)
1336 1342 #define DB_CKSUM16(mp) ((mp)->b_datap->db_cksum16)
1337 1343 #define DB_CKSUM32(mp) ((mp)->b_datap->db_cksum32)
1338 1344 #define DB_LSOFLAGS(mp) ((mp)->b_datap->db_struioun.cksum.flags)
1339 1345 #define DB_LSOMSS(mp) ((mp)->b_datap->db_struioun.cksum.pad)
1340 1346 #endif /* _KERNEL */
1341 1347
1342 1348 #ifdef __cplusplus
1343 1349 }
1344 1350 #endif
1345 1351
1346 1352
1347 1353 #endif /* _SYS_STRSUBR_H */
↓ open down ↓ |
195 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX