XXXX adding PID information to netstat output
--- old/usr/src/uts/common/os/strsubr.c
+++ new/usr/src/uts/common/os/strsubr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/param.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/signal.h>
35 35 #include <sys/proc.h>
36 36 #include <sys/conf.h>
37 37 #include <sys/cred.h>
38 38 #include <sys/user.h>
39 39 #include <sys/vnode.h>
40 40 #include <sys/file.h>
41 41 #include <sys/session.h>
42 42 #include <sys/stream.h>
43 43 #include <sys/strsubr.h>
44 44 #include <sys/stropts.h>
45 45 #include <sys/poll.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/cpuvar.h>
48 48 #include <sys/uio.h>
49 49 #include <sys/cmn_err.h>
50 50 #include <sys/priocntl.h>
51 51 #include <sys/procset.h>
52 52 #include <sys/vmem.h>
53 53 #include <sys/bitmap.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/siginfo.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/callb.h>
58 58 #include <sys/debug.h>
59 59 #include <sys/modctl.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/page.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/suntpi.h>
64 64 #include <sys/strlog.h>
65 65 #include <sys/promif.h>
66 66 #include <sys/project.h>
67 67 #include <sys/vm.h>
68 68 #include <sys/taskq.h>
69 69 #include <sys/sunddi.h>
70 70 #include <sys/sunldi_impl.h>
71 71 #include <sys/strsun.h>
72 72 #include <sys/isa_defs.h>
73 73 #include <sys/multidata.h>
74 74 #include <sys/pattr.h>
75 75 #include <sys/strft.h>
76 76 #include <sys/fs/snode.h>
77 77 #include <sys/zone.h>
78 78 #include <sys/open.h>
79 79 #include <sys/sunldi.h>
80 80 #include <sys/sad.h>
81 81 #include <sys/netstack.h>
82 82
83 83 #define O_SAMESTR(q) (((q)->q_next) && \
84 84 (((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
85 85
86 86 /*
87 87 * WARNING:
88 88 * The variables and routines in this file are private, belonging
89 89 * to the STREAMS subsystem. These should not be used by modules
90 90 * or drivers. Compatibility will not be guaranteed.
91 91 */
92 92
93 93 /*
94 94 * Id value used to distinguish between different multiplexor links.
95 95 */
96 96 static int32_t lnk_id = 0;
97 97
98 98 #define STREAMS_LOPRI MINCLSYSPRI
99 99 static pri_t streams_lopri = STREAMS_LOPRI;
100 100
101 101 #define STRSTAT(x) (str_statistics.x.value.ui64++)
102 102 typedef struct str_stat {
103 103 kstat_named_t sqenables;
104 104 kstat_named_t stenables;
105 105 kstat_named_t syncqservice;
106 106 kstat_named_t freebs;
107 107 kstat_named_t qwr_outer;
108 108 kstat_named_t rservice;
109 109 kstat_named_t strwaits;
110 110 kstat_named_t taskqfails;
111 111 kstat_named_t bufcalls;
112 112 kstat_named_t qhelps;
113 113 kstat_named_t qremoved;
114 114 kstat_named_t sqremoved;
115 115 kstat_named_t bcwaits;
116 116 kstat_named_t sqtoomany;
117 117 } str_stat_t;
118 118
119 119 static str_stat_t str_statistics = {
120 120 { "sqenables", KSTAT_DATA_UINT64 },
121 121 { "stenables", KSTAT_DATA_UINT64 },
122 122 { "syncqservice", KSTAT_DATA_UINT64 },
123 123 { "freebs", KSTAT_DATA_UINT64 },
124 124 { "qwr_outer", KSTAT_DATA_UINT64 },
125 125 { "rservice", KSTAT_DATA_UINT64 },
126 126 { "strwaits", KSTAT_DATA_UINT64 },
127 127 { "taskqfails", KSTAT_DATA_UINT64 },
128 128 { "bufcalls", KSTAT_DATA_UINT64 },
129 129 { "qhelps", KSTAT_DATA_UINT64 },
130 130 { "qremoved", KSTAT_DATA_UINT64 },
131 131 { "sqremoved", KSTAT_DATA_UINT64 },
132 132 { "bcwaits", KSTAT_DATA_UINT64 },
133 133 { "sqtoomany", KSTAT_DATA_UINT64 },
134 134 };
135 135
136 136 static kstat_t *str_kstat;
137 137
138 138 /*
139 139 * qrunflag was used previously to control background scheduling of queues. It
140 140 * is not used anymore, but kept here in case some module still wants to access
141 141 * it via the qready() and setqsched macros.
142 142 */
143 143 char qrunflag; /* Unused */
144 144
145 145 /*
146 146 * Most of the streams scheduling is done via task queues. Task queues may fail
147 147 * for non-sleep dispatches, so there are two backup threads servicing failed
148 148 * requests for queues and syncqs. Both of these threads also service
149 149 * failed freebs dispatch requests. Queues are put in the list specified by `qhead'
150 150 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
151 151 * requests are put into `freebs_list' which has no tail pointer. All three
152 152 * lists are protected by a single `service_queue' lock and use
153 153 * `services_to_run' condition variable for signaling background threads. Use of
154 154 * a single lock should not be a problem because it is only used under heavy
155 155 * loads when task queues start to fail and at that time it may be a good idea
156 156 * to throttle scheduling requests.
157 157 *
158 158 * NOTE: queues and syncqs should be scheduled by two separate threads because
159 159 * queue servicing may be blocked waiting for a syncq which may also be
160 160 * scheduled for background execution. This may create a deadlock when only one
161 161 * thread is used for both.
162 162 */
163 163
164 164 static taskq_t *streams_taskq; /* Used for most STREAMS scheduling */
165 165
166 166 static kmutex_t service_queue; /* protects all of servicing vars */
167 167 static kcondvar_t services_to_run; /* wake up background service thread */
168 168 static kcondvar_t syncqs_to_run; /* wake up background service thread */
169 169
170 170 /*
171 171 * List of queues scheduled for background processing due to lack of resources
172 172 * in the task queues. Protected by the service_queue lock.
173 173 */
174 174 static struct queue *qhead;
175 175 static struct queue *qtail;
176 176
177 177 /*
178 178 * Same list for syncqs
179 179 */
180 180 static syncq_t *sqhead;
181 181 static syncq_t *sqtail;
182 182
183 183 static mblk_t *freebs_list; /* list of buffers to free */
184 184
185 185 /*
186 186 * Backup threads for servicing queues and syncqs
187 187 */
188 188 kthread_t *streams_qbkgrnd_thread;
189 189 kthread_t *streams_sqbkgrnd_thread;
190 190
191 191 /*
192 192 * Bufcalls related variables.
193 193 */
194 194 struct bclist strbcalls; /* list of waiting bufcalls */
195 195 kmutex_t strbcall_lock; /* protects bufcall list (strbcalls) */
196 196 kcondvar_t strbcall_cv; /* Signaling when a bufcall is added */
197 197 kmutex_t bcall_monitor; /* sleep/wakeup style monitor */
198 198 kcondvar_t bcall_cv; /* wait until executing bufcall completes */
199 199 kthread_t *bc_bkgrnd_thread; /* Thread to service bufcall requests */
200 200
201 201 kmutex_t strresources; /* protects global resources */
202 202 kmutex_t muxifier; /* single-threads multiplexor creation */
203 203
204 204 static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
205 205 static void str_stack_shutdown(netstackid_t stackid, void *arg);
206 206 static void str_stack_fini(netstackid_t stackid, void *arg);
207 207
208 208 /*
209 209 * run_queues is no longer used, but is kept in case some 3rd party
210 210 * module/driver decides to use it.
211 211 */
212 212 int run_queues = 0;
213 213
214 214 /*
215 215 * sq_max_size is the depth of the syncq (in number of messages) before
216 216 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
217 217 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
218 218 * depend on this syncq flow control, so we prefer to choose a large
219 219 * number as the default value. For potential
220 220 * performance gain, this value is tunable in /etc/system.
221 221 */
222 222 int sq_max_size = 10000;
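As the comment above notes, this can be overridden without a rebuild. A hedged example of the corresponding /etc/system line (the value here is purely illustrative), taking effect at the next boot:

	set sq_max_size = 0x2000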
223 223
224 224 /*
225 225 * The number of ciputctrl structures per syncq and stream we create when
226 226 * needed.
227 227 */
228 228 int n_ciputctrl;
229 229 int max_n_ciputctrl = 16;
230 230 /*
231 231 * If n_ciputctrl < min_n_ciputctrl, don't even create ciputctrl_cache.
232 232 */
233 233 int min_n_ciputctrl = 2;
234 234
235 235 /*
236 236 * Per-driver/module syncqs
237 237 * ========================
238 238 *
239 239 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
240 240 * perdm structures, new entries being added (and new syncqs allocated) when
241 241 * setq() encounters a module/driver with a streamtab that it hasn't seen
242 242 * before.
243 243 * The reason for this mechanism is that some modules and drivers share a
244 244 * common streamtab and it is necessary for those modules and drivers to also
245 245 * share a common PERMOD syncq.
246 246 *
247 247 * perdm_list --> dm_str == streamtab_1
248 248 * dm_sq == syncq_1
249 249 * dm_ref
250 250 * dm_next --> dm_str == streamtab_2
251 251 * dm_sq == syncq_2
252 252 * dm_ref
253 253 * dm_next --> ... NULL
254 254 *
255 255 * The dm_ref field is incremented for each new driver/module that takes
256 256 * a reference to the perdm structure and hence shares the syncq.
257 257 * References are held in the fmodsw_impl_t structure for each STREAMS module
258 258 * or the dev_impl array (indexed by device major number) for each driver.
259 259 *
260 260 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
261 261 * ^ ^ ^ ^
262 262 * | ______________/ | |
263 263 * | / | |
264 264 * dev_impl: ...|x|y|... module A module B
265 265 *
266 266 * When a module/driver is unloaded the reference count is decremented and,
267 267 * when it falls to zero, the perdm structure is removed from the list and
268 268 * the syncq is freed (see rele_dm()).
269 269 */
270 270 perdm_t *perdm_list = NULL;
271 271 static krwlock_t perdm_rwlock;
272 272 cdevsw_impl_t *devimpl;
273 273
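For illustration, a hedged sketch of the lookup-or-allocate step the diagram above implies, loosely modeled on what hold_dm() must do (the real function lives further down in this file and differs in detail, e.g. in how it handles allocation races); the function name is invented:

static perdm_t *
perdm_hold_sketch(struct streamtab *str)
{
	perdm_t *p;

	rw_enter(&perdm_rwlock, RW_WRITER);
	for (p = perdm_list; p != NULL; p = p->dm_next) {
		if (p->dm_str == str) {
			/* streamtab already seen: share its syncq */
			p->dm_ref++;
			rw_exit(&perdm_rwlock);
			return (p);
		}
	}
	/* first time this streamtab is seen: new entry, new syncq */
	p = kmem_zalloc(sizeof (perdm_t), KM_SLEEP);
	p->dm_str = str;
	p->dm_ref = 1;
	p->dm_sq = new_syncq();
	p->dm_next = perdm_list;
	perdm_list = p;
	rw_exit(&perdm_rwlock);
	return (p);
}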
274 274 extern struct qinit strdata;
275 275 extern struct qinit stwdata;
276 276
277 277 static void runservice(queue_t *);
278 278 static void streams_bufcall_service(void);
279 279 static void streams_qbkgrnd_service(void);
280 280 static void streams_sqbkgrnd_service(void);
281 281 static syncq_t *new_syncq(void);
282 282 static void free_syncq(syncq_t *);
283 283 static void outer_insert(syncq_t *, syncq_t *);
284 284 static void outer_remove(syncq_t *, syncq_t *);
285 285 static void write_now(syncq_t *);
286 286 static void clr_qfull(queue_t *);
287 287 static void runbufcalls(void);
288 288 static void sqenable(syncq_t *);
289 289 static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
290 290 static void wait_q_syncq(queue_t *);
291 291 static void backenable_insertedq(queue_t *);
292 292
293 293 static void queue_service(queue_t *);
294 294 static void stream_service(stdata_t *);
295 295 static void syncq_service(syncq_t *);
296 296 static void qwriter_outer_service(syncq_t *);
297 297 static void mblk_free(mblk_t *);
298 298 #ifdef DEBUG
299 299 static int qprocsareon(queue_t *);
300 300 #endif
301 301
302 302 static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
303 303 static void reset_nfsrv_ptr(queue_t *, queue_t *);
304 304 void set_qfull(queue_t *);
305 305
306 306 static void sq_run_events(syncq_t *);
307 307 static int propagate_syncq(queue_t *);
308 308
309 309 static void blocksq(syncq_t *, ushort_t, int);
310 310 static void unblocksq(syncq_t *, ushort_t, int);
311 311 static int dropsq(syncq_t *, uint16_t);
312 312 static void emptysq(syncq_t *);
313 313 static sqlist_t *sqlist_alloc(struct stdata *, int);
314 314 static void sqlist_free(sqlist_t *);
315 315 static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
316 316 static void sqlist_insert(sqlist_t *, syncq_t *);
317 317 static void sqlist_insertall(sqlist_t *, queue_t *);
318 318
319 319 static void strsetuio(stdata_t *);
320 320
321 321 struct kmem_cache *stream_head_cache;
322 322 struct kmem_cache *queue_cache;
323 323 struct kmem_cache *syncq_cache;
324 324 struct kmem_cache *qband_cache;
325 325 struct kmem_cache *linkinfo_cache;
326 326 struct kmem_cache *ciputctrl_cache = NULL;
327 327
328 328 static linkinfo_t *linkinfo_list;
329 329
330 330 /* Global esballoc throttling queue */
331 331 static esb_queue_t system_esbq;
332 332
333 333 /* Array of esballoc throttling queues, of length esbq_nelem */
334 334 static esb_queue_t *volatile system_esbq_array;
335 335 static int esbq_nelem;
336 336 static kmutex_t esbq_lock;
337 337 static int esbq_log2_cpus_per_q = 0;
338 338
339 339 /* Scale the system_esbq length by setting number of CPUs per queue. */
340 340 uint_t esbq_cpus_per_q = 1;
341 341
342 342 /*
343 343 * esballoc tunable parameters.
344 344 */
345 345 int esbq_max_qlen = 0x16; /* throttled queue length */
346 346 clock_t esbq_timeout = 0x8; /* timeout to process esb queue */
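Both tunables can likewise be set from /etc/system; the values below are hedged and purely illustrative:

	set esbq_max_qlen = 0x40
	set esbq_timeout = 0x10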
347 347
348 348 /*
349 349 * Routines to handle esballoc queueing.
350 350 */
351 351 static void esballoc_process_queue(esb_queue_t *);
352 352 static void esballoc_enqueue_mblk(mblk_t *);
353 353 static void esballoc_timer(void *);
354 354 static void esballoc_set_timer(esb_queue_t *, clock_t);
355 355 static void esballoc_mblk_free(mblk_t *);
356 356
357 357 /*
358 358 * Qinit structure and Module_info structures
359 359 * for passthru read and write queues
360 360 */
361 361
362 362 static void pass_wput(queue_t *, mblk_t *);
363 363 static queue_t *link_addpassthru(stdata_t *);
364 364 static void link_rempassthru(queue_t *);
365 365
366 366 struct module_info passthru_info = {
367 367 0,
368 368 "passthru",
369 369 0,
370 370 INFPSZ,
371 371 STRHIGH,
372 372 STRLOW
373 373 };
374 374
375 375 struct qinit passthru_rinit = {
376 376 (int (*)())putnext,
377 377 NULL,
378 378 NULL,
379 379 NULL,
380 380 NULL,
381 381 &passthru_info,
382 382 NULL
383 383 };
384 384
385 385 struct qinit passthru_winit = {
386 386 (int (*)()) pass_wput,
387 387 NULL,
388 388 NULL,
389 389 NULL,
390 390 NULL,
391 391 &passthru_info,
392 392 NULL
393 393 };
394 394
395 395 /*
396 396 * Verify correctness of list head/tail pointers.
397 397 */
398 398 #define LISTCHECK(head, tail, link) { \
399 399 EQUIV(head, tail); \
400 400 IMPLY(tail != NULL, tail->link == NULL); \
401 401 }
402 402
403 403 /*
404 404 * Enqueue a list element `el' at the end of a list denoted by `head' and `tail'
405 405 * using a `link' field.
406 406 */
407 407 #define ENQUEUE(el, head, tail, link) { \
408 408 ASSERT(el->link == NULL); \
409 409 LISTCHECK(head, tail, link); \
410 410 if (head == NULL) \
411 411 head = el; \
412 412 else \
413 413 tail->link = el; \
414 414 tail = el; \
415 415 }
416 416
417 417 /*
418 418 * Dequeue the first element of the list denoted by `head' and `tail' pointers
419 419 * using a `link' field and put the result into `el'.
420 420 */
421 421 #define DQ(el, head, tail, link) { \
422 422 LISTCHECK(head, tail, link); \
423 423 el = head; \
424 424 if (head != NULL) { \
425 425 head = head->link; \
426 426 if (head == NULL) \
427 427 tail = NULL; \
428 428 el->link = NULL; \
429 429 } \
430 430 }
431 431
432 432 /*
433 433 * Remove `el' from the list using `chase' and `curr' pointers and return the result
434 434 * in `succeed'.
435 435 */
436 436 #define RMQ(el, head, tail, link, chase, curr, succeed) { \
437 437 LISTCHECK(head, tail, link); \
438 438 chase = NULL; \
439 439 succeed = 0; \
440 440 for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
441 441 chase = curr; \
442 442 if (curr != NULL) { \
443 443 succeed = 1; \
444 444 ASSERT(curr == el); \
445 445 if (chase != NULL) \
446 446 chase->link = curr->link; \
447 447 else \
448 448 head = curr->link; \
449 449 curr->link = NULL; \
450 450 if (curr == tail) \
451 451 tail = chase; \
452 452 } \
453 453 LISTCHECK(head, tail, link); \
454 454 }
455 455
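To make the list discipline above concrete, here is a stand-alone user-space sketch of ENQUEUE/DQ with the LISTCHECK/ASSERT instrumentation stripped; the element type and field names are invented for the example:

#include <stdio.h>
#include <stddef.h>

typedef struct elem {
	int		e_val;
	struct elem	*e_link;	/* plays the role of q_link/b_next */
} elem_t;

#define	ENQUEUE(el, head, tail, link) {				\
	if (head == NULL)					\
		head = el;					\
	else							\
		tail->link = el;				\
	tail = el;						\
}

#define	DQ(el, head, tail, link) {				\
	el = head;						\
	if (head != NULL) {					\
		head = head->link;				\
		if (head == NULL)				\
			tail = NULL;				\
		el->link = NULL;				\
	}							\
}

int
main(void)
{
	elem_t a = { 1, NULL }, b = { 2, NULL }, *e;
	elem_t *head = NULL, *tail = NULL;

	ENQUEUE(&a, head, tail, e_link);	/* list: a */
	ENQUEUE(&b, head, tail, e_link);	/* list: a -> b */
	DQ(e, head, tail, e_link);		/* removes a */
	(void) printf("dequeued %d, new head %d\n", e->e_val, head->e_val);
	return (0);
}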
456 456 /* Handling of delayed messages on the inner syncq. */
457 457
458 458 /*
459 459 * DEBUG kernels should use the function versions (to simplify tracing) and
460 460 * non-DEBUG kernels should use the macro versions.
461 461 */
462 462
463 463 /*
464 464 * Put a queue on the syncq list of queues.
465 465 * Assumes SQLOCK held.
466 466 */
467 467 #define SQPUT_Q(sq, qp) \
468 468 { \
469 469 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
470 470 if (!(qp->q_sqflags & Q_SQQUEUED)) { \
471 471 /* The queue should not be linked anywhere */ \
472 472 ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
473 473 /* Head and tail may only be NULL simultaneously */ \
474 474 EQUIV(sq->sq_head, sq->sq_tail); \
475 475 /* Queue may be only enqueued on its syncq */ \
476 476 ASSERT(sq == qp->q_syncq); \
477 477 /* Check the correctness of SQ_MESSAGES flag */ \
478 478 EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
479 479 /* Sanity check first/last elements of the list */ \
480 480 IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
481 481 IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
482 482 /* \
483 483 * Sanity check of priority field: empty queue should \
484 484 * have zero priority \
485 485 * and nqueues equal to zero. \
486 486 */ \
487 487 IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \
488 488 /* Sanity check of sq_nqueues field */ \
489 489 EQUIV(sq->sq_head, sq->sq_nqueues); \
490 490 if (sq->sq_head == NULL) { \
491 491 sq->sq_head = sq->sq_tail = qp; \
492 492 sq->sq_flags |= SQ_MESSAGES; \
493 493 } else if (qp->q_spri == 0) { \
494 494 qp->q_sqprev = sq->sq_tail; \
495 495 sq->sq_tail->q_sqnext = qp; \
496 496 sq->sq_tail = qp; \
497 497 } else { \
498 498 /* \
499 499 * Put this queue in priority order: higher \
500 500 * priority gets closer to the head. \
501 501 */ \
502 502 queue_t **qpp = &sq->sq_tail; \
503 503 queue_t *qnext = NULL; \
504 504 \
505 505 while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
506 506 qnext = *qpp; \
507 507 qpp = &(*qpp)->q_sqprev; \
508 508 } \
509 509 qp->q_sqnext = qnext; \
510 510 qp->q_sqprev = *qpp; \
511 511 if (*qpp != NULL) { \
512 512 (*qpp)->q_sqnext = qp; \
513 513 } else { \
514 514 sq->sq_head = qp; \
515 515 sq->sq_pri = sq->sq_head->q_spri; \
516 516 } \
517 517 *qpp = qp; \
518 518 } \
519 519 qp->q_sqflags |= Q_SQQUEUED; \
520 520 qp->q_sqtstamp = ddi_get_lbolt(); \
521 521 sq->sq_nqueues++; \
522 522 } \
523 523 }
524 524
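The priority branch of SQPUT_Q is the subtle part: it walks from the tail toward the head until it meets a queue of equal or higher priority, then splices the new queue in behind it. A user-space sketch of just that walk; types and names are illustrative:

#include <stdio.h>

typedef struct node {
	int		pri;
	struct node	*next;		/* toward the tail */
	struct node	*prev;		/* toward the head */
} node_t;

static void
pri_insert(node_t **headp, node_t **tailp, node_t *np)
{
	node_t **spot = tailp;		/* candidate predecessor */
	node_t *after = NULL;		/* node that will follow np */

	/* walk toward the head past lower-priority nodes */
	while (*spot != NULL && np->pri > (*spot)->pri) {
		after = *spot;
		spot = &(*spot)->prev;
	}
	np->next = after;
	np->prev = *spot;
	if (*spot != NULL)
		(*spot)->next = np;
	else
		*headp = np;		/* np outranks everything */
	/* like `*qpp = qp' above: updates either *tailp or after->prev */
	*spot = np;
}

int
main(void)
{
	node_t a = { 5 }, b = { 1 }, c = { 3 };
	node_t *head = NULL, *tail = NULL, *n;

	pri_insert(&head, &tail, &b);	/* 1 */
	pri_insert(&head, &tail, &a);	/* 5 1 */
	pri_insert(&head, &tail, &c);	/* 5 3 1 */
	for (n = head; n != NULL; n = n->next)
		(void) printf("%d ", n->pri);
	(void) printf("\n");
	return (0);
}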
525 525 /*
526 526 * Remove a queue from the syncq list
527 527 * Assumes SQLOCK held.
528 528 */
529 529 #define SQRM_Q(sq, qp) \
530 530 { \
531 531 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
532 532 ASSERT(qp->q_sqflags & Q_SQQUEUED); \
533 533 ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \
534 534 ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \
535 535 /* Check that the queue is actually in the list */ \
536 536 ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \
537 537 ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \
538 538 ASSERT(sq->sq_nqueues != 0); \
539 539 if (qp->q_sqprev == NULL) { \
540 540 /* First queue on list, make head q_sqnext */ \
541 541 sq->sq_head = qp->q_sqnext; \
542 542 } else { \
543 543 /* Make prev->next == next */ \
544 544 qp->q_sqprev->q_sqnext = qp->q_sqnext; \
545 545 } \
546 546 if (qp->q_sqnext == NULL) { \
547 547 /* Last queue on list, make tail sqprev */ \
548 548 sq->sq_tail = qp->q_sqprev; \
549 549 } else { \
550 550 /* Make next->prev == prev */ \
551 551 qp->q_sqnext->q_sqprev = qp->q_sqprev; \
552 552 } \
553 553 /* clear out references on this queue */ \
554 554 qp->q_sqprev = qp->q_sqnext = NULL; \
555 555 qp->q_sqflags &= ~Q_SQQUEUED; \
556 556 /* If there is nothing queued, clear SQ_MESSAGES */ \
557 557 if (sq->sq_head != NULL) { \
558 558 sq->sq_pri = sq->sq_head->q_spri; \
559 559 } else { \
560 560 sq->sq_flags &= ~SQ_MESSAGES; \
561 561 sq->sq_pri = 0; \
562 562 } \
563 563 sq->sq_nqueues--; \
564 564 ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \
565 565 (sq->sq_flags & SQ_QUEUED) == 0); \
566 566 }
567 567
568 568 /* Hide the definition from the header file. */
569 569 #ifdef SQPUT_MP
570 570 #undef SQPUT_MP
571 571 #endif
572 572
573 573 /*
574 574 * Put a message on the queue syncq.
575 575 * Assumes QLOCK held.
576 576 */
577 577 #define SQPUT_MP(qp, mp) \
578 578 { \
579 579 ASSERT(MUTEX_HELD(QLOCK(qp))); \
580 580 ASSERT(qp->q_sqhead == NULL || \
581 581 (qp->q_sqtail != NULL && \
582 582 qp->q_sqtail->b_next == NULL)); \
583 583 qp->q_syncqmsgs++; \
584 584 ASSERT(qp->q_syncqmsgs != 0); /* Wraparound */ \
585 585 if (qp->q_sqhead == NULL) { \
586 586 qp->q_sqhead = qp->q_sqtail = mp; \
587 587 } else { \
588 588 qp->q_sqtail->b_next = mp; \
589 589 qp->q_sqtail = mp; \
590 590 } \
591 591 ASSERT(qp->q_syncqmsgs > 0); \
592 592 set_qfull(qp); \
593 593 }
594 594
595 595 #define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
596 596 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
597 597 if ((sq)->sq_ciputctrl != NULL) { \
598 598 int i; \
599 599 int nlocks = (sq)->sq_nciputctrl; \
600 600 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
601 601 ASSERT((sq)->sq_type & SQ_CIPUT); \
602 602 for (i = 0; i <= nlocks; i++) { \
603 603 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
604 604 cip[i].ciputctrl_count |= SQ_FASTPUT; \
605 605 } \
606 606 } \
607 607 }
608 608
609 609
610 610 #define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
611 611 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
612 612 if ((sq)->sq_ciputctrl != NULL) { \
613 613 int i; \
614 614 int nlocks = (sq)->sq_nciputctrl; \
615 615 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
616 616 ASSERT((sq)->sq_type & SQ_CIPUT); \
617 617 for (i = 0; i <= nlocks; i++) { \
618 618 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
619 619 cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
620 620 } \
621 621 } \
622 622 }
623 623
624 624 /*
625 625 * Run service procedures for all queues in the stream head.
626 626 */
627 627 #define STR_SERVICE(stp, q) { \
628 628 ASSERT(MUTEX_HELD(&stp->sd_qlock)); \
629 629 while (stp->sd_qhead != NULL) { \
630 630 DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \
631 631 ASSERT(stp->sd_nqueues > 0); \
632 632 stp->sd_nqueues--; \
633 633 ASSERT(!(q->q_flag & QINSERVICE)); \
634 634 mutex_exit(&stp->sd_qlock); \
635 635 queue_service(q); \
636 636 mutex_enter(&stp->sd_qlock); \
637 637 } \
638 638 ASSERT(stp->sd_nqueues == 0); \
639 639 ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
640 640 }
641 641
642 642 /*
643 643 * Constructor/destructor routines for the stream head cache
(643 lines elided)
644 644 */
645 645 /* ARGSUSED */
646 646 static int
647 647 stream_head_constructor(void *buf, void *cdrarg, int kmflags)
648 648 {
649 649 stdata_t *stp = buf;
650 650
651 651 mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
652 652 mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
653 653 mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
654 + mutex_init(&stp->sd_pid_list_lock, NULL, MUTEX_DEFAULT, NULL);
654 655 cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
655 656 cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
656 657 cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
657 658 cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
658 659 cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
660 + list_create(&stp->sd_pid_list, sizeof (pid_node_t),
661 + offsetof(pid_node_t, pn_ref_link));
659 662 stp->sd_wrq = NULL;
660 663
661 664 return (0);
662 665 }
663 666
664 667 /* ARGSUSED */
665 668 static void
666 669 stream_head_destructor(void *buf, void *cdrarg)
667 670 {
668 671 stdata_t *stp = buf;
669 672
670 673 mutex_destroy(&stp->sd_lock);
671 674 mutex_destroy(&stp->sd_reflock);
672 675 mutex_destroy(&stp->sd_qlock);
676 + mutex_destroy(&stp->sd_pid_list_lock);
673 677 cv_destroy(&stp->sd_monitor);
674 678 cv_destroy(&stp->sd_iocmonitor);
675 679 cv_destroy(&stp->sd_refmonitor);
676 680 cv_destroy(&stp->sd_qcv);
677 681 cv_destroy(&stp->sd_zcopy_wait);
682 + list_destroy(&stp->sd_pid_list);
678 683 }
679 684
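The sd_pid_list/sd_pid_list_lock pair added above is how this change remembers which processes reference a stream (which is what lets netstat report PIDs). A hedged sketch of an insert using the list(9F) interfaces set up by the constructor; the pn_pid member and the function name are assumptions for illustration, since only pn_ref_link is visible in this diff:

static void
sd_pid_add_sketch(stdata_t *stp, pid_t pid)
{
	/* pn_pid is an assumed member; only pn_ref_link appears above */
	pid_node_t *pn = kmem_zalloc(sizeof (pid_node_t), KM_SLEEP);

	pn->pn_pid = pid;
	mutex_enter(&stp->sd_pid_list_lock);
	list_insert_tail(&stp->sd_pid_list, pn);
	mutex_exit(&stp->sd_pid_list_lock);
}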
680 685 /*
681 686 * Constructor/destructor routines for the queue cache
682 687 */
683 688 /* ARGSUSED */
684 689 static int
685 690 queue_constructor(void *buf, void *cdrarg, int kmflags)
686 691 {
687 692 queinfo_t *qip = buf;
688 693 queue_t *qp = &qip->qu_rqueue;
689 694 queue_t *wqp = &qip->qu_wqueue;
690 695 syncq_t *sq = &qip->qu_syncq;
691 696
692 697 qp->q_first = NULL;
693 698 qp->q_link = NULL;
694 699 qp->q_count = 0;
695 700 qp->q_mblkcnt = 0;
696 701 qp->q_sqhead = NULL;
697 702 qp->q_sqtail = NULL;
698 703 qp->q_sqnext = NULL;
699 704 qp->q_sqprev = NULL;
700 705 qp->q_sqflags = 0;
701 706 qp->q_rwcnt = 0;
702 707 qp->q_spri = 0;
703 708
704 709 mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
705 710 cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);
706 711
707 712 wqp->q_first = NULL;
708 713 wqp->q_link = NULL;
709 714 wqp->q_count = 0;
710 715 wqp->q_mblkcnt = 0;
711 716 wqp->q_sqhead = NULL;
712 717 wqp->q_sqtail = NULL;
713 718 wqp->q_sqnext = NULL;
714 719 wqp->q_sqprev = NULL;
715 720 wqp->q_sqflags = 0;
716 721 wqp->q_rwcnt = 0;
717 722 wqp->q_spri = 0;
718 723
719 724 mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
720 725 cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);
721 726
722 727 sq->sq_head = NULL;
723 728 sq->sq_tail = NULL;
724 729 sq->sq_evhead = NULL;
725 730 sq->sq_evtail = NULL;
726 731 sq->sq_callbpend = NULL;
727 732 sq->sq_outer = NULL;
728 733 sq->sq_onext = NULL;
729 734 sq->sq_oprev = NULL;
730 735 sq->sq_next = NULL;
731 736 sq->sq_svcflags = 0;
732 737 sq->sq_servcount = 0;
733 738 sq->sq_needexcl = 0;
734 739 sq->sq_nqueues = 0;
735 740 sq->sq_pri = 0;
736 741
737 742 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
738 743 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
739 744 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
740 745
741 746 return (0);
742 747 }
743 748
744 749 /* ARGSUSED */
745 750 static void
746 751 queue_destructor(void *buf, void *cdrarg)
747 752 {
748 753 queinfo_t *qip = buf;
749 754 queue_t *qp = &qip->qu_rqueue;
750 755 queue_t *wqp = &qip->qu_wqueue;
751 756 syncq_t *sq = &qip->qu_syncq;
752 757
753 758 ASSERT(qp->q_sqhead == NULL);
754 759 ASSERT(wqp->q_sqhead == NULL);
755 760 ASSERT(qp->q_sqnext == NULL);
756 761 ASSERT(wqp->q_sqnext == NULL);
757 762 ASSERT(qp->q_rwcnt == 0);
758 763 ASSERT(wqp->q_rwcnt == 0);
759 764
760 765 mutex_destroy(&qp->q_lock);
761 766 cv_destroy(&qp->q_wait);
762 767
763 768 mutex_destroy(&wqp->q_lock);
764 769 cv_destroy(&wqp->q_wait);
765 770
766 771 mutex_destroy(&sq->sq_lock);
767 772 cv_destroy(&sq->sq_wait);
768 773 cv_destroy(&sq->sq_exitwait);
769 774 }
770 775
771 776 /*
772 777 * Constructor/destructor routines for the syncq cache
773 778 */
774 779 /* ARGSUSED */
775 780 static int
776 781 syncq_constructor(void *buf, void *cdrarg, int kmflags)
777 782 {
778 783 syncq_t *sq = buf;
779 784
780 785 bzero(buf, sizeof (syncq_t));
781 786
782 787 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
783 788 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
784 789 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
785 790
786 791 return (0);
787 792 }
788 793
789 794 /* ARGSUSED */
790 795 static void
791 796 syncq_destructor(void *buf, void *cdrarg)
792 797 {
793 798 syncq_t *sq = buf;
794 799
795 800 ASSERT(sq->sq_head == NULL);
796 801 ASSERT(sq->sq_tail == NULL);
797 802 ASSERT(sq->sq_evhead == NULL);
798 803 ASSERT(sq->sq_evtail == NULL);
799 804 ASSERT(sq->sq_callbpend == NULL);
800 805 ASSERT(sq->sq_callbflags == 0);
801 806 ASSERT(sq->sq_outer == NULL);
802 807 ASSERT(sq->sq_onext == NULL);
803 808 ASSERT(sq->sq_oprev == NULL);
804 809 ASSERT(sq->sq_next == NULL);
805 810 ASSERT(sq->sq_needexcl == 0);
806 811 ASSERT(sq->sq_svcflags == 0);
807 812 ASSERT(sq->sq_servcount == 0);
808 813 ASSERT(sq->sq_nqueues == 0);
809 814 ASSERT(sq->sq_pri == 0);
810 815 ASSERT(sq->sq_count == 0);
811 816 ASSERT(sq->sq_rmqcount == 0);
812 817 ASSERT(sq->sq_cancelid == 0);
813 818 ASSERT(sq->sq_ciputctrl == NULL);
814 819 ASSERT(sq->sq_nciputctrl == 0);
815 820 ASSERT(sq->sq_type == 0);
816 821 ASSERT(sq->sq_flags == 0);
817 822
818 823 mutex_destroy(&sq->sq_lock);
819 824 cv_destroy(&sq->sq_wait);
820 825 cv_destroy(&sq->sq_exitwait);
821 826 }
822 827
823 828 /* ARGSUSED */
824 829 static int
825 830 ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
826 831 {
827 832 ciputctrl_t *cip = buf;
828 833 int i;
829 834
830 835 for (i = 0; i < n_ciputctrl; i++) {
831 836 cip[i].ciputctrl_count = SQ_FASTPUT;
832 837 mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
833 838 }
834 839
835 840 return (0);
836 841 }
837 842
838 843 /* ARGSUSED */
839 844 static void
840 845 ciputctrl_destructor(void *buf, void *cdrarg)
841 846 {
842 847 ciputctrl_t *cip = buf;
843 848 int i;
844 849
845 850 for (i = 0; i < n_ciputctrl; i++) {
846 851 ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
847 852 mutex_destroy(&cip[i].ciputctrl_lock);
848 853 }
849 854 }
850 855
851 856 /*
852 857 * Init routine run from main at boot time.
853 858 */
854 859 void
855 860 strinit(void)
856 861 {
857 862 int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
858 863
859 864 stream_head_cache = kmem_cache_create("stream_head_cache",
860 865 sizeof (stdata_t), 0,
861 866 stream_head_constructor, stream_head_destructor, NULL,
862 867 NULL, NULL, 0);
863 868
864 869 queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
865 870 queue_constructor, queue_destructor, NULL, NULL, NULL, 0);
866 871
867 872 syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
868 873 syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);
869 874
870 875 qband_cache = kmem_cache_create("qband_cache",
871 876 sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
872 877
873 878 linkinfo_cache = kmem_cache_create("linkinfo_cache",
874 879 sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
875 880
876 881 n_ciputctrl = ncpus;
877 882 n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
878 883 ASSERT(n_ciputctrl >= 1);
879 884 n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
880 885 if (n_ciputctrl >= min_n_ciputctrl) {
881 886 ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
882 887 sizeof (ciputctrl_t) * n_ciputctrl,
883 888 sizeof (ciputctrl_t), ciputctrl_constructor,
884 889 ciputctrl_destructor, NULL, NULL, NULL, 0);
885 890 }
886 891
887 892 streams_taskq = system_taskq;
888 893
889 894 if (streams_taskq == NULL)
890 895 panic("strinit: no memory for streams taskq!");
891 896
892 897 bc_bkgrnd_thread = thread_create(NULL, 0,
893 898 streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);
894 899
895 900 streams_qbkgrnd_thread = thread_create(NULL, 0,
896 901 streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
897 902
898 903 streams_sqbkgrnd_thread = thread_create(NULL, 0,
899 904 streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
900 905
901 906 /*
902 907 * Create STREAMS kstats.
903 908 */
904 909 str_kstat = kstat_create("streams", 0, "strstat",
905 910 "net", KSTAT_TYPE_NAMED,
906 911 sizeof (str_statistics) / sizeof (kstat_named_t),
907 912 KSTAT_FLAG_VIRTUAL);
908 913
909 914 if (str_kstat != NULL) {
910 915 str_kstat->ks_data = &str_statistics;
911 916 kstat_install(str_kstat);
912 917 }
913 918
914 919 /*
915 920 * TPI support routine initialisation.
916 921 */
917 922 tpi_init();
918 923
919 924 /*
920 925 * Register a netstack handle so that autopush and persistent link
921 926 * information is maintained per zone.
922 927 * Note: uses shutdown hook instead of destroy hook so that the
923 928 * persistent links can be torn down before the destroy hooks
924 929 * in the TCP/IP stack are called.
925 930 */
926 931 netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
927 932 str_stack_fini);
928 933 }
929 934
930 935 void
931 936 str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
932 937 {
933 938 struct stdata *stp;
934 939
935 940 ASSERT(vp->v_stream);
936 941 stp = vp->v_stream;
937 942 /* Have to hold sd_lock to prevent siglist from changing */
938 943 mutex_enter(&stp->sd_lock);
939 944 if (stp->sd_sigflags & event)
940 945 strsendsig(stp->sd_siglist, event, band, error);
941 946 mutex_exit(&stp->sd_lock);
942 947 }
943 948
944 949 /*
945 950 * Send the "sevent" set of signals to a process.
946 951 * This might send more than one signal if the process is registered
947 952 * for multiple events. The caller should pass in an sevent that only
948 953 * includes the events for which the process has registered.
949 954 */
950 955 static void
951 956 dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
952 957 uchar_t band, int error)
953 958 {
954 959 ASSERT(MUTEX_HELD(&proc->p_lock));
955 960
956 961 info->si_band = 0;
957 962 info->si_errno = 0;
958 963
959 964 if (sevent & S_ERROR) {
960 965 sevent &= ~S_ERROR;
961 966 info->si_code = POLL_ERR;
962 967 info->si_errno = error;
963 968 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
964 969 "strsendsig:proc %p info %p", proc, info);
965 970 sigaddq(proc, NULL, info, KM_NOSLEEP);
966 971 info->si_errno = 0;
967 972 }
968 973 if (sevent & S_HANGUP) {
969 974 sevent &= ~S_HANGUP;
970 975 info->si_code = POLL_HUP;
971 976 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
972 977 "strsendsig:proc %p info %p", proc, info);
973 978 sigaddq(proc, NULL, info, KM_NOSLEEP);
974 979 }
975 980 if (sevent & S_HIPRI) {
976 981 sevent &= ~S_HIPRI;
977 982 info->si_code = POLL_PRI;
978 983 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
979 984 "strsendsig:proc %p info %p", proc, info);
980 985 sigaddq(proc, NULL, info, KM_NOSLEEP);
981 986 }
982 987 if (sevent & S_RDBAND) {
983 988 sevent &= ~S_RDBAND;
984 989 if (events & S_BANDURG)
985 990 sigtoproc(proc, NULL, SIGURG);
986 991 else
987 992 sigtoproc(proc, NULL, SIGPOLL);
988 993 }
989 994 if (sevent & S_WRBAND) {
990 995 sevent &= ~S_WRBAND;
991 996 sigtoproc(proc, NULL, SIGPOLL);
992 997 }
993 998 if (sevent & S_INPUT) {
994 999 sevent &= ~S_INPUT;
995 1000 info->si_code = POLL_IN;
996 1001 info->si_band = band;
997 1002 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
998 1003 "strsendsig:proc %p info %p", proc, info);
999 1004 sigaddq(proc, NULL, info, KM_NOSLEEP);
1000 1005 info->si_band = 0;
1001 1006 }
1002 1007 if (sevent & S_OUTPUT) {
1003 1008 sevent &= ~S_OUTPUT;
1004 1009 info->si_code = POLL_OUT;
1005 1010 info->si_band = band;
1006 1011 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1007 1012 "strsendsig:proc %p info %p", proc, info);
1008 1013 sigaddq(proc, NULL, info, KM_NOSLEEP);
1009 1014 info->si_band = 0;
1010 1015 }
1011 1016 if (sevent & S_MSG) {
1012 1017 sevent &= ~S_MSG;
1013 1018 info->si_code = POLL_MSG;
1014 1019 info->si_band = band;
1015 1020 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1016 1021 "strsendsig:proc %p info %p", proc, info);
1017 1022 sigaddq(proc, NULL, info, KM_NOSLEEP);
1018 1023 info->si_band = 0;
1019 1024 }
1020 1025 if (sevent & S_RDNORM) {
1021 1026 sevent &= ~S_RDNORM;
1022 1027 sigtoproc(proc, NULL, SIGPOLL);
1023 1028 }
1024 1029 if (sevent != 0) {
1025 1030 panic("strsendsig: unknown event(s) %x", sevent);
1026 1031 }
1027 1032 }
1028 1033
1029 1034 /*
1030 1035 * Send SIGPOLL/SIGURG signal to all processes and process groups
1031 1036 * registered on the given signal list that want a signal for at
1032 1037 * least one of the specified events.
1033 1038 *
1034 1039 * Must be called with exclusive access to siglist (caller holding sd_lock).
1035 1040 *
1036 1041 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
1037 1042 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
1038 1043 * while it is in the siglist.
1039 1044 *
1040 1045 * For performance reasons (MP scalability) the code drops pidlock
1041 1046 * when sending signals to a single process.
1042 1047 * When sending to a process group the code holds
1043 1048 * pidlock to prevent the membership in the process group from changing
1044 1049 * while walking the p_pglink list.
1045 1050 */
1046 1051 void
1047 1052 strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
1048 1053 {
1049 1054 strsig_t *ssp;
1050 1055 k_siginfo_t info;
1051 1056 struct pid *pidp;
1052 1057 proc_t *proc;
1053 1058
1054 1059 info.si_signo = SIGPOLL;
1055 1060 info.si_errno = 0;
1056 1061 for (ssp = siglist; ssp; ssp = ssp->ss_next) {
1057 1062 int sevent;
1058 1063
1059 1064 sevent = ssp->ss_events & event;
1060 1065 if (sevent == 0)
1061 1066 continue;
1062 1067
1063 1068 if ((pidp = ssp->ss_pidp) == NULL) {
1064 1069 /* pid was released but still on event list */
1065 1070 continue;
1066 1071 }
1067 1072
1068 1073
1069 1074 if (ssp->ss_pid > 0) {
1070 1075 /*
1071 1076 * XXX This unfortunately still generates
1072 1077 * a signal when a fd is closed but
1073 1078 * the proc is active.
1074 1079 */
1075 1080 ASSERT(ssp->ss_pid == pidp->pid_id);
1076 1081
1077 1082 mutex_enter(&pidlock);
1078 1083 proc = prfind_zone(pidp->pid_id, ALL_ZONES);
1079 1084 if (proc == NULL) {
1080 1085 mutex_exit(&pidlock);
1081 1086 continue;
1082 1087 }
1083 1088 mutex_enter(&proc->p_lock);
1084 1089 mutex_exit(&pidlock);
1085 1090 dosendsig(proc, ssp->ss_events, sevent, &info,
1086 1091 band, error);
1087 1092 mutex_exit(&proc->p_lock);
1088 1093 } else {
1089 1094 /*
1090 1095 * Send to process group. Hold pidlock across
1091 1096 * calls to dosendsig().
1092 1097 */
1093 1098 pid_t pgrp = -ssp->ss_pid;
1094 1099
1095 1100 mutex_enter(&pidlock);
1096 1101 proc = pgfind_zone(pgrp, ALL_ZONES);
1097 1102 while (proc != NULL) {
1098 1103 mutex_enter(&proc->p_lock);
1099 1104 dosendsig(proc, ssp->ss_events, sevent,
1100 1105 &info, band, error);
1101 1106 mutex_exit(&proc->p_lock);
1102 1107 proc = proc->p_pglink;
1103 1108 }
1104 1109 mutex_exit(&pidlock);
1105 1110 }
1106 1111 }
1107 1112 }
1108 1113
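The siglist that strsendsig() walks is populated from user level with the I_SETSIG ioctl (streamio(7I)). A minimal sketch of a consumer registering for SIGPOLL on input; it assumes file descriptor 0 refers to a STREAMS device:

#include <stropts.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
on_poll(int sig)
{
	/* input (or an urgent band event) is now available */
}

int
main(void)
{
	(void) signal(SIGPOLL, on_poll);
	if (ioctl(0, I_SETSIG, S_INPUT | S_RDBAND) < 0) {
		perror("I_SETSIG");
		return (1);
	}
	(void) pause();		/* SIGPOLL will interrupt this */
	return (0);
}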
1109 1114 /*
1110 1115 * Attach a stream device or module.
1111 1116 * qp is a read queue; the new queue goes in so its next
1112 1117 * read ptr is the argument, and the write queue corresponding
1113 1118 * to the argument points to this queue. Return 0 on success,
1114 1119 * or a non-zero errno on failure.
1115 1120 */
1116 1121 int
1117 1122 qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
1118 1123 boolean_t is_insert)
1119 1124 {
1120 1125 major_t major;
1121 1126 cdevsw_impl_t *dp;
1122 1127 struct streamtab *str;
1123 1128 queue_t *rq;
1124 1129 queue_t *wrq;
1125 1130 uint32_t qflag;
1126 1131 uint32_t sqtype;
1127 1132 perdm_t *dmp;
1128 1133 int error;
1129 1134 int sflag;
1130 1135
1131 1136 rq = allocq();
1132 1137 wrq = _WR(rq);
1133 1138 STREAM(rq) = STREAM(wrq) = STREAM(qp);
1134 1139
1135 1140 if (fp != NULL) {
1136 1141 str = fp->f_str;
1137 1142 qflag = fp->f_qflag;
1138 1143 sqtype = fp->f_sqtype;
1139 1144 dmp = fp->f_dmp;
1140 1145 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
1141 1146 sflag = MODOPEN;
1142 1147
1143 1148 /*
1144 1149 * stash away a pointer to the module structure so we can
1145 1150 * unref it in qdetach.
1146 1151 */
1147 1152 rq->q_fp = fp;
1148 1153 } else {
1149 1154 ASSERT(!is_insert);
1150 1155
1151 1156 major = getmajor(*devp);
1152 1157 dp = &devimpl[major];
1153 1158
1154 1159 str = dp->d_str;
1155 1160 ASSERT(str == STREAMSTAB(major));
1156 1161
1157 1162 qflag = dp->d_qflag;
1158 1163 ASSERT(qflag & QISDRV);
1159 1164 sqtype = dp->d_sqtype;
1160 1165
1161 1166 /* create perdm_t if needed */
1162 1167 if (NEED_DM(dp->d_dmp, qflag))
1163 1168 dp->d_dmp = hold_dm(str, qflag, sqtype);
1164 1169
1165 1170 dmp = dp->d_dmp;
1166 1171 sflag = 0;
1167 1172 }
1168 1173
1169 1174 TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
1170 1175 "qattach:qflag == %X(%X)", qflag, *devp);
1171 1176
1172 1177 /* setq might sleep in allocator - avoid holding locks. */
1173 1178 setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);
1174 1179
1175 1180 /*
1176 1181 * Before calling the module's open routine, set up the q_next
1177 1182 * pointer for inserting a module in the middle of a stream.
1178 1183 *
1179 1184 * Note that we can always set _QINSERTING and set up q_next
1180 1185 * pointer for both inserting and pushing a module. Then there
1181 1186 * is no need for the is_insert parameter. In insertq(), called
1182 1187 * by qprocson(), assume that q_next of the new module always points
1183 1188 * to the correct queue and use it for insertion. Everything should
1184 1189 * work out fine. But in the first release of _I_INSERT, we
1185 1190 * distinguish between inserting and pushing to make sure that
1186 1191 * pushing a module follows the same code path as before.
1187 1192 */
1188 1193 if (is_insert) {
1189 1194 rq->q_flag |= _QINSERTING;
1190 1195 rq->q_next = qp;
1191 1196 }
1192 1197
1193 1198 /*
1194 1199 * If there is an outer perimeter get exclusive access during
1195 1200 * the open procedure. Bump up the reference count on the queue.
1196 1201 */
1197 1202 entersq(rq->q_syncq, SQ_OPENCLOSE);
1198 1203 error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
1199 1204 if (error != 0)
1200 1205 goto failed;
1201 1206 leavesq(rq->q_syncq, SQ_OPENCLOSE);
1202 1207 ASSERT(qprocsareon(rq));
1203 1208 return (0);
1204 1209
1205 1210 failed:
1206 1211 rq->q_flag &= ~_QINSERTING;
1207 1212 if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
1208 1213 qprocsoff(rq);
1209 1214 leavesq(rq->q_syncq, SQ_OPENCLOSE);
1210 1215 rq->q_next = wrq->q_next = NULL;
1211 1216 qdetach(rq, 0, 0, crp, B_FALSE);
1212 1217 return (error);
1213 1218 }
1214 1219
1215 1220 /*
1216 1221 * Handle second open of stream. For modules, set the
1217 1222 * last argument to MODOPEN and do not pass any open flags.
1218 1223 * Ignore dummydev since this is not the first open.
1219 1224 */
1220 1225 int
1221 1226 qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
1222 1227 {
1223 1228 int error;
1224 1229 dev_t dummydev;
1225 1230 queue_t *wqp = _WR(qp);
1226 1231
1227 1232 ASSERT(qp->q_flag & QREADR);
1228 1233 entersq(qp->q_syncq, SQ_OPENCLOSE);
1229 1234
1230 1235 dummydev = *devp;
1231 1236 if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
1232 1237 (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
1233 1238 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1234 1239 mutex_enter(&STREAM(qp)->sd_lock);
1235 1240 qp->q_stream->sd_flag |= STREOPENFAIL;
1236 1241 mutex_exit(&STREAM(qp)->sd_lock);
1237 1242 return (error);
1238 1243 }
1239 1244 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1240 1245
1241 1246 /*
1242 1247 * successful open should have done qprocson()
1243 1248 */
1244 1249 ASSERT(qprocsareon(_RD(qp)));
1245 1250 return (0);
1246 1251 }
1247 1252
1248 1253 /*
1249 1254 * Detach a stream module or device.
1250 1255 * If clmode == 1 then the module or driver was opened and its
1251 1256 * close routine must be called. If clmode == 0, the module
1252 1257 * or driver was never opened or the open failed, and so its close
1253 1258 * should not be called.
1254 1259 */
1255 1260 void
1256 1261 qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
1257 1262 {
1258 1263 queue_t *wqp = _WR(qp);
1259 1264 ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));
1260 1265
1261 1266 if (STREAM_NEEDSERVICE(STREAM(qp)))
1262 1267 stream_runservice(STREAM(qp));
1263 1268
1264 1269 if (clmode) {
1265 1270 /*
1266 1271 * Make sure that all the messages on the write side syncq are
1267 1272 * processed and nothing is left. Since we are closing, no new
1268 1273 * messages may appear there.
1269 1274 */
1270 1275 wait_q_syncq(wqp);
1271 1276
1272 1277 entersq(qp->q_syncq, SQ_OPENCLOSE);
1273 1278 if (is_remove) {
1274 1279 mutex_enter(QLOCK(qp));
1275 1280 qp->q_flag |= _QREMOVING;
1276 1281 mutex_exit(QLOCK(qp));
1277 1282 }
1278 1283 (*qp->q_qinfo->qi_qclose)(qp, flag, crp);
1279 1284 /*
1280 1285 * Check that qprocsoff() was actually called.
1281 1286 */
1282 1287 ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));
1283 1288
1284 1289 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1285 1290 } else {
1286 1291 disable_svc(qp);
1287 1292 }
1288 1293
1289 1294 /*
1290 1295 * Allow any threads blocked in entersq to proceed and discover
1291 1296 * that QWCLOSE is set.
1292 1297 * Note: This assumes that all users of entersq check QWCLOSE.
1293 1298 * Currently runservice is the only entersq that can happen
1294 1299 * after removeq has finished.
1295 1300 * Removeq will have discarded all messages destined to the closing
1296 1301 * pair of queues from the syncq.
1297 1302 * NOTE: Calling a function inside an assert is unconventional.
1298 1303 * However, it does not cause any problem since flush_syncq() does
1299 1304 * not change any state except when it returns non-zero i.e.
1300 1305 * when the assert will trigger.
1301 1306 */
1302 1307 ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
1303 1308 ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
1304 1309 ASSERT((qp->q_flag & QPERMOD) ||
1305 1310 ((qp->q_syncq->sq_head == NULL) &&
1306 1311 (wqp->q_syncq->sq_head == NULL)));
1307 1312
1308 1313 /* release any fmodsw_impl_t structure held on behalf of the queue */
1309 1314 ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
1310 1315 if (qp->q_fp != NULL)
1311 1316 fmodsw_rele(qp->q_fp);
1312 1317
1313 1318 /* freeq removes us from the outer perimeter if any */
1314 1319 freeq(qp);
1315 1320 }
1316 1321
1317 1322 /* Prevent service procedures from being called */
1318 1323 void
1319 1324 disable_svc(queue_t *qp)
1320 1325 {
1321 1326 queue_t *wqp = _WR(qp);
1322 1327
1323 1328 ASSERT(qp->q_flag & QREADR);
1324 1329 mutex_enter(QLOCK(qp));
1325 1330 qp->q_flag |= QWCLOSE;
1326 1331 mutex_exit(QLOCK(qp));
1327 1332 mutex_enter(QLOCK(wqp));
1328 1333 wqp->q_flag |= QWCLOSE;
1329 1334 mutex_exit(QLOCK(wqp));
1330 1335 }
1331 1336
1332 1337 /* Allow service procedures to be called again */
1333 1338 void
1334 1339 enable_svc(queue_t *qp)
1335 1340 {
1336 1341 queue_t *wqp = _WR(qp);
1337 1342
1338 1343 ASSERT(qp->q_flag & QREADR);
1339 1344 mutex_enter(QLOCK(qp));
1340 1345 qp->q_flag &= ~QWCLOSE;
1341 1346 mutex_exit(QLOCK(qp));
1342 1347 mutex_enter(QLOCK(wqp));
1343 1348 wqp->q_flag &= ~QWCLOSE;
1344 1349 mutex_exit(QLOCK(wqp));
1345 1350 }
1346 1351
1347 1352 /*
1348 1353 * Remove queue from qhead/qtail if it is enabled.
1349 1354 * Only reset QENAB if the queue was removed from the runlist.
1350 1355 * A queue goes through 3 stages:
1351 1356 * It is on the service list and QENAB is set.
1352 1357 * It is removed from the service list but QENAB is still set.
1353 1358 * QENAB gets changed to QINSERVICE.
1354 1359 * QINSERVICE is reset (when the service procedure is done)
1355 1360 * Thus we cannot reset QENAB unless we actually removed it from the service
1356 1361 * queue.
1357 1362 */
1358 1363 void
1359 1364 remove_runlist(queue_t *qp)
1360 1365 {
1361 1366 if (qp->q_flag & QENAB && qhead != NULL) {
1362 1367 queue_t *q_chase;
1363 1368 queue_t *q_curr;
1364 1369 int removed;
1365 1370
1366 1371 mutex_enter(&service_queue);
1367 1372 RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
1368 1373 mutex_exit(&service_queue);
1369 1374 if (removed) {
1370 1375 STRSTAT(qremoved);
1371 1376 qp->q_flag &= ~QENAB;
1372 1377 }
1373 1378 }
1374 1379 }
1375 1380
1376 1381
1377 1382 /*
1378 1383 * Wait for any pending service processing to complete.
1379 1384 * The removal of queues from the runlist is not atomic with the
1380 1385 * clearing of the QENABLED flag and setting the INSERVICE flag.
1381 1386 * consequently it is possible for remove_runlist in strclose
1382 1387 * to not find the queue on the runlist but for it to be QENABLED
1383 1388 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
1384 1389 * as well as INSERVICE.
1385 1390 */
1386 1391 void
1387 1392 wait_svc(queue_t *qp)
1388 1393 {
1389 1394 queue_t *wqp = _WR(qp);
1390 1395
1391 1396 ASSERT(qp->q_flag & QREADR);
1392 1397
1393 1398 /*
1394 1399 * Try to remove queues from qhead/qtail list.
1395 1400 */
1396 1401 if (qhead != NULL) {
1397 1402 remove_runlist(qp);
1398 1403 remove_runlist(wqp);
1399 1404 }
1400 1405 /*
1401 1406 * Wait till the syncqs associated with the queue disappear from the
1402 1407 * background processing list.
1403 1408 * This only needs to be done for non-PERMOD perimeters since
1404 1409 * for PERMOD perimeters the syncq may be shared and will only be freed
1405 1410 * when the last module/driver is unloaded.
1406 1411 * If for PERMOD perimeters queue was on the syncq list, removeq()
1407 1412 * should call propagate_syncq() or drain_syncq() for it. Both of these
1408 1413 * functions remove the queue from its syncq list, so sqthread will not
1409 1414 * try to access the queue.
1410 1415 */
1411 1416 if (!(qp->q_flag & QPERMOD)) {
1412 1417 syncq_t *rsq = qp->q_syncq;
1413 1418 syncq_t *wsq = wqp->q_syncq;
1414 1419
1415 1420 /*
1416 1421 * Disable rsq and wsq and wait for any background processing of
1417 1422 * syncq to complete.
1418 1423 */
1419 1424 wait_sq_svc(rsq);
1420 1425 if (wsq != rsq)
1421 1426 wait_sq_svc(wsq);
1422 1427 }
1423 1428
1424 1429 mutex_enter(QLOCK(qp));
1425 1430 while (qp->q_flag & (QINSERVICE|QENAB))
1426 1431 cv_wait(&qp->q_wait, QLOCK(qp));
1427 1432 mutex_exit(QLOCK(qp));
1428 1433 mutex_enter(QLOCK(wqp));
1429 1434 while (wqp->q_flag & (QINSERVICE|QENAB))
1430 1435 cv_wait(&wqp->q_wait, QLOCK(wqp));
1431 1436 mutex_exit(QLOCK(wqp));
1432 1437 }
1433 1438
1434 1439 /*
1435 1440 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
1436 1441 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
1437 1442 * also be set, and is passed through to allocb_cred_wait().
1438 1443 *
1439 1444 * Returns errno on failure, zero on success.
1440 1445 */
1441 1446 int
1442 1447 putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
1443 1448 {
1444 1449 mblk_t *tmp;
1445 1450 ssize_t count;
1446 1451 int error = 0;
1447 1452
1448 1453 ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
1449 1454 (flag & (U_TO_K | K_TO_K)) == K_TO_K);
1450 1455
1451 1456 if (bp->b_datap->db_type == M_IOCTL) {
1452 1457 count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1453 1458 } else {
1454 1459 ASSERT(bp->b_datap->db_type == M_COPYIN);
1455 1460 count = ((struct copyreq *)bp->b_rptr)->cq_size;
1456 1461 }
1457 1462 /*
1458 1463 * strdoioctl validates ioc_count, so if this assert fails it
1459 1464 * cannot be due to user error.
1460 1465 */
1461 1466 ASSERT(count >= 0);
1462 1467
1463 1468 if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
1464 1469 curproc->p_pid)) == NULL) {
1465 1470 return (error);
1466 1471 }
1467 1472 error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
1468 1473 if (error != 0) {
1469 1474 freeb(tmp);
1470 1475 return (error);
1471 1476 }
1472 1477 DB_CPID(tmp) = curproc->p_pid;
1473 1478 tmp->b_wptr += count;
1474 1479 bp->b_cont = tmp;
1475 1480
1476 1481 return (0);
1477 1482 }
1478 1483
1479 1484 /*
1480 1485 * Copy ioctl data to user-land. Return non-zero errno on failure,
1481 1486 * 0 for success.
1482 1487 */
1483 1488 int
1484 1489 getiocd(mblk_t *bp, char *arg, int copymode)
1485 1490 {
1486 1491 ssize_t count;
1487 1492 size_t n;
1488 1493 int error;
1489 1494
1490 1495 if (bp->b_datap->db_type == M_IOCACK)
1491 1496 count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1492 1497 else {
1493 1498 ASSERT(bp->b_datap->db_type == M_COPYOUT);
1494 1499 count = ((struct copyreq *)bp->b_rptr)->cq_size;
1495 1500 }
1496 1501 ASSERT(count >= 0);
1497 1502
1498 1503 for (bp = bp->b_cont; bp && count;
1499 1504 count -= n, bp = bp->b_cont, arg += n) {
1500 1505 n = MIN(count, bp->b_wptr - bp->b_rptr);
1501 1506 error = strcopyout(bp->b_rptr, arg, n, copymode);
1502 1507 if (error)
1503 1508 return (error);
1504 1509 }
1505 1510 ASSERT(count == 0);
1506 1511 return (0);
1507 1512 }
1508 1513
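putiocd() and getiocd() are the two directions of an I_STR ioctl's data path: putiocd() copies ic_len bytes in from ic_dp, and getiocd() copies the reply back out. A user-level sketch of the matching call; the command value and payload are invented:

#include <stropts.h>
#include <stdio.h>

int
send_str_ioctl(int fd)
{
	struct strioctl ic;
	int arg = 42;			/* illustrative payload */

	ic.ic_cmd = ('x' << 8) | 1;	/* hypothetical module command */
	ic.ic_timout = -1;		/* wait indefinitely for the ACK */
	ic.ic_len = sizeof (arg);	/* what putiocd() will copy in */
	ic.ic_dp = (char *)&arg;

	if (ioctl(fd, I_STR, &ic) < 0) {
		perror("I_STR");
		return (-1);
	}
	/* getiocd() has copied ic_len reply bytes back into arg */
	return (0);
}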
1509 1514 /*
1510 1515 * Allocate a linkinfo entry given the write queue of the
1511 1516 * bottom module of the top stream and the write queue of the
1512 1517 * stream head of the bottom stream.
1513 1518 */
1514 1519 linkinfo_t *
1515 1520 alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
1516 1521 {
1517 1522 linkinfo_t *linkp;
1518 1523
1519 1524 linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);
1520 1525
1521 1526 linkp->li_lblk.l_qtop = qup;
1522 1527 linkp->li_lblk.l_qbot = qdown;
1523 1528 linkp->li_fpdown = fpdown;
1524 1529
1525 1530 mutex_enter(&strresources);
1526 1531 linkp->li_next = linkinfo_list;
1527 1532 linkp->li_prev = NULL;
1528 1533 if (linkp->li_next)
1529 1534 linkp->li_next->li_prev = linkp;
1530 1535 linkinfo_list = linkp;
1531 1536 linkp->li_lblk.l_index = ++lnk_id;
1532 1537 ASSERT(lnk_id != 0); /* this should never wrap in practice */
1533 1538 mutex_exit(&strresources);
1534 1539
1535 1540 return (linkp);
1536 1541 }
1537 1542
1538 1543 /*
1539 1544 * Free a linkinfo entry.
1540 1545 */
1541 1546 void
1542 1547 lbfree(linkinfo_t *linkp)
1543 1548 {
1544 1549 mutex_enter(&strresources);
1545 1550 if (linkp->li_next)
1546 1551 linkp->li_next->li_prev = linkp->li_prev;
1547 1552 if (linkp->li_prev)
1548 1553 linkp->li_prev->li_next = linkp->li_next;
1549 1554 else
1550 1555 linkinfo_list = linkp->li_next;
1551 1556 mutex_exit(&strresources);
1552 1557
1553 1558 kmem_cache_free(linkinfo_cache, linkp);
1554 1559 }
1555 1560
1556 1561 /*
1557 1562 * Check for a potential linking cycle.
1558 1563 * Return 1 if a link will result in a cycle,
1559 1564 * and 0 otherwise.
1560 1565 */
1561 1566 int
1562 1567 linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
1563 1568 {
1564 1569 struct mux_node *np;
1565 1570 struct mux_edge *ep;
1566 1571 int i;
1567 1572 major_t lomaj;
1568 1573 major_t upmaj;
1569 1574 /*
1570 1575 * if the lower stream is a pipe/FIFO, return, since link
1571 1576 * cycles cannot happen on pipes/FIFOs
1572 1577 */
1573 1578 if (lostp->sd_vnode->v_type == VFIFO)
1574 1579 return (0);
1575 1580
1576 1581 for (i = 0; i < ss->ss_devcnt; i++) {
1577 1582 np = &ss->ss_mux_nodes[i];
1578 1583 MUX_CLEAR(np);
1579 1584 }
1580 1585 lomaj = getmajor(lostp->sd_vnode->v_rdev);
1581 1586 upmaj = getmajor(upstp->sd_vnode->v_rdev);
1582 1587 np = &ss->ss_mux_nodes[lomaj];
1583 1588 for (;;) {
1584 1589 if (!MUX_DIDVISIT(np)) {
1585 1590 if (np->mn_imaj == upmaj)
1586 1591 return (1);
1587 1592 if (np->mn_outp == NULL) {
1588 1593 MUX_VISIT(np);
1589 1594 if (np->mn_originp == NULL)
1590 1595 return (0);
1591 1596 np = np->mn_originp;
1592 1597 continue;
1593 1598 }
1594 1599 MUX_VISIT(np);
1595 1600 np->mn_startp = np->mn_outp;
1596 1601 } else {
1597 1602 if (np->mn_startp == NULL) {
1598 1603 if (np->mn_originp == NULL)
1599 1604 return (0);
1600 1605 else {
1601 1606 np = np->mn_originp;
1602 1607 continue;
1603 1608 }
1604 1609 }
1605 1610 /*
1606 1611 * If ep->me_nodep is a FIFO (me_nodep == NULL),
1607 1612 * ignore the edge and move on. ep->me_nodep gets
1608 1613 * set to NULL in mux_addedge() if it is a FIFO.
1609 1614 *
1610 1615 */
1611 1616 ep = np->mn_startp;
1612 1617 np->mn_startp = ep->me_nextp;
1613 1618 if (ep->me_nodep == NULL)
1614 1619 continue;
1615 1620 ep->me_nodep->mn_originp = np;
1616 1621 np = ep->me_nodep;
1617 1622 }
1618 1623 }
1619 1624 }
1620 1625
1621 1626 /*
1622 1627 * Find linkinfo entry corresponding to the parameters.
1623 1628 */
1624 1629 linkinfo_t *
1625 1630 findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
1626 1631 {
1627 1632 linkinfo_t *linkp;
1628 1633 struct mux_edge *mep;
1629 1634 struct mux_node *mnp;
1630 1635 queue_t *qup;
1631 1636
1632 1637 mutex_enter(&strresources);
1633 1638 if ((type & LINKTYPEMASK) == LINKNORMAL) {
1634 1639 qup = getendq(stp->sd_wrq);
1635 1640 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1636 1641 if ((qup == linkp->li_lblk.l_qtop) &&
1637 1642 (!index || (index == linkp->li_lblk.l_index))) {
1638 1643 mutex_exit(&strresources);
1639 1644 return (linkp);
1640 1645 }
1641 1646 }
1642 1647 } else {
1643 1648 ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
1644 1649 mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
1645 1650 mep = mnp->mn_outp;
1646 1651 while (mep) {
1647 1652 if ((index == 0) || (index == mep->me_muxid))
1648 1653 break;
1649 1654 mep = mep->me_nextp;
1650 1655 }
1651 1656 if (!mep) {
1652 1657 mutex_exit(&strresources);
1653 1658 return (NULL);
1654 1659 }
1655 1660 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1656 1661 if ((!linkp->li_lblk.l_qtop) &&
1657 1662 (mep->me_muxid == linkp->li_lblk.l_index)) {
1658 1663 mutex_exit(&strresources);
1659 1664 return (linkp);
1660 1665 }
1661 1666 }
1662 1667 }
1663 1668 mutex_exit(&strresources);
1664 1669 return (NULL);
1665 1670 }
1666 1671
1667 1672 /*
1668 1673 * Given a queue ptr, follow the chain of q_next pointers until you reach the
1669 1674 * last queue on the chain and return it.
1670 1675 */
1671 1676 queue_t *
1672 1677 getendq(queue_t *q)
1673 1678 {
1674 1679 ASSERT(q != NULL);
1675 1680 while (_SAMESTR(q))
1676 1681 q = q->q_next;
1677 1682 return (q);
1678 1683 }
1679 1684
1680 1685 /*
1681 1686 * Wait for the syncq count to drop to zero.
1682 1687 * sq could be either outer or inner.
1683 1688 */
1684 1689
1685 1690 static void
1686 1691 wait_syncq(syncq_t *sq)
1687 1692 {
1688 1693 uint16_t count;
1689 1694
1690 1695 mutex_enter(SQLOCK(sq));
1691 1696 count = sq->sq_count;
1692 1697 SQ_PUTLOCKS_ENTER(sq);
1693 1698 SUM_SQ_PUTCOUNTS(sq, count);
1694 1699 while (count != 0) {
1695 1700 sq->sq_flags |= SQ_WANTWAKEUP;
1696 1701 SQ_PUTLOCKS_EXIT(sq);
1697 1702 cv_wait(&sq->sq_wait, SQLOCK(sq));
1698 1703 count = sq->sq_count;
1699 1704 SQ_PUTLOCKS_ENTER(sq);
1700 1705 SUM_SQ_PUTCOUNTS(sq, count);
1701 1706 }
1702 1707 SQ_PUTLOCKS_EXIT(sq);
1703 1708 mutex_exit(SQLOCK(sq));
1704 1709 }
1705 1710
1706 1711 /*
1707 1712 * Wait while there are any messages for the queue in its syncq.
1708 1713 */
1709 1714 static void
1710 1715 wait_q_syncq(queue_t *q)
1711 1716 {
1712 1717 if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1713 1718 syncq_t *sq = q->q_syncq;
1714 1719
1715 1720 mutex_enter(SQLOCK(sq));
1716 1721 while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1717 1722 sq->sq_flags |= SQ_WANTWAKEUP;
1718 1723 cv_wait(&sq->sq_wait, SQLOCK(sq));
1719 1724 }
1720 1725 mutex_exit(SQLOCK(sq));
1721 1726 }
1722 1727 }
1723 1728
1724 1729
1725 1730 int
1726 1731 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
1727 1732 int lhlink)
1728 1733 {
1729 1734 struct stdata *stp;
1730 1735 struct strioctl strioc;
1731 1736 struct linkinfo *linkp;
1732 1737 struct stdata *stpdown;
1733 1738 struct streamtab *str;
1734 1739 queue_t *passq;
1735 1740 syncq_t *passyncq;
1736 1741 queue_t *rq;
1737 1742 cdevsw_impl_t *dp;
1738 1743 uint32_t qflag;
1739 1744 uint32_t sqtype;
1740 1745 perdm_t *dmp;
1741 1746 int error = 0;
1742 1747 netstack_t *ns;
1743 1748 str_stack_t *ss;
1744 1749
1745 1750 stp = vp->v_stream;
1746 1751 TRACE_1(TR_FAC_STREAMS_FR,
1747 1752 TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
1748 1753 /*
1749 1754 * Test for invalid upper stream
1750 1755 */
1751 1756 if (stp->sd_flag & STRHUP) {
1752 1757 return (ENXIO);
1753 1758 }
1754 1759 if (vp->v_type == VFIFO) {
1755 1760 return (EINVAL);
1756 1761 }
1757 1762 if (stp->sd_strtab == NULL) {
1758 1763 return (EINVAL);
1759 1764 }
1760 1765 if (!stp->sd_strtab->st_muxwinit) {
1761 1766 return (EINVAL);
1762 1767 }
1763 1768 if (fpdown == NULL) {
1764 1769 return (EBADF);
1765 1770 }
1766 1771 ns = netstack_find_by_cred(crp);
1767 1772 ASSERT(ns != NULL);
1768 1773 ss = ns->netstack_str;
1769 1774 ASSERT(ss != NULL);
1770 1775
1771 1776 if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
1772 1777 netstack_rele(ss->ss_netstack);
1773 1778 return (EINVAL);
1774 1779 }
1775 1780 mutex_enter(&muxifier);
1776 1781 if (stp->sd_flag & STPLEX) {
1777 1782 mutex_exit(&muxifier);
1778 1783 netstack_rele(ss->ss_netstack);
1779 1784 return (ENXIO);
1780 1785 }
1781 1786
1782 1787 /*
1783 1788 * Test for invalid lower stream.
1784 1789 	 * The check that a non-FIFO lower stream has a major
1785 1790 	 * number < devcnt is done to avoid adding a mux_node
1786 1791 	 * entry past the end of mux_nodes[].
1787 1792 	 * For FIFOs we don't add an entry, so this isn't a
1788 1793 	 * problem.
1789 1794 */
1790 1795 if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
1791 1796 (stpdown == stp) || (stpdown->sd_flag &
1792 1797 (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
1793 1798 ((stpdown->sd_vnode->v_type != VFIFO) &&
1794 1799 (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
1795 1800 linkcycle(stp, stpdown, ss)) {
1796 1801 mutex_exit(&muxifier);
1797 1802 netstack_rele(ss->ss_netstack);
1798 1803 return (EINVAL);
1799 1804 }
1800 1805 TRACE_1(TR_FAC_STREAMS_FR,
1801 1806 TR_STPDOWN, "stpdown:%p", stpdown);
1802 1807 rq = getendq(stp->sd_wrq);
1803 1808 if (cmd == I_PLINK)
1804 1809 rq = NULL;
1805 1810
1806 1811 linkp = alloclink(rq, stpdown->sd_wrq, fpdown);
1807 1812
1808 1813 strioc.ic_cmd = cmd;
1809 1814 strioc.ic_timout = INFTIM;
1810 1815 strioc.ic_len = sizeof (struct linkblk);
1811 1816 strioc.ic_dp = (char *)&linkp->li_lblk;
1812 1817
1813 1818 /*
1814 1819 * STRPLUMB protects plumbing changes and should be set before
1815 1820 * link_addpassthru()/link_rempassthru() are called, so it is set here
1816 1821 	 * and cleared at the end of mlink when the passthru queue is removed.
1817 1822 	 * Setting STRPLUMB prevents reopens of the stream while the passthru
1818 1823 	 * queue is in place (it is not a proper module and doesn't have an
1819 1824 	 * open entry point).
1820 1825 *
1821 1826 * STPLEX prevents any threads from entering the stream from above. It
1822 1827 * can't be set before the call to link_addpassthru() because putnext
1823 1828 * from below may cause stream head I/O routines to be called and these
1824 1829 * routines assert that STPLEX is not set. After link_addpassthru()
1825 1830 * nothing may come from below since the pass queue syncq is blocked.
1826 1831 * Note also that STPLEX should be cleared before the call to
1827 1832 * link_rempassthru() since when messages start flowing to the stream
1828 1833 * head (e.g. because of message propagation from the pass queue) stream
1829 1834 * head I/O routines may be called with STPLEX flag set.
1830 1835 *
1831 1836 * When STPLEX is set, nothing may come into the stream from above and
1832 1837 * it is safe to do a setq which will change stream head. So, the
1833 1838 * correct sequence of actions is:
1834 1839 *
1835 1840 * 1) Set STRPLUMB
1836 1841 * 2) Call link_addpassthru()
1837 1842 * 3) Set STPLEX
1838 1843 * 4) Call setq and update the stream state
1839 1844 * 5) Clear STPLEX
1840 1845 * 6) Call link_rempassthru()
1841 1846 * 7) Clear STRPLUMB
1842 1847 *
1843 1848 * The same sequence applies to munlink() code.
1844 1849 */
1845 1850 mutex_enter(&stpdown->sd_lock);
1846 1851 stpdown->sd_flag |= STRPLUMB;
1847 1852 mutex_exit(&stpdown->sd_lock);
1848 1853 /*
1849 1854 	 * Add a passthru queue below the lower mux. This will block
1850 1855 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
1851 1856 */
1852 1857 passq = link_addpassthru(stpdown);
1853 1858
1854 1859 mutex_enter(&stpdown->sd_lock);
1855 1860 stpdown->sd_flag |= STPLEX;
1856 1861 mutex_exit(&stpdown->sd_lock);
1857 1862
1858 1863 rq = _RD(stpdown->sd_wrq);
1859 1864 /*
1860 1865 * There may be messages in the streamhead's syncq due to messages
1861 1866 * that arrived before link_addpassthru() was done. To avoid
1862 1867 	 * background processing of the syncq from happening simultaneously
1863 1868 	 * with setq processing, we disable the streamhead syncq and wait
1864 1869 	 * until the existing background thread finishes working on it.
1865 1870 */
1866 1871 wait_sq_svc(rq->q_syncq);
1867 1872 passyncq = passq->q_syncq;
1868 1873 if (!(passyncq->sq_flags & SQ_BLOCKED))
1869 1874 blocksq(passyncq, SQ_BLOCKED, 0);
1870 1875
1871 1876 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
1872 1877 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
1873 1878 rq->q_ptr = _WR(rq)->q_ptr = NULL;
1874 1879
1875 1880 /* setq might sleep in allocator - avoid holding locks. */
1876 1881 /* Note: we are holding muxifier here. */
1877 1882
1878 1883 str = stp->sd_strtab;
1879 1884 dp = &devimpl[getmajor(vp->v_rdev)];
1880 1885 ASSERT(dp->d_str == str);
1881 1886
1882 1887 qflag = dp->d_qflag;
1883 1888 sqtype = dp->d_sqtype;
1884 1889
1885 1890 /* create perdm_t if needed */
1886 1891 if (NEED_DM(dp->d_dmp, qflag))
1887 1892 dp->d_dmp = hold_dm(str, qflag, sqtype);
1888 1893
1889 1894 dmp = dp->d_dmp;
1890 1895
1891 1896 setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
1892 1897 B_TRUE);
1893 1898
1894 1899 /*
1895 1900 * XXX Remove any "odd" messages from the queue.
1896 1901 * Keep only M_DATA, M_PROTO, M_PCPROTO.
1897 1902 */
1898 1903 error = strdoioctl(stp, &strioc, FNATIVE,
1899 1904 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
1900 1905 if (error != 0) {
1901 1906 lbfree(linkp);
1902 1907
1903 1908 if (!(passyncq->sq_flags & SQ_BLOCKED))
1904 1909 blocksq(passyncq, SQ_BLOCKED, 0);
1905 1910 /*
1906 1911 * Restore the stream head queue and then remove
1907 1912 * the passq. Turn off STPLEX before we turn on
1908 1913 * the stream by removing the passq.
1909 1914 */
1910 1915 rq->q_ptr = _WR(rq)->q_ptr = stpdown;
1911 1916 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
1912 1917 B_TRUE);
1913 1918
1914 1919 mutex_enter(&stpdown->sd_lock);
1915 1920 stpdown->sd_flag &= ~STPLEX;
1916 1921 mutex_exit(&stpdown->sd_lock);
1917 1922
1918 1923 link_rempassthru(passq);
1919 1924
1920 1925 mutex_enter(&stpdown->sd_lock);
1921 1926 stpdown->sd_flag &= ~STRPLUMB;
1922 1927 /* Wakeup anyone waiting for STRPLUMB to clear. */
1923 1928 cv_broadcast(&stpdown->sd_monitor);
1924 1929 mutex_exit(&stpdown->sd_lock);
1925 1930
1926 1931 mutex_exit(&muxifier);
1927 1932 netstack_rele(ss->ss_netstack);
1928 1933 return (error);
1929 1934 }
1930 1935 mutex_enter(&fpdown->f_tlock);
1931 1936 fpdown->f_count++;
1932 1937 mutex_exit(&fpdown->f_tlock);
1933 1938
1934 1939 /*
1935 1940 	 * If we've made it here, the linkage is all set up, so we should also
1936 1941 	 * set up the layered driver linkages.
1937 1942 */
1938 1943
1939 1944 ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1940 1945 if (cmd == I_LINK) {
1941 1946 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1942 1947 } else {
1943 1948 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1944 1949 }
1945 1950
1946 1951 link_rempassthru(passq);
1947 1952
1948 1953 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1949 1954
1950 1955 /*
1951 1956 * Mark the upper stream as having dependent links
1952 1957 * so that strclose can clean it up.
1953 1958 */
1954 1959 if (cmd == I_LINK) {
1955 1960 mutex_enter(&stp->sd_lock);
1956 1961 stp->sd_flag |= STRHASLINKS;
1957 1962 mutex_exit(&stp->sd_lock);
1958 1963 }
1959 1964 /*
1960 1965 * Wake up any other processes that may have been
1961 1966 * waiting on the lower stream. These will all
1962 1967 * error out.
1963 1968 */
1964 1969 mutex_enter(&stpdown->sd_lock);
1965 1970 /* The passthru module is removed so we may release STRPLUMB */
1966 1971 stpdown->sd_flag &= ~STRPLUMB;
1967 1972 cv_broadcast(&rq->q_wait);
1968 1973 cv_broadcast(&_WR(rq)->q_wait);
1969 1974 cv_broadcast(&stpdown->sd_monitor);
1970 1975 mutex_exit(&stpdown->sd_lock);
1971 1976 mutex_exit(&muxifier);
1972 1977 *rvalp = linkp->li_lblk.l_index;
1973 1978 netstack_rele(ss->ss_netstack);
1974 1979 return (0);
1975 1980 }
1976 1981
1977 1982 int
1978 1983 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1979 1984 {
1980 1985 int ret;
1981 1986 struct file *fpdown;
1982 1987
1983 1988 fpdown = getf(arg);
1984 1989 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1985 1990 if (fpdown != NULL)
1986 1991 releasef(arg);
1987 1992 return (ret);
1988 1993 }
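
/*
 * For orientation, the paths above are reached from user space via the
 * STREAMS I_LINK/I_PLINK ioctls (see streamio(7I)).  A hedged sketch;
 * the device paths below are purely illustrative.
 */
#include <sys/types.h>
#include <stropts.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int muxfd, lowfd, muxid;

	muxfd = open("/dev/somemux", O_RDWR);	/* hypothetical mux driver */
	lowfd = open("/dev/somelow", O_RDWR);	/* hypothetical lower driver */
	if (muxfd < 0 || lowfd < 0)
		return (1);

	/* Persistent link; the return value is the mux id (l_index). */
	muxid = ioctl(muxfd, I_PLINK, lowfd);
	if (muxid < 0) {
		perror("I_PLINK");
		return (1);
	}

	/* ... later, tear the link down by its id. */
	(void) ioctl(muxfd, I_PUNLINK, muxid);
	(void) close(lowfd);
	(void) close(muxfd);
	return (0);
}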
1989 1994
1990 1995 /*
1991 1996 * Unlink a multiplexor link. Stp is the controlling stream for the
1992 1997 * link, and linkp points to the link's entry in the linkinfo list.
1993 1998 * The muxifier lock must be held on entry and is dropped on exit.
1994 1999 *
1995 2000  * NOTE: Currently it is assumed that the mux will process all the messages
1996 2001  * sitting on its queue before ACKing the UNLINK. It is the responsibility
1997 2002  * of the mux to handle all the messages that arrive before UNLINK.
1998 2003  * If the mux has to send down messages on its lower stream before
1999 2004  * ACKing I_UNLINK, then it *should* know to handle messages even
2000 2005  * after the UNLINK is acked (actually it should be able to handle them
2001 2006  * until we re-block the read side of the pass queue here). If the mux
2002 2007  * does not open up the lower stream, any messages that arrive during
2003 2008  * UNLINK will be put in the stream head. If the lower stream is opened
2004 2009  * up, some messages might land in the stream head depending on when
2005 2010  * each message arrived and when the read side of the pass queue was
2006 2011  * re-blocked.
2007 2012 */
2008 2013 int
2009 2014 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2010 2015 str_stack_t *ss)
2011 2016 {
2012 2017 struct strioctl strioc;
2013 2018 struct stdata *stpdown;
2014 2019 queue_t *rq, *wrq;
2015 2020 queue_t *passq;
2016 2021 syncq_t *passyncq;
2017 2022 int error = 0;
2018 2023 file_t *fpdown;
2019 2024
2020 2025 ASSERT(MUTEX_HELD(&muxifier));
2021 2026
2022 2027 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2023 2028
2024 2029 /*
2025 2030 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2026 2031 */
2027 2032 mutex_enter(&stpdown->sd_lock);
2028 2033 stpdown->sd_flag |= STRPLUMB;
2029 2034 mutex_exit(&stpdown->sd_lock);
2030 2035
2031 2036 /*
2032 2037 	 * Add a passthru queue below the lower mux. This will block
2033 2038 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2034 2039 */
2035 2040 passq = link_addpassthru(stpdown);
2036 2041
2037 2042 if ((flag & LINKTYPEMASK) == LINKNORMAL)
2038 2043 strioc.ic_cmd = I_UNLINK;
2039 2044 else
2040 2045 strioc.ic_cmd = I_PUNLINK;
2041 2046 strioc.ic_timout = INFTIM;
2042 2047 strioc.ic_len = sizeof (struct linkblk);
2043 2048 strioc.ic_dp = (char *)&linkp->li_lblk;
2044 2049
2045 2050 error = strdoioctl(stp, &strioc, FNATIVE,
2046 2051 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2047 2052
2048 2053 /*
2049 2054 * If there was an error and this is not called via strclose,
2050 2055 * return to the user. Otherwise, pretend there was no error
2051 2056 * and close the link.
2052 2057 */
2053 2058 if (error) {
2054 2059 if (flag & LINKCLOSE) {
2055 2060 cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2056 2061 "unlink ioctl, closing anyway (%d)\n", error);
2057 2062 } else {
2058 2063 link_rempassthru(passq);
2059 2064 mutex_enter(&stpdown->sd_lock);
2060 2065 stpdown->sd_flag &= ~STRPLUMB;
2061 2066 cv_broadcast(&stpdown->sd_monitor);
2062 2067 mutex_exit(&stpdown->sd_lock);
2063 2068 mutex_exit(&muxifier);
2064 2069 return (error);
2065 2070 }
2066 2071 }
2067 2072
2068 2073 mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2069 2074 fpdown = linkp->li_fpdown;
2070 2075 lbfree(linkp);
2071 2076
2072 2077 /*
2073 2078 * We go ahead and drop muxifier here--it's a nasty global lock that
2074 2079 	 * can slow others down. It's okay to do so since attempts to mlink()
2075 2080 	 * this stream will be stopped because STPLEX is still set in the stdata
2076 2081 * structure, and munlink() is stopped because mux_rmvedge() and
2077 2082 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2078 2083 * respectively. Note that we defer the closef() of fpdown until
2079 2084 * after we drop muxifier since strclose() can call munlinkall().
2080 2085 */
2081 2086 mutex_exit(&muxifier);
2082 2087
2083 2088 wrq = stpdown->sd_wrq;
2084 2089 rq = _RD(wrq);
2085 2090
2086 2091 /*
2087 2092 * Get rid of outstanding service procedure runs, before we make
2088 2093 * it a stream head, since a stream head doesn't have any service
2089 2094 * procedure.
2090 2095 */
2091 2096 disable_svc(rq);
2092 2097 wait_svc(rq);
2093 2098
2094 2099 /*
2095 2100 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2096 2101 	 * is queued up to be finished. The mux should take care that nothing
2097 2102 	 * is sent down to this queue. We should do this now, as we're going
2098 2103 	 * to block passyncq if it was unblocked.
2099 2104 */
2100 2105 if (wrq->q_flag & QPERMOD) {
2101 2106 syncq_t *sq = wrq->q_syncq;
2102 2107
2103 2108 mutex_enter(SQLOCK(sq));
2104 2109 while (wrq->q_sqflags & Q_SQQUEUED) {
2105 2110 sq->sq_flags |= SQ_WANTWAKEUP;
2106 2111 cv_wait(&sq->sq_wait, SQLOCK(sq));
2107 2112 }
2108 2113 mutex_exit(SQLOCK(sq));
2109 2114 }
2110 2115 passyncq = passq->q_syncq;
2111 2116 if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2112 2117
2113 2118 syncq_t *sq, *outer;
2114 2119
2115 2120 /*
2116 2121 * Messages could be flowing from underneath. We will
2117 2122 * block the read side of the passq. This would be
2118 2123 * sufficient for QPAIR and QPERQ muxes to ensure
2119 2124 * that no data is flowing up into this queue
2120 2125 * and hence no thread active in this instance of
2121 2126 * lower mux. But for QPERMOD and QMTOUTPERIM there
2122 2127 * could be messages on the inner and outer/inner
2123 2128 * syncqs respectively. We will wait for them to drain.
2124 2129 		 * Because passq is blocked, messages end up in the
2125 2130 		 * syncq, and qfill_syncq could possibly end up setting
2126 2131 		 * QFULL, which accesses rq->q_flag. Hence, we have to
2127 2132 		 * acquire the QLOCK in setq.
2128 2133 *
2129 2134 		 * XXX Messages can also flow from the top into this
2130 2135 		 * queue even though the unlink is over (e.g. some instance
2131 2136 		 * of putnext() called from the top that has still not
2132 2137 		 * accessed this queue, and also putq(lowerq)?).
2133 2138 		 * Solution: how about blocking the l_qtop queue?
2134 2139 		 * Do we really care about such pure D_MP muxes?
2135 2140 */
2136 2141
2137 2142 blocksq(passyncq, SQ_BLOCKED, 0);
2138 2143
2139 2144 sq = rq->q_syncq;
2140 2145 if ((outer = sq->sq_outer) != NULL) {
2141 2146
2142 2147 /*
2143 2148 			 * We just have to wait for the outer sq_count
2144 2149 			 * to drop to zero. As this does not prevent new
2145 2150 			 * messages from entering the outer perimeter, this
2146 2151 * is subject to starvation.
2147 2152 *
2148 2153 			 * NOTE: Because of blocksq above, messages could
2149 2154 * be in the inner syncq only because of some
2150 2155 * thread holding the outer perimeter exclusively.
2151 2156 * Hence it would be sufficient to wait for the
2152 2157 * exclusive holder of the outer perimeter to drain
2153 2158 * the inner and outer syncqs. But we will not depend
2154 2159 * on this feature and hence check the inner syncqs
2155 2160 * separately.
2156 2161 */
2157 2162 wait_syncq(outer);
2158 2163 }
2159 2164
2160 2165
2161 2166 /*
2162 2167 * There could be messages destined for
2163 2168 * this queue. Let the exclusive holder
2164 2169 * drain it.
2165 2170 */
2166 2171
2167 2172 wait_syncq(sq);
2168 2173 ASSERT((rq->q_flag & QPERMOD) ||
2169 2174 ((rq->q_syncq->sq_head == NULL) &&
2170 2175 (_WR(rq)->q_syncq->sq_head == NULL)));
2171 2176 }
2172 2177
2173 2178 /*
2174 2179 	 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special
2175 2180 	 * case as we don't disable its syncq or remove it from the syncq
2176 2181 	 * service list.
2177 2182 */
2178 2183 if (rq->q_flag & QPERMOD) {
2179 2184 syncq_t *sq = rq->q_syncq;
2180 2185
2181 2186 mutex_enter(SQLOCK(sq));
2182 2187 while (rq->q_sqflags & Q_SQQUEUED) {
2183 2188 sq->sq_flags |= SQ_WANTWAKEUP;
2184 2189 cv_wait(&sq->sq_wait, SQLOCK(sq));
2185 2190 }
2186 2191 mutex_exit(SQLOCK(sq));
2187 2192 }
2188 2193
2189 2194 /*
2190 2195 	 * flush_syncq changes state only when there are some messages to
2191 2196 	 * free, i.e. when it returns a non-zero value.
2192 2197 */
2193 2198 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2194 2199 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2195 2200
2196 2201 /*
2197 2202 * Nobody else should know about this queue now.
2198 2203 * If the mux did not process the messages before
2199 2204 * acking the I_UNLINK, free them now.
2200 2205 */
2201 2206
2202 2207 flushq(rq, FLUSHALL);
2203 2208 flushq(_WR(rq), FLUSHALL);
2204 2209
2205 2210 /*
2206 2211 * Convert the mux lower queue into a stream head queue.
2207 2212 * Turn off STPLEX before we turn on the stream by removing the passq.
2208 2213 */
2209 2214 rq->q_ptr = wrq->q_ptr = stpdown;
2210 2215 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2211 2216
2212 2217 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2213 2218 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2214 2219
2215 2220 enable_svc(rq);
2216 2221
2217 2222 /*
2218 2223 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2219 2224 	 * needs to be set to prevent reopen() of the stream - such a reopen may
2220 2225 	 * try to call the non-existent pass queue open routine and panic.
2221 2226 */
2222 2227 mutex_enter(&stpdown->sd_lock);
2223 2228 stpdown->sd_flag &= ~STPLEX;
2224 2229 mutex_exit(&stpdown->sd_lock);
2225 2230
2226 2231 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2227 2232 ((flag & LINKTYPEMASK) == LINKPERSIST));
2228 2233
2229 2234 /* clean up the layered driver linkages */
2230 2235 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2231 2236 ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2232 2237 } else {
2233 2238 ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2234 2239 }
2235 2240
2236 2241 link_rempassthru(passq);
2237 2242
2238 2243 /*
2239 2244 * Now all plumbing changes are finished and STRPLUMB is no
2240 2245 * longer needed.
2241 2246 */
2242 2247 mutex_enter(&stpdown->sd_lock);
2243 2248 stpdown->sd_flag &= ~STRPLUMB;
2244 2249 cv_broadcast(&stpdown->sd_monitor);
2245 2250 mutex_exit(&stpdown->sd_lock);
2246 2251
2247 2252 (void) closef(fpdown);
2248 2253 return (0);
2249 2254 }
2250 2255
2251 2256 /*
2252 2257 * Unlink all multiplexor links for which stp is the controlling stream.
2253 2258 * Return 0, or a non-zero errno on failure.
2254 2259 */
2255 2260 int
2256 2261 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
2257 2262 {
2258 2263 linkinfo_t *linkp;
2259 2264 int error = 0;
2260 2265
2261 2266 mutex_enter(&muxifier);
2262 2267 while (linkp = findlinks(stp, 0, flag, ss)) {
2263 2268 /*
2264 2269 * munlink() releases the muxifier lock.
2265 2270 */
2266 2271 if (error = munlink(stp, linkp, flag, crp, rvalp, ss))
2267 2272 return (error);
2268 2273 mutex_enter(&muxifier);
2269 2274 }
2270 2275 mutex_exit(&muxifier);
2271 2276 return (0);
2272 2277 }
2273 2278
2274 2279 /*
2275 2280 * A multiplexor link has been made. Add an
2276 2281 * edge to the directed graph.
2277 2282 */
2278 2283 void
2279 2284 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
2280 2285 {
2281 2286 struct mux_node *np;
2282 2287 struct mux_edge *ep;
2283 2288 major_t upmaj;
2284 2289 major_t lomaj;
2285 2290
2286 2291 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2287 2292 lomaj = getmajor(lostp->sd_vnode->v_rdev);
2288 2293 np = &ss->ss_mux_nodes[upmaj];
2289 2294 if (np->mn_outp) {
2290 2295 ep = np->mn_outp;
2291 2296 while (ep->me_nextp)
2292 2297 ep = ep->me_nextp;
2293 2298 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2294 2299 ep = ep->me_nextp;
2295 2300 } else {
2296 2301 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2297 2302 ep = np->mn_outp;
2298 2303 }
2299 2304 ep->me_nextp = NULL;
2300 2305 ep->me_muxid = muxid;
2301 2306 /*
2302 2307 * Save the dev_t for the purposes of str_stack_shutdown.
2303 2308 * str_stack_shutdown assumes that the device allows reopen, since
2304 2309 * this dev_t is the one after any cloning by xx_open().
2305 2310 	 * We would prefer to find the dev_t from before any cloning,
2306 2311 	 * but specfs doesn't retain that.
2307 2312 */
2308 2313 ep->me_dev = upstp->sd_vnode->v_rdev;
2309 2314 if (lostp->sd_vnode->v_type == VFIFO)
2310 2315 ep->me_nodep = NULL;
2311 2316 else
2312 2317 ep->me_nodep = &ss->ss_mux_nodes[lomaj];
2313 2318 }
2314 2319
2315 2320 /*
2316 2321 * A multiplexor link has been removed. Remove the
2317 2322 * edge in the directed graph.
2318 2323 */
2319 2324 void
2320 2325 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
2321 2326 {
2322 2327 struct mux_node *np;
2323 2328 struct mux_edge *ep;
2324 2329 struct mux_edge *pep = NULL;
2325 2330 major_t upmaj;
2326 2331
2327 2332 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2328 2333 np = &ss->ss_mux_nodes[upmaj];
2329 2334 ASSERT(np->mn_outp != NULL);
2330 2335 ep = np->mn_outp;
2331 2336 while (ep) {
2332 2337 if (ep->me_muxid == muxid) {
2333 2338 if (pep)
2334 2339 pep->me_nextp = ep->me_nextp;
2335 2340 else
2336 2341 np->mn_outp = ep->me_nextp;
2337 2342 kmem_free(ep, sizeof (struct mux_edge));
2338 2343 return;
2339 2344 }
2340 2345 pep = ep;
2341 2346 ep = ep->me_nextp;
2342 2347 }
2343 2348 ASSERT(0); /* should not reach here */
2344 2349 }
2345 2350
2346 2351 /*
2347 2352 * Translate the device flags (from conf.h) to the corresponding
2348 2353  * qflag and sqtype values.
2349 2354 */
2350 2355 int
2351 2356 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2352 2357 uint32_t *sqtypep)
2353 2358 {
2354 2359 uint32_t qflag = 0;
2355 2360 uint32_t sqtype = 0;
2356 2361
2357 2362 if (devflag & _D_OLD)
2358 2363 goto bad;
2359 2364
2360 2365 /* Inner perimeter presence and scope */
2361 2366 switch (devflag & D_MTINNER_MASK) {
2362 2367 case D_MP:
2363 2368 qflag |= QMTSAFE;
2364 2369 sqtype |= SQ_CI;
2365 2370 break;
2366 2371 case D_MTPERQ|D_MP:
2367 2372 qflag |= QPERQ;
2368 2373 break;
2369 2374 case D_MTQPAIR|D_MP:
2370 2375 qflag |= QPAIR;
2371 2376 break;
2372 2377 case D_MTPERMOD|D_MP:
2373 2378 qflag |= QPERMOD;
2374 2379 break;
2375 2380 default:
2376 2381 goto bad;
2377 2382 }
2378 2383
2379 2384 /* Outer perimeter */
2380 2385 if (devflag & D_MTOUTPERIM) {
2381 2386 switch (devflag & D_MTINNER_MASK) {
2382 2387 case D_MP:
2383 2388 case D_MTPERQ|D_MP:
2384 2389 case D_MTQPAIR|D_MP:
2385 2390 break;
2386 2391 default:
2387 2392 goto bad;
2388 2393 }
2389 2394 qflag |= QMTOUTPERIM;
2390 2395 }
2391 2396
2392 2397 /* Inner perimeter modifiers */
2393 2398 if (devflag & D_MTINNER_MOD) {
2394 2399 switch (devflag & D_MTINNER_MASK) {
2395 2400 case D_MP:
2396 2401 goto bad;
2397 2402 default:
2398 2403 break;
2399 2404 }
2400 2405 if (devflag & D_MTPUTSHARED)
2401 2406 sqtype |= SQ_CIPUT;
2402 2407 if (devflag & _D_MTOCSHARED) {
2403 2408 /*
2404 2409 * The code in putnext assumes that it has the
2405 2410 * highest concurrency by not checking sq_count.
2406 2411 * Thus _D_MTOCSHARED can only be supported when
2407 2412 * D_MTPUTSHARED is set.
2408 2413 */
2409 2414 if (!(devflag & D_MTPUTSHARED))
2410 2415 goto bad;
2411 2416 sqtype |= SQ_CIOC;
2412 2417 }
2413 2418 if (devflag & _D_MTCBSHARED) {
2414 2419 /*
2415 2420 * The code in putnext assumes that it has the
2416 2421 * highest concurrency by not checking sq_count.
2417 2422 * Thus _D_MTCBSHARED can only be supported when
2418 2423 * D_MTPUTSHARED is set.
2419 2424 */
2420 2425 if (!(devflag & D_MTPUTSHARED))
2421 2426 goto bad;
2422 2427 sqtype |= SQ_CICB;
2423 2428 }
2424 2429 if (devflag & _D_MTSVCSHARED) {
2425 2430 /*
2426 2431 * The code in putnext assumes that it has the
2427 2432 * highest concurrency by not checking sq_count.
2428 2433 * Thus _D_MTSVCSHARED can only be supported when
2429 2434 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2430 2435 * supported only for QPERMOD.
2431 2436 */
2432 2437 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2433 2438 goto bad;
2434 2439 sqtype |= SQ_CISVC;
2435 2440 }
2436 2441 }
2437 2442
2438 2443 /* Default outer perimeter concurrency */
2439 2444 sqtype |= SQ_CO;
2440 2445
2441 2446 /* Outer perimeter modifiers */
2442 2447 if (devflag & D_MTOCEXCL) {
2443 2448 if (!(devflag & D_MTOUTPERIM)) {
2444 2449 /* No outer perimeter */
2445 2450 goto bad;
2446 2451 }
2447 2452 sqtype &= ~SQ_COOC;
2448 2453 }
2449 2454
2450 2455 /* Synchronous Streams extended qinit structure */
2451 2456 if (devflag & D_SYNCSTR)
2452 2457 qflag |= QSYNCSTR;
2453 2458
2454 2459 /*
2455 2460 * Private flag used by a transport module to indicate
2456 2461 * to sockfs that it supports direct-access mode without
2457 2462 * having to go through STREAMS.
2458 2463 */
2459 2464 if (devflag & _D_DIRECT) {
2460 2465 /* Reject unless the module is fully-MT (no perimeter) */
2461 2466 if ((qflag & QMT_TYPEMASK) != QMTSAFE)
2462 2467 goto bad;
2463 2468 qflag |= _QDIRECT;
2464 2469 }
2465 2470
2466 2471 *qflagp = qflag;
2467 2472 *sqtypep = sqtype;
2468 2473 return (0);
2469 2474
2470 2475 bad:
2471 2476 cmn_err(CE_WARN,
2472 2477 "stropen: bad MT flags (0x%x) in driver '%s'",
2473 2478 (int)(qflag & D_MTSAFETY_MASK),
2474 2479 stp->st_rdinit->qi_minfo->mi_idname);
2475 2480
2476 2481 return (EINVAL);
2477 2482 }
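
/*
 * A worked example of the mapping (a sketch, not part of the source):
 * a driver declaring D_MP|D_MTPERMOD|D_MTPUTSHARED should come out
 * with a per-module inner perimeter whose put procedures may run
 * concurrently, plus the default outer concurrency.  Here 'str'
 * stands for that driver's streamtab.
 */
	uint32_t qflag, sqtype;

	if (devflg_to_qflag(str, D_MP | D_MTPERMOD | D_MTPUTSHARED,
	    &qflag, &sqtype) == 0) {
		ASSERT(qflag == QPERMOD);
		ASSERT(sqtype == (SQ_CIPUT | SQ_CO));
	}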
2478 2483
2479 2484 /*
2480 2485 * Set the interface values for a pair of queues (qinit structure,
2481 2486 * packet sizes, water marks).
2482 2487 * setq assumes that the caller does not have a claim (entersq or claimq)
2483 2488 * on the queue.
2484 2489 */
2485 2490 void
2486 2491 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2487 2492 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2488 2493 {
2489 2494 queue_t *wq;
2490 2495 syncq_t *sq, *outer;
2491 2496
2492 2497 ASSERT(rq->q_flag & QREADR);
2493 2498 ASSERT((qflag & QMT_TYPEMASK) != 0);
2494 2499 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2495 2500
2496 2501 wq = _WR(rq);
2497 2502 rq->q_qinfo = rinit;
2498 2503 rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2499 2504 rq->q_lowat = rinit->qi_minfo->mi_lowat;
2500 2505 rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2501 2506 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2502 2507 wq->q_qinfo = winit;
2503 2508 wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2504 2509 wq->q_lowat = winit->qi_minfo->mi_lowat;
2505 2510 wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2506 2511 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2507 2512
2508 2513 /* Remove old syncqs */
2509 2514 sq = rq->q_syncq;
2510 2515 outer = sq->sq_outer;
2511 2516 if (outer != NULL) {
2512 2517 ASSERT(wq->q_syncq->sq_outer == outer);
2513 2518 outer_remove(outer, rq->q_syncq);
2514 2519 if (wq->q_syncq != rq->q_syncq)
2515 2520 outer_remove(outer, wq->q_syncq);
2516 2521 }
2517 2522 ASSERT(sq->sq_outer == NULL);
2518 2523 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2519 2524
2520 2525 if (sq != SQ(rq)) {
2521 2526 if (!(rq->q_flag & QPERMOD))
2522 2527 free_syncq(sq);
2523 2528 if (wq->q_syncq == rq->q_syncq)
2524 2529 wq->q_syncq = NULL;
2525 2530 rq->q_syncq = NULL;
2526 2531 }
2527 2532 if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2528 2533 wq->q_syncq != SQ(rq)) {
2529 2534 free_syncq(wq->q_syncq);
2530 2535 wq->q_syncq = NULL;
2531 2536 }
2532 2537 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2533 2538 rq->q_syncq->sq_tail == NULL));
2534 2539 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2535 2540 wq->q_syncq->sq_tail == NULL));
2536 2541
2537 2542 if (!(rq->q_flag & QPERMOD) &&
2538 2543 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2539 2544 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2540 2545 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2541 2546 rq->q_syncq->sq_nciputctrl, 0);
2542 2547 ASSERT(ciputctrl_cache != NULL);
2543 2548 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2544 2549 rq->q_syncq->sq_ciputctrl = NULL;
2545 2550 rq->q_syncq->sq_nciputctrl = 0;
2546 2551 }
2547 2552
2548 2553 if (!(wq->q_flag & QPERMOD) &&
2549 2554 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2550 2555 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2551 2556 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2552 2557 wq->q_syncq->sq_nciputctrl, 0);
2553 2558 ASSERT(ciputctrl_cache != NULL);
2554 2559 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2555 2560 wq->q_syncq->sq_ciputctrl = NULL;
2556 2561 wq->q_syncq->sq_nciputctrl = 0;
2557 2562 }
2558 2563
2559 2564 sq = SQ(rq);
2560 2565 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2561 2566 ASSERT(sq->sq_outer == NULL);
2562 2567 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2563 2568
2564 2569 /*
2565 2570 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2566 2571 	 * bits in sq_flags based on the sqtype.
2567 2572 */
2568 2573 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2569 2574
2570 2575 rq->q_syncq = wq->q_syncq = sq;
2571 2576 sq->sq_type = sqtype;
2572 2577 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2573 2578
2574 2579 /*
2575 2580 	 * We are making sq_svcflags zero,
2576 2581 	 * resetting SQ_DISABLED in case it was set by
2577 2582 	 * wait_svc() in the munlink path.
2579 2584 */
2580 2585 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2581 2586 sq->sq_svcflags = 0;
2582 2587
2583 2588 /*
2584 2589 * We need to acquire the lock here for the mlink and munlink case,
2585 2590 * where canputnext, backenable, etc can access the q_flag.
2586 2591 */
2587 2592 if (lock_needed) {
2588 2593 mutex_enter(QLOCK(rq));
2589 2594 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2590 2595 mutex_exit(QLOCK(rq));
2591 2596 mutex_enter(QLOCK(wq));
2592 2597 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2593 2598 mutex_exit(QLOCK(wq));
2594 2599 } else {
2595 2600 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2596 2601 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2597 2602 }
2598 2603
2599 2604 if (qflag & QPERQ) {
2600 2605 /* Allocate a separate syncq for the write side */
2601 2606 sq = new_syncq();
2602 2607 sq->sq_type = rq->q_syncq->sq_type;
2603 2608 sq->sq_flags = rq->q_syncq->sq_flags;
2604 2609 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2605 2610 sq->sq_oprev == NULL);
2606 2611 wq->q_syncq = sq;
2607 2612 }
2608 2613 if (qflag & QPERMOD) {
2609 2614 sq = dmp->dm_sq;
2610 2615
2611 2616 /*
2612 2617 * Assert that we do have an inner perimeter syncq and that it
2613 2618 * does not have an outer perimeter associated with it.
2614 2619 */
2615 2620 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2616 2621 sq->sq_oprev == NULL);
2617 2622 rq->q_syncq = wq->q_syncq = sq;
2618 2623 }
2619 2624 if (qflag & QMTOUTPERIM) {
2620 2625 outer = dmp->dm_sq;
2621 2626
2622 2627 ASSERT(outer->sq_outer == NULL);
2623 2628 outer_insert(outer, rq->q_syncq);
2624 2629 if (wq->q_syncq != rq->q_syncq)
2625 2630 outer_insert(outer, wq->q_syncq);
2626 2631 }
2627 2632 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2628 2633 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2629 2634 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2630 2635 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2631 2636 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2632 2637
2633 2638 /*
2634 2639 * Initialize struio() types.
2635 2640 */
2636 2641 rq->q_struiot =
2637 2642 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2638 2643 wq->q_struiot =
2639 2644 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2640 2645 }
2641 2646
2642 2647 perdm_t *
2643 2648 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2644 2649 {
2645 2650 syncq_t *sq;
2646 2651 perdm_t **pp;
2647 2652 perdm_t *p;
2648 2653 perdm_t *dmp;
2649 2654
2650 2655 ASSERT(str != NULL);
2651 2656 ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2652 2657
2653 2658 rw_enter(&perdm_rwlock, RW_READER);
2654 2659 for (p = perdm_list; p != NULL; p = p->dm_next) {
2655 2660 if (p->dm_str == str) { /* found one */
2656 2661 atomic_inc_32(&(p->dm_ref));
2657 2662 rw_exit(&perdm_rwlock);
2658 2663 return (p);
2659 2664 }
2660 2665 }
2661 2666 rw_exit(&perdm_rwlock);
2662 2667
2663 2668 sq = new_syncq();
2664 2669 if (qflag & QPERMOD) {
2665 2670 sq->sq_type = sqtype | SQ_PERMOD;
2666 2671 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2667 2672 } else {
2668 2673 ASSERT(qflag & QMTOUTPERIM);
2669 2674 sq->sq_onext = sq->sq_oprev = sq;
2670 2675 }
2671 2676
2672 2677 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2673 2678 dmp->dm_sq = sq;
2674 2679 dmp->dm_str = str;
2675 2680 dmp->dm_ref = 1;
2676 2681 dmp->dm_next = NULL;
2677 2682
2678 2683 rw_enter(&perdm_rwlock, RW_WRITER);
2679 2684 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2680 2685 if (p->dm_str == str) { /* already present */
2681 2686 p->dm_ref++;
2682 2687 rw_exit(&perdm_rwlock);
2683 2688 free_syncq(sq);
2684 2689 kmem_free(dmp, sizeof (perdm_t));
2685 2690 return (p);
2686 2691 }
2687 2692 }
2688 2693
2689 2694 *pp = dmp;
2690 2695 rw_exit(&perdm_rwlock);
2691 2696 return (dmp);
2692 2697 }
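
/*
 * hold_dm() is an instance of a common idiom: an optimistic lookup
 * under the reader lock, an unlocked (possibly sleeping) allocation,
 * then a second lookup under the writer lock so that a racing
 * inserter wins and the loser frees its copy.  A condensed sketch of
 * the shape, with every name hypothetical:
 */
	rw_enter(&table_lock, RW_READER);
	if ((p = lookup(key)) != NULL) {	/* fast path: take a ref */
		ref_hold(p);
		rw_exit(&table_lock);
		return (p);
	}
	rw_exit(&table_lock);

	newp = construct(key);			/* may sleep; no locks held */

	rw_enter(&table_lock, RW_WRITER);
	if ((p = lookup(key)) != NULL) {	/* somebody beat us to it */
		ref_hold(p);
		rw_exit(&table_lock);
		destroy(newp);
		return (p);
	}
	insert(newp);
	rw_exit(&table_lock);
	return (newp);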
2693 2698
2694 2699 void
2695 2700 rele_dm(perdm_t *dmp)
2696 2701 {
2697 2702 perdm_t **pp;
2698 2703 perdm_t *p;
2699 2704
2700 2705 rw_enter(&perdm_rwlock, RW_WRITER);
2701 2706 ASSERT(dmp->dm_ref > 0);
2702 2707
2703 2708 if (--dmp->dm_ref > 0) {
2704 2709 rw_exit(&perdm_rwlock);
2705 2710 return;
2706 2711 }
2707 2712
2708 2713 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2709 2714 if (p == dmp)
2710 2715 break;
2711 2716 ASSERT(p == dmp);
2712 2717 *pp = p->dm_next;
2713 2718 rw_exit(&perdm_rwlock);
2714 2719
2715 2720 /*
2716 2721 * Wait for any background processing that relies on the
2717 2722 * syncq to complete before it is freed.
2718 2723 */
2719 2724 wait_sq_svc(p->dm_sq);
2720 2725 free_syncq(p->dm_sq);
2721 2726 kmem_free(p, sizeof (perdm_t));
2722 2727 }
2723 2728
2724 2729 /*
2725 2730 * Make a protocol message given control and data buffers.
2726 2731 * n.b., this can block; be careful of what locks you hold when calling it.
2727 2732 *
2728 2733  * If sd_maxblk is less than *iosize this routine can fail part way through
2729 2734  * (due to an allocation failure). In that case, on return *iosize contains
2730 2735  * the amount that was consumed. Otherwise *iosize is not modified, i.e.
2731 2736  * it still equals the amount that was consumed (everything requested).
2732 2737 */
2733 2738 int
2734 2739 strmakemsg(
2735 2740 struct strbuf *mctl,
2736 2741 ssize_t *iosize,
2737 2742 struct uio *uiop,
2738 2743 stdata_t *stp,
2739 2744 int32_t flag,
2740 2745 mblk_t **mpp)
2741 2746 {
2742 2747 mblk_t *mpctl = NULL;
2743 2748 mblk_t *mpdata = NULL;
2744 2749 int error;
2745 2750
2746 2751 ASSERT(uiop != NULL);
2747 2752
2748 2753 *mpp = NULL;
2749 2754 /* Create control part, if any */
2750 2755 if ((mctl != NULL) && (mctl->len >= 0)) {
2751 2756 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2752 2757 if (error)
2753 2758 return (error);
2754 2759 }
2755 2760 /* Create data part, if any */
2756 2761 if (*iosize >= 0) {
2757 2762 error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2758 2763 if (error) {
2759 2764 freemsg(mpctl);
2760 2765 return (error);
2761 2766 }
2762 2767 }
2763 2768 if (mpctl != NULL) {
2764 2769 if (mpdata != NULL)
2765 2770 linkb(mpctl, mpdata);
2766 2771 *mpp = mpctl;
2767 2772 } else {
2768 2773 *mpp = mpdata;
2769 2774 }
2770 2775 return (0);
2771 2776 }
2772 2777
2773 2778 /*
2774 2779 * Make the control part of a protocol message given a control buffer.
2775 2780 * n.b., this can block; be careful of what locks you hold when calling it.
2776 2781 */
2777 2782 int
2778 2783 strmakectl(
2779 2784 struct strbuf *mctl,
2780 2785 int32_t flag,
2781 2786 int32_t fflag,
2782 2787 mblk_t **mpp)
2783 2788 {
2784 2789 mblk_t *bp = NULL;
2785 2790 unsigned char msgtype;
2786 2791 int error = 0;
2787 2792 cred_t *cr = CRED();
2788 2793
2789 2794 /* We do not support interrupt threads using the stream head to send */
2790 2795 ASSERT(cr != NULL);
2791 2796
2792 2797 *mpp = NULL;
2793 2798 /*
2794 2799 * Create control part of message, if any.
2795 2800 */
2796 2801 if ((mctl != NULL) && (mctl->len >= 0)) {
2797 2802 caddr_t base;
2798 2803 int ctlcount;
2799 2804 int allocsz;
2800 2805
2801 2806 if (flag & RS_HIPRI)
2802 2807 msgtype = M_PCPROTO;
2803 2808 else
2804 2809 msgtype = M_PROTO;
2805 2810
2806 2811 ctlcount = mctl->len;
2807 2812 base = mctl->buf;
2808 2813
2809 2814 /*
2810 2815 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2811 2816 * blocks by increasing the size to something more usable.
2812 2817 */
2813 2818 allocsz = MAX(ctlcount, 64);
2814 2819
2815 2820 /*
2816 2821 * Range checking has already been done; simply try
2817 2822 * to allocate a message block for the ctl part.
2818 2823 */
2819 2824 while ((bp = allocb_cred(allocsz, cr,
2820 2825 curproc->p_pid)) == NULL) {
2821 2826 if (fflag & (FNDELAY|FNONBLOCK))
2822 2827 return (EAGAIN);
2823 2828 if (error = strwaitbuf(allocsz, BPRI_MED))
2824 2829 return (error);
2825 2830 }
2826 2831
2827 2832 bp->b_datap->db_type = msgtype;
2828 2833 if (copyin(base, bp->b_wptr, ctlcount)) {
2829 2834 freeb(bp);
2830 2835 return (EFAULT);
2831 2836 }
2832 2837 bp->b_wptr += ctlcount;
2833 2838 }
2834 2839 *mpp = bp;
2835 2840 return (0);
2836 2841 }
2837 2842
2838 2843 /*
2839 2844 * Make a protocol message given data buffers.
2840 2845 * n.b., this can block; be careful of what locks you hold when calling it.
2841 2846 *
2842 2847  * If sd_maxblk is less than *iosize this routine can fail part way through
2843 2848  * (due to an allocation failure). In that case, on return *iosize contains
2844 2849  * the amount that was consumed. Otherwise *iosize is not modified, i.e.
2845 2850  * it still equals the amount that was consumed (everything requested).
2846 2851 */
2847 2852 int
2848 2853 strmakedata(
2849 2854 ssize_t *iosize,
2850 2855 struct uio *uiop,
2851 2856 stdata_t *stp,
2852 2857 int32_t flag,
2853 2858 mblk_t **mpp)
2854 2859 {
2855 2860 mblk_t *mp = NULL;
2856 2861 mblk_t *bp;
2857 2862 int wroff = (int)stp->sd_wroff;
2858 2863 int tail_len = (int)stp->sd_tail;
2859 2864 int extra = wroff + tail_len;
2860 2865 int error = 0;
2861 2866 ssize_t maxblk;
2862 2867 ssize_t count = *iosize;
2863 2868 cred_t *cr;
2864 2869
2865 2870 *mpp = NULL;
2866 2871 if (count < 0)
2867 2872 return (0);
2868 2873
2869 2874 /* We do not support interrupt threads using the stream head to send */
2870 2875 cr = CRED();
2871 2876 ASSERT(cr != NULL);
2872 2877
2873 2878 maxblk = stp->sd_maxblk;
2874 2879 if (maxblk == INFPSZ)
2875 2880 maxblk = count;
2876 2881
2877 2882 /*
2878 2883 * Create data part of message, if any.
2879 2884 */
2880 2885 do {
2881 2886 ssize_t size;
2882 2887 dblk_t *dp;
2883 2888
2884 2889 ASSERT(uiop);
2885 2890
2886 2891 size = MIN(count, maxblk);
2887 2892
2888 2893 while ((bp = allocb_cred(size + extra, cr,
2889 2894 curproc->p_pid)) == NULL) {
2890 2895 error = EAGAIN;
2891 2896 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2892 2897 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
2893 2898 if (count == *iosize) {
2894 2899 freemsg(mp);
2895 2900 return (error);
2896 2901 } else {
2897 2902 *iosize -= count;
2898 2903 *mpp = mp;
2899 2904 return (0);
2900 2905 }
2901 2906 }
2902 2907 }
2903 2908 dp = bp->b_datap;
2904 2909 dp->db_cpid = curproc->p_pid;
2905 2910 ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2906 2911 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2907 2912
2908 2913 if (flag & STRUIO_POSTPONE) {
2909 2914 /*
2910 2915 			 * Set up the stream uio portion of the
2911 2916 * dblk for subsequent use by struioget().
2912 2917 */
2913 2918 dp->db_struioflag = STRUIO_SPEC;
2914 2919 dp->db_cksumstart = 0;
2915 2920 dp->db_cksumstuff = 0;
2916 2921 dp->db_cksumend = size;
2917 2922 *(long long *)dp->db_struioun.data = 0ll;
2918 2923 bp->b_wptr += size;
2919 2924 } else {
2920 2925 if (stp->sd_copyflag & STRCOPYCACHED)
2921 2926 uiop->uio_extflg |= UIO_COPY_CACHED;
2922 2927
2923 2928 if (size != 0) {
2924 2929 error = uiomove(bp->b_wptr, size, UIO_WRITE,
2925 2930 uiop);
2926 2931 if (error != 0) {
2927 2932 freeb(bp);
2928 2933 freemsg(mp);
2929 2934 return (error);
2930 2935 }
2931 2936 }
2932 2937 bp->b_wptr += size;
2933 2938
2934 2939 if (stp->sd_wputdatafunc != NULL) {
2935 2940 mblk_t *newbp;
2936 2941
2937 2942 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
2938 2943 bp, NULL, NULL, NULL, NULL);
2939 2944 if (newbp == NULL) {
2940 2945 freeb(bp);
2941 2946 freemsg(mp);
2942 2947 return (ECOMM);
2943 2948 }
2944 2949 bp = newbp;
2945 2950 }
2946 2951 }
2947 2952
2948 2953 count -= size;
2949 2954
2950 2955 if (mp == NULL)
2951 2956 mp = bp;
2952 2957 else
2953 2958 linkb(mp, bp);
2954 2959 } while (count > 0);
2955 2960
2956 2961 *mpp = mp;
2957 2962 return (0);
2958 2963 }
2959 2964
2960 2965 /*
2961 2966  * Wait for a buffer to become available. Return a non-zero errno
2962 2967  * if unable to wait, or 0 if a buffer is probably available.
2963 2968 */
2964 2969 int
2965 2970 strwaitbuf(size_t size, int pri)
2966 2971 {
2967 2972 bufcall_id_t id;
2968 2973
2969 2974 mutex_enter(&bcall_monitor);
2970 2975 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2971 2976 &ttoproc(curthread)->p_flag_cv)) == 0) {
2972 2977 mutex_exit(&bcall_monitor);
2973 2978 return (ENOSR);
2974 2979 }
2975 2980 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
2976 2981 unbufcall(id);
2977 2982 mutex_exit(&bcall_monitor);
2978 2983 return (EINTR);
2979 2984 }
2980 2985 unbufcall(id);
2981 2986 mutex_exit(&bcall_monitor);
2982 2987 return (0);
2983 2988 }
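
/*
 * Typical caller pattern (cf. the loop in strmakectl() above): retry
 * a failed allocb() once strwaitbuf() reports that a buffer of that
 * size is probably available, bailing out early for non-blocking
 * I/O.  A sketch; 'sz' and 'fmode' stand for the caller's state.
 */
	mblk_t *bp;
	int error;

	while ((bp = allocb(sz, BPRI_MED)) == NULL) {
		if (fmode & (FNDELAY|FNONBLOCK))
			return (EAGAIN);
		if ((error = strwaitbuf(sz, BPRI_MED)) != 0)
			return (error);
	}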
2984 2989
2985 2990 /*
2986 2991 * This function waits for a read or write event to happen on a stream.
2987 2992 * fmode can specify FNDELAY and/or FNONBLOCK.
2988 2993 * The timeout is in ms with -1 meaning infinite.
2989 2994 * The flag values work as follows:
2990 2995 * READWAIT Check for read side errors, send M_READ
2991 2996 * GETWAIT Check for read side errors, no M_READ
2992 2997 * WRITEWAIT Check for write side errors.
2993 2998 * NOINTR Do not return error if nonblocking or timeout.
2994 2999 * STR_NOERROR Ignore all errors except STPLEX.
2995 3000 * STR_NOSIG Ignore/hold signals during the duration of the call.
2996 3001  * STR_PEEK	Passed through to strgeterr().
2997 3002 */
2998 3003 int
2999 3004 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
3000 3005 int *done)
3001 3006 {
3002 3007 int slpflg, errs;
3003 3008 int error;
3004 3009 kcondvar_t *sleepon;
3005 3010 mblk_t *mp;
3006 3011 ssize_t *rd_count;
3007 3012 clock_t rval;
3008 3013
3009 3014 ASSERT(MUTEX_HELD(&stp->sd_lock));
3010 3015 if ((flag & READWAIT) || (flag & GETWAIT)) {
3011 3016 slpflg = RSLEEP;
3012 3017 sleepon = &_RD(stp->sd_wrq)->q_wait;
3013 3018 errs = STRDERR|STPLEX;
3014 3019 } else {
3015 3020 slpflg = WSLEEP;
3016 3021 sleepon = &stp->sd_wrq->q_wait;
3017 3022 errs = STWRERR|STRHUP|STPLEX;
3018 3023 }
3019 3024 if (flag & STR_NOERROR)
3020 3025 errs = STPLEX;
3021 3026
3022 3027 if (stp->sd_wakeq & slpflg) {
3023 3028 /*
3024 3029 * A strwakeq() is pending, no need to sleep.
3025 3030 */
3026 3031 stp->sd_wakeq &= ~slpflg;
3027 3032 *done = 0;
3028 3033 return (0);
3029 3034 }
3030 3035
3031 3036 if (stp->sd_flag & errs) {
3032 3037 /*
3033 3038 * Check for errors before going to sleep since the
3034 3039 * caller might not have checked this while holding
3035 3040 * sd_lock.
3036 3041 */
3037 3042 error = strgeterr(stp, errs, (flag & STR_PEEK));
3038 3043 if (error != 0) {
3039 3044 *done = 1;
3040 3045 return (error);
3041 3046 }
3042 3047 }
3043 3048
3044 3049 /*
3045 3050 * If any module downstream has requested read notification
3046 3051 	 * by setting the SNDMREAD flag using M_SETOPTS, send a message
3047 3052 	 * downstream.
3048 3053 */
3049 3054 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3050 3055 mutex_exit(&stp->sd_lock);
3051 3056 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3052 3057 (flag & STR_NOSIG), &error))) {
3053 3058 mutex_enter(&stp->sd_lock);
3054 3059 *done = 1;
3055 3060 return (error);
3056 3061 }
3057 3062 mp->b_datap->db_type = M_READ;
3058 3063 rd_count = (ssize_t *)mp->b_wptr;
3059 3064 *rd_count = count;
3060 3065 mp->b_wptr += sizeof (ssize_t);
3061 3066 /*
3062 3067 * Send the number of bytes requested by the
3063 3068 * read as the argument to M_READ.
3064 3069 */
3065 3070 stream_willservice(stp);
3066 3071 putnext(stp->sd_wrq, mp);
3067 3072 stream_runservice(stp);
3068 3073 mutex_enter(&stp->sd_lock);
3069 3074
3070 3075 /*
3071 3076 * If any data arrived due to inline processing
3072 3077 * of putnext(), don't sleep.
3073 3078 */
3074 3079 if (_RD(stp->sd_wrq)->q_first != NULL) {
3075 3080 *done = 0;
3076 3081 return (0);
3077 3082 }
3078 3083 }
3079 3084
3080 3085 if (fmode & (FNDELAY|FNONBLOCK)) {
3081 3086 if (!(flag & NOINTR))
3082 3087 error = EAGAIN;
3083 3088 else
3084 3089 error = 0;
3085 3090 *done = 1;
3086 3091 return (error);
3087 3092 }
3088 3093
3089 3094 stp->sd_flag |= slpflg;
3090 3095 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3091 3096 "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3092 3097 stp, flag, count, fmode, done);
3093 3098
3094 3099 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3095 3100 if (rval > 0) {
3096 3101 /* EMPTY */
3097 3102 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3098 3103 "strwaitq awakes(2):%X, %X, %X, %X, %X",
3099 3104 stp, flag, count, fmode, done);
3100 3105 } else if (rval == 0) {
3101 3106 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3102 3107 "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3103 3108 stp, flag, count, fmode, done);
3104 3109 stp->sd_flag &= ~slpflg;
3105 3110 cv_broadcast(sleepon);
3106 3111 if (!(flag & NOINTR))
3107 3112 error = EINTR;
3108 3113 else
3109 3114 error = 0;
3110 3115 *done = 1;
3111 3116 return (error);
3112 3117 } else {
3113 3118 /* timeout */
3114 3119 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3115 3120 "strwaitq timeout:%p, %X, %lX, %X, %p",
3116 3121 stp, flag, count, fmode, done);
3117 3122 *done = 1;
3118 3123 if (!(flag & NOINTR))
3119 3124 return (ETIME);
3120 3125 else
3121 3126 return (0);
3122 3127 }
3123 3128 /*
3124 3129 * If the caller implements delayed errors (i.e. queued after data)
3125 3130 	 * we cannot check for errors here since data as well as an
3126 3131 * error might have arrived at the stream head. We return to
3127 3132 * have the caller check the read queue before checking for errors.
3128 3133 */
3129 3134 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3130 3135 error = strgeterr(stp, errs, (flag & STR_PEEK));
3131 3136 if (error != 0) {
3132 3137 *done = 1;
3133 3138 return (error);
3134 3139 }
3135 3140 }
3136 3141 *done = 0;
3137 3142 return (0);
3138 3143 }
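
/*
 * Sketch of the canonical caller loop: with sd_lock held, sleep in
 * strwaitq() until data shows up or *done says to stop (error,
 * non-blocking mode, or timeout).  'fmode' is a placeholder; the
 * real read-side consumers of this routine live in streamio.c.
 */
	int error = 0, done = 0;

	mutex_enter(&stp->sd_lock);
	while (_RD(stp->sd_wrq)->q_first == NULL) {
		error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1, &done);
		if (error != 0 || done)
			break;
	}
	mutex_exit(&stp->sd_lock);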
3139 3144
3140 3145 /*
3141 3146 * Perform job control discipline access checks.
3142 3147  * Return 0 for success and an errno on failure.
3143 3148 */
3144 3149
3145 3150 #define cantsend(p, t, sig) \
3146 3151 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3147 3152
3148 3153 int
3149 3154 straccess(struct stdata *stp, enum jcaccess mode)
3150 3155 {
3151 3156 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */
3152 3157 kthread_t *t = curthread;
3153 3158 proc_t *p = ttoproc(t);
3154 3159 sess_t *sp;
3155 3160
3156 3161 ASSERT(mutex_owned(&stp->sd_lock));
3157 3162
3158 3163 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3159 3164 return (0);
3160 3165
3161 3166 mutex_enter(&p->p_lock); /* protects p_pgidp */
3162 3167
3163 3168 for (;;) {
3164 3169 mutex_enter(&p->p_splock); /* protects p->p_sessp */
3165 3170 sp = p->p_sessp;
3166 3171 mutex_enter(&sp->s_lock); /* protects sp->* */
3167 3172
3168 3173 /*
3169 3174 * If this is not the calling process's controlling terminal
3170 3175 * or if the calling process is already in the foreground
3171 3176 * then allow access.
3172 3177 */
3173 3178 if (sp->s_dev != stp->sd_vnode->v_rdev ||
3174 3179 p->p_pgidp == stp->sd_pgidp) {
3175 3180 mutex_exit(&sp->s_lock);
3176 3181 mutex_exit(&p->p_splock);
3177 3182 mutex_exit(&p->p_lock);
3178 3183 return (0);
3179 3184 }
3180 3185
3181 3186 /*
3182 3187 * Check to see if controlling terminal has been deallocated.
3183 3188 */
3184 3189 if (sp->s_vp == NULL) {
3185 3190 if (!cantsend(p, t, SIGHUP))
3186 3191 sigtoproc(p, t, SIGHUP);
3187 3192 mutex_exit(&sp->s_lock);
3188 3193 mutex_exit(&p->p_splock);
3189 3194 mutex_exit(&p->p_lock);
3190 3195 return (EIO);
3191 3196 }
3192 3197
3193 3198 mutex_exit(&sp->s_lock);
3194 3199 mutex_exit(&p->p_splock);
3195 3200
3196 3201 if (mode == JCGETP) {
3197 3202 mutex_exit(&p->p_lock);
3198 3203 return (0);
3199 3204 }
3200 3205
3201 3206 if (mode == JCREAD) {
3202 3207 if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3203 3208 mutex_exit(&p->p_lock);
3204 3209 return (EIO);
3205 3210 }
3206 3211 mutex_exit(&p->p_lock);
3207 3212 mutex_exit(&stp->sd_lock);
3208 3213 pgsignal(p->p_pgidp, SIGTTIN);
3209 3214 mutex_enter(&stp->sd_lock);
3210 3215 mutex_enter(&p->p_lock);
3211 3216 } else { /* mode == JCWRITE or JCSETP */
3212 3217 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3213 3218 cantsend(p, t, SIGTTOU)) {
3214 3219 mutex_exit(&p->p_lock);
3215 3220 return (0);
3216 3221 }
3217 3222 if (p->p_detached) {
3218 3223 mutex_exit(&p->p_lock);
3219 3224 return (EIO);
3220 3225 }
3221 3226 mutex_exit(&p->p_lock);
3222 3227 mutex_exit(&stp->sd_lock);
3223 3228 pgsignal(p->p_pgidp, SIGTTOU);
3224 3229 mutex_enter(&stp->sd_lock);
3225 3230 mutex_enter(&p->p_lock);
3226 3231 }
3227 3232
3228 3233 /*
3229 3234 * We call cv_wait_sig_swap() to cause the appropriate
3230 3235 		 * action for the job control signal to take place.
3231 3236 * If the signal is being caught, we will take the
3232 3237 * EINTR error return. Otherwise, the default action
3233 3238 * of causing the process to stop will take place.
3234 3239 * In this case, we rely on the periodic cv_broadcast() on
3235 3240 * &lbolt_cv to wake us up to loop around and test again.
3236 3241 * We can't get here if the signal is ignored or
3237 3242 * if the current thread is blocking the signal.
3238 3243 */
3239 3244 mutex_exit(&stp->sd_lock);
3240 3245 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3241 3246 mutex_exit(&p->p_lock);
3242 3247 mutex_enter(&stp->sd_lock);
3243 3248 return (EINTR);
3244 3249 }
3245 3250 mutex_exit(&p->p_lock);
3246 3251 mutex_enter(&stp->sd_lock);
3247 3252 mutex_enter(&p->p_lock);
3248 3253 }
3249 3254 }
3250 3255
3251 3256 /*
3252 3257 * Return size of message of block type (bp->b_datap->db_type)
3253 3258 */
3254 3259 size_t
3255 3260 xmsgsize(mblk_t *bp)
3256 3261 {
3257 3262 unsigned char type;
3258 3263 size_t count = 0;
3259 3264
3260 3265 type = bp->b_datap->db_type;
3261 3266
3262 3267 for (; bp; bp = bp->b_cont) {
3263 3268 if (type != bp->b_datap->db_type)
3264 3269 break;
3265 3270 ASSERT(bp->b_wptr >= bp->b_rptr);
3266 3271 count += bp->b_wptr - bp->b_rptr;
3267 3272 }
3268 3273 return (count);
3269 3274 }
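
/*
 * Worked example (sketch): build M_PROTO(8) -> M_PROTO(4) -> M_DATA(16)
 * and note that only the leading run of same-type blocks is counted.
 * Allocation-failure handling is elided; illustration only.
 */
	mblk_t *m1 = allocb(8, BPRI_MED);
	mblk_t *m2 = allocb(4, BPRI_MED);
	mblk_t *m3 = allocb(16, BPRI_MED);

	m1->b_datap->db_type = M_PROTO;
	m2->b_datap->db_type = M_PROTO;
	m3->b_datap->db_type = M_DATA;
	m1->b_wptr += 8;
	m2->b_wptr += 4;
	m3->b_wptr += 16;
	m1->b_cont = m2;
	m2->b_cont = m3;

	ASSERT(xmsgsize(m1) == 12);	/* stops at the M_DATA block */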
3270 3275
3271 3276 /*
3272 3277 * Allocate a stream head.
3273 3278 */
3274 3279 struct stdata *
3275 3280 shalloc(queue_t *qp)
3276 3281 {
3277 3282 stdata_t *stp;
3278 3283
3279 3284 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3280 3285
3281 3286 stp->sd_wrq = _WR(qp);
3282 3287 stp->sd_strtab = NULL;
3283 3288 stp->sd_iocid = 0;
3284 3289 stp->sd_mate = NULL;
3285 3290 stp->sd_freezer = NULL;
3286 3291 stp->sd_refcnt = 0;
3287 3292 stp->sd_wakeq = 0;
3288 3293 stp->sd_anchor = 0;
3289 3294 stp->sd_struiowrq = NULL;
3290 3295 stp->sd_struiordq = NULL;
3291 3296 stp->sd_struiodnak = 0;
3292 3297 stp->sd_struionak = NULL;
3293 3298 stp->sd_t_audit_data = NULL;
3294 3299 stp->sd_rput_opt = 0;
3295 3300 stp->sd_wput_opt = 0;
3296 3301 stp->sd_read_opt = 0;
3297 3302 stp->sd_rprotofunc = strrput_proto;
3298 3303 stp->sd_rmiscfunc = strrput_misc;
3299 3304 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3300 3305 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
3301 3306 stp->sd_ciputctrl = NULL;
3302 3307 stp->sd_nciputctrl = 0;
3303 3308 stp->sd_qhead = NULL;
3304 3309 stp->sd_qtail = NULL;
3305 3310 stp->sd_servid = NULL;
3306 3311 stp->sd_nqueues = 0;
3307 3312 stp->sd_svcflags = 0;
3308 3313 stp->sd_copyflag = 0;
3309 3314
3310 3315 return (stp);
3311 3316 }
3312 3317
3313 3318 /*
3314 3319 * Free a stream head.
3315 3320 */
3316 3321 void
3317 3322 shfree(stdata_t *stp)
3318 3323 {
3324 + pid_node_t *pn;
3325 +
3319 3326 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3320 3327
3321 3328 stp->sd_wrq = NULL;
3322 3329
3323 3330 mutex_enter(&stp->sd_qlock);
3324 3331 while (stp->sd_svcflags & STRS_SCHEDULED) {
3325 3332 STRSTAT(strwaits);
3326 3333 cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3327 3334 }
3328 3335 mutex_exit(&stp->sd_qlock);
3329 3336
3330 3337 if (stp->sd_ciputctrl != NULL) {
3331 3338 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3332 3339 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3333 3340 stp->sd_nciputctrl, 0);
3334 3341 ASSERT(ciputctrl_cache != NULL);
3335 3342 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3336 3343 stp->sd_ciputctrl = NULL;
3337 3344 stp->sd_nciputctrl = 0;
3338 3345 }
3339 3346 ASSERT(stp->sd_qhead == NULL);
3340 3347 ASSERT(stp->sd_qtail == NULL);
3341 3348 ASSERT(stp->sd_nqueues == 0);
3349 +
3350 + mutex_enter(&stp->sd_pid_list_lock);
3351 + while ((pn = list_head(&stp->sd_pid_list)) != NULL) {
3352 + list_remove(&stp->sd_pid_list, pn);
3353 + kmem_free(pn, sizeof (*pn));
3354 + }
3355 + mutex_exit(&stp->sd_pid_list_lock);
3356 +
3342 3357 kmem_cache_free(stream_head_cache, stp);
3358 +}
3359 +
3360 +void
3361 +sh_insert_pid(struct stdata *stp, pid_t pid)
3362 +{
3363 + pid_node_t *pn;
3364 +
3365 + mutex_enter(&stp->sd_pid_list_lock);
3366 + for (pn = list_head(&stp->sd_pid_list);
3367 + pn != NULL && pn->pn_pid != pid;
3368 + pn = list_next(&stp->sd_pid_list, pn))
3369 + ;
3370 +
3371 + if (pn != NULL) {
3372 + pn->pn_count++;
3373 + } else {
3374 + pn = kmem_zalloc(sizeof (*pn), KM_SLEEP);
3375 + list_link_init(&pn->pn_ref_link);
3376 + pn->pn_pid = pid;
3377 + pn->pn_count = 1;
3378 + list_insert_tail(&stp->sd_pid_list, pn);
3379 + }
3380 + mutex_exit(&stp->sd_pid_list_lock);
3381 +}
3382 +
3383 +void
3384 +sh_remove_pid(struct stdata *stp, pid_t pid)
3385 +{
3386 + pid_node_t *pn;
3387 +
3388 + mutex_enter(&stp->sd_pid_list_lock);
3389 + for (pn = list_head(&stp->sd_pid_list);
3390 + pn != NULL && pn->pn_pid != pid;
3391 + pn = list_next(&stp->sd_pid_list, pn))
3392 + ;
3393 +
3394 + if (pn != NULL) {
3395 + if (pn->pn_count > 1) {
3396 + pn->pn_count--;
3397 + } else {
3398 + list_remove(&stp->sd_pid_list, pn);
3399 + kmem_free(pn, sizeof (*pn));
3400 + }
3401 + }
3402 + mutex_exit(&stp->sd_pid_list_lock);
3403 +}
3404 +
3405 +mblk_t *
3406 +sh_get_pid_mblk(struct stdata *stp)
3407 +{
3408 + mblk_t *mblk;
3409 + int sz, n = 0;
3410 + pid_t *pids;
3411 + pid_node_t *pn;
3412 + conn_pid_info_t *cpi;
3413 +
3414 + mutex_enter(&stp->sd_pid_list_lock);
3415 +
3416 + n = list_numnodes(&stp->sd_pid_list);
3417 + sz = sizeof (conn_pid_info_t);
3418 + sz += (n > 1) ? ((n - 1) * sizeof (pid_t)) : 0;
3419 + if ((mblk = allocb(sz, BPRI_HI)) == NULL) {
3420 + mutex_exit(&stp->sd_pid_list_lock);
3421 + return (NULL);
3422 + }
3423 + mblk->b_wptr += sz;
3424 + cpi = (conn_pid_info_t *)mblk->b_datap->db_base;
3425 + cpi->cpi_magic = CONN_PID_INFO_MGC;
3426 + cpi->cpi_contents = CONN_PID_INFO_XTI;
3427 + cpi->cpi_pids_cnt = n;
3428 + cpi->cpi_tot_size = sz;
3429 + cpi->cpi_pids[0] = 0;
3430 +
3431 + if (cpi->cpi_pids_cnt > 0) {
3432 + pids = cpi->cpi_pids;
3433 + for (pn = list_head(&stp->sd_pid_list); pn != NULL;
3434 + pids++, pn = list_next(&stp->sd_pid_list, pn))
3435 + *pids = pn->pn_pid;
3436 + }
3437 + mutex_exit(&stp->sd_pid_list_lock);
3438 + return (mblk);
3343 3439 }
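A minimal sketch of how these helpers fit together; the wrapper names are hypothetical, while curproc is the standard kernel global for the current process:

/*
 * Sketch only (not part of this change): record one reference per open
 * and drop it on close. pn_count lets the same PID hold several opens
 * of the stream without duplicating list entries.
 */
static void
example_stream_open(struct stdata *stp)		/* hypothetical */
{
	sh_insert_pid(stp, curproc->p_pid);
}

static void
example_stream_close(struct stdata *stp)	/* hypothetical */
{
	sh_remove_pid(stp, curproc->p_pid);
}

A reporting path (such as the netstat support this change targets) would then call sh_get_pid_mblk(stp) and send the resulting conn_pid_info_t mblk upstream; callers must handle a NULL return, since allocb() can fail.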
3344 3440
3345 3441 /*
3346 3442 * Allocate a pair of queues and a syncq for the pair
3347 3443 */
3348 3444 queue_t *
3349 3445 allocq(void)
3350 3446 {
3351 3447 queinfo_t *qip;
3352 3448 queue_t *qp, *wqp;
3353 3449 syncq_t *sq;
3354 3450
3355 3451 qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3356 3452
3357 3453 qp = &qip->qu_rqueue;
3358 3454 wqp = &qip->qu_wqueue;
3359 3455 sq = &qip->qu_syncq;
3360 3456
3361 3457 qp->q_last = NULL;
3362 3458 qp->q_next = NULL;
3363 3459 qp->q_ptr = NULL;
3364 3460 qp->q_flag = QUSE | QREADR;
3365 3461 qp->q_bandp = NULL;
3366 3462 qp->q_stream = NULL;
3367 3463 qp->q_syncq = sq;
3368 3464 qp->q_nband = 0;
3369 3465 qp->q_nfsrv = NULL;
3370 3466 qp->q_draining = 0;
3371 3467 qp->q_syncqmsgs = 0;
3372 3468 qp->q_spri = 0;
3373 3469 qp->q_qtstamp = 0;
3374 3470 qp->q_sqtstamp = 0;
3375 3471 qp->q_fp = NULL;
3376 3472
3377 3473 wqp->q_last = NULL;
3378 3474 wqp->q_next = NULL;
3379 3475 wqp->q_ptr = NULL;
3380 3476 wqp->q_flag = QUSE;
3381 3477 wqp->q_bandp = NULL;
3382 3478 wqp->q_stream = NULL;
3383 3479 wqp->q_syncq = sq;
3384 3480 wqp->q_nband = 0;
3385 3481 wqp->q_nfsrv = NULL;
3386 3482 wqp->q_draining = 0;
3387 3483 wqp->q_syncqmsgs = 0;
3388 3484 wqp->q_qtstamp = 0;
3389 3485 wqp->q_sqtstamp = 0;
3390 3486 wqp->q_spri = 0;
3391 3487
3392 3488 sq->sq_count = 0;
3393 3489 sq->sq_rmqcount = 0;
3394 3490 sq->sq_flags = 0;
3395 3491 sq->sq_type = 0;
3396 3492 sq->sq_callbflags = 0;
3397 3493 sq->sq_cancelid = 0;
3398 3494 sq->sq_ciputctrl = NULL;
3399 3495 sq->sq_nciputctrl = 0;
3400 3496 sq->sq_needexcl = 0;
3401 3497 sq->sq_svcflags = 0;
3402 3498
3403 3499 return (qp);
3404 3500 }
3405 3501
3406 3502 /*
3407 3503 * Free a pair of queues and the "attached" syncq.
3408 3504 * Discard any messages left on the syncq(s), remove the syncq(s) from the
3409 3505 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3410 3506 */
3411 3507 void
3412 3508 freeq(queue_t *qp)
3413 3509 {
3414 3510 qband_t *qbp, *nqbp;
3415 3511 syncq_t *sq, *outer;
3416 3512 queue_t *wqp = _WR(qp);
3417 3513
3418 3514 ASSERT(qp->q_flag & QREADR);
3419 3515
3420 3516 /*
3421 3517 * If a previously dispatched taskq job is scheduled to run
3422 3518 * sync_service() or a service routine is scheduled for the
3423 3519 * queues about to be freed, wait here until all service is
3424 3520 * done on the queue and all associated queues and syncqs.
3425 3521 */
3426 3522 wait_svc(qp);
3427 3523
3428 3524 (void) flush_syncq(qp->q_syncq, qp);
3429 3525 (void) flush_syncq(wqp->q_syncq, wqp);
3430 3526 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3431 3527
3432 3528 /*
3433 3529 	 * Flush the queues before q_next is set to NULL. This is needed
3434 3530 * in order to backenable any downstream queue before we go away.
3435 3531 * Note: we are already removed from the stream so that the
3436 3532 * backenabling will not cause any messages to be delivered to our
3437 3533 * put procedures.
3438 3534 */
3439 3535 flushq(qp, FLUSHALL);
3440 3536 flushq(wqp, FLUSHALL);
3441 3537
3442 3538 /* Tidy up - removeq only does a half-remove from stream */
3443 3539 qp->q_next = wqp->q_next = NULL;
3444 3540 ASSERT(!(qp->q_flag & QENAB));
3445 3541 ASSERT(!(wqp->q_flag & QENAB));
3446 3542
3447 3543 outer = qp->q_syncq->sq_outer;
3448 3544 if (outer != NULL) {
3449 3545 outer_remove(outer, qp->q_syncq);
3450 3546 if (wqp->q_syncq != qp->q_syncq)
3451 3547 outer_remove(outer, wqp->q_syncq);
3452 3548 }
3453 3549 /*
3454 3550 * Free any syncqs that are outside what allocq returned.
3455 3551 */
3456 3552 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3457 3553 free_syncq(qp->q_syncq);
3458 3554 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3459 3555 free_syncq(wqp->q_syncq);
3460 3556
3461 3557 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3462 3558 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3463 3559 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3464 3560 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3465 3561 sq = SQ(qp);
3466 3562 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3467 3563 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3468 3564 ASSERT(sq->sq_outer == NULL);
3469 3565 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3470 3566 ASSERT(sq->sq_callbpend == NULL);
3471 3567 ASSERT(sq->sq_needexcl == 0);
3472 3568
3473 3569 if (sq->sq_ciputctrl != NULL) {
3474 3570 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3475 3571 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3476 3572 sq->sq_nciputctrl, 0);
3477 3573 ASSERT(ciputctrl_cache != NULL);
3478 3574 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3479 3575 sq->sq_ciputctrl = NULL;
3480 3576 sq->sq_nciputctrl = 0;
3481 3577 }
3482 3578
3483 3579 ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3484 3580 ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3485 3581 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3486 3582
3487 3583 qp->q_flag &= ~QUSE;
3488 3584 wqp->q_flag &= ~QUSE;
3489 3585
3490 3586 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3491 3587 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3492 3588
3493 3589 qbp = qp->q_bandp;
3494 3590 while (qbp) {
3495 3591 nqbp = qbp->qb_next;
3496 3592 freeband(qbp);
3497 3593 qbp = nqbp;
3498 3594 }
3499 3595 qbp = wqp->q_bandp;
3500 3596 while (qbp) {
3501 3597 nqbp = qbp->qb_next;
3502 3598 freeband(qbp);
3503 3599 qbp = nqbp;
3504 3600 }
3505 3601 kmem_cache_free(queue_cache, qp);
3506 3602 }
3507 3603
3508 3604 /*
3509 3605 * Allocate a qband structure.
3510 3606 */
3511 3607 qband_t *
3512 3608 allocband(void)
3513 3609 {
3514 3610 qband_t *qbp;
3515 3611
3516 3612 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3517 3613 if (qbp == NULL)
3518 3614 return (NULL);
3519 3615
3520 3616 qbp->qb_next = NULL;
3521 3617 qbp->qb_count = 0;
3522 3618 qbp->qb_mblkcnt = 0;
3523 3619 qbp->qb_first = NULL;
3524 3620 qbp->qb_last = NULL;
3525 3621 qbp->qb_flag = 0;
3526 3622
3527 3623 return (qbp);
3528 3624 }
3529 3625
3530 3626 /*
3531 3627 * Free a qband structure.
3532 3628 */
3533 3629 void
3534 3630 freeband(qband_t *qbp)
3535 3631 {
3536 3632 kmem_cache_free(qband_cache, qbp);
3537 3633 }
3538 3634
3539 3635 /*
3540 3636 * Just like putnextctl(9F), except that allocb_wait() is used.
3541 3637 *
3542 3638 * Consolidation Private, and of course only callable from the stream head or
3543 3639 * routines that may block.
3544 3640 */
3545 3641 int
3546 3642 putnextctl_wait(queue_t *q, int type)
3547 3643 {
3548 3644 mblk_t *bp;
3549 3645 int error;
3550 3646
3551 3647 if ((datamsg(type) && (type != M_DELAY)) ||
3552 3648 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3553 3649 return (0);
3554 3650
3555 3651 bp->b_datap->db_type = (unsigned char)type;
3556 3652 putnext(q, bp);
3557 3653 return (1);
3558 3654 }
3559 3655
3560 3656 /*
3561 3657 * Run any possible bufcalls.
3562 3658 */
3563 3659 void
3564 3660 runbufcalls(void)
3565 3661 {
3566 3662 strbufcall_t *bcp;
3567 3663
3568 3664 mutex_enter(&bcall_monitor);
3569 3665 mutex_enter(&strbcall_lock);
3570 3666
3571 3667 if (strbcalls.bc_head) {
3572 3668 size_t count;
3573 3669 int nevent;
3574 3670
3575 3671 /*
3576 3672 * count how many events are on the list
3577 3673 * now so we can check to avoid looping
3578 3674 * in low memory situations
3579 3675 */
3580 3676 nevent = 0;
3581 3677 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3582 3678 nevent++;
3583 3679
3584 3680 /*
3585 3681 * get estimate of available memory from kmem_avail().
3586 3682 * awake all bufcall functions waiting for
3587 3683 * memory whose request could be satisfied
3588 3684 * by 'count' memory and let 'em fight for it.
3589 3685 */
3590 3686 count = kmem_avail();
3591 3687 while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3592 3688 STRSTAT(bufcalls);
3593 3689 --nevent;
3594 3690 if (bcp->bc_size <= count) {
3595 3691 bcp->bc_executor = curthread;
3596 3692 mutex_exit(&strbcall_lock);
3597 3693 (*bcp->bc_func)(bcp->bc_arg);
3598 3694 mutex_enter(&strbcall_lock);
3599 3695 bcp->bc_executor = NULL;
3600 3696 cv_broadcast(&bcall_cv);
3601 3697 strbcalls.bc_head = bcp->bc_next;
3602 3698 kmem_free(bcp, sizeof (strbufcall_t));
3603 3699 } else {
3604 3700 /*
3605 3701 * too big, try again later - note
3606 3702 * that nevent was decremented above
3607 3703 * so we won't retry this one on this
3608 3704 * iteration of the loop
3609 3705 */
3610 3706 if (bcp->bc_next != NULL) {
3611 3707 strbcalls.bc_head = bcp->bc_next;
3612 3708 bcp->bc_next = NULL;
3613 3709 strbcalls.bc_tail->bc_next = bcp;
3614 3710 strbcalls.bc_tail = bcp;
3615 3711 }
3616 3712 }
3617 3713 }
3618 3714 if (strbcalls.bc_head == NULL)
3619 3715 strbcalls.bc_tail = NULL;
3620 3716 }
3621 3717
3622 3718 mutex_exit(&strbcall_lock);
3623 3719 mutex_exit(&bcall_monitor);
3624 3720 }
3625 3721
3626 3722
3627 3723 /*
3628 3724 * Actually run queue's service routine.
3629 3725 */
3630 3726 static void
3631 3727 runservice(queue_t *q)
3632 3728 {
3633 3729 qband_t *qbp;
3634 3730
3635 3731 ASSERT(q->q_qinfo->qi_srvp);
3636 3732 again:
3637 3733 entersq(q->q_syncq, SQ_SVC);
3638 3734 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3639 3735 "runservice starts:%p", q);
3640 3736
3641 3737 if (!(q->q_flag & QWCLOSE))
3642 3738 (*q->q_qinfo->qi_srvp)(q);
3643 3739
3644 3740 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3645 3741 "runservice ends:(%p)", q);
3646 3742
3647 3743 leavesq(q->q_syncq, SQ_SVC);
3648 3744
3649 3745 mutex_enter(QLOCK(q));
3650 3746 if (q->q_flag & QENAB) {
3651 3747 q->q_flag &= ~QENAB;
3652 3748 mutex_exit(QLOCK(q));
3653 3749 goto again;
3654 3750 }
3655 3751 q->q_flag &= ~QINSERVICE;
3656 3752 q->q_flag &= ~QBACK;
3657 3753 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3658 3754 qbp->qb_flag &= ~QB_BACK;
3659 3755 /*
3660 3756 	 * Wake up any thread waiting for the service procedure
3661 3757 * to be run (strclose and qdetach).
3662 3758 */
3663 3759 cv_broadcast(&q->q_wait);
3664 3760
3665 3761 mutex_exit(QLOCK(q));
3666 3762 }
3667 3763
3668 3764 /*
3669 3765 * Background processing of bufcalls.
3670 3766 */
3671 3767 void
3672 3768 streams_bufcall_service(void)
3673 3769 {
3674 3770 callb_cpr_t cprinfo;
3675 3771
3676 3772 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3677 3773 "streams_bufcall_service");
3678 3774
3679 3775 mutex_enter(&strbcall_lock);
3680 3776
3681 3777 for (;;) {
3682 3778 if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3683 3779 mutex_exit(&strbcall_lock);
3684 3780 runbufcalls();
3685 3781 mutex_enter(&strbcall_lock);
3686 3782 }
3687 3783 if (strbcalls.bc_head != NULL) {
3688 3784 STRSTAT(bcwaits);
3689 3785 /* Wait for memory to become available */
3690 3786 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3691 3787 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
3692 3788 SEC_TO_TICK(60), TR_CLOCK_TICK);
3693 3789 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3694 3790 }
3695 3791
3696 3792 /* Wait for new work to arrive */
3697 3793 if (strbcalls.bc_head == NULL) {
3698 3794 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3699 3795 cv_wait(&strbcall_cv, &strbcall_lock);
3700 3796 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3701 3797 }
3702 3798 }
3703 3799 }
3704 3800
3705 3801 /*
3706 3802 * Background processing of streams background tasks which failed
3707 3803 * taskq_dispatch.
3708 3804 */
3709 3805 static void
3710 3806 streams_qbkgrnd_service(void)
3711 3807 {
3712 3808 callb_cpr_t cprinfo;
3713 3809 queue_t *q;
3714 3810
3715 3811 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3716 3812 "streams_bkgrnd_service");
3717 3813
3718 3814 mutex_enter(&service_queue);
3719 3815
3720 3816 for (;;) {
3721 3817 /*
3722 3818 * Wait for work to arrive.
3723 3819 */
3724 3820 while ((freebs_list == NULL) && (qhead == NULL)) {
3725 3821 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3726 3822 cv_wait(&services_to_run, &service_queue);
3727 3823 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3728 3824 }
3729 3825 /*
3730 3826 * Handle all pending freebs requests to free memory.
3731 3827 */
3732 3828 while (freebs_list != NULL) {
3733 3829 mblk_t *mp = freebs_list;
3734 3830 freebs_list = mp->b_next;
3735 3831 mutex_exit(&service_queue);
3736 3832 mblk_free(mp);
3737 3833 mutex_enter(&service_queue);
3738 3834 }
3739 3835 /*
3740 3836 * Run pending queues.
3741 3837 */
3742 3838 while (qhead != NULL) {
3743 3839 DQ(q, qhead, qtail, q_link);
3744 3840 ASSERT(q != NULL);
3745 3841 mutex_exit(&service_queue);
3746 3842 queue_service(q);
3747 3843 mutex_enter(&service_queue);
3748 3844 }
3749 3845 ASSERT(qhead == NULL && qtail == NULL);
3750 3846 }
3751 3847 }
3752 3848
3753 3849 /*
3754 3850 * Background processing of streams background tasks which failed
3755 3851 * taskq_dispatch.
3756 3852 */
3757 3853 static void
3758 3854 streams_sqbkgrnd_service(void)
3759 3855 {
3760 3856 callb_cpr_t cprinfo;
3761 3857 syncq_t *sq;
3762 3858
3763 3859 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3764 3860 "streams_sqbkgrnd_service");
3765 3861
3766 3862 mutex_enter(&service_queue);
3767 3863
3768 3864 for (;;) {
3769 3865 /*
3770 3866 * Wait for work to arrive.
3771 3867 */
3772 3868 while (sqhead == NULL) {
3773 3869 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3774 3870 cv_wait(&syncqs_to_run, &service_queue);
3775 3871 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3776 3872 }
3777 3873
3778 3874 /*
3779 3875 * Run pending syncqs.
3780 3876 */
3781 3877 while (sqhead != NULL) {
3782 3878 DQ(sq, sqhead, sqtail, sq_next);
3783 3879 ASSERT(sq != NULL);
3784 3880 ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3785 3881 mutex_exit(&service_queue);
3786 3882 syncq_service(sq);
3787 3883 mutex_enter(&service_queue);
3788 3884 }
3789 3885 }
3790 3886 }
3791 3887
3792 3888 /*
3793 3889 * Disable the syncq and wait for background syncq processing to complete.
3794 3890 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3795 3891 * list.
3796 3892 */
3797 3893 void
3798 3894 wait_sq_svc(syncq_t *sq)
3799 3895 {
3800 3896 mutex_enter(SQLOCK(sq));
3801 3897 sq->sq_svcflags |= SQ_DISABLED;
3802 3898 if (sq->sq_svcflags & SQ_BGTHREAD) {
3803 3899 syncq_t *sq_chase;
3804 3900 syncq_t *sq_curr;
3805 3901 int removed;
3806 3902
3807 3903 ASSERT(sq->sq_servcount == 1);
3808 3904 mutex_enter(&service_queue);
3809 3905 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3810 3906 mutex_exit(&service_queue);
3811 3907 if (removed) {
3812 3908 sq->sq_svcflags &= ~SQ_BGTHREAD;
3813 3909 sq->sq_servcount = 0;
3814 3910 STRSTAT(sqremoved);
3815 3911 goto done;
3816 3912 }
3817 3913 }
3818 3914 while (sq->sq_servcount != 0) {
3819 3915 sq->sq_flags |= SQ_WANTWAKEUP;
3820 3916 cv_wait(&sq->sq_wait, SQLOCK(sq));
3821 3917 }
3822 3918 done:
3823 3919 mutex_exit(SQLOCK(sq));
3824 3920 }
3825 3921
3826 3922 /*
3827 3923 * Put a syncq on the list of syncq's to be serviced by the sqthread.
3828 3924 * Add the argument to the end of the sqhead list and set the flag
3829 3925 * indicating this syncq has been enabled. If it has already been
3830 3926 * enabled, don't do anything.
3831 3927 * This routine assumes that SQLOCK is held.
3832 3928 * NOTE that the lock order is to have the SQLOCK first,
3833 3929 * so if the service_syncq lock is held, we need to release it
3834 3930 * before acquiring the SQLOCK (mostly relevant for the background
3835 3931 * thread, and this seems to be common among the STREAMS global locks).
3836 3932 * Note that the sq_svcflags are protected by the SQLOCK.
3837 3933 */
3838 3934 void
3839 3935 sqenable(syncq_t *sq)
3840 3936 {
3841 3937 /*
3842 3938 * This is probably not important except for where I believe it
3843 3939 * is being called. At that point, it should be held (and it
3844 3940 * is a pain to release it just for this routine, so don't do
3845 3941 * it).
3846 3942 */
3847 3943 ASSERT(MUTEX_HELD(SQLOCK(sq)));
3848 3944
3849 3945 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3850 3946 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3851 3947
3852 3948 /*
3853 3949 * Do not put on list if background thread is scheduled or
3854 3950 * syncq is disabled.
3855 3951 */
3856 3952 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3857 3953 return;
3858 3954
3859 3955 /*
3860 3956 * Check whether we should enable sq at all.
3861 3957 * Non PERMOD syncqs may be drained by at most one thread.
3862 3958 * PERMOD syncqs may be drained by several threads but we limit the
3863 3959 * total amount to the lesser of
3864 3960 * Number of queues on the squeue and
3865 3961 * Number of CPUs.
3866 3962 */
3867 3963 if (sq->sq_servcount != 0) {
3868 3964 if (((sq->sq_type & SQ_PERMOD) == 0) ||
3869 3965 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3870 3966 STRSTAT(sqtoomany);
3871 3967 return;
3872 3968 }
3873 3969 }
3874 3970
3875 3971 sq->sq_tstamp = ddi_get_lbolt();
3876 3972 STRSTAT(sqenables);
3877 3973
3878 3974 /* Attempt a taskq dispatch */
3879 3975 sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3880 3976 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3881 3977 if (sq->sq_servid != NULL) {
3882 3978 sq->sq_servcount++;
3883 3979 return;
3884 3980 }
3885 3981
3886 3982 /*
3887 3983 * This taskq dispatch failed, but a previous one may have succeeded.
3888 3984 * Don't try to schedule on the background thread whilst there is
3889 3985 * outstanding taskq processing.
3890 3986 */
3891 3987 if (sq->sq_servcount != 0)
3892 3988 return;
3893 3989
3894 3990 /*
3895 3991 * System is low on resources and can't perform a non-sleeping
3896 3992 * dispatch. Schedule the syncq for a background thread and mark the
3897 3993 * syncq to avoid any further taskq dispatch attempts.
3898 3994 */
3899 3995 mutex_enter(&service_queue);
3900 3996 STRSTAT(taskqfails);
3901 3997 ENQUEUE(sq, sqhead, sqtail, sq_next);
3902 3998 sq->sq_svcflags |= SQ_BGTHREAD;
3903 3999 sq->sq_servcount = 1;
3904 4000 cv_signal(&syncqs_to_run);
3905 4001 mutex_exit(&service_queue);
3906 4002 }
3907 4003
3908 4004 /*
3909 4005 * Note: fifo_close() depends on the mblk_t on the queue being freed
3910 4006 * asynchronously. The asynchronous freeing of messages breaks the
3911 4007 * recursive call chain of fifo_close() while there are I_SENDFD type of
3912 4008 * messages referring to other file pointers on the queue. Then when
3913 4009 * closing pipes it can avoid stack overflow in case of daisy-chained
3914 4010 * pipes, and also avoid deadlock in case of fifonode_t pairs (which
3915 4011 * share the same fifolock_t).
3916 4012 *
3917 4013 * No need to kpreempt_disable to access cpu_seqid. If we migrate and
3918 4014 * the esb queue does not match the new CPU, that is OK.
3919 4015 */
3920 4016 void
3921 4017 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3922 4018 {
3923 4019 int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
3924 4020 esb_queue_t *eqp;
3925 4021
3926 4022 ASSERT(dbp->db_mblk == mp);
3927 4023 ASSERT(qindex < esbq_nelem);
3928 4024
3929 4025 eqp = system_esbq_array;
3930 4026 if (eqp != NULL) {
3931 4027 eqp += qindex;
3932 4028 } else {
3933 4029 mutex_enter(&esbq_lock);
3934 4030 if (kmem_ready && system_esbq_array == NULL)
3935 4031 system_esbq_array = (esb_queue_t *)kmem_zalloc(
3936 4032 esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP);
3937 4033 mutex_exit(&esbq_lock);
3938 4034 eqp = system_esbq_array;
3939 4035 if (eqp != NULL)
3940 4036 eqp += qindex;
3941 4037 else
3942 4038 eqp = &system_esbq;
3943 4039 }
3944 4040
3945 4041 /*
3946 4042 * Check data sanity. The dblock should have non-empty free function.
3947 4043 	 * It is better to panic here than later when the dblock is freed
3948 4044 * asynchronously when the context is lost.
3949 4045 */
3950 4046 if (dbp->db_frtnp->free_func == NULL) {
3951 4047 panic("freebs_enqueue: dblock %p has a NULL free callback",
3952 4048 (void *)dbp);
3953 4049 }
3954 4050
3955 4051 mutex_enter(&eqp->eq_lock);
3956 4052 /* queue the new mblk on the esballoc queue */
3957 4053 if (eqp->eq_head == NULL) {
3958 4054 eqp->eq_head = eqp->eq_tail = mp;
3959 4055 } else {
3960 4056 eqp->eq_tail->b_next = mp;
3961 4057 eqp->eq_tail = mp;
3962 4058 }
3963 4059 eqp->eq_len++;
3964 4060
3965 4061 /* If we're the first thread to reach the threshold, process */
3966 4062 if (eqp->eq_len >= esbq_max_qlen &&
3967 4063 !(eqp->eq_flags & ESBQ_PROCESSING))
3968 4064 esballoc_process_queue(eqp);
3969 4065
3970 4066 esballoc_set_timer(eqp, esbq_timeout);
3971 4067 mutex_exit(&eqp->eq_lock);
3972 4068 }
3973 4069
3974 4070 static void
3975 4071 esballoc_process_queue(esb_queue_t *eqp)
3976 4072 {
3977 4073 mblk_t *mp;
3978 4074
3979 4075 ASSERT(MUTEX_HELD(&eqp->eq_lock));
3980 4076
3981 4077 eqp->eq_flags |= ESBQ_PROCESSING;
3982 4078
3983 4079 do {
3984 4080 /*
3985 4081 * Detach the message chain for processing.
3986 4082 */
3987 4083 mp = eqp->eq_head;
3988 4084 eqp->eq_tail->b_next = NULL;
3989 4085 eqp->eq_head = eqp->eq_tail = NULL;
3990 4086 eqp->eq_len = 0;
3991 4087 mutex_exit(&eqp->eq_lock);
3992 4088
3993 4089 /*
3994 4090 * Process the message chain.
3995 4091 */
3996 4092 esballoc_enqueue_mblk(mp);
3997 4093 mutex_enter(&eqp->eq_lock);
3998 4094 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));
3999 4095
4000 4096 eqp->eq_flags &= ~ESBQ_PROCESSING;
4001 4097 }
4002 4098
4003 4099 /*
4004 4100 * taskq callback routine to free esballoced mblk's
4005 4101 */
4006 4102 static void
4007 4103 esballoc_mblk_free(mblk_t *mp)
4008 4104 {
4009 4105 mblk_t *nextmp;
4010 4106
4011 4107 for (; mp != NULL; mp = nextmp) {
4012 4108 nextmp = mp->b_next;
4013 4109 mp->b_next = NULL;
4014 4110 mblk_free(mp);
4015 4111 }
4016 4112 }
4017 4113
4018 4114 static void
4019 4115 esballoc_enqueue_mblk(mblk_t *mp)
4020 4116 {
4021 4117
4022 4118 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
4023 4119 TQ_NOSLEEP) == NULL) {
4024 4120 mblk_t *first_mp = mp;
4025 4121 /*
4026 4122 * System is low on resources and can't perform a non-sleeping
4027 4123 * dispatch. Schedule for a background thread.
4028 4124 */
4029 4125 mutex_enter(&service_queue);
4030 4126 STRSTAT(taskqfails);
4031 4127
4032 4128 while (mp->b_next != NULL)
4033 4129 mp = mp->b_next;
4034 4130
4035 4131 mp->b_next = freebs_list;
4036 4132 freebs_list = first_mp;
4037 4133 cv_signal(&services_to_run);
4038 4134 mutex_exit(&service_queue);
4039 4135 }
4040 4136 }
4041 4137
4042 4138 static void
4043 4139 esballoc_timer(void *arg)
4044 4140 {
4045 4141 esb_queue_t *eqp = arg;
4046 4142
4047 4143 mutex_enter(&eqp->eq_lock);
4048 4144 eqp->eq_flags &= ~ESBQ_TIMER;
4049 4145
4050 4146 if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
4051 4147 eqp->eq_len > 0)
4052 4148 esballoc_process_queue(eqp);
4053 4149
4054 4150 esballoc_set_timer(eqp, esbq_timeout);
4055 4151 mutex_exit(&eqp->eq_lock);
4056 4152 }
4057 4153
4058 4154 static void
4059 4155 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
4060 4156 {
4061 4157 ASSERT(MUTEX_HELD(&eqp->eq_lock));
4062 4158
4063 4159 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
4064 4160 (void) timeout(esballoc_timer, eqp, eq_timeout);
4065 4161 eqp->eq_flags |= ESBQ_TIMER;
4066 4162 }
4067 4163 }
4068 4164
4069 4165 /*
4070 4166  * Set up the esbq array length based upon NCPU scaled by CPUs per
4071 4167 * queue. Use static system_esbq until kmem_ready and we can
4072 4168 * create an array in freebs_enqueue().
4073 4169 */
4074 4170 void
4075 4171 esballoc_queue_init(void)
4076 4172 {
4077 4173 esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1);
4078 4174 esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q;
4079 4175 esbq_nelem = howmany(NCPU, esbq_cpus_per_q);
4080 4176 system_esbq.eq_len = 0;
4081 4177 system_esbq.eq_head = system_esbq.eq_tail = NULL;
4082 4178 system_esbq.eq_flags = 0;
4083 4179 }
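As a worked example (NCPU and the initial esbq_cpus_per_q are assumed values, purely for illustration): with NCPU = 64 and esbq_cpus_per_q = 4, highbit(4 - 1) = 2, so esbq_log2_cpus_per_q = 2, esbq_cpus_per_q rounds to 1 << 2 = 4, and esbq_nelem = howmany(64, 4) = 16. freebs_enqueue() above then selects qindex = cpu_seqid >> 2, spreading esballoc frees across 16 queues, one per group of 4 CPUs.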
4084 4180
4085 4181 /*
4086 4182 * Set the QBACK or QB_BACK flag in the given queue for
4087 4183 * the given priority band.
4088 4184 */
4089 4185 void
4090 4186 setqback(queue_t *q, unsigned char pri)
4091 4187 {
4092 4188 int i;
4093 4189 qband_t *qbp;
4094 4190 qband_t **qbpp;
4095 4191
4096 4192 ASSERT(MUTEX_HELD(QLOCK(q)));
4097 4193 if (pri != 0) {
4098 4194 if (pri > q->q_nband) {
4099 4195 qbpp = &q->q_bandp;
4100 4196 while (*qbpp)
4101 4197 qbpp = &(*qbpp)->qb_next;
4102 4198 while (pri > q->q_nband) {
4103 4199 if ((*qbpp = allocband()) == NULL) {
4104 4200 cmn_err(CE_WARN,
4105 4201 "setqback: can't allocate qband\n");
4106 4202 return;
4107 4203 }
4108 4204 (*qbpp)->qb_hiwat = q->q_hiwat;
4109 4205 (*qbpp)->qb_lowat = q->q_lowat;
4110 4206 q->q_nband++;
4111 4207 qbpp = &(*qbpp)->qb_next;
4112 4208 }
4113 4209 }
4114 4210 qbp = q->q_bandp;
4115 4211 i = pri;
4116 4212 while (--i)
4117 4213 qbp = qbp->qb_next;
4118 4214 qbp->qb_flag |= QB_BACK;
4119 4215 } else {
4120 4216 q->q_flag |= QBACK;
4121 4217 }
4122 4218 }
4123 4219
4124 4220 int
4125 4221 strcopyin(void *from, void *to, size_t len, int copyflag)
4126 4222 {
4127 4223 if (copyflag & U_TO_K) {
4128 4224 ASSERT((copyflag & K_TO_K) == 0);
4129 4225 if (copyin(from, to, len))
4130 4226 return (EFAULT);
4131 4227 } else {
4132 4228 ASSERT(copyflag & K_TO_K);
4133 4229 bcopy(from, to, len);
4134 4230 }
4135 4231 return (0);
4136 4232 }
4137 4233
4138 4234 int
4139 4235 strcopyout(void *from, void *to, size_t len, int copyflag)
4140 4236 {
4141 4237 if (copyflag & U_TO_K) {
4142 4238 if (copyout(from, to, len))
4143 4239 return (EFAULT);
4144 4240 } else {
4145 4241 ASSERT(copyflag & K_TO_K);
4146 4242 bcopy(from, to, len);
4147 4243 }
4148 4244 return (0);
4149 4245 }
4150 4246
4151 4247 /*
4152 4248 * strsignal_nolock() posts a signal to the process(es) at the stream head.
4153 4249 * It assumes that the stream head lock is already held, whereas strsignal()
4154 4250 * acquires the lock first. This routine was created because a few callers
4155 4251 * release the stream head lock before calling only to re-acquire it after
4156 4252 * it returns.
4157 4253 */
4158 4254 void
4159 4255 strsignal_nolock(stdata_t *stp, int sig, uchar_t band)
4160 4256 {
4161 4257 ASSERT(MUTEX_HELD(&stp->sd_lock));
4162 4258 switch (sig) {
4163 4259 case SIGPOLL:
4164 4260 if (stp->sd_sigflags & S_MSG)
4165 4261 strsendsig(stp->sd_siglist, S_MSG, band, 0);
4166 4262 break;
4167 4263 default:
4168 4264 if (stp->sd_pgidp)
4169 4265 pgsignal(stp->sd_pgidp, sig);
4170 4266 break;
4171 4267 }
4172 4268 }
4173 4269
4174 4270 void
4175 4271 strsignal(stdata_t *stp, int sig, int32_t band)
4176 4272 {
4177 4273 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
4178 4274 "strsignal:%p, %X, %X", stp, sig, band);
4179 4275
4180 4276 mutex_enter(&stp->sd_lock);
4181 4277 switch (sig) {
4182 4278 case SIGPOLL:
4183 4279 if (stp->sd_sigflags & S_MSG)
4184 4280 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
4185 4281 break;
4186 4282
4187 4283 default:
4188 4284 if (stp->sd_pgidp) {
4189 4285 pgsignal(stp->sd_pgidp, sig);
4190 4286 }
4191 4287 break;
4192 4288 }
4193 4289 mutex_exit(&stp->sd_lock);
4194 4290 }
4195 4291
4196 4292 void
4197 4293 strhup(stdata_t *stp)
4198 4294 {
4199 4295 ASSERT(mutex_owned(&stp->sd_lock));
4200 4296 pollwakeup(&stp->sd_pollist, POLLHUP);
4201 4297 if (stp->sd_sigflags & S_HANGUP)
4202 4298 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
4203 4299 }
4204 4300
4205 4301 /*
4206 4302 * Backenable the first queue upstream from `q' with a service procedure.
4207 4303 */
4208 4304 void
4209 4305 backenable(queue_t *q, uchar_t pri)
4210 4306 {
4211 4307 queue_t *nq;
4212 4308
4213 4309 /*
4214 4310 * Our presence might not prevent other modules in our own
4215 4311 * stream from popping/pushing since the caller of getq might not
4216 4312 * have a claim on the queue (some drivers do a getq on somebody
4217 4313 * else's queue - they know that the queue itself is not going away
4218 4314 * but the framework has to guarantee q_next in that stream).
4219 4315 */
4220 4316 claimstr(q);
4221 4317
4222 4318 /* Find nearest back queue with service proc */
4223 4319 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4224 4320 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4225 4321 }
4226 4322
4227 4323 if (nq) {
4228 4324 kthread_t *freezer;
4229 4325 /*
4230 4326 * backenable can be called either with no locks held
4231 4327 * or with the stream frozen (the latter occurs when a module
4232 4328 * calls rmvq with the stream frozen). If the stream is frozen
4233 4329 * by the caller the caller will hold all qlocks in the stream.
4234 4330 * Note that a frozen stream doesn't freeze a mated stream,
4235 4331 * so we explicitly check for that.
4236 4332 */
4237 4333 freezer = STREAM(q)->sd_freezer;
4238 4334 if (freezer != curthread || STREAM(q) != STREAM(nq)) {
4239 4335 mutex_enter(QLOCK(nq));
4240 4336 }
4241 4337 #ifdef DEBUG
4242 4338 else {
4243 4339 ASSERT(frozenstr(q));
4244 4340 ASSERT(MUTEX_HELD(QLOCK(q)));
4245 4341 ASSERT(MUTEX_HELD(QLOCK(nq)));
4246 4342 }
4247 4343 #endif
4248 4344 setqback(nq, pri);
4249 4345 qenable_locked(nq);
4250 4346 if (freezer != curthread || STREAM(q) != STREAM(nq))
4251 4347 mutex_exit(QLOCK(nq));
4252 4348 }
4253 4349 releasestr(q);
4254 4350 }
4255 4351
4256 4352 /*
4257 4353 * Return the appropriate errno when one of flags_to_check is set
4258 4354 * in sd_flags. Uses the exported error routines if they are set.
4259 4355  * Will return 0 if no error is set (or if the exported error routines
4260 4356 * do not return an error).
4261 4357 *
4262 4358 * If there is both a read and write error to check, we prefer the read error.
4263 4359 * Also, give preference to recorded errno's over the error functions.
4264 4360 * The flags that are handled are:
4265 4361 * STPLEX return EINVAL
4266 4362 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST)
4267 4363 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST)
4268 4364 * STRHUP return sd_werror
4269 4365 *
4270 4366 * If the caller indicates that the operation is a peek, a nonpersistent error
4271 4367 * is not cleared.
4272 4368 */
4273 4369 int
4274 4370 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4275 4371 {
4276 4372 int32_t sd_flag = stp->sd_flag & flags_to_check;
4277 4373 int error = 0;
4278 4374
4279 4375 ASSERT(MUTEX_HELD(&stp->sd_lock));
4280 4376 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4281 4377 if (sd_flag & STPLEX)
4282 4378 error = EINVAL;
4283 4379 else if (sd_flag & STRDERR) {
4284 4380 error = stp->sd_rerror;
4285 4381 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4286 4382 /*
4287 4383 			 * Read errors are non-persistent, i.e. discarded once
4288 4384 			 * returned to a non-peeking caller.
4289 4385 */
4290 4386 stp->sd_rerror = 0;
4291 4387 stp->sd_flag &= ~STRDERR;
4292 4388 }
4293 4389 if (error == 0 && stp->sd_rderrfunc != NULL) {
4294 4390 int clearerr = 0;
4295 4391
4296 4392 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
4297 4393 &clearerr);
4298 4394 if (clearerr) {
4299 4395 stp->sd_flag &= ~STRDERR;
4300 4396 stp->sd_rderrfunc = NULL;
4301 4397 }
4302 4398 }
4303 4399 } else if (sd_flag & STWRERR) {
4304 4400 error = stp->sd_werror;
4305 4401 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4306 4402 /*
4307 4403 			 * Write errors are non-persistent, i.e. discarded once
4308 4404 			 * returned to a non-peeking caller.
4309 4405 */
4310 4406 stp->sd_werror = 0;
4311 4407 stp->sd_flag &= ~STWRERR;
4312 4408 }
4313 4409 if (error == 0 && stp->sd_wrerrfunc != NULL) {
4314 4410 int clearerr = 0;
4315 4411
4316 4412 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
4317 4413 &clearerr);
4318 4414 if (clearerr) {
4319 4415 stp->sd_flag &= ~STWRERR;
4320 4416 stp->sd_wrerrfunc = NULL;
4321 4417 }
4322 4418 }
4323 4419 } else if (sd_flag & STRHUP) {
4324 4420 /* sd_werror set when STRHUP */
4325 4421 error = stp->sd_werror;
4326 4422 }
4327 4423 return (error);
4328 4424 }
4329 4425
4330 4426
4331 4427 /*
4332 4428 * Single-thread open/close/push/pop
4333 4429 * for twisted streams also
4334 4430 */
4335 4431 int
4336 4432 strstartplumb(stdata_t *stp, int flag, int cmd)
4337 4433 {
4338 4434 int waited = 1;
4339 4435 int error = 0;
4340 4436
4341 4437 if (STRMATED(stp)) {
4342 4438 struct stdata *stmatep = stp->sd_mate;
4343 4439
4344 4440 STRLOCKMATES(stp);
4345 4441 while (waited) {
4346 4442 waited = 0;
4347 4443 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4348 4444 if ((cmd == I_POP) &&
4349 4445 (flag & (FNDELAY|FNONBLOCK))) {
4350 4446 STRUNLOCKMATES(stp);
4351 4447 return (EAGAIN);
4352 4448 }
4353 4449 waited = 1;
4354 4450 mutex_exit(&stp->sd_lock);
4355 4451 if (!cv_wait_sig(&stmatep->sd_monitor,
4356 4452 &stmatep->sd_lock)) {
4357 4453 mutex_exit(&stmatep->sd_lock);
4358 4454 return (EINTR);
4359 4455 }
4360 4456 mutex_exit(&stmatep->sd_lock);
4361 4457 STRLOCKMATES(stp);
4362 4458 }
4363 4459 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4364 4460 if ((cmd == I_POP) &&
4365 4461 (flag & (FNDELAY|FNONBLOCK))) {
4366 4462 STRUNLOCKMATES(stp);
4367 4463 return (EAGAIN);
4368 4464 }
4369 4465 waited = 1;
4370 4466 mutex_exit(&stmatep->sd_lock);
4371 4467 if (!cv_wait_sig(&stp->sd_monitor,
4372 4468 &stp->sd_lock)) {
4373 4469 mutex_exit(&stp->sd_lock);
4374 4470 return (EINTR);
4375 4471 }
4376 4472 mutex_exit(&stp->sd_lock);
4377 4473 STRLOCKMATES(stp);
4378 4474 }
4379 4475 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4380 4476 error = strgeterr(stp,
4381 4477 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4382 4478 if (error != 0) {
4383 4479 STRUNLOCKMATES(stp);
4384 4480 return (error);
4385 4481 }
4386 4482 }
4387 4483 }
4388 4484 stp->sd_flag |= STRPLUMB;
4389 4485 STRUNLOCKMATES(stp);
4390 4486 } else {
4391 4487 mutex_enter(&stp->sd_lock);
4392 4488 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4393 4489 if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4394 4490 (flag & (FNDELAY|FNONBLOCK))) {
4395 4491 mutex_exit(&stp->sd_lock);
4396 4492 return (EAGAIN);
4397 4493 }
4398 4494 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4399 4495 mutex_exit(&stp->sd_lock);
4400 4496 return (EINTR);
4401 4497 }
4402 4498 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4403 4499 error = strgeterr(stp,
4404 4500 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4405 4501 if (error != 0) {
4406 4502 mutex_exit(&stp->sd_lock);
4407 4503 return (error);
4408 4504 }
4409 4505 }
4410 4506 }
4411 4507 stp->sd_flag |= STRPLUMB;
4412 4508 mutex_exit(&stp->sd_lock);
4413 4509 }
4414 4510 return (0);
4415 4511 }
4416 4512
4417 4513 /*
4418 4514 * Complete the plumbing operation associated with stream `stp'.
4419 4515 */
4420 4516 void
4421 4517 strendplumb(stdata_t *stp)
4422 4518 {
4423 4519 ASSERT(MUTEX_HELD(&stp->sd_lock));
4424 4520 ASSERT(stp->sd_flag & STRPLUMB);
4425 4521 stp->sd_flag &= ~STRPLUMB;
4426 4522 cv_broadcast(&stp->sd_monitor);
4427 4523 }
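A minimal sketch of the expected pairing, assuming a caller that already holds the stream pointer (the plumbing work itself is elided):

	int error;

	if ((error = strstartplumb(stp, flag, cmd)) != 0)
		return (error);	/* EAGAIN, EINTR, or a recorded stream error */

	/* ... perform the single-threaded push/pop/plumbing work ... */

	mutex_enter(&stp->sd_lock);	/* strendplumb() asserts sd_lock is held */
	strendplumb(stp);
	mutex_exit(&stp->sd_lock);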
4428 4524
4429 4525 /*
4430 4526 * This describes how the STREAMS framework handles synchronization
4431 4527 * during open/push and close/pop.
4432 4528 * The key interfaces for open and close are qprocson and qprocsoff,
4433 4529  * respectively. While the close case in general is harder, open
4434 4530  * and close have significant similarities.
4435 4531 *
4436 4532 * During close the STREAMS framework has to both ensure that there
4437 4533 * are no stale references to the queue pair (and syncq) that
4438 4534 * are being closed and also provide the guarantees that are documented
4439 4535 * in qprocsoff(9F).
4440 4536 * If there are stale references to the queue that is closing it can
4441 4537 * result in kernel memory corruption or kernel panics.
4442 4538 *
4443 4539  * Note that it is up to the module/driver to ensure that it itself
4444 4540 * does not have any stale references to the closing queues once its close
4445 4541 * routine returns. This includes:
4446 4542 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4447 4543 * associated with the queues. For timeout and bufcall callbacks the
4448 4544 * module/driver also has to ensure (or wait for) any callbacks that
4449 4545 * are in progress.
4450 4546 * - If the module/driver is using esballoc it has to ensure that any
4451 4547 * esballoc free functions do not refer to a queue that has closed.
4452 4548 * (Note that in general the close routine can not wait for the esballoc'ed
4453 4549 * messages to be freed since that can cause a deadlock.)
4454 4550 * - Cancelling any interrupts that refer to the closing queues and
4455 4551 * also ensuring that there are no interrupts in progress that will
4456 4552 * refer to the closing queues once the close routine returns.
4457 4553 * - For multiplexors removing any driver global state that refers to
4458 4554 * the closing queue and also ensuring that there are no threads in
4459 4555 * the multiplexor that has picked up a queue pointer but not yet
4460 4556 * finished using it.
4461 4557 *
4462 4558 * In addition, a driver/module can only reference the q_next pointer
4463 4559 * in its open, close, put, or service procedures or in a
4464 4560 * qtimeout/qbufcall callback procedure executing "on" the correct
4465 4561 * stream. Thus it can not reference the q_next pointer in an interrupt
4466 4562 * routine or a timeout, bufcall or esballoc callback routine. Likewise
4467 4563 * it can not reference q_next of a different queue e.g. in a mux that
4468 4564  * passes messages from one queue's put/service procedure to another queue.
4469 4565 * In all the cases when the driver/module can not access the q_next
4470 4566 * field it must use the *next* versions e.g. canputnext instead of
4471 4567 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4472 4568 *
4473 4569 *
4474 4570 * Assuming that the driver/module conforms to the above constraints
4475 4571 * the STREAMS framework has to avoid stale references to q_next for all
4476 4572 * the framework internal cases which include (but are not limited to):
4477 4573 * - Threads in canput/canputnext/backenable and elsewhere that are
4478 4574 * walking q_next.
4479 4575 * - Messages on a syncq that have a reference to the queue through b_queue.
4480 4576 * - Messages on an outer perimeter (syncq) that have a reference to the
4481 4577 * queue through b_queue.
4482 4578 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
4483 4579 * Note that only canput and bcanput use q_nfsrv without any locking.
4484 4580 *
4485 4581 * The STREAMS framework providing the qprocsoff(9F) guarantees means that
4486 4582 * after qprocsoff returns, the framework has to ensure that no threads can
4487 4583 * enter the put or service routines for the closing read or write-side queue.
4488 4584 * In addition to preventing "direct" entry into the put procedures
4489 4585 * the framework also has to prevent messages being drained from
4490 4586 * the syncq or the outer perimeter.
4491 4587  * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4492 4588 * mechanism to prevent qwriter(PERIM_OUTER) from running after
4493 4589 * qprocsoff has returned.
4494 4590 * Note that if a module/driver uses put(9F) on one of its own queues
4495 4591 * it is up to the module/driver to ensure that the put() doesn't
4496 4592 * get called when the queue is closing.
4497 4593 *
4498 4594 *
4499 4595 * The framework aspects of the above "contract" is implemented by
4500 4596 * qprocsoff, removeq, and strlock:
4501 4597 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4502 4598 * entering the service procedures.
4503 4599 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4504 4600 * canputnext, backenable etc from dereferencing the q_next that will
4505 4601 * soon change.
4506 4602 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
4507 4603 * or other q_next walker that uses claimstr/releasestr to finish.
4508 4604 * - optionally for every syncq in the stream strlock acquires all the
4509 4605 * sq_lock's and waits for all sq_counts to drop to a value that indicates
4510 4606 * that no thread executes in the put or service procedures and that no
4511 4607 * thread is draining into the module/driver. This ensures that no
4512 4608 * open, close, put, service, or qtimeout/qbufcall callback procedure is
4513 4609 * currently executing hence no such thread can end up with the old stale
4514 4610 * q_next value and no canput/backenable can have the old stale
4515 4611 * q_nfsrv/q_next.
4516 4612 * - qdetach (wait_svc) makes sure that any scheduled or running threads
4517 4613 * have either finished or observed the QWCLOSE flag and gone away.
4518 4614 */
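To make the module-side obligations concrete, a hedged sketch of a close routine that cancels its pending callbacks before returning; qprocsoff(9F), quntimeout(9F), and qunbufcall(9F) are standard DDI interfaces, while the softstate type and field names are assumptions:

/* Sketch: a module close routine honoring the contract described above. */
static int
example_modclose(queue_t *q, int flag, cred_t *crp)	/* hypothetical */
{
	example_state_t *sp = q->q_ptr;			/* hypothetical softstate */

	qprocsoff(q);	/* no new put/service entries after this returns */

	/* Cancel pending callbacks that could reference the closing queues. */
	if (sp->es_timeout_id != 0)
		(void) quntimeout(q, sp->es_timeout_id);
	if (sp->es_bufcall_id != 0)
		qunbufcall(q, sp->es_bufcall_id);

	q->q_ptr = _WR(q)->q_ptr = NULL;
	kmem_free(sp, sizeof (*sp));
	return (0);
}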
4519 4615
4520 4616
4521 4617 /*
4522 4618 * Get all the locks necessary to change q_next.
4523 4619 *
4524 4620 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4525 4621 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
4526 4622 * the only threads inside the syncq are threads currently calling removeq().
4527 4623 * Since threads calling removeq() are in the process of removing their queues
4528 4624 * from the stream, we do not need to worry about them accessing a stale q_next
4529 4625 * pointer and thus we do not need to wait for them to exit (in fact, waiting
4530 4626 * for them can cause deadlock).
4531 4627 *
4532 4628 * This routine is subject to starvation since it does not set any flag to
4533 4629 * prevent threads from entering a module in the stream (i.e. sq_count can
4534 4630 * increase on some syncq while it is waiting on some other syncq).
4535 4631 *
4536 4632 * Assumes that only one thread attempts to call strlock for a given
4537 4633 * stream. If this is not the case the two threads would deadlock.
4538 4634 * This assumption is guaranteed since strlock is only called by insertq
4539 4635 * and removeq and streams plumbing changes are single-threaded for
4540 4636 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4541 4637 *
4542 4638 * For pipes, it is not difficult to atomically designate a pair of streams
4543 4639 * to be mated. Once mated atomically by the framework the twisted pair remain
4544 4640 * configured that way until dismantled atomically by the framework.
4545 4641 * When plumbing takes place on a twisted stream it is necessary to ensure that
4546 4642 * this operation is done exclusively on the twisted stream since two such
4547 4643 * operations, each initiated on different ends of the pipe will deadlock
4548 4644 * waiting for each other to complete.
4549 4645 *
4550 4646 * On entry, no locks should be held.
4551 4647  * The locks acquired and held by strlock depend on a few factors.
4552 4648 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4553 4649 * and held on exit and all sq_count are at an acceptable level.
4554 4650 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4555 4651 * sd_refcnt being zero.
4556 4652 */
4557 4653
4558 4654 static void
4559 4655 strlock(struct stdata *stp, sqlist_t *sqlist)
4560 4656 {
4561 4657 syncql_t *sql, *sql2;
4562 4658 retry:
4563 4659 /*
4564 4660 * Wait for any claimstr to go away.
4565 4661 */
4566 4662 if (STRMATED(stp)) {
4567 4663 struct stdata *stp1, *stp2;
4568 4664
4569 4665 STRLOCKMATES(stp);
4570 4666 /*
4571 4667 * Note that the selection of locking order is not
4572 4668 * important, just that they are always acquired in
4573 4669 * the same order. To assure this, we choose this
4574 4670 * order based on the value of the pointer, and since
4575 4671 * the pointer will not change for the life of this
4576 4672 * pair, we will always grab the locks in the same
4577 4673 * order (and hence, prevent deadlocks).
4578 4674 */
4579 4675 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
4580 4676 stp1 = stp;
4581 4677 stp2 = stp->sd_mate;
4582 4678 } else {
4583 4679 stp2 = stp;
4584 4680 stp1 = stp->sd_mate;
4585 4681 }
4586 4682 mutex_enter(&stp1->sd_reflock);
4587 4683 if (stp1->sd_refcnt > 0) {
4588 4684 STRUNLOCKMATES(stp);
4589 4685 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
4590 4686 mutex_exit(&stp1->sd_reflock);
4591 4687 goto retry;
4592 4688 }
4593 4689 mutex_enter(&stp2->sd_reflock);
4594 4690 if (stp2->sd_refcnt > 0) {
4595 4691 STRUNLOCKMATES(stp);
4596 4692 mutex_exit(&stp1->sd_reflock);
4597 4693 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
4598 4694 mutex_exit(&stp2->sd_reflock);
4599 4695 goto retry;
4600 4696 }
4601 4697 STREAM_PUTLOCKS_ENTER(stp1);
4602 4698 STREAM_PUTLOCKS_ENTER(stp2);
4603 4699 } else {
4604 4700 mutex_enter(&stp->sd_lock);
4605 4701 mutex_enter(&stp->sd_reflock);
4606 4702 while (stp->sd_refcnt > 0) {
4607 4703 mutex_exit(&stp->sd_lock);
4608 4704 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
4609 4705 if (mutex_tryenter(&stp->sd_lock) == 0) {
4610 4706 mutex_exit(&stp->sd_reflock);
4611 4707 mutex_enter(&stp->sd_lock);
4612 4708 mutex_enter(&stp->sd_reflock);
4613 4709 }
4614 4710 }
4615 4711 STREAM_PUTLOCKS_ENTER(stp);
4616 4712 }
4617 4713
4618 4714 if (sqlist == NULL)
4619 4715 return;
4620 4716
4621 4717 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4622 4718 syncq_t *sq = sql->sql_sq;
4623 4719 uint16_t count;
4624 4720
4625 4721 mutex_enter(SQLOCK(sq));
4626 4722 count = sq->sq_count;
4627 4723 ASSERT(sq->sq_rmqcount <= count);
4628 4724 SQ_PUTLOCKS_ENTER(sq);
4629 4725 SUM_SQ_PUTCOUNTS(sq, count);
4630 4726 if (count == sq->sq_rmqcount)
4631 4727 continue;
4632 4728
4633 4729 /* Failed - drop all locks that we have acquired so far */
4634 4730 if (STRMATED(stp)) {
4635 4731 STREAM_PUTLOCKS_EXIT(stp);
4636 4732 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4637 4733 STRUNLOCKMATES(stp);
4638 4734 mutex_exit(&stp->sd_reflock);
4639 4735 mutex_exit(&stp->sd_mate->sd_reflock);
4640 4736 } else {
4641 4737 STREAM_PUTLOCKS_EXIT(stp);
4642 4738 mutex_exit(&stp->sd_lock);
4643 4739 mutex_exit(&stp->sd_reflock);
4644 4740 }
4645 4741 for (sql2 = sqlist->sqlist_head; sql2 != sql;
4646 4742 sql2 = sql2->sql_next) {
4647 4743 SQ_PUTLOCKS_EXIT(sql2->sql_sq);
4648 4744 mutex_exit(SQLOCK(sql2->sql_sq));
4649 4745 }
4650 4746
4651 4747 /*
4652 4748 * The wait loop below may starve when there are many threads
4653 4749 * claiming the syncq. This is especially a problem with permod
4654 4750 * syncqs (IP). To lessen the impact of the problem we increment
4655 4751 * sq_needexcl and clear fastbits so that putnexts will slow
4656 4752 * down and call sqenable instead of draining right away.
4657 4753 */
4658 4754 sq->sq_needexcl++;
4659 4755 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4660 4756 while (count > sq->sq_rmqcount) {
4661 4757 sq->sq_flags |= SQ_WANTWAKEUP;
4662 4758 SQ_PUTLOCKS_EXIT(sq);
4663 4759 cv_wait(&sq->sq_wait, SQLOCK(sq));
4664 4760 count = sq->sq_count;
4665 4761 SQ_PUTLOCKS_ENTER(sq);
4666 4762 SUM_SQ_PUTCOUNTS(sq, count);
4667 4763 }
4668 4764 sq->sq_needexcl--;
4669 4765 if (sq->sq_needexcl == 0)
4670 4766 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4671 4767 SQ_PUTLOCKS_EXIT(sq);
4672 4768 ASSERT(count == sq->sq_rmqcount);
4673 4769 mutex_exit(SQLOCK(sq));
4674 4770 goto retry;
4675 4771 }
4676 4772 }
4677 4773
4678 4774 /*
4679 4775 * Drop all the locks that strlock acquired.
4680 4776 */
4681 4777 static void
4682 4778 strunlock(struct stdata *stp, sqlist_t *sqlist)
4683 4779 {
4684 4780 syncql_t *sql;
4685 4781
4686 4782 if (STRMATED(stp)) {
4687 4783 STREAM_PUTLOCKS_EXIT(stp);
4688 4784 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4689 4785 STRUNLOCKMATES(stp);
4690 4786 mutex_exit(&stp->sd_reflock);
4691 4787 mutex_exit(&stp->sd_mate->sd_reflock);
4692 4788 } else {
4693 4789 STREAM_PUTLOCKS_EXIT(stp);
4694 4790 mutex_exit(&stp->sd_lock);
4695 4791 mutex_exit(&stp->sd_reflock);
4696 4792 }
4697 4793
4698 4794 if (sqlist == NULL)
4699 4795 return;
4700 4796
4701 4797 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4702 4798 SQ_PUTLOCKS_EXIT(sql->sql_sq);
4703 4799 mutex_exit(SQLOCK(sql->sql_sq));
4704 4800 }
4705 4801 }
4706 4802
4707 4803 /*
4708 4804  * When the module has a service procedure, we need to check whether
4709 4805  * the next module with a service procedure is in flow control, to
4710 4806  * trigger the backenable.
4711 4807 */
4712 4808 static void
4713 4809 backenable_insertedq(queue_t *q)
4714 4810 {
4715 4811 qband_t *qbp;
4716 4812
4717 4813 claimstr(q);
4718 4814 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
4719 4815 if (q->q_next->q_nfsrv->q_flag & QWANTW)
4720 4816 backenable(q, 0);
4721 4817
4722 4818 qbp = q->q_next->q_nfsrv->q_bandp;
4723 4819 for (; qbp != NULL; qbp = qbp->qb_next)
4724 4820 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
4725 4821 backenable(q, qbp->qb_first->b_band);
4726 4822 }
4727 4823 releasestr(q);
4728 4824 }
4729 4825
4730 4826 /*
4731 4827  * Given two read queues, insert the new one after the other.
4732 4828 *
4733 4829 * This routine acquires all the necessary locks in order to change
4734 4830 * q_next and related pointer using strlock().
4735 4831 * It depends on the stream head ensuring that there are no concurrent
4736 4832 * insertq or removeq on the same stream. The stream head ensures this
4737 4833 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4738 4834 *
4739 4835 * Note that no syncq locks are held during the q_next change. This is
4740 4836 * applied to all streams since, unlike removeq, there is no problem of stale
4741 4837 * pointers when adding a module to the stream. Thus drivers/modules that do a
4742 4838 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
4743 4839 * applied this optimization to all streams.
4744 4840 */
4745 4841 void
4746 4842 insertq(struct stdata *stp, queue_t *new)
4747 4843 {
4748 4844 queue_t *after;
4749 4845 queue_t *wafter;
4750 4846 queue_t *wnew = _WR(new);
4751 4847 boolean_t have_fifo = B_FALSE;
4752 4848
4753 4849 if (new->q_flag & _QINSERTING) {
4754 4850 ASSERT(stp->sd_vnode->v_type != VFIFO);
4755 4851 after = new->q_next;
4756 4852 wafter = _WR(new->q_next);
4757 4853 } else {
4758 4854 after = _RD(stp->sd_wrq);
4759 4855 wafter = stp->sd_wrq;
4760 4856 }
4761 4857
4762 4858 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4763 4859 "insertq:%p, %p", after, new);
4764 4860 ASSERT(after->q_flag & QREADR);
4765 4861 ASSERT(new->q_flag & QREADR);
4766 4862
4767 4863 strlock(stp, NULL);
4768 4864
4769 4865 /* Do we have a FIFO? */
4770 4866 if (wafter->q_next == after) {
4771 4867 have_fifo = B_TRUE;
4772 4868 wnew->q_next = new;
4773 4869 } else {
4774 4870 wnew->q_next = wafter->q_next;
4775 4871 }
4776 4872 new->q_next = after;
4777 4873
4778 4874 set_nfsrv_ptr(new, wnew, after, wafter);
4779 4875 /*
4780 4876 * set_nfsrv_ptr() needs to know if this is an insertion or not,
4781 4877 * so only reset this flag after calling it.
4782 4878 */
4783 4879 new->q_flag &= ~_QINSERTING;
4784 4880
4785 4881 if (have_fifo) {
4786 4882 wafter->q_next = wnew;
4787 4883 } else {
4788 4884 if (wafter->q_next)
4789 4885 _OTHERQ(wafter->q_next)->q_next = new;
4790 4886 wafter->q_next = wnew;
4791 4887 }
4792 4888
4793 4889 set_qend(new);
4794 4890 /* The QEND flag might have to be updated for the upstream guy */
4795 4891 set_qend(after);
4796 4892
4797 4893 ASSERT(_SAMESTR(new) == O_SAMESTR(new));
4798 4894 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
4799 4895 ASSERT(_SAMESTR(after) == O_SAMESTR(after));
4800 4896 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
4801 4897 strsetuio(stp);
4802 4898
4803 4899 /*
4804 4900 * If this was a module insertion, bump the push count.
4805 4901 */
4806 4902 if (!(new->q_flag & QISDRV))
4807 4903 stp->sd_pushcnt++;
4808 4904
4809 4905 strunlock(stp, NULL);
4810 4906
4811 4907 /* check if the write Q needs backenable */
4812 4908 backenable_insertedq(wnew);
4813 4909
4814 4910 /* check if the read Q needs backenable */
4815 4911 backenable_insertedq(new);
4816 4912 }
4817 4913
4818 4914 /*
4819 4915 * Given a read queue, unlink it from any neighbors.
4820 4916 *
4821 4917 * This routine acquires all the necessary locks in order to
4822 4918 * change q_next and related pointers and also guard against
4823 4919 * stale references (e.g. through q_next) to the queue that
4824 4920 * is being removed. It also plays part of the role in ensuring
4825 4921 * that the module's/driver's put procedure doesn't get called
4826 4922 * after qprocsoff returns.
4827 4923 *
4828 4924 * Removeq depends on the stream head ensuring that there are
4829 4925 * no concurrent insertq or removeq on the same stream. The
4830 4926 * stream head ensures this using the flags STWOPEN, STRCLOSE and
4831 4927 * STRPLUMB.
4832 4928 *
4833 4929 * The set of locks needed to remove the queue is different in
4834 4930 * different cases:
4835 4931 *
4836 4932 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
4837 4933 * waiting for the syncq reference count to drop to 0 indicating that no
4838 4934 * non-close threads are present anywhere in the stream. This ensures that any
4839 4935 * module/driver can reference q_next in its open, close, put, or service
4840 4936 * procedures.
4841 4937 *
4842 4938 * The sq_rmqcount counter tracks the number of threads inside removeq().
4843 4939  * strlock() ensures that either no threads are executing inside the
4844 4940  * perimeter or there is only a thread calling qprocsoff().
4845 4941 *
4846 4942 * strlock() compares the value of sq_count with the number of threads inside
4847 4943  * removeq() and waits until sq_count is equal to sq_rmqcount. We need to
4848 4944  * wake up any threads waiting in strlock() when sq_rmqcount increases.
4849 4945 */
4850 4946
4851 4947 void
4852 4948 removeq(queue_t *qp)
4853 4949 {
4854 4950 queue_t *wqp = _WR(qp);
4855 4951 struct stdata *stp = STREAM(qp);
4856 4952 sqlist_t *sqlist = NULL;
4857 4953 boolean_t isdriver;
4858 4954 int moved;
4859 4955 syncq_t *sq = qp->q_syncq;
4860 4956 syncq_t *wsq = wqp->q_syncq;
4861 4957
4862 4958 ASSERT(stp);
4863 4959
4864 4960 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
4865 4961 "removeq:%p %p", qp, wqp);
4866 4962 ASSERT(qp->q_flag&QREADR);
4867 4963
4868 4964 /*
4869 4965 * For queues using Synchronous streams, we must wait for all threads in
4870 4966 * rwnext() to drain out before proceeding.
4871 4967 */
4872 4968 if (qp->q_flag & QSYNCSTR) {
4873 4969 		/* First, we need to wake up any threads blocked in rwnext() */
4874 4970 mutex_enter(SQLOCK(sq));
4875 4971 if (sq->sq_flags & SQ_WANTWAKEUP) {
4876 4972 sq->sq_flags &= ~SQ_WANTWAKEUP;
4877 4973 cv_broadcast(&sq->sq_wait);
4878 4974 }
4879 4975 mutex_exit(SQLOCK(sq));
4880 4976
4881 4977 if (wsq != sq) {
4882 4978 mutex_enter(SQLOCK(wsq));
4883 4979 if (wsq->sq_flags & SQ_WANTWAKEUP) {
4884 4980 wsq->sq_flags &= ~SQ_WANTWAKEUP;
4885 4981 cv_broadcast(&wsq->sq_wait);
4886 4982 }
4887 4983 mutex_exit(SQLOCK(wsq));
4888 4984 }
4889 4985
4890 4986 mutex_enter(QLOCK(qp));
4891 4987 while (qp->q_rwcnt > 0) {
4892 4988 qp->q_flag |= QWANTRMQSYNC;
4893 4989 cv_wait(&qp->q_wait, QLOCK(qp));
4894 4990 }
4895 4991 mutex_exit(QLOCK(qp));
4896 4992
4897 4993 mutex_enter(QLOCK(wqp));
4898 4994 while (wqp->q_rwcnt > 0) {
4899 4995 wqp->q_flag |= QWANTRMQSYNC;
4900 4996 cv_wait(&wqp->q_wait, QLOCK(wqp));
4901 4997 }
4902 4998 mutex_exit(QLOCK(wqp));
4903 4999 }
4904 5000
4905 5001 mutex_enter(SQLOCK(sq));
4906 5002 sq->sq_rmqcount++;
4907 5003 if (sq->sq_flags & SQ_WANTWAKEUP) {
4908 5004 sq->sq_flags &= ~SQ_WANTWAKEUP;
4909 5005 cv_broadcast(&sq->sq_wait);
4910 5006 }
4911 5007 mutex_exit(SQLOCK(sq));
4912 5008
4913 5009 isdriver = (qp->q_flag & QISDRV);
4914 5010
4915 5011 sqlist = sqlist_build(qp, stp, STRMATED(stp));
4916 5012 strlock(stp, sqlist);
4917 5013
4918 5014 reset_nfsrv_ptr(qp, wqp);
4919 5015
4920 5016 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
4921 5017 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
4922 5018 /* Do we have a FIFO? */
4923 5019 if (wqp->q_next == qp) {
4924 5020 stp->sd_wrq->q_next = _RD(stp->sd_wrq);
4925 5021 } else {
4926 5022 if (wqp->q_next)
4927 5023 backq(qp)->q_next = qp->q_next;
4928 5024 if (qp->q_next)
4929 5025 backq(wqp)->q_next = wqp->q_next;
4930 5026 }
4931 5027
4932 5028 /* The QEND flag might have to be updated for the upstream guy */
4933 5029 if (qp->q_next)
4934 5030 set_qend(qp->q_next);
4935 5031
4936 5032 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
4937 5033 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));
4938 5034
4939 5035 /*
4940 5036 * Move any messages destined for the put procedures to the next
4941 5037 * syncq in line. Otherwise free them.
4942 5038 */
4943 5039 moved = 0;
4944 5040 /*
4945 5041 * Quick check to see whether there are any messages or events.
4946 5042 */
4947 5043 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
4948 5044 moved += propagate_syncq(qp);
4949 5045 if (wqp->q_syncqmsgs != 0 ||
4950 5046 (wqp->q_syncq->sq_flags & SQ_EVENTS))
4951 5047 moved += propagate_syncq(wqp);
4952 5048
4953 5049 strsetuio(stp);
4954 5050
4955 5051 /*
4956 5052 * If this was a module removal, decrement the push count.
4957 5053 */
4958 5054 if (!isdriver)
4959 5055 stp->sd_pushcnt--;
4960 5056
4961 5057 strunlock(stp, sqlist);
4962 5058 sqlist_free(sqlist);
4963 5059
4964 5060 /*
4965 5061 * Make sure any messages that were propagated are drained.
4966 5062 * Also clear any QFULL bit caused by messages that were propagated.
4967 5063 */
4968 5064
4969 5065 if (qp->q_next != NULL) {
4970 5066 clr_qfull(qp);
4971 5067 /*
4972 5068 * For the driver calling qprocsoff, propagate_syncq
4973 5069 		 * frees all the messages instead of putting them on
4974 5070 		 * the stream head.
4975 5071 */
4976 5072 if (!isdriver && (moved > 0))
4977 5073 emptysq(qp->q_next->q_syncq);
4978 5074 }
4979 5075 if (wqp->q_next != NULL) {
4980 5076 clr_qfull(wqp);
4981 5077 /*
4982 5078 * We come here for any pop of a module except for the
4983 5079 		 * case of the driver being removed. We don't call emptysq
4984 5080 		 * if we did not move any messages; this avoids holding
4985 5081 		 * PERMOD syncq locks in emptysq.
4986 5082 */
4987 5083 if (moved > 0)
4988 5084 emptysq(wqp->q_next->q_syncq);
4989 5085 }
4990 5086
4991 5087 mutex_enter(SQLOCK(sq));
4992 5088 sq->sq_rmqcount--;
4993 5089 mutex_exit(SQLOCK(sq));
4994 5090 }
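
The sq_count/sq_rmqcount rendezvous described in the block comment above removeq() is at heart a condition-variable handshake: a remover registers itself and wakes waiters, and a strlock()-style waiter blocks until every remaining claim belongs to a remover. A minimal userland sketch of that handshake, using POSIX threads and hypothetical names in place of the kernel's SQLOCK/sq_wait primitives:

    #include <pthread.h>

    /* Hypothetical analogue of a syncq's counters (not the kernel types). */
    typedef struct {
            pthread_mutex_t lock;           /* stands in for SQLOCK(sq) */
            pthread_cond_t  wait;           /* stands in for sq_wait */
            unsigned        count;          /* stands in for sq_count */
            unsigned        rmqcount;       /* stands in for sq_rmqcount */
    } fake_sq_t;

    /* A remover announces itself, as removeq() does by bumping sq_rmqcount. */
    void
    remover_enter(fake_sq_t *sq)
    {
            pthread_mutex_lock(&sq->lock);
            sq->rmqcount++;
            /* Wake any thread blocked in locker_wait() below. */
            pthread_cond_broadcast(&sq->wait);
            pthread_mutex_unlock(&sq->lock);
    }

    /* The strlock()-style wait: block until only removers remain. */
    void
    locker_wait(fake_sq_t *sq)
    {
            pthread_mutex_lock(&sq->lock);
            while (sq->count != sq->rmqcount)
                    pthread_cond_wait(&sq->wait, &sq->lock);
            pthread_mutex_unlock(&sq->lock);
    }
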
4995 5091
4996 5092 /*
4997 5093 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
4998 5094 * SQ_WRITER) on a syncq.
4999 5095 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the
5000 5096 * sync queue and waits until sq_count reaches maxcnt.
5001 5097 *
5002 5098 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
5003 5099 * does not care about putnext threads that are in the middle of calling put
5004 5100 * entry points.
5005 5101 *
5006 5102 * This routine is used for both inner and outer syncqs.
5007 5103 */
5008 5104 static void
5009 5105 blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
5010 5106 {
5011 5107 uint16_t count = 0;
5012 5108
5013 5109 mutex_enter(SQLOCK(sq));
5014 5110 /*
5015 5111 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
5016 5112 * SQ_FROZEN will be set if there is a frozen stream that has a
5017 5113 * queue which also refers to this "shared" syncq.
5018 5114 	 * SQ_BLOCKED will be set if there is an "off" queue which also
5019 5115 * refers to this "shared" syncq.
5020 5116 */
5021 5117 if (maxcnt != -1) {
5022 5118 count = sq->sq_count;
5023 5119 SQ_PUTLOCKS_ENTER(sq);
5024 5120 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5025 5121 SUM_SQ_PUTCOUNTS(sq, count);
5026 5122 }
5027 5123 sq->sq_needexcl++;
5028 5124 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5029 5125
5030 5126 while ((sq->sq_flags & flag) ||
5031 5127 (maxcnt != -1 && count > (unsigned)maxcnt)) {
5032 5128 sq->sq_flags |= SQ_WANTWAKEUP;
5033 5129 if (maxcnt != -1) {
5034 5130 SQ_PUTLOCKS_EXIT(sq);
5035 5131 }
5036 5132 cv_wait(&sq->sq_wait, SQLOCK(sq));
5037 5133 if (maxcnt != -1) {
5038 5134 count = sq->sq_count;
5039 5135 SQ_PUTLOCKS_ENTER(sq);
5040 5136 SUM_SQ_PUTCOUNTS(sq, count);
5041 5137 }
5042 5138 }
5043 5139 sq->sq_needexcl--;
5044 5140 sq->sq_flags |= flag;
5045 5141 ASSERT(maxcnt == -1 || count == maxcnt);
5046 5142 if (maxcnt != -1) {
5047 5143 if (sq->sq_needexcl == 0) {
5048 5144 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5049 5145 }
5050 5146 SQ_PUTLOCKS_EXIT(sq);
5051 5147 } else if (sq->sq_needexcl == 0) {
5052 5148 SQ_PUTCOUNT_SETFAST(sq);
5053 5149 }
5054 5150
5055 5151 mutex_exit(SQLOCK(sq));
5056 5152 }
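
blocksq() is essentially a flag-based gate: wait until the flag is clear and the claim count has drained, then set the flag to keep new entries out; unblocksq() and dropsq() below are the matching release. A simplified pthreads sketch of the same gate, with hypothetical names (the putlock fast path and the SQ_WANTWAKEUP bookkeeping are omitted):

    #include <pthread.h>

    typedef struct {
            pthread_mutex_t lock;
            pthread_cond_t  wait;
            unsigned        flags;
            unsigned        count;
    } gate_t;

    /*
     * Wait until "flag" is clear and the claim count has drained to
     * "maxcnt", then set the flag to block further entry (a rough
     * analogue of blocksq() called with maxcnt >= 0).
     */
    void
    gate_block(gate_t *g, unsigned flag, unsigned maxcnt)
    {
            pthread_mutex_lock(&g->lock);
            while ((g->flags & flag) || g->count > maxcnt)
                    pthread_cond_wait(&g->wait, &g->lock);
            g->flags |= flag;
            pthread_mutex_unlock(&g->lock);
    }

    /* The unblocksq()-style release: clear the flag and wake waiters. */
    void
    gate_unblock(gate_t *g, unsigned flag)
    {
            pthread_mutex_lock(&g->lock);
            g->flags &= ~flag;
            pthread_cond_broadcast(&g->wait);
            pthread_mutex_unlock(&g->lock);
    }
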
5057 5153
5058 5154 /*
5059 5155 * Reset a flag that was set with blocksq.
5060 5156 *
5061 5157  * Cannot use this routine to reset SQ_WRITER.
5062 5158 *
5063 5159 * If "isouter" is set then the syncq is assumed to be an outer perimeter
5064 5160 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
5065 5161 * to handle the queued qwriter operations.
5066 5162 *
5067 5163 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5068 5164 * sq_putlocks are used.
5069 5165 */
5070 5166 static void
5071 5167 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
5072 5168 {
5073 5169 uint16_t flags;
5074 5170
5075 5171 mutex_enter(SQLOCK(sq));
5076 5172 ASSERT(resetflag != SQ_WRITER);
5077 5173 ASSERT(sq->sq_flags & resetflag);
5078 5174 flags = sq->sq_flags & ~resetflag;
5079 5175 sq->sq_flags = flags;
5080 5176 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
5081 5177 if (flags & SQ_WANTWAKEUP) {
5082 5178 flags &= ~SQ_WANTWAKEUP;
5083 5179 cv_broadcast(&sq->sq_wait);
5084 5180 }
5085 5181 sq->sq_flags = flags;
5086 5182 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5087 5183 if (!isouter) {
5088 5184 /* drain_syncq drops SQLOCK */
5089 5185 drain_syncq(sq);
5090 5186 return;
5091 5187 }
5092 5188 }
5093 5189 }
5094 5190 mutex_exit(SQLOCK(sq));
5095 5191 }
5096 5192
5097 5193 /*
5098 5194 * Reset a flag that was set with blocksq.
5099 5195 * Does not drain the syncq. Use emptysq() for that.
5100 5196 * Returns 1 if SQ_QUEUED is set. Otherwise 0.
5101 5197 *
5102 5198 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5103 5199 * sq_putlocks are used.
5104 5200 */
5105 5201 static int
5106 5202 dropsq(syncq_t *sq, uint16_t resetflag)
5107 5203 {
5108 5204 uint16_t flags;
5109 5205
5110 5206 mutex_enter(SQLOCK(sq));
5111 5207 ASSERT(sq->sq_flags & resetflag);
5112 5208 flags = sq->sq_flags & ~resetflag;
5113 5209 if (flags & SQ_WANTWAKEUP) {
5114 5210 flags &= ~SQ_WANTWAKEUP;
5115 5211 cv_broadcast(&sq->sq_wait);
5116 5212 }
5117 5213 sq->sq_flags = flags;
5118 5214 mutex_exit(SQLOCK(sq));
5119 5215 if (flags & SQ_QUEUED)
5120 5216 return (1);
5121 5217 return (0);
5122 5218 }
5123 5219
5124 5220 /*
5125 5221 * Empty all the messages on a syncq.
5126 5222 *
5127 5223 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5128 5224 * sq_putlocks are used.
5129 5225 */
5130 5226 static void
5131 5227 emptysq(syncq_t *sq)
5132 5228 {
5133 5229 uint16_t flags;
5134 5230
5135 5231 mutex_enter(SQLOCK(sq));
5136 5232 flags = sq->sq_flags;
5137 5233 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5138 5234 /*
5139 5235 * To prevent potential recursive invocation of drain_syncq we
5140 5236 * do not call drain_syncq if count is non-zero.
5141 5237 */
5142 5238 if (sq->sq_count == 0) {
5143 5239 /* drain_syncq() drops SQLOCK */
5144 5240 drain_syncq(sq);
5145 5241 return;
5146 5242 } else
5147 5243 sqenable(sq);
5148 5244 }
5149 5245 mutex_exit(SQLOCK(sq));
5150 5246 }
5151 5247
5152 5248 /*
5153 5249 * Ordered insert while removing duplicates.
5154 5250 */
5155 5251 static void
5156 5252 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
5157 5253 {
5158 5254 syncql_t *sqlp, **prev_sqlpp, *new_sqlp;
5159 5255
5160 5256 prev_sqlpp = &sqlist->sqlist_head;
5161 5257 while ((sqlp = *prev_sqlpp) != NULL) {
5162 5258 if (sqlp->sql_sq >= sqp) {
5163 5259 if (sqlp->sql_sq == sqp) /* duplicate */
5164 5260 return;
5165 5261 break;
5166 5262 }
5167 5263 prev_sqlpp = &sqlp->sql_next;
5168 5264 }
5169 5265 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
5170 5266 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
5171 5267 new_sqlp->sql_next = sqlp;
5172 5268 new_sqlp->sql_sq = sqp;
5173 5269 *prev_sqlpp = new_sqlp;
5174 5270 }
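
sqlist_insert() keeps the list sorted by syncq address so that callers such as strlock() always acquire locks in one canonical order, which is what makes the ordering deadlock-free. A self-contained sketch of the same address-sorted, duplicate-dropping insert, with hypothetical types and a preallocated pool standing in for sqlist_array:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    #define POOLSIZE        16

    typedef struct node {
            struct node     *next;
            void            *val;   /* sorted by address, like sql_sq */
    } node_t;

    typedef struct list {
            node_t  *head;
            node_t  pool[POOLSIZE]; /* preallocated, like sqlist_array */
            int     index;
    } list_t;

    /* Insert "val" keeping the list address-sorted; drop duplicates. */
    void
    list_insert(list_t *l, void *val)
    {
            node_t *cur, *n, **prevp = &l->head;

            while ((cur = *prevp) != NULL) {
                    if ((uintptr_t)cur->val >= (uintptr_t)val) {
                            if (cur->val == val)    /* duplicate */
                                    return;
                            break;
                    }
                    prevp = &cur->next;
            }
            /* cur is NULL (append at tail) or the first larger element. */
            assert(l->index < POOLSIZE);    /* mirrors the sqlist_size ASSERT */
            n = &l->pool[l->index++];
            n->next = cur;
            n->val = val;
            *prevp = n;
    }
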
5175 5271
5176 5272 /*
5177 5273 * Walk the write side queues until we hit either the driver
5178 5274 * or a twist in the stream (_SAMESTR will return false in both
5179 5275 * these cases) then turn around and walk the read side queues
5180 5276 * back up to the stream head.
5181 5277 */
5182 5278 static void
5183 5279 sqlist_insertall(sqlist_t *sqlist, queue_t *q)
5184 5280 {
5185 5281 while (q != NULL) {
5186 5282 sqlist_insert(sqlist, q->q_syncq);
5187 5283
5188 5284 if (_SAMESTR(q))
5189 5285 q = q->q_next;
5190 5286 else if (!(q->q_flag & QREADR))
5191 5287 q = _RD(q);
5192 5288 else
5193 5289 q = NULL;
5194 5290 }
5195 5291 }
5196 5292
5197 5293 /*
5198 5294 * Allocate and build a list of all syncqs in a stream and the syncq(s)
5199 5295 * associated with the "q" parameter. The resulting list is sorted in a
5200 5296 * canonical order and is free of duplicates.
5201 5297 * Assumes the passed queue is a _RD(q).
5202 5298 */
5203 5299 static sqlist_t *
5204 5300 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
5205 5301 {
5206 5302 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);
5207 5303
5208 5304 /*
5209 5305 * start with the current queue/qpair
5210 5306 */
5211 5307 ASSERT(q->q_flag & QREADR);
5212 5308
5213 5309 sqlist_insert(sqlist, q->q_syncq);
5214 5310 sqlist_insert(sqlist, _WR(q)->q_syncq);
5215 5311
5216 5312 sqlist_insertall(sqlist, stp->sd_wrq);
5217 5313 if (do_twist)
5218 5314 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);
5219 5315
5220 5316 return (sqlist);
5221 5317 }
5222 5318
5223 5319 static sqlist_t *
5224 5320 sqlist_alloc(struct stdata *stp, int kmflag)
5225 5321 {
5226 5322 size_t sqlist_size;
5227 5323 sqlist_t *sqlist;
5228 5324
5229 5325 /*
5230 5326 * Allocate 2 syncql_t's for each pushed module. Note that
5231 5327 * the sqlist_t structure already has 4 syncql_t's built in:
5232 5328 * 2 for the stream head, and 2 for the driver/other stream head.
5233 5329 */
5234 5330 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
5235 5331 sizeof (sqlist_t);
5236 5332 if (STRMATED(stp))
5237 5333 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
5238 5334 sqlist = kmem_alloc(sqlist_size, kmflag);
5239 5335
5240 5336 sqlist->sqlist_head = NULL;
5241 5337 sqlist->sqlist_size = sqlist_size;
5242 5338 sqlist->sqlist_index = 0;
5243 5339
5244 5340 return (sqlist);
5245 5341 }
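
To make the sizing arithmetic in sqlist_alloc() concrete, here is a worked example with hypothetical structure sizes (the real sizes are platform dependent):

    /*
     * Suppose sizeof (syncql_t) == 16 and sizeof (sqlist_t) == 80,
     * where sqlist_t already embeds 4 syncql_t's (2 for the stream
     * head, 2 for the driver/other stream head). For an unmated
     * stream with sd_pushcnt == 3:
     *
     *      sqlist_size = 2 * 16 * 3 + 80 = 176 bytes
     *
     * which is capacity for 4 + 2 * 3 = 10 syncql_t entries: the 4
     * built-in ones plus 2 per pushed module.
     */
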
5246 5342
5247 5343 /*
5248 5344 * Free the list created by sqlist_alloc()
5249 5345 */
5250 5346 static void
5251 5347 sqlist_free(sqlist_t *sqlist)
5252 5348 {
5253 5349 kmem_free(sqlist, sqlist->sqlist_size);
5254 5350 }
5255 5351
5256 5352 /*
5257 5353 * Prevent any new entries into any syncq in this stream.
5258 5354 * Used by freezestr.
5259 5355 */
5260 5356 void
5261 5357 strblock(queue_t *q)
5262 5358 {
5263 5359 struct stdata *stp;
5264 5360 syncql_t *sql;
5265 5361 sqlist_t *sqlist;
5266 5362
5267 5363 q = _RD(q);
5268 5364
5269 5365 stp = STREAM(q);
5270 5366 ASSERT(stp != NULL);
5271 5367
5272 5368 /*
5273 5369 * Get a sorted list with all the duplicates removed containing
5274 5370 * all the syncqs referenced by this stream.
5275 5371 */
5276 5372 sqlist = sqlist_build(q, stp, B_FALSE);
5277 5373 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5278 5374 blocksq(sql->sql_sq, SQ_FROZEN, -1);
5279 5375 sqlist_free(sqlist);
5280 5376 }
5281 5377
5282 5378 /*
5283 5379 * Release the block on new entries into this stream
5284 5380 */
5285 5381 void
5286 5382 strunblock(queue_t *q)
5287 5383 {
5288 5384 struct stdata *stp;
5289 5385 syncql_t *sql;
5290 5386 sqlist_t *sqlist;
5291 5387 int drain_needed;
5292 5388
5293 5389 q = _RD(q);
5294 5390
5295 5391 /*
5296 5392 * Get a sorted list with all the duplicates removed containing
5297 5393 * all the syncqs referenced by this stream.
5298 5394 * Have to drop the SQ_FROZEN flag on all the syncqs before
5299 5395 * starting to drain them; otherwise the draining might
5300 5396 * cause a freezestr in some module on the stream (which
5301 5397 * would deadlock).
5302 5398 */
5303 5399 stp = STREAM(q);
5304 5400 ASSERT(stp != NULL);
5305 5401 sqlist = sqlist_build(q, stp, B_FALSE);
5306 5402 drain_needed = 0;
5307 5403 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5308 5404 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
5309 5405 if (drain_needed) {
5310 5406 for (sql = sqlist->sqlist_head; sql != NULL;
5311 5407 sql = sql->sql_next)
5312 5408 emptysq(sql->sql_sq);
5313 5409 }
5314 5410 sqlist_free(sqlist);
5315 5411 }
5316 5412
5317 5413 #ifdef DEBUG
5318 5414 static int
5319 5415 qprocsareon(queue_t *rq)
5320 5416 {
5321 5417 if (rq->q_next == NULL)
5322 5418 return (0);
5323 5419 return (_WR(rq->q_next)->q_next == _WR(rq));
5324 5420 }
5325 5421
5326 5422 int
5327 5423 qclaimed(queue_t *q)
5328 5424 {
5329 5425 uint_t count;
5330 5426
5331 5427 count = q->q_syncq->sq_count;
5332 5428 SUM_SQ_PUTCOUNTS(q->q_syncq, count);
5333 5429 return (count != 0);
5334 5430 }
5335 5431
5336 5432 /*
5337 5433 * Check if anyone has frozen this stream with freezestr
5338 5434 */
5339 5435 int
5340 5436 frozenstr(queue_t *q)
5341 5437 {
5342 5438 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
5343 5439 }
5344 5440 #endif /* DEBUG */
5345 5441
5346 5442 /*
5347 5443 * Enter a queue.
5348 5444 * Obsoleted interface. Should not be used.
5349 5445 */
5350 5446 void
5351 5447 enterq(queue_t *q)
5352 5448 {
5353 5449 entersq(q->q_syncq, SQ_CALLBACK);
5354 5450 }
5355 5451
5356 5452 void
5357 5453 leaveq(queue_t *q)
5358 5454 {
5359 5455 leavesq(q->q_syncq, SQ_CALLBACK);
5360 5456 }
5361 5457
5362 5458 /*
5363 5459  * Enter a perimeter. c_inner and c_outer specify which concurrency bits
5364 5460 * to check.
5365 5461 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
5366 5462 * calls and the running of open, close and service procedures.
5367 5463 *
5368 5464 * If c_inner bit is set no need to grab sq_putlocks since we don't care
5369 5465 * if other threads have entered or are entering put entry point.
5370 5466 *
5371 5467 * If c_inner bit is set it might have been possible to use
5372 5468 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
5373 5469 * open/close path for IP) but since the count may need to be decremented in
5374 5470 * qwait() we wouldn't know which counter to decrement. Currently counter is
5375 5471 * selected by current cpu_seqid and current CPU can change at any moment. XXX
5376 5472 * in the future we might use curthread id bits to select the counter and this
5377 5473 * would stay constant across routine calls.
5378 5474 */
5379 5475 void
5380 5476 entersq(syncq_t *sq, int entrypoint)
5381 5477 {
5382 5478 uint16_t count = 0;
5383 5479 uint16_t flags;
5384 5480 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
5385 5481 uint16_t type;
5386 5482 uint_t c_inner = entrypoint & SQ_CI;
5387 5483 uint_t c_outer = entrypoint & SQ_CO;
5388 5484
5389 5485 /*
5390 5486 * Increment ref count to keep closes out of this queue.
5391 5487 */
5392 5488 ASSERT(sq);
5393 5489 ASSERT(c_inner && c_outer);
5394 5490 mutex_enter(SQLOCK(sq));
5395 5491 flags = sq->sq_flags;
5396 5492 type = sq->sq_type;
5397 5493 if (!(type & c_inner)) {
5398 5494 /* Make sure all putcounts now use slowlock. */
5399 5495 count = sq->sq_count;
5400 5496 SQ_PUTLOCKS_ENTER(sq);
5401 5497 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5402 5498 SUM_SQ_PUTCOUNTS(sq, count);
5403 5499 sq->sq_needexcl++;
5404 5500 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5405 5501 waitflags |= SQ_MESSAGES;
5406 5502 }
5407 5503 /*
5408 5504 * Wait until we can enter the inner perimeter.
5409 5505 * If we want exclusive access we wait until sq_count is 0.
5410 5506 * We have to do this before entering the outer perimeter in order
5411 5507 * to preserve put/close message ordering.
5412 5508 */
5413 5509 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
5414 5510 sq->sq_flags = flags | SQ_WANTWAKEUP;
5415 5511 if (!(type & c_inner)) {
5416 5512 SQ_PUTLOCKS_EXIT(sq);
5417 5513 }
5418 5514 cv_wait(&sq->sq_wait, SQLOCK(sq));
5419 5515 if (!(type & c_inner)) {
5420 5516 count = sq->sq_count;
5421 5517 SQ_PUTLOCKS_ENTER(sq);
5422 5518 SUM_SQ_PUTCOUNTS(sq, count);
5423 5519 }
5424 5520 flags = sq->sq_flags;
5425 5521 }
5426 5522
5427 5523 if (!(type & c_inner)) {
5428 5524 ASSERT(sq->sq_needexcl > 0);
5429 5525 sq->sq_needexcl--;
5430 5526 if (sq->sq_needexcl == 0) {
5431 5527 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5432 5528 }
5433 5529 }
5434 5530
5435 5531 /* Check if we need to enter the outer perimeter */
5436 5532 if (!(type & c_outer)) {
5437 5533 /*
5438 5534 * We have to enter the outer perimeter exclusively before
5439 5535 * we can increment sq_count to avoid deadlock. This implies
5440 5536 * that we have to re-check sq_flags and sq_count.
5441 5537 *
5442 5538 		 * Is it possible to have c_inner set when c_outer is not set?
5443 5539 */
5444 5540 if (!(type & c_inner)) {
5445 5541 SQ_PUTLOCKS_EXIT(sq);
5446 5542 }
5447 5543 mutex_exit(SQLOCK(sq));
5448 5544 outer_enter(sq->sq_outer, SQ_GOAWAY);
5449 5545 mutex_enter(SQLOCK(sq));
5450 5546 flags = sq->sq_flags;
5451 5547 /*
5452 5548 		 * There should be no need to recheck sq_putcounts
5453 5549 * because outer_enter() has already waited for them to clear
5454 5550 * after setting SQ_WRITER.
5455 5551 */
5456 5552 count = sq->sq_count;
5457 5553 #ifdef DEBUG
5458 5554 /*
5459 5555 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
5460 5556 * of doing an ASSERT internally. Others should do
5461 5557 * something like
5462 5558 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
5463 5559 * without the need to #ifdef DEBUG it.
5464 5560 */
5465 5561 SUMCHECK_SQ_PUTCOUNTS(sq, 0);
5466 5562 #endif
5467 5563 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
5468 5564 (!(type & c_inner) && count != 0)) {
5469 5565 sq->sq_flags = flags | SQ_WANTWAKEUP;
5470 5566 cv_wait(&sq->sq_wait, SQLOCK(sq));
5471 5567 count = sq->sq_count;
5472 5568 flags = sq->sq_flags;
5473 5569 }
5474 5570 }
5475 5571
5476 5572 sq->sq_count++;
5477 5573 ASSERT(sq->sq_count != 0); /* Wraparound */
5478 5574 if (!(type & c_inner)) {
5479 5575 /* Exclusive entry */
5480 5576 ASSERT(sq->sq_count == 1);
5481 5577 sq->sq_flags |= SQ_EXCL;
5482 5578 if (type & c_outer) {
5483 5579 SQ_PUTLOCKS_EXIT(sq);
5484 5580 }
5485 5581 }
5486 5582 mutex_exit(SQLOCK(sq));
5487 5583 }
5488 5584
5489 5585 /*
5490 5586  * Leave a syncq. Announce to the framework that closes may proceed.
5491 5587 * c_inner and c_outer specify which concurrency bits to check.
5492 5588 *
5493 5589 * Must never be called from driver or module put entry point.
5494 5590 *
5495 5591 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5496 5592 * sq_putlocks are used.
5497 5593 */
5498 5594 void
5499 5595 leavesq(syncq_t *sq, int entrypoint)
5500 5596 {
5501 5597 uint16_t flags;
5502 5598 uint16_t type;
5503 5599 uint_t c_outer = entrypoint & SQ_CO;
5504 5600 #ifdef DEBUG
5505 5601 uint_t c_inner = entrypoint & SQ_CI;
5506 5602 #endif
5507 5603
5508 5604 /*
5509 5605 * Decrement ref count, drain the syncq if possible, and wake up
5510 5606 * any waiting close.
5511 5607 */
5512 5608 ASSERT(sq);
5513 5609 ASSERT(c_inner && c_outer);
5514 5610 mutex_enter(SQLOCK(sq));
5515 5611 flags = sq->sq_flags;
5516 5612 type = sq->sq_type;
5517 5613 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {
5518 5614
5519 5615 if (flags & SQ_WANTWAKEUP) {
5520 5616 flags &= ~SQ_WANTWAKEUP;
5521 5617 cv_broadcast(&sq->sq_wait);
5522 5618 }
5523 5619 if (flags & SQ_WANTEXWAKEUP) {
5524 5620 flags &= ~SQ_WANTEXWAKEUP;
5525 5621 cv_broadcast(&sq->sq_exitwait);
5526 5622 }
5527 5623
5528 5624 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
5529 5625 /*
5530 5626 * The syncq needs to be drained. "Exit" the syncq
5531 5627 * before calling drain_syncq.
5532 5628 */
5533 5629 ASSERT(sq->sq_count != 0);
5534 5630 sq->sq_count--;
5535 5631 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5536 5632 sq->sq_flags = flags & ~SQ_EXCL;
5537 5633 drain_syncq(sq);
5538 5634 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
5539 5635 /* Check if we need to exit the outer perimeter */
5540 5636 /* XXX will this ever be true? */
5541 5637 if (!(type & c_outer))
5542 5638 outer_exit(sq->sq_outer);
5543 5639 return;
5544 5640 }
5545 5641 }
5546 5642 ASSERT(sq->sq_count != 0);
5547 5643 sq->sq_count--;
5548 5644 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5549 5645 sq->sq_flags = flags & ~SQ_EXCL;
5550 5646 mutex_exit(SQLOCK(sq));
5551 5647
5552 5648 /* Check if we need to exit the outer perimeter */
5553 5649 if (!(sq->sq_type & c_outer))
5554 5650 outer_exit(sq->sq_outer);
5555 5651 }
5556 5652
5557 5653 /*
5558 5654 * Prevent q_next from changing in this stream by incrementing sq_count.
5559 5655 *
5560 5656 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5561 5657 * sq_putlocks are used.
5562 5658 */
5563 5659 void
5564 5660 claimq(queue_t *qp)
5565 5661 {
5566 5662 syncq_t *sq = qp->q_syncq;
5567 5663
5568 5664 mutex_enter(SQLOCK(sq));
5569 5665 sq->sq_count++;
5570 5666 ASSERT(sq->sq_count != 0); /* Wraparound */
5571 5667 mutex_exit(SQLOCK(sq));
5572 5668 }
5573 5669
5574 5670 /*
5575 5671 * Undo claimq.
5576 5672 *
5577 5673 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5578 5674 * sq_putlocks are used.
5579 5675 */
5580 5676 void
5581 5677 releaseq(queue_t *qp)
5582 5678 {
5583 5679 syncq_t *sq = qp->q_syncq;
5584 5680 uint16_t flags;
5585 5681
5586 5682 mutex_enter(SQLOCK(sq));
5587 5683 ASSERT(sq->sq_count > 0);
5588 5684 sq->sq_count--;
5589 5685
5590 5686 flags = sq->sq_flags;
5591 5687 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
5592 5688 if (flags & SQ_WANTWAKEUP) {
5593 5689 flags &= ~SQ_WANTWAKEUP;
5594 5690 cv_broadcast(&sq->sq_wait);
5595 5691 }
5596 5692 sq->sq_flags = flags;
5597 5693 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5598 5694 /*
5599 5695 * To prevent potential recursive invocation of
5600 5696 * drain_syncq we do not call drain_syncq if count is
5601 5697 * non-zero.
5602 5698 */
5603 5699 if (sq->sq_count == 0) {
5604 5700 drain_syncq(sq);
5605 5701 return;
5606 5702 } else
5607 5703 sqenable(sq);
5608 5704 }
5609 5705 }
5610 5706 mutex_exit(SQLOCK(sq));
5611 5707 }
5612 5708
5613 5709 /*
5614 5710 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5615 5711 */
5616 5712 void
5617 5713 claimstr(queue_t *qp)
5618 5714 {
5619 5715 struct stdata *stp = STREAM(qp);
5620 5716
5621 5717 mutex_enter(&stp->sd_reflock);
5622 5718 stp->sd_refcnt++;
5623 5719 ASSERT(stp->sd_refcnt != 0); /* Wraparound */
5624 5720 mutex_exit(&stp->sd_reflock);
5625 5721 }
5626 5722
5627 5723 /*
5628 5724 * Undo claimstr.
5629 5725 */
5630 5726 void
5631 5727 releasestr(queue_t *qp)
5632 5728 {
5633 5729 struct stdata *stp = STREAM(qp);
5634 5730
5635 5731 mutex_enter(&stp->sd_reflock);
5636 5732 ASSERT(stp->sd_refcnt != 0);
5637 5733 if (--stp->sd_refcnt == 0)
5638 5734 cv_broadcast(&stp->sd_refmonitor);
5639 5735 mutex_exit(&stp->sd_reflock);
5640 5736 }
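
claimstr() and releasestr() form a simple reference-count gate: the last releaser broadcasts on sd_refmonitor so that a thread waiting for the stream's claims to drain (as strlock() does) can proceed. A minimal pthreads analogue with hypothetical names, including an explicit waiter side for illustration:

    #include <pthread.h>

    typedef struct {
            pthread_mutex_t reflock;        /* stands in for sd_reflock */
            pthread_cond_t  refmonitor;     /* stands in for sd_refmonitor */
            unsigned        refcnt;         /* stands in for sd_refcnt */
    } fake_str_t;

    void
    claim(fake_str_t *s)
    {
            pthread_mutex_lock(&s->reflock);
            s->refcnt++;
            pthread_mutex_unlock(&s->reflock);
    }

    void
    release(fake_str_t *s)
    {
            pthread_mutex_lock(&s->reflock);
            /* The last claim out wakes anyone waiting for quiescence. */
            if (--s->refcnt == 0)
                    pthread_cond_broadcast(&s->refmonitor);
            pthread_mutex_unlock(&s->reflock);
    }

    /* Wait until all claims are gone (the consumer of the broadcast). */
    void
    wait_for_quiesce(fake_str_t *s)
    {
            pthread_mutex_lock(&s->reflock);
            while (s->refcnt != 0)
                    pthread_cond_wait(&s->refmonitor, &s->reflock);
            pthread_mutex_unlock(&s->reflock);
    }
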
5641 5737
5642 5738 static syncq_t *
5643 5739 new_syncq(void)
5644 5740 {
5645 5741 return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5646 5742 }
5647 5743
5648 5744 static void
5649 5745 free_syncq(syncq_t *sq)
5650 5746 {
5651 5747 ASSERT(sq->sq_head == NULL);
5652 5748 ASSERT(sq->sq_outer == NULL);
5653 5749 ASSERT(sq->sq_callbpend == NULL);
5654 5750 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5655 5751 (sq->sq_onext == sq && sq->sq_oprev == sq));
5656 5752
5657 5753 if (sq->sq_ciputctrl != NULL) {
5658 5754 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5659 5755 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5660 5756 sq->sq_nciputctrl, 0);
5661 5757 ASSERT(ciputctrl_cache != NULL);
5662 5758 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5663 5759 }
5664 5760
5665 5761 sq->sq_tail = NULL;
5666 5762 sq->sq_evhead = NULL;
5667 5763 sq->sq_evtail = NULL;
5668 5764 sq->sq_ciputctrl = NULL;
5669 5765 sq->sq_nciputctrl = 0;
5670 5766 sq->sq_count = 0;
5671 5767 sq->sq_rmqcount = 0;
5672 5768 sq->sq_callbflags = 0;
5673 5769 sq->sq_cancelid = 0;
5674 5770 sq->sq_next = NULL;
5675 5771 sq->sq_needexcl = 0;
5676 5772 sq->sq_svcflags = 0;
5677 5773 sq->sq_nqueues = 0;
5678 5774 sq->sq_pri = 0;
5679 5775 sq->sq_onext = NULL;
5680 5776 sq->sq_oprev = NULL;
5681 5777 sq->sq_flags = 0;
5682 5778 sq->sq_type = 0;
5683 5779 sq->sq_servcount = 0;
5684 5780
5685 5781 kmem_cache_free(syncq_cache, sq);
5686 5782 }
5687 5783
5688 5784 /* Outer perimeter code */
5689 5785
5690 5786 /*
5691 5787 * The outer syncq uses the fields and flags in the syncq slightly
5692 5788 * differently from the inner syncqs.
5693 5789 * sq_count Incremented when there are pending or running
5694 5790 * writers at the outer perimeter to prevent the set of
5695 5791 * inner syncqs that belong to the outer perimeter from
5696 5792 * changing.
5697 5793 * sq_head/tail List of deferred qwriter(OUTER) operations.
5698 5794 *
5699 5795  * SQ_BLOCKED Set to prevent traversing of sq_next, sq_prev while
5700 5796 * inner syncqs are added to or removed from the
5701 5797 * outer perimeter.
5702 5798 * SQ_QUEUED sq_head/tail has messages or events queued.
5703 5799 *
5704 5800 * SQ_WRITER A thread is currently traversing all the inner syncqs
5705 5801 * setting the SQ_WRITER flag.
5706 5802 */
5707 5803
5708 5804 /*
5709 5805 * Get write access at the outer perimeter.
5710 5806 * Note that read access is done by entersq, putnext, and put by simply
5711 5807 * incrementing sq_count in the inner syncq.
5712 5808 *
5713 5809 * Waits until "flags" is no longer set in the outer to prevent multiple
5714 5810 * threads from having write access at the same time. SQ_WRITER has to be part
5715 5811 * of "flags".
5716 5812 *
5717 5813 * Increases sq_count on the outer syncq to keep away outer_insert/remove
5718 5814 * until the outer_exit is finished.
5719 5815 *
5720 5816 * outer_enter is vulnerable to starvation since it does not prevent new
5721 5817 * threads from entering the inner syncqs while it is waiting for sq_count to
5722 5818 * go to zero.
5723 5819 */
5724 5820 void
5725 5821 outer_enter(syncq_t *outer, uint16_t flags)
5726 5822 {
5727 5823 syncq_t *sq;
5728 5824 int wait_needed;
5729 5825 uint16_t count;
5730 5826
5731 5827 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5732 5828 outer->sq_oprev != NULL);
5733 5829 ASSERT(flags & SQ_WRITER);
5734 5830
5735 5831 retry:
5736 5832 mutex_enter(SQLOCK(outer));
5737 5833 while (outer->sq_flags & flags) {
5738 5834 outer->sq_flags |= SQ_WANTWAKEUP;
5739 5835 cv_wait(&outer->sq_wait, SQLOCK(outer));
5740 5836 }
5741 5837
5742 5838 ASSERT(!(outer->sq_flags & SQ_WRITER));
5743 5839 outer->sq_flags |= SQ_WRITER;
5744 5840 outer->sq_count++;
5745 5841 ASSERT(outer->sq_count != 0); /* wraparound */
5746 5842 wait_needed = 0;
5747 5843 /*
5748 5844 * Set SQ_WRITER on all the inner syncqs while holding
5749 5845 * the SQLOCK on the outer syncq. This ensures that the changing
5750 5846 * of SQ_WRITER is atomic under the outer SQLOCK.
5751 5847 */
5752 5848 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5753 5849 mutex_enter(SQLOCK(sq));
5754 5850 count = sq->sq_count;
5755 5851 SQ_PUTLOCKS_ENTER(sq);
5756 5852 sq->sq_flags |= SQ_WRITER;
5757 5853 SUM_SQ_PUTCOUNTS(sq, count);
5758 5854 if (count != 0)
5759 5855 wait_needed = 1;
5760 5856 SQ_PUTLOCKS_EXIT(sq);
5761 5857 mutex_exit(SQLOCK(sq));
5762 5858 }
5763 5859 mutex_exit(SQLOCK(outer));
5764 5860
5765 5861 /*
5766 5862 * Get everybody out of the syncqs sequentially.
5767 5863 * Note that we don't actually need to acquire the PUTLOCKS, since
5768 5864 	 * we have already cleared the fastbit and set SQ_WRITER. By
5769 5865 	 * definition, the count cannot increase since putnext will
5770 5866 * take the slowlock path (and the purpose of acquiring the
5771 5867 * putlocks was to make sure it didn't increase while we were
5772 5868 * waiting).
5773 5869 *
5774 5870 * Note that we still acquire the PUTLOCKS to be safe.
5775 5871 */
5776 5872 if (wait_needed) {
5777 5873 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5778 5874 mutex_enter(SQLOCK(sq));
5779 5875 count = sq->sq_count;
5780 5876 SQ_PUTLOCKS_ENTER(sq);
5781 5877 SUM_SQ_PUTCOUNTS(sq, count);
5782 5878 while (count != 0) {
5783 5879 sq->sq_flags |= SQ_WANTWAKEUP;
5784 5880 SQ_PUTLOCKS_EXIT(sq);
5785 5881 cv_wait(&sq->sq_wait, SQLOCK(sq));
5786 5882 count = sq->sq_count;
5787 5883 SQ_PUTLOCKS_ENTER(sq);
5788 5884 SUM_SQ_PUTCOUNTS(sq, count);
5789 5885 }
5790 5886 SQ_PUTLOCKS_EXIT(sq);
5791 5887 mutex_exit(SQLOCK(sq));
5792 5888 }
5793 5889 /*
5794 5890 * Verify that none of the flags got set while we
5795 5891 * were waiting for the sq_counts to drop.
5796 5892 * If this happens we exit and retry entering the
5797 5893 * outer perimeter.
5798 5894 */
5799 5895 mutex_enter(SQLOCK(outer));
5800 5896 if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5801 5897 mutex_exit(SQLOCK(outer));
5802 5898 outer_exit(outer);
5803 5899 goto retry;
5804 5900 }
5805 5901 mutex_exit(SQLOCK(outer));
5806 5902 }
5807 5903 }
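
Structurally, outer_enter() is a two-phase protocol: first mark every inner syncq with SQ_WRITER so their counts can only go down, then wait for each count to drain, and finally re-check for interference. A condensed pthreads sketch of just those two loops, with hypothetical types (the retry-on-flag-change logic is omitted):

    #include <pthread.h>

    #define NINNER          4
    #define F_WRITER        0x1

    typedef struct {
            pthread_mutex_t lock;
            pthread_cond_t  wait;
            unsigned        flags;
            unsigned        count;
    } inner_t;

    /*
     * Phase 1: set the writer flag on every inner object so the counts
     * can only decrease; phase 2: wait for each count to drain.
     */
    void
    enter_all(inner_t inner[NINNER])
    {
            int i, wait_needed = 0;

            for (i = 0; i < NINNER; i++) {
                    pthread_mutex_lock(&inner[i].lock);
                    inner[i].flags |= F_WRITER;
                    if (inner[i].count != 0)
                            wait_needed = 1;
                    pthread_mutex_unlock(&inner[i].lock);
            }
            if (!wait_needed)
                    return;
            for (i = 0; i < NINNER; i++) {
                    pthread_mutex_lock(&inner[i].lock);
                    while (inner[i].count != 0)
                            pthread_cond_wait(&inner[i].wait, &inner[i].lock);
                    pthread_mutex_unlock(&inner[i].lock);
            }
    }
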
5808 5904
5809 5905 /*
5810 5906 * Drop the write access at the outer perimeter.
5811 5907 * Read access is dropped implicitly (by putnext, put, and leavesq) by
5812 5908 * decrementing sq_count.
5813 5909 */
5814 5910 void
5815 5911 outer_exit(syncq_t *outer)
5816 5912 {
5817 5913 syncq_t *sq;
5818 5914 int drain_needed;
5819 5915 uint16_t flags;
5820 5916
5821 5917 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5822 5918 outer->sq_oprev != NULL);
5823 5919 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5824 5920
5825 5921 /*
5826 5922 * Atomically (from the perspective of threads calling become_writer)
5827 5923 * drop the write access at the outer perimeter by holding
5828 5924 * SQLOCK(outer) across all the dropsq calls and the resetting of
5829 5925 * SQ_WRITER.
5830 5926 * This defines a locking order between the outer perimeter
5831 5927 * SQLOCK and the inner perimeter SQLOCKs.
5832 5928 */
5833 5929 mutex_enter(SQLOCK(outer));
5834 5930 flags = outer->sq_flags;
5835 5931 ASSERT(outer->sq_flags & SQ_WRITER);
5836 5932 if (flags & SQ_QUEUED) {
5837 5933 write_now(outer);
5838 5934 flags = outer->sq_flags;
5839 5935 }
5840 5936
5841 5937 /*
5842 5938 * sq_onext is stable since sq_count has not yet been decreased.
5843 5939 * Reset the SQ_WRITER flags in all syncqs.
5844 5940 * After dropping SQ_WRITER on the outer syncq we empty all the
5845 5941 * inner syncqs.
5846 5942 */
5847 5943 drain_needed = 0;
5848 5944 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5849 5945 drain_needed += dropsq(sq, SQ_WRITER);
5850 5946 ASSERT(!(outer->sq_flags & SQ_QUEUED));
5851 5947 flags &= ~SQ_WRITER;
5852 5948 if (drain_needed) {
5853 5949 outer->sq_flags = flags;
5854 5950 mutex_exit(SQLOCK(outer));
5855 5951 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5856 5952 emptysq(sq);
5857 5953 mutex_enter(SQLOCK(outer));
5858 5954 flags = outer->sq_flags;
5859 5955 }
5860 5956 if (flags & SQ_WANTWAKEUP) {
5861 5957 flags &= ~SQ_WANTWAKEUP;
5862 5958 cv_broadcast(&outer->sq_wait);
5863 5959 }
5864 5960 outer->sq_flags = flags;
5865 5961 ASSERT(outer->sq_count > 0);
5866 5962 outer->sq_count--;
5867 5963 mutex_exit(SQLOCK(outer));
5868 5964 }
5869 5965
5870 5966 /*
5871 5967 * Add another syncq to an outer perimeter.
5872 5968 * Block out all other access to the outer perimeter while it is being
5873 5969 * changed using blocksq.
5874 5970 * Assumes that the caller has *not* done an outer_enter.
5875 5971 *
5876 5972 * Vulnerable to starvation in blocksq.
5877 5973 */
5878 5974 static void
5879 5975 outer_insert(syncq_t *outer, syncq_t *sq)
5880 5976 {
5881 5977 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5882 5978 outer->sq_oprev != NULL);
5883 5979 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5884 5980 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */
5885 5981
5886 5982 /* Get exclusive access to the outer perimeter list */
5887 5983 blocksq(outer, SQ_BLOCKED, 0);
5888 5984 ASSERT(outer->sq_flags & SQ_BLOCKED);
5889 5985 ASSERT(!(outer->sq_flags & SQ_WRITER));
5890 5986
5891 5987 mutex_enter(SQLOCK(sq));
5892 5988 sq->sq_outer = outer;
5893 5989 outer->sq_onext->sq_oprev = sq;
5894 5990 sq->sq_onext = outer->sq_onext;
5895 5991 outer->sq_onext = sq;
5896 5992 sq->sq_oprev = outer;
5897 5993 mutex_exit(SQLOCK(sq));
5898 5994 unblocksq(outer, SQ_BLOCKED, 1);
5899 5995 }
5900 5996
5901 5997 /*
5902 5998 * Remove a syncq from an outer perimeter.
5903 5999 * Block out all other access to the outer perimeter while it is being
5904 6000 * changed using blocksq.
5905 6001 * Assumes that the caller has *not* done an outer_enter.
5906 6002 *
5907 6003 * Vulnerable to starvation in blocksq.
5908 6004 */
5909 6005 static void
5910 6006 outer_remove(syncq_t *outer, syncq_t *sq)
5911 6007 {
5912 6008 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5913 6009 outer->sq_oprev != NULL);
5914 6010 ASSERT(sq->sq_outer == outer);
5915 6011
5916 6012 /* Get exclusive access to the outer perimeter list */
5917 6013 blocksq(outer, SQ_BLOCKED, 0);
5918 6014 ASSERT(outer->sq_flags & SQ_BLOCKED);
5919 6015 ASSERT(!(outer->sq_flags & SQ_WRITER));
5920 6016
5921 6017 mutex_enter(SQLOCK(sq));
5922 6018 sq->sq_outer = NULL;
5923 6019 sq->sq_onext->sq_oprev = sq->sq_oprev;
5924 6020 sq->sq_oprev->sq_onext = sq->sq_onext;
5925 6021 sq->sq_oprev = sq->sq_onext = NULL;
5926 6022 mutex_exit(SQLOCK(sq));
5927 6023 unblocksq(outer, SQ_BLOCKED, 1);
5928 6024 }
5929 6025
5930 6026 /*
5931 6027 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5932 6028 * If this is the first callback for this outer perimeter then add
5933 6029 * this outer perimeter to the list of outer perimeters that
5934 6030 * the qwriter_outer_thread will process.
5935 6031 *
5936 6032 * Increments sq_count in the outer syncq to prevent the membership
5937 6033  * of the outer perimeter (in terms of inner syncqs) from changing while
5938 6034 * the callback is pending.
5939 6035 */
5940 6036 static void
5941 6037 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5942 6038 {
5943 6039 ASSERT(MUTEX_HELD(SQLOCK(outer)));
5944 6040
5945 6041 mp->b_prev = (mblk_t *)func;
5946 6042 mp->b_queue = q;
5947 6043 mp->b_next = NULL;
5948 6044 outer->sq_count++; /* Decremented when dequeued */
5949 6045 ASSERT(outer->sq_count != 0); /* Wraparound */
5950 6046 if (outer->sq_evhead == NULL) {
5951 6047 /* First message. */
5952 6048 outer->sq_evhead = outer->sq_evtail = mp;
5953 6049 outer->sq_flags |= SQ_EVENTS;
5954 6050 mutex_exit(SQLOCK(outer));
5955 6051 STRSTAT(qwr_outer);
5956 6052 (void) taskq_dispatch(streams_taskq,
5957 6053 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5958 6054 } else {
5959 6055 ASSERT(outer->sq_flags & SQ_EVENTS);
5960 6056 outer->sq_evtail->b_next = mp;
5961 6057 outer->sq_evtail = mp;
5962 6058 mutex_exit(SQLOCK(outer));
5963 6059 }
5964 6060 }
5965 6061
5966 6062 /*
5967 6063  * Try to upgrade to write access at the outer perimeter. If this cannot
5968 6064  * be done without blocking then queue the callback to be done
5969 6065 * by the qwriter_outer_thread.
5970 6066 *
5971 6067 * This routine can only be called from put or service procedures plus
5972 6068 * asynchronous callback routines that have properly entered the queue (with
5973 6069 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
5974 6070 * associated with q.
5975 6071 */
5976 6072 void
5977 6073 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5978 6074 {
5979 6075 syncq_t *osq, *sq, *outer;
5980 6076 int failed;
5981 6077 uint16_t flags;
5982 6078
5983 6079 osq = q->q_syncq;
5984 6080 outer = osq->sq_outer;
5985 6081 if (outer == NULL)
5986 6082 panic("qwriter(PERIM_OUTER): no outer perimeter");
5987 6083 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5988 6084 outer->sq_oprev != NULL);
5989 6085
5990 6086 mutex_enter(SQLOCK(outer));
5991 6087 flags = outer->sq_flags;
5992 6088 /*
5993 6089 * If some thread is traversing sq_next, or if we are blocked by
5994 6090 	 * outer_insert or outer_remove, or if we already have queued
5995 6091 * callbacks, then queue this callback for later processing.
5996 6092 *
5997 6093 	 * Also queue the qwriter for an interrupt thread in order
5998 6094 	 * to reduce the time spent running at high IPL and to make it
5999 6095 	 * possible to identify that there are events.
6000 6096 */
6001 6097 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
6002 6098 /*
6003 6099 * Queue the become_writer request.
6004 6100 * The queueing is atomic under SQLOCK(outer) in order
6005 6101 * to synchronize with outer_exit.
6006 6102 * queue_writer will drop the outer SQLOCK
6007 6103 */
6008 6104 if (flags & SQ_BLOCKED) {
6009 6105 /* Must set SQ_WRITER on inner perimeter */
6010 6106 mutex_enter(SQLOCK(osq));
6011 6107 osq->sq_flags |= SQ_WRITER;
6012 6108 mutex_exit(SQLOCK(osq));
6013 6109 } else {
6014 6110 if (!(flags & SQ_WRITER)) {
6015 6111 /*
6016 6112 * The outer could have been SQ_BLOCKED thus
6017 6113 * SQ_WRITER might not be set on the inner.
6018 6114 */
6019 6115 mutex_enter(SQLOCK(osq));
6020 6116 osq->sq_flags |= SQ_WRITER;
6021 6117 mutex_exit(SQLOCK(osq));
6022 6118 }
6023 6119 ASSERT(osq->sq_flags & SQ_WRITER);
6024 6120 }
6025 6121 queue_writer(outer, func, q, mp);
6026 6122 return;
6027 6123 }
6028 6124 /*
6029 6125 * We are half-way to exclusive access to the outer perimeter.
6030 6126 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6031 6127 * while the inner syncqs are traversed.
6032 6128 */
6033 6129 outer->sq_count++;
6034 6130 ASSERT(outer->sq_count != 0); /* wraparound */
6035 6131 flags |= SQ_WRITER;
6036 6132 /*
6037 6133 * Check if we can run the function immediately. Mark all
6038 6134 * syncqs with the writer flag to prevent new entries into
6039 6135 * put and service procedures.
6040 6136 *
6041 6137 * Set SQ_WRITER on all the inner syncqs while holding
6042 6138 * the SQLOCK on the outer syncq. This ensures that the changing
6043 6139 * of SQ_WRITER is atomic under the outer SQLOCK.
6044 6140 */
6045 6141 failed = 0;
6046 6142 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6047 6143 uint16_t count;
6048 6144 uint_t maxcnt = (sq == osq) ? 1 : 0;
6049 6145
6050 6146 mutex_enter(SQLOCK(sq));
6051 6147 count = sq->sq_count;
6052 6148 SQ_PUTLOCKS_ENTER(sq);
6053 6149 SUM_SQ_PUTCOUNTS(sq, count);
6054 6150 if (sq->sq_count > maxcnt)
6055 6151 failed = 1;
6056 6152 sq->sq_flags |= SQ_WRITER;
6057 6153 SQ_PUTLOCKS_EXIT(sq);
6058 6154 mutex_exit(SQLOCK(sq));
6059 6155 }
6060 6156 if (failed) {
6061 6157 /*
6062 6158 * Some other thread has a read claim on the outer perimeter.
6063 6159 * Queue the callback for deferred processing.
6064 6160 *
6065 6161 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6066 6162 * so that other qwriter(OUTER) calls will queue their
6067 6163 * callbacks as well. queue_writer increments sq_count so we
6068 6164 		 * decrement to compensate for our increment.
6069 6165 *
6070 6166 * Dropping SQ_WRITER enables the writer thread to work
6071 6167 * on this outer perimeter.
6072 6168 */
6073 6169 outer->sq_flags = flags;
6074 6170 queue_writer(outer, func, q, mp);
6075 6171 		/* queue_writer dropped the lock */
6076 6172 mutex_enter(SQLOCK(outer));
6077 6173 ASSERT(outer->sq_count > 0);
6078 6174 outer->sq_count--;
6079 6175 ASSERT(outer->sq_flags & SQ_WRITER);
6080 6176 flags = outer->sq_flags;
6081 6177 flags &= ~SQ_WRITER;
6082 6178 if (flags & SQ_WANTWAKEUP) {
6083 6179 flags &= ~SQ_WANTWAKEUP;
6084 6180 cv_broadcast(&outer->sq_wait);
6085 6181 }
6086 6182 outer->sq_flags = flags;
6087 6183 mutex_exit(SQLOCK(outer));
6088 6184 return;
6089 6185 } else {
6090 6186 outer->sq_flags = flags;
6091 6187 mutex_exit(SQLOCK(outer));
6092 6188 }
6093 6189
6094 6190 /* Can run it immediately */
6095 6191 (*func)(q, mp);
6096 6192
6097 6193 outer_exit(outer);
6098 6194 }
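
qwriter_outer() is thus a "try to upgrade, otherwise defer" protocol: if every inner syncq is idle (allowing one claim on the caller's own syncq), the callback runs immediately; otherwise it is queued for the writer thread. A stripped-down sketch of that decision, with hypothetical types and a caller-supplied deferral hook standing in for queue_writer():

    #include <pthread.h>

    #define NINNER          4
    #define F_WRITER        0x1

    typedef struct {
            pthread_mutex_t lock;
            unsigned        count;
            unsigned        flags;
    } inner_t;

    typedef void (writer_cb_t)(void *);

    /*
     * Try to run "func" with write access over all inner objects;
     * "mine" is the one the caller already holds a single claim on,
     * so it is allowed a count of 1. On failure the writer flags are
     * left set (as in qwriter_outer()) and the callback is deferred.
     */
    void
    try_writer(inner_t inner[NINNER], inner_t *mine, writer_cb_t *func,
        void *arg, void (*defer)(writer_cb_t *, void *))
    {
            int i, failed = 0;

            for (i = 0; i < NINNER; i++) {
                    unsigned maxcnt = (&inner[i] == mine) ? 1 : 0;

                    pthread_mutex_lock(&inner[i].lock);
                    if (inner[i].count > maxcnt)
                            failed = 1;
                    inner[i].flags |= F_WRITER;
                    pthread_mutex_unlock(&inner[i].lock);
            }
            if (failed) {
                    defer(func, arg);       /* run later, like queue_writer() */
                    return;
            }
            func(arg);                      /* exclusive; run immediately */
    }
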
6099 6195
6100 6196 /*
6101 6197 * Dequeue all writer callbacks from the outer perimeter and run them.
6102 6198 */
6103 6199 static void
6104 6200 write_now(syncq_t *outer)
6105 6201 {
6106 6202 mblk_t *mp;
6107 6203 queue_t *q;
6108 6204 void (*func)();
6109 6205
6110 6206 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6111 6207 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6112 6208 outer->sq_oprev != NULL);
6113 6209 while ((mp = outer->sq_evhead) != NULL) {
6114 6210 /*
6115 6211 		 * Queues cannot be placed on the queuelist on the outer
6116 6212 * perimeter.
6117 6213 */
6118 6214 ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6119 6215 ASSERT((outer->sq_flags & SQ_EVENTS));
6120 6216
6121 6217 outer->sq_evhead = mp->b_next;
6122 6218 if (outer->sq_evhead == NULL) {
6123 6219 outer->sq_evtail = NULL;
6124 6220 outer->sq_flags &= ~SQ_EVENTS;
6125 6221 }
6126 6222 ASSERT(outer->sq_count != 0);
6127 6223 outer->sq_count--; /* Incremented when enqueued. */
6128 6224 mutex_exit(SQLOCK(outer));
6129 6225 /*
6130 6226 * Drop the message if the queue is closing.
6131 6227 * Make sure that the queue is "claimed" when the callback
6132 6228 * is run in order to satisfy various ASSERTs.
6133 6229 */
6134 6230 q = mp->b_queue;
6135 6231 func = (void (*)())mp->b_prev;
6136 6232 ASSERT(func != NULL);
6137 6233 mp->b_next = mp->b_prev = NULL;
6138 6234 if (q->q_flag & QWCLOSE) {
6139 6235 freemsg(mp);
6140 6236 } else {
6141 6237 claimq(q);
6142 6238 (*func)(q, mp);
6143 6239 releaseq(q);
6144 6240 }
6145 6241 mutex_enter(SQLOCK(outer));
6146 6242 }
6147 6243 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6148 6244 }
6149 6245
6150 6246 /*
6151 6247 * The list of messages on the inner syncq is effectively hashed
6152 6248 * by destination queue. These destination queues are doubly
6153 6249 * linked lists (hopefully) in priority order. Messages are then
6154 6250 * put on the queue referenced by the q_sqhead/q_sqtail elements.
6155 6251 * Additional messages are linked together by the b_next/b_prev
6156 6252 * elements in the mblk, with (similar to putq()) the first message
6157 6253 * having a NULL b_prev and the last message having a NULL b_next.
6158 6254 *
6159 6255 * Events, such as qwriter callbacks, are put onto a list in FIFO
6160 6256 * order referenced by sq_evhead, and sq_evtail. This is a singly
6161 6257 * linked list, and messages here MUST be processed in the order queued.
6162 6258 */
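
In data-structure terms, the layout described above looks roughly like the following simplified sketch (illustrative only; the real definitions live in <sys/stream.h>):

    typedef struct fake_mblk {
            struct fake_mblk        *b_next;        /* next msg for same queue */
            struct fake_mblk        *b_prev;        /* previous msg; NULL at head */
            struct fake_queue       *b_queue;       /* destination queue */
    } fake_mblk_t;

    typedef struct fake_queue {
            fake_mblk_t             *q_sqhead;      /* first queued message */
            fake_mblk_t             *q_sqtail;      /* last queued message */
            struct fake_queue       *q_sqnext;      /* next queue on the syncq, */
            struct fake_queue       *q_sqprev;      /*  in priority order */
    } fake_queue_t;

    typedef struct fake_syncq {
            fake_queue_t            *sq_head;       /* queues with pending msgs */
            fake_queue_t            *sq_tail;
            fake_mblk_t             *sq_evhead;     /* FIFO of qwriter events, */
            fake_mblk_t             *sq_evtail;     /*  singly linked via b_next */
    } fake_syncq_t;
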
6163 6259
6164 6260 /*
6165 6261 * Run the events on the syncq event list (sq_evhead).
6166 6262 * Assumes there is only one claim on the syncq, it is
6167 6263 * already exclusive (SQ_EXCL set), and the SQLOCK held.
6168 6264 * Messages here are processed in order, with the SQ_EXCL bit
6169 6265 * held all the way through till the last message is processed.
6170 6266 */
6171 6267 void
6172 6268 sq_run_events(syncq_t *sq)
6173 6269 {
6174 6270 mblk_t *bp;
6175 6271 queue_t *qp;
6176 6272 uint16_t flags = sq->sq_flags;
6177 6273 void (*func)();
6178 6274
6179 6275 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6180 6276 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6181 6277 sq->sq_oprev == NULL) ||
6182 6278 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6183 6279 sq->sq_oprev != NULL));
6184 6280
6185 6281 ASSERT(flags & SQ_EXCL);
6186 6282 ASSERT(sq->sq_count == 1);
6187 6283
6188 6284 	/*
6189 6285 	 * We need to process all of the events on this list. It is
6190 6286 	 * possible that new events will be added while we are away
6191 6287 	 * processing a callback, so on every loop we re-read sq_evhead
6192 6288 	 * and start back at the beginning of the list.
6193 6289 	 */
6199 6295 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6200 6296 ASSERT(bp->b_queue->q_syncq == sq);
6201 6297 ASSERT(sq->sq_flags & SQ_EVENTS);
6202 6298
6203 6299 qp = bp->b_queue;
6204 6300 func = (void (*)())bp->b_prev;
6205 6301 ASSERT(func != NULL);
6206 6302
6207 6303 /*
6208 6304 * Messages from the event queue must be taken off in
6209 6305 * FIFO order.
6210 6306 */
6211 6307 ASSERT(sq->sq_evhead == bp);
6212 6308 sq->sq_evhead = bp->b_next;
6213 6309
6214 6310 if (bp->b_next == NULL) {
6215 6311 /* Deleting last */
6216 6312 ASSERT(sq->sq_evtail == bp);
6217 6313 sq->sq_evtail = NULL;
6218 6314 sq->sq_flags &= ~SQ_EVENTS;
6219 6315 }
6220 6316 bp->b_prev = bp->b_next = NULL;
6221 6317 ASSERT(bp->b_datap->db_ref != 0);
6222 6318
6223 6319 mutex_exit(SQLOCK(sq));
6224 6320
6225 6321 (*func)(qp, bp);
6226 6322
6227 6323 mutex_enter(SQLOCK(sq));
6228 6324 /*
6229 6325 		 * Re-read the flags, since they could have changed.
6230 6326 */
6231 6327 flags = sq->sq_flags;
6232 6328 ASSERT(flags & SQ_EXCL);
6233 6329 }
6234 6330 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6235 6331 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6236 6332
6237 6333 if (flags & SQ_WANTWAKEUP) {
6238 6334 flags &= ~SQ_WANTWAKEUP;
6239 6335 cv_broadcast(&sq->sq_wait);
6240 6336 }
6241 6337 if (flags & SQ_WANTEXWAKEUP) {
6242 6338 flags &= ~SQ_WANTEXWAKEUP;
6243 6339 cv_broadcast(&sq->sq_exitwait);
6244 6340 }
6245 6341 sq->sq_flags = flags;
6246 6342 }
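
The loop above also illustrates a general drain pattern: because SQLOCK is dropped around each callback, the loop must re-read the list head on every pass rather than walk a cached snapshot. A minimal pthreads rendition with hypothetical types:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct ev {
            struct ev       *next;
            void            (*func)(void *);
            void            *arg;
    } ev_t;

    typedef struct {
            pthread_mutex_t lock;
            ev_t            *head;
            ev_t            *tail;
    } evlist_t;

    /*
     * Run queued events in FIFO order. The lock is dropped around
     * each callback, so new events may be appended meanwhile;
     * re-reading l->head on each pass picks them up.
     */
    void
    run_events(evlist_t *l)
    {
            ev_t *e;

            pthread_mutex_lock(&l->lock);
            while ((e = l->head) != NULL) {
                    l->head = e->next;
                    if (l->head == NULL)
                            l->tail = NULL;
                    pthread_mutex_unlock(&l->lock);
                    e->func(e->arg);        /* callback runs unlocked */
                    pthread_mutex_lock(&l->lock);
            }
            pthread_mutex_unlock(&l->lock);
    }
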
6247 6343
6248 6344 /*
6249 6345 * Put messages on the event list.
6250 6346 * If we can go exclusive now, do so and process the event list, otherwise
6251 6347 * let the last claim service this list (or wake the sqthread).
6252 6348 * This procedure assumes SQLOCK is held. To run the event list, it
6253 6349 * must be called with no claims.
6254 6350 */
6255 6351 static void
6256 6352 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6257 6353 {
6258 6354 uint16_t count;
6259 6355
6260 6356 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6261 6357 ASSERT(func != NULL);
6262 6358
6263 6359 /*
6264 6360 * This is a callback. Add it to the list of callbacks
6265 6361 * and see about upgrading.
6266 6362 */
6267 6363 mp->b_prev = (mblk_t *)func;
6268 6364 mp->b_queue = q;
6269 6365 mp->b_next = NULL;
6270 6366 if (sq->sq_evhead == NULL) {
6271 6367 sq->sq_evhead = sq->sq_evtail = mp;
6272 6368 sq->sq_flags |= SQ_EVENTS;
6273 6369 } else {
6274 6370 ASSERT(sq->sq_evtail != NULL);
6275 6371 ASSERT(sq->sq_evtail->b_next == NULL);
6276 6372 ASSERT(sq->sq_flags & SQ_EVENTS);
6277 6373 sq->sq_evtail->b_next = mp;
6278 6374 sq->sq_evtail = mp;
6279 6375 }
6280 6376 /*
6281 6377 * We have set SQ_EVENTS, so threads will have to
6282 6378 * unwind out of the perimeter, and new entries will
6283 6379 * not grab a putlock. But we still need to know
6284 6380 * how many threads have already made a claim to the
6285 6381 * syncq, so grab the putlocks, and sum the counts.
6286 6382 * If there are no claims on the syncq, we can upgrade
6287 6383 * to exclusive, and run the event list.
6288 6384 * NOTE: We hold the SQLOCK, so we can just grab the
6289 6385 * putlocks.
6290 6386 */
6291 6387 count = sq->sq_count;
6292 6388 SQ_PUTLOCKS_ENTER(sq);
6293 6389 SUM_SQ_PUTCOUNTS(sq, count);
6294 6390 	/*
6295 6391 	 * We hold no claim ourselves (at least on this entry), so we can
6296 6392 	 * upgrade only if there are no other claims; a thread that does
6297 6393 	 * hold a claim is expected to drain the syncq itself.
6298 6394 	 */
6303 6399 if (count > 0) {
6304 6400 /*
6305 6401 * Can't upgrade - other threads inside.
6306 6402 */
6307 6403 SQ_PUTLOCKS_EXIT(sq);
6308 6404 mutex_exit(SQLOCK(sq));
6309 6405 return;
6310 6406 }
6311 6407 /*
6312 6408 * Need to set SQ_EXCL and make a claim on the syncq.
6313 6409 */
6314 6410 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6315 6411 sq->sq_flags |= SQ_EXCL;
6316 6412 ASSERT(sq->sq_count == 0);
6317 6413 sq->sq_count++;
6318 6414 SQ_PUTLOCKS_EXIT(sq);
6319 6415
6320 6416 /* Process the events list */
6321 6417 sq_run_events(sq);
6322 6418
6323 6419 /*
6324 6420 * Release our claim...
6325 6421 */
6326 6422 sq->sq_count--;
6327 6423
6328 6424 /*
6329 6425 * And release SQ_EXCL.
6330 6426 * We don't need to acquire the putlocks to release
6331 6427 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6332 6428 */
6333 6429 sq->sq_flags &= ~SQ_EXCL;
6334 6430
6335 6431 /*
6336 6432 * sq_run_events should have released SQ_EXCL
6337 6433 */
6338 6434 ASSERT(!(sq->sq_flags & SQ_EXCL));
6339 6435
6340 6436 /*
6341 6437 * If anything happened while we were running the
6342 6438 	 * events (or was already there before), we need to process
6343 6439 	 * them now. We shouldn't be exclusive since we
6344 6440 * released the perimeter above (plus, we asserted
6345 6441 * for it).
6346 6442 */
6347 6443 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6348 6444 drain_syncq(sq);
6349 6445 else
6350 6446 mutex_exit(SQLOCK(sq));
6351 6447 }
6352 6448
6353 6449 /*
6354 6450 * Perform delayed processing. The caller has to make sure that it is safe
6355 6451 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6356 6452 * set).
6357 6453 *
6358 6454 * Assume that the caller has NO claims on the syncq. However, a claim
6359 6455 * on the syncq does not indicate that a thread is draining the syncq.
6360 6456 * There may be more claims on the syncq than there are threads draining
6361 6457 * (i.e. #_threads_draining <= sq_count)
6362 6458 *
6363 6459 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6364 6460 * in order to preserve qwriter(OUTER) ordering constraints.
6365 6461 *
6366 6462 * sq_putcount only needs to be checked when dispatching the queued
6367 6463 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6368 6464 */
6369 6465 void
6370 6466 drain_syncq(syncq_t *sq)
6371 6467 {
6372 6468 queue_t *qp;
6373 6469 uint16_t count;
6374 6470 uint16_t type = sq->sq_type;
6375 6471 uint16_t flags = sq->sq_flags;
6376 6472 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6377 6473
6378 6474 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6379 6475 "drain_syncq start:%p", sq);
6380 6476 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6381 6477 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6382 6478 sq->sq_oprev == NULL) ||
6383 6479 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6384 6480 sq->sq_oprev != NULL));
6385 6481
6386 6482 /*
6387 6483 * Drop SQ_SERVICE flag.
6388 6484 */
6389 6485 if (bg_service)
6390 6486 sq->sq_svcflags &= ~SQ_SERVICE;
6391 6487
6392 6488 /*
6393 6489 * If SQ_EXCL is set, someone else is processing this syncq - let him
6394 6490 * finish the job.
6395 6491 */
6396 6492 if (flags & SQ_EXCL) {
6397 6493 if (bg_service) {
6398 6494 ASSERT(sq->sq_servcount != 0);
6399 6495 sq->sq_servcount--;
6400 6496 }
6401 6497 mutex_exit(SQLOCK(sq));
6402 6498 return;
6403 6499 }
6404 6500
6405 6501 /*
6406 6502 * This routine can be called by a background thread if
6407 6503 * it was scheduled by a hi-priority thread. SO, if there are
6408 6504 * NOT messages queued, return (remember, we have the SQLOCK,
6409 6505 * and it cannot change until we release it). Wakeup any waiters also.
6410 6506 */
6411 6507 if (!(flags & SQ_QUEUED)) {
6412 6508 if (flags & SQ_WANTWAKEUP) {
6413 6509 flags &= ~SQ_WANTWAKEUP;
6414 6510 cv_broadcast(&sq->sq_wait);
6415 6511 }
6416 6512 if (flags & SQ_WANTEXWAKEUP) {
6417 6513 flags &= ~SQ_WANTEXWAKEUP;
6418 6514 cv_broadcast(&sq->sq_exitwait);
6419 6515 }
6420 6516 sq->sq_flags = flags;
6421 6517 if (bg_service) {
6422 6518 ASSERT(sq->sq_servcount != 0);
6423 6519 sq->sq_servcount--;
6424 6520 }
6425 6521 mutex_exit(SQLOCK(sq));
6426 6522 return;
6427 6523 }
6428 6524
6429 6525 /*
6430 6526 * If this is not a concurrent put perimeter, we need to
6431 6527 * become exclusive to drain. Also, if not CIPUT, we would
6432 6528 * not have acquired a putlock, so we don't need to check
6433 6529 * the putcounts. If not entering with a claim, we test
6434 6530 * for sq_count == 0.
6435 6531 */
6436 6532 type = sq->sq_type;
6437 6533 if (!(type & SQ_CIPUT)) {
6438 6534 if (sq->sq_count > 1) {
6439 6535 if (bg_service) {
6440 6536 ASSERT(sq->sq_servcount != 0);
6441 6537 sq->sq_servcount--;
6442 6538 }
6443 6539 mutex_exit(SQLOCK(sq));
6444 6540 return;
6445 6541 }
6446 6542 sq->sq_flags |= SQ_EXCL;
6447 6543 }
6448 6544
6449 6545 /*
6450 6546 * This is where we make a claim to the syncq.
6451 6547 * This can either be done by incrementing a putlock, or
6452 6548 * the sq_count. But since we already have the SQLOCK
6453 6549 * here, we just bump the sq_count.
6454 6550 *
6455 6551 * Note that after we make a claim, we need to let the code
6456 6552 * fall through to the end of this routine to clean itself
6457 6553 * up. A return in the while loop will put the syncq in a
6458 6554 * very bad state.
6459 6555 */
6460 6556 sq->sq_count++;
6461 6557 ASSERT(sq->sq_count != 0); /* wraparound */
6462 6558
6463 6559 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6464 6560 /*
6465 6561 * If we are told to stayaway or went exclusive,
6466 6562 * we are done.
6467 6563 */
6468 6564 if (flags & (SQ_STAYAWAY)) {
6469 6565 break;
6470 6566 }
6471 6567
6472 6568 /*
6473 6569 * If there are events to run, do so.
6474 6570 * We have one claim to the syncq, so if there are
6475 6571 * more than one, other threads are running.
6476 6572 */
6477 6573 if (sq->sq_evhead != NULL) {
6478 6574 ASSERT(sq->sq_flags & SQ_EVENTS);
6479 6575
6480 6576 count = sq->sq_count;
6481 6577 SQ_PUTLOCKS_ENTER(sq);
6482 6578 SUM_SQ_PUTCOUNTS(sq, count);
6483 6579 if (count > 1) {
6484 6580 SQ_PUTLOCKS_EXIT(sq);
6485 6581 /* Can't upgrade - other threads inside */
6486 6582 break;
6487 6583 }
6488 6584 ASSERT((flags & SQ_EXCL) == 0);
6489 6585 sq->sq_flags = flags | SQ_EXCL;
6490 6586 SQ_PUTLOCKS_EXIT(sq);
6491 6587 /*
6492 6588 * we have the only claim, run the events,
6493 6589 * sq_run_events will clear the SQ_EXCL flag.
6494 6590 */
6495 6591 sq_run_events(sq);
6496 6592
6497 6593 /*
6498 6594 * If this is a CIPUT perimeter, we need
6499 6595 * to drop the SQ_EXCL flag so we can properly
6500 6596 * continue draining the syncq.
6501 6597 */
6502 6598 if (type & SQ_CIPUT) {
6503 6599 ASSERT(sq->sq_flags & SQ_EXCL);
6504 6600 sq->sq_flags &= ~SQ_EXCL;
6505 6601 }
6506 6602
6507 6603 /*
6508 6604 * And go back to the beginning just in case
6509 6605 * anything changed while we were away.
6510 6606 */
6511 6607 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6512 6608 continue;
6513 6609 }
6514 6610
6515 6611 ASSERT(sq->sq_evhead == NULL);
6516 6612 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6517 6613
6518 6614 /*
6519 6615 * Find the queue that is not draining.
6520 6616 *
6521 6617 * q_draining is protected by QLOCK which we do not hold.
6522 6618 * But if it was set, then a thread was draining, and if it gets
6523 6619 * cleared, then it was because the thread has successfully
6524 6620 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6525 6621 * state to happen, a thread needs the SQLOCK which we hold, and
6526 6622 * if there was such a flag, we would have already seen it.
6527 6623 */
6528 6624
6529 6625 for (qp = sq->sq_head;
6530 6626 qp != NULL && (qp->q_draining ||
6531 6627 (qp->q_sqflags & Q_SQDRAINING));
6532 6628 qp = qp->q_sqnext)
6533 6629 ;
6534 6630
6535 6631 if (qp == NULL)
6536 6632 break;
6537 6633
6538 6634 /*
6539 6635 * We have a queue to work on, and we hold the
6540 6636 * SQLOCK and one claim, call qdrain_syncq.
6541 6637 * This means we need to release the SQLOCK and
6542 6638 * acquire the QLOCK (OK since we have a claim).
6543 6639 * Note that qdrain_syncq will actually dequeue
6544 6640 * this queue from the sq_head list when it is
6545 6641 * convinced all the work is done and release
6546 6642 * the QLOCK before returning.
6547 6643 */
6548 6644 qp->q_sqflags |= Q_SQDRAINING;
6549 6645 mutex_exit(SQLOCK(sq));
6550 6646 mutex_enter(QLOCK(qp));
6551 6647 qdrain_syncq(sq, qp);
6552 6648 mutex_enter(SQLOCK(sq));
6553 6649
6554 6650 /* The queue is drained */
6555 6651 ASSERT(qp->q_sqflags & Q_SQDRAINING);
6556 6652 qp->q_sqflags &= ~Q_SQDRAINING;
6557 6653 /*
6558 6654 * NOTE: After this point qp should not be used since it may be
6559 6655 * closed.
6560 6656 */
6561 6657 }
6562 6658
6563 6659 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6564 6660 flags = sq->sq_flags;
6565 6661
6566 6662 /*
6567 6663 * sq->sq_head cannot change because we hold the
6568 6664 * sqlock. However, a thread CAN decide that it is no longer
6569 6665 * going to drain that queue. That should only be due to
6570 6666 * a GOAWAY state, and we should see that here.
6571 6667 *
6572 6668 * This loop is not very efficient. One solution may be adding a second
6573 6669 * pointer to the "draining" queue, but it is difficult to do when
6574 6670 * queues are inserted in the middle due to priority ordering. Another
6575 6671 * possibility is to yank the queue out of the sq list and put it onto
6576 6672 * the "draining list" and then put it back if it can't be drained.
6577 6673 */
6578 6674
6579 6675 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6580 6676 (type & SQ_CI) || sq->sq_head->q_draining);
6581 6677
6582 6678 /* Drop SQ_EXCL for non-CIPUT perimeters */
6583 6679 if (!(type & SQ_CIPUT))
6584 6680 flags &= ~SQ_EXCL;
6585 6681 ASSERT((flags & SQ_EXCL) == 0);
6586 6682
6587 6683 /* Wake up any waiters. */
6588 6684 if (flags & SQ_WANTWAKEUP) {
6589 6685 flags &= ~SQ_WANTWAKEUP;
6590 6686 cv_broadcast(&sq->sq_wait);
6591 6687 }
6592 6688 if (flags & SQ_WANTEXWAKEUP) {
6593 6689 flags &= ~SQ_WANTEXWAKEUP;
6594 6690 cv_broadcast(&sq->sq_exitwait);
6595 6691 }
6596 6692 sq->sq_flags = flags;
6597 6693
6598 6694 ASSERT(sq->sq_count != 0);
6599 6695 /* Release our claim. */
6600 6696 sq->sq_count--;
6601 6697
6602 6698 if (bg_service) {
6603 6699 ASSERT(sq->sq_servcount != 0);
6604 6700 sq->sq_servcount--;
6605 6701 }
6606 6702
6607 6703 mutex_exit(SQLOCK(sq));
6608 6704
6609 6705 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6610 6706 "drain_syncq end:%p", sq);
6611 6707 }
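/*
 * A minimal sketch of the claim discipline drain_syncq() depends on; the
 * sequence below is illustrative only, not a real caller. A thread that
 * must keep the syncq from going exclusive underneath it makes a claim
 * while holding SQLOCK and releases it the same way:
 *
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_count++;			(make a claim)
 *	mutex_exit(SQLOCK(sq));
 *	... work that must not race with an exclusive entry ...
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_count--;			(release the claim)
 *	mutex_exit(SQLOCK(sq));
 */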
6612 6708
6613 6709
6614 6710 /*
6615 6711 *
6616 6712 * qdrain_syncq can be called (currently) from only one of two places:
6617 6713 * drain_syncq
6618 6714 * putnext (or some variation of it).
6619 6715 * and eventually
6620 6716 * qwait(_sig)
6621 6717 *
6622 6718 * If called from drain_syncq, we found it in the list of queues needing
6623 6719 * service, so there is work to be done (or it wouldn't be in the list).
6624 6720 *
6625 6721 * If called from some putnext variation, it was because the
6626 6722 * perimeter is open, but messages are blocking a putnext and
6627 6723 * there is not a thread working on it. Now a thread could start
6628 6724 * working on it while we are getting ready to do so ourselves, but
6629 6725 * the thread would set the q_draining flag, and we can spin out.
6630 6726 *
6631 6727 * As for qwait(_sig), I think I shall let it continue to call
6632 6728 * drain_syncq directly (after all, it will get here eventually).
6633 6729 *
6634 6730 * qdrain_syncq has to terminate when:
6635 6731 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6636 6732 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6637 6733 *
6638 6734 * ASSUMES:
6639 6735 * One claim
6640 6736 * QLOCK held
6641 6737 * SQLOCK not held
6642 6738 * Will release QLOCK before returning
6643 6739 */
6644 6740 void
6645 6741 qdrain_syncq(syncq_t *sq, queue_t *q)
6646 6742 {
6647 6743 mblk_t *bp;
6648 6744 #ifdef DEBUG
6649 6745 uint16_t count;
6650 6746 #endif
6651 6747
6652 6748 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6653 6749 "drain_syncq start:%p", sq);
6654 6750 ASSERT(q->q_syncq == sq);
6655 6751 ASSERT(MUTEX_HELD(QLOCK(q)));
6656 6752 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6657 6753 /*
6658 6754 * For non-CIPUT perimeters, we should be called with the exclusive bit
6659 6755 * set already. For CIPUT perimeters, we will be doing a concurrent
6660 6756 * drain, so it better not be set.
6661 6757 */
6662 6758 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6663 6759 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6664 6760 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6665 6761 /*
6666 6762 * All outer pointers are set, or none of them are
6667 6763 */
6668 6764 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6669 6765 sq->sq_oprev == NULL) ||
6670 6766 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6671 6767 sq->sq_oprev != NULL));
6672 6768 #ifdef DEBUG
6673 6769 count = sq->sq_count;
6674 6770 /*
6675 6771 * This is OK without the putlocks, because we have one
6676 6772 * claim either from the sq_count, or a putcount. We could
6677 6773 * get an erroneous value from other counts, but ours won't
6678 6774 * change, so one way or another, we will have at least a
6679 6775 * value of one.
6680 6776 */
6681 6777 SUM_SQ_PUTCOUNTS(sq, count);
6682 6778 ASSERT(count >= 1);
6683 6779 #endif /* DEBUG */
6684 6780
6685 6781 /*
6686 6782 * The first thing to do is find out if a thread is already draining
6687 6783 * this queue. If so, we are done, just return.
6688 6784 */
6689 6785 if (q->q_draining) {
6690 6786 mutex_exit(QLOCK(q));
6691 6787 return;
6692 6788 }
6693 6789
6694 6790 /*
6695 6791 * If the perimeter is exclusive, there is nothing we can do right now,
6696 6792 * go away. Note that there is nothing to prevent this case from
6697 6793 * changing right after this check, but the spin-out will catch it.
6698 6794 */
6699 6795
6700 6796 /* Tell other threads that we are draining this queue */
6701 6797 q->q_draining = 1; /* Protected by QLOCK */
6702 6798
6703 6799 /*
6704 6800 * If there is nothing to do, clear QFULL as necessary. This caters for
6705 6801 * the case where an empty queue was enqueued onto the syncq.
6706 6802 */
6707 6803 if (q->q_sqhead == NULL) {
6708 6804 ASSERT(q->q_syncqmsgs == 0);
6709 6805 mutex_exit(QLOCK(q));
6710 6806 clr_qfull(q);
6711 6807 mutex_enter(QLOCK(q));
6712 6808 }
6713 6809
6714 6810 /*
6715 6811 * Note that q_sqhead must be re-checked here in case another message
6716 6812 * was enqueued whilst QLOCK was dropped during the call to clr_qfull.
6717 6813 */
6718 6814 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6719 6815 /*
6720 6816 * Because we can enter this routine just because a putnext is
6721 6817 * blocked, we need to spin out if the perimeter wants to go
6722 6818 * exclusive, not only when it is blocked. We also need to spin
6723 6819 * out if events are queued on the syncq.
6724 6820 * Don't check for SQ_EXCL, because non-CIPUT perimeters would
6725 6821 * set it, and it can't become exclusive while we hold a claim.
6726 6822 */
6727 6823 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6728 6824 break;
6729 6825 }
6730 6826
6731 6827 #ifdef DEBUG
6732 6828 /*
6733 6829 * Since we are in qdrain_syncq, we already know the queue,
6734 6830 * but for sanity, we want to check it against the queue
6735 6831 * recorded in bp->b_queue.
6736 6832 */
6737 6833
6738 6834 ASSERT(bp->b_queue == q);
6739 6835 ASSERT(bp->b_queue->q_syncq == sq);
6740 6836 bp->b_queue = NULL;
6741 6837
6742 6838 /*
6743 6839 * We would have the following check in the DEBUG code:
6744 6840 *
6745 6841 * if (bp->b_prev != NULL) {
6746 6842 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6747 6843 * }
6748 6844 *
6749 6845 * This can't be done, however, since IP modifies the qinfo
6750 6846 * structure at run-time (switching between IPv4 qinfo and IPv6
6751 6847 * qinfo), invalidating the check.
6752 6848 * So the assignment to func is left here, but the ASSERT itself
6753 6849 * is removed until the whole issue is resolved.
6754 6850 */
6755 6851 #endif
6756 6852 ASSERT(q->q_sqhead == bp);
6757 6853 q->q_sqhead = bp->b_next;
6758 6854 bp->b_prev = bp->b_next = NULL;
6759 6855 ASSERT(q->q_syncqmsgs > 0);
6760 6856 mutex_exit(QLOCK(q));
6761 6857
6762 6858 ASSERT(bp->b_datap->db_ref != 0);
6763 6859
6764 6860 (void) (*q->q_qinfo->qi_putp)(q, bp);
6765 6861
6766 6862 mutex_enter(QLOCK(q));
6767 6863
6768 6864 /*
6769 6865 * q_syncqmsgs should only be decremented after executing the
6770 6866 * put procedure to avoid message re-ordering. This is due to an
6771 6867 * optimisation in putnext() which can call the put procedure
6772 6868 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED
6773 6869 * being set).
6774 6870 *
6775 6871 * We also need to clear QFULL in the next service procedure
6776 6872 * queue if this is the last message destined for that queue.
6777 6873 *
6778 6874 * It would make better sense to have some sort of tunable for
6779 6875 * the low water mark, but these semantics are not yet defined.
6780 6876 * So, alas, we use a constant.
6781 6877 */
6782 6878 if (--q->q_syncqmsgs == 0) {
6783 6879 mutex_exit(QLOCK(q));
6784 6880 clr_qfull(q);
6785 6881 mutex_enter(QLOCK(q));
6786 6882 }
6787 6883
6788 6884 /*
6789 6885 * Always clear SQ_EXCL when CIPUT in order to handle
6790 6886 * qwriter(INNER). The putp() can call qwriter and get exclusive
6791 6887 * access IFF this is the only claim. So, we need to test for
6792 6888 * this possibility, acquire the mutex and clear the bit.
6793 6889 */
6794 6890 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6795 6891 mutex_enter(SQLOCK(sq));
6796 6892 sq->sq_flags &= ~SQ_EXCL;
6797 6893 mutex_exit(SQLOCK(sq));
6798 6894 }
6799 6895 }
6800 6896
6801 6897 /*
6802 6898 * We should either have no messages on this queue, or we were told to
6803 6899 * goaway by a waiter (which we will wake up at the end of this
6804 6900 * function).
6805 6901 */
6806 6902 ASSERT((q->q_sqhead == NULL) ||
6807 6903 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6808 6904
6809 6905 ASSERT(MUTEX_HELD(QLOCK(q)));
6810 6906 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6811 6907
6812 6908 /* Remove the q from the syncq list if all the messages are drained. */
6813 6909 if (q->q_sqhead == NULL) {
6814 6910 ASSERT(q->q_syncqmsgs == 0);
6815 6911 mutex_enter(SQLOCK(sq));
6816 6912 if (q->q_sqflags & Q_SQQUEUED)
6817 6913 SQRM_Q(sq, q);
6818 6914 mutex_exit(SQLOCK(sq));
6819 6915 /*
6820 6916 * Since the queue is removed from the list, reset its priority.
6821 6917 */
6822 6918 q->q_spri = 0;
6823 6919 }
6824 6920
6825 6921 /*
6826 6922 * Remember, the q_draining flag is used to let another thread know
6827 6923 * that there is a thread currently draining the messages for a queue.
6828 6924 * Since we are now done with this queue (even if there may be messages
6829 6925 * still there), we need to clear this flag so that some other thread
6830 6926 * will work on it if needed.
6831 6927 */
6832 6928 ASSERT(q->q_draining);
6833 6929 q->q_draining = 0;
6834 6930
6835 6931 /* Called with a claim, so OK to drop all locks. */
6836 6932 mutex_exit(QLOCK(q));
6837 6933
6838 6934 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6839 6935 "drain_syncq end:%p", sq);
6840 6936 }
6841 6937 /* END OF QDRAIN_SYNCQ */
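/*
 * A minimal sketch of the qdrain_syncq() caller contract stated in the
 * block comment above. This mirrors the drain_syncq() call site earlier
 * in this file and is illustrative only:
 *
 *	mutex_exit(SQLOCK(sq));		(SQLOCK must not be held)
 *	mutex_enter(QLOCK(q));		(enter with QLOCK and one claim)
 *	qdrain_syncq(sq, q);		(returns with QLOCK released)
 *	mutex_enter(SQLOCK(sq));
 */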
6842 6938
6843 6939
6844 6940 /*
6845 6941 * This is the mate to qdrain_syncq, except that it is putting the message onto
6846 6942 * the queue instead of draining. Since the message is destined for the
6847 6943 * selected queue, there is no need to record the function: the message is
6848 6944 * always handed to that queue's put routine. For debug kernels, this routine
6849 6945 * records it anyway, just in case.
6850 6946 *
6851 6947 * After the message is enqueued on the syncq, it calls putnext_tail()
6852 6948 * which will schedule a background thread to actually process the message.
6853 6949 *
6854 6950 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6855 6951 * SQLOCK(sq) and QLOCK(q) are not held.
6856 6952 */
6857 6953 void
6858 6954 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6859 6955 {
6860 6956 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6861 6957 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6862 6958 ASSERT(sq->sq_count > 0);
6863 6959 ASSERT(q->q_syncq == sq);
6864 6960 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6865 6961 sq->sq_oprev == NULL) ||
6866 6962 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6867 6963 sq->sq_oprev != NULL));
6868 6964
6869 6965 mutex_enter(QLOCK(q));
6870 6966
6871 6967 #ifdef DEBUG
6872 6968 /*
6873 6969 * This is used for debug in the qfill_syncq/qdrain_syncq case
6874 6970 * to trace the queue that the message is intended for. Note
6875 6971 * that the original use was to identify the queue and function
6876 6972 * to call on the drain. In the new syncq, we have the context
6877 6973 * of the queue that we are draining, so call its putproc and
6878 6974 * don't rely on the saved values. But for debug this is still
6879 6975 * useful information.
6880 6976 */
6881 6977 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6882 6978 mp->b_queue = q;
6883 6979 mp->b_next = NULL;
6884 6980 #endif
6885 6981 ASSERT(q->q_syncq == sq);
6886 6982 /*
6887 6983 * Enqueue the message on the list.
6888 6984 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6889 6985 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6890 6986 */
6891 6987 SQPUT_MP(q, mp);
6892 6988 mutex_enter(SQLOCK(sq));
6893 6989
6894 6990 /*
6895 6991 * And queue on syncq for scheduling, if not already queued.
6896 6992 * Note that we need the SQLOCK for this, and for testing flags
6897 6993 * at the end to see if we will drain. So grab it now, and
6898 6994 * release it before we call qdrain_syncq or return.
6899 6995 */
6900 6996 if (!(q->q_sqflags & Q_SQQUEUED)) {
6901 6997 q->q_spri = curthread->t_pri;
6902 6998 SQPUT_Q(sq, q);
6903 6999 }
6904 7000 #ifdef DEBUG
6905 7001 else {
6906 7002 /*
6907 7003 * All of these conditions MUST be true!
6908 7004 */
6909 7005 ASSERT(sq->sq_tail != NULL);
6910 7006 if (sq->sq_tail == sq->sq_head) {
6911 7007 ASSERT((q->q_sqprev == NULL) &&
6912 7008 (q->q_sqnext == NULL));
6913 7009 } else {
6914 7010 ASSERT((q->q_sqprev != NULL) ||
6915 7011 (q->q_sqnext != NULL));
6916 7012 }
6917 7013 ASSERT(sq->sq_flags & SQ_QUEUED);
6918 7014 ASSERT(q->q_syncqmsgs != 0);
6919 7015 ASSERT(q->q_sqflags & Q_SQQUEUED);
6920 7016 }
6921 7017 #endif
6922 7018 mutex_exit(QLOCK(q));
6923 7019 /*
6924 7020 * SQLOCK is still held, so sq_count can be safely decremented.
6925 7021 */
6926 7022 sq->sq_count--;
6927 7023
6928 7024 putnext_tail(sq, q, 0);
6929 7025 /* Should not reference sq or q after this point. */
6930 7026 }
6931 7027
6932 7028 /* End of qfill_syncq */
6933 7029
6934 7030 /*
6935 7031 * Remove all messages from a syncq (if qp is NULL) or remove all messages
6936 7032 * that would be put into qp by drain_syncq.
6937 7033 * Used when deleting the syncq (qp == NULL) or when detaching
6938 7034 * a queue (qp != NULL).
6939 7035 * Return non-zero if one or more messages were freed.
6940 7036 *
6941 7037 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6942 7038 * sq_putlocks are used.
6943 7039 *
6944 7040 * NOTE: This function assumes that it is called from the close() context and
6945 7041 * that all the queues in the syncq are going away. For this reason it doesn't
6946 7042 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6947 7043 * currently valid, but it is useful to rethink this function to behave properly
6948 7044 * in other cases.
6949 7045 */
6950 7046 int
6951 7047 flush_syncq(syncq_t *sq, queue_t *qp)
6952 7048 {
6953 7049 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6954 7050 queue_t *q;
6955 7051 int ret = 0;
6956 7052
6957 7053 mutex_enter(SQLOCK(sq));
6958 7054
6959 7055 /*
6960 7056 * Before we leave, we need to make sure there are no
6961 7057 * events listed for this queue. All events for this queue
6962 7058 * will just be freed.
6963 7059 */
6964 7060 if (qp != NULL && sq->sq_evhead != NULL) {
6965 7061 ASSERT(sq->sq_flags & SQ_EVENTS);
6966 7062
6967 7063 mp_prev = NULL;
6968 7064 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6969 7065 mp_next = bp->b_next;
6970 7066 if (bp->b_queue == qp) {
6971 7067 /* Delete this message */
6972 7068 if (mp_prev != NULL) {
6973 7069 mp_prev->b_next = mp_next;
6974 7070 /*
6975 7071 * Update sq_evtail if the last element
6976 7072 * is removed.
6977 7073 */
6978 7074 if (bp == sq->sq_evtail) {
6979 7075 ASSERT(mp_next == NULL);
6980 7076 sq->sq_evtail = mp_prev;
6981 7077 }
6982 7078 } else
6983 7079 sq->sq_evhead = mp_next;
6984 7080 if (sq->sq_evhead == NULL)
6985 7081 sq->sq_flags &= ~SQ_EVENTS;
6986 7082 bp->b_prev = bp->b_next = NULL;
6987 7083 freemsg(bp);
6988 7084 ret++;
6989 7085 } else {
6990 7086 mp_prev = bp;
6991 7087 }
6992 7088 }
6993 7089 }
6994 7090
6995 7091 /*
6996 7092 * Walk sq_head and:
6997 7093 * - if qp is set, remove only qp's messages
6998 7094 * - if qp is not set, remove all messages
6999 7095 */
7000 7096 q = sq->sq_head;
7001 7097 while (q != NULL) {
7002 7098 ASSERT(q->q_syncq == sq);
7003 7099 if ((qp == NULL) || (qp == q)) {
7004 7100 /*
7005 7101 * Yank the messages as a list off the queue
7006 7102 */
7007 7103 mp_head = q->q_sqhead;
7008 7104 /*
7009 7105 * We do not have QLOCK(q) here (which is safe due to
7010 7106 * assumptions mentioned above). To obtain the lock we
7011 7107 * need to release SQLOCK, which may allow lots of things
7012 7108 * to change underneath us. This place requires more analysis.
7013 7109 */
7014 7110 q->q_sqhead = q->q_sqtail = NULL;
7015 7111 ASSERT(mp_head->b_queue &&
7016 7112 mp_head->b_queue->q_syncq == sq);
7017 7113
7018 7114 /*
7019 7115 * Free each of the messages.
7020 7116 */
7021 7117 for (bp = mp_head; bp != NULL; bp = mp_next) {
7022 7118 mp_next = bp->b_next;
7023 7119 bp->b_prev = bp->b_next = NULL;
7024 7120 freemsg(bp);
7025 7121 ret++;
7026 7122 }
7027 7123 /*
7028 7124 * Now remove the queue from the syncq.
7029 7125 */
7030 7126 ASSERT(q->q_sqflags & Q_SQQUEUED);
7031 7127 SQRM_Q(sq, q);
7032 7128 q->q_spri = 0;
7033 7129 q->q_syncqmsgs = 0;
7034 7130
7035 7131 /*
7036 7132 * If qp was specified, we are done with it and are
7037 7133 * going to drop SQLOCK(sq) and return. We wakeup syncq
7038 7134 * waiters while we still have the SQLOCK.
7039 7135 */
7040 7136 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7041 7137 sq->sq_flags &= ~SQ_WANTWAKEUP;
7042 7138 cv_broadcast(&sq->sq_wait);
7043 7139 }
7044 7140 /* Drop SQLOCK across clr_qfull */
7045 7141 mutex_exit(SQLOCK(sq));
7046 7142
7047 7143 /*
7048 7144 * We avoid doing the test that drain_syncq does and
7049 7145 * unconditionally clear qfull for every flushed
7050 7146 * message. Since flush_syncq is only called during
7051 7147 * close this should not be a problem.
7052 7148 */
7053 7149 clr_qfull(q);
7054 7150 if (qp != NULL) {
7055 7151 return (ret);
7056 7152 } else {
7057 7153 mutex_enter(SQLOCK(sq));
7058 7154 /*
7059 7155 * The head was removed by SQRM_Q above.
7060 7156 * Reread the new head and flush it.
7061 7157 */
7062 7158 q = sq->sq_head;
7063 7159 }
7064 7160 } else {
7065 7161 q = q->q_sqnext;
7066 7162 }
7067 7163 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7068 7164 }
7069 7165
7070 7166 if (sq->sq_flags & SQ_WANTWAKEUP) {
7071 7167 sq->sq_flags &= ~SQ_WANTWAKEUP;
7072 7168 cv_broadcast(&sq->sq_wait);
7073 7169 }
7074 7170
7075 7171 mutex_exit(SQLOCK(sq));
7076 7172 return (ret);
7077 7173 }
7078 7174
7079 7175 /*
7080 7176 * Propagate all messages from a syncq to the next syncq that are associated
7081 7177 * with the specified queue. If the queue is attached to a driver or if the
7082 7178 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7083 7179 *
7084 7180 * Assumes that the stream is strlock()'ed. We don't come here if there
7085 7181 * are no messages to propagate.
7086 7182 *
7087 7183 * NOTE : If the queue is attached to a driver, all the messages are freed
7088 7184 * as there is no point in propagating the messages from the driver syncq
7089 7185 * to the closing stream head which will in turn get freed later.
7090 7186 */
7091 7187 static int
7092 7188 propagate_syncq(queue_t *qp)
7093 7189 {
7094 7190 mblk_t *bp, *head, *tail, *prev, *next;
7095 7191 syncq_t *sq;
7096 7192 queue_t *nqp;
7097 7193 syncq_t *nsq;
7098 7194 boolean_t isdriver;
7099 7195 int moved = 0;
7100 7196 uint16_t flags;
7101 7197 pri_t priority = curthread->t_pri;
7102 7198 #ifdef DEBUG
7103 7199 void (*func)();
7104 7200 #endif
7105 7201
7106 7202 sq = qp->q_syncq;
7107 7203 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7108 7204 /* debug macro */
7109 7205 SQ_PUTLOCKS_HELD(sq);
7110 7206 /*
7111 7207 * As entersq() does not increment the sq_count for
7112 7208 * the write side, check sq_count for non-QPERQ
7113 7209 * perimeters alone.
7114 7210 */
7115 7211 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7116 7212
7117 7213 /*
7118 7214 * propagate_syncq() can be called either because of messages on the
7119 7215 * queue syncq or because of events on the queue syncq. Do the actual
7120 7216 * message propagation if there are any messages.
7121 7217 */
7122 7218 if (qp->q_syncqmsgs) {
7123 7219 isdriver = (qp->q_flag & QISDRV);
7124 7220
7125 7221 if (!isdriver) {
7126 7222 nqp = qp->q_next;
7127 7223 nsq = nqp->q_syncq;
7128 7224 ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7129 7225 /* debug macro */
7130 7226 SQ_PUTLOCKS_HELD(nsq);
7131 7227 #ifdef DEBUG
7132 7228 func = (void (*)())nqp->q_qinfo->qi_putp;
7133 7229 #endif
7134 7230 }
7135 7231
7136 7232 SQRM_Q(sq, qp);
7137 7233 priority = MAX(qp->q_spri, priority);
7138 7234 qp->q_spri = 0;
7139 7235 head = qp->q_sqhead;
7140 7236 tail = qp->q_sqtail;
7141 7237 qp->q_sqhead = qp->q_sqtail = NULL;
7142 7238 qp->q_syncqmsgs = 0;
7143 7239
7144 7240 /*
7145 7241 * Walk the list of messages, and free them if this is a driver,
7146 7242 * otherwise reset the b_prev and b_queue values to the new putp.
7147 7243 * Afterward, we will just add the head to the end of the next
7148 7244 * syncq, and point the tail to the end of this one.
7149 7245 */
7150 7246
7151 7247 for (bp = head; bp != NULL; bp = next) {
7152 7248 next = bp->b_next;
7153 7249 if (isdriver) {
7154 7250 bp->b_prev = bp->b_next = NULL;
7155 7251 freemsg(bp);
7156 7252 continue;
7157 7253 }
7158 7254 /* Change the q values for this message */
7159 7255 bp->b_queue = nqp;
7160 7256 #ifdef DEBUG
7161 7257 bp->b_prev = (mblk_t *)func;
7162 7258 #endif
7163 7259 moved++;
7164 7260 }
7165 7261 /*
7166 7262 * Attach list of messages to the end of the new queue (if there
7167 7263 * is a list of messages).
7168 7264 */
7169 7265
7170 7266 if (!isdriver && head != NULL) {
7171 7267 ASSERT(tail != NULL);
7172 7268 if (nqp->q_sqhead == NULL) {
7173 7269 nqp->q_sqhead = head;
7174 7270 } else {
7175 7271 ASSERT(nqp->q_sqtail != NULL);
7176 7272 nqp->q_sqtail->b_next = head;
7177 7273 }
7178 7274 nqp->q_sqtail = tail;
7179 7275 /*
7180 7276 * When messages are moved from a high priority queue to
7181 7277 * another queue, the destination queue priority is
7182 7278 * upgraded.
7183 7279 */
7184 7280
7185 7281 if (priority > nqp->q_spri)
7186 7282 nqp->q_spri = priority;
7187 7283
7188 7284 SQPUT_Q(nsq, nqp);
7189 7285
7190 7286 nqp->q_syncqmsgs += moved;
7191 7287 ASSERT(nqp->q_syncqmsgs != 0);
7192 7288 }
7193 7289 }
7194 7290
7195 7291 /*
7196 7292 * Before we leave, we need to make sure there are no
7197 7293 * events listed for this queue. All events for this queue
7198 7294 * will just be freed.
7199 7295 */
7200 7296 if (sq->sq_evhead != NULL) {
7201 7297 ASSERT(sq->sq_flags & SQ_EVENTS);
7202 7298 prev = NULL;
7203 7299 for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7204 7300 next = bp->b_next;
7205 7301 if (bp->b_queue == qp) {
7206 7302 /* Delete this message */
7207 7303 if (prev != NULL) {
7208 7304 prev->b_next = next;
7209 7305 /*
7210 7306 * Update sq_evtail if the last element
7211 7307 * is removed.
7212 7308 */
7213 7309 if (bp == sq->sq_evtail) {
7214 7310 ASSERT(next == NULL);
7215 7311 sq->sq_evtail = prev;
7216 7312 }
7217 7313 } else
7218 7314 sq->sq_evhead = next;
7219 7315 if (sq->sq_evhead == NULL)
7220 7316 sq->sq_flags &= ~SQ_EVENTS;
7221 7317 bp->b_prev = bp->b_next = NULL;
7222 7318 freemsg(bp);
7223 7319 } else {
7224 7320 prev = bp;
7225 7321 }
7226 7322 }
7227 7323 }
7228 7324
7229 7325 flags = sq->sq_flags;
7230 7326
7231 7327 /* Wake up any waiter before leaving. */
7232 7328 if (flags & SQ_WANTWAKEUP) {
7233 7329 flags &= ~SQ_WANTWAKEUP;
7234 7330 cv_broadcast(&sq->sq_wait);
7235 7331 }
7236 7332 sq->sq_flags = flags;
7237 7333
7238 7334 return (moved);
7239 7335 }
7240 7336
7241 7337 /*
7242 7338 * Try to upgrade to exclusive access at the inner perimeter. If this cannot
7243 7339 * be done without blocking, then the request will be queued on the syncq
7244 7340 * and drain_syncq will run it later.
7245 7341 *
7246 7342 * This routine can only be called from put or service procedures plus
7247 7343 * asynchronous callback routines that have properly entered the queue (with
7248 7344 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq
7249 7345 * associated with q.
7250 7346 */
7251 7347 void
7252 7348 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7253 7349 {
7254 7350 syncq_t *sq = q->q_syncq;
7255 7351 uint16_t count;
7256 7352
7257 7353 mutex_enter(SQLOCK(sq));
7258 7354 count = sq->sq_count;
7259 7355 SQ_PUTLOCKS_ENTER(sq);
7260 7356 SUM_SQ_PUTCOUNTS(sq, count);
7261 7357 ASSERT(count >= 1);
7262 7358 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7263 7359
7264 7360 if (count == 1) {
7265 7361 /*
7266 7362 * Can upgrade. This case also handles nested qwriter calls
7267 7363 * (when the qwriter callback function calls qwriter). In that
7268 7364 * case SQ_EXCL is already set.
7269 7365 */
7270 7366 sq->sq_flags |= SQ_EXCL;
7271 7367 SQ_PUTLOCKS_EXIT(sq);
7272 7368 mutex_exit(SQLOCK(sq));
7273 7369 (*func)(q, mp);
7274 7370 /*
7275 7371 * Assumes that leavesq, putnext, and drain_syncq will reset
7276 7372 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7277 7373 * until putnext, leavesq, or drain_syncq drops it.
7278 7374 * That way we handle nested qwriter(INNER) without dropping
7279 7375 * SQ_EXCL until the outermost qwriter callback routine is
7280 7376 * done.
7281 7377 */
7282 7378 return;
7283 7379 }
7284 7380 SQ_PUTLOCKS_EXIT(sq);
7285 7381 sqfill_events(sq, q, mp, func);
7286 7382 }
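/*
 * A minimal sketch of how a module typically reaches qwriter_inner():
 * through the qwriter(9F) entry point with PERIM_INNER, from a put or
 * service procedure that already holds a claim. The xx names below are
 * hypothetical.
 *
 *	static void
 *	xx_exclusive(queue_t *q, mblk_t *mp)
 *	{
 *		... runs exclusive at the inner perimeter ...
 *	}
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (DB_TYPE(mp) == M_IOCTL) {
 *			qwriter(q, mp, xx_exclusive, PERIM_INNER);
 *			return (0);
 *		}
 *		putnext(q, mp);
 *		return (0);
 *	}
 */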
7287 7383
7288 7384 /*
7289 7385 * Synchronous callback support functions
7290 7386 */
7291 7387
7292 7388 /*
7293 7389 * Allocate a callback parameter structure.
7294 7390 * Assumes that caller initializes the flags and the id.
7295 7391 * Acquires SQLOCK(sq) if non-NULL is returned.
7296 7392 */
7297 7393 callbparams_t *
7298 7394 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7299 7395 {
7300 7396 callbparams_t *cbp;
7301 7397 size_t size = sizeof (callbparams_t);
7302 7398
7303 7399 cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7304 7400
7305 7401 /*
7306 7402 * Only try tryhard allocation if the caller is ready to panic.
7307 7403 * Otherwise just fail.
7308 7404 */
7309 7405 if (cbp == NULL) {
7310 7406 if (kmflags & KM_PANIC)
7311 7407 cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7312 7408 &size, kmflags);
7313 7409 else
7314 7410 return (NULL);
7315 7411 }
7316 7412
7317 7413 ASSERT(size >= sizeof (callbparams_t));
7318 7414 cbp->cbp_size = size;
7319 7415 cbp->cbp_sq = sq;
7320 7416 cbp->cbp_func = func;
7321 7417 cbp->cbp_arg = arg;
7322 7418 mutex_enter(SQLOCK(sq));
7323 7419 cbp->cbp_next = sq->sq_callbpend;
7324 7420 sq->sq_callbpend = cbp;
7325 7421 return (cbp);
7326 7422 }
7327 7423
7328 7424 void
7329 7425 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7330 7426 {
7331 7427 callbparams_t **pp, *p;
7332 7428
7333 7429 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7334 7430
7335 7431 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7336 7432 if (p == cbp) {
7337 7433 *pp = p->cbp_next;
7338 7434 kmem_free(p, p->cbp_size);
7339 7435 return;
7340 7436 }
7341 7437 }
7342 7438 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7343 7439 "callbparams_free: not found\n"));
7344 7440 }
7345 7441
7346 7442 void
7347 7443 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7348 7444 {
7349 7445 callbparams_t **pp, *p;
7350 7446
7351 7447 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7352 7448
7353 7449 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7354 7450 if (p->cbp_id == id && p->cbp_flags == flag) {
7355 7451 *pp = p->cbp_next;
7356 7452 kmem_free(p, p->cbp_size);
7357 7453 return;
7358 7454 }
7359 7455 }
7360 7456 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7361 7457 "callbparams_free_id: not found\n"));
7362 7458 }
7363 7459
7364 7460 /*
7365 7461 * Callback wrapper function used by once-only callbacks that can be
7366 7462 * cancelled (qtimeout and qbufcall).
7367 7463 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be
7368 7464 * cancelled by the qun* functions.
7369 7465 */
7370 7466 void
7371 7467 qcallbwrapper(void *arg)
7372 7468 {
7373 7469 callbparams_t *cbp = arg;
7374 7470 syncq_t *sq;
7375 7471 uint16_t count = 0;
7376 7472 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7377 7473 uint16_t type;
7378 7474
7379 7475 sq = cbp->cbp_sq;
7380 7476 mutex_enter(SQLOCK(sq));
7381 7477 type = sq->sq_type;
7382 7478 if (!(type & SQ_CICB)) {
7383 7479 count = sq->sq_count;
7384 7480 SQ_PUTLOCKS_ENTER(sq);
7385 7481 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7386 7482 SUM_SQ_PUTCOUNTS(sq, count);
7387 7483 sq->sq_needexcl++;
7388 7484 ASSERT(sq->sq_needexcl != 0); /* wraparound */
7389 7485 waitflags |= SQ_MESSAGES;
7390 7486 }
7391 7487 /* Cannot handle exclusive entry at the outer perimeter */
7392 7488 ASSERT(type & SQ_COCB);
7393 7489
7394 7490 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7395 7491 if ((sq->sq_callbflags & cbp->cbp_flags) &&
7396 7492 (sq->sq_cancelid == cbp->cbp_id)) {
7397 7493 /* timeout has been cancelled */
7398 7494 sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7399 7495 callbparams_free(sq, cbp);
7400 7496 if (!(type & SQ_CICB)) {
7401 7497 ASSERT(sq->sq_needexcl > 0);
7402 7498 sq->sq_needexcl--;
7403 7499 if (sq->sq_needexcl == 0) {
7404 7500 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7405 7501 }
7406 7502 SQ_PUTLOCKS_EXIT(sq);
7407 7503 }
7408 7504 mutex_exit(SQLOCK(sq));
7409 7505 return;
7410 7506 }
7411 7507 sq->sq_flags |= SQ_WANTWAKEUP;
7412 7508 if (!(type & SQ_CICB)) {
7413 7509 SQ_PUTLOCKS_EXIT(sq);
7414 7510 }
7415 7511 cv_wait(&sq->sq_wait, SQLOCK(sq));
7416 7512 if (!(type & SQ_CICB)) {
7417 7513 count = sq->sq_count;
7418 7514 SQ_PUTLOCKS_ENTER(sq);
7419 7515 SUM_SQ_PUTCOUNTS(sq, count);
7420 7516 }
7421 7517 }
7422 7518
7423 7519 sq->sq_count++;
7424 7520 ASSERT(sq->sq_count != 0); /* Wraparound */
7425 7521 if (!(type & SQ_CICB)) {
7426 7522 ASSERT(count == 0);
7427 7523 sq->sq_flags |= SQ_EXCL;
7428 7524 ASSERT(sq->sq_needexcl > 0);
7429 7525 sq->sq_needexcl--;
7430 7526 if (sq->sq_needexcl == 0) {
7431 7527 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7432 7528 }
7433 7529 SQ_PUTLOCKS_EXIT(sq);
7434 7530 }
7435 7531
7436 7532 mutex_exit(SQLOCK(sq));
7437 7533
7438 7534 cbp->cbp_func(cbp->cbp_arg);
7439 7535
7440 7536 /*
7441 7537 * We drop the lock only so that leavesq can re-acquire it.
7442 7538 * A possible optimization is to inline leavesq.
7443 7539 */
7444 7540 mutex_enter(SQLOCK(sq));
7445 7541 callbparams_free(sq, cbp);
7446 7542 mutex_exit(SQLOCK(sq));
7447 7543 leavesq(sq, SQ_CALLBACK);
7448 7544 }
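/*
 * A minimal sketch of the once-only callback pattern that callbparams_*()
 * and qcallbwrapper() implement underneath qtimeout(9F) and qbufcall(9F).
 * The xx names are hypothetical.
 *
 *	timeout_id_t tid;
 *
 *	tid = qtimeout(q, xx_expire, xx_arg, drv_usectohz(1000000));
 *	...
 *	(void) quntimeout(q, tid);
 *
 * The callback either runs inside the perimeter or is cancelled by
 * quntimeout(), never both; that is the race qcallbwrapper() resolves.
 */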
7449 7545
7450 7546 /*
7451 7547 * No need to grab sq_putlocks here. See comment in strsubr.h that
7452 7548 * explains when sq_putlocks are used.
7453 7549 *
7454 7550 * sq_count (or one of the sq_putcounts) has already been
7455 7551 * decremented by the caller, and if SQ_QUEUED, we need to call
7456 7552 * drain_syncq (the global syncq drain).
7457 7553 * If putnext_tail is called with the SQ_EXCL bit set, we are in
7458 7554 * one of two states: either this is a non-CIPUT perimeter and we need to
7459 7555 * clear it, or we went exclusive in the put procedure. In any case,
7460 7556 * we want to clear the bit now, and it is probably easier to do
7461 7557 * this at the beginning of this function (remember, we hold
7462 7558 * the SQLOCK). Lastly, if there are other messages queued
7463 7559 * on the syncq (and not for our destination), enable the syncq
7464 7560 * for background work.
7465 7561 */
7466 7562
7467 7563 /* ARGSUSED */
7468 7564 void
7469 7565 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7470 7566 {
7471 7567 uint16_t flags = sq->sq_flags;
7472 7568
7473 7569 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7474 7570 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7475 7571
7476 7572 /* Clear SQ_EXCL if set in passflags */
7477 7573 if (passflags & SQ_EXCL) {
7478 7574 flags &= ~SQ_EXCL;
7479 7575 }
7480 7576 if (flags & SQ_WANTWAKEUP) {
7481 7577 flags &= ~SQ_WANTWAKEUP;
7482 7578 cv_broadcast(&sq->sq_wait);
7483 7579 }
7484 7580 if (flags & SQ_WANTEXWAKEUP) {
7485 7581 flags &= ~SQ_WANTEXWAKEUP;
7486 7582 cv_broadcast(&sq->sq_exitwait);
7487 7583 }
7488 7584 sq->sq_flags = flags;
7489 7585
7490 7586 /*
7491 7587 * We have cleared SQ_EXCL if we were asked to, and started
7492 7588 * the wakeup process for waiters. If there are no writers
7493 7589 * then we need to drain the syncq if we were told to, or
7494 7590 * enable the background thread to do it.
7495 7591 */
7496 7592 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7497 7593 if ((passflags & SQ_QUEUED) ||
7498 7594 (sq->sq_svcflags & SQ_DISABLED)) {
7499 7595 /* drain_syncq will take care of events in the list */
7500 7596 drain_syncq(sq);
7501 7597 return;
7502 7598 } else if (flags & SQ_QUEUED) {
7503 7599 sqenable(sq);
7504 7600 }
7505 7601 }
7506 7602 /* Drop the SQLOCK on exit */
7507 7603 mutex_exit(SQLOCK(sq));
7508 7604 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7509 7605 "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7510 7606 }
7511 7607
7512 7608 void
7513 7609 set_qend(queue_t *q)
7514 7610 {
7515 7611 mutex_enter(QLOCK(q));
7516 7612 if (!O_SAMESTR(q))
7517 7613 q->q_flag |= QEND;
7518 7614 else
7519 7615 q->q_flag &= ~QEND;
7520 7616 mutex_exit(QLOCK(q));
7521 7617 q = _OTHERQ(q);
7522 7618 mutex_enter(QLOCK(q));
7523 7619 if (!O_SAMESTR(q))
7524 7620 q->q_flag |= QEND;
7525 7621 else
7526 7622 q->q_flag &= ~QEND;
7527 7623 mutex_exit(QLOCK(q));
7528 7624 }
7529 7625
7530 7626 /*
7531 7627 * Set QFULL in next service procedure queue (that cares) if not already
7532 7628 * set and if there are already more messages on the syncq than
7533 7629 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on
7534 7630 * any syncq.
7535 7631 *
7536 7632 * The fq here is the next queue with a service procedure. This is where
7537 7633 * we would fail canputnext, so this is where we need to set QFULL.
7538 7634 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
7539 7635 *
7540 7636 * We already have QLOCK at this point. To avoid cross-locks with
7541 7637 * freezestr() which grabs all QLOCKs and with strlock() which grabs both
7542 7638 * SQLOCK and sd_reflock, we need to drop respective locks first.
7543 7639 */
7544 7640 void
7545 7641 set_qfull(queue_t *q)
7546 7642 {
7547 7643 queue_t *fq = NULL;
7548 7644
7549 7645 ASSERT(MUTEX_HELD(QLOCK(q)));
7550 7646 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
7551 7647 (q->q_syncqmsgs > sq_max_size)) {
7552 7648 if ((fq = q->q_nfsrv) == q) {
7553 7649 fq->q_flag |= QFULL;
7554 7650 } else {
7555 7651 mutex_exit(QLOCK(q));
7556 7652 mutex_enter(QLOCK(fq));
7557 7653 fq->q_flag |= QFULL;
7558 7654 mutex_exit(QLOCK(fq));
7559 7655 mutex_enter(QLOCK(q));
7560 7656 }
7561 7657 }
7562 7658 }
7563 7659
7564 7660 void
7565 7661 clr_qfull(queue_t *q)
7566 7662 {
7567 7663 queue_t *oq = q;
7568 7664
7569 7665 q = q->q_nfsrv;
7570 7666 /* Fast check if there is any work to do before getting the lock. */
7571 7667 if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7572 7668 return;
7573 7669 }
7574 7670
7575 7671 /*
7576 7672 * Do not reset QFULL (and backenable) if the q_count is the reason
7577 7673 * for QFULL being set.
7578 7674 */
7579 7675 mutex_enter(QLOCK(q));
7580 7676 /*
7581 7677 * If the queue is empty, i.e. q_mblkcnt is zero, the queue cannot be
7582 7678 * full; hence clear QFULL.
7583 7679 * If both q_count and q_mblkcnt are less than the hiwat mark,
7584 7680 * clear the QFULL.
7585 7681 */
7586 7682 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7587 7683 (q->q_mblkcnt < q->q_hiwat))) {
7588 7684 q->q_flag &= ~QFULL;
7589 7685 /*
7590 7686 * A little more confusing, how about this way:
7591 7687 * if someone wants to write,
7592 7688 * AND
7593 7689 * both counts are less than the lowat mark
7594 7690 * OR
7595 7691 * the lowat mark is zero
7596 7692 * THEN
7597 7693 * backenable
7598 7694 */
7599 7695 if ((q->q_flag & QWANTW) &&
7600 7696 (((q->q_count < q->q_lowat) &&
7601 7697 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7602 7698 q->q_flag &= ~QWANTW;
7603 7699 mutex_exit(QLOCK(q));
7604 7700 backenable(oq, 0);
7605 7701 } else
7606 7702 mutex_exit(QLOCK(q));
7607 7703 } else
7608 7704 mutex_exit(QLOCK(q));
7609 7705 }
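/*
 * A minimal sketch of how set_qfull()/clr_qfull() appear to a module.
 * canputnext(9F) fails while QFULL is set on the next queue with a
 * service procedure; the module queues locally and relies on the
 * backenable to re-run its service procedure. The xx names are
 * hypothetical.
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (!canputnext(q)) {
 *			(void) putq(q, mp);	(wait for backenable)
 *			return (0);
 *		}
 *		putnext(q, mp);
 *		return (0);
 *	}
 */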
7610 7706
7611 7707 /*
7612 7708 * Set the forward service procedure pointer.
7613 7709 *
7614 7710 * Called at insert-time to cache a queue's next forward service procedure in
7615 7711 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7616 7712 * has a service procedure then q_nfsrv points to itself. If the queue to be
7617 7713 * inserted does not have a service procedure, then q_nfsrv points to the next
7618 7714 * queue forward that has a service procedure. If the queue is at the logical
7619 7715 * end of the stream (driver for write side, stream head for the read side)
7620 7716 * and does not have a service procedure, then q_nfsrv also points to itself.
7621 7717 */
7622 7718 void
7623 7719 set_nfsrv_ptr(
7624 7720 queue_t *rnew, /* read queue pointer to new module */
7625 7721 queue_t *wnew, /* write queue pointer to new module */
7626 7722 queue_t *prev_rq, /* read queue pointer to the module above */
7627 7723 queue_t *prev_wq) /* write queue pointer to the module above */
7628 7724 {
7629 7725 queue_t *qp;
7630 7726
7631 7727 if (prev_wq->q_next == NULL) {
7632 7728 /*
7633 7729 * Insert the driver, initialize the driver and stream head.
7634 7730 * In this case, prev_rq/prev_wq should be the stream head.
7635 7731 * _I_INSERT does not allow inserting a driver. Make sure
7636 7732 * that it is not an insertion.
7637 7733 */
7638 7734 ASSERT(!(rnew->q_flag & _QINSERTING));
7639 7735 wnew->q_nfsrv = wnew;
7640 7736 if (rnew->q_qinfo->qi_srvp)
7641 7737 rnew->q_nfsrv = rnew;
7642 7738 else
7643 7739 rnew->q_nfsrv = prev_rq;
7644 7740 prev_rq->q_nfsrv = prev_rq;
7645 7741 prev_wq->q_nfsrv = prev_wq;
7646 7742 } else {
7647 7743 /*
7648 7744 * set up read side q_nfsrv pointer. This MUST be done
7649 7745 * before setting the write side, because the setting of
7650 7746 * the write side for a fifo may depend on it.
7651 7747 *
7652 7748 * Suppose we have a fifo that only has pipemod pushed.
7653 7749 * pipemod has no read or write service procedures, so
7654 7750 * nfsrv for both pipemod queues points to prev_rq (the
7655 7751 * stream read head). Now push bufmod (which has only a
7656 7752 * read service procedure). Doing the write side first,
7657 7753 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7658 7754 * is WRONG; the next queue forward from wnew with a
7659 7755 * service procedure will be rnew, not the stream read head.
7660 7756 * Since the downstream queue (which in the case of a fifo
7661 7757 * is the read queue rnew) can affect upstream queues, it
7662 7758 * needs to be done first. Setting up the read side first
7663 7759 * sets nfsrv for both pipemod queues to rnew and then
7664 7760 * when the write side is set up, wnew->q_nfsrv will also
7665 7761 * point to rnew.
7666 7762 */
7667 7763 if (rnew->q_qinfo->qi_srvp) {
7668 7764 /*
7669 7765 * use _OTHERQ() because, if this is a pipe, next
7670 7766 * module may have been pushed from other end and
7671 7767 * q_next could be a read queue.
7672 7768 */
7673 7769 qp = _OTHERQ(prev_wq->q_next);
7674 7770 while (qp && qp->q_nfsrv != qp) {
7675 7771 qp->q_nfsrv = rnew;
7676 7772 qp = backq(qp);
7677 7773 }
7678 7774 rnew->q_nfsrv = rnew;
7679 7775 } else
7680 7776 rnew->q_nfsrv = prev_rq->q_nfsrv;
7681 7777
7682 7778 /* set up write side q_nfsrv pointer */
7683 7779 if (wnew->q_qinfo->qi_srvp) {
7684 7780 wnew->q_nfsrv = wnew;
7685 7781
7686 7782 /*
7687 7783 * For insertion, need to update nfsrv of the modules
7688 7784 * above which do not have a service routine.
7689 7785 */
7690 7786 if (rnew->q_flag & _QINSERTING) {
7691 7787 for (qp = prev_wq;
7692 7788 qp != NULL && qp->q_nfsrv != qp;
7693 7789 qp = backq(qp)) {
7694 7790 qp->q_nfsrv = wnew->q_nfsrv;
7695 7791 }
7696 7792 }
7697 7793 } else {
7698 7794 if (prev_wq->q_next == prev_rq)
7699 7795 /*
7700 7796 * Since prev_wq/prev_rq are the middle of a
7701 7797 * fifo, wnew/rnew will also be the middle of
7702 7798 * a fifo and wnew's nfsrv is the same as rnew's.
7703 7799 */
7704 7800 wnew->q_nfsrv = rnew->q_nfsrv;
7705 7801 else
7706 7802 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7707 7803 }
7708 7804 }
7709 7805 }
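/*
 * A minimal sketch of why q_nfsrv is cached: canput(9F) and canputnext(9F)
 * need "the next queue forward with a service procedure", and with q_nfsrv
 * that is a single dereference instead of a q_next walk. Conceptually
 * (ignoring locking and backenable bookkeeping):
 *
 *	canput(q)  ~  !(q->q_nfsrv->q_flag & QFULL)
 */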
7710 7806
7711 7807 /*
7712 7808 * Reset the forward service procedure pointer; called at remove-time.
7713 7809 */
7714 7810 void
7715 7811 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7716 7812 {
7717 7813 queue_t *tmp_qp;
7718 7814
7719 7815 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7720 7816 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7721 7817 for (tmp_qp = backq(wqp);
7722 7818 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7723 7819 tmp_qp = backq(tmp_qp)) {
7724 7820 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7725 7821 }
7726 7822 }
7727 7823
7728 7824 /* reset the read side q_nfsrv pointer */
7729 7825 if (rqp->q_qinfo->qi_srvp) {
7730 7826 if (wqp->q_next) { /* non-driver case */
7731 7827 tmp_qp = _OTHERQ(wqp->q_next);
7732 7828 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7733 7829 /* Note that rqp->q_next cannot be NULL */
7734 7830 ASSERT(rqp->q_next != NULL);
7735 7831 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7736 7832 tmp_qp = backq(tmp_qp);
7737 7833 }
7738 7834 }
7739 7835 }
7740 7836 }
7741 7837
7742 7838 /*
7743 7839 * This routine should be called after all stream geometry changes to update
7744 7840 * the stream head cached struio() rd/wr queue pointers. Note that it must
7745 7841 * be called with the streamlock() held.
7746 7842 *
7747 7843 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7748 7844 * an explicit synchronous barrier module queue. That is, a queue that
7749 7845 * has specified a struio() type.
7750 7846 */
7751 7847 static void
7752 7848 strsetuio(stdata_t *stp)
7753 7849 {
7754 7850 queue_t *wrq;
7755 7851
7756 7852 if (stp->sd_flag & STPLEX) {
7757 7853 /*
7758 7854 * Not streamhead, but a mux, so no Synchronous STREAMS.
7759 7855 */
7760 7856 stp->sd_struiowrq = NULL;
7761 7857 stp->sd_struiordq = NULL;
7762 7858 return;
7763 7859 }
7764 7860 /*
7765 7861 * Scan the write queue(s) while synchronous
7766 7862 * until we find a qinfo uio type specified.
7767 7863 */
7768 7864 wrq = stp->sd_wrq->q_next;
7769 7865 while (wrq) {
7770 7866 if (wrq->q_struiot == STRUIOT_NONE) {
7771 7867 wrq = 0;
7772 7868 break;
7773 7869 }
7774 7870 if (wrq->q_struiot != STRUIOT_DONTCARE)
7775 7871 break;
7776 7872 if (! _SAMESTR(wrq)) {
7777 7873 wrq = 0;
7778 7874 break;
7779 7875 }
7780 7876 wrq = wrq->q_next;
7781 7877 }
7782 7878 stp->sd_struiowrq = wrq;
7783 7879 /*
7784 7880 * Scan the read queue(s) while synchronous
7785 7881 * until we find a qinfo uio type specified.
7786 7882 */
7787 7883 wrq = stp->sd_wrq->q_next;
7788 7884 while (wrq) {
7789 7885 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7790 7886 wrq = 0;
7791 7887 break;
7792 7888 }
7793 7889 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7794 7890 break;
7795 7891 if (! _SAMESTR(wrq)) {
7796 7892 wrq = 0;
7797 7893 break;
7798 7894 }
7799 7895 wrq = wrq->q_next;
7800 7896 }
7801 7897 stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7802 7898 }
7803 7899
7804 7900 /*
7805 7901 * pass_wput unblocks the passthru queues so that
7806 7902 * messages can arrive at the mux's lower read queue before
7807 7903 * the I_LINK/I_UNLINK is acked/nacked.
7808 7904 */
7809 7905 static void
7810 7906 pass_wput(queue_t *q, mblk_t *mp)
7811 7907 {
7812 7908 syncq_t *sq;
7813 7909
7814 7910 sq = _RD(q)->q_syncq;
7815 7911 if (sq->sq_flags & SQ_BLOCKED)
7816 7912 unblocksq(sq, SQ_BLOCKED, 0);
7817 7913 putnext(q, mp);
7818 7914 }
7819 7915
7820 7916 /*
7821 7917 * Set up queues for the link/unlink.
7822 7918 * Create a new queue, block it, and then insert it
7823 7919 * below the stream head on the lower stream.
7824 7920 * This prevents any messages from arriving during the setq
7825 7921 * as well as while the mux is processing the I_LINK/I_UNLINK.
7826 7922 * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7827 7923 * been acked or nacked, or if a message is generated and sent
7828 7924 * down the mux's write put procedure.
7829 7925 * See pass_wput().
7830 7926 *
7831 7927 * After the new queue is inserted, all messages coming from below are
7832 7928 * blocked. The call to strlock will ensure that all activity in the stream head
7833 7929 * read queue syncq is stopped (sq_count drops to zero).
7834 7930 */
7835 7931 static queue_t *
7836 7932 link_addpassthru(stdata_t *stpdown)
7837 7933 {
7838 7934 queue_t *passq;
7839 7935 sqlist_t sqlist;
7840 7936
7841 7937 passq = allocq();
7842 7938 STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7843 7939 /* setq might sleep in allocator - avoid holding locks. */
7844 7940 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7845 7941 SQ_CI|SQ_CO, B_FALSE);
7846 7942 claimq(passq);
7847 7943 blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7848 7944 insertq(STREAM(passq), passq);
7849 7945
7850 7946 /*
7851 7947 * Use strlock() to wait for the stream head sq_count to drop to zero
7852 7948 * since we are going to change q_ptr in the stream head. Note that
7853 7949 * insertq() doesn't wait for any syncq counts to drop to zero.
7854 7950 */
7855 7951 sqlist.sqlist_head = NULL;
7856 7952 sqlist.sqlist_index = 0;
7857 7953 sqlist.sqlist_size = sizeof (sqlist_t);
7858 7954 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7859 7955 strlock(stpdown, &sqlist);
7860 7956 strunlock(stpdown, &sqlist);
7861 7957
7862 7958 releaseq(passq);
7863 7959 return (passq);
7864 7960 }
7865 7961
7866 7962 /*
7867 7963 * Let messages flow up into the mux by removing
7868 7964 * the passq.
7869 7965 */
7870 7966 static void
7871 7967 link_rempassthru(queue_t *passq)
7872 7968 {
7873 7969 claimq(passq);
7874 7970 removeq(passq);
7875 7971 releaseq(passq);
7876 7972 freeq(passq);
7877 7973 }
7878 7974
7879 7975 /*
7880 7976 * Wait for the condition variable pointed to by `cvp' to be signaled,
7881 7977 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7882 7978 * is negative, then there is no time limit. If `nosigs' is non-zero,
7883 7979 * then the wait will be non-interruptible.
7884 7980 *
7885 7981 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7886 7982 */
7887 7983 clock_t
7888 7984 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7889 7985 {
7890 7986 clock_t ret;
7891 7987
7892 7988 if (tim < 0) {
7893 7989 if (nosigs) {
7894 7990 cv_wait(cvp, mp);
7895 7991 ret = 1;
7896 7992 } else {
7897 7993 ret = cv_wait_sig(cvp, mp);
7898 7994 }
7899 7995 } else if (tim > 0) {
7900 7996 /*
7901 7997 * convert milliseconds to clock ticks
7902 7998 */
7903 7999 if (nosigs) {
7904 8000 ret = cv_reltimedwait(cvp, mp,
7905 8001 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7906 8002 } else {
7907 8003 ret = cv_reltimedwait_sig(cvp, mp,
7908 8004 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7909 8005 }
7910 8006 } else {
7911 8007 ret = -1;
7912 8008 }
7913 8009 return (ret);
7914 8010 }
7915 8011
7916 8012 /*
7917 8013 * Wait until the stream head can determine if it is at the mark but
7918 8014 * don't wait forever to prevent a race condition between the "mark" state
7919 8015 * in the stream head and any mark state in the caller/user of this routine.
7920 8016 *
7921 8017 * This is used by sockets, and for a socket it would be incorrect
7922 8018 * to return a failure for SIOCATMARK when there is no data in the receive
7923 8019 * queue and the marked urgent data is traveling up the stream.
7924 8020 *
7925 8021 * This routine waits until the mark is known by waiting for one of these
7926 8022 * three events:
7927 8023 * The stream head read queue becoming non-empty (including an EOF).
7928 8024 * The STRATMARK flag being set (due to a MSGMARKNEXT message).
7929 8025 * The STRNOTATMARK flag being set (which indicates that the transport
7930 8026 * has sent a MSGNOTMARKNEXT message to indicate that it is not at
7931 8027 * the mark).
7932 8028 *
7933 8029 * The routine returns 1 if the stream is at the mark; 0 if it can
7934 8030 * be determined that the stream is not at the mark.
7935 8031 * If the wait times out and it can't be determined
7936 8032 * whether or not the stream is at the mark, the routine returns -1.
7937 8033 *
7938 8034 * Note: This routine should only be used when a mark is pending i.e.,
7939 8035 * in the socket case the SIGURG has been posted.
7940 8036 * Note2: This cannot wake up just because synchronous streams indicate
7941 8037 * that data is available since it is not possible to use the synchronous
7942 8038 * streams interfaces to determine the b_flag value for the data queued below
7943 8039 * the stream head.
7944 8040 */
7945 8041 int
7946 8042 strwaitmark(vnode_t *vp)
7947 8043 {
7948 8044 struct stdata *stp = vp->v_stream;
7949 8045 queue_t *rq = _RD(stp->sd_wrq);
7950 8046 int mark;
7951 8047
7952 8048 mutex_enter(&stp->sd_lock);
7953 8049 while (rq->q_first == NULL &&
7954 8050 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7955 8051 stp->sd_flag |= RSLEEP;
7956 8052
7957 8053 /* Wait for 100 milliseconds for any state change. */
7958 8054 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7959 8055 mutex_exit(&stp->sd_lock);
7960 8056 return (-1);
7961 8057 }
7962 8058 }
7963 8059 if (stp->sd_flag & STRATMARK)
7964 8060 mark = 1;
7965 8061 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7966 8062 mark = 1;
7967 8063 else
7968 8064 mark = 0;
7969 8065
7970 8066 mutex_exit(&stp->sd_lock);
7971 8067 return (mark);
7972 8068 }
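/*
 * A minimal sketch of how a socket's SIOCATMARK handler might use
 * strwaitmark(); the surrounding ioctl plumbing (valp) is hypothetical.
 *
 *	switch (strwaitmark(vp)) {
 *	case 1:				(at the mark)
 *		*valp = 1;
 *		break;
 *	case 0:				(definitely not at the mark)
 *		*valp = 0;
 *		break;
 *	case -1:			(timed out, state unknown)
 *		*valp = 0;
 *		break;
 *	}
 */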
7973 8069
7974 8070 /*
7975 8071 * Set a read side error. If persist is set change the socket error
7976 8072 * to persistent. If errfunc is set install the function as the exported
7977 8073 * error handler.
7978 8074 */
7979 8075 void
7980 8076 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7981 8077 {
7982 8078 struct stdata *stp = vp->v_stream;
7983 8079
7984 8080 mutex_enter(&stp->sd_lock);
7985 8081 stp->sd_rerror = error;
7986 8082 if (error == 0 && errfunc == NULL)
7987 8083 stp->sd_flag &= ~STRDERR;
7988 8084 else
7989 8085 stp->sd_flag |= STRDERR;
7990 8086 if (persist) {
7991 8087 stp->sd_flag &= ~STRDERRNONPERSIST;
7992 8088 } else {
7993 8089 stp->sd_flag |= STRDERRNONPERSIST;
7994 8090 }
7995 8091 stp->sd_rderrfunc = errfunc;
7996 8092 if (error != 0 || errfunc != NULL) {
7997 8093 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
7998 8094 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
7999 8095 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8000 8096
8001 8097 mutex_exit(&stp->sd_lock);
8002 8098 pollwakeup(&stp->sd_pollist, POLLERR);
8003 8099 mutex_enter(&stp->sd_lock);
8004 8100
8005 8101 if (stp->sd_sigflags & S_ERROR)
8006 8102 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8007 8103 }
8008 8104 mutex_exit(&stp->sd_lock);
8009 8105 }
8010 8106
8011 8107 /*
8012 8108 * Set a write side error. If persist is set change the socket error
8013 8109 * to persistent.
8014 8110 */
8015 8111 void
8016 8112 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8017 8113 {
8018 8114 struct stdata *stp = vp->v_stream;
8019 8115
8020 8116 mutex_enter(&stp->sd_lock);
8021 8117 stp->sd_werror = error;
8022 8118 if (error == 0 && errfunc == NULL)
8023 8119 stp->sd_flag &= ~STWRERR;
8024 8120 else
8025 8121 stp->sd_flag |= STWRERR;
8026 8122 if (persist) {
8027 8123 stp->sd_flag &= ~STWRERRNONPERSIST;
8028 8124 } else {
8029 8125 stp->sd_flag |= STWRERRNONPERSIST;
8030 8126 }
8031 8127 stp->sd_wrerrfunc = errfunc;
8032 8128 if (error != 0 || errfunc != NULL) {
8033 8129 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8034 8130 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8035 8131 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8036 8132
8037 8133 mutex_exit(&stp->sd_lock);
8038 8134 pollwakeup(&stp->sd_pollist, POLLERR);
8039 8135 mutex_enter(&stp->sd_lock);
8040 8136
8041 8137 if (stp->sd_sigflags & S_ERROR)
8042 8138 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8043 8139 }
8044 8140 mutex_exit(&stp->sd_lock);
8045 8141 }
8046 8142
8047 8143 /*
8048 8144 * Make the stream return 0 (EOF) when all data has been read.
8049 8145 * No effect on write side.
8050 8146 */
8051 8147 void
8052 8148 strseteof(vnode_t *vp, int eof)
8053 8149 {
8054 8150 struct stdata *stp = vp->v_stream;
8055 8151
8056 8152 mutex_enter(&stp->sd_lock);
8057 8153 if (!eof) {
8058 8154 stp->sd_flag &= ~STREOF;
8059 8155 mutex_exit(&stp->sd_lock);
8060 8156 return;
8061 8157 }
8062 8158 stp->sd_flag |= STREOF;
8063 8159 if (stp->sd_flag & RSLEEP) {
8064 8160 stp->sd_flag &= ~RSLEEP;
8065 8161 cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8066 8162 }
8067 8163
8068 8164 mutex_exit(&stp->sd_lock);
8069 8165 pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8070 8166 mutex_enter(&stp->sd_lock);
8071 8167
8072 8168 if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8073 8169 strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8074 8170 mutex_exit(&stp->sd_lock);
8075 8171 }
8076 8172
8077 8173 void
8078 8174 strflushrq(vnode_t *vp, int flag)
8079 8175 {
8080 8176 struct stdata *stp = vp->v_stream;
8081 8177
8082 8178 mutex_enter(&stp->sd_lock);
8083 8179 flushq(_RD(stp->sd_wrq), flag);
8084 8180 mutex_exit(&stp->sd_lock);
8085 8181 }
8086 8182
8087 8183 void
8088 8184 strsetrputhooks(vnode_t *vp, uint_t flags,
8089 8185 msgfunc_t protofunc, msgfunc_t miscfunc)
8090 8186 {
8091 8187 struct stdata *stp = vp->v_stream;
8092 8188
8093 8189 mutex_enter(&stp->sd_lock);
8094 8190
8095 8191 if (protofunc == NULL)
8096 8192 stp->sd_rprotofunc = strrput_proto;
8097 8193 else
8098 8194 stp->sd_rprotofunc = protofunc;
8099 8195
8100 8196 if (miscfunc == NULL)
8101 8197 stp->sd_rmiscfunc = strrput_misc;
8102 8198 else
8103 8199 stp->sd_rmiscfunc = miscfunc;
8104 8200
8105 8201 if (flags & SH_CONSOL_DATA)
8106 8202 stp->sd_rput_opt |= SR_CONSOL_DATA;
8107 8203 else
8108 8204 stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8109 8205
8110 8206 if (flags & SH_SIGALLDATA)
8111 8207 stp->sd_rput_opt |= SR_SIGALLDATA;
8112 8208 else
8113 8209 stp->sd_rput_opt &= ~SR_SIGALLDATA;
8114 8210
8115 8211 if (flags & SH_IGN_ZEROLEN)
8116 8212 stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8117 8213 else
8118 8214 stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8119 8215
8120 8216 mutex_exit(&stp->sd_lock);
8121 8217 }
8122 8218
8123 8219 void
8124 8220 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8125 8221 {
8126 8222 struct stdata *stp = vp->v_stream;
8127 8223
8128 8224 mutex_enter(&stp->sd_lock);
8129 8225 stp->sd_closetime = closetime;
8130 8226
8131 8227 if (flags & SH_SIGPIPE)
8132 8228 stp->sd_wput_opt |= SW_SIGPIPE;
8133 8229 else
8134 8230 stp->sd_wput_opt &= ~SW_SIGPIPE;
8135 8231 if (flags & SH_RECHECK_ERR)
8136 8232 stp->sd_wput_opt |= SW_RECHECK_ERR;
8137 8233 else
8138 8234 stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8139 8235
8140 8236 mutex_exit(&stp->sd_lock);
8141 8237 }
8142 8238
8143 8239 void
8144 8240 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8145 8241 {
8146 8242 struct stdata *stp = vp->v_stream;
8147 8243
8148 8244 mutex_enter(&stp->sd_lock);
8149 8245
8150 8246 stp->sd_rputdatafunc = rdatafunc;
8151 8247 stp->sd_wputdatafunc = wdatafunc;
8152 8248
8153 8249 mutex_exit(&stp->sd_lock);
8154 8250 }
8155 8251
8156 8252 /* Used within the framework when the queue is already locked */
8157 8253 void
8158 8254 qenable_locked(queue_t *q)
8159 8255 {
8160 8256 stdata_t *stp = STREAM(q);
8161 8257
8162 8258 ASSERT(MUTEX_HELD(QLOCK(q)));
8163 8259
8164 8260 if (!q->q_qinfo->qi_srvp)
8165 8261 return;
8166 8262
8167 8263 /*
8168 8264 * Do not place on run queue if already enabled or closing.
8169 8265 */
8170 8266 if (q->q_flag & (QWCLOSE|QENAB))
8171 8267 return;
8172 8268
8173 8269 /*
8174 8270 	 * Mark the queue enabled and place it on the run list if it is not
8175 8271 	 * already being serviced. If it is being serviced, runservice() will
8176 8272 	 * detect that QENAB is set and call the service procedure before
8177 8273 	 * clearing the QINSERVICE flag.
8178 8274 */
8179 8275 q->q_flag |= QENAB;
8180 8276 if (q->q_flag & QINSERVICE)
8181 8277 return;
8182 8278
8183 8279 /* Record the time of qenable */
8184 8280 q->q_qtstamp = ddi_get_lbolt();
8185 8281
8186 8282 /*
8187 8283 	 * Put the queue on the stp list and schedule it for background
8188 8284 	 * processing unless it is already scheduled, or unless the stream
8189 8285 	 * head has signalled, by setting the STRS_WILLSERVICE flag, that it
8190 8286 	 * will process it in the foreground later.
8191 8287 */
8192 8288 mutex_enter(&stp->sd_qlock);
8193 8289 /*
8194 8290 	 * If there is already something on the list, the stp flags should
8195 8291 	 * show an intention to drain it.
8196 8292 */
8197 8293 IMPLY(STREAM_NEEDSERVICE(stp),
8198 8294 (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
8199 8295
8200 8296 ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
8201 8297 stp->sd_nqueues++;
8202 8298
8203 8299 /*
8204 8300 	 * If no one will drain this stream, we are the first producer and
8205 8301 	 * need to schedule it for the background thread.
8206 8302 */
8207 8303 if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8208 8304 /*
8209 8305 * No one will service this stream later, so we have to
8210 8306 * schedule it now.
8211 8307 */
8212 8308 STRSTAT(stenables);
8213 8309 stp->sd_svcflags |= STRS_SCHEDULED;
8214 8310 stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8215 8311 (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8216 8312
8217 8313 if (stp->sd_servid == NULL) {
8218 8314 /*
8219 8315 			 * The task queue dispatch failed, so fail over to
8220 8316 			 * the backup servicing thread.
8221 8317 */
8222 8318 STRSTAT(taskqfails);
8223 8319 /*
8224 8320 * It is safe to clear STRS_SCHEDULED flag because it
8225 8321 * was set by this thread above.
8226 8322 */
8227 8323 stp->sd_svcflags &= ~STRS_SCHEDULED;
8228 8324
8229 8325 /*
8230 8326 * Failover scheduling is protected by service_queue
8231 8327 * lock.
8232 8328 */
8233 8329 mutex_enter(&service_queue);
8234 8330 ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8235 8331 ASSERT(q->q_link == NULL);
8236 8332 /*
8237 8333 * Append the queue to qhead/qtail list.
8238 8334 */
8239 8335 if (qhead == NULL)
8240 8336 qhead = q;
8241 8337 else
8242 8338 qtail->q_link = q;
8243 8339 qtail = q;
8244 8340 /*
8245 8341 * Clear stp queue list.
8246 8342 */
8247 8343 stp->sd_qhead = stp->sd_qtail = NULL;
8248 8344 stp->sd_nqueues = 0;
8249 8345 /*
8250 8346 * Wakeup background queue processing thread.
8251 8347 */
8252 8348 cv_signal(&services_to_run);
8253 8349 mutex_exit(&service_queue);
8254 8350 }
8255 8351 }
8256 8352 mutex_exit(&stp->sd_qlock);
8257 8353 }
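/*
 * Illustrative sketch (not part of this change): a caller that does not
 * already hold QLOCK uses the qenable() wrapper, which amounts to:
 *
 *	mutex_enter(QLOCK(q));
 *	qenable_locked(q);
 *	mutex_exit(QLOCK(q));
 */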
8258 8354
8259 8355 static void
8260 8356 queue_service(queue_t *q)
8261 8357 {
8262 8358 /*
8263 8359 	 * A queue on the list should have the
8264 8360 	 * QENAB flag set and the QINSERVICE
8265 8361 	 * flag clear. QINSERVICE is set when
8266 8362 	 * the queue is dequeued, and
8267 8363 	 * qenable_locked() doesn't enqueue a
8268 8364 	 * queue with QINSERVICE set.
8269 8365 */
8270 8366
8271 8367 ASSERT(!(q->q_flag & QINSERVICE));
8272 8368 ASSERT((q->q_flag & QENAB));
8273 8369 mutex_enter(QLOCK(q));
8274 8370 q->q_flag &= ~QENAB;
8275 8371 q->q_flag |= QINSERVICE;
8276 8372 mutex_exit(QLOCK(q));
8277 8373 runservice(q);
8278 8374 }
8279 8375
8280 8376 static void
8281 8377 syncq_service(syncq_t *sq)
8282 8378 {
8283 8379 STRSTAT(syncqservice);
8284 8380 mutex_enter(SQLOCK(sq));
8285 8381 ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8286 8382 ASSERT(sq->sq_servcount != 0);
8287 8383 ASSERT(sq->sq_next == NULL);
8288 8384
8289 8385 /* if we came here from the background thread, clear the flag */
8290 8386 if (sq->sq_svcflags & SQ_BGTHREAD)
8291 8387 sq->sq_svcflags &= ~SQ_BGTHREAD;
8292 8388
8293 8389 /* let drain_syncq know that it's being called in the background */
8294 8390 sq->sq_svcflags |= SQ_SERVICE;
8295 8391 drain_syncq(sq);
8296 8392 }
8297 8393
8298 8394 static void
8299 8395 qwriter_outer_service(syncq_t *outer)
8300 8396 {
8301 8397 /*
8302 8398 * Note that SQ_WRITER is used on the outer perimeter
8303 8399 	 * to signal that a qwriter(OUTER) is either investigating
8304 8400 	 * whether to run a function or is actually running one.
8305 8401 */
8306 8402 outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8307 8403
8308 8404 /*
8309 8405 	 * All inner syncqs are empty and have SQ_WRITER set
8310 8406 * to block entering the outer perimeter.
8311 8407 *
8312 8408 * We do not need to explicitly call write_now since
8313 8409 * outer_exit does it for us.
8314 8410 */
8315 8411 outer_exit(outer);
8316 8412 }
8317 8413
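/*
 * Final free path for a message block backed by an external buffer:
 * tear down any flow-trace header, invoke the caller-supplied free
 * routine for the data buffer, drop the attached credential, clear the
 * pid and checksum state, and return the dblk to its kmem cache.
 */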
8318 8414 static void
8319 8415 mblk_free(mblk_t *mp)
8320 8416 {
8321 8417 dblk_t *dbp = mp->b_datap;
8322 8418 frtn_t *frp = dbp->db_frtnp;
8323 8419
8324 8420 mp->b_next = NULL;
8325 8421 if (dbp->db_fthdr != NULL)
8326 8422 str_ftfree(dbp);
8327 8423
8328 8424 ASSERT(dbp->db_fthdr == NULL);
8329 8425 frp->free_func(frp->free_arg);
8330 8426 ASSERT(dbp->db_mblk == mp);
8331 8427
8332 8428 if (dbp->db_credp != NULL) {
8333 8429 crfree(dbp->db_credp);
8334 8430 dbp->db_credp = NULL;
8335 8431 }
8336 8432 dbp->db_cpid = -1;
8337 8433 dbp->db_struioflag = 0;
8338 8434 dbp->db_struioun.cksum.flags = 0;
8339 8435
8340 8436 kmem_cache_free(dbp->db_cache, dbp);
8341 8437 }
8342 8438
8343 8439 /*
8344 8440 * Background processing of the stream queue list.
8345 8441 */
8346 8442 static void
8347 8443 stream_service(stdata_t *stp)
8348 8444 {
8349 8445 queue_t *q;
8350 8446
8351 8447 mutex_enter(&stp->sd_qlock);
8352 8448
8353 8449 STR_SERVICE(stp, q);
8354 8450
8355 8451 stp->sd_svcflags &= ~STRS_SCHEDULED;
8356 8452 stp->sd_servid = NULL;
8357 8453 cv_signal(&stp->sd_qcv);
8358 8454 mutex_exit(&stp->sd_qlock);
8359 8455 }
8360 8456
8361 8457 /*
8362 8458 * Foreground processing of the stream queue list.
8363 8459 */
8364 8460 void
8365 8461 stream_runservice(stdata_t *stp)
8366 8462 {
8367 8463 queue_t *q;
8368 8464
8369 8465 mutex_enter(&stp->sd_qlock);
8370 8466 STRSTAT(rservice);
8371 8467 /*
8372 8468 * We are going to drain this stream queue list, so qenable_locked will
8373 8469 * not schedule it until we finish.
8374 8470 */
8375 8471 stp->sd_svcflags |= STRS_WILLSERVICE;
8376 8472
8377 8473 STR_SERVICE(stp, q);
8378 8474
8379 8475 stp->sd_svcflags &= ~STRS_WILLSERVICE;
8380 8476 mutex_exit(&stp->sd_qlock);
8381 8477 /*
8382 8478 	 * Help the backup background thread drain the qhead/qtail list.
8383 8479 */
8384 8480 while (qhead != NULL) {
8385 8481 STRSTAT(qhelps);
8386 8482 mutex_enter(&service_queue);
8387 8483 DQ(q, qhead, qtail, q_link);
8388 8484 mutex_exit(&service_queue);
8389 8485 if (q != NULL)
8390 8486 queue_service(q);
8391 8487 }
8392 8488 }
8393 8489
8394 8490 void
8395 8491 stream_willservice(stdata_t *stp)
8396 8492 {
8397 8493 mutex_enter(&stp->sd_qlock);
8398 8494 stp->sd_svcflags |= STRS_WILLSERVICE;
8399 8495 mutex_exit(&stp->sd_qlock);
8400 8496 }
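/*
 * Illustrative pairing (not part of this change): stream head code that
 * is about to feed a queue brackets the work so the queues are drained
 * once, in the foreground, when it finishes:
 *
 *	stream_willservice(stp);
 *	... putnext(...) ...
 *	stream_runservice(stp);
 */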
8401 8497
8402 8498 /*
8403 8499 * Replace the cred currently in the mblk with a different one.
8404 8500 * Also update db_cpid.
8405 8501 */
8406 8502 void
8407 8503 mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
8408 8504 {
8409 8505 dblk_t *dbp = mp->b_datap;
8410 8506 cred_t *ocr = dbp->db_credp;
8411 8507
8412 8508 ASSERT(cr != NULL);
8413 8509
8414 8510 if (cr != ocr) {
8415 8511 crhold(dbp->db_credp = cr);
8416 8512 if (ocr != NULL)
8417 8513 crfree(ocr);
8418 8514 }
8419 8515 /* Don't overwrite with NOPID */
8420 8516 if (cpid != NOPID)
8421 8517 dbp->db_cpid = cpid;
8422 8518 }
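/*
 * Illustrative use (not part of this change): a caller tagging a message
 * with the sending process would typically do
 *
 *	mblk_setcred(mp, CRED(), curproc->p_pid);
 *
 * so the pid is later available in db_cpid to consumers such as the
 * netstat PID reporting this change adds.
 */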
8423 8519
8424 8520 /*
8425 8521 * If the src message has a cred, then replace the cred currently in the mblk
8426 8522 * with it.
8427 8523 * Also update db_cpid.
8428 8524 */
8429 8525 void
8430 8526 mblk_copycred(mblk_t *mp, const mblk_t *src)
8431 8527 {
8432 8528 dblk_t *dbp = mp->b_datap;
8433 8529 cred_t *cr, *ocr;
8434 8530 pid_t cpid;
8435 8531
8436 8532 cr = msg_getcred(src, &cpid);
8437 8533 if (cr == NULL)
8438 8534 return;
8439 8535
8440 8536 ocr = dbp->db_credp;
8441 8537 if (cr != ocr) {
8442 8538 crhold(dbp->db_credp = cr);
8443 8539 if (ocr != NULL)
8444 8540 crfree(ocr);
8445 8541 }
8446 8542 /* Don't overwrite with NOPID */
8447 8543 if (cpid != NOPID)
8448 8544 dbp->db_cpid = cpid;
8449 8545 }
8450 8546
8451 8547 int
8452 8548 hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8453 8549 uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
8454 8550 uint32_t flags, int km_flags)
8455 8551 {
8456 8552 int rc = 0;
8457 8553
8458 8554 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8459 8555 if (mp->b_datap->db_type == M_DATA) {
8460 8556 /* Associate values for M_DATA type */
8461 8557 DB_CKSUMSTART(mp) = (intptr_t)start;
8462 8558 DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
8463 8559 DB_CKSUMEND(mp) = (intptr_t)end;
8464 8560 DB_CKSUMFLAGS(mp) = flags;
8465 8561 DB_CKSUM16(mp) = (uint16_t)value;
8466 8562
8467 8563 } else {
8468 8564 pattrinfo_t pa_info;
8469 8565
8470 8566 ASSERT(mmd != NULL);
8471 8567
8472 8568 pa_info.type = PATTR_HCKSUM;
8473 8569 pa_info.len = sizeof (pattr_hcksum_t);
8474 8570
8475 8571 if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
8476 8572 pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;
8477 8573
8478 8574 hck->hcksum_start_offset = start;
8479 8575 hck->hcksum_stuff_offset = stuff;
8480 8576 hck->hcksum_end_offset = end;
8481 8577 hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
8482 8578 hck->hcksum_flags = flags;
8483 8579 } else {
8484 8580 rc = -1;
8485 8581 }
8486 8582 }
8487 8583 return (rc);
8488 8584 }
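/*
 * Illustrative use (not part of this change): a driver reporting that
 * hardware verified the full checksum of a received M_DATA message
 * might mark it with
 *
 *	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
 *	    HCK_FULLCKSUM_OK, KM_NOSLEEP);
 *
 * For M_DATA messages the multidata (mmd) and pdesc (pd) arguments are
 * unused, as the code above shows.
 */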
8489 8585
8490 8586 void
8491 8587 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8492 8588 uint32_t *start, uint32_t *stuff, uint32_t *end,
8493 8589 uint32_t *value, uint32_t *flags)
8494 8590 {
8495 8591 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8496 8592 if (mp->b_datap->db_type == M_DATA) {
8497 8593 if (flags != NULL) {
8498 8594 *flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
8499 8595 if ((*flags & (HCK_PARTIALCKSUM |
8500 8596 HCK_FULLCKSUM)) != 0) {
8501 8597 if (value != NULL)
8502 8598 *value = (uint32_t)DB_CKSUM16(mp);
8503 8599 if ((*flags & HCK_PARTIALCKSUM) != 0) {
8504 8600 if (start != NULL)
8505 8601 *start =
8506 8602 (uint32_t)DB_CKSUMSTART(mp);
8507 8603 if (stuff != NULL)
8508 8604 *stuff =
8509 8605 (uint32_t)DB_CKSUMSTUFF(mp);
8510 8606 if (end != NULL)
8511 8607 *end =
8512 8608 (uint32_t)DB_CKSUMEND(mp);
8513 8609 }
8514 8610 }
8515 8611 }
8516 8612 } else {
8517 8613 pattrinfo_t hck_attr = {PATTR_HCKSUM};
8518 8614
8519 8615 ASSERT(mmd != NULL);
8520 8616
8521 8617 /* get hardware checksum attribute */
8522 8618 if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
8523 8619 pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;
8524 8620
8525 8621 ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
8526 8622 if (flags != NULL)
8527 8623 *flags = hck->hcksum_flags;
8528 8624 if (start != NULL)
8529 8625 *start = hck->hcksum_start_offset;
8530 8626 if (stuff != NULL)
8531 8627 *stuff = hck->hcksum_stuff_offset;
8532 8628 if (end != NULL)
8533 8629 *end = hck->hcksum_end_offset;
8534 8630 if (value != NULL)
8535 8631 *value = (uint32_t)
8536 8632 hck->hcksum_cksum_val.inet_cksum;
8537 8633 }
8538 8634 }
8539 8635 }
8540 8636
8541 8637 void
8542 8638 lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
8543 8639 {
8544 8640 ASSERT(DB_TYPE(mp) == M_DATA);
8545 8641 ASSERT((flags & ~HW_LSO_FLAGS) == 0);
8546 8642
8547 8643 /* Set the flags */
8548 8644 DB_LSOFLAGS(mp) |= flags;
8549 8645 DB_LSOMSS(mp) = mss;
8550 8646 }
8551 8647
8552 8648 void
8553 8649 lso_info_cleanup(mblk_t *mp)
8554 8650 {
8555 8651 ASSERT(DB_TYPE(mp) == M_DATA);
8556 8652
8557 8653 /* Clear the flags */
8558 8654 DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
8559 8655 DB_LSOMSS(mp) = 0;
8560 8656 }
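/*
 * Illustrative use (not part of this change): a transmit path requesting
 * hardware LSO on an M_DATA message might mark it with
 *
 *	lso_info_set(mp, tcp_mss, HW_LSO);
 *
 * where tcp_mss is the connection's maximum segment size.
 */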
8561 8657
8562 8658 /*
8563 8659  * Checksum the buffer *bp for len bytes, folding in the partial
8564 8660  * checksum psum (or 0 if none), and return the 16-bit partial checksum.
8565 8661 */
8566 8662 unsigned
8567 8663 bcksum(uchar_t *bp, int len, unsigned int psum)
8568 8664 {
8569 8665 int odd = len & 1;
8570 8666 extern unsigned int ip_ocsum();
8571 8667
8572 8668 if (((intptr_t)bp & 1) == 0 && !odd) {
8573 8669 /*
8574 8670 		 * bp is 16-bit aligned and len is a whole number of 16-bit words.
8575 8671 */
8576 8672 return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8577 8673 }
8578 8674 if (((intptr_t)bp & 1) != 0) {
8579 8675 /*
8580 8676 		 * bp isn't 16-bit aligned.
8581 8677 */
8582 8678 unsigned int tsum;
8583 8679
8584 8680 #ifdef _LITTLE_ENDIAN
8585 8681 psum += *bp;
8586 8682 #else
8587 8683 psum += *bp << 8;
8588 8684 #endif
8589 8685 len--;
8590 8686 bp++;
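		/*
		 * bp is now 16-bit aligned, but every word ip_ocsum()
		 * sees is shifted one byte from the caller's view, so
		 * byte-swap the resulting partial sum before folding
		 * it into psum.
		 */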
8591 8687 tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8592 8688 psum += (tsum << 8) & 0xffff | (tsum >> 8);
8593 8689 if (len & 1) {
8594 8690 bp += len - 1;
8595 8691 #ifdef _LITTLE_ENDIAN
8596 8692 psum += *bp << 8;
8597 8693 #else
8598 8694 psum += *bp;
8599 8695 #endif
8600 8696 }
8601 8697 } else {
8602 8698 /*
8603 8699 		 * bp is 16-bit aligned.
8604 8700 */
8605 8701 psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8606 8702 if (odd) {
8607 8703 bp += len - 1;
8608 8704 #ifdef _LITTLE_ENDIAN
8609 8705 psum += *bp;
8610 8706 #else
8611 8707 psum += *bp << 8;
8612 8708 #endif
8613 8709 }
8614 8710 }
8615 8711 /*
8616 8712 * Normalize psum to 16 bits before returning the new partial
8617 8713 * checksum. The max psum value before normalization is 0x3FDFE.
8618 8714 */
8619 8715 return ((psum >> 16) + (psum & 0xFFFF));
8620 8716 }
8621 8717
8622 8718 boolean_t
8623 8719 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
8624 8720 {
8625 8721 boolean_t rc;
8626 8722
8627 8723 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8628 8724 if (DB_TYPE(mp) == M_DATA) {
8629 8725 rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
8630 8726 } else {
8631 8727 pattrinfo_t zcopy_attr = {PATTR_ZCOPY};
8632 8728
8633 8729 ASSERT(mmd != NULL);
8634 8730 rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
8635 8731 }
8636 8732 return (rc);
8637 8733 }
8638 8734
8639 8735 void
8640 8736 freemsgchain(mblk_t *mp)
8641 8737 {
8642 8738 mblk_t *next;
8643 8739
8644 8740 while (mp != NULL) {
8645 8741 next = mp->b_next;
8646 8742 mp->b_next = NULL;
8647 8743
8648 8744 freemsg(mp);
8649 8745 mp = next;
8650 8746 }
8651 8747 }
8652 8748
8653 8749 mblk_t *
8654 8750 copymsgchain(mblk_t *mp)
8655 8751 {
8656 8752 mblk_t *nmp = NULL;
8657 8753 mblk_t **nmpp = &nmp;
8658 8754
8659 8755 for (; mp != NULL; mp = mp->b_next) {
8660 8756 if ((*nmpp = copymsg(mp)) == NULL) {
8661 8757 freemsgchain(nmp);
8662 8758 return (NULL);
8663 8759 }
8664 8760
8665 8761 nmpp = &((*nmpp)->b_next);
8666 8762 }
8667 8763
8668 8764 return (nmp);
8669 8765 }
8670 8766
8671 8767 /* NOTE: Do not add code after this point. */
8672 8768 #undef QLOCK
8673 8769
8674 8770 /*
8675 8771  * Replacement for the QLOCK macro for those that can't use it.
8676 8772 */
8677 8773 kmutex_t *
8678 8774 QLOCK(queue_t *q)
8679 8775 {
8680 8776 return (&(q)->q_lock);
8681 8777 }
8682 8778
8683 8779 /*
8684 8780  * Dummy runqueues/queuerun functions for backwards compatibility.
8685 8781 */
8686 8782 #undef runqueues
8687 8783 void
8688 8784 runqueues(void)
8689 8785 {
8690 8786 }
8691 8787
8692 8788 #undef queuerun
8693 8789 void
8694 8790 queuerun(void)
8695 8791 {
8696 8792 }
8697 8793
8698 8794 /*
8699 8795 * Initialize the STR stack instance, which tracks autopush and persistent
8700 8796 * links.
8701 8797 */
8702 8798 /* ARGSUSED */
8703 8799 static void *
8704 8800 str_stack_init(netstackid_t stackid, netstack_t *ns)
8705 8801 {
8706 8802 str_stack_t *ss;
8707 8803 int i;
8708 8804
8709 8805 ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
8710 8806 ss->ss_netstack = ns;
8711 8807
8712 8808 /*
8713 8809 * set up autopush
8714 8810 */
8715 8811 sad_initspace(ss);
8716 8812
8717 8813 /*
8718 8814 * set up mux_node structures.
8719 8815 */
8720 8816 ss->ss_devcnt = devcnt; /* In case it should change before free */
8721 8817 ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
8722 8818 ss->ss_devcnt), KM_SLEEP);
8723 8819 for (i = 0; i < ss->ss_devcnt; i++)
8724 8820 ss->ss_mux_nodes[i].mn_imaj = i;
8725 8821 return (ss);
8726 8822 }
8727 8823
8728 8824 /*
8729 8825  * Note: this runs at zone shutdown, not destroy, so that the PLINKs are
8730 8826  * gone by the time the other cleanup happens from the destroy callbacks.
8731 8827 */
8732 8828 static void
8733 8829 str_stack_shutdown(netstackid_t stackid, void *arg)
8734 8830 {
8735 8831 str_stack_t *ss = (str_stack_t *)arg;
8736 8832 int i;
8737 8833 cred_t *cr;
8738 8834
8739 8835 cr = zone_get_kcred(netstackid_to_zoneid(stackid));
8740 8836 ASSERT(cr != NULL);
8741 8837
8742 8838 /* Undo all the I_PLINKs for this zone */
8743 8839 for (i = 0; i < ss->ss_devcnt; i++) {
8744 8840 struct mux_edge *ep;
8745 8841 ldi_handle_t lh;
8746 8842 ldi_ident_t li;
8747 8843 int ret;
8748 8844 int rval;
8749 8845 dev_t rdev;
8750 8846
8751 8847 ep = ss->ss_mux_nodes[i].mn_outp;
8752 8848 if (ep == NULL)
8753 8849 continue;
8754 8850 ret = ldi_ident_from_major((major_t)i, &li);
8755 8851 if (ret != 0) {
8756 8852 continue;
8757 8853 }
8758 8854 rdev = ep->me_dev;
8759 8855 ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
8760 8856 cr, &lh, li);
8761 8857 if (ret != 0) {
8762 8858 ldi_ident_release(li);
8763 8859 continue;
8764 8860 }
8765 8861
8766 8862 ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
8767 8863 cr, &rval);
8768 8864 if (ret) {
8769 8865 (void) ldi_close(lh, FREAD|FWRITE, cr);
8770 8866 ldi_ident_release(li);
8771 8867 continue;
8772 8868 }
8773 8869 (void) ldi_close(lh, FREAD|FWRITE, cr);
8774 8870
8775 8871 /* Close layered handles */
8776 8872 ldi_ident_release(li);
8777 8873 }
8778 8874 crfree(cr);
8779 8875
8780 8876 sad_freespace(ss);
8781 8877
8782 8878 kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
8783 8879 ss->ss_mux_nodes = NULL;
8784 8880 }
8785 8881
8786 8882 /*
8787 8883 * Free the structure; str_stack_shutdown did the other cleanup work.
8788 8884 */
8789 8885 /* ARGSUSED */
8790 8886 static void
8791 8887 str_stack_fini(netstackid_t stackid, void *arg)
8792 8888 {
8793 8889 str_stack_t *ss = (str_stack_t *)arg;
8794 8890
8795 8891 kmem_free(ss, sizeof (*ss));
8796 8892 }
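/*
 * These three callbacks are handed to the netstack framework (via
 * netstack_register() with NS_STR) during STREAMS initialization, so
 * every zone's network stack instance carries its own autopush and
 * persistent-link state.
 */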
5444 lines elided