/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*    All Rights Reserved    */


/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define O_SAMESTR(q) (((q)->q_next) && \
    (((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
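/*
 * Note: O_SAMESTR() is the classic "same stream?" test (cf. _SAMESTR() in
 * stream.h): q and q->q_next belong to the same stream when both face the
 * same direction, i.e. their QREADR flags agree. The explicit q_next check
 * keeps it safe at the end of a stream, where q_next is NULL.
 */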
/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define STREAMS_LOPRI MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define STRSTAT(x) (str_statistics.x.value.ui64++)
typedef struct str_stat {
    kstat_named_t sqenables;
    kstat_named_t stenables;
    kstat_named_t syncqservice;
    kstat_named_t freebs;
    kstat_named_t qwr_outer;
    kstat_named_t rservice;
    kstat_named_t strwaits;
    kstat_named_t taskqfails;
    kstat_named_t bufcalls;
    kstat_named_t qhelps;
    kstat_named_t qremoved;
    kstat_named_t sqremoved;
    kstat_named_t bcwaits;
    kstat_named_t sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
    { "sqenables", KSTAT_DATA_UINT64 },
    { "stenables", KSTAT_DATA_UINT64 },
    { "syncqservice", KSTAT_DATA_UINT64 },
    { "freebs", KSTAT_DATA_UINT64 },
    { "qwr_outer", KSTAT_DATA_UINT64 },
    { "rservice", KSTAT_DATA_UINT64 },
    { "strwaits", KSTAT_DATA_UINT64 },
    { "taskqfails", KSTAT_DATA_UINT64 },
    { "bufcalls", KSTAT_DATA_UINT64 },
    { "qhelps", KSTAT_DATA_UINT64 },
    { "qremoved", KSTAT_DATA_UINT64 },
    { "sqremoved", KSTAT_DATA_UINT64 },
    { "bcwaits", KSTAT_DATA_UINT64 },
    { "sqtoomany", KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via the qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service freebs
 * requests whose dispatches failed. Queues are put in the list specified by the
 * `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail' pointers,
 * and freebs requests are put into `freebs_list', which has no tail pointer.
 * All three lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail, and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
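/*
 * The dispatch-with-fallback pattern described above looks roughly like the
 * sketch below (illustrative only; the real logic lives in sqenable() and
 * friends later in this file):
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)syncq_service,
 *	    sq, TQ_NOSLEEP) == NULL) {
 *		STRSTAT(taskqfails);
 *		mutex_enter(&service_queue);
 *		ENQUEUE(sq, sqhead, sqtail, sq_next);	-- fall back to list
 *		cv_signal(&syncqs_to_run);		-- wake backup thread
 *		mutex_exit(&service_queue);
 *	}
 */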
/*
 * Bufcalls related variables.
 */
struct bclist strbcalls;	/* list of waiting bufcalls */
kmutex_t strbcall_lock;		/* protects bufcall list (strbcalls) */
kcondvar_t strbcall_cv;		/* Signaling when a bufcall is added */
kmutex_t bcall_monitor;		/* sleep/wakeup style monitor */
kcondvar_t bcall_cv;		/* wait 'till executing bufcall completes */
kthread_t *bc_bkgrnd_thread;	/* Thread to service bufcall requests */

kmutex_t strresources;		/* protects global resources */
kmutex_t muxifier;		/* single-threads multiplexor creation */

static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer a large default value.
 * For potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;
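/*
 * For example, a minimal /etc/system entry raising the limit might read
 * (illustrative value only):
 *
 *	set sq_max_size = 25000
 */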
/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                   ^                 ^                 ^               ^
 *                   |   ______________/                 |               |
 *                   |  /                                |               |
 * dev_impl: ...|x|y|...                              module A        module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
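/*
 * For example (illustrative): if two D_MTPERMOD modules are built on the
 * same streamtab, the first setq() allocates a perdm entry with dm_ref == 1;
 * pushing the second instance finds the matching dm_str and simply bumps
 * dm_ref to 2 (see hold_dm()), so both instances synchronize on the one
 * shared PERMOD syncq.
 */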
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/* Array of esballoc throttling queues, of length esbq_nelem */
static esb_queue_t *volatile system_esbq_array;
static int esbq_nelem;
static kmutex_t esbq_lock;
static int esbq_log2_cpus_per_q = 0;

/* Scale the system_esbq length by setting number of CPUs per queue. */
uint_t esbq_cpus_per_q = 1;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t esbq_timeout = 0x8;	/* timeout to process esb queue */

/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
    0,
    "passthru",
    0,
    INFPSZ,
    STRHIGH,
    STRLOW
};

struct qinit passthru_rinit = {
    (int (*)())putnext,
    NULL,
    NULL,
    NULL,
    NULL,
    &passthru_info,
    NULL
};

struct qinit passthru_winit = {
    (int (*)()) pass_wput,
    NULL,
    NULL,
    NULL,
    NULL,
    &passthru_info,
    NULL
};

/*
 * Verify correctness of list head/tail pointers.
 */
#define LISTCHECK(head, tail, link) { \
    EQUIV(head, tail); \
    IMPLY(tail != NULL, tail->link == NULL); \
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define ENQUEUE(el, head, tail, link) { \
    ASSERT(el->link == NULL); \
    LISTCHECK(head, tail, link); \
    if (head == NULL) \
        head = el; \
    else \
        tail->link = el; \
    tail = el; \
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put the result into `el'.
 */
#define DQ(el, head, tail, link) { \
    LISTCHECK(head, tail, link); \
    el = head; \
    if (head != NULL) { \
        head = head->link; \
        if (head == NULL) \
            tail = NULL; \
        el->link = NULL; \
    } \
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return the
 * result in `succeed'.
 */
#define RMQ(el, head, tail, link, chase, curr, succeed) { \
    LISTCHECK(head, tail, link); \
    chase = NULL; \
    succeed = 0; \
    for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
        chase = curr; \
    if (curr != NULL) { \
        succeed = 1; \
        ASSERT(curr == el); \
        if (chase != NULL) \
            chase->link = curr->link; \
        else \
            head = curr->link; \
        curr->link = NULL; \
        if (curr == tail) \
            tail = chase; \
    } \
    LISTCHECK(head, tail, link); \
}
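/*
 * These macros are used for the qhead/qtail, sqhead/sqtail and stream-head
 * service lists. A typical producer/consumer pair looks roughly like this
 * (illustrative; see the background service threads later in this file):
 *
 *	mutex_enter(&service_queue);
 *	ENQUEUE(qp, qhead, qtail, q_link);	-- producer
 *	cv_signal(&services_to_run);
 *	mutex_exit(&service_queue);
 *
 *	mutex_enter(&service_queue);
 *	DQ(qp, qhead, qtail, q_link);		-- consumer
 *	mutex_exit(&service_queue);
 *	if (qp != NULL)
 *		queue_service(qp);
 */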
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define SQPUT_Q(sq, qp) \
{ \
    ASSERT(MUTEX_HELD(SQLOCK(sq))); \
    if (!(qp->q_sqflags & Q_SQQUEUED)) { \
        /* The queue should not be linked anywhere */ \
        ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
        /* Head and tail may only be NULL simultaneously */ \
        EQUIV(sq->sq_head, sq->sq_tail); \
        /* Queue may only be enqueued on its syncq */ \
        ASSERT(sq == qp->q_syncq); \
        /* Check the correctness of SQ_MESSAGES flag */ \
        EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
        /* Sanity check first/last elements of the list */ \
        IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
        IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
        /* \
         * Sanity check of priority field: empty queue should \
         * have zero priority \
         * and nqueues equal to zero. \
         */ \
        IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \
        /* Sanity check of sq_nqueues field */ \
        EQUIV(sq->sq_head, sq->sq_nqueues); \
        if (sq->sq_head == NULL) { \
            sq->sq_head = sq->sq_tail = qp; \
            sq->sq_flags |= SQ_MESSAGES; \
        } else if (qp->q_spri == 0) { \
            qp->q_sqprev = sq->sq_tail; \
            sq->sq_tail->q_sqnext = qp; \
            sq->sq_tail = qp; \
        } else { \
            /* \
             * Put this queue in priority order: higher \
             * priority gets closer to the head. \
             */ \
            queue_t **qpp = &sq->sq_tail; \
            queue_t *qnext = NULL; \
 \
            while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
                qnext = *qpp; \
                qpp = &(*qpp)->q_sqprev; \
            } \
            qp->q_sqnext = qnext; \
            qp->q_sqprev = *qpp; \
            if (*qpp != NULL) { \
                (*qpp)->q_sqnext = qp; \
            } else { \
                sq->sq_head = qp; \
                sq->sq_pri = sq->sq_head->q_spri; \
            } \
            *qpp = qp; \
        } \
        qp->q_sqflags |= Q_SQQUEUED; \
        qp->q_sqtstamp = ddi_get_lbolt(); \
        sq->sq_nqueues++; \
    } \
}

/*
 * Remove a queue from the syncq list
 * Assumes SQLOCK held.
 */
#define SQRM_Q(sq, qp) \
{ \
    ASSERT(MUTEX_HELD(SQLOCK(sq))); \
    ASSERT(qp->q_sqflags & Q_SQQUEUED); \
    ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \
    ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \
    /* Check that the queue is actually in the list */ \
    ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \
    ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \
    ASSERT(sq->sq_nqueues != 0); \
    if (qp->q_sqprev == NULL) { \
        /* First queue on list, make head q_sqnext */ \
        sq->sq_head = qp->q_sqnext; \
    } else { \
        /* Make prev->next == next */ \
        qp->q_sqprev->q_sqnext = qp->q_sqnext; \
    } \
    if (qp->q_sqnext == NULL) { \
        /* Last queue on list, make tail sqprev */ \
        sq->sq_tail = qp->q_sqprev; \
    } else { \
        /* Make next->prev == prev */ \
        qp->q_sqnext->q_sqprev = qp->q_sqprev; \
    } \
    /* clear out references on this queue */ \
    qp->q_sqprev = qp->q_sqnext = NULL; \
    qp->q_sqflags &= ~Q_SQQUEUED; \
    /* If there is nothing queued, clear SQ_MESSAGES */ \
    if (sq->sq_head != NULL) { \
        sq->sq_pri = sq->sq_head->q_spri; \
    } else { \
        sq->sq_flags &= ~SQ_MESSAGES; \
        sq->sq_pri = 0; \
    } \
    sq->sq_nqueues--; \
    ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \
        (sq->sq_flags & SQ_QUEUED) == 0); \
}
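/*
 * To illustrate the SQPUT_Q() ordering rule (example values, not from the
 * source): with queues of q_spri 7, 3 and 0 already listed as
 * head -> [7] -> [3] -> [0] -> tail, inserting a queue with q_spri == 3
 * scans backward from the tail past [0], stops at the existing [3], and
 * yields [7] -> [3] -> [3new] -> [0]; a q_spri == 0 queue is simply
 * appended at the tail.
 */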
/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define SQPUT_MP(qp, mp) \
{ \
    ASSERT(MUTEX_HELD(QLOCK(qp))); \
    ASSERT(qp->q_sqhead == NULL || \
        (qp->q_sqtail != NULL && \
        qp->q_sqtail->b_next == NULL)); \
    qp->q_syncqmsgs++; \
    ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */ \
    if (qp->q_sqhead == NULL) { \
        qp->q_sqhead = qp->q_sqtail = mp; \
    } else { \
        qp->q_sqtail->b_next = mp; \
        qp->q_sqtail = mp; \
    } \
    ASSERT(qp->q_syncqmsgs > 0); \
    set_qfull(qp); \
}

#define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
    ASSERT(MUTEX_HELD(SQLOCK(sq))); \
    if ((sq)->sq_ciputctrl != NULL) { \
        int i; \
        int nlocks = (sq)->sq_nciputctrl; \
        ciputctrl_t *cip = (sq)->sq_ciputctrl; \
        ASSERT((sq)->sq_type & SQ_CIPUT); \
        for (i = 0; i <= nlocks; i++) { \
            ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
            cip[i].ciputctrl_count |= SQ_FASTPUT; \
        } \
    } \
}


#define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
    ASSERT(MUTEX_HELD(SQLOCK(sq))); \
    if ((sq)->sq_ciputctrl != NULL) { \
        int i; \
        int nlocks = (sq)->sq_nciputctrl; \
        ciputctrl_t *cip = (sq)->sq_ciputctrl; \
        ASSERT((sq)->sq_type & SQ_CIPUT); \
        for (i = 0; i <= nlocks; i++) { \
            ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
            cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
        } \
    } \
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define STR_SERVICE(stp, q) { \
    ASSERT(MUTEX_HELD(&stp->sd_qlock)); \
    while (stp->sd_qhead != NULL) { \
        DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \
        ASSERT(stp->sd_nqueues > 0); \
        stp->sd_nqueues--; \
        ASSERT(!(q->q_flag & QINSERVICE)); \
        mutex_exit(&stp->sd_qlock); \
        queue_service(q); \
        mutex_enter(&stp->sd_qlock); \
    } \
    ASSERT(stp->sd_nqueues == 0); \
    ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
}

/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
    stdata_t *stp = buf;

    mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&stp->sd_pid_list_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
    cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
    cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
    cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
    cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
    list_create(&stp->sd_pid_list, sizeof (pid_node_t),
        offsetof(pid_node_t, pn_ref_link));
    stp->sd_wrq = NULL;

    return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
    stdata_t *stp = buf;

    mutex_destroy(&stp->sd_lock);
    mutex_destroy(&stp->sd_reflock);
    mutex_destroy(&stp->sd_qlock);
    mutex_destroy(&stp->sd_pid_list_lock);
    cv_destroy(&stp->sd_monitor);
    cv_destroy(&stp->sd_iocmonitor);
    cv_destroy(&stp->sd_refmonitor);
    cv_destroy(&stp->sd_qcv);
    cv_destroy(&stp->sd_zcopy_wait);
    list_destroy(&stp->sd_pid_list);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
    queinfo_t *qip = buf;
    queue_t *qp = &qip->qu_rqueue;
    queue_t *wqp = &qip->qu_wqueue;
    syncq_t *sq = &qip->qu_syncq;

    qp->q_first = NULL;
    qp->q_link = NULL;
    qp->q_count = 0;
    qp->q_mblkcnt = 0;
    qp->q_sqhead = NULL;
    qp->q_sqtail = NULL;
    qp->q_sqnext = NULL;
    qp->q_sqprev = NULL;
    qp->q_sqflags = 0;
    qp->q_rwcnt = 0;
    qp->q_spri = 0;

    mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
    cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

    wqp->q_first = NULL;
    wqp->q_link = NULL;
    wqp->q_count = 0;
    wqp->q_mblkcnt = 0;
    wqp->q_sqhead = NULL;
    wqp->q_sqtail = NULL;
    wqp->q_sqnext = NULL;
    wqp->q_sqprev = NULL;
    wqp->q_sqflags = 0;
    wqp->q_rwcnt = 0;
    wqp->q_spri = 0;

    mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
    cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

    sq->sq_head = NULL;
    sq->sq_tail = NULL;
    sq->sq_evhead = NULL;
    sq->sq_evtail = NULL;
    sq->sq_callbpend = NULL;
    sq->sq_outer = NULL;
    sq->sq_onext = NULL;
    sq->sq_oprev = NULL;
    sq->sq_next = NULL;
    sq->sq_svcflags = 0;
    sq->sq_servcount = 0;
    sq->sq_needexcl = 0;
    sq->sq_nqueues = 0;
    sq->sq_pri = 0;

    mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
    cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

    return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
    queinfo_t *qip = buf;
    queue_t *qp = &qip->qu_rqueue;
    queue_t *wqp = &qip->qu_wqueue;
    syncq_t *sq = &qip->qu_syncq;

    ASSERT(qp->q_sqhead == NULL);
    ASSERT(wqp->q_sqhead == NULL);
    ASSERT(qp->q_sqnext == NULL);
    ASSERT(wqp->q_sqnext == NULL);
    ASSERT(qp->q_rwcnt == 0);
    ASSERT(wqp->q_rwcnt == 0);

    mutex_destroy(&qp->q_lock);
    cv_destroy(&qp->q_wait);

    mutex_destroy(&wqp->q_lock);
    cv_destroy(&wqp->q_wait);

    mutex_destroy(&sq->sq_lock);
    cv_destroy(&sq->sq_wait);
    cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
    syncq_t *sq = buf;

    bzero(buf, sizeof (syncq_t));

    mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
    cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

    return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
    syncq_t *sq = buf;

    ASSERT(sq->sq_head == NULL);
    ASSERT(sq->sq_tail == NULL);
    ASSERT(sq->sq_evhead == NULL);
    ASSERT(sq->sq_evtail == NULL);
    ASSERT(sq->sq_callbpend == NULL);
    ASSERT(sq->sq_callbflags == 0);
    ASSERT(sq->sq_outer == NULL);
    ASSERT(sq->sq_onext == NULL);
    ASSERT(sq->sq_oprev == NULL);
    ASSERT(sq->sq_next == NULL);
    ASSERT(sq->sq_needexcl == 0);
    ASSERT(sq->sq_svcflags == 0);
    ASSERT(sq->sq_servcount == 0);
    ASSERT(sq->sq_nqueues == 0);
    ASSERT(sq->sq_pri == 0);
    ASSERT(sq->sq_count == 0);
    ASSERT(sq->sq_rmqcount == 0);
    ASSERT(sq->sq_cancelid == 0);
    ASSERT(sq->sq_ciputctrl == NULL);
    ASSERT(sq->sq_nciputctrl == 0);
    ASSERT(sq->sq_type == 0);
    ASSERT(sq->sq_flags == 0);

    mutex_destroy(&sq->sq_lock);
    cv_destroy(&sq->sq_wait);
    cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
    ciputctrl_t *cip = buf;
    int i;

    for (i = 0; i < n_ciputctrl; i++) {
        cip[i].ciputctrl_count = SQ_FASTPUT;
        mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
    }

    return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
    ciputctrl_t *cip = buf;
    int i;

    for (i = 0; i < n_ciputctrl; i++) {
        ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
        mutex_destroy(&cip[i].ciputctrl_lock);
    }
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
    int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

    stream_head_cache = kmem_cache_create("stream_head_cache",
        sizeof (stdata_t), 0,
        stream_head_constructor, stream_head_destructor, NULL,
        NULL, NULL, 0);

    queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
        queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

    syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
        syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

    qband_cache = kmem_cache_create("qband_cache",
        sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

    linkinfo_cache = kmem_cache_create("linkinfo_cache",
        sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

    n_ciputctrl = ncpus;
    n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
    ASSERT(n_ciputctrl >= 1);
    n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
    if (n_ciputctrl >= min_n_ciputctrl) {
        ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
            sizeof (ciputctrl_t) * n_ciputctrl,
            sizeof (ciputctrl_t), ciputctrl_constructor,
            ciputctrl_destructor, NULL, NULL, NULL, 0);
    }
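    /*
     * Worked example (illustrative): with ncpus == 6 the lines above round
     * 6 up to the next power of two, 8 (1 << highbit(5) == 8), and MIN()
     * clamps against max_n_ciputctrl (16), so n_ciputctrl == 8 and the
     * cache is created since 8 >= min_n_ciputctrl.
     */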

    streams_taskq = system_taskq;

    if (streams_taskq == NULL)
        panic("strinit: no memory for streams taskq!");

    bc_bkgrnd_thread = thread_create(NULL, 0,
        streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

    streams_qbkgrnd_thread = thread_create(NULL, 0,
        streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

    streams_sqbkgrnd_thread = thread_create(NULL, 0,
        streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

    /*
     * Create STREAMS kstats.
     */
    str_kstat = kstat_create("streams", 0, "strstat",
        "net", KSTAT_TYPE_NAMED,
        sizeof (str_statistics) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL);

    if (str_kstat != NULL) {
        str_kstat->ks_data = &str_statistics;
        kstat_install(str_kstat);
    }

    /*
     * TPI support routine initialisation.
     */
    tpi_init();

    /*
     * Handle to have autopush and persistent link information per
     * zone.
     * Note: uses shutdown hook instead of destroy hook so that the
     * persistent links can be torn down before the destroy hooks
     * in the TCP/IP stack are called.
     */
    netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
        str_stack_fini);
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
    struct stdata *stp;

    ASSERT(vp->v_stream);
    stp = vp->v_stream;
    /* Have to hold sd_lock to prevent siglist from changing */
    mutex_enter(&stp->sd_lock);
    if (stp->sd_sigflags & event)
        strsendsig(stp->sd_siglist, event, band, error);
    mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
    ASSERT(MUTEX_HELD(&proc->p_lock));

    info->si_band = 0;
    info->si_errno = 0;

    if (sevent & S_ERROR) {
        sevent &= ~S_ERROR;
        info->si_code = POLL_ERR;
        info->si_errno = error;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
        info->si_errno = 0;
    }
    if (sevent & S_HANGUP) {
        sevent &= ~S_HANGUP;
        info->si_code = POLL_HUP;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
    }
    if (sevent & S_HIPRI) {
        sevent &= ~S_HIPRI;
        info->si_code = POLL_PRI;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
    }
    if (sevent & S_RDBAND) {
        sevent &= ~S_RDBAND;
        if (events & S_BANDURG)
            sigtoproc(proc, NULL, SIGURG);
        else
            sigtoproc(proc, NULL, SIGPOLL);
    }
    if (sevent & S_WRBAND) {
        sevent &= ~S_WRBAND;
        sigtoproc(proc, NULL, SIGPOLL);
    }
    if (sevent & S_INPUT) {
        sevent &= ~S_INPUT;
        info->si_code = POLL_IN;
        info->si_band = band;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
        info->si_band = 0;
    }
    if (sevent & S_OUTPUT) {
        sevent &= ~S_OUTPUT;
        info->si_code = POLL_OUT;
        info->si_band = band;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
        info->si_band = 0;
    }
    if (sevent & S_MSG) {
        sevent &= ~S_MSG;
        info->si_code = POLL_MSG;
        info->si_band = band;
        TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
            "strsendsig:proc %p info %p", proc, info);
        sigaddq(proc, NULL, info, KM_NOSLEEP);
        info->si_band = 0;
    }
    if (sevent & S_RDNORM) {
        sevent &= ~S_RDNORM;
        sigtoproc(proc, NULL, SIGPOLL);
    }
    if (sevent != 0) {
        panic("strsendsig: unknown event(s) %x", sevent);
    }
}

/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
    strsig_t *ssp;
    k_siginfo_t info;
    struct pid *pidp;
    proc_t *proc;

    info.si_signo = SIGPOLL;
    info.si_errno = 0;
    for (ssp = siglist; ssp; ssp = ssp->ss_next) {
        int sevent;

        sevent = ssp->ss_events & event;
        if (sevent == 0)
            continue;

        if ((pidp = ssp->ss_pidp) == NULL) {
            /* pid was released but still on event list */
            continue;
        }


        if (ssp->ss_pid > 0) {
            /*
             * XXX This unfortunately still generates
             * a signal when a fd is closed but
             * the proc is active.
             */
            ASSERT(ssp->ss_pid == pidp->pid_id);

            mutex_enter(&pidlock);
            proc = prfind_zone(pidp->pid_id, ALL_ZONES);
            if (proc == NULL) {
                mutex_exit(&pidlock);
                continue;
            }
            mutex_enter(&proc->p_lock);
            mutex_exit(&pidlock);
            dosendsig(proc, ssp->ss_events, sevent, &info,
                band, error);
            mutex_exit(&proc->p_lock);
        } else {
            /*
             * Send to process group. Hold pidlock across
             * calls to dosendsig().
             */
            pid_t pgrp = -ssp->ss_pid;

            mutex_enter(&pidlock);
            proc = pgfind_zone(pgrp, ALL_ZONES);
            while (proc != NULL) {
                mutex_enter(&proc->p_lock);
                dosendsig(proc, ssp->ss_events, sevent,
                    &info, band, error);
                mutex_exit(&proc->p_lock);
                proc = proc->p_pglink;
            }
            mutex_exit(&pidlock);
        }
    }
}
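/*
 * For reference, a process registers for these signals from userland with
 * I_SETSIG, e.g. (illustrative snippet):
 *
 *	ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND);	-- SIGPOLL on input
 *
 * which is what populates the sd_siglist/sd_sigflags consulted above.
 */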

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
    major_t major;
    cdevsw_impl_t *dp;
    struct streamtab *str;
    queue_t *rq;
    queue_t *wrq;
    uint32_t qflag;
    uint32_t sqtype;
    perdm_t *dmp;
    int error;
    int sflag;

    rq = allocq();
    wrq = _WR(rq);
    STREAM(rq) = STREAM(wrq) = STREAM(qp);

    if (fp != NULL) {
        str = fp->f_str;
        qflag = fp->f_qflag;
        sqtype = fp->f_sqtype;
        dmp = fp->f_dmp;
        IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
        sflag = MODOPEN;

        /*
         * stash away a pointer to the module structure so we can
         * unref it in qdetach.
         */
        rq->q_fp = fp;
    } else {
        ASSERT(!is_insert);

        major = getmajor(*devp);
        dp = &devimpl[major];

        str = dp->d_str;
        ASSERT(str == STREAMSTAB(major));

        qflag = dp->d_qflag;
        ASSERT(qflag & QISDRV);
        sqtype = dp->d_sqtype;

        /* create perdm_t if needed */
        if (NEED_DM(dp->d_dmp, qflag))
            dp->d_dmp = hold_dm(str, qflag, sqtype);

        dmp = dp->d_dmp;
        sflag = 0;
    }

    TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
        "qattach:qflag == %X(%X)", qflag, *devp);

    /* setq might sleep in allocator - avoid holding locks. */
    setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

    /*
     * Before calling the module's open routine, set up the q_next
     * pointer for inserting a module in the middle of a stream.
     *
     * Note that we can always set _QINSERTING and set up the q_next
     * pointer for both inserting and pushing a module. Then there
     * is no need for the is_insert parameter. In insertq(), called
     * by qprocson(), assume that q_next of the new module always points
     * to the correct queue and use it for insertion. Everything should
     * work out fine. But in the first release of _I_INSERT, we
     * distinguish between inserting and pushing to make sure that
     * pushing a module follows the same code path as before.
     */
    if (is_insert) {
        rq->q_flag |= _QINSERTING;
        rq->q_next = qp;
    }

    /*
     * If there is an outer perimeter get exclusive access during
     * the open procedure. Bump up the reference count on the queue.
     */
    entersq(rq->q_syncq, SQ_OPENCLOSE);
    error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
    if (error != 0)
        goto failed;
    leavesq(rq->q_syncq, SQ_OPENCLOSE);
    ASSERT(qprocsareon(rq));
    return (0);

failed:
    rq->q_flag &= ~_QINSERTING;
    if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
        qprocsoff(rq);
    leavesq(rq->q_syncq, SQ_OPENCLOSE);
    rq->q_next = wrq->q_next = NULL;
    qdetach(rq, 0, 0, crp, B_FALSE);
    return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
    int error;
    dev_t dummydev;
    queue_t *wqp = _WR(qp);

    ASSERT(qp->q_flag & QREADR);
    entersq(qp->q_syncq, SQ_OPENCLOSE);

    dummydev = *devp;
    if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
        (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
        leavesq(qp->q_syncq, SQ_OPENCLOSE);
        mutex_enter(&STREAM(qp)->sd_lock);
        qp->q_stream->sd_flag |= STREOPENFAIL;
        mutex_exit(&STREAM(qp)->sd_lock);
        return (error);
    }
    leavesq(qp->q_syncq, SQ_OPENCLOSE);

    /*
     * successful open should have done qprocson()
     */
    ASSERT(qprocsareon(_RD(qp)));
    return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
    queue_t *wqp = _WR(qp);
    ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

    if (STREAM_NEEDSERVICE(STREAM(qp)))
        stream_runservice(STREAM(qp));

    if (clmode) {
        /*
         * Make sure that all the messages on the write side syncq are
         * processed and nothing is left. Since we are closing, no new
         * messages may appear there.
         */
        wait_q_syncq(wqp);

        entersq(qp->q_syncq, SQ_OPENCLOSE);
        if (is_remove) {
            mutex_enter(QLOCK(qp));
            qp->q_flag |= _QREMOVING;
            mutex_exit(QLOCK(qp));
        }
        (*qp->q_qinfo->qi_qclose)(qp, flag, crp);
        /*
         * Check that qprocsoff() was actually called.
         */
        ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

        leavesq(qp->q_syncq, SQ_OPENCLOSE);
    } else {
        disable_svc(qp);
    }

    /*
     * Allow any threads blocked in entersq to proceed and discover
     * the QWCLOSE is set.
     * Note: This assumes that all users of entersq check QWCLOSE.
     * Currently runservice is the only entersq that can happen
     * after removeq has finished.
     * Removeq will have discarded all messages destined to the closing
     * pair of queues from the syncq.
     * NOTE: Calling a function inside an assert is unconventional.
     * However, it does not cause any problem since flush_syncq() does
     * not change any state except when it returns non-zero, i.e.
     * when the assert will trigger.
     */
    ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
    ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
    ASSERT((qp->q_flag & QPERMOD) ||
        ((qp->q_syncq->sq_head == NULL) &&
        (wqp->q_syncq->sq_head == NULL)));

    /* release any fmodsw_impl_t structure held on behalf of the queue */
    ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
    if (qp->q_fp != NULL)
        fmodsw_rele(qp->q_fp);

    /* freeq removes us from the outer perimeter if any */
    freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
    queue_t *wqp = _WR(qp);

    ASSERT(qp->q_flag & QREADR);
    mutex_enter(QLOCK(qp));
    qp->q_flag |= QWCLOSE;
    mutex_exit(QLOCK(qp));
    mutex_enter(QLOCK(wqp));
    wqp->q_flag |= QWCLOSE;
    mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
    queue_t *wqp = _WR(qp);

    ASSERT(qp->q_flag & QREADR);
    mutex_enter(QLOCK(qp));
    qp->q_flag &= ~QWCLOSE;
    mutex_exit(QLOCK(qp));
    mutex_enter(QLOCK(wqp));
    wqp->q_flag &= ~QWCLOSE;
    mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we cannot reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
    if (qp->q_flag & QENAB && qhead != NULL) {
        queue_t *q_chase;
        queue_t *q_curr;
        int removed;

        mutex_enter(&service_queue);
        RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
        mutex_exit(&service_queue);
        if (removed) {
            STRSTAT(qremoved);
            qp->q_flag &= ~QENAB;
        }
    }
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
    queue_t *wqp = _WR(qp);

    ASSERT(qp->q_flag & QREADR);

    /*
     * Try to remove queues from qhead/qtail list.
     */
    if (qhead != NULL) {
        remove_runlist(qp);
        remove_runlist(wqp);
    }
    /*
     * Wait till the syncqs associated with the queue disappear from the
     * background processing list.
     * This only needs to be done for non-PERMOD perimeters since
     * for PERMOD perimeters the syncq may be shared and will only be freed
     * when the last module/driver is unloaded.
     * If for PERMOD perimeters the queue was on the syncq list, removeq()
     * should call propagate_syncq() or drain_syncq() for it. Both of these
     * functions remove the queue from its syncq list, so sqthread will not
     * try to access the queue.
     */
    if (!(qp->q_flag & QPERMOD)) {
        syncq_t *rsq = qp->q_syncq;
        syncq_t *wsq = wqp->q_syncq;

        /*
         * Disable rsq and wsq and wait for any background processing of
         * syncq to complete.
         */
        wait_sq_svc(rsq);
        if (wsq != rsq)
            wait_sq_svc(wsq);
    }

    mutex_enter(QLOCK(qp));
    while (qp->q_flag & (QINSERVICE|QENAB))
        cv_wait(&qp->q_wait, QLOCK(qp));
    mutex_exit(QLOCK(qp));
    mutex_enter(QLOCK(wqp));
    while (wqp->q_flag & (QINSERVICE|QENAB))
        cv_wait(&wqp->q_wait, QLOCK(wqp));
    mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
    mblk_t *tmp;
    ssize_t count;
    int error = 0;

    ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
        (flag & (U_TO_K | K_TO_K)) == K_TO_K);

    if (bp->b_datap->db_type == M_IOCTL) {
        count = ((struct iocblk *)bp->b_rptr)->ioc_count;
    } else {
        ASSERT(bp->b_datap->db_type == M_COPYIN);
        count = ((struct copyreq *)bp->b_rptr)->cq_size;
    }
    /*
     * strdoioctl validates ioc_count, so if this assert fails it
     * cannot be due to user error.
     */
    ASSERT(count >= 0);

    if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
        curproc->p_pid)) == NULL) {
        return (error);
    }
    error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
    if (error != 0) {
        freeb(tmp);
        return (error);
    }
    DB_CPID(tmp) = curproc->p_pid;
    tmp->b_wptr += count;
    bp->b_cont = tmp;

    return (0);
}
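/*
 * On success the ioctl message is a two-mblk chain (illustrative diagram):
 *
 *	bp (M_IOCTL/M_COPYIN)  --b_cont-->  tmp (ioc_count data bytes)
 *
 * getiocd() below walks such a b_cont chain in the opposite direction,
 * copying the data back out to userland.
 */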

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
    ssize_t count;
    size_t n;
    int error;

    if (bp->b_datap->db_type == M_IOCACK)
        count = ((struct iocblk *)bp->b_rptr)->ioc_count;
    else {
        ASSERT(bp->b_datap->db_type == M_COPYOUT);
        count = ((struct copyreq *)bp->b_rptr)->cq_size;
    }
    ASSERT(count >= 0);

    for (bp = bp->b_cont; bp && count;
        count -= n, bp = bp->b_cont, arg += n) {
        n = MIN(count, bp->b_wptr - bp->b_rptr);
        error = strcopyout(bp->b_rptr, arg, n, copymode);
        if (error)
            return (error);
    }
    ASSERT(count == 0);
    return (0);
}

/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
    linkinfo_t *linkp;

    linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

    linkp->li_lblk.l_qtop = qup;
    linkp->li_lblk.l_qbot = qdown;
    linkp->li_fpdown = fpdown;

    mutex_enter(&strresources);
    linkp->li_next = linkinfo_list;
    linkp->li_prev = NULL;
    if (linkp->li_next)
        linkp->li_next->li_prev = linkp;
    linkinfo_list = linkp;
    linkp->li_lblk.l_index = ++lnk_id;
    ASSERT(lnk_id != 0);	/* this should never wrap in practice */
    mutex_exit(&strresources);

    return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
    mutex_enter(&strresources);
    if (linkp->li_next)
        linkp->li_next->li_prev = linkp->li_prev;
    if (linkp->li_prev)
        linkp->li_prev->li_next = linkp->li_next;
    else
        linkinfo_list = linkp->li_next;
    mutex_exit(&strresources);

    kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
    struct mux_node *np;
    struct mux_edge *ep;
    int i;
    major_t lomaj;
    major_t upmaj;
    /*
     * if the lower stream is a pipe/FIFO, return, since link
     * cycles can not happen on pipes/FIFOs
     */
    if (lostp->sd_vnode->v_type == VFIFO)
        return (0);

    for (i = 0; i < ss->ss_devcnt; i++) {
        np = &ss->ss_mux_nodes[i];
        MUX_CLEAR(np);
    }
    lomaj = getmajor(lostp->sd_vnode->v_rdev);
    upmaj = getmajor(upstp->sd_vnode->v_rdev);
    np = &ss->ss_mux_nodes[lomaj];
    for (;;) {
        if (!MUX_DIDVISIT(np)) {
            if (np->mn_imaj == upmaj)
                return (1);
            if (np->mn_outp == NULL) {
                MUX_VISIT(np);
                if (np->mn_originp == NULL)
                    return (0);
                np = np->mn_originp;
                continue;
            }
            MUX_VISIT(np);
            np->mn_startp = np->mn_outp;
        } else {
            if (np->mn_startp == NULL) {
                if (np->mn_originp == NULL)
                    return (0);
                else {
                    np = np->mn_originp;
                    continue;
                }
            }
            /*
             * If ep->me_nodep is a FIFO (me_nodep == NULL),
             * ignore the edge and move on. ep->me_nodep gets
             * set to NULL in mux_addedge() if it is a FIFO.
             *
             */
            ep = np->mn_startp;
            np->mn_startp = ep->me_nextp;
            if (ep->me_nodep == NULL)
                continue;
            ep->me_nodep->mn_originp = np;
            np = ep->me_nodep;
        }
    }
}
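/*
 * Example of the cycle this guards against (illustrative): if mux A already
 * has mux B linked beneath it (edge A -> B in mux_nodes[]), an attempt to
 * I_LINK A underneath B would close the loop A -> B -> A. The depth-first
 * walk above starts at the prospective lower stream's node and returns 1 as
 * soon as it reaches a node whose major matches the upper stream's.
 */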

/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
    linkinfo_t *linkp;
    struct mux_edge *mep;
    struct mux_node *mnp;
    queue_t *qup;

    mutex_enter(&strresources);
    if ((type & LINKTYPEMASK) == LINKNORMAL) {
        qup = getendq(stp->sd_wrq);
        for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
            if ((qup == linkp->li_lblk.l_qtop) &&
                (!index || (index == linkp->li_lblk.l_index))) {
                mutex_exit(&strresources);
                return (linkp);
            }
        }
    } else {
        ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
        mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
        mep = mnp->mn_outp;
        while (mep) {
            if ((index == 0) || (index == mep->me_muxid))
                break;
            mep = mep->me_nextp;
        }
        if (!mep) {
            mutex_exit(&strresources);
            return (NULL);
        }
        for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
            if ((!linkp->li_lblk.l_qtop) &&
                (mep->me_muxid == linkp->li_lblk.l_index)) {
                mutex_exit(&strresources);
                return (linkp);
            }
        }
    }
    mutex_exit(&strresources);
    return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
    ASSERT(q != NULL);
    while (_SAMESTR(q))
        q = q->q_next;
    return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */

static void
wait_syncq(syncq_t *sq)
{
    uint16_t count;

    mutex_enter(SQLOCK(sq));
    count = sq->sq_count;
    SQ_PUTLOCKS_ENTER(sq);
    SUM_SQ_PUTCOUNTS(sq, count);
    while (count != 0) {
        sq->sq_flags |= SQ_WANTWAKEUP;
        SQ_PUTLOCKS_EXIT(sq);
        cv_wait(&sq->sq_wait, SQLOCK(sq));
        count = sq->sq_count;
        SQ_PUTLOCKS_ENTER(sq);
        SUM_SQ_PUTCOUNTS(sq, count);
    }
    SQ_PUTLOCKS_EXIT(sq);
    mutex_exit(SQLOCK(sq));
}

/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
    if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
        syncq_t *sq = q->q_syncq;

        mutex_enter(SQLOCK(sq));
        while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
            sq->sq_flags |= SQ_WANTWAKEUP;
            cv_wait(&sq->sq_wait, SQLOCK(sq));
        }
        mutex_exit(SQLOCK(sq));
    }
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
    struct stdata *stp;
    struct strioctl strioc;
    struct linkinfo *linkp;
    struct stdata *stpdown;
    struct streamtab *str;
    queue_t *passq;
    syncq_t *passyncq;
    queue_t *rq;
    cdevsw_impl_t *dp;
    uint32_t qflag;
    uint32_t sqtype;
    perdm_t *dmp;
    int error = 0;
    netstack_t *ns;
    str_stack_t *ss;

    stp = vp->v_stream;
    TRACE_1(TR_FAC_STREAMS_FR,
        TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
    /*
     * Test for invalid upper stream
     */
    if (stp->sd_flag & STRHUP) {
        return (ENXIO);
    }
    if (vp->v_type == VFIFO) {
        return (EINVAL);
    }
    if (stp->sd_strtab == NULL) {
        return (EINVAL);
    }
    if (!stp->sd_strtab->st_muxwinit) {
        return (EINVAL);
    }
    if (fpdown == NULL) {
        return (EBADF);
    }
    ns = netstack_find_by_cred(crp);
    ASSERT(ns != NULL);
    ss = ns->netstack_str;
    ASSERT(ss != NULL);

    if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
        netstack_rele(ss->ss_netstack);
        return (EINVAL);
    }
    mutex_enter(&muxifier);
    if (stp->sd_flag & STPLEX) {
        mutex_exit(&muxifier);
        netstack_rele(ss->ss_netstack);
        return (ENXIO);
    }

    /*
     * Test for invalid lower stream.
     * The check for the v_type != VFIFO and having a major
     * number not >= devcnt is done to avoid problems with
     * adding a mux_node entry past the end of mux_nodes[].
     * For FIFO's we don't add an entry so this isn't a
     * problem.
     */
    if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
        (stpdown == stp) || (stpdown->sd_flag &
        (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
        ((stpdown->sd_vnode->v_type != VFIFO) &&
        (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
        linkcycle(stp, stpdown, ss)) {
        mutex_exit(&muxifier);
        netstack_rele(ss->ss_netstack);
        return (EINVAL);
    }
    TRACE_1(TR_FAC_STREAMS_FR,
        TR_STPDOWN, "stpdown:%p", stpdown);
    rq = getendq(stp->sd_wrq);
    if (cmd == I_PLINK)
        rq = NULL;

    linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

    strioc.ic_cmd = cmd;
    strioc.ic_timout = INFTIM;
    strioc.ic_len = sizeof (struct linkblk);
    strioc.ic_dp = (char *)&linkp->li_lblk;

    /*
     * STRPLUMB protects plumbing changes and should be set before
     * link_addpassthru()/link_rempassthru() are called, so it is set here
     * and cleared at the end of mlink when the passthru queue is removed.
     * Setting of STRPLUMB prevents reopens of the stream while the passthru
     * queue is in place (it is not a proper module and doesn't have an open
     * entry point).
     *
     * STPLEX prevents any threads from entering the stream from above. It
     * can't be set before the call to link_addpassthru() because putnext
     * from below may cause stream head I/O routines to be called and these
     * routines assert that STPLEX is not set. After link_addpassthru()
     * nothing may come from below since the pass queue syncq is blocked.
     * Note also that STPLEX should be cleared before the call to
     * link_rempassthru() since when messages start flowing to the stream
     * head (e.g. because of message propagation from the pass queue) stream
     * head I/O routines may be called with STPLEX flag set.
     *
     * When STPLEX is set, nothing may come into the stream from above and
     * it is safe to do a setq which will change the stream head. So, the
     * correct sequence of actions is:
     *
     * 1) Set STRPLUMB
     * 2) Call link_addpassthru()
     * 3) Set STPLEX
     * 4) Call setq and update the stream state
     * 5) Clear STPLEX
     * 6) Call link_rempassthru()
     * 7) Clear STRPLUMB
     *
     * The same sequence applies to munlink() code.
     */
    mutex_enter(&stpdown->sd_lock);
    stpdown->sd_flag |= STRPLUMB;
    mutex_exit(&stpdown->sd_lock);
    /*
     * Add passthru queue below lower mux. This will block
     * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
     */
    passq = link_addpassthru(stpdown);

    mutex_enter(&stpdown->sd_lock);
    stpdown->sd_flag |= STPLEX;
    mutex_exit(&stpdown->sd_lock);

    rq = _RD(stpdown->sd_wrq);
    /*
     * There may be messages in the streamhead's syncq due to messages
     * that arrived before link_addpassthru() was done. To avoid
     * background processing of the syncq happening simultaneously with
     * setq processing, we disable the streamhead syncq and wait until
     * the existing background thread finishes working on it.
     */
    wait_sq_svc(rq->q_syncq);
    passyncq = passq->q_syncq;
    if (!(passyncq->sq_flags & SQ_BLOCKED))
        blocksq(passyncq, SQ_BLOCKED, 0);

    ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
    ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
    rq->q_ptr = _WR(rq)->q_ptr = NULL;

    /* setq might sleep in allocator - avoid holding locks. */
    /* Note: we are holding muxifier here. */

    str = stp->sd_strtab;
    dp = &devimpl[getmajor(vp->v_rdev)];
    ASSERT(dp->d_str == str);

    qflag = dp->d_qflag;
    sqtype = dp->d_sqtype;

    /* create perdm_t if needed */
    if (NEED_DM(dp->d_dmp, qflag))
        dp->d_dmp = hold_dm(str, qflag, sqtype);

    dmp = dp->d_dmp;

    setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
        B_TRUE);

    /*
     * XXX Remove any "odd" messages from the queue.
     * Keep only M_DATA, M_PROTO, M_PCPROTO.
     */
    error = strdoioctl(stp, &strioc, FNATIVE,
        K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
    if (error != 0) {
        lbfree(linkp);

        if (!(passyncq->sq_flags & SQ_BLOCKED))
            blocksq(passyncq, SQ_BLOCKED, 0);
        /*
         * Restore the stream head queue and then remove
         * the passq. Turn off STPLEX before we turn on
         * the stream by removing the passq.
         */
        rq->q_ptr = _WR(rq)->q_ptr = stpdown;
        setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
            B_TRUE);

        mutex_enter(&stpdown->sd_lock);
        stpdown->sd_flag &= ~STPLEX;
        mutex_exit(&stpdown->sd_lock);

        link_rempassthru(passq);

        mutex_enter(&stpdown->sd_lock);
        stpdown->sd_flag &= ~STRPLUMB;
        /* Wakeup anyone waiting for STRPLUMB to clear. */
*/
1928 cv_broadcast(&stpdown->sd_monitor);
1929 mutex_exit(&stpdown->sd_lock);
1930
1931 mutex_exit(&muxifier);
1932 netstack_rele(ss->ss_netstack);
1933 return (error);
1934 }
1935 mutex_enter(&fpdown->f_tlock);
1936 fpdown->f_count++;
1937 mutex_exit(&fpdown->f_tlock);
1938
1939 /*
1940 * If we've made it here, the linkage is all set up, so we should also
1941 * set up the layered driver linkages.
1942 */
1943
1944 ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1945 if (cmd == I_LINK) {
1946 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1947 } else {
1948 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1949 }
1950
1951 link_rempassthru(passq);
1952
1953 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1954
1955 /*
1956 * Mark the upper stream as having dependent links
1957 * so that strclose can clean it up.
1958 */
1959 if (cmd == I_LINK) {
1960 mutex_enter(&stp->sd_lock);
1961 stp->sd_flag |= STRHASLINKS;
1962 mutex_exit(&stp->sd_lock);
1963 }
1964 /*
1965 * Wake up any other processes that may have been
1966 * waiting on the lower stream. These will all
1967 * error out.
1968 */
1969 mutex_enter(&stpdown->sd_lock);
1970 /* The passthru module is removed so we may release STRPLUMB */
1971 stpdown->sd_flag &= ~STRPLUMB;
1972 cv_broadcast(&rq->q_wait);
1973 cv_broadcast(&_WR(rq)->q_wait);
1974 cv_broadcast(&stpdown->sd_monitor);
1975 mutex_exit(&stpdown->sd_lock);
1976 mutex_exit(&muxifier);
1977 *rvalp = linkp->li_lblk.l_index;
1978 netstack_rele(ss->ss_netstack);
1979 return (0);
1980 }
1981
1982 int
1983 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1984 {
1985 int ret;
1986 struct file *fpdown;
1987
1988 fpdown = getf(arg);
1989 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1990 if (fpdown != NULL)
1991 releasef(arg);
1992 return (ret);
1993 }
1994
1995 /*
1996 * Unlink a multiplexor link. Stp is the controlling stream for the
1997 * link, and linkp points to the link's entry in the linkinfo list.
1998 * The muxifier lock must be held on entry and is dropped on exit.
1999 *
2000 * NOTE: Currently it is assumed that the mux processes all the messages
2001 * sitting on its queue before ACKing the UNLINK. It is the responsibility
2002 * of the mux to handle all the messages that arrive before UNLINK.
2003 * If the mux has to send down messages on its lower stream before
2004 * ACKing I_UNLINK, then it *should* know to handle messages even
2005 * after the UNLINK is acked (actually it should be able to handle them
2006 * until we re-block the read side of the pass queue here). If the mux does
2007 * not open up the lower stream, any messages that arrive during UNLINK
2008 * will be put in the stream head. If the lower stream is opened up,
2009 * some messages might still land in the stream head, depending on when
2010 * each message arrived and when the read side of the pass queue was
2011 * re-blocked.
2012 */
2013 int
2014 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2015 str_stack_t *ss)
2016 {
2017 struct strioctl strioc;
2018 struct stdata *stpdown;
2019 queue_t *rq, *wrq;
2020 queue_t *passq;
2021 syncq_t *passyncq;
2022 int error = 0;
2023 file_t *fpdown;
2024
2025 ASSERT(MUTEX_HELD(&muxifier));
2026
2027 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2028
2029 /*
2030 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2031 */
2032 mutex_enter(&stpdown->sd_lock);
2033 stpdown->sd_flag |= STRPLUMB;
2034 mutex_exit(&stpdown->sd_lock);
2035
2036 /*
2037 * Add passthru queue below lower mux.
This will block 2038 * syncqs of lower muxs read queue during I_LINK/I_UNLINK. 2039 */ 2040 passq = link_addpassthru(stpdown); 2041 2042 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2043 strioc.ic_cmd = I_UNLINK; 2044 else 2045 strioc.ic_cmd = I_PUNLINK; 2046 strioc.ic_timout = INFTIM; 2047 strioc.ic_len = sizeof (struct linkblk); 2048 strioc.ic_dp = (char *)&linkp->li_lblk; 2049 2050 error = strdoioctl(stp, &strioc, FNATIVE, 2051 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2052 2053 /* 2054 * If there was an error and this is not called via strclose, 2055 * return to the user. Otherwise, pretend there was no error 2056 * and close the link. 2057 */ 2058 if (error) { 2059 if (flag & LINKCLOSE) { 2060 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2061 "unlink ioctl, closing anyway (%d)\n", error); 2062 } else { 2063 link_rempassthru(passq); 2064 mutex_enter(&stpdown->sd_lock); 2065 stpdown->sd_flag &= ~STRPLUMB; 2066 cv_broadcast(&stpdown->sd_monitor); 2067 mutex_exit(&stpdown->sd_lock); 2068 mutex_exit(&muxifier); 2069 return (error); 2070 } 2071 } 2072 2073 mux_rmvedge(stp, linkp->li_lblk.l_index, ss); 2074 fpdown = linkp->li_fpdown; 2075 lbfree(linkp); 2076 2077 /* 2078 * We go ahead and drop muxifier here--it's a nasty global lock that 2079 * can slow others down. It's okay to since attempts to mlink() this 2080 * stream will be stopped because STPLEX is still set in the stdata 2081 * structure, and munlink() is stopped because mux_rmvedge() and 2082 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2083 * respectively. Note that we defer the closef() of fpdown until 2084 * after we drop muxifier since strclose() can call munlinkall(). 2085 */ 2086 mutex_exit(&muxifier); 2087 2088 wrq = stpdown->sd_wrq; 2089 rq = _RD(wrq); 2090 2091 /* 2092 * Get rid of outstanding service procedure runs, before we make 2093 * it a stream head, since a stream head doesn't have any service 2094 * procedure. 2095 */ 2096 disable_svc(rq); 2097 wait_svc(rq); 2098 2099 /* 2100 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2101 * is queued up to be finished. mux should take care that nothing is 2102 * send down to this queue. We should do it now as we're going to block 2103 * passyncq if it was unblocked. 2104 */ 2105 if (wrq->q_flag & QPERMOD) { 2106 syncq_t *sq = wrq->q_syncq; 2107 2108 mutex_enter(SQLOCK(sq)); 2109 while (wrq->q_sqflags & Q_SQQUEUED) { 2110 sq->sq_flags |= SQ_WANTWAKEUP; 2111 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2112 } 2113 mutex_exit(SQLOCK(sq)); 2114 } 2115 passyncq = passq->q_syncq; 2116 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2117 2118 syncq_t *sq, *outer; 2119 2120 /* 2121 * Messages could be flowing from underneath. We will 2122 * block the read side of the passq. This would be 2123 * sufficient for QPAIR and QPERQ muxes to ensure 2124 * that no data is flowing up into this queue 2125 * and hence no thread active in this instance of 2126 * lower mux. But for QPERMOD and QMTOUTPERIM there 2127 * could be messages on the inner and outer/inner 2128 * syncqs respectively. We will wait for them to drain. 2129 * Because passq is blocked messages end up in the syncq 2130 * And qfill_syncq could possibly end up setting QFULL 2131 * which will access the rq->q_flag. Hence, we have to 2132 * acquire the QLOCK in setq. 2133 * 2134 * XXX Messages can also flow from top into this 2135 * queue though the unlink is over (Ex. some instance 2136 * in putnext() called from top that has still not 2137 * accessed this queue. And also putq(lowerq) ?). 
2138 * Solution: How about blocking the l_qtop queue?
2139 * Do we really care about such pure D_MP muxes?
2140 */
2141
2142 blocksq(passyncq, SQ_BLOCKED, 0);
2143
2144 sq = rq->q_syncq;
2145 if ((outer = sq->sq_outer) != NULL) {
2146
2147 /*
2148 * We just have to wait for the outer sq_count
2149 * to drop to zero. As this does not prevent new
2150 * messages from entering the outer perimeter, this
2151 * is subject to starvation.
2152 *
2153 * NOTE: Because of the blocksq above, messages could
2154 * be in the inner syncq only because of some
2155 * thread holding the outer perimeter exclusively.
2156 * Hence it would be sufficient to wait for the
2157 * exclusive holder of the outer perimeter to drain
2158 * the inner and outer syncqs. But we will not depend
2159 * on this behavior and hence check the inner syncqs
2160 * separately.
2161 */
2162 wait_syncq(outer);
2163 }
2164
2165
2166 /*
2167 * There could be messages destined for
2168 * this queue. Let the exclusive holder
2169 * drain it.
2170 */
2171
2172 wait_syncq(sq);
2173 ASSERT((rq->q_flag & QPERMOD) ||
2174 ((rq->q_syncq->sq_head == NULL) &&
2175 (_WR(rq)->q_syncq->sq_head == NULL)));
2176 }
2177
2178 /*
2179 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special
2180 * case as we don't disable its syncq or remove it from the syncq
2181 * service list.
2182 */
2183 if (rq->q_flag & QPERMOD) {
2184 syncq_t *sq = rq->q_syncq;
2185
2186 mutex_enter(SQLOCK(sq));
2187 while (rq->q_sqflags & Q_SQQUEUED) {
2188 sq->sq_flags |= SQ_WANTWAKEUP;
2189 cv_wait(&sq->sq_wait, SQLOCK(sq));
2190 }
2191 mutex_exit(SQLOCK(sq));
2192 }
2193
2194 /*
2195 * flush_syncq changes states only when there are some messages to
2196 * free, i.e. when it returns a non-zero value.
2197 */
2198 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2199 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2200
2201 /*
2202 * Nobody else should know about this queue now.
2203 * If the mux did not process the messages before
2204 * acking the I_UNLINK, free them now.
2205 */
2206
2207 flushq(rq, FLUSHALL);
2208 flushq(_WR(rq), FLUSHALL);
2209
2210 /*
2211 * Convert the mux lower queue into a stream head queue.
2212 * Turn off STPLEX before we turn on the stream by removing the passq.
2213 */
2214 rq->q_ptr = wrq->q_ptr = stpdown;
2215 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2216
2217 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2218 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2219
2220 enable_svc(rq);
2221
2222 /*
2223 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2224 * needs to be set to prevent a reopen() of the stream - such a reopen
2225 * may try to call the non-existent pass queue open routine and panic.
2226 */
2227 mutex_enter(&stpdown->sd_lock);
2228 stpdown->sd_flag &= ~STPLEX;
2229 mutex_exit(&stpdown->sd_lock);
2230
2231 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2232 ((flag & LINKTYPEMASK) == LINKPERSIST));
2233
2234 /* clean up the layered driver linkages */
2235 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2236 ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2237 } else {
2238 ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2239 }
2240
2241 link_rempassthru(passq);
2242
2243 /*
2244 * Now all plumbing changes are finished and STRPLUMB is no
2245 * longer needed.
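 *
 * For context, the mux driver saw this unlink as an M_IOCTL carrying
 * a struct linkblk; a hypothetical sketch of the driver side (the
 * mymux_* helpers are illustrative only):
 *
 *	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
 *	struct linkblk *lp = (struct linkblk *)mp->b_cont->b_rptr;
 *
 *	switch (iocp->ioc_cmd) {
 *	case I_LINK:
 *	case I_PLINK:
 *		mymux_save_lower(lp->l_qbot, lp->l_index);
 *		miocack(q, mp, 0, 0);
 *		break;
 *	case I_UNLINK:
 *	case I_PUNLINK:
 *		mymux_forget_lower(lp->l_index);
 *		miocack(q, mp, 0, 0);
 *		break;
 *	}
 *
 * As the NOTE above munlink() explains, the mux must cope with
 * messages that keep arriving up to the point where the read side of
 * the pass queue is re-blocked.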
2246 */ 2247 mutex_enter(&stpdown->sd_lock); 2248 stpdown->sd_flag &= ~STRPLUMB; 2249 cv_broadcast(&stpdown->sd_monitor); 2250 mutex_exit(&stpdown->sd_lock); 2251 2252 (void) closef(fpdown); 2253 return (0); 2254 } 2255 2256 /* 2257 * Unlink all multiplexor links for which stp is the controlling stream. 2258 * Return 0, or a non-zero errno on failure. 2259 */ 2260 int 2261 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2262 { 2263 linkinfo_t *linkp; 2264 int error = 0; 2265 2266 mutex_enter(&muxifier); 2267 while (linkp = findlinks(stp, 0, flag, ss)) { 2268 /* 2269 * munlink() releases the muxifier lock. 2270 */ 2271 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2272 return (error); 2273 mutex_enter(&muxifier); 2274 } 2275 mutex_exit(&muxifier); 2276 return (0); 2277 } 2278 2279 /* 2280 * A multiplexor link has been made. Add an 2281 * edge to the directed graph. 2282 */ 2283 void 2284 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2285 { 2286 struct mux_node *np; 2287 struct mux_edge *ep; 2288 major_t upmaj; 2289 major_t lomaj; 2290 2291 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2292 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2293 np = &ss->ss_mux_nodes[upmaj]; 2294 if (np->mn_outp) { 2295 ep = np->mn_outp; 2296 while (ep->me_nextp) 2297 ep = ep->me_nextp; 2298 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2299 ep = ep->me_nextp; 2300 } else { 2301 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2302 ep = np->mn_outp; 2303 } 2304 ep->me_nextp = NULL; 2305 ep->me_muxid = muxid; 2306 /* 2307 * Save the dev_t for the purposes of str_stack_shutdown. 2308 * str_stack_shutdown assumes that the device allows reopen, since 2309 * this dev_t is the one after any cloning by xx_open(). 2310 * Would prefer finding the dev_t from before any cloning, 2311 * but specfs doesn't retain that. 2312 */ 2313 ep->me_dev = upstp->sd_vnode->v_rdev; 2314 if (lostp->sd_vnode->v_type == VFIFO) 2315 ep->me_nodep = NULL; 2316 else 2317 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2318 } 2319 2320 /* 2321 * A multiplexor link has been removed. Remove the 2322 * edge in the directed graph. 2323 */ 2324 void 2325 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2326 { 2327 struct mux_node *np; 2328 struct mux_edge *ep; 2329 struct mux_edge *pep = NULL; 2330 major_t upmaj; 2331 2332 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2333 np = &ss->ss_mux_nodes[upmaj]; 2334 ASSERT(np->mn_outp != NULL); 2335 ep = np->mn_outp; 2336 while (ep) { 2337 if (ep->me_muxid == muxid) { 2338 if (pep) 2339 pep->me_nextp = ep->me_nextp; 2340 else 2341 np->mn_outp = ep->me_nextp; 2342 kmem_free(ep, sizeof (struct mux_edge)); 2343 return; 2344 } 2345 pep = ep; 2346 ep = ep->me_nextp; 2347 } 2348 ASSERT(0); /* should not reach here */ 2349 } 2350 2351 /* 2352 * Translate the device flags (from conf.h) to the corresponding 2353 * qflag and sq_flag (type) values. 
2354 */ 2355 int 2356 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2357 uint32_t *sqtypep) 2358 { 2359 uint32_t qflag = 0; 2360 uint32_t sqtype = 0; 2361 2362 if (devflag & _D_OLD) 2363 goto bad; 2364 2365 /* Inner perimeter presence and scope */ 2366 switch (devflag & D_MTINNER_MASK) { 2367 case D_MP: 2368 qflag |= QMTSAFE; 2369 sqtype |= SQ_CI; 2370 break; 2371 case D_MTPERQ|D_MP: 2372 qflag |= QPERQ; 2373 break; 2374 case D_MTQPAIR|D_MP: 2375 qflag |= QPAIR; 2376 break; 2377 case D_MTPERMOD|D_MP: 2378 qflag |= QPERMOD; 2379 break; 2380 default: 2381 goto bad; 2382 } 2383 2384 /* Outer perimeter */ 2385 if (devflag & D_MTOUTPERIM) { 2386 switch (devflag & D_MTINNER_MASK) { 2387 case D_MP: 2388 case D_MTPERQ|D_MP: 2389 case D_MTQPAIR|D_MP: 2390 break; 2391 default: 2392 goto bad; 2393 } 2394 qflag |= QMTOUTPERIM; 2395 } 2396 2397 /* Inner perimeter modifiers */ 2398 if (devflag & D_MTINNER_MOD) { 2399 switch (devflag & D_MTINNER_MASK) { 2400 case D_MP: 2401 goto bad; 2402 default: 2403 break; 2404 } 2405 if (devflag & D_MTPUTSHARED) 2406 sqtype |= SQ_CIPUT; 2407 if (devflag & _D_MTOCSHARED) { 2408 /* 2409 * The code in putnext assumes that it has the 2410 * highest concurrency by not checking sq_count. 2411 * Thus _D_MTOCSHARED can only be supported when 2412 * D_MTPUTSHARED is set. 2413 */ 2414 if (!(devflag & D_MTPUTSHARED)) 2415 goto bad; 2416 sqtype |= SQ_CIOC; 2417 } 2418 if (devflag & _D_MTCBSHARED) { 2419 /* 2420 * The code in putnext assumes that it has the 2421 * highest concurrency by not checking sq_count. 2422 * Thus _D_MTCBSHARED can only be supported when 2423 * D_MTPUTSHARED is set. 2424 */ 2425 if (!(devflag & D_MTPUTSHARED)) 2426 goto bad; 2427 sqtype |= SQ_CICB; 2428 } 2429 if (devflag & _D_MTSVCSHARED) { 2430 /* 2431 * The code in putnext assumes that it has the 2432 * highest concurrency by not checking sq_count. 2433 * Thus _D_MTSVCSHARED can only be supported when 2434 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2435 * supported only for QPERMOD. 2436 */ 2437 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2438 goto bad; 2439 sqtype |= SQ_CISVC; 2440 } 2441 } 2442 2443 /* Default outer perimeter concurrency */ 2444 sqtype |= SQ_CO; 2445 2446 /* Outer perimeter modifiers */ 2447 if (devflag & D_MTOCEXCL) { 2448 if (!(devflag & D_MTOUTPERIM)) { 2449 /* No outer perimeter */ 2450 goto bad; 2451 } 2452 sqtype &= ~SQ_COOC; 2453 } 2454 2455 /* Synchronous Streams extended qinit structure */ 2456 if (devflag & D_SYNCSTR) 2457 qflag |= QSYNCSTR; 2458 2459 /* 2460 * Private flag used by a transport module to indicate 2461 * to sockfs that it supports direct-access mode without 2462 * having to go through STREAMS. 2463 */ 2464 if (devflag & _D_DIRECT) { 2465 /* Reject unless the module is fully-MT (no perimeter) */ 2466 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2467 goto bad; 2468 qflag |= _QDIRECT; 2469 } 2470 2471 *qflagp = qflag; 2472 *sqtypep = sqtype; 2473 return (0); 2474 2475 bad: 2476 cmn_err(CE_WARN, 2477 "stropen: bad MT flags (0x%x) in driver '%s'", 2478 (int)(qflag & D_MTSAFETY_MASK), 2479 stp->st_rdinit->qi_minfo->mi_idname); 2480 2481 return (EINVAL); 2482 } 2483 2484 /* 2485 * Set the interface values for a pair of queues (qinit structure, 2486 * packet sizes, water marks). 2487 * setq assumes that the caller does not have a claim (entersq or claimq) 2488 * on the queue. 
2489 */ 2490 void 2491 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2492 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2493 { 2494 queue_t *wq; 2495 syncq_t *sq, *outer; 2496 2497 ASSERT(rq->q_flag & QREADR); 2498 ASSERT((qflag & QMT_TYPEMASK) != 0); 2499 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2500 2501 wq = _WR(rq); 2502 rq->q_qinfo = rinit; 2503 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2504 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2505 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2506 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2507 wq->q_qinfo = winit; 2508 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2509 wq->q_lowat = winit->qi_minfo->mi_lowat; 2510 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2511 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2512 2513 /* Remove old syncqs */ 2514 sq = rq->q_syncq; 2515 outer = sq->sq_outer; 2516 if (outer != NULL) { 2517 ASSERT(wq->q_syncq->sq_outer == outer); 2518 outer_remove(outer, rq->q_syncq); 2519 if (wq->q_syncq != rq->q_syncq) 2520 outer_remove(outer, wq->q_syncq); 2521 } 2522 ASSERT(sq->sq_outer == NULL); 2523 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2524 2525 if (sq != SQ(rq)) { 2526 if (!(rq->q_flag & QPERMOD)) 2527 free_syncq(sq); 2528 if (wq->q_syncq == rq->q_syncq) 2529 wq->q_syncq = NULL; 2530 rq->q_syncq = NULL; 2531 } 2532 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2533 wq->q_syncq != SQ(rq)) { 2534 free_syncq(wq->q_syncq); 2535 wq->q_syncq = NULL; 2536 } 2537 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2538 rq->q_syncq->sq_tail == NULL)); 2539 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2540 wq->q_syncq->sq_tail == NULL)); 2541 2542 if (!(rq->q_flag & QPERMOD) && 2543 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2544 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2545 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2546 rq->q_syncq->sq_nciputctrl, 0); 2547 ASSERT(ciputctrl_cache != NULL); 2548 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2549 rq->q_syncq->sq_ciputctrl = NULL; 2550 rq->q_syncq->sq_nciputctrl = 0; 2551 } 2552 2553 if (!(wq->q_flag & QPERMOD) && 2554 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2555 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2556 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2557 wq->q_syncq->sq_nciputctrl, 0); 2558 ASSERT(ciputctrl_cache != NULL); 2559 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2560 wq->q_syncq->sq_ciputctrl = NULL; 2561 wq->q_syncq->sq_nciputctrl = 0; 2562 } 2563 2564 sq = SQ(rq); 2565 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2566 ASSERT(sq->sq_outer == NULL); 2567 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2568 2569 /* 2570 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2571 * bits in sq_flag based on the sqtype. 2572 */ 2573 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2574 2575 rq->q_syncq = wq->q_syncq = sq; 2576 sq->sq_type = sqtype; 2577 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2578 2579 /* 2580 * We are making sq_svcflags zero, 2581 * resetting SQ_DISABLED in case it was set by 2582 * wait_svc() in the munlink path. 2583 * 2584 */ 2585 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2586 sq->sq_svcflags = 0; 2587 2588 /* 2589 * We need to acquire the lock here for the mlink and munlink case, 2590 * where canputnext, backenable, etc can access the q_flag. 
2591 */ 2592 if (lock_needed) { 2593 mutex_enter(QLOCK(rq)); 2594 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2595 mutex_exit(QLOCK(rq)); 2596 mutex_enter(QLOCK(wq)); 2597 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2598 mutex_exit(QLOCK(wq)); 2599 } else { 2600 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2601 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2602 } 2603 2604 if (qflag & QPERQ) { 2605 /* Allocate a separate syncq for the write side */ 2606 sq = new_syncq(); 2607 sq->sq_type = rq->q_syncq->sq_type; 2608 sq->sq_flags = rq->q_syncq->sq_flags; 2609 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2610 sq->sq_oprev == NULL); 2611 wq->q_syncq = sq; 2612 } 2613 if (qflag & QPERMOD) { 2614 sq = dmp->dm_sq; 2615 2616 /* 2617 * Assert that we do have an inner perimeter syncq and that it 2618 * does not have an outer perimeter associated with it. 2619 */ 2620 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2621 sq->sq_oprev == NULL); 2622 rq->q_syncq = wq->q_syncq = sq; 2623 } 2624 if (qflag & QMTOUTPERIM) { 2625 outer = dmp->dm_sq; 2626 2627 ASSERT(outer->sq_outer == NULL); 2628 outer_insert(outer, rq->q_syncq); 2629 if (wq->q_syncq != rq->q_syncq) 2630 outer_insert(outer, wq->q_syncq); 2631 } 2632 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2633 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2634 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2635 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2636 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2637 2638 /* 2639 * Initialize struio() types. 2640 */ 2641 rq->q_struiot = 2642 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2643 wq->q_struiot = 2644 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2645 } 2646 2647 perdm_t * 2648 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2649 { 2650 syncq_t *sq; 2651 perdm_t **pp; 2652 perdm_t *p; 2653 perdm_t *dmp; 2654 2655 ASSERT(str != NULL); 2656 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2657 2658 rw_enter(&perdm_rwlock, RW_READER); 2659 for (p = perdm_list; p != NULL; p = p->dm_next) { 2660 if (p->dm_str == str) { /* found one */ 2661 atomic_inc_32(&(p->dm_ref)); 2662 rw_exit(&perdm_rwlock); 2663 return (p); 2664 } 2665 } 2666 rw_exit(&perdm_rwlock); 2667 2668 sq = new_syncq(); 2669 if (qflag & QPERMOD) { 2670 sq->sq_type = sqtype | SQ_PERMOD; 2671 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2672 } else { 2673 ASSERT(qflag & QMTOUTPERIM); 2674 sq->sq_onext = sq->sq_oprev = sq; 2675 } 2676 2677 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2678 dmp->dm_sq = sq; 2679 dmp->dm_str = str; 2680 dmp->dm_ref = 1; 2681 dmp->dm_next = NULL; 2682 2683 rw_enter(&perdm_rwlock, RW_WRITER); 2684 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2685 if (p->dm_str == str) { /* already present */ 2686 p->dm_ref++; 2687 rw_exit(&perdm_rwlock); 2688 free_syncq(sq); 2689 kmem_free(dmp, sizeof (perdm_t)); 2690 return (p); 2691 } 2692 } 2693 2694 *pp = dmp; 2695 rw_exit(&perdm_rwlock); 2696 return (dmp); 2697 } 2698 2699 void 2700 rele_dm(perdm_t *dmp) 2701 { 2702 perdm_t **pp; 2703 perdm_t *p; 2704 2705 rw_enter(&perdm_rwlock, RW_WRITER); 2706 ASSERT(dmp->dm_ref > 0); 2707 2708 if (--dmp->dm_ref > 0) { 2709 rw_exit(&perdm_rwlock); 2710 return; 2711 } 2712 2713 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2714 if (p == dmp) 2715 break; 2716 ASSERT(p == dmp); 2717 *pp = p->dm_next; 2718 rw_exit(&perdm_rwlock); 2719 2720 /* 2721 * Wait for any 
background processing that relies on the 2722 * syncq to complete before it is freed. 2723 */ 2724 wait_sq_svc(p->dm_sq); 2725 free_syncq(p->dm_sq); 2726 kmem_free(p, sizeof (perdm_t)); 2727 } 2728 2729 /* 2730 * Make a protocol message given control and data buffers. 2731 * n.b., this can block; be careful of what locks you hold when calling it. 2732 * 2733 * If sd_maxblk is less than *iosize this routine can fail part way through 2734 * (due to an allocation failure). In this case on return *iosize will contain 2735 * the amount that was consumed. Otherwise *iosize will not be modified 2736 * i.e. it will contain the amount that was consumed. 2737 */ 2738 int 2739 strmakemsg( 2740 struct strbuf *mctl, 2741 ssize_t *iosize, 2742 struct uio *uiop, 2743 stdata_t *stp, 2744 int32_t flag, 2745 mblk_t **mpp) 2746 { 2747 mblk_t *mpctl = NULL; 2748 mblk_t *mpdata = NULL; 2749 int error; 2750 2751 ASSERT(uiop != NULL); 2752 2753 *mpp = NULL; 2754 /* Create control part, if any */ 2755 if ((mctl != NULL) && (mctl->len >= 0)) { 2756 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2757 if (error) 2758 return (error); 2759 } 2760 /* Create data part, if any */ 2761 if (*iosize >= 0) { 2762 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2763 if (error) { 2764 freemsg(mpctl); 2765 return (error); 2766 } 2767 } 2768 if (mpctl != NULL) { 2769 if (mpdata != NULL) 2770 linkb(mpctl, mpdata); 2771 *mpp = mpctl; 2772 } else { 2773 *mpp = mpdata; 2774 } 2775 return (0); 2776 } 2777 2778 /* 2779 * Make the control part of a protocol message given a control buffer. 2780 * n.b., this can block; be careful of what locks you hold when calling it. 2781 */ 2782 int 2783 strmakectl( 2784 struct strbuf *mctl, 2785 int32_t flag, 2786 int32_t fflag, 2787 mblk_t **mpp) 2788 { 2789 mblk_t *bp = NULL; 2790 unsigned char msgtype; 2791 int error = 0; 2792 cred_t *cr = CRED(); 2793 2794 /* We do not support interrupt threads using the stream head to send */ 2795 ASSERT(cr != NULL); 2796 2797 *mpp = NULL; 2798 /* 2799 * Create control part of message, if any. 2800 */ 2801 if ((mctl != NULL) && (mctl->len >= 0)) { 2802 caddr_t base; 2803 int ctlcount; 2804 int allocsz; 2805 2806 if (flag & RS_HIPRI) 2807 msgtype = M_PCPROTO; 2808 else 2809 msgtype = M_PROTO; 2810 2811 ctlcount = mctl->len; 2812 base = mctl->buf; 2813 2814 /* 2815 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2816 * blocks by increasing the size to something more usable. 2817 */ 2818 allocsz = MAX(ctlcount, 64); 2819 2820 /* 2821 * Range checking has already been done; simply try 2822 * to allocate a message block for the ctl part. 2823 */ 2824 while ((bp = allocb_cred(allocsz, cr, 2825 curproc->p_pid)) == NULL) { 2826 if (fflag & (FNDELAY|FNONBLOCK)) 2827 return (EAGAIN); 2828 if (error = strwaitbuf(allocsz, BPRI_MED)) 2829 return (error); 2830 } 2831 2832 bp->b_datap->db_type = msgtype; 2833 if (copyin(base, bp->b_wptr, ctlcount)) { 2834 freeb(bp); 2835 return (EFAULT); 2836 } 2837 bp->b_wptr += ctlcount; 2838 } 2839 *mpp = bp; 2840 return (0); 2841 } 2842 2843 /* 2844 * Make a protocol message given data buffers. 2845 * n.b., this can block; be careful of what locks you hold when calling it. 2846 * 2847 * If sd_maxblk is less than *iosize this routine can fail part way through 2848 * (due to an allocation failure). In this case on return *iosize will contain 2849 * the amount that was consumed. Otherwise *iosize will not be modified 2850 * i.e. it will contain the amount that was consumed. 
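 *
 * A hypothetical caller sketch (illustrative only):
 *
 *	ssize_t iosize = uiop->uio_resid;
 *	mblk_t *mp;
 *	int error;
 *
 *	error = strmakedata(&iosize, uiop, stp, 0, &mp);
 *	if (error != 0)
 *		return (error);	(any partial message has been freed)
 *
 * On a zero return, iosize tells the caller how much of the uio was
 * actually consumed, which may be less than requested when sd_maxblk
 * is small and an allocation fails part way through.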
2851 */ 2852 int 2853 strmakedata( 2854 ssize_t *iosize, 2855 struct uio *uiop, 2856 stdata_t *stp, 2857 int32_t flag, 2858 mblk_t **mpp) 2859 { 2860 mblk_t *mp = NULL; 2861 mblk_t *bp; 2862 int wroff = (int)stp->sd_wroff; 2863 int tail_len = (int)stp->sd_tail; 2864 int extra = wroff + tail_len; 2865 int error = 0; 2866 ssize_t maxblk; 2867 ssize_t count = *iosize; 2868 cred_t *cr; 2869 2870 *mpp = NULL; 2871 if (count < 0) 2872 return (0); 2873 2874 /* We do not support interrupt threads using the stream head to send */ 2875 cr = CRED(); 2876 ASSERT(cr != NULL); 2877 2878 maxblk = stp->sd_maxblk; 2879 if (maxblk == INFPSZ) 2880 maxblk = count; 2881 2882 /* 2883 * Create data part of message, if any. 2884 */ 2885 do { 2886 ssize_t size; 2887 dblk_t *dp; 2888 2889 ASSERT(uiop); 2890 2891 size = MIN(count, maxblk); 2892 2893 while ((bp = allocb_cred(size + extra, cr, 2894 curproc->p_pid)) == NULL) { 2895 error = EAGAIN; 2896 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2897 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2898 if (count == *iosize) { 2899 freemsg(mp); 2900 return (error); 2901 } else { 2902 *iosize -= count; 2903 *mpp = mp; 2904 return (0); 2905 } 2906 } 2907 } 2908 dp = bp->b_datap; 2909 dp->db_cpid = curproc->p_pid; 2910 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2911 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2912 2913 if (flag & STRUIO_POSTPONE) { 2914 /* 2915 * Setup the stream uio portion of the 2916 * dblk for subsequent use by struioget(). 2917 */ 2918 dp->db_struioflag = STRUIO_SPEC; 2919 dp->db_cksumstart = 0; 2920 dp->db_cksumstuff = 0; 2921 dp->db_cksumend = size; 2922 *(long long *)dp->db_struioun.data = 0ll; 2923 bp->b_wptr += size; 2924 } else { 2925 if (stp->sd_copyflag & STRCOPYCACHED) 2926 uiop->uio_extflg |= UIO_COPY_CACHED; 2927 2928 if (size != 0) { 2929 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2930 uiop); 2931 if (error != 0) { 2932 freeb(bp); 2933 freemsg(mp); 2934 return (error); 2935 } 2936 } 2937 bp->b_wptr += size; 2938 2939 if (stp->sd_wputdatafunc != NULL) { 2940 mblk_t *newbp; 2941 2942 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2943 bp, NULL, NULL, NULL, NULL); 2944 if (newbp == NULL) { 2945 freeb(bp); 2946 freemsg(mp); 2947 return (ECOMM); 2948 } 2949 bp = newbp; 2950 } 2951 } 2952 2953 count -= size; 2954 2955 if (mp == NULL) 2956 mp = bp; 2957 else 2958 linkb(mp, bp); 2959 } while (count > 0); 2960 2961 *mpp = mp; 2962 return (0); 2963 } 2964 2965 /* 2966 * Wait for a buffer to become available. Return non-zero errno 2967 * if not able to wait, 0 if buffer is probably there. 2968 */ 2969 int 2970 strwaitbuf(size_t size, int pri) 2971 { 2972 bufcall_id_t id; 2973 2974 mutex_enter(&bcall_monitor); 2975 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2976 &ttoproc(curthread)->p_flag_cv)) == 0) { 2977 mutex_exit(&bcall_monitor); 2978 return (ENOSR); 2979 } 2980 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2981 unbufcall(id); 2982 mutex_exit(&bcall_monitor); 2983 return (EINTR); 2984 } 2985 unbufcall(id); 2986 mutex_exit(&bcall_monitor); 2987 return (0); 2988 } 2989 2990 /* 2991 * This function waits for a read or write event to happen on a stream. 2992 * fmode can specify FNDELAY and/or FNONBLOCK. 2993 * The timeout is in ms with -1 meaning infinite. 2994 * The flag values work as follows: 2995 * READWAIT Check for read side errors, send M_READ 2996 * GETWAIT Check for read side errors, no M_READ 2997 * WRITEWAIT Check for write side errors. 
2998 * NOINTR Do not return error if nonblocking or timeout. 2999 * STR_NOERROR Ignore all errors except STPLEX. 3000 * STR_NOSIG Ignore/hold signals during the duration of the call. 3001 * STR_PEEK Pass through the strgeterr(). 3002 */ 3003 int 3004 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 3005 int *done) 3006 { 3007 int slpflg, errs; 3008 int error; 3009 kcondvar_t *sleepon; 3010 mblk_t *mp; 3011 ssize_t *rd_count; 3012 clock_t rval; 3013 3014 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3015 if ((flag & READWAIT) || (flag & GETWAIT)) { 3016 slpflg = RSLEEP; 3017 sleepon = &_RD(stp->sd_wrq)->q_wait; 3018 errs = STRDERR|STPLEX; 3019 } else { 3020 slpflg = WSLEEP; 3021 sleepon = &stp->sd_wrq->q_wait; 3022 errs = STWRERR|STRHUP|STPLEX; 3023 } 3024 if (flag & STR_NOERROR) 3025 errs = STPLEX; 3026 3027 if (stp->sd_wakeq & slpflg) { 3028 /* 3029 * A strwakeq() is pending, no need to sleep. 3030 */ 3031 stp->sd_wakeq &= ~slpflg; 3032 *done = 0; 3033 return (0); 3034 } 3035 3036 if (stp->sd_flag & errs) { 3037 /* 3038 * Check for errors before going to sleep since the 3039 * caller might not have checked this while holding 3040 * sd_lock. 3041 */ 3042 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3043 if (error != 0) { 3044 *done = 1; 3045 return (error); 3046 } 3047 } 3048 3049 /* 3050 * If any module downstream has requested read notification 3051 * by setting SNDMREAD flag using M_SETOPTS, send a message 3052 * down stream. 3053 */ 3054 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3055 mutex_exit(&stp->sd_lock); 3056 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3057 (flag & STR_NOSIG), &error))) { 3058 mutex_enter(&stp->sd_lock); 3059 *done = 1; 3060 return (error); 3061 } 3062 mp->b_datap->db_type = M_READ; 3063 rd_count = (ssize_t *)mp->b_wptr; 3064 *rd_count = count; 3065 mp->b_wptr += sizeof (ssize_t); 3066 /* 3067 * Send the number of bytes requested by the 3068 * read as the argument to M_READ. 3069 */ 3070 stream_willservice(stp); 3071 putnext(stp->sd_wrq, mp); 3072 stream_runservice(stp); 3073 mutex_enter(&stp->sd_lock); 3074 3075 /* 3076 * If any data arrived due to inline processing 3077 * of putnext(), don't sleep. 3078 */ 3079 if (_RD(stp->sd_wrq)->q_first != NULL) { 3080 *done = 0; 3081 return (0); 3082 } 3083 } 3084 3085 if (fmode & (FNDELAY|FNONBLOCK)) { 3086 if (!(flag & NOINTR)) 3087 error = EAGAIN; 3088 else 3089 error = 0; 3090 *done = 1; 3091 return (error); 3092 } 3093 3094 stp->sd_flag |= slpflg; 3095 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3096 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3097 stp, flag, count, fmode, done); 3098 3099 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3100 if (rval > 0) { 3101 /* EMPTY */ 3102 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3103 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3104 stp, flag, count, fmode, done); 3105 } else if (rval == 0) { 3106 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3107 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3108 stp, flag, count, fmode, done); 3109 stp->sd_flag &= ~slpflg; 3110 cv_broadcast(sleepon); 3111 if (!(flag & NOINTR)) 3112 error = EINTR; 3113 else 3114 error = 0; 3115 *done = 1; 3116 return (error); 3117 } else { 3118 /* timeout */ 3119 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3120 "strwaitq timeout:%p, %X, %lX, %X, %p", 3121 stp, flag, count, fmode, done); 3122 *done = 1; 3123 if (!(flag & NOINTR)) 3124 return (ETIME); 3125 else 3126 return (0); 3127 } 3128 /* 3129 * If the caller implements delayed errors (i.e. 
queued after data) 3130 * we can not check for errors here since data as well as an 3131 * error might have arrived at the stream head. We return to 3132 * have the caller check the read queue before checking for errors. 3133 */ 3134 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3135 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3136 if (error != 0) { 3137 *done = 1; 3138 return (error); 3139 } 3140 } 3141 *done = 0; 3142 return (0); 3143 } 3144 3145 /* 3146 * Perform job control discipline access checks. 3147 * Return 0 for success and the errno for failure. 3148 */ 3149 3150 #define cantsend(p, t, sig) \ 3151 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3152 3153 int 3154 straccess(struct stdata *stp, enum jcaccess mode) 3155 { 3156 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3157 kthread_t *t = curthread; 3158 proc_t *p = ttoproc(t); 3159 sess_t *sp; 3160 3161 ASSERT(mutex_owned(&stp->sd_lock)); 3162 3163 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3164 return (0); 3165 3166 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3167 3168 for (;;) { 3169 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3170 sp = p->p_sessp; 3171 mutex_enter(&sp->s_lock); /* protects sp->* */ 3172 3173 /* 3174 * If this is not the calling process's controlling terminal 3175 * or if the calling process is already in the foreground 3176 * then allow access. 3177 */ 3178 if (sp->s_dev != stp->sd_vnode->v_rdev || 3179 p->p_pgidp == stp->sd_pgidp) { 3180 mutex_exit(&sp->s_lock); 3181 mutex_exit(&p->p_splock); 3182 mutex_exit(&p->p_lock); 3183 return (0); 3184 } 3185 3186 /* 3187 * Check to see if controlling terminal has been deallocated. 3188 */ 3189 if (sp->s_vp == NULL) { 3190 if (!cantsend(p, t, SIGHUP)) 3191 sigtoproc(p, t, SIGHUP); 3192 mutex_exit(&sp->s_lock); 3193 mutex_exit(&p->p_splock); 3194 mutex_exit(&p->p_lock); 3195 return (EIO); 3196 } 3197 3198 mutex_exit(&sp->s_lock); 3199 mutex_exit(&p->p_splock); 3200 3201 if (mode == JCGETP) { 3202 mutex_exit(&p->p_lock); 3203 return (0); 3204 } 3205 3206 if (mode == JCREAD) { 3207 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3208 mutex_exit(&p->p_lock); 3209 return (EIO); 3210 } 3211 mutex_exit(&p->p_lock); 3212 mutex_exit(&stp->sd_lock); 3213 pgsignal(p->p_pgidp, SIGTTIN); 3214 mutex_enter(&stp->sd_lock); 3215 mutex_enter(&p->p_lock); 3216 } else { /* mode == JCWRITE or JCSETP */ 3217 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3218 cantsend(p, t, SIGTTOU)) { 3219 mutex_exit(&p->p_lock); 3220 return (0); 3221 } 3222 if (p->p_detached) { 3223 mutex_exit(&p->p_lock); 3224 return (EIO); 3225 } 3226 mutex_exit(&p->p_lock); 3227 mutex_exit(&stp->sd_lock); 3228 pgsignal(p->p_pgidp, SIGTTOU); 3229 mutex_enter(&stp->sd_lock); 3230 mutex_enter(&p->p_lock); 3231 } 3232 3233 /* 3234 * We call cv_wait_sig_swap() to cause the appropriate 3235 * action for the jobcontrol signal to take place. 3236 * If the signal is being caught, we will take the 3237 * EINTR error return. Otherwise, the default action 3238 * of causing the process to stop will take place. 3239 * In this case, we rely on the periodic cv_broadcast() on 3240 * &lbolt_cv to wake us up to loop around and test again. 3241 * We can't get here if the signal is ignored or 3242 * if the current thread is blocking the signal. 
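 *
 * For context, the userland-visible behavior implemented here is the
 * classic job control discipline; a hypothetical sketch from the
 * process side:
 *
 *	int fd = open("/dev/tty", O_RDWR);	(controlling terminal)
 *
 *	(void) signal(SIGTTIN, SIG_IGN);
 *	if (read(fd, buf, sizeof (buf)) == -1)
 *		assert(errno == EIO);	(the JCREAD check above)
 *
 * In a background job the read would normally post SIGTTIN and stop
 * the process; because the signal is ignored here, cantsend() is true
 * and straccess() fails the access with EIO instead.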
3243 */ 3244 mutex_exit(&stp->sd_lock); 3245 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3246 mutex_exit(&p->p_lock); 3247 mutex_enter(&stp->sd_lock); 3248 return (EINTR); 3249 } 3250 mutex_exit(&p->p_lock); 3251 mutex_enter(&stp->sd_lock); 3252 mutex_enter(&p->p_lock); 3253 } 3254 } 3255 3256 /* 3257 * Return size of message of block type (bp->b_datap->db_type) 3258 */ 3259 size_t 3260 xmsgsize(mblk_t *bp) 3261 { 3262 unsigned char type; 3263 size_t count = 0; 3264 3265 type = bp->b_datap->db_type; 3266 3267 for (; bp; bp = bp->b_cont) { 3268 if (type != bp->b_datap->db_type) 3269 break; 3270 ASSERT(bp->b_wptr >= bp->b_rptr); 3271 count += bp->b_wptr - bp->b_rptr; 3272 } 3273 return (count); 3274 } 3275 3276 /* 3277 * Allocate a stream head. 3278 */ 3279 struct stdata * 3280 shalloc(queue_t *qp) 3281 { 3282 stdata_t *stp; 3283 3284 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3285 3286 stp->sd_wrq = _WR(qp); 3287 stp->sd_strtab = NULL; 3288 stp->sd_iocid = 0; 3289 stp->sd_mate = NULL; 3290 stp->sd_freezer = NULL; 3291 stp->sd_refcnt = 0; 3292 stp->sd_wakeq = 0; 3293 stp->sd_anchor = 0; 3294 stp->sd_struiowrq = NULL; 3295 stp->sd_struiordq = NULL; 3296 stp->sd_struiodnak = 0; 3297 stp->sd_struionak = NULL; 3298 stp->sd_t_audit_data = NULL; 3299 stp->sd_rput_opt = 0; 3300 stp->sd_wput_opt = 0; 3301 stp->sd_read_opt = 0; 3302 stp->sd_rprotofunc = strrput_proto; 3303 stp->sd_rmiscfunc = strrput_misc; 3304 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3305 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3306 stp->sd_ciputctrl = NULL; 3307 stp->sd_nciputctrl = 0; 3308 stp->sd_qhead = NULL; 3309 stp->sd_qtail = NULL; 3310 stp->sd_servid = NULL; 3311 stp->sd_nqueues = 0; 3312 stp->sd_svcflags = 0; 3313 stp->sd_copyflag = 0; 3314 sh_insert_pid(stp, curproc); 3315 3316 return (stp); 3317 } 3318 3319 /* 3320 * Free a stream head. 
3321 */ 3322 void 3323 shfree(stdata_t *stp) 3324 { 3325 pid_node_t *pn; 3326 3327 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3328 3329 stp->sd_wrq = NULL; 3330 3331 mutex_enter(&stp->sd_qlock); 3332 while (stp->sd_svcflags & STRS_SCHEDULED) { 3333 STRSTAT(strwaits); 3334 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3335 } 3336 mutex_exit(&stp->sd_qlock); 3337 3338 if (stp->sd_ciputctrl != NULL) { 3339 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3340 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3341 stp->sd_nciputctrl, 0); 3342 ASSERT(ciputctrl_cache != NULL); 3343 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3344 stp->sd_ciputctrl = NULL; 3345 stp->sd_nciputctrl = 0; 3346 } 3347 ASSERT(stp->sd_qhead == NULL); 3348 ASSERT(stp->sd_qtail == NULL); 3349 ASSERT(stp->sd_nqueues == 0); 3350 3351 mutex_enter(&stp->sd_pid_list_lock); 3352 while ((pn = list_head(&stp->sd_pid_list)) != NULL) { 3353 list_remove(&stp->sd_pid_list, pn); 3354 kmem_free(pn, sizeof (*pn)); 3355 } 3356 mutex_exit(&stp->sd_pid_list_lock); 3357 3358 kmem_cache_free(stream_head_cache, stp); 3359 } 3360 3361 void 3362 sh_insert_pid(struct stdata *stp, proc_t *p) 3363 { 3364 pid_node_t *pn; 3365 3366 mutex_enter(&stp->sd_pid_list_lock); 3367 pn = list_head(&stp->sd_pid_list); 3368 while (pn != NULL && pn->pn_pid != p->p_pidp->pid_id) { 3369 pn = list_next(&stp->sd_pid_list, pn); 3370 } 3371 3372 if (pn != NULL) { 3373 pn->pn_count++; 3374 } else { 3375 pn = kmem_zalloc(sizeof (*pn), KM_SLEEP); 3376 list_link_init(&pn->pn_ref_link); 3377 pn->pn_pid = p->p_pidp->pid_id; 3378 pn->pn_count = 1; 3379 list_insert_tail(&stp->sd_pid_list, pn); 3380 } 3381 mutex_exit(&stp->sd_pid_list_lock); 3382 } 3383 void 3384 sh_remove_pid(struct stdata *stp, proc_t *p) 3385 { 3386 pid_node_t *pn; 3387 3388 mutex_enter(&stp->sd_pid_list_lock); 3389 pn = list_head(&stp->sd_pid_list); 3390 while (pn != NULL && pn->pn_pid != p->p_pidp->pid_id) { 3391 pn = list_next(&stp->sd_pid_list, pn); 3392 } 3393 3394 if (pn != NULL) { 3395 if (pn->pn_count > 1) 3396 pn->pn_count--; 3397 else { 3398 list_remove(&stp->sd_pid_list, pn); 3399 kmem_free(pn, sizeof (*pn)); 3400 } 3401 } 3402 mutex_exit(&stp->sd_pid_list_lock); 3403 } 3404 3405 conn_pid_node_list_hdr_t * 3406 sh_get_pid_list(struct stdata *stp) 3407 { 3408 int sz, n = 0; 3409 pid_node_t *pn; 3410 conn_pid_node_t *cpn; 3411 conn_pid_node_list_hdr_t *cph; 3412 3413 mutex_enter(&stp->sd_pid_list_lock); 3414 3415 n = list_size(&stp->sd_pid_list); 3416 sz = sizeof (conn_pid_node_list_hdr_t); 3417 sz += (n > 1)?((n - 1) * sizeof (conn_pid_node_t)):0; 3418 3419 cph = kmem_zalloc(sz, KM_SLEEP); 3420 cph->cph_magic = CONN_PID_NODE_LIST_HDR_MAGIC; 3421 cph->cph_contents = CONN_PID_NODE_LIST_HDR_XTI; 3422 cph->cph_pn_cnt = n; 3423 cph->cph_tot_size = sz; 3424 cph->cph_flags = 0; 3425 cph->cph_optional1 = 0; 3426 cph->cph_optional2 = 0; 3427 3428 if (cph->cph_pn_cnt > 0) { 3429 cpn = cph->cph_cpns; 3430 pn = list_head(&stp->sd_pid_list); 3431 while (pn != NULL) { 3432 PIDNODE2CONNPIDNODE(pn, cpn); 3433 pn = list_next(&stp->sd_pid_list, pn); 3434 cpn++; 3435 } 3436 } 3437 3438 mutex_exit(&stp->sd_pid_list_lock); 3439 return (cph); 3440 } 3441 3442 /* 3443 * Allocate a pair of queues and a syncq for the pair 3444 */ 3445 queue_t * 3446 allocq(void) 3447 { 3448 queinfo_t *qip; 3449 queue_t *qp, *wqp; 3450 syncq_t *sq; 3451 3452 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3453 3454 qp = &qip->qu_rqueue; 3455 wqp = &qip->qu_wqueue; 3456 sq = &qip->qu_syncq; 3457 3458 qp->q_last = NULL; 3459 qp->q_next = NULL; 3460 
qp->q_ptr = NULL; 3461 qp->q_flag = QUSE | QREADR; 3462 qp->q_bandp = NULL; 3463 qp->q_stream = NULL; 3464 qp->q_syncq = sq; 3465 qp->q_nband = 0; 3466 qp->q_nfsrv = NULL; 3467 qp->q_draining = 0; 3468 qp->q_syncqmsgs = 0; 3469 qp->q_spri = 0; 3470 qp->q_qtstamp = 0; 3471 qp->q_sqtstamp = 0; 3472 qp->q_fp = NULL; 3473 3474 wqp->q_last = NULL; 3475 wqp->q_next = NULL; 3476 wqp->q_ptr = NULL; 3477 wqp->q_flag = QUSE; 3478 wqp->q_bandp = NULL; 3479 wqp->q_stream = NULL; 3480 wqp->q_syncq = sq; 3481 wqp->q_nband = 0; 3482 wqp->q_nfsrv = NULL; 3483 wqp->q_draining = 0; 3484 wqp->q_syncqmsgs = 0; 3485 wqp->q_qtstamp = 0; 3486 wqp->q_sqtstamp = 0; 3487 wqp->q_spri = 0; 3488 3489 sq->sq_count = 0; 3490 sq->sq_rmqcount = 0; 3491 sq->sq_flags = 0; 3492 sq->sq_type = 0; 3493 sq->sq_callbflags = 0; 3494 sq->sq_cancelid = 0; 3495 sq->sq_ciputctrl = NULL; 3496 sq->sq_nciputctrl = 0; 3497 sq->sq_needexcl = 0; 3498 sq->sq_svcflags = 0; 3499 3500 return (qp); 3501 } 3502 3503 /* 3504 * Free a pair of queues and the "attached" syncq. 3505 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3506 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3507 */ 3508 void 3509 freeq(queue_t *qp) 3510 { 3511 qband_t *qbp, *nqbp; 3512 syncq_t *sq, *outer; 3513 queue_t *wqp = _WR(qp); 3514 3515 ASSERT(qp->q_flag & QREADR); 3516 3517 /* 3518 * If a previously dispatched taskq job is scheduled to run 3519 * sync_service() or a service routine is scheduled for the 3520 * queues about to be freed, wait here until all service is 3521 * done on the queue and all associated queues and syncqs. 3522 */ 3523 wait_svc(qp); 3524 3525 (void) flush_syncq(qp->q_syncq, qp); 3526 (void) flush_syncq(wqp->q_syncq, wqp); 3527 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3528 3529 /* 3530 * Flush the queues before q_next is set to NULL This is needed 3531 * in order to backenable any downstream queue before we go away. 3532 * Note: we are already removed from the stream so that the 3533 * backenabling will not cause any messages to be delivered to our 3534 * put procedures. 3535 */ 3536 flushq(qp, FLUSHALL); 3537 flushq(wqp, FLUSHALL); 3538 3539 /* Tidy up - removeq only does a half-remove from stream */ 3540 qp->q_next = wqp->q_next = NULL; 3541 ASSERT(!(qp->q_flag & QENAB)); 3542 ASSERT(!(wqp->q_flag & QENAB)); 3543 3544 outer = qp->q_syncq->sq_outer; 3545 if (outer != NULL) { 3546 outer_remove(outer, qp->q_syncq); 3547 if (wqp->q_syncq != qp->q_syncq) 3548 outer_remove(outer, wqp->q_syncq); 3549 } 3550 /* 3551 * Free any syncqs that are outside what allocq returned. 
3552 */ 3553 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3554 free_syncq(qp->q_syncq); 3555 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3556 free_syncq(wqp->q_syncq); 3557 3558 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3559 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3560 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3561 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3562 sq = SQ(qp); 3563 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3564 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3565 ASSERT(sq->sq_outer == NULL); 3566 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3567 ASSERT(sq->sq_callbpend == NULL); 3568 ASSERT(sq->sq_needexcl == 0); 3569 3570 if (sq->sq_ciputctrl != NULL) { 3571 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3572 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3573 sq->sq_nciputctrl, 0); 3574 ASSERT(ciputctrl_cache != NULL); 3575 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3576 sq->sq_ciputctrl = NULL; 3577 sq->sq_nciputctrl = 0; 3578 } 3579 3580 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3581 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3582 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3583 3584 qp->q_flag &= ~QUSE; 3585 wqp->q_flag &= ~QUSE; 3586 3587 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3588 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3589 3590 qbp = qp->q_bandp; 3591 while (qbp) { 3592 nqbp = qbp->qb_next; 3593 freeband(qbp); 3594 qbp = nqbp; 3595 } 3596 qbp = wqp->q_bandp; 3597 while (qbp) { 3598 nqbp = qbp->qb_next; 3599 freeband(qbp); 3600 qbp = nqbp; 3601 } 3602 kmem_cache_free(queue_cache, qp); 3603 } 3604 3605 /* 3606 * Allocate a qband structure. 3607 */ 3608 qband_t * 3609 allocband(void) 3610 { 3611 qband_t *qbp; 3612 3613 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3614 if (qbp == NULL) 3615 return (NULL); 3616 3617 qbp->qb_next = NULL; 3618 qbp->qb_count = 0; 3619 qbp->qb_mblkcnt = 0; 3620 qbp->qb_first = NULL; 3621 qbp->qb_last = NULL; 3622 qbp->qb_flag = 0; 3623 3624 return (qbp); 3625 } 3626 3627 /* 3628 * Free a qband structure. 3629 */ 3630 void 3631 freeband(qband_t *qbp) 3632 { 3633 kmem_cache_free(qband_cache, qbp); 3634 } 3635 3636 /* 3637 * Just like putnextctl(9F), except that allocb_wait() is used. 3638 * 3639 * Consolidation Private, and of course only callable from the stream head or 3640 * routines that may block. 3641 */ 3642 int 3643 putnextctl_wait(queue_t *q, int type) 3644 { 3645 mblk_t *bp; 3646 int error; 3647 3648 if ((datamsg(type) && (type != M_DELAY)) || 3649 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3650 return (0); 3651 3652 bp->b_datap->db_type = (unsigned char)type; 3653 putnext(q, bp); 3654 return (1); 3655 } 3656 3657 /* 3658 * Run any possible bufcalls. 3659 */ 3660 void 3661 runbufcalls(void) 3662 { 3663 strbufcall_t *bcp; 3664 3665 mutex_enter(&bcall_monitor); 3666 mutex_enter(&strbcall_lock); 3667 3668 if (strbcalls.bc_head) { 3669 size_t count; 3670 int nevent; 3671 3672 /* 3673 * count how many events are on the list 3674 * now so we can check to avoid looping 3675 * in low memory situations 3676 */ 3677 nevent = 0; 3678 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3679 nevent++; 3680 3681 /* 3682 * get estimate of available memory from kmem_avail(). 3683 * awake all bufcall functions waiting for 3684 * memory whose request could be satisfied 3685 * by 'count' memory and let 'em fight for it. 
3686 */ 3687 count = kmem_avail(); 3688 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3689 STRSTAT(bufcalls); 3690 --nevent; 3691 if (bcp->bc_size <= count) { 3692 bcp->bc_executor = curthread; 3693 mutex_exit(&strbcall_lock); 3694 (*bcp->bc_func)(bcp->bc_arg); 3695 mutex_enter(&strbcall_lock); 3696 bcp->bc_executor = NULL; 3697 cv_broadcast(&bcall_cv); 3698 strbcalls.bc_head = bcp->bc_next; 3699 kmem_free(bcp, sizeof (strbufcall_t)); 3700 } else { 3701 /* 3702 * too big, try again later - note 3703 * that nevent was decremented above 3704 * so we won't retry this one on this 3705 * iteration of the loop 3706 */ 3707 if (bcp->bc_next != NULL) { 3708 strbcalls.bc_head = bcp->bc_next; 3709 bcp->bc_next = NULL; 3710 strbcalls.bc_tail->bc_next = bcp; 3711 strbcalls.bc_tail = bcp; 3712 } 3713 } 3714 } 3715 if (strbcalls.bc_head == NULL) 3716 strbcalls.bc_tail = NULL; 3717 } 3718 3719 mutex_exit(&strbcall_lock); 3720 mutex_exit(&bcall_monitor); 3721 } 3722 3723 3724 /* 3725 * Actually run queue's service routine. 3726 */ 3727 static void 3728 runservice(queue_t *q) 3729 { 3730 qband_t *qbp; 3731 3732 ASSERT(q->q_qinfo->qi_srvp); 3733 again: 3734 entersq(q->q_syncq, SQ_SVC); 3735 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3736 "runservice starts:%p", q); 3737 3738 if (!(q->q_flag & QWCLOSE)) 3739 (*q->q_qinfo->qi_srvp)(q); 3740 3741 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3742 "runservice ends:(%p)", q); 3743 3744 leavesq(q->q_syncq, SQ_SVC); 3745 3746 mutex_enter(QLOCK(q)); 3747 if (q->q_flag & QENAB) { 3748 q->q_flag &= ~QENAB; 3749 mutex_exit(QLOCK(q)); 3750 goto again; 3751 } 3752 q->q_flag &= ~QINSERVICE; 3753 q->q_flag &= ~QBACK; 3754 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3755 qbp->qb_flag &= ~QB_BACK; 3756 /* 3757 * Wakeup thread waiting for the service procedure 3758 * to be run (strclose and qdetach). 3759 */ 3760 cv_broadcast(&q->q_wait); 3761 3762 mutex_exit(QLOCK(q)); 3763 } 3764 3765 /* 3766 * Background processing of bufcalls. 3767 */ 3768 void 3769 streams_bufcall_service(void) 3770 { 3771 callb_cpr_t cprinfo; 3772 3773 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3774 "streams_bufcall_service"); 3775 3776 mutex_enter(&strbcall_lock); 3777 3778 for (;;) { 3779 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3780 mutex_exit(&strbcall_lock); 3781 runbufcalls(); 3782 mutex_enter(&strbcall_lock); 3783 } 3784 if (strbcalls.bc_head != NULL) { 3785 STRSTAT(bcwaits); 3786 /* Wait for memory to become available */ 3787 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3788 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock, 3789 SEC_TO_TICK(60), TR_CLOCK_TICK); 3790 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3791 } 3792 3793 /* Wait for new work to arrive */ 3794 if (strbcalls.bc_head == NULL) { 3795 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3796 cv_wait(&strbcall_cv, &strbcall_lock); 3797 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3798 } 3799 } 3800 } 3801 3802 /* 3803 * Background processing of streams background tasks which failed 3804 * taskq_dispatch. 3805 */ 3806 static void 3807 streams_qbkgrnd_service(void) 3808 { 3809 callb_cpr_t cprinfo; 3810 queue_t *q; 3811 3812 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3813 "streams_bkgrnd_service"); 3814 3815 mutex_enter(&service_queue); 3816 3817 for (;;) { 3818 /* 3819 * Wait for work to arrive. 
3820 */ 3821 while ((freebs_list == NULL) && (qhead == NULL)) { 3822 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3823 cv_wait(&services_to_run, &service_queue); 3824 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3825 } 3826 /* 3827 * Handle all pending freebs requests to free memory. 3828 */ 3829 while (freebs_list != NULL) { 3830 mblk_t *mp = freebs_list; 3831 freebs_list = mp->b_next; 3832 mutex_exit(&service_queue); 3833 mblk_free(mp); 3834 mutex_enter(&service_queue); 3835 } 3836 /* 3837 * Run pending queues. 3838 */ 3839 while (qhead != NULL) { 3840 DQ(q, qhead, qtail, q_link); 3841 ASSERT(q != NULL); 3842 mutex_exit(&service_queue); 3843 queue_service(q); 3844 mutex_enter(&service_queue); 3845 } 3846 ASSERT(qhead == NULL && qtail == NULL); 3847 } 3848 } 3849 3850 /* 3851 * Background processing of streams background tasks which failed 3852 * taskq_dispatch. 3853 */ 3854 static void 3855 streams_sqbkgrnd_service(void) 3856 { 3857 callb_cpr_t cprinfo; 3858 syncq_t *sq; 3859 3860 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3861 "streams_sqbkgrnd_service"); 3862 3863 mutex_enter(&service_queue); 3864 3865 for (;;) { 3866 /* 3867 * Wait for work to arrive. 3868 */ 3869 while (sqhead == NULL) { 3870 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3871 cv_wait(&syncqs_to_run, &service_queue); 3872 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3873 } 3874 3875 /* 3876 * Run pending syncqs. 3877 */ 3878 while (sqhead != NULL) { 3879 DQ(sq, sqhead, sqtail, sq_next); 3880 ASSERT(sq != NULL); 3881 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3882 mutex_exit(&service_queue); 3883 syncq_service(sq); 3884 mutex_enter(&service_queue); 3885 } 3886 } 3887 } 3888 3889 /* 3890 * Disable the syncq and wait for background syncq processing to complete. 3891 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3892 * list. 3893 */ 3894 void 3895 wait_sq_svc(syncq_t *sq) 3896 { 3897 mutex_enter(SQLOCK(sq)); 3898 sq->sq_svcflags |= SQ_DISABLED; 3899 if (sq->sq_svcflags & SQ_BGTHREAD) { 3900 syncq_t *sq_chase; 3901 syncq_t *sq_curr; 3902 int removed; 3903 3904 ASSERT(sq->sq_servcount == 1); 3905 mutex_enter(&service_queue); 3906 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3907 mutex_exit(&service_queue); 3908 if (removed) { 3909 sq->sq_svcflags &= ~SQ_BGTHREAD; 3910 sq->sq_servcount = 0; 3911 STRSTAT(sqremoved); 3912 goto done; 3913 } 3914 } 3915 while (sq->sq_servcount != 0) { 3916 sq->sq_flags |= SQ_WANTWAKEUP; 3917 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3918 } 3919 done: 3920 mutex_exit(SQLOCK(sq)); 3921 } 3922 3923 /* 3924 * Put a syncq on the list of syncq's to be serviced by the sqthread. 3925 * Add the argument to the end of the sqhead list and set the flag 3926 * indicating this syncq has been enabled. If it has already been 3927 * enabled, don't do anything. 3928 * This routine assumes that SQLOCK is held. 3929 * NOTE that the lock order is to have the SQLOCK first, 3930 * so if the service_syncq lock is held, we need to release it 3931 * before acquiring the SQLOCK (mostly relevant for the background 3932 * thread, and this seems to be common among the STREAMS global locks). 3933 * Note that the sq_svcflags are protected by the SQLOCK. 3934 */ 3935 void 3936 sqenable(syncq_t *sq) 3937 { 3938 /* 3939 * This is probably not important except for where I believe it 3940 * is being called. At that point, it should be held (and it 3941 * is a pain to release it just for this routine, so don't do 3942 * it). 
3943 */ 3944 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3945 3946 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3947 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3948 3949 /* 3950 * Do not put on list if background thread is scheduled or 3951 * syncq is disabled. 3952 */ 3953 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3954 return; 3955 3956 /* 3957 * Check whether we should enable sq at all. 3958 * Non PERMOD syncqs may be drained by at most one thread. 3959 * PERMOD syncqs may be drained by several threads but we limit the 3960 * total amount to the lesser of 3961 * Number of queues on the squeue and 3962 * Number of CPUs. 3963 */ 3964 if (sq->sq_servcount != 0) { 3965 if (((sq->sq_type & SQ_PERMOD) == 0) || 3966 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3967 STRSTAT(sqtoomany); 3968 return; 3969 } 3970 } 3971 3972 sq->sq_tstamp = ddi_get_lbolt(); 3973 STRSTAT(sqenables); 3974 3975 /* Attempt a taskq dispatch */ 3976 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3977 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3978 if (sq->sq_servid != NULL) { 3979 sq->sq_servcount++; 3980 return; 3981 } 3982 3983 /* 3984 * This taskq dispatch failed, but a previous one may have succeeded. 3985 * Don't try to schedule on the background thread whilst there is 3986 * outstanding taskq processing. 3987 */ 3988 if (sq->sq_servcount != 0) 3989 return; 3990 3991 /* 3992 * System is low on resources and can't perform a non-sleeping 3993 * dispatch. Schedule the syncq for a background thread and mark the 3994 * syncq to avoid any further taskq dispatch attempts. 3995 */ 3996 mutex_enter(&service_queue); 3997 STRSTAT(taskqfails); 3998 ENQUEUE(sq, sqhead, sqtail, sq_next); 3999 sq->sq_svcflags |= SQ_BGTHREAD; 4000 sq->sq_servcount = 1; 4001 cv_signal(&syncqs_to_run); 4002 mutex_exit(&service_queue); 4003 } 4004 4005 /* 4006 * Note: fifo_close() depends on the mblk_t on the queue being freed 4007 * asynchronously. The asynchronous freeing of messages breaks the 4008 * recursive call chain of fifo_close() while there are I_SENDFD type of 4009 * messages referring to other file pointers on the queue. Then when 4010 * closing pipes it can avoid stack overflow in case of daisy-chained 4011 * pipes, and also avoid deadlock in case of fifonode_t pairs (which 4012 * share the same fifolock_t). 4013 * 4014 * No need to kpreempt_disable to access cpu_seqid. If we migrate and 4015 * the esb queue does not match the new CPU, that is OK. 4016 */ 4017 void 4018 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 4019 { 4020 int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q; 4021 esb_queue_t *eqp; 4022 4023 ASSERT(dbp->db_mblk == mp); 4024 ASSERT(qindex < esbq_nelem); 4025 4026 eqp = system_esbq_array; 4027 if (eqp != NULL) { 4028 eqp += qindex; 4029 } else { 4030 mutex_enter(&esbq_lock); 4031 if (kmem_ready && system_esbq_array == NULL) 4032 system_esbq_array = (esb_queue_t *)kmem_zalloc( 4033 esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP); 4034 mutex_exit(&esbq_lock); 4035 eqp = system_esbq_array; 4036 if (eqp != NULL) 4037 eqp += qindex; 4038 else 4039 eqp = &system_esbq; 4040 } 4041 4042 /* 4043 * Check data sanity. The dblock should have non-empty free function. 4044 * It is better to panic here then later when the dblock is freed 4045 * asynchronously when the context is lost. 
4046 */ 4047 if (dbp->db_frtnp->free_func == NULL) { 4048 panic("freebs_enqueue: dblock %p has a NULL free callback", 4049 (void *)dbp); 4050 } 4051 4052 mutex_enter(&eqp->eq_lock); 4053 /* queue the new mblk on the esballoc queue */ 4054 if (eqp->eq_head == NULL) { 4055 eqp->eq_head = eqp->eq_tail = mp; 4056 } else { 4057 eqp->eq_tail->b_next = mp; 4058 eqp->eq_tail = mp; 4059 } 4060 eqp->eq_len++; 4061 4062 /* If we're the first thread to reach the threshold, process */ 4063 if (eqp->eq_len >= esbq_max_qlen && 4064 !(eqp->eq_flags & ESBQ_PROCESSING)) 4065 esballoc_process_queue(eqp); 4066 4067 esballoc_set_timer(eqp, esbq_timeout); 4068 mutex_exit(&eqp->eq_lock); 4069 } 4070 4071 static void 4072 esballoc_process_queue(esb_queue_t *eqp) 4073 { 4074 mblk_t *mp; 4075 4076 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4077 4078 eqp->eq_flags |= ESBQ_PROCESSING; 4079 4080 do { 4081 /* 4082 * Detach the message chain for processing. 4083 */ 4084 mp = eqp->eq_head; 4085 eqp->eq_tail->b_next = NULL; 4086 eqp->eq_head = eqp->eq_tail = NULL; 4087 eqp->eq_len = 0; 4088 mutex_exit(&eqp->eq_lock); 4089 4090 /* 4091 * Process the message chain. 4092 */ 4093 esballoc_enqueue_mblk(mp); 4094 mutex_enter(&eqp->eq_lock); 4095 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0)); 4096 4097 eqp->eq_flags &= ~ESBQ_PROCESSING; 4098 } 4099 4100 /* 4101 * taskq callback routine to free esballoced mblk's 4102 */ 4103 static void 4104 esballoc_mblk_free(mblk_t *mp) 4105 { 4106 mblk_t *nextmp; 4107 4108 for (; mp != NULL; mp = nextmp) { 4109 nextmp = mp->b_next; 4110 mp->b_next = NULL; 4111 mblk_free(mp); 4112 } 4113 } 4114 4115 static void 4116 esballoc_enqueue_mblk(mblk_t *mp) 4117 { 4118 4119 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp, 4120 TQ_NOSLEEP) == NULL) { 4121 mblk_t *first_mp = mp; 4122 /* 4123 * System is low on resources and can't perform a non-sleeping 4124 * dispatch. Schedule for a background thread. 4125 */ 4126 mutex_enter(&service_queue); 4127 STRSTAT(taskqfails); 4128 4129 while (mp->b_next != NULL) 4130 mp = mp->b_next; 4131 4132 mp->b_next = freebs_list; 4133 freebs_list = first_mp; 4134 cv_signal(&services_to_run); 4135 mutex_exit(&service_queue); 4136 } 4137 } 4138 4139 static void 4140 esballoc_timer(void *arg) 4141 { 4142 esb_queue_t *eqp = arg; 4143 4144 mutex_enter(&eqp->eq_lock); 4145 eqp->eq_flags &= ~ESBQ_TIMER; 4146 4147 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4148 eqp->eq_len > 0) 4149 esballoc_process_queue(eqp); 4150 4151 esballoc_set_timer(eqp, esbq_timeout); 4152 mutex_exit(&eqp->eq_lock); 4153 } 4154 4155 static void 4156 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4157 { 4158 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4159 4160 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4161 (void) timeout(esballoc_timer, eqp, eq_timeout); 4162 eqp->eq_flags |= ESBQ_TIMER; 4163 } 4164 } 4165 4166 /* 4167 * Setup esbq array length based upon NCPU scaled by CPUs per 4168 * queue. Use static system_esbq until kmem_ready and we can 4169 * create an array in freebs_enqueue(). 4170 */ 4171 void 4172 esballoc_queue_init(void) 4173 { 4174 esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1); 4175 esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q; 4176 esbq_nelem = howmany(NCPU, esbq_cpus_per_q); 4177 system_esbq.eq_len = 0; 4178 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4179 system_esbq.eq_flags = 0; 4180 } 4181 4182 /* 4183 * Set the QBACK or QB_BACK flag in the given queue for 4184 * the given priority band. 
4185 */ 4186 void 4187 setqback(queue_t *q, unsigned char pri) 4188 { 4189 int i; 4190 qband_t *qbp; 4191 qband_t **qbpp; 4192 4193 ASSERT(MUTEX_HELD(QLOCK(q))); 4194 if (pri != 0) { 4195 if (pri > q->q_nband) { 4196 qbpp = &q->q_bandp; 4197 while (*qbpp) 4198 qbpp = &(*qbpp)->qb_next; 4199 while (pri > q->q_nband) { 4200 if ((*qbpp = allocband()) == NULL) { 4201 cmn_err(CE_WARN, 4202 "setqback: can't allocate qband\n"); 4203 return; 4204 } 4205 (*qbpp)->qb_hiwat = q->q_hiwat; 4206 (*qbpp)->qb_lowat = q->q_lowat; 4207 q->q_nband++; 4208 qbpp = &(*qbpp)->qb_next; 4209 } 4210 } 4211 qbp = q->q_bandp; 4212 i = pri; 4213 while (--i) 4214 qbp = qbp->qb_next; 4215 qbp->qb_flag |= QB_BACK; 4216 } else { 4217 q->q_flag |= QBACK; 4218 } 4219 } 4220 4221 int 4222 strcopyin(void *from, void *to, size_t len, int copyflag) 4223 { 4224 if (copyflag & U_TO_K) { 4225 ASSERT((copyflag & K_TO_K) == 0); 4226 if (copyin(from, to, len)) 4227 return (EFAULT); 4228 } else { 4229 ASSERT(copyflag & K_TO_K); 4230 bcopy(from, to, len); 4231 } 4232 return (0); 4233 } 4234 4235 int 4236 strcopyout(void *from, void *to, size_t len, int copyflag) 4237 { 4238 if (copyflag & U_TO_K) { 4239 if (copyout(from, to, len)) 4240 return (EFAULT); 4241 } else { 4242 ASSERT(copyflag & K_TO_K); 4243 bcopy(from, to, len); 4244 } 4245 return (0); 4246 } 4247 4248 /* 4249 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4250 * It assumes that the stream head lock is already held, whereas strsignal() 4251 * acquires the lock first. This routine was created because a few callers 4252 * release the stream head lock before calling only to re-acquire it after 4253 * it returns. 4254 */ 4255 void 4256 strsignal_nolock(stdata_t *stp, int sig, uchar_t band) 4257 { 4258 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4259 switch (sig) { 4260 case SIGPOLL: 4261 if (stp->sd_sigflags & S_MSG) 4262 strsendsig(stp->sd_siglist, S_MSG, band, 0); 4263 break; 4264 default: 4265 if (stp->sd_pgidp) 4266 pgsignal(stp->sd_pgidp, sig); 4267 break; 4268 } 4269 } 4270 4271 void 4272 strsignal(stdata_t *stp, int sig, int32_t band) 4273 { 4274 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4275 "strsignal:%p, %X, %X", stp, sig, band); 4276 4277 mutex_enter(&stp->sd_lock); 4278 switch (sig) { 4279 case SIGPOLL: 4280 if (stp->sd_sigflags & S_MSG) 4281 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4282 break; 4283 4284 default: 4285 if (stp->sd_pgidp) { 4286 pgsignal(stp->sd_pgidp, sig); 4287 } 4288 break; 4289 } 4290 mutex_exit(&stp->sd_lock); 4291 } 4292 4293 void 4294 strhup(stdata_t *stp) 4295 { 4296 ASSERT(mutex_owned(&stp->sd_lock)); 4297 pollwakeup(&stp->sd_pollist, POLLHUP); 4298 if (stp->sd_sigflags & S_HANGUP) 4299 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4300 } 4301 4302 /* 4303 * Backenable the first queue upstream from `q' with a service procedure. 4304 */ 4305 void 4306 backenable(queue_t *q, uchar_t pri) 4307 { 4308 queue_t *nq; 4309 4310 /* 4311 * Our presence might not prevent other modules in our own 4312 * stream from popping/pushing since the caller of getq might not 4313 * have a claim on the queue (some drivers do a getq on somebody 4314 * else's queue - they know that the queue itself is not going away 4315 * but the framework has to guarantee q_next in that stream). 
4316 */ 4317 claimstr(q); 4318 4319 /* Find nearest back queue with service proc */ 4320 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4321 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4322 } 4323 4324 if (nq) { 4325 kthread_t *freezer; 4326 /* 4327 * backenable can be called either with no locks held 4328 * or with the stream frozen (the latter occurs when a module 4329 * calls rmvq with the stream frozen). If the stream is frozen 4330 * by the caller the caller will hold all qlocks in the stream. 4331 * Note that a frozen stream doesn't freeze a mated stream, 4332 * so we explicitly check for that. 4333 */ 4334 freezer = STREAM(q)->sd_freezer; 4335 if (freezer != curthread || STREAM(q) != STREAM(nq)) { 4336 mutex_enter(QLOCK(nq)); 4337 } 4338 #ifdef DEBUG 4339 else { 4340 ASSERT(frozenstr(q)); 4341 ASSERT(MUTEX_HELD(QLOCK(q))); 4342 ASSERT(MUTEX_HELD(QLOCK(nq))); 4343 } 4344 #endif 4345 setqback(nq, pri); 4346 qenable_locked(nq); 4347 if (freezer != curthread || STREAM(q) != STREAM(nq)) 4348 mutex_exit(QLOCK(nq)); 4349 } 4350 releasestr(q); 4351 } 4352 4353 /* 4354 * Return the appropriate errno when one of flags_to_check is set 4355 * in sd_flags. Uses the exported error routines if they are set. 4356 * Will return 0 if non error is set (or if the exported error routines 4357 * do not return an error). 4358 * 4359 * If there is both a read and write error to check, we prefer the read error. 4360 * Also, give preference to recorded errno's over the error functions. 4361 * The flags that are handled are: 4362 * STPLEX return EINVAL 4363 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4364 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4365 * STRHUP return sd_werror 4366 * 4367 * If the caller indicates that the operation is a peek, a nonpersistent error 4368 * is not cleared. 4369 */ 4370 int 4371 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4372 { 4373 int32_t sd_flag = stp->sd_flag & flags_to_check; 4374 int error = 0; 4375 4376 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4377 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4378 if (sd_flag & STPLEX) 4379 error = EINVAL; 4380 else if (sd_flag & STRDERR) { 4381 error = stp->sd_rerror; 4382 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4383 /* 4384 * Read errors are non-persistent i.e. discarded once 4385 * returned to a non-peeking caller, 4386 */ 4387 stp->sd_rerror = 0; 4388 stp->sd_flag &= ~STRDERR; 4389 } 4390 if (error == 0 && stp->sd_rderrfunc != NULL) { 4391 int clearerr = 0; 4392 4393 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4394 &clearerr); 4395 if (clearerr) { 4396 stp->sd_flag &= ~STRDERR; 4397 stp->sd_rderrfunc = NULL; 4398 } 4399 } 4400 } else if (sd_flag & STWRERR) { 4401 error = stp->sd_werror; 4402 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4403 /* 4404 * Write errors are non-persistent i.e. 
			 * discarded once returned to a non-peeking caller.
			 */
			stp->sd_werror = 0;
			stp->sd_flag &= ~STWRERR;
		}
		if (error == 0 && stp->sd_wrerrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STWRERR;
				stp->sd_wrerrfunc = NULL;
			}
		}
	} else if (sd_flag & STRHUP) {
		/* sd_werror is set when STRHUP is set */
		error = stp->sd_werror;
	}
	return (error);
}


/*
 * Single-thread open/close/push/pop, also for twisted streams.
 */
int
strstartplumb(stdata_t *stp, int flag, int cmd)
{
	int waited = 1;
	int error = 0;

	if (STRMATED(stp)) {
		struct stdata *stmatep = stp->sd_mate;

		STRLOCKMATES(stp);
		while (waited) {
			waited = 0;
			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stp->sd_lock);
				if (!cv_wait_sig(&stmatep->sd_monitor,
				    &stmatep->sd_lock)) {
					mutex_exit(&stmatep->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stmatep->sd_lock);
				STRLOCKMATES(stp);
			}
			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stmatep->sd_lock);
				if (!cv_wait_sig(&stp->sd_monitor,
				    &stp->sd_lock)) {
					mutex_exit(&stp->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stp->sd_lock);
				STRLOCKMATES(stp);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					STRUNLOCKMATES(stp);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		STRUNLOCKMATES(stp);
	} else {
		mutex_enter(&stp->sd_lock);
		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
			    (flag & (FNDELAY|FNONBLOCK))) {
				mutex_exit(&stp->sd_lock);
				return (EAGAIN);
			}
			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
				mutex_exit(&stp->sd_lock);
				return (EINTR);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					mutex_exit(&stp->sd_lock);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		mutex_exit(&stp->sd_lock);
	}
	return (0);
}

/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT(stp->sd_flag & STRPLUMB);
	stp->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stp->sd_monitor);
}

/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, both open
 * and close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 *  - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *    associated with the queues. For timeout and bufcall callbacks the
 *    module/driver also has to wait for any callbacks that are already
 *    in progress.
 *  - If the module/driver is using esballoc it has to ensure that any
 *    esballoc free functions do not refer to a queue that has closed.
 *    (Note that in general the close routine cannot wait for the esballoc'ed
 *    messages to be freed since that can cause a deadlock.)
 *  - Cancelling any interrupts that refer to the closing queues and
 *    also ensuring that there are no interrupts in progress that will
 *    refer to the closing queues once the close routine returns.
 *  - For multiplexors, removing any driver global state that refers to
 *    the closing queue and also ensuring that there are no threads in
 *    the multiplexor that have picked up a queue pointer but not yet
 *    finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream. Thus it cannot reference the q_next pointer in an interrupt
 * routine or in a timeout, bufcall or esballoc callback routine. Likewise
 * it cannot reference the q_next of a different queue, e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases where the driver/module cannot access the q_next
 * field it must use the *next* versions, e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 *
 * To provide the qprocsoff(9F) guarantees, the STREAMS framework has to
 * ensure that after qprocsoff returns no threads can enter the put or
 * service routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
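 *
 * For example (sketch, with `lwq' a hypothetical lower write queue of a
 * mux): the upper read service procedure should forward a message with
 * canputnext(lwq) and putnext(lwq, mp) rather than canput(lwq->q_next)
 * and put(lwq->q_next, mp), letting the framework pin q_next for the
 * duration of the call.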
 *
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 *  - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *    entering the service procedures.
 *  - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *    canputnext, backenable etc. from dereferencing the q_next that will
 *    soon change.
 *  - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *    or other q_next walker that uses claimstr/releasestr to finish.
 *  - optionally, for every syncq in the stream, strlock acquires all the
 *    SQLOCKs and waits for all sq_counts to drop to a value that indicates
 *    that no thread executes in the put or service procedures and that no
 *    thread is draining into the module/driver. This ensures that no
 *    open, close, put, service, or qtimeout/qbufcall callback procedure is
 *    currently executing, hence no such thread can end up with the old stale
 *    q_next value and no canput/backenable can have the old stale
 *    q_nfsrv/q_next.
 *  - qdetach (wait_svc) makes sure that any scheduled or running threads
 *    have either finished or observed the QWCLOSE flag and gone away.
 */


/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream. If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq, and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated. Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on a different end of the pipe, will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors:
 *  - If sqlist is non-NULL, all the syncq locks in the sqlist will be acquired
 *    and held on exit and all sq_counts are at an acceptable level.
 *  - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *    sd_refcnt being zero.
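 *
 * Callers pair strlock with strunlock; removeq, for instance, does
 * (in sketch form):
 *
 *	sqlist = sqlist_build(qp, stp, STRMATED(stp));
 *	strlock(stp, sqlist);
 *	... change q_next and related pointers ...
 *	strunlock(stp, sqlist);
 *	sqlist_free(sqlist);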
4653 */ 4654 4655 static void 4656 strlock(struct stdata *stp, sqlist_t *sqlist) 4657 { 4658 syncql_t *sql, *sql2; 4659 retry: 4660 /* 4661 * Wait for any claimstr to go away. 4662 */ 4663 if (STRMATED(stp)) { 4664 struct stdata *stp1, *stp2; 4665 4666 STRLOCKMATES(stp); 4667 /* 4668 * Note that the selection of locking order is not 4669 * important, just that they are always acquired in 4670 * the same order. To assure this, we choose this 4671 * order based on the value of the pointer, and since 4672 * the pointer will not change for the life of this 4673 * pair, we will always grab the locks in the same 4674 * order (and hence, prevent deadlocks). 4675 */ 4676 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4677 stp1 = stp; 4678 stp2 = stp->sd_mate; 4679 } else { 4680 stp2 = stp; 4681 stp1 = stp->sd_mate; 4682 } 4683 mutex_enter(&stp1->sd_reflock); 4684 if (stp1->sd_refcnt > 0) { 4685 STRUNLOCKMATES(stp); 4686 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4687 mutex_exit(&stp1->sd_reflock); 4688 goto retry; 4689 } 4690 mutex_enter(&stp2->sd_reflock); 4691 if (stp2->sd_refcnt > 0) { 4692 STRUNLOCKMATES(stp); 4693 mutex_exit(&stp1->sd_reflock); 4694 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4695 mutex_exit(&stp2->sd_reflock); 4696 goto retry; 4697 } 4698 STREAM_PUTLOCKS_ENTER(stp1); 4699 STREAM_PUTLOCKS_ENTER(stp2); 4700 } else { 4701 mutex_enter(&stp->sd_lock); 4702 mutex_enter(&stp->sd_reflock); 4703 while (stp->sd_refcnt > 0) { 4704 mutex_exit(&stp->sd_lock); 4705 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4706 if (mutex_tryenter(&stp->sd_lock) == 0) { 4707 mutex_exit(&stp->sd_reflock); 4708 mutex_enter(&stp->sd_lock); 4709 mutex_enter(&stp->sd_reflock); 4710 } 4711 } 4712 STREAM_PUTLOCKS_ENTER(stp); 4713 } 4714 4715 if (sqlist == NULL) 4716 return; 4717 4718 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4719 syncq_t *sq = sql->sql_sq; 4720 uint16_t count; 4721 4722 mutex_enter(SQLOCK(sq)); 4723 count = sq->sq_count; 4724 ASSERT(sq->sq_rmqcount <= count); 4725 SQ_PUTLOCKS_ENTER(sq); 4726 SUM_SQ_PUTCOUNTS(sq, count); 4727 if (count == sq->sq_rmqcount) 4728 continue; 4729 4730 /* Failed - drop all locks that we have acquired so far */ 4731 if (STRMATED(stp)) { 4732 STREAM_PUTLOCKS_EXIT(stp); 4733 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4734 STRUNLOCKMATES(stp); 4735 mutex_exit(&stp->sd_reflock); 4736 mutex_exit(&stp->sd_mate->sd_reflock); 4737 } else { 4738 STREAM_PUTLOCKS_EXIT(stp); 4739 mutex_exit(&stp->sd_lock); 4740 mutex_exit(&stp->sd_reflock); 4741 } 4742 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4743 sql2 = sql2->sql_next) { 4744 SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4745 mutex_exit(SQLOCK(sql2->sql_sq)); 4746 } 4747 4748 /* 4749 * The wait loop below may starve when there are many threads 4750 * claiming the syncq. This is especially a problem with permod 4751 * syncqs (IP). To lessen the impact of the problem we increment 4752 * sq_needexcl and clear fastbits so that putnexts will slow 4753 * down and call sqenable instead of draining right away. 
4754 */ 4755 sq->sq_needexcl++; 4756 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4757 while (count > sq->sq_rmqcount) { 4758 sq->sq_flags |= SQ_WANTWAKEUP; 4759 SQ_PUTLOCKS_EXIT(sq); 4760 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4761 count = sq->sq_count; 4762 SQ_PUTLOCKS_ENTER(sq); 4763 SUM_SQ_PUTCOUNTS(sq, count); 4764 } 4765 sq->sq_needexcl--; 4766 if (sq->sq_needexcl == 0) 4767 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4768 SQ_PUTLOCKS_EXIT(sq); 4769 ASSERT(count == sq->sq_rmqcount); 4770 mutex_exit(SQLOCK(sq)); 4771 goto retry; 4772 } 4773 } 4774 4775 /* 4776 * Drop all the locks that strlock acquired. 4777 */ 4778 static void 4779 strunlock(struct stdata *stp, sqlist_t *sqlist) 4780 { 4781 syncql_t *sql; 4782 4783 if (STRMATED(stp)) { 4784 STREAM_PUTLOCKS_EXIT(stp); 4785 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4786 STRUNLOCKMATES(stp); 4787 mutex_exit(&stp->sd_reflock); 4788 mutex_exit(&stp->sd_mate->sd_reflock); 4789 } else { 4790 STREAM_PUTLOCKS_EXIT(stp); 4791 mutex_exit(&stp->sd_lock); 4792 mutex_exit(&stp->sd_reflock); 4793 } 4794 4795 if (sqlist == NULL) 4796 return; 4797 4798 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4799 SQ_PUTLOCKS_EXIT(sql->sql_sq); 4800 mutex_exit(SQLOCK(sql->sql_sq)); 4801 } 4802 } 4803 4804 /* 4805 * When the module has service procedure, we need check if the next 4806 * module which has service procedure is in flow control to trigger 4807 * the backenable. 4808 */ 4809 static void 4810 backenable_insertedq(queue_t *q) 4811 { 4812 qband_t *qbp; 4813 4814 claimstr(q); 4815 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) { 4816 if (q->q_next->q_nfsrv->q_flag & QWANTW) 4817 backenable(q, 0); 4818 4819 qbp = q->q_next->q_nfsrv->q_bandp; 4820 for (; qbp != NULL; qbp = qbp->qb_next) 4821 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL) 4822 backenable(q, qbp->qb_first->b_band); 4823 } 4824 releasestr(q); 4825 } 4826 4827 /* 4828 * Given two read queues, insert a new single one after another. 4829 * 4830 * This routine acquires all the necessary locks in order to change 4831 * q_next and related pointer using strlock(). 4832 * It depends on the stream head ensuring that there are no concurrent 4833 * insertq or removeq on the same stream. The stream head ensures this 4834 * using the flags STWOPEN, STRCLOSE, and STRPLUMB. 4835 * 4836 * Note that no syncq locks are held during the q_next change. This is 4837 * applied to all streams since, unlike removeq, there is no problem of stale 4838 * pointers when adding a module to the stream. Thus drivers/modules that do a 4839 * canput(rq->q_next) would never get a closed/freed queue pointer even if we 4840 * applied this optimization to all streams. 4841 */ 4842 void 4843 insertq(struct stdata *stp, queue_t *new) 4844 { 4845 queue_t *after; 4846 queue_t *wafter; 4847 queue_t *wnew = _WR(new); 4848 boolean_t have_fifo = B_FALSE; 4849 4850 if (new->q_flag & _QINSERTING) { 4851 ASSERT(stp->sd_vnode->v_type != VFIFO); 4852 after = new->q_next; 4853 wafter = _WR(new->q_next); 4854 } else { 4855 after = _RD(stp->sd_wrq); 4856 wafter = stp->sd_wrq; 4857 } 4858 4859 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4860 "insertq:%p, %p", after, new); 4861 ASSERT(after->q_flag & QREADR); 4862 ASSERT(new->q_flag & QREADR); 4863 4864 strlock(stp, NULL); 4865 4866 /* Do we have a FIFO? 
 */
	if (wafter->q_next == after) {
		have_fifo = B_TRUE;
		wnew->q_next = new;
	} else {
		wnew->q_next = wafter->q_next;
	}
	new->q_next = after;

	set_nfsrv_ptr(new, wnew, after, wafter);
	/*
	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
	 * so only reset this flag after calling it.
	 */
	new->q_flag &= ~_QINSERTING;

	if (have_fifo) {
		wafter->q_next = wnew;
	} else {
		if (wafter->q_next)
			_OTHERQ(wafter->q_next)->q_next = new;
		wafter->q_next = wnew;
	}

	set_qend(new);
	/* The QEND flag might have to be updated for the upstream neighbor */
	set_qend(after);

	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
	strsetuio(stp);

	/*
	 * If this was a module insertion, bump the push count.
	 */
	if (!(new->q_flag & QISDRV))
		stp->sd_pushcnt++;

	strunlock(stp, NULL);

	/* Check if the write queue needs backenabling. */
	backenable_insertedq(wnew);

	/* Check if the read queue needs backenabling. */
	backenable_insertedq(new);
}

/*
 * Given a read queue, unlink it from any neighbors.
 *
 * This routine acquires all the necessary locks in order to
 * change q_next and related pointers and also guard against
 * stale references (e.g. through q_next) to the queue that
 * is being removed. It also plays a part in ensuring
 * that the module's/driver's put procedure doesn't get called
 * after qprocsoff returns.
 *
 * Removeq depends on the stream head ensuring that there are
 * no concurrent insertq or removeq on the same stream. The
 * stream head ensures this using the flags STWOPEN, STRCLOSE and
 * STRPLUMB.
 *
 * The set of locks needed to remove the queue is different in
 * different cases:
 *
 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
 * waiting for the syncq reference count to drop to 0, indicating that no
 * non-close threads are present anywhere in the stream. This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
 *
 * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that there are either no threads executing inside the
 * perimeter or only a thread calling qprocsoff().
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
 * up any threads waiting in strlock() when the sq_rmqcount increases.
 */

void
removeq(queue_t *qp)
{
	queue_t *wqp = _WR(qp);
	struct stdata *stp = STREAM(qp);
	sqlist_t *sqlist = NULL;
	boolean_t isdriver;
	int moved;
	syncq_t *sq = qp->q_syncq;
	syncq_t *wsq = wqp->q_syncq;

	ASSERT(stp);

	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
	    "removeq:%p %p", qp, wqp);
	ASSERT(qp->q_flag & QREADR);

	/*
	 * For queues using synchronous streams, we must wait for all threads
	 * in rwnext() to drain out before proceeding.
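	 * Such threads are counted in q_rwcnt; the loops below set
	 * QWANTRMQSYNC and block on q_wait until the count drains to zero.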
4968 */ 4969 if (qp->q_flag & QSYNCSTR) { 4970 /* First, we need wakeup any threads blocked in rwnext() */ 4971 mutex_enter(SQLOCK(sq)); 4972 if (sq->sq_flags & SQ_WANTWAKEUP) { 4973 sq->sq_flags &= ~SQ_WANTWAKEUP; 4974 cv_broadcast(&sq->sq_wait); 4975 } 4976 mutex_exit(SQLOCK(sq)); 4977 4978 if (wsq != sq) { 4979 mutex_enter(SQLOCK(wsq)); 4980 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4981 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4982 cv_broadcast(&wsq->sq_wait); 4983 } 4984 mutex_exit(SQLOCK(wsq)); 4985 } 4986 4987 mutex_enter(QLOCK(qp)); 4988 while (qp->q_rwcnt > 0) { 4989 qp->q_flag |= QWANTRMQSYNC; 4990 cv_wait(&qp->q_wait, QLOCK(qp)); 4991 } 4992 mutex_exit(QLOCK(qp)); 4993 4994 mutex_enter(QLOCK(wqp)); 4995 while (wqp->q_rwcnt > 0) { 4996 wqp->q_flag |= QWANTRMQSYNC; 4997 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4998 } 4999 mutex_exit(QLOCK(wqp)); 5000 } 5001 5002 mutex_enter(SQLOCK(sq)); 5003 sq->sq_rmqcount++; 5004 if (sq->sq_flags & SQ_WANTWAKEUP) { 5005 sq->sq_flags &= ~SQ_WANTWAKEUP; 5006 cv_broadcast(&sq->sq_wait); 5007 } 5008 mutex_exit(SQLOCK(sq)); 5009 5010 isdriver = (qp->q_flag & QISDRV); 5011 5012 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 5013 strlock(stp, sqlist); 5014 5015 reset_nfsrv_ptr(qp, wqp); 5016 5017 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 5018 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 5019 /* Do we have a FIFO? */ 5020 if (wqp->q_next == qp) { 5021 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 5022 } else { 5023 if (wqp->q_next) 5024 backq(qp)->q_next = qp->q_next; 5025 if (qp->q_next) 5026 backq(wqp)->q_next = wqp->q_next; 5027 } 5028 5029 /* The QEND flag might have to be updated for the upstream guy */ 5030 if (qp->q_next) 5031 set_qend(qp->q_next); 5032 5033 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 5034 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 5035 5036 /* 5037 * Move any messages destined for the put procedures to the next 5038 * syncq in line. Otherwise free them. 5039 */ 5040 moved = 0; 5041 /* 5042 * Quick check to see whether there are any messages or events. 5043 */ 5044 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 5045 moved += propagate_syncq(qp); 5046 if (wqp->q_syncqmsgs != 0 || 5047 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 5048 moved += propagate_syncq(wqp); 5049 5050 strsetuio(stp); 5051 5052 /* 5053 * If this was a module removal, decrement the push count. 5054 */ 5055 if (!isdriver) 5056 stp->sd_pushcnt--; 5057 5058 strunlock(stp, sqlist); 5059 sqlist_free(sqlist); 5060 5061 /* 5062 * Make sure any messages that were propagated are drained. 5063 * Also clear any QFULL bit caused by messages that were propagated. 5064 */ 5065 5066 if (qp->q_next != NULL) { 5067 clr_qfull(qp); 5068 /* 5069 * For the driver calling qprocsoff, propagate_syncq 5070 * frees all the messages instead of putting it in 5071 * the stream head 5072 */ 5073 if (!isdriver && (moved > 0)) 5074 emptysq(qp->q_next->q_syncq); 5075 } 5076 if (wqp->q_next != NULL) { 5077 clr_qfull(wqp); 5078 /* 5079 * We come here for any pop of a module except for the 5080 * case of driver being removed. We don't call emptysq 5081 * if we did not move any messages. This will avoid holding 5082 * PERMOD syncq locks in emptysq 5083 */ 5084 if (moved > 0) 5085 emptysq(wqp->q_next->q_syncq); 5086 } 5087 5088 mutex_enter(SQLOCK(sq)); 5089 sq->sq_rmqcount--; 5090 mutex_exit(SQLOCK(sq)); 5091 } 5092 5093 /* 5094 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 5095 * SQ_WRITER) on a syncq. 
5096 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 5097 * sync queue and waits until sq_count reaches maxcnt. 5098 * 5099 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 5100 * does not care about putnext threads that are in the middle of calling put 5101 * entry points. 5102 * 5103 * This routine is used for both inner and outer syncqs. 5104 */ 5105 static void 5106 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 5107 { 5108 uint16_t count = 0; 5109 5110 mutex_enter(SQLOCK(sq)); 5111 /* 5112 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 5113 * SQ_FROZEN will be set if there is a frozen stream that has a 5114 * queue which also refers to this "shared" syncq. 5115 * SQ_BLOCKED will be set if there is "off" queue which also 5116 * refers to this "shared" syncq. 5117 */ 5118 if (maxcnt != -1) { 5119 count = sq->sq_count; 5120 SQ_PUTLOCKS_ENTER(sq); 5121 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5122 SUM_SQ_PUTCOUNTS(sq, count); 5123 } 5124 sq->sq_needexcl++; 5125 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5126 5127 while ((sq->sq_flags & flag) || 5128 (maxcnt != -1 && count > (unsigned)maxcnt)) { 5129 sq->sq_flags |= SQ_WANTWAKEUP; 5130 if (maxcnt != -1) { 5131 SQ_PUTLOCKS_EXIT(sq); 5132 } 5133 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5134 if (maxcnt != -1) { 5135 count = sq->sq_count; 5136 SQ_PUTLOCKS_ENTER(sq); 5137 SUM_SQ_PUTCOUNTS(sq, count); 5138 } 5139 } 5140 sq->sq_needexcl--; 5141 sq->sq_flags |= flag; 5142 ASSERT(maxcnt == -1 || count == maxcnt); 5143 if (maxcnt != -1) { 5144 if (sq->sq_needexcl == 0) { 5145 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5146 } 5147 SQ_PUTLOCKS_EXIT(sq); 5148 } else if (sq->sq_needexcl == 0) { 5149 SQ_PUTCOUNT_SETFAST(sq); 5150 } 5151 5152 mutex_exit(SQLOCK(sq)); 5153 } 5154 5155 /* 5156 * Reset a flag that was set with blocksq. 5157 * 5158 * Can not use this routine to reset SQ_WRITER. 5159 * 5160 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5161 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5162 * to handle the queued qwriter operations. 5163 * 5164 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5165 * sq_putlocks are used. 5166 */ 5167 static void 5168 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5169 { 5170 uint16_t flags; 5171 5172 mutex_enter(SQLOCK(sq)); 5173 ASSERT(resetflag != SQ_WRITER); 5174 ASSERT(sq->sq_flags & resetflag); 5175 flags = sq->sq_flags & ~resetflag; 5176 sq->sq_flags = flags; 5177 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5178 if (flags & SQ_WANTWAKEUP) { 5179 flags &= ~SQ_WANTWAKEUP; 5180 cv_broadcast(&sq->sq_wait); 5181 } 5182 sq->sq_flags = flags; 5183 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5184 if (!isouter) { 5185 /* drain_syncq drops SQLOCK */ 5186 drain_syncq(sq); 5187 return; 5188 } 5189 } 5190 } 5191 mutex_exit(SQLOCK(sq)); 5192 } 5193 5194 /* 5195 * Reset a flag that was set with blocksq. 5196 * Does not drain the syncq. Use emptysq() for that. 5197 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5198 * 5199 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5200 * sq_putlocks are used. 
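 *
 * Callers that see a nonzero return are expected to follow up with
 * emptysq() once all the flags have been dropped; see strunblock and
 * outer_exit for the pattern.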
5201 */ 5202 static int 5203 dropsq(syncq_t *sq, uint16_t resetflag) 5204 { 5205 uint16_t flags; 5206 5207 mutex_enter(SQLOCK(sq)); 5208 ASSERT(sq->sq_flags & resetflag); 5209 flags = sq->sq_flags & ~resetflag; 5210 if (flags & SQ_WANTWAKEUP) { 5211 flags &= ~SQ_WANTWAKEUP; 5212 cv_broadcast(&sq->sq_wait); 5213 } 5214 sq->sq_flags = flags; 5215 mutex_exit(SQLOCK(sq)); 5216 if (flags & SQ_QUEUED) 5217 return (1); 5218 return (0); 5219 } 5220 5221 /* 5222 * Empty all the messages on a syncq. 5223 * 5224 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5225 * sq_putlocks are used. 5226 */ 5227 static void 5228 emptysq(syncq_t *sq) 5229 { 5230 uint16_t flags; 5231 5232 mutex_enter(SQLOCK(sq)); 5233 flags = sq->sq_flags; 5234 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5235 /* 5236 * To prevent potential recursive invocation of drain_syncq we 5237 * do not call drain_syncq if count is non-zero. 5238 */ 5239 if (sq->sq_count == 0) { 5240 /* drain_syncq() drops SQLOCK */ 5241 drain_syncq(sq); 5242 return; 5243 } else 5244 sqenable(sq); 5245 } 5246 mutex_exit(SQLOCK(sq)); 5247 } 5248 5249 /* 5250 * Ordered insert while removing duplicates. 5251 */ 5252 static void 5253 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5254 { 5255 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5256 5257 prev_sqlpp = &sqlist->sqlist_head; 5258 while ((sqlp = *prev_sqlpp) != NULL) { 5259 if (sqlp->sql_sq >= sqp) { 5260 if (sqlp->sql_sq == sqp) /* duplicate */ 5261 return; 5262 break; 5263 } 5264 prev_sqlpp = &sqlp->sql_next; 5265 } 5266 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5267 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5268 new_sqlp->sql_next = sqlp; 5269 new_sqlp->sql_sq = sqp; 5270 *prev_sqlpp = new_sqlp; 5271 } 5272 5273 /* 5274 * Walk the write side queues until we hit either the driver 5275 * or a twist in the stream (_SAMESTR will return false in both 5276 * these cases) then turn around and walk the read side queues 5277 * back up to the stream head. 5278 */ 5279 static void 5280 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5281 { 5282 while (q != NULL) { 5283 sqlist_insert(sqlist, q->q_syncq); 5284 5285 if (_SAMESTR(q)) 5286 q = q->q_next; 5287 else if (!(q->q_flag & QREADR)) 5288 q = _RD(q); 5289 else 5290 q = NULL; 5291 } 5292 } 5293 5294 /* 5295 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5296 * associated with the "q" parameter. The resulting list is sorted in a 5297 * canonical order and is free of duplicates. 5298 * Assumes the passed queue is a _RD(q). 5299 */ 5300 static sqlist_t * 5301 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5302 { 5303 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5304 5305 /* 5306 * start with the current queue/qpair 5307 */ 5308 ASSERT(q->q_flag & QREADR); 5309 5310 sqlist_insert(sqlist, q->q_syncq); 5311 sqlist_insert(sqlist, _WR(q)->q_syncq); 5312 5313 sqlist_insertall(sqlist, stp->sd_wrq); 5314 if (do_twist) 5315 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5316 5317 return (sqlist); 5318 } 5319 5320 static sqlist_t * 5321 sqlist_alloc(struct stdata *stp, int kmflag) 5322 { 5323 size_t sqlist_size; 5324 sqlist_t *sqlist; 5325 5326 /* 5327 * Allocate 2 syncql_t's for each pushed module. Note that 5328 * the sqlist_t structure already has 4 syncql_t's built in: 5329 * 2 for the stream head, and 2 for the driver/other stream head. 
5330 */ 5331 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5332 sizeof (sqlist_t); 5333 if (STRMATED(stp)) 5334 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5335 sqlist = kmem_alloc(sqlist_size, kmflag); 5336 5337 sqlist->sqlist_head = NULL; 5338 sqlist->sqlist_size = sqlist_size; 5339 sqlist->sqlist_index = 0; 5340 5341 return (sqlist); 5342 } 5343 5344 /* 5345 * Free the list created by sqlist_alloc() 5346 */ 5347 static void 5348 sqlist_free(sqlist_t *sqlist) 5349 { 5350 kmem_free(sqlist, sqlist->sqlist_size); 5351 } 5352 5353 /* 5354 * Prevent any new entries into any syncq in this stream. 5355 * Used by freezestr. 5356 */ 5357 void 5358 strblock(queue_t *q) 5359 { 5360 struct stdata *stp; 5361 syncql_t *sql; 5362 sqlist_t *sqlist; 5363 5364 q = _RD(q); 5365 5366 stp = STREAM(q); 5367 ASSERT(stp != NULL); 5368 5369 /* 5370 * Get a sorted list with all the duplicates removed containing 5371 * all the syncqs referenced by this stream. 5372 */ 5373 sqlist = sqlist_build(q, stp, B_FALSE); 5374 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5375 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5376 sqlist_free(sqlist); 5377 } 5378 5379 /* 5380 * Release the block on new entries into this stream 5381 */ 5382 void 5383 strunblock(queue_t *q) 5384 { 5385 struct stdata *stp; 5386 syncql_t *sql; 5387 sqlist_t *sqlist; 5388 int drain_needed; 5389 5390 q = _RD(q); 5391 5392 /* 5393 * Get a sorted list with all the duplicates removed containing 5394 * all the syncqs referenced by this stream. 5395 * Have to drop the SQ_FROZEN flag on all the syncqs before 5396 * starting to drain them; otherwise the draining might 5397 * cause a freezestr in some module on the stream (which 5398 * would deadlock). 5399 */ 5400 stp = STREAM(q); 5401 ASSERT(stp != NULL); 5402 sqlist = sqlist_build(q, stp, B_FALSE); 5403 drain_needed = 0; 5404 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5405 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5406 if (drain_needed) { 5407 for (sql = sqlist->sqlist_head; sql != NULL; 5408 sql = sql->sql_next) 5409 emptysq(sql->sql_sq); 5410 } 5411 sqlist_free(sqlist); 5412 } 5413 5414 #ifdef DEBUG 5415 static int 5416 qprocsareon(queue_t *rq) 5417 { 5418 if (rq->q_next == NULL) 5419 return (0); 5420 return (_WR(rq->q_next)->q_next == _WR(rq)); 5421 } 5422 5423 int 5424 qclaimed(queue_t *q) 5425 { 5426 uint_t count; 5427 5428 count = q->q_syncq->sq_count; 5429 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5430 return (count != 0); 5431 } 5432 5433 /* 5434 * Check if anyone has frozen this stream with freezestr 5435 */ 5436 int 5437 frozenstr(queue_t *q) 5438 { 5439 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5440 } 5441 #endif /* DEBUG */ 5442 5443 /* 5444 * Enter a queue. 5445 * Obsoleted interface. Should not be used. 5446 */ 5447 void 5448 enterq(queue_t *q) 5449 { 5450 entersq(q->q_syncq, SQ_CALLBACK); 5451 } 5452 5453 void 5454 leaveq(queue_t *q) 5455 { 5456 leavesq(q->q_syncq, SQ_CALLBACK); 5457 } 5458 5459 /* 5460 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits 5461 * to check. 5462 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5463 * calls and the running of open, close and service procedures. 5464 * 5465 * If c_inner bit is set no need to grab sq_putlocks since we don't care 5466 * if other threads have entered or are entering put entry point. 
5467 * 5468 * If c_inner bit is set it might have been possible to use 5469 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize 5470 * open/close path for IP) but since the count may need to be decremented in 5471 * qwait() we wouldn't know which counter to decrement. Currently counter is 5472 * selected by current cpu_seqid and current CPU can change at any moment. XXX 5473 * in the future we might use curthread id bits to select the counter and this 5474 * would stay constant across routine calls. 5475 */ 5476 void 5477 entersq(syncq_t *sq, int entrypoint) 5478 { 5479 uint16_t count = 0; 5480 uint16_t flags; 5481 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5482 uint16_t type; 5483 uint_t c_inner = entrypoint & SQ_CI; 5484 uint_t c_outer = entrypoint & SQ_CO; 5485 5486 /* 5487 * Increment ref count to keep closes out of this queue. 5488 */ 5489 ASSERT(sq); 5490 ASSERT(c_inner && c_outer); 5491 mutex_enter(SQLOCK(sq)); 5492 flags = sq->sq_flags; 5493 type = sq->sq_type; 5494 if (!(type & c_inner)) { 5495 /* Make sure all putcounts now use slowlock. */ 5496 count = sq->sq_count; 5497 SQ_PUTLOCKS_ENTER(sq); 5498 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5499 SUM_SQ_PUTCOUNTS(sq, count); 5500 sq->sq_needexcl++; 5501 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5502 waitflags |= SQ_MESSAGES; 5503 } 5504 /* 5505 * Wait until we can enter the inner perimeter. 5506 * If we want exclusive access we wait until sq_count is 0. 5507 * We have to do this before entering the outer perimeter in order 5508 * to preserve put/close message ordering. 5509 */ 5510 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5511 sq->sq_flags = flags | SQ_WANTWAKEUP; 5512 if (!(type & c_inner)) { 5513 SQ_PUTLOCKS_EXIT(sq); 5514 } 5515 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5516 if (!(type & c_inner)) { 5517 count = sq->sq_count; 5518 SQ_PUTLOCKS_ENTER(sq); 5519 SUM_SQ_PUTCOUNTS(sq, count); 5520 } 5521 flags = sq->sq_flags; 5522 } 5523 5524 if (!(type & c_inner)) { 5525 ASSERT(sq->sq_needexcl > 0); 5526 sq->sq_needexcl--; 5527 if (sq->sq_needexcl == 0) { 5528 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5529 } 5530 } 5531 5532 /* Check if we need to enter the outer perimeter */ 5533 if (!(type & c_outer)) { 5534 /* 5535 * We have to enter the outer perimeter exclusively before 5536 * we can increment sq_count to avoid deadlock. This implies 5537 * that we have to re-check sq_flags and sq_count. 5538 * 5539 * is it possible to have c_inner set when c_outer is not set? 5540 */ 5541 if (!(type & c_inner)) { 5542 SQ_PUTLOCKS_EXIT(sq); 5543 } 5544 mutex_exit(SQLOCK(sq)); 5545 outer_enter(sq->sq_outer, SQ_GOAWAY); 5546 mutex_enter(SQLOCK(sq)); 5547 flags = sq->sq_flags; 5548 /* 5549 * there should be no need to recheck sq_putcounts 5550 * because outer_enter() has already waited for them to clear 5551 * after setting SQ_WRITER. 5552 */ 5553 count = sq->sq_count; 5554 #ifdef DEBUG 5555 /* 5556 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5557 * of doing an ASSERT internally. Others should do 5558 * something like 5559 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5560 * without the need to #ifdef DEBUG it. 
5561 */ 5562 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5563 #endif 5564 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5565 (!(type & c_inner) && count != 0)) { 5566 sq->sq_flags = flags | SQ_WANTWAKEUP; 5567 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5568 count = sq->sq_count; 5569 flags = sq->sq_flags; 5570 } 5571 } 5572 5573 sq->sq_count++; 5574 ASSERT(sq->sq_count != 0); /* Wraparound */ 5575 if (!(type & c_inner)) { 5576 /* Exclusive entry */ 5577 ASSERT(sq->sq_count == 1); 5578 sq->sq_flags |= SQ_EXCL; 5579 if (type & c_outer) { 5580 SQ_PUTLOCKS_EXIT(sq); 5581 } 5582 } 5583 mutex_exit(SQLOCK(sq)); 5584 } 5585 5586 /* 5587 * Leave a syncq. Announce to framework that closes may proceed. 5588 * c_inner and c_outer specify which concurrency bits to check. 5589 * 5590 * Must never be called from driver or module put entry point. 5591 * 5592 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5593 * sq_putlocks are used. 5594 */ 5595 void 5596 leavesq(syncq_t *sq, int entrypoint) 5597 { 5598 uint16_t flags; 5599 uint16_t type; 5600 uint_t c_outer = entrypoint & SQ_CO; 5601 #ifdef DEBUG 5602 uint_t c_inner = entrypoint & SQ_CI; 5603 #endif 5604 5605 /* 5606 * Decrement ref count, drain the syncq if possible, and wake up 5607 * any waiting close. 5608 */ 5609 ASSERT(sq); 5610 ASSERT(c_inner && c_outer); 5611 mutex_enter(SQLOCK(sq)); 5612 flags = sq->sq_flags; 5613 type = sq->sq_type; 5614 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5615 5616 if (flags & SQ_WANTWAKEUP) { 5617 flags &= ~SQ_WANTWAKEUP; 5618 cv_broadcast(&sq->sq_wait); 5619 } 5620 if (flags & SQ_WANTEXWAKEUP) { 5621 flags &= ~SQ_WANTEXWAKEUP; 5622 cv_broadcast(&sq->sq_exitwait); 5623 } 5624 5625 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5626 /* 5627 * The syncq needs to be drained. "Exit" the syncq 5628 * before calling drain_syncq. 5629 */ 5630 ASSERT(sq->sq_count != 0); 5631 sq->sq_count--; 5632 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5633 sq->sq_flags = flags & ~SQ_EXCL; 5634 drain_syncq(sq); 5635 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5636 /* Check if we need to exit the outer perimeter */ 5637 /* XXX will this ever be true? */ 5638 if (!(type & c_outer)) 5639 outer_exit(sq->sq_outer); 5640 return; 5641 } 5642 } 5643 ASSERT(sq->sq_count != 0); 5644 sq->sq_count--; 5645 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5646 sq->sq_flags = flags & ~SQ_EXCL; 5647 mutex_exit(SQLOCK(sq)); 5648 5649 /* Check if we need to exit the outer perimeter */ 5650 if (!(sq->sq_type & c_outer)) 5651 outer_exit(sq->sq_outer); 5652 } 5653 5654 /* 5655 * Prevent q_next from changing in this stream by incrementing sq_count. 5656 * 5657 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5658 * sq_putlocks are used. 5659 */ 5660 void 5661 claimq(queue_t *qp) 5662 { 5663 syncq_t *sq = qp->q_syncq; 5664 5665 mutex_enter(SQLOCK(sq)); 5666 sq->sq_count++; 5667 ASSERT(sq->sq_count != 0); /* Wraparound */ 5668 mutex_exit(SQLOCK(sq)); 5669 } 5670 5671 /* 5672 * Undo claimq. 5673 * 5674 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5675 * sq_putlocks are used. 
5676 */ 5677 void 5678 releaseq(queue_t *qp) 5679 { 5680 syncq_t *sq = qp->q_syncq; 5681 uint16_t flags; 5682 5683 mutex_enter(SQLOCK(sq)); 5684 ASSERT(sq->sq_count > 0); 5685 sq->sq_count--; 5686 5687 flags = sq->sq_flags; 5688 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5689 if (flags & SQ_WANTWAKEUP) { 5690 flags &= ~SQ_WANTWAKEUP; 5691 cv_broadcast(&sq->sq_wait); 5692 } 5693 sq->sq_flags = flags; 5694 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5695 /* 5696 * To prevent potential recursive invocation of 5697 * drain_syncq we do not call drain_syncq if count is 5698 * non-zero. 5699 */ 5700 if (sq->sq_count == 0) { 5701 drain_syncq(sq); 5702 return; 5703 } else 5704 sqenable(sq); 5705 } 5706 } 5707 mutex_exit(SQLOCK(sq)); 5708 } 5709 5710 /* 5711 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5712 */ 5713 void 5714 claimstr(queue_t *qp) 5715 { 5716 struct stdata *stp = STREAM(qp); 5717 5718 mutex_enter(&stp->sd_reflock); 5719 stp->sd_refcnt++; 5720 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5721 mutex_exit(&stp->sd_reflock); 5722 } 5723 5724 /* 5725 * Undo claimstr. 5726 */ 5727 void 5728 releasestr(queue_t *qp) 5729 { 5730 struct stdata *stp = STREAM(qp); 5731 5732 mutex_enter(&stp->sd_reflock); 5733 ASSERT(stp->sd_refcnt != 0); 5734 if (--stp->sd_refcnt == 0) 5735 cv_broadcast(&stp->sd_refmonitor); 5736 mutex_exit(&stp->sd_reflock); 5737 } 5738 5739 static syncq_t * 5740 new_syncq(void) 5741 { 5742 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5743 } 5744 5745 static void 5746 free_syncq(syncq_t *sq) 5747 { 5748 ASSERT(sq->sq_head == NULL); 5749 ASSERT(sq->sq_outer == NULL); 5750 ASSERT(sq->sq_callbpend == NULL); 5751 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5752 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5753 5754 if (sq->sq_ciputctrl != NULL) { 5755 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5756 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5757 sq->sq_nciputctrl, 0); 5758 ASSERT(ciputctrl_cache != NULL); 5759 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5760 } 5761 5762 sq->sq_tail = NULL; 5763 sq->sq_evhead = NULL; 5764 sq->sq_evtail = NULL; 5765 sq->sq_ciputctrl = NULL; 5766 sq->sq_nciputctrl = 0; 5767 sq->sq_count = 0; 5768 sq->sq_rmqcount = 0; 5769 sq->sq_callbflags = 0; 5770 sq->sq_cancelid = 0; 5771 sq->sq_next = NULL; 5772 sq->sq_needexcl = 0; 5773 sq->sq_svcflags = 0; 5774 sq->sq_nqueues = 0; 5775 sq->sq_pri = 0; 5776 sq->sq_onext = NULL; 5777 sq->sq_oprev = NULL; 5778 sq->sq_flags = 0; 5779 sq->sq_type = 0; 5780 sq->sq_servcount = 0; 5781 5782 kmem_cache_free(syncq_cache, sq); 5783 } 5784 5785 /* Outer perimeter code */ 5786 5787 /* 5788 * The outer syncq uses the fields and flags in the syncq slightly 5789 * differently from the inner syncqs. 5790 * sq_count Incremented when there are pending or running 5791 * writers at the outer perimeter to prevent the set of 5792 * inner syncqs that belong to the outer perimeter from 5793 * changing. 5794 * sq_head/tail List of deferred qwriter(OUTER) operations. 5795 * 5796 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while 5797 * inner syncqs are added to or removed from the 5798 * outer perimeter. 5799 * SQ_QUEUED sq_head/tail has messages or events queued. 5800 * 5801 * SQ_WRITER A thread is currently traversing all the inner syncqs 5802 * setting the SQ_WRITER flag. 5803 */ 5804 5805 /* 5806 * Get write access at the outer perimeter. 
5807 * Note that read access is done by entersq, putnext, and put by simply 5808 * incrementing sq_count in the inner syncq. 5809 * 5810 * Waits until "flags" is no longer set in the outer to prevent multiple 5811 * threads from having write access at the same time. SQ_WRITER has to be part 5812 * of "flags". 5813 * 5814 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5815 * until the outer_exit is finished. 5816 * 5817 * outer_enter is vulnerable to starvation since it does not prevent new 5818 * threads from entering the inner syncqs while it is waiting for sq_count to 5819 * go to zero. 5820 */ 5821 void 5822 outer_enter(syncq_t *outer, uint16_t flags) 5823 { 5824 syncq_t *sq; 5825 int wait_needed; 5826 uint16_t count; 5827 5828 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5829 outer->sq_oprev != NULL); 5830 ASSERT(flags & SQ_WRITER); 5831 5832 retry: 5833 mutex_enter(SQLOCK(outer)); 5834 while (outer->sq_flags & flags) { 5835 outer->sq_flags |= SQ_WANTWAKEUP; 5836 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5837 } 5838 5839 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5840 outer->sq_flags |= SQ_WRITER; 5841 outer->sq_count++; 5842 ASSERT(outer->sq_count != 0); /* wraparound */ 5843 wait_needed = 0; 5844 /* 5845 * Set SQ_WRITER on all the inner syncqs while holding 5846 * the SQLOCK on the outer syncq. This ensures that the changing 5847 * of SQ_WRITER is atomic under the outer SQLOCK. 5848 */ 5849 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5850 mutex_enter(SQLOCK(sq)); 5851 count = sq->sq_count; 5852 SQ_PUTLOCKS_ENTER(sq); 5853 sq->sq_flags |= SQ_WRITER; 5854 SUM_SQ_PUTCOUNTS(sq, count); 5855 if (count != 0) 5856 wait_needed = 1; 5857 SQ_PUTLOCKS_EXIT(sq); 5858 mutex_exit(SQLOCK(sq)); 5859 } 5860 mutex_exit(SQLOCK(outer)); 5861 5862 /* 5863 * Get everybody out of the syncqs sequentially. 5864 * Note that we don't actually need to acquire the PUTLOCKS, since 5865 * we have already cleared the fastbit, and set QWRITER. By 5866 * definition, the count can not increase since putnext will 5867 * take the slowlock path (and the purpose of acquiring the 5868 * putlocks was to make sure it didn't increase while we were 5869 * waiting). 5870 * 5871 * Note that we still acquire the PUTLOCKS to be safe. 5872 */ 5873 if (wait_needed) { 5874 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5875 mutex_enter(SQLOCK(sq)); 5876 count = sq->sq_count; 5877 SQ_PUTLOCKS_ENTER(sq); 5878 SUM_SQ_PUTCOUNTS(sq, count); 5879 while (count != 0) { 5880 sq->sq_flags |= SQ_WANTWAKEUP; 5881 SQ_PUTLOCKS_EXIT(sq); 5882 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5883 count = sq->sq_count; 5884 SQ_PUTLOCKS_ENTER(sq); 5885 SUM_SQ_PUTCOUNTS(sq, count); 5886 } 5887 SQ_PUTLOCKS_EXIT(sq); 5888 mutex_exit(SQLOCK(sq)); 5889 } 5890 /* 5891 * Verify that none of the flags got set while we 5892 * were waiting for the sq_counts to drop. 5893 * If this happens we exit and retry entering the 5894 * outer perimeter. 5895 */ 5896 mutex_enter(SQLOCK(outer)); 5897 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5898 mutex_exit(SQLOCK(outer)); 5899 outer_exit(outer); 5900 goto retry; 5901 } 5902 mutex_exit(SQLOCK(outer)); 5903 } 5904 } 5905 5906 /* 5907 * Drop the write access at the outer perimeter. 5908 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5909 * decrementing sq_count. 
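 *
 * A write-side critical section is thus, in sketch form:
 *
 *	outer_enter(outer, SQ_GOAWAY);
 *	... modify state protected by the outer perimeter ...
 *	outer_exit(outer);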
5910 */ 5911 void 5912 outer_exit(syncq_t *outer) 5913 { 5914 syncq_t *sq; 5915 int drain_needed; 5916 uint16_t flags; 5917 5918 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5919 outer->sq_oprev != NULL); 5920 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5921 5922 /* 5923 * Atomically (from the perspective of threads calling become_writer) 5924 * drop the write access at the outer perimeter by holding 5925 * SQLOCK(outer) across all the dropsq calls and the resetting of 5926 * SQ_WRITER. 5927 * This defines a locking order between the outer perimeter 5928 * SQLOCK and the inner perimeter SQLOCKs. 5929 */ 5930 mutex_enter(SQLOCK(outer)); 5931 flags = outer->sq_flags; 5932 ASSERT(outer->sq_flags & SQ_WRITER); 5933 if (flags & SQ_QUEUED) { 5934 write_now(outer); 5935 flags = outer->sq_flags; 5936 } 5937 5938 /* 5939 * sq_onext is stable since sq_count has not yet been decreased. 5940 * Reset the SQ_WRITER flags in all syncqs. 5941 * After dropping SQ_WRITER on the outer syncq we empty all the 5942 * inner syncqs. 5943 */ 5944 drain_needed = 0; 5945 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5946 drain_needed += dropsq(sq, SQ_WRITER); 5947 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5948 flags &= ~SQ_WRITER; 5949 if (drain_needed) { 5950 outer->sq_flags = flags; 5951 mutex_exit(SQLOCK(outer)); 5952 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5953 emptysq(sq); 5954 mutex_enter(SQLOCK(outer)); 5955 flags = outer->sq_flags; 5956 } 5957 if (flags & SQ_WANTWAKEUP) { 5958 flags &= ~SQ_WANTWAKEUP; 5959 cv_broadcast(&outer->sq_wait); 5960 } 5961 outer->sq_flags = flags; 5962 ASSERT(outer->sq_count > 0); 5963 outer->sq_count--; 5964 mutex_exit(SQLOCK(outer)); 5965 } 5966 5967 /* 5968 * Add another syncq to an outer perimeter. 5969 * Block out all other access to the outer perimeter while it is being 5970 * changed using blocksq. 5971 * Assumes that the caller has *not* done an outer_enter. 5972 * 5973 * Vulnerable to starvation in blocksq. 5974 */ 5975 static void 5976 outer_insert(syncq_t *outer, syncq_t *sq) 5977 { 5978 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5979 outer->sq_oprev != NULL); 5980 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5981 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5982 5983 /* Get exclusive access to the outer perimeter list */ 5984 blocksq(outer, SQ_BLOCKED, 0); 5985 ASSERT(outer->sq_flags & SQ_BLOCKED); 5986 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5987 5988 mutex_enter(SQLOCK(sq)); 5989 sq->sq_outer = outer; 5990 outer->sq_onext->sq_oprev = sq; 5991 sq->sq_onext = outer->sq_onext; 5992 outer->sq_onext = sq; 5993 sq->sq_oprev = outer; 5994 mutex_exit(SQLOCK(sq)); 5995 unblocksq(outer, SQ_BLOCKED, 1); 5996 } 5997 5998 /* 5999 * Remove a syncq from an outer perimeter. 6000 * Block out all other access to the outer perimeter while it is being 6001 * changed using blocksq. 6002 * Assumes that the caller has *not* done an outer_enter. 6003 * 6004 * Vulnerable to starvation in blocksq. 
6005 */
6006 static void
6007 outer_remove(syncq_t *outer, syncq_t *sq)
6008 {
6009 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6010 outer->sq_oprev != NULL);
6011 ASSERT(sq->sq_outer == outer);
6012 
6013 /* Get exclusive access to the outer perimeter list */
6014 blocksq(outer, SQ_BLOCKED, 0);
6015 ASSERT(outer->sq_flags & SQ_BLOCKED);
6016 ASSERT(!(outer->sq_flags & SQ_WRITER));
6017 
6018 mutex_enter(SQLOCK(sq));
6019 sq->sq_outer = NULL;
6020 sq->sq_onext->sq_oprev = sq->sq_oprev;
6021 sq->sq_oprev->sq_onext = sq->sq_onext;
6022 sq->sq_oprev = sq->sq_onext = NULL;
6023 mutex_exit(SQLOCK(sq));
6024 unblocksq(outer, SQ_BLOCKED, 1);
6025 }
6026 
6027 /*
6028 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
6029 * If this is the first callback for this outer perimeter then add
6030 * this outer perimeter to the list of outer perimeters that
6031 * the qwriter_outer_thread will process.
6032 *
6033 * Increments sq_count in the outer syncq to prevent the membership
6034 * of the outer perimeter (in terms of inner syncqs) from changing while
6035 * the callback is pending.
6036 */
6037 static void
6038 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
6039 {
6040 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6041 
6042 mp->b_prev = (mblk_t *)func;
6043 mp->b_queue = q;
6044 mp->b_next = NULL;
6045 outer->sq_count++;	/* Decremented when dequeued */
6046 ASSERT(outer->sq_count != 0);	/* Wraparound */
6047 if (outer->sq_evhead == NULL) {
6048 /* First message. */
6049 outer->sq_evhead = outer->sq_evtail = mp;
6050 outer->sq_flags |= SQ_EVENTS;
6051 mutex_exit(SQLOCK(outer));
6052 STRSTAT(qwr_outer);
6053 (void) taskq_dispatch(streams_taskq,
6054 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
6055 } else {
6056 ASSERT(outer->sq_flags & SQ_EVENTS);
6057 outer->sq_evtail->b_next = mp;
6058 outer->sq_evtail = mp;
6059 mutex_exit(SQLOCK(outer));
6060 }
6061 }
6062 
6063 /*
6064 * Try and upgrade to write access at the outer perimeter. If this can
6065 * not be done without blocking then queue the callback to be done
6066 * by the qwriter_outer_thread.
6067 *
6068 * This routine can only be called from put or service procedures plus
6069 * asynchronous callback routines that have properly entered the queue (with
6070 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
6071 * associated with q.
6072 */
6073 void
6074 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
6075 {
6076 syncq_t *osq, *sq, *outer;
6077 int failed;
6078 uint16_t flags;
6079 
6080 osq = q->q_syncq;
6081 outer = osq->sq_outer;
6082 if (outer == NULL)
6083 panic("qwriter(PERIM_OUTER): no outer perimeter");
6084 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6085 outer->sq_oprev != NULL);
6086 
6087 mutex_enter(SQLOCK(outer));
6088 flags = outer->sq_flags;
6089 /*
6090 * If some thread is traversing sq_next, or if we are blocked by
6091 * outer_insert or outer_remove, or if we already have queued
6092 * callbacks, then queue this callback for later processing.
6093 *
6094 * Also queue the qwriter when running as an interrupt (or other
6095 * high-priority) thread in order to reduce the time spent
6096 * running at high IPL.
6097 */
6098 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
6099 /*
6100 * Queue the become_writer request.
6101 * The queueing is atomic under SQLOCK(outer) in order
6102 * to synchronize with outer_exit.
6103 * queue_writer will drop the outer SQLOCK
6104 */
6105 if (flags & SQ_BLOCKED) {
6106 /* Must set SQ_WRITER on inner perimeter */
6107 mutex_enter(SQLOCK(osq));
6108 osq->sq_flags |= SQ_WRITER;
6109 mutex_exit(SQLOCK(osq));
6110 } else {
6111 if (!(flags & SQ_WRITER)) {
6112 /*
6113 * The outer could have been SQ_BLOCKED thus
6114 * SQ_WRITER might not be set on the inner.
6115 */
6116 mutex_enter(SQLOCK(osq));
6117 osq->sq_flags |= SQ_WRITER;
6118 mutex_exit(SQLOCK(osq));
6119 }
6120 ASSERT(osq->sq_flags & SQ_WRITER);
6121 }
6122 queue_writer(outer, func, q, mp);
6123 return;
6124 }
6125 /*
6126 * We are half-way to exclusive access to the outer perimeter.
6127 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6128 * while the inner syncqs are traversed.
6129 */
6130 outer->sq_count++;
6131 ASSERT(outer->sq_count != 0);	/* wraparound */
6132 flags |= SQ_WRITER;
6133 /*
6134 * Check if we can run the function immediately. Mark all
6135 * syncqs with the writer flag to prevent new entries into
6136 * put and service procedures.
6137 *
6138 * Set SQ_WRITER on all the inner syncqs while holding
6139 * the SQLOCK on the outer syncq. This ensures that the changing
6140 * of SQ_WRITER is atomic under the outer SQLOCK.
6141 */
6142 failed = 0;
6143 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6144 uint16_t count;
6145 uint_t maxcnt = (sq == osq) ? 1 : 0;
6146 
6147 mutex_enter(SQLOCK(sq));
6148 count = sq->sq_count;
6149 SQ_PUTLOCKS_ENTER(sq);
6150 SUM_SQ_PUTCOUNTS(sq, count);
6151 if (sq->sq_count > maxcnt)
6152 failed = 1;
6153 sq->sq_flags |= SQ_WRITER;
6154 SQ_PUTLOCKS_EXIT(sq);
6155 mutex_exit(SQLOCK(sq));
6156 }
6157 if (failed) {
6158 /*
6159 * Some other thread has a read claim on the outer perimeter.
6160 * Queue the callback for deferred processing.
6161 *
6162 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6163 * so that other qwriter(OUTER) calls will queue their
6164 * callbacks as well. queue_writer increments sq_count so we
6165 * decrement to compensate for our increment.
6166 *
6167 * Dropping SQ_WRITER enables the writer thread to work
6168 * on this outer perimeter.
6169 */
6170 outer->sq_flags = flags;
6171 queue_writer(outer, func, q, mp);
6172 /* queue_writer dropped the lock */
6173 mutex_enter(SQLOCK(outer));
6174 ASSERT(outer->sq_count > 0);
6175 outer->sq_count--;
6176 ASSERT(outer->sq_flags & SQ_WRITER);
6177 flags = outer->sq_flags;
6178 flags &= ~SQ_WRITER;
6179 if (flags & SQ_WANTWAKEUP) {
6180 flags &= ~SQ_WANTWAKEUP;
6181 cv_broadcast(&outer->sq_wait);
6182 }
6183 outer->sq_flags = flags;
6184 mutex_exit(SQLOCK(outer));
6185 return;
6186 } else {
6187 outer->sq_flags = flags;
6188 mutex_exit(SQLOCK(outer));
6189 }
6190 
6191 /* Can run it immediately */
6192 (*func)(q, mp);
6193 
6194 outer_exit(outer);
6195 }
6196 
6197 /*
6198 * Dequeue all writer callbacks from the outer perimeter and run them.
6199 */
6200 static void
6201 write_now(syncq_t *outer)
6202 {
6203 mblk_t *mp;
6204 queue_t *q;
6205 void (*func)();
6206 
6207 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6208 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6209 outer->sq_oprev != NULL);
6210 while ((mp = outer->sq_evhead) != NULL) {
6211 /*
6212 * Queues cannot be placed on the queuelist on the outer
6213 * perimeter.
6214 */ 6215 ASSERT(!(outer->sq_flags & SQ_MESSAGES)); 6216 ASSERT((outer->sq_flags & SQ_EVENTS)); 6217 6218 outer->sq_evhead = mp->b_next; 6219 if (outer->sq_evhead == NULL) { 6220 outer->sq_evtail = NULL; 6221 outer->sq_flags &= ~SQ_EVENTS; 6222 } 6223 ASSERT(outer->sq_count != 0); 6224 outer->sq_count--; /* Incremented when enqueued. */ 6225 mutex_exit(SQLOCK(outer)); 6226 /* 6227 * Drop the message if the queue is closing. 6228 * Make sure that the queue is "claimed" when the callback 6229 * is run in order to satisfy various ASSERTs. 6230 */ 6231 q = mp->b_queue; 6232 func = (void (*)())mp->b_prev; 6233 ASSERT(func != NULL); 6234 mp->b_next = mp->b_prev = NULL; 6235 if (q->q_flag & QWCLOSE) { 6236 freemsg(mp); 6237 } else { 6238 claimq(q); 6239 (*func)(q, mp); 6240 releaseq(q); 6241 } 6242 mutex_enter(SQLOCK(outer)); 6243 } 6244 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6245 } 6246 6247 /* 6248 * The list of messages on the inner syncq is effectively hashed 6249 * by destination queue. These destination queues are doubly 6250 * linked lists (hopefully) in priority order. Messages are then 6251 * put on the queue referenced by the q_sqhead/q_sqtail elements. 6252 * Additional messages are linked together by the b_next/b_prev 6253 * elements in the mblk, with (similar to putq()) the first message 6254 * having a NULL b_prev and the last message having a NULL b_next. 6255 * 6256 * Events, such as qwriter callbacks, are put onto a list in FIFO 6257 * order referenced by sq_evhead, and sq_evtail. This is a singly 6258 * linked list, and messages here MUST be processed in the order queued. 6259 */ 6260 6261 /* 6262 * Run the events on the syncq event list (sq_evhead). 6263 * Assumes there is only one claim on the syncq, it is 6264 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6265 * Messages here are processed in order, with the SQ_EXCL bit 6266 * held all the way through till the last message is processed. 6267 */ 6268 void 6269 sq_run_events(syncq_t *sq) 6270 { 6271 mblk_t *bp; 6272 queue_t *qp; 6273 uint16_t flags = sq->sq_flags; 6274 void (*func)(); 6275 6276 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6277 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6278 sq->sq_oprev == NULL) || 6279 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6280 sq->sq_oprev != NULL)); 6281 6282 ASSERT(flags & SQ_EXCL); 6283 ASSERT(sq->sq_count == 1); 6284 6285 /* 6286 * We need to process all of the events on this list. It 6287 * is possible that new events will be added while we are 6288 * away processing a callback, so on every loop, we start 6289 * back at the beginning of the list. 6290 */ 6291 /* 6292 * We have to reaccess sq_evhead since there is a 6293 * possibility of a new entry while we were running 6294 * the callback. 6295 */ 6296 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6297 ASSERT(bp->b_queue->q_syncq == sq); 6298 ASSERT(sq->sq_flags & SQ_EVENTS); 6299 6300 qp = bp->b_queue; 6301 func = (void (*)())bp->b_prev; 6302 ASSERT(func != NULL); 6303 6304 /* 6305 * Messages from the event queue must be taken off in 6306 * FIFO order. 
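 *
 * Schematically (a sketch for orientation, not normative), with the
 * callback function stashed in b_prev and the target queue in b_queue:
 *
 *	sq_evhead -> mp1 -> mp2 -> ... -> mpN <- sq_evtail
 *		     (b_next links; mp1 was queued first and runs first)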
6307 */
6308 ASSERT(sq->sq_evhead == bp);
6309 sq->sq_evhead = bp->b_next;
6310 
6311 if (bp->b_next == NULL) {
6312 /* Deleting last */
6313 ASSERT(sq->sq_evtail == bp);
6314 sq->sq_evtail = NULL;
6315 sq->sq_flags &= ~SQ_EVENTS;
6316 }
6317 bp->b_prev = bp->b_next = NULL;
6318 ASSERT(bp->b_datap->db_ref != 0);
6319 
6320 mutex_exit(SQLOCK(sq));
6321 
6322 (*func)(qp, bp);
6323 
6324 mutex_enter(SQLOCK(sq));
6325 /*
6326 * Re-read the flags, since they could have changed.
6327 */
6328 flags = sq->sq_flags;
6329 ASSERT(flags & SQ_EXCL);
6330 }
6331 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6332 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6333 
6334 if (flags & SQ_WANTWAKEUP) {
6335 flags &= ~SQ_WANTWAKEUP;
6336 cv_broadcast(&sq->sq_wait);
6337 }
6338 if (flags & SQ_WANTEXWAKEUP) {
6339 flags &= ~SQ_WANTEXWAKEUP;
6340 cv_broadcast(&sq->sq_exitwait);
6341 }
6342 sq->sq_flags = flags;
6343 }
6344 
6345 /*
6346 * Put messages on the event list.
6347 * If we can go exclusive now, do so and process the event list, otherwise
6348 * let the last claim service this list (or wake the sqthread).
6349 * This procedure assumes SQLOCK is held. To run the event list, it
6350 * must be called with no claims.
6351 */
6352 static void
6353 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6354 {
6355 uint16_t count;
6356 
6357 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6358 ASSERT(func != NULL);
6359 
6360 /*
6361 * This is a callback. Add it to the list of callbacks
6362 * and see about upgrading.
6363 */
6364 mp->b_prev = (mblk_t *)func;
6365 mp->b_queue = q;
6366 mp->b_next = NULL;
6367 if (sq->sq_evhead == NULL) {
6368 sq->sq_evhead = sq->sq_evtail = mp;
6369 sq->sq_flags |= SQ_EVENTS;
6370 } else {
6371 ASSERT(sq->sq_evtail != NULL);
6372 ASSERT(sq->sq_evtail->b_next == NULL);
6373 ASSERT(sq->sq_flags & SQ_EVENTS);
6374 sq->sq_evtail->b_next = mp;
6375 sq->sq_evtail = mp;
6376 }
6377 /*
6378 * We have set SQ_EVENTS, so threads will have to
6379 * unwind out of the perimeter, and new entries will
6380 * not grab a putlock. But we still need to know
6381 * how many threads have already made a claim to the
6382 * syncq, so grab the putlocks, and sum the counts.
6383 * If there are no claims on the syncq, we can upgrade
6384 * to exclusive, and run the event list.
6385 * NOTE: We hold the SQLOCK, so we can just grab the
6386 * putlocks.
6387 */
6388 count = sq->sq_count;
6389 SQ_PUTLOCKS_ENTER(sq);
6390 SUM_SQ_PUTCOUNTS(sq, count);
6391 /*
6392 * This thread holds no claim on the syncq (at least not from this
6393 * entry point), so if the summed count shows that no other thread
6394 * holds a claim either, we can upgrade to exclusive access and run
6395 * the event list ourselves.
6396 *
6397 * Otherwise some other thread has a claim on the syncq, and that
6398 * thread will drain the syncq and pick up these events.
6399 */
6400 if (count > 0) {
6401 /*
6402 * Can't upgrade - other threads inside.
6403 */
6404 SQ_PUTLOCKS_EXIT(sq);
6405 mutex_exit(SQLOCK(sq));
6406 return;
6407 }
6408 /*
6409 * Need to set SQ_EXCL and make a claim on the syncq.
6410 */
6411 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6412 sq->sq_flags |= SQ_EXCL;
6413 ASSERT(sq->sq_count == 0);
6414 sq->sq_count++;
6415 SQ_PUTLOCKS_EXIT(sq);
6416 
6417 /* Process the events list */
6418 sq_run_events(sq);
6419 
6420 /*
6421 * Release our claim...
6422 */
6423 sq->sq_count--;
6424 
6425 /*
6426 * And release SQ_EXCL.
6427 * We don't need to acquire the putlocks to release
6428 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6429 */
6430 sq->sq_flags &= ~SQ_EXCL;
6431 
6432 /*
6433 * SQ_EXCL should no longer be set; we just cleared it above.
6434 */
6435 ASSERT(!(sq->sq_flags & SQ_EXCL));
6436 
6437 /*
6438 * If anything happened while we were running the
6439 * events (or was there before), we need to process
6440 * them now. We shouldn't be exclusive since we
6441 * released the perimeter above (plus, we asserted
6442 * for it).
6443 */
6444 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6445 drain_syncq(sq);
6446 else
6447 mutex_exit(SQLOCK(sq));
6448 }
6449 
6450 /*
6451 * Perform delayed processing. The caller has to make sure that it is safe
6452 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6453 * set).
6454 *
6455 * Assume that the caller has NO claims on the syncq. However, a claim
6456 * on the syncq does not indicate that a thread is draining the syncq.
6457 * There may be more claims on the syncq than there are threads draining
6458 * (i.e. #_threads_draining <= sq_count)
6459 *
6460 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6461 * in order to preserve qwriter(OUTER) ordering constraints.
6462 *
6463 * sq_putcount only needs to be checked when dispatching the queued
6464 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6465 */
6466 void
6467 drain_syncq(syncq_t *sq)
6468 {
6469 queue_t *qp;
6470 uint16_t count;
6471 uint16_t type = sq->sq_type;
6472 uint16_t flags = sq->sq_flags;
6473 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6474 
6475 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6476 "drain_syncq start:%p", sq);
6477 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6478 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6479 sq->sq_oprev == NULL) ||
6480 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6481 sq->sq_oprev != NULL));
6482 
6483 /*
6484 * Drop SQ_SERVICE flag.
6485 */
6486 if (bg_service)
6487 sq->sq_svcflags &= ~SQ_SERVICE;
6488 
6489 /*
6490 * If SQ_EXCL is set, someone else is processing this syncq - let him
6491 * finish the job.
6492 */
6493 if (flags & SQ_EXCL) {
6494 if (bg_service) {
6495 ASSERT(sq->sq_servcount != 0);
6496 sq->sq_servcount--;
6497 }
6498 mutex_exit(SQLOCK(sq));
6499 return;
6500 }
6501 
6502 /*
6503 * This routine can be called by a background thread if
6504 * it was scheduled by a hi-priority thread. So, if there are
6505 * no messages queued, return (remember, we have the SQLOCK,
6506 * and it cannot change until we release it). Also wake up any waiters.
6507 */
6508 if (!(flags & SQ_QUEUED)) {
6509 if (flags & SQ_WANTWAKEUP) {
6510 flags &= ~SQ_WANTWAKEUP;
6511 cv_broadcast(&sq->sq_wait);
6512 }
6513 if (flags & SQ_WANTEXWAKEUP) {
6514 flags &= ~SQ_WANTEXWAKEUP;
6515 cv_broadcast(&sq->sq_exitwait);
6516 }
6517 sq->sq_flags = flags;
6518 if (bg_service) {
6519 ASSERT(sq->sq_servcount != 0);
6520 sq->sq_servcount--;
6521 }
6522 mutex_exit(SQLOCK(sq));
6523 return;
6524 }
6525 
6526 /*
6527 * If this is not a concurrent put perimeter, we need to
6528 * become exclusive to drain. Also, if not CIPUT, we would
6529 * not have acquired a putlock, so we don't need to check
6530 * the putcounts. If not entering with a claim, we test
6531 * for sq_count == 0.
6532 */
6533 type = sq->sq_type;
6534 if (!(type & SQ_CIPUT)) {
6535 if (sq->sq_count > 1) {
6536 if (bg_service) {
6537 ASSERT(sq->sq_servcount != 0);
6538 sq->sq_servcount--;
6539 }
6540 mutex_exit(SQLOCK(sq));
6541 return;
6542 }
6543 sq->sq_flags |= SQ_EXCL;
6544 }
6545 
6546 /*
6547 * This is where we make a claim to the syncq.
6548 * This can either be done by incrementing a putlock, or
6549 * the sq_count. But since we already have the SQLOCK
6550 * here, we just bump the sq_count.
6551 *
6552 * Note that after we make a claim, we need to let the code
6553 * fall through to the end of this routine to clean itself
6554 * up. A return in the while loop will put the syncq in a
6555 * very bad state.
6556 */
6557 sq->sq_count++;
6558 ASSERT(sq->sq_count != 0);	/* wraparound */
6559 
6560 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6561 /*
6562 * If we are told to stayaway, we are
6563 * done.
6564 */
6565 if (flags & (SQ_STAYAWAY)) {
6566 break;
6567 }
6568 
6569 /*
6570 * If there are events to run, do so.
6571 * We have one claim to the syncq, so if there are
6572 * more than one, other threads are running.
6573 */
6574 if (sq->sq_evhead != NULL) {
6575 ASSERT(sq->sq_flags & SQ_EVENTS);
6576 
6577 count = sq->sq_count;
6578 SQ_PUTLOCKS_ENTER(sq);
6579 SUM_SQ_PUTCOUNTS(sq, count);
6580 if (count > 1) {
6581 SQ_PUTLOCKS_EXIT(sq);
6582 /* Can't upgrade - other threads inside */
6583 break;
6584 }
6585 ASSERT((flags & SQ_EXCL) == 0);
6586 sq->sq_flags = flags | SQ_EXCL;
6587 SQ_PUTLOCKS_EXIT(sq);
6588 /*
6589 * We have the only claim, run the events.
6590 * Note that sq_run_events leaves SQ_EXCL set.
6591 */
6592 sq_run_events(sq);
6593 
6594 /*
6595 * If this is a CIPUT perimeter, we need
6596 * to drop the SQ_EXCL flag so we can properly
6597 * continue draining the syncq.
6598 */
6599 if (type & SQ_CIPUT) {
6600 ASSERT(sq->sq_flags & SQ_EXCL);
6601 sq->sq_flags &= ~SQ_EXCL;
6602 }
6603 
6604 /*
6605 * And go back to the beginning just in case
6606 * anything changed while we were away.
6607 */
6608 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6609 continue;
6610 }
6611 
6612 ASSERT(sq->sq_evhead == NULL);
6613 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6614 
6615 /*
6616 * Find the queue that is not draining.
6617 *
6618 * q_draining is protected by QLOCK which we do not hold.
6619 * But if it was set, then a thread was draining, and if it gets
6620 * cleared, then it was because the thread has successfully
6621 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6622 * state to happen, a thread needs the SQLOCK which we hold, and
6623 * if there was such a flag, we would have already seen it.
6624 */
6625 
6626 for (qp = sq->sq_head;
6627 qp != NULL && (qp->q_draining ||
6628 (qp->q_sqflags & Q_SQDRAINING));
6629 qp = qp->q_sqnext)
6630 ;
6631 
6632 if (qp == NULL)
6633 break;
6634 
6635 /*
6636 * We have a queue to work on, and we hold the
6637 * SQLOCK and one claim, call qdrain_syncq.
6638 * This means we need to release the SQLOCK and
6639 * acquire the QLOCK (OK since we have a claim).
6640 * Note that qdrain_syncq will actually dequeue
6641 * this queue from the sq_head list when it is
6642 * convinced all the work is done and release
6643 * the QLOCK before returning.
6644 */ 6645 qp->q_sqflags |= Q_SQDRAINING; 6646 mutex_exit(SQLOCK(sq)); 6647 mutex_enter(QLOCK(qp)); 6648 qdrain_syncq(sq, qp); 6649 mutex_enter(SQLOCK(sq)); 6650 6651 /* The queue is drained */ 6652 ASSERT(qp->q_sqflags & Q_SQDRAINING); 6653 qp->q_sqflags &= ~Q_SQDRAINING; 6654 /* 6655 * NOTE: After this point qp should not be used since it may be 6656 * closed. 6657 */ 6658 } 6659 6660 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6661 flags = sq->sq_flags; 6662 6663 /* 6664 * sq->sq_head cannot change because we hold the 6665 * sqlock. However, a thread CAN decide that it is no longer 6666 * going to drain that queue. However, this should be due to 6667 * a GOAWAY state, and we should see that here. 6668 * 6669 * This loop is not very efficient. One solution may be adding a second 6670 * pointer to the "draining" queue, but it is difficult to do when 6671 * queues are inserted in the middle due to priority ordering. Another 6672 * possibility is to yank the queue out of the sq list and put it onto 6673 * the "draining list" and then put it back if it can't be drained. 6674 */ 6675 6676 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) || 6677 (type & SQ_CI) || sq->sq_head->q_draining); 6678 6679 /* Drop SQ_EXCL for non-CIPUT perimeters */ 6680 if (!(type & SQ_CIPUT)) 6681 flags &= ~SQ_EXCL; 6682 ASSERT((flags & SQ_EXCL) == 0); 6683 6684 /* Wake up any waiters. */ 6685 if (flags & SQ_WANTWAKEUP) { 6686 flags &= ~SQ_WANTWAKEUP; 6687 cv_broadcast(&sq->sq_wait); 6688 } 6689 if (flags & SQ_WANTEXWAKEUP) { 6690 flags &= ~SQ_WANTEXWAKEUP; 6691 cv_broadcast(&sq->sq_exitwait); 6692 } 6693 sq->sq_flags = flags; 6694 6695 ASSERT(sq->sq_count != 0); 6696 /* Release our claim. */ 6697 sq->sq_count--; 6698 6699 if (bg_service) { 6700 ASSERT(sq->sq_servcount != 0); 6701 sq->sq_servcount--; 6702 } 6703 6704 mutex_exit(SQLOCK(sq)); 6705 6706 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6707 "drain_syncq end:%p", sq); 6708 } 6709 6710 6711 /* 6712 * 6713 * qdrain_syncq can be called (currently) from only one of two places: 6714 * drain_syncq 6715 * putnext (or some variation of it). 6716 * and eventually 6717 * qwait(_sig) 6718 * 6719 * If called from drain_syncq, we found it in the list of queues needing 6720 * service, so there is work to be done (or it wouldn't be in the list). 6721 * 6722 * If called from some putnext variation, it was because the 6723 * perimeter is open, but messages are blocking a putnext and 6724 * there is not a thread working on it. Now a thread could start 6725 * working on it while we are getting ready to do so ourself, but 6726 * the thread would set the q_draining flag, and we can spin out. 6727 * 6728 * As for qwait(_sig), I think I shall let it continue to call 6729 * drain_syncq directly (after all, it will get here eventually). 6730 * 6731 * qdrain_syncq has to terminate when: 6732 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering 6733 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering 6734 * 6735 * ASSUMES: 6736 * One claim 6737 * QLOCK held 6738 * SQLOCK not held 6739 * Will release QLOCK before returning 6740 */ 6741 void 6742 qdrain_syncq(syncq_t *sq, queue_t *q) 6743 { 6744 mblk_t *bp; 6745 #ifdef DEBUG 6746 uint16_t count; 6747 #endif 6748 6749 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6750 "drain_syncq start:%p", sq); 6751 ASSERT(q->q_syncq == sq); 6752 ASSERT(MUTEX_HELD(QLOCK(q))); 6753 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6754 /* 6755 * For non-CIPUT perimeters, we should be called with the exclusive bit 6756 * set already. 
For CIPUT perimeters, we will be doing a concurrent 6757 * drain, so it better not be set. 6758 */ 6759 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT))); 6760 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL))); 6761 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL)); 6762 /* 6763 * All outer pointers are set, or none of them are 6764 */ 6765 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6766 sq->sq_oprev == NULL) || 6767 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6768 sq->sq_oprev != NULL)); 6769 #ifdef DEBUG 6770 count = sq->sq_count; 6771 /* 6772 * This is OK without the putlocks, because we have one 6773 * claim either from the sq_count, or a putcount. We could 6774 * get an erroneous value from other counts, but ours won't 6775 * change, so one way or another, we will have at least a 6776 * value of one. 6777 */ 6778 SUM_SQ_PUTCOUNTS(sq, count); 6779 ASSERT(count >= 1); 6780 #endif /* DEBUG */ 6781 6782 /* 6783 * The first thing to do is find out if a thread is already draining 6784 * this queue. If so, we are done, just return. 6785 */ 6786 if (q->q_draining) { 6787 mutex_exit(QLOCK(q)); 6788 return; 6789 } 6790 6791 /* 6792 * If the perimeter is exclusive, there is nothing we can do right now, 6793 * go away. Note that there is nothing to prevent this case from 6794 * changing right after this check, but the spin-out will catch it. 6795 */ 6796 6797 /* Tell other threads that we are draining this queue */ 6798 q->q_draining = 1; /* Protected by QLOCK */ 6799 6800 /* 6801 * If there is nothing to do, clear QFULL as necessary. This caters for 6802 * the case where an empty queue was enqueued onto the syncq. 6803 */ 6804 if (q->q_sqhead == NULL) { 6805 ASSERT(q->q_syncqmsgs == 0); 6806 mutex_exit(QLOCK(q)); 6807 clr_qfull(q); 6808 mutex_enter(QLOCK(q)); 6809 } 6810 6811 /* 6812 * Note that q_sqhead must be re-checked here in case another message 6813 * was enqueued whilst QLOCK was dropped during the call to clr_qfull. 6814 */ 6815 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) { 6816 /* 6817 * Because we can enter this routine just because a putnext is 6818 * blocked, we need to spin out if the perimeter wants to go 6819 * exclusive as well as just blocked. We need to spin out also 6820 * if events are queued on the syncq. 6821 * Don't check for SQ_EXCL, because non-CIPUT perimeters would 6822 * set it, and it can't become exclusive while we hold a claim. 6823 */ 6824 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) { 6825 break; 6826 } 6827 6828 #ifdef DEBUG 6829 /* 6830 * Since we are in qdrain_syncq, we already know the queue, 6831 * but for sanity, we want to check this against the qp that 6832 * was passed in by bp->b_queue. 6833 */ 6834 6835 ASSERT(bp->b_queue == q); 6836 ASSERT(bp->b_queue->q_syncq == sq); 6837 bp->b_queue = NULL; 6838 6839 /* 6840 * We would have the following check in the DEBUG code: 6841 * 6842 * if (bp->b_prev != NULL) { 6843 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp); 6844 * } 6845 * 6846 * This can't be done, however, since IP modifies qinfo 6847 * structure at run-time (switching between IPv4 qinfo and IPv6 6848 * qinfo), invalidating the check. 6849 * So the assignment to func is left here, but the ASSERT itself 6850 * is removed until the whole issue is resolved. 
6851 */ 6852 #endif 6853 ASSERT(q->q_sqhead == bp); 6854 q->q_sqhead = bp->b_next; 6855 bp->b_prev = bp->b_next = NULL; 6856 ASSERT(q->q_syncqmsgs > 0); 6857 mutex_exit(QLOCK(q)); 6858 6859 ASSERT(bp->b_datap->db_ref != 0); 6860 6861 (void) (*q->q_qinfo->qi_putp)(q, bp); 6862 6863 mutex_enter(QLOCK(q)); 6864 6865 /* 6866 * q_syncqmsgs should only be decremented after executing the 6867 * put procedure to avoid message re-ordering. This is due to an 6868 * optimisation in putnext() which can call the put procedure 6869 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED 6870 * being set). 6871 * 6872 * We also need to clear QFULL in the next service procedure 6873 * queue if this is the last message destined for that queue. 6874 * 6875 * It would make better sense to have some sort of tunable for 6876 * the low water mark, but these semantics are not yet defined. 6877 * So, alas, we use a constant. 6878 */ 6879 if (--q->q_syncqmsgs == 0) { 6880 mutex_exit(QLOCK(q)); 6881 clr_qfull(q); 6882 mutex_enter(QLOCK(q)); 6883 } 6884 6885 /* 6886 * Always clear SQ_EXCL when CIPUT in order to handle 6887 * qwriter(INNER). The putp() can call qwriter and get exclusive 6888 * access IFF this is the only claim. So, we need to test for 6889 * this possibility, acquire the mutex and clear the bit. 6890 */ 6891 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) { 6892 mutex_enter(SQLOCK(sq)); 6893 sq->sq_flags &= ~SQ_EXCL; 6894 mutex_exit(SQLOCK(sq)); 6895 } 6896 } 6897 6898 /* 6899 * We should either have no messages on this queue, or we were told to 6900 * goaway by a waiter (which we will wake up at the end of this 6901 * function). 6902 */ 6903 ASSERT((q->q_sqhead == NULL) || 6904 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))); 6905 6906 ASSERT(MUTEX_HELD(QLOCK(q))); 6907 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6908 6909 /* Remove the q from the syncq list if all the messages are drained. */ 6910 if (q->q_sqhead == NULL) { 6911 ASSERT(q->q_syncqmsgs == 0); 6912 mutex_enter(SQLOCK(sq)); 6913 if (q->q_sqflags & Q_SQQUEUED) 6914 SQRM_Q(sq, q); 6915 mutex_exit(SQLOCK(sq)); 6916 /* 6917 * Since the queue is removed from the list, reset its priority. 6918 */ 6919 q->q_spri = 0; 6920 } 6921 6922 /* 6923 * Remember, the q_draining flag is used to let another thread know 6924 * that there is a thread currently draining the messages for a queue. 6925 * Since we are now done with this queue (even if there may be messages 6926 * still there), we need to clear this flag so some thread will work on 6927 * it if needed. 6928 */ 6929 ASSERT(q->q_draining); 6930 q->q_draining = 0; 6931 6932 /* Called with a claim, so OK to drop all locks. */ 6933 mutex_exit(QLOCK(q)); 6934 6935 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6936 "drain_syncq end:%p", sq); 6937 } 6938 /* END OF QDRAIN_SYNCQ */ 6939 6940 6941 /* 6942 * This is the mate to qdrain_syncq, except that it is putting the message onto 6943 * the queue instead of draining. Since the message is destined for the queue 6944 * that is selected, there is no need to identify the function because the 6945 * message is intended for the put routine for the queue. For debug kernels, 6946 * this routine will do it anyway just in case. 6947 * 6948 * After the message is enqueued on the syncq, it calls putnext_tail() 6949 * which will schedule a background thread to actually process the message. 6950 * 6951 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and 6952 * SQLOCK(sq) and QLOCK(q) are not held. 
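 *
 * For orientation, a rough sketch of the deferred-put path as assumed
 * from the surrounding code (not a literal call chain):
 *
 *	putnext()			finds the perimeter busy
 *	  qfill_syncq(sq, q, mp)	enqueues mp on q's syncq list
 *	    putnext_tail(sq, q, 0)	drains now or schedules the syncq
 *	      drain_syncq() / sqenable()
 *		qdrain_syncq(sq, q)	finally calls q's put procedure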
6953 */
6954 void
6955 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6956 {
6957 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6958 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6959 ASSERT(sq->sq_count > 0);
6960 ASSERT(q->q_syncq == sq);
6961 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6962 sq->sq_oprev == NULL) ||
6963 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6964 sq->sq_oprev != NULL));
6965 
6966 mutex_enter(QLOCK(q));
6967 
6968 #ifdef DEBUG
6969 /*
6970 * This is used for debug in the qfill_syncq/qdrain_syncq case
6971 * to trace the queue that the message is intended for. Note
6972 * that the original use was to identify the queue and function
6973 * to call on the drain. In the new syncq, we have the context
6974 * of the queue that we are draining, so call its putproc and
6975 * don't rely on the saved values. But for debug this is still
6976 * useful information.
6977 */
6978 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6979 mp->b_queue = q;
6980 mp->b_next = NULL;
6981 #endif
6982 ASSERT(q->q_syncq == sq);
6983 /*
6984 * Enqueue the message on the list.
6985 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6986 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6987 */
6988 SQPUT_MP(q, mp);
6989 mutex_enter(SQLOCK(sq));
6990 
6991 /*
6992 * And queue on syncq for scheduling, if not already queued.
6993 * Note that we need the SQLOCK for this, and for testing flags
6994 * at the end to see if we will drain. So grab it now, and
6995 * release it before we call qdrain_syncq or return.
6996 */
6997 if (!(q->q_sqflags & Q_SQQUEUED)) {
6998 q->q_spri = curthread->t_pri;
6999 SQPUT_Q(sq, q);
7000 }
7001 #ifdef DEBUG
7002 else {
7003 /*
7004 * All of these conditions MUST be true!
7005 */
7006 ASSERT(sq->sq_tail != NULL);
7007 if (sq->sq_tail == sq->sq_head) {
7008 ASSERT((q->q_sqprev == NULL) &&
7009 (q->q_sqnext == NULL));
7010 } else {
7011 ASSERT((q->q_sqprev != NULL) ||
7012 (q->q_sqnext != NULL));
7013 }
7014 ASSERT(sq->sq_flags & SQ_QUEUED);
7015 ASSERT(q->q_syncqmsgs != 0);
7016 ASSERT(q->q_sqflags & Q_SQQUEUED);
7017 }
7018 #endif
7019 mutex_exit(QLOCK(q));
7020 /*
7021 * SQLOCK is still held, so sq_count can be safely decremented.
7022 */
7023 sq->sq_count--;
7024 
7025 putnext_tail(sq, q, 0);
7026 /* Should not reference sq or q after this point. */
7027 }
7028 
7029 /* End of qfill_syncq */
7030 
7031 /*
7032 * Remove all messages from a syncq (if qp is NULL) or remove all messages
7033 * that would be put into qp by drain_syncq.
7034 * Used when deleting the syncq (qp == NULL) or when detaching
7035 * a queue (qp != NULL).
7036 * Return non-zero if one or more messages were freed.
7037 *
7038 * No need to grab sq_putlocks here. See comment in strsubr.h that explains
7039 * when sq_putlocks are used.
7040 *
7041 * NOTE: This function assumes that it is called from the close() context and
7042 * that all the queues in the syncq are going away. For this reason it doesn't
7043 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
7044 * currently valid, but it would be useful to rethink this function so that
7045 * it behaves properly in other cases as well.
7046 */
7047 int
7048 flush_syncq(syncq_t *sq, queue_t *qp)
7049 {
7050 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
7051 queue_t *q;
7052 int ret = 0;
7053 
7054 mutex_enter(SQLOCK(sq));
7055 
7056 /*
7057 * Before we leave, we need to make sure there are no
7058 * events listed for this queue. All events for this queue
7059 * will just be freed.
7060 */
7061 if (qp != NULL && sq->sq_evhead != NULL) {
7062 ASSERT(sq->sq_flags & SQ_EVENTS);
7063 
7064 mp_prev = NULL;
7065 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
7066 mp_next = bp->b_next;
7067 if (bp->b_queue == qp) {
7068 /* Delete this message */
7069 if (mp_prev != NULL) {
7070 mp_prev->b_next = mp_next;
7071 /*
7072 * Update sq_evtail if the last element
7073 * is removed.
7074 */
7075 if (bp == sq->sq_evtail) {
7076 ASSERT(mp_next == NULL);
7077 sq->sq_evtail = mp_prev;
7078 }
7079 } else
7080 sq->sq_evhead = mp_next;
7081 if (sq->sq_evhead == NULL)
7082 sq->sq_flags &= ~SQ_EVENTS;
7083 bp->b_prev = bp->b_next = NULL;
7084 freemsg(bp);
7085 ret++;
7086 } else {
7087 mp_prev = bp;
7088 }
7089 }
7090 }
7091 
7092 /*
7093 * Walk sq_head and:
7094 *	- if qp is set, remove only qp's messages
7095 *	- if qp is not set, remove the messages of all queues
7096 */
7097 q = sq->sq_head;
7098 while (q != NULL) {
7099 ASSERT(q->q_syncq == sq);
7100 if ((qp == NULL) || (qp == q)) {
7101 /*
7102 * Yank the messages as a list off the queue
7103 */
7104 mp_head = q->q_sqhead;
7105 /*
7106 * We do not have QLOCK(q) here (which is safe due to
7107 * assumptions mentioned above). To obtain the lock we
7108 * need to release SQLOCK which may allow lots of things
7109 * to change upon us. This place requires more analysis.
7110 */
7111 q->q_sqhead = q->q_sqtail = NULL;
7112 ASSERT(mp_head->b_queue &&
7113 mp_head->b_queue->q_syncq == sq);
7114 
7115 /*
7116 * Free each of the messages.
7117 */
7118 for (bp = mp_head; bp != NULL; bp = mp_next) {
7119 mp_next = bp->b_next;
7120 bp->b_prev = bp->b_next = NULL;
7121 freemsg(bp);
7122 ret++;
7123 }
7124 /*
7125 * Now remove the queue from the syncq.
7126 */
7127 ASSERT(q->q_sqflags & Q_SQQUEUED);
7128 SQRM_Q(sq, q);
7129 q->q_spri = 0;
7130 q->q_syncqmsgs = 0;
7131 
7132 /*
7133 * If qp was specified, we are done with it and are
7134 * going to drop SQLOCK(sq) and return. We wakeup syncq
7135 * waiters while we still have the SQLOCK.
7136 */
7137 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7138 sq->sq_flags &= ~SQ_WANTWAKEUP;
7139 cv_broadcast(&sq->sq_wait);
7140 }
7141 /* Drop SQLOCK across clr_qfull */
7142 mutex_exit(SQLOCK(sq));
7143 
7144 /*
7145 * We avoid doing the test that drain_syncq does and
7146 * unconditionally clear qfull for every flushed
7147 * message. Since flush_syncq is only called during
7148 * close this should not be a problem.
7149 */
7150 clr_qfull(q);
7151 if (qp != NULL) {
7152 return (ret);
7153 } else {
7154 mutex_enter(SQLOCK(sq));
7155 /*
7156 * The head was removed by SQRM_Q above.
7157 * Re-read the new head and flush it.
7158 */
7159 q = sq->sq_head;
7160 }
7161 } else {
7162 q = q->q_sqnext;
7163 }
7164 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7165 }
7166 
7167 if (sq->sq_flags & SQ_WANTWAKEUP) {
7168 sq->sq_flags &= ~SQ_WANTWAKEUP;
7169 cv_broadcast(&sq->sq_wait);
7170 }
7171 
7172 mutex_exit(SQLOCK(sq));
7173 return (ret);
7174 }
7175 
7176 /*
7177 * Propagate all messages associated with the specified queue from its syncq
7178 * to the next syncq. If the queue is attached to a driver or if the messages
7179 * have been added due to a qwriter(PERIM_INNER), free the messages.
7180 *
7181 * Assumes that the stream is strlock()'ed. We don't come here if there
7182 * are no messages to propagate.
7183 *
7184 * NOTE : If the queue is attached to a driver, all the messages are freed
7185 * as there is no point in propagating the messages from the driver syncq
7186 * to the closing stream head which will in turn get freed later.
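 *
 * Schematically (illustration only), for a queue qp being removed from
 * the middle of a stream, the deferred messages are spliced onto the
 * next queue's syncq list:
 *
 *	qp:	q_sqhead -> m1 -> ... -> mK <- q_sqtail		(emptied)
 *	nqp:	... existing messages ... -> m1 -> ... -> mK	(appended)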
7187 */
7188 static int
7189 propagate_syncq(queue_t *qp)
7190 {
7191 mblk_t *bp, *head, *tail, *prev, *next;
7192 syncq_t *sq;
7193 queue_t *nqp;
7194 syncq_t *nsq;
7195 boolean_t isdriver;
7196 int moved = 0;
7197 uint16_t flags;
7198 pri_t priority = curthread->t_pri;
7199 #ifdef DEBUG
7200 void (*func)();
7201 #endif
7202 
7203 sq = qp->q_syncq;
7204 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7205 /* debug macro */
7206 SQ_PUTLOCKS_HELD(sq);
7207 /*
7208 * As entersq() does not increment the sq_count for
7209 * the write side, check sq_count for non-QPERQ
7210 * perimeters alone.
7211 */
7212 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7213 
7214 /*
7215 * propagate_syncq() can be called either because of messages on the
7216 * queue syncq or because of events on the queue syncq. Do the actual
7217 * message propagation if there are any messages.
7218 */
7219 if (qp->q_syncqmsgs) {
7220 isdriver = (qp->q_flag & QISDRV);
7221 
7222 if (!isdriver) {
7223 nqp = qp->q_next;
7224 nsq = nqp->q_syncq;
7225 ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7226 /* debug macro */
7227 SQ_PUTLOCKS_HELD(nsq);
7228 #ifdef DEBUG
7229 func = (void (*)())nqp->q_qinfo->qi_putp;
7230 #endif
7231 }
7232 
7233 SQRM_Q(sq, qp);
7234 priority = MAX(qp->q_spri, priority);
7235 qp->q_spri = 0;
7236 head = qp->q_sqhead;
7237 tail = qp->q_sqtail;
7238 qp->q_sqhead = qp->q_sqtail = NULL;
7239 qp->q_syncqmsgs = 0;
7240 
7241 /*
7242 * Walk the list of messages, and free them if this is a driver,
7243 * otherwise reset the b_prev and b_queue values to the new putp.
7244 * Afterward, we will just add the head to the end of the next
7245 * syncq, and point the tail to the end of this one.
7246 */
7247 
7248 for (bp = head; bp != NULL; bp = next) {
7249 next = bp->b_next;
7250 if (isdriver) {
7251 bp->b_prev = bp->b_next = NULL;
7252 freemsg(bp);
7253 continue;
7254 }
7255 /* Change the q values for this message */
7256 bp->b_queue = nqp;
7257 #ifdef DEBUG
7258 bp->b_prev = (mblk_t *)func;
7259 #endif
7260 moved++;
7261 }
7262 /*
7263 * Attach list of messages to the end of the new queue (if there
7264 * is a list of messages).
7265 */
7266 
7267 if (!isdriver && head != NULL) {
7268 ASSERT(tail != NULL);
7269 if (nqp->q_sqhead == NULL) {
7270 nqp->q_sqhead = head;
7271 } else {
7272 ASSERT(nqp->q_sqtail != NULL);
7273 nqp->q_sqtail->b_next = head;
7274 }
7275 nqp->q_sqtail = tail;
7276 /*
7277 * When messages are moved from high priority queue to
7278 * another queue, the destination queue priority is
7279 * upgraded.
7280 */
7281 
7282 if (priority > nqp->q_spri)
7283 nqp->q_spri = priority;
7284 
7285 SQPUT_Q(nsq, nqp);
7286 
7287 nqp->q_syncqmsgs += moved;
7288 ASSERT(nqp->q_syncqmsgs != 0);
7289 }
7290 }
7291 
7292 /*
7293 * Before we leave, we need to make sure there are no
7294 * events listed for this queue. All events for this queue
7295 * will just be freed.
7296 */
7297 if (sq->sq_evhead != NULL) {
7298 ASSERT(sq->sq_flags & SQ_EVENTS);
7299 prev = NULL;
7300 for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7301 next = bp->b_next;
7302 if (bp->b_queue == qp) {
7303 /* Delete this message */
7304 if (prev != NULL) {
7305 prev->b_next = next;
7306 /*
7307 * Update sq_evtail if the last element
7308 * is removed.
7309 */ 7310 if (bp == sq->sq_evtail) { 7311 ASSERT(next == NULL); 7312 sq->sq_evtail = prev; 7313 } 7314 } else 7315 sq->sq_evhead = next; 7316 if (sq->sq_evhead == NULL) 7317 sq->sq_flags &= ~SQ_EVENTS; 7318 bp->b_prev = bp->b_next = NULL; 7319 freemsg(bp); 7320 } else { 7321 prev = bp; 7322 } 7323 } 7324 } 7325 7326 flags = sq->sq_flags; 7327 7328 /* Wake up any waiter before leaving. */ 7329 if (flags & SQ_WANTWAKEUP) { 7330 flags &= ~SQ_WANTWAKEUP; 7331 cv_broadcast(&sq->sq_wait); 7332 } 7333 sq->sq_flags = flags; 7334 7335 return (moved); 7336 } 7337 7338 /* 7339 * Try and upgrade to exclusive access at the inner perimeter. If this can 7340 * not be done without blocking then request will be queued on the syncq 7341 * and drain_syncq will run it later. 7342 * 7343 * This routine can only be called from put or service procedures plus 7344 * asynchronous callback routines that have properly entered the queue (with 7345 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq 7346 * associated with q. 7347 */ 7348 void 7349 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7350 { 7351 syncq_t *sq = q->q_syncq; 7352 uint16_t count; 7353 7354 mutex_enter(SQLOCK(sq)); 7355 count = sq->sq_count; 7356 SQ_PUTLOCKS_ENTER(sq); 7357 SUM_SQ_PUTCOUNTS(sq, count); 7358 ASSERT(count >= 1); 7359 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7360 7361 if (count == 1) { 7362 /* 7363 * Can upgrade. This case also handles nested qwriter calls 7364 * (when the qwriter callback function calls qwriter). In that 7365 * case SQ_EXCL is already set. 7366 */ 7367 sq->sq_flags |= SQ_EXCL; 7368 SQ_PUTLOCKS_EXIT(sq); 7369 mutex_exit(SQLOCK(sq)); 7370 (*func)(q, mp); 7371 /* 7372 * Assumes that leavesq, putnext, and drain_syncq will reset 7373 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7374 * until putnext, leavesq, or drain_syncq drops it. 7375 * That way we handle nested qwriter(INNER) without dropping 7376 * SQ_EXCL until the outermost qwriter callback routine is 7377 * done. 7378 */ 7379 return; 7380 } 7381 SQ_PUTLOCKS_EXIT(sq); 7382 sqfill_events(sq, q, mp, func); 7383 } 7384 7385 /* 7386 * Synchronous callback support functions 7387 */ 7388 7389 /* 7390 * Allocate a callback parameter structure. 7391 * Assumes that caller initializes the flags and the id. 7392 * Acquires SQLOCK(sq) if non-NULL is returned. 7393 */ 7394 callbparams_t * 7395 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7396 { 7397 callbparams_t *cbp; 7398 size_t size = sizeof (callbparams_t); 7399 7400 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7401 7402 /* 7403 * Only try tryhard allocation if the caller is ready to panic. 7404 * Otherwise just fail. 
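 *
 * For example (a sketch of the intended contract, not a call site from
 * this file): a caller that can tolerate failure passes KM_NOSLEEP and
 * checks for a NULL return, while a caller that must succeed passes
 * KM_NOSLEEP | KM_PANIC and relies on the kmem_alloc_tryhard() fallback
 * below.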
7405 */ 7406 if (cbp == NULL) { 7407 if (kmflags & KM_PANIC) 7408 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7409 &size, kmflags); 7410 else 7411 return (NULL); 7412 } 7413 7414 ASSERT(size >= sizeof (callbparams_t)); 7415 cbp->cbp_size = size; 7416 cbp->cbp_sq = sq; 7417 cbp->cbp_func = func; 7418 cbp->cbp_arg = arg; 7419 mutex_enter(SQLOCK(sq)); 7420 cbp->cbp_next = sq->sq_callbpend; 7421 sq->sq_callbpend = cbp; 7422 return (cbp); 7423 } 7424 7425 void 7426 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7427 { 7428 callbparams_t **pp, *p; 7429 7430 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7431 7432 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7433 if (p == cbp) { 7434 *pp = p->cbp_next; 7435 kmem_free(p, p->cbp_size); 7436 return; 7437 } 7438 } 7439 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7440 "callbparams_free: not found\n")); 7441 } 7442 7443 void 7444 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7445 { 7446 callbparams_t **pp, *p; 7447 7448 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7449 7450 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7451 if (p->cbp_id == id && p->cbp_flags == flag) { 7452 *pp = p->cbp_next; 7453 kmem_free(p, p->cbp_size); 7454 return; 7455 } 7456 } 7457 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7458 "callbparams_free_id: not found\n")); 7459 } 7460 7461 /* 7462 * Callback wrapper function used by once-only callbacks that can be 7463 * cancelled (qtimeout and qbufcall) 7464 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7465 * cancelled by the qun* functions. 7466 */ 7467 void 7468 qcallbwrapper(void *arg) 7469 { 7470 callbparams_t *cbp = arg; 7471 syncq_t *sq; 7472 uint16_t count = 0; 7473 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 7474 uint16_t type; 7475 7476 sq = cbp->cbp_sq; 7477 mutex_enter(SQLOCK(sq)); 7478 type = sq->sq_type; 7479 if (!(type & SQ_CICB)) { 7480 count = sq->sq_count; 7481 SQ_PUTLOCKS_ENTER(sq); 7482 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 7483 SUM_SQ_PUTCOUNTS(sq, count); 7484 sq->sq_needexcl++; 7485 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 7486 waitflags |= SQ_MESSAGES; 7487 } 7488 /* Can not handle exclusive entry at outer perimeter */ 7489 ASSERT(type & SQ_COCB); 7490 7491 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) &&count != 0)) { 7492 if ((sq->sq_callbflags & cbp->cbp_flags) && 7493 (sq->sq_cancelid == cbp->cbp_id)) { 7494 /* timeout has been cancelled */ 7495 sq->sq_callbflags |= SQ_CALLB_BYPASSED; 7496 callbparams_free(sq, cbp); 7497 if (!(type & SQ_CICB)) { 7498 ASSERT(sq->sq_needexcl > 0); 7499 sq->sq_needexcl--; 7500 if (sq->sq_needexcl == 0) { 7501 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7502 } 7503 SQ_PUTLOCKS_EXIT(sq); 7504 } 7505 mutex_exit(SQLOCK(sq)); 7506 return; 7507 } 7508 sq->sq_flags |= SQ_WANTWAKEUP; 7509 if (!(type & SQ_CICB)) { 7510 SQ_PUTLOCKS_EXIT(sq); 7511 } 7512 cv_wait(&sq->sq_wait, SQLOCK(sq)); 7513 if (!(type & SQ_CICB)) { 7514 count = sq->sq_count; 7515 SQ_PUTLOCKS_ENTER(sq); 7516 SUM_SQ_PUTCOUNTS(sq, count); 7517 } 7518 } 7519 7520 sq->sq_count++; 7521 ASSERT(sq->sq_count != 0); /* Wraparound */ 7522 if (!(type & SQ_CICB)) { 7523 ASSERT(count == 0); 7524 sq->sq_flags |= SQ_EXCL; 7525 ASSERT(sq->sq_needexcl > 0); 7526 sq->sq_needexcl--; 7527 if (sq->sq_needexcl == 0) { 7528 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7529 } 7530 SQ_PUTLOCKS_EXIT(sq); 7531 } 7532 7533 mutex_exit(SQLOCK(sq)); 7534 7535 cbp->cbp_func(cbp->cbp_arg); 7536 7537 /* 7538 * We drop the lock only for leavesq to re-acquire it. 
7539 * Possible optimization is inline of leavesq. 7540 */ 7541 mutex_enter(SQLOCK(sq)); 7542 callbparams_free(sq, cbp); 7543 mutex_exit(SQLOCK(sq)); 7544 leavesq(sq, SQ_CALLBACK); 7545 } 7546 7547 /* 7548 * No need to grab sq_putlocks here. See comment in strsubr.h that 7549 * explains when sq_putlocks are used. 7550 * 7551 * sq_count (or one of the sq_putcounts) has already been 7552 * decremented by the caller, and if SQ_QUEUED, we need to call 7553 * drain_syncq (the global syncq drain). 7554 * If putnext_tail is called with the SQ_EXCL bit set, we are in 7555 * one of two states, non-CIPUT perimeter, and we need to clear 7556 * it, or we went exclusive in the put procedure. In any case, 7557 * we want to clear the bit now, and it is probably easier to do 7558 * this at the beginning of this function (remember, we hold 7559 * the SQLOCK). Lastly, if there are other messages queued 7560 * on the syncq (and not for our destination), enable the syncq 7561 * for background work. 7562 */ 7563 7564 /* ARGSUSED */ 7565 void 7566 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7567 { 7568 uint16_t flags = sq->sq_flags; 7569 7570 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7571 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7572 7573 /* Clear SQ_EXCL if set in passflags */ 7574 if (passflags & SQ_EXCL) { 7575 flags &= ~SQ_EXCL; 7576 } 7577 if (flags & SQ_WANTWAKEUP) { 7578 flags &= ~SQ_WANTWAKEUP; 7579 cv_broadcast(&sq->sq_wait); 7580 } 7581 if (flags & SQ_WANTEXWAKEUP) { 7582 flags &= ~SQ_WANTEXWAKEUP; 7583 cv_broadcast(&sq->sq_exitwait); 7584 } 7585 sq->sq_flags = flags; 7586 7587 /* 7588 * We have cleared SQ_EXCL if we were asked to, and started 7589 * the wakeup process for waiters. If there are no writers 7590 * then we need to drain the syncq if we were told to, or 7591 * enable the background thread to do it. 7592 */ 7593 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7594 if ((passflags & SQ_QUEUED) || 7595 (sq->sq_svcflags & SQ_DISABLED)) { 7596 /* drain_syncq will take care of events in the list */ 7597 drain_syncq(sq); 7598 return; 7599 } else if (flags & SQ_QUEUED) { 7600 sqenable(sq); 7601 } 7602 } 7603 /* Drop the SQLOCK on exit */ 7604 mutex_exit(SQLOCK(sq)); 7605 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7606 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7607 } 7608 7609 void 7610 set_qend(queue_t *q) 7611 { 7612 mutex_enter(QLOCK(q)); 7613 if (!O_SAMESTR(q)) 7614 q->q_flag |= QEND; 7615 else 7616 q->q_flag &= ~QEND; 7617 mutex_exit(QLOCK(q)); 7618 q = _OTHERQ(q); 7619 mutex_enter(QLOCK(q)); 7620 if (!O_SAMESTR(q)) 7621 q->q_flag |= QEND; 7622 else 7623 q->q_flag &= ~QEND; 7624 mutex_exit(QLOCK(q)); 7625 } 7626 7627 /* 7628 * Set QFULL in next service procedure queue (that cares) if not already 7629 * set and if there are already more messages on the syncq than 7630 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7631 * any syncq. 7632 * 7633 * The fq here is the next queue with a service procedure. This is where 7634 * we would fail canputnext, so this is where we need to set QFULL. 7635 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7636 * 7637 * We already have QLOCK at this point. To avoid cross-locks with 7638 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7639 * SQLOCK and sd_reflock, we need to drop respective locks first. 
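 *
 * For example (assuming sq_max_size is tuned to a non-zero value), once
 * q_syncqmsgs climbs above sq_max_size the next service procedure queue
 * gets QFULL set, so a later canputnext() against it fails until the
 * backlog drains and clr_qfull() clears the flag again.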
7640 */ 7641 void 7642 set_qfull(queue_t *q) 7643 { 7644 queue_t *fq = NULL; 7645 7646 ASSERT(MUTEX_HELD(QLOCK(q))); 7647 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7648 (q->q_syncqmsgs > sq_max_size)) { 7649 if ((fq = q->q_nfsrv) == q) { 7650 fq->q_flag |= QFULL; 7651 } else { 7652 mutex_exit(QLOCK(q)); 7653 mutex_enter(QLOCK(fq)); 7654 fq->q_flag |= QFULL; 7655 mutex_exit(QLOCK(fq)); 7656 mutex_enter(QLOCK(q)); 7657 } 7658 } 7659 } 7660 7661 void 7662 clr_qfull(queue_t *q) 7663 { 7664 queue_t *oq = q; 7665 7666 q = q->q_nfsrv; 7667 /* Fast check if there is any work to do before getting the lock. */ 7668 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7669 return; 7670 } 7671 7672 /* 7673 * Do not reset QFULL (and backenable) if the q_count is the reason 7674 * for QFULL being set. 7675 */ 7676 mutex_enter(QLOCK(q)); 7677 /* 7678 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7679 * Hence clear the QFULL. 7680 * If both q_count and q_mblkcnt are less than the hiwat mark, 7681 * clear the QFULL. 7682 */ 7683 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) && 7684 (q->q_mblkcnt < q->q_hiwat))) { 7685 q->q_flag &= ~QFULL; 7686 /* 7687 * A little more confusing, how about this way: 7688 * if someone wants to write, 7689 * AND 7690 * both counts are less than the lowat mark 7691 * OR 7692 * the lowat mark is zero 7693 * THEN 7694 * backenable 7695 */ 7696 if ((q->q_flag & QWANTW) && 7697 (((q->q_count < q->q_lowat) && 7698 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7699 q->q_flag &= ~QWANTW; 7700 mutex_exit(QLOCK(q)); 7701 backenable(oq, 0); 7702 } else 7703 mutex_exit(QLOCK(q)); 7704 } else 7705 mutex_exit(QLOCK(q)); 7706 } 7707 7708 /* 7709 * Set the forward service procedure pointer. 7710 * 7711 * Called at insert-time to cache a queue's next forward service procedure in 7712 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7713 * has a service procedure then q_nfsrv points to itself. If the queue to be 7714 * inserted does not have a service procedure, then q_nfsrv points to the next 7715 * queue forward that has a service procedure. If the queue is at the logical 7716 * end of the stream (driver for write side, stream head for the read side) 7717 * and does not have a service procedure, then q_nfsrv also points to itself. 7718 */ 7719 void 7720 set_nfsrv_ptr( 7721 queue_t *rnew, /* read queue pointer to new module */ 7722 queue_t *wnew, /* write queue pointer to new module */ 7723 queue_t *prev_rq, /* read queue pointer to the module above */ 7724 queue_t *prev_wq) /* write queue pointer to the module above */ 7725 { 7726 queue_t *qp; 7727 7728 if (prev_wq->q_next == NULL) { 7729 /* 7730 * Insert the driver, initialize the driver and stream head. 7731 * In this case, prev_rq/prev_wq should be the stream head. 7732 * _I_INSERT does not allow inserting a driver. Make sure 7733 * that it is not an insertion. 7734 */ 7735 ASSERT(!(rnew->q_flag & _QINSERTING)); 7736 wnew->q_nfsrv = wnew; 7737 if (rnew->q_qinfo->qi_srvp) 7738 rnew->q_nfsrv = rnew; 7739 else 7740 rnew->q_nfsrv = prev_rq; 7741 prev_rq->q_nfsrv = prev_rq; 7742 prev_wq->q_nfsrv = prev_wq; 7743 } else { 7744 /* 7745 * set up read side q_nfsrv pointer. This MUST be done 7746 * before setting the write side, because the setting of 7747 * the write side for a fifo may depend on it. 7748 * 7749 * Suppose we have a fifo that only has pipemod pushed. 
7750 * pipemod has no read or write service procedures, so
7751 * nfsrv for both pipemod queues points to prev_rq (the
7752 * stream read head). Now push bufmod (which has only a
7753 * read service procedure). Doing the write side first,
7754 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7755 * is WRONG; the next queue forward from wnew with a
7756 * service procedure will be rnew, not the stream read head.
7757 * Since the downstream queue (which in the case of a fifo
7758 * is the read queue rnew) can affect upstream queues, it
7759 * needs to be done first. Setting up the read side first
7760 * sets nfsrv for both pipemod queues to rnew and then
7761 * when the write side is set up, wnew->q_nfsrv will also
7762 * point to rnew.
7763 */
7764 if (rnew->q_qinfo->qi_srvp) {
7765 /*
7766 * use _OTHERQ() because, if this is a pipe, next
7767 * module may have been pushed from other end and
7768 * q_next could be a read queue.
7769 */
7770 qp = _OTHERQ(prev_wq->q_next);
7771 while (qp && qp->q_nfsrv != qp) {
7772 qp->q_nfsrv = rnew;
7773 qp = backq(qp);
7774 }
7775 rnew->q_nfsrv = rnew;
7776 } else
7777 rnew->q_nfsrv = prev_rq->q_nfsrv;
7778 
7779 /* set up write side q_nfsrv pointer */
7780 if (wnew->q_qinfo->qi_srvp) {
7781 wnew->q_nfsrv = wnew;
7782 
7783 /*
7784 * For insertion, need to update nfsrv of the modules
7785 * above which do not have a service routine.
7786 */
7787 if (rnew->q_flag & _QINSERTING) {
7788 for (qp = prev_wq;
7789 qp != NULL && qp->q_nfsrv != qp;
7790 qp = backq(qp)) {
7791 qp->q_nfsrv = wnew->q_nfsrv;
7792 }
7793 }
7794 } else {
7795 if (prev_wq->q_next == prev_rq)
7796 /*
7797 * Since prev_wq/prev_rq are the middle of a
7798 * fifo, wnew/rnew will also be the middle of
7799 * a fifo and wnew's nfsrv is same as rnew's.
7800 */
7801 wnew->q_nfsrv = rnew->q_nfsrv;
7802 else
7803 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7804 }
7805 }
7806 }
7807 
7808 /*
7809 * Reset the forward service procedure pointer; called at remove-time.
7810 */
7811 void
7812 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7813 {
7814 queue_t *tmp_qp;
7815 
7816 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7817 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7818 for (tmp_qp = backq(wqp);
7819 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7820 tmp_qp = backq(tmp_qp)) {
7821 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7822 }
7823 }
7824 
7825 /* reset the read side q_nfsrv pointer */
7826 if (rqp->q_qinfo->qi_srvp) {
7827 if (wqp->q_next) {	/* non-driver case */
7828 tmp_qp = _OTHERQ(wqp->q_next);
7829 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7830 /* Note that rqp->q_next cannot be NULL */
7831 ASSERT(rqp->q_next != NULL);
7832 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7833 tmp_qp = backq(tmp_qp);
7834 }
7835 }
7836 }
7837 }
7838 
7839 /*
7840 * This routine should be called after all stream geometry changes to update
7841 * the stream head cached struio() rd/wr queue pointers. Note that it must
7842 * be called with the stream streamlock()'ed.
7843 *
7844 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7845 * an explicit synchronous barrier module queue. That is, a queue that
7846 * has specified a struio() type.
7847 */
7848 static void
7849 strsetuio(stdata_t *stp)
7850 {
7851 queue_t *wrq;
7852 
7853 if (stp->sd_flag & STPLEX) {
7854 /*
7855 * Not streamhead, but a mux, so no Synchronous STREAMS.
7856 */
7857 stp->sd_struiowrq = NULL;
7858 stp->sd_struiordq = NULL;
7859 return;
7860 }
7861 /*
7862 * Scan the write queue(s) while synchronous
7863 * until we find a qinfo uio type specified.
7864 */
7865 wrq = stp->sd_wrq->q_next;
7866 while (wrq) {
7867 if (wrq->q_struiot == STRUIOT_NONE) {
7868 wrq = 0;
7869 break;
7870 }
7871 if (wrq->q_struiot != STRUIOT_DONTCARE)
7872 break;
7873 if (! _SAMESTR(wrq)) {
7874 wrq = 0;
7875 break;
7876 }
7877 wrq = wrq->q_next;
7878 }
7879 stp->sd_struiowrq = wrq;
7880 /*
7881 * Scan the read queue(s) while synchronous
7882 * until we find a qinfo uio type specified.
7883 */
7884 wrq = stp->sd_wrq->q_next;
7885 while (wrq) {
7886 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7887 wrq = 0;
7888 break;
7889 }
7890 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7891 break;
7892 if (! _SAMESTR(wrq)) {
7893 wrq = 0;
7894 break;
7895 }
7896 wrq = wrq->q_next;
7897 }
7898 stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7899 }
7900 
7901 /*
7902 * pass_wput unblocks the passthru queue so that messages
7903 * can arrive at the mux's lower read queue before the
7904 * I_LINK/I_UNLINK is acked/nacked.
7905 */
7906 static void
7907 pass_wput(queue_t *q, mblk_t *mp)
7908 {
7909 syncq_t *sq;
7910 
7911 sq = _RD(q)->q_syncq;
7912 if (sq->sq_flags & SQ_BLOCKED)
7913 unblocksq(sq, SQ_BLOCKED, 0);
7914 putnext(q, mp);
7915 }
7916 
7917 /*
7918 * Set up queues for the link/unlink.
7919 * Create a new queue and block it and then insert it
7920 * below the stream head on the lower stream.
7921 * This prevents any messages from arriving during the setq
7922 * as well as while the mux is processing the I_LINK/I_UNLINK.
7923 * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7924 * been acked or nacked or if a message is generated and sent
7925 * down the mux's write put procedure.
7926 * See pass_wput().
7927 *
7928 * After the new queue is inserted, all messages coming from below are
7929 * blocked. The call to strlock will ensure that all activity in the
7930 * stream head read queue syncq is stopped (sq_count drops to zero).
7931 */
7932 static queue_t *
7933 link_addpassthru(stdata_t *stpdown)
7934 {
7935 queue_t *passq;
7936 sqlist_t sqlist;
7937 
7938 passq = allocq();
7939 STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7940 /* setq might sleep in allocator - avoid holding locks. */
7941 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7942 SQ_CI|SQ_CO, B_FALSE);
7943 claimq(passq);
7944 blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7945 insertq(STREAM(passq), passq);
7946 
7947 /*
7948 * Use strlock() to wait for the stream head sq_count to drop to zero
7949 * since we are going to change q_ptr in the stream head. Note that
7950 * insertq() doesn't wait for any syncq counts to drop to zero.
7951 */
7952 sqlist.sqlist_head = NULL;
7953 sqlist.sqlist_index = 0;
7954 sqlist.sqlist_size = sizeof (sqlist_t);
7955 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7956 strlock(stpdown, &sqlist);
7957 strunlock(stpdown, &sqlist);
7958 
7959 releaseq(passq);
7960 return (passq);
7961 }
7962 
7963 /*
7964 * Let messages flow up into the mux by removing
7965 * the passq.
7966 */
7967 static void
7968 link_rempassthru(queue_t *passq)
7969 {
7970 claimq(passq);
7971 removeq(passq);
7972 releaseq(passq);
7973 freeq(passq);
7974 }
7975 
7976 /*
7977 * Wait for the condition variable pointed to by `cvp' to be signaled,
7978 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7979 * is negative, then there is no time limit.

/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first.  If `tim'
 * is negative, there is no time limit.  If `nosigs' is non-zero, the
 * wait is non-interruptible.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
        clock_t ret;

        if (tim < 0) {
                if (nosigs) {
                        cv_wait(cvp, mp);
                        ret = 1;
                } else {
                        ret = cv_wait_sig(cvp, mp);
                }
        } else if (tim > 0) {
                /*
                 * Convert milliseconds to clock ticks.
                 */
                if (nosigs) {
                        ret = cv_reltimedwait(cvp, mp,
                            MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
                } else {
                        ret = cv_reltimedwait_sig(cvp, mp,
                            MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
                }
        } else {
                ret = -1;
        }
        return (ret);
}

/*
 * Wait until the stream head can determine whether it is at the mark, but
 * don't wait forever, to prevent a race condition between the "mark" state
 * in the stream head and any mark state in the caller/user of this routine.
 *
 * This is used by sockets; for a socket it would be incorrect
 * to return a failure for SIOCATMARK when there is no data in the receive
 * queue and the marked urgent data is traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of these
 * three events:
 *      The stream head read queue becoming non-empty (including an EOF).
 *      The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *      The STRNOTATMARK flag being set (which indicates that the transport
 *      has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *      the mark).
 *
 * Returns 1 if the stream is at the mark, 0 if it can be determined that
 * the stream is not at the mark, and -1 if the wait times out before it
 * can be determined whether or not the stream might be at the mark.
 *
 * Note: This routine should only be used when a mark is pending, i.e.,
 * in the socket case after the SIGURG has been posted.
 * Note2: This cannot wake up just because synchronous streams indicate
 * that data is available, since it is not possible to use the synchronous
 * streams interfaces to determine the b_flag value for the data queued
 * below the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
        struct stdata *stp = vp->v_stream;
        queue_t *rq = _RD(stp->sd_wrq);
        int mark;

        mutex_enter(&stp->sd_lock);
        while (rq->q_first == NULL &&
            !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
                stp->sd_flag |= RSLEEP;

                /* Wait for 100 milliseconds for any state change. */
                if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
                        mutex_exit(&stp->sd_lock);
                        return (-1);
                }
        }
        if (stp->sd_flag & STRATMARK)
                mark = 1;
        else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
                mark = 1;
        else
                mark = 0;

        mutex_exit(&stp->sd_lock);
        return (mark);
}
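
/*
 * Illustrative use (compiled out): a SIOCATMARK-style ioctl handler maps
 * strwaitmark()'s three-way result onto the user-visible answer. The
 * handler below is hypothetical.
 */
#if 0
static int
example_atmark(vnode_t *vp, int *resultp)
{
        switch (strwaitmark(vp)) {
        case 1:                 /* stream is at the mark */
                *resultp = 1;
                return (0);
        case 0:                 /* definitely not at the mark */
                *resultp = 0;
                return (0);
        default:                /* -1: timed out, still unknown */
                return (EWOULDBLOCK);
        }
}
#endif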

/*
 * Set a read side error.  If persist is set, make the error persistent.
 * If errfunc is set, install the function as the exported error handler.
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);
        stp->sd_rerror = error;
        if (error == 0 && errfunc == NULL)
                stp->sd_flag &= ~STRDERR;
        else
                stp->sd_flag |= STRDERR;
        if (persist) {
                stp->sd_flag &= ~STRDERRNONPERSIST;
        } else {
                stp->sd_flag |= STRDERRNONPERSIST;
        }
        stp->sd_rderrfunc = errfunc;
        if (error != 0 || errfunc != NULL) {
                cv_broadcast(&_RD(stp->sd_wrq)->q_wait);        /* readers */
                cv_broadcast(&stp->sd_wrq->q_wait);             /* writers */
                cv_broadcast(&stp->sd_monitor);                 /* ioctllers */

                mutex_exit(&stp->sd_lock);
                pollwakeup(&stp->sd_pollist, POLLERR);
                mutex_enter(&stp->sd_lock);

                if (stp->sd_sigflags & S_ERROR)
                        strsendsig(stp->sd_siglist, S_ERROR, 0, error);
        }
        mutex_exit(&stp->sd_lock);
}

/*
 * Set a write side error.  If persist is set, make the error persistent.
 * If errfunc is set, install the function as the exported error handler.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);
        stp->sd_werror = error;
        if (error == 0 && errfunc == NULL)
                stp->sd_flag &= ~STWRERR;
        else
                stp->sd_flag |= STWRERR;
        if (persist) {
                stp->sd_flag &= ~STWRERRNONPERSIST;
        } else {
                stp->sd_flag |= STWRERRNONPERSIST;
        }
        stp->sd_wrerrfunc = errfunc;
        if (error != 0 || errfunc != NULL) {
                cv_broadcast(&_RD(stp->sd_wrq)->q_wait);        /* readers */
                cv_broadcast(&stp->sd_wrq->q_wait);             /* writers */
                cv_broadcast(&stp->sd_monitor);                 /* ioctllers */

                mutex_exit(&stp->sd_lock);
                pollwakeup(&stp->sd_pollist, POLLERR);
                mutex_enter(&stp->sd_lock);

                if (stp->sd_sigflags & S_ERROR)
                        strsendsig(stp->sd_siglist, S_ERROR, 0, error);
        }
        mutex_exit(&stp->sd_lock);
}
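
/*
 * Illustrative use (compiled out): on a fatal transport error a stream
 * head client such as sockfs might mark both sides of the stream in error;
 * a persistent error survives subsequent reads. The wrapper below is
 * hypothetical.
 */
#if 0
static void
example_seterror(vnode_t *vp, int error)
{
        strsetrerror(vp, error, 1, NULL);       /* persistent read error */
        strsetwerror(vp, error, 1, NULL);       /* persistent write error */
}
#endif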

/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on the write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);
        if (!eof) {
                stp->sd_flag &= ~STREOF;
                mutex_exit(&stp->sd_lock);
                return;
        }
        stp->sd_flag |= STREOF;
        if (stp->sd_flag & RSLEEP) {
                stp->sd_flag &= ~RSLEEP;
                cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
        }

        mutex_exit(&stp->sd_lock);
        pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
        mutex_enter(&stp->sd_lock);

        if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
                strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
        mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);
        flushq(_RD(stp->sd_wrq), flag);
        mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);

        if (protofunc == NULL)
                stp->sd_rprotofunc = strrput_proto;
        else
                stp->sd_rprotofunc = protofunc;

        if (miscfunc == NULL)
                stp->sd_rmiscfunc = strrput_misc;
        else
                stp->sd_rmiscfunc = miscfunc;

        if (flags & SH_CONSOL_DATA)
                stp->sd_rput_opt |= SR_CONSOL_DATA;
        else
                stp->sd_rput_opt &= ~SR_CONSOL_DATA;

        if (flags & SH_SIGALLDATA)
                stp->sd_rput_opt |= SR_SIGALLDATA;
        else
                stp->sd_rput_opt &= ~SR_SIGALLDATA;

        if (flags & SH_IGN_ZEROLEN)
                stp->sd_rput_opt |= SR_IGN_ZEROLEN;
        else
                stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

        mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);
        stp->sd_closetime = closetime;

        if (flags & SH_SIGPIPE)
                stp->sd_wput_opt |= SW_SIGPIPE;
        else
                stp->sd_wput_opt &= ~SW_SIGPIPE;
        if (flags & SH_RECHECK_ERR)
                stp->sd_wput_opt |= SW_RECHECK_ERR;
        else
                stp->sd_wput_opt &= ~SW_RECHECK_ERR;

        mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
        struct stdata *stp = vp->v_stream;

        mutex_enter(&stp->sd_lock);

        stp->sd_rputdatafunc = rdatafunc;
        stp->sd_wputdatafunc = wdatafunc;

        mutex_exit(&stp->sd_lock);
}
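
/*
 * Illustrative use (compiled out): a stream head client installs its
 * put-side hooks and options in one place, typically at open time. The
 * flag choices below are hypothetical, as is the assumption that
 * sd_closetime is expressed in clock ticks.
 */
#if 0
static void
example_sethooks(vnode_t *vp)
{
        /* Consolidate M_DATA and signal on every data arrival. */
        strsetrputhooks(vp, SH_CONSOL_DATA | SH_SIGALLDATA, NULL, NULL);
        /* Post SIGPIPE on writes to a broken stream; 15 s close timeout. */
        strsetwputhooks(vp, SH_SIGPIPE, MSEC_TO_TICK(15000));
}
#endif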

/* Used within the framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
        stdata_t *stp = STREAM(q);

        ASSERT(MUTEX_HELD(QLOCK(q)));

        if (!q->q_qinfo->qi_srvp)
                return;

        /*
         * Do not place on the run queue if already enabled or closing.
         */
        if (q->q_flag & (QWCLOSE|QENAB))
                return;

        /*
         * Mark the queue enabled and place it on the run list if it is not
         * already being serviced. If it is being serviced, runservice()
         * will detect that QENAB is set and call the service procedure
         * before clearing the QINSERVICE flag.
         */
        q->q_flag |= QENAB;
        if (q->q_flag & QINSERVICE)
                return;

        /* Record the time of qenable */
        q->q_qtstamp = ddi_get_lbolt();

        /*
         * Put the queue in the stp list and schedule it for background
         * processing if it is not already scheduled, or if the stream head
         * does not intend to process it in the foreground later (indicated
         * by the STRS_WILLSERVICE flag).
         */
        mutex_enter(&stp->sd_qlock);
        /*
         * If there is already something on the list, the stp flags should
         * show an intention to drain it.
         */
        IMPLY(STREAM_NEEDSERVICE(stp),
            (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

        ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
        stp->sd_nqueues++;

        /*
         * If no one will drain this stream, we are the first producer and
         * need to schedule it for the background thread.
         */
        if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
                /*
                 * No one will service this stream later, so we have to
                 * schedule it now.
                 */
                STRSTAT(stenables);
                stp->sd_svcflags |= STRS_SCHEDULED;
                stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
                    (task_func_t *)stream_service, stp,
                    TQ_NOSLEEP|TQ_NOQUEUE);

                if (stp->sd_servid == NULL) {
                        /*
                         * The task queue failed, so fail over to the backup
                         * servicing thread.
                         */
                        STRSTAT(taskqfails);
                        /*
                         * It is safe to clear the STRS_SCHEDULED flag
                         * because it was set by this thread above.
                         */
                        stp->sd_svcflags &= ~STRS_SCHEDULED;

                        /*
                         * Failover scheduling is protected by the
                         * service_queue lock.
                         */
                        mutex_enter(&service_queue);
                        ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
                        ASSERT(q->q_link == NULL);
                        /*
                         * Append the queue to the qhead/qtail list.
                         */
                        if (qhead == NULL)
                                qhead = q;
                        else
                                qtail->q_link = q;
                        qtail = q;
                        /*
                         * Clear the stp queue list.
                         */
                        stp->sd_qhead = stp->sd_qtail = NULL;
                        stp->sd_nqueues = 0;
                        /*
                         * Wake up the background queue processing thread.
                         */
                        cv_signal(&services_to_run);
                        mutex_exit(&service_queue);
                }
        }
        mutex_exit(&stp->sd_qlock);
}
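
/*
 * For reference (compiled out): callers outside the framework reach
 * qenable_locked() through a wrapper that takes QLOCK, along the lines of
 * the sketch below; this is not necessarily the exact exported qenable().
 */
#if 0
void
qenable(queue_t *q)
{
        mutex_enter(QLOCK(q));
        qenable_locked(q);
        mutex_exit(QLOCK(q));
}
#endif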

static void
queue_service(queue_t *q)
{
        /*
         * The queue in the list should have QENAB set and should not have
         * QINSERVICE set.  QINSERVICE is set when the queue is dequeued,
         * and qenable_locked() doesn't enqueue a queue with QINSERVICE set.
         */
        ASSERT(!(q->q_flag & QINSERVICE));
        ASSERT((q->q_flag & QENAB));
        mutex_enter(QLOCK(q));
        q->q_flag &= ~QENAB;
        q->q_flag |= QINSERVICE;
        mutex_exit(QLOCK(q));
        runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
        STRSTAT(syncqservice);
        mutex_enter(SQLOCK(sq));
        ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
        ASSERT(sq->sq_servcount != 0);
        ASSERT(sq->sq_next == NULL);

        /* If we came here from the background thread, clear the flag. */
        if (sq->sq_svcflags & SQ_BGTHREAD)
                sq->sq_svcflags &= ~SQ_BGTHREAD;

        /* Let drain_syncq() know that it is being called in the background. */
        sq->sq_svcflags |= SQ_SERVICE;
        drain_syncq(sq);
}

static void
qwriter_outer_service(syncq_t *outer)
{
        /*
         * Note that SQ_WRITER is used on the outer perimeter to signal
         * that a qwriter(OUTER) is either preparing to run or actually
         * running a function.
         */
        outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

        /*
         * All inner syncqs are empty and have SQ_WRITER set, to block
         * entry into the outer perimeter.
         *
         * We do not need to call write_now() explicitly, since
         * outer_exit() does it for us.
         */
        outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
        dblk_t *dbp = mp->b_datap;
        frtn_t *frp = dbp->db_frtnp;

        mp->b_next = NULL;
        if (dbp->db_fthdr != NULL)
                str_ftfree(dbp);

        ASSERT(dbp->db_fthdr == NULL);
        frp->free_func(frp->free_arg);
        ASSERT(dbp->db_mblk == mp);

        if (dbp->db_credp != NULL) {
                crfree(dbp->db_credp);
                dbp->db_credp = NULL;
        }
        dbp->db_cpid = -1;
        dbp->db_struioflag = 0;
        dbp->db_struioun.cksum.flags = 0;

        kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
        queue_t *q;

        mutex_enter(&stp->sd_qlock);

        STR_SERVICE(stp, q);

        stp->sd_svcflags &= ~STRS_SCHEDULED;
        stp->sd_servid = NULL;
        cv_signal(&stp->sd_qcv);
        mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
        queue_t *q;

        mutex_enter(&stp->sd_qlock);
        STRSTAT(rservice);
        /*
         * We are going to drain this stream queue list, so qenable_locked
         * will not schedule it until we finish.
         */
        stp->sd_svcflags |= STRS_WILLSERVICE;

        STR_SERVICE(stp, q);

        stp->sd_svcflags &= ~STRS_WILLSERVICE;
        mutex_exit(&stp->sd_qlock);
        /*
         * Help the backup background thread to drain the qhead/qtail list.
         */
        while (qhead != NULL) {
                STRSTAT(qhelps);
                mutex_enter(&service_queue);
                DQ(q, qhead, qtail, q_link);
                mutex_exit(&service_queue);
                if (q != NULL)
                        queue_service(q);
        }
}

void
stream_willservice(stdata_t *stp)
{
        mutex_enter(&stp->sd_qlock);
        stp->sd_svcflags |= STRS_WILLSERVICE;
        mutex_exit(&stp->sd_qlock);
}
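
/*
 * Illustrative call pattern (compiled out): a stream head writer can
 * announce that it will drain the queue list itself, do its put
 * processing, and then drain in the foreground, avoiding a taskq
 * dispatch. This hypothetical sketch mirrors the intended
 * willservice/runservice bracketing.
 */
#if 0
static void
example_put(stdata_t *stp, mblk_t *mp)
{
        stream_willservice(stp);        /* defer qenables to this thread */
        putnext(stp->sd_wrq, mp);       /* may schedule queues on stp */
        stream_runservice(stp);         /* drain them in the foreground */
}
#endif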

/*
 * Replace the cred currently in the mblk with a different one.
 * Also update db_cpid.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
{
        dblk_t *dbp = mp->b_datap;
        cred_t *ocr = dbp->db_credp;

        ASSERT(cr != NULL);

        if (cr != ocr) {
                crhold(dbp->db_credp = cr);
                if (ocr != NULL)
                        crfree(ocr);
        }
        /* Don't overwrite with NOPID */
        if (cpid != NOPID)
                dbp->db_cpid = cpid;
}

/*
 * If the src message has a cred, then replace the cred currently in the
 * mblk with it.
 * Also update db_cpid.
 */
void
mblk_copycred(mblk_t *mp, const mblk_t *src)
{
        dblk_t *dbp = mp->b_datap;
        cred_t *cr, *ocr;
        pid_t cpid;

        cr = msg_getcred(src, &cpid);
        if (cr == NULL)
                return;

        ocr = dbp->db_credp;
        if (cr != ocr) {
                crhold(dbp->db_credp = cr);
                if (ocr != NULL)
                        crfree(ocr);
        }
        /* Don't overwrite with NOPID */
        if (cpid != NOPID)
                dbp->db_cpid = cpid;
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
        int rc = 0;

        ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
        if (mp->b_datap->db_type == M_DATA) {
                /* Associate values for M_DATA type */
                DB_CKSUMSTART(mp) = (intptr_t)start;
                DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
                DB_CKSUMEND(mp) = (intptr_t)end;
                DB_CKSUMFLAGS(mp) = flags;
                DB_CKSUM16(mp) = (uint16_t)value;
        } else {
                pattrinfo_t pa_info;

                ASSERT(mmd != NULL);

                pa_info.type = PATTR_HCKSUM;
                pa_info.len = sizeof (pattr_hcksum_t);

                if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE,
                    km_flags) != NULL) {
                        pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

                        hck->hcksum_start_offset = start;
                        hck->hcksum_stuff_offset = stuff;
                        hck->hcksum_end_offset = end;
                        hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
                        hck->hcksum_flags = flags;
                } else {
                        rc = -1;
                }
        }
        return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
        ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
        if (mp->b_datap->db_type == M_DATA) {
                if (flags != NULL) {
                        *flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
                        if ((*flags & (HCK_PARTIALCKSUM |
                            HCK_FULLCKSUM)) != 0) {
                                if (value != NULL)
                                        *value = (uint32_t)DB_CKSUM16(mp);
                                if ((*flags & HCK_PARTIALCKSUM) != 0) {
                                        if (start != NULL)
                                                *start = (uint32_t)
                                                    DB_CKSUMSTART(mp);
                                        if (stuff != NULL)
                                                *stuff = (uint32_t)
                                                    DB_CKSUMSTUFF(mp);
                                        if (end != NULL)
                                                *end = (uint32_t)
                                                    DB_CKSUMEND(mp);
                                }
                        }
                }
        } else {
                pattrinfo_t hck_attr = {PATTR_HCKSUM};

                ASSERT(mmd != NULL);

                /* get hardware checksum attribute */
                if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
                        pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

                        ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
                        if (flags != NULL)
                                *flags = hck->hcksum_flags;
                        if (start != NULL)
                                *start = hck->hcksum_start_offset;
                        if (stuff != NULL)
                                *stuff = hck->hcksum_stuff_offset;
                        if (end != NULL)
                                *end = hck->hcksum_end_offset;
                        if (value != NULL)
                                *value = (uint32_t)
                                    hck->hcksum_cksum_val.inet_cksum;
                }
        }
}
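
/*
 * Illustrative use (compiled out): a driver advertising full hardware
 * checksum offload marks an inbound, hardware-verified M_DATA block so the
 * stack can skip software verification. For M_DATA the multidata arguments
 * are unused and the km_flags value is irrelevant; the wrapper itself is
 * hypothetical.
 */
#if 0
static void
example_mark_full_cksum(mblk_t *mp)
{
        (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
            HCK_FULLCKSUM_OK, KM_NOSLEEP);
}
#endif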
void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
        ASSERT(DB_TYPE(mp) == M_DATA);
        ASSERT((flags & ~HW_LSO_FLAGS) == 0);

        /* Set the flags */
        DB_LSOFLAGS(mp) |= flags;
        DB_LSOMSS(mp) = mss;
}

void
lso_info_cleanup(mblk_t *mp)
{
        ASSERT(DB_TYPE(mp) == M_DATA);

        /* Clear the flags */
        DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
        DB_LSOMSS(mp) = 0;
}

/*
 * Checksum the len bytes at *bp, folding in the partial checksum psum
 * (0 if none), and return the new 16-bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
        int odd = len & 1;
        extern unsigned int ip_ocsum();

        if (((intptr_t)bp & 1) == 0 && !odd) {
                /*
                 * bp is 16-bit aligned and len is a multiple of
                 * 16-bit words.
                 */
                return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
        }
        if (((intptr_t)bp & 1) != 0) {
                /*
                 * bp isn't 16-bit aligned.
                 */
                unsigned int tsum;

#ifdef _LITTLE_ENDIAN
                psum += *bp;
#else
                psum += *bp << 8;
#endif
                len--;
                bp++;
                tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
                psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
                if (len & 1) {
                        bp += len - 1;
#ifdef _LITTLE_ENDIAN
                        psum += *bp << 8;
#else
                        psum += *bp;
#endif
                }
        } else {
                /*
                 * bp is 16-bit aligned.
                 */
                psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
                if (odd) {
                        bp += len - 1;
#ifdef _LITTLE_ENDIAN
                        psum += *bp;
#else
                        psum += *bp << 8;
#endif
                }
        }
        /*
         * Normalize psum to 16 bits before returning the new partial
         * checksum.  The max psum value before normalization is 0x3FDFE.
         */
        return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
        boolean_t rc;

        ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
        if (DB_TYPE(mp) == M_DATA) {
                rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
        } else {
                pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

                ASSERT(mmd != NULL);
                rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
        }
        return (rc);
}

void
freemsgchain(mblk_t *mp)
{
        mblk_t *next;

        while (mp != NULL) {
                next = mp->b_next;
                mp->b_next = NULL;

                freemsg(mp);
                mp = next;
        }
}

mblk_t *
copymsgchain(mblk_t *mp)
{
        mblk_t *nmp = NULL;
        mblk_t **nmpp = &nmp;

        for (; mp != NULL; mp = mp->b_next) {
                if ((*nmpp = copymsg(mp)) == NULL) {
                        freemsgchain(nmp);
                        return (NULL);
                }

                nmpp = &((*nmpp)->b_next);
        }

        return (nmp);
}
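
/*
 * Illustrative use (compiled out): a software checksum of a whole message,
 * folding each fragment's bytes into a running 16-bit partial sum. This
 * hypothetical sketch assumes every fragment except the last has an even
 * length, since folding partial sums across odd-length fragments would
 * shift the byte parity.
 */
#if 0
static unsigned
example_msgcksum(mblk_t *mp)
{
        unsigned sum = 0;

        for (; mp != NULL; mp = mp->b_cont)
                sum = bcksum(mp->b_rptr, MBLKL(mp), sum);
        return (sum);
}
#endif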

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro, for those that cannot use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
        return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
        str_stack_t *ss;
        int i;

        ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
        ss->ss_netstack = ns;

        /*
         * Set up autopush.
         */
        sad_initspace(ss);

        /*
         * Set up the mux_node structures.
         */
        ss->ss_devcnt = devcnt; /* In case it should change before free */
        ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
            ss->ss_devcnt), KM_SLEEP);
        for (i = 0; i < ss->ss_devcnt; i++)
                ss->ss_mux_nodes[i].mn_imaj = i;
        return (ss);
}

/*
 * Note: run at zone shutdown and not destroy, so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
        str_stack_t *ss = (str_stack_t *)arg;
        int i;
        cred_t *cr;

        cr = zone_get_kcred(netstackid_to_zoneid(stackid));
        ASSERT(cr != NULL);

        /* Undo all the I_PLINKs for this zone */
        for (i = 0; i < ss->ss_devcnt; i++) {
                struct mux_edge *ep;
                ldi_handle_t lh;
                ldi_ident_t li;
                int ret;
                int rval;
                dev_t rdev;

                ep = ss->ss_mux_nodes[i].mn_outp;
                if (ep == NULL)
                        continue;
                ret = ldi_ident_from_major((major_t)i, &li);
                if (ret != 0) {
                        continue;
                }
                rdev = ep->me_dev;
                ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
                    cr, &lh, li);
                if (ret != 0) {
                        ldi_ident_release(li);
                        continue;
                }

                ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
                    cr, &rval);
                if (ret) {
                        (void) ldi_close(lh, FREAD|FWRITE, cr);
                        ldi_ident_release(li);
                        continue;
                }
                (void) ldi_close(lh, FREAD|FWRITE, cr);

                /* Release the layered ident */
                ldi_ident_release(li);
        }
        crfree(cr);

        sad_freespace(ss);

        kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
        ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
        str_stack_t *ss = (str_stack_t *)arg;

        kmem_free(ss, sizeof (*ss));
}