/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by `qhead'
 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
 * requests are put into `freebs_list' which has no tail pointer. All three
 * lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail, and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
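
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the fallback pattern described above.  When a non-sleeping taskq dispatch
 * fails, the queue is appended to the `qhead'/`qtail' list under the
 * `service_queue' lock and a background thread is woken.  The helper name is
 * hypothetical; the variables and macros are the real ones in this file:
 *
 *	static void
 *	sketch_bkgrnd_schedule(queue_t *qp)
 *	{
 *		if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *		    qp, TQ_NOSLEEP) == 0) {
 *			mutex_enter(&service_queue);
 *			STRSTAT(taskqfails);
 *			ENQUEUE(qp, qhead, qtail, q_link);
 *			cv_signal(&services_to_run);
 *			mutex_exit(&service_queue);
 *		}
 *	}
 */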
/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait 'till executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void	*str_stack_init(netstackid_t stackid, netstack_t *ns);
static void	str_stack_shutdown(netstackid_t stackid, void *arg);
static void	str_stack_fini(netstackid_t stackid, void *arg);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer a large default value.
 * For potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;

/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *		  dm_sq == syncq_1
 *		  dm_ref
 *		  dm_next --> dm_str == streamtab_2
 *			      dm_sq == syncq_2
 *			      dm_ref
 *			      dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *		 ^		  ^		   ^		    ^
 *		 |   ____________/		   |		    |
 *		 |  /				   |		    |
 * dev_impl:	 ...|x|y|...		module A	module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
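
/*
 * Illustrative sketch (editorial addition): the lookup-or-allocate pattern
 * described above.  hold_dm(), defined later in this file, follows this
 * general shape; this is a simplified approximation, not the actual code:
 *
 *	rw_enter(&perdm_rwlock, RW_WRITER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next) {
 *		if (p->dm_str == str) {
 *			p->dm_ref++;	... share the existing syncq
 *			rw_exit(&perdm_rwlock);
 *			return (p);
 *		}
 *	}
 *	... no match: allocate a new perdm_t and syncq, link the entry
 *	... onto perdm_list, then drop perdm_rwlock
 */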
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/* Array of esballoc throttling queues, of length esbq_nelem */
static esb_queue_t *volatile system_esbq_array;
static int esbq_nelem;
static kmutex_t esbq_lock;
static int esbq_log2_cpus_per_q = 0;

/* Scale the system_esbq length by setting number of CPUs per queue. */
uint_t esbq_cpus_per_q = 1;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t esbq_timeout = 0x8;	/* timeout to process esb queue */
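
/*
 * Illustrative note (editorial addition): with esbq_cpus_per_q CPUs sharing
 * each throttling queue, a CPU's queue is selected along these lines (a
 * sketch of the selection logic used later in this file, not a verbatim
 * excerpt):
 *
 *	esb_queue_t *esbq = (system_esbq_array != NULL) ?
 *	    &system_esbq_array[CPU->cpu_seqid >> esbq_log2_cpus_per_q] :
 *	    &system_esbq;
 *
 * For example, esbq_cpus_per_q = 4 gives esbq_log2_cpus_per_q = 2, so CPUs
 * 0 through 3 share array element 0.
 */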
/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)())pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {					\
	EQUIV(head, tail);						\
	IMPLY(tail != NULL, tail->link == NULL);			\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {					\
	ASSERT(el->link == NULL);					\
	LISTCHECK(head, tail, link);					\
	if (head == NULL)						\
		head = el;						\
	else								\
		tail->link = el;					\
	tail = el;							\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {					\
	LISTCHECK(head, tail, link);					\
	el = head;							\
	if (head != NULL) {						\
		head = head->link;					\
		if (head == NULL)					\
			tail = NULL;					\
		el->link = NULL;					\
	}								\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return the
 * result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {		\
	LISTCHECK(head, tail, link);					\
	chase = NULL;							\
	succeed = 0;							\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;						\
	if (curr != NULL) {						\
		succeed = 1;						\
		ASSERT(curr == el);					\
		if (chase != NULL)					\
			chase->link = curr->link;			\
		else							\
			head = curr->link;				\
		curr->link = NULL;					\
		if (curr == tail)					\
			tail = chase;					\
	}								\
	LISTCHECK(head, tail, link);					\
}
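
/*
 * Illustrative sketch (editorial addition): how these list macros pair up in
 * a producer/consumer pattern, in the style of the background service code
 * later in this file.  A sketch under those assumptions, not verbatim code:
 *
 *	mutex_enter(&service_queue);		... producer side
 *	ENQUEUE(qp, qhead, qtail, q_link);
 *	cv_signal(&services_to_run);
 *	mutex_exit(&service_queue);
 *
 *	mutex_enter(&service_queue);		... consumer side
 *	DQ(qp, qhead, qtail, q_link);
 *	mutex_exit(&service_queue);
 *	if (qp != NULL)
 *		queue_service(qp);
 */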
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may only be enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/*							\
		 * Sanity check of priority field: an empty queue	\
		 * should have zero priority and nqueues equal to zero. \
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = ddi_get_lbolt();			\
		sq->sq_nqueues++;					\
	}								\
}

/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);				\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);		\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);			\
	/* Check that the queue is actually in the list */		\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);		\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);		\
	ASSERT(sq->sq_nqueues != 0);					\
	if (qp->q_sqprev == NULL) {					\
		/* First queue on list, make head q_sqnext */		\
		sq->sq_head = qp->q_sqnext;				\
	} else {							\
		/* Make prev->next == next */				\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;			\
	}								\
	if (qp->q_sqnext == NULL) {					\
		/* Last queue on list, make tail sqprev */		\
		sq->sq_tail = qp->q_sqprev;				\
	} else {							\
		/* Make next->prev == prev */				\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;			\
	}								\
	/* clear out references on this queue */			\
	qp->q_sqprev = qp->q_sqnext = NULL;				\
	qp->q_sqflags &= ~Q_SQQUEUED;					\
	/* If there is nothing queued, clear SQ_MESSAGES */		\
	if (sq->sq_head != NULL) {					\
		sq->sq_pri = sq->sq_head->q_spri;			\
	} else {							\
		sq->sq_flags &= ~SQ_MESSAGES;				\
		sq->sq_pri = 0;						\
	}								\
	sq->sq_nqueues--;						\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||		\
	    (sq->sq_flags & SQ_QUEUED) == 0);				\
}
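
/*
 * Illustrative note (editorial addition): the priority branch of SQPUT_Q
 * above walks from sq_tail toward sq_head.  With existing q_spri values
 * { 9, 4, 0 } (head to tail), inserting a queue with q_spri == 6 stops at
 * the entry with priority 9 and yields { 9, 6, 4, 0 }: higher-priority
 * queues stay closer to the head, and sq_pri always tracks the priority of
 * the head queue.
 */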
/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
{									\
	ASSERT(MUTEX_HELD(QLOCK(qp)));					\
	ASSERT(qp->q_sqhead == NULL ||					\
	    (qp->q_sqtail != NULL &&					\
	    qp->q_sqtail->b_next == NULL));				\
	qp->q_syncqmsgs++;						\
	ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */		\
	if (qp->q_sqhead == NULL) {					\
		qp->q_sqhead = qp->q_sqtail = mp;			\
	} else {							\
		qp->q_sqtail->b_next = mp;				\
		qp->q_sqtail = mp;					\
	}								\
	ASSERT(qp->q_syncqmsgs > 0);					\
	set_qfull(qp);							\
}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count |= SQ_FASTPUT;		\
		}							\
	}								\
}


#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count &= ~SQ_FASTPUT;		\
		}							\
	}								\
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		queue_service(q);					\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}

/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t *sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t *sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}
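
/*
 * Illustrative note (editorial addition): each ciputctrl_t is a per-CPU
 * { lock, count } pair, allocated n_ciputctrl at a time, letting concurrent
 * putnext() threads on an SQ_CIPUT syncq bump a per-CPU counter instead of
 * contending on SQLOCK.  The put-side fast path has roughly this shape
 * (a sketch, not the verbatim code):
 *
 *	ciputctrl_t *cip =
 *	    &sq->sq_ciputctrl[CPU->cpu_seqid & sq->sq_nciputctrl];
 *	mutex_enter(&cip->ciputctrl_lock);
 *	if (cip->ciputctrl_count & SQ_FASTPUT) {
 *		cip->ciputctrl_count++;		... enter the perimeter
 *		mutex_exit(&cip->ciputctrl_lock);
 *		... deliver the message ...
 *	} else {
 *		mutex_exit(&cip->ciputctrl_lock);
 *		... fall back to the slow path under SQLOCK ...
 *	}
 */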
/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/* Round ncpus up to a power of two, e.g. 6 CPUs yields 8. */
	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * zone.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}
/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}
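
/*
 * Illustrative sketch (editorial addition): the userland side that arms this
 * signalling path.  A process registers interest with I_SETSIG; thereafter
 * dosendsig() above queues SIGPOLL with si_code reflecting the event (for
 * example POLL_IN for S_INPUT, with si_band set to the band):
 *
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_ERROR) < 0)
 *		... handle error ...
 */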
/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t major;
	cdevsw_impl_t *dp;
	struct streamtab *str;
	queue_t *rq;
	queue_t *wrq;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error;
	int sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * successful open should have done qprocson()
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero, i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through these stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done).
 * Thus we cannot reset QENAB unless we actually removed the queue from the
 * service list.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the clearing
 * of the QENAB flag and the setting of the QINSERVICE flag. Consequently
 * it is possible for remove_runlist in strclose to not find the queue on
 * the runlist even though it is QENAB and not yet QINSERVICE; hence
 * wait_svc needs to check QENAB as well as QINSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait till the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be
	 * freed when the last module/driver is unloaded.
	 * If for PERMOD perimeters the queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of
	 * these functions remove the queue from its syncq list, so sqthread
	 * will not try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
	    curproc->p_pid)) == NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t n;
	int error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
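
/*
 * Illustrative note (editorial addition): the message layout assumed by
 * putiocd() and getiocd().  The first message block carries the struct
 * iocblk (or copyreq); the ioctl data hangs off b_cont:
 *
 *	M_IOCTL/M_IOCACK mblk		b_cont chain
 *	+------------------+		+----------+	 +----------+
 *	| struct iocblk    | ------->	| data ... | --> | data ... |
 *	|   ioc_count = N  |		+----------+	 +----------+
 *	+------------------+
 *
 * getiocd() walks the b_cont chain, copying out MIN(count, block length)
 * bytes per block until ioc_count bytes have been transferred.
 */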
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * if the lower stream is a pipe/FIFO, return, since link
	 * cycles cannot happen on pipes/FIFOs
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}
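
/*
 * Illustrative example (editorial addition, with hypothetical major numbers):
 * suppose mux A (major 40) is already linked below mux B (major 50), so the
 * mux graph has the edge B -> A.  An attempt to link B below A calls
 * linkcycle() with upmaj == 40 and lomaj == 50; the walk starts at
 * mux_nodes[50], follows the recorded edge to node 40, sees mn_imaj == upmaj
 * and returns 1, so mlink_file() below fails the I_LINK with EINVAL.
 */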
/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}
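
/*
 * Illustrative note (editorial addition): the wakeup half of the
 * SQ_WANTWAKEUP handshake used above lives with whoever drops the last
 * reference, in roughly this shape (a sketch, not verbatim code):
 *
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_count--;
 *	if (sq->sq_flags & SQ_WANTWAKEUP) {
 *		sq->sq_flags &= ~SQ_WANTWAKEUP;
 *		cv_broadcast(&sq->sq_wait);
 *	}
 *	mutex_exit(SQLOCK(sq));
 */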
/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;
	netstack_t *ns;
	str_stack_t *ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding a mux_node entry past the end of mux_nodes[].
	 * For FIFOs we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared in the end of mlink when the passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while the
	 * passthru queue is in place (it is not a proper module and doesn't
	 * have an open entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set. After link_addpassthru()
	 * nothing may come from below since the pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with the STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change the stream head. So, the
	 * correct sequence of actions is:
	 *
	 *	1) Set STRPLUMB
	 *	2) Call link_addpassthru()
	 *	3) Set STPLEX
	 *	4) Call setq and update the stream state
	 *	5) Clear STPLEX
	 *	6) Call link_rempassthru()
	 *	7) Clear STRPLUMB
	 *
	 * The same sequence applies to the munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneously with
	 * setq processing, we disable the streamhead syncq and wait until
	 * the existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
*/ 1923 cv_broadcast(&stpdown->sd_monitor); 1924 mutex_exit(&stpdown->sd_lock); 1925 1926 mutex_exit(&muxifier); 1927 netstack_rele(ss->ss_netstack); 1928 return (error); 1929 } 1930 mutex_enter(&fpdown->f_tlock); 1931 fpdown->f_count++; 1932 mutex_exit(&fpdown->f_tlock); 1933 1934 /* 1935 * If we've made it here, the linkage is all set up, so we should also 1936 * set up the layered driver linkages. 1937 */ 1938 1939 ASSERT((cmd == I_LINK) || (cmd == I_PLINK)); 1940 if (cmd == I_LINK) { 1941 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL); 1942 } else { 1943 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST); 1944 } 1945 1946 link_rempassthru(passq); 1947 1948 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss); 1949 1950 /* 1951 * Mark the upper stream as having dependent links 1952 * so that strclose can clean it up. 1953 */ 1954 if (cmd == I_LINK) { 1955 mutex_enter(&stp->sd_lock); 1956 stp->sd_flag |= STRHASLINKS; 1957 mutex_exit(&stp->sd_lock); 1958 } 1959 /* 1960 * Wake up any other processes that may have been 1961 * waiting on the lower stream. These will all 1962 * error out. 1963 */ 1964 mutex_enter(&stpdown->sd_lock); 1965 /* The passthru module is removed so we may release STRPLUMB */ 1966 stpdown->sd_flag &= ~STRPLUMB; 1967 cv_broadcast(&rq->q_wait); 1968 cv_broadcast(&_WR(rq)->q_wait); 1969 cv_broadcast(&stpdown->sd_monitor); 1970 mutex_exit(&stpdown->sd_lock); 1971 mutex_exit(&muxifier); 1972 *rvalp = linkp->li_lblk.l_index; 1973 netstack_rele(ss->ss_netstack); 1974 return (0); 1975 } 1976 1977 int 1978 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink) 1979 { 1980 int ret; 1981 struct file *fpdown; 1982 1983 fpdown = getf(arg); 1984 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink); 1985 if (fpdown != NULL) 1986 releasef(arg); 1987 return (ret); 1988 } 1989 1990 /* 1991 * Unlink a multiplexor link. Stp is the controlling stream for the 1992 * link, and linkp points to the link's entry in the linkinfo list. 1993 * The muxifier lock must be held on entry and is dropped on exit. 1994 * 1995 * NOTE: Currently it is assumed that the mux will process all the messages 1996 * sitting on its queue before ACKing the UNLINK. It is the responsibility 1997 * of the mux to handle all the messages that arrive before UNLINK. 1998 * If the mux has to send down messages on its lower stream before 1999 * ACKing I_UNLINK, then it *should* know how to handle messages even 2000 * after the UNLINK is acked (actually it should be able to handle them 2001 * until we re-block the read side of the pass queue here). If the mux does not 2002 * open up the lower stream, any messages that arrive during UNLINK 2003 * will be put in the stream head. In the case of the lower stream opening 2004 * up, some messages might land in the stream head depending on when 2005 * the message arrived and when the read side of the pass queue was 2006 * re-blocked. 2007 */ 2008 int 2009 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp, 2010 str_stack_t *ss) 2011 { 2012 struct strioctl strioc; 2013 struct stdata *stpdown; 2014 queue_t *rq, *wrq; 2015 queue_t *passq; 2016 syncq_t *passyncq; 2017 int error = 0; 2018 file_t *fpdown; 2019 2020 ASSERT(MUTEX_HELD(&muxifier)); 2021 2022 stpdown = linkp->li_fpdown->f_vnode->v_stream; 2023 2024 /* 2025 * See the comment in mlink() concerning STRPLUMB/STPLEX flags. 2026 */ 2027 mutex_enter(&stpdown->sd_lock); 2028 stpdown->sd_flag |= STRPLUMB; 2029 mutex_exit(&stpdown->sd_lock); 2030 2031 /* 2032 * Add passthru queue below lower mux.
This will block 2033 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK. 2034 */ 2035 passq = link_addpassthru(stpdown); 2036 2037 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2038 strioc.ic_cmd = I_UNLINK; 2039 else 2040 strioc.ic_cmd = I_PUNLINK; 2041 strioc.ic_timout = INFTIM; 2042 strioc.ic_len = sizeof (struct linkblk); 2043 strioc.ic_dp = (char *)&linkp->li_lblk; 2044 2045 error = strdoioctl(stp, &strioc, FNATIVE, 2046 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2047 2048 /* 2049 * If there was an error and this is not called via strclose, 2050 * return to the user. Otherwise, pretend there was no error 2051 * and close the link. 2052 */ 2053 if (error) { 2054 if (flag & LINKCLOSE) { 2055 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2056 "unlink ioctl, closing anyway (%d)\n", error); 2057 } else { 2058 link_rempassthru(passq); 2059 mutex_enter(&stpdown->sd_lock); 2060 stpdown->sd_flag &= ~STRPLUMB; 2061 cv_broadcast(&stpdown->sd_monitor); 2062 mutex_exit(&stpdown->sd_lock); 2063 mutex_exit(&muxifier); 2064 return (error); 2065 } 2066 } 2067 2068 mux_rmvedge(stp, linkp->li_lblk.l_index, ss); 2069 fpdown = linkp->li_fpdown; 2070 lbfree(linkp); 2071 2072 /* 2073 * We go ahead and drop muxifier here--it's a nasty global lock that 2074 * can slow others down. It's okay to do so since attempts to mlink() this 2075 * stream will be stopped because STPLEX is still set in the stdata 2076 * structure, and munlink() is stopped because mux_rmvedge() and 2077 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2078 * respectively. Note that we defer the closef() of fpdown until 2079 * after we drop muxifier since strclose() can call munlinkall(). 2080 */ 2081 mutex_exit(&muxifier); 2082 2083 wrq = stpdown->sd_wrq; 2084 rq = _RD(wrq); 2085 2086 /* 2087 * Get rid of outstanding service procedure runs, before we make 2088 * it a stream head, since a stream head doesn't have any service 2089 * procedure. 2090 */ 2091 disable_svc(rq); 2092 wait_svc(rq); 2093 2094 /* 2095 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2096 * is queued up to be finished. The mux should take care that nothing is 2097 * sent down to this queue. We should do it now as we're going to block 2098 * passyncq if it was unblocked. 2099 */ 2100 if (wrq->q_flag & QPERMOD) { 2101 syncq_t *sq = wrq->q_syncq; 2102 2103 mutex_enter(SQLOCK(sq)); 2104 while (wrq->q_sqflags & Q_SQQUEUED) { 2105 sq->sq_flags |= SQ_WANTWAKEUP; 2106 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2107 } 2108 mutex_exit(SQLOCK(sq)); 2109 } 2110 passyncq = passq->q_syncq; 2111 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2112 2113 syncq_t *sq, *outer; 2114 2115 /* 2116 * Messages could be flowing from underneath. We will 2117 * block the read side of the passq. This would be 2118 * sufficient for QPAIR and QPERQ muxes to ensure 2119 * that no data is flowing up into this queue 2120 * and hence no thread active in this instance of 2121 * lower mux. But for QPERMOD and QMTOUTPERIM there 2122 * could be messages on the inner and outer/inner 2123 * syncqs respectively. We will wait for them to drain. 2124 * Because passq is blocked, messages end up in the syncq, 2125 * and qfill_syncq could possibly end up setting QFULL, 2126 * which will access rq->q_flag. Hence, we have to 2127 * acquire the QLOCK in setq. 2128 * 2129 * XXX Messages can also flow from the top into this 2130 * queue even though the unlink is over (Ex. some instance 2131 * in putnext() called from the top that has still not 2132 * accessed this queue. And also putq(lowerq) ?).
2133 * Solution: How about blocking the l_qtop queue ? 2134 * Do we really care about such pure D_MP muxes ? 2135 */ 2136 2137 blocksq(passyncq, SQ_BLOCKED, 0); 2138 2139 sq = rq->q_syncq; 2140 if ((outer = sq->sq_outer) != NULL) { 2141 2142 /* 2143 * We just have to wait for the outer sq_count to 2144 * drop to zero. As this does not prevent new 2145 * messages from entering the outer perimeter, this 2146 * is subject to starvation. 2147 * 2148 * NOTE: Because of the blocksq above, messages could 2149 * be in the inner syncq only because of some 2150 * thread holding the outer perimeter exclusively. 2151 * Hence it would be sufficient to wait for the 2152 * exclusive holder of the outer perimeter to drain 2153 * the inner and outer syncqs. But we will not depend 2154 * on this feature and hence check the inner syncqs 2155 * separately. 2156 */ 2157 wait_syncq(outer); 2158 } 2159 2160 2161 /* 2162 * There could be messages destined for 2163 * this queue. Let the exclusive holder 2164 * drain it. 2165 */ 2166 2167 wait_syncq(sq); 2168 ASSERT((rq->q_flag & QPERMOD) || 2169 ((rq->q_syncq->sq_head == NULL) && 2170 (_WR(rq)->q_syncq->sq_head == NULL))); 2171 } 2172 2173 /* 2174 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special 2175 * case as we don't disable its syncq or remove it from the syncq 2176 * service list. 2177 */ 2178 if (rq->q_flag & QPERMOD) { 2179 syncq_t *sq = rq->q_syncq; 2180 2181 mutex_enter(SQLOCK(sq)); 2182 while (rq->q_sqflags & Q_SQQUEUED) { 2183 sq->sq_flags |= SQ_WANTWAKEUP; 2184 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2185 } 2186 mutex_exit(SQLOCK(sq)); 2187 } 2188 2189 /* 2190 * flush_syncq changes state only when there are some messages to 2191 * free, i.e. when it returns a non-zero value. 2192 */ 2193 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2194 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2195 2196 /* 2197 * Nobody else should know about this queue now. 2198 * If the mux did not process the messages before 2199 * acking the I_UNLINK, free them now. 2200 */ 2201 2202 flushq(rq, FLUSHALL); 2203 flushq(_WR(rq), FLUSHALL); 2204 2205 /* 2206 * Convert the mux lower queue into a stream head queue. 2207 * Turn off STPLEX before we turn on the stream by removing the passq. 2208 */ 2209 rq->q_ptr = wrq->q_ptr = stpdown; 2210 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2211 2212 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2213 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2214 2215 enable_svc(rq); 2216 2217 /* 2218 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2219 * needs to be set to prevent a reopen() of the stream - such a reopen may 2220 * try to call the non-existent pass queue open routine and panic. 2221 */ 2222 mutex_enter(&stpdown->sd_lock); 2223 stpdown->sd_flag &= ~STPLEX; 2224 mutex_exit(&stpdown->sd_lock); 2225 2226 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2227 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2228 2229 /* clean up the layered driver linkages */ 2230 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2231 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2232 } else { 2233 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2234 } 2235 2236 link_rempassthru(passq); 2237 2238 /* 2239 * Now all plumbing changes are finished and STRPLUMB is no 2240 * longer needed.
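 *
 * (Teardown here mirrors the setup sequence documented in mlink_file():
 * STPLEX was cleared before link_rempassthru(), and STRPLUMB is cleared
 * only now that the passthru queue is gone and reopens are safe again.)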
2241 */ 2242 mutex_enter(&stpdown->sd_lock); 2243 stpdown->sd_flag &= ~STRPLUMB; 2244 cv_broadcast(&stpdown->sd_monitor); 2245 mutex_exit(&stpdown->sd_lock); 2246 2247 (void) closef(fpdown); 2248 return (0); 2249 } 2250 2251 /* 2252 * Unlink all multiplexor links for which stp is the controlling stream. 2253 * Return 0, or a non-zero errno on failure. 2254 */ 2255 int 2256 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2257 { 2258 linkinfo_t *linkp; 2259 int error = 0; 2260 2261 mutex_enter(&muxifier); 2262 while (linkp = findlinks(stp, 0, flag, ss)) { 2263 /* 2264 * munlink() releases the muxifier lock. 2265 */ 2266 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2267 return (error); 2268 mutex_enter(&muxifier); 2269 } 2270 mutex_exit(&muxifier); 2271 return (0); 2272 } 2273 2274 /* 2275 * A multiplexor link has been made. Add an 2276 * edge to the directed graph. 2277 */ 2278 void 2279 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2280 { 2281 struct mux_node *np; 2282 struct mux_edge *ep; 2283 major_t upmaj; 2284 major_t lomaj; 2285 2286 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2287 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2288 np = &ss->ss_mux_nodes[upmaj]; 2289 if (np->mn_outp) { 2290 ep = np->mn_outp; 2291 while (ep->me_nextp) 2292 ep = ep->me_nextp; 2293 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2294 ep = ep->me_nextp; 2295 } else { 2296 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2297 ep = np->mn_outp; 2298 } 2299 ep->me_nextp = NULL; 2300 ep->me_muxid = muxid; 2301 /* 2302 * Save the dev_t for the purposes of str_stack_shutdown. 2303 * str_stack_shutdown assumes that the device allows reopen, since 2304 * this dev_t is the one after any cloning by xx_open(). 2305 * Would prefer finding the dev_t from before any cloning, 2306 * but specfs doesn't retain that. 2307 */ 2308 ep->me_dev = upstp->sd_vnode->v_rdev; 2309 if (lostp->sd_vnode->v_type == VFIFO) 2310 ep->me_nodep = NULL; 2311 else 2312 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2313 } 2314 2315 /* 2316 * A multiplexor link has been removed. Remove the 2317 * edge in the directed graph. 2318 */ 2319 void 2320 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2321 { 2322 struct mux_node *np; 2323 struct mux_edge *ep; 2324 struct mux_edge *pep = NULL; 2325 major_t upmaj; 2326 2327 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2328 np = &ss->ss_mux_nodes[upmaj]; 2329 ASSERT(np->mn_outp != NULL); 2330 ep = np->mn_outp; 2331 while (ep) { 2332 if (ep->me_muxid == muxid) { 2333 if (pep) 2334 pep->me_nextp = ep->me_nextp; 2335 else 2336 np->mn_outp = ep->me_nextp; 2337 kmem_free(ep, sizeof (struct mux_edge)); 2338 return; 2339 } 2340 pep = ep; 2341 ep = ep->me_nextp; 2342 } 2343 ASSERT(0); /* should not reach here */ 2344 } 2345 2346 /* 2347 * Translate the device flags (from conf.h) to the corresponding 2348 * qflag and sq_flag (type) values. 
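 *
 * Illustrative summary of the inner-perimeter cases handled below
 * (sqtype additionally gets SQ_CO by default later in the routine):
 *
 *	devflag			qflag
 *	D_MP			QMTSAFE (sqtype |= SQ_CI)
 *	D_MTPERQ|D_MP		QPERQ
 *	D_MTQPAIR|D_MP		QPAIR
 *	D_MTPERMOD|D_MP		QPERMOD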
2349 */ 2350 int 2351 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2352 uint32_t *sqtypep) 2353 { 2354 uint32_t qflag = 0; 2355 uint32_t sqtype = 0; 2356 2357 if (devflag & _D_OLD) 2358 goto bad; 2359 2360 /* Inner perimeter presence and scope */ 2361 switch (devflag & D_MTINNER_MASK) { 2362 case D_MP: 2363 qflag |= QMTSAFE; 2364 sqtype |= SQ_CI; 2365 break; 2366 case D_MTPERQ|D_MP: 2367 qflag |= QPERQ; 2368 break; 2369 case D_MTQPAIR|D_MP: 2370 qflag |= QPAIR; 2371 break; 2372 case D_MTPERMOD|D_MP: 2373 qflag |= QPERMOD; 2374 break; 2375 default: 2376 goto bad; 2377 } 2378 2379 /* Outer perimeter */ 2380 if (devflag & D_MTOUTPERIM) { 2381 switch (devflag & D_MTINNER_MASK) { 2382 case D_MP: 2383 case D_MTPERQ|D_MP: 2384 case D_MTQPAIR|D_MP: 2385 break; 2386 default: 2387 goto bad; 2388 } 2389 qflag |= QMTOUTPERIM; 2390 } 2391 2392 /* Inner perimeter modifiers */ 2393 if (devflag & D_MTINNER_MOD) { 2394 switch (devflag & D_MTINNER_MASK) { 2395 case D_MP: 2396 goto bad; 2397 default: 2398 break; 2399 } 2400 if (devflag & D_MTPUTSHARED) 2401 sqtype |= SQ_CIPUT; 2402 if (devflag & _D_MTOCSHARED) { 2403 /* 2404 * The code in putnext assumes that it has the 2405 * highest concurrency by not checking sq_count. 2406 * Thus _D_MTOCSHARED can only be supported when 2407 * D_MTPUTSHARED is set. 2408 */ 2409 if (!(devflag & D_MTPUTSHARED)) 2410 goto bad; 2411 sqtype |= SQ_CIOC; 2412 } 2413 if (devflag & _D_MTCBSHARED) { 2414 /* 2415 * The code in putnext assumes that it has the 2416 * highest concurrency by not checking sq_count. 2417 * Thus _D_MTCBSHARED can only be supported when 2418 * D_MTPUTSHARED is set. 2419 */ 2420 if (!(devflag & D_MTPUTSHARED)) 2421 goto bad; 2422 sqtype |= SQ_CICB; 2423 } 2424 if (devflag & _D_MTSVCSHARED) { 2425 /* 2426 * The code in putnext assumes that it has the 2427 * highest concurrency by not checking sq_count. 2428 * Thus _D_MTSVCSHARED can only be supported when 2429 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2430 * supported only for QPERMOD. 2431 */ 2432 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2433 goto bad; 2434 sqtype |= SQ_CISVC; 2435 } 2436 } 2437 2438 /* Default outer perimeter concurrency */ 2439 sqtype |= SQ_CO; 2440 2441 /* Outer perimeter modifiers */ 2442 if (devflag & D_MTOCEXCL) { 2443 if (!(devflag & D_MTOUTPERIM)) { 2444 /* No outer perimeter */ 2445 goto bad; 2446 } 2447 sqtype &= ~SQ_COOC; 2448 } 2449 2450 /* Synchronous Streams extended qinit structure */ 2451 if (devflag & D_SYNCSTR) 2452 qflag |= QSYNCSTR; 2453 2454 /* 2455 * Private flag used by a transport module to indicate 2456 * to sockfs that it supports direct-access mode without 2457 * having to go through STREAMS. 2458 */ 2459 if (devflag & _D_DIRECT) { 2460 /* Reject unless the module is fully-MT (no perimeter) */ 2461 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2462 goto bad; 2463 qflag |= _QDIRECT; 2464 } 2465 2466 *qflagp = qflag; 2467 *sqtypep = sqtype; 2468 return (0); 2469 2470 bad: 2471 cmn_err(CE_WARN, 2472 "stropen: bad MT flags (0x%x) in driver '%s'", 2473 (int)(qflag & D_MTSAFETY_MASK), 2474 stp->st_rdinit->qi_minfo->mi_idname); 2475 2476 return (EINVAL); 2477 } 2478 2479 /* 2480 * Set the interface values for a pair of queues (qinit structure, 2481 * packet sizes, water marks). 2482 * setq assumes that the caller does not have a claim (entersq or claimq) 2483 * on the queue. 
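 *
 * A typical call, as made from the I_LINK path in mlink_file() above:
 *
 *	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
 *	    B_TRUE);
 *
 * where qflag and sqtype come from devflg_to_qflag() and dmp is only
 * required for QPERMOD/QMTOUTPERIM (see the IMPLY assertion below).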
2484 */ 2485 void 2486 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2487 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2488 { 2489 queue_t *wq; 2490 syncq_t *sq, *outer; 2491 2492 ASSERT(rq->q_flag & QREADR); 2493 ASSERT((qflag & QMT_TYPEMASK) != 0); 2494 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2495 2496 wq = _WR(rq); 2497 rq->q_qinfo = rinit; 2498 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2499 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2500 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2501 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2502 wq->q_qinfo = winit; 2503 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2504 wq->q_lowat = winit->qi_minfo->mi_lowat; 2505 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2506 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2507 2508 /* Remove old syncqs */ 2509 sq = rq->q_syncq; 2510 outer = sq->sq_outer; 2511 if (outer != NULL) { 2512 ASSERT(wq->q_syncq->sq_outer == outer); 2513 outer_remove(outer, rq->q_syncq); 2514 if (wq->q_syncq != rq->q_syncq) 2515 outer_remove(outer, wq->q_syncq); 2516 } 2517 ASSERT(sq->sq_outer == NULL); 2518 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2519 2520 if (sq != SQ(rq)) { 2521 if (!(rq->q_flag & QPERMOD)) 2522 free_syncq(sq); 2523 if (wq->q_syncq == rq->q_syncq) 2524 wq->q_syncq = NULL; 2525 rq->q_syncq = NULL; 2526 } 2527 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2528 wq->q_syncq != SQ(rq)) { 2529 free_syncq(wq->q_syncq); 2530 wq->q_syncq = NULL; 2531 } 2532 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2533 rq->q_syncq->sq_tail == NULL)); 2534 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2535 wq->q_syncq->sq_tail == NULL)); 2536 2537 if (!(rq->q_flag & QPERMOD) && 2538 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2539 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2540 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2541 rq->q_syncq->sq_nciputctrl, 0); 2542 ASSERT(ciputctrl_cache != NULL); 2543 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2544 rq->q_syncq->sq_ciputctrl = NULL; 2545 rq->q_syncq->sq_nciputctrl = 0; 2546 } 2547 2548 if (!(wq->q_flag & QPERMOD) && 2549 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2550 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2551 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2552 wq->q_syncq->sq_nciputctrl, 0); 2553 ASSERT(ciputctrl_cache != NULL); 2554 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2555 wq->q_syncq->sq_ciputctrl = NULL; 2556 wq->q_syncq->sq_nciputctrl = 0; 2557 } 2558 2559 sq = SQ(rq); 2560 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2561 ASSERT(sq->sq_outer == NULL); 2562 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2563 2564 /* 2565 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2566 * bits in sq_flag based on the sqtype. 2567 */ 2568 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2569 2570 rq->q_syncq = wq->q_syncq = sq; 2571 sq->sq_type = sqtype; 2572 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2573 2574 /* 2575 * We are making sq_svcflags zero, 2576 * resetting SQ_DISABLED in case it was set by 2577 * wait_svc() in the munlink path. 2578 * 2579 */ 2580 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2581 sq->sq_svcflags = 0; 2582 2583 /* 2584 * We need to acquire the lock here for the mlink and munlink case, 2585 * where canputnext, backenable, etc can access the q_flag. 
2586 */ 2587 if (lock_needed) { 2588 mutex_enter(QLOCK(rq)); 2589 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2590 mutex_exit(QLOCK(rq)); 2591 mutex_enter(QLOCK(wq)); 2592 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2593 mutex_exit(QLOCK(wq)); 2594 } else { 2595 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2596 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2597 } 2598 2599 if (qflag & QPERQ) { 2600 /* Allocate a separate syncq for the write side */ 2601 sq = new_syncq(); 2602 sq->sq_type = rq->q_syncq->sq_type; 2603 sq->sq_flags = rq->q_syncq->sq_flags; 2604 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2605 sq->sq_oprev == NULL); 2606 wq->q_syncq = sq; 2607 } 2608 if (qflag & QPERMOD) { 2609 sq = dmp->dm_sq; 2610 2611 /* 2612 * Assert that we do have an inner perimeter syncq and that it 2613 * does not have an outer perimeter associated with it. 2614 */ 2615 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2616 sq->sq_oprev == NULL); 2617 rq->q_syncq = wq->q_syncq = sq; 2618 } 2619 if (qflag & QMTOUTPERIM) { 2620 outer = dmp->dm_sq; 2621 2622 ASSERT(outer->sq_outer == NULL); 2623 outer_insert(outer, rq->q_syncq); 2624 if (wq->q_syncq != rq->q_syncq) 2625 outer_insert(outer, wq->q_syncq); 2626 } 2627 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2628 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2629 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2630 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2631 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2632 2633 /* 2634 * Initialize struio() types. 2635 */ 2636 rq->q_struiot = 2637 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2638 wq->q_struiot = 2639 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2640 } 2641 2642 perdm_t * 2643 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2644 { 2645 syncq_t *sq; 2646 perdm_t **pp; 2647 perdm_t *p; 2648 perdm_t *dmp; 2649 2650 ASSERT(str != NULL); 2651 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2652 2653 rw_enter(&perdm_rwlock, RW_READER); 2654 for (p = perdm_list; p != NULL; p = p->dm_next) { 2655 if (p->dm_str == str) { /* found one */ 2656 atomic_inc_32(&(p->dm_ref)); 2657 rw_exit(&perdm_rwlock); 2658 return (p); 2659 } 2660 } 2661 rw_exit(&perdm_rwlock); 2662 2663 sq = new_syncq(); 2664 if (qflag & QPERMOD) { 2665 sq->sq_type = sqtype | SQ_PERMOD; 2666 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2667 } else { 2668 ASSERT(qflag & QMTOUTPERIM); 2669 sq->sq_onext = sq->sq_oprev = sq; 2670 } 2671 2672 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2673 dmp->dm_sq = sq; 2674 dmp->dm_str = str; 2675 dmp->dm_ref = 1; 2676 dmp->dm_next = NULL; 2677 2678 rw_enter(&perdm_rwlock, RW_WRITER); 2679 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2680 if (p->dm_str == str) { /* already present */ 2681 p->dm_ref++; 2682 rw_exit(&perdm_rwlock); 2683 free_syncq(sq); 2684 kmem_free(dmp, sizeof (perdm_t)); 2685 return (p); 2686 } 2687 } 2688 2689 *pp = dmp; 2690 rw_exit(&perdm_rwlock); 2691 return (dmp); 2692 } 2693 2694 void 2695 rele_dm(perdm_t *dmp) 2696 { 2697 perdm_t **pp; 2698 perdm_t *p; 2699 2700 rw_enter(&perdm_rwlock, RW_WRITER); 2701 ASSERT(dmp->dm_ref > 0); 2702 2703 if (--dmp->dm_ref > 0) { 2704 rw_exit(&perdm_rwlock); 2705 return; 2706 } 2707 2708 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2709 if (p == dmp) 2710 break; 2711 ASSERT(p == dmp); 2712 *pp = p->dm_next; 2713 rw_exit(&perdm_rwlock); 2714 2715 /* 2716 * Wait for any 
background processing that relies on the 2717 * syncq to complete before it is freed. 2718 */ 2719 wait_sq_svc(p->dm_sq); 2720 free_syncq(p->dm_sq); 2721 kmem_free(p, sizeof (perdm_t)); 2722 } 2723 2724 /* 2725 * Make a protocol message given control and data buffers. 2726 * n.b., this can block; be careful of what locks you hold when calling it. 2727 * 2728 * If sd_maxblk is less than *iosize this routine can fail part way through 2729 * (due to an allocation failure). In this case on return *iosize will contain 2730 * the amount that was consumed. Otherwise *iosize is not modified, 2731 * i.e. the entire requested amount was consumed. 2732 */ 2733 int 2734 strmakemsg( 2735 struct strbuf *mctl, 2736 ssize_t *iosize, 2737 struct uio *uiop, 2738 stdata_t *stp, 2739 int32_t flag, 2740 mblk_t **mpp) 2741 { 2742 mblk_t *mpctl = NULL; 2743 mblk_t *mpdata = NULL; 2744 int error; 2745 2746 ASSERT(uiop != NULL); 2747 2748 *mpp = NULL; 2749 /* Create control part, if any */ 2750 if ((mctl != NULL) && (mctl->len >= 0)) { 2751 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2752 if (error) 2753 return (error); 2754 } 2755 /* Create data part, if any */ 2756 if (*iosize >= 0) { 2757 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2758 if (error) { 2759 freemsg(mpctl); 2760 return (error); 2761 } 2762 } 2763 if (mpctl != NULL) { 2764 if (mpdata != NULL) 2765 linkb(mpctl, mpdata); 2766 *mpp = mpctl; 2767 } else { 2768 *mpp = mpdata; 2769 } 2770 return (0); 2771 } 2772 2773 /* 2774 * Make the control part of a protocol message given a control buffer. 2775 * n.b., this can block; be careful of what locks you hold when calling it. 2776 */ 2777 int 2778 strmakectl( 2779 struct strbuf *mctl, 2780 int32_t flag, 2781 int32_t fflag, 2782 mblk_t **mpp) 2783 { 2784 mblk_t *bp = NULL; 2785 unsigned char msgtype; 2786 int error = 0; 2787 cred_t *cr = CRED(); 2788 2789 /* We do not support interrupt threads using the stream head to send */ 2790 ASSERT(cr != NULL); 2791 2792 *mpp = NULL; 2793 /* 2794 * Create control part of message, if any. 2795 */ 2796 if ((mctl != NULL) && (mctl->len >= 0)) { 2797 caddr_t base; 2798 int ctlcount; 2799 int allocsz; 2800 2801 if (flag & RS_HIPRI) 2802 msgtype = M_PCPROTO; 2803 else 2804 msgtype = M_PROTO; 2805 2806 ctlcount = mctl->len; 2807 base = mctl->buf; 2808 2809 /* 2810 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2811 * blocks by increasing the size to something more usable. 2812 */ 2813 allocsz = MAX(ctlcount, 64); 2814 2815 /* 2816 * Range checking has already been done; simply try 2817 * to allocate a message block for the ctl part. 2818 */ 2819 while ((bp = allocb_cred(allocsz, cr, 2820 curproc->p_pid)) == NULL) { 2821 if (fflag & (FNDELAY|FNONBLOCK)) 2822 return (EAGAIN); 2823 if (error = strwaitbuf(allocsz, BPRI_MED)) 2824 return (error); 2825 } 2826 2827 bp->b_datap->db_type = msgtype; 2828 if (copyin(base, bp->b_wptr, ctlcount)) { 2829 freeb(bp); 2830 return (EFAULT); 2831 } 2832 bp->b_wptr += ctlcount; 2833 } 2834 *mpp = bp; 2835 return (0); 2836 } 2837 2838 /* 2839 * Make a protocol message given data buffers. 2840 * n.b., this can block; be careful of what locks you hold when calling it. 2841 * 2842 * If sd_maxblk is less than *iosize this routine can fail part way through 2843 * (due to an allocation failure). In this case on return *iosize will contain 2844 * the amount that was consumed. Otherwise *iosize is not modified, 2845 * i.e. the entire requested amount was consumed.
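 *
 * Illustrative numbers: with sd_maxblk == 1024 and *iosize == 4096, the
 * loop below builds a chain of four 1024-byte mblks. If allocation fails
 * after two mblks have been built, the routine returns 0 with *iosize
 * set to 2048 and *mpp pointing at the partial chain.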
2846 */ 2847 int 2848 strmakedata( 2849 ssize_t *iosize, 2850 struct uio *uiop, 2851 stdata_t *stp, 2852 int32_t flag, 2853 mblk_t **mpp) 2854 { 2855 mblk_t *mp = NULL; 2856 mblk_t *bp; 2857 int wroff = (int)stp->sd_wroff; 2858 int tail_len = (int)stp->sd_tail; 2859 int extra = wroff + tail_len; 2860 int error = 0; 2861 ssize_t maxblk; 2862 ssize_t count = *iosize; 2863 cred_t *cr; 2864 2865 *mpp = NULL; 2866 if (count < 0) 2867 return (0); 2868 2869 /* We do not support interrupt threads using the stream head to send */ 2870 cr = CRED(); 2871 ASSERT(cr != NULL); 2872 2873 maxblk = stp->sd_maxblk; 2874 if (maxblk == INFPSZ) 2875 maxblk = count; 2876 2877 /* 2878 * Create data part of message, if any. 2879 */ 2880 do { 2881 ssize_t size; 2882 dblk_t *dp; 2883 2884 ASSERT(uiop); 2885 2886 size = MIN(count, maxblk); 2887 2888 while ((bp = allocb_cred(size + extra, cr, 2889 curproc->p_pid)) == NULL) { 2890 error = EAGAIN; 2891 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2892 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2893 if (count == *iosize) { 2894 freemsg(mp); 2895 return (error); 2896 } else { 2897 *iosize -= count; 2898 *mpp = mp; 2899 return (0); 2900 } 2901 } 2902 } 2903 dp = bp->b_datap; 2904 dp->db_cpid = curproc->p_pid; 2905 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2906 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2907 2908 if (flag & STRUIO_POSTPONE) { 2909 /* 2910 * Setup the stream uio portion of the 2911 * dblk for subsequent use by struioget(). 2912 */ 2913 dp->db_struioflag = STRUIO_SPEC; 2914 dp->db_cksumstart = 0; 2915 dp->db_cksumstuff = 0; 2916 dp->db_cksumend = size; 2917 *(long long *)dp->db_struioun.data = 0ll; 2918 bp->b_wptr += size; 2919 } else { 2920 if (stp->sd_copyflag & STRCOPYCACHED) 2921 uiop->uio_extflg |= UIO_COPY_CACHED; 2922 2923 if (size != 0) { 2924 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2925 uiop); 2926 if (error != 0) { 2927 freeb(bp); 2928 freemsg(mp); 2929 return (error); 2930 } 2931 } 2932 bp->b_wptr += size; 2933 2934 if (stp->sd_wputdatafunc != NULL) { 2935 mblk_t *newbp; 2936 2937 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2938 bp, NULL, NULL, NULL, NULL); 2939 if (newbp == NULL) { 2940 freeb(bp); 2941 freemsg(mp); 2942 return (ECOMM); 2943 } 2944 bp = newbp; 2945 } 2946 } 2947 2948 count -= size; 2949 2950 if (mp == NULL) 2951 mp = bp; 2952 else 2953 linkb(mp, bp); 2954 } while (count > 0); 2955 2956 *mpp = mp; 2957 return (0); 2958 } 2959 2960 /* 2961 * Wait for a buffer to become available. Return non-zero errno 2962 * if not able to wait, 0 if buffer is probably there. 2963 */ 2964 int 2965 strwaitbuf(size_t size, int pri) 2966 { 2967 bufcall_id_t id; 2968 2969 mutex_enter(&bcall_monitor); 2970 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2971 &ttoproc(curthread)->p_flag_cv)) == 0) { 2972 mutex_exit(&bcall_monitor); 2973 return (ENOSR); 2974 } 2975 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2976 unbufcall(id); 2977 mutex_exit(&bcall_monitor); 2978 return (EINTR); 2979 } 2980 unbufcall(id); 2981 mutex_exit(&bcall_monitor); 2982 return (0); 2983 } 2984 2985 /* 2986 * This function waits for a read or write event to happen on a stream. 2987 * fmode can specify FNDELAY and/or FNONBLOCK. 2988 * The timeout is in ms with -1 meaning infinite. 2989 * The flag values work as follows: 2990 * READWAIT Check for read side errors, send M_READ 2991 * GETWAIT Check for read side errors, no M_READ 2992 * WRITEWAIT Check for write side errors. 
2993 * NOINTR Do not return error if nonblocking or timeout. 2994 * STR_NOERROR Ignore all errors except STPLEX. 2995 * STR_NOSIG Ignore/hold signals during the duration of the call. 2996 * STR_PEEK Pass through the strgeterr(). 2997 */ 2998 int 2999 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 3000 int *done) 3001 { 3002 int slpflg, errs; 3003 int error; 3004 kcondvar_t *sleepon; 3005 mblk_t *mp; 3006 ssize_t *rd_count; 3007 clock_t rval; 3008 3009 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3010 if ((flag & READWAIT) || (flag & GETWAIT)) { 3011 slpflg = RSLEEP; 3012 sleepon = &_RD(stp->sd_wrq)->q_wait; 3013 errs = STRDERR|STPLEX; 3014 } else { 3015 slpflg = WSLEEP; 3016 sleepon = &stp->sd_wrq->q_wait; 3017 errs = STWRERR|STRHUP|STPLEX; 3018 } 3019 if (flag & STR_NOERROR) 3020 errs = STPLEX; 3021 3022 if (stp->sd_wakeq & slpflg) { 3023 /* 3024 * A strwakeq() is pending, no need to sleep. 3025 */ 3026 stp->sd_wakeq &= ~slpflg; 3027 *done = 0; 3028 return (0); 3029 } 3030 3031 if (stp->sd_flag & errs) { 3032 /* 3033 * Check for errors before going to sleep since the 3034 * caller might not have checked this while holding 3035 * sd_lock. 3036 */ 3037 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3038 if (error != 0) { 3039 *done = 1; 3040 return (error); 3041 } 3042 } 3043 3044 /* 3045 * If any module downstream has requested read notification 3046 * by setting SNDMREAD flag using M_SETOPTS, send a message 3047 * down stream. 3048 */ 3049 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3050 mutex_exit(&stp->sd_lock); 3051 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3052 (flag & STR_NOSIG), &error))) { 3053 mutex_enter(&stp->sd_lock); 3054 *done = 1; 3055 return (error); 3056 } 3057 mp->b_datap->db_type = M_READ; 3058 rd_count = (ssize_t *)mp->b_wptr; 3059 *rd_count = count; 3060 mp->b_wptr += sizeof (ssize_t); 3061 /* 3062 * Send the number of bytes requested by the 3063 * read as the argument to M_READ. 3064 */ 3065 stream_willservice(stp); 3066 putnext(stp->sd_wrq, mp); 3067 stream_runservice(stp); 3068 mutex_enter(&stp->sd_lock); 3069 3070 /* 3071 * If any data arrived due to inline processing 3072 * of putnext(), don't sleep. 3073 */ 3074 if (_RD(stp->sd_wrq)->q_first != NULL) { 3075 *done = 0; 3076 return (0); 3077 } 3078 } 3079 3080 if (fmode & (FNDELAY|FNONBLOCK)) { 3081 if (!(flag & NOINTR)) 3082 error = EAGAIN; 3083 else 3084 error = 0; 3085 *done = 1; 3086 return (error); 3087 } 3088 3089 stp->sd_flag |= slpflg; 3090 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3091 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3092 stp, flag, count, fmode, done); 3093 3094 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3095 if (rval > 0) { 3096 /* EMPTY */ 3097 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3098 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3099 stp, flag, count, fmode, done); 3100 } else if (rval == 0) { 3101 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3102 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3103 stp, flag, count, fmode, done); 3104 stp->sd_flag &= ~slpflg; 3105 cv_broadcast(sleepon); 3106 if (!(flag & NOINTR)) 3107 error = EINTR; 3108 else 3109 error = 0; 3110 *done = 1; 3111 return (error); 3112 } else { 3113 /* timeout */ 3114 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3115 "strwaitq timeout:%p, %X, %lX, %X, %p", 3116 stp, flag, count, fmode, done); 3117 *done = 1; 3118 if (!(flag & NOINTR)) 3119 return (ETIME); 3120 else 3121 return (0); 3122 } 3123 /* 3124 * If the caller implements delayed errors (i.e. 
queued after data) 3125 * we can not check for errors here since data as well as an 3126 * error might have arrived at the stream head. We return to 3127 * have the caller check the read queue before checking for errors. 3128 */ 3129 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3130 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3131 if (error != 0) { 3132 *done = 1; 3133 return (error); 3134 } 3135 } 3136 *done = 0; 3137 return (0); 3138 } 3139 3140 /* 3141 * Perform job control discipline access checks. 3142 * Return 0 for success and the errno for failure. 3143 */ 3144 3145 #define cantsend(p, t, sig) \ 3146 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3147 3148 int 3149 straccess(struct stdata *stp, enum jcaccess mode) 3150 { 3151 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3152 kthread_t *t = curthread; 3153 proc_t *p = ttoproc(t); 3154 sess_t *sp; 3155 3156 ASSERT(mutex_owned(&stp->sd_lock)); 3157 3158 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3159 return (0); 3160 3161 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3162 3163 for (;;) { 3164 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3165 sp = p->p_sessp; 3166 mutex_enter(&sp->s_lock); /* protects sp->* */ 3167 3168 /* 3169 * If this is not the calling process's controlling terminal 3170 * or if the calling process is already in the foreground 3171 * then allow access. 3172 */ 3173 if (sp->s_dev != stp->sd_vnode->v_rdev || 3174 p->p_pgidp == stp->sd_pgidp) { 3175 mutex_exit(&sp->s_lock); 3176 mutex_exit(&p->p_splock); 3177 mutex_exit(&p->p_lock); 3178 return (0); 3179 } 3180 3181 /* 3182 * Check to see if controlling terminal has been deallocated. 3183 */ 3184 if (sp->s_vp == NULL) { 3185 if (!cantsend(p, t, SIGHUP)) 3186 sigtoproc(p, t, SIGHUP); 3187 mutex_exit(&sp->s_lock); 3188 mutex_exit(&p->p_splock); 3189 mutex_exit(&p->p_lock); 3190 return (EIO); 3191 } 3192 3193 mutex_exit(&sp->s_lock); 3194 mutex_exit(&p->p_splock); 3195 3196 if (mode == JCGETP) { 3197 mutex_exit(&p->p_lock); 3198 return (0); 3199 } 3200 3201 if (mode == JCREAD) { 3202 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3203 mutex_exit(&p->p_lock); 3204 return (EIO); 3205 } 3206 mutex_exit(&p->p_lock); 3207 mutex_exit(&stp->sd_lock); 3208 pgsignal(p->p_pgidp, SIGTTIN); 3209 mutex_enter(&stp->sd_lock); 3210 mutex_enter(&p->p_lock); 3211 } else { /* mode == JCWRITE or JCSETP */ 3212 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3213 cantsend(p, t, SIGTTOU)) { 3214 mutex_exit(&p->p_lock); 3215 return (0); 3216 } 3217 if (p->p_detached) { 3218 mutex_exit(&p->p_lock); 3219 return (EIO); 3220 } 3221 mutex_exit(&p->p_lock); 3222 mutex_exit(&stp->sd_lock); 3223 pgsignal(p->p_pgidp, SIGTTOU); 3224 mutex_enter(&stp->sd_lock); 3225 mutex_enter(&p->p_lock); 3226 } 3227 3228 /* 3229 * We call cv_wait_sig_swap() to cause the appropriate 3230 * action for the jobcontrol signal to take place. 3231 * If the signal is being caught, we will take the 3232 * EINTR error return. Otherwise, the default action 3233 * of causing the process to stop will take place. 3234 * In this case, we rely on the periodic cv_broadcast() on 3235 * &lbolt_cv to wake us up to loop around and test again. 3236 * We can't get here if the signal is ignored or 3237 * if the current thread is blocking the signal. 
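 *
 * For example, a background process that calls read(2) on its
 * controlling tty takes the JCREAD path above: it is sent SIGTTIN,
 * stops here in cv_wait_sig_swap(), and once continued loops around
 * to retest whether it is now in the foreground process group.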
3238 */ 3239 mutex_exit(&stp->sd_lock); 3240 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3241 mutex_exit(&p->p_lock); 3242 mutex_enter(&stp->sd_lock); 3243 return (EINTR); 3244 } 3245 mutex_exit(&p->p_lock); 3246 mutex_enter(&stp->sd_lock); 3247 mutex_enter(&p->p_lock); 3248 } 3249 } 3250 3251 /* 3252 * Return size of message of block type (bp->b_datap->db_type) 3253 */ 3254 size_t 3255 xmsgsize(mblk_t *bp) 3256 { 3257 unsigned char type; 3258 size_t count = 0; 3259 3260 type = bp->b_datap->db_type; 3261 3262 for (; bp; bp = bp->b_cont) { 3263 if (type != bp->b_datap->db_type) 3264 break; 3265 ASSERT(bp->b_wptr >= bp->b_rptr); 3266 count += bp->b_wptr - bp->b_rptr; 3267 } 3268 return (count); 3269 } 3270 3271 /* 3272 * Allocate a stream head. 3273 */ 3274 struct stdata * 3275 shalloc(queue_t *qp) 3276 { 3277 stdata_t *stp; 3278 3279 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3280 3281 stp->sd_wrq = _WR(qp); 3282 stp->sd_strtab = NULL; 3283 stp->sd_iocid = 0; 3284 stp->sd_mate = NULL; 3285 stp->sd_freezer = NULL; 3286 stp->sd_refcnt = 0; 3287 stp->sd_wakeq = 0; 3288 stp->sd_anchor = 0; 3289 stp->sd_struiowrq = NULL; 3290 stp->sd_struiordq = NULL; 3291 stp->sd_struiodnak = 0; 3292 stp->sd_struionak = NULL; 3293 stp->sd_t_audit_data = NULL; 3294 stp->sd_rput_opt = 0; 3295 stp->sd_wput_opt = 0; 3296 stp->sd_read_opt = 0; 3297 stp->sd_rprotofunc = strrput_proto; 3298 stp->sd_rmiscfunc = strrput_misc; 3299 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3300 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3301 stp->sd_ciputctrl = NULL; 3302 stp->sd_nciputctrl = 0; 3303 stp->sd_qhead = NULL; 3304 stp->sd_qtail = NULL; 3305 stp->sd_servid = NULL; 3306 stp->sd_nqueues = 0; 3307 stp->sd_svcflags = 0; 3308 stp->sd_copyflag = 0; 3309 3310 return (stp); 3311 } 3312 3313 /* 3314 * Free a stream head. 
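 *
 * The caller must not hold sd_lock (see the ASSERT below), and any
 * service runs still scheduled against this stream head are waited
 * for before the structure is returned to the cache.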
*/ 3316 void 3317 shfree(stdata_t *stp) 3318 { 3319 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3320 3321 stp->sd_wrq = NULL; 3322 3323 mutex_enter(&stp->sd_qlock); 3324 while (stp->sd_svcflags & STRS_SCHEDULED) { 3325 STRSTAT(strwaits); 3326 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3327 } 3328 mutex_exit(&stp->sd_qlock); 3329 3330 if (stp->sd_ciputctrl != NULL) { 3331 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3332 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3333 stp->sd_nciputctrl, 0); 3334 ASSERT(ciputctrl_cache != NULL); 3335 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3336 stp->sd_ciputctrl = NULL; 3337 stp->sd_nciputctrl = 0; 3338 } 3339 ASSERT(stp->sd_qhead == NULL); 3340 ASSERT(stp->sd_qtail == NULL); 3341 ASSERT(stp->sd_nqueues == 0); 3342 kmem_cache_free(stream_head_cache, stp); 3343 } 3344 3345 /* 3346 * Allocate a pair of queues and a syncq for the pair 3347 */ 3348 queue_t * 3349 allocq(void) 3350 { 3351 queinfo_t *qip; 3352 queue_t *qp, *wqp; 3353 syncq_t *sq; 3354 3355 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3356 3357 qp = &qip->qu_rqueue; 3358 wqp = &qip->qu_wqueue; 3359 sq = &qip->qu_syncq; 3360 3361 qp->q_last = NULL; 3362 qp->q_next = NULL; 3363 qp->q_ptr = NULL; 3364 qp->q_flag = QUSE | QREADR; 3365 qp->q_bandp = NULL; 3366 qp->q_stream = NULL; 3367 qp->q_syncq = sq; 3368 qp->q_nband = 0; 3369 qp->q_nfsrv = NULL; 3370 qp->q_draining = 0; 3371 qp->q_syncqmsgs = 0; 3372 qp->q_spri = 0; 3373 qp->q_qtstamp = 0; 3374 qp->q_sqtstamp = 0; 3375 qp->q_fp = NULL; 3376 3377 wqp->q_last = NULL; 3378 wqp->q_next = NULL; 3379 wqp->q_ptr = NULL; 3380 wqp->q_flag = QUSE; 3381 wqp->q_bandp = NULL; 3382 wqp->q_stream = NULL; 3383 wqp->q_syncq = sq; 3384 wqp->q_nband = 0; 3385 wqp->q_nfsrv = NULL; 3386 wqp->q_draining = 0; 3387 wqp->q_syncqmsgs = 0; 3388 wqp->q_qtstamp = 0; 3389 wqp->q_sqtstamp = 0; 3390 wqp->q_spri = 0; 3391 3392 sq->sq_count = 0; 3393 sq->sq_rmqcount = 0; 3394 sq->sq_flags = 0; 3395 sq->sq_type = 0; 3396 sq->sq_callbflags = 0; 3397 sq->sq_cancelid = 0; 3398 sq->sq_ciputctrl = NULL; 3399 sq->sq_nciputctrl = 0; 3400 sq->sq_needexcl = 0; 3401 sq->sq_svcflags = 0; 3402 3403 return (qp); 3404 } 3405 3406 /* 3407 * Free a pair of queues and the "attached" syncq. 3408 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3409 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3410 */ 3411 void 3412 freeq(queue_t *qp) 3413 { 3414 qband_t *qbp, *nqbp; 3415 syncq_t *sq, *outer; 3416 queue_t *wqp = _WR(qp); 3417 3418 ASSERT(qp->q_flag & QREADR); 3419 3420 /* 3421 * If a previously dispatched taskq job is scheduled to run 3422 * syncq_service() or a service routine is scheduled for the 3423 * queues about to be freed, wait here until all service is 3424 * done on the queue and all associated queues and syncqs. 3425 */ 3426 wait_svc(qp); 3427 3428 (void) flush_syncq(qp->q_syncq, qp); 3429 (void) flush_syncq(wqp->q_syncq, wqp); 3430 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3431 3432 /* 3433 * Flush the queues before q_next is set to NULL. This is needed 3434 * in order to backenable any downstream queue before we go away. 3435 * Note: we are already removed from the stream so that the 3436 * backenabling will not cause any messages to be delivered to our 3437 * put procedures.
3438 */ 3439 flushq(qp, FLUSHALL); 3440 flushq(wqp, FLUSHALL); 3441 3442 /* Tidy up - removeq only does a half-remove from stream */ 3443 qp->q_next = wqp->q_next = NULL; 3444 ASSERT(!(qp->q_flag & QENAB)); 3445 ASSERT(!(wqp->q_flag & QENAB)); 3446 3447 outer = qp->q_syncq->sq_outer; 3448 if (outer != NULL) { 3449 outer_remove(outer, qp->q_syncq); 3450 if (wqp->q_syncq != qp->q_syncq) 3451 outer_remove(outer, wqp->q_syncq); 3452 } 3453 /* 3454 * Free any syncqs that are outside what allocq returned. 3455 */ 3456 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3457 free_syncq(qp->q_syncq); 3458 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3459 free_syncq(wqp->q_syncq); 3460 3461 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3462 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3463 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3464 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3465 sq = SQ(qp); 3466 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3467 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3468 ASSERT(sq->sq_outer == NULL); 3469 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3470 ASSERT(sq->sq_callbpend == NULL); 3471 ASSERT(sq->sq_needexcl == 0); 3472 3473 if (sq->sq_ciputctrl != NULL) { 3474 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3475 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3476 sq->sq_nciputctrl, 0); 3477 ASSERT(ciputctrl_cache != NULL); 3478 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3479 sq->sq_ciputctrl = NULL; 3480 sq->sq_nciputctrl = 0; 3481 } 3482 3483 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3484 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3485 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3486 3487 qp->q_flag &= ~QUSE; 3488 wqp->q_flag &= ~QUSE; 3489 3490 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3491 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3492 3493 qbp = qp->q_bandp; 3494 while (qbp) { 3495 nqbp = qbp->qb_next; 3496 freeband(qbp); 3497 qbp = nqbp; 3498 } 3499 qbp = wqp->q_bandp; 3500 while (qbp) { 3501 nqbp = qbp->qb_next; 3502 freeband(qbp); 3503 qbp = nqbp; 3504 } 3505 kmem_cache_free(queue_cache, qp); 3506 } 3507 3508 /* 3509 * Allocate a qband structure. 3510 */ 3511 qband_t * 3512 allocband(void) 3513 { 3514 qband_t *qbp; 3515 3516 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3517 if (qbp == NULL) 3518 return (NULL); 3519 3520 qbp->qb_next = NULL; 3521 qbp->qb_count = 0; 3522 qbp->qb_mblkcnt = 0; 3523 qbp->qb_first = NULL; 3524 qbp->qb_last = NULL; 3525 qbp->qb_flag = 0; 3526 3527 return (qbp); 3528 } 3529 3530 /* 3531 * Free a qband structure. 3532 */ 3533 void 3534 freeband(qband_t *qbp) 3535 { 3536 kmem_cache_free(qband_cache, qbp); 3537 } 3538 3539 /* 3540 * Just like putnextctl(9F), except that allocb_wait() is used. 3541 * 3542 * Consolidation Private, and of course only callable from the stream head or 3543 * routines that may block. 3544 */ 3545 int 3546 putnextctl_wait(queue_t *q, int type) 3547 { 3548 mblk_t *bp; 3549 int error; 3550 3551 if ((datamsg(type) && (type != M_DELAY)) || 3552 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3553 return (0); 3554 3555 bp->b_datap->db_type = (unsigned char)type; 3556 putnext(q, bp); 3557 return (1); 3558 } 3559 3560 /* 3561 * Run any possible bufcalls. 
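 *
 * A minimal sketch of the consumer-side pattern (see strwaitbuf() above
 * for a real caller):
 *
 *	if ((id = bufcall(size, BPRI_MED, func, arg)) == 0)
 *		return (ENOSR);
 *	...wait for func to run, or give up...
 *	unbufcall(id);
 *
 * runbufcalls() is what eventually invokes 'func' once kmem_avail()
 * suggests the request can be satisfied.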
*/ 3563 void 3564 runbufcalls(void) 3565 { 3566 strbufcall_t *bcp; 3567 3568 mutex_enter(&bcall_monitor); 3569 mutex_enter(&strbcall_lock); 3570 3571 if (strbcalls.bc_head) { 3572 size_t count; 3573 int nevent; 3574 3575 /* 3576 * Count how many events are on the list 3577 * now so we can avoid looping forever 3578 * in low-memory situations. 3579 */ 3580 nevent = 0; 3581 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3582 nevent++; 3583 3584 /* 3585 * Get an estimate of available memory from kmem_avail(). 3586 * Wake all bufcall functions waiting for 3587 * memory whose request could be satisfied 3588 * by 'count' bytes of memory and let them fight for it. 3589 */ 3590 count = kmem_avail(); 3591 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3592 STRSTAT(bufcalls); 3593 --nevent; 3594 if (bcp->bc_size <= count) { 3595 bcp->bc_executor = curthread; 3596 mutex_exit(&strbcall_lock); 3597 (*bcp->bc_func)(bcp->bc_arg); 3598 mutex_enter(&strbcall_lock); 3599 bcp->bc_executor = NULL; 3600 cv_broadcast(&bcall_cv); 3601 strbcalls.bc_head = bcp->bc_next; 3602 kmem_free(bcp, sizeof (strbufcall_t)); 3603 } else { 3604 /* 3605 * Too big; try again later - note 3606 * that nevent was decremented above 3607 * so we won't retry this one on this 3608 * iteration of the loop. 3609 */ 3610 if (bcp->bc_next != NULL) { 3611 strbcalls.bc_head = bcp->bc_next; 3612 bcp->bc_next = NULL; 3613 strbcalls.bc_tail->bc_next = bcp; 3614 strbcalls.bc_tail = bcp; 3615 } 3616 } 3617 } 3618 if (strbcalls.bc_head == NULL) 3619 strbcalls.bc_tail = NULL; 3620 } 3621 3622 mutex_exit(&strbcall_lock); 3623 mutex_exit(&bcall_monitor); 3624 } 3625 3626 3627 /* 3628 * Actually run the queue's service routine. 3629 */ 3630 static void 3631 runservice(queue_t *q) 3632 { 3633 qband_t *qbp; 3634 3635 ASSERT(q->q_qinfo->qi_srvp); 3636 again: 3637 entersq(q->q_syncq, SQ_SVC); 3638 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3639 "runservice starts:%p", q); 3640 3641 if (!(q->q_flag & QWCLOSE)) 3642 (*q->q_qinfo->qi_srvp)(q); 3643 3644 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3645 "runservice ends:(%p)", q); 3646 3647 leavesq(q->q_syncq, SQ_SVC); 3648 3649 mutex_enter(QLOCK(q)); 3650 if (q->q_flag & QENAB) { 3651 q->q_flag &= ~QENAB; 3652 mutex_exit(QLOCK(q)); 3653 goto again; 3654 } 3655 q->q_flag &= ~QINSERVICE; 3656 q->q_flag &= ~QBACK; 3657 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3658 qbp->qb_flag &= ~QB_BACK; 3659 /* 3660 * Wake up threads waiting for the service procedure 3661 * to be run (strclose and qdetach). 3662 */ 3663 cv_broadcast(&q->q_wait); 3664 3665 mutex_exit(QLOCK(q)); 3666 } 3667 3668 /* 3669 * Background processing of bufcalls.
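 *
 * This service loop retries runbufcalls() whenever memory appears to be
 * available, otherwise sleeping for up to 60 seconds at a time on
 * memavail_cv (see the cv_reltimedwait() below) rather than spinning.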
3670 */ 3671 void 3672 streams_bufcall_service(void) 3673 { 3674 callb_cpr_t cprinfo; 3675 3676 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3677 "streams_bufcall_service"); 3678 3679 mutex_enter(&strbcall_lock); 3680 3681 for (;;) { 3682 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3683 mutex_exit(&strbcall_lock); 3684 runbufcalls(); 3685 mutex_enter(&strbcall_lock); 3686 } 3687 if (strbcalls.bc_head != NULL) { 3688 STRSTAT(bcwaits); 3689 /* Wait for memory to become available */ 3690 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3691 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock, 3692 SEC_TO_TICK(60), TR_CLOCK_TICK); 3693 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3694 } 3695 3696 /* Wait for new work to arrive */ 3697 if (strbcalls.bc_head == NULL) { 3698 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3699 cv_wait(&strbcall_cv, &strbcall_lock); 3700 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3701 } 3702 } 3703 } 3704 3705 /* 3706 * Background processing of streams background tasks which failed 3707 * taskq_dispatch. 3708 */ 3709 static void 3710 streams_qbkgrnd_service(void) 3711 { 3712 callb_cpr_t cprinfo; 3713 queue_t *q; 3714 3715 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3716 "streams_bkgrnd_service"); 3717 3718 mutex_enter(&service_queue); 3719 3720 for (;;) { 3721 /* 3722 * Wait for work to arrive. 3723 */ 3724 while ((freebs_list == NULL) && (qhead == NULL)) { 3725 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3726 cv_wait(&services_to_run, &service_queue); 3727 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3728 } 3729 /* 3730 * Handle all pending freebs requests to free memory. 3731 */ 3732 while (freebs_list != NULL) { 3733 mblk_t *mp = freebs_list; 3734 freebs_list = mp->b_next; 3735 mutex_exit(&service_queue); 3736 mblk_free(mp); 3737 mutex_enter(&service_queue); 3738 } 3739 /* 3740 * Run pending queues. 3741 */ 3742 while (qhead != NULL) { 3743 DQ(q, qhead, qtail, q_link); 3744 ASSERT(q != NULL); 3745 mutex_exit(&service_queue); 3746 queue_service(q); 3747 mutex_enter(&service_queue); 3748 } 3749 ASSERT(qhead == NULL && qtail == NULL); 3750 } 3751 } 3752 3753 /* 3754 * Background processing of streams background tasks which failed 3755 * taskq_dispatch. 3756 */ 3757 static void 3758 streams_sqbkgrnd_service(void) 3759 { 3760 callb_cpr_t cprinfo; 3761 syncq_t *sq; 3762 3763 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3764 "streams_sqbkgrnd_service"); 3765 3766 mutex_enter(&service_queue); 3767 3768 for (;;) { 3769 /* 3770 * Wait for work to arrive. 3771 */ 3772 while (sqhead == NULL) { 3773 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3774 cv_wait(&syncqs_to_run, &service_queue); 3775 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3776 } 3777 3778 /* 3779 * Run pending syncqs. 3780 */ 3781 while (sqhead != NULL) { 3782 DQ(sq, sqhead, sqtail, sq_next); 3783 ASSERT(sq != NULL); 3784 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3785 mutex_exit(&service_queue); 3786 syncq_service(sq); 3787 mutex_enter(&service_queue); 3788 } 3789 } 3790 } 3791 3792 /* 3793 * Disable the syncq and wait for background syncq processing to complete. 3794 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3795 * list. 
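 *
 * Typical caller sequence (see rele_dm() above): wait_sq_svc(sq) to
 * disable the syncq and drain any background servicing, followed by
 * free_syncq(sq) once no service thread can still reference it.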
3796 */ 3797 void 3798 wait_sq_svc(syncq_t *sq) 3799 { 3800 mutex_enter(SQLOCK(sq)); 3801 sq->sq_svcflags |= SQ_DISABLED; 3802 if (sq->sq_svcflags & SQ_BGTHREAD) { 3803 syncq_t *sq_chase; 3804 syncq_t *sq_curr; 3805 int removed; 3806 3807 ASSERT(sq->sq_servcount == 1); 3808 mutex_enter(&service_queue); 3809 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3810 mutex_exit(&service_queue); 3811 if (removed) { 3812 sq->sq_svcflags &= ~SQ_BGTHREAD; 3813 sq->sq_servcount = 0; 3814 STRSTAT(sqremoved); 3815 goto done; 3816 } 3817 } 3818 while (sq->sq_servcount != 0) { 3819 sq->sq_flags |= SQ_WANTWAKEUP; 3820 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3821 } 3822 done: 3823 mutex_exit(SQLOCK(sq)); 3824 } 3825 3826 /* 3827 * Put a syncq on the list of syncq's to be serviced by the sqthread. 3828 * Add the argument to the end of the sqhead list and set the flag 3829 * indicating this syncq has been enabled. If it has already been 3830 * enabled, don't do anything. 3831 * This routine assumes that SQLOCK is held. 3832 * NOTE that the lock order is to have the SQLOCK first, 3833 * so if the service_syncq lock is held, we need to release it 3834 * before acquiring the SQLOCK (mostly relevant for the background 3835 * thread, and this seems to be common among the STREAMS global locks). 3836 * Note that the sq_svcflags are protected by the SQLOCK. 3837 */ 3838 void 3839 sqenable(syncq_t *sq) 3840 { 3841 /* 3842 * This is probably not important except for where I believe it 3843 * is being called. At that point, it should be held (and it 3844 * is a pain to release it just for this routine, so don't do 3845 * it). 3846 */ 3847 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3848 3849 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3850 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3851 3852 /* 3853 * Do not put on list if background thread is scheduled or 3854 * syncq is disabled. 3855 */ 3856 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3857 return; 3858 3859 /* 3860 * Check whether we should enable sq at all. 3861 * Non PERMOD syncqs may be drained by at most one thread. 3862 * PERMOD syncqs may be drained by several threads but we limit the 3863 * total amount to the lesser of 3864 * Number of queues on the squeue and 3865 * Number of CPUs. 3866 */ 3867 if (sq->sq_servcount != 0) { 3868 if (((sq->sq_type & SQ_PERMOD) == 0) || 3869 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3870 STRSTAT(sqtoomany); 3871 return; 3872 } 3873 } 3874 3875 sq->sq_tstamp = ddi_get_lbolt(); 3876 STRSTAT(sqenables); 3877 3878 /* Attempt a taskq dispatch */ 3879 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3880 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3881 if (sq->sq_servid != NULL) { 3882 sq->sq_servcount++; 3883 return; 3884 } 3885 3886 /* 3887 * This taskq dispatch failed, but a previous one may have succeeded. 3888 * Don't try to schedule on the background thread whilst there is 3889 * outstanding taskq processing. 3890 */ 3891 if (sq->sq_servcount != 0) 3892 return; 3893 3894 /* 3895 * System is low on resources and can't perform a non-sleeping 3896 * dispatch. Schedule the syncq for a background thread and mark the 3897 * syncq to avoid any further taskq dispatch attempts. 
*/ 3899 mutex_enter(&service_queue); 3900 STRSTAT(taskqfails); 3901 ENQUEUE(sq, sqhead, sqtail, sq_next); 3902 sq->sq_svcflags |= SQ_BGTHREAD; 3903 sq->sq_servcount = 1; 3904 cv_signal(&syncqs_to_run); 3905 mutex_exit(&service_queue); 3906 } 3907 3908 /* 3909 * Note: fifo_close() depends on the mblk_t on the queue being freed 3910 * asynchronously. The asynchronous freeing of messages breaks the 3911 * recursive call chain of fifo_close() while there are I_SENDFD type of 3912 * messages referring to other file pointers on the queue. Then when 3913 * closing pipes it can avoid stack overflow in case of daisy-chained 3914 * pipes, and also avoid deadlock in case of fifonode_t pairs (which 3915 * share the same fifolock_t). 3916 * 3917 * No need to kpreempt_disable to access cpu_seqid. If we migrate and 3918 * the esb queue does not match the new CPU, that is OK. 3919 */ 3920 void 3921 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3922 { 3923 int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q; 3924 esb_queue_t *eqp; 3925 3926 ASSERT(dbp->db_mblk == mp); 3927 ASSERT(qindex < esbq_nelem); 3928 3929 eqp = system_esbq_array; 3930 if (eqp != NULL) { 3931 eqp += qindex; 3932 } else { 3933 mutex_enter(&esbq_lock); 3934 if (kmem_ready && system_esbq_array == NULL) 3935 system_esbq_array = (esb_queue_t *)kmem_zalloc( 3936 esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP); 3937 mutex_exit(&esbq_lock); 3938 eqp = system_esbq_array; 3939 if (eqp != NULL) 3940 eqp += qindex; 3941 else 3942 eqp = &system_esbq; 3943 } 3944 3945 /* 3946 * Check data sanity. The dblock should have a non-NULL free function. 3947 * It is better to panic here than later, when the dblock is freed 3948 * asynchronously and the context is lost. 3949 */ 3950 if (dbp->db_frtnp->free_func == NULL) { 3951 panic("freebs_enqueue: dblock %p has a NULL free callback", 3952 (void *)dbp); 3953 } 3954 3955 mutex_enter(&eqp->eq_lock); 3956 /* queue the new mblk on the esballoc queue */ 3957 if (eqp->eq_head == NULL) { 3958 eqp->eq_head = eqp->eq_tail = mp; 3959 } else { 3960 eqp->eq_tail->b_next = mp; 3961 eqp->eq_tail = mp; 3962 } 3963 eqp->eq_len++; 3964 3965 /* If we're the first thread to reach the threshold, process */ 3966 if (eqp->eq_len >= esbq_max_qlen && 3967 !(eqp->eq_flags & ESBQ_PROCESSING)) 3968 esballoc_process_queue(eqp); 3969 3970 esballoc_set_timer(eqp, esbq_timeout); 3971 mutex_exit(&eqp->eq_lock); 3972 } 3973 3974 static void 3975 esballoc_process_queue(esb_queue_t *eqp) 3976 { 3977 mblk_t *mp; 3978 3979 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 3980 3981 eqp->eq_flags |= ESBQ_PROCESSING; 3982 3983 do { 3984 /* 3985 * Detach the message chain for processing. 3986 */ 3987 mp = eqp->eq_head; 3988 eqp->eq_tail->b_next = NULL; 3989 eqp->eq_head = eqp->eq_tail = NULL; 3990 eqp->eq_len = 0; 3991 mutex_exit(&eqp->eq_lock); 3992 3993 /* 3994 * Process the message chain.
3995 */ 3996 esballoc_enqueue_mblk(mp); 3997 mutex_enter(&eqp->eq_lock); 3998 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0)); 3999 4000 eqp->eq_flags &= ~ESBQ_PROCESSING; 4001 } 4002 4003 /* 4004 * taskq callback routine to free esballoced mblk's 4005 */ 4006 static void 4007 esballoc_mblk_free(mblk_t *mp) 4008 { 4009 mblk_t *nextmp; 4010 4011 for (; mp != NULL; mp = nextmp) { 4012 nextmp = mp->b_next; 4013 mp->b_next = NULL; 4014 mblk_free(mp); 4015 } 4016 } 4017 4018 static void 4019 esballoc_enqueue_mblk(mblk_t *mp) 4020 { 4021 4022 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp, 4023 TQ_NOSLEEP) == NULL) { 4024 mblk_t *first_mp = mp; 4025 /* 4026 * System is low on resources and can't perform a non-sleeping 4027 * dispatch. Schedule for a background thread. 4028 */ 4029 mutex_enter(&service_queue); 4030 STRSTAT(taskqfails); 4031 4032 while (mp->b_next != NULL) 4033 mp = mp->b_next; 4034 4035 mp->b_next = freebs_list; 4036 freebs_list = first_mp; 4037 cv_signal(&services_to_run); 4038 mutex_exit(&service_queue); 4039 } 4040 } 4041 4042 static void 4043 esballoc_timer(void *arg) 4044 { 4045 esb_queue_t *eqp = arg; 4046 4047 mutex_enter(&eqp->eq_lock); 4048 eqp->eq_flags &= ~ESBQ_TIMER; 4049 4050 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4051 eqp->eq_len > 0) 4052 esballoc_process_queue(eqp); 4053 4054 esballoc_set_timer(eqp, esbq_timeout); 4055 mutex_exit(&eqp->eq_lock); 4056 } 4057 4058 static void 4059 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4060 { 4061 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4062 4063 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4064 (void) timeout(esballoc_timer, eqp, eq_timeout); 4065 eqp->eq_flags |= ESBQ_TIMER; 4066 } 4067 } 4068 4069 /* 4070 * Setup esbq array length based upon NCPU scaled by CPUs per 4071 * queue. Use static system_esbq until kmem_ready and we can 4072 * create an array in freebs_enqueue(). 4073 */ 4074 void 4075 esballoc_queue_init(void) 4076 { 4077 esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1); 4078 esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q; 4079 esbq_nelem = howmany(NCPU, esbq_cpus_per_q); 4080 system_esbq.eq_len = 0; 4081 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4082 system_esbq.eq_flags = 0; 4083 } 4084 4085 /* 4086 * Set the QBACK or QB_BACK flag in the given queue for 4087 * the given priority band. 
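 * A band value of 0 denotes the normal band and sets QBACK on the queue
 * itself; a nonzero band sets QB_BACK on the matching qband_t, allocating
 * any intermediate qband structures that do not yet exist.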
4088 */ 4089 void 4090 setqback(queue_t *q, unsigned char pri) 4091 { 4092 int i; 4093 qband_t *qbp; 4094 qband_t **qbpp; 4095 4096 ASSERT(MUTEX_HELD(QLOCK(q))); 4097 if (pri != 0) { 4098 if (pri > q->q_nband) { 4099 qbpp = &q->q_bandp; 4100 while (*qbpp) 4101 qbpp = &(*qbpp)->qb_next; 4102 while (pri > q->q_nband) { 4103 if ((*qbpp = allocband()) == NULL) { 4104 cmn_err(CE_WARN, 4105 "setqback: can't allocate qband\n"); 4106 return; 4107 } 4108 (*qbpp)->qb_hiwat = q->q_hiwat; 4109 (*qbpp)->qb_lowat = q->q_lowat; 4110 q->q_nband++; 4111 qbpp = &(*qbpp)->qb_next; 4112 } 4113 } 4114 qbp = q->q_bandp; 4115 i = pri; 4116 while (--i) 4117 qbp = qbp->qb_next; 4118 qbp->qb_flag |= QB_BACK; 4119 } else { 4120 q->q_flag |= QBACK; 4121 } 4122 } 4123 4124 int 4125 strcopyin(void *from, void *to, size_t len, int copyflag) 4126 { 4127 if (copyflag & U_TO_K) { 4128 ASSERT((copyflag & K_TO_K) == 0); 4129 if (copyin(from, to, len)) 4130 return (EFAULT); 4131 } else { 4132 ASSERT(copyflag & K_TO_K); 4133 bcopy(from, to, len); 4134 } 4135 return (0); 4136 } 4137 4138 int 4139 strcopyout(void *from, void *to, size_t len, int copyflag) 4140 { 4141 if (copyflag & U_TO_K) { 4142 if (copyout(from, to, len)) 4143 return (EFAULT); 4144 } else { 4145 ASSERT(copyflag & K_TO_K); 4146 bcopy(from, to, len); 4147 } 4148 return (0); 4149 } 4150 4151 /* 4152 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4153 * It assumes that the stream head lock is already held, whereas strsignal() 4154 * acquires the lock first. This routine was created because a few callers 4155 * release the stream head lock before calling only to re-acquire it after 4156 * it returns. 4157 */ 4158 void 4159 strsignal_nolock(stdata_t *stp, int sig, uchar_t band) 4160 { 4161 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4162 switch (sig) { 4163 case SIGPOLL: 4164 if (stp->sd_sigflags & S_MSG) 4165 strsendsig(stp->sd_siglist, S_MSG, band, 0); 4166 break; 4167 default: 4168 if (stp->sd_pgidp) 4169 pgsignal(stp->sd_pgidp, sig); 4170 break; 4171 } 4172 } 4173 4174 void 4175 strsignal(stdata_t *stp, int sig, int32_t band) 4176 { 4177 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4178 "strsignal:%p, %X, %X", stp, sig, band); 4179 4180 mutex_enter(&stp->sd_lock); 4181 switch (sig) { 4182 case SIGPOLL: 4183 if (stp->sd_sigflags & S_MSG) 4184 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4185 break; 4186 4187 default: 4188 if (stp->sd_pgidp) { 4189 pgsignal(stp->sd_pgidp, sig); 4190 } 4191 break; 4192 } 4193 mutex_exit(&stp->sd_lock); 4194 } 4195 4196 void 4197 strhup(stdata_t *stp) 4198 { 4199 ASSERT(mutex_owned(&stp->sd_lock)); 4200 pollwakeup(&stp->sd_pollist, POLLHUP); 4201 if (stp->sd_sigflags & S_HANGUP) 4202 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4203 } 4204 4205 /* 4206 * Backenable the first queue upstream from `q' with a service procedure. 4207 */ 4208 void 4209 backenable(queue_t *q, uchar_t pri) 4210 { 4211 queue_t *nq; 4212 4213 /* 4214 * Our presence might not prevent other modules in our own 4215 * stream from popping/pushing since the caller of getq might not 4216 * have a claim on the queue (some drivers do a getq on somebody 4217 * else's queue - they know that the queue itself is not going away 4218 * but the framework has to guarantee q_next in that stream). 
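 *
 * The claim protocol, in outline:
 *
 *	claimstr(q);
 *	nq = backq(q);
 *	... use nq, knowing the q_next linkage cannot change ...
 *	releasestr(q);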
4219  */
4220 	claimstr(q);
4221 
4222 	/* Find nearest back queue with service proc */
4223 	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4224 		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4225 	}
4226 
4227 	if (nq) {
4228 		kthread_t *freezer;
4229 		/*
4230 		 * backenable can be called either with no locks held
4231 		 * or with the stream frozen (the latter occurs when a module
4232 		 * calls rmvq with the stream frozen). If the stream is frozen
4233 		 * by the caller the caller will hold all qlocks in the stream.
4234 		 * Note that a frozen stream doesn't freeze a mated stream,
4235 		 * so we explicitly check for that.
4236 		 */
4237 		freezer = STREAM(q)->sd_freezer;
4238 		if (freezer != curthread || STREAM(q) != STREAM(nq)) {
4239 			mutex_enter(QLOCK(nq));
4240 		}
4241 #ifdef DEBUG
4242 		else {
4243 			ASSERT(frozenstr(q));
4244 			ASSERT(MUTEX_HELD(QLOCK(q)));
4245 			ASSERT(MUTEX_HELD(QLOCK(nq)));
4246 		}
4247 #endif
4248 		setqback(nq, pri);
4249 		qenable_locked(nq);
4250 		if (freezer != curthread || STREAM(q) != STREAM(nq))
4251 			mutex_exit(QLOCK(nq));
4252 	}
4253 	releasestr(q);
4254 }
4255 
4256 /*
4257  * Return the appropriate errno when one of flags_to_check is set
4258  * in sd_flags. Uses the exported error routines if they are set.
4259  * Will return 0 if no error is set (or if the exported error routines
4260  * do not return an error).
4261  *
4262  * If there are both a read and a write error to check, we prefer the
4263  * read error. Also, give preference to recorded errnos over the error
4264  * functions. The flags that are handled are:
4265  *	STPLEX		return EINVAL
4266  *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
4267  *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
4268  *	STRHUP		return sd_werror
4269  *
4270  * If the caller indicates that the operation is a peek, a nonpersistent error
4271  * is not cleared.
4272  */
4273 int
4274 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4275 {
4276 	int32_t sd_flag = stp->sd_flag & flags_to_check;
4277 	int error = 0;
4278 
4279 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4280 	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4281 	if (sd_flag & STPLEX)
4282 		error = EINVAL;
4283 	else if (sd_flag & STRDERR) {
4284 		error = stp->sd_rerror;
4285 		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4286 			/*
4287 			 * Read errors are non-persistent, i.e. discarded once
4288 			 * returned to a non-peeking caller.
4289 			 */
4290 			stp->sd_rerror = 0;
4291 			stp->sd_flag &= ~STRDERR;
4292 		}
4293 		if (error == 0 && stp->sd_rderrfunc != NULL) {
4294 			int clearerr = 0;
4295 
4296 			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
4297 			    &clearerr);
4298 			if (clearerr) {
4299 				stp->sd_flag &= ~STRDERR;
4300 				stp->sd_rderrfunc = NULL;
4301 			}
4302 		}
4303 	} else if (sd_flag & STWRERR) {
4304 		error = stp->sd_werror;
4305 		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4306 			/*
4307 			 * Write errors are non-persistent, i.e.
 discarded once
4308 			 * returned to a non-peeking caller.
4309 			 */
4310 			stp->sd_werror = 0;
4311 			stp->sd_flag &= ~STWRERR;
4312 		}
4313 		if (error == 0 && stp->sd_wrerrfunc != NULL) {
4314 			int clearerr = 0;
4315 
4316 			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
4317 			    &clearerr);
4318 			if (clearerr) {
4319 				stp->sd_flag &= ~STWRERR;
4320 				stp->sd_wrerrfunc = NULL;
4321 			}
4322 		}
4323 	} else if (sd_flag & STRHUP) {
4324 		/* sd_werror set when STRHUP */
4325 		error = stp->sd_werror;
4326 	}
4327 	return (error);
4328 }
4329 
4330 
4331 /*
4332  * Single-thread open/close/push/pop,
4333  * also for twisted streams.
4334  */
4335 int
4336 strstartplumb(stdata_t *stp, int flag, int cmd)
4337 {
4338 	int waited = 1;
4339 	int error = 0;
4340 
4341 	if (STRMATED(stp)) {
4342 		struct stdata *stmatep = stp->sd_mate;
4343 
4344 		STRLOCKMATES(stp);
4345 		while (waited) {
4346 			waited = 0;
4347 			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4348 				if ((cmd == I_POP) &&
4349 				    (flag & (FNDELAY|FNONBLOCK))) {
4350 					STRUNLOCKMATES(stp);
4351 					return (EAGAIN);
4352 				}
4353 				waited = 1;
4354 				mutex_exit(&stp->sd_lock);
4355 				if (!cv_wait_sig(&stmatep->sd_monitor,
4356 				    &stmatep->sd_lock)) {
4357 					mutex_exit(&stmatep->sd_lock);
4358 					return (EINTR);
4359 				}
4360 				mutex_exit(&stmatep->sd_lock);
4361 				STRLOCKMATES(stp);
4362 			}
4363 			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4364 				if ((cmd == I_POP) &&
4365 				    (flag & (FNDELAY|FNONBLOCK))) {
4366 					STRUNLOCKMATES(stp);
4367 					return (EAGAIN);
4368 				}
4369 				waited = 1;
4370 				mutex_exit(&stmatep->sd_lock);
4371 				if (!cv_wait_sig(&stp->sd_monitor,
4372 				    &stp->sd_lock)) {
4373 					mutex_exit(&stp->sd_lock);
4374 					return (EINTR);
4375 				}
4376 				mutex_exit(&stp->sd_lock);
4377 				STRLOCKMATES(stp);
4378 			}
4379 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4380 				error = strgeterr(stp,
4381 				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4382 				if (error != 0) {
4383 					STRUNLOCKMATES(stp);
4384 					return (error);
4385 				}
4386 			}
4387 		}
4388 		stp->sd_flag |= STRPLUMB;
4389 		STRUNLOCKMATES(stp);
4390 	} else {
4391 		mutex_enter(&stp->sd_lock);
4392 		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4393 			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4394 			    (flag & (FNDELAY|FNONBLOCK))) {
4395 				mutex_exit(&stp->sd_lock);
4396 				return (EAGAIN);
4397 			}
4398 			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4399 				mutex_exit(&stp->sd_lock);
4400 				return (EINTR);
4401 			}
4402 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4403 				error = strgeterr(stp,
4404 				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4405 				if (error != 0) {
4406 					mutex_exit(&stp->sd_lock);
4407 					return (error);
4408 				}
4409 			}
4410 		}
4411 		stp->sd_flag |= STRPLUMB;
4412 		mutex_exit(&stp->sd_lock);
4413 	}
4414 	return (0);
4415 }
4416 
4417 /*
4418  * Complete the plumbing operation associated with stream `stp'.
4419  */
4420 void
4421 strendplumb(stdata_t *stp)
4422 {
4423 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4424 	ASSERT(stp->sd_flag & STRPLUMB);
4425 	stp->sd_flag &= ~STRPLUMB;
4426 	cv_broadcast(&stp->sd_monitor);
4427 }
4428 
4429 /*
4430  * This describes how the STREAMS framework handles synchronization
4431  * during open/push and close/pop.
4432  * The key interfaces for open and close are qprocson and qprocsoff,
4433  * respectively. While the close case is in general harder, open
4434  * and close have significant similarities.
4435  *
4436  * During close the STREAMS framework has to both ensure that there
4437  * are no stale references to the queue pair (and syncq) that
4438  * are being closed and also provide the guarantees that are documented
4439  * in qprocsoff(9F).
4440  * If there are stale references to the queue that is closing it can
4441  * result in kernel memory corruption or kernel panics.
4442  *
4443  * Note that it is up to the module/driver to ensure that it itself
4444  * does not have any stale references to the closing queues once its close
4445  * routine returns. This includes:
4446  * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4447  *   associated with the queues. For timeout and bufcall callbacks the
4448  *   module/driver also has to wait for (or otherwise synchronize with)
4449  *   any callbacks that are already in progress.
4450  * - If the module/driver is using esballoc it has to ensure that any
4451  *   esballoc free functions do not refer to a queue that has closed.
4452  *   (Note that in general the close routine can not wait for the esballoc'ed
4453  *   messages to be freed since that can cause a deadlock.)
4454  * - Cancelling any interrupts that refer to the closing queues and
4455  *   also ensuring that there are no interrupts in progress that will
4456  *   refer to the closing queues once the close routine returns.
4457  * - For multiplexors, removing any driver global state that refers to
4458  *   the closing queue and also ensuring that there are no threads in
4459  *   the multiplexor that have picked up a queue pointer but not yet
4460  *   finished using it.
4461  *
4462  * In addition, a driver/module can only reference the q_next pointer
4463  * in its open, close, put, or service procedures or in a
4464  * qtimeout/qbufcall callback procedure executing "on" the correct
4465  * stream. Thus it can not reference the q_next pointer in an interrupt
4466  * routine or a timeout, bufcall or esballoc callback routine. Likewise
4467  * it can not reference the q_next of a different queue, e.g. in a mux that
4468  * passes messages from one queue's put/service procedure to another queue.
4469  * In all the cases where the driver/module can not access the q_next
4470  * field it must use the *next* versions, e.g. canputnext instead of
4471  * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4472  *
4473  *
4474  * Assuming that the driver/module conforms to the above constraints,
4475  * the STREAMS framework has to avoid stale references to q_next for all
4476  * the framework internal cases, which include (but are not limited to):
4477  * - Threads in canput/canputnext/backenable and elsewhere that are
4478  *   walking q_next.
4479  * - Messages on a syncq that have a reference to the queue through b_queue.
4480  * - Messages on an outer perimeter (syncq) that have a reference to the
4481  *   queue through b_queue.
4482  * - Threads that use q_nfsrv (e.g. canput) to find a queue.
4483  *   Note that only canput and bcanput use q_nfsrv without any locking.
4484  *
4485  * For the STREAMS framework to provide the qprocsoff(9F) guarantees, it
4486  * has to ensure that after qprocsoff returns no thread can enter the put
4487  * or service routines for the closing read or write-side queue.
4488  * In addition to preventing "direct" entry into the put procedures
4489  * the framework also has to prevent messages from being drained from
4490  * the syncq or the outer perimeter.
4491  * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4492  * mechanism to prevent qwriter(PERIM_OUTER) from running after
4493  * qprocsoff has returned.
4494  * Note that if a module/driver uses put(9F) on one of its own queues
4495  * it is up to the module/driver to ensure that the put() doesn't
4496  * get called when the queue is closing.
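 *
 * A module might implement that last guard along these lines (a purely
 * hypothetical sketch; the xx_ names are invented for illustration):
 *
 *	mutex_enter(&xx_lock);
 *	if (!xx_closing)
 *		put(xx_wrq, mp);
 *	else
 *		freemsg(mp);
 *	mutex_exit(&xx_lock);
 *
 * where xx_close() sets xx_closing under xx_lock before it returns.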
4497  *
4498  *
4499  * The framework aspects of the above "contract" are implemented by
4500  * qprocsoff, removeq, and strlock:
4501  * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4502  *   entering the service procedures.
4503  * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4504  *   canputnext, backenable etc from dereferencing the q_next that will
4505  *   soon change.
4506  * - strlock waits for sd_refcnt to drop to zero so that e.g. any canputnext
4507  *   or other q_next walker that uses claimstr/releasestr has finished.
4508  * - optionally, for every syncq in the stream, strlock acquires all the
4509  *   sq_lock's and waits for all sq_counts to drop to a value that indicates
4510  *   that no thread executes in the put or service procedures and that no
4511  *   thread is draining into the module/driver. This ensures that no
4512  *   open, close, put, service, or qtimeout/qbufcall callback procedure is
4513  *   currently executing; hence no such thread can end up with the old stale
4514  *   q_next value and no canput/backenable can have the old stale
4515  *   q_nfsrv/q_next.
4516  * - qdetach (wait_svc) makes sure that any scheduled or running threads
4517  *   have either finished or observed the QWCLOSE flag and gone away.
4518  */
4519 
4520 
4521 /*
4522  * Get all the locks necessary to change q_next.
4523  *
4524  * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4525  * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
4526  * the only threads inside the syncq are threads currently calling removeq().
4527  * Since threads calling removeq() are in the process of removing their queues
4528  * from the stream, we do not need to worry about them accessing a stale q_next
4529  * pointer and thus we do not need to wait for them to exit (in fact, waiting
4530  * for them can cause deadlock).
4531  *
4532  * This routine is subject to starvation since it does not set any flag to
4533  * prevent threads from entering a module in the stream (i.e. sq_count can
4534  * increase on some syncq while it is waiting on some other syncq).
4535  *
4536  * Assumes that only one thread attempts to call strlock for a given
4537  * stream. If this is not the case the two threads would deadlock.
4538  * This assumption is guaranteed since strlock is only called by insertq
4539  * and removeq and streams plumbing changes are single-threaded for
4540  * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4541  *
4542  * For pipes, it is not difficult to atomically designate a pair of streams
4543  * to be mated. Once mated atomically by the framework the twisted pair remain
4544  * configured that way until dismantled atomically by the framework.
4545  * When plumbing takes place on a twisted stream it is necessary to ensure that
4546  * this operation is done exclusively on the twisted stream since two such
4547  * operations, each initiated on a different end of the pipe, will deadlock
4548  * waiting for each other to complete.
4549  *
4550  * On entry, no locks should be held.
4551  * The locks acquired and held by strlock depend on a few factors.
4552  * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4553  *   and held on exit and all sq_count are at an acceptable level.
4554  * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4555  *   sd_refcnt being zero.
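 *
 * A caller pairs strlock() with strunlock(); e.g. removeq() below does,
 * in outline:
 *
 *	sqlist = sqlist_build(qp, stp, STRMATED(stp));
 *	strlock(stp, sqlist);
 *	... change q_next and related pointers ...
 *	strunlock(stp, sqlist);
 *	sqlist_free(sqlist);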
4556 */ 4557 4558 static void 4559 strlock(struct stdata *stp, sqlist_t *sqlist) 4560 { 4561 syncql_t *sql, *sql2; 4562 retry: 4563 /* 4564 * Wait for any claimstr to go away. 4565 */ 4566 if (STRMATED(stp)) { 4567 struct stdata *stp1, *stp2; 4568 4569 STRLOCKMATES(stp); 4570 /* 4571 * Note that the selection of locking order is not 4572 * important, just that they are always acquired in 4573 * the same order. To assure this, we choose this 4574 * order based on the value of the pointer, and since 4575 * the pointer will not change for the life of this 4576 * pair, we will always grab the locks in the same 4577 * order (and hence, prevent deadlocks). 4578 */ 4579 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4580 stp1 = stp; 4581 stp2 = stp->sd_mate; 4582 } else { 4583 stp2 = stp; 4584 stp1 = stp->sd_mate; 4585 } 4586 mutex_enter(&stp1->sd_reflock); 4587 if (stp1->sd_refcnt > 0) { 4588 STRUNLOCKMATES(stp); 4589 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4590 mutex_exit(&stp1->sd_reflock); 4591 goto retry; 4592 } 4593 mutex_enter(&stp2->sd_reflock); 4594 if (stp2->sd_refcnt > 0) { 4595 STRUNLOCKMATES(stp); 4596 mutex_exit(&stp1->sd_reflock); 4597 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4598 mutex_exit(&stp2->sd_reflock); 4599 goto retry; 4600 } 4601 STREAM_PUTLOCKS_ENTER(stp1); 4602 STREAM_PUTLOCKS_ENTER(stp2); 4603 } else { 4604 mutex_enter(&stp->sd_lock); 4605 mutex_enter(&stp->sd_reflock); 4606 while (stp->sd_refcnt > 0) { 4607 mutex_exit(&stp->sd_lock); 4608 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4609 if (mutex_tryenter(&stp->sd_lock) == 0) { 4610 mutex_exit(&stp->sd_reflock); 4611 mutex_enter(&stp->sd_lock); 4612 mutex_enter(&stp->sd_reflock); 4613 } 4614 } 4615 STREAM_PUTLOCKS_ENTER(stp); 4616 } 4617 4618 if (sqlist == NULL) 4619 return; 4620 4621 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4622 syncq_t *sq = sql->sql_sq; 4623 uint16_t count; 4624 4625 mutex_enter(SQLOCK(sq)); 4626 count = sq->sq_count; 4627 ASSERT(sq->sq_rmqcount <= count); 4628 SQ_PUTLOCKS_ENTER(sq); 4629 SUM_SQ_PUTCOUNTS(sq, count); 4630 if (count == sq->sq_rmqcount) 4631 continue; 4632 4633 /* Failed - drop all locks that we have acquired so far */ 4634 if (STRMATED(stp)) { 4635 STREAM_PUTLOCKS_EXIT(stp); 4636 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4637 STRUNLOCKMATES(stp); 4638 mutex_exit(&stp->sd_reflock); 4639 mutex_exit(&stp->sd_mate->sd_reflock); 4640 } else { 4641 STREAM_PUTLOCKS_EXIT(stp); 4642 mutex_exit(&stp->sd_lock); 4643 mutex_exit(&stp->sd_reflock); 4644 } 4645 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4646 sql2 = sql2->sql_next) { 4647 SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4648 mutex_exit(SQLOCK(sql2->sql_sq)); 4649 } 4650 4651 /* 4652 * The wait loop below may starve when there are many threads 4653 * claiming the syncq. This is especially a problem with permod 4654 * syncqs (IP). To lessen the impact of the problem we increment 4655 * sq_needexcl and clear fastbits so that putnexts will slow 4656 * down and call sqenable instead of draining right away. 
4657 		 */
4658 		sq->sq_needexcl++;
4659 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4660 		while (count > sq->sq_rmqcount) {
4661 			sq->sq_flags |= SQ_WANTWAKEUP;
4662 			SQ_PUTLOCKS_EXIT(sq);
4663 			cv_wait(&sq->sq_wait, SQLOCK(sq));
4664 			count = sq->sq_count;
4665 			SQ_PUTLOCKS_ENTER(sq);
4666 			SUM_SQ_PUTCOUNTS(sq, count);
4667 		}
4668 		sq->sq_needexcl--;
4669 		if (sq->sq_needexcl == 0)
4670 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4671 		SQ_PUTLOCKS_EXIT(sq);
4672 		ASSERT(count == sq->sq_rmqcount);
4673 		mutex_exit(SQLOCK(sq));
4674 		goto retry;
4675 	}
4676 }
4677 
4678 /*
4679  * Drop all the locks that strlock acquired.
4680  */
4681 static void
4682 strunlock(struct stdata *stp, sqlist_t *sqlist)
4683 {
4684 	syncql_t *sql;
4685 
4686 	if (STRMATED(stp)) {
4687 		STREAM_PUTLOCKS_EXIT(stp);
4688 		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4689 		STRUNLOCKMATES(stp);
4690 		mutex_exit(&stp->sd_reflock);
4691 		mutex_exit(&stp->sd_mate->sd_reflock);
4692 	} else {
4693 		STREAM_PUTLOCKS_EXIT(stp);
4694 		mutex_exit(&stp->sd_lock);
4695 		mutex_exit(&stp->sd_reflock);
4696 	}
4697 
4698 	if (sqlist == NULL)
4699 		return;
4700 
4701 	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4702 		SQ_PUTLOCKS_EXIT(sql->sql_sq);
4703 		mutex_exit(SQLOCK(sql->sql_sq));
4704 	}
4705 }
4706 
4707 /*
4708  * When the inserted module has a service procedure, we need to check
4709  * whether the next module with a service procedure is flow-controlled
4710  * and, if so, trigger the backenable.
4711  */
4712 static void
4713 backenable_insertedq(queue_t *q)
4714 {
4715 	qband_t *qbp;
4716 
4717 	claimstr(q);
4718 	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
4719 		if (q->q_next->q_nfsrv->q_flag & QWANTW)
4720 			backenable(q, 0);
4721 
4722 		qbp = q->q_next->q_nfsrv->q_bandp;
4723 		for (; qbp != NULL; qbp = qbp->qb_next)
4724 			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
4725 				backenable(q, qbp->qb_first->b_band);
4726 	}
4727 	releasestr(q);
4728 }
4729 
4730 /*
4731  * Insert a new queue pair into a stream after an existing read queue.
4732  *
4733  * This routine acquires all the necessary locks in order to change
4734  * q_next and related pointers using strlock().
4735  * It depends on the stream head ensuring that there are no concurrent
4736  * insertq or removeq on the same stream. The stream head ensures this
4737  * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4738  *
4739  * Note that no syncq locks are held during the q_next change. This is
4740  * safe for all streams since, unlike removeq, there is no problem of
4741  * stale pointers when adding a module to the stream: drivers/modules
4742  * that do a canput(rq->q_next) can never pick up a closed/freed queue
4743  * pointer this way.
4744  */
4745 void
4746 insertq(struct stdata *stp, queue_t *new)
4747 {
4748 	queue_t *after;
4749 	queue_t *wafter;
4750 	queue_t *wnew = _WR(new);
4751 	boolean_t have_fifo = B_FALSE;
4752 
4753 	if (new->q_flag & _QINSERTING) {
4754 		ASSERT(stp->sd_vnode->v_type != VFIFO);
4755 		after = new->q_next;
4756 		wafter = _WR(new->q_next);
4757 	} else {
4758 		after = _RD(stp->sd_wrq);
4759 		wafter = stp->sd_wrq;
4760 	}
4761 
4762 	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4763 	    "insertq:%p, %p", after, new);
4764 	ASSERT(after->q_flag & QREADR);
4765 	ASSERT(new->q_flag & QREADR);
4766 
4767 	strlock(stp, NULL);
4768 
4769 	/* Do we have a FIFO?
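	 * (i.e. is the queue pair twisted onto itself as it is for a
	 * pipe/FIFO stream head, where the write queue feeds its own
	 * read queue: wafter->q_next == after)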
*/ 4770 if (wafter->q_next == after) { 4771 have_fifo = B_TRUE; 4772 wnew->q_next = new; 4773 } else { 4774 wnew->q_next = wafter->q_next; 4775 } 4776 new->q_next = after; 4777 4778 set_nfsrv_ptr(new, wnew, after, wafter); 4779 /* 4780 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4781 * so only reset this flag after calling it. 4782 */ 4783 new->q_flag &= ~_QINSERTING; 4784 4785 if (have_fifo) { 4786 wafter->q_next = wnew; 4787 } else { 4788 if (wafter->q_next) 4789 _OTHERQ(wafter->q_next)->q_next = new; 4790 wafter->q_next = wnew; 4791 } 4792 4793 set_qend(new); 4794 /* The QEND flag might have to be updated for the upstream guy */ 4795 set_qend(after); 4796 4797 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4798 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4799 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4800 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4801 strsetuio(stp); 4802 4803 /* 4804 * If this was a module insertion, bump the push count. 4805 */ 4806 if (!(new->q_flag & QISDRV)) 4807 stp->sd_pushcnt++; 4808 4809 strunlock(stp, NULL); 4810 4811 /* check if the write Q needs backenable */ 4812 backenable_insertedq(wnew); 4813 4814 /* check if the read Q needs backenable */ 4815 backenable_insertedq(new); 4816 } 4817 4818 /* 4819 * Given a read queue, unlink it from any neighbors. 4820 * 4821 * This routine acquires all the necessary locks in order to 4822 * change q_next and related pointers and also guard against 4823 * stale references (e.g. through q_next) to the queue that 4824 * is being removed. It also plays part of the role in ensuring 4825 * that the module's/driver's put procedure doesn't get called 4826 * after qprocsoff returns. 4827 * 4828 * Removeq depends on the stream head ensuring that there are 4829 * no concurrent insertq or removeq on the same stream. The 4830 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4831 * STRPLUMB. 4832 * 4833 * The set of locks needed to remove the queue is different in 4834 * different cases: 4835 * 4836 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4837 * waiting for the syncq reference count to drop to 0 indicating that no 4838 * non-close threads are present anywhere in the stream. This ensures that any 4839 * module/driver can reference q_next in its open, close, put, or service 4840 * procedures. 4841 * 4842 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4843 * strlock() ensures that there is either no threads executing inside perimeter 4844 * or there is only a thread calling qprocsoff(). 4845 * 4846 * strlock() compares the value of sq_count with the number of threads inside 4847 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wakeup 4848 * any threads waiting in strlock() when the sq_rmqcount increases. 4849 */ 4850 4851 void 4852 removeq(queue_t *qp) 4853 { 4854 queue_t *wqp = _WR(qp); 4855 struct stdata *stp = STREAM(qp); 4856 sqlist_t *sqlist = NULL; 4857 boolean_t isdriver; 4858 int moved; 4859 syncq_t *sq = qp->q_syncq; 4860 syncq_t *wsq = wqp->q_syncq; 4861 4862 ASSERT(stp); 4863 4864 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4865 "removeq:%p %p", qp, wqp); 4866 ASSERT(qp->q_flag&QREADR); 4867 4868 /* 4869 * For queues using Synchronous streams, we must wait for all threads in 4870 * rwnext() to drain out before proceeding. 
4871 */ 4872 if (qp->q_flag & QSYNCSTR) { 4873 /* First, we need wakeup any threads blocked in rwnext() */ 4874 mutex_enter(SQLOCK(sq)); 4875 if (sq->sq_flags & SQ_WANTWAKEUP) { 4876 sq->sq_flags &= ~SQ_WANTWAKEUP; 4877 cv_broadcast(&sq->sq_wait); 4878 } 4879 mutex_exit(SQLOCK(sq)); 4880 4881 if (wsq != sq) { 4882 mutex_enter(SQLOCK(wsq)); 4883 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4884 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4885 cv_broadcast(&wsq->sq_wait); 4886 } 4887 mutex_exit(SQLOCK(wsq)); 4888 } 4889 4890 mutex_enter(QLOCK(qp)); 4891 while (qp->q_rwcnt > 0) { 4892 qp->q_flag |= QWANTRMQSYNC; 4893 cv_wait(&qp->q_wait, QLOCK(qp)); 4894 } 4895 mutex_exit(QLOCK(qp)); 4896 4897 mutex_enter(QLOCK(wqp)); 4898 while (wqp->q_rwcnt > 0) { 4899 wqp->q_flag |= QWANTRMQSYNC; 4900 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4901 } 4902 mutex_exit(QLOCK(wqp)); 4903 } 4904 4905 mutex_enter(SQLOCK(sq)); 4906 sq->sq_rmqcount++; 4907 if (sq->sq_flags & SQ_WANTWAKEUP) { 4908 sq->sq_flags &= ~SQ_WANTWAKEUP; 4909 cv_broadcast(&sq->sq_wait); 4910 } 4911 mutex_exit(SQLOCK(sq)); 4912 4913 isdriver = (qp->q_flag & QISDRV); 4914 4915 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4916 strlock(stp, sqlist); 4917 4918 reset_nfsrv_ptr(qp, wqp); 4919 4920 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4921 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4922 /* Do we have a FIFO? */ 4923 if (wqp->q_next == qp) { 4924 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4925 } else { 4926 if (wqp->q_next) 4927 backq(qp)->q_next = qp->q_next; 4928 if (qp->q_next) 4929 backq(wqp)->q_next = wqp->q_next; 4930 } 4931 4932 /* The QEND flag might have to be updated for the upstream guy */ 4933 if (qp->q_next) 4934 set_qend(qp->q_next); 4935 4936 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4937 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4938 4939 /* 4940 * Move any messages destined for the put procedures to the next 4941 * syncq in line. Otherwise free them. 4942 */ 4943 moved = 0; 4944 /* 4945 * Quick check to see whether there are any messages or events. 4946 */ 4947 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4948 moved += propagate_syncq(qp); 4949 if (wqp->q_syncqmsgs != 0 || 4950 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4951 moved += propagate_syncq(wqp); 4952 4953 strsetuio(stp); 4954 4955 /* 4956 * If this was a module removal, decrement the push count. 4957 */ 4958 if (!isdriver) 4959 stp->sd_pushcnt--; 4960 4961 strunlock(stp, sqlist); 4962 sqlist_free(sqlist); 4963 4964 /* 4965 * Make sure any messages that were propagated are drained. 4966 * Also clear any QFULL bit caused by messages that were propagated. 4967 */ 4968 4969 if (qp->q_next != NULL) { 4970 clr_qfull(qp); 4971 /* 4972 * For the driver calling qprocsoff, propagate_syncq 4973 * frees all the messages instead of putting it in 4974 * the stream head 4975 */ 4976 if (!isdriver && (moved > 0)) 4977 emptysq(qp->q_next->q_syncq); 4978 } 4979 if (wqp->q_next != NULL) { 4980 clr_qfull(wqp); 4981 /* 4982 * We come here for any pop of a module except for the 4983 * case of driver being removed. We don't call emptysq 4984 * if we did not move any messages. This will avoid holding 4985 * PERMOD syncq locks in emptysq 4986 */ 4987 if (moved > 0) 4988 emptysq(wqp->q_next->q_syncq); 4989 } 4990 4991 mutex_enter(SQLOCK(sq)); 4992 sq->sq_rmqcount--; 4993 mutex_exit(SQLOCK(sq)); 4994 } 4995 4996 /* 4997 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4998 * SQ_WRITER) on a syncq. 
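 *
 * Callers always pair this with unblocksq() (or dropsq()/emptysq());
 * e.g. outer_insert() below does, in outline:
 *
 *	blocksq(outer, SQ_BLOCKED, 0);
 *	... modify the outer perimeter's membership ...
 *	unblocksq(outer, SQ_BLOCKED, 1);
 *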
4999 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 5000 * sync queue and waits until sq_count reaches maxcnt. 5001 * 5002 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 5003 * does not care about putnext threads that are in the middle of calling put 5004 * entry points. 5005 * 5006 * This routine is used for both inner and outer syncqs. 5007 */ 5008 static void 5009 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 5010 { 5011 uint16_t count = 0; 5012 5013 mutex_enter(SQLOCK(sq)); 5014 /* 5015 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 5016 * SQ_FROZEN will be set if there is a frozen stream that has a 5017 * queue which also refers to this "shared" syncq. 5018 * SQ_BLOCKED will be set if there is "off" queue which also 5019 * refers to this "shared" syncq. 5020 */ 5021 if (maxcnt != -1) { 5022 count = sq->sq_count; 5023 SQ_PUTLOCKS_ENTER(sq); 5024 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5025 SUM_SQ_PUTCOUNTS(sq, count); 5026 } 5027 sq->sq_needexcl++; 5028 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5029 5030 while ((sq->sq_flags & flag) || 5031 (maxcnt != -1 && count > (unsigned)maxcnt)) { 5032 sq->sq_flags |= SQ_WANTWAKEUP; 5033 if (maxcnt != -1) { 5034 SQ_PUTLOCKS_EXIT(sq); 5035 } 5036 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5037 if (maxcnt != -1) { 5038 count = sq->sq_count; 5039 SQ_PUTLOCKS_ENTER(sq); 5040 SUM_SQ_PUTCOUNTS(sq, count); 5041 } 5042 } 5043 sq->sq_needexcl--; 5044 sq->sq_flags |= flag; 5045 ASSERT(maxcnt == -1 || count == maxcnt); 5046 if (maxcnt != -1) { 5047 if (sq->sq_needexcl == 0) { 5048 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5049 } 5050 SQ_PUTLOCKS_EXIT(sq); 5051 } else if (sq->sq_needexcl == 0) { 5052 SQ_PUTCOUNT_SETFAST(sq); 5053 } 5054 5055 mutex_exit(SQLOCK(sq)); 5056 } 5057 5058 /* 5059 * Reset a flag that was set with blocksq. 5060 * 5061 * Can not use this routine to reset SQ_WRITER. 5062 * 5063 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5064 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5065 * to handle the queued qwriter operations. 5066 * 5067 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5068 * sq_putlocks are used. 5069 */ 5070 static void 5071 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5072 { 5073 uint16_t flags; 5074 5075 mutex_enter(SQLOCK(sq)); 5076 ASSERT(resetflag != SQ_WRITER); 5077 ASSERT(sq->sq_flags & resetflag); 5078 flags = sq->sq_flags & ~resetflag; 5079 sq->sq_flags = flags; 5080 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5081 if (flags & SQ_WANTWAKEUP) { 5082 flags &= ~SQ_WANTWAKEUP; 5083 cv_broadcast(&sq->sq_wait); 5084 } 5085 sq->sq_flags = flags; 5086 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5087 if (!isouter) { 5088 /* drain_syncq drops SQLOCK */ 5089 drain_syncq(sq); 5090 return; 5091 } 5092 } 5093 } 5094 mutex_exit(SQLOCK(sq)); 5095 } 5096 5097 /* 5098 * Reset a flag that was set with blocksq. 5099 * Does not drain the syncq. Use emptysq() for that. 5100 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5101 * 5102 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5103 * sq_putlocks are used. 
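 *
 * Callers drop the flag on every syncq first and only then drain, as
 * strunblock() below does, in outline:
 *
 *	drain_needed = 0;
 *	for (each syncql sql on the list)
 *		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
 *	if (drain_needed)
 *		for (each syncql sql on the list)
 *			emptysq(sql->sql_sq);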
5104 */ 5105 static int 5106 dropsq(syncq_t *sq, uint16_t resetflag) 5107 { 5108 uint16_t flags; 5109 5110 mutex_enter(SQLOCK(sq)); 5111 ASSERT(sq->sq_flags & resetflag); 5112 flags = sq->sq_flags & ~resetflag; 5113 if (flags & SQ_WANTWAKEUP) { 5114 flags &= ~SQ_WANTWAKEUP; 5115 cv_broadcast(&sq->sq_wait); 5116 } 5117 sq->sq_flags = flags; 5118 mutex_exit(SQLOCK(sq)); 5119 if (flags & SQ_QUEUED) 5120 return (1); 5121 return (0); 5122 } 5123 5124 /* 5125 * Empty all the messages on a syncq. 5126 * 5127 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5128 * sq_putlocks are used. 5129 */ 5130 static void 5131 emptysq(syncq_t *sq) 5132 { 5133 uint16_t flags; 5134 5135 mutex_enter(SQLOCK(sq)); 5136 flags = sq->sq_flags; 5137 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5138 /* 5139 * To prevent potential recursive invocation of drain_syncq we 5140 * do not call drain_syncq if count is non-zero. 5141 */ 5142 if (sq->sq_count == 0) { 5143 /* drain_syncq() drops SQLOCK */ 5144 drain_syncq(sq); 5145 return; 5146 } else 5147 sqenable(sq); 5148 } 5149 mutex_exit(SQLOCK(sq)); 5150 } 5151 5152 /* 5153 * Ordered insert while removing duplicates. 5154 */ 5155 static void 5156 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5157 { 5158 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5159 5160 prev_sqlpp = &sqlist->sqlist_head; 5161 while ((sqlp = *prev_sqlpp) != NULL) { 5162 if (sqlp->sql_sq >= sqp) { 5163 if (sqlp->sql_sq == sqp) /* duplicate */ 5164 return; 5165 break; 5166 } 5167 prev_sqlpp = &sqlp->sql_next; 5168 } 5169 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5170 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5171 new_sqlp->sql_next = sqlp; 5172 new_sqlp->sql_sq = sqp; 5173 *prev_sqlpp = new_sqlp; 5174 } 5175 5176 /* 5177 * Walk the write side queues until we hit either the driver 5178 * or a twist in the stream (_SAMESTR will return false in both 5179 * these cases) then turn around and walk the read side queues 5180 * back up to the stream head. 5181 */ 5182 static void 5183 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5184 { 5185 while (q != NULL) { 5186 sqlist_insert(sqlist, q->q_syncq); 5187 5188 if (_SAMESTR(q)) 5189 q = q->q_next; 5190 else if (!(q->q_flag & QREADR)) 5191 q = _RD(q); 5192 else 5193 q = NULL; 5194 } 5195 } 5196 5197 /* 5198 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5199 * associated with the "q" parameter. The resulting list is sorted in a 5200 * canonical order and is free of duplicates. 5201 * Assumes the passed queue is a _RD(q). 5202 */ 5203 static sqlist_t * 5204 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5205 { 5206 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5207 5208 /* 5209 * start with the current queue/qpair 5210 */ 5211 ASSERT(q->q_flag & QREADR); 5212 5213 sqlist_insert(sqlist, q->q_syncq); 5214 sqlist_insert(sqlist, _WR(q)->q_syncq); 5215 5216 sqlist_insertall(sqlist, stp->sd_wrq); 5217 if (do_twist) 5218 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5219 5220 return (sqlist); 5221 } 5222 5223 static sqlist_t * 5224 sqlist_alloc(struct stdata *stp, int kmflag) 5225 { 5226 size_t sqlist_size; 5227 sqlist_t *sqlist; 5228 5229 /* 5230 * Allocate 2 syncql_t's for each pushed module. Note that 5231 * the sqlist_t structure already has 4 syncql_t's built in: 5232 * 2 for the stream head, and 2 for the driver/other stream head. 
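 * For example, a non-mated stream with three pushed modules gets
 * 2 * 3 + 4 = 10 syncql_t entries in total.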
5233 */ 5234 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5235 sizeof (sqlist_t); 5236 if (STRMATED(stp)) 5237 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5238 sqlist = kmem_alloc(sqlist_size, kmflag); 5239 5240 sqlist->sqlist_head = NULL; 5241 sqlist->sqlist_size = sqlist_size; 5242 sqlist->sqlist_index = 0; 5243 5244 return (sqlist); 5245 } 5246 5247 /* 5248 * Free the list created by sqlist_alloc() 5249 */ 5250 static void 5251 sqlist_free(sqlist_t *sqlist) 5252 { 5253 kmem_free(sqlist, sqlist->sqlist_size); 5254 } 5255 5256 /* 5257 * Prevent any new entries into any syncq in this stream. 5258 * Used by freezestr. 5259 */ 5260 void 5261 strblock(queue_t *q) 5262 { 5263 struct stdata *stp; 5264 syncql_t *sql; 5265 sqlist_t *sqlist; 5266 5267 q = _RD(q); 5268 5269 stp = STREAM(q); 5270 ASSERT(stp != NULL); 5271 5272 /* 5273 * Get a sorted list with all the duplicates removed containing 5274 * all the syncqs referenced by this stream. 5275 */ 5276 sqlist = sqlist_build(q, stp, B_FALSE); 5277 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5278 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5279 sqlist_free(sqlist); 5280 } 5281 5282 /* 5283 * Release the block on new entries into this stream 5284 */ 5285 void 5286 strunblock(queue_t *q) 5287 { 5288 struct stdata *stp; 5289 syncql_t *sql; 5290 sqlist_t *sqlist; 5291 int drain_needed; 5292 5293 q = _RD(q); 5294 5295 /* 5296 * Get a sorted list with all the duplicates removed containing 5297 * all the syncqs referenced by this stream. 5298 * Have to drop the SQ_FROZEN flag on all the syncqs before 5299 * starting to drain them; otherwise the draining might 5300 * cause a freezestr in some module on the stream (which 5301 * would deadlock). 5302 */ 5303 stp = STREAM(q); 5304 ASSERT(stp != NULL); 5305 sqlist = sqlist_build(q, stp, B_FALSE); 5306 drain_needed = 0; 5307 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5308 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5309 if (drain_needed) { 5310 for (sql = sqlist->sqlist_head; sql != NULL; 5311 sql = sql->sql_next) 5312 emptysq(sql->sql_sq); 5313 } 5314 sqlist_free(sqlist); 5315 } 5316 5317 #ifdef DEBUG 5318 static int 5319 qprocsareon(queue_t *rq) 5320 { 5321 if (rq->q_next == NULL) 5322 return (0); 5323 return (_WR(rq->q_next)->q_next == _WR(rq)); 5324 } 5325 5326 int 5327 qclaimed(queue_t *q) 5328 { 5329 uint_t count; 5330 5331 count = q->q_syncq->sq_count; 5332 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5333 return (count != 0); 5334 } 5335 5336 /* 5337 * Check if anyone has frozen this stream with freezestr 5338 */ 5339 int 5340 frozenstr(queue_t *q) 5341 { 5342 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5343 } 5344 #endif /* DEBUG */ 5345 5346 /* 5347 * Enter a queue. 5348 * Obsoleted interface. Should not be used. 5349 */ 5350 void 5351 enterq(queue_t *q) 5352 { 5353 entersq(q->q_syncq, SQ_CALLBACK); 5354 } 5355 5356 void 5357 leaveq(queue_t *q) 5358 { 5359 leavesq(q->q_syncq, SQ_CALLBACK); 5360 } 5361 5362 /* 5363 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits 5364 * to check. 5365 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5366 * calls and the running of open, close and service procedures. 5367 * 5368 * If c_inner bit is set no need to grab sq_putlocks since we don't care 5369 * if other threads have entered or are entering put entry point. 
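 *
 * Typical usage brackets an open/close or callback; in outline:
 *
 *	entersq(q->q_syncq, SQ_OPENCLOSE);
 *	... run the open or close routine ...
 *	leavesq(q->q_syncq, SQ_OPENCLOSE);
 *
 * (enterq()/leaveq() above are the obsolete SQ_CALLBACK flavor of the
 * same pairing.)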
5370 * 5371 * If c_inner bit is set it might have been possible to use 5372 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize 5373 * open/close path for IP) but since the count may need to be decremented in 5374 * qwait() we wouldn't know which counter to decrement. Currently counter is 5375 * selected by current cpu_seqid and current CPU can change at any moment. XXX 5376 * in the future we might use curthread id bits to select the counter and this 5377 * would stay constant across routine calls. 5378 */ 5379 void 5380 entersq(syncq_t *sq, int entrypoint) 5381 { 5382 uint16_t count = 0; 5383 uint16_t flags; 5384 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5385 uint16_t type; 5386 uint_t c_inner = entrypoint & SQ_CI; 5387 uint_t c_outer = entrypoint & SQ_CO; 5388 5389 /* 5390 * Increment ref count to keep closes out of this queue. 5391 */ 5392 ASSERT(sq); 5393 ASSERT(c_inner && c_outer); 5394 mutex_enter(SQLOCK(sq)); 5395 flags = sq->sq_flags; 5396 type = sq->sq_type; 5397 if (!(type & c_inner)) { 5398 /* Make sure all putcounts now use slowlock. */ 5399 count = sq->sq_count; 5400 SQ_PUTLOCKS_ENTER(sq); 5401 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5402 SUM_SQ_PUTCOUNTS(sq, count); 5403 sq->sq_needexcl++; 5404 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5405 waitflags |= SQ_MESSAGES; 5406 } 5407 /* 5408 * Wait until we can enter the inner perimeter. 5409 * If we want exclusive access we wait until sq_count is 0. 5410 * We have to do this before entering the outer perimeter in order 5411 * to preserve put/close message ordering. 5412 */ 5413 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5414 sq->sq_flags = flags | SQ_WANTWAKEUP; 5415 if (!(type & c_inner)) { 5416 SQ_PUTLOCKS_EXIT(sq); 5417 } 5418 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5419 if (!(type & c_inner)) { 5420 count = sq->sq_count; 5421 SQ_PUTLOCKS_ENTER(sq); 5422 SUM_SQ_PUTCOUNTS(sq, count); 5423 } 5424 flags = sq->sq_flags; 5425 } 5426 5427 if (!(type & c_inner)) { 5428 ASSERT(sq->sq_needexcl > 0); 5429 sq->sq_needexcl--; 5430 if (sq->sq_needexcl == 0) { 5431 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5432 } 5433 } 5434 5435 /* Check if we need to enter the outer perimeter */ 5436 if (!(type & c_outer)) { 5437 /* 5438 * We have to enter the outer perimeter exclusively before 5439 * we can increment sq_count to avoid deadlock. This implies 5440 * that we have to re-check sq_flags and sq_count. 5441 * 5442 * is it possible to have c_inner set when c_outer is not set? 5443 */ 5444 if (!(type & c_inner)) { 5445 SQ_PUTLOCKS_EXIT(sq); 5446 } 5447 mutex_exit(SQLOCK(sq)); 5448 outer_enter(sq->sq_outer, SQ_GOAWAY); 5449 mutex_enter(SQLOCK(sq)); 5450 flags = sq->sq_flags; 5451 /* 5452 * there should be no need to recheck sq_putcounts 5453 * because outer_enter() has already waited for them to clear 5454 * after setting SQ_WRITER. 5455 */ 5456 count = sq->sq_count; 5457 #ifdef DEBUG 5458 /* 5459 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5460 * of doing an ASSERT internally. Others should do 5461 * something like 5462 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5463 * without the need to #ifdef DEBUG it. 
5464 */ 5465 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5466 #endif 5467 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5468 (!(type & c_inner) && count != 0)) { 5469 sq->sq_flags = flags | SQ_WANTWAKEUP; 5470 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5471 count = sq->sq_count; 5472 flags = sq->sq_flags; 5473 } 5474 } 5475 5476 sq->sq_count++; 5477 ASSERT(sq->sq_count != 0); /* Wraparound */ 5478 if (!(type & c_inner)) { 5479 /* Exclusive entry */ 5480 ASSERT(sq->sq_count == 1); 5481 sq->sq_flags |= SQ_EXCL; 5482 if (type & c_outer) { 5483 SQ_PUTLOCKS_EXIT(sq); 5484 } 5485 } 5486 mutex_exit(SQLOCK(sq)); 5487 } 5488 5489 /* 5490 * Leave a syncq. Announce to framework that closes may proceed. 5491 * c_inner and c_outer specify which concurrency bits to check. 5492 * 5493 * Must never be called from driver or module put entry point. 5494 * 5495 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5496 * sq_putlocks are used. 5497 */ 5498 void 5499 leavesq(syncq_t *sq, int entrypoint) 5500 { 5501 uint16_t flags; 5502 uint16_t type; 5503 uint_t c_outer = entrypoint & SQ_CO; 5504 #ifdef DEBUG 5505 uint_t c_inner = entrypoint & SQ_CI; 5506 #endif 5507 5508 /* 5509 * Decrement ref count, drain the syncq if possible, and wake up 5510 * any waiting close. 5511 */ 5512 ASSERT(sq); 5513 ASSERT(c_inner && c_outer); 5514 mutex_enter(SQLOCK(sq)); 5515 flags = sq->sq_flags; 5516 type = sq->sq_type; 5517 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5518 5519 if (flags & SQ_WANTWAKEUP) { 5520 flags &= ~SQ_WANTWAKEUP; 5521 cv_broadcast(&sq->sq_wait); 5522 } 5523 if (flags & SQ_WANTEXWAKEUP) { 5524 flags &= ~SQ_WANTEXWAKEUP; 5525 cv_broadcast(&sq->sq_exitwait); 5526 } 5527 5528 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5529 /* 5530 * The syncq needs to be drained. "Exit" the syncq 5531 * before calling drain_syncq. 5532 */ 5533 ASSERT(sq->sq_count != 0); 5534 sq->sq_count--; 5535 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5536 sq->sq_flags = flags & ~SQ_EXCL; 5537 drain_syncq(sq); 5538 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5539 /* Check if we need to exit the outer perimeter */ 5540 /* XXX will this ever be true? */ 5541 if (!(type & c_outer)) 5542 outer_exit(sq->sq_outer); 5543 return; 5544 } 5545 } 5546 ASSERT(sq->sq_count != 0); 5547 sq->sq_count--; 5548 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5549 sq->sq_flags = flags & ~SQ_EXCL; 5550 mutex_exit(SQLOCK(sq)); 5551 5552 /* Check if we need to exit the outer perimeter */ 5553 if (!(sq->sq_type & c_outer)) 5554 outer_exit(sq->sq_outer); 5555 } 5556 5557 /* 5558 * Prevent q_next from changing in this stream by incrementing sq_count. 5559 * 5560 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5561 * sq_putlocks are used. 5562 */ 5563 void 5564 claimq(queue_t *qp) 5565 { 5566 syncq_t *sq = qp->q_syncq; 5567 5568 mutex_enter(SQLOCK(sq)); 5569 sq->sq_count++; 5570 ASSERT(sq->sq_count != 0); /* Wraparound */ 5571 mutex_exit(SQLOCK(sq)); 5572 } 5573 5574 /* 5575 * Undo claimq. 5576 * 5577 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5578 * sq_putlocks are used. 
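 *
 * The claim/release pairing, in outline:
 *
 *	claimq(qp);
 *	... dereference qp->q_next safely ...
 *	releaseq(qp);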
5579 */ 5580 void 5581 releaseq(queue_t *qp) 5582 { 5583 syncq_t *sq = qp->q_syncq; 5584 uint16_t flags; 5585 5586 mutex_enter(SQLOCK(sq)); 5587 ASSERT(sq->sq_count > 0); 5588 sq->sq_count--; 5589 5590 flags = sq->sq_flags; 5591 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5592 if (flags & SQ_WANTWAKEUP) { 5593 flags &= ~SQ_WANTWAKEUP; 5594 cv_broadcast(&sq->sq_wait); 5595 } 5596 sq->sq_flags = flags; 5597 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5598 /* 5599 * To prevent potential recursive invocation of 5600 * drain_syncq we do not call drain_syncq if count is 5601 * non-zero. 5602 */ 5603 if (sq->sq_count == 0) { 5604 drain_syncq(sq); 5605 return; 5606 } else 5607 sqenable(sq); 5608 } 5609 } 5610 mutex_exit(SQLOCK(sq)); 5611 } 5612 5613 /* 5614 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5615 */ 5616 void 5617 claimstr(queue_t *qp) 5618 { 5619 struct stdata *stp = STREAM(qp); 5620 5621 mutex_enter(&stp->sd_reflock); 5622 stp->sd_refcnt++; 5623 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5624 mutex_exit(&stp->sd_reflock); 5625 } 5626 5627 /* 5628 * Undo claimstr. 5629 */ 5630 void 5631 releasestr(queue_t *qp) 5632 { 5633 struct stdata *stp = STREAM(qp); 5634 5635 mutex_enter(&stp->sd_reflock); 5636 ASSERT(stp->sd_refcnt != 0); 5637 if (--stp->sd_refcnt == 0) 5638 cv_broadcast(&stp->sd_refmonitor); 5639 mutex_exit(&stp->sd_reflock); 5640 } 5641 5642 static syncq_t * 5643 new_syncq(void) 5644 { 5645 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5646 } 5647 5648 static void 5649 free_syncq(syncq_t *sq) 5650 { 5651 ASSERT(sq->sq_head == NULL); 5652 ASSERT(sq->sq_outer == NULL); 5653 ASSERT(sq->sq_callbpend == NULL); 5654 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5655 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5656 5657 if (sq->sq_ciputctrl != NULL) { 5658 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5659 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5660 sq->sq_nciputctrl, 0); 5661 ASSERT(ciputctrl_cache != NULL); 5662 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5663 } 5664 5665 sq->sq_tail = NULL; 5666 sq->sq_evhead = NULL; 5667 sq->sq_evtail = NULL; 5668 sq->sq_ciputctrl = NULL; 5669 sq->sq_nciputctrl = 0; 5670 sq->sq_count = 0; 5671 sq->sq_rmqcount = 0; 5672 sq->sq_callbflags = 0; 5673 sq->sq_cancelid = 0; 5674 sq->sq_next = NULL; 5675 sq->sq_needexcl = 0; 5676 sq->sq_svcflags = 0; 5677 sq->sq_nqueues = 0; 5678 sq->sq_pri = 0; 5679 sq->sq_onext = NULL; 5680 sq->sq_oprev = NULL; 5681 sq->sq_flags = 0; 5682 sq->sq_type = 0; 5683 sq->sq_servcount = 0; 5684 5685 kmem_cache_free(syncq_cache, sq); 5686 } 5687 5688 /* Outer perimeter code */ 5689 5690 /* 5691 * The outer syncq uses the fields and flags in the syncq slightly 5692 * differently from the inner syncqs. 5693 * sq_count Incremented when there are pending or running 5694 * writers at the outer perimeter to prevent the set of 5695 * inner syncqs that belong to the outer perimeter from 5696 * changing. 5697 * sq_head/tail List of deferred qwriter(OUTER) operations. 5698 * 5699 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while 5700 * inner syncqs are added to or removed from the 5701 * outer perimeter. 5702 * SQ_QUEUED sq_head/tail has messages or events queued. 5703 * 5704 * SQ_WRITER A thread is currently traversing all the inner syncqs 5705 * setting the SQ_WRITER flag. 5706 */ 5707 5708 /* 5709 * Get write access at the outer perimeter. 
5710 * Note that read access is done by entersq, putnext, and put by simply 5711 * incrementing sq_count in the inner syncq. 5712 * 5713 * Waits until "flags" is no longer set in the outer to prevent multiple 5714 * threads from having write access at the same time. SQ_WRITER has to be part 5715 * of "flags". 5716 * 5717 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5718 * until the outer_exit is finished. 5719 * 5720 * outer_enter is vulnerable to starvation since it does not prevent new 5721 * threads from entering the inner syncqs while it is waiting for sq_count to 5722 * go to zero. 5723 */ 5724 void 5725 outer_enter(syncq_t *outer, uint16_t flags) 5726 { 5727 syncq_t *sq; 5728 int wait_needed; 5729 uint16_t count; 5730 5731 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5732 outer->sq_oprev != NULL); 5733 ASSERT(flags & SQ_WRITER); 5734 5735 retry: 5736 mutex_enter(SQLOCK(outer)); 5737 while (outer->sq_flags & flags) { 5738 outer->sq_flags |= SQ_WANTWAKEUP; 5739 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5740 } 5741 5742 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5743 outer->sq_flags |= SQ_WRITER; 5744 outer->sq_count++; 5745 ASSERT(outer->sq_count != 0); /* wraparound */ 5746 wait_needed = 0; 5747 /* 5748 * Set SQ_WRITER on all the inner syncqs while holding 5749 * the SQLOCK on the outer syncq. This ensures that the changing 5750 * of SQ_WRITER is atomic under the outer SQLOCK. 5751 */ 5752 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5753 mutex_enter(SQLOCK(sq)); 5754 count = sq->sq_count; 5755 SQ_PUTLOCKS_ENTER(sq); 5756 sq->sq_flags |= SQ_WRITER; 5757 SUM_SQ_PUTCOUNTS(sq, count); 5758 if (count != 0) 5759 wait_needed = 1; 5760 SQ_PUTLOCKS_EXIT(sq); 5761 mutex_exit(SQLOCK(sq)); 5762 } 5763 mutex_exit(SQLOCK(outer)); 5764 5765 /* 5766 * Get everybody out of the syncqs sequentially. 5767 * Note that we don't actually need to acquire the PUTLOCKS, since 5768 * we have already cleared the fastbit, and set QWRITER. By 5769 * definition, the count can not increase since putnext will 5770 * take the slowlock path (and the purpose of acquiring the 5771 * putlocks was to make sure it didn't increase while we were 5772 * waiting). 5773 * 5774 * Note that we still acquire the PUTLOCKS to be safe. 5775 */ 5776 if (wait_needed) { 5777 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5778 mutex_enter(SQLOCK(sq)); 5779 count = sq->sq_count; 5780 SQ_PUTLOCKS_ENTER(sq); 5781 SUM_SQ_PUTCOUNTS(sq, count); 5782 while (count != 0) { 5783 sq->sq_flags |= SQ_WANTWAKEUP; 5784 SQ_PUTLOCKS_EXIT(sq); 5785 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5786 count = sq->sq_count; 5787 SQ_PUTLOCKS_ENTER(sq); 5788 SUM_SQ_PUTCOUNTS(sq, count); 5789 } 5790 SQ_PUTLOCKS_EXIT(sq); 5791 mutex_exit(SQLOCK(sq)); 5792 } 5793 /* 5794 * Verify that none of the flags got set while we 5795 * were waiting for the sq_counts to drop. 5796 * If this happens we exit and retry entering the 5797 * outer perimeter. 5798 */ 5799 mutex_enter(SQLOCK(outer)); 5800 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5801 mutex_exit(SQLOCK(outer)); 5802 outer_exit(outer); 5803 goto retry; 5804 } 5805 mutex_exit(SQLOCK(outer)); 5806 } 5807 } 5808 5809 /* 5810 * Drop the write access at the outer perimeter. 5811 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5812 * decrementing sq_count. 
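 *
 * Writers bracket their work with the enter/exit pair, as entersq() and
 * leavesq() do for a queue without outer-perimeter concurrency (sketch;
 * SQ_GOAWAY includes SQ_WRITER):
 *
 *	outer_enter(sq->sq_outer, SQ_GOAWAY);
 *	... run exclusively at the outer perimeter ...
 *	outer_exit(sq->sq_outer);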
5813 */ 5814 void 5815 outer_exit(syncq_t *outer) 5816 { 5817 syncq_t *sq; 5818 int drain_needed; 5819 uint16_t flags; 5820 5821 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5822 outer->sq_oprev != NULL); 5823 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5824 5825 /* 5826 * Atomically (from the perspective of threads calling become_writer) 5827 * drop the write access at the outer perimeter by holding 5828 * SQLOCK(outer) across all the dropsq calls and the resetting of 5829 * SQ_WRITER. 5830 * This defines a locking order between the outer perimeter 5831 * SQLOCK and the inner perimeter SQLOCKs. 5832 */ 5833 mutex_enter(SQLOCK(outer)); 5834 flags = outer->sq_flags; 5835 ASSERT(outer->sq_flags & SQ_WRITER); 5836 if (flags & SQ_QUEUED) { 5837 write_now(outer); 5838 flags = outer->sq_flags; 5839 } 5840 5841 /* 5842 * sq_onext is stable since sq_count has not yet been decreased. 5843 * Reset the SQ_WRITER flags in all syncqs. 5844 * After dropping SQ_WRITER on the outer syncq we empty all the 5845 * inner syncqs. 5846 */ 5847 drain_needed = 0; 5848 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5849 drain_needed += dropsq(sq, SQ_WRITER); 5850 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5851 flags &= ~SQ_WRITER; 5852 if (drain_needed) { 5853 outer->sq_flags = flags; 5854 mutex_exit(SQLOCK(outer)); 5855 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5856 emptysq(sq); 5857 mutex_enter(SQLOCK(outer)); 5858 flags = outer->sq_flags; 5859 } 5860 if (flags & SQ_WANTWAKEUP) { 5861 flags &= ~SQ_WANTWAKEUP; 5862 cv_broadcast(&outer->sq_wait); 5863 } 5864 outer->sq_flags = flags; 5865 ASSERT(outer->sq_count > 0); 5866 outer->sq_count--; 5867 mutex_exit(SQLOCK(outer)); 5868 } 5869 5870 /* 5871 * Add another syncq to an outer perimeter. 5872 * Block out all other access to the outer perimeter while it is being 5873 * changed using blocksq. 5874 * Assumes that the caller has *not* done an outer_enter. 5875 * 5876 * Vulnerable to starvation in blocksq. 5877 */ 5878 static void 5879 outer_insert(syncq_t *outer, syncq_t *sq) 5880 { 5881 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5882 outer->sq_oprev != NULL); 5883 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5884 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5885 5886 /* Get exclusive access to the outer perimeter list */ 5887 blocksq(outer, SQ_BLOCKED, 0); 5888 ASSERT(outer->sq_flags & SQ_BLOCKED); 5889 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5890 5891 mutex_enter(SQLOCK(sq)); 5892 sq->sq_outer = outer; 5893 outer->sq_onext->sq_oprev = sq; 5894 sq->sq_onext = outer->sq_onext; 5895 outer->sq_onext = sq; 5896 sq->sq_oprev = outer; 5897 mutex_exit(SQLOCK(sq)); 5898 unblocksq(outer, SQ_BLOCKED, 1); 5899 } 5900 5901 /* 5902 * Remove a syncq from an outer perimeter. 5903 * Block out all other access to the outer perimeter while it is being 5904 * changed using blocksq. 5905 * Assumes that the caller has *not* done an outer_enter. 5906 * 5907 * Vulnerable to starvation in blocksq. 
 */
static void
outer_remove(syncq_t *outer, syncq_t *sq)
{
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(sq->sq_outer == outer);

	/* Get exclusive access to the outer perimeter list */
	blocksq(outer, SQ_BLOCKED, 0);
	ASSERT(outer->sq_flags & SQ_BLOCKED);
	ASSERT(!(outer->sq_flags & SQ_WRITER));

	mutex_enter(SQLOCK(sq));
	sq->sq_outer = NULL;
	sq->sq_onext->sq_oprev = sq->sq_oprev;
	sq->sq_oprev->sq_onext = sq->sq_onext;
	sq->sq_oprev = sq->sq_onext = NULL;
	mutex_exit(SQLOCK(sq));
	unblocksq(outer, SQ_BLOCKED, 1);
}

/*
 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
 * If this is the first callback for this outer perimeter then add
 * this outer perimeter to the list of outer perimeters that
 * the qwriter_outer_thread will process.
 *
 * Increments sq_count in the outer syncq to prevent the membership
 * of the outer perimeter (in terms of inner syncqs) from changing while
 * the callback is pending.
 */
static void
queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
{
	ASSERT(MUTEX_HELD(SQLOCK(outer)));

	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	outer->sq_count++;	/* Decremented when dequeued */
	ASSERT(outer->sq_count != 0);	/* Wraparound */
	if (outer->sq_evhead == NULL) {
		/* First message. */
		outer->sq_evhead = outer->sq_evtail = mp;
		outer->sq_flags |= SQ_EVENTS;
		mutex_exit(SQLOCK(outer));
		STRSTAT(qwr_outer);
		(void) taskq_dispatch(streams_taskq,
		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
	} else {
		ASSERT(outer->sq_flags & SQ_EVENTS);
		outer->sq_evtail->b_next = mp;
		outer->sq_evtail = mp;
		mutex_exit(SQLOCK(outer));
	}
}

/*
 * Try and upgrade to write access at the outer perimeter. If this can
 * not be done without blocking then queue the callback to be done
 * by the qwriter_outer_thread.
 *
 * This routine can only be called from put or service procedures plus
 * asynchronous callback routines that have properly entered the queue (with
 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
 * associated with q.
 */
void
qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
{
	syncq_t *osq, *sq, *outer;
	int failed;
	uint16_t flags;

	osq = q->q_syncq;
	outer = osq->sq_outer;
	if (outer == NULL)
		panic("qwriter(PERIM_OUTER): no outer perimeter");
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);

	mutex_enter(SQLOCK(outer));
	flags = outer->sq_flags;
	/*
	 * If some thread is traversing sq_next, or if we are blocked by
	 * outer_insert or outer_remove, or if we already have queued
	 * callbacks, then queue this callback for later processing.
	 *
	 * Also queue the qwriter for an interrupt thread in order
	 * to reduce the time spent running at high IPL.
	 */
	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
		/*
		 * Queue the become_writer request.
		 * The queueing is atomic under SQLOCK(outer) in order
		 * to synchronize with outer_exit.
		 * queue_writer will drop the outer SQLOCK.
		 */
		if (flags & SQ_BLOCKED) {
			/* Must set SQ_WRITER on inner perimeter */
			mutex_enter(SQLOCK(osq));
			osq->sq_flags |= SQ_WRITER;
			mutex_exit(SQLOCK(osq));
		} else {
			if (!(flags & SQ_WRITER)) {
				/*
				 * The outer could have been SQ_BLOCKED thus
				 * SQ_WRITER might not be set on the inner.
				 */
				mutex_enter(SQLOCK(osq));
				osq->sq_flags |= SQ_WRITER;
				mutex_exit(SQLOCK(osq));
			}
			ASSERT(osq->sq_flags & SQ_WRITER);
		}
		queue_writer(outer, func, q, mp);
		return;
	}
	/*
	 * We are half-way to exclusive access to the outer perimeter.
	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
	 * while the inner syncqs are traversed.
	 */
	outer->sq_count++;
	ASSERT(outer->sq_count != 0);	/* wraparound */
	flags |= SQ_WRITER;
	/*
	 * Check if we can run the function immediately. Mark all
	 * syncqs with the writer flag to prevent new entries into
	 * put and service procedures.
	 *
	 * Set SQ_WRITER on all the inner syncqs while holding
	 * the SQLOCK on the outer syncq. This ensures that the changing
	 * of SQ_WRITER is atomic under the outer SQLOCK.
	 */
	failed = 0;
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
		uint16_t count;
		uint_t maxcnt = (sq == osq) ? 1 : 0;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (sq->sq_count > maxcnt)
			failed = 1;
		sq->sq_flags |= SQ_WRITER;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
	}
	if (failed) {
		/*
		 * Some other thread has a read claim on the outer perimeter.
		 * Queue the callback for deferred processing.
		 *
		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
		 * so that other qwriter(OUTER) calls will queue their
		 * callbacks as well. queue_writer increments sq_count so we
		 * decrement to compensate for our increment.
		 *
		 * Dropping SQ_WRITER enables the writer thread to work
		 * on this outer perimeter.
		 */
		outer->sq_flags = flags;
		queue_writer(outer, func, q, mp);
		/* queue_writer dropped the lock */
		mutex_enter(SQLOCK(outer));
		ASSERT(outer->sq_count > 0);
		outer->sq_count--;
		ASSERT(outer->sq_flags & SQ_WRITER);
		flags = outer->sq_flags;
		flags &= ~SQ_WRITER;
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&outer->sq_wait);
		}
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
		return;
	} else {
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
	}

	/* Can run it immediately */
	(*func)(q, mp);

	outer_exit(outer);
}

/*
 * Dequeue all writer callbacks from the outer perimeter and run them.
 */
static void
write_now(syncq_t *outer)
{
	mblk_t *mp;
	queue_t *q;
	void (*func)();

	ASSERT(MUTEX_HELD(SQLOCK(outer)));
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	while ((mp = outer->sq_evhead) != NULL) {
		/*
		 * Queues cannot be placed on the queuelist on the outer
		 * perimeter.
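		 * (An outer perimeter carries only deferred qwriter
		 * events, never queued messages; the SQ_MESSAGES and
		 * SQ_EVENTS asserts below rely on this.)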
6117 */ 6118 ASSERT(!(outer->sq_flags & SQ_MESSAGES)); 6119 ASSERT((outer->sq_flags & SQ_EVENTS)); 6120 6121 outer->sq_evhead = mp->b_next; 6122 if (outer->sq_evhead == NULL) { 6123 outer->sq_evtail = NULL; 6124 outer->sq_flags &= ~SQ_EVENTS; 6125 } 6126 ASSERT(outer->sq_count != 0); 6127 outer->sq_count--; /* Incremented when enqueued. */ 6128 mutex_exit(SQLOCK(outer)); 6129 /* 6130 * Drop the message if the queue is closing. 6131 * Make sure that the queue is "claimed" when the callback 6132 * is run in order to satisfy various ASSERTs. 6133 */ 6134 q = mp->b_queue; 6135 func = (void (*)())mp->b_prev; 6136 ASSERT(func != NULL); 6137 mp->b_next = mp->b_prev = NULL; 6138 if (q->q_flag & QWCLOSE) { 6139 freemsg(mp); 6140 } else { 6141 claimq(q); 6142 (*func)(q, mp); 6143 releaseq(q); 6144 } 6145 mutex_enter(SQLOCK(outer)); 6146 } 6147 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6148 } 6149 6150 /* 6151 * The list of messages on the inner syncq is effectively hashed 6152 * by destination queue. These destination queues are doubly 6153 * linked lists (hopefully) in priority order. Messages are then 6154 * put on the queue referenced by the q_sqhead/q_sqtail elements. 6155 * Additional messages are linked together by the b_next/b_prev 6156 * elements in the mblk, with (similar to putq()) the first message 6157 * having a NULL b_prev and the last message having a NULL b_next. 6158 * 6159 * Events, such as qwriter callbacks, are put onto a list in FIFO 6160 * order referenced by sq_evhead, and sq_evtail. This is a singly 6161 * linked list, and messages here MUST be processed in the order queued. 6162 */ 6163 6164 /* 6165 * Run the events on the syncq event list (sq_evhead). 6166 * Assumes there is only one claim on the syncq, it is 6167 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6168 * Messages here are processed in order, with the SQ_EXCL bit 6169 * held all the way through till the last message is processed. 6170 */ 6171 void 6172 sq_run_events(syncq_t *sq) 6173 { 6174 mblk_t *bp; 6175 queue_t *qp; 6176 uint16_t flags = sq->sq_flags; 6177 void (*func)(); 6178 6179 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6180 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6181 sq->sq_oprev == NULL) || 6182 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6183 sq->sq_oprev != NULL)); 6184 6185 ASSERT(flags & SQ_EXCL); 6186 ASSERT(sq->sq_count == 1); 6187 6188 /* 6189 * We need to process all of the events on this list. It 6190 * is possible that new events will be added while we are 6191 * away processing a callback, so on every loop, we start 6192 * back at the beginning of the list. 6193 */ 6194 /* 6195 * We have to reaccess sq_evhead since there is a 6196 * possibility of a new entry while we were running 6197 * the callback. 6198 */ 6199 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6200 ASSERT(bp->b_queue->q_syncq == sq); 6201 ASSERT(sq->sq_flags & SQ_EVENTS); 6202 6203 qp = bp->b_queue; 6204 func = (void (*)())bp->b_prev; 6205 ASSERT(func != NULL); 6206 6207 /* 6208 * Messages from the event queue must be taken off in 6209 * FIFO order. 
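		 * (Preserving this FIFO order is what keeps qwriter(INNER)
		 * callbacks running in the sequence in which they were
		 * queued.)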
		 */
		ASSERT(sq->sq_evhead == bp);
		sq->sq_evhead = bp->b_next;

		if (bp->b_next == NULL) {
			/* Deleting last */
			ASSERT(sq->sq_evtail == bp);
			sq->sq_evtail = NULL;
			sq->sq_flags &= ~SQ_EVENTS;
		}
		bp->b_prev = bp->b_next = NULL;
		ASSERT(bp->b_datap->db_ref != 0);

		mutex_exit(SQLOCK(sq));

		(*func)(qp, bp);

		mutex_enter(SQLOCK(sq));
		/*
		 * Re-read the flags, since they could have changed.
		 */
		flags = sq->sq_flags;
		ASSERT(flags & SQ_EXCL);
	}
	ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
	ASSERT(!(sq->sq_flags & SQ_EVENTS));

	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	if (flags & SQ_WANTEXWAKEUP) {
		flags &= ~SQ_WANTEXWAKEUP;
		cv_broadcast(&sq->sq_exitwait);
	}
	sq->sq_flags = flags;
}

/*
 * Put messages on the event list.
 * If we can go exclusive now, do so and process the event list, otherwise
 * let the last claim service this list (or wake the sqthread).
 * This procedure assumes SQLOCK is held. To run the event list, it
 * must be called with no claims.
 */
static void
sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
{
	uint16_t count;

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT(func != NULL);

	/*
	 * This is a callback. Add it to the list of callbacks
	 * and see about upgrading.
	 */
	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	if (sq->sq_evhead == NULL) {
		sq->sq_evhead = sq->sq_evtail = mp;
		sq->sq_flags |= SQ_EVENTS;
	} else {
		ASSERT(sq->sq_evtail != NULL);
		ASSERT(sq->sq_evtail->b_next == NULL);
		ASSERT(sq->sq_flags & SQ_EVENTS);
		sq->sq_evtail->b_next = mp;
		sq->sq_evtail = mp;
	}
	/*
	 * We have set SQ_EVENTS, so threads will have to
	 * unwind out of the perimeter, and new entries will
	 * not grab a putlock. But we still need to know
	 * how many threads have already made a claim to the
	 * syncq, so grab the putlocks, and sum the counts.
	 * If there are no claims on the syncq, we can upgrade
	 * to exclusive, and run the event list.
	 * NOTE: We hold the SQLOCK, so we can just grab the
	 * putlocks.
	 */
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	/*
	 * We hold no claim ourselves (this routine is entered with no
	 * claims), so if no other thread has a claim either we can
	 * upgrade to exclusive and run the event list; otherwise the
	 * thread holding the last claim will drain the syncq.
	 */
	if (count > 0) {
		/*
		 * Can't upgrade - other threads inside.
		 */
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
		return;
	}
	/*
	 * Need to set SQ_EXCL and make a claim on the syncq.
	 */
	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
	sq->sq_flags |= SQ_EXCL;
	ASSERT(sq->sq_count == 0);
	sq->sq_count++;
	SQ_PUTLOCKS_EXIT(sq);

	/* Process the events list */
	sq_run_events(sq);

	/*
	 * Release our claim...
	 */
	sq->sq_count--;

	/*
	 * And release SQ_EXCL.
	 * We don't need to acquire the putlocks to release
	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
	 */
	sq->sq_flags &= ~SQ_EXCL;

	/*
	 * SQ_EXCL should now be clear.
	 */
	ASSERT(!(sq->sq_flags & SQ_EXCL));

	/*
	 * If anything happened while we were running the
	 * events (or was there before), we need to process
	 * them now. We shouldn't be exclusive since we
	 * released the perimeter above (plus, we asserted
	 * for it).
	 */
	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
		drain_syncq(sq);
	else
		mutex_exit(SQLOCK(sq));
}

/*
 * Perform delayed processing. The caller has to make sure that it is safe
 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
 * set).
 *
 * Assume that the caller has NO claims on the syncq. However, a claim
 * on the syncq does not indicate that a thread is draining the syncq.
 * There may be more claims on the syncq than there are threads draining
 * (i.e. #_threads_draining <= sq_count)
 *
 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
 * in order to preserve qwriter(OUTER) ordering constraints.
 *
 * sq_putcounts only need to be checked when dispatching the queued
 * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
 */
void
drain_syncq(syncq_t *sq)
{
	queue_t *qp;
	uint16_t count;
	uint16_t type = sq->sq_type;
	uint16_t flags = sq->sq_flags;
	boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
	    "drain_syncq start:%p", sq);
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	/*
	 * Drop SQ_SERVICE flag.
	 */
	if (bg_service)
		sq->sq_svcflags &= ~SQ_SERVICE;

	/*
	 * If SQ_EXCL is set, someone else is processing this syncq - let him
	 * finish the job.
	 */
	if (flags & SQ_EXCL) {
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * This routine can be called by a background thread if
	 * it was scheduled by a hi-priority thread. So, if there are
	 * no messages queued, return (remember, we have the SQLOCK,
	 * and it cannot change until we release it). Also wake up any
	 * waiters.
	 */
	if (!(flags & SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}
		sq->sq_flags = flags;
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * If this is not a concurrent put perimeter, we need to
	 * become exclusive to drain. Also, if not CIPUT, we would
	 * not have acquired a putlock, so we don't need to check
	 * the putcounts. If not entering with a claim, we test
	 * for sq_count == 0.
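	 *
	 * As a sketch of the claim convention used here (existing fields
	 * only, nothing new): a claim is a counted reference taken and
	 * released under SQLOCK,
	 *
	 *	mutex_enter(SQLOCK(sq));
	 *	sq->sq_count++;		/* claim */
	 *	...
	 *	sq->sq_count--;		/* release */
	 *	mutex_exit(SQLOCK(sq));
	 *
	 * which is exactly what the code below does around the drain loop.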
6435 */ 6436 type = sq->sq_type; 6437 if (!(type & SQ_CIPUT)) { 6438 if (sq->sq_count > 1) { 6439 if (bg_service) { 6440 ASSERT(sq->sq_servcount != 0); 6441 sq->sq_servcount--; 6442 } 6443 mutex_exit(SQLOCK(sq)); 6444 return; 6445 } 6446 sq->sq_flags |= SQ_EXCL; 6447 } 6448 6449 /* 6450 * This is where we make a claim to the syncq. 6451 * This can either be done by incrementing a putlock, or 6452 * the sq_count. But since we already have the SQLOCK 6453 * here, we just bump the sq_count. 6454 * 6455 * Note that after we make a claim, we need to let the code 6456 * fall through to the end of this routine to clean itself 6457 * up. A return in the while loop will put the syncq in a 6458 * very bad state. 6459 */ 6460 sq->sq_count++; 6461 ASSERT(sq->sq_count != 0); /* wraparound */ 6462 6463 while ((flags = sq->sq_flags) & SQ_QUEUED) { 6464 /* 6465 * If we are told to stayaway or went exclusive, 6466 * we are done. 6467 */ 6468 if (flags & (SQ_STAYAWAY)) { 6469 break; 6470 } 6471 6472 /* 6473 * If there are events to run, do so. 6474 * We have one claim to the syncq, so if there are 6475 * more than one, other threads are running. 6476 */ 6477 if (sq->sq_evhead != NULL) { 6478 ASSERT(sq->sq_flags & SQ_EVENTS); 6479 6480 count = sq->sq_count; 6481 SQ_PUTLOCKS_ENTER(sq); 6482 SUM_SQ_PUTCOUNTS(sq, count); 6483 if (count > 1) { 6484 SQ_PUTLOCKS_EXIT(sq); 6485 /* Can't upgrade - other threads inside */ 6486 break; 6487 } 6488 ASSERT((flags & SQ_EXCL) == 0); 6489 sq->sq_flags = flags | SQ_EXCL; 6490 SQ_PUTLOCKS_EXIT(sq); 6491 /* 6492 * we have the only claim, run the events, 6493 * sq_run_events will clear the SQ_EXCL flag. 6494 */ 6495 sq_run_events(sq); 6496 6497 /* 6498 * If this is a CIPUT perimeter, we need 6499 * to drop the SQ_EXCL flag so we can properly 6500 * continue draining the syncq. 6501 */ 6502 if (type & SQ_CIPUT) { 6503 ASSERT(sq->sq_flags & SQ_EXCL); 6504 sq->sq_flags &= ~SQ_EXCL; 6505 } 6506 6507 /* 6508 * And go back to the beginning just in case 6509 * anything changed while we were away. 6510 */ 6511 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT)); 6512 continue; 6513 } 6514 6515 ASSERT(sq->sq_evhead == NULL); 6516 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6517 6518 /* 6519 * Find the queue that is not draining. 6520 * 6521 * q_draining is protected by QLOCK which we do not hold. 6522 * But if it was set, then a thread was draining, and if it gets 6523 * cleared, then it was because the thread has successfully 6524 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY 6525 * state to happen, a thread needs the SQLOCK which we hold, and 6526 * if there was such a flag, we would have already seen it. 6527 */ 6528 6529 for (qp = sq->sq_head; 6530 qp != NULL && (qp->q_draining || 6531 (qp->q_sqflags & Q_SQDRAINING)); 6532 qp = qp->q_sqnext) 6533 ; 6534 6535 if (qp == NULL) 6536 break; 6537 6538 /* 6539 * We have a queue to work on, and we hold the 6540 * SQLOCK and one claim, call qdrain_syncq. 6541 * This means we need to release the SQLOCK and 6542 * acquire the QLOCK (OK since we have a claim). 6543 * Note that qdrain_syncq will actually dequeue 6544 * this queue from the sq_head list when it is 6545 * convinced all the work is done and release 6546 * the QLOCK before returning. 
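		 * (Dropping SQLOCK here is safe because the claim taken
		 * above keeps the syncq from being torn down underneath
		 * us.)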
6547 */ 6548 qp->q_sqflags |= Q_SQDRAINING; 6549 mutex_exit(SQLOCK(sq)); 6550 mutex_enter(QLOCK(qp)); 6551 qdrain_syncq(sq, qp); 6552 mutex_enter(SQLOCK(sq)); 6553 6554 /* The queue is drained */ 6555 ASSERT(qp->q_sqflags & Q_SQDRAINING); 6556 qp->q_sqflags &= ~Q_SQDRAINING; 6557 /* 6558 * NOTE: After this point qp should not be used since it may be 6559 * closed. 6560 */ 6561 } 6562 6563 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6564 flags = sq->sq_flags; 6565 6566 /* 6567 * sq->sq_head cannot change because we hold the 6568 * sqlock. However, a thread CAN decide that it is no longer 6569 * going to drain that queue. However, this should be due to 6570 * a GOAWAY state, and we should see that here. 6571 * 6572 * This loop is not very efficient. One solution may be adding a second 6573 * pointer to the "draining" queue, but it is difficult to do when 6574 * queues are inserted in the middle due to priority ordering. Another 6575 * possibility is to yank the queue out of the sq list and put it onto 6576 * the "draining list" and then put it back if it can't be drained. 6577 */ 6578 6579 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) || 6580 (type & SQ_CI) || sq->sq_head->q_draining); 6581 6582 /* Drop SQ_EXCL for non-CIPUT perimeters */ 6583 if (!(type & SQ_CIPUT)) 6584 flags &= ~SQ_EXCL; 6585 ASSERT((flags & SQ_EXCL) == 0); 6586 6587 /* Wake up any waiters. */ 6588 if (flags & SQ_WANTWAKEUP) { 6589 flags &= ~SQ_WANTWAKEUP; 6590 cv_broadcast(&sq->sq_wait); 6591 } 6592 if (flags & SQ_WANTEXWAKEUP) { 6593 flags &= ~SQ_WANTEXWAKEUP; 6594 cv_broadcast(&sq->sq_exitwait); 6595 } 6596 sq->sq_flags = flags; 6597 6598 ASSERT(sq->sq_count != 0); 6599 /* Release our claim. */ 6600 sq->sq_count--; 6601 6602 if (bg_service) { 6603 ASSERT(sq->sq_servcount != 0); 6604 sq->sq_servcount--; 6605 } 6606 6607 mutex_exit(SQLOCK(sq)); 6608 6609 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6610 "drain_syncq end:%p", sq); 6611 } 6612 6613 6614 /* 6615 * 6616 * qdrain_syncq can be called (currently) from only one of two places: 6617 * drain_syncq 6618 * putnext (or some variation of it). 6619 * and eventually 6620 * qwait(_sig) 6621 * 6622 * If called from drain_syncq, we found it in the list of queues needing 6623 * service, so there is work to be done (or it wouldn't be in the list). 6624 * 6625 * If called from some putnext variation, it was because the 6626 * perimeter is open, but messages are blocking a putnext and 6627 * there is not a thread working on it. Now a thread could start 6628 * working on it while we are getting ready to do so ourself, but 6629 * the thread would set the q_draining flag, and we can spin out. 6630 * 6631 * As for qwait(_sig), I think I shall let it continue to call 6632 * drain_syncq directly (after all, it will get here eventually). 6633 * 6634 * qdrain_syncq has to terminate when: 6635 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering 6636 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering 6637 * 6638 * ASSUMES: 6639 * One claim 6640 * QLOCK held 6641 * SQLOCK not held 6642 * Will release QLOCK before returning 6643 */ 6644 void 6645 qdrain_syncq(syncq_t *sq, queue_t *q) 6646 { 6647 mblk_t *bp; 6648 #ifdef DEBUG 6649 uint16_t count; 6650 #endif 6651 6652 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6653 "drain_syncq start:%p", sq); 6654 ASSERT(q->q_syncq == sq); 6655 ASSERT(MUTEX_HELD(QLOCK(q))); 6656 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6657 /* 6658 * For non-CIPUT perimeters, we should be called with the exclusive bit 6659 * set already. 
For CIPUT perimeters, we will be doing a concurrent 6660 * drain, so it better not be set. 6661 */ 6662 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT))); 6663 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL))); 6664 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL)); 6665 /* 6666 * All outer pointers are set, or none of them are 6667 */ 6668 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6669 sq->sq_oprev == NULL) || 6670 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6671 sq->sq_oprev != NULL)); 6672 #ifdef DEBUG 6673 count = sq->sq_count; 6674 /* 6675 * This is OK without the putlocks, because we have one 6676 * claim either from the sq_count, or a putcount. We could 6677 * get an erroneous value from other counts, but ours won't 6678 * change, so one way or another, we will have at least a 6679 * value of one. 6680 */ 6681 SUM_SQ_PUTCOUNTS(sq, count); 6682 ASSERT(count >= 1); 6683 #endif /* DEBUG */ 6684 6685 /* 6686 * The first thing to do is find out if a thread is already draining 6687 * this queue. If so, we are done, just return. 6688 */ 6689 if (q->q_draining) { 6690 mutex_exit(QLOCK(q)); 6691 return; 6692 } 6693 6694 /* 6695 * If the perimeter is exclusive, there is nothing we can do right now, 6696 * go away. Note that there is nothing to prevent this case from 6697 * changing right after this check, but the spin-out will catch it. 6698 */ 6699 6700 /* Tell other threads that we are draining this queue */ 6701 q->q_draining = 1; /* Protected by QLOCK */ 6702 6703 /* 6704 * If there is nothing to do, clear QFULL as necessary. This caters for 6705 * the case where an empty queue was enqueued onto the syncq. 6706 */ 6707 if (q->q_sqhead == NULL) { 6708 ASSERT(q->q_syncqmsgs == 0); 6709 mutex_exit(QLOCK(q)); 6710 clr_qfull(q); 6711 mutex_enter(QLOCK(q)); 6712 } 6713 6714 /* 6715 * Note that q_sqhead must be re-checked here in case another message 6716 * was enqueued whilst QLOCK was dropped during the call to clr_qfull. 6717 */ 6718 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) { 6719 /* 6720 * Because we can enter this routine just because a putnext is 6721 * blocked, we need to spin out if the perimeter wants to go 6722 * exclusive as well as just blocked. We need to spin out also 6723 * if events are queued on the syncq. 6724 * Don't check for SQ_EXCL, because non-CIPUT perimeters would 6725 * set it, and it can't become exclusive while we hold a claim. 6726 */ 6727 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) { 6728 break; 6729 } 6730 6731 #ifdef DEBUG 6732 /* 6733 * Since we are in qdrain_syncq, we already know the queue, 6734 * but for sanity, we want to check this against the qp that 6735 * was passed in by bp->b_queue. 6736 */ 6737 6738 ASSERT(bp->b_queue == q); 6739 ASSERT(bp->b_queue->q_syncq == sq); 6740 bp->b_queue = NULL; 6741 6742 /* 6743 * We would have the following check in the DEBUG code: 6744 * 6745 * if (bp->b_prev != NULL) { 6746 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp); 6747 * } 6748 * 6749 * This can't be done, however, since IP modifies qinfo 6750 * structure at run-time (switching between IPv4 qinfo and IPv6 6751 * qinfo), invalidating the check. 6752 * So the assignment to func is left here, but the ASSERT itself 6753 * is removed until the whole issue is resolved. 
6754 */ 6755 #endif 6756 ASSERT(q->q_sqhead == bp); 6757 q->q_sqhead = bp->b_next; 6758 bp->b_prev = bp->b_next = NULL; 6759 ASSERT(q->q_syncqmsgs > 0); 6760 mutex_exit(QLOCK(q)); 6761 6762 ASSERT(bp->b_datap->db_ref != 0); 6763 6764 (void) (*q->q_qinfo->qi_putp)(q, bp); 6765 6766 mutex_enter(QLOCK(q)); 6767 6768 /* 6769 * q_syncqmsgs should only be decremented after executing the 6770 * put procedure to avoid message re-ordering. This is due to an 6771 * optimisation in putnext() which can call the put procedure 6772 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED 6773 * being set). 6774 * 6775 * We also need to clear QFULL in the next service procedure 6776 * queue if this is the last message destined for that queue. 6777 * 6778 * It would make better sense to have some sort of tunable for 6779 * the low water mark, but these semantics are not yet defined. 6780 * So, alas, we use a constant. 6781 */ 6782 if (--q->q_syncqmsgs == 0) { 6783 mutex_exit(QLOCK(q)); 6784 clr_qfull(q); 6785 mutex_enter(QLOCK(q)); 6786 } 6787 6788 /* 6789 * Always clear SQ_EXCL when CIPUT in order to handle 6790 * qwriter(INNER). The putp() can call qwriter and get exclusive 6791 * access IFF this is the only claim. So, we need to test for 6792 * this possibility, acquire the mutex and clear the bit. 6793 */ 6794 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) { 6795 mutex_enter(SQLOCK(sq)); 6796 sq->sq_flags &= ~SQ_EXCL; 6797 mutex_exit(SQLOCK(sq)); 6798 } 6799 } 6800 6801 /* 6802 * We should either have no messages on this queue, or we were told to 6803 * goaway by a waiter (which we will wake up at the end of this 6804 * function). 6805 */ 6806 ASSERT((q->q_sqhead == NULL) || 6807 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))); 6808 6809 ASSERT(MUTEX_HELD(QLOCK(q))); 6810 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6811 6812 /* Remove the q from the syncq list if all the messages are drained. */ 6813 if (q->q_sqhead == NULL) { 6814 ASSERT(q->q_syncqmsgs == 0); 6815 mutex_enter(SQLOCK(sq)); 6816 if (q->q_sqflags & Q_SQQUEUED) 6817 SQRM_Q(sq, q); 6818 mutex_exit(SQLOCK(sq)); 6819 /* 6820 * Since the queue is removed from the list, reset its priority. 6821 */ 6822 q->q_spri = 0; 6823 } 6824 6825 /* 6826 * Remember, the q_draining flag is used to let another thread know 6827 * that there is a thread currently draining the messages for a queue. 6828 * Since we are now done with this queue (even if there may be messages 6829 * still there), we need to clear this flag so some thread will work on 6830 * it if needed. 6831 */ 6832 ASSERT(q->q_draining); 6833 q->q_draining = 0; 6834 6835 /* Called with a claim, so OK to drop all locks. */ 6836 mutex_exit(QLOCK(q)); 6837 6838 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6839 "drain_syncq end:%p", sq); 6840 } 6841 /* END OF QDRAIN_SYNCQ */ 6842 6843 6844 /* 6845 * This is the mate to qdrain_syncq, except that it is putting the message onto 6846 * the queue instead of draining. Since the message is destined for the queue 6847 * that is selected, there is no need to identify the function because the 6848 * message is intended for the put routine for the queue. For debug kernels, 6849 * this routine will do it anyway just in case. 6850 * 6851 * After the message is enqueued on the syncq, it calls putnext_tail() 6852 * which will schedule a background thread to actually process the message. 6853 * 6854 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and 6855 * SQLOCK(sq) and QLOCK(q) are not held. 
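 *
 * A minimal usage sketch (hypothetical caller, e.g. a putnext variant
 * that cannot enter the perimeter right now; the claim is assumed to be
 * held already):
 *
 *	if (cannot_enter)			/* hypothetical test */
 *		qfill_syncq(sq, q, mp);		/* park mp and schedule */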
 */
void
qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
{
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
	ASSERT(sq->sq_count > 0);
	ASSERT(q->q_syncq == sq);
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	mutex_enter(QLOCK(q));

#ifdef DEBUG
	/*
	 * This is used for debug in the qfill_syncq/qdrain_syncq case
	 * to trace the queue that the message is intended for. Note
	 * that the original use was to identify the queue and function
	 * to call on the drain. In the new syncq, we have the context
	 * of the queue that we are draining, so call its putproc and
	 * don't rely on the saved values. But for debug this is still
	 * useful information.
	 */
	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
	mp->b_queue = q;
	mp->b_next = NULL;
#endif
	ASSERT(q->q_syncq == sq);
	/*
	 * Enqueue the message on the list.
	 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
	 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
	 */
	SQPUT_MP(q, mp);
	mutex_enter(SQLOCK(sq));

	/*
	 * And queue on syncq for scheduling, if not already queued.
	 * Note that we need the SQLOCK for this, and for testing flags
	 * at the end to see if we will drain. So grab it now, and
	 * release it before we call qdrain_syncq or return.
	 */
	if (!(q->q_sqflags & Q_SQQUEUED)) {
		q->q_spri = curthread->t_pri;
		SQPUT_Q(sq, q);
	}
#ifdef DEBUG
	else {
		/*
		 * All of these conditions MUST be true!
		 */
		ASSERT(sq->sq_tail != NULL);
		if (sq->sq_tail == sq->sq_head) {
			ASSERT((q->q_sqprev == NULL) &&
			    (q->q_sqnext == NULL));
		} else {
			ASSERT((q->q_sqprev != NULL) ||
			    (q->q_sqnext != NULL));
		}
		ASSERT(sq->sq_flags & SQ_QUEUED);
		ASSERT(q->q_syncqmsgs != 0);
		ASSERT(q->q_sqflags & Q_SQQUEUED);
	}
#endif
	mutex_exit(QLOCK(q));
	/*
	 * SQLOCK is still held, so sq_count can be safely decremented.
	 */
	sq->sq_count--;

	putnext_tail(sq, q, 0);
	/* Should not reference sq or q after this point. */
}

/* End of qfill_syncq */

/*
 * Remove all messages from a syncq (if qp is NULL) or remove all messages
 * that would be put into qp by drain_syncq.
 * Used when deleting the syncq (qp == NULL) or when detaching
 * a queue (qp != NULL).
 * Return non-zero if one or more messages were freed.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains
 * when sq_putlocks are used.
 *
 * NOTE: This function assumes that it is called from the close() context and
 * that all the queues in the syncq are going away. For this reason it doesn't
 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
 * currently valid, but it would be worth rethinking this function so that it
 * behaves properly in other cases.
 */
int
flush_syncq(syncq_t *sq, queue_t *qp)
{
	mblk_t *bp, *mp_head, *mp_next, *mp_prev;
	queue_t *q;
	int ret = 0;

	mutex_enter(SQLOCK(sq));

	/*
	 * Before we leave, we need to make sure there are no
	 * events listed for this queue. All events for this queue
	 * will just be freed.
	 */
	if (qp != NULL && sq->sq_evhead != NULL) {
		ASSERT(sq->sq_flags & SQ_EVENTS);

		mp_prev = NULL;
		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
			mp_next = bp->b_next;
			if (bp->b_queue == qp) {
				/* Delete this message */
				if (mp_prev != NULL) {
					mp_prev->b_next = mp_next;
					/*
					 * Update sq_evtail if the last element
					 * is removed.
					 */
					if (bp == sq->sq_evtail) {
						ASSERT(mp_next == NULL);
						sq->sq_evtail = mp_prev;
					}
				} else
					sq->sq_evhead = mp_next;
				if (sq->sq_evhead == NULL)
					sq->sq_flags &= ~SQ_EVENTS;
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				ret++;
			} else {
				mp_prev = bp;
			}
		}
	}

	/*
	 * Walk sq_head and:
	 * - remove its messages if qp is set and matches
	 * - remove all messages if qp is not set
	 */
	q = sq->sq_head;
	while (q != NULL) {
		ASSERT(q->q_syncq == sq);
		if ((qp == NULL) || (qp == q)) {
			/*
			 * Yank the messages as a list off the queue
			 */
			mp_head = q->q_sqhead;
			/*
			 * We do not have QLOCK(q) here (which is safe due to
			 * assumptions mentioned above). To obtain the lock we
			 * would need to release SQLOCK, which may allow lots
			 * of things to change underneath us. This place
			 * requires more analysis.
			 */
			q->q_sqhead = q->q_sqtail = NULL;
			ASSERT(mp_head->b_queue &&
			    mp_head->b_queue->q_syncq == sq);

			/*
			 * Free each of the messages.
			 */
			for (bp = mp_head; bp != NULL; bp = mp_next) {
				mp_next = bp->b_next;
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				ret++;
			}
			/*
			 * Now remove the queue from the syncq.
			 */
			ASSERT(q->q_sqflags & Q_SQQUEUED);
			SQRM_Q(sq, q);
			q->q_spri = 0;
			q->q_syncqmsgs = 0;

			/*
			 * If qp was specified, we are done with it and are
			 * going to drop SQLOCK(sq) and return. We wake up
			 * syncq waiters while we still have the SQLOCK.
			 */
			if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
				sq->sq_flags &= ~SQ_WANTWAKEUP;
				cv_broadcast(&sq->sq_wait);
			}
			/* Drop SQLOCK across clr_qfull */
			mutex_exit(SQLOCK(sq));

			/*
			 * We avoid doing the test that drain_syncq does and
			 * unconditionally clear qfull for every flushed
			 * message. Since flush_syncq is only called during
			 * close this should not be a problem.
			 */
			clr_qfull(q);
			if (qp != NULL) {
				return (ret);
			} else {
				mutex_enter(SQLOCK(sq));
				/*
				 * The head was removed by SQRM_Q above.
				 * Reread the new head and flush it.
				 */
				q = sq->sq_head;
			}
		} else {
			q = q->q_sqnext;
		}
		ASSERT(MUTEX_HELD(SQLOCK(sq)));
	}

	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}

	mutex_exit(SQLOCK(sq));
	return (ret);
}

/*
 * Propagate all messages from a syncq to the next syncq that are associated
 * with the specified queue. If the queue is attached to a driver or if the
 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
 *
 * Assumes that the stream is strlock()'ed. We don't come here if there
 * are no messages to propagate.
 *
 * NOTE : If the queue is attached to a driver, all the messages are freed
 * as there is no point in propagating the messages from the driver syncq
 * to the closing stream head which will in turn get freed later.
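 *
 * Schematically (illustration only), with nqp == qp->q_next and
 * nsq == nqp->q_syncq, the parked messages are re-labelled for nqp and
 * appended to nsq's list:
 *
 *	qp->q_sqhead ... qp->q_sqtail  ==>  appended after nqp->q_sqtail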
 */
static int
propagate_syncq(queue_t *qp)
{
	mblk_t *bp, *head, *tail, *prev, *next;
	syncq_t *sq;
	queue_t *nqp;
	syncq_t *nsq;
	boolean_t isdriver;
	int moved = 0;
	uint16_t flags;
	pri_t priority = curthread->t_pri;
#ifdef DEBUG
	void (*func)();
#endif

	sq = qp->q_syncq;
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	/* debug macro */
	SQ_PUTLOCKS_HELD(sq);
	/*
	 * As entersq() does not increment the sq_count for
	 * the write side, check sq_count for non-QPERQ
	 * perimeters alone.
	 */
	ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));

	/*
	 * propagate_syncq() can be called because of either messages on the
	 * queue syncq or because of events on the queue syncq. Do the actual
	 * message propagation if there are any messages.
	 */
	if (qp->q_syncqmsgs) {
		isdriver = (qp->q_flag & QISDRV);

		if (!isdriver) {
			nqp = qp->q_next;
			nsq = nqp->q_syncq;
			ASSERT(MUTEX_HELD(SQLOCK(nsq)));
			/* debug macro */
			SQ_PUTLOCKS_HELD(nsq);
#ifdef DEBUG
			func = (void (*)())nqp->q_qinfo->qi_putp;
#endif
		}

		SQRM_Q(sq, qp);
		priority = MAX(qp->q_spri, priority);
		qp->q_spri = 0;
		head = qp->q_sqhead;
		tail = qp->q_sqtail;
		qp->q_sqhead = qp->q_sqtail = NULL;
		qp->q_syncqmsgs = 0;

		/*
		 * Walk the list of messages, and free them if this is a
		 * driver, otherwise reset the b_prev and b_queue value to
		 * the new putp. Afterward, we will just add the head to the
		 * end of the next syncq, and point the tail to the end of
		 * this one.
		 */

		for (bp = head; bp != NULL; bp = next) {
			next = bp->b_next;
			if (isdriver) {
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				continue;
			}
			/* Change the q values for this message */
			bp->b_queue = nqp;
#ifdef DEBUG
			bp->b_prev = (mblk_t *)func;
#endif
			moved++;
		}
		/*
		 * Attach list of messages to the end of the new queue (if
		 * there is a list of messages).
		 */

		if (!isdriver && head != NULL) {
			ASSERT(tail != NULL);
			if (nqp->q_sqhead == NULL) {
				nqp->q_sqhead = head;
			} else {
				ASSERT(nqp->q_sqtail != NULL);
				nqp->q_sqtail->b_next = head;
			}
			nqp->q_sqtail = tail;
			/*
			 * When messages are moved from a high priority queue
			 * to another queue, the destination queue priority is
			 * upgraded.
			 */

			if (priority > nqp->q_spri)
				nqp->q_spri = priority;

			SQPUT_Q(nsq, nqp);

			nqp->q_syncqmsgs += moved;
			ASSERT(nqp->q_syncqmsgs != 0);
		}
	}

	/*
	 * Before we leave, we need to make sure there are no
	 * events listed for this queue. All events for this queue
	 * will just be freed.
	 */
	if (sq->sq_evhead != NULL) {
		ASSERT(sq->sq_flags & SQ_EVENTS);
		prev = NULL;
		for (bp = sq->sq_evhead; bp != NULL; bp = next) {
			next = bp->b_next;
			if (bp->b_queue == qp) {
				/* Delete this message */
				if (prev != NULL) {
					prev->b_next = next;
					/*
					 * Update sq_evtail if the last element
					 * is removed.
7212 */ 7213 if (bp == sq->sq_evtail) { 7214 ASSERT(next == NULL); 7215 sq->sq_evtail = prev; 7216 } 7217 } else 7218 sq->sq_evhead = next; 7219 if (sq->sq_evhead == NULL) 7220 sq->sq_flags &= ~SQ_EVENTS; 7221 bp->b_prev = bp->b_next = NULL; 7222 freemsg(bp); 7223 } else { 7224 prev = bp; 7225 } 7226 } 7227 } 7228 7229 flags = sq->sq_flags; 7230 7231 /* Wake up any waiter before leaving. */ 7232 if (flags & SQ_WANTWAKEUP) { 7233 flags &= ~SQ_WANTWAKEUP; 7234 cv_broadcast(&sq->sq_wait); 7235 } 7236 sq->sq_flags = flags; 7237 7238 return (moved); 7239 } 7240 7241 /* 7242 * Try and upgrade to exclusive access at the inner perimeter. If this can 7243 * not be done without blocking then request will be queued on the syncq 7244 * and drain_syncq will run it later. 7245 * 7246 * This routine can only be called from put or service procedures plus 7247 * asynchronous callback routines that have properly entered the queue (with 7248 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq 7249 * associated with q. 7250 */ 7251 void 7252 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7253 { 7254 syncq_t *sq = q->q_syncq; 7255 uint16_t count; 7256 7257 mutex_enter(SQLOCK(sq)); 7258 count = sq->sq_count; 7259 SQ_PUTLOCKS_ENTER(sq); 7260 SUM_SQ_PUTCOUNTS(sq, count); 7261 ASSERT(count >= 1); 7262 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7263 7264 if (count == 1) { 7265 /* 7266 * Can upgrade. This case also handles nested qwriter calls 7267 * (when the qwriter callback function calls qwriter). In that 7268 * case SQ_EXCL is already set. 7269 */ 7270 sq->sq_flags |= SQ_EXCL; 7271 SQ_PUTLOCKS_EXIT(sq); 7272 mutex_exit(SQLOCK(sq)); 7273 (*func)(q, mp); 7274 /* 7275 * Assumes that leavesq, putnext, and drain_syncq will reset 7276 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7277 * until putnext, leavesq, or drain_syncq drops it. 7278 * That way we handle nested qwriter(INNER) without dropping 7279 * SQ_EXCL until the outermost qwriter callback routine is 7280 * done. 7281 */ 7282 return; 7283 } 7284 SQ_PUTLOCKS_EXIT(sq); 7285 sqfill_events(sq, q, mp, func); 7286 } 7287 7288 /* 7289 * Synchronous callback support functions 7290 */ 7291 7292 /* 7293 * Allocate a callback parameter structure. 7294 * Assumes that caller initializes the flags and the id. 7295 * Acquires SQLOCK(sq) if non-NULL is returned. 7296 */ 7297 callbparams_t * 7298 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7299 { 7300 callbparams_t *cbp; 7301 size_t size = sizeof (callbparams_t); 7302 7303 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7304 7305 /* 7306 * Only try tryhard allocation if the caller is ready to panic. 7307 * Otherwise just fail. 
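	 *
	 * A sketch of the resulting behaviour for a hypothetical caller
	 * (the KM_NOSLEEP | KM_PANIC combination is assumed here purely
	 * for illustration):
	 *
	 *	cbp = callbparams_alloc(sq, func, arg,
	 *	    KM_NOSLEEP | KM_PANIC);
	 *	if (cbp != NULL) {
	 *		... SQLOCK(sq) is now held ...
	 *	}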
7308 */ 7309 if (cbp == NULL) { 7310 if (kmflags & KM_PANIC) 7311 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7312 &size, kmflags); 7313 else 7314 return (NULL); 7315 } 7316 7317 ASSERT(size >= sizeof (callbparams_t)); 7318 cbp->cbp_size = size; 7319 cbp->cbp_sq = sq; 7320 cbp->cbp_func = func; 7321 cbp->cbp_arg = arg; 7322 mutex_enter(SQLOCK(sq)); 7323 cbp->cbp_next = sq->sq_callbpend; 7324 sq->sq_callbpend = cbp; 7325 return (cbp); 7326 } 7327 7328 void 7329 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7330 { 7331 callbparams_t **pp, *p; 7332 7333 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7334 7335 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7336 if (p == cbp) { 7337 *pp = p->cbp_next; 7338 kmem_free(p, p->cbp_size); 7339 return; 7340 } 7341 } 7342 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7343 "callbparams_free: not found\n")); 7344 } 7345 7346 void 7347 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7348 { 7349 callbparams_t **pp, *p; 7350 7351 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7352 7353 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7354 if (p->cbp_id == id && p->cbp_flags == flag) { 7355 *pp = p->cbp_next; 7356 kmem_free(p, p->cbp_size); 7357 return; 7358 } 7359 } 7360 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7361 "callbparams_free_id: not found\n")); 7362 } 7363 7364 /* 7365 * Callback wrapper function used by once-only callbacks that can be 7366 * cancelled (qtimeout and qbufcall) 7367 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7368 * cancelled by the qun* functions. 7369 */ 7370 void 7371 qcallbwrapper(void *arg) 7372 { 7373 callbparams_t *cbp = arg; 7374 syncq_t *sq; 7375 uint16_t count = 0; 7376 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 7377 uint16_t type; 7378 7379 sq = cbp->cbp_sq; 7380 mutex_enter(SQLOCK(sq)); 7381 type = sq->sq_type; 7382 if (!(type & SQ_CICB)) { 7383 count = sq->sq_count; 7384 SQ_PUTLOCKS_ENTER(sq); 7385 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 7386 SUM_SQ_PUTCOUNTS(sq, count); 7387 sq->sq_needexcl++; 7388 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 7389 waitflags |= SQ_MESSAGES; 7390 } 7391 /* Can not handle exclusive entry at outer perimeter */ 7392 ASSERT(type & SQ_COCB); 7393 7394 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) &&count != 0)) { 7395 if ((sq->sq_callbflags & cbp->cbp_flags) && 7396 (sq->sq_cancelid == cbp->cbp_id)) { 7397 /* timeout has been cancelled */ 7398 sq->sq_callbflags |= SQ_CALLB_BYPASSED; 7399 callbparams_free(sq, cbp); 7400 if (!(type & SQ_CICB)) { 7401 ASSERT(sq->sq_needexcl > 0); 7402 sq->sq_needexcl--; 7403 if (sq->sq_needexcl == 0) { 7404 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7405 } 7406 SQ_PUTLOCKS_EXIT(sq); 7407 } 7408 mutex_exit(SQLOCK(sq)); 7409 return; 7410 } 7411 sq->sq_flags |= SQ_WANTWAKEUP; 7412 if (!(type & SQ_CICB)) { 7413 SQ_PUTLOCKS_EXIT(sq); 7414 } 7415 cv_wait(&sq->sq_wait, SQLOCK(sq)); 7416 if (!(type & SQ_CICB)) { 7417 count = sq->sq_count; 7418 SQ_PUTLOCKS_ENTER(sq); 7419 SUM_SQ_PUTCOUNTS(sq, count); 7420 } 7421 } 7422 7423 sq->sq_count++; 7424 ASSERT(sq->sq_count != 0); /* Wraparound */ 7425 if (!(type & SQ_CICB)) { 7426 ASSERT(count == 0); 7427 sq->sq_flags |= SQ_EXCL; 7428 ASSERT(sq->sq_needexcl > 0); 7429 sq->sq_needexcl--; 7430 if (sq->sq_needexcl == 0) { 7431 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7432 } 7433 SQ_PUTLOCKS_EXIT(sq); 7434 } 7435 7436 mutex_exit(SQLOCK(sq)); 7437 7438 cbp->cbp_func(cbp->cbp_arg); 7439 7440 /* 7441 * We drop the lock only for leavesq to re-acquire it. 
7442 * Possible optimization is inline of leavesq. 7443 */ 7444 mutex_enter(SQLOCK(sq)); 7445 callbparams_free(sq, cbp); 7446 mutex_exit(SQLOCK(sq)); 7447 leavesq(sq, SQ_CALLBACK); 7448 } 7449 7450 /* 7451 * No need to grab sq_putlocks here. See comment in strsubr.h that 7452 * explains when sq_putlocks are used. 7453 * 7454 * sq_count (or one of the sq_putcounts) has already been 7455 * decremented by the caller, and if SQ_QUEUED, we need to call 7456 * drain_syncq (the global syncq drain). 7457 * If putnext_tail is called with the SQ_EXCL bit set, we are in 7458 * one of two states, non-CIPUT perimeter, and we need to clear 7459 * it, or we went exclusive in the put procedure. In any case, 7460 * we want to clear the bit now, and it is probably easier to do 7461 * this at the beginning of this function (remember, we hold 7462 * the SQLOCK). Lastly, if there are other messages queued 7463 * on the syncq (and not for our destination), enable the syncq 7464 * for background work. 7465 */ 7466 7467 /* ARGSUSED */ 7468 void 7469 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7470 { 7471 uint16_t flags = sq->sq_flags; 7472 7473 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7474 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7475 7476 /* Clear SQ_EXCL if set in passflags */ 7477 if (passflags & SQ_EXCL) { 7478 flags &= ~SQ_EXCL; 7479 } 7480 if (flags & SQ_WANTWAKEUP) { 7481 flags &= ~SQ_WANTWAKEUP; 7482 cv_broadcast(&sq->sq_wait); 7483 } 7484 if (flags & SQ_WANTEXWAKEUP) { 7485 flags &= ~SQ_WANTEXWAKEUP; 7486 cv_broadcast(&sq->sq_exitwait); 7487 } 7488 sq->sq_flags = flags; 7489 7490 /* 7491 * We have cleared SQ_EXCL if we were asked to, and started 7492 * the wakeup process for waiters. If there are no writers 7493 * then we need to drain the syncq if we were told to, or 7494 * enable the background thread to do it. 7495 */ 7496 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7497 if ((passflags & SQ_QUEUED) || 7498 (sq->sq_svcflags & SQ_DISABLED)) { 7499 /* drain_syncq will take care of events in the list */ 7500 drain_syncq(sq); 7501 return; 7502 } else if (flags & SQ_QUEUED) { 7503 sqenable(sq); 7504 } 7505 } 7506 /* Drop the SQLOCK on exit */ 7507 mutex_exit(SQLOCK(sq)); 7508 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7509 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7510 } 7511 7512 void 7513 set_qend(queue_t *q) 7514 { 7515 mutex_enter(QLOCK(q)); 7516 if (!O_SAMESTR(q)) 7517 q->q_flag |= QEND; 7518 else 7519 q->q_flag &= ~QEND; 7520 mutex_exit(QLOCK(q)); 7521 q = _OTHERQ(q); 7522 mutex_enter(QLOCK(q)); 7523 if (!O_SAMESTR(q)) 7524 q->q_flag |= QEND; 7525 else 7526 q->q_flag &= ~QEND; 7527 mutex_exit(QLOCK(q)); 7528 } 7529 7530 /* 7531 * Set QFULL in next service procedure queue (that cares) if not already 7532 * set and if there are already more messages on the syncq than 7533 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7534 * any syncq. 7535 * 7536 * The fq here is the next queue with a service procedure. This is where 7537 * we would fail canputnext, so this is where we need to set QFULL. 7538 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7539 * 7540 * We already have QLOCK at this point. To avoid cross-locks with 7541 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7542 * SQLOCK and sd_reflock, we need to drop respective locks first. 
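 *
 * In effect (a sketch of the existing behaviour, not a new interface):
 * once q_syncqmsgs exceeds sq_max_size, the next queue with a service
 * procedure gets QFULL set, so a subsequent canputnext() towards it
 * fails and flow control is asserted.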
7543 */ 7544 void 7545 set_qfull(queue_t *q) 7546 { 7547 queue_t *fq = NULL; 7548 7549 ASSERT(MUTEX_HELD(QLOCK(q))); 7550 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7551 (q->q_syncqmsgs > sq_max_size)) { 7552 if ((fq = q->q_nfsrv) == q) { 7553 fq->q_flag |= QFULL; 7554 } else { 7555 mutex_exit(QLOCK(q)); 7556 mutex_enter(QLOCK(fq)); 7557 fq->q_flag |= QFULL; 7558 mutex_exit(QLOCK(fq)); 7559 mutex_enter(QLOCK(q)); 7560 } 7561 } 7562 } 7563 7564 void 7565 clr_qfull(queue_t *q) 7566 { 7567 queue_t *oq = q; 7568 7569 q = q->q_nfsrv; 7570 /* Fast check if there is any work to do before getting the lock. */ 7571 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7572 return; 7573 } 7574 7575 /* 7576 * Do not reset QFULL (and backenable) if the q_count is the reason 7577 * for QFULL being set. 7578 */ 7579 mutex_enter(QLOCK(q)); 7580 /* 7581 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7582 * Hence clear the QFULL. 7583 * If both q_count and q_mblkcnt are less than the hiwat mark, 7584 * clear the QFULL. 7585 */ 7586 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) && 7587 (q->q_mblkcnt < q->q_hiwat))) { 7588 q->q_flag &= ~QFULL; 7589 /* 7590 * A little more confusing, how about this way: 7591 * if someone wants to write, 7592 * AND 7593 * both counts are less than the lowat mark 7594 * OR 7595 * the lowat mark is zero 7596 * THEN 7597 * backenable 7598 */ 7599 if ((q->q_flag & QWANTW) && 7600 (((q->q_count < q->q_lowat) && 7601 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7602 q->q_flag &= ~QWANTW; 7603 mutex_exit(QLOCK(q)); 7604 backenable(oq, 0); 7605 } else 7606 mutex_exit(QLOCK(q)); 7607 } else 7608 mutex_exit(QLOCK(q)); 7609 } 7610 7611 /* 7612 * Set the forward service procedure pointer. 7613 * 7614 * Called at insert-time to cache a queue's next forward service procedure in 7615 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7616 * has a service procedure then q_nfsrv points to itself. If the queue to be 7617 * inserted does not have a service procedure, then q_nfsrv points to the next 7618 * queue forward that has a service procedure. If the queue is at the logical 7619 * end of the stream (driver for write side, stream head for the read side) 7620 * and does not have a service procedure, then q_nfsrv also points to itself. 7621 */ 7622 void 7623 set_nfsrv_ptr( 7624 queue_t *rnew, /* read queue pointer to new module */ 7625 queue_t *wnew, /* write queue pointer to new module */ 7626 queue_t *prev_rq, /* read queue pointer to the module above */ 7627 queue_t *prev_wq) /* write queue pointer to the module above */ 7628 { 7629 queue_t *qp; 7630 7631 if (prev_wq->q_next == NULL) { 7632 /* 7633 * Insert the driver, initialize the driver and stream head. 7634 * In this case, prev_rq/prev_wq should be the stream head. 7635 * _I_INSERT does not allow inserting a driver. Make sure 7636 * that it is not an insertion. 7637 */ 7638 ASSERT(!(rnew->q_flag & _QINSERTING)); 7639 wnew->q_nfsrv = wnew; 7640 if (rnew->q_qinfo->qi_srvp) 7641 rnew->q_nfsrv = rnew; 7642 else 7643 rnew->q_nfsrv = prev_rq; 7644 prev_rq->q_nfsrv = prev_rq; 7645 prev_wq->q_nfsrv = prev_wq; 7646 } else { 7647 /* 7648 * set up read side q_nfsrv pointer. This MUST be done 7649 * before setting the write side, because the setting of 7650 * the write side for a fifo may depend on it. 7651 * 7652 * Suppose we have a fifo that only has pipemod pushed. 
		 * pipemod has no read or write service procedures, so
		 * nfsrv for both pipemod queues points to prev_rq (the
		 * stream read head). Now push bufmod (which has only a
		 * read service procedure). Doing the write side first,
		 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
		 * is WRONG; the next queue forward from wnew with a
		 * service procedure will be rnew, not the stream read head.
		 * Since the downstream queue (which in the case of a fifo
		 * is the read queue rnew) can affect upstream queues, it
		 * needs to be done first. Setting up the read side first
		 * sets nfsrv for both pipemod queues to rnew and then
		 * when the write side is set up, wnew->q_nfsrv will also
		 * point to rnew.
		 */
		if (rnew->q_qinfo->qi_srvp) {
			/*
			 * Use _OTHERQ() because, if this is a pipe, the
			 * next module may have been pushed from the other
			 * end and q_next could be a read queue.
			 */
			qp = _OTHERQ(prev_wq->q_next);
			while (qp && qp->q_nfsrv != qp) {
				qp->q_nfsrv = rnew;
				qp = backq(qp);
			}
			rnew->q_nfsrv = rnew;
		} else
			rnew->q_nfsrv = prev_rq->q_nfsrv;

		/* Set up the write side q_nfsrv pointer. */
		if (wnew->q_qinfo->qi_srvp) {
			wnew->q_nfsrv = wnew;

			/*
			 * For insertion, we need to update the nfsrv of
			 * the modules above which do not have a service
			 * routine.
			 */
			if (rnew->q_flag & _QINSERTING) {
				for (qp = prev_wq;
				    qp != NULL && qp->q_nfsrv != qp;
				    qp = backq(qp)) {
					qp->q_nfsrv = wnew->q_nfsrv;
				}
			}
		} else {
			if (prev_wq->q_next == prev_rq)
				/*
				 * Since prev_wq/prev_rq are the middle of a
				 * fifo, wnew/rnew will also be the middle of
				 * a fifo and wnew's nfsrv is the same as
				 * rnew's.
				 */
				wnew->q_nfsrv = rnew->q_nfsrv;
			else
				wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
		}
	}
}

/*
 * Reset the forward service procedure pointer; called at remove-time.
 */
void
reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
{
	queue_t *tmp_qp;

	/* Reset the write side q_nfsrv pointer for _I_REMOVE */
	if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
		for (tmp_qp = backq(wqp);
		    tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
		    tmp_qp = backq(tmp_qp)) {
			tmp_qp->q_nfsrv = wqp->q_nfsrv;
		}
	}

	/* Reset the read side q_nfsrv pointer. */
	if (rqp->q_qinfo->qi_srvp) {
		if (wqp->q_next) {	/* non-driver case */
			tmp_qp = _OTHERQ(wqp->q_next);
			while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
				/* Note that rqp->q_next cannot be NULL */
				ASSERT(rqp->q_next != NULL);
				tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
				tmp_qp = backq(tmp_qp);
			}
		}
	}
}

/*
 * This routine should be called after all stream geometry changes to update
 * the stream head cached struio() rd/wr queue pointers. Note that it must
 * be called with the stream strlock()'ed.
 *
 * Note: this only enables Synchronous STREAMS for a side of a Stream which
 * has an explicit synchronous barrier module queue. That is, a queue that
 * has specified a struio() type.
 */
static void
strsetuio(stdata_t *stp)
{
	queue_t *wrq;

	if (stp->sd_flag & STPLEX) {
		/*
		 * Not a stream head, but a mux, so no Synchronous STREAMS.
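		 * (STPLEX is set when this stream has been linked below
		 * a multiplexor, so there is no stream head for which to
		 * cache struio() queue pointers.)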
		 */
		stp->sd_struiowrq = NULL;
		stp->sd_struiordq = NULL;
		return;
	}
	/*
	 * Scan the write queue(s) while synchronous
	 * until we find a qinfo uio type specified.
	 */
	wrq = stp->sd_wrq->q_next;
	while (wrq) {
		if (wrq->q_struiot == STRUIOT_NONE) {
			wrq = 0;
			break;
		}
		if (wrq->q_struiot != STRUIOT_DONTCARE)
			break;
		if (!_SAMESTR(wrq)) {
			wrq = 0;
			break;
		}
		wrq = wrq->q_next;
	}
	stp->sd_struiowrq = wrq;
	/*
	 * Scan the read queue(s) while synchronous
	 * until we find a qinfo uio type specified.
	 */
	wrq = stp->sd_wrq->q_next;
	while (wrq) {
		if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
			wrq = 0;
			break;
		}
		if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
			break;
		if (!_SAMESTR(wrq)) {
			wrq = 0;
			break;
		}
		wrq = wrq->q_next;
	}
	stp->sd_struiordq = wrq ? _RD(wrq) : 0;
}

/*
 * pass_wput unblocks the passthru queues so that messages can arrive at
 * the mux's lower read queue before the I_LINK/I_UNLINK is acked/nacked.
 */
static void
pass_wput(queue_t *q, mblk_t *mp)
{
	syncq_t *sq;

	sq = _RD(q)->q_syncq;
	if (sq->sq_flags & SQ_BLOCKED)
		unblocksq(sq, SQ_BLOCKED, 0);
	putnext(q, mp);
}

/*
 * Set up queues for the link/unlink.
 * Create a new queue, block it, and then insert it
 * below the stream head on the lower stream.
 * This prevents any messages from arriving during the setq
 * as well as while the mux is processing the I_LINK/I_UNLINK.
 * The blocked passq is unblocked once the I_LINK/I_UNLINK has
 * been acked or nacked or if a message is generated and sent
 * down the mux's write put procedure.
 * See pass_wput().
 *
 * After the new queue is inserted, all messages coming from below are
 * blocked. The call to strlock will ensure that all activity in the
 * stream head read queue syncq is stopped (sq_count drops to zero).
 */
static queue_t *
link_addpassthru(stdata_t *stpdown)
{
	queue_t *passq;
	sqlist_t sqlist;

	passq = allocq();
	STREAM(passq) = STREAM(_WR(passq)) = stpdown;
	/* setq might sleep in allocator - avoid holding locks. */
	setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
	    SQ_CI|SQ_CO, B_FALSE);
	claimq(passq);
	blocksq(passq->q_syncq, SQ_BLOCKED, 1);
	insertq(STREAM(passq), passq);

	/*
	 * Use strlock() to wait for the stream head sq_count to drop to zero
	 * since we are going to change q_ptr in the stream head. Note that
	 * insertq() doesn't wait for any syncq counts to drop to zero.
	 */
	sqlist.sqlist_head = NULL;
	sqlist.sqlist_index = 0;
	sqlist.sqlist_size = sizeof (sqlist_t);
	sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
	strlock(stpdown, &sqlist);
	strunlock(stpdown, &sqlist);

	releaseq(passq);
	return (passq);
}

/*
 * Let messages flow up into the mux by removing
 * the passq.
 */
static void
link_rempassthru(queue_t *passq)
{
	claimq(passq);
	removeq(passq);
	releaseq(passq);
	freeq(passq);
}

/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
 * is negative, then there is no time limit.

/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
 * is negative, there is no time limit. If `nosigs' is non-zero, the
 * wait is not interruptible by signals.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
	clock_t ret;

	if (tim < 0) {
		if (nosigs) {
			cv_wait(cvp, mp);
			ret = 1;
		} else {
			ret = cv_wait_sig(cvp, mp);
		}
	} else if (tim > 0) {
		/*
		 * Convert milliseconds to clock ticks.
		 */
		if (nosigs) {
			ret = cv_reltimedwait(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		} else {
			ret = cv_reltimedwait_sig(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		}
	} else {
		ret = -1;
	}
	return (ret);
}

/*
 * Wait until the stream head can determine if it is at the mark, but
 * don't wait forever, to prevent a race between the "mark" state in the
 * stream head and any mark state in the caller/user of this routine.
 *
 * This is used by sockets; for a socket it would be incorrect to fail a
 * SIOCATMARK request merely because there is no data in the receive queue
 * while the marked urgent data is still traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of these
 * three events:
 *	The stream head read queue becoming non-empty (including an EOF).
 *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *	The STRNOTATMARK flag being set (which indicates that the transport
 *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *	the mark).
 *
 * The routine returns 1 if the stream is at the mark, and 0 if it can be
 * determined that the stream is not at the mark. If the wait times out
 * before it can be determined whether or not the stream might be at the
 * mark, the routine returns -1.
 *
 * Note: This routine should only be used when a mark is pending, i.e., in
 * the socket case, after the SIGURG has been posted.
 * Note2: This cannot wake up just because synchronous streams indicate
 * that data is available, since it is not possible to use the synchronous
 * streams interfaces to determine the b_flag value for the data queued
 * below the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
	struct stdata *stp = vp->v_stream;
	queue_t *rq = _RD(stp->sd_wrq);
	int mark;

	mutex_enter(&stp->sd_lock);
	while (rq->q_first == NULL &&
	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
		stp->sd_flag |= RSLEEP;

		/* Wait for 100 milliseconds for any state change. */
		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
			mutex_exit(&stp->sd_lock);
			return (-1);
		}
	}
	if (stp->sd_flag & STRATMARK)
		mark = 1;
	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
		mark = 1;
	else
		mark = 0;

	mutex_exit(&stp->sd_lock);
	return (mark);
}

/*
 * Set a read side error. If persist is set, make the error persistent.
 * If errfunc is set, install the function as the exported error handler.
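 *
 * Usage sketch (illustrative; not part of the original source): a
 * transport that hits an unrecoverable receive failure might mark the
 * stream with a persistent read error so that subsequent reads keep
 * failing:
 *
 *	strsetrerror(vp, ECONNRESET, 1, NULL);
 *
 * With persist set, the error continues to be reported instead of being
 * cleared after it has been returned once.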
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_rerror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STRDERR;
	else
		stp->sd_flag |= STRDERR;
	if (persist) {
		stp->sd_flag &= ~STRDERRNONPERSIST;
	} else {
		stp->sd_flag |= STRDERRNONPERSIST;
	}
	stp->sd_rderrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Set a write side error. If persist is set, make the error persistent.
 * If errfunc is set, install the function as the exported error handler.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_werror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STWRERR;
	else
		stp->sd_flag |= STWRERR;
	if (persist) {
		stp->sd_flag &= ~STWRERRNONPERSIST;
	} else {
		stp->sd_flag |= STWRERRNONPERSIST;
	}
	stp->sd_wrerrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Make the stream return 0 (EOF) when all data has been read; this has no
 * effect on the write side.
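 *
 * Usage sketch (illustrative; not part of the original source): a socket
 * stream might set EOF once the transport reports an orderly release from
 * the peer, e.g.:
 *
 *	strseteof(SOTOV(so), 1);
 *
 * (assuming the usual sonode-to-vnode mapping); passing eof == 0 clears a
 * previously set EOF indication.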
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}

/* Used within the framework when the queue is already locked. */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on the run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is not
	 * already being serviced. If it is being serviced, the runservice()
	 * function will detect that QENAB is set and call the service
	 * procedure before clearing the QINSERVICE flag.
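	 *
	 * The handshake, roughly (an illustrative sketch, not part of the
	 * original source):
	 *
	 *	qenable_locked()	service thread
	 *	----------------	----------------------------------
	 *	set QENAB		clear QENAB, set QINSERVICE
	 *	QINSERVICE set?		run the service procedure
	 *	  -> just return	QENAB set again? service once more
	 *				clear QINSERVICE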
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of the qenable. */
	q->q_qtstamp = ddi_get_lbolt();

	/*
	 * Put the queue on the stp list and schedule it for background
	 * processing unless it is already scheduled, or unless the stream
	 * head intends to process it in the foreground later and has said
	 * so by setting the STRS_WILLSERVICE flag.
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags should
	 * show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream, we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * The task queue dispatch failed, so fail over to
			 * the backup servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag
			 * because it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue-processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}
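
/*
 * Usage note (an illustrative sketch, not part of the original source):
 * modules and drivers reach this path through the public qenable() entry
 * point, which is expected to take QLOCK and call qenable_locked(). For
 * example, a driver that ran out of buffers might rearm its service
 * procedure from a bufcall callback:
 *
 *	static void
 *	xx_bufcall(void *arg)		(the xx_ name is hypothetical)
 *	{
 *		qenable((queue_t *)arg);
 *	}
 */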

static void
queue_service(queue_t *q)
{
	/*
	 * A queue on the list should have the QENAB flag set and should not
	 * have the QINSERVICE flag set. QINSERVICE is set when the queue is
	 * dequeued, and qenable_locked() doesn't enqueue a queue with
	 * QINSERVICE set.
	 */

	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* If we came here from the background thread, clear the flag. */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* Let drain_syncq know that it's being called in the background. */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}

static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter to signal that
	 * a qwriter(OUTER) is either preparing to run or is actually
	 * running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set to block
	 * entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since outer_exit
	 * does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked()
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}

/*
 * Replace the cred currently in the mblk with a different one, and also
 * update db_cpid.
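 *
 * Usage sketch (illustrative; not part of the original source): a caller
 * attributing a freshly allocated message to the current process might do:
 *
 *	mblk_t *mp = allocb(len, BPRI_MED);
 *	if (mp != NULL)
 *		mblk_setcred(mp, CRED(), curproc->p_pid);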
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *ocr = dbp->db_credp;

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID. */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

/*
 * If the src message has a cred, then replace the cred currently in the
 * mblk with it, and also update db_cpid.
 */
void
mblk_copycred(mblk_t *mp, const mblk_t *src)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *cr, *ocr;
	pid_t cpid;

	cr = msg_getcred(src, &cpid);
	if (cr == NULL)
		return;

	ocr = dbp->db_credp;
	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID. */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate the values for the M_DATA type. */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* Get the hardware checksum attribute. */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
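
/*
 * Usage sketch (illustrative; not part of the original source): for a
 * plain M_DATA message the mmd/pd arguments are unused, so a driver
 * advertising partial checksum offload might tag an outbound mblk and a
 * consumer might later recover the values:
 *
 *	(void) hcksum_assoc(mp, NULL, NULL, start, stuff, end, 0,
 *	    HCK_PARTIALCKSUM, KM_NOSLEEP);
 *	...
 *	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end,
 *	    &value, &flags);
 */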

void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT((flags & ~HW_LSO_FLAGS) == 0);

	/* Set the flags. */
	DB_LSOFLAGS(mp) |= flags;
	DB_LSOMSS(mp) = mss;
}

void
lso_info_cleanup(mblk_t *mp)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	/* Clear the flags. */
	DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
	DB_LSOMSS(mp) = 0;
}

/*
 * Checksum the len bytes of buffer *bp, folding in the partial checksum
 * psum (0 if none), and return the resulting 16-bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * bp is 16-bit aligned and len is a whole number of
		 * 16-bit words.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * bp isn't 16-bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * bp is 16-bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
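 *
 * (A hedged note, not part of the original source: the init/shutdown/fini
 * callbacks below are presumably registered during STREAMS initialization
 * via netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
 * str_stack_fini), so that one instance exists per network stack.)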
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t *ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * Set up autopush.
	 */
	sad_initspace(ss);

	/*
	 * Set up the mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}

/*
 * Note: run at zone shutdown rather than destroy, so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone. */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge *ep;
		ldi_handle_t lh;
		ldi_ident_t li;
		int ret;
		int rval;
		dev_t rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Release the layered ident. */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown() did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}