1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 /* All Rights Reserved */
23
24
25 /*
26 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
27 */
28
29 #include <sys/types.h>
30 #include <sys/sysmacros.h>
31 #include <sys/param.h>
32 #include <sys/errno.h>
33 #include <sys/signal.h>
34 #include <sys/stat.h>
35 #include <sys/proc.h>
36 #include <sys/cred.h>
37 #include <sys/user.h>
38 #include <sys/vnode.h>
39 #include <sys/file.h>
40 #include <sys/stream.h>
41 #include <sys/strsubr.h>
42 #include <sys/stropts.h>
43 #include <sys/tihdr.h>
44 #include <sys/var.h>
45 #include <sys/poll.h>
46 #include <sys/termio.h>
47 #include <sys/ttold.h>
48 #include <sys/systm.h>
49 #include <sys/uio.h>
50 #include <sys/cmn_err.h>
51 #include <sys/sad.h>
52 #include <sys/netstack.h>
53 #include <sys/priocntl.h>
54 #include <sys/jioctl.h>
55 #include <sys/procset.h>
56 #include <sys/session.h>
57 #include <sys/kmem.h>
58 #include <sys/filio.h>
59 #include <sys/vtrace.h>
60 #include <sys/debug.h>
61 #include <sys/strredir.h>
62 #include <sys/fs/fifonode.h>
63 #include <sys/fs/snode.h>
64 #include <sys/strlog.h>
65 #include <sys/strsun.h>
66 #include <sys/project.h>
67 #include <sys/kbio.h>
68 #include <sys/msio.h>
69 #include <sys/tty.h>
70 #include <sys/ptyvar.h>
71 #include <sys/vuid_event.h>
72 #include <sys/modctl.h>
73 #include <sys/sunddi.h>
74 #include <sys/sunldi_impl.h>
75 #include <sys/autoconf.h>
76 #include <sys/policy.h>
77 #include <sys/dld.h>
78 #include <sys/zone.h>
79 #include <c2/audit.h>
80 #include <sys/fcntl.h>
81
/*
 * This define helps improve the readability of streams code while
 * still maintaining a very old streams performance enhancement. The
 * enhancement is simply that every caller of straccess() performs
 * the first, cheap check that straccess() would do locally before
 * actually calling straccess(), thereby avoiding a large number of
 * unnecessary calls to straccess().
 */
90 #define i_straccess(x, y) ((stp->sd_sidp == NULL) ? 0 : \
91 (stp->sd_vnode->v_type == VFIFO) ? 0 : \
92 straccess((x), (y)))
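
/*
 * Note that i_straccess() expands in place: the cheap checks are done
 * inline and straccess() is only called when a job-control access check
 * might actually be needed. Callers pass their local 'stp' as the first
 * argument (see e.g. strread() below), since the inline checks reference
 * 'stp' directly; for a stream with no associated session
 * (sd_sidp == NULL) or for a FIFO, the macro evaluates to 0 without any
 * function call.
 */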
93
/*
 * What is mblk_pull_len?
 *
 * If a streams message consists of many short mblks,
 * a performance degradation occurs from per-mblk copyout overhead.
 * To decrease that overhead, messages that are likely to consist
 * of many small mblks are pulled up into one contiguous chunk of
 * memory.
 *
 * To avoid the processing overhead of examining every
 * mblk, a quick heuristic is used: if the first mblk in
 * the message is shorter than mblk_pull_len, it is likely
 * that the remaining mblks will be short as well.
 *
 * This heuristic was chosen after performance tests
 * indicated that anything more complex slowed down the main
 * code path.
 */
112 #define MBLK_PULL_LEN 64
113 uint32_t mblk_pull_len = MBLK_PULL_LEN;
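
/*
 * A sketch of how this tunable is meant to be consumed by the read-side
 * code (the actual consumer lies outside this excerpt, so the call shown
 * here is illustrative only, not a quote of the real code):
 *
 *	if (MBLKL(bp) < mblk_pull_len)
 *		(void) pullupmsg(bp, -1);
 *
 * That is, a message arriving as a chain of many tiny mblks (say, dozens
 * of 16-byte blocks) is copied into one contiguous buffer so that the
 * subsequent copyout work is done in a single pass, while a message
 * whose first mblk is already large is left untouched.
 */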
114
115 /*
116 * The sgttyb_handling flag controls the handling of the old BSD
117 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows:
118 *
119 * 0 - Emit no warnings at all and retain old, broken behavior.
120 * 1 - Emit no warnings and silently handle new semantics.
121 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used
122 * (once per system invocation). Handle with new semantics.
123 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is
124 * made (so that offenders drop core and are easy to debug).
125 *
126 * The "new semantics" are that TIOCGETP returns B38400 for
127 * sg_[io]speed if the corresponding value is over B38400, and that
128 * TIOCSET[PN] accept B38400 in these cases to mean "retain current
129 * bit rate."
130 */
131 int sgttyb_handling = 1;
132 static boolean_t sgttyb_complaint;
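
/*
 * For example, an administrator hunting for remaining users of the old
 * BSD ioctls could raise the level with the usual kernel tunable
 * mechanisms (shown only as an illustration of how the variable above is
 * meant to be tuned, not as a requirement of this code):
 *
 *	set sgttyb_handling = 2			(/etc/system: warn once)
 *	echo 'sgttyb_handling/W 3' | mdb -kw	(live system: send SIGSYS)
 */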
133
134 /* don't push drcompat module by default on Style-2 streams */
135 static int push_drcompat = 0;
136
137 /*
138 * id value used to distinguish between different ioctl messages
139 */
140 static uint32_t ioc_id;
141
142 static void putback(struct stdata *, queue_t *, mblk_t *, int);
143 static void strcleanall(struct vnode *);
144 static int strwsrv(queue_t *);
145 static int strdocmd(struct stdata *, struct strcmd *, cred_t *);
146
147 /*
148 * qinit and module_info structures for stream head read and write queues
149 */
150 struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW };
151 struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 };
152 struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info };
153 struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info };
154 struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT,
155 FIFOLOWAT };
156 struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 };
157 struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info };
158 struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info };
159
160 extern kmutex_t strresources; /* protects global resources */
161 extern kmutex_t muxifier; /* single-threads multiplexor creation */
162
163 static boolean_t msghasdata(mblk_t *bp);
164 #define msgnodata(bp) (!msghasdata(bp))
165
/*
 * Stream head locking notes:
 *	There are four monitors associated with the stream head:
 *	1. v_stream monitor: in stropen() and strclose() v_lock
 *	   is held while the association of vnode and stream
 *	   head is established or tested for.
 *	2. open/close/push/pop monitor: sd_lock is held while each
 *	   thread bids for exclusive access to this monitor
 *	   for opening or closing a stream. In addition, this
 *	   monitor is entered during pushes and pops. This
 *	   guarantees that during plumbing operations there
 *	   is only one thread trying to change the plumbing.
 *	   Any other threads present in the stream are only
 *	   using the plumbing.
 *	3. read/write monitor: in the case of read, a thread holds
 *	   sd_lock while trying to get data from the stream
 *	   head queue. If there is none to fulfill a read
 *	   request, it sets RSLEEP and calls cv_wait_sig() down
 *	   in strwaitq() to await the arrival of new data.
 *	   When new data arrives in strrput(), sd_lock is acquired
 *	   before testing for RSLEEP and calling cv_broadcast().
 *	   The behavior of strwrite(), strwsrv(), and WSLEEP
 *	   mirrors this.
 *	4. ioctl monitor: sd_lock is acquired to ensure that only one
 *	   thread is doing an ioctl at a time.
 */
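
/*
 * Monitor 3 in practice (a condensed sketch of the code paths below, not
 * an exact transcript of either routine): the reader, holding sd_lock,
 * finds the stream head read queue empty, sets RSLEEP in sd_flag and
 * blocks in cv_wait_sig(&q->q_wait, &stp->sd_lock) via strwaitq();
 * strrput(), after queueing the new message while holding sd_lock, tests
 * RSLEEP, clears it and calls cv_broadcast(&q->q_wait). strwrite() and
 * strwsrv() perform the same handshake with WSLEEP and the write side's
 * q_wait.
 */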
192
193 static int
194 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name,
195 int anchor, cred_t *crp, uint_t anchor_zoneid)
196 {
197 int error;
198 fmodsw_impl_t *fp;
199
200 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) {
201 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO;
202 return (error);
203 }
204 if (stp->sd_pushcnt >= nstrpush) {
205 return (EINVAL);
206 }
207
208 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) {
209 stp->sd_flag |= STREOPENFAIL;
210 return (EINVAL);
211 }
212
213 /*
214 * push new module and call its open routine via qattach
215 */
216 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0)
217 return (error);
218
219 /*
220 * Check to see if caller wants a STREAMS anchor
221 * put at this place in the stream, and add if so.
222 */
223 mutex_enter(&stp->sd_lock);
224 if (anchor == stp->sd_pushcnt) {
225 stp->sd_anchor = stp->sd_pushcnt;
226 stp->sd_anchorzone = anchor_zoneid;
227 }
228 mutex_exit(&stp->sd_lock);
229
230 return (0);
231 }
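
/*
 * push_mod() is the worker used below by stropen(), both for the
 * optional drcompat push on clone opens of old-style network drivers and
 * for applying the per-link (dld_autopush) and per-driver (sad/autopush)
 * module lists. An anchor is installed only when the requested anchor
 * position matches the push count reached by this push, so a caller that
 * wants no anchor simply passes a position that cannot match (as the
 * drcompat case below does with 0).
 */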
232
233 /*
234 * Open a stream device.
235 */
236 int
237 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp)
238 {
239 struct stdata *stp;
240 queue_t *qp;
241 int s;
242 dev_t dummydev, savedev;
243 struct autopush *ap;
244 struct dlautopush dlap;
245 int error = 0;
246 ssize_t rmin, rmax;
247 int cloneopen;
248 queue_t *brq;
249 major_t major;
250 str_stack_t *ss;
251 zoneid_t zoneid;
252 uint_t anchor;
253
254 /*
255 * If the stream already exists, wait for any open in progress
256 * to complete, then call the open function of each module and
257 * driver in the stream. Otherwise create the stream.
258 */
259 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp);
260 retry:
261 mutex_enter(&vp->v_lock);
262 if ((stp = vp->v_stream) != NULL) {
263
/*
 * A stream head already exists for this device; wait for any
 * open, close, or plumbing operation in progress on it to
 * complete before proceeding.
 */
268 mutex_exit(&vp->v_lock);
269
270 if (STRMATED(stp)) {
271 struct stdata *strmatep = stp->sd_mate;
272
273 STRLOCKMATES(stp);
274 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
275 if (flag & (FNDELAY|FNONBLOCK)) {
276 error = EAGAIN;
277 mutex_exit(&strmatep->sd_lock);
278 goto ckreturn;
279 }
280 mutex_exit(&stp->sd_lock);
281 if (!cv_wait_sig(&strmatep->sd_monitor,
282 &strmatep->sd_lock)) {
283 error = EINTR;
284 mutex_exit(&strmatep->sd_lock);
285 mutex_enter(&stp->sd_lock);
286 goto ckreturn;
287 }
288 mutex_exit(&strmatep->sd_lock);
289 goto retry;
290 }
291 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
292 if (flag & (FNDELAY|FNONBLOCK)) {
293 error = EAGAIN;
294 mutex_exit(&strmatep->sd_lock);
295 goto ckreturn;
296 }
297 mutex_exit(&strmatep->sd_lock);
298 if (!cv_wait_sig(&stp->sd_monitor,
299 &stp->sd_lock)) {
300 error = EINTR;
301 goto ckreturn;
302 }
303 mutex_exit(&stp->sd_lock);
304 goto retry;
305 }
306
307 if (stp->sd_flag & (STRDERR|STWRERR)) {
308 error = EIO;
309 mutex_exit(&strmatep->sd_lock);
310 goto ckreturn;
311 }
312
313 stp->sd_flag |= STWOPEN;
314 STRUNLOCKMATES(stp);
315 } else {
316 mutex_enter(&stp->sd_lock);
317 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
318 if (flag & (FNDELAY|FNONBLOCK)) {
319 error = EAGAIN;
320 goto ckreturn;
321 }
322 if (!cv_wait_sig(&stp->sd_monitor,
323 &stp->sd_lock)) {
324 error = EINTR;
325 goto ckreturn;
326 }
327 mutex_exit(&stp->sd_lock);
328 goto retry; /* could be clone! */
329 }
330
331 if (stp->sd_flag & (STRDERR|STWRERR)) {
332 error = EIO;
333 goto ckreturn;
334 }
335
336 stp->sd_flag |= STWOPEN;
337 mutex_exit(&stp->sd_lock);
338 }
339
/*
 * Open all modules and devices downstream to notify
 * them that another user is streaming. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
346 claimstr(stp->sd_wrq);
347 qp = stp->sd_wrq;
348 while (_SAMESTR(qp)) {
349 qp = qp->q_next;
350 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0)
351 break;
352 }
353 releasestr(stp->sd_wrq);
354 mutex_enter(&stp->sd_lock);
355 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR);
356 stp->sd_rerror = 0;
357 stp->sd_werror = 0;
358 ckreturn:
359 cv_broadcast(&stp->sd_monitor);
360 mutex_exit(&stp->sd_lock);
361 return (error);
362 }
363
364 /*
365 * This vnode isn't streaming. SPECFS already
366 * checked for multiple vnodes pointing to the
367 * same stream, so create a stream to the driver.
368 */
369 qp = allocq();
370 stp = shalloc(qp);
371
372 /*
373 * Initialize stream head. shalloc() has given us
374 * exclusive access, and we have the vnode locked;
375 * we can do whatever we want with stp.
376 */
377 stp->sd_flag = STWOPEN;
378 stp->sd_siglist = NULL;
379 stp->sd_pollist.ph_list = NULL;
380 stp->sd_sigflags = 0;
381 stp->sd_mark = NULL;
382 stp->sd_closetime = STRTIMOUT;
383 stp->sd_sidp = NULL;
384 stp->sd_pgidp = NULL;
385 stp->sd_vnode = vp;
386 stp->sd_rerror = 0;
387 stp->sd_werror = 0;
388 stp->sd_wroff = 0;
389 stp->sd_tail = 0;
390 stp->sd_iocblk = NULL;
391 stp->sd_cmdblk = NULL;
392 stp->sd_pushcnt = 0;
393 stp->sd_qn_minpsz = 0;
394 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */
395 stp->sd_maxblk = INFPSZ;
396 qp->q_ptr = _WR(qp)->q_ptr = stp;
397 STREAM(qp) = STREAM(_WR(qp)) = stp;
398 vp->v_stream = stp;
399 mutex_exit(&vp->v_lock);
400 if (vp->v_type == VFIFO) {
401 stp->sd_flag |= OLDNDELAY;
/*
 * This means that, for both pipes and fifos,
 * strwrite will send SIGPIPE if the other
 * end is closed. For putmsg it depends
 * on whether the caller is an XPG4_2
 * application or not.
 */
409 stp->sd_wput_opt = SW_SIGPIPE;
410
411 /* setq might sleep in kmem_alloc - avoid holding locks. */
412 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE,
413 SQ_CI|SQ_CO, B_FALSE);
414
415 set_qend(qp);
416 stp->sd_strtab = fifo_getinfo();
417 _WR(qp)->q_nfsrv = _WR(qp);
418 qp->q_nfsrv = qp;
419 /*
420 * Wake up others that are waiting for stream to be created.
421 */
422 mutex_enter(&stp->sd_lock);
/*
 * Nothing has been pushed on the stream yet, so the
 * optimized stream head packet sizes are just those
 * of the read queue.
 */
428 stp->sd_qn_minpsz = qp->q_minpsz;
429 stp->sd_qn_maxpsz = qp->q_maxpsz;
430 stp->sd_flag &= ~STWOPEN;
431 goto fifo_opendone;
432 }
433 /* setq might sleep in kmem_alloc - avoid holding locks. */
434 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE);
435
436 set_qend(qp);
437
438 /*
439 * Open driver and create stream to it (via qattach).
440 */
441 savedev = *devp;
442 cloneopen = (getmajor(*devp) == clone_major);
443 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) {
444 mutex_enter(&vp->v_lock);
445 vp->v_stream = NULL;
446 mutex_exit(&vp->v_lock);
447 mutex_enter(&stp->sd_lock);
448 cv_broadcast(&stp->sd_monitor);
449 mutex_exit(&stp->sd_lock);
450 freeq(_RD(qp));
451 shfree(stp);
452 return (error);
453 }
454 /*
455 * Set sd_strtab after open in order to handle clonable drivers
456 */
457 stp->sd_strtab = STREAMSTAB(getmajor(*devp));
458
/*
 * Historical note: dummydev used to be set prior to the initial
 * open (via qattach above), which made the value seen
 * inconsistent between an I_PUSH and an autopush of a module.
 */
464 dummydev = *devp;
465
466 /*
467 * For clone open of old style (Q not associated) network driver,
468 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH
469 */
470 brq = _RD(_WR(qp)->q_next);
471 major = getmajor(*devp);
472 if (push_drcompat && cloneopen && NETWORK_DRV(major) &&
473 ((brq->q_flag & _QASSOCIATED) == 0)) {
474 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0)
475 cmn_err(CE_WARN, "cannot push " DRMODNAME
476 " streams module");
477 }
478
479 if (!NETWORK_DRV(major)) {
480 savedev = *devp;
481 } else {
482 /*
483 * For network devices, process differently based on the
484 * return value from dld_autopush():
485 *
486 * 0: the passed-in device points to a GLDv3 datalink with
487 * per-link autopush configuration; use that configuration
488 * and ignore any per-driver autopush configuration.
489 *
490 * 1: the passed-in device points to a physical GLDv3
491 * datalink without per-link autopush configuration. The
492 * passed in device was changed to refer to the actual
493 * physical device (if it's not already); we use that new
494 * device to look up any per-driver autopush configuration.
495 *
496 * -1: neither of the above cases applied; use the initial
497 * device to look up any per-driver autopush configuration.
498 */
499 switch (dld_autopush(&savedev, &dlap)) {
500 case 0:
501 zoneid = crgetzoneid(crp);
502 for (s = 0; s < dlap.dap_npush; s++) {
503 error = push_mod(qp, &dummydev, stp,
504 dlap.dap_aplist[s], dlap.dap_anchor, crp,
505 zoneid);
506 if (error != 0)
507 break;
508 }
509 goto opendone;
510 case 1:
511 break;
512 case -1:
513 savedev = *devp;
514 break;
515 }
516 }
/*
 * Find the autopush configuration based on "savedev". Start with the
 * global zone. If not found, check in the local zone.
 */
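/*
 * For reference, the per-driver autopush entries being looked up here
 * are normally administered with autopush(1M)/sad(7D); a configuration
 * line (e.g. in /etc/iu.ap) has the general form
 *
 *	# major		minor	lastminor	modules
 *	ptsl		0	47		ldterm ttcompat
 *
 * The names and file shown are the conventional ones and serve only as
 * an illustration; the lookup below works purely from the (savedev,
 * netstack) pair via sad_ap_find_by_dev().
 */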
521 zoneid = GLOBAL_ZONEID;
522 retryap:
523 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))->
524 netstack_str;
525 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) {
526 netstack_rele(ss->ss_netstack);
527 if (zoneid == GLOBAL_ZONEID) {
528 /*
529 * None found. Also look in the zone's autopush table.
530 */
531 zoneid = crgetzoneid(crp);
532 if (zoneid != GLOBAL_ZONEID)
533 goto retryap;
534 }
535 goto opendone;
536 }
537 anchor = ap->ap_anchor;
538 zoneid = crgetzoneid(crp);
539 for (s = 0; s < ap->ap_npush; s++) {
540 error = push_mod(qp, &dummydev, stp, ap->ap_list[s],
541 anchor, crp, zoneid);
542 if (error != 0)
543 break;
544 }
545 sad_ap_rele(ap, ss);
546 netstack_rele(ss->ss_netstack);
547
548 opendone:
549
550 /*
551 * let specfs know that open failed part way through
552 */
553 if (error) {
554 mutex_enter(&stp->sd_lock);
555 stp->sd_flag |= STREOPENFAIL;
556 mutex_exit(&stp->sd_lock);
557 }
558
559 /*
560 * Wake up others that are waiting for stream to be created.
561 */
562 mutex_enter(&stp->sd_lock);
563 stp->sd_flag &= ~STWOPEN;
564
/*
 * As a performance optimization we cache the values of
 * q_minpsz and q_maxpsz of the module below the stream
 * head in the stream head itself.
 */
570 mutex_enter(QLOCK(stp->sd_wrq->q_next));
571 rmin = stp->sd_wrq->q_next->q_minpsz;
572 rmax = stp->sd_wrq->q_next->q_maxpsz;
573 mutex_exit(QLOCK(stp->sd_wrq->q_next));
574
/* do this processing up front as a performance optimization */
576 if (strmsgsz != 0) {
577 if (rmax == INFPSZ)
578 rmax = strmsgsz;
579 else
580 rmax = MIN(strmsgsz, rmax);
581 }
582
583 mutex_enter(QLOCK(stp->sd_wrq));
584 stp->sd_qn_minpsz = rmin;
585 stp->sd_qn_maxpsz = rmax;
586 mutex_exit(QLOCK(stp->sd_wrq));
587
588 fifo_opendone:
589 cv_broadcast(&stp->sd_monitor);
590 mutex_exit(&stp->sd_lock);
591 return (error);
592 }
593
594 static int strsink(queue_t *, mblk_t *);
595 static struct qinit deadrend = {
596 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL
597 };
598 static struct qinit deadwend = {
599 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL
600 };
601
602 /*
603 * Close a stream.
604 * This is called from closef() on the last close of an open stream.
605 * Strclean() will already have removed the siglist and pollist
606 * information, so all that remains is to remove all multiplexor links
607 * for the stream, pop all the modules (and the driver), and free the
608 * stream structure.
609 */
610
611 int
612 strclose(struct vnode *vp, int flag, cred_t *crp)
613 {
614 struct stdata *stp;
615 queue_t *qp;
616 int rval;
617 int freestp = 1;
618 queue_t *rmq;
619
620 TRACE_1(TR_FAC_STREAMS_FR,
621 TR_STRCLOSE, "strclose:%p", vp);
622 ASSERT(vp->v_stream);
623
624 stp = vp->v_stream;
625 ASSERT(!(stp->sd_flag & STPLEX));
626 qp = stp->sd_wrq;
627
628 /*
629 * Needed so that strpoll will return non-zero for this fd.
630 * Note that with POLLNOERR STRHUP does still cause POLLHUP.
631 */
632 mutex_enter(&stp->sd_lock);
633 stp->sd_flag |= STRHUP;
634 mutex_exit(&stp->sd_lock);
635
/*
 * If the registered process or process group did not have an
 * open instance of this stream then strclean would not be
 * called. Thus, at close time, any remaining siglist entries
 * are removed here.
 */
642 if (stp->sd_siglist != NULL)
643 strcleanall(vp);
644
645 ASSERT(stp->sd_siglist == NULL);
646 ASSERT(stp->sd_sigflags == 0);
647
648 if (STRMATED(stp)) {
649 struct stdata *strmatep = stp->sd_mate;
650 int waited = 1;
651
652 STRLOCKMATES(stp);
653 while (waited) {
654 waited = 0;
655 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
656 mutex_exit(&strmatep->sd_lock);
657 cv_wait(&stp->sd_monitor, &stp->sd_lock);
658 mutex_exit(&stp->sd_lock);
659 STRLOCKMATES(stp);
660 waited = 1;
661 }
662 while (strmatep->sd_flag &
663 (STWOPEN|STRCLOSE|STRPLUMB)) {
664 mutex_exit(&stp->sd_lock);
665 cv_wait(&strmatep->sd_monitor,
666 &strmatep->sd_lock);
667 mutex_exit(&strmatep->sd_lock);
668 STRLOCKMATES(stp);
669 waited = 1;
670 }
671 }
672 stp->sd_flag |= STRCLOSE;
673 STRUNLOCKMATES(stp);
674 } else {
675 mutex_enter(&stp->sd_lock);
676 stp->sd_flag |= STRCLOSE;
677 mutex_exit(&stp->sd_lock);
678 }
679
680 ASSERT(qp->q_first == NULL); /* No more delayed write */
681
682 /* Check if an I_LINK was ever done on this stream */
683 if (stp->sd_flag & STRHASLINKS) {
684 netstack_t *ns;
685 str_stack_t *ss;
686
687 ns = netstack_find_by_cred(crp);
688 ASSERT(ns != NULL);
689 ss = ns->netstack_str;
690 ASSERT(ss != NULL);
691
692 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss);
693 netstack_rele(ss->ss_netstack);
694 }
695
696 while (_SAMESTR(qp)) {
697 /*
698 * Holding sd_lock prevents q_next from changing in
699 * this stream.
700 */
701 mutex_enter(&stp->sd_lock);
702 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) {
703
704 /*
705 * sleep until awakened by strwsrv() or timeout
706 */
707 for (;;) {
708 mutex_enter(QLOCK(qp->q_next));
709 if (!(qp->q_next->q_mblkcnt)) {
710 mutex_exit(QLOCK(qp->q_next));
711 break;
712 }
713 stp->sd_flag |= WSLEEP;
714
715 /* ensure strwsrv gets enabled */
716 qp->q_next->q_flag |= QWANTW;
717 mutex_exit(QLOCK(qp->q_next));
718 /* get out if we timed out or recv'd a signal */
719 if (str_cv_wait(&qp->q_wait, &stp->sd_lock,
720 stp->sd_closetime, 0) <= 0) {
721 break;
722 }
723 }
724 stp->sd_flag &= ~WSLEEP;
725 }
726 mutex_exit(&stp->sd_lock);
727
728 rmq = qp->q_next;
729 if (rmq->q_flag & QISDRV) {
730 ASSERT(!_SAMESTR(rmq));
731 wait_sq_svc(_RD(qp)->q_syncq);
732 }
733
734 qdetach(_RD(rmq), 1, flag, crp, B_FALSE);
735 }
736
737 /*
738 * Since we call pollwakeup in close() now, the poll list should
739 * be empty in most cases. The only exception is the layered devices
740 * (e.g. the console drivers with redirection modules pushed on top
741 * of it). We have to do this after calling qdetach() because
742 * the redirection module won't have torn down the console
743 * redirection until after qdetach() has been invoked.
744 */
745 if (stp->sd_pollist.ph_list != NULL) {
746 pollwakeup(&stp->sd_pollist, POLLERR);
747 pollhead_clean(&stp->sd_pollist);
748 }
749 ASSERT(stp->sd_pollist.ph_list == NULL);
750 ASSERT(stp->sd_sidp == NULL);
751 ASSERT(stp->sd_pgidp == NULL);
752
753 /* Prevent qenable from re-enabling the stream head queue */
754 disable_svc(_RD(qp));
755
756 /*
757 * Wait until service procedure of each queue is
758 * run, if QINSERVICE is set.
759 */
760 wait_svc(_RD(qp));
761
762 /*
763 * Now, flush both queues.
764 */
765 flushq(_RD(qp), FLUSHALL);
766 flushq(qp, FLUSHALL);
767
768 /*
769 * If the write queue of the stream head is pointing to a
770 * read queue, we have a twisted stream. If the read queue
771 * is alive, convert the stream head queues into a dead end.
772 * If the read queue is dead, free the dead pair.
773 */
774 if (qp->q_next && !_SAMESTR(qp)) {
775 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */
776 flushq(qp->q_next, FLUSHALL); /* ensure no message */
777 shfree(qp->q_next->q_stream);
778 freeq(qp->q_next);
779 freeq(_RD(qp));
780 } else if (qp->q_next == _RD(qp)) { /* fifo */
781 freeq(_RD(qp));
782 } else { /* pipe */
783 freestp = 0;
784 /*
785 * The q_info pointers are never accessed when
786 * SQLOCK is held.
787 */
788 ASSERT(qp->q_syncq == _RD(qp)->q_syncq);
789 mutex_enter(SQLOCK(qp->q_syncq));
790 qp->q_qinfo = &deadwend;
791 _RD(qp)->q_qinfo = &deadrend;
792 mutex_exit(SQLOCK(qp->q_syncq));
793 }
794 } else {
795 freeq(_RD(qp)); /* free stream head queue pair */
796 }
797
798 mutex_enter(&vp->v_lock);
799 if (stp->sd_iocblk) {
800 if (stp->sd_iocblk != (mblk_t *)-1) {
801 freemsg(stp->sd_iocblk);
802 }
803 stp->sd_iocblk = NULL;
804 }
805 stp->sd_vnode = NULL;
806 vp->v_stream = NULL;
807 mutex_exit(&vp->v_lock);
808 mutex_enter(&stp->sd_lock);
809 freemsg(stp->sd_cmdblk);
810 stp->sd_cmdblk = NULL;
811 stp->sd_flag &= ~STRCLOSE;
812 cv_broadcast(&stp->sd_monitor);
813 mutex_exit(&stp->sd_lock);
814
815 if (freestp)
816 shfree(stp);
817 return (0);
818 }
819
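/*
 * strsink() is the put procedure of the dead-end read queue (deadrend
 * above) that strclose() leaves behind when one end of a pipe is closed
 * while the other end stays open. It quietly disposes of whatever the
 * surviving end sends: M_FLUSH messages carrying FLUSHW are turned
 * around with FLUSHR stripped and MSGNOLOOP set (to stop loops),
 * M_COPYIN/M_COPYOUT requests are answered with a failed M_IOCDATA,
 * M_IOCTL messages are nak'ed, and everything else is simply freed.
 */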
820 static int
821 strsink(queue_t *q, mblk_t *bp)
822 {
823 struct copyresp *resp;
824
825 switch (bp->b_datap->db_type) {
826 case M_FLUSH:
827 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
828 *bp->b_rptr &= ~FLUSHR;
829 bp->b_flag |= MSGNOLOOP;
830 /*
831 * Protect against the driver passing up
832 * messages after it has done a qprocsoff.
833 */
834 if (_OTHERQ(q)->q_next == NULL)
835 freemsg(bp);
836 else
837 qreply(q, bp);
838 } else {
839 freemsg(bp);
840 }
841 break;
842
843 case M_COPYIN:
844 case M_COPYOUT:
845 if (bp->b_cont) {
846 freemsg(bp->b_cont);
847 bp->b_cont = NULL;
848 }
849 bp->b_datap->db_type = M_IOCDATA;
850 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
851 resp = (struct copyresp *)bp->b_rptr;
852 resp->cp_rval = (caddr_t)1; /* failure */
853 /*
854 * Protect against the driver passing up
855 * messages after it has done a qprocsoff.
856 */
857 if (_OTHERQ(q)->q_next == NULL)
858 freemsg(bp);
859 else
860 qreply(q, bp);
861 break;
862
863 case M_IOCTL:
864 if (bp->b_cont) {
865 freemsg(bp->b_cont);
866 bp->b_cont = NULL;
867 }
868 bp->b_datap->db_type = M_IOCNAK;
869 /*
870 * Protect against the driver passing up
871 * messages after it has done a qprocsoff.
872 */
873 if (_OTHERQ(q)->q_next == NULL)
874 freemsg(bp);
875 else
876 qreply(q, bp);
877 break;
878
879 default:
880 freemsg(bp);
881 break;
882 }
883
884 return (0);
885 }
886
887 /*
888 * Clean up after a process when it closes a stream. This is called
889 * from closef for all closes, whereas strclose is called only for the
890 * last close on a stream. The siglist is scanned for entries for the
891 * current process, and these are removed.
892 */
893 void
894 strclean(struct vnode *vp)
895 {
896 strsig_t *ssp, *pssp, *tssp;
897 stdata_t *stp;
898 int update = 0;
899
900 TRACE_1(TR_FAC_STREAMS_FR,
901 TR_STRCLEAN, "strclean:%p", vp);
902 stp = vp->v_stream;
903 pssp = NULL;
904 mutex_enter(&stp->sd_lock);
905 ssp = stp->sd_siglist;
906 while (ssp) {
907 if (ssp->ss_pidp == curproc->p_pidp) {
908 tssp = ssp->ss_next;
909 if (pssp)
910 pssp->ss_next = tssp;
911 else
912 stp->sd_siglist = tssp;
913 mutex_enter(&pidlock);
914 PID_RELE(ssp->ss_pidp);
915 mutex_exit(&pidlock);
916 kmem_free(ssp, sizeof (strsig_t));
917 update = 1;
918 ssp = tssp;
919 } else {
920 pssp = ssp;
921 ssp = ssp->ss_next;
922 }
923 }
924 if (update) {
925 stp->sd_sigflags = 0;
926 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
927 stp->sd_sigflags |= ssp->ss_events;
928 }
929 mutex_exit(&stp->sd_lock);
930 }
931
/*
 * Used on the last close to remove any remaining items on the siglist.
 * These could be present on the siglist due to I_ESETSIG calls that
 * use process groups, or due to processes that do not have an open
 * file descriptor for this stream (such entries would not be removed
 * by strclean).
 */
938 static void
939 strcleanall(struct vnode *vp)
940 {
941 strsig_t *ssp, *nssp;
942 stdata_t *stp;
943
944 stp = vp->v_stream;
945 mutex_enter(&stp->sd_lock);
946 ssp = stp->sd_siglist;
947 stp->sd_siglist = NULL;
948 while (ssp) {
949 nssp = ssp->ss_next;
950 mutex_enter(&pidlock);
951 PID_RELE(ssp->ss_pidp);
952 mutex_exit(&pidlock);
953 kmem_free(ssp, sizeof (strsig_t));
954 ssp = nssp;
955 }
956 stp->sd_sigflags = 0;
957 mutex_exit(&stp->sd_lock);
958 }
959
/*
 * Retrieve the next message from the logical stream head read queue
 * using either rwnext (if this is a sync stream) or getq_noenab.
 * It is the caller's responsibility to call qbackenable after
 * it is finished with the message. The caller should not call
 * qbackenable until after any putback calls, to avoid spurious
 * backenabling.
 */
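/*
 * Illustrative caller pattern (a sketch distilled from strread() below,
 * not an additional interface):
 *
 *	mutex_enter(&stp->sd_lock);
 *	bp = strget(stp, q, uiop, first, &error);
 *	...consume bp; putback() any unread remainder under sd_lock...
 *	mutex_exit(&stp->sd_lock);
 *	qbackenable(q, band);
 */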
967 mblk_t *
968 strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first,
969 int *errorp)
970 {
971 mblk_t *bp;
972 int error;
973 ssize_t rbytes = 0;
974
975 /* Holding sd_lock prevents the read queue from changing */
976 ASSERT(MUTEX_HELD(&stp->sd_lock));
977
978 if (uiop != NULL && stp->sd_struiordq != NULL &&
979 q->q_first == NULL &&
980 (!first || (stp->sd_wakeq & RSLEEP))) {
/*
 * The stream supports rwnext() for the read side.
 * If this is the first time we're called by e.g. strread,
 * only do the downcall if there is a deferred wakeup
 * (registered in sd_wakeq).
 */
987 struiod_t uiod;
988
989 if (first)
990 stp->sd_wakeq &= ~RSLEEP;
991
992 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov,
993 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov));
994 uiod.d_mp = 0;
995 /*
996 * Mark that a thread is in rwnext on the read side
997 * to prevent strrput from nacking ioctls immediately.
998 * When the last concurrent rwnext returns
999 * the ioctls are nack'ed.
1000 */
1001 ASSERT(MUTEX_HELD(&stp->sd_lock));
1002 stp->sd_struiodnak++;
1003 /*
1004 * Note: rwnext will drop sd_lock.
1005 */
1006 error = rwnext(q, &uiod);
1007 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
1008 mutex_enter(&stp->sd_lock);
1009 stp->sd_struiodnak--;
1010 while (stp->sd_struiodnak == 0 &&
1011 ((bp = stp->sd_struionak) != NULL)) {
1012 stp->sd_struionak = bp->b_next;
1013 bp->b_next = NULL;
1014 bp->b_datap->db_type = M_IOCNAK;
1015 /*
1016 * Protect against the driver passing up
1017 * messages after it has done a qprocsoff.
1018 */
1019 if (_OTHERQ(q)->q_next == NULL)
1020 freemsg(bp);
1021 else {
1022 mutex_exit(&stp->sd_lock);
1023 qreply(q, bp);
1024 mutex_enter(&stp->sd_lock);
1025 }
1026 }
1027 ASSERT(MUTEX_HELD(&stp->sd_lock));
1028 if (error == 0 || error == EWOULDBLOCK) {
1029 if ((bp = uiod.d_mp) != NULL) {
1030 *errorp = 0;
1031 ASSERT(MUTEX_HELD(&stp->sd_lock));
1032 return (bp);
1033 }
1034 error = 0;
1035 } else if (error == EINVAL) {
1036 /*
1037 * The stream plumbing must have
1038 * changed while we were away, so
1039 * just turn off rwnext()s.
1040 */
1041 error = 0;
1042 } else if (error == EBUSY) {
/*
 * The module might have data in transit using putnext.
 * Fall back on waiting + getq.
 */
1047 error = 0;
1048 } else {
1049 *errorp = error;
1050 ASSERT(MUTEX_HELD(&stp->sd_lock));
1051 return (NULL);
1052 }
1053 /*
1054 * Try a getq in case a rwnext() generated mblk
1055 * has bubbled up via strrput().
1056 */
1057 }
1058 *errorp = 0;
1059 ASSERT(MUTEX_HELD(&stp->sd_lock));
1060
/*
 * If we have a valid uio, try to use it as a guide for how
 * many bytes to retrieve from the queue via getq_noenab().
 * Doing this can avoid unnecessary counting of overlong
 * messages in putback(). We currently only do this for sockets
 * and only if there is no sd_rputdatafunc hook.
 *
 * The sd_rputdatafunc hook transforms the entire message
 * before any bytes in it can be given to a client. So, rbytes
 * must be 0 if there is a hook.
 */
1072 if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK) &&
1073 (stp->sd_rputdatafunc == NULL))
1074 rbytes = uiop->uio_resid;
1075
1076 return (getq_noenab(q, rbytes));
1077 }
1078
1079 /*
1080 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'.
1081 * If the message does not fit in the uio the remainder of it is returned;
1082 * otherwise NULL is returned. Any embedded zero-length mblk_t's are
1083 * consumed, even if uio_resid reaches zero. On error, `*errorp' is set to
1084 * the error code, the message is consumed, and NULL is returned.
1085 */
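/*
 * For example (numbers chosen purely for illustration): a 100-byte
 * message made up of three mblks of 40, 40 and 20 bytes, copied out into
 * a uio with uio_resid == 50, frees the fully consumed first mblk,
 * advances b_rptr of the second by 10 bytes, and returns the remaining
 * two-mblk chain; the same message with uio_resid >= 100 is consumed
 * entirely and NULL is returned.
 */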
1086 static mblk_t *
1087 struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp)
1088 {
1089 int error;
1090 ptrdiff_t n;
1091 mblk_t *nbp;
1092
1093 ASSERT(bp->b_wptr >= bp->b_rptr);
1094
1095 do {
1096 if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) {
1097 ASSERT(n > 0);
1098
1099 error = uiomove(bp->b_rptr, n, UIO_READ, uiop);
1100 if (error != 0) {
1101 freemsg(bp);
1102 *errorp = error;
1103 return (NULL);
1104 }
1105 }
1106
1107 bp->b_rptr += n;
1108 while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) {
1109 nbp = bp;
1110 bp = bp->b_cont;
1111 freeb(nbp);
1112 }
1113 } while (bp != NULL && uiop->uio_resid > 0);
1114
1115 *errorp = 0;
1116 return (bp);
1117 }
1118
/*
 * Read a stream according to the mode flags in sd_flag:
 *
 * (default mode)		  - Byte stream; msg boundaries are ignored
 * RD_MSGDIS (msg discard)	  - Read on msg boundaries and throw away
 *				    any data remaining in the msg
 * RD_MSGNODIS (msg non-discard)  - Read on msg boundaries and put back
 *				    any remaining data on the head of the
 *				    read queue
 *
 * Consume readable messages on the front of the queue until the read
 * byte count (uiop->uio_resid) is satisfied, the readable messages are
 * exhausted, or a message boundary is reached in a message mode. If no
 * data was read and the stream was not opened with the NDELAY flag,
 * block until data arrives. Otherwise return the data read and update
 * the count.
 *
 * In default mode a 0 length message signifies end-of-file and terminates
 * a read in progress. The 0 length message is removed from the queue
 * only if it is the only message read (no data is read).
 *
 * An attempt to read an M_PROTO or M_PCPROTO message results in an
 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS is set.
 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data.
 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message
 * are unlinked from any M_DATA blocks in the message, the protos are
 * thrown away, and the data is read.
 */
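/*
 * For reference, applications select these modes with the streamio(7I)
 * I_SRDOPT ioctl, e.g. (user-level code, shown only as an illustration):
 *
 *	ioctl(fd, I_SRDOPT, RMSGN | RPROTDAT);
 *
 * which corresponds to RD_MSGNODIS | RD_PROTDAT in sd_read_opt here.
 */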
1146 /* ARGSUSED */
1147 int
1148 strread(struct vnode *vp, struct uio *uiop, cred_t *crp)
1149 {
1150 struct stdata *stp;
1151 mblk_t *bp, *nbp;
1152 queue_t *q;
1153 int error = 0;
1154 uint_t old_sd_flag;
1155 int first;
1156 char rflg;
1157 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
1158 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
1159 short delim;
1160 unsigned char pri = 0;
1161 char waitflag;
1162 unsigned char type;
1163
1164 TRACE_1(TR_FAC_STREAMS_FR,
1165 TR_STRREAD_ENTER, "strread:%p", vp);
1166 ASSERT(vp->v_stream);
1167 stp = vp->v_stream;
1168
1169 mutex_enter(&stp->sd_lock);
1170
1171 if ((error = i_straccess(stp, JCREAD)) != 0) {
1172 mutex_exit(&stp->sd_lock);
1173 return (error);
1174 }
1175
1176 if (stp->sd_flag & (STRDERR|STPLEX)) {
1177 error = strgeterr(stp, STRDERR|STPLEX, 0);
1178 if (error != 0) {
1179 mutex_exit(&stp->sd_lock);
1180 return (error);
1181 }
1182 }
1183
1184 /*
1185 * Loop terminates when uiop->uio_resid == 0.
1186 */
1187 rflg = 0;
1188 waitflag = READWAIT;
1189 q = _RD(stp->sd_wrq);
1190 for (;;) {
1191 ASSERT(MUTEX_HELD(&stp->sd_lock));
1192 old_sd_flag = stp->sd_flag;
1193 mark = 0;
1194 delim = 0;
1195 first = 1;
1196 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) {
1197 int done = 0;
1198
1199 ASSERT(MUTEX_HELD(&stp->sd_lock));
1200
1201 if (error != 0)
1202 goto oops;
1203
1204 if (stp->sd_flag & (STRHUP|STREOF)) {
1205 goto oops;
1206 }
1207 if (rflg && !(stp->sd_flag & STRDELIM)) {
1208 goto oops;
1209 }
1210 /*
1211 * If a read(fd,buf,0) has been done, there is no
1212 * need to sleep. We always have zero bytes to
1213 * return.
1214 */
1215 if (uiop->uio_resid == 0) {
1216 goto oops;
1217 }
1218
1219 qbackenable(q, 0);
1220
1221 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT,
1222 "strread calls strwaitq:%p, %p, %p",
1223 vp, uiop, crp);
1224 if ((error = strwaitq(stp, waitflag, uiop->uio_resid,
1225 uiop->uio_fmode, -1, &done)) != 0 || done) {
1226 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE,
1227 "strread error or done:%p, %p, %p",
1228 vp, uiop, crp);
1229 if ((uiop->uio_fmode & FNDELAY) &&
1230 (stp->sd_flag & OLDNDELAY) &&
1231 (error == EAGAIN))
1232 error = 0;
1233 goto oops;
1234 }
1235 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE,
1236 "strread awakes:%p, %p, %p", vp, uiop, crp);
1237 if ((error = i_straccess(stp, JCREAD)) != 0) {
1238 goto oops;
1239 }
1240 first = 0;
1241 }
1242
1243 ASSERT(MUTEX_HELD(&stp->sd_lock));
1244 ASSERT(bp);
1245 pri = bp->b_band;
/*
 * Extract any mark information. If the message is not
 * completely consumed this information will be put in the mblk
 * that is put back.
 * If MSGMARKNEXT is set and the message is completely consumed
 * the STRATMARK flag will be set below. Likewise, if
 * MSGNOTMARKNEXT is set and the message is
 * completely consumed STRNOTATMARK will be set.
 *
 * For some unknown reason strread only breaks the read at the
 * last mark.
 */
1258 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
1259 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
1260 (MSGMARKNEXT|MSGNOTMARKNEXT));
1261 if (mark != 0 && bp == stp->sd_mark) {
1262 if (rflg) {
1263 putback(stp, q, bp, pri);
1264 goto oops;
1265 }
1266 mark |= _LASTMARK;
1267 stp->sd_mark = NULL;
1268 }
1269 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM))
1270 delim = 1;
1271 mutex_exit(&stp->sd_lock);
1272
1273 if (STREAM_NEEDSERVICE(stp))
1274 stream_runservice(stp);
1275
1276 type = bp->b_datap->db_type;
1277
1278 switch (type) {
1279
1280 case M_DATA:
1281 ismdata:
1282 if (msgnodata(bp)) {
1283 if (mark || delim) {
1284 freemsg(bp);
1285 } else if (rflg) {
1286
/*
 * If data has already been read, put the
 * zero-length message back on the queue;
 * otherwise free the msg and return 0.
 */
1292 bp->b_band = pri;
1293 mutex_enter(&stp->sd_lock);
1294 putback(stp, q, bp, pri);
1295 mutex_exit(&stp->sd_lock);
1296 } else {
1297 freemsg(bp);
1298 }
1299 error = 0;
1300 goto oops1;
1301 }
1302
1303 rflg = 1;
1304 waitflag |= NOINTR;
1305 bp = struiocopyout(bp, uiop, &error);
1306 if (error != 0)
1307 goto oops1;
1308
1309 mutex_enter(&stp->sd_lock);
1310 if (bp) {
1311 /*
1312 * Have remaining data in message.
1313 * Free msg if in discard mode.
1314 */
1315 if (stp->sd_read_opt & RD_MSGDIS) {
1316 freemsg(bp);
1317 } else {
1318 bp->b_band = pri;
1319 if ((mark & _LASTMARK) &&
1320 (stp->sd_mark == NULL))
1321 stp->sd_mark = bp;
1322 bp->b_flag |= mark & ~_LASTMARK;
1323 if (delim)
1324 bp->b_flag |= MSGDELIM;
1325 if (msgnodata(bp))
1326 freemsg(bp);
1327 else
1328 putback(stp, q, bp, pri);
1329 }
1330 } else {
/*
 * Consumed the complete message.
 * Move the MSG*MARKNEXT information
 * to the stream head just in case
 * the read queue becomes empty.
 *
 * If the stream head was at the mark
 * (STRATMARK) before we dropped sd_lock above,
 * and some data was consumed, then we have
 * moved past the mark and STRATMARK is
 * cleared. However, if a message arrived in
 * strrput during the copyout above, causing
 * STRATMARK to be set, we cannot clear that
 * flag here.
 */
1346 if (mark &
1347 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
1348 if (mark & MSGMARKNEXT) {
1349 stp->sd_flag &= ~STRNOTATMARK;
1350 stp->sd_flag |= STRATMARK;
1351 } else if (mark & MSGNOTMARKNEXT) {
1352 stp->sd_flag &= ~STRATMARK;
1353 stp->sd_flag |= STRNOTATMARK;
1354 } else {
1355 stp->sd_flag &=
1356 ~(STRATMARK|STRNOTATMARK);
1357 }
1358 } else if (rflg && (old_sd_flag & STRATMARK)) {
1359 stp->sd_flag &= ~STRATMARK;
1360 }
1361 }
1362
/*
 * Check for signal messages at the front of the read
 * queue and generate the signal(s) if appropriate.
 * The only signal message that can be on the queue at
 * this point is M_SIG.
 */
1369 while ((((bp = q->q_first)) != NULL) &&
1370 (bp->b_datap->db_type == M_SIG)) {
1371 bp = getq_noenab(q, 0);
1372 /*
1373 * sd_lock is held so the content of the
1374 * read queue can not change.
1375 */
1376 ASSERT(bp != NULL && DB_TYPE(bp) == M_SIG);
1377 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
1378 mutex_exit(&stp->sd_lock);
1379 freemsg(bp);
1380 if (STREAM_NEEDSERVICE(stp))
1381 stream_runservice(stp);
1382 mutex_enter(&stp->sd_lock);
1383 }
1384
1385 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) ||
1386 delim ||
1387 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) {
1388 goto oops;
1389 }
1390 continue;
1391
1392 case M_SIG:
1393 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band);
1394 freemsg(bp);
1395 mutex_enter(&stp->sd_lock);
1396 continue;
1397
1398 case M_PROTO:
1399 case M_PCPROTO:
1400 /*
1401 * Only data messages are readable.
1402 * Any others generate an error, unless
1403 * RD_PROTDIS or RD_PROTDAT is set.
1404 */
1405 if (stp->sd_read_opt & RD_PROTDAT) {
1406 for (nbp = bp; nbp; nbp = nbp->b_next) {
1407 if ((nbp->b_datap->db_type ==
1408 M_PROTO) ||
1409 (nbp->b_datap->db_type ==
1410 M_PCPROTO)) {
1411 nbp->b_datap->db_type = M_DATA;
1412 } else {
1413 break;
1414 }
1415 }
1416 /*
1417 * clear stream head hi pri flag based on
1418 * first message
1419 */
1420 if (type == M_PCPROTO) {
1421 mutex_enter(&stp->sd_lock);
1422 stp->sd_flag &= ~STRPRI;
1423 mutex_exit(&stp->sd_lock);
1424 }
1425 goto ismdata;
1426 } else if (stp->sd_read_opt & RD_PROTDIS) {
1427 /*
1428 * discard non-data messages
1429 */
1430 while (bp &&
1431 ((bp->b_datap->db_type == M_PROTO) ||
1432 (bp->b_datap->db_type == M_PCPROTO))) {
1433 nbp = unlinkb(bp);
1434 freeb(bp);
1435 bp = nbp;
1436 }
1437 /*
1438 * clear stream head hi pri flag based on
1439 * first message
1440 */
1441 if (type == M_PCPROTO) {
1442 mutex_enter(&stp->sd_lock);
1443 stp->sd_flag &= ~STRPRI;
1444 mutex_exit(&stp->sd_lock);
1445 }
1446 if (bp) {
1447 bp->b_band = pri;
1448 goto ismdata;
1449 } else {
1450 break;
1451 }
1452 }
1453 /* FALLTHRU */
1454 case M_PASSFP:
1455 if ((bp->b_datap->db_type == M_PASSFP) &&
1456 (stp->sd_read_opt & RD_PROTDIS)) {
1457 freemsg(bp);
1458 break;
1459 }
1460 mutex_enter(&stp->sd_lock);
1461 putback(stp, q, bp, pri);
1462 mutex_exit(&stp->sd_lock);
1463 if (rflg == 0)
1464 error = EBADMSG;
1465 goto oops1;
1466
1467 default:
1468 /*
1469 * Garbage on stream head read queue.
1470 */
1471 cmn_err(CE_WARN, "bad %x found at stream head\n",
1472 bp->b_datap->db_type);
1473 freemsg(bp);
1474 goto oops1;
1475 }
1476 mutex_enter(&stp->sd_lock);
1477 }
1478 oops:
1479 mutex_exit(&stp->sd_lock);
1480 oops1:
1481 qbackenable(q, pri);
1482 return (error);
1483 #undef _LASTMARK
1484 }
1485
1486 /*
1487 * Default processing of M_PROTO/M_PCPROTO messages.
1488 * Determine which wakeups and signals are needed.
1489 * This can be replaced by a user-specified procedure for kernel users
1490 * of STREAMS.
1491 */
1492 /* ARGSUSED */
1493 mblk_t *
1494 strrput_proto(vnode_t *vp, mblk_t *mp,
1495 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1496 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1497 {
1498 *wakeups = RSLEEP;
1499 *allmsgsigs = 0;
1500
1501 switch (mp->b_datap->db_type) {
1502 case M_PROTO:
1503 if (mp->b_band == 0) {
1504 *firstmsgsigs = S_INPUT | S_RDNORM;
1505 *pollwakeups = POLLIN | POLLRDNORM;
1506 } else {
1507 *firstmsgsigs = S_INPUT | S_RDBAND;
1508 *pollwakeups = POLLIN | POLLRDBAND;
1509 }
1510 break;
1511 case M_PCPROTO:
1512 *firstmsgsigs = S_HIPRI;
1513 *pollwakeups = POLLPRI;
1514 break;
1515 }
1516 return (mp);
1517 }
1518
1519 /*
1520 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and
1521 * M_PASSFP messages.
1522 * Determine which wakeups and signals are needed.
1523 * This can be replaced by a user-specified procedure for kernel users
1524 * of STREAMS.
1525 */
1526 /* ARGSUSED */
1527 mblk_t *
1528 strrput_misc(vnode_t *vp, mblk_t *mp,
1529 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1530 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1531 {
1532 *wakeups = 0;
1533 *firstmsgsigs = 0;
1534 *allmsgsigs = 0;
1535 *pollwakeups = 0;
1536 return (mp);
1537 }
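
/*
 * strrput_proto() and strrput_misc() above are the stream head's default
 * handlers for the sd_rprotofunc and sd_rmiscfunc hooks invoked by
 * strrput() below. Kernel consumers of STREAMS (sockfs, for instance)
 * can substitute their own handlers to change which wakeups, signals and
 * poll events a given message type generates; the mechanism for
 * installing replacements lives outside this excerpt.
 */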
1538
1539 /*
1540 * Stream read put procedure. Called from downstream driver/module
1541 * with messages for the stream head. Data, protocol, and in-stream
1542 * signal messages are placed on the queue, others are handled directly.
1543 */
1544 int
1545 strrput(queue_t *q, mblk_t *bp)
1546 {
1547 struct stdata *stp;
1548 ulong_t rput_opt;
1549 strwakeup_t wakeups;
1550 strsigset_t firstmsgsigs; /* Signals if first message on queue */
1551 strsigset_t allmsgsigs; /* Signals for all messages */
1552 strsigset_t signals; /* Signals events to generate */
1553 strpollset_t pollwakeups;
1554 mblk_t *nextbp;
1555 uchar_t band = 0;
1556 int hipri_sig;
1557
1558 stp = (struct stdata *)q->q_ptr;
1559 /*
1560 * Use rput_opt for optimized access to the SR_ flags except
1561 * SR_POLLIN. That flag has to be checked under sd_lock since it
1562 * is modified by strpoll().
1563 */
1564 rput_opt = stp->sd_rput_opt;
1565
1566 ASSERT(qclaimed(q));
1567 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER,
1568 "strrput called with message type:q %p bp %p", q, bp);
1569
1570 /*
1571 * Perform initial processing and pass to the parameterized functions.
1572 */
1573 ASSERT(bp->b_next == NULL);
1574
1575 switch (bp->b_datap->db_type) {
1576 case M_DATA:
/*
 * sockfs is the only consumer of STREOF; when it is set,
 * it implies that the receiver is not interested in receiving
 * any more data, so the mblk is freed to prevent unnecessary
 * message queueing at the stream head.
 */
1583 if (stp->sd_flag == STREOF) {
1584 freemsg(bp);
1585 return (0);
1586 }
1587 if ((rput_opt & SR_IGN_ZEROLEN) &&
1588 bp->b_rptr == bp->b_wptr && msgnodata(bp)) {
/*
 * Ignore zero-length M_DATA messages. These might be
 * generated by some transports.
 * Even though they are ignored, zero-length M_DATA
 * messages should still affect the atmark tracking and
 * should wake up a thread sleeping in strwaitmark.
 */
1596 mutex_enter(&stp->sd_lock);
1597 if (bp->b_flag & MSGMARKNEXT) {
1598 /*
1599 * Record the position of the mark either
1600 * in q_last or in STRATMARK.
1601 */
1602 if (q->q_last != NULL) {
1603 q->q_last->b_flag &= ~MSGNOTMARKNEXT;
1604 q->q_last->b_flag |= MSGMARKNEXT;
1605 } else {
1606 stp->sd_flag &= ~STRNOTATMARK;
1607 stp->sd_flag |= STRATMARK;
1608 }
1609 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1610 /*
1611 * Record that this is not the position of
1612 * the mark either in q_last or in
1613 * STRNOTATMARK.
1614 */
1615 if (q->q_last != NULL) {
1616 q->q_last->b_flag &= ~MSGMARKNEXT;
1617 q->q_last->b_flag |= MSGNOTMARKNEXT;
1618 } else {
1619 stp->sd_flag &= ~STRATMARK;
1620 stp->sd_flag |= STRNOTATMARK;
1621 }
1622 }
1623 if (stp->sd_flag & RSLEEP) {
1624 stp->sd_flag &= ~RSLEEP;
1625 cv_broadcast(&q->q_wait);
1626 }
1627 mutex_exit(&stp->sd_lock);
1628 freemsg(bp);
1629 return (0);
1630 }
1631 wakeups = RSLEEP;
1632 if (bp->b_band == 0) {
1633 firstmsgsigs = S_INPUT | S_RDNORM;
1634 pollwakeups = POLLIN | POLLRDNORM;
1635 } else {
1636 firstmsgsigs = S_INPUT | S_RDBAND;
1637 pollwakeups = POLLIN | POLLRDBAND;
1638 }
1639 if (rput_opt & SR_SIGALLDATA)
1640 allmsgsigs = firstmsgsigs;
1641 else
1642 allmsgsigs = 0;
1643
1644 mutex_enter(&stp->sd_lock);
1645 if ((rput_opt & SR_CONSOL_DATA) &&
1646 (q->q_last != NULL) &&
1647 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) {
1648 /*
1649 * Consolidate an M_DATA message onto an M_DATA,
1650 * M_PROTO, or M_PCPROTO by merging it with q_last.
1651 * The consolidation does not take place if
1652 * the old message is marked with either of the
1653 * marks or the delim flag or if the new
1654 * message is marked with MSGMARK. The MSGMARK
1655 * check is needed to handle the odd semantics of
1656 * MSGMARK where essentially the whole message
1657 * is to be treated as marked.
1658 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the
1659 * new message to the front of the b_cont chain.
1660 */
1661 mblk_t *lbp = q->q_last;
1662 unsigned char db_type = lbp->b_datap->db_type;
1663
1664 if ((db_type == M_DATA || db_type == M_PROTO ||
1665 db_type == M_PCPROTO) &&
1666 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) {
1667 rmvq_noenab(q, lbp);
1668 /*
1669 * The first message in the b_cont list
1670 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
1671 * We need to handle the case where we
1672 * are appending:
1673 *
1674 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
1675 * 2) a MSGMARKNEXT to a plain message.
1676 * 3) a MSGNOTMARKNEXT to a plain message
1677 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
1678 * message.
1679 *
1680 * Thus we never append a MSGMARKNEXT or
1681 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
1682 */
1683 if (bp->b_flag & MSGMARKNEXT) {
1684 lbp->b_flag |= MSGMARKNEXT;
1685 lbp->b_flag &= ~MSGNOTMARKNEXT;
1686 bp->b_flag &= ~MSGMARKNEXT;
1687 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1688 lbp->b_flag |= MSGNOTMARKNEXT;
1689 bp->b_flag &= ~MSGNOTMARKNEXT;
1690 }
1691
1692 linkb(lbp, bp);
1693 bp = lbp;
1694 /*
1695 * The new message logically isn't the first
1696 * even though the q_first check below thinks
1697 * it is. Clear the firstmsgsigs to make it
1698 * not appear to be first.
1699 */
1700 firstmsgsigs = 0;
1701 }
1702 }
1703 break;
1704
1705 case M_PASSFP:
1706 wakeups = RSLEEP;
1707 allmsgsigs = 0;
1708 if (bp->b_band == 0) {
1709 firstmsgsigs = S_INPUT | S_RDNORM;
1710 pollwakeups = POLLIN | POLLRDNORM;
1711 } else {
1712 firstmsgsigs = S_INPUT | S_RDBAND;
1713 pollwakeups = POLLIN | POLLRDBAND;
1714 }
1715 mutex_enter(&stp->sd_lock);
1716 break;
1717
1718 case M_PROTO:
1719 case M_PCPROTO:
1720 ASSERT(stp->sd_rprotofunc != NULL);
1721 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp,
1722 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1723 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\
1724 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)
1725 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\
1726 POLLWRBAND)
1727
1728 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1729 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1730 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1731 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1732
1733 mutex_enter(&stp->sd_lock);
1734 break;
1735
1736 default:
1737 ASSERT(stp->sd_rmiscfunc != NULL);
1738 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp,
1739 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1740 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1741 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1742 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1743 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1744 #undef ALLSIG
1745 #undef ALLPOLL
1746 mutex_enter(&stp->sd_lock);
1747 break;
1748 }
1749 ASSERT(MUTEX_HELD(&stp->sd_lock));
1750
1751 /* By default generate superset of signals */
1752 signals = (firstmsgsigs | allmsgsigs);
1753
1754 /*
1755 * The proto and misc functions can return multiple messages
1756 * as a b_next chain. Such messages are processed separately.
1757 */
1758 one_more:
1759 hipri_sig = 0;
1760 if (bp == NULL) {
1761 nextbp = NULL;
1762 } else {
1763 nextbp = bp->b_next;
1764 bp->b_next = NULL;
1765
1766 switch (bp->b_datap->db_type) {
1767 case M_PCPROTO:
1768 /*
1769 * Only one priority protocol message is allowed at the
1770 * stream head at a time.
1771 */
1772 if (stp->sd_flag & STRPRI) {
1773 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR,
1774 "M_PCPROTO already at head");
1775 freemsg(bp);
1776 mutex_exit(&stp->sd_lock);
1777 goto done;
1778 }
1779 stp->sd_flag |= STRPRI;
1780 hipri_sig = 1;
1781 /* FALLTHRU */
1782 case M_DATA:
1783 case M_PROTO:
1784 case M_PASSFP:
1785 band = bp->b_band;
1786 /*
1787 * Marking doesn't work well when messages
1788 * are marked in more than one band. We only
1789 * remember the last message received, even if
1790 * it is placed on the queue ahead of other
1791 * marked messages.
1792 */
1793 if (bp->b_flag & MSGMARK)
1794 stp->sd_mark = bp;
1795 (void) putq(q, bp);
1796
1797 /*
1798 * If message is a PCPROTO message, always use
1799 * firstmsgsigs to determine if a signal should be
1800 * sent as strrput is the only place to send
1801 * signals for PCPROTO. Other messages are based on
1802 * the STRGETINPROG flag. The flag determines if
1803 * strrput or (k)strgetmsg will be responsible for
1804 * sending the signals, in the firstmsgsigs case.
1805 */
1806 if ((hipri_sig == 1) ||
1807 (((stp->sd_flag & STRGETINPROG) == 0) &&
1808 (q->q_first == bp)))
1809 signals = (firstmsgsigs | allmsgsigs);
1810 else
1811 signals = allmsgsigs;
1812 break;
1813
1814 default:
1815 mutex_exit(&stp->sd_lock);
1816 (void) strrput_nondata(q, bp);
1817 mutex_enter(&stp->sd_lock);
1818 break;
1819 }
1820 }
1821 ASSERT(MUTEX_HELD(&stp->sd_lock));
1822 /*
1823 * Wake sleeping read/getmsg and cancel deferred wakeup
1824 */
1825 if (wakeups & RSLEEP)
1826 stp->sd_wakeq &= ~RSLEEP;
1827
1828 wakeups &= stp->sd_flag;
1829 if (wakeups & RSLEEP) {
1830 stp->sd_flag &= ~RSLEEP;
1831 cv_broadcast(&q->q_wait);
1832 }
1833 if (wakeups & WSLEEP) {
1834 stp->sd_flag &= ~WSLEEP;
1835 cv_broadcast(&_WR(q)->q_wait);
1836 }
1837
1838 if (pollwakeups != 0) {
1839 if (pollwakeups == (POLLIN | POLLRDNORM)) {
1840 /*
1841 * Can't use rput_opt since it was not
1842 * read when sd_lock was held and SR_POLLIN is changed
1843 * by strpoll() under sd_lock.
1844 */
1845 if (!(stp->sd_rput_opt & SR_POLLIN))
1846 goto no_pollwake;
1847 stp->sd_rput_opt &= ~SR_POLLIN;
1848 }
1849 mutex_exit(&stp->sd_lock);
1850 pollwakeup(&stp->sd_pollist, pollwakeups);
1851 mutex_enter(&stp->sd_lock);
1852 }
1853 no_pollwake:
1854
1855 /*
1856 * strsendsig can handle multiple signals with a
1857 * single call.
1858 */
1859 if (stp->sd_sigflags & signals)
1860 strsendsig(stp->sd_siglist, signals, band, 0);
1861 mutex_exit(&stp->sd_lock);
1862
1863
1864 done:
1865 if (nextbp == NULL)
1866 return (0);
1867
/*
 * Any signals were handled the first time around.
 * Wakeups and pollwakeups are redone to avoid any race
 * conditions - not all of the messages are queued until
 * the last message has been processed by strrput.
 */
1874 bp = nextbp;
1875 signals = firstmsgsigs = allmsgsigs = 0;
1876 mutex_enter(&stp->sd_lock);
1877 goto one_more;
1878 }
1879
1880 static void
1881 log_dupioc(queue_t *rq, mblk_t *bp)
1882 {
1883 queue_t *wq, *qp;
1884 char *modnames, *mnp, *dname;
1885 size_t maxmodstr;
1886 boolean_t islast;
1887
/*
 * Allocate a buffer large enough to hold the names of nstrpush modules
 * and one driver, with spaces between them and a NUL terminator. If we
 * can't get memory, then we'll just log the driver name.
 */
1893 maxmodstr = nstrpush * (FMNAMESZ + 1);
1894 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP);
1895
1896 /* march down write side to print log message down to the driver */
1897 wq = WR(rq);
1898
1899 /* make sure q_next doesn't shift around while we're grabbing data */
1900 claimstr(wq);
1901 qp = wq->q_next;
1902 do {
1903 dname = Q2NAME(qp);
1904 islast = !SAMESTR(qp) || qp->q_next == NULL;
1905 if (modnames == NULL) {
1906 /*
1907 * If we don't have memory, then get the driver name in
1908 * the log where we can see it. Note that memory
1909 * pressure is a possible cause of these sorts of bugs.
1910 */
1911 if (islast) {
1912 modnames = dname;
1913 maxmodstr = 0;
1914 }
1915 } else {
1916 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname);
1917 if (!islast)
1918 *mnp++ = ' ';
1919 }
1920 qp = qp->q_next;
1921 } while (!islast);
1922 releasestr(wq);
1923 /* Cannot happen unless stream head is corrupt. */
1924 ASSERT(modnames != NULL);
1925 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1,
1926 SL_CONSOLE|SL_TRACE|SL_ERROR,
1927 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s",
1928 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd,
1929 (DB_TYPE(bp) == M_IOCACK ? "ACK" : "NAK"), modnames);
1930 if (maxmodstr != 0)
1931 kmem_free(modnames, maxmodstr);
1932 }
1933
1934 int
1935 strrput_nondata(queue_t *q, mblk_t *bp)
1936 {
1937 struct stdata *stp;
1938 struct iocblk *iocbp;
1939 struct stroptions *sop;
1940 struct copyreq *reqp;
1941 struct copyresp *resp;
1942 unsigned char bpri;
1943 unsigned char flushed_already = 0;
1944
1945 stp = (struct stdata *)q->q_ptr;
1946
1947 ASSERT(!(stp->sd_flag & STPLEX));
1948 ASSERT(qclaimed(q));
1949
1950 switch (bp->b_datap->db_type) {
1951 case M_ERROR:
/*
 * An error has occurred downstream; the errno value(s)
 * are in the first byte(s) of the message.
 */
1956 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */
1957 unsigned char rw = 0;
1958
1959 mutex_enter(&stp->sd_lock);
1960 if (*bp->b_rptr != NOERROR) { /* read error */
1961 if (*bp->b_rptr != 0) {
1962 if (stp->sd_flag & STRDERR)
1963 flushed_already |= FLUSHR;
1964 stp->sd_flag |= STRDERR;
1965 rw |= FLUSHR;
1966 } else {
1967 stp->sd_flag &= ~STRDERR;
1968 }
1969 stp->sd_rerror = *bp->b_rptr;
1970 }
1971 bp->b_rptr++;
1972 if (*bp->b_rptr != NOERROR) { /* write error */
1973 if (*bp->b_rptr != 0) {
1974 if (stp->sd_flag & STWRERR)
1975 flushed_already |= FLUSHW;
1976 stp->sd_flag |= STWRERR;
1977 rw |= FLUSHW;
1978 } else {
1979 stp->sd_flag &= ~STWRERR;
1980 }
1981 stp->sd_werror = *bp->b_rptr;
1982 }
1983 if (rw) {
1984 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE,
1985 "strrput cv_broadcast:q %p, bp %p",
1986 q, bp);
1987 cv_broadcast(&q->q_wait); /* readers */
1988 cv_broadcast(&_WR(q)->q_wait); /* writers */
1989 cv_broadcast(&stp->sd_monitor); /* ioctllers */
1990
1991 mutex_exit(&stp->sd_lock);
1992 pollwakeup(&stp->sd_pollist, POLLERR);
1993 mutex_enter(&stp->sd_lock);
1994
1995 if (stp->sd_sigflags & S_ERROR)
1996 strsendsig(stp->sd_siglist, S_ERROR, 0,
1997 ((rw & FLUSHR) ? stp->sd_rerror :
1998 stp->sd_werror));
1999 mutex_exit(&stp->sd_lock);
2000 /*
2001 * Send the M_FLUSH only
2002 * for the first M_ERROR
2003 * message on the stream
2004 */
2005 if (flushed_already == rw) {
2006 freemsg(bp);
2007 return (0);
2008 }
2009
2010 bp->b_datap->db_type = M_FLUSH;
2011 *bp->b_rptr = rw;
2012 bp->b_wptr = bp->b_rptr + 1;
2013 /*
2014 * Protect against the driver
2015 * passing up messages after
2016 * it has done a qprocsoff
2017 */
2018 if (_OTHERQ(q)->q_next == NULL)
2019 freemsg(bp);
2020 else
2021 qreply(q, bp);
2022 return (0);
2023 } else
2024 mutex_exit(&stp->sd_lock);
2025 } else if (*bp->b_rptr != 0) { /* Old flavor */
2026 if (stp->sd_flag & (STRDERR|STWRERR))
2027 flushed_already = FLUSHRW;
2028 mutex_enter(&stp->sd_lock);
2029 stp->sd_flag |= (STRDERR|STWRERR);
2030 stp->sd_rerror = *bp->b_rptr;
2031 stp->sd_werror = *bp->b_rptr;
2032 TRACE_2(TR_FAC_STREAMS_FR,
2033 TR_STRRPUT_WAKE2,
2034 "strrput wakeup #2:q %p, bp %p", q, bp);
2035 cv_broadcast(&q->q_wait); /* the readers */
2036 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2037 cv_broadcast(&stp->sd_monitor); /* ioctllers */
2038
2039 mutex_exit(&stp->sd_lock);
2040 pollwakeup(&stp->sd_pollist, POLLERR);
2041 mutex_enter(&stp->sd_lock);
2042
2043 if (stp->sd_sigflags & S_ERROR)
2044 strsendsig(stp->sd_siglist, S_ERROR, 0,
2045 (stp->sd_werror ? stp->sd_werror :
2046 stp->sd_rerror));
2047 mutex_exit(&stp->sd_lock);
2048
2049 /*
2050 * Send the M_FLUSH only
2051 * for the first M_ERROR
2052 * message on the stream
2053 */
2054 if (flushed_already != FLUSHRW) {
2055 bp->b_datap->db_type = M_FLUSH;
2056 *bp->b_rptr = FLUSHRW;
2057 /*
2058 * Protect against the driver passing up
2059 * messages after it has done a
2060 * qprocsoff.
2061 */
2062 if (_OTHERQ(q)->q_next == NULL)
2063 freemsg(bp);
2064 else
2065 qreply(q, bp);
2066 return (0);
2067 }
2068 }
2069 freemsg(bp);
2070 return (0);
2071
2072 case M_HANGUP:
2073
2074 freemsg(bp);
2075 mutex_enter(&stp->sd_lock);
2076 stp->sd_werror = ENXIO;
2077 stp->sd_flag |= STRHUP;
2078 stp->sd_flag &= ~(WSLEEP|RSLEEP);
2079
2080 /*
2081 * send signal if controlling tty
2082 */
2083
2084 if (stp->sd_sidp) {
2085 prsignal(stp->sd_sidp, SIGHUP);
2086 if (stp->sd_sidp != stp->sd_pgidp)
2087 pgsignal(stp->sd_pgidp, SIGTSTP);
2088 }
2089
2090 /*
2091 * wake up read, write, and exception pollers and
2092 * reset wakeup mechanism.
2093 */
2094 cv_broadcast(&q->q_wait); /* the readers */
2095 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2096 cv_broadcast(&stp->sd_monitor); /* the ioctllers */
2097 strhup(stp);
2098 mutex_exit(&stp->sd_lock);
2099 return (0);
2100
2101 case M_UNHANGUP:
2102 freemsg(bp);
2103 mutex_enter(&stp->sd_lock);
2104 stp->sd_werror = 0;
2105 stp->sd_flag &= ~STRHUP;
2106 mutex_exit(&stp->sd_lock);
2107 return (0);
2108
2109 case M_SIG:
2110 /*
2111 * Someone downstream wants to post a signal. The
2112 * signal to post is contained in the first byte of the
2113 * message. If the message would go on the front of
2114 * the queue, send a signal to the process group
2115 * (if not SIGPOLL) or to the siglist processes
2116 * (SIGPOLL). If something is already on the queue,
2117 * OR if we are delivering a delayed suspend (*sigh*
2118 * another "tty" hack) and there's no one sleeping already,
2119 * just enqueue the message.
2120 */
2121 mutex_enter(&stp->sd_lock);
2122 if (q->q_first || (*bp->b_rptr == SIGTSTP &&
2123 !(stp->sd_flag & RSLEEP))) {
2124 (void) putq(q, bp);
2125 mutex_exit(&stp->sd_lock);
2126 return (0);
2127 }
2128 mutex_exit(&stp->sd_lock);
2129 /* FALLTHRU */
2130
2131 case M_PCSIG:
2132 /*
2133 * Don't enqueue, just post the signal.
2134 */
2135 strsignal(stp, *bp->b_rptr, 0L);
2136 freemsg(bp);
2137 return (0);
2138
2139 case M_CMD:
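/*
* Reply to an earlier _I_CMD request (see strdocmd()).
* Drop it unless it is well-formed and a thread is still
* waiting for it (STRCMDWAIT).
*/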
2140 if (MBLKL(bp) != sizeof (cmdblk_t)) {
2141 freemsg(bp);
2142 return (0);
2143 }
2144
2145 mutex_enter(&stp->sd_lock);
2146 if (stp->sd_flag & STRCMDWAIT) {
2147 ASSERT(stp->sd_cmdblk == NULL);
2148 stp->sd_cmdblk = bp;
2149 cv_broadcast(&stp->sd_monitor);
2150 mutex_exit(&stp->sd_lock);
2151 } else {
2152 mutex_exit(&stp->sd_lock);
2153 freemsg(bp);
2154 }
2155 return (0);
2156
2157 case M_FLUSH:
2158 /*
2159 * Flush queues. The indication of which queues to flush
2160 * is in the first byte of the message. If the read queue
2161 * is specified, then flush it. If FLUSHBAND is set, just
2162 * flush the band specified by the second byte of the message.
2163 *
* If a module has issued an M_SETOPTS to not flush high-
* priority messages off of the stream head, then pass this
* flag into the flushq code to preserve such messages.
2167 */
2168
2169 if (*bp->b_rptr & FLUSHR) {
2170 mutex_enter(&stp->sd_lock);
2171 if (*bp->b_rptr & FLUSHBAND) {
2172 ASSERT((bp->b_wptr - bp->b_rptr) >= 2);
2173 flushband(q, *(bp->b_rptr + 1), FLUSHALL);
2174 } else
2175 flushq_common(q, FLUSHALL,
2176 stp->sd_read_opt & RFLUSHPCPROT);
2177 if ((q->q_first == NULL) ||
2178 (q->q_first->b_datap->db_type < QPCTL))
2179 stp->sd_flag &= ~STRPRI;
2180 else {
2181 ASSERT(stp->sd_flag & STRPRI);
2182 }
2183 mutex_exit(&stp->sd_lock);
2184 }
2185 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
2186 *bp->b_rptr &= ~FLUSHR;
2187 bp->b_flag |= MSGNOLOOP;
2188 /*
2189 * Protect against the driver passing up
2190 * messages after it has done a qprocsoff.
2191 */
2192 if (_OTHERQ(q)->q_next == NULL)
2193 freemsg(bp);
2194 else
2195 qreply(q, bp);
2196 return (0);
2197 }
2198 freemsg(bp);
2199 return (0);
2200
2201 case M_IOCACK:
2202 case M_IOCNAK:
2203 iocbp = (struct iocblk *)bp->b_rptr;
2204 /*
2205 * If not waiting for ACK or NAK then just free msg.
2206 * If incorrect id sequence number then just free msg.
2207 * If already have ACK or NAK for user then this is a
2208 * duplicate, display a warning and free the msg.
2209 */
2210 mutex_enter(&stp->sd_lock);
2211 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2212 (stp->sd_iocid != iocbp->ioc_id)) {
/*
* If the ACK/NAK is a dup, display a message.
* A dup is when sd_iocid == ioc_id and
* sd_iocblk == <valid ptr> or -1 (the former
* is when an ioctl has been put on the stream
* head but has not yet been consumed; the
* latter is when it has been consumed).
*/
2221 if ((stp->sd_iocid == iocbp->ioc_id) &&
2222 (stp->sd_iocblk != NULL)) {
2223 log_dupioc(q, bp);
2224 }
2225 freemsg(bp);
2226 mutex_exit(&stp->sd_lock);
2227 return (0);
2228 }
2229
2230 /*
2231 * Assign ACK or NAK to user and wake up.
2232 */
2233 stp->sd_iocblk = bp;
2234 cv_broadcast(&stp->sd_monitor);
2235 mutex_exit(&stp->sd_lock);
2236 return (0);
2237
2238 case M_COPYIN:
2239 case M_COPYOUT:
2240 reqp = (struct copyreq *)bp->b_rptr;
2241
2242 /*
2243 * If not waiting for ACK or NAK then just fail request.
2244 * If already have ACK, NAK, or copy request, then just
2245 * fail request.
2246 * If incorrect id sequence number then just fail request.
2247 */
2248 mutex_enter(&stp->sd_lock);
2249 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2250 (stp->sd_iocid != reqp->cq_id)) {
2251 if (bp->b_cont) {
2252 freemsg(bp->b_cont);
2253 bp->b_cont = NULL;
2254 }
2255 bp->b_datap->db_type = M_IOCDATA;
2256 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
2257 resp = (struct copyresp *)bp->b_rptr;
2258 resp->cp_rval = (caddr_t)1; /* failure */
2259 mutex_exit(&stp->sd_lock);
2260 putnext(stp->sd_wrq, bp);
2261 return (0);
2262 }
2263
2264 /*
2265 * Assign copy request to user and wake up.
2266 */
2267 stp->sd_iocblk = bp;
2268 cv_broadcast(&stp->sd_monitor);
2269 mutex_exit(&stp->sd_lock);
2270 return (0);
2271
2272 case M_SETOPTS:
2273 /*
2274 * Set stream head options (read option, write offset,
2275 * min/max packet size, and/or high/low water marks for
2276 * the read side only).
2277 */
2278
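/*
* bpri remembers which band had its water marks changed so
* that it can be backenabled via qbackenable() below.
*/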
2279 bpri = 0;
2280 sop = (struct stroptions *)bp->b_rptr;
2281 mutex_enter(&stp->sd_lock);
2282 if (sop->so_flags & SO_READOPT) {
2283 switch (sop->so_readopt & RMODEMASK) {
2284 case RNORM:
2285 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
2286 break;
2287
2288 case RMSGD:
2289 stp->sd_read_opt =
2290 ((stp->sd_read_opt & ~RD_MSGNODIS) |
2291 RD_MSGDIS);
2292 break;
2293
2294 case RMSGN:
2295 stp->sd_read_opt =
2296 ((stp->sd_read_opt & ~RD_MSGDIS) |
2297 RD_MSGNODIS);
2298 break;
2299 }
2300 switch (sop->so_readopt & RPROTMASK) {
2301 case RPROTNORM:
2302 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
2303 break;
2304
2305 case RPROTDAT:
2306 stp->sd_read_opt =
2307 ((stp->sd_read_opt & ~RD_PROTDIS) |
2308 RD_PROTDAT);
2309 break;
2310
2311 case RPROTDIS:
2312 stp->sd_read_opt =
2313 ((stp->sd_read_opt & ~RD_PROTDAT) |
2314 RD_PROTDIS);
2315 break;
2316 }
2317 switch (sop->so_readopt & RFLUSHMASK) {
2318 case RFLUSHPCPROT:
2319 /*
2320 * This sets the stream head to NOT flush
2321 * M_PCPROTO messages.
2322 */
2323 stp->sd_read_opt |= RFLUSHPCPROT;
2324 break;
2325 }
2326 }
2327 if (sop->so_flags & SO_ERROPT) {
2328 switch (sop->so_erropt & RERRMASK) {
2329 case RERRNORM:
2330 stp->sd_flag &= ~STRDERRNONPERSIST;
2331 break;
2332 case RERRNONPERSIST:
2333 stp->sd_flag |= STRDERRNONPERSIST;
2334 break;
2335 }
2336 switch (sop->so_erropt & WERRMASK) {
2337 case WERRNORM:
2338 stp->sd_flag &= ~STWRERRNONPERSIST;
2339 break;
2340 case WERRNONPERSIST:
2341 stp->sd_flag |= STWRERRNONPERSIST;
2342 break;
2343 }
2344 }
2345 if (sop->so_flags & SO_COPYOPT) {
2346 if (sop->so_copyopt & ZCVMSAFE) {
2347 stp->sd_copyflag |= STZCVMSAFE;
2348 stp->sd_copyflag &= ~STZCVMUNSAFE;
2349 } else if (sop->so_copyopt & ZCVMUNSAFE) {
2350 stp->sd_copyflag |= STZCVMUNSAFE;
2351 stp->sd_copyflag &= ~STZCVMSAFE;
2352 }
2353
2354 if (sop->so_copyopt & COPYCACHED) {
2355 stp->sd_copyflag |= STRCOPYCACHED;
2356 }
2357 }
2358 if (sop->so_flags & SO_WROFF)
2359 stp->sd_wroff = sop->so_wroff;
2360 if (sop->so_flags & SO_TAIL)
2361 stp->sd_tail = sop->so_tail;
2362 if (sop->so_flags & SO_MINPSZ)
2363 q->q_minpsz = sop->so_minpsz;
2364 if (sop->so_flags & SO_MAXPSZ)
2365 q->q_maxpsz = sop->so_maxpsz;
2366 if (sop->so_flags & SO_MAXBLK)
2367 stp->sd_maxblk = sop->so_maxblk;
2368 if (sop->so_flags & SO_HIWAT) {
2369 if (sop->so_flags & SO_BAND) {
2370 if (strqset(q, QHIWAT,
2371 sop->so_band, sop->so_hiwat)) {
2372 cmn_err(CE_WARN, "strrput: could not "
2373 "allocate qband\n");
2374 } else {
2375 bpri = sop->so_band;
2376 }
2377 } else {
2378 q->q_hiwat = sop->so_hiwat;
2379 }
2380 }
2381 if (sop->so_flags & SO_LOWAT) {
2382 if (sop->so_flags & SO_BAND) {
2383 if (strqset(q, QLOWAT,
2384 sop->so_band, sop->so_lowat)) {
2385 cmn_err(CE_WARN, "strrput: could not "
2386 "allocate qband\n");
2387 } else {
2388 bpri = sop->so_band;
2389 }
2390 } else {
2391 q->q_lowat = sop->so_lowat;
2392 }
2393 }
2394 if (sop->so_flags & SO_MREADON)
2395 stp->sd_flag |= SNDMREAD;
2396 if (sop->so_flags & SO_MREADOFF)
2397 stp->sd_flag &= ~SNDMREAD;
2398 if (sop->so_flags & SO_NDELON)
2399 stp->sd_flag |= OLDNDELAY;
2400 if (sop->so_flags & SO_NDELOFF)
2401 stp->sd_flag &= ~OLDNDELAY;
2402 if (sop->so_flags & SO_ISTTY)
2403 stp->sd_flag |= STRISTTY;
2404 if (sop->so_flags & SO_ISNTTY)
2405 stp->sd_flag &= ~STRISTTY;
2406 if (sop->so_flags & SO_TOSTOP)
2407 stp->sd_flag |= STRTOSTOP;
2408 if (sop->so_flags & SO_TONSTOP)
2409 stp->sd_flag &= ~STRTOSTOP;
2410 if (sop->so_flags & SO_DELIM)
2411 stp->sd_flag |= STRDELIM;
2412 if (sop->so_flags & SO_NODELIM)
2413 stp->sd_flag &= ~STRDELIM;
2414
2415 mutex_exit(&stp->sd_lock);
2416 freemsg(bp);
2417
2418 /* Check backenable in case the water marks changed */
2419 qbackenable(q, bpri);
2420 return (0);
2421
2422 /*
2423 * The following set of cases deal with situations where two stream
2424 * heads are connected to each other (twisted streams). These messages
2425 * have no meaning at the stream head.
2426 */
2427 case M_BREAK:
2428 case M_CTL:
2429 case M_DELAY:
2430 case M_START:
2431 case M_STOP:
2432 case M_IOCDATA:
2433 case M_STARTI:
2434 case M_STOPI:
2435 freemsg(bp);
2436 return (0);
2437
2438 case M_IOCTL:
/*
* Always NAK this condition; an M_IOCTL arriving at the
* stream head makes no sense.
* If there are one or more threads in the read-side rwnext,
* we have to defer the NAK until those threads return
* (in strget).
*/
2446 mutex_enter(&stp->sd_lock);
2447 if (stp->sd_struiodnak != 0) {
/*
* Defer the NAK at the stream head.  Queue it at
* the end of the list.
*/
2452 mblk_t *mp = stp->sd_struionak;
2453
2454 while (mp && mp->b_next)
2455 mp = mp->b_next;
2456 if (mp)
2457 mp->b_next = bp;
2458 else
2459 stp->sd_struionak = bp;
2460 bp->b_next = NULL;
2461 mutex_exit(&stp->sd_lock);
2462 return (0);
2463 }
2464 mutex_exit(&stp->sd_lock);
2465
2466 bp->b_datap->db_type = M_IOCNAK;
2467 /*
2468 * Protect against the driver passing up
2469 * messages after it has done a qprocsoff.
2470 */
2471 if (_OTHERQ(q)->q_next == NULL)
2472 freemsg(bp);
2473 else
2474 qreply(q, bp);
2475 return (0);
2476
2477 default:
2478 #ifdef DEBUG
2479 cmn_err(CE_WARN,
2480 "bad message type %x received at stream head\n",
2481 bp->b_datap->db_type);
2482 #endif
2483 freemsg(bp);
2484 return (0);
2485 }
2486
2487 /* NOTREACHED */
2488 }
2489
2490 /*
2491 * Check if the stream pointed to by `stp' can be written to, and return an
2492 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set.
2493 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream,
2494 * then always return EPIPE and send a SIGPIPE to the invoking thread.
2495 */
2496 static int
2497 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok)
2498 {
2499 int error;
2500
2501 ASSERT(MUTEX_HELD(&stp->sd_lock));
2502
2503 /*
2504 * For modem support, POSIX states that on writes, EIO should
2505 * be returned if the stream has been hung up.
2506 */
2507 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP)
2508 error = EIO;
2509 else
2510 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0);
2511
2512 if (error != 0) {
2513 if (!(stp->sd_flag & STPLEX) &&
2514 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) {
2515 tsignal(curthread, SIGPIPE);
2516 error = EPIPE;
2517 }
2518 }
2519
2520 return (error);
2521 }
2522
2523 /*
2524 * Copyin and send data down a stream.
2525 * The caller will allocate and copyin any control part that precedes the
2526 * message and pass that in as mctl.
2527 *
2528 * Caller should *not* hold sd_lock.
2529 * When EWOULDBLOCK is returned the caller has to redo the canputnext
2530 * under sd_lock in order to avoid missing a backenabling wakeup.
2531 *
2532 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA.
2533 *
2534 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages.
2535 * For sync streams we can only ignore flow control by reverting to using
2536 * putnext.
2537 *
2538 * If sd_maxblk is less than *iosize this routine might return without
2539 * transferring all of *iosize. In all cases, on return *iosize will contain
2540 * the amount of data that was transferred.
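*
* Illustrative sketch only (not a real caller): a zero-length, delimited,
* band-0 message with no control part could be sent as
*
*	ssize_t iosize = 0;
*	error = strput(stp, NULL, uiop, &iosize, MSGDELIM, 0, 0);
*
* strwrite_common() below shows the real calling sequence.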
2541 */
2542 static int
2543 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize,
2544 int b_flag, int pri, int flags)
2545 {
2546 struiod_t uiod;
2547 mblk_t *mp;
2548 queue_t *wqp = stp->sd_wrq;
2549 int error = 0;
2550 ssize_t count = *iosize;
2551
2552 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
2553
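/*
* If the write side supports synchronous streams (sd_struiowrq is
* non-NULL) and there is uio data to send, postpone the copyin so
* that it can be done via rwnext()/struioget() below.
*/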
2554 if (uiop != NULL && count >= 0)
2555 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0;
2556
2557 if (!(flags & STRUIO_POSTPONE)) {
2558 /*
2559 * Use regular canputnext, strmakedata, putnext sequence.
2560 */
2561 if (pri == 0) {
2562 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2563 freemsg(mctl);
2564 return (EWOULDBLOCK);
2565 }
2566 } else {
2567 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) {
2568 freemsg(mctl);
2569 return (EWOULDBLOCK);
2570 }
2571 }
2572
2573 if ((error = strmakedata(iosize, uiop, stp, flags,
2574 &mp)) != 0) {
2575 freemsg(mctl);
/*
* Change the return code to ENOMEM so that it is
* not confused with the flow-control EAGAIN.
*/
2581
2582 if (error == EAGAIN)
2583 return (ENOMEM);
2584 else
2585 return (error);
2586 }
2587 if (mctl != NULL) {
2588 if (mctl->b_cont == NULL)
2589 mctl->b_cont = mp;
2590 else if (mp != NULL)
2591 linkb(mctl, mp);
2592 mp = mctl;
2593 } else if (mp == NULL)
2594 return (0);
2595
2596 mp->b_flag |= b_flag;
2597 mp->b_band = (uchar_t)pri;
2598
2599 if (flags & MSG_IGNFLOW) {
2600 /*
2601 * XXX Hack: Don't get stuck running service
2602 * procedures. This is needed for sockfs when
2603 * sending the unbind message out of the rput
2604 * procedure - we don't want a put procedure
2605 * to run service procedures.
2606 */
2607 putnext(wqp, mp);
2608 } else {
2609 stream_willservice(stp);
2610 putnext(wqp, mp);
2611 stream_runservice(stp);
2612 }
2613 return (0);
2614 }
2615 /*
2616 * Stream supports rwnext() for the write side.
2617 */
2618 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) {
2619 freemsg(mctl);
2620 /*
2621 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled".
2622 */
2623 return (error == EAGAIN ? ENOMEM : error);
2624 }
2625 if (mctl != NULL) {
2626 if (mctl->b_cont == NULL)
2627 mctl->b_cont = mp;
2628 else if (mp != NULL)
2629 linkb(mctl, mp);
2630 mp = mctl;
2631 } else if (mp == NULL) {
2632 return (0);
2633 }
2634
2635 mp->b_flag |= b_flag;
2636 mp->b_band = (uchar_t)pri;
2637
2638 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov,
2639 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov));
2640 uiod.d_uio.uio_offset = 0;
2641 uiod.d_mp = mp;
2642 error = rwnext(wqp, &uiod);
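/*
* If rwnext() consumed the message (d_mp cleared), the data was
* taken directly by the stream; account for it in the uio and
* return whatever rwnext() reported.
*/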
if (uiod.d_mp == NULL) {
2644 uioskip(uiop, *iosize);
2645 return (error);
2646 }
2647 ASSERT(mp == uiod.d_mp);
2648 if (error == EINVAL) {
2649 /*
2650 * The stream plumbing must have changed while
2651 * we were away, so just turn off rwnext()s.
2652 */
2653 error = 0;
2654 } else if (error == EBUSY || error == EWOULDBLOCK) {
2655 /*
2656 * Couldn't enter a perimeter or took a page fault,
2657 * so fall-back to putnext().
2658 */
2659 error = 0;
2660 } else {
2661 freemsg(mp);
2662 return (error);
2663 }
2664 /* Have to check canput before consuming data from the uio */
2665 if (pri == 0) {
2666 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2667 freemsg(mp);
2668 return (EWOULDBLOCK);
2669 }
2670 } else {
2671 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) {
2672 freemsg(mp);
2673 return (EWOULDBLOCK);
2674 }
2675 }
2676 ASSERT(mp == uiod.d_mp);
2677 /* Copyin data from the uio */
2678 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) {
2679 freemsg(mp);
2680 return (error);
2681 }
2682 uioskip(uiop, *iosize);
2683 if (flags & MSG_IGNFLOW) {
2684 /*
2685 * XXX Hack: Don't get stuck running service procedures.
2686 * This is needed for sockfs when sending the unbind message
2687 * out of the rput procedure - we don't want a put procedure
2688 * to run service procedures.
2689 */
2690 putnext(wqp, mp);
2691 } else {
2692 stream_willservice(stp);
2693 putnext(wqp, mp);
2694 stream_runservice(stp);
2695 }
2696 return (0);
2697 }
2698
/*
* Write attempts to break the write request into messages conforming
* with the minimum and maximum packet sizes set downstream.
*
* Write will not block if the downstream queue is full and O_NDELAY is
* set; otherwise it will block waiting for the queue to get room.
*
* A write of zero bytes gets packaged into a zero-length message and sent
* downstream like any other message.
*
* If buffers of the requested sizes are not available, the write will
* sleep until the buffers become available.
*
* Write will supply a write offset in a message if one has been
* requested by a downstream module as part of an M_SETOPTS message.
* Write will not supply the write offset if it cannot supply any data
* in a buffer.  In other words, write will never send down an empty
* packet due to a write offset.
*/
2718 /* ARGSUSED2 */
2719 int
2720 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp)
2721 {
2722 return (strwrite_common(vp, uiop, crp, 0));
2723 }
2724
2725 /* ARGSUSED2 */
2726 int
2727 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag)
2728 {
2729 struct stdata *stp;
2730 struct queue *wqp;
2731 ssize_t rmin, rmax;
2732 ssize_t iosize;
2733 int waitflag;
2734 int tempmode;
2735 int error = 0;
2736 int b_flag;
2737
2738 ASSERT(vp->v_stream);
2739 stp = vp->v_stream;
2740
2741 mutex_enter(&stp->sd_lock);
2742
2743 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2744 mutex_exit(&stp->sd_lock);
2745 return (error);
2746 }
2747
2748 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
2749 error = strwriteable(stp, B_TRUE, B_TRUE);
2750 if (error != 0) {
2751 mutex_exit(&stp->sd_lock);
2752 return (error);
2753 }
2754 }
2755
2756 mutex_exit(&stp->sd_lock);
2757
2758 wqp = stp->sd_wrq;
2759
/* get these values from the copies cached in the stream head */
2761 rmin = stp->sd_qn_minpsz;
2762 rmax = stp->sd_qn_maxpsz;
2763
2764 /*
2765 * Check the min/max packet size constraints. If min packet size
2766 * is non-zero, the write cannot be split into multiple messages
2767 * and still guarantee the size constraints.
2768 */
2769 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp);
2770
2771 ASSERT((rmax >= 0) || (rmax == INFPSZ));
2772 if (rmax == 0) {
2773 return (0);
2774 }
2775 if (rmin > 0) {
2776 if (uiop->uio_resid < rmin) {
2777 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2778 "strwrite out:q %p out %d error %d",
2779 wqp, 0, ERANGE);
2780 return (ERANGE);
2781 }
2782 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) {
2783 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2784 "strwrite out:q %p out %d error %d",
2785 wqp, 1, ERANGE);
2786 return (ERANGE);
2787 }
2788 }
2789
2790 /*
2791 * Do until count satisfied or error.
2792 */
2793 waitflag = WRITEWAIT | wflag;
2794 if (stp->sd_flag & OLDNDELAY)
2795 tempmode = uiop->uio_fmode & ~FNDELAY;
2796 else
2797 tempmode = uiop->uio_fmode;
2798
2799 if (rmax == INFPSZ)
2800 rmax = uiop->uio_resid;
2801
2802 /*
2803 * Note that tempmode does not get used in strput/strmakedata
2804 * but only in strwaitq. The other routines use uio_fmode
2805 * unmodified.
2806 */
2807
2808 /* LINTED: constant in conditional context */
2809 while (1) { /* breaks when uio_resid reaches zero */
2810 /*
2811 * Determine the size of the next message to be
2812 * packaged. May have to break write into several
2813 * messages based on max packet size.
2814 */
2815 iosize = MIN(uiop->uio_resid, rmax);
2816
2817 /*
2818 * Put block downstream when flow control allows it.
2819 */
2820 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize))
2821 b_flag = MSGDELIM;
2822 else
2823 b_flag = 0;
2824
2825 for (;;) {
2826 int done = 0;
2827
2828 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0);
2829 if (error == 0)
2830 break;
2831 if (error != EWOULDBLOCK)
2832 goto out;
2833
2834 mutex_enter(&stp->sd_lock);
2835 /*
2836 * Check for a missed wakeup.
2837 * Needed since strput did not hold sd_lock across
2838 * the canputnext.
2839 */
2840 if (canputnext(wqp)) {
2841 /* Try again */
2842 mutex_exit(&stp->sd_lock);
2843 continue;
2844 }
2845 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT,
2846 "strwrite wait:q %p wait", wqp);
2847 if ((error = strwaitq(stp, waitflag, (ssize_t)0,
2848 tempmode, -1, &done)) != 0 || done) {
2849 mutex_exit(&stp->sd_lock);
2850 if ((vp->v_type == VFIFO) &&
2851 (uiop->uio_fmode & FNDELAY) &&
2852 (error == EAGAIN))
2853 error = 0;
2854 goto out;
2855 }
2856 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE,
2857 "strwrite wake:q %p awakes", wqp);
2858 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2859 mutex_exit(&stp->sd_lock);
2860 goto out;
2861 }
2862 mutex_exit(&stp->sd_lock);
2863 }
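/*
* Part of the request has now gone downstream; set NOINTR so
* subsequent waits are not interrupted by signals and the rest
* of the write can complete.
*/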
2864 waitflag |= NOINTR;
2865 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID,
2866 "strwrite resid:q %p uiop %p", wqp, uiop);
2867 if (uiop->uio_resid) {
2868 /* Recheck for errors - needed for sockets */
2869 if ((stp->sd_wput_opt & SW_RECHECK_ERR) &&
2870 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) {
2871 mutex_enter(&stp->sd_lock);
2872 error = strwriteable(stp, B_FALSE, B_TRUE);
2873 mutex_exit(&stp->sd_lock);
2874 if (error != 0)
2875 return (error);
2876 }
2877 continue;
2878 }
2879 break;
2880 }
2881 out:
2882 /*
2883 * For historical reasons, applications expect EAGAIN when a data
2884 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN.
2885 */
2886 if (error == ENOMEM)
2887 error = EAGAIN;
2888 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2889 "strwrite out:q %p out %d error %d", wqp, 2, error);
2890 return (error);
2891 }
2892
/*
* Stream head write service routine.
* Its job is to wake up any sleeping writers when a queue
* downstream needs data (part of the flow control in putq and getq).
* It also must wake anyone sleeping on a poll().
* For a stream head right below a mux module, it must also invoke the put
* procedure of the next downstream module.
*/
2901 int
2902 strwsrv(queue_t *q)
2903 {
2904 struct stdata *stp;
2905 queue_t *tq;
2906 qband_t *qbp;
2907 int i;
2908 qband_t *myqbp;
2909 int isevent;
2910 unsigned char qbf[NBAND]; /* band flushing backenable flags */
2911
2912 TRACE_1(TR_FAC_STREAMS_FR,
2913 TR_STRWSRV, "strwsrv:q %p", q);
2914 stp = (struct stdata *)q->q_ptr;
2915 ASSERT(qclaimed(q));
2916 mutex_enter(&stp->sd_lock);
2917 ASSERT(!(stp->sd_flag & STPLEX));
2918
2919 if (stp->sd_flag & WSLEEP) {
2920 stp->sd_flag &= ~WSLEEP;
2921 cv_broadcast(&q->q_wait);
2922 }
2923 mutex_exit(&stp->sd_lock);
2924
2925 /* The other end of a stream pipe went away. */
2926 if ((tq = q->q_next) == NULL) {
2927 return (0);
2928 }
2929
2930 /* Find the next module forward that has a service procedure */
2931 claimstr(q);
2932 tq = q->q_nfsrv;
2933 ASSERT(tq != NULL);
2934
2935 if ((q->q_flag & QBACK)) {
2936 if ((tq->q_flag & QFULL)) {
2937 mutex_enter(QLOCK(tq));
2938 if (!(tq->q_flag & QFULL)) {
2939 mutex_exit(QLOCK(tq));
2940 goto wakeup;
2941 }
2942 /*
2943 * The queue must have become full again. Set QWANTW
2944 * again so strwsrv will be back enabled when
2945 * the queue becomes non-full next time.
2946 */
2947 tq->q_flag |= QWANTW;
2948 mutex_exit(QLOCK(tq));
2949 } else {
2950 wakeup:
2951 pollwakeup(&stp->sd_pollist, POLLWRNORM);
2952 mutex_enter(&stp->sd_lock);
2953 if (stp->sd_sigflags & S_WRNORM)
2954 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0);
2955 mutex_exit(&stp->sd_lock);
2956 }
2957 }
2958
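/*
* Walk our per-band state in lockstep with the bands of the next
* queue with a service procedure, noting in qbf[] each band that
* was flow-controlled here and is no longer full there.
*/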
2959 isevent = 0;
2960 i = 1;
2961 bzero((caddr_t)qbf, NBAND);
2962 mutex_enter(QLOCK(tq));
2963 if ((myqbp = q->q_bandp) != NULL)
2964 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) {
2965 ASSERT(myqbp);
2966 if ((myqbp->qb_flag & QB_BACK)) {
2967 if (qbp->qb_flag & QB_FULL) {
2968 /*
2969 * The band must have become full again.
2970 * Set QB_WANTW again so strwsrv will
2971 * be back enabled when the band becomes
2972 * non-full next time.
2973 */
2974 qbp->qb_flag |= QB_WANTW;
2975 } else {
2976 isevent = 1;
2977 qbf[i] = 1;
2978 }
2979 }
2980 myqbp = myqbp->qb_next;
2981 i++;
2982 }
2983 mutex_exit(QLOCK(tq));
2984
2985 if (isevent) {
2986 for (i = tq->q_nband; i; i--) {
2987 if (qbf[i]) {
2988 pollwakeup(&stp->sd_pollist, POLLWRBAND);
2989 mutex_enter(&stp->sd_lock);
2990 if (stp->sd_sigflags & S_WRBAND)
2991 strsendsig(stp->sd_siglist, S_WRBAND,
2992 (uchar_t)i, 0);
2993 mutex_exit(&stp->sd_lock);
2994 }
2995 }
2996 }
2997
2998 releasestr(q);
2999 return (0);
3000 }
3001
3002 /*
3003 * Special case of strcopyin/strcopyout for copying
3004 * struct strioctl that can deal with both data
3005 * models.
3006 */
3007
3008 #ifdef _LP64
3009
3010 static int
3011 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3012 {
3013 struct strioctl32 strioc32;
3014 struct strioctl *striocp;
3015
3016 if (copyflag & U_TO_K) {
3017 ASSERT((copyflag & K_TO_K) == 0);
3018
3019 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3020 if (copyin(from, &strioc32, sizeof (strioc32)))
3021 return (EFAULT);
3022
3023 striocp = (struct strioctl *)to;
3024 striocp->ic_cmd = strioc32.ic_cmd;
3025 striocp->ic_timout = strioc32.ic_timout;
3026 striocp->ic_len = strioc32.ic_len;
3027 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp;
3028
3029 } else { /* NATIVE data model */
3030 if (copyin(from, to, sizeof (struct strioctl))) {
3031 return (EFAULT);
3032 } else {
3033 return (0);
3034 }
3035 }
3036 } else {
3037 ASSERT(copyflag & K_TO_K);
3038 bcopy(from, to, sizeof (struct strioctl));
3039 }
3040 return (0);
3041 }
3042
3043 static int
3044 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3045 {
3046 struct strioctl32 strioc32;
3047 struct strioctl *striocp;
3048
3049 if (copyflag & U_TO_K) {
3050 ASSERT((copyflag & K_TO_K) == 0);
3051
3052 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3053 striocp = (struct strioctl *)from;
3054 strioc32.ic_cmd = striocp->ic_cmd;
3055 strioc32.ic_timout = striocp->ic_timout;
3056 strioc32.ic_len = striocp->ic_len;
3057 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp;
3058 ASSERT((char *)(uintptr_t)strioc32.ic_dp ==
3059 striocp->ic_dp);
3060
3061 if (copyout(&strioc32, to, sizeof (strioc32)))
3062 return (EFAULT);
3063
3064 } else { /* NATIVE data model */
3065 if (copyout(from, to, sizeof (struct strioctl))) {
3066 return (EFAULT);
3067 } else {
3068 return (0);
3069 }
3070 }
3071 } else {
3072 ASSERT(copyflag & K_TO_K);
3073 bcopy(from, to, sizeof (struct strioctl));
3074 }
3075 return (0);
3076 }
3077
3078 #else /* ! _LP64 */
3079
3080 /* ARGSUSED2 */
3081 static int
3082 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3083 {
3084 return (strcopyin(from, to, sizeof (struct strioctl), copyflag));
3085 }
3086
3087 /* ARGSUSED2 */
3088 static int
3089 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3090 {
3091 return (strcopyout(from, to, sizeof (struct strioctl), copyflag));
3092 }
3093
3094 #endif /* _LP64 */
3095
3096 /*
3097 * Determine type of job control semantics expected by user. The
3098 * possibilities are:
3099 * JCREAD - Behaves like read() on fd; send SIGTTIN
3100 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set
3101 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP
3102 * JCGETP - Gets a value in the stream; no signals.
3103 * See straccess in strsubr.c for usage of these values.
3104 *
3105 * This routine also returns -1 for I_STR as a special case; the
3106 * caller must call again with the real ioctl number for
3107 * classification.
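*
* For example, TIOCSPGRP and I_FLUSH map to JCSETP, I_RECVFD maps to
* JCREAD, I_SENDFD maps to JCWRITE, and anything not listed here (e.g.
* TCGETA) falls through to JCGETP.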
3108 */
3109 static int
3110 job_control_type(int cmd)
3111 {
3112 switch (cmd) {
3113 case I_STR:
3114 return (-1);
3115
3116 case I_RECVFD:
3117 case I_E_RECVFD:
3118 return (JCREAD);
3119
3120 case I_FDINSERT:
3121 case I_SENDFD:
3122 return (JCWRITE);
3123
3124 case TCSETA:
3125 case TCSETAW:
3126 case TCSETAF:
3127 case TCSBRK:
3128 case TCXONC:
3129 case TCFLSH:
3130 case TCDSET: /* Obsolete */
3131 case TIOCSWINSZ:
3132 case TCSETS:
3133 case TCSETSW:
3134 case TCSETSF:
3135 case TIOCSETD:
3136 case TIOCHPCL:
3137 case TIOCSETP:
3138 case TIOCSETN:
3139 case TIOCEXCL:
3140 case TIOCNXCL:
3141 case TIOCFLUSH:
3142 case TIOCSETC:
3143 case TIOCLBIS:
3144 case TIOCLBIC:
3145 case TIOCLSET:
3146 case TIOCSBRK:
3147 case TIOCCBRK:
3148 case TIOCSDTR:
3149 case TIOCCDTR:
3150 case TIOCSLTC:
3151 case TIOCSTOP:
3152 case TIOCSTART:
3153 case TIOCSTI:
3154 case TIOCSPGRP:
3155 case TIOCMSET:
3156 case TIOCMBIS:
3157 case TIOCMBIC:
3158 case TIOCREMOTE:
3159 case TIOCSIGNAL:
3160 case LDSETT:
3161 case LDSMAP: /* Obsolete */
3162 case DIOCSETP:
3163 case I_FLUSH:
3164 case I_SRDOPT:
3165 case I_SETSIG:
3166 case I_SWROPT:
3167 case I_FLUSHBAND:
3168 case I_SETCLTIME:
3169 case I_SERROPT:
3170 case I_ESETSIG:
3171 case FIONBIO:
3172 case FIOASYNC:
3173 case FIOSETOWN:
3174 case JBOOT: /* Obsolete */
3175 case JTERM: /* Obsolete */
3176 case JTIMOM: /* Obsolete */
3177 case JZOMBOOT: /* Obsolete */
3178 case JAGENT: /* Obsolete */
3179 case JTRUN: /* Obsolete */
3180 case JXTPROTO: /* Obsolete */
3181 return (JCSETP);
3182 }
3183
3184 return (JCGETP);
3185 }
3186
3187 /*
3188 * ioctl for streams
3189 */
3190 int
3191 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag,
3192 cred_t *crp, int *rvalp)
3193 {
3194 struct stdata *stp;
3195 struct strcmd *scp;
3196 struct strioctl strioc;
3197 struct uio uio;
3198 struct iovec iov;
3199 int access;
3200 mblk_t *mp;
3201 int error = 0;
3202 int done = 0;
3203 ssize_t rmin, rmax;
3204 queue_t *wrq;
3205 queue_t *rdq;
3206 boolean_t kioctl = B_FALSE;
3207 uint32_t auditing = AU_AUDITING();
3208
3209 if (flag & FKIOCTL) {
3210 copyflag = K_TO_K;
3211 kioctl = B_TRUE;
3212 }
3213 ASSERT(vp->v_stream);
3214 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
3215 stp = vp->v_stream;
3216
3217 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER,
3218 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg);
3219
3220 /*
3221 * If the copy is kernel to kernel, make sure that the FNATIVE
3222 * flag is set. After this it would be a serious error to have
3223 * no model flag.
3224 */
3225 if (copyflag == K_TO_K)
3226 flag = (flag & ~FMODELS) | FNATIVE;
3227
3228 ASSERT((flag & FMODELS) != 0);
3229
3230 wrq = stp->sd_wrq;
3231 rdq = _RD(wrq);
3232
3233 access = job_control_type(cmd);
3234
3235 /* We should never see these here, should be handled by iwscn */
3236 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR)
3237 return (EINVAL);
3238
3239 mutex_enter(&stp->sd_lock);
3240 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) {
3241 mutex_exit(&stp->sd_lock);
3242 return (error);
3243 }
3244 mutex_exit(&stp->sd_lock);
3245
3246 /*
3247 * Check for sgttyb-related ioctls first, and complain as
3248 * necessary.
3249 */
3250 switch (cmd) {
3251 case TIOCGETP:
3252 case TIOCSETP:
3253 case TIOCSETN:
3254 if (sgttyb_handling >= 2 && !sgttyb_complaint) {
3255 sgttyb_complaint = B_TRUE;
3256 cmn_err(CE_NOTE,
3257 "application used obsolete TIOC[GS]ET");
3258 }
3259 if (sgttyb_handling >= 3) {
3260 tsignal(curthread, SIGSYS);
3261 return (EIO);
3262 }
3263 break;
3264 }
3265
3266 mutex_enter(&stp->sd_lock);
3267
3268 switch (cmd) {
3269 case I_RECVFD:
3270 case I_E_RECVFD:
3271 case I_PEEK:
3272 case I_NREAD:
3273 case FIONREAD:
3274 case FIORDCHK:
3275 case I_ATMARK:
3276 case FIONBIO:
3277 case FIOASYNC:
3278 if (stp->sd_flag & (STRDERR|STPLEX)) {
3279 error = strgeterr(stp, STRDERR|STPLEX, 0);
3280 if (error != 0) {
3281 mutex_exit(&stp->sd_lock);
3282 return (error);
3283 }
3284 }
3285 break;
3286
3287 default:
3288 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) {
3289 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0);
3290 if (error != 0) {
3291 mutex_exit(&stp->sd_lock);
3292 return (error);
3293 }
3294 }
3295 }
3296
3297 mutex_exit(&stp->sd_lock);
3298
3299 switch (cmd) {
3300 default:
3301 /*
3302 * The stream head has hardcoded knowledge of a
3303 * miscellaneous collection of terminal-, keyboard- and
3304 * mouse-related ioctls, enumerated below. This hardcoded
3305 * knowledge allows the stream head to automatically
3306 * convert transparent ioctl requests made by userland
3307 * programs into I_STR ioctls which many old STREAMS
3308 * modules and drivers require.
3309 *
3310 * No new ioctls should ever be added to this list.
3311 * Instead, the STREAMS module or driver should be written
3312 * to either handle transparent ioctls or require any
3313 * userland programs to use I_STR ioctls (by returning
3314 * EINVAL to any transparent ioctl requests).
3315 *
3316 * More importantly, removing ioctls from this list should
3317 * be done with the utmost care, since our STREAMS modules
3318 * and drivers *count* on the stream head performing this
3319 * conversion, and thus may panic while processing
3320 * transparent ioctl request for one of these ioctls (keep
3321 * in mind that third party modules and drivers may have
3322 * similar problems).
3323 */
3324 if (((cmd & IOCTYPE) == LDIOC) ||
3325 ((cmd & IOCTYPE) == tIOC) ||
3326 ((cmd & IOCTYPE) == TIOC) ||
3327 ((cmd & IOCTYPE) == KIOC) ||
3328 ((cmd & IOCTYPE) == MSIOC) ||
3329 ((cmd & IOCTYPE) == VUIOC)) {
3330 /*
3331 * The ioctl is a tty ioctl - set up strioc buffer
3332 * and call strdoioctl() to do the work.
3333 */
3334 if (stp->sd_flag & STRHUP)
3335 return (ENXIO);
3336 strioc.ic_cmd = cmd;
3337 strioc.ic_timout = INFTIM;
3338
3339 switch (cmd) {
3340
3341 case TCXONC:
3342 case TCSBRK:
3343 case TCFLSH:
3344 case TCDSET:
3345 {
3346 int native_arg = (int)arg;
3347 strioc.ic_len = sizeof (int);
3348 strioc.ic_dp = (char *)&native_arg;
3349 return (strdoioctl(stp, &strioc, flag,
3350 K_TO_K, crp, rvalp));
3351 }
3352
3353 case TCSETA:
3354 case TCSETAW:
3355 case TCSETAF:
3356 strioc.ic_len = sizeof (struct termio);
3357 strioc.ic_dp = (char *)arg;
3358 return (strdoioctl(stp, &strioc, flag,
3359 copyflag, crp, rvalp));
3360
3361 case TCSETS:
3362 case TCSETSW:
3363 case TCSETSF:
3364 strioc.ic_len = sizeof (struct termios);
3365 strioc.ic_dp = (char *)arg;
3366 return (strdoioctl(stp, &strioc, flag,
3367 copyflag, crp, rvalp));
3368
3369 case LDSETT:
3370 strioc.ic_len = sizeof (struct termcb);
3371 strioc.ic_dp = (char *)arg;
3372 return (strdoioctl(stp, &strioc, flag,
3373 copyflag, crp, rvalp));
3374
3375 case TIOCSETP:
3376 strioc.ic_len = sizeof (struct sgttyb);
3377 strioc.ic_dp = (char *)arg;
3378 return (strdoioctl(stp, &strioc, flag,
3379 copyflag, crp, rvalp));
3380
3381 case TIOCSTI:
3382 if ((flag & FREAD) == 0 &&
3383 secpolicy_sti(crp) != 0) {
3384 return (EPERM);
3385 }
3386 mutex_enter(&stp->sd_lock);
3387 mutex_enter(&curproc->p_splock);
3388 if (stp->sd_sidp != curproc->p_sessp->s_sidp &&
3389 secpolicy_sti(crp) != 0) {
3390 mutex_exit(&curproc->p_splock);
3391 mutex_exit(&stp->sd_lock);
3392 return (EACCES);
3393 }
3394 mutex_exit(&curproc->p_splock);
3395 mutex_exit(&stp->sd_lock);
3396
3397 strioc.ic_len = sizeof (char);
3398 strioc.ic_dp = (char *)arg;
3399 return (strdoioctl(stp, &strioc, flag,
3400 copyflag, crp, rvalp));
3401
3402 case TIOCSWINSZ:
3403 strioc.ic_len = sizeof (struct winsize);
3404 strioc.ic_dp = (char *)arg;
3405 return (strdoioctl(stp, &strioc, flag,
3406 copyflag, crp, rvalp));
3407
3408 case TIOCSSIZE:
3409 strioc.ic_len = sizeof (struct ttysize);
3410 strioc.ic_dp = (char *)arg;
3411 return (strdoioctl(stp, &strioc, flag,
3412 copyflag, crp, rvalp));
3413
3414 case TIOCSSOFTCAR:
3415 case KIOCTRANS:
3416 case KIOCTRANSABLE:
3417 case KIOCCMD:
3418 case KIOCSDIRECT:
3419 case KIOCSCOMPAT:
3420 case KIOCSKABORTEN:
3421 case KIOCSRPTDELAY:
3422 case KIOCSRPTRATE:
3423 case VUIDSFORMAT:
3424 case TIOCSPPS:
3425 strioc.ic_len = sizeof (int);
3426 strioc.ic_dp = (char *)arg;
3427 return (strdoioctl(stp, &strioc, flag,
3428 copyflag, crp, rvalp));
3429
3430 case KIOCSETKEY:
3431 case KIOCGETKEY:
3432 strioc.ic_len = sizeof (struct kiockey);
3433 strioc.ic_dp = (char *)arg;
3434 return (strdoioctl(stp, &strioc, flag,
3435 copyflag, crp, rvalp));
3436
3437 case KIOCSKEY:
3438 case KIOCGKEY:
3439 strioc.ic_len = sizeof (struct kiockeymap);
3440 strioc.ic_dp = (char *)arg;
3441 return (strdoioctl(stp, &strioc, flag,
3442 copyflag, crp, rvalp));
3443
3444 case KIOCSLED:
3445 /* arg is a pointer to char */
3446 strioc.ic_len = sizeof (char);
3447 strioc.ic_dp = (char *)arg;
3448 return (strdoioctl(stp, &strioc, flag,
3449 copyflag, crp, rvalp));
3450
3451 case MSIOSETPARMS:
3452 strioc.ic_len = sizeof (Ms_parms);
3453 strioc.ic_dp = (char *)arg;
3454 return (strdoioctl(stp, &strioc, flag,
3455 copyflag, crp, rvalp));
3456
3457 case VUIDSADDR:
3458 case VUIDGADDR:
3459 strioc.ic_len = sizeof (struct vuid_addr_probe);
3460 strioc.ic_dp = (char *)arg;
3461 return (strdoioctl(stp, &strioc, flag,
3462 copyflag, crp, rvalp));
3463
3464 /*
3465 * These M_IOCTL's don't require any data to be sent
3466 * downstream, and the driver will allocate and link
3467 * on its own mblk_t upon M_IOCACK -- thus we set
3468 * ic_len to zero and set ic_dp to arg so we know
3469 * where to copyout to later.
3470 */
3471 case TIOCGSOFTCAR:
3472 case TIOCGWINSZ:
3473 case TIOCGSIZE:
3474 case KIOCGTRANS:
3475 case KIOCGTRANSABLE:
3476 case KIOCTYPE:
3477 case KIOCGDIRECT:
3478 case KIOCGCOMPAT:
3479 case KIOCLAYOUT:
3480 case KIOCGLED:
3481 case MSIOGETPARMS:
3482 case MSIOBUTTONS:
3483 case VUIDGFORMAT:
3484 case TIOCGPPS:
3485 case TIOCGPPSEV:
3486 case TCGETA:
3487 case TCGETS:
3488 case LDGETT:
3489 case TIOCGETP:
3490 case KIOCGRPTDELAY:
3491 case KIOCGRPTRATE:
3492 strioc.ic_len = 0;
3493 strioc.ic_dp = (char *)arg;
3494 return (strdoioctl(stp, &strioc, flag,
3495 copyflag, crp, rvalp));
3496 }
3497 }
3498
3499 /*
3500 * Unknown cmd - send it down as a transparent ioctl.
3501 */
3502 strioc.ic_cmd = cmd;
3503 strioc.ic_timout = INFTIM;
3504 strioc.ic_len = TRANSPARENT;
3505 strioc.ic_dp = (char *)&arg;
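/*
* An ic_len of TRANSPARENT tells strdoioctl() to pass the user's
* argument down untouched; the module drives any further copying
* itself via M_COPYIN/M_COPYOUT.
*/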
3506
3507 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp));
3508
3509 case I_STR:
/*
* Stream ioctl.  Read in an strioctl buffer from the user
* along with any data specified and send it downstream.
* strdoioctl() allows only one ioctl message on the stream
* at a time and waits for the acknowledgement.
*/
3516
3517 if (stp->sd_flag & STRHUP)
3518 return (ENXIO);
3519
3520 error = strcopyin_strioctl((void *)arg, &strioc, flag,
3521 copyflag);
3522 if (error != 0)
3523 return (error);
3524
3525 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1))
3526 return (EINVAL);
3527
3528 access = job_control_type(strioc.ic_cmd);
3529 mutex_enter(&stp->sd_lock);
3530 if ((access != -1) &&
3531 ((error = i_straccess(stp, access)) != 0)) {
3532 mutex_exit(&stp->sd_lock);
3533 return (error);
3534 }
3535 mutex_exit(&stp->sd_lock);
3536
3537 /*
3538 * The I_STR facility provides a trap door for malicious
3539 * code to send down bogus streamio(7I) ioctl commands to
3540 * unsuspecting STREAMS modules and drivers which expect to
3541 * only get these messages from the stream head.
3542 * Explicitly prohibit any streamio ioctls which can be
3543 * passed downstream by the stream head. Note that we do
3544 * not block all streamio ioctls because the ioctl
3545 * numberspace is not well managed and thus it's possible
3546 * that a module or driver's ioctl numbers may accidentally
3547 * collide with them.
3548 */
3549 switch (strioc.ic_cmd) {
3550 case I_LINK:
3551 case I_PLINK:
3552 case I_UNLINK:
3553 case I_PUNLINK:
3554 case _I_GETPEERCRED:
3555 case _I_PLINK_LH:
3556 return (EINVAL);
3557 }
3558
3559 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp);
3560 if (error == 0) {
3561 error = strcopyout_strioctl(&strioc, (void *)arg,
3562 flag, copyflag);
3563 }
3564 return (error);
3565
3566 case _I_CMD:
3567 /*
3568 * Like I_STR, but without using M_IOC* messages and without
3569 * copyins/copyouts beyond the passed-in argument.
3570 */
3571 if (stp->sd_flag & STRHUP)
3572 return (ENXIO);
3573
3574 if ((scp = kmem_alloc(sizeof (strcmd_t), KM_NOSLEEP)) == NULL)
3575 return (ENOMEM);
3576
3577 if (copyin((void *)arg, scp, sizeof (strcmd_t))) {
3578 kmem_free(scp, sizeof (strcmd_t));
3579 return (EFAULT);
3580 }
3581
3582 access = job_control_type(scp->sc_cmd);
3583 mutex_enter(&stp->sd_lock);
3584 if (access != -1 && (error = i_straccess(stp, access)) != 0) {
3585 mutex_exit(&stp->sd_lock);
3586 kmem_free(scp, sizeof (strcmd_t));
3587 return (error);
3588 }
3589 mutex_exit(&stp->sd_lock);
3590
3591 *rvalp = 0;
3592 if ((error = strdocmd(stp, scp, crp)) == 0) {
3593 if (copyout(scp, (void *)arg, sizeof (strcmd_t)))
3594 error = EFAULT;
3595 }
3596 kmem_free(scp, sizeof (strcmd_t));
3597 return (error);
3598
3599 case I_NREAD:
/*
* Return the number of bytes of data in the first message
* on the queue in "arg" and the number of messages on the
* queue as the return value.
*/
3605 {
3606 size_t size;
3607 int retval;
3608 int count = 0;
3609
3610 mutex_enter(QLOCK(rdq));
3611
3612 size = msgdsize(rdq->q_first);
3613 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3614 count++;
3615
3616 mutex_exit(QLOCK(rdq));
3617 if (stp->sd_struiordq) {
3618 infod_t infod;
3619
3620 infod.d_cmd = INFOD_COUNT;
3621 infod.d_count = 0;
3622 if (count == 0) {
3623 infod.d_cmd |= INFOD_FIRSTBYTES;
3624 infod.d_bytes = 0;
3625 }
3626 infod.d_res = 0;
3627 (void) infonext(rdq, &infod);
3628 count += infod.d_count;
3629 if (infod.d_res & INFOD_FIRSTBYTES)
3630 size = infod.d_bytes;
3631 }
3632
3633 /*
3634 * Drop down from size_t to the "int" required by the
3635 * interface. Cap at INT_MAX.
3636 */
3637 retval = MIN(size, INT_MAX);
3638 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3639 copyflag);
3640 if (!error)
3641 *rvalp = count;
3642 return (error);
3643 }
3644
3645 case FIONREAD:
/*
* Return the number of bytes of data in all data messages
* on the queue in "arg".
*/
3650 {
3651 size_t size = 0;
3652 int retval;
3653
3654 mutex_enter(QLOCK(rdq));
3655 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3656 size += msgdsize(mp);
3657 mutex_exit(QLOCK(rdq));
3658
3659 if (stp->sd_struiordq) {
3660 infod_t infod;
3661
3662 infod.d_cmd = INFOD_BYTES;
3663 infod.d_res = 0;
3664 infod.d_bytes = 0;
3665 (void) infonext(rdq, &infod);
3666 size += infod.d_bytes;
3667 }
3668
3669 /*
3670 * Drop down from size_t to the "int" required by the
3671 * interface. Cap at INT_MAX.
3672 */
3673 retval = MIN(size, INT_MAX);
3674 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3675 copyflag);
3676
3677 *rvalp = 0;
3678 return (error);
3679 }
3680 case FIORDCHK:
/*
* Unlike FIONREAD, FIORDCHK does not use the arg value;
* instead the count is returned.  As with I_NREAD the value
* may not be accurate, but it is safe.  The real thing to do
* is to add the msgdsize()s of all data messages up to the
* first non-data message.
*/
3688 {
3689 size_t size = 0;
3690
3691 mutex_enter(QLOCK(rdq));
3692 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3693 size += msgdsize(mp);
3694 mutex_exit(QLOCK(rdq));
3695
3696 if (stp->sd_struiordq) {
3697 infod_t infod;
3698
3699 infod.d_cmd = INFOD_BYTES;
3700 infod.d_res = 0;
3701 infod.d_bytes = 0;
3702 (void) infonext(rdq, &infod);
3703 size += infod.d_bytes;
3704 }
3705
3706 /*
3707 * Since ioctl returns an int, and memory sizes under
3708 * LP64 may not fit, we return INT_MAX if the count was
3709 * actually greater.
3710 */
3711 *rvalp = MIN(size, INT_MAX);
3712 return (0);
3713 }
3714
3715 case I_FIND:
3716 /*
3717 * Get module name.
3718 */
3719 {
3720 char mname[FMNAMESZ + 1];
3721 queue_t *q;
3722
3723 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3724 mname, FMNAMESZ + 1, NULL);
3725 if (error)
3726 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3727
3728 /*
3729 * Return EINVAL if we're handed a bogus module name.
3730 */
3731 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) {
3732 TRACE_0(TR_FAC_STREAMS_FR,
3733 TR_I_CANT_FIND, "couldn't I_FIND");
3734 return (EINVAL);
3735 }
3736
3737 *rvalp = 0;
3738
3739 /* Look downstream to see if module is there. */
3740 claimstr(stp->sd_wrq);
3741 for (q = stp->sd_wrq->q_next; q; q = q->q_next) {
3742 if (q->q_flag & QREADR) {
3743 q = NULL;
3744 break;
3745 }
3746 if (strcmp(mname, Q2NAME(q)) == 0)
3747 break;
3748 }
3749 releasestr(stp->sd_wrq);
3750
3751 *rvalp = (q ? 1 : 0);
3752 return (error);
3753 }
3754
3755 case I_PUSH:
3756 case __I_PUSH_NOCTTY:
/*
* Push a module.
* For the __I_PUSH_NOCTTY case, push a module but do not
* allocate a controlling tty.  See bugid 4025044.
*/
3762
3763 {
3764 char mname[FMNAMESZ + 1];
3765 fmodsw_impl_t *fp;
3766 dev_t dummydev;
3767
3768 if (stp->sd_flag & STRHUP)
3769 return (ENXIO);
3770
3771 /*
3772 * Get module name and look up in fmodsw.
3773 */
3774 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3775 mname, FMNAMESZ + 1, NULL);
3776 if (error)
3777 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3778
3779 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) ==
3780 NULL)
3781 return (EINVAL);
3782
3783 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH,
3784 "I_PUSH:fp %p stp %p", fp, stp);
3785
3786 if (error = strstartplumb(stp, flag, cmd)) {
3787 fmodsw_rele(fp);
3788 return (error);
3789 }
3790
3791 /*
3792 * See if any more modules can be pushed on this stream.
3793 * Note that this check must be done after strstartplumb()
3794 * since otherwise multiple threads issuing I_PUSHes on
3795 * the same stream will be able to exceed nstrpush.
3796 */
3797 mutex_enter(&stp->sd_lock);
3798 if (stp->sd_pushcnt >= nstrpush) {
3799 fmodsw_rele(fp);
3800 strendplumb(stp);
3801 mutex_exit(&stp->sd_lock);
3802 return (EINVAL);
3803 }
3804 mutex_exit(&stp->sd_lock);
3805
3806 /*
3807 * Push new module and call its open routine
3808 * via qattach(). Modules don't change device
3809 * numbers, so just ignore dummydev here.
3810 */
3811 dummydev = vp->v_rdev;
3812 if ((error = qattach(rdq, &dummydev, 0, crp, fp,
3813 B_FALSE)) == 0) {
3814 if (vp->v_type == VCHR && /* sorry, no pipes allowed */
3815 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) {
3816 /*
3817 * try to allocate it as a controlling terminal
3818 */
3819 (void) strctty(stp);
3820 }
3821 }
3822
3823 mutex_enter(&stp->sd_lock);
3824
3825 /*
3826 * As a performance concern we are caching the values of
3827 * q_minpsz and q_maxpsz of the module below the stream
3828 * head in the stream head.
3829 */
3830 mutex_enter(QLOCK(stp->sd_wrq->q_next));
3831 rmin = stp->sd_wrq->q_next->q_minpsz;
3832 rmax = stp->sd_wrq->q_next->q_maxpsz;
3833 mutex_exit(QLOCK(stp->sd_wrq->q_next));
3834
3835 /* Do this processing here as a performance concern */
3836 if (strmsgsz != 0) {
3837 if (rmax == INFPSZ)
3838 rmax = strmsgsz;
3839 else {
3840 if (vp->v_type == VFIFO)
3841 rmax = MIN(PIPE_BUF, rmax);
3842 else rmax = MIN(strmsgsz, rmax);
3843 }
3844 }
3845
3846 mutex_enter(QLOCK(wrq));
3847 stp->sd_qn_minpsz = rmin;
3848 stp->sd_qn_maxpsz = rmax;
3849 mutex_exit(QLOCK(wrq));
3850
3851 strendplumb(stp);
3852 mutex_exit(&stp->sd_lock);
3853 return (error);
3854 }
3855
3856 case I_POP:
3857 {
3858 queue_t *q;
3859
3860 if (stp->sd_flag & STRHUP)
3861 return (ENXIO);
3862 if (!wrq->q_next) /* for broken pipes */
3863 return (EINVAL);
3864
3865 if (error = strstartplumb(stp, flag, cmd))
3866 return (error);
3867
3868 /*
3869 * If there is an anchor on this stream and popping
3870 * the current module would attempt to pop through the
3871 * anchor, then disallow the pop unless we have sufficient
3872 * privileges; take the cheapest (non-locking) check
3873 * first.
3874 */
3875 if (secpolicy_ip_config(crp, B_TRUE) != 0 ||
3876 (stp->sd_anchorzone != crgetzoneid(crp))) {
3877 mutex_enter(&stp->sd_lock);
3878 /*
3879 * Anchors only apply if there's at least one
3880 * module on the stream (sd_pushcnt > 0).
3881 */
3882 if (stp->sd_pushcnt > 0 &&
3883 stp->sd_pushcnt == stp->sd_anchor &&
3884 stp->sd_vnode->v_type != VFIFO) {
3885 strendplumb(stp);
3886 mutex_exit(&stp->sd_lock);
3887 if (stp->sd_anchorzone != crgetzoneid(crp))
3888 return (EINVAL);
3889 /* Audit and report error */
3890 return (secpolicy_ip_config(crp, B_FALSE));
3891 }
3892 mutex_exit(&stp->sd_lock);
3893 }
3894
3895 q = wrq->q_next;
3896 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP,
3897 "I_POP:%p from %p", q, stp);
3898 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) {
3899 error = EINVAL;
3900 } else {
3901 qdetach(_RD(q), 1, flag, crp, B_FALSE);
3902 error = 0;
3903 }
3904 mutex_enter(&stp->sd_lock);
3905
3906 /*
3907 * As a performance concern we are caching the values of
3908 * q_minpsz and q_maxpsz of the module below the stream
3909 * head in the stream head.
3910 */
3911 mutex_enter(QLOCK(wrq->q_next));
3912 rmin = wrq->q_next->q_minpsz;
3913 rmax = wrq->q_next->q_maxpsz;
3914 mutex_exit(QLOCK(wrq->q_next));
3915
3916 /* Do this processing here as a performance concern */
3917 if (strmsgsz != 0) {
3918 if (rmax == INFPSZ)
3919 rmax = strmsgsz;
3920 else {
3921 if (vp->v_type == VFIFO)
3922 rmax = MIN(PIPE_BUF, rmax);
3923 else rmax = MIN(strmsgsz, rmax);
3924 }
3925 }
3926
3927 mutex_enter(QLOCK(wrq));
3928 stp->sd_qn_minpsz = rmin;
3929 stp->sd_qn_maxpsz = rmax;
3930 mutex_exit(QLOCK(wrq));
3931
3932 /* If we popped through the anchor, then reset the anchor. */
3933 if (stp->sd_pushcnt < stp->sd_anchor) {
3934 stp->sd_anchor = 0;
3935 stp->sd_anchorzone = 0;
3936 }
3937 strendplumb(stp);
3938 mutex_exit(&stp->sd_lock);
3939 return (error);
3940 }
3941
3942 case _I_MUXID2FD:
3943 {
/*
* Create an fd for an I_PLINK'ed lower stream with a given
* muxid.  With the fd, the application can send down ioctls,
* like I_LIST, to the previously I_PLINK'ed stream.  Note
* that after getting the fd, the application has to do an
* I_PUNLINK on the muxid before it can do any operation
* on the lower stream.  This is required by spec1170.
*
* The fd used to do this ioctl should point to the same
* controlling device used to do the I_PLINK.  If it uses
* a different stream or an invalid muxid, I_MUXID2FD will
* fail with EINVAL.
*
* The intended use of this interface is the following:
* an application I_PLINK's a stream and exits, so the fd
* to the lower stream is gone.  Another application that
* wants to get an fd to the lower stream uses I_MUXID2FD.
*/
3962 int muxid = (int)arg;
3963 int fd;
3964 linkinfo_t *linkp;
3965 struct file *fp;
3966 netstack_t *ns;
3967 str_stack_t *ss;
3968
/*
* Do not allow the wildcard muxid.  This ioctl is not
* intended to find an arbitrary link.
*/
3973 if (muxid == 0) {
3974 return (EINVAL);
3975 }
3976
3977 ns = netstack_find_by_cred(crp);
3978 ASSERT(ns != NULL);
3979 ss = ns->netstack_str;
3980 ASSERT(ss != NULL);
3981
3982 mutex_enter(&muxifier);
3983 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss);
3984 if (linkp == NULL) {
3985 mutex_exit(&muxifier);
3986 netstack_rele(ss->ss_netstack);
3987 return (EINVAL);
3988 }
3989
3990 if ((fd = ufalloc(0)) == -1) {
3991 mutex_exit(&muxifier);
3992 netstack_rele(ss->ss_netstack);
3993 return (EMFILE);
3994 }
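/*
* Take an extra hold on the lower stream's file pointer
* before publishing it in the new file descriptor.
*/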
3995 fp = linkp->li_fpdown;
3996 mutex_enter(&fp->f_tlock);
3997 fp->f_count++;
3998 mutex_exit(&fp->f_tlock);
3999 mutex_exit(&muxifier);
4000 setf(fd, fp);
4001 *rvalp = fd;
4002 netstack_rele(ss->ss_netstack);
4003 return (0);
4004 }
4005
4006 case _I_INSERT:
4007 {
/*
* Insert a module at a given position in a stream.
* In the first release, only a privileged user is allowed
* to use this ioctl.  Furthermore, the insert is only
* allowed below an anchor if the zoneid is the same as the
* zoneid which created the anchor.
*
* Note that we do not plan to support this ioctl
* on pipes in the first release.  We want to learn more
* about the implications of these ioctls before extending
* their support, and we do not think these features are
* valuable for pipes.
*/
4021 STRUCT_DECL(strmodconf, strmodinsert);
4022 char mod_name[FMNAMESZ + 1];
4023 fmodsw_impl_t *fp;
4024 dev_t dummydev;
4025 queue_t *tmp_wrq;
4026 int pos;
4027 boolean_t is_insert;
4028
4029 STRUCT_INIT(strmodinsert, flag);
4030 if (stp->sd_flag & STRHUP)
4031 return (ENXIO);
4032 if (STRMATED(stp))
4033 return (EINVAL);
4034 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4035 return (error);
4036 if (stp->sd_anchor != 0 &&
4037 stp->sd_anchorzone != crgetzoneid(crp))
4038 return (EINVAL);
4039
4040 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert),
4041 STRUCT_SIZE(strmodinsert), copyflag);
4042 if (error)
4043 return (error);
4044
4045 /*
4046 * Get module name and look up in fmodsw.
4047 */
4048 error = (copyflag & U_TO_K ? copyinstr :
4049 copystr)(STRUCT_FGETP(strmodinsert, mod_name),
4050 mod_name, FMNAMESZ + 1, NULL);
4051 if (error)
4052 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4053
4054 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) ==
4055 NULL)
4056 return (EINVAL);
4057
4058 if (error = strstartplumb(stp, flag, cmd)) {
4059 fmodsw_rele(fp);
4060 return (error);
4061 }
4062
4063 /*
4064 * Is this _I_INSERT just like an I_PUSH? We need to know
4065 * this because we do some optimizations if this is a
4066 * module being pushed.
4067 */
4068 pos = STRUCT_FGET(strmodinsert, pos);
4069 is_insert = (pos != 0);
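/*
* pos counts modules from the stream head down; pos == 0
* is equivalent to an ordinary I_PUSH directly below the
* stream head.
*/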
4070
4071 /*
4072 * Make sure pos is valid. Even though it is not an I_PUSH,
4073 * we impose the same limit on the number of modules in a
4074 * stream.
4075 */
4076 mutex_enter(&stp->sd_lock);
4077 if (stp->sd_pushcnt >= nstrpush || pos < 0 ||
4078 pos > stp->sd_pushcnt) {
4079 fmodsw_rele(fp);
4080 strendplumb(stp);
4081 mutex_exit(&stp->sd_lock);
4082 return (EINVAL);
4083 }
4084 if (stp->sd_anchor != 0) {
4085 /*
			 * Is this insert below the anchor?
			 * Pushcnt hasn't been increased yet, hence
			 * we test for greater than here, and greater than or
			 * equal to after qattach.
4090 */
4091 if (pos > (stp->sd_pushcnt - stp->sd_anchor) &&
4092 stp->sd_anchorzone != crgetzoneid(crp)) {
4093 fmodsw_rele(fp);
4094 strendplumb(stp);
4095 mutex_exit(&stp->sd_lock);
4096 return (EPERM);
4097 }
4098 }
4099
4100 mutex_exit(&stp->sd_lock);
4101
4102 /*
		 * First find the correct position at which this module
		 * is to be inserted. We don't need to call claimstr()
4105 * as the stream should not be changing at this point.
4106 *
4107 * Insert new module and call its open routine
4108 * via qattach(). Modules don't change device
4109 * numbers, so just ignore dummydev here.
4110 */
4111 for (tmp_wrq = stp->sd_wrq; pos > 0;
4112 tmp_wrq = tmp_wrq->q_next, pos--) {
4113 ASSERT(SAMESTR(tmp_wrq));
4114 }
4115 dummydev = vp->v_rdev;
4116 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp,
4117 fp, is_insert)) != 0) {
4118 mutex_enter(&stp->sd_lock);
4119 strendplumb(stp);
4120 mutex_exit(&stp->sd_lock);
4121 return (error);
4122 }
4123
4124 mutex_enter(&stp->sd_lock);
4125
4126 /*
		 * As a performance optimization we cache the q_minpsz and
		 * q_maxpsz values of the module below the stream head in
		 * the stream head itself.
4130 */
4131 if (!is_insert) {
4132 mutex_enter(QLOCK(stp->sd_wrq->q_next));
4133 rmin = stp->sd_wrq->q_next->q_minpsz;
4134 rmax = stp->sd_wrq->q_next->q_maxpsz;
4135 mutex_exit(QLOCK(stp->sd_wrq->q_next));
4136
4137 /* Do this processing here as a performance concern */
4138 if (strmsgsz != 0) {
4139 if (rmax == INFPSZ) {
4140 rmax = strmsgsz;
4141 } else {
4142 rmax = MIN(strmsgsz, rmax);
4143 }
4144 }
4145
4146 mutex_enter(QLOCK(wrq));
4147 stp->sd_qn_minpsz = rmin;
4148 stp->sd_qn_maxpsz = rmax;
4149 mutex_exit(QLOCK(wrq));
4150 }
4151
4152 /*
4153 * Need to update the anchor value if this module is
4154 * inserted below the anchor point.
4155 */
4156 if (stp->sd_anchor != 0) {
4157 pos = STRUCT_FGET(strmodinsert, pos);
4158 if (pos >= (stp->sd_pushcnt - stp->sd_anchor))
4159 stp->sd_anchor++;
4160 }
4161
4162 strendplumb(stp);
4163 mutex_exit(&stp->sd_lock);
4164 return (0);
4165 }
4166
4167 case _I_REMOVE:
4168 {
4169 /*
		 * Remove a module with a given name from a stream. The
		 * caller of this ioctl needs to provide both the name and
		 * the position of the module to be removed. This eliminates
		 * the ambiguity of removal if a module is inserted/pushed
		 * multiple times in a stream. In the first release, only
		 * a privileged user may use this ioctl.
		 * Furthermore, a remove below an anchor is only allowed
		 * if the zoneid matches the zoneid that created the
		 * anchor.
		 *
		 * Note that we do not plan to support this ioctl
		 * on pipes in the first release. We want to learn more
		 * about the implications of these ioctls before extending
		 * their support, and we do not think these features are
		 * valuable for pipes.
		 *
		 * Also note that _I_REMOVE cannot be used to remove a
		 * driver or the stream head.
4188 */
4189 STRUCT_DECL(strmodconf, strmodremove);
4190 queue_t *q;
4191 int pos;
4192 char mod_name[FMNAMESZ + 1];
4193 boolean_t is_remove;
4194
4195 STRUCT_INIT(strmodremove, flag);
4196 if (stp->sd_flag & STRHUP)
4197 return (ENXIO);
4198 if (STRMATED(stp))
4199 return (EINVAL);
4200 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4201 return (error);
4202 if (stp->sd_anchor != 0 &&
4203 stp->sd_anchorzone != crgetzoneid(crp))
4204 return (EINVAL);
4205
4206 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove),
4207 STRUCT_SIZE(strmodremove), copyflag);
4208 if (error)
4209 return (error);
4210
4211 error = (copyflag & U_TO_K ? copyinstr :
4212 copystr)(STRUCT_FGETP(strmodremove, mod_name),
4213 mod_name, FMNAMESZ + 1, NULL);
4214 if (error)
4215 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4216
4217 if ((error = strstartplumb(stp, flag, cmd)) != 0)
4218 return (error);
4219
4220 /*
		 * Match the name of the given module to the name of the
		 * module at the given position.
4223 */
4224 pos = STRUCT_FGET(strmodremove, pos);
4225
4226 is_remove = (pos != 0);
4227 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0;
4228 q = q->q_next, pos--)
4229 ;
4230 if (pos > 0 || !SAMESTR(q) ||
4231 strcmp(Q2NAME(q), mod_name) != 0) {
4232 mutex_enter(&stp->sd_lock);
4233 strendplumb(stp);
4234 mutex_exit(&stp->sd_lock);
4235 return (EINVAL);
4236 }
4237
4238 /*
4239 * If the position is at or below an anchor, then the zoneid
4240 * must match the zoneid that created the anchor.
4241 */
4242 if (stp->sd_anchor != 0) {
4243 pos = STRUCT_FGET(strmodremove, pos);
4244 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) &&
4245 stp->sd_anchorzone != crgetzoneid(crp)) {
4246 mutex_enter(&stp->sd_lock);
4247 strendplumb(stp);
4248 mutex_exit(&stp->sd_lock);
4249 return (EPERM);
4250 }
4251 }
4252
4253
4254 ASSERT(!(q->q_flag & QREADR));
4255 qdetach(_RD(q), 1, flag, crp, is_remove);
4256
4257 mutex_enter(&stp->sd_lock);
4258
4259 /*
		 * As a performance optimization we cache the q_minpsz and
		 * q_maxpsz values of the module below the stream head in
		 * the stream head itself.
4263 */
4264 if (!is_remove) {
4265 mutex_enter(QLOCK(wrq->q_next));
4266 rmin = wrq->q_next->q_minpsz;
4267 rmax = wrq->q_next->q_maxpsz;
4268 mutex_exit(QLOCK(wrq->q_next));
4269
4270 /* Do this processing here as a performance concern */
4271 if (strmsgsz != 0) {
4272 if (rmax == INFPSZ)
4273 rmax = strmsgsz;
4274 else {
4275 if (vp->v_type == VFIFO)
4276 rmax = MIN(PIPE_BUF, rmax);
					else
						rmax = MIN(strmsgsz, rmax);
4278 }
4279 }
4280
4281 mutex_enter(QLOCK(wrq));
4282 stp->sd_qn_minpsz = rmin;
4283 stp->sd_qn_maxpsz = rmax;
4284 mutex_exit(QLOCK(wrq));
4285 }
4286
4287 /*
4288 * Need to update the anchor value if this module is removed
4289 * at or below the anchor point. If the removed module is at
4290 * the anchor point, remove the anchor for this stream if
4291 * there is no module above the anchor point. Otherwise, if
4292 * the removed module is below the anchor point, decrement the
4293 * anchor point by 1.
4294 */
4295 if (stp->sd_anchor != 0) {
4296 pos = STRUCT_FGET(strmodremove, pos);
4297 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1)
4298 stp->sd_anchor = 0;
4299 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1))
4300 stp->sd_anchor--;
4301 }
4302
4303 strendplumb(stp);
4304 mutex_exit(&stp->sd_lock);
4305 return (0);
4306 }
4307
4308 case I_ANCHOR:
4309 /*
4310 * Set the anchor position on the stream to reside at
4311 * the top module (in other words, the top module
4312 * cannot be popped). Anchors with a FIFO make no
4313 * obvious sense, so they're not allowed.
4314 */
4315 mutex_enter(&stp->sd_lock);
4316
4317 if (stp->sd_vnode->v_type == VFIFO) {
4318 mutex_exit(&stp->sd_lock);
4319 return (EINVAL);
4320 }
4321 /* Only allow the same zoneid to update the anchor */
4322 if (stp->sd_anchor != 0 &&
4323 stp->sd_anchorzone != crgetzoneid(crp)) {
4324 mutex_exit(&stp->sd_lock);
4325 return (EINVAL);
4326 }
4327 stp->sd_anchor = stp->sd_pushcnt;
4328 stp->sd_anchorzone = crgetzoneid(crp);
4329 mutex_exit(&stp->sd_lock);
4330 return (0);
4331
4332 case I_LOOK:
4333 /*
4334 * Get name of first module downstream.
4335 * If no module, return an error.
4336 */
4337 claimstr(wrq);
4338 if (_SAMESTR(wrq) && wrq->q_next->q_next != NULL) {
4339 char *name = Q2NAME(wrq->q_next);
4340
4341 error = strcopyout(name, (void *)arg, strlen(name) + 1,
4342 copyflag);
4343 releasestr(wrq);
4344 return (error);
4345 }
4346 releasestr(wrq);
4347 return (EINVAL);
4348
4349 case I_LINK:
4350 case I_PLINK:
4351 /*
4352 * Link a multiplexor.
4353 */
4354 return (mlink(vp, cmd, (int)arg, crp, rvalp, 0));
4355
4356 case _I_PLINK_LH:
4357 /*
4358 * Link a multiplexor: Call must originate from kernel.
4359 */
4360 if (kioctl)
4361 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp));
4362
4363 return (EINVAL);
4364 case I_UNLINK:
4365 case I_PUNLINK:
4366 /*
4367 * Unlink a multiplexor.
4368 * If arg is -1, unlink all links for which this is the
4369 * controlling stream. Otherwise, arg is an index number
4370 * for a link to be removed.
4371 */
4372 {
4373 struct linkinfo *linkp;
4374 int native_arg = (int)arg;
4375 int type;
4376 netstack_t *ns;
4377 str_stack_t *ss;
4378
4379 TRACE_1(TR_FAC_STREAMS_FR,
4380 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp);
4381 if (vp->v_type == VFIFO) {
4382 return (EINVAL);
4383 }
4384 if (cmd == I_UNLINK)
4385 type = LINKNORMAL;
4386 else /* I_PUNLINK */
4387 type = LINKPERSIST;
4388 if (native_arg == 0) {
4389 return (EINVAL);
4390 }
4391 ns = netstack_find_by_cred(crp);
4392 ASSERT(ns != NULL);
4393 ss = ns->netstack_str;
4394 ASSERT(ss != NULL);
4395
4396 if (native_arg == MUXID_ALL)
4397 error = munlinkall(stp, type, crp, rvalp, ss);
4398 else {
4399 mutex_enter(&muxifier);
4400 if (!(linkp = findlinks(stp, (int)arg, type, ss))) {
4401 /* invalid user supplied index number */
4402 mutex_exit(&muxifier);
4403 netstack_rele(ss->ss_netstack);
4404 return (EINVAL);
4405 }
4406 /* munlink drops the muxifier lock */
4407 error = munlink(stp, linkp, type, crp, rvalp, ss);
4408 }
4409 netstack_rele(ss->ss_netstack);
4410 return (error);
4411 }
4412
4413 case I_FLUSH:
4414 /*
4415 * send a flush message downstream
4416 * flush message can indicate
4417 * FLUSHR - flush read queue
4418 * FLUSHW - flush write queue
4419 * FLUSHRW - flush read/write queue
4420 */
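		/*
		 * For example, a (hypothetical) userland caller flushing
		 * both queues would issue:
		 *
		 *	(void) ioctl(fd, I_FLUSH, FLUSHRW);
		 */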
4421 if (stp->sd_flag & STRHUP)
4422 return (ENXIO);
4423 if (arg & ~FLUSHRW)
4424 return (EINVAL);
4425
4426 for (;;) {
4427 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) {
4428 break;
4429 }
4430 if (error = strwaitbuf(1, BPRI_HI)) {
4431 return (error);
4432 }
4433 }
4434
4435 /*
4436 * Send down an unsupported ioctl and wait for the nack
4437 * in order to allow the M_FLUSH to propagate back
4438 * up to the stream head.
4439 * Replaces if (qready()) runqueues();
4440 */
4441 strioc.ic_cmd = -1; /* The unsupported ioctl */
4442 strioc.ic_timout = 0;
4443 strioc.ic_len = 0;
4444 strioc.ic_dp = NULL;
4445 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4446 *rvalp = 0;
4447 return (0);
4448
4449 case I_FLUSHBAND:
4450 {
4451 struct bandinfo binfo;
4452
4453 error = strcopyin((void *)arg, &binfo, sizeof (binfo),
4454 copyflag);
4455 if (error)
4456 return (error);
4457 if (stp->sd_flag & STRHUP)
4458 return (ENXIO);
4459 if (binfo.bi_flag & ~FLUSHRW)
4460 return (EINVAL);
4461 while (!(mp = allocb(2, BPRI_HI))) {
4462 if (error = strwaitbuf(2, BPRI_HI))
4463 return (error);
4464 }
4465 mp->b_datap->db_type = M_FLUSH;
4466 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND;
4467 *mp->b_wptr++ = binfo.bi_pri;
4468 putnext(stp->sd_wrq, mp);
4469 /*
4470 * Send down an unsupported ioctl and wait for the nack
4471 * in order to allow the M_FLUSH to propagate back
4472 * up to the stream head.
4473 * Replaces if (qready()) runqueues();
4474 */
4475 strioc.ic_cmd = -1; /* The unsupported ioctl */
4476 strioc.ic_timout = 0;
4477 strioc.ic_len = 0;
4478 strioc.ic_dp = NULL;
4479 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4480 *rvalp = 0;
4481 return (0);
4482 }
4483
4484 case I_SRDOPT:
4485 /*
4486 * Set read options
4487 *
4488 * RNORM - default stream mode
4489 * RMSGN - message no discard
4490 * RMSGD - message discard
4491 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs
4492 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs
4493 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs
4494 */
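		/*
		 * For example, a (hypothetical) caller that wants message
		 * non-discard mode with M_[PC]PROTOs converted to M_DATAs
		 * would issue:
		 *
		 *	(void) ioctl(fd, I_SRDOPT, RMSGN | RPROTDAT);
		 */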
4495 if (arg & ~(RMODEMASK | RPROTMASK))
4496 return (EINVAL);
4497
4498 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN))
4499 return (EINVAL);
4500
4501 mutex_enter(&stp->sd_lock);
4502 switch (arg & RMODEMASK) {
4503 case RNORM:
4504 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
4505 break;
4506 case RMSGD:
4507 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) |
4508 RD_MSGDIS;
4509 break;
4510 case RMSGN:
4511 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) |
4512 RD_MSGNODIS;
4513 break;
4514 }
4515
4516 switch (arg & RPROTMASK) {
4517 case RPROTNORM:
4518 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
4519 break;
4520
4521 case RPROTDAT:
4522 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) |
4523 RD_PROTDAT);
4524 break;
4525
4526 case RPROTDIS:
4527 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) |
4528 RD_PROTDIS);
4529 break;
4530 }
4531 mutex_exit(&stp->sd_lock);
4532 return (0);
4533
4534 case I_GRDOPT:
4535 /*
		 * Get the read options and return the value
		 * in the spot pointed to by arg.
4538 */
4539 {
4540 int rdopt;
4541
4542 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD :
4543 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM));
4544 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT :
4545 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM));
4546
4547 return (strcopyout(&rdopt, (void *)arg, sizeof (int),
4548 copyflag));
4549 }
4550
4551 case I_SERROPT:
4552 /*
4553 * Set error options
4554 *
4555 * RERRNORM - persistent read errors
4556 * RERRNONPERSIST - non-persistent read errors
4557 * WERRNORM - persistent write errors
4558 * WERRNONPERSIST - non-persistent write errors
4559 */
4560 if (arg & ~(RERRMASK | WERRMASK))
4561 return (EINVAL);
4562
4563 mutex_enter(&stp->sd_lock);
4564 switch (arg & RERRMASK) {
4565 case RERRNORM:
4566 stp->sd_flag &= ~STRDERRNONPERSIST;
4567 break;
4568 case RERRNONPERSIST:
4569 stp->sd_flag |= STRDERRNONPERSIST;
4570 break;
4571 }
4572 switch (arg & WERRMASK) {
4573 case WERRNORM:
4574 stp->sd_flag &= ~STWRERRNONPERSIST;
4575 break;
4576 case WERRNONPERSIST:
4577 stp->sd_flag |= STWRERRNONPERSIST;
4578 break;
4579 }
4580 mutex_exit(&stp->sd_lock);
4581 return (0);
4582
4583 case I_GERROPT:
4584 /*
		 * Get the error options and return the value
		 * in the spot pointed to by arg.
4587 */
4588 {
4589 int erropt = 0;
4590
4591 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST :
4592 RERRNORM;
4593 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST :
4594 WERRNORM;
4595 return (strcopyout(&erropt, (void *)arg, sizeof (int),
4596 copyflag));
4597 }
4598
4599 case I_SETSIG:
4600 /*
4601 * Register the calling proc to receive the SIGPOLL
4602 * signal based on the events given in arg. If
4603 * arg is zero, remove the proc from register list.
4604 */
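		/*
		 * For example, a (hypothetical) caller interested in normal
		 * input and priority band data would issue:
		 *
		 *	(void) ioctl(fd, I_SETSIG, S_RDNORM | S_RDBAND);
		 */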
4605 {
4606 strsig_t *ssp, *pssp;
4607 struct pid *pidp;
4608
4609 pssp = NULL;
4610 pidp = curproc->p_pidp;
4611 /*
4612 * Hold sd_lock to prevent traversal of sd_siglist while
4613 * it is modified.
4614 */
4615 mutex_enter(&stp->sd_lock);
4616 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp);
4617 pssp = ssp, ssp = ssp->ss_next)
4618 ;
4619
4620 if (arg) {
4621 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4622 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4623 mutex_exit(&stp->sd_lock);
4624 return (EINVAL);
4625 }
4626 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) {
4627 mutex_exit(&stp->sd_lock);
4628 return (EINVAL);
4629 }
4630
4631 /*
4632 * If proc not already registered, add it
4633 * to list.
4634 */
4635 if (!ssp) {
4636 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4637 ssp->ss_pidp = pidp;
4638 ssp->ss_pid = pidp->pid_id;
4639 ssp->ss_next = NULL;
4640 if (pssp)
4641 pssp->ss_next = ssp;
4642 else
4643 stp->sd_siglist = ssp;
4644 mutex_enter(&pidlock);
4645 PID_HOLD(pidp);
4646 mutex_exit(&pidlock);
4647 }
4648
4649 /*
4650 * Set events.
4651 */
4652 ssp->ss_events = (int)arg;
4653 } else {
4654 /*
4655 * Remove proc from register list.
4656 */
4657 if (ssp) {
4658 mutex_enter(&pidlock);
4659 PID_RELE(pidp);
4660 mutex_exit(&pidlock);
4661 if (pssp)
4662 pssp->ss_next = ssp->ss_next;
4663 else
4664 stp->sd_siglist = ssp->ss_next;
4665 kmem_free(ssp, sizeof (strsig_t));
4666 } else {
4667 mutex_exit(&stp->sd_lock);
4668 return (EINVAL);
4669 }
4670 }
4671
4672 /*
4673 * Recalculate OR of sig events.
4674 */
4675 stp->sd_sigflags = 0;
4676 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4677 stp->sd_sigflags |= ssp->ss_events;
4678 mutex_exit(&stp->sd_lock);
4679 return (0);
4680 }
4681
4682 case I_GETSIG:
4683 /*
4684 * Return (in arg) the current registration of events
4685 * for which the calling proc is to be signaled.
4686 */
4687 {
4688 struct strsig *ssp;
4689 struct pid *pidp;
4690
4691 pidp = curproc->p_pidp;
4692 mutex_enter(&stp->sd_lock);
4693 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4694 if (ssp->ss_pidp == pidp) {
4695 error = strcopyout(&ssp->ss_events, (void *)arg,
4696 sizeof (int), copyflag);
4697 mutex_exit(&stp->sd_lock);
4698 return (error);
4699 }
4700 mutex_exit(&stp->sd_lock);
4701 return (EINVAL);
4702 }
4703
4704 case I_ESETSIG:
4705 /*
	 * Register the ss_pid to receive the SIGPOLL
	 * signal based on the events given in the ss_events arg. If
	 * ss_events is zero, remove the proc from the register list.
4709 */
4710 {
4711 struct strsig *ssp, *pssp;
4712 struct proc *proc;
4713 struct pid *pidp;
4714 pid_t pid;
4715 struct strsigset ss;
4716
4717 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4718 if (error)
4719 return (error);
4720
4721 pid = ss.ss_pid;
4722
4723 if (ss.ss_events != 0) {
4724 /*
4725 * Permissions check by sending signal 0.
4726 * Note that when kill fails it does a set_errno
4727 * causing the system call to fail.
4728 */
4729 error = kill(pid, 0);
4730 if (error) {
4731 return (error);
4732 }
4733 }
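		/*
		 * An ss_pid of zero refers to the calling process; a
		 * negative ss_pid names the process group -ss_pid.
		 */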
4734 mutex_enter(&pidlock);
4735 if (pid == 0)
4736 proc = curproc;
4737 else if (pid < 0)
4738 proc = pgfind(-pid);
4739 else
4740 proc = prfind(pid);
4741 if (proc == NULL) {
4742 mutex_exit(&pidlock);
4743 return (ESRCH);
4744 }
4745 if (pid < 0)
4746 pidp = proc->p_pgidp;
4747 else
4748 pidp = proc->p_pidp;
4749 ASSERT(pidp);
4750 /*
4751 * Get a hold on the pid structure while referencing it.
4752 * There is a separate PID_HOLD should it be inserted
4753 * in the list below.
4754 */
4755 PID_HOLD(pidp);
4756 mutex_exit(&pidlock);
4757
4758 pssp = NULL;
4759 /*
4760 * Hold sd_lock to prevent traversal of sd_siglist while
4761 * it is modified.
4762 */
4763 mutex_enter(&stp->sd_lock);
4764 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid);
4765 pssp = ssp, ssp = ssp->ss_next)
4766 ;
4767
4768 if (ss.ss_events) {
4769 if (ss.ss_events &
4770 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4771 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4772 mutex_exit(&stp->sd_lock);
4773 mutex_enter(&pidlock);
4774 PID_RELE(pidp);
4775 mutex_exit(&pidlock);
4776 return (EINVAL);
4777 }
4778 if ((ss.ss_events & S_BANDURG) &&
4779 !(ss.ss_events & S_RDBAND)) {
4780 mutex_exit(&stp->sd_lock);
4781 mutex_enter(&pidlock);
4782 PID_RELE(pidp);
4783 mutex_exit(&pidlock);
4784 return (EINVAL);
4785 }
4786
4787 /*
4788 * If proc not already registered, add it
4789 * to list.
4790 */
4791 if (!ssp) {
4792 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4793 ssp->ss_pidp = pidp;
4794 ssp->ss_pid = pid;
4795 ssp->ss_next = NULL;
4796 if (pssp)
4797 pssp->ss_next = ssp;
4798 else
4799 stp->sd_siglist = ssp;
4800 mutex_enter(&pidlock);
4801 PID_HOLD(pidp);
4802 mutex_exit(&pidlock);
4803 }
4804
4805 /*
4806 * Set events.
4807 */
4808 ssp->ss_events = ss.ss_events;
4809 } else {
4810 /*
4811 * Remove proc from register list.
4812 */
4813 if (ssp) {
4814 mutex_enter(&pidlock);
4815 PID_RELE(pidp);
4816 mutex_exit(&pidlock);
4817 if (pssp)
4818 pssp->ss_next = ssp->ss_next;
4819 else
4820 stp->sd_siglist = ssp->ss_next;
4821 kmem_free(ssp, sizeof (strsig_t));
4822 } else {
4823 mutex_exit(&stp->sd_lock);
4824 mutex_enter(&pidlock);
4825 PID_RELE(pidp);
4826 mutex_exit(&pidlock);
4827 return (EINVAL);
4828 }
4829 }
4830
4831 /*
4832 * Recalculate OR of sig events.
4833 */
4834 stp->sd_sigflags = 0;
4835 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4836 stp->sd_sigflags |= ssp->ss_events;
4837 mutex_exit(&stp->sd_lock);
4838 mutex_enter(&pidlock);
4839 PID_RELE(pidp);
4840 mutex_exit(&pidlock);
4841 return (0);
4842 }
4843
4844 case I_EGETSIG:
4845 /*
4846 * Return (in arg) the current registration of events
4847 * for which the calling proc is to be signaled.
4848 */
4849 {
4850 struct strsig *ssp;
4851 struct proc *proc;
4852 pid_t pid;
4853 struct pid *pidp;
4854 struct strsigset ss;
4855
4856 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4857 if (error)
4858 return (error);
4859
4860 pid = ss.ss_pid;
4861 mutex_enter(&pidlock);
4862 if (pid == 0)
4863 proc = curproc;
4864 else if (pid < 0)
4865 proc = pgfind(-pid);
4866 else
4867 proc = prfind(pid);
4868 if (proc == NULL) {
4869 mutex_exit(&pidlock);
4870 return (ESRCH);
4871 }
4872 if (pid < 0)
4873 pidp = proc->p_pgidp;
4874 else
4875 pidp = proc->p_pidp;
4876
4877 /* Prevent the pidp from being reassigned */
4878 PID_HOLD(pidp);
4879 mutex_exit(&pidlock);
4880
4881 mutex_enter(&stp->sd_lock);
4882 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4883 if (ssp->ss_pid == pid) {
4884 ss.ss_pid = ssp->ss_pid;
4885 ss.ss_events = ssp->ss_events;
4886 error = strcopyout(&ss, (void *)arg,
4887 sizeof (struct strsigset), copyflag);
4888 mutex_exit(&stp->sd_lock);
4889 mutex_enter(&pidlock);
4890 PID_RELE(pidp);
4891 mutex_exit(&pidlock);
4892 return (error);
4893 }
4894 mutex_exit(&stp->sd_lock);
4895 mutex_enter(&pidlock);
4896 PID_RELE(pidp);
4897 mutex_exit(&pidlock);
4898 return (EINVAL);
4899 }
4900
4901 case I_PEEK:
4902 {
4903 STRUCT_DECL(strpeek, strpeek);
4904 size_t n;
4905 mblk_t *fmp, *tmp_mp = NULL;
4906
4907 STRUCT_INIT(strpeek, flag);
4908
4909 error = strcopyin((void *)arg, STRUCT_BUF(strpeek),
4910 STRUCT_SIZE(strpeek), copyflag);
4911 if (error)
4912 return (error);
4913
4914 mutex_enter(QLOCK(rdq));
4915 /*
4916 * Skip the invalid messages
4917 */
4918 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
4919 if (mp->b_datap->db_type != M_SIG)
4920 break;
4921
4922 /*
		 * If the user has requested to peek at a high priority
		 * message and the first message is not high priority,
		 * return 0.
4925 */
4926 if (mp != NULL) {
4927 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) &&
4928 queclass(mp) == QNORM) {
4929 *rvalp = 0;
4930 mutex_exit(QLOCK(rdq));
4931 return (0);
4932 }
4933 } else if (stp->sd_struiordq == NULL ||
4934 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) {
4935 /*
			 * There are no mblks to look at at the stream head,
			 * and either
			 * 1) this isn't a synch stream, or
			 * 2) this is a synch stream but the caller wants high
			 *    priority messages, which the synch stream does
			 *    not support (it only supports QNORM).
4941 */
4942 *rvalp = 0;
4943 mutex_exit(QLOCK(rdq));
4944 return (0);
4945 }
4946
4947 fmp = mp;
4948
4949 if (mp && mp->b_datap->db_type == M_PASSFP) {
4950 mutex_exit(QLOCK(rdq));
4951 return (EBADMSG);
4952 }
4953
4954 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO ||
4955 mp->b_datap->db_type == M_PROTO ||
4956 mp->b_datap->db_type == M_DATA);
4957
4958 if (mp && mp->b_datap->db_type == M_PCPROTO) {
4959 STRUCT_FSET(strpeek, flags, RS_HIPRI);
4960 } else {
4961 STRUCT_FSET(strpeek, flags, 0);
4962 }
4963
4964
4965 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) {
4966 mutex_exit(QLOCK(rdq));
4967 return (ENOSR);
4968 }
4969 mutex_exit(QLOCK(rdq));
4970
4971 /*
4972 * set mp = tmp_mp, so that I_PEEK processing can continue.
4973 * tmp_mp is used to free the dup'd message.
4974 */
4975 mp = tmp_mp;
4976
4977 uio.uio_fmode = 0;
4978 uio.uio_extflg = UIO_COPY_CACHED;
4979 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
4980 UIO_SYSSPACE;
4981 uio.uio_limit = 0;
4982 /*
		 * First process PROTO blocks, if any.
		 * If the user doesn't want the ctl info (indicated by
		 * setting maxlen <= 0), set len to -1/0 and skip the
		 * control blocks.
4986 */
4987 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0)
4988 STRUCT_FSET(strpeek, ctlbuf.len, -1);
4989 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0)
4990 STRUCT_FSET(strpeek, ctlbuf.len, 0);
4991 else {
4992 int ctl_part = 0;
4993
4994 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf);
4995 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen);
4996 uio.uio_iov = &iov;
4997 uio.uio_resid = iov.iov_len;
4998 uio.uio_loffset = 0;
4999 uio.uio_iovcnt = 1;
5000 while (mp && mp->b_datap->db_type != M_DATA &&
5001 uio.uio_resid >= 0) {
5002 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ?
5003 mp->b_datap->db_type == M_PROTO :
5004 mp->b_datap->db_type == M_PCPROTO);
5005
5006 if ((n = MIN(uio.uio_resid,
5007 mp->b_wptr - mp->b_rptr)) != 0 &&
5008 (error = uiomove((char *)mp->b_rptr, n,
5009 UIO_READ, &uio)) != 0) {
5010 freemsg(tmp_mp);
5011 return (error);
5012 }
5013 ctl_part = 1;
5014 mp = mp->b_cont;
5015 }
5016 /* No ctl message */
5017 if (ctl_part == 0)
5018 STRUCT_FSET(strpeek, ctlbuf.len, -1);
5019 else
5020 STRUCT_FSET(strpeek, ctlbuf.len,
5021 STRUCT_FGET(strpeek, ctlbuf.maxlen) -
5022 uio.uio_resid);
5023 }
5024
5025 /*
		 * Now process DATA blocks, if any.
		 * If the user doesn't want the data (indicated by setting
		 * maxlen <= 0), set len to -1/0 and skip the data blocks.
5029 */
5030 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0)
5031 STRUCT_FSET(strpeek, databuf.len, -1);
5032 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0)
5033 STRUCT_FSET(strpeek, databuf.len, 0);
5034 else {
5035 int data_part = 0;
5036
5037 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf);
5038 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen);
5039 uio.uio_iov = &iov;
5040 uio.uio_resid = iov.iov_len;
5041 uio.uio_loffset = 0;
5042 uio.uio_iovcnt = 1;
5043 while (mp && uio.uio_resid) {
5044 if (mp->b_datap->db_type == M_DATA) {
5045 if ((n = MIN(uio.uio_resid,
5046 mp->b_wptr - mp->b_rptr)) != 0 &&
5047 (error = uiomove((char *)mp->b_rptr,
5048 n, UIO_READ, &uio)) != 0) {
5049 freemsg(tmp_mp);
5050 return (error);
5051 }
5052 data_part = 1;
5053 }
5054 ASSERT(data_part == 0 ||
5055 mp->b_datap->db_type == M_DATA);
5056 mp = mp->b_cont;
5057 }
5058 /* No data message */
5059 if (data_part == 0)
5060 STRUCT_FSET(strpeek, databuf.len, -1);
5061 else
5062 STRUCT_FSET(strpeek, databuf.len,
5063 STRUCT_FGET(strpeek, databuf.maxlen) -
5064 uio.uio_resid);
5065 }
5066 freemsg(tmp_mp);
5067
5068 /*
		 * This is a synch stream and the user wants to get
		 * data (maxlen > 0).
		 * The uio setup is done by the code that processes DATA
		 * blocks above.
5073 */
5074 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) {
5075 infod_t infod;
5076
5077 infod.d_cmd = INFOD_COPYOUT;
5078 infod.d_res = 0;
5079 infod.d_uiop = &uio;
5080 error = infonext(rdq, &infod);
5081 if (error == EINVAL || error == EBUSY)
5082 error = 0;
5083 if (error)
5084 return (error);
5085 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek,
5086 databuf.maxlen) - uio.uio_resid);
5087 if (STRUCT_FGET(strpeek, databuf.len) == 0) {
5088 /*
5089 * No data found by the infonext().
5090 */
5091 STRUCT_FSET(strpeek, databuf.len, -1);
5092 }
5093 }
5094 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg,
5095 STRUCT_SIZE(strpeek), copyflag);
5096 if (error) {
5097 return (error);
5098 }
5099 /*
		 * If no message was retrieved, set the return code to 0;
		 * otherwise, set it to 1.
5102 */
5103 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 &&
5104 STRUCT_FGET(strpeek, databuf.len) == -1)
5105 *rvalp = 0;
5106 else
5107 *rvalp = 1;
5108 return (0);
5109 }
5110
5111 case I_FDINSERT:
5112 {
5113 STRUCT_DECL(strfdinsert, strfdinsert);
5114 struct file *resftp;
5115 struct stdata *resstp;
5116 t_uscalar_t ival;
5117 ssize_t msgsize;
5118 struct strbuf mctl;
5119
5120 STRUCT_INIT(strfdinsert, flag);
5121 if (stp->sd_flag & STRHUP)
5122 return (ENXIO);
5123 /*
5124 * STRDERR, STWRERR and STPLEX tested above.
5125 */
5126 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert),
5127 STRUCT_SIZE(strfdinsert), copyflag);
5128 if (error)
5129 return (error);
5130
5131 if (STRUCT_FGET(strfdinsert, offset) < 0 ||
5132 (STRUCT_FGET(strfdinsert, offset) %
5133 sizeof (t_uscalar_t)) != 0)
5134 return (EINVAL);
5135 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) {
5136 if ((resstp = resftp->f_vnode->v_stream) == NULL) {
5137 releasef(STRUCT_FGET(strfdinsert, fildes));
5138 return (EINVAL);
5139 }
5140 } else
5141 return (EINVAL);
5142
5143 mutex_enter(&resstp->sd_lock);
5144 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
5145 error = strgeterr(resstp,
5146 STRDERR|STWRERR|STRHUP|STPLEX, 0);
5147 if (error != 0) {
5148 mutex_exit(&resstp->sd_lock);
5149 releasef(STRUCT_FGET(strfdinsert, fildes));
5150 return (error);
5151 }
5152 }
5153 mutex_exit(&resstp->sd_lock);
5154
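		/*
		 * Compute the value to insert at 'offset' below: on a 32-bit
		 * kernel this is the read queue pointer at the terminus of
		 * the other stream; on a 64-bit kernel a pointer does not
		 * fit in a t_uscalar_t, so the minor number of the other
		 * stream's device is used instead.
		 */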
5155 #ifdef _ILP32
5156 {
5157 queue_t *q;
5158 queue_t *mate = NULL;
5159
5160 /* get read queue of stream terminus */
5161 claimstr(resstp->sd_wrq);
5162 for (q = resstp->sd_wrq->q_next; q->q_next != NULL;
5163 q = q->q_next)
5164 if (!STRMATED(resstp) && STREAM(q) != resstp &&
5165 mate == NULL) {
5166 ASSERT(q->q_qinfo->qi_srvp);
5167 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp);
5168 claimstr(q);
5169 mate = q;
5170 }
5171 q = _RD(q);
5172 if (mate)
5173 releasestr(mate);
5174 releasestr(resstp->sd_wrq);
5175 ival = (t_uscalar_t)q;
5176 }
5177 #else
5178 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev);
5179 #endif /* _ILP32 */
5180
5181 if (STRUCT_FGET(strfdinsert, ctlbuf.len) <
5182 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) {
5183 releasef(STRUCT_FGET(strfdinsert, fildes));
5184 return (EINVAL);
5185 }
5186
5187 /*
5188 * Check for legal flag value.
5189 */
5190 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) {
5191 releasef(STRUCT_FGET(strfdinsert, fildes));
5192 return (EINVAL);
5193 }
5194
5195 /* get these values from those cached in the stream head */
5196 mutex_enter(QLOCK(stp->sd_wrq));
5197 rmin = stp->sd_qn_minpsz;
5198 rmax = stp->sd_qn_maxpsz;
5199 mutex_exit(QLOCK(stp->sd_wrq));
5200
5201 /*
5202 * Make sure ctl and data sizes together fall within
5203 * the limits of the max and min receive packet sizes
		 * and do not exceed the system limit. A negative data
5205 * length means that no data part is to be sent.
5206 */
5207 ASSERT((rmax >= 0) || (rmax == INFPSZ));
5208 if (rmax == 0) {
5209 releasef(STRUCT_FGET(strfdinsert, fildes));
5210 return (ERANGE);
5211 }
5212 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0)
5213 msgsize = 0;
5214 if ((msgsize < rmin) ||
5215 ((msgsize > rmax) && (rmax != INFPSZ)) ||
5216 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) {
5217 releasef(STRUCT_FGET(strfdinsert, fildes));
5218 return (ERANGE);
5219 }
5220
5221 mutex_enter(&stp->sd_lock);
5222 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) &&
5223 !canputnext(stp->sd_wrq)) {
5224 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0,
5225 flag, -1, &done)) != 0 || done) {
5226 mutex_exit(&stp->sd_lock);
5227 releasef(STRUCT_FGET(strfdinsert, fildes));
5228 return (error);
5229 }
5230 if ((error = i_straccess(stp, access)) != 0) {
5231 mutex_exit(&stp->sd_lock);
5232 releasef(
5233 STRUCT_FGET(strfdinsert, fildes));
5234 return (error);
5235 }
5236 }
5237 mutex_exit(&stp->sd_lock);
5238
5239 /*
5240 * Copy strfdinsert.ctlbuf into native form of
5241 * ctlbuf to pass down into strmakemsg().
5242 */
5243 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen);
5244 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len);
5245 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf);
5246
5247 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf);
5248 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len);
5249 uio.uio_iov = &iov;
5250 uio.uio_iovcnt = 1;
5251 uio.uio_loffset = 0;
5252 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
5253 UIO_SYSSPACE;
5254 uio.uio_fmode = 0;
5255 uio.uio_extflg = UIO_COPY_CACHED;
5256 uio.uio_resid = iov.iov_len;
5257 if ((error = strmakemsg(&mctl,
5258 &msgsize, &uio, stp,
5259 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) {
5260 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5261 releasef(STRUCT_FGET(strfdinsert, fildes));
5262 return (error);
5263 }
5264
5265 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5266
5267 /*
5268 * Place the possibly reencoded queue pointer 'offset' bytes
5269 * from the start of the control portion of the message.
5270 */
5271 *((t_uscalar_t *)(mp->b_rptr +
5272 STRUCT_FGET(strfdinsert, offset))) = ival;
5273
5274 /*
5275 * Put message downstream.
5276 */
5277 stream_willservice(stp);
5278 putnext(stp->sd_wrq, mp);
5279 stream_runservice(stp);
5280 releasef(STRUCT_FGET(strfdinsert, fildes));
5281 return (error);
5282 }
5283
5284 case I_SENDFD:
5285 {
5286 struct file *fp;
5287
5288 if ((fp = getf((int)arg)) == NULL)
5289 return (EBADF);
5290 error = do_sendfp(stp, fp, crp);
5291 if (auditing) {
5292 audit_fdsend((int)arg, fp, error);
5293 }
5294 releasef((int)arg);
5295 return (error);
5296 }
5297
5298 case I_RECVFD:
5299 case I_E_RECVFD:
5300 {
5301 struct k_strrecvfd *srf;
5302 int i, fd;
5303
5304 mutex_enter(&stp->sd_lock);
5305 while (!(mp = getq(rdq))) {
5306 if (stp->sd_flag & (STRHUP|STREOF)) {
5307 mutex_exit(&stp->sd_lock);
5308 return (ENXIO);
5309 }
5310 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0,
5311 flag, -1, &done)) != 0 || done) {
5312 mutex_exit(&stp->sd_lock);
5313 return (error);
5314 }
5315 if ((error = i_straccess(stp, access)) != 0) {
5316 mutex_exit(&stp->sd_lock);
5317 return (error);
5318 }
5319 }
5320 if (mp->b_datap->db_type != M_PASSFP) {
5321 putback(stp, rdq, mp, mp->b_band);
5322 mutex_exit(&stp->sd_lock);
5323 return (EBADMSG);
5324 }
5325 mutex_exit(&stp->sd_lock);
5326
5327 srf = (struct k_strrecvfd *)mp->b_rptr;
5328 if ((fd = ufalloc(0)) == -1) {
5329 mutex_enter(&stp->sd_lock);
5330 putback(stp, rdq, mp, mp->b_band);
5331 mutex_exit(&stp->sd_lock);
5332 return (EMFILE);
5333 }
5334 if (cmd == I_RECVFD) {
5335 struct o_strrecvfd ostrfd;
5336
5337 /* check to see if uid/gid values are too large. */
5338
5339 if (srf->uid > (o_uid_t)USHRT_MAX ||
5340 srf->gid > (o_gid_t)USHRT_MAX) {
5341 mutex_enter(&stp->sd_lock);
5342 putback(stp, rdq, mp, mp->b_band);
5343 mutex_exit(&stp->sd_lock);
5344 setf(fd, NULL); /* release fd entry */
5345 return (EOVERFLOW);
5346 }
5347
5348 ostrfd.fd = fd;
5349 ostrfd.uid = (o_uid_t)srf->uid;
5350 ostrfd.gid = (o_gid_t)srf->gid;
5351
5352 /* Null the filler bits */
5353 for (i = 0; i < 8; i++)
5354 ostrfd.fill[i] = 0;
5355
5356 error = strcopyout(&ostrfd, (void *)arg,
5357 sizeof (struct o_strrecvfd), copyflag);
5358 } else { /* I_E_RECVFD */
5359 struct strrecvfd strfd;
5360
5361 strfd.fd = fd;
5362 strfd.uid = srf->uid;
5363 strfd.gid = srf->gid;
5364
5365 /* null the filler bits */
5366 for (i = 0; i < 8; i++)
5367 strfd.fill[i] = 0;
5368
5369 error = strcopyout(&strfd, (void *)arg,
5370 sizeof (struct strrecvfd), copyflag);
5371 }
5372
5373 if (error) {
5374 setf(fd, NULL); /* release fd entry */
5375 mutex_enter(&stp->sd_lock);
5376 putback(stp, rdq, mp, mp->b_band);
5377 mutex_exit(&stp->sd_lock);
5378 return (error);
5379 }
5380 if (auditing) {
5381 audit_fdrecv(fd, srf->fp);
5382 }
5383
5384 /*
5385 * Always increment f_count since the freemsg() below will
5386 * always call free_passfp() which performs a closef().
5387 */
5388 mutex_enter(&srf->fp->f_tlock);
5389 srf->fp->f_count++;
5390 mutex_exit(&srf->fp->f_tlock);
5391 setf(fd, srf->fp);
5392 freemsg(mp);
5393 return (0);
5394 }
5395
5396 case I_SWROPT:
5397 /*
5398 * Set/clear the write options. arg is a bit
5399 * mask with any of the following bits set...
		 * SNDZERO - send zero-length messages
		 * SNDPIPE - send SIGPIPE to the process if
		 *	sd_werror is set and the process is
		 *	doing a write or putmsg.
5404 * The new stream head write options should reflect
5405 * what is in arg.
5406 */
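		/*
		 * For example, ioctl(fd, I_SWROPT, SNDZERO) (hypothetical
		 * usage) enables zero-length writes and, because the
		 * options are replaced wholesale, clears SNDPIPE.
		 */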
5407 if (arg & ~(SNDZERO|SNDPIPE))
5408 return (EINVAL);
5409
5410 mutex_enter(&stp->sd_lock);
5411 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO);
5412 if (arg & SNDZERO)
5413 stp->sd_wput_opt |= SW_SNDZERO;
5414 if (arg & SNDPIPE)
5415 stp->sd_wput_opt |= SW_SIGPIPE;
5416 mutex_exit(&stp->sd_lock);
5417 return (0);
5418
5419 case I_GWROPT:
5420 {
5421 int wropt = 0;
5422
5423 if (stp->sd_wput_opt & SW_SNDZERO)
5424 wropt |= SNDZERO;
5425 if (stp->sd_wput_opt & SW_SIGPIPE)
5426 wropt |= SNDPIPE;
5427 return (strcopyout(&wropt, (void *)arg, sizeof (wropt),
5428 copyflag));
5429 }
5430
5431 case I_LIST:
5432 /*
		 * Return all the modules found on this stream,
		 * up to the driver. If the argument is NULL, return the
		 * number of modules (including the driver). If the
		 * argument is not NULL, copy the names into the structure
		 * provided.
5438 */
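		/*
		 * Illustrative userland sketch (hypothetical): query the
		 * count first, then fetch the names. The number of names
		 * actually copied is written back to the beginning of the
		 * structure at arg (see the final strcopyout() below).
		 *
		 *	int n;
		 *	struct str_list sl;
		 *
		 *	n = ioctl(fd, I_LIST, NULL);
		 *	sl.sl_nmods = n;
		 *	sl.sl_modlist = calloc(n, sizeof (struct str_mlist));
		 *	if (ioctl(fd, I_LIST, &sl) == -1)
		 *		perror("I_LIST");
		 */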
5439
5440 {
5441 queue_t *q;
5442 char *qname;
5443 int i, nmods;
5444 struct str_mlist *mlist;
5445 STRUCT_DECL(str_list, strlist);
5446
5447 if (arg == NULL) { /* Return number of modules plus driver */
5448 if (stp->sd_vnode->v_type == VFIFO)
5449 *rvalp = stp->sd_pushcnt;
5450 else
5451 *rvalp = stp->sd_pushcnt + 1;
5452 return (0);
5453 }
5454
5455 STRUCT_INIT(strlist, flag);
5456
5457 error = strcopyin((void *)arg, STRUCT_BUF(strlist),
5458 STRUCT_SIZE(strlist), copyflag);
5459 if (error != 0)
5460 return (error);
5461
5462 mlist = STRUCT_FGETP(strlist, sl_modlist);
5463 nmods = STRUCT_FGET(strlist, sl_nmods);
5464 if (nmods <= 0)
5465 return (EINVAL);
5466
5467 claimstr(stp->sd_wrq);
5468 q = stp->sd_wrq;
5469 for (i = 0; i < nmods && _SAMESTR(q); i++, q = q->q_next) {
5470 qname = Q2NAME(q->q_next);
5471 error = strcopyout(qname, &mlist[i], strlen(qname) + 1,
5472 copyflag);
5473 if (error != 0) {
5474 releasestr(stp->sd_wrq);
5475 return (error);
5476 }
5477 }
5478 releasestr(stp->sd_wrq);
5479 return (strcopyout(&i, (void *)arg, sizeof (int), copyflag));
5480 }
5481
5482 case I_CKBAND:
5483 {
5484 queue_t *q;
5485 qband_t *qbp;
5486
5487 if ((arg < 0) || (arg >= NBAND))
5488 return (EINVAL);
5489 q = _RD(stp->sd_wrq);
5490 mutex_enter(QLOCK(q));
5491 if (arg > (int)q->q_nband) {
5492 *rvalp = 0;
5493 } else {
5494 if (arg == 0) {
5495 if (q->q_first)
5496 *rvalp = 1;
5497 else
5498 *rvalp = 0;
5499 } else {
5500 qbp = q->q_bandp;
5501 while (--arg > 0)
5502 qbp = qbp->qb_next;
5503 if (qbp->qb_first)
5504 *rvalp = 1;
5505 else
5506 *rvalp = 0;
5507 }
5508 }
5509 mutex_exit(QLOCK(q));
5510 return (0);
5511 }
5512
5513 case I_GETBAND:
5514 {
5515 int intpri;
5516 queue_t *q;
5517
5518 q = _RD(stp->sd_wrq);
5519 mutex_enter(QLOCK(q));
5520 mp = q->q_first;
5521 if (!mp) {
5522 mutex_exit(QLOCK(q));
5523 return (ENODATA);
5524 }
5525 intpri = (int)mp->b_band;
5526 error = strcopyout(&intpri, (void *)arg, sizeof (int),
5527 copyflag);
5528 mutex_exit(QLOCK(q));
5529 return (error);
5530 }
5531
5532 case I_ATMARK:
5533 {
5534 queue_t *q;
5535
5536 if (arg & ~(ANYMARK|LASTMARK))
5537 return (EINVAL);
5538 q = _RD(stp->sd_wrq);
5539 mutex_enter(&stp->sd_lock);
5540 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) {
5541 *rvalp = 1;
5542 } else {
5543 mutex_enter(QLOCK(q));
5544 mp = q->q_first;
5545
5546 if (mp == NULL)
5547 *rvalp = 0;
5548 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK))
5549 *rvalp = 1;
5550 else if ((arg == LASTMARK) && (mp == stp->sd_mark))
5551 *rvalp = 1;
5552 else
5553 *rvalp = 0;
5554 mutex_exit(QLOCK(q));
5555 }
5556 mutex_exit(&stp->sd_lock);
5557 return (0);
5558 }
5559
5560 case I_CANPUT:
5561 {
5562 char band;
5563
5564 if ((arg < 0) || (arg >= NBAND))
5565 return (EINVAL);
5566 band = (char)arg;
5567 *rvalp = bcanputnext(stp->sd_wrq, band);
5568 return (0);
5569 }
5570
5571 case I_SETCLTIME:
5572 {
5573 int closetime;
5574
5575 error = strcopyin((void *)arg, &closetime, sizeof (int),
5576 copyflag);
5577 if (error)
5578 return (error);
5579 if (closetime < 0)
5580 return (EINVAL);
5581
5582 stp->sd_closetime = closetime;
5583 return (0);
5584 }
5585
5586 case I_GETCLTIME:
5587 {
5588 int closetime;
5589
5590 closetime = stp->sd_closetime;
5591 return (strcopyout(&closetime, (void *)arg, sizeof (int),
5592 copyflag));
5593 }
5594
5595 case TIOCGSID:
5596 {
5597 pid_t sid;
5598
5599 mutex_enter(&stp->sd_lock);
5600 if (stp->sd_sidp == NULL) {
5601 mutex_exit(&stp->sd_lock);
5602 return (ENOTTY);
5603 }
5604 sid = stp->sd_sidp->pid_id;
5605 mutex_exit(&stp->sd_lock);
5606 return (strcopyout(&sid, (void *)arg, sizeof (pid_t),
5607 copyflag));
5608 }
5609
5610 case TIOCSPGRP:
5611 {
5612 pid_t pgrp;
5613 proc_t *q;
5614 pid_t sid, fg_pgid, bg_pgid;
5615
5616 if (error = strcopyin((void *)arg, &pgrp, sizeof (pid_t),
5617 copyflag))
5618 return (error);
5619 mutex_enter(&stp->sd_lock);
5620 mutex_enter(&pidlock);
5621 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) {
5622 mutex_exit(&pidlock);
5623 mutex_exit(&stp->sd_lock);
5624 return (ENOTTY);
5625 }
5626 if (pgrp == stp->sd_pgidp->pid_id) {
5627 mutex_exit(&pidlock);
5628 mutex_exit(&stp->sd_lock);
5629 return (0);
5630 }
5631 if (pgrp <= 0 || pgrp >= maxpid) {
5632 mutex_exit(&pidlock);
5633 mutex_exit(&stp->sd_lock);
5634 return (EINVAL);
5635 }
5636 if ((q = pgfind(pgrp)) == NULL ||
5637 q->p_sessp != ttoproc(curthread)->p_sessp) {
5638 mutex_exit(&pidlock);
5639 mutex_exit(&stp->sd_lock);
5640 return (EPERM);
5641 }
5642 sid = stp->sd_sidp->pid_id;
5643 fg_pgid = q->p_pgrp;
5644 bg_pgid = stp->sd_pgidp->pid_id;
5645 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid);
5646 PID_RELE(stp->sd_pgidp);
5647 ctty_clear_sighuped();
5648 stp->sd_pgidp = q->p_pgidp;
5649 PID_HOLD(stp->sd_pgidp);
5650 mutex_exit(&pidlock);
5651 mutex_exit(&stp->sd_lock);
5652 return (0);
5653 }
5654
5655 case TIOCGPGRP:
5656 {
5657 pid_t pgrp;
5658
5659 mutex_enter(&stp->sd_lock);
5660 if (stp->sd_sidp == NULL) {
5661 mutex_exit(&stp->sd_lock);
5662 return (ENOTTY);
5663 }
5664 pgrp = stp->sd_pgidp->pid_id;
5665 mutex_exit(&stp->sd_lock);
5666 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t),
5667 copyflag));
5668 }
5669
5670 case TIOCSCTTY:
5671 {
5672 return (strctty(stp));
5673 }
5674
5675 case TIOCNOTTY:
5676 {
5677 /* freectty() always assumes curproc. */
5678 if (freectty(B_FALSE) != 0)
5679 return (0);
5680 return (ENOTTY);
5681 }
5682
5683 case FIONBIO:
5684 case FIOASYNC:
5685 return (0); /* handled by the upper layer */
5686 case F_FORKED: {
5687 if (crp != kcred)
5688 return (-1);
5689 sh_insert_pid(stp, (proc_t *)arg);
5690 return (0);
5691 }
5692 case F_CLOSED: {
5693 if (crp != kcred)
5694 return (-1);
5695 sh_remove_pid(stp, (proc_t *)arg);
5696 return (0);
5697 }
5698 }
5699 }
5700
5701 /*
5702 * Custom free routine used for M_PASSFP messages.
5703 */
5704 static void
5705 free_passfp(struct k_strrecvfd *srf)
5706 {
5707 (void) closef(srf->fp);
5708 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t));
5709 }
5710
5711 /* ARGSUSED */
5712 int
5713 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr)
5714 {
5715 queue_t *qp, *nextqp;
5716 struct k_strrecvfd *srf;
5717 mblk_t *mp;
5718 frtn_t *frtnp;
5719 size_t bufsize;
5720 queue_t *mate = NULL;
5721 syncq_t *sq = NULL;
5722 int retval = 0;
5723
5724 if (stp->sd_flag & STRHUP)
5725 return (ENXIO);
5726
5727 claimstr(stp->sd_wrq);
5728
5729 /* Fastpath, we have a pipe, and we are already mated, use it. */
5730 if (STRMATED(stp)) {
5731 qp = _RD(stp->sd_mate->sd_wrq);
5732 claimstr(qp);
5733 mate = qp;
5734 } else { /* Not already mated. */
5735
5736 /*
		 * Walk to the end of this stream.
		 * This assumes that the claimstr() will prevent the
		 * plumbing between the stream head and the
		 * driver from changing.
5741 */
5742 qp = stp->sd_wrq;
5743
5744 /*
5745 * Loop until we reach the end of this stream.
5746 * On completion, qp points to the write queue
5747 * at the end of the stream, or the read queue
5748 * at the stream head if this is a fifo.
5749 */
5750 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp))
5751 ;
5752
5753 /*
		 * Just in case we get a q_next which is NULL, but
		 * not at the end of the stream. This is actually
		 * broken, so we use an ASSERT to catch it on DEBUG
		 * kernels, and set an error and return otherwise.
5758 */
5759 ASSERT(qp);
5760 if (qp == NULL) {
5761 releasestr(stp->sd_wrq);
5762 return (EINVAL);
5763 }
5764
5765 /*
5766 * Enter the syncq for the driver, so (hopefully)
5767 * the queue values will not change on us.
5768 * XXXX - This will only prevent the race IFF only
5769 * the write side modifies the q_next member, and
5770 * the put procedure is protected by at least
5771 * MT_PERQ.
5772 */
5773 if ((sq = qp->q_syncq) != NULL)
5774 entersq(sq, SQ_PUT);
5775
5776 /* Now get the q_next value from this qp. */
5777 nextqp = qp->q_next;
5778
5779 /*
5780 * If nextqp exists and the other stream is different
		 * from this one, claim the other stream, set the mate, and
5782 * get the read queue at the stream head of the other
5783 * stream. Assumes that nextqp was at least valid when
5784 * we got it. Hopefully the entersq of the driver
5785 * will prevent it from changing on us.
5786 */
5787 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) {
5788 ASSERT(qp->q_qinfo->qi_srvp);
5789 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp);
5790 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp);
5791 claimstr(nextqp);
5792
5793 /* Make sure we still have a q_next */
5794 if (nextqp != qp->q_next) {
5795 releasestr(stp->sd_wrq);
5796 releasestr(nextqp);
5797 return (EINVAL);
5798 }
5799
5800 qp = _RD(STREAM(nextqp)->sd_wrq);
5801 mate = qp;
5802 }
		/* If we entered the syncq above, leave it. */
5804 if (sq != NULL)
5805 leavesq(sq, SQ_PUT);
5806 } /* STRMATED(STP) */
5807
5808 /* XXX prevents substitution of the ops vector */
5809 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) {
5810 retval = EINVAL;
5811 goto out;
5812 }
5813
5814 if (qp->q_flag & QFULL) {
5815 retval = EAGAIN;
5816 goto out;
5817 }
5818
5819 /*
5820 * Since M_PASSFP messages include a file descriptor, we use
5821 * esballoc() and specify a custom free routine (free_passfp()) that
5822 * will close the descriptor as part of freeing the message. For
5823 * convenience, we stash the frtn_t right after the data block.
5824 */
5825 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t);
5826 srf = kmem_alloc(bufsize, KM_NOSLEEP);
5827 if (srf == NULL) {
5828 retval = EAGAIN;
5829 goto out;
5830 }
5831
5832 frtnp = (frtn_t *)(srf + 1);
5833 frtnp->free_arg = (caddr_t)srf;
5834 frtnp->free_func = free_passfp;
5835
5836 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp);
5837 if (mp == NULL) {
5838 kmem_free(srf, bufsize);
5839 retval = EAGAIN;
5840 goto out;
5841 }
5842 mp->b_wptr += sizeof (struct k_strrecvfd);
5843 mp->b_datap->db_type = M_PASSFP;
5844
5845 srf->fp = fp;
5846 srf->uid = crgetuid(curthread->t_cred);
5847 srf->gid = crgetgid(curthread->t_cred);
5848 mutex_enter(&fp->f_tlock);
5849 fp->f_count++;
5850 mutex_exit(&fp->f_tlock);
5851
5852 put(qp, mp);
5853 out:
5854 releasestr(stp->sd_wrq);
5855 if (mate)
5856 releasestr(mate);
5857 return (retval);
5858 }
5859
5860 /*
5861 * Send an ioctl message downstream and wait for acknowledgement.
 * flags may be set to either U_TO_K or K_TO_K, combined with
 * STR_NOERROR and/or STR_NOSIG:
 * STR_NOSIG: Signals are essentially ignored or held and have
 *	      no effect for the duration of the call.
 * STR_NOERROR: Ignores stream head read, write and hup errors.
 *		Additionally, if an existing ioctl times out, it is assumed
 *		lost and this ioctl will continue as if the previous ioctl
 *		had finished.  ETIME may be returned if this ioctl times out
 *		(i.e. ic_timout is not INFTIM).  Non-stream-head errors may
 *		be returned if ioc_error indicates that the driver/module
 *		had problems, an EFAULT was found when accessing user data,
 *		there was a lack of resources, etc.
5874 */
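/*
 * Minimal in-kernel usage sketch (cf. the I_FLUSH handling above, which
 * sends a deliberately unsupported command and waits for the resulting
 * nak):
 *
 *	struct strioctl strioc;
 *
 *	strioc.ic_cmd = -1;
 *	strioc.ic_timout = 0;
 *	strioc.ic_len = 0;
 *	strioc.ic_dp = NULL;
 *	(void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
 */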
5875 int
5876 strdoioctl(
5877 struct stdata *stp,
5878 struct strioctl *strioc,
5879 int fflags, /* file flags with model info */
5880 int flag,
5881 cred_t *crp,
5882 int *rvalp)
5883 {
5884 mblk_t *bp;
5885 struct iocblk *iocbp;
5886 struct copyreq *reqp;
5887 struct copyresp *resp;
5888 int id;
5889 int transparent = 0;
5890 int error = 0;
5891 int len = 0;
5892 caddr_t taddr;
5893 int copyflag = (flag & (U_TO_K | K_TO_K));
5894 int sigflag = (flag & STR_NOSIG);
5895 int errs;
5896 uint_t waitflags;
5897 boolean_t set_iocwaitne = B_FALSE;
5898
5899 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
5900 ASSERT((fflags & FMODELS) != 0);
5901
5902 TRACE_2(TR_FAC_STREAMS_FR,
5903 TR_STRDOIOCTL,
5904 "strdoioctl:stp %p strioc %p", stp, strioc);
5905 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */
5906 transparent = 1;
5907 strioc->ic_len = sizeof (intptr_t);
5908 }
5909
5910 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz))
5911 return (EINVAL);
5912
5913 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error,
5914 crp, curproc->p_pid)) == NULL)
5915 return (error);
5916
5917 bzero(bp->b_wptr, sizeof (union ioctypes));
5918
5919 iocbp = (struct iocblk *)bp->b_wptr;
5920 iocbp->ioc_count = strioc->ic_len;
5921 iocbp->ioc_cmd = strioc->ic_cmd;
5922 iocbp->ioc_flag = (fflags & FMODELS);
5923
5924 crhold(crp);
5925 iocbp->ioc_cr = crp;
5926 DB_TYPE(bp) = M_IOCTL;
5927 bp->b_wptr += sizeof (struct iocblk);
5928
5929 if (flag & STR_NOERROR)
5930 errs = STPLEX;
5931 else
5932 errs = STRHUP|STRDERR|STWRERR|STPLEX;
5933
5934 /*
5935 * If there is data to copy into ioctl block, do so.
5936 */
5937 if (iocbp->ioc_count > 0) {
5938 if (transparent)
5939 /*
5940 * Note: STR_NOERROR does not have an effect
5941 * in putiocd()
5942 */
5943 id = K_TO_K | sigflag;
5944 else
5945 id = flag;
5946 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) {
5947 freemsg(bp);
5948 crfree(crp);
5949 return (error);
5950 }
5951
5952 /*
5953 * We could have slept copying in user pages.
5954 * Recheck the stream head state (the other end
5955 * of a pipe could have gone away).
5956 */
5957 if (stp->sd_flag & errs) {
5958 mutex_enter(&stp->sd_lock);
5959 error = strgeterr(stp, errs, 0);
5960 mutex_exit(&stp->sd_lock);
5961 if (error != 0) {
5962 freemsg(bp);
5963 crfree(crp);
5964 return (error);
5965 }
5966 }
5967 }
5968 if (transparent)
5969 iocbp->ioc_count = TRANSPARENT;
5970
5971 /*
5972 * Block for up to STRTIMOUT milliseconds if there is an outstanding
5973 * ioctl for this stream already running. All processes
5974 * sleeping here will be awakened as a result of an ACK
5975 * or NAK being received for the outstanding ioctl, or
5976 * as a result of the timer expiring on the outstanding
5977 * ioctl (a failure), or as a result of any waiting
5978 * process's timer expiring (also a failure).
5979 */
5980
5981 error = 0;
5982 mutex_enter(&stp->sd_lock);
5983 while ((stp->sd_flag & IOCWAIT) ||
5984 (!set_iocwaitne && (stp->sd_flag & IOCWAITNE))) {
5985 clock_t cv_rval;
5986
5987 TRACE_0(TR_FAC_STREAMS_FR,
5988 TR_STRDOIOCTL_WAIT,
5989 "strdoioctl sleeps - IOCWAIT");
5990 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock,
5991 STRTIMOUT, sigflag);
5992 if (cv_rval <= 0) {
5993 if (cv_rval == 0) {
5994 error = EINTR;
5995 } else {
5996 if (flag & STR_NOERROR) {
5997 /*
5998 * Terminating current ioctl in
5999 * progress -- assume it got lost and
6000 * wake up the other thread so that the
6001 * operation completes.
6002 */
6003 if (!(stp->sd_flag & IOCWAITNE)) {
6004 set_iocwaitne = B_TRUE;
6005 stp->sd_flag |= IOCWAITNE;
6006 cv_broadcast(&stp->sd_monitor);
6007 }
6008 /*
6009 * Otherwise, there's a running
6010 * STR_NOERROR -- we have no choice
6011 * here but to wait forever (or until
6012 * interrupted).
6013 */
6014 } else {
6015 /*
6016 * pending ioctl has caused
6017 * us to time out
6018 */
6019 error = ETIME;
6020 }
6021 }
6022 } else if ((stp->sd_flag & errs)) {
6023 error = strgeterr(stp, errs, 0);
6024 }
6025 if (error) {
6026 mutex_exit(&stp->sd_lock);
6027 freemsg(bp);
6028 crfree(crp);
6029 return (error);
6030 }
6031 }
6032
6033 /*
6034 * Have control of ioctl mechanism.
6035 * Send down ioctl packet and wait for response.
6036 */
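	/*
	 * Note: sd_iocblk may hold the (mblk_t *)-1 sentinel that is left
	 * behind below to detect duplicate ioctl replies; it is not a real
	 * message and must not be passed to freemsg().
	 */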
6037 if (stp->sd_iocblk != (mblk_t *)-1) {
6038 freemsg(stp->sd_iocblk);
6039 }
6040 stp->sd_iocblk = NULL;
6041
6042 /*
6043 * If this is marked with 'noerror' (internal; mostly
6044 * I_{P,}{UN,}LINK), then make sure nobody else is able to get
6045 * in here by setting IOCWAITNE.
6046 */
6047 waitflags = IOCWAIT;
6048 if (flag & STR_NOERROR)
6049 waitflags |= IOCWAITNE;
6050
6051 stp->sd_flag |= waitflags;
6052
6053 /*
6054 * Assign sequence number.
6055 */
6056 iocbp->ioc_id = stp->sd_iocid = getiocseqno();
6057
6058 mutex_exit(&stp->sd_lock);
6059
6060 TRACE_1(TR_FAC_STREAMS_FR,
6061 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp);
6062 stream_willservice(stp);
6063 putnext(stp->sd_wrq, bp);
6064 stream_runservice(stp);
6065
6066 /*
6067 * Timed wait for acknowledgment. The wait time is limited by the
6068 * timeout value, which must be a positive integer (number of
6069 * milliseconds) to wait, or 0 (use default value of STRTIMOUT
6070 * milliseconds), or -1 (wait forever). This will be awakened
6071 * either by an ACK/NAK message arriving, the timer expiring, or
6072 * the timer expiring on another ioctl waiting for control of the
6073 * mechanism.
6074 */
6075 waitioc:
6076 mutex_enter(&stp->sd_lock);
6077
6078
6079 /*
6080 * If the reply has already arrived, don't sleep. If awakened from
6081 * the sleep, fail only if the reply has not arrived by then.
6082 * Otherwise, process the reply.
6083 */
6084 while (!stp->sd_iocblk) {
6085 clock_t cv_rval;
6086
6087 if (stp->sd_flag & errs) {
6088 error = strgeterr(stp, errs, 0);
6089 if (error != 0) {
6090 stp->sd_flag &= ~waitflags;
6091 cv_broadcast(&stp->sd_iocmonitor);
6092 mutex_exit(&stp->sd_lock);
6093 crfree(crp);
6094 return (error);
6095 }
6096 }
6097
6098 TRACE_0(TR_FAC_STREAMS_FR,
6099 TR_STRDOIOCTL_WAIT2,
6100 "strdoioctl sleeps awaiting reply");
6101 ASSERT(error == 0);
6102
6103 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock,
6104 (strioc->ic_timout ?
6105 strioc->ic_timout * 1000 : STRTIMOUT), sigflag);
6106
6107 /*
6108 * There are four possible cases here: interrupt, timeout,
6109 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a
6110 * valid M_IOCTL reply).
6111 *
6112 * If we've been awakened by a STR_NOERROR ioctl on some other
6113 * thread, then sd_iocblk will still be NULL, and IOCWAITNE
6114 * will be set. Pretend as if we just timed out. Note that
6115 * this other thread waited at least STRTIMOUT before trying to
6116 * awaken our thread, so this is indistinguishable (even for
6117 * INFTIM) from the case where we failed with ETIME waiting on
6118 * IOCWAIT in the prior loop.
6119 */
6120 if (cv_rval > 0 && !(flag & STR_NOERROR) &&
6121 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) {
6122 cv_rval = -1;
6123 }
6124
6125 /*
6126 * note: STR_NOERROR does not protect
6127 * us here.. use ic_timout < 0
6128 */
6129 if (cv_rval <= 0) {
6130 if (cv_rval == 0) {
6131 error = EINTR;
6132 } else {
6133 error = ETIME;
6134 }
6135 /*
6136 * A message could have come in after we were scheduled
6137 * but before we were actually run.
6138 */
6139 bp = stp->sd_iocblk;
6140 stp->sd_iocblk = NULL;
6141 if (bp != NULL) {
6142 if ((bp->b_datap->db_type == M_COPYIN) ||
6143 (bp->b_datap->db_type == M_COPYOUT)) {
6144 mutex_exit(&stp->sd_lock);
6145 if (bp->b_cont) {
6146 freemsg(bp->b_cont);
6147 bp->b_cont = NULL;
6148 }
6149 bp->b_datap->db_type = M_IOCDATA;
6150 bp->b_wptr = bp->b_rptr +
6151 sizeof (struct copyresp);
6152 resp = (struct copyresp *)bp->b_rptr;
6153 resp->cp_rval =
6154 (caddr_t)1; /* failure */
6155 stream_willservice(stp);
6156 putnext(stp->sd_wrq, bp);
6157 stream_runservice(stp);
6158 mutex_enter(&stp->sd_lock);
6159 } else {
6160 freemsg(bp);
6161 }
6162 }
6163 stp->sd_flag &= ~waitflags;
6164 cv_broadcast(&stp->sd_iocmonitor);
6165 mutex_exit(&stp->sd_lock);
6166 crfree(crp);
6167 return (error);
6168 }
6169 }
6170 bp = stp->sd_iocblk;
6171 /*
6172 * Note: it is strictly impossible to get here with sd_iocblk set to
6173 * -1. This is because the initial loop above doesn't allow any new
6174 * ioctls into the fray until all others have passed this point.
6175 */
6176 ASSERT(bp != NULL && bp != (mblk_t *)-1);
6177 TRACE_1(TR_FAC_STREAMS_FR,
6178 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp);
6179 if ((bp->b_datap->db_type == M_IOCACK) ||
6180 (bp->b_datap->db_type == M_IOCNAK)) {
6181 /* for detection of duplicate ioctl replies */
6182 stp->sd_iocblk = (mblk_t *)-1;
6183 stp->sd_flag &= ~waitflags;
6184 cv_broadcast(&stp->sd_iocmonitor);
6185 mutex_exit(&stp->sd_lock);
6186 } else {
6187 /*
6188 * flags not cleared here because we're still doing
6189 * copy in/out for ioctl.
6190 */
6191 stp->sd_iocblk = NULL;
6192 mutex_exit(&stp->sd_lock);
6193 }
6194
6195
6196 /*
6197 * Have received acknowledgment.
6198 */
6199
6200 switch (bp->b_datap->db_type) {
6201 case M_IOCACK:
6202 /*
6203 * Positive ack.
6204 */
6205 iocbp = (struct iocblk *)bp->b_rptr;
6206
6207 /*
6208 * Set error if indicated.
6209 */
6210 if (iocbp->ioc_error) {
6211 error = iocbp->ioc_error;
6212 break;
6213 }
6214
6215 /*
6216 * Set return value.
6217 */
6218 *rvalp = iocbp->ioc_rval;
6219
6220 /*
6221 * Data may have been returned in ACK message (ioc_count > 0).
6222 * If so, copy it out to the user's buffer.
6223 */
6224 if (iocbp->ioc_count && !transparent) {
6225 if (error = getiocd(bp, strioc->ic_dp, copyflag))
6226 break;
6227 }
6228 if (!transparent) {
6229 if (len) /* an M_COPYOUT was used with I_STR */
6230 strioc->ic_len = len;
6231 else
6232 strioc->ic_len = (int)iocbp->ioc_count;
6233 }
6234 break;
6235
6236 case M_IOCNAK:
6237 /*
6238 * Negative ack.
6239 *
6240 * The only thing to do is set error as specified
6241 * in neg ack packet.
6242 */
6243 iocbp = (struct iocblk *)bp->b_rptr;
6244
6245 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL);
6246 break;
6247
6248 case M_COPYIN:
6249 /*
6250 * Driver or module has requested user ioctl data.
6251 */
6252 reqp = (struct copyreq *)bp->b_rptr;
6253
6254 /*
6255 * M_COPYIN should *never* have a message attached, though
6256 * it's harmless if it does -- thus, panic on a DEBUG
6257 * kernel and just free it on a non-DEBUG build.
6258 */
6259 ASSERT(bp->b_cont == NULL);
6260 if (bp->b_cont != NULL) {
6261 freemsg(bp->b_cont);
6262 bp->b_cont = NULL;
6263 }
6264
6265 error = putiocd(bp, reqp->cq_addr, flag, crp);
6266 if (error && bp->b_cont) {
6267 freemsg(bp->b_cont);
6268 bp->b_cont = NULL;
6269 }
6270
6271 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6272 bp->b_datap->db_type = M_IOCDATA;
6273
6274 mblk_setcred(bp, crp, curproc->p_pid);
6275 resp = (struct copyresp *)bp->b_rptr;
6276 resp->cp_rval = (caddr_t)(uintptr_t)error;
6277 resp->cp_flag = (fflags & FMODELS);
6278
6279 stream_willservice(stp);
6280 putnext(stp->sd_wrq, bp);
6281 stream_runservice(stp);
6282
6283 if (error) {
6284 mutex_enter(&stp->sd_lock);
6285 stp->sd_flag &= ~waitflags;
6286 cv_broadcast(&stp->sd_iocmonitor);
6287 mutex_exit(&stp->sd_lock);
6288 crfree(crp);
6289 return (error);
6290 }
6291
6292 goto waitioc;
6293
6294 case M_COPYOUT:
6295 /*
6296 * Driver or module has ioctl data for a user.
6297 */
6298 reqp = (struct copyreq *)bp->b_rptr;
6299 ASSERT(bp->b_cont != NULL);
6300
6301 /*
6302 * Always (transparent or non-transparent)
6303 * use the address specified in the request.
6304 */
6305 taddr = reqp->cq_addr;
6306 if (!transparent)
6307 len = (int)reqp->cq_size;
6308
6309 /* copyout data to the provided address */
6310 error = getiocd(bp, taddr, copyflag);
6311
6312 freemsg(bp->b_cont);
6313 bp->b_cont = NULL;
6314
6315 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6316 bp->b_datap->db_type = M_IOCDATA;
6317
6318 mblk_setcred(bp, crp, curproc->p_pid);
6319 resp = (struct copyresp *)bp->b_rptr;
6320 resp->cp_rval = (caddr_t)(uintptr_t)error;
6321 resp->cp_flag = (fflags & FMODELS);
6322
6323 stream_willservice(stp);
6324 putnext(stp->sd_wrq, bp);
6325 stream_runservice(stp);
6326
6327 if (error) {
6328 mutex_enter(&stp->sd_lock);
6329 stp->sd_flag &= ~waitflags;
6330 cv_broadcast(&stp->sd_iocmonitor);
6331 mutex_exit(&stp->sd_lock);
6332 crfree(crp);
6333 return (error);
6334 }
6335 goto waitioc;
6336
6337 default:
6338 ASSERT(0);
6339 mutex_enter(&stp->sd_lock);
6340 stp->sd_flag &= ~waitflags;
6341 cv_broadcast(&stp->sd_iocmonitor);
6342 mutex_exit(&stp->sd_lock);
6343 break;
6344 }
6345
6346 freemsg(bp);
6347 crfree(crp);
6348 return (error);
6349 }
6350
6351 /*
6352 * Send an M_CMD message downstream and wait for a reply. This is a ptools
6353 * special used to retrieve information from modules/drivers on a stream
6354 * without being subjected to flow control or interfering with pending
6355 * messages on the stream (e.g. an ioctl in flight).
6356 */
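/*
 * Illustrative sketch (not part of the original source): a kernel caller
 * might drive strdocmd() roughly as follows, assuming it already holds a
 * stream head pointer (stp) and a credential (cr); the command value is
 * hypothetical and only the struct strcmd fields used by strdocmd() are
 * assumed.
 *
 *	struct strcmd sc;
 *	int error;
 *
 *	sc.sc_cmd = SOME_MODULE_CMD;	(hypothetical command value)
 *	sc.sc_len = 0;			(no payload sent downstream)
 *	sc.sc_timeout = 15;		(seconds; 0 selects STRTIMOUT)
 *	error = strdocmd(stp, &sc, cr);
 *	if (error == 0) {
 *		(on success sc.sc_len and sc.sc_buf hold the reply payload)
 *	}
 */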
6357 int
6358 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp)
6359 {
6360 mblk_t *mp;
6361 struct cmdblk *cmdp;
6362 int error = 0;
6363 int errs = STRHUP|STRDERR|STWRERR|STPLEX;
6364 clock_t rval, timeout = STRTIMOUT;
6365
6366 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) ||
6367 scp->sc_timeout < -1)
6368 return (EINVAL);
6369
6370 if (scp->sc_timeout > 0)
6371 timeout = scp->sc_timeout * MILLISEC;
6372
6373 if ((mp = allocb_cred(sizeof (struct cmdblk), crp,
6374 curproc->p_pid)) == NULL)
6375 return (ENOMEM);
6376
6377 crhold(crp);
6378
6379 cmdp = (struct cmdblk *)mp->b_wptr;
6380 cmdp->cb_cr = crp;
6381 cmdp->cb_cmd = scp->sc_cmd;
6382 cmdp->cb_len = scp->sc_len;
6383 cmdp->cb_error = 0;
6384 mp->b_wptr += sizeof (struct cmdblk);
6385
6386 DB_TYPE(mp) = M_CMD;
6387 DB_CPID(mp) = curproc->p_pid;
6388
6389 /*
6390 * Copy in the payload.
6391 */
6392 if (cmdp->cb_len > 0) {
6393 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp,
6394 curproc->p_pid);
6395 if (mp->b_cont == NULL) {
6396 error = ENOMEM;
6397 goto out;
6398 }
6399
6400 /* cb_len comes from sc_len, which has already been checked */
6401 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf));
6402 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len);
6403 mp->b_cont->b_wptr += cmdp->cb_len;
6404 DB_CPID(mp->b_cont) = curproc->p_pid;
6405 }
6406
6407 /*
6408 * Since this mechanism is strictly for ptools, and since only one
6409 * process can be grabbed at a time, we simply fail if there's
6410 * currently an operation pending.
6411 */
6412 mutex_enter(&stp->sd_lock);
6413 if (stp->sd_flag & STRCMDWAIT) {
6414 mutex_exit(&stp->sd_lock);
6415 error = EBUSY;
6416 goto out;
6417 }
6418 stp->sd_flag |= STRCMDWAIT;
6419 ASSERT(stp->sd_cmdblk == NULL);
6420 mutex_exit(&stp->sd_lock);
6421
6422 putnext(stp->sd_wrq, mp);
6423 mp = NULL;
6424
6425 /*
6426 * Timed wait for acknowledgment. If the reply has already arrived,
6427 * don't sleep. If awakened from the sleep, fail only if the reply
6428 * has not arrived by then. Otherwise, process the reply.
6429 */
6430 mutex_enter(&stp->sd_lock);
6431 while (stp->sd_cmdblk == NULL) {
6432 if (stp->sd_flag & errs) {
6433 if ((error = strgeterr(stp, errs, 0)) != 0)
6434 goto waitout;
6435 }
6436
6437 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0);
6438 if (stp->sd_cmdblk != NULL)
6439 break;
6440
6441 if (rval <= 0) {
6442 error = (rval == 0) ? EINTR : ETIME;
6443 goto waitout;
6444 }
6445 }
6446
6447 /*
6448 * We received a reply.
6449 */
6450 mp = stp->sd_cmdblk;
6451 stp->sd_cmdblk = NULL;
6452 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD);
6453 ASSERT(stp->sd_flag & STRCMDWAIT);
6454 stp->sd_flag &= ~STRCMDWAIT;
6455 mutex_exit(&stp->sd_lock);
6456
6457 cmdp = (struct cmdblk *)mp->b_rptr;
6458 if ((error = cmdp->cb_error) != 0)
6459 goto out;
6460
6461 /*
6462 * Data may have been returned in the reply (cb_len > 0).
6463 * If so, copy it out to the user's buffer.
6464 */
6465 if (cmdp->cb_len > 0) {
6466 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) {
6467 error = EPROTO;
6468 goto out;
6469 }
6470
6471 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf));
6472 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len);
6473 }
6474 scp->sc_len = cmdp->cb_len;
6475 out:
6476 freemsg(mp);
6477 crfree(crp);
6478 return (error);
6479 waitout:
6480 ASSERT(stp->sd_cmdblk == NULL);
6481 stp->sd_flag &= ~STRCMDWAIT;
6482 mutex_exit(&stp->sd_lock);
6483 crfree(crp);
6484 return (error);
6485 }
6486
6487 /*
6488 * For the SunOS keyboard driver.
6489 * Return the next available "ioctl" sequence number.
6490 * Exported, so that streams modules can send "ioctl" messages
6491 * downstream from their open routine.
6492 */
6493 int
6494 getiocseqno(void)
6495 {
6496 int i;
6497
6498 mutex_enter(&strresources);
6499 i = ++ioc_id;
6500 mutex_exit(&strresources);
6501 return (i);
6502 }
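
/*
 * Illustrative sketch (not part of the original source): a streams module
 * open routine could use getiocseqno() to stamp an internally generated
 * M_IOCTL before sending it downstream. The queue (wq), command value and
 * credential (cr) are hypothetical; only documented mblk/iocblk fields are
 * used.
 *
 *	mblk_t *bp;
 *	struct iocblk *iocb;
 *
 *	if ((bp = allocb(sizeof (struct iocblk), BPRI_MED)) == NULL)
 *		return (ENOSR);
 *	bp->b_datap->db_type = M_IOCTL;
 *	iocb = (struct iocblk *)bp->b_wptr;
 *	bzero(iocb, sizeof (*iocb));
 *	iocb->ioc_cmd = SOME_IOCTL_CMD;	(hypothetical command value)
 *	iocb->ioc_cr = cr;
 *	iocb->ioc_id = getiocseqno();
 *	iocb->ioc_count = 0;
 *	bp->b_wptr += sizeof (struct iocblk);
 *	putnext(wq, bp);
 */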
6503
6504 /*
6505 * Get the next message from the read queue. If the message is
6506 * priority, STRPRI will have been set by strrput(). This flag
6507 * should be reset only when the entire message at the front of the
6508 * queue has been consumed.
6509 *
6510 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
6511 */
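/*
 * For reference, a hedged sketch of the user-level view: the getmsg(2)
 * system call ultimately resolves to this routine, with mctl/mdata
 * describing the control and data buffers and *flagsp selecting MSG_HIPRI,
 * MSG_BAND or MSG_ANY. The file descriptor and buffer sizes below are
 * illustrative only.
 *
 *	struct strbuf ctl, dat;
 *	char cbuf[64], dbuf[1024];
 *	int flags = 0;			(0 corresponds to MSG_ANY here)
 *
 *	ctl.maxlen = sizeof (cbuf); ctl.buf = cbuf;
 *	dat.maxlen = sizeof (dbuf); dat.buf = dbuf;
 *	if (getmsg(fd, &ctl, &dat, &flags) < 0)
 *		perror("getmsg");
 */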
6512 int
6513 strgetmsg(
6514 struct vnode *vp,
6515 struct strbuf *mctl,
6516 struct strbuf *mdata,
6517 unsigned char *prip,
6518 int *flagsp,
6519 int fmode,
6520 rval_t *rvp)
6521 {
6522 struct stdata *stp;
6523 mblk_t *bp, *nbp;
6524 mblk_t *savemp = NULL;
6525 mblk_t *savemptail = NULL;
6526 uint_t old_sd_flag;
6527 int flg;
6528 int more = 0;
6529 int error = 0;
6530 char first = 1;
6531 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
6532 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
6533 unsigned char pri = 0;
6534 queue_t *q;
6535 int pr = 0; /* Partial read successful */
6536 struct uio uios;
6537 struct uio *uiop = &uios;
6538 struct iovec iovs;
6539 unsigned char type;
6540
6541 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER,
6542 "strgetmsg:%p", vp);
6543
6544 ASSERT(vp->v_stream);
6545 stp = vp->v_stream;
6546 rvp->r_val1 = 0;
6547
6548 mutex_enter(&stp->sd_lock);
6549
6550 if ((error = i_straccess(stp, JCREAD)) != 0) {
6551 mutex_exit(&stp->sd_lock);
6552 return (error);
6553 }
6554
6555 if (stp->sd_flag & (STRDERR|STPLEX)) {
6556 error = strgeterr(stp, STRDERR|STPLEX, 0);
6557 if (error != 0) {
6558 mutex_exit(&stp->sd_lock);
6559 return (error);
6560 }
6561 }
6562 mutex_exit(&stp->sd_lock);
6563
6564 switch (*flagsp) {
6565 case MSG_HIPRI:
6566 if (*prip != 0)
6567 return (EINVAL);
6568 break;
6569
6570 case MSG_ANY:
6571 case MSG_BAND:
6572 break;
6573
6574 default:
6575 return (EINVAL);
6576 }
6577 /*
6578 * Set up the uio and iov for the data part.
6579 */
6580 iovs.iov_base = mdata->buf;
6581 iovs.iov_len = mdata->maxlen;
6582 uios.uio_iov = &iovs;
6583 uios.uio_iovcnt = 1;
6584 uios.uio_loffset = 0;
6585 uios.uio_segflg = UIO_USERSPACE;
6586 uios.uio_fmode = 0;
6587 uios.uio_extflg = UIO_COPY_CACHED;
6588 uios.uio_resid = mdata->maxlen;
6589 uios.uio_offset = 0;
6590
6591 q = _RD(stp->sd_wrq);
6592 mutex_enter(&stp->sd_lock);
6593 old_sd_flag = stp->sd_flag;
6594 mark = 0;
6595 for (;;) {
6596 int done = 0;
6597 mblk_t *q_first = q->q_first;
6598
6599 /*
6600 * Get the next message of appropriate priority
6601 * from the stream head. If the caller is interested
6602 * in band or hipri messages, then they should already
6603 * be enqueued at the stream head. On the other hand
6604 * if the caller wants normal (band 0) messages, they
6605 * might be deferred in a synchronous stream and they
6606 * will need to be pulled up.
6607 *
6608 * After we have dequeued a message, we might find that
6609 * it was a deferred M_SIG that was enqueued at the
6610 * stream head. It must now be posted as part of the
6611 * read by calling strsignal_nolock().
6612 *
6613 * Also note that strrput does not enqueue an M_PCSIG,
6614 * and there cannot be more than one hipri message,
6615 * so there was no need to have the M_PCSIG case.
6616 *
6617 * At some point it might be nice to wrap the
6618 * functionality of kstrgetmsg() and strgetmsg() into
6619 * a common routine so as to reduce the amount of replicated
6620 * code (since they are extremely similar).
6621 */
6622 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) {
6623 /* Asking for normal, band0 data */
6624 bp = strget(stp, q, uiop, first, &error);
6625 ASSERT(MUTEX_HELD(&stp->sd_lock));
6626 if (bp != NULL) {
6627 if (DB_TYPE(bp) == M_SIG) {
6628 strsignal_nolock(stp, *bp->b_rptr,
6629 bp->b_band);
6630 freemsg(bp);
6631 continue;
6632 } else {
6633 break;
6634 }
6635 }
6636 if (error != 0)
6637 goto getmout;
6638
6639 /*
6640 * We can't depend on the value of STRPRI here because
6641 * the stream head may be in transit. Therefore, we
6642 * must look at the type of the first message to
6643 * determine if a high priority message is waiting.
6644 */
6645 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL &&
6646 DB_TYPE(q_first) >= QPCTL &&
6647 (bp = getq_noenab(q, 0)) != NULL) {
6648 /* Asked for HIPRI and got one */
6649 ASSERT(DB_TYPE(bp) >= QPCTL);
6650 break;
6651 } else if ((*flagsp & MSG_BAND) && q_first != NULL &&
6652 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
6653 (bp = getq_noenab(q, 0)) != NULL) {
6654 /*
6655 * Asked for at least band "prip" and got either at
6656 * least that band or a hipri message.
6657 */
6658 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
6659 if (DB_TYPE(bp) == M_SIG) {
6660 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
6661 freemsg(bp);
6662 continue;
6663 } else {
6664 break;
6665 }
6666 }
6667
6668 /* No data. Time to sleep? */
6669 qbackenable(q, 0);
6670
6671 /*
6672 * If STRHUP or STREOF, return 0 length control and data.
6673 * If resid is 0, then a read(fd,buf,0) was done. Do not
6674 * sleep to satisfy this request because by default we have
6675 * zero bytes to return.
6676 */
6677 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 &&
6678 mdata->maxlen == 0)) {
6679 mctl->len = mdata->len = 0;
6680 *flagsp = 0;
6681 mutex_exit(&stp->sd_lock);
6682 return (0);
6683 }
6684 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT,
6685 "strgetmsg calls strwaitq:%p, %p",
6686 vp, uiop);
6687 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1,
6688 &done)) != 0) || done) {
6689 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE,
6690 "strgetmsg error or done:%p, %p",
6691 vp, uiop);
6692 mutex_exit(&stp->sd_lock);
6693 return (error);
6694 }
6695 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE,
6696 "strgetmsg awakes:%p, %p", vp, uiop);
6697 if ((error = i_straccess(stp, JCREAD)) != 0) {
6698 mutex_exit(&stp->sd_lock);
6699 return (error);
6700 }
6701 first = 0;
6702 }
6703 ASSERT(bp != NULL);
6704 /*
6705 * Extract any mark information. If the message is not completely
6706 * consumed, this information will be put in the mblk
6707 * that is put back.
6708 * If MSGMARKNEXT is set and the message is completely consumed
6709 * the STRATMARK flag will be set below. Likewise, if
6710 * MSGNOTMARKNEXT is set and the message is
6711 * completely consumed STRNOTATMARK will be set.
6712 */
6713 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
6714 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
6715 (MSGMARKNEXT|MSGNOTMARKNEXT));
6716 if (mark != 0 && bp == stp->sd_mark) {
6717 mark |= _LASTMARK;
6718 stp->sd_mark = NULL;
6719 }
6720 /*
6721 * keep track of the original message type and priority
6722 */
6723 pri = bp->b_band;
6724 type = bp->b_datap->db_type;
6725 if (type == M_PASSFP) {
6726 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
6727 stp->sd_mark = bp;
6728 bp->b_flag |= mark & ~_LASTMARK;
6729 putback(stp, q, bp, pri);
6730 qbackenable(q, pri);
6731 mutex_exit(&stp->sd_lock);
6732 return (EBADMSG);
6733 }
6734 ASSERT(type != M_SIG);
6735
6736 /*
6737 * Set this flag so strrput will not generate signals. Need to
6738 * make sure this flag is cleared before leaving this routine,
6739 * else signals will stop being sent.
6740 */
6741 stp->sd_flag |= STRGETINPROG;
6742 mutex_exit(&stp->sd_lock);
6743
6744 if (STREAM_NEEDSERVICE(stp))
6745 stream_runservice(stp);
6746
6747 /*
6748 * Set HIPRI flag if message is priority.
6749 */
6750 if (type >= QPCTL)
6751 flg = MSG_HIPRI;
6752 else
6753 flg = MSG_BAND;
6754
6755 /*
6756 * First process PROTO or PCPROTO blocks, if any.
6757 */
6758 if (mctl->maxlen >= 0 && type != M_DATA) {
6759 size_t n, bcnt;
6760 char *ubuf;
6761
6762 bcnt = mctl->maxlen;
6763 ubuf = mctl->buf;
6764 while (bp != NULL && bp->b_datap->db_type != M_DATA) {
6765 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 &&
6766 copyout(bp->b_rptr, ubuf, n)) {
6767 error = EFAULT;
6768 mutex_enter(&stp->sd_lock);
6769 /*
6770 * clear stream head pri flag based on
6771 * first message type
6772 */
6773 if (type >= QPCTL) {
6774 ASSERT(type == M_PCPROTO);
6775 stp->sd_flag &= ~STRPRI;
6776 }
6777 more = 0;
6778 freemsg(bp);
6779 goto getmout;
6780 }
6781 ubuf += n;
6782 bp->b_rptr += n;
6783 if (bp->b_rptr >= bp->b_wptr) {
6784 nbp = bp;
6785 bp = bp->b_cont;
6786 freeb(nbp);
6787 }
6788 ASSERT(n <= bcnt);
6789 bcnt -= n;
6790 if (bcnt == 0)
6791 break;
6792 }
6793 mctl->len = mctl->maxlen - bcnt;
6794 } else
6795 mctl->len = -1;
6796
6797 if (bp && bp->b_datap->db_type != M_DATA) {
6798 /*
6799 * More PROTO blocks in msg.
6800 */
6801 more |= MORECTL;
6802 savemp = bp;
6803 while (bp && bp->b_datap->db_type != M_DATA) {
6804 savemptail = bp;
6805 bp = bp->b_cont;
6806 }
6807 savemptail->b_cont = NULL;
6808 }
6809
6810 /*
6811 * Now process DATA blocks, if any.
6812 */
6813 if (mdata->maxlen >= 0 && bp) {
6814 /*
6815 * struiocopyout will consume a potential zero-length
6816 * M_DATA even if uio_resid is zero.
6817 */
6818 size_t oldresid = uiop->uio_resid;
6819
6820 bp = struiocopyout(bp, uiop, &error);
6821 if (error != 0) {
6822 mutex_enter(&stp->sd_lock);
6823 /*
6824 * clear stream head hi pri flag based on
6825 * first message
6826 */
6827 if (type >= QPCTL) {
6828 ASSERT(type == M_PCPROTO);
6829 stp->sd_flag &= ~STRPRI;
6830 }
6831 more = 0;
6832 freemsg(savemp);
6833 goto getmout;
6834 }
6835 /*
6836 * (pr == 1) indicates a partial read.
6837 */
6838 if (oldresid > uiop->uio_resid)
6839 pr = 1;
6840 mdata->len = mdata->maxlen - uiop->uio_resid;
6841 } else
6842 mdata->len = -1;
6843
6844 if (bp) { /* more data blocks in msg */
6845 more |= MOREDATA;
6846 if (savemp)
6847 savemptail->b_cont = bp;
6848 else
6849 savemp = bp;
6850 }
6851
6852 mutex_enter(&stp->sd_lock);
6853 if (savemp) {
6854 if (pr && (savemp->b_datap->db_type == M_DATA) &&
6855 msgnodata(savemp)) {
6856 /*
6857 * Avoid queuing a zero-length tail part of
6858 * a message. pr=1 indicates that we read some of
6859 * the message.
6860 */
6861 freemsg(savemp);
6862 more &= ~MOREDATA;
6863 /*
6864 * clear stream head hi pri flag based on
6865 * first message
6866 */
6867 if (type >= QPCTL) {
6868 ASSERT(type == M_PCPROTO);
6869 stp->sd_flag &= ~STRPRI;
6870 }
6871 } else {
6872 savemp->b_band = pri;
6873 /*
6874 * If the first message was HIPRI and the one we're
6875 * putting back isn't, then clear STRPRI, otherwise
6876 * set STRPRI again. Note that we must set STRPRI
6877 * again since the flush logic in strrput_nondata()
6878 * may have cleared it while we had sd_lock dropped.
6879 */
6880 if (type >= QPCTL) {
6881 ASSERT(type == M_PCPROTO);
6882 if (queclass(savemp) < QPCTL)
6883 stp->sd_flag &= ~STRPRI;
6884 else
6885 stp->sd_flag |= STRPRI;
6886 } else if (queclass(savemp) >= QPCTL) {
6887 /*
6888 * The first message was not a HIPRI message,
6889 * but the one we are about to putback is.
6890 * For simplicity, we do not allow HIPRI
6891 * messages to be embedded in the message
6892 * body, so just force it to the same type as
6893 * the first message.
6894 */
6895 ASSERT(type == M_DATA || type == M_PROTO);
6896 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
6897 savemp->b_datap->db_type = type;
6898 }
6899 if (mark != 0) {
6900 savemp->b_flag |= mark & ~_LASTMARK;
6901 if ((mark & _LASTMARK) &&
6902 (stp->sd_mark == NULL)) {
6903 /*
6904 * If another marked message arrived
6905 * while sd_lock was not held, sd_mark
6906 * would be non-NULL.
6907 */
6908 stp->sd_mark = savemp;
6909 }
6910 }
6911 putback(stp, q, savemp, pri);
6912 }
6913 } else {
6914 /*
6915 * The complete message was consumed.
6916 *
6917 * If another M_PCPROTO arrived while sd_lock was not held,
6918 * it would have been discarded since STRPRI was still set.
6919 *
6920 * Move the MSG*MARKNEXT information
6921 * to the stream head just in case
6922 * the read queue becomes empty, and
6923 * clear the stream head hi pri flag based on
6924 * the first message.
6925 *
6926 * If the stream head was at the mark
6927 * (STRATMARK) before we dropped sd_lock above
6928 * and some data was consumed, then we have
6929 * moved past the mark and thus STRATMARK is
6930 * cleared. However, if a message arrived in
6931 * strrput during the copyout above causing
6932 * STRATMARK to be set, we cannot clear that
6933 * flag.
6934 */
6935 if (type >= QPCTL) {
6936 ASSERT(type == M_PCPROTO);
6937 stp->sd_flag &= ~STRPRI;
6938 }
6939 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
6940 if (mark & MSGMARKNEXT) {
6941 stp->sd_flag &= ~STRNOTATMARK;
6942 stp->sd_flag |= STRATMARK;
6943 } else if (mark & MSGNOTMARKNEXT) {
6944 stp->sd_flag &= ~STRATMARK;
6945 stp->sd_flag |= STRNOTATMARK;
6946 } else {
6947 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
6948 }
6949 } else if (pr && (old_sd_flag & STRATMARK)) {
6950 stp->sd_flag &= ~STRATMARK;
6951 }
6952 }
6953
6954 *flagsp = flg;
6955 *prip = pri;
6956
6957 /*
6958 * Getmsg cleanup processing - if the state of the queue has changed,
6959 * some signals may need to be sent and/or poll waiters awakened.
6960 */
6961 getmout:
6962 qbackenable(q, pri);
6963
6964 /*
6965 * We dropped the stream head lock above. Send all M_SIG messages
6966 * before processing stream head for SIGPOLL messages.
6967 */
6968 ASSERT(MUTEX_HELD(&stp->sd_lock));
6969 while ((bp = q->q_first) != NULL &&
6970 (bp->b_datap->db_type == M_SIG)) {
6971 /*
6972 * sd_lock is held so the content of the read queue can not
6973 * change.
6974 */
6975 bp = getq(q);
6976 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
6977
6978 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
6979 mutex_exit(&stp->sd_lock);
6980 freemsg(bp);
6981 if (STREAM_NEEDSERVICE(stp))
6982 stream_runservice(stp);
6983 mutex_enter(&stp->sd_lock);
6984 }
6985
6986 /*
6987 * The stream head cannot change while we determine
6988 * whether or not to send a signal. Drop the flag to allow strrput
6989 * to send firstmsgsigs again.
6990 */
6991 stp->sd_flag &= ~STRGETINPROG;
6992
6993 /*
6994 * If the type of message at the front of the queue changed
6995 * due to this receive, the appropriate signals and pollwakeup events
6996 * are generated. The types of change are:
6997 * Processed a hipri message, q_first is not hipri.
6998 * Processed a band X message, and q_first is band Y.
6999 * The generated signals and pollwakeups are identical to what
7000 * strrput() generates should the message that is now on q_first
7001 * arrive to an empty read queue.
7002 *
7003 * Note: only strrput will send a signal for a hipri message.
7004 */
7005 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7006 strsigset_t signals = 0;
7007 strpollset_t pollwakeups = 0;
7008
7009 if (flg & MSG_HIPRI) {
7010 /*
7011 * Removed a hipri message. Regular data at
7012 * the front of the queue.
7013 */
7014 if (bp->b_band == 0) {
7015 signals = S_INPUT | S_RDNORM;
7016 pollwakeups = POLLIN | POLLRDNORM;
7017 } else {
7018 signals = S_INPUT | S_RDBAND;
7019 pollwakeups = POLLIN | POLLRDBAND;
7020 }
7021 } else if (pri != bp->b_band) {
7022 /*
7023 * The band is different for the new q_first.
7024 */
7025 if (bp->b_band == 0) {
7026 signals = S_RDNORM;
7027 pollwakeups = POLLIN | POLLRDNORM;
7028 } else {
7029 signals = S_RDBAND;
7030 pollwakeups = POLLIN | POLLRDBAND;
7031 }
7032 }
7033
7034 if (pollwakeups != 0) {
7035 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7036 if (!(stp->sd_rput_opt & SR_POLLIN))
7037 goto no_pollwake;
7038 stp->sd_rput_opt &= ~SR_POLLIN;
7039 }
7040 mutex_exit(&stp->sd_lock);
7041 pollwakeup(&stp->sd_pollist, pollwakeups);
7042 mutex_enter(&stp->sd_lock);
7043 }
7044 no_pollwake:
7045
7046 if (stp->sd_sigflags & signals)
7047 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7048 }
7049 mutex_exit(&stp->sd_lock);
7050
7051 rvp->r_val1 = more;
7052 return (error);
7053 #undef _LASTMARK
7054 }
7055
7056 /*
7057 * Get the next message from the read queue. If the message is
7058 * priority, STRPRI will have been set by strrput(). This flag
7059 * should be reset only when the entire message at the front of the
7060 * queue has been consumed.
7061 *
7062 * If uiop is NULL all data is returned in mctlp.
7063 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are assumed
7064 * not enabled.
7065 * The timeout parameter is in milliseconds; -1 for infinity.
7066 * This routine handles the consolidation private flags:
7067 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7068 * MSG_DELAYERROR Defer the error check until the queue is empty.
7069 * MSG_HOLDSIG Hold signals while waiting for data.
7070 * MSG_IPEEK Only peek at messages.
7071 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message
7072 * that doesn't fit.
7073 * MSG_NOMARK If the message is marked leave it on the queue.
7074 *
7075 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
7076 */
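/*
 * Illustrative sketch (not part of the original source): an in-kernel
 * consumer such as a socket implementation might call kstrgetmsg() with
 * the consolidation private flags roughly as follows; vp and uiop are
 * assumed to be set up by the caller.
 *
 *	mblk_t *mctl = NULL;
 *	unsigned char pri = 0;
 *	int flags = MSG_ANY | MSG_DELAYERROR;
 *	rval_t rval;
 *	int error;
 *
 *	error = kstrgetmsg(vp, &mctl, uiop, &pri, &flags, -1, &rval);
 *	if (error == 0 && mctl != NULL) {
 *		(process the M_PROTO/M_PCPROTO part, then freemsg(mctl))
 *	}
 */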
7077 int
7078 kstrgetmsg(
7079 struct vnode *vp,
7080 mblk_t **mctlp,
7081 struct uio *uiop,
7082 unsigned char *prip,
7083 int *flagsp,
7084 clock_t timout,
7085 rval_t *rvp)
7086 {
7087 struct stdata *stp;
7088 mblk_t *bp, *nbp;
7089 mblk_t *savemp = NULL;
7090 mblk_t *savemptail = NULL;
7091 int flags;
7092 uint_t old_sd_flag;
7093 int flg;
7094 int more = 0;
7095 int error = 0;
7096 char first = 1;
7097 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
7098 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
7099 unsigned char pri = 0;
7100 queue_t *q;
7101 int pr = 0; /* Partial read successful */
7102 unsigned char type;
7103
7104 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER,
7105 "kstrgetmsg:%p", vp);
7106
7107 ASSERT(vp->v_stream);
7108 stp = vp->v_stream;
7109 rvp->r_val1 = 0;
7110
7111 mutex_enter(&stp->sd_lock);
7112
7113 if ((error = i_straccess(stp, JCREAD)) != 0) {
7114 mutex_exit(&stp->sd_lock);
7115 return (error);
7116 }
7117
7118 flags = *flagsp;
7119 if (stp->sd_flag & (STRDERR|STPLEX)) {
7120 if ((stp->sd_flag & STPLEX) ||
7121 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) {
7122 error = strgeterr(stp, STRDERR|STPLEX,
7123 (flags & MSG_IPEEK));
7124 if (error != 0) {
7125 mutex_exit(&stp->sd_lock);
7126 return (error);
7127 }
7128 }
7129 }
7130 mutex_exit(&stp->sd_lock);
7131
7132 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) {
7133 case MSG_HIPRI:
7134 if (*prip != 0)
7135 return (EINVAL);
7136 break;
7137
7138 case MSG_ANY:
7139 case MSG_BAND:
7140 break;
7141
7142 default:
7143 return (EINVAL);
7144 }
7145
7146 retry:
7147 q = _RD(stp->sd_wrq);
7148 mutex_enter(&stp->sd_lock);
7149 old_sd_flag = stp->sd_flag;
7150 mark = 0;
7151 for (;;) {
7152 int done = 0;
7153 int waitflag;
7154 int fmode;
7155 mblk_t *q_first = q->q_first;
7156
7157 /*
7158 * This section of the code operates just like the code
7159 * in strgetmsg(). There is a comment there about what
7160 * is going on here.
7161 */
7162 if (!(flags & (MSG_HIPRI|MSG_BAND))) {
7163 /* Asking for normal, band0 data */
7164 bp = strget(stp, q, uiop, first, &error);
7165 ASSERT(MUTEX_HELD(&stp->sd_lock));
7166 if (bp != NULL) {
7167 if (DB_TYPE(bp) == M_SIG) {
7168 strsignal_nolock(stp, *bp->b_rptr,
7169 bp->b_band);
7170 freemsg(bp);
7171 continue;
7172 } else {
7173 break;
7174 }
7175 }
7176 if (error != 0) {
7177 goto getmout;
7178 }
7179 /*
7180 * We can't depend on the value of STRPRI here because
7181 * the stream head may be in transit. Therefore, we
7182 * must look at the type of the first message to
7183 * determine if a high priority message is waiting.
7184 */
7185 } else if ((flags & MSG_HIPRI) && q_first != NULL &&
7186 DB_TYPE(q_first) >= QPCTL &&
7187 (bp = getq_noenab(q, 0)) != NULL) {
7188 ASSERT(DB_TYPE(bp) >= QPCTL);
7189 break;
7190 } else if ((flags & MSG_BAND) && q_first != NULL &&
7191 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
7192 (bp = getq_noenab(q, 0)) != NULL) {
7193 /*
7194 * Asked for at least band "prip" and got either at
7195 * least that band or a hipri message.
7196 */
7197 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
7198 if (DB_TYPE(bp) == M_SIG) {
7199 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7200 freemsg(bp);
7201 continue;
7202 } else {
7203 break;
7204 }
7205 }
7206
7207 /* No data. Time to sleep? */
7208 qbackenable(q, 0);
7209
7210 /*
7211 * Delayed error notification?
7212 */
7213 if ((stp->sd_flag & (STRDERR|STPLEX)) &&
7214 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) {
7215 error = strgeterr(stp, STRDERR|STPLEX,
7216 (flags & MSG_IPEEK));
7217 if (error != 0) {
7218 mutex_exit(&stp->sd_lock);
7219 return (error);
7220 }
7221 }
7222
7223 /*
7224 * If STRHUP or STREOF, return 0 length control and data.
7225 * If a read(fd,buf,0) has been done, do not sleep, just
7226 * return.
7227 *
7228 * If mctlp == NULL and uiop == NULL, then the code will
7229 * do the strwaitq. This is an understood way of saying
7230 * sleep "polling" until a message is received.
7231 */
7232 if ((stp->sd_flag & (STRHUP|STREOF)) ||
7233 (uiop != NULL && uiop->uio_resid == 0)) {
7234 if (mctlp != NULL)
7235 *mctlp = NULL;
7236 *flagsp = 0;
7237 mutex_exit(&stp->sd_lock);
7238 return (0);
7239 }
7240
7241 waitflag = GETWAIT;
7242 if (flags &
7243 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) {
7244 if (flags & MSG_HOLDSIG)
7245 waitflag |= STR_NOSIG;
7246 if (flags & MSG_IGNERROR)
7247 waitflag |= STR_NOERROR;
7248 if (flags & MSG_IPEEK)
7249 waitflag |= STR_PEEK;
7250 if (flags & MSG_DELAYERROR)
7251 waitflag |= STR_DELAYERR;
7252 }
7253 if (uiop != NULL)
7254 fmode = uiop->uio_fmode;
7255 else
7256 fmode = 0;
7257
7258 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT,
7259 "kstrgetmsg calls strwaitq:%p, %p",
7260 vp, uiop);
7261 if (((error = strwaitq(stp, waitflag, (ssize_t)0,
7262 fmode, timout, &done))) != 0 || done) {
7263 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE,
7264 "kstrgetmsg error or done:%p, %p",
7265 vp, uiop);
7266 mutex_exit(&stp->sd_lock);
7267 return (error);
7268 }
7269 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE,
7270 "kstrgetmsg awakes:%p, %p", vp, uiop);
7271 if ((error = i_straccess(stp, JCREAD)) != 0) {
7272 mutex_exit(&stp->sd_lock);
7273 return (error);
7274 }
7275 first = 0;
7276 }
7277 ASSERT(bp != NULL);
7278 /*
7279 * Extract any mark information. If the message is not completely
7280 * consumed, this information will be put in the mblk
7281 * that is put back.
7282 * If MSGMARKNEXT is set and the message is completely consumed
7283 * the STRATMARK flag will be set below. Likewise, if
7284 * MSGNOTMARKNEXT is set and the message is
7285 * completely consumed STRNOTATMARK will be set.
7286 */
7287 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
7288 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
7289 (MSGMARKNEXT|MSGNOTMARKNEXT));
7290 pri = bp->b_band;
7291 if (mark != 0) {
7292 /*
7293 * If the caller doesn't want the mark, return.
7294 * Used to implement MSG_WAITALL in sockets.
7295 */
7296 if (flags & MSG_NOMARK) {
7297 putback(stp, q, bp, pri);
7298 qbackenable(q, pri);
7299 mutex_exit(&stp->sd_lock);
7300 return (EWOULDBLOCK);
7301 }
7302 if (bp == stp->sd_mark) {
7303 mark |= _LASTMARK;
7304 stp->sd_mark = NULL;
7305 }
7306 }
7307
7308 /*
7309 * keep track of the first message type
7310 */
7311 type = bp->b_datap->db_type;
7312
7313 if (bp->b_datap->db_type == M_PASSFP) {
7314 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7315 stp->sd_mark = bp;
7316 bp->b_flag |= mark & ~_LASTMARK;
7317 putback(stp, q, bp, pri);
7318 qbackenable(q, pri);
7319 mutex_exit(&stp->sd_lock);
7320 return (EBADMSG);
7321 }
7322 ASSERT(type != M_SIG);
7323
7324 if (flags & MSG_IPEEK) {
7325 /*
7326 * Clear any struioflag - we do the uiomove over again
7327 * when peeking since it simplifies the code.
7328 *
7329 * Dup the message and put the original back on the queue.
7330 * If dupmsg() fails, try again with copymsg() to see if
7331 * there is indeed a shortage of memory. dupmsg() may fail
7332 * if db_ref in any of the messages reaches its limit.
7333 */
7334
7335 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) {
7336 /*
7337 * Restore the state of the stream head since we
7338 * need to drop sd_lock (strwaitbuf is sleeping).
7339 */
7340 size_t size = msgdsize(bp);
7341
7342 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7343 stp->sd_mark = bp;
7344 bp->b_flag |= mark & ~_LASTMARK;
7345 putback(stp, q, bp, pri);
7346 mutex_exit(&stp->sd_lock);
7347 error = strwaitbuf(size, BPRI_HI);
7348 if (error) {
7349 /*
7350 * There is no net change to the queue thus
7351 * no need to qbackenable.
7352 */
7353 return (error);
7354 }
7355 goto retry;
7356 }
7357
7358 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7359 stp->sd_mark = bp;
7360 bp->b_flag |= mark & ~_LASTMARK;
7361 putback(stp, q, bp, pri);
7362 bp = nbp;
7363 }
7364
7365 /*
7366 * Set this flag so strrput will not generate signals. Need to
7367 * make sure this flag is cleared before leaving this routine,
7368 * else signals will stop being sent.
7369 */
7370 stp->sd_flag |= STRGETINPROG;
7371 mutex_exit(&stp->sd_lock);
7372
7373 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) {
7374 mblk_t *tmp, *prevmp;
7375
7376 /*
7377 * Put first non-data mblk back to stream head and
7378 * cut the mblk chain so sd_rputdatafunc only sees
7379 * M_DATA mblks. We can skip the first mblk since it
7380 * is M_DATA according to the condition above.
7381 */
7382 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL;
7383 prevmp = tmp, tmp = tmp->b_cont) {
7384 if (DB_TYPE(tmp) != M_DATA) {
7385 prevmp->b_cont = NULL;
7386 mutex_enter(&stp->sd_lock);
7387 putback(stp, q, tmp, tmp->b_band);
7388 mutex_exit(&stp->sd_lock);
7389 break;
7390 }
7391 }
7392
7393 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp,
7394 NULL, NULL, NULL, NULL);
7395
7396 if (bp == NULL)
7397 goto retry;
7398 }
7399
7400 if (STREAM_NEEDSERVICE(stp))
7401 stream_runservice(stp);
7402
7403 /*
7404 * Set HIPRI flag if message is priority.
7405 */
7406 if (type >= QPCTL)
7407 flg = MSG_HIPRI;
7408 else
7409 flg = MSG_BAND;
7410
7411 /*
7412 * First process PROTO or PCPROTO blocks, if any.
7413 */
7414 if (mctlp != NULL && type != M_DATA) {
7415 mblk_t *nbp;
7416
7417 *mctlp = bp;
7418 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA)
7419 bp = bp->b_cont;
7420 nbp = bp->b_cont;
7421 bp->b_cont = NULL;
7422 bp = nbp;
7423 }
7424
7425 if (bp && bp->b_datap->db_type != M_DATA) {
7426 /*
7427 * More PROTO blocks in msg. Will only happen if mctlp is NULL.
7428 */
7429 more |= MORECTL;
7430 savemp = bp;
7431 while (bp && bp->b_datap->db_type != M_DATA) {
7432 savemptail = bp;
7433 bp = bp->b_cont;
7434 }
7435 savemptail->b_cont = NULL;
7436 }
7437
7438 /*
7439 * Now process DATA blocks, if any.
7440 */
7441 if (uiop == NULL) {
7442 /* Append data to tail of mctlp */
7443
7444 if (mctlp != NULL) {
7445 mblk_t **mpp = mctlp;
7446
7447 while (*mpp != NULL)
7448 mpp = &((*mpp)->b_cont);
7449 *mpp = bp;
7450 bp = NULL;
7451 }
7452 } else if (uiop->uio_resid >= 0 && bp) {
7453 size_t oldresid = uiop->uio_resid;
7454
7455 /*
7456 * If a streams message is likely to consist
7457 * of many small mblks, it is pulled up into
7458 * one continuous chunk of memory.
7459 * The size of the first mblk may be bogus because
7460 * successive read() calls on the socket reduce
7461 * the size of this mblk until it is exhausted
7462 * and then the code walks on to the next. Thus
7463 * the size of the mblk may not be the original size
7464 * that was passed up, it's simply a remainder
7465 * and hence can be very small without any
7466 * implication that the packet is badly fragmented.
7467 * So the size of the possible second mblk is
7468 * used to spot a badly fragmented packet.
7469 * See the longer comment near the mblk_pull_len
7470 * declaration at the top of this file.
7471 */
7472
7473 if (bp->b_cont != NULL && MBLKL(bp->b_cont) < mblk_pull_len) {
7474 (void) pullupmsg(bp, -1);
7475 }
7476
7477 bp = struiocopyout(bp, uiop, &error);
7478 if (error != 0) {
7479 if (mctlp != NULL) {
7480 freemsg(*mctlp);
7481 *mctlp = NULL;
7482 } else
7483 freemsg(savemp);
7484 mutex_enter(&stp->sd_lock);
7485 /*
7486 * clear stream head hi pri flag based on
7487 * first message
7488 */
7489 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7490 ASSERT(type == M_PCPROTO);
7491 stp->sd_flag &= ~STRPRI;
7492 }
7493 more = 0;
7494 goto getmout;
7495 }
7496 /*
7497 * (pr == 1) indicates a partial read.
7498 */
7499 if (oldresid > uiop->uio_resid)
7500 pr = 1;
7501 }
7502
7503 if (bp) { /* more data blocks in msg */
7504 more |= MOREDATA;
7505 if (savemp)
7506 savemptail->b_cont = bp;
7507 else
7508 savemp = bp;
7509 }
7510
7511 mutex_enter(&stp->sd_lock);
7512 if (savemp) {
7513 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) {
7514 /*
7515 * When MSG_DISCARDTAIL is set, or
7516 * when peeking, discard any tail. When peeking this
7517 * is the tail of the dup that was copied out - the
7518 * message has already been putback on the queue.
7519 * Return MOREDATA to the caller even though the data
7520 * is discarded. This is used by sockets (to
7521 * set MSG_TRUNC).
7522 */
7523 freemsg(savemp);
7524 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7525 ASSERT(type == M_PCPROTO);
7526 stp->sd_flag &= ~STRPRI;
7527 }
7528 } else if (pr && (savemp->b_datap->db_type == M_DATA) &&
7529 msgnodata(savemp)) {
7530 /*
7531 * Avoid queuing a zero-length tail part of
7532 * a message. pr=1 indicates that we read some of
7533 * the message.
7534 */
7535 freemsg(savemp);
7536 more &= ~MOREDATA;
7537 if (type >= QPCTL) {
7538 ASSERT(type == M_PCPROTO);
7539 stp->sd_flag &= ~STRPRI;
7540 }
7541 } else {
7542 savemp->b_band = pri;
7543 /*
7544 * If the first message was HIPRI and the one we're
7545 * putting back isn't, then clear STRPRI, otherwise
7546 * set STRPRI again. Note that we must set STRPRI
7547 * again since the flush logic in strrput_nondata()
7548 * may have cleared it while we had sd_lock dropped.
7549 */
7550
7551 if (type >= QPCTL) {
7552 ASSERT(type == M_PCPROTO);
7553 if (queclass(savemp) < QPCTL)
7554 stp->sd_flag &= ~STRPRI;
7555 else
7556 stp->sd_flag |= STRPRI;
7557 } else if (queclass(savemp) >= QPCTL) {
7558 /*
7559 * The first message was not a HIPRI message,
7560 * but the one we are about to putback is.
7561 * For simplicity, we do not allow HIPRI
7562 * messages to be embedded in the message
7563 * body, so just force it to the same type as
7564 * the first message.
7565 */
7566 ASSERT(type == M_DATA || type == M_PROTO);
7567 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
7568 savemp->b_datap->db_type = type;
7569 }
7570 if (mark != 0) {
7571 if ((mark & _LASTMARK) &&
7572 (stp->sd_mark == NULL)) {
7573 /*
7574 * If another marked message arrived
7575 * while sd_lock was not held, sd_mark
7576 * would be non-NULL.
7577 */
7578 stp->sd_mark = savemp;
7579 }
7580 savemp->b_flag |= mark & ~_LASTMARK;
7581 }
7582 putback(stp, q, savemp, pri);
7583 }
7584 } else if (!(flags & MSG_IPEEK)) {
7585 /*
7586 * The complete message was consumed.
7587 *
7588 * If another M_PCPROTO arrived while sd_lock was not held,
7589 * it would have been discarded since STRPRI was still set.
7590 *
7591 * Move the MSG*MARKNEXT information
7592 * to the stream head just in case
7593 * the read queue becomes empty, and
7594 * clear the stream head hi pri flag based on
7595 * the first message.
7596 *
7597 * If the stream head was at the mark
7598 * (STRATMARK) before we dropped sd_lock above
7599 * and some data was consumed, then we have
7600 * moved past the mark and thus STRATMARK is
7601 * cleared. However, if a message arrived in
7602 * strrput during the copyout above causing
7603 * STRATMARK to be set, we cannot clear that
7604 * flag.
7605 * XXX A "perimeter" would help by single-threading strrput,
7606 * strread, strgetmsg and kstrgetmsg.
7607 */
7608 if (type >= QPCTL) {
7609 ASSERT(type == M_PCPROTO);
7610 stp->sd_flag &= ~STRPRI;
7611 }
7612 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
7613 if (mark & MSGMARKNEXT) {
7614 stp->sd_flag &= ~STRNOTATMARK;
7615 stp->sd_flag |= STRATMARK;
7616 } else if (mark & MSGNOTMARKNEXT) {
7617 stp->sd_flag &= ~STRATMARK;
7618 stp->sd_flag |= STRNOTATMARK;
7619 } else {
7620 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
7621 }
7622 } else if (pr && (old_sd_flag & STRATMARK)) {
7623 stp->sd_flag &= ~STRATMARK;
7624 }
7625 }
7626
7627 *flagsp = flg;
7628 *prip = pri;
7629
7630 /*
7631 * Getmsg cleanup processing - if the state of the queue has changed,
7632 * some signals may need to be sent and/or poll waiters awakened.
7633 */
7634 getmout:
7635 qbackenable(q, pri);
7636
7637 /*
7638 * We dropped the stream head lock above. Send all M_SIG messages
7639 * before processing stream head for SIGPOLL messages.
7640 */
7641 ASSERT(MUTEX_HELD(&stp->sd_lock));
7642 while ((bp = q->q_first) != NULL &&
7643 (bp->b_datap->db_type == M_SIG)) {
7644 /*
7645 * sd_lock is held so the content of the read queue can not
7646 * change.
7647 */
7648 bp = getq(q);
7649 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
7650
7651 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7652 mutex_exit(&stp->sd_lock);
7653 freemsg(bp);
7654 if (STREAM_NEEDSERVICE(stp))
7655 stream_runservice(stp);
7656 mutex_enter(&stp->sd_lock);
7657 }
7658
7659 /*
7660 * The stream head cannot change while we determine
7661 * whether or not to send a signal. Drop the flag to allow strrput
7662 * to send firstmsgsigs again.
7663 */
7664 stp->sd_flag &= ~STRGETINPROG;
7665
7666 /*
7667 * If the type of message at the front of the queue changed
7668 * due to this receive, the appropriate signals and pollwakeup events
7669 * are generated. The types of change are:
7670 * Processed a hipri message, q_first is not hipri.
7671 * Processed a band X message, and q_first is band Y.
7672 * The generated signals and pollwakeups are identical to what
7673 * strrput() generates should the message that is now on q_first
7674 * arrive to an empty read queue.
7675 *
7676 * Note: only strrput will send a signal for a hipri message.
7677 */
7678 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7679 strsigset_t signals = 0;
7680 strpollset_t pollwakeups = 0;
7681
7682 if (flg & MSG_HIPRI) {
7683 /*
7684 * Removed a hipri message. Regular data at
7685 * the front of the queue.
7686 */
7687 if (bp->b_band == 0) {
7688 signals = S_INPUT | S_RDNORM;
7689 pollwakeups = POLLIN | POLLRDNORM;
7690 } else {
7691 signals = S_INPUT | S_RDBAND;
7692 pollwakeups = POLLIN | POLLRDBAND;
7693 }
7694 } else if (pri != bp->b_band) {
7695 /*
7696 * The band is different for the new q_first.
7697 */
7698 if (bp->b_band == 0) {
7699 signals = S_RDNORM;
7700 pollwakeups = POLLIN | POLLRDNORM;
7701 } else {
7702 signals = S_RDBAND;
7703 pollwakeups = POLLIN | POLLRDBAND;
7704 }
7705 }
7706
7707 if (pollwakeups != 0) {
7708 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7709 if (!(stp->sd_rput_opt & SR_POLLIN))
7710 goto no_pollwake;
7711 stp->sd_rput_opt &= ~SR_POLLIN;
7712 }
7713 mutex_exit(&stp->sd_lock);
7714 pollwakeup(&stp->sd_pollist, pollwakeups);
7715 mutex_enter(&stp->sd_lock);
7716 }
7717 no_pollwake:
7718
7719 if (stp->sd_sigflags & signals)
7720 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7721 }
7722 mutex_exit(&stp->sd_lock);
7723
7724 rvp->r_val1 = more;
7725 return (error);
7726 #undef _LASTMARK
7727 }
7728
7729 /*
7730 * Put a message downstream.
7731 *
7732 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7733 */
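/*
 * For reference, a hedged sketch of the user-level view: the putmsg(2)
 * system call resolves to this routine, with mctl/mdata describing the
 * control and data parts. The descriptor and request structure below are
 * illustrative only; a len of -1 means that part is not present.
 *
 *	struct strbuf ctl, dat;
 *
 *	ctl.len = sizeof (req); ctl.buf = (char *)&req;
 *	dat.len = -1; dat.buf = NULL;	(control part only)
 *	if (putmsg(fd, &ctl, &dat, 0) < 0)
 *		perror("putmsg");
 */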
7734 int
7735 strputmsg(
7736 struct vnode *vp,
7737 struct strbuf *mctl,
7738 struct strbuf *mdata,
7739 unsigned char pri,
7740 int flag,
7741 int fmode)
7742 {
7743 struct stdata *stp;
7744 queue_t *wqp;
7745 mblk_t *mp;
7746 ssize_t msgsize;
7747 ssize_t rmin, rmax;
7748 int error;
7749 struct uio uios;
7750 struct uio *uiop = &uios;
7751 struct iovec iovs;
7752 int xpg4 = 0;
7753
7754 ASSERT(vp->v_stream);
7755 stp = vp->v_stream;
7756 wqp = stp->sd_wrq;
7757
7758 /*
7759 * If it is an XPG4 application, we need to send
7760 * SIGPIPE below.
7761 */
7762
7763 xpg4 = (flag & MSG_XPG4) ? 1 : 0;
7764 flag &= ~MSG_XPG4;
7765
7766 if (AU_AUDITING())
7767 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode);
7768
7769 mutex_enter(&stp->sd_lock);
7770
7771 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7772 mutex_exit(&stp->sd_lock);
7773 return (error);
7774 }
7775
7776 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7777 error = strwriteable(stp, B_FALSE, xpg4);
7778 if (error != 0) {
7779 mutex_exit(&stp->sd_lock);
7780 return (error);
7781 }
7782 }
7783
7784 mutex_exit(&stp->sd_lock);
7785
7786 /*
7787 * Check for legal flag value.
7788 */
7789 switch (flag) {
7790 case MSG_HIPRI:
7791 if ((mctl->len < 0) || (pri != 0))
7792 return (EINVAL);
7793 break;
7794 case MSG_BAND:
7795 break;
7796
7797 default:
7798 return (EINVAL);
7799 }
7800
7801 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN,
7802 "strputmsg in:stp %p", stp);
7803
7804 /* get these values from those cached in the stream head */
7805 rmin = stp->sd_qn_minpsz;
7806 rmax = stp->sd_qn_maxpsz;
7807
7808 /*
7809 * Make sure ctl and data sizes together fall within the
7810 * limits of the max and min receive packet sizes and do
7811 * not exceed system limit.
7812 */
7813 ASSERT((rmax >= 0) || (rmax == INFPSZ));
7814 if (rmax == 0) {
7815 return (ERANGE);
7816 }
7817 /*
7818 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
7819 * Needed to prevent partial failures in the strmakedata loop.
7820 */
7821 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
7822 rmax = stp->sd_maxblk;
7823
7824 if ((msgsize = mdata->len) < 0) {
7825 msgsize = 0;
7826 rmin = 0; /* no range check for NULL data part */
7827 }
7828 if ((msgsize < rmin) ||
7829 ((msgsize > rmax) && (rmax != INFPSZ)) ||
7830 (mctl->len > strctlsz)) {
7831 return (ERANGE);
7832 }
7833
7834 /*
7835 * Set up the uio and iov for the data part.
7836 */
7837 iovs.iov_base = mdata->buf;
7838 iovs.iov_len = msgsize;
7839 uios.uio_iov = &iovs;
7840 uios.uio_iovcnt = 1;
7841 uios.uio_loffset = 0;
7842 uios.uio_segflg = UIO_USERSPACE;
7843 uios.uio_fmode = fmode;
7844 uios.uio_extflg = UIO_COPY_DEFAULT;
7845 uios.uio_resid = msgsize;
7846 uios.uio_offset = 0;
7847
7848 /* Ignore flow control in strput for HIPRI */
7849 if (flag & MSG_HIPRI)
7850 flag |= MSG_IGNFLOW;
7851
7852 for (;;) {
7853 int done = 0;
7854
7855 /*
7856 * strput will always free the ctl mblk - even when strput
7857 * fails.
7858 */
7859 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) {
7860 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7861 "strputmsg out:stp %p out %d error %d",
7862 stp, 1, error);
7863 return (error);
7864 }
7865 /*
7866 * Verify that the whole message can be transferred by
7867 * strput.
7868 */
7869 ASSERT(stp->sd_maxblk == INFPSZ ||
7870 stp->sd_maxblk >= mdata->len);
7871
7872 msgsize = mdata->len;
7873 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
7874 mdata->len = msgsize;
7875
7876 if (error == 0)
7877 break;
7878
7879 if (error != EWOULDBLOCK)
7880 goto out;
7881
7882 mutex_enter(&stp->sd_lock);
7883 /*
7884 * Check for a missed wakeup.
7885 * Needed since strput did not hold sd_lock across
7886 * the canputnext.
7887 */
7888 if (bcanputnext(wqp, pri)) {
7889 /* Try again */
7890 mutex_exit(&stp->sd_lock);
7891 continue;
7892 }
7893 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT,
7894 "strputmsg wait:stp %p waits pri %d", stp, pri);
7895 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1,
7896 &done)) != 0) || done) {
7897 mutex_exit(&stp->sd_lock);
7898 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7899 "strputmsg out:q %p out %d error %d",
7900 stp, 0, error);
7901 return (error);
7902 }
7903 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE,
7904 "strputmsg wake:stp %p wakes", stp);
7905 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7906 mutex_exit(&stp->sd_lock);
7907 return (error);
7908 }
7909 mutex_exit(&stp->sd_lock);
7910 }
7911 out:
7912 /*
7913 * For historic reasons, applications expect EAGAIN
7914 * when a data mblk could not be allocated, so change
7915 * ENOMEM back to EAGAIN.
7916 */
7917 if (error == ENOMEM)
7918 error = EAGAIN;
7919 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7920 "strputmsg out:stp %p out %d error %d", stp, 2, error);
7921 return (error);
7922 }
7923
7924 /*
7925 * Put a message downstream.
7926 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop.
7927 * The fmode flag (NDELAY, NONBLOCK) is the OR of the flags in the uio
7928 * and the fmode parameter.
7929 *
7930 * This routine handles the consolidation private flags:
7931 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7932 * MSG_HOLDSIG Hold signals while waiting for data.
7933 * MSG_IGNFLOW Don't check streams flow control.
7934 *
7935 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7936 */
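/*
 * Illustrative sketch (not part of the original source): sockfs-style
 * callers typically send TPI control messages with MSG_IGNFLOW so that the
 * request bypasses flow control, roughly:
 *
 *	mblk_t *mp;
 *	int error;
 *
 *	mp = <allocate and fill in an M_PROTO TPI request>;
 *	error = kstrputmsg(vp, mp, NULL, 0, 0,
 *	    MSG_BAND | MSG_IGNFLOW | MSG_HOLDSIG | MSG_IGNERROR, 0);
 *
 * With a NULL uiop only the control part is sent; note that kstrputmsg()
 * always consumes mp, even on failure.
 */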
7937 int
7938 kstrputmsg(
7939 struct vnode *vp,
7940 mblk_t *mctl,
7941 struct uio *uiop,
7942 ssize_t msgsize,
7943 unsigned char pri,
7944 int flag,
7945 int fmode)
7946 {
7947 struct stdata *stp;
7948 queue_t *wqp;
7949 ssize_t rmin, rmax;
7950 int error;
7951
7952 ASSERT(vp->v_stream);
7953 stp = vp->v_stream;
7954 wqp = stp->sd_wrq;
7955 if (AU_AUDITING())
7956 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode);
7957 if (mctl == NULL)
7958 return (EINVAL);
7959
7960 mutex_enter(&stp->sd_lock);
7961
7962 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7963 mutex_exit(&stp->sd_lock);
7964 freemsg(mctl);
7965 return (error);
7966 }
7967
7968 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) {
7969 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7970 error = strwriteable(stp, B_FALSE, B_TRUE);
7971 if (error != 0) {
7972 mutex_exit(&stp->sd_lock);
7973 freemsg(mctl);
7974 return (error);
7975 }
7976 }
7977 }
7978
7979 mutex_exit(&stp->sd_lock);
7980
7981 /*
7982 * Check for legal flag value.
7983 */
7984 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) {
7985 case MSG_HIPRI:
7986 if (pri != 0) {
7987 freemsg(mctl);
7988 return (EINVAL);
7989 }
7990 break;
7991 case MSG_BAND:
7992 break;
7993 default:
7994 freemsg(mctl);
7995 return (EINVAL);
7996 }
7997
7998 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN,
7999 "kstrputmsg in:stp %p", stp);
8000
8001 /* get these values from those cached in the stream head */
8002 rmin = stp->sd_qn_minpsz;
8003 rmax = stp->sd_qn_maxpsz;
8004
8005 /*
8006 * Make sure ctl and data sizes together fall within the
8007 * limits of the max and min receive packet sizes and do
8008 * not exceed system limit.
8009 */
8010 ASSERT((rmax >= 0) || (rmax == INFPSZ));
8011 if (rmax == 0) {
8012 freemsg(mctl);
8013 return (ERANGE);
8014 }
8015 /*
8016 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
8017 * Needed to prevent partial failures in the strmakedata loop.
8018 */
8019 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
8020 rmax = stp->sd_maxblk;
8021
8022 if (uiop == NULL) {
8023 msgsize = -1;
8024 rmin = -1; /* no range check for NULL data part */
8025 } else {
8026 /* Use uio flags as well as the fmode parameter flags */
8027 fmode |= uiop->uio_fmode;
8028
8029 if ((msgsize < rmin) ||
8030 ((msgsize > rmax) && (rmax != INFPSZ))) {
8031 freemsg(mctl);
8032 return (ERANGE);
8033 }
8034 }
8035
8036 /* Ignore flow control in strput for HIPRI */
8037 if (flag & MSG_HIPRI)
8038 flag |= MSG_IGNFLOW;
8039
8040 for (;;) {
8041 int done = 0;
8042 int waitflag;
8043 mblk_t *mp;
8044
8045 /*
8046 * strput will always free the ctl mblk - even when strput
8047 * fails. If MSG_IGNFLOW is set then any error returned
8048 * will cause us to break the loop, so we don't need a copy
8049 * of the message. If MSG_IGNFLOW is not set, then we can
8050 * get hit by flow control and be forced to try again. In
8051 * this case we need to have a copy of the message. We
8052 * do this using copymsg since the message may get modified
8053 * by something below us.
8054 *
8055 * We've observed that many TPI providers do not check db_ref
8056 * on the control messages but blindly reuse them for the
8057 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more
8058 * friendly to such providers than using dupmsg. Also, note
8059 * that sockfs uses MSG_IGNFLOW for all TPI control messages.
8060 * Only data messages are subject to flow control, hence
8061 * subject to this copymsg.
8062 */
8063 if (flag & MSG_IGNFLOW) {
8064 mp = mctl;
8065 mctl = NULL;
8066 } else {
8067 do {
8068 /*
8069 * If a message has a free pointer, the message
8070 * must be dupmsg'd to maintain this pointer.
8071 * Code using this facility must be sure
8072 * that modules below will not change the
8073 * contents of the dblk without checking db_ref
8074 * first. If db_ref is > 1, then the module
8075 * needs to do a copymsg first. Otherwise,
8076 * the contents of the dblk may become
8077 * inconsistent because the freemsg/freeb below
8078 * may end up calling atomic_add_32_nv.
8079 * The atomic_add_32_nv in freeb (accessing
8080 * all of db_ref, db_type, db_flags, and
8081 * db_struioflag) does not prevent other threads
8082 * from concurrently trying to modify e.g.
8083 * db_type.
8084 */
8085 if (mctl->b_datap->db_frtnp != NULL)
8086 mp = dupmsg(mctl);
8087 else
8088 mp = copymsg(mctl);
8089
8090 if (mp != NULL)
8091 break;
8092
8093 error = strwaitbuf(msgdsize(mctl), BPRI_MED);
8094 if (error) {
8095 freemsg(mctl);
8096 return (error);
8097 }
8098 } while (mp == NULL);
8099 }
8100 /*
8101 * Verify that all of msgsize can be transferred by
8102 * strput.
8103 */
8104 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize);
8105 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
8106 if (error == 0)
8107 break;
8108
8109 if (error != EWOULDBLOCK)
8110 goto out;
8111
8112 /*
8113 * If MSG_IGNFLOW is set we should have broken out of the loop
8114 * above.
8115 */
8116 ASSERT(!(flag & MSG_IGNFLOW));
8117 mutex_enter(&stp->sd_lock);
8118 /*
8119 * Check for a missed wakeup.
8120 * Needed since strput did not hold sd_lock across
8121 * the canputnext.
8122 */
8123 if (bcanputnext(wqp, pri)) {
8124 /* Try again */
8125 mutex_exit(&stp->sd_lock);
8126 continue;
8127 }
8128 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT,
8129 "kstrputmsg wait:stp %p waits pri %d", stp, pri);
8130
8131 waitflag = WRITEWAIT;
8132 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) {
8133 if (flag & MSG_HOLDSIG)
8134 waitflag |= STR_NOSIG;
8135 if (flag & MSG_IGNERROR)
8136 waitflag |= STR_NOERROR;
8137 }
8138 if (((error = strwaitq(stp, waitflag,
8139 (ssize_t)0, fmode, -1, &done)) != 0) || done) {
8140 mutex_exit(&stp->sd_lock);
8141 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8142 "kstrputmsg out:stp %p out %d error %d",
8143 stp, 0, error);
8144 freemsg(mctl);
8145 return (error);
8146 }
8147 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE,
8148 "kstrputmsg wake:stp %p wakes", stp);
8149 if ((error = i_straccess(stp, JCWRITE)) != 0) {
8150 mutex_exit(&stp->sd_lock);
8151 freemsg(mctl);
8152 return (error);
8153 }
8154 mutex_exit(&stp->sd_lock);
8155 }
8156 out:
8157 freemsg(mctl);
8158 /*
8159 * For historic reasons, applications expect EAGAIN
8160 * when a data mblk could not be allocated, so change
8161 * ENOMEM back to EAGAIN.
8162 */
8163 if (error == ENOMEM)
8164 error = EAGAIN;
8165 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8166 "kstrputmsg out:stp %p out %d error %d", stp, 2, error);
8167 return (error);
8168 }
8169
8170 /*
8171 * Determines whether the necessary conditions are set on a stream
8172 * for it to be readable, writeable, or have exceptions.
8173 *
8174 * strpoll handles the consolidation private events:
8175 * POLLNOERR Do not return POLLERR even if there are stream
8176 * head errors.
8177 * Used by sockfs.
8178 * POLLRDDATA Do not return POLLIN unless at least one message on
8179 * the queue contains one or more M_DATA mblks. Thus
8180 * when this flag is set a queue with only
8181 * M_PROTO/M_PCPROTO mblks does not return POLLIN.
8182 * Used by sockfs to ignore T_EXDATA_IND messages.
8183 *
8184 * Note: POLLRDDATA assumes that synch streams only return messages with
8185 * an M_DATA attached (i.e. not messages consisting of only
8186 * an M_PROTO/M_PCPROTO part).
8187 */
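/*
 * Illustrative sketch (not part of the original source): a poll entry
 * point layered on a stream might invoke strpoll() roughly as follows,
 * adding the private POLLNOERR/POLLRDDATA bits when it wants the
 * sockfs-style behavior described above; events and anyyet are the
 * caller's poll arguments.
 *
 *	short revents = 0;
 *	struct pollhead *php = NULL;
 *	int error;
 *
 *	error = strpoll(vp->v_stream, events | POLLNOERR, anyyet,
 *	    &revents, &php);
 */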
8188 int
8189 strpoll(
8190 struct stdata *stp,
8191 short events_arg,
8192 int anyyet,
8193 short *reventsp,
8194 struct pollhead **phpp)
8195 {
8196 int events = (ushort_t)events_arg;
8197 int retevents = 0;
8198 mblk_t *mp;
8199 qband_t *qbp;
8200 long sd_flags = stp->sd_flag;
8201 int headlocked = 0;
8202
8203 /*
8204 * For performance, a single 'if' tests for most possible edge
8205 * conditions in one shot.
8206 */
8207 if (sd_flags & (STPLEX | STRDERR | STWRERR)) {
8208 if (sd_flags & STPLEX) {
8209 *reventsp = POLLNVAL;
8210 return (EINVAL);
8211 }
8212 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) &&
8213 (sd_flags & STRDERR)) ||
8214 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) &&
8215 (sd_flags & STWRERR))) {
8216 if (!(events & POLLNOERR)) {
8217 *reventsp = POLLERR;
8218 return (0);
8219 }
8220 }
8221 }
	if (sd_flags & STRHUP) {
		retevents |= POLLHUP;
	} else if (events & (POLLWRNORM | POLLWRBAND)) {
		queue_t *tq;
		queue_t *qp = stp->sd_wrq;

		claimstr(qp);
		/* Find next module forward that has a service procedure */
		tq = qp->q_next->q_nfsrv;
		ASSERT(tq != NULL);

		polllock(&stp->sd_pollist, QLOCK(tq));
		if (events & POLLWRNORM) {
			queue_t *sqp;

			if (tq->q_flag & QFULL)
				/* ensure backq svc procedure runs */
				tq->q_flag |= QWANTW;
			else if ((sqp = stp->sd_struiowrq) != NULL) {
				/* Check sync stream barrier write q */
				mutex_exit(QLOCK(tq));
				polllock(&stp->sd_pollist, QLOCK(sqp));
				if (sqp->q_flag & QFULL)
					/* ensure pollwakeup() is done */
					sqp->q_flag |= QWANTWSYNC;
				else
					retevents |= POLLOUT;
				/* More write events to process ??? */
				if (!(events & POLLWRBAND)) {
					mutex_exit(QLOCK(sqp));
					releasestr(qp);
					goto chkrd;
				}
				mutex_exit(QLOCK(sqp));
				polllock(&stp->sd_pollist, QLOCK(tq));
			} else
				retevents |= POLLOUT;
		}
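		/*
		 * For band data, report POLLWRBAND if at least one band
		 * is not flow controlled; full bands are flagged
		 * (QB_WANTW) so that they are back-enabled when they
		 * drain.
		 */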
		if (events & POLLWRBAND) {
			qbp = tq->q_bandp;
			if (qbp) {
				while (qbp) {
					if (qbp->qb_flag & QB_FULL)
						qbp->qb_flag |= QB_WANTW;
					else
						retevents |= POLLWRBAND;
					qbp = qbp->qb_next;
				}
			} else {
				retevents |= POLLWRBAND;
			}
		}
		mutex_exit(QLOCK(tq));
		releasestr(qp);
	}
chkrd:
	if (sd_flags & STRPRI) {
		retevents |= (events & POLLPRI);
	} else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) {
		queue_t *qp = _RD(stp->sd_wrq);
		int normevents = (events & (POLLIN | POLLRDNORM));

		/*
		 * Note: Need to do polllock() here since ps_lock may be
		 * held. See bug 4191544.
		 */
		polllock(&stp->sd_pollist, &stp->sd_lock);
		headlocked = 1;
		mp = qp->q_first;
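		/*
		 * Scan the messages queued at the stream head; the first
		 * eligible message determines whether normal or band read
		 * events are reported.
		 */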
		while (mp) {
			/*
			 * For POLLRDDATA we scan b_cont and b_next until we
			 * find an M_DATA.
			 */
			if ((events & POLLRDDATA) &&
			    mp->b_datap->db_type != M_DATA) {
				mblk_t *nmp = mp->b_cont;

				while (nmp != NULL &&
				    nmp->b_datap->db_type != M_DATA)
					nmp = nmp->b_cont;
				if (nmp == NULL) {
					mp = mp->b_next;
					continue;
				}
			}
			if (mp->b_band == 0)
				retevents |= normevents;
			else
				retevents |= (events & (POLLIN | POLLRDBAND));
			break;
		}
		if (!(retevents & normevents) &&
		    (stp->sd_wakeq & RSLEEP)) {
			/*
			 * Sync stream barrier read queue has data.
			 */
			retevents |= normevents;
		}
		/* Treat eof as normal data */
		if (sd_flags & STREOF)
			retevents |= normevents;
	}

	*reventsp = (short)retevents;
	if (retevents) {
		if (headlocked)
			mutex_exit(&stp->sd_lock);
		return (0);
	}

	/*
	 * If poll() has not found any events yet, set up event cell
	 * to wake up the poll if a requested event occurs on this
	 * stream. Check for collisions with outstanding poll requests.
	 */
	if (!anyyet) {
		*phpp = &stp->sd_pollist;
		if (headlocked == 0) {
			polllock(&stp->sd_pollist, &stp->sd_lock);
			headlocked = 1;
		}
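		/*
		 * Record that a read-side pollwakeup is wanted; see
		 * putback() below, which consults SR_POLLIN before
		 * waking pollers for ordinary data.
		 */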
		stp->sd_rput_opt |= SR_POLLIN;
	}
	if (headlocked)
		mutex_exit(&stp->sd_lock);
	return (0);
}

/*
 * The purpose of putback() is to ensure that sleeping polls/reads
 * are awakened when there are no new messages arriving at the
 * stream head and a message is placed back on the read queue.
 *
 * sd_lock must be held when messages are placed back on the stream
 * head. (getq() holds sd_lock when it removes messages from
 * the queue.)
 */

static void
putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band)
{
	mblk_t *qfirst;

	ASSERT(MUTEX_HELD(&stp->sd_lock));

	/*
	 * As a result of lock-step ordering around q_lock and sd_lock,
	 * it's possible for function calls like putnext() and
	 * canputnext() to get an inaccurate picture of how much
	 * data is really being processed at the stream head.
	 * We only consolidate with existing messages on the queue
	 * if the length of the message we want to put back is smaller
	 * than the queue hiwater mark.
	 */
	if ((stp->sd_rput_opt & SR_CONSOL_DATA) &&
	    (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) &&
	    (DB_TYPE(qfirst) == M_DATA) &&
	    ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) &&
	    ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) &&
	    (mp_cont_len(bp, NULL) < q->q_hiwat)) {
		/*
		 * We use the same logic as defined in strrput()
		 * but in reverse as we are putting back onto the
		 * queue and want to retain byte ordering.
		 * Consolidate M_DATA messages with M_DATA ONLY.
		 * strrput() allows the consolidation of M_DATA onto
		 * M_PROTO | M_PCPROTO but not the other way round.
		 *
		 * The consolidation does not take place if the message
		 * we are returning to the queue is marked with either
		 * of the marks or the delim flag or if q_first
		 * is marked with MSGMARK. The MSGMARK check is needed to
		 * handle the odd semantics of MSGMARK where essentially
		 * the whole message is to be treated as marked.
		 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first
		 * to the front of the b_cont chain.
		 */
		rmvq_noenab(q, qfirst);

		/*
		 * The first message in the b_cont list
		 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
		 * We need to handle the case where we
		 * are appending:
		 *
		 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
		 * 2) a MSGMARKNEXT to a plain message.
		 * 3) a MSGNOTMARKNEXT to a plain message
		 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
		 *    message.
		 *
		 * Thus we never append a MSGMARKNEXT or
		 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
		 */
		if (qfirst->b_flag & MSGMARKNEXT) {
			bp->b_flag |= MSGMARKNEXT;
			bp->b_flag &= ~MSGNOTMARKNEXT;
			qfirst->b_flag &= ~MSGMARKNEXT;
		} else if (qfirst->b_flag & MSGNOTMARKNEXT) {
			bp->b_flag |= MSGNOTMARKNEXT;
			qfirst->b_flag &= ~MSGNOTMARKNEXT;
		}

		linkb(bp, qfirst);
	}
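	/*
	 * Put the (possibly consolidated) message back at the front of
	 * the stream head read queue.
	 */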
	(void) putbq(q, bp);

	/*
	 * A message may have come in while sd_lock was dropped in the
	 * calling routine. If that happened and STR*ATMARK info was
	 * received, we need to move it from the stream head to q_last
	 * so that SIOCATMARK can return the proper value.
	 */
	if (stp->sd_flag & (STRATMARK | STRNOTATMARK)) {
		unsigned short *flagp = &q->q_last->b_flag;
		uint_t b_flag = (uint_t)*flagp;

		if (stp->sd_flag & STRATMARK) {
			b_flag &= ~MSGNOTMARKNEXT;
			b_flag |= MSGMARKNEXT;
			stp->sd_flag &= ~STRATMARK;
		} else {
			b_flag &= ~MSGMARKNEXT;
			b_flag |= MSGNOTMARKNEXT;
			stp->sd_flag &= ~STRNOTATMARK;
		}
		*flagp = (unsigned short)b_flag;
	}

#ifdef	DEBUG
	/*
	 * Make sure that the flags are not messed up.
	 */
	{
		mblk_t *mp;

		mp = q->q_last;
		while (mp != NULL) {
			ASSERT((mp->b_flag & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
			    (MSGMARKNEXT|MSGNOTMARKNEXT));
			mp = mp->b_cont;
		}
	}
#endif
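	/*
	 * If the message we put back is now at the front of the queue,
	 * wake any sleeping reader and issue a pollwakeup with the
	 * appropriate events: POLLPRI if a high-priority message is
	 * pending (STRPRI), otherwise POLLIN plus the normal or band
	 * read event.  Ordinary-data wakeups are skipped unless
	 * SR_POLLIN indicates a poller is waiting.
	 */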
	if (q->q_first == bp) {
		short pollevents;

		if (stp->sd_flag & RSLEEP) {
			stp->sd_flag &= ~RSLEEP;
			cv_broadcast(&q->q_wait);
		}
		if (stp->sd_flag & STRPRI) {
			pollevents = POLLPRI;
		} else {
			if (band == 0) {
				if (!(stp->sd_rput_opt & SR_POLLIN))
					return;
				stp->sd_rput_opt &= ~SR_POLLIN;
				pollevents = POLLIN | POLLRDNORM;
			} else {
				pollevents = POLLIN | POLLRDBAND;
			}
		}
		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, pollevents);
		mutex_enter(&stp->sd_lock);
	}
}

/*
 * Return the held vnode attached to the stream head of a
 * given queue.
 * It is the responsibility of the calling routine to ensure
 * that the queue does not go away (e.g. pop).
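 *
 * The vnode is returned held; callers are expected to drop the hold
 * with VN_RELE() when they are finished with it.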
 */
vnode_t *
strq2vp(queue_t *qp)
{
	vnode_t *vp;

	vp = STREAM(qp)->sd_vnode;
	ASSERT(vp != NULL);
	VN_HOLD(vp);
	return (vp);
}

/*
 * Return the stream head write queue for the given vp.
 * It is the responsibility of the calling routine to ensure
 * that the stream or vnode does not close.
 */
queue_t *
strvp2wq(vnode_t *vp)
{
	ASSERT(vp->v_stream != NULL);
	return (vp->v_stream->sd_wrq);
}

/*
 * Do a pollwakeup on the stream head.
 * It is the responsibility of the calling routine to ensure
 * that the stream or vnode does not close.
 */
void
strpollwakeup(vnode_t *vp, short event)
{
	ASSERT(vp->v_stream);
	pollwakeup(&vp->v_stream->sd_pollist, event);
}

/*
 * Mate the stream heads of two vnodes together. If the two vnodes are the
 * same, we just make the write-side point at the read-side -- otherwise,
 * we do a full mate. Only works on vnodes associated with streams that are
 * still being built and thus have only a stream head.
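 *
 * A full mate cross-links each head's write queue to the other head's
 * read queue, records the peer in sd_mate, and marks both stream heads
 * with STRMATE.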
 */
void
strmate(vnode_t *vp1, vnode_t *vp2)
{
	queue_t *wrq1 = strvp2wq(vp1);
	queue_t *wrq2 = strvp2wq(vp2);

	/*
	 * Verify that there are no modules on the stream yet. We also
	 * rely on the stream head always having a service procedure to
	 * avoid tweaking q_nfsrv.
	 */
	ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
	ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
	ASSERT(wrq2->q_qinfo->qi_srvp != NULL);

	/*
	 * If the queues are the same, just twist; otherwise do a full mate.
	 */
	if (wrq1 == wrq2) {
		wrq1->q_next = _RD(wrq1);
	} else {
		wrq1->q_next = _RD(wrq2);
		wrq2->q_next = _RD(wrq1);
		STREAM(wrq1)->sd_mate = STREAM(wrq2);
		STREAM(wrq1)->sd_flag |= STRMATE;
		STREAM(wrq2)->sd_mate = STREAM(wrq1);
		STREAM(wrq2)->sd_flag |= STRMATE;
	}
}

/*
 * XXX will go away when console is correctly fixed.
 * Clean up the console PIDs from previous I_SETSIG calls; called only
 * from cnopen(), which never calls strclean().
 */
void
str_cn_clean(struct vnode *vp)
{
	strsig_t *ssp, *pssp, *tssp;
	struct stdata *stp;
	struct pid *pidp;
	int update = 0;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	pssp = NULL;
	mutex_enter(&stp->sd_lock);
	ssp = stp->sd_siglist;
	while (ssp) {
		mutex_enter(&pidlock);
		pidp = ssp->ss_pidp;
		/*
		 * Get rid of the PID if the proc is gone.
		 */
		if (pidp->pid_prinactive) {
			tssp = ssp->ss_next;
			if (pssp)
				pssp->ss_next = tssp;
			else
				stp->sd_siglist = tssp;
			ASSERT(pidp->pid_ref <= 1);
			PID_RELE(ssp->ss_pidp);
			mutex_exit(&pidlock);
			kmem_free(ssp, sizeof (strsig_t));
			update = 1;
			ssp = tssp;
			continue;
		} else
			mutex_exit(&pidlock);
		pssp = ssp;
		ssp = ssp->ss_next;
	}
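	/*
	 * If any siglist entries were removed, recompute the aggregate
	 * mask of requested signal events from the entries that remain.
	 */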
	if (update) {
		stp->sd_sigflags = 0;
		for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
			stp->sd_sigflags |= ssp->ss_events;
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Return B_TRUE if there is data in the message, B_FALSE otherwise.
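 * Only M_DATA mblks with a non-empty payload (b_wptr > b_rptr) count
 * as data.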
 */
static boolean_t
msghasdata(mblk_t *bp)
{
	for (; bp; bp = bp->b_cont)
		if (bp->b_datap->db_type == M_DATA) {
			ASSERT(bp->b_wptr >= bp->b_rptr);
			if (bp->b_wptr > bp->b_rptr)
				return (B_TRUE);
		}
	return (B_FALSE);
}