7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/logindmux.c
+++ new/usr/src/uts/common/io/logindmux.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26
27 27 /*
28 28 * Description: logindmux.c
29 29 *
30 30 * The logindmux driver is used with login modules (like telmod/rlmod).
31 31 * This is a 1x1 cloning mux and two of these muxes are used. The lower link
32 32 * of one of the muxes receives input from net and the lower link of the
33 33 * other mux receives input from pseudo terminal subsystem.
34 34 *
35 35 * The logdmux_qexch_lock mutex manages the race between LOGDMX_IOC_QEXCHANGE,
36 36 * logdmuxunlink() and logdmuxclose(), so that the instance selected as a peer
37 37 * in LOGDMX_IOC_QEXCHANGE cannot be unlinked or closed until the qexchange
38 38 * is complete; see the inline comments in the code for details.
39 39 *
40 40 * The logdmux_peerq_lock mutex manages the race between logdmuxlwsrv() and
41 41 * logdmuxlrput() (when null'ing tmxp->peerq during LOGDMUX_UNLINK_REQ
42 42 * processing).
43 43 *
44 44 * The logdmux_minor_lock mutex serializes the growth of logdmux_minor_arena
45 45 * (the arena is grown gradually rather than allocated all at once so that
46 46 * minor numbers are recycled sooner; for simplicity it is never shrunk).
47 47 *
48 48 * The unlink operation is implemented using protocol messages that flow
49 49 * between the two logindmux peer instances. The instance processing the
50 50 * I_UNLINK ioctl will send a LOGDMUX_UNLINK_REQ protocol message to its
51 51 * peer to indicate that it wishes to unlink; the peer will process this
52 52 * message in its lrput, null its tmxp->peerq and then send a
53 53 * LOGDMUX_UNLINK_RESP protocol message in reply to indicate that the
54 54 * unlink can proceed; having received the reply in its lrput, the
55 55 * instance processing the I_UNLINK can then continue. To ensure that only
56 56 * one of the peer instances will be actively processing an I_UNLINK at
57 57 * any one time, a single structure (an unlinkinfo_t containing a mutex,
58 58 * state variable and pointer to an M_CTL mblk) is allocated during
59 59 * the processing of the LOGDMX_IOC_QEXCHANGE ioctl. The two instances, if
60 60 * trying to unlink simultaneously, will race to get control of this
61 61 * structure which contains the resources necessary to process the
62 62 * I_UNLINK. The instance that wins this race will be able to continue
63 63 * with the unlink whilst the other instance will be obliged to wait.
64 64 */
65 65
66 66 #include <sys/types.h>
67 67 #include <sys/param.h>
68 68 #include <sys/errno.h>
69 69 #include <sys/debug.h>
70 70 #include <sys/stropts.h>
71 71 #include <sys/stream.h>
72 72 #include <sys/logindmux.h>
73 73 #include <sys/logindmux_impl.h>
74 74 #include <sys/stat.h>
75 75 #include <sys/kmem.h>
76 76 #include <sys/vmem.h>
77 77 #include <sys/strsun.h>
78 78 #include <sys/sysmacros.h>
79 79 #include <sys/mkdev.h>
80 80 #include <sys/ddi.h>
81 81 #include <sys/sunddi.h>
82 82 #include <sys/modctl.h>
83 83 #include <sys/termios.h>
84 84 #include <sys/cmn_err.h>
85 85
86 86 static int logdmuxopen(queue_t *, dev_t *, int, int, cred_t *);
87 87 static int logdmuxclose(queue_t *, int, cred_t *);
88 88 static int logdmuxursrv(queue_t *);
89 89 static int logdmuxuwput(queue_t *, mblk_t *);
90 90 static int logdmuxlrput(queue_t *, mblk_t *);
91 91 static int logdmuxlrsrv(queue_t *);
92 92 static int logdmuxlwsrv(queue_t *);
93 93 static int logdmuxuwsrv(queue_t *);
94 94 static int logdmux_alloc_unlinkinfo(struct tmx *, struct tmx *);
95 95
96 96 static void logdmuxlink(queue_t *, mblk_t *);
97 97 static void logdmuxunlink(queue_t *, mblk_t *);
98 98 static void logdmux_finish_unlink(queue_t *, mblk_t *);
99 99 static void logdmux_unlink_timer(void *arg);
100 100 static void recover(queue_t *, mblk_t *, size_t);
101 101 static void flushq_dataonly(queue_t *);
102 102
/*
 * Global state; see the block comment at the top of the file for the
 * precise role of each lock.
 */
static kmutex_t logdmux_qexch_lock;	/* QEXCHANGE vs. unlink/close race */
static kmutex_t logdmux_peerq_lock;	/* peerq nulling vs. logdmuxlwsrv() */
static kmutex_t logdmux_minor_lock;	/* serializes minor arena growth */
static minor_t logdmux_maxminor = 256;	/* grown as necessary */
static vmem_t *logdmux_minor_arena;	/* minor number allocator */
static void *logdmux_statep;		/* per-instance struct tmx state */
109 109
/*
 * STREAMS module information: id, name, packet size limits and the
 * high/low water marks used for flow control.
 */
static struct module_info logdmuxm_info = {
	LOGDMX_ID,	/* mi_idnum */
	"logindmux",	/* mi_idname */
	0,		/* mi_minpsz */
	256,		/* mi_maxpsz */
	512,		/* mi_hiwat */
	256		/* mi_lowat */
};

/* Upper read side: no put routine; open/close entry points live here. */
static struct qinit logdmuxurinit = {
	NULL,		/* qi_putp */
	logdmuxursrv,	/* qi_srvp */
	logdmuxopen,	/* qi_qopen */
	logdmuxclose,	/* qi_qclose */
	NULL,		/* qi_qadmin */
	&logdmuxm_info	/* qi_minfo */
};

/* Upper write side: messages from the daemon enter via logdmuxuwput(). */
static struct qinit logdmuxuwinit = {
	logdmuxuwput,	/* qi_putp */
	logdmuxuwsrv,	/* qi_srvp */
	NULL,
	NULL,
	NULL,
	&logdmuxm_info
};

/* Lower read side: traffic from net/pty arrives via logdmuxlrput(). */
static struct qinit logdmuxlrinit = {
	logdmuxlrput,	/* qi_putp */
	logdmuxlrsrv,	/* qi_srvp */
	NULL,
	NULL,
	NULL,
	&logdmuxm_info
};

/* Lower write side: service routine only, used for back-enabling. */
static struct qinit logdmuxlwinit = {
	NULL,		/* qi_putp */
	logdmuxlwsrv,	/* qi_srvp */
	NULL,
	NULL,
	NULL,
	&logdmuxm_info
};

struct streamtab logdmuxinfo = {
	&logdmuxurinit,	/* st_rdinit */
	&logdmuxuwinit,	/* st_wrinit */
	&logdmuxlrinit,	/* st_muxrinit */
	&logdmuxlwinit	/* st_muxwinit */
};
161 161
162 162 static int logdmux_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
163 163 static int logdmux_attach(dev_info_t *, ddi_attach_cmd_t);
164 164 static int logdmux_detach(dev_info_t *, ddi_detach_cmd_t);
165 165 static dev_info_t *logdmux_dip;
166 166
167 167 DDI_DEFINE_STREAM_OPS(logdmux_ops, nulldev, nulldev, logdmux_attach,
168 168 logdmux_detach, nulldev, logdmux_info, D_MP | D_MTPERQ, &logdmuxinfo,
169 169 ddi_quiesce_not_needed);
170 170
171 171 static struct modldrv modldrv = {
172 172 &mod_driverops,
173 173 "logindmux driver",
174 174 &logdmux_ops
175 175 };
176 176
177 177 static struct modlinkage modlinkage = {
178 - MODREV_1, &modldrv, NULL
178 + MODREV_1, { &modldrv, NULL }
179 179 };
180 180
181 181 int
182 182 _init(void)
183 183 {
184 184 int ret;
185 185
186 186 mutex_init(&logdmux_peerq_lock, NULL, MUTEX_DRIVER, NULL);
187 187 mutex_init(&logdmux_qexch_lock, NULL, MUTEX_DRIVER, NULL);
188 188
189 189 if ((ret = mod_install(&modlinkage)) != 0) {
190 190 mutex_destroy(&logdmux_peerq_lock);
191 191 mutex_destroy(&logdmux_qexch_lock);
192 192 return (ret);
193 193 }
194 194
195 195 logdmux_minor_arena = vmem_create("logdmux_minor", (void *)1,
196 196 logdmux_maxminor, 1, NULL, NULL, NULL, 0,
197 197 VM_SLEEP | VMC_IDENTIFIER);
198 198 (void) ddi_soft_state_init(&logdmux_statep, sizeof (struct tmx), 1);
199 199
200 200 return (0);
201 201 }
202 202
203 203 int
204 204 _fini(void)
205 205 {
206 206 int ret;
207 207
208 208 if ((ret = mod_remove(&modlinkage)) == 0) {
209 209 mutex_destroy(&logdmux_peerq_lock);
210 210 mutex_destroy(&logdmux_qexch_lock);
211 211 ddi_soft_state_fini(&logdmux_statep);
212 212 vmem_destroy(logdmux_minor_arena);
213 213 logdmux_minor_arena = NULL;
214 214 }
215 215
216 216 return (ret);
217 217 }
218 218
/*
 * Report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
224 224
225 225 static int
226 226 logdmux_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
227 227 {
228 228 if (cmd != DDI_ATTACH)
229 229 return (DDI_FAILURE);
230 230
231 231 if (ddi_create_minor_node(devi, "logindmux", S_IFCHR, 0, DDI_PSEUDO,
232 232 CLONE_DEV) == DDI_FAILURE)
233 233 return (DDI_FAILURE);
234 234
235 235 logdmux_dip = devi;
236 236 return (DDI_SUCCESS);
237 237 }
238 238
239 239 static int
240 240 logdmux_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
241 241 {
242 242 if (cmd != DDI_DETACH)
243 243 return (DDI_FAILURE);
244 244
245 245 ddi_remove_minor_node(devi, NULL);
246 246 return (DDI_SUCCESS);
247 247 }
248 248
249 249 /* ARGSUSED */
250 250 static int
251 251 logdmux_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
252 252 {
253 253 int error;
254 254
255 255 switch (infocmd) {
256 256 case DDI_INFO_DEVT2DEVINFO:
257 257 if (logdmux_dip == NULL) {
258 258 error = DDI_FAILURE;
259 259 } else {
260 260 *result = logdmux_dip;
261 261 error = DDI_SUCCESS;
262 262 }
263 263 break;
264 264 case DDI_INFO_DEVT2INSTANCE:
265 265 *result = (void *)0;
266 266 error = DDI_SUCCESS;
267 267 break;
268 268 default:
269 269 error = DDI_FAILURE;
270 270 }
271 271 return (error);
272 272 }
273 273
/*
 * Logindmux open routine: allocate a minor number and a fresh, unlinked
 * struct tmx for this stream.  Only clone opens are supported.
 */
/*ARGSUSED*/
static int
logdmuxopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
{
	struct tmx *tmxp;
	minor_t minor, omaxminor;

	if (sflag != CLONEOPEN)
		return (EINVAL);

	mutex_enter(&logdmux_minor_lock);
	if (vmem_size(logdmux_minor_arena, VMEM_FREE) == 0) {
		/*
		 * The arena has been exhausted; grow by powers of two
		 * up to MAXMIN; bail if we've run out of minors.
		 */
		if (logdmux_maxminor == MAXMIN) {
			mutex_exit(&logdmux_minor_lock);
			return (ENOMEM);
		}

		omaxminor = logdmux_maxminor;
		logdmux_maxminor = MIN(logdmux_maxminor << 1, MAXMIN);

		/* Add only the new [omaxminor + 1, logdmux_maxminor] span. */
		(void) vmem_add(logdmux_minor_arena,
		    (void *)(uintptr_t)(omaxminor + 1),
		    logdmux_maxminor - omaxminor, VM_SLEEP);
	}
	minor = (minor_t)(uintptr_t)
	    vmem_alloc(logdmux_minor_arena, 1, VM_SLEEP);
	mutex_exit(&logdmux_minor_lock);

	if (ddi_soft_state_zalloc(logdmux_statep, minor) == DDI_FAILURE) {
		vmem_free(logdmux_minor_arena, (void *)(uintptr_t)minor, 1);
		return (ENOMEM);
	}

	/* Fresh instance: not yet I_LINKed and has no peer. */
	tmxp = ddi_get_soft_state(logdmux_statep, minor);
	tmxp->rdq = q;
	tmxp->muxq = NULL;
	tmxp->peerq = NULL;
	tmxp->unlinkinfop = NULL;
	tmxp->dev0 = minor;

	*devp = makedevice(getmajor(*devp), tmxp->dev0);
	q->q_ptr = tmxp;
	WR(q)->q_ptr = tmxp;

	qprocson(q);
	return (0);
}
328 328
/*
 * Logindmux close routine gets called when telnet connection is closed.
 * By this point the stream must already be unlinked and un-peered.
 */
/*ARGSUSED*/
static int
logdmuxclose(queue_t *q, int flag, cred_t *crp)
{
	struct tmx *tmxp = q->q_ptr;
	minor_t minor = tmxp->dev0;

	ASSERT(tmxp->muxq == NULL);
	ASSERT(tmxp->peerq == NULL);

	qprocsoff(q);
	/* Cancel outstanding bufcalls and timeouts before freeing state. */
	if (tmxp->wbufcid != 0) {
		qunbufcall(q, tmxp->wbufcid);
		tmxp->wbufcid = 0;
	}
	if (tmxp->rbufcid != 0) {
		qunbufcall(q, tmxp->rbufcid);
		tmxp->rbufcid = 0;
	}
	if (tmxp->rtimoutid != 0) {
		(void) quntimeout(q, tmxp->rtimoutid);
		tmxp->rtimoutid = 0;
	}
	if (tmxp->wtimoutid != 0) {
		(void) quntimeout(q, tmxp->wtimoutid);
		tmxp->wtimoutid = 0;
	}
	if (tmxp->utimoutid != 0) {
		(void) quntimeout(q, tmxp->utimoutid);
		tmxp->utimoutid = 0;
	}

	/*
	 * Hold logdmux_qexch_lock to prevent another thread that might be
	 * in LOGDMX_IOC_QEXCHANGE from looking up our state while we're
	 * disposing of it.
	 */
	mutex_enter(&logdmux_qexch_lock);
	ddi_soft_state_free(logdmux_statep, minor);
	vmem_free(logdmux_minor_arena, (void *)(uintptr_t)minor, 1);
	mutex_exit(&logdmux_qexch_lock);

	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;

	return (0);
}
379 379
380 380 /*
381 381 * Upper read service routine
382 382 */
383 383 static int
384 384 logdmuxursrv(queue_t *q)
385 385 {
386 386 struct tmx *tmxp = q->q_ptr;
387 387
388 388 if (tmxp->muxq != NULL)
389 389 qenable(RD(tmxp->muxq));
390 390 return (0);
391 391 }
392 392
/*
 * This routine gets called when telnet daemon sends data or ioctl messages
 * to upper mux queue.
 */
static int
logdmuxuwput(queue_t *q, mblk_t *mp)
{
	queue_t *qp;
	mblk_t *newmp;
	struct iocblk *ioc;
	minor_t minor;
	STRUCT_HANDLE(protocol_arg, protoh);
	struct tmx *tmxp, *tmxpeerp;
	int error;

	tmxp = q->q_ptr;

	switch (mp->b_datap->db_type) {

	case M_IOCTL:
		ASSERT(MBLKL(mp) == sizeof (struct iocblk));

		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {
		/*
		 * This is a special ioctl which exchanges q info
		 * of the two peers, connected to netf and ptmx.
		 */
		case LOGDMX_IOC_QEXCHANGE:
			error = miocpullup(mp,
			    SIZEOF_STRUCT(protocol_arg, ioc->ioc_flag));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			STRUCT_SET_HANDLE(protoh, ioc->ioc_flag,
			    (struct protocol_arg *)mp->b_cont->b_rptr);
#ifdef _SYSCALL32_IMPL
			if ((ioc->ioc_flag & DATAMODEL_MASK) ==
			    DATAMODEL_ILP32) {
				minor = getminor(expldev(
				    STRUCT_FGET(protoh, dev)));
			} else
#endif
			{
				minor = getminor(STRUCT_FGET(protoh, dev));
			}

			/*
			 * The second argument to ddi_get_soft_state() is
			 * interpreted as an `int', so prohibit negative
			 * values.
			 */
			if ((int)minor < 0) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}

			/*
			 * We must hold logdmux_qexch_lock while looking up
			 * the proposed peer to prevent another thread from
			 * simultaneously I_UNLINKing or closing it.
			 */
			mutex_enter(&logdmux_qexch_lock);

			/*
			 * For LOGDMX_IOC_QEXCHANGE to succeed, our peer must
			 * exist (and not be us), and both we and our peer
			 * must be I_LINKed (i.e., muxq must not be NULL) and
			 * not already have a peer.
			 */
			tmxpeerp = ddi_get_soft_state(logdmux_statep, minor);
			if (tmxpeerp == NULL || tmxpeerp == tmxp ||
			    tmxpeerp->muxq == NULL || tmxpeerp->peerq != NULL ||
			    tmxp->muxq == NULL || tmxp->peerq != NULL) {
				mutex_exit(&logdmux_qexch_lock);
				miocnak(q, mp, 0, EINVAL);
				break;
			}

			/*
			 * If `flag' is set then exchange queues and assume
			 * tmxp refers to the ptmx stream.
			 */
			if (STRUCT_FGET(protoh, flag)) {
				/*
				 * Allocate and populate the structure we
				 * need when processing an I_UNLINK ioctl.
				 * Give both logindmux instances a pointer
				 * to it from their tmx structure.
				 */
				if ((error = logdmux_alloc_unlinkinfo(
				    tmxp, tmxpeerp)) != 0) {
					mutex_exit(&logdmux_qexch_lock);
					miocnak(q, mp, 0, error);
					break;
				}
				tmxp->peerq = tmxpeerp->muxq;
				tmxpeerp->peerq = tmxp->muxq;
				tmxp->isptm = B_TRUE;
			}
			mutex_exit(&logdmux_qexch_lock);
			miocack(q, mp, 0, 0);
			break;

		case I_LINK:
			ASSERT(MBLKL(mp->b_cont) == sizeof (struct linkblk));
			logdmuxlink(q, mp);
			break;

		case I_UNLINK:
			ASSERT(MBLKL(mp->b_cont) == sizeof (struct linkblk));
			logdmuxunlink(q, mp);
			break;

		default:
			/*
			 * Pass any other ioctl down to the lower stream;
			 * fail it if we are not linked yet.
			 */
			if (tmxp->muxq == NULL) {
				miocnak(q, mp, 0, EINVAL);
				return (0);
			}
			putnext(tmxp->muxq, mp);
			break;
		}

		break;

	case M_DATA:
		/*
		 * On the network-side stream, prefix data with an
		 * M_CTL/M_CTL_MAGIC_NUMBER header so the module below
		 * (telmod/rlmod) can recognize it; recover() retries
		 * later if the allocation fails.
		 */
		if (!tmxp->isptm) {
			if ((newmp = allocb(sizeof (char), BPRI_MED)) == NULL) {
				recover(q, mp, sizeof (char));
				return (0);
			}
			newmp->b_datap->db_type = M_CTL;
			*newmp->b_wptr++ = M_CTL_MAGIC_NUMBER;
			newmp->b_cont = mp;
			mp = newmp;
		}
		/* FALLTHRU */

	case M_PROTO:
	case M_PCPROTO:
		qp = tmxp->muxq;
		if (qp == NULL) {
			merror(q, mp, EINVAL);
			return (0);
		}

		/* Honor flow control for ordinary-priority messages. */
		if (queclass(mp) < QPCTL) {
			if (q->q_first != NULL || !canputnext(qp)) {
				(void) putq(q, mp);
				return (0);
			}
		}
		putnext(qp, mp);
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW)
			flushq(q, FLUSHALL);

		if (tmxp->muxq != NULL) {
			putnext(tmxp->muxq, mp);
			return (0);
		}

		/* Not linked: loop the read side of the flush back up. */
		*mp->b_rptr &= ~FLUSHW;
		if (*mp->b_rptr & FLUSHR)
			qreply(q, mp);
		else
			freemsg(mp);
		break;

	default:
		cmn_err(CE_NOTE, "logdmuxuwput: received unexpected message"
		    " of type 0x%x", mp->b_datap->db_type);
		freemsg(mp);
	}
	return (0);
}
572 572
/*
 * Upper write service routine: drain messages deferred by logdmuxuwput()
 * once the lower stream can accept them again.
 */
static int
logdmuxuwsrv(queue_t *q)
{
	mblk_t *mp, *newmp;
	queue_t *qp;
	struct tmx *tmxp = q->q_ptr;

	while ((mp = getq(q)) != NULL) {
		switch (mp->b_datap->db_type) {
		case M_DATA:
			/* Same M_CTL header treatment as logdmuxuwput(). */
			if (!tmxp->isptm) {
				if ((newmp = allocb(sizeof (char), BPRI_MED)) ==
				    NULL) {
					recover(q, mp, sizeof (char));
					return (0);
				}
				newmp->b_datap->db_type = M_CTL;
				*newmp->b_wptr++ = M_CTL_MAGIC_NUMBER;
				newmp->b_cont = mp;
				mp = newmp;
			}
			/* FALLTHRU */

		case M_CTL:
		case M_PROTO:
			if (tmxp->muxq == NULL) {
				merror(q, mp, EIO);
				break;
			}
			qp = tmxp->muxq;
			/* Still flow-controlled: put back and stop. */
			if (!canputnext(qp)) {
				(void) putbq(q, mp);
				return (0);
			}
			putnext(qp, mp);
			break;


		default:
			cmn_err(CE_NOTE, "logdmuxuwsrv: received unexpected"
			    " message of type 0x%x", mp->b_datap->db_type);
			freemsg(mp);
		}
	}
	return (0);
}
622 622
/*
 * Logindmux lower put routine detects from which of the two lower queues
 * the data needs to be read from and writes it out to its peer queue.
 * For protocol, it detects M_CTL and sends its data to the daemon. Also,
 * for ioctl and other types of messages, it lets the daemon handle it.
 */
static int
logdmuxlrput(queue_t *q, mblk_t *mp)
{
	mblk_t *savemp;
	queue_t *qp;
	struct iocblk *ioc;
	struct tmx *tmxp = q->q_ptr;
	uchar_t flush;
	uint_t *messagep;
	unlinkinfo_t *unlinkinfop = tmxp->unlinkinfop;

	/* Unlinked or un-peered: nowhere to route, so drop the message. */
	if (tmxp->muxq == NULL || tmxp->peerq == NULL) {
		freemsg(mp);
		return (0);
	}

	/*
	 * If there's already a message on our queue and the incoming
	 * message is not of a high-priority, enqueue the message --
	 * but not if it's a logindmux protocol message.
	 */
	if ((q->q_first != NULL) && (queclass(mp) < QPCTL) &&
	    (!LOGDMUX_PROTO_MBLK(mp))) {
		(void) putq(q, mp);
		return (0);
	}

	switch (mp->b_datap->db_type) {

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/* Terminal ioctls are forwarded to the peer (pty) side. */
		case TIOCSWINSZ:
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
			qp = tmxp->peerq;
			break;

		default:
			cmn_err(CE_NOTE, "logdmuxlrput: received unexpected"
			    " request for ioctl 0x%x", ioc->ioc_cmd);

			/* NAK unrecognized ioctl's. */
			miocnak(q, mp, 0, 0);
			return (0);
		}
		break;

	case M_DATA:
	case M_HANGUP:
		qp = tmxp->peerq;
		break;

	case M_CTL:
		/*
		 * The protocol messages that flow between the peers
		 * to implement the unlink functionality are M_CTLs
		 * which have the M_IOCTL/I_UNLINK mblk of the ioctl
		 * attached via b_cont.  LOGDMUX_PROTO_MBLK() uses
		 * this to determine whether a particular M_CTL is a
		 * peer protocol message.
		 */
		if (LOGDMUX_PROTO_MBLK(mp)) {
			messagep = (uint_t *)mp->b_rptr;

			switch (*messagep) {

			case LOGDMUX_UNLINK_REQ:
				/*
				 * We've received a message from our
				 * peer indicating that it wants to
				 * unlink.  Reuse the mblk as the
				 * response and null our peerq under
				 * logdmux_peerq_lock (vs. logdmuxlwsrv).
				 */
				*messagep = LOGDMUX_UNLINK_RESP;
				qp = tmxp->peerq;

				mutex_enter(&logdmux_peerq_lock);
				tmxp->peerq = NULL;
				mutex_exit(&logdmux_peerq_lock);

				put(RD(qp), mp);
				return (0);

			case LOGDMUX_UNLINK_RESP:
				/*
				 * We've received a positive response
				 * from our peer to an earlier
				 * LOGDMUX_UNLINK_REQ that we sent.
				 * We can now carry on with the unlink.
				 */
				qp = tmxp->rdq;
				mutex_enter(&unlinkinfop->state_lock);
				ASSERT(unlinkinfop->state ==
				    LOGDMUX_UNLINK_PENDING);
				unlinkinfop->state = LOGDMUX_UNLINKED;
				mutex_exit(&unlinkinfop->state_lock);
				logdmux_finish_unlink(WR(qp), mp->b_cont);
				return (0);
			}
		}

		qp = tmxp->rdq;
		if (q->q_first != NULL || !canputnext(qp)) {
			(void) putq(q, mp);
			return (0);
		}
		/* Strip the M_CTL_MAGIC_NUMBER header before passing up. */
		if ((MBLKL(mp) == 1) && (*mp->b_rptr == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(qp, mp);
		return (0);

	case M_IOCACK:
	case M_IOCNAK:
	case M_PROTO:
	case M_PCPROTO:
	case M_PCSIG:
	case M_SETOPTS:
		qp = tmxp->rdq;
		break;

	case M_ERROR:
		if (tmxp->isptm) {
			/*
			 * This error is from ptm.  We could tell TCP to
			 * shutdown the connection, but it's easier to just
			 * wait for the daemon to get SIGCHLD and close from
			 * above.
			 */
			freemsg(mp);
			return (0);
		}
		/*
		 * This is from TCP.  Don't really know why we'd
		 * get this, but we have a pretty good idea what
		 * to do:  Send M_HANGUP to the pty.
		 */
		mp->b_datap->db_type = M_HANGUP;
		mp->b_wptr = mp->b_rptr;
		qp = tmxp->peerq;
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHR)
			flushq_dataonly(q);

		if (mp->b_flag & MSGMARK) {
			/*
			 * This M_FLUSH has been marked by the module
			 * below as intended for the upper queue,
			 * not the peer queue.
			 */
			qp = tmxp->rdq;
			mp->b_flag &= ~MSGMARK;
		} else {
			/*
			 * Wrap this M_FLUSH through the mux.
			 * The FLUSHR and FLUSHW bits must be
			 * reversed.
			 */
			qp = tmxp->peerq;
			flush = *mp->b_rptr;
			*mp->b_rptr &= ~(FLUSHR | FLUSHW);
			if (flush & FLUSHW)
				*mp->b_rptr |= FLUSHR;
			if (flush & FLUSHR)
				*mp->b_rptr |= FLUSHW;
		}
		break;

	case M_START:
	case M_STOP:
	case M_STARTI:
	case M_STOPI:
		/* Start/stop signals are meaningless here; discard. */
		freemsg(mp);
		return (0);

	default:
		cmn_err(CE_NOTE, "logdmuxlrput: received unexpected "
		    "message of type 0x%x", mp->b_datap->db_type);
		freemsg(mp);
		return (0);
	}
	/* Honor flow control for ordinary-priority messages. */
	if (queclass(mp) < QPCTL) {
		if (q->q_first != NULL || !canputnext(qp)) {
			(void) putq(q, mp);
			return (0);
		}
	}
	putnext(qp, mp);
	return (0);
}
830 830
/*
 * Lower read service routine: drain messages deferred by logdmuxlrput(),
 * applying the same routing rules.
 */
static int
logdmuxlrsrv(queue_t *q)
{
	mblk_t *mp, *savemp;
	queue_t *qp;
	struct iocblk *ioc;
	struct tmx *tmxp = q->q_ptr;

	while ((mp = getq(q)) != NULL) {
		/* Unlinked or un-peered while queued: drop. */
		if (tmxp->muxq == NULL || tmxp->peerq == NULL) {
			freemsg(mp);
			continue;
		}

		switch (mp->b_datap->db_type) {

		case M_IOCTL:
			ioc = (struct iocblk *)mp->b_rptr;

			switch (ioc->ioc_cmd) {

			/* Terminal ioctls go to the peer (pty) side. */
			case TIOCSWINSZ:
			case TCSETAF:
			case TCSETSF:
			case TCSETA:
			case TCSETAW:
			case TCSETS:
			case TCSETSW:
			case TCSBRK:
			case TIOCSTI:
				qp = tmxp->peerq;
				break;

			default:
				cmn_err(CE_NOTE, "logdmuxlrsrv: received "
				    "unexpected request for ioctl 0x%x",
				    ioc->ioc_cmd);

				/* NAK unrecognized ioctl's. */
				miocnak(q, mp, 0, 0);
				continue;
			}
			break;

		case M_DATA:
		case M_HANGUP:
			qp = tmxp->peerq;
			break;

		case M_CTL:
			qp = tmxp->rdq;
			if (!canputnext(qp)) {
				(void) putbq(q, mp);
				return (0);
			}
			/* Strip the M_CTL_MAGIC_NUMBER header. */
			if (MBLKL(mp) == 1 &&
			    (*mp->b_rptr == M_CTL_MAGIC_NUMBER)) {
				savemp = mp->b_cont;
				freeb(mp);
				mp = savemp;
			}
			putnext(qp, mp);
			continue;

		case M_PROTO:
		case M_SETOPTS:
			qp = tmxp->rdq;
			break;

		default:
			cmn_err(CE_NOTE, "logdmuxlrsrv: received unexpected "
			    "message of type 0x%x", mp->b_datap->db_type);
			freemsg(mp);
			continue;
		}
		/* High-priority messages are never queued here. */
		ASSERT(queclass(mp) < QPCTL);
		if (!canputnext(qp)) {
			(void) putbq(q, mp);
			return (0);
		}
		putnext(qp, mp);
	}
	return (0);
}
918 918
919 919 /*
920 920 * Lower side write service procedure. No messages are ever placed on
921 921 * the write queue here, this just back-enables all of the upper side
922 922 * write service procedures.
923 923 */
924 924 static int
925 925 logdmuxlwsrv(queue_t *q)
926 926 {
927 927 struct tmx *tmxp = q->q_ptr;
928 928
929 929 /*
930 930 * Qenable upper write queue and find out which lower
931 931 * queue needs to be restarted with flow control.
932 932 * Qenable the peer queue so canputnext will
933 933 * succeed on next call to logdmuxlrput.
934 934 */
935 935 qenable(WR(tmxp->rdq));
936 936
937 937 mutex_enter(&logdmux_peerq_lock);
938 938 if (tmxp->peerq != NULL)
939 939 qenable(RD(tmxp->peerq));
940 940 mutex_exit(&logdmux_peerq_lock);
941 941
942 942 return (0);
943 943 }
944 944
945 945 /*
946 946 * This routine does I_LINK operation.
947 947 */
948 948 static void
949 949 logdmuxlink(queue_t *q, mblk_t *mp)
950 950 {
951 951 struct tmx *tmxp = q->q_ptr;
952 952 struct linkblk *lp = (struct linkblk *)mp->b_cont->b_rptr;
953 953
954 954 /*
955 955 * Fail if we're already linked.
956 956 */
957 957 if (tmxp->muxq != NULL) {
958 958 miocnak(q, mp, 0, EINVAL);
959 959 return;
960 960 }
961 961
962 962 tmxp->muxq = lp->l_qbot;
963 963 tmxp->muxq->q_ptr = tmxp;
964 964 RD(tmxp->muxq)->q_ptr = tmxp;
965 965
966 966 miocack(q, mp, 0, 0);
967 967 }
968 968
/*
 * logdmuxunlink() is called from logdmuxuwput() and is the first of two
 * functions which process an I_UNLINK ioctl. logdmuxunlink() will determine
 * the state of logindmux peer linkage and, based on this, control when the
 * second function, logdmux_finish_unlink(), is called.  It's
 * logdmux_finish_unlink() that's sending the M_IOCACK upstream and
 * resetting the link state.
 */
static void
logdmuxunlink(queue_t *q, mblk_t *mp)
{
	struct tmx *tmxp = q->q_ptr;
	unlinkinfo_t *unlinkinfop;

	/*
	 * If we don't have a peer, just unlink.  Note that this check needs
	 * to be done under logdmux_qexch_lock to prevent racing with
	 * LOGDMX_IOC_QEXCHANGE, and we *must* set muxq to NULL prior to
	 * releasing the lock so that LOGDMX_IOC_QEXCHANGE will not consider
	 * us as a possible peer anymore (if it already considers us to be a
	 * peer, then unlinkinfop will not be NULL) -- NULLing muxq precludes
	 * use of logdmux_finish_unlink() here.
	 */
	mutex_enter(&logdmux_qexch_lock);
	unlinkinfop = tmxp->unlinkinfop;
	if (unlinkinfop == NULL) {
		ASSERT(tmxp->peerq == NULL);
		tmxp->muxq = NULL;
		mutex_exit(&logdmux_qexch_lock);
		miocack(q, mp, 0, 0);
		return;
	}
	mutex_exit(&logdmux_qexch_lock);

	mutex_enter(&unlinkinfop->state_lock);

	switch (unlinkinfop->state) {

	case LOGDMUX_LINKED:
		/*
		 * We're the first instance to process an I_UNLINK --
		 * ie, the peer instance is still there.  We'll change
		 * the state so that only one instance is executing an
		 * I_UNLINK at any one time.
		 */
		unlinkinfop->state = LOGDMUX_UNLINK_PENDING;
		mutex_exit(&unlinkinfop->state_lock);
		/*
		 * Attach the original M_IOCTL message to a
		 * LOGDMUX_UNLINK_REQ message and send it to our peer to
		 * tell it to unlink from us.  When it has completed the
		 * task, it will send us a LOGDMUX_UNLINK_RESP message
		 * with the original M_IOCTL still attached, which will be
		 * processed in our logdmuxlrput().  At that point, we will
		 * call logdmux_finish_unlink() to complete the unlink
		 * operation using the attached M_IOCTL.
		 */
		unlinkinfop->prot_mp->b_cont = mp;
		/*
		 * Put the M_CTL directly to the peer's lower RQ.
		 */
		put(RD(tmxp->peerq), unlinkinfop->prot_mp);
		break;

	case LOGDMUX_UNLINK_PENDING:
		mutex_exit(&unlinkinfop->state_lock);
		/*
		 * Our peer is actively processing an I_UNLINK itself.
		 * We have to wait for the peer to complete and we use
		 * qtimeout as a way to poll for its completion.
		 * We save a reference to our mblk so that we can send
		 * it upstream once our peer is done.
		 */
		tmxp->unlink_mp = mp;
		tmxp->utimoutid = qtimeout(q, logdmux_unlink_timer, q,
		    drv_usectohz(LOGDMUX_POLL_WAIT));
		break;

	case LOGDMUX_UNLINKED:
		/*
		 * Our peer is no longer linked so we can proceed.  The
		 * shared unlinkinfo_t is no longer needed: tear it down
		 * and finish our half of the unlink.
		 */
		mutex_exit(&unlinkinfop->state_lock);
		mutex_destroy(&unlinkinfop->state_lock);
		freeb(unlinkinfop->prot_mp);
		kmem_free(unlinkinfop, sizeof (unlinkinfo_t));
		logdmux_finish_unlink(q, mp);
		break;

	default:
		mutex_exit(&unlinkinfop->state_lock);
		cmn_err(CE_PANIC,
		    "logdmuxunlink: peer linkage is in an unrecognized state");
		break;
	}
}
1065 1065
1066 1066 /*
1067 1067 * Finish the unlink operation. Note that no locks should be held since
1068 1068 * this routine calls into other queues.
1069 1069 */
1070 1070 static void
1071 1071 logdmux_finish_unlink(queue_t *q, mblk_t *unlink_mp)
1072 1072 {
1073 1073 struct tmx *tmxp = q->q_ptr;
1074 1074 mblk_t *mp;
1075 1075
1076 1076 /*
1077 1077 * Flush any write side data downstream.
1078 1078 */
1079 1079 while ((mp = getq(WR(q))) != NULL)
1080 1080 putnext(tmxp->muxq, mp);
1081 1081
1082 1082 /*
1083 1083 * Note that we do not NULL out q_ptr since another thread (e.g., a
1084 1084 * STREAMS service thread) might call logdmuxlrput() between the time
1085 1085 * we exit the logindmux perimeter and the time the STREAMS framework
1086 1086 * resets q_ptr to stdata (since muxq is set to NULL, any messages
1087 1087 * will just be discarded).
1088 1088 */
1089 1089 tmxp->muxq = NULL;
1090 1090 tmxp->unlinkinfop = NULL;
1091 1091 tmxp->peerq = NULL;
1092 1092 miocack(q, unlink_mp, 0, 0);
1093 1093 }
1094 1094
1095 1095 /*
1096 1096 * logdmux_unlink_timer() is executed by qtimeout(). This function will
1097 1097 * check unlinkinfop->state to determine whether the peer has completed
1098 1098 * its I_UNLINK. If it hasn't, we use qtimeout() to initiate another poll.
1099 1099 */
1100 1100 static void
1101 1101 logdmux_unlink_timer(void *arg)
1102 1102 {
1103 1103 queue_t *q = arg;
1104 1104 struct tmx *tmxp = q->q_ptr;
1105 1105 unlinkinfo_t *unlinkinfop = tmxp->unlinkinfop;
1106 1106
1107 1107 tmxp->utimoutid = 0;
1108 1108
1109 1109 mutex_enter(&unlinkinfop->state_lock);
1110 1110
1111 1111 if (unlinkinfop->state != LOGDMUX_UNLINKED) {
1112 1112 ASSERT(unlinkinfop->state == LOGDMUX_UNLINK_PENDING);
1113 1113 mutex_exit(&unlinkinfop->state_lock);
1114 1114 /*
1115 1115 * We need to wait longer for our peer to complete.
1116 1116 */
1117 1117 tmxp->utimoutid = qtimeout(q, logdmux_unlink_timer, q,
1118 1118 drv_usectohz(LOGDMUX_POLL_WAIT));
1119 1119 } else {
1120 1120 /*
1121 1121 * Our peer is no longer linked so we can proceed with
1122 1122 * the cleanup.
1123 1123 */
1124 1124 mutex_exit(&unlinkinfop->state_lock);
1125 1125 mutex_destroy(&unlinkinfop->state_lock);
1126 1126 freeb(unlinkinfop->prot_mp);
1127 1127 kmem_free(unlinkinfop, sizeof (unlinkinfo_t));
1128 1128 logdmux_finish_unlink(q, tmxp->unlink_mp);
1129 1129 }
1130 1130 }
1131 1131
1132 1132 static void
1133 1133 logdmux_timer(void *arg)
1134 1134 {
1135 1135 queue_t *q = arg;
1136 1136 struct tmx *tmxp = q->q_ptr;
1137 1137
1138 1138 ASSERT(tmxp != NULL);
1139 1139
1140 1140 if (q->q_flag & QREADR) {
1141 1141 ASSERT(tmxp->rtimoutid != 0);
1142 1142 tmxp->rtimoutid = 0;
1143 1143 } else {
1144 1144 ASSERT(tmxp->wtimoutid != 0);
1145 1145 tmxp->wtimoutid = 0;
1146 1146 }
1147 1147 enableok(q);
1148 1148 qenable(q);
1149 1149 }
1150 1150
1151 1151 static void
1152 1152 logdmux_buffer(void *arg)
1153 1153 {
1154 1154 queue_t *q = arg;
1155 1155 struct tmx *tmxp = q->q_ptr;
1156 1156
1157 1157 ASSERT(tmxp != NULL);
1158 1158
1159 1159 if (q->q_flag & QREADR) {
1160 1160 ASSERT(tmxp->rbufcid != 0);
1161 1161 tmxp->rbufcid = 0;
1162 1162 } else {
1163 1163 ASSERT(tmxp->wbufcid != 0);
1164 1164 tmxp->wbufcid = 0;
1165 1165 }
1166 1166 enableok(q);
1167 1167 qenable(q);
1168 1168 }
1169 1169
1170 1170 static void
1171 1171 recover(queue_t *q, mblk_t *mp, size_t size)
1172 1172 {
1173 1173 timeout_id_t tid;
1174 1174 bufcall_id_t bid;
1175 1175 struct tmx *tmxp = q->q_ptr;
1176 1176
1177 1177 /*
1178 1178 * Avoid re-enabling the queue.
1179 1179 */
1180 1180 ASSERT(queclass(mp) < QPCTL);
1181 1181 ASSERT(WR(q)->q_next == NULL); /* Called from upper queue only */
1182 1182 noenable(q);
1183 1183 (void) putbq(q, mp);
1184 1184
1185 1185 /*
1186 1186 * Make sure there is at most one outstanding request per queue.
1187 1187 */
1188 1188 if (q->q_flag & QREADR) {
1189 1189 if (tmxp->rtimoutid != 0 || tmxp->rbufcid != 0)
1190 1190 return;
1191 1191 } else {
1192 1192 if (tmxp->wtimoutid != 0 || tmxp->wbufcid != 0)
1193 1193 return;
1194 1194 }
1195 1195 if (!(bid = qbufcall(RD(q), size, BPRI_MED, logdmux_buffer, q))) {
1196 1196 tid = qtimeout(RD(q), logdmux_timer, q, drv_usectohz(SIMWAIT));
1197 1197 if (q->q_flag & QREADR)
1198 1198 tmxp->rtimoutid = tid;
1199 1199 else
1200 1200 tmxp->wtimoutid = tid;
1201 1201 } else {
1202 1202 if (q->q_flag & QREADR)
1203 1203 tmxp->rbufcid = bid;
1204 1204 else
1205 1205 tmxp->wbufcid = bid;
1206 1206 }
1207 1207 }
1208 1208
1209 1209 static void
1210 1210 flushq_dataonly(queue_t *q)
1211 1211 {
1212 1212 mblk_t *mp, *nmp;
1213 1213
1214 1214 /*
1215 1215 * Since we are already in the perimeter, and we are not a put-shared
1216 1216 * perimeter, we don't need to freeze the stream or anything to
1217 1217 * be ensured of exclusivity.
1218 1218 */
1219 1219 mp = q->q_first;
1220 1220 while (mp != NULL) {
1221 1221 if (mp->b_datap->db_type == M_DATA) {
1222 1222 nmp = mp->b_next;
1223 1223 rmvq(q, mp);
1224 1224 freemsg(mp);
1225 1225 mp = nmp;
1226 1226 } else {
1227 1227 mp = mp->b_next;
1228 1228 }
1229 1229 }
1230 1230 }
1231 1231
1232 1232 /*
1233 1233 * logdmux_alloc_unlinkinfo() is called from logdmuxuwput() during the
1234 1234 * processing of a LOGDMX_IOC_QEXCHANGE ioctl() to allocate the
1235 1235 * unlinkinfo_t which is needed during the processing of an I_UNLINK.
1236 1236 */
1237 1237 static int
1238 1238 logdmux_alloc_unlinkinfo(struct tmx *t0, struct tmx *t1)
1239 1239 {
1240 1240 unlinkinfo_t *p;
1241 1241 uint_t *messagep;
1242 1242
1243 1243 if ((p = kmem_zalloc(sizeof (unlinkinfo_t), KM_NOSLEEP)) == NULL)
1244 1244 return (ENOSR);
1245 1245
1246 1246 if ((p->prot_mp = allocb(sizeof (uint_t), BPRI_MED)) == NULL) {
1247 1247 kmem_free(p, sizeof (unlinkinfo_t));
1248 1248 return (ENOSR);
1249 1249 }
1250 1250
1251 1251 DB_TYPE(p->prot_mp) = M_CTL;
1252 1252 messagep = (uint_t *)p->prot_mp->b_wptr;
1253 1253 *messagep = LOGDMUX_UNLINK_REQ;
1254 1254 p->prot_mp->b_wptr += sizeof (*messagep);
1255 1255 p->state = LOGDMUX_LINKED;
1256 1256 mutex_init(&p->state_lock, NULL, MUTEX_DRIVER, NULL);
1257 1257
1258 1258 t0->unlinkinfop = t1->unlinkinfop = p;
1259 1259
1260 1260 return (0);
1261 1261 }
↓ open down ↓ |
1073 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX