7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/timod.c
+++ new/usr/src/uts/common/io/timod.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28
29 29 /*
30 30 * Transport Interface Library cooperating module - issue 2
31 31 */
32 32
33 33 #include <sys/param.h>
34 34 #include <sys/types.h>
35 35 #include <sys/stream.h>
36 36 #include <sys/stropts.h>
37 37 #include <sys/strsubr.h>
38 38 #define _SUN_TPI_VERSION 2
39 39 #include <sys/tihdr.h>
40 40 #include <sys/timod.h>
41 41 #include <sys/suntpi.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/strlog.h>
44 44 #include <sys/errno.h>
45 45 #include <sys/cred.h>
46 46 #include <sys/cmn_err.h>
47 47 #include <sys/kmem.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/ddi.h>
50 50 #include <sys/sunddi.h>
51 51 #include <sys/strsun.h>
52 52 #include <c2/audit.h>
53 53
54 54 /*
55 55 * This is the loadable module wrapper.
56 56 */
57 57 #include <sys/conf.h>
58 58 #include <sys/modctl.h>
59 59
60 60 static struct streamtab timinfo;
61 61
62 62 static struct fmodsw fsw = {
63 63 "timod",
64 64 &timinfo,
65 65 D_MTQPAIR | D_MP,
66 66 };
67 67
68 68 /*
69 69 * Module linkage information for the kernel.
70 70 */
71 71
72 72 static struct modlstrmod modlstrmod = {
73 73 &mod_strmodops, "transport interface str mod", &fsw
74 74 };
75 75
76 76 static struct modlinkage modlinkage = {
77 - MODREV_1, &modlstrmod, NULL
77 + MODREV_1, { &modlstrmod, NULL }
78 78 };
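Note on the change above: in illumos, struct modlinkage's second member is an array of linkage pointers (ml_linkage[] in <sys/modctl.h>), so a fully braced initializer needs an inner brace pair around the pointer list; that is what lets 7127 drop -Wno-missing-braces from Makefile.uts. A minimal sketch of the pattern (the struct here is a stand-in with an illustrative array bound, not the real definition):

	#include <stddef.h>

	/* Stand-in for struct modlinkage: the second member is an array. */
	struct modlinkage_like {
		int	ml_rev;
		void	*ml_linkage[4];		/* illustrative bound */
	};

	/* { 1, &modlstrmod, NULL } would draw -Wmissing-braces; the   */
	/* inner braces initialize the array member explicitly.        */
	static struct modlinkage_like modlinkage_sketch = {
		1, { NULL, NULL }
	};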
79 79
80 80 static krwlock_t tim_list_rwlock;
81 81
82 82 /*
  83  83 * This module keeps track of the capabilities of the underlying transport. Information
  84  84 * is persistent across module invocations (open/close). Currently it remembers
  85  85 * whether the underlying transport supports the TI_GET{MY,PEER}NAME ioctls and the
  86  86 * T_CAPABILITY_REQ message. This module either passes ioctls/messages to the
  87  87 * transport or emulates them when the transport doesn't understand these
  88  88 * ioctls/messages.
89 89 *
  90  90 * It is assumed that the transport supports T_CAPABILITY_REQ when timod receives
  91  91 * T_CAPABILITY_ACK from the transport. There is no current standard describing
  92  92 * transport behaviour when it receives an unknown message type, so the following
93 93 * reactions are expected and handled:
94 94 *
95 95 * 1) Transport drops unknown T_CAPABILITY_REQ message type. In this case timod
  96  96 * will wait for tcap_wait time and assume that the transport doesn't provide
  97  97 * this message type. T_CAPABILITY_REQ should never travel over the wire, so the
  98  98 * timeout value should only take into consideration internal processing time
  99  99 * for the message. From the user's standpoint it may mean that an application will
100 100 * hang for TCAP_WAIT time in the kernel the first time this message is used
101 101 * with some particular transport (e.g. TCP/IP) during system uptime.
102 102 *
103 103 * 2) Transport responds with T_ERROR_ACK specifying T_CAPABILITY_REQ as
 104 104 * the original message type. In this case it is assumed that the transport doesn't
105 105 * support it (which may not always be true - some transports return
106 106 * T_ERROR_ACK in other cases like lack of system memory).
107 107 *
108 108 * 3) Transport responds with M_ERROR, effectively shutting down the
109 109 * stream. Unfortunately there is no standard way to pass the reason of
110 110 * M_ERROR message back to the caller, so it is assumed that if M_ERROR was
111 111 * sent in response to T_CAPABILITY_REQ message, transport doesn't support
112 112 * it.
113 113 *
114 114 * It is possible under certain circumstances that timod will incorrectly assume
 115 115 * that the underlying transport doesn't provide the T_CAPABILITY_REQ message type. In
 116 116 * this "worst-case" scenario timod will emulate its functionality by itself and
 117 117 * will provide only the TC1_INFO capability. All other bits in the CAP_bits1 field are
 118 118 * cleared. TC1_INFO is emulated by sending T_INFO_REQ down to the transport
 119 119 * provider.
120 120 */
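To make the probing scheme above concrete, here is a condensed sketch of the decision it implies; the PI_YES/PI_NO/PI_DONTKNOW states, the qtimeout() arming and the tim_send_ioctl_tpi_msg() call mirror the TI_CAPABILITY handling further down in timodwproc(), but this fragment is illustrative, not a copy of it:

	/* Illustrative only: how the cached capability state gates the probe. */
	switch (tp->tim_provinfo->tpi_capability) {
	case PI_YES:
		/* Known to understand T_CAPABILITY_REQ: just pass it down. */
		tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
		break;
	case PI_DONTKNOW:
		/* Probe: send it down, arm a timer in case it is silently dropped. */
		tp->tim_tcap_timoutid = qtimeout(q, tim_tcap_timer, q,
		    tim_tcap_wait * hz);
		tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
		break;
	case PI_NO:
		/* Known not to support it: emulate TC1_INFO via T_INFO_REQ instead. */
		break;
	}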
121 121
122 122 /*
123 123 * Notes about locking:
124 124 *
125 125 * tim_list_rwlock protects the list of tim_tim structures itself. When this
126 126 * lock is held, the list itself is stable, but the contents of the entries
127 127 * themselves might not be.
128 128 *
129 129 * The rest of the members are generally protected by D_MTQPAIR, which
130 130 * specifies a default exclusive inner perimeter. If you're looking at
131 131 * q->q_ptr, then it's stable.
132 132 *
133 133 * There's one exception to this rule: tim_peer{maxlen,len,name}. These members
134 134 * are touched without entering the associated STREAMS perimeter because we
135 135 * get the pointer via tim_findlink() rather than q_ptr. These are protected
136 136 * by tim_mutex instead. If you don't hold that lock, don't look at them.
137 137 *
138 138 * (It would be possible to separate out the 'set by T_CONN_RES' cases from the
139 139 * others, but there appears to be no reason to do so.)
140 140 */
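A small sketch of the access pattern these notes prescribe for the tim_peer* members (the field and lock names are the real ones from struct tim_tim below; the wrapper function itself is hypothetical):

	/* Hypothetical reader: tim_peer* is only valid while holding tim_mutex. */
	static t_scalar_t
	tim_peerlen_snapshot(struct tim_tim *tp)
	{
		t_scalar_t len;

		mutex_enter(&tp->tim_mutex);
		len = tp->tim_peerlen;
		mutex_exit(&tp->tim_mutex);

		return (len);
	}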
141 141 struct tim_tim {
142 142 uint32_t tim_flags;
143 143 t_uscalar_t tim_backlog;
144 144 mblk_t *tim_iocsave;
145 145 t_scalar_t tim_mymaxlen;
146 146 t_scalar_t tim_mylen;
147 147 caddr_t tim_myname;
148 148 t_scalar_t tim_peermaxlen;
149 149 t_scalar_t tim_peerlen;
150 150 caddr_t tim_peername;
151 151 cred_t *tim_peercred;
152 152 mblk_t *tim_consave;
153 153 bufcall_id_t tim_wbufcid;
154 154 bufcall_id_t tim_rbufcid;
155 155 timeout_id_t tim_wtimoutid;
156 156 timeout_id_t tim_rtimoutid;
157 157 /* Protected by the global tim_list_rwlock for all instances */
158 158 struct tim_tim *tim_next;
159 159 struct tim_tim **tim_ptpn;
160 160 t_uscalar_t tim_acceptor;
161 161 t_scalar_t tim_saved_prim; /* Primitive from message */
162 162 /* part of ioctl. */
163 163 timeout_id_t tim_tcap_timoutid; /* For T_CAP_REQ timeout */
164 164 tpi_provinfo_t *tim_provinfo; /* Transport description */
165 165 kmutex_t tim_mutex; /* protect tim_peer* */
166 166 pid_t tim_cpid;
167 167 };
168 168
169 169
170 170 /*
 171 171 * Local flags used with the tim_flags field in the instance structure of
 172 172 * type 'struct tim_tim' declared above.
 173 173 * Historical note:
 174 174 * These constants were previously declared in a
 175 175 * very messed up namespace in timod.h.
176 176 *
 177 177 * There may be 3 states for the transport:
 178 178 *
 179 179 * 1) It provides T_CAPABILITY_REQ
 180 180 * 2) It does not provide T_CAPABILITY_REQ
 181 181 * 3) It is not yet known whether the transport provides T_CAPABILITY_REQ or not.
 182 182 *
 183 183 * It is assumed that the underlying transport either provides
 184 184 * T_CAPABILITY_REQ or it does not, and that this does not change during the
 185 185 * system's lifetime.
186 186 *
187 187 */
188 188 #define PEEK_RDQ_EXPIND 0x0001 /* look for expinds on stream rd queues */
189 189 #define WAITIOCACK 0x0002 /* waiting for info for ioctl act */
190 190 #define CLTS 0x0004 /* connectionless transport */
191 191 #define COTS 0x0008 /* connection-oriented transport */
192 192 #define CONNWAIT 0x0010 /* waiting for connect confirmation */
193 193 #define LOCORDREL 0x0020 /* local end has orderly released */
194 194 #define REMORDREL 0x0040 /* remote end had orderly released */
195 195 #define NAMEPROC 0x0080 /* processing a NAME ioctl */
196 196 #define DO_MYNAME 0x0100 /* timod handles TI_GETMYNAME */
197 197 #define DO_PEERNAME 0x0200 /* timod handles TI_GETPEERNAME */
198 198 #define TI_CAP_RECVD 0x0400 /* TI_CAPABILITY received */
199 199 #define CAP_WANTS_INFO 0x0800 /* TI_CAPABILITY has TC1_INFO set */
200 200 #define WAIT_IOCINFOACK 0x1000 /* T_INFO_REQ generated from ioctl */
201 201 #define WAIT_CONNRESACK 0x2000 /* waiting for T_OK_ACK to T_CONN_RES */
202 202
203 203
204 204 /* Debugging facilities */
205 205 /*
 206 206 * Logging needed for debugging timod should only appear in a DEBUG kernel.
207 207 */
208 208 #ifdef DEBUG
209 209 #define TILOG(msg, arg) tilog((msg), (arg))
210 210 #define TILOGP(msg, arg) tilogp((msg), (arg))
211 211 #else
212 212 #define TILOG(msg, arg)
213 213 #define TILOGP(msg, arg)
214 214 #endif
215 215
216 216
217 217 /*
218 218 * Sleep timeout for T_CAPABILITY_REQ. This message never travels across
219 219 * network, so timeout value should be enough to cover all internal processing
220 220 * time.
221 221 */
222 222 clock_t tim_tcap_wait = 2;
223 223
224 224 /* Sleep timeout in tim_recover() */
225 225 #define TIMWAIT (1*hz)
226 226 /* Sleep timeout in tim_ioctl_retry() 0.2 seconds */
227 227 #define TIMIOCWAIT (200*hz/1000)
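Both values are in clock ticks: TIMWAIT is one second's worth of ticks, and TIMIOCWAIT works out to 200*hz/1000 ticks, i.e. 0.2 seconds independent of hz (for example, with hz = 100 that is 20 ticks).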
228 228
229 229 /*
230 230 * Return values for ti_doname().
231 231 */
232 232 #define DONAME_FAIL 0 /* failing ioctl (done) */
233 233 #define DONAME_DONE 1 /* done processing */
 234 234 #define DONAME_CONT 2 /* continue processing (not done yet) */
235 235
236 236 /*
237 237 * Function prototypes
238 238 */
239 239 static int ti_doname(queue_t *, mblk_t *);
240 240 static int ti_expind_on_rdqueues(queue_t *);
241 241 static void tim_ioctl_send_reply(queue_t *, mblk_t *, mblk_t *);
242 242 static void tim_send_ioc_error_ack(queue_t *, struct tim_tim *, mblk_t *);
243 243 static void tim_tcap_timer(void *);
244 244 static void tim_tcap_genreply(queue_t *, struct tim_tim *);
245 245 static void tim_send_reply(queue_t *, mblk_t *, struct tim_tim *, t_scalar_t);
246 246 static void tim_answer_ti_sync(queue_t *, mblk_t *, struct tim_tim *,
247 247 mblk_t *, uint32_t);
248 248 static void tim_send_ioctl_tpi_msg(queue_t *, mblk_t *, struct tim_tim *,
249 249 struct iocblk *);
250 250 static void tim_clear_peer(struct tim_tim *);
251 251
252 252 int
253 253 _init(void)
254 254 {
255 255 int error;
256 256
257 257 rw_init(&tim_list_rwlock, NULL, RW_DRIVER, NULL);
258 258 error = mod_install(&modlinkage);
259 259 if (error != 0) {
260 260 rw_destroy(&tim_list_rwlock);
261 261 return (error);
262 262 }
263 263
264 264 return (0);
265 265 }
266 266
267 267 int
268 268 _fini(void)
269 269 {
270 270 int error;
271 271
272 272 error = mod_remove(&modlinkage);
273 273 if (error != 0)
274 274 return (error);
275 275 rw_destroy(&tim_list_rwlock);
276 276 return (0);
277 277 }
278 278
279 279 int
280 280 _info(struct modinfo *modinfop)
281 281 {
282 282 return (mod_info(&modlinkage, modinfop));
283 283 }
284 284
285 285
286 286 /*
287 287 * Hash list for all instances. Used to find tim_tim structure based on
288 288 * ACCEPTOR_id in T_CONN_RES. Protected by tim_list_rwlock.
289 289 */
290 290 #define TIM_HASH_SIZE 256
291 291 #ifdef _ILP32
292 292 #define TIM_HASH(id) (((uintptr_t)(id) >> 8) % TIM_HASH_SIZE)
293 293 #else
294 294 #define TIM_HASH(id) ((uintptr_t)(id) % TIM_HASH_SIZE)
295 295 #endif /* _ILP32 */
296 296 static struct tim_tim *tim_hash[TIM_HASH_SIZE];
297 297 int tim_cnt = 0;
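For orientation, a plausible shape of the ACCEPTOR_id lookup this hash table exists for; tim_findlink() is declared just below, but this body is an illustrative sketch under the stated locking rule, not a copy of the real function:

	/* Illustrative lookup: caller holds tim_list_rwlock (see the T_OK_ACK handling). */
	static struct tim_tim *
	tim_findlink_sketch(t_uscalar_t id)
	{
		struct tim_tim *tp;

		ASSERT(RW_LOCK_HELD(&tim_list_rwlock));
		for (tp = tim_hash[TIM_HASH(id)]; tp != NULL; tp = tp->tim_next) {
			if (tp->tim_acceptor == id)
				return (tp);
		}
		return (NULL);
	}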
298 298
299 299 static void tilog(char *, t_scalar_t);
300 300 static void tilogp(char *, uintptr_t);
301 301 static mblk_t *tim_filladdr(queue_t *, mblk_t *, boolean_t);
302 302 static void tim_addlink(struct tim_tim *);
303 303 static void tim_dellink(struct tim_tim *);
304 304 static struct tim_tim *tim_findlink(t_uscalar_t);
305 305 static void tim_recover(queue_t *, mblk_t *, t_scalar_t);
306 306 static void tim_ioctl_retry(queue_t *);
307 307
308 308 int dotilog = 0;
309 309
310 310 #define TIMOD_ID 3
311 311
312 312 static int timodopen(queue_t *, dev_t *, int, int, cred_t *);
313 313 static int timodclose(queue_t *, int, cred_t *);
314 314 static void timodwput(queue_t *, mblk_t *);
315 315 static void timodrput(queue_t *, mblk_t *);
316 316 static void timodrsrv(queue_t *);
317 317 static void timodwsrv(queue_t *);
318 318 static int timodrproc(queue_t *, mblk_t *);
319 319 static int timodwproc(queue_t *, mblk_t *);
320 320
321 321 /* stream data structure definitions */
322 322
323 323 static struct module_info timod_info =
324 324 {TIMOD_ID, "timod", 0, INFPSZ, 512, 128};
325 325 static struct qinit timodrinit = {
326 326 (int (*)())timodrput,
327 327 (int (*)())timodrsrv,
328 328 timodopen,
329 329 timodclose,
330 330 nulldev,
331 331 &timod_info,
332 332 NULL
333 333 };
334 334 static struct qinit timodwinit = {
335 335 (int (*)())timodwput,
336 336 (int (*)())timodwsrv,
337 337 timodopen,
338 338 timodclose,
339 339 nulldev,
340 340 &timod_info,
341 341 NULL
342 342 };
343 343 static struct streamtab timinfo = { &timodrinit, &timodwinit, NULL, NULL };
344 344
345 345 /*
346 346 * timodopen - open routine gets called when the module gets pushed
347 347 * onto the stream.
348 348 */
349 349 /*ARGSUSED*/
350 350 static int
351 351 timodopen(
352 352 queue_t *q,
353 353 dev_t *devp,
354 354 int flag,
355 355 int sflag,
356 356 cred_t *crp)
357 357 {
358 358 struct tim_tim *tp;
359 359 struct stroptions *sop;
360 360 mblk_t *bp;
361 361
362 362 ASSERT(q != NULL);
363 363
364 364 if (q->q_ptr) {
365 365 return (0);
366 366 }
367 367
368 368 if ((bp = allocb(sizeof (struct stroptions), BPRI_MED)) == 0)
369 369 return (ENOMEM);
370 370
371 371 tp = kmem_zalloc(sizeof (struct tim_tim), KM_SLEEP);
372 372
373 373 tp->tim_cpid = -1;
374 374 tp->tim_saved_prim = -1;
375 375
376 376 mutex_init(&tp->tim_mutex, NULL, MUTEX_DEFAULT, NULL);
377 377
378 378 q->q_ptr = (caddr_t)tp;
379 379 WR(q)->q_ptr = (caddr_t)tp;
380 380
381 381 tilogp("timodopen: Allocated for tp %lx\n", (uintptr_t)tp);
382 382 tilogp("timodopen: Allocated for q %lx\n", (uintptr_t)q);
383 383
384 384 /* Must be done before tpi_findprov and _ILP32 q_next walk below */
385 385 qprocson(q);
386 386
387 387 tp->tim_provinfo = tpi_findprov(q);
388 388
389 389 /*
390 390 * Defer allocation of the buffers for the local address and
391 391 * the peer's address until we need them.
 392 392 * Assume that timod has to handle getname until we hear
 393 393 * an iocack from the transport provider or we know that
 394 394 * the transport provider doesn't understand it.
395 395 */
396 396 if (tp->tim_provinfo->tpi_myname != PI_YES) {
397 397 TILOG("timodopen: setting DO_MYNAME\n", 0);
398 398 tp->tim_flags |= DO_MYNAME;
399 399 }
400 400
401 401 if (tp->tim_provinfo->tpi_peername != PI_YES) {
402 402 TILOG("timodopen: setting DO_PEERNAME\n", 0);
403 403 tp->tim_flags |= DO_PEERNAME;
404 404 }
405 405
406 406 #ifdef _ILP32
407 407 {
408 408 queue_t *driverq;
409 409
410 410 /*
411 411 * Find my driver's read queue (for T_CONN_RES handling)
412 412 */
413 413 driverq = WR(q);
414 414 while (SAMESTR(driverq))
415 415 driverq = driverq->q_next;
416 416
417 417 tp->tim_acceptor = (t_uscalar_t)RD(driverq);
418 418 }
419 419 #else
420 420 tp->tim_acceptor = (t_uscalar_t)getminor(*devp);
421 421 #endif /* _ILP32 */
422 422
423 423 /*
424 424 * Add this one to the list.
425 425 */
426 426 tim_addlink(tp);
427 427
428 428 /*
429 429 * Send M_SETOPTS to stream head to make sure M_PCPROTO messages
430 430 * are not flushed. This prevents application deadlocks.
431 431 */
432 432 bp->b_datap->db_type = M_SETOPTS;
433 433 bp->b_wptr += sizeof (struct stroptions);
434 434 sop = (struct stroptions *)bp->b_rptr;
435 435 sop->so_flags = SO_READOPT;
436 436 sop->so_readopt = RFLUSHPCPROT;
437 437
438 438 putnext(q, bp);
439 439
440 440 return (0);
441 441 }
442 442
443 443 static void
444 444 tim_timer(void *arg)
445 445 {
446 446 queue_t *q = arg;
447 447 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
448 448
449 449 ASSERT(tp);
450 450
451 451 if (q->q_flag & QREADR) {
452 452 ASSERT(tp->tim_rtimoutid);
453 453 tp->tim_rtimoutid = 0;
454 454 } else {
455 455 ASSERT(tp->tim_wtimoutid);
456 456 tp->tim_wtimoutid = 0;
457 457 }
458 458 enableok(q);
459 459 qenable(q);
460 460 }
461 461
462 462 static void
463 463 tim_buffer(void *arg)
464 464 {
465 465 queue_t *q = arg;
466 466 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
467 467
468 468 ASSERT(tp);
469 469
470 470 if (q->q_flag & QREADR) {
471 471 ASSERT(tp->tim_rbufcid);
472 472 tp->tim_rbufcid = 0;
473 473 } else {
474 474 ASSERT(tp->tim_wbufcid);
475 475 tp->tim_wbufcid = 0;
476 476 }
477 477 enableok(q);
478 478 qenable(q);
479 479 }
480 480
481 481 /*
482 482 * timodclose - This routine gets called when the module gets popped
483 483 * off of the stream.
484 484 */
485 485 /*ARGSUSED*/
486 486 static int
487 487 timodclose(
488 488 queue_t *q,
489 489 int flag,
490 490 cred_t *crp)
491 491 {
492 492 struct tim_tim *tp;
493 493 mblk_t *mp;
494 494 mblk_t *nmp;
495 495
496 496 ASSERT(q != NULL);
497 497
498 498 tp = (struct tim_tim *)q->q_ptr;
499 499 q->q_ptr = NULL;
500 500
501 501 ASSERT(tp != NULL);
502 502
503 503 tilogp("timodclose: Entered for tp %lx\n", (uintptr_t)tp);
504 504 tilogp("timodclose: Entered for q %lx\n", (uintptr_t)q);
505 505
506 506 qprocsoff(q);
507 507 tim_dellink(tp);
508 508
509 509 /*
510 510 * Cancel any outstanding bufcall
511 511 * or timeout requests.
512 512 */
513 513 if (tp->tim_wbufcid) {
514 514 qunbufcall(q, tp->tim_wbufcid);
515 515 tp->tim_wbufcid = 0;
516 516 }
517 517 if (tp->tim_rbufcid) {
518 518 qunbufcall(q, tp->tim_rbufcid);
519 519 tp->tim_rbufcid = 0;
520 520 }
521 521 if (tp->tim_wtimoutid) {
522 522 (void) quntimeout(q, tp->tim_wtimoutid);
523 523 tp->tim_wtimoutid = 0;
524 524 }
525 525 if (tp->tim_rtimoutid) {
526 526 (void) quntimeout(q, tp->tim_rtimoutid);
527 527 tp->tim_rtimoutid = 0;
528 528 }
529 529
530 530 if (tp->tim_tcap_timoutid != 0) {
531 531 (void) quntimeout(q, tp->tim_tcap_timoutid);
532 532 tp->tim_tcap_timoutid = 0;
533 533 }
534 534
535 535 if (tp->tim_iocsave != NULL)
536 536 freemsg(tp->tim_iocsave);
537 537 mp = tp->tim_consave;
538 538 while (mp) {
539 539 nmp = mp->b_next;
540 540 mp->b_next = NULL;
541 541 freemsg(mp);
542 542 mp = nmp;
543 543 }
544 544 ASSERT(tp->tim_mymaxlen >= 0);
545 545 if (tp->tim_mymaxlen != 0)
546 546 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
547 547 ASSERT(tp->tim_peermaxlen >= 0);
548 548 if (tp->tim_peermaxlen != 0)
549 549 kmem_free(tp->tim_peername, (size_t)tp->tim_peermaxlen);
550 550
551 551 q->q_ptr = WR(q)->q_ptr = NULL;
552 552
553 553 mutex_destroy(&tp->tim_mutex);
554 554
555 555 if (tp->tim_peercred != NULL)
556 556 crfree(tp->tim_peercred);
557 557
558 558 kmem_free(tp, sizeof (struct tim_tim));
559 559
560 560 return (0);
561 561 }
562 562
563 563 /*
564 564 * timodrput - Module read put procedure. This is called from
565 565 * the module, driver, or stream head upstream/downstream.
566 566 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_IND,
567 567 * and T_UNITDATA_IND) messages. All others are queued to
568 568 * be handled by the service procedures.
569 569 */
570 570 static void
571 571 timodrput(queue_t *q, mblk_t *mp)
572 572 {
573 573 union T_primitives *pptr;
574 574
575 575 /*
576 576 * During flow control and other instances when messages
577 577 * are on queue, queue up a non high priority message
578 578 */
579 579 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
580 580 (void) putq(q, mp);
581 581 return;
582 582 }
583 583
584 584 /*
585 585 * Inline processing of data (to avoid additional procedure call).
586 586 * Rest is handled in timodrproc.
587 587 */
588 588
589 589 switch (mp->b_datap->db_type) {
590 590 case M_DATA:
591 591 if (bcanputnext(q, mp->b_band))
592 592 putnext(q, mp);
593 593 else
594 594 (void) putq(q, mp);
595 595 break;
596 596 case M_PROTO:
597 597 case M_PCPROTO:
598 598 if (MBLKL(mp) < sizeof (t_scalar_t)) {
599 599 if (mp->b_datap->db_type == M_PCPROTO ||
600 600 bcanputnext(q, mp->b_band)) {
601 601 putnext(q, mp);
602 602 } else {
603 603 (void) putq(q, mp);
604 604 }
605 605 break;
606 606 }
607 607 pptr = (union T_primitives *)mp->b_rptr;
608 608 switch (pptr->type) {
609 609 case T_EXDATA_IND:
610 610 case T_DATA_IND:
611 611 case T_UNITDATA_IND:
612 612 if (bcanputnext(q, mp->b_band))
613 613 putnext(q, mp);
614 614 else
615 615 (void) putq(q, mp);
616 616 break;
617 617 default:
618 618 (void) timodrproc(q, mp);
619 619 break;
620 620 }
621 621 break;
622 622 default:
623 623 (void) timodrproc(q, mp);
624 624 break;
625 625 }
626 626 }
627 627
628 628 /*
629 629 * timodrsrv - Module read queue service procedure. This is called when
630 630 * messages are placed on an empty queue, when high priority
631 631 * messages are placed on the queue, and when flow control
632 632 * restrictions subside. This code used to be included in a
633 633 * put procedure, but it was moved to a service procedure
634 634 * because several points were added where memory allocation
635 635 * could fail, and there is no reasonable recovery mechanism
636 636 * from the put procedure.
637 637 */
638 638 /*ARGSUSED*/
639 639 static void
640 640 timodrsrv(queue_t *q)
641 641 {
642 642 mblk_t *mp;
643 643 struct tim_tim *tp;
644 644
645 645 ASSERT(q != NULL);
646 646
647 647 tp = (struct tim_tim *)q->q_ptr;
648 648 if (!tp)
649 649 return;
650 650
651 651 while ((mp = getq(q)) != NULL) {
652 652 if (timodrproc(q, mp)) {
653 653 /*
654 654 * timodrproc did a putbq - stop processing
655 655 * messages.
656 656 */
657 657 return;
658 658 }
659 659 }
660 660 }
661 661
662 662 /*
663 663 * Perform common processing when a T_CAPABILITY_ACK or T_INFO_ACK
664 664 * arrive. Set the queue properties and adjust the tim_flags according
665 665 * to the service type.
666 666 */
667 667 static void
668 668 timodprocessinfo(queue_t *q, struct tim_tim *tp, struct T_info_ack *tia)
669 669 {
670 670 TILOG("timodprocessinfo: strqset(%d)\n", tia->TIDU_size);
671 671 (void) strqset(q, QMAXPSZ, 0, tia->TIDU_size);
672 672 (void) strqset(OTHERQ(q), QMAXPSZ, 0, tia->TIDU_size);
673 673
674 674 if ((tia->SERV_type == T_COTS) || (tia->SERV_type == T_COTS_ORD))
675 675 tp->tim_flags = (tp->tim_flags & ~CLTS) | COTS;
676 676 else if (tia->SERV_type == T_CLTS)
677 677 tp->tim_flags = (tp->tim_flags & ~COTS) | CLTS;
678 678 }
679 679
680 680 static int
681 681 timodrproc(queue_t *q, mblk_t *mp)
682 682 {
683 683 uint32_t auditing = AU_AUDITING();
684 684 union T_primitives *pptr;
685 685 struct tim_tim *tp;
686 686 struct iocblk *iocbp;
687 687 mblk_t *nbp;
688 688 size_t blen;
689 689
690 690 tp = (struct tim_tim *)q->q_ptr;
691 691
692 692 switch (mp->b_datap->db_type) {
693 693 default:
694 694 putnext(q, mp);
695 695 break;
696 696
697 697 case M_ERROR:
698 698 TILOG("timodrproc: Got M_ERROR, flags = %x\n", tp->tim_flags);
699 699 /*
 700 700 * There is no specified standard response for a driver when it
 701 701 * receives an unknown message type, and M_ERROR is one
 702 702 * possibility. If we send T_CAPABILITY_REQ down and the transport
 703 703 * provider responds with M_ERROR, we assume that it doesn't
 704 704 * understand this message type. This assumption may sometimes
 705 705 * be incorrect (the transport may reply with M_ERROR for
 706 706 * some other reason), but there is no way for us to distinguish
 707 707 * between the different cases. In the worst case timod and everyone
 708 708 * else sharing the global transport description with it may end up
 709 709 * emulating T_CAPABILITY_REQ.
710 710 */
711 711
712 712 /*
713 713 * Check that we are waiting for T_CAPABILITY_ACK and
714 714 * T_CAPABILITY_REQ is not implemented by transport or emulated
715 715 * by timod.
716 716 */
717 717 if ((tp->tim_provinfo->tpi_capability == PI_DONTKNOW) &&
718 718 ((tp->tim_flags & TI_CAP_RECVD) != 0)) {
719 719 /*
 720 720 * There is a good chance that this transport doesn't provide
721 721 * T_CAPABILITY_REQ. Mark this information permanently
722 722 * for the module + transport combination.
723 723 */
724 724 PI_PROVLOCK(tp->tim_provinfo);
725 725 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
726 726 tp->tim_provinfo->tpi_capability = PI_NO;
727 727 PI_PROVUNLOCK(tp->tim_provinfo);
728 728 if (tp->tim_tcap_timoutid != 0) {
729 729 (void) quntimeout(q, tp->tim_tcap_timoutid);
730 730 tp->tim_tcap_timoutid = 0;
731 731 }
732 732 }
733 733 putnext(q, mp);
734 734 break;
735 735 case M_DATA:
736 736 if (!bcanputnext(q, mp->b_band)) {
737 737 (void) putbq(q, mp);
738 738 return (1);
739 739 }
740 740 putnext(q, mp);
741 741 break;
742 742
743 743 case M_PROTO:
744 744 case M_PCPROTO:
745 745 blen = MBLKL(mp);
746 746 if (blen < sizeof (t_scalar_t)) {
747 747 /*
748 748 * Note: it's not actually possible to get
749 749 * here with db_type M_PCPROTO, because
750 750 * timodrput has already checked MBLKL, and
751 751 * thus the assertion below. If the length
752 752 * was too short, then the message would have
753 753 * already been putnext'd, and would thus
754 754 * never appear here. Just the same, the code
755 755 * below handles the impossible case since
756 756 * it's easy to do and saves future
757 757 * maintainers from unfortunate accidents.
758 758 */
759 759 ASSERT(mp->b_datap->db_type == M_PROTO);
760 760 if (mp->b_datap->db_type == M_PROTO &&
761 761 !bcanputnext(q, mp->b_band)) {
762 762 (void) putbq(q, mp);
763 763 return (1);
764 764 }
765 765 putnext(q, mp);
766 766 break;
767 767 }
768 768
769 769 pptr = (union T_primitives *)mp->b_rptr;
770 770 switch (pptr->type) {
771 771 default:
772 772
773 773 if (auditing)
774 774 audit_sock(T_UNITDATA_IND, q, mp, TIMOD_ID);
775 775 putnext(q, mp);
776 776 break;
777 777
778 778 case T_ERROR_ACK:
779 779 /* Restore db_type - recover() might have changed it */
780 780 mp->b_datap->db_type = M_PCPROTO;
781 781 if (blen < sizeof (struct T_error_ack)) {
782 782 putnext(q, mp);
783 783 break;
784 784 }
785 785
786 786 tilog("timodrproc: Got T_ERROR_ACK, flags = %x\n",
787 787 tp->tim_flags);
788 788
789 789 if ((tp->tim_flags & WAIT_CONNRESACK) &&
790 790 tp->tim_saved_prim == pptr->error_ack.ERROR_prim) {
791 791 tp->tim_flags &=
792 792 ~(WAIT_CONNRESACK | WAITIOCACK);
793 793 freemsg(tp->tim_iocsave);
794 794 tp->tim_iocsave = NULL;
795 795 tp->tim_saved_prim = -1;
796 796 putnext(q, mp);
797 797 } else if (tp->tim_flags & WAITIOCACK) {
798 798 tim_send_ioc_error_ack(q, tp, mp);
799 799 } else {
800 800 putnext(q, mp);
801 801 }
802 802 break;
803 803
804 804 case T_OK_ACK:
805 805 if (blen < sizeof (pptr->ok_ack)) {
806 806 mp->b_datap->db_type = M_PCPROTO;
807 807 putnext(q, mp);
808 808 break;
809 809 }
810 810
811 811 tilog("timodrproc: Got T_OK_ACK\n", 0);
812 812
813 813 if (pptr->ok_ack.CORRECT_prim == T_UNBIND_REQ)
814 814 tp->tim_mylen = 0;
815 815
816 816 if ((tp->tim_flags & WAIT_CONNRESACK) &&
817 817 tp->tim_saved_prim == pptr->ok_ack.CORRECT_prim) {
818 818 struct T_conn_res *resp;
819 819 struct T_conn_ind *indp;
820 820 struct tim_tim *ntp;
821 821 caddr_t ptr;
822 822
823 823 rw_enter(&tim_list_rwlock, RW_READER);
824 824 resp = (struct T_conn_res *)
825 825 tp->tim_iocsave->b_rptr;
826 826 ntp = tim_findlink(resp->ACCEPTOR_id);
827 827 if (ntp == NULL)
828 828 goto cresackout;
829 829
830 830 mutex_enter(&ntp->tim_mutex);
831 831 if (ntp->tim_peercred != NULL)
832 832 crfree(ntp->tim_peercred);
833 833 ntp->tim_peercred =
834 834 msg_getcred(tp->tim_iocsave->b_cont,
835 835 &ntp->tim_cpid);
836 836 if (ntp->tim_peercred != NULL)
837 837 crhold(ntp->tim_peercred);
838 838
839 839 if (!(ntp->tim_flags & DO_PEERNAME)) {
840 840 mutex_exit(&ntp->tim_mutex);
841 841 goto cresackout;
842 842 }
843 843
844 844 indp = (struct T_conn_ind *)
845 845 tp->tim_iocsave->b_cont->b_rptr;
846 846 /* true as message is put on list */
847 847 ASSERT(indp->SRC_length >= 0);
848 848
849 849 if (indp->SRC_length > ntp->tim_peermaxlen) {
850 850 ptr = kmem_alloc(indp->SRC_length,
851 851 KM_NOSLEEP);
852 852 if (ptr == NULL) {
853 853 mutex_exit(&ntp->tim_mutex);
854 854 rw_exit(&tim_list_rwlock);
 855 855 tilog("timodrproc: kmem_alloc "
856 856 "failed, attempting "
857 857 "recovery\n", 0);
858 858 tim_recover(q, mp,
859 859 indp->SRC_length);
860 860 return (1);
861 861 }
862 862 if (ntp->tim_peermaxlen > 0)
863 863 kmem_free(ntp->tim_peername,
864 864 ntp->tim_peermaxlen);
865 865 ntp->tim_peername = ptr;
866 866 ntp->tim_peermaxlen = indp->SRC_length;
867 867 }
868 868 ntp->tim_peerlen = indp->SRC_length;
869 869 ptr = (caddr_t)indp + indp->SRC_offset;
870 870 bcopy(ptr, ntp->tim_peername, ntp->tim_peerlen);
871 871
872 872 mutex_exit(&ntp->tim_mutex);
873 873
874 874 cresackout:
875 875 rw_exit(&tim_list_rwlock);
876 876 tp->tim_flags &=
877 877 ~(WAIT_CONNRESACK | WAITIOCACK);
878 878 freemsg(tp->tim_iocsave);
879 879 tp->tim_iocsave = NULL;
880 880 tp->tim_saved_prim = -1;
881 881 }
882 882
883 883 tim_send_reply(q, mp, tp, pptr->ok_ack.CORRECT_prim);
884 884 break;
885 885
886 886 case T_BIND_ACK: {
887 887 struct T_bind_ack *ackp =
888 888 (struct T_bind_ack *)mp->b_rptr;
889 889
890 890 /* Restore db_type - recover() might have changed it */
891 891 mp->b_datap->db_type = M_PCPROTO;
892 892 if (blen < sizeof (*ackp)) {
893 893 putnext(q, mp);
894 894 break;
895 895 }
896 896
897 897 /* save negotiated backlog */
898 898 tp->tim_backlog = ackp->CONIND_number;
899 899
900 900 if (((tp->tim_flags & WAITIOCACK) == 0) ||
901 901 ((tp->tim_saved_prim != O_T_BIND_REQ) &&
902 902 (tp->tim_saved_prim != T_BIND_REQ))) {
903 903 putnext(q, mp);
904 904 break;
905 905 }
906 906 ASSERT(tp->tim_iocsave != NULL);
907 907
908 908 if (tp->tim_flags & DO_MYNAME) {
909 909 caddr_t p;
910 910
911 911 if (ackp->ADDR_length < 0 ||
912 912 mp->b_rptr + ackp->ADDR_offset +
913 913 ackp->ADDR_length > mp->b_wptr) {
914 914 putnext(q, mp);
915 915 break;
916 916 }
917 917 if (ackp->ADDR_length > tp->tim_mymaxlen) {
918 918 p = kmem_alloc(ackp->ADDR_length,
919 919 KM_NOSLEEP);
920 920 if (p == NULL) {
921 921 tilog("timodrproc: kmem_alloc "
922 922 "failed attempt recovery",
923 923 0);
924 924
925 925 tim_recover(q, mp,
926 926 ackp->ADDR_length);
927 927 return (1);
928 928 }
929 929 ASSERT(tp->tim_mymaxlen >= 0);
 930 930 if (tp->tim_mymaxlen != 0) {
931 931 kmem_free(tp->tim_myname,
932 932 tp->tim_mymaxlen);
933 933 }
934 934 tp->tim_myname = p;
935 935 tp->tim_mymaxlen = ackp->ADDR_length;
936 936 }
937 937 tp->tim_mylen = ackp->ADDR_length;
938 938 bcopy(mp->b_rptr + ackp->ADDR_offset,
939 939 tp->tim_myname, tp->tim_mylen);
940 940 }
941 941 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
942 942 tp->tim_iocsave = NULL;
943 943 tp->tim_saved_prim = -1;
944 944 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
945 945 TI_CAP_RECVD | CAP_WANTS_INFO);
946 946 break;
947 947 }
948 948
949 949 case T_OPTMGMT_ACK:
950 950
951 951 tilog("timodrproc: Got T_OPTMGMT_ACK\n", 0);
952 952
 953 953 /* Restore db_type - recover() might have changed it */
954 954 mp->b_datap->db_type = M_PCPROTO;
955 955
956 956 if (((tp->tim_flags & WAITIOCACK) == 0) ||
957 957 ((tp->tim_saved_prim != T_SVR4_OPTMGMT_REQ) &&
958 958 (tp->tim_saved_prim != T_OPTMGMT_REQ))) {
959 959 putnext(q, mp);
960 960 } else {
961 961 ASSERT(tp->tim_iocsave != NULL);
962 962 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
963 963 tp->tim_iocsave = NULL;
964 964 tp->tim_saved_prim = -1;
965 965 tp->tim_flags &= ~(WAITIOCACK |
966 966 WAIT_IOCINFOACK | TI_CAP_RECVD |
967 967 CAP_WANTS_INFO);
968 968 }
969 969 break;
970 970
971 971 case T_INFO_ACK: {
972 972 struct T_info_ack *tia = (struct T_info_ack *)pptr;
973 973
974 974 /* Restore db_type - recover() might have changed it */
975 975 mp->b_datap->db_type = M_PCPROTO;
976 976
977 977 if (blen < sizeof (*tia)) {
978 978 putnext(q, mp);
979 979 break;
980 980 }
981 981
982 982 tilog("timodrproc: Got T_INFO_ACK, flags = %x\n",
983 983 tp->tim_flags);
984 984
985 985 timodprocessinfo(q, tp, tia);
986 986
987 987 TILOG("timodrproc: flags = %x\n", tp->tim_flags);
988 988 if ((tp->tim_flags & WAITIOCACK) != 0) {
989 989 size_t expected_ack_size;
990 990 ssize_t deficit;
991 991 int ioc_cmd;
992 992 struct T_capability_ack *tcap;
993 993
994 994 /*
995 995 * The only case when T_INFO_ACK may be received back
 996 996 * when we are waiting for an ioctl to complete is when
997 997 * this ioctl sent T_INFO_REQ down.
998 998 */
999 999 if (!(tp->tim_flags & WAIT_IOCINFOACK)) {
1000 1000 putnext(q, mp);
1001 1001 break;
1002 1002 }
1003 1003 ASSERT(tp->tim_iocsave != NULL);
1004 1004
1005 1005 iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
1006 1006 ioc_cmd = iocbp->ioc_cmd;
1007 1007
1008 1008 /*
1009 1009 * Was it sent from TI_CAPABILITY emulation?
1010 1010 */
1011 1011 if (ioc_cmd == TI_CAPABILITY) {
1012 1012 struct T_info_ack saved_info;
1013 1013
1014 1014 /*
1015 1015 * Perform sanity checks. The only case when we
1016 1016 * send T_INFO_REQ from TI_CAPABILITY is when
1017 1017 * timod emulates T_CAPABILITY_REQ and CAP_bits1
1018 1018 * has TC1_INFO set.
1019 1019 */
1020 1020 if ((tp->tim_flags &
1021 1021 (TI_CAP_RECVD | CAP_WANTS_INFO)) !=
1022 1022 (TI_CAP_RECVD | CAP_WANTS_INFO)) {
1023 1023 putnext(q, mp);
1024 1024 break;
1025 1025 }
1026 1026
1027 1027 TILOG("timodrproc: emulating TI_CAPABILITY/"
1028 1028 "info\n", 0);
1029 1029
1030 1030 /* Save info & reuse mp for T_CAPABILITY_ACK */
1031 1031 saved_info = *tia;
1032 1032
1033 1033 mp = tpi_ack_alloc(mp,
1034 1034 sizeof (struct T_capability_ack),
1035 1035 M_PCPROTO, T_CAPABILITY_ACK);
1036 1036
1037 1037 if (mp == NULL) {
1038 1038 tilog("timodrproc: realloc failed, "
1039 1039 "no recovery attempted\n", 0);
1040 1040 return (1);
1041 1041 }
1042 1042
1043 1043 /*
1044 1044 * Copy T_INFO information into T_CAPABILITY_ACK
1045 1045 */
1046 1046 tcap = (struct T_capability_ack *)mp->b_rptr;
1047 1047 tcap->CAP_bits1 = TC1_INFO;
1048 1048 tcap->INFO_ack = saved_info;
1049 1049 tp->tim_flags &= ~(WAITIOCACK |
1050 1050 WAIT_IOCINFOACK | TI_CAP_RECVD |
1051 1051 CAP_WANTS_INFO);
1052 1052 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1053 1053 tp->tim_iocsave = NULL;
1054 1054 tp->tim_saved_prim = -1;
1055 1055 break;
1056 1056 }
1057 1057
1058 1058 /*
1059 1059 * The code for TI_SYNC/TI_GETINFO is left here only for
 1060 1060 * backward compatibility with statically linked old
1061 1061 * applications. New TLI/XTI code should use
1062 1062 * TI_CAPABILITY for getting transport info and should
1063 1063 * not use TI_GETINFO/TI_SYNC for this purpose.
1064 1064 */
1065 1065
1066 1066 /*
1067 1067 * make sure the message sent back is the size of
1068 1068 * the "expected ack"
1069 1069 * For TI_GETINFO, expected ack size is
1070 1070 * sizeof (T_info_ack)
1071 1071 * For TI_SYNC, expected ack size is
1072 1072 * sizeof (struct ti_sync_ack);
1073 1073 */
1074 1074 if (ioc_cmd != TI_GETINFO && ioc_cmd != TI_SYNC) {
1075 1075 putnext(q, mp);
1076 1076 break;
1077 1077 }
1078 1078
1079 1079 expected_ack_size =
1080 1080 sizeof (struct T_info_ack); /* TI_GETINFO */
1081 1081 if (iocbp->ioc_cmd == TI_SYNC) {
1082 1082 expected_ack_size = 2 * sizeof (uint32_t) +
1083 1083 sizeof (struct ti_sync_ack);
1084 1084 }
1085 1085 deficit = expected_ack_size - blen;
1086 1086
1087 1087 if (deficit != 0) {
1088 1088 if (mp->b_datap->db_lim - mp->b_wptr <
1089 1089 deficit) {
1090 1090 mblk_t *tmp = allocb(expected_ack_size,
1091 1091 BPRI_HI);
1092 1092 if (tmp == NULL) {
1093 1093 ASSERT(MBLKSIZE(mp) >=
1094 1094 sizeof (struct T_error_ack));
1095 1095
1096 1096 tilog("timodrproc: allocb failed no "
1097 1097 "recovery attempt\n", 0);
1098 1098
1099 1099 mp->b_rptr = mp->b_datap->db_base;
1100 1100 pptr = (union T_primitives *)
1101 1101 mp->b_rptr;
1102 1102 pptr->error_ack.ERROR_prim = T_INFO_REQ;
1103 1103 pptr->error_ack.TLI_error = TSYSERR;
1104 1104 pptr->error_ack.UNIX_error = EAGAIN;
1105 1105 pptr->error_ack.PRIM_type = T_ERROR_ACK;
1106 1106 mp->b_datap->db_type = M_PCPROTO;
1107 1107 tim_send_ioc_error_ack(q, tp, mp);
1108 1108 break;
1109 1109 } else {
1110 1110 bcopy(mp->b_rptr, tmp->b_rptr, blen);
1111 1111 tmp->b_wptr += blen;
1112 1112 pptr = (union T_primitives *)
1113 1113 tmp->b_rptr;
1114 1114 freemsg(mp);
1115 1115 mp = tmp;
1116 1116 }
1117 1117 }
1118 1118 }
1119 1119 /*
1120 1120 * We now have "mp" which has enough space for an
1121 1121 * appropriate ack and contains struct T_info_ack
1122 1122 * that the transport provider returned. We now
 1123 1123 * fill it in further to fulfill the
 1124 1124 * TI_SYNC ioctl's needs, as necessary.
1125 1125 */
1126 1126 if (iocbp->ioc_cmd == TI_SYNC) {
1127 1127 /*
1128 1128 * Assumes struct T_info_ack is first embedded
1129 1129 * type in struct ti_sync_ack so it is
1130 1130 * automatically there.
1131 1131 */
1132 1132 struct ti_sync_ack *tsap =
1133 1133 (struct ti_sync_ack *)mp->b_rptr;
1134 1134
1135 1135 /*
1136 1136 * tsap->tsa_qlen needs to be set only if
 1137 1137 * the TSRF_QLEN_REQ flag is set, but for
 1138 1138 * compatibility with statically linked
 1139 1139 * applications it is set here regardless of the
 1140 1140 * flag since the old XTI library expected it to be
1141 1141 * set.
1142 1142 */
1143 1143 tsap->tsa_qlen = tp->tim_backlog;
 1144 1144 tsap->tsa_flags = 0x0; /* initialize clear */
1145 1145 if (tp->tim_flags & PEEK_RDQ_EXPIND) {
1146 1146 /*
1147 1147 * Request to peek for EXPIND in
1148 1148 * rcvbuf.
1149 1149 */
1150 1150 if (ti_expind_on_rdqueues(q)) {
1151 1151 /*
1152 1152 * Expedited data is
1153 1153 * queued on the stream
1154 1154 * read side
1155 1155 */
1156 1156 tsap->tsa_flags |=
1157 1157 TSAF_EXP_QUEUED;
1158 1158 }
1159 1159 tp->tim_flags &=
1160 1160 ~PEEK_RDQ_EXPIND;
1161 1161 }
1162 1162 mp->b_wptr += 2*sizeof (uint32_t);
1163 1163 }
1164 1164 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1165 1165 tp->tim_iocsave = NULL;
1166 1166 tp->tim_saved_prim = -1;
1167 1167 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
1168 1168 TI_CAP_RECVD | CAP_WANTS_INFO);
1169 1169 break;
1170 1170 }
1171 1171 }
1172 1172
1173 1173 putnext(q, mp);
1174 1174 break;
1175 1175
1176 1176 case T_ADDR_ACK:
1177 1177 tilog("timodrproc: Got T_ADDR_ACK\n", 0);
1178 1178 tim_send_reply(q, mp, tp, T_ADDR_REQ);
1179 1179 break;
1180 1180
1181 1181 case T_CONN_IND: {
1182 1182 struct T_conn_ind *tcip =
1183 1183 (struct T_conn_ind *)mp->b_rptr;
1184 1184
1185 1185 tilog("timodrproc: Got T_CONN_IND\n", 0);
1186 1186
1187 1187 if (blen >= sizeof (*tcip) &&
1188 1188 MBLKIN(mp, tcip->SRC_offset, tcip->SRC_length)) {
1189 1189 if (((nbp = dupmsg(mp)) != NULL) ||
1190 1190 ((nbp = copymsg(mp)) != NULL)) {
1191 1191 nbp->b_next = tp->tim_consave;
1192 1192 tp->tim_consave = nbp;
1193 1193 } else {
1194 1194 tim_recover(q, mp,
1195 1195 (t_scalar_t)sizeof (mblk_t));
1196 1196 return (1);
1197 1197 }
1198 1198 }
1199 1199 if (auditing)
1200 1200 audit_sock(T_CONN_IND, q, mp, TIMOD_ID);
1201 1201 putnext(q, mp);
1202 1202 break;
1203 1203 }
1204 1204
1205 1205 case T_CONN_CON:
1206 1206 mutex_enter(&tp->tim_mutex);
1207 1207 if (tp->tim_peercred != NULL)
1208 1208 crfree(tp->tim_peercred);
1209 1209 tp->tim_peercred = msg_getcred(mp, &tp->tim_cpid);
1210 1210 if (tp->tim_peercred != NULL)
1211 1211 crhold(tp->tim_peercred);
1212 1212 mutex_exit(&tp->tim_mutex);
1213 1213
1214 1214 tilog("timodrproc: Got T_CONN_CON\n", 0);
1215 1215
1216 1216 tp->tim_flags &= ~CONNWAIT;
1217 1217 putnext(q, mp);
1218 1218 break;
1219 1219
1220 1220 case T_DISCON_IND: {
1221 1221 struct T_discon_ind *disp;
1222 1222 struct T_conn_ind *conp;
1223 1223 mblk_t *pbp = NULL;
1224 1224
1225 1225 if (q->q_first != 0)
1226 1226 tilog("timodrput: T_DISCON_IND - flow control\n", 0);
1227 1227
1228 1228 if (blen < sizeof (*disp)) {
1229 1229 putnext(q, mp);
1230 1230 break;
1231 1231 }
1232 1232
1233 1233 disp = (struct T_discon_ind *)mp->b_rptr;
1234 1234
1235 1235 tilog("timodrproc: Got T_DISCON_IND Reason: %d\n",
1236 1236 disp->DISCON_reason);
1237 1237
1238 1238 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
1239 1239 tim_clear_peer(tp);
1240 1240 for (nbp = tp->tim_consave; nbp; nbp = nbp->b_next) {
1241 1241 conp = (struct T_conn_ind *)nbp->b_rptr;
1242 1242 if (conp->SEQ_number == disp->SEQ_number)
1243 1243 break;
1244 1244 pbp = nbp;
1245 1245 }
1246 1246 if (nbp) {
1247 1247 if (pbp)
1248 1248 pbp->b_next = nbp->b_next;
1249 1249 else
1250 1250 tp->tim_consave = nbp->b_next;
1251 1251 nbp->b_next = NULL;
1252 1252 freemsg(nbp);
1253 1253 }
1254 1254 putnext(q, mp);
1255 1255 break;
1256 1256 }
1257 1257
1258 1258 case T_ORDREL_IND:
1259 1259
1260 1260 tilog("timodrproc: Got T_ORDREL_IND\n", 0);
1261 1261
1262 1262 if (tp->tim_flags & LOCORDREL) {
1263 1263 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
1264 1264 tim_clear_peer(tp);
1265 1265 } else {
1266 1266 tp->tim_flags |= REMORDREL;
1267 1267 }
1268 1268 putnext(q, mp);
1269 1269 break;
1270 1270
1271 1271 case T_EXDATA_IND:
1272 1272 case T_DATA_IND:
1273 1273 case T_UNITDATA_IND:
1274 1274 if (pptr->type == T_EXDATA_IND)
1275 1275 tilog("timodrproc: Got T_EXDATA_IND\n", 0);
1276 1276
1277 1277 if (!bcanputnext(q, mp->b_band)) {
1278 1278 (void) putbq(q, mp);
1279 1279 return (1);
1280 1280 }
1281 1281 putnext(q, mp);
1282 1282 break;
1283 1283
1284 1284 case T_CAPABILITY_ACK: {
1285 1285 struct T_capability_ack *tca;
1286 1286
1287 1287 if (blen < sizeof (*tca)) {
1288 1288 putnext(q, mp);
1289 1289 break;
1290 1290 }
1291 1291
1292 1292 /* This transport supports T_CAPABILITY_REQ */
1293 1293 tilog("timodrproc: Got T_CAPABILITY_ACK\n", 0);
1294 1294
1295 1295 PI_PROVLOCK(tp->tim_provinfo);
1296 1296 if (tp->tim_provinfo->tpi_capability != PI_YES)
1297 1297 tp->tim_provinfo->tpi_capability = PI_YES;
1298 1298 PI_PROVUNLOCK(tp->tim_provinfo);
1299 1299
1300 1300 /* Reset possible pending timeout */
1301 1301 if (tp->tim_tcap_timoutid != 0) {
1302 1302 (void) quntimeout(q, tp->tim_tcap_timoutid);
1303 1303 tp->tim_tcap_timoutid = 0;
1304 1304 }
1305 1305
1306 1306 tca = (struct T_capability_ack *)mp->b_rptr;
1307 1307
1308 1308 if (tca->CAP_bits1 & TC1_INFO)
1309 1309 timodprocessinfo(q, tp, &tca->INFO_ack);
1310 1310
1311 1311 tim_send_reply(q, mp, tp, T_CAPABILITY_REQ);
1312 1312 }
1313 1313 break;
1314 1314 }
1315 1315 break;
1316 1316
1317 1317 case M_FLUSH:
1318 1318
1319 1319 tilog("timodrproc: Got M_FLUSH\n", 0);
1320 1320
1321 1321 if (*mp->b_rptr & FLUSHR) {
1322 1322 if (*mp->b_rptr & FLUSHBAND)
1323 1323 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
1324 1324 else
1325 1325 flushq(q, FLUSHDATA);
1326 1326 }
1327 1327 putnext(q, mp);
1328 1328 break;
1329 1329
1330 1330 case M_IOCACK:
1331 1331 iocbp = (struct iocblk *)mp->b_rptr;
1332 1332
1333 1333 tilog("timodrproc: Got M_IOCACK\n", 0);
1334 1334
1335 1335 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1336 1336
1337 1337 /*
1338 1338 * Transport provider supports this ioctl,
1339 1339 * so I don't have to.
1340 1340 */
1341 1341 if ((tp->tim_flags & DO_MYNAME) != 0) {
1342 1342 tp->tim_flags &= ~DO_MYNAME;
1343 1343 PI_PROVLOCK(tp->tim_provinfo);
1344 1344 tp->tim_provinfo->tpi_myname = PI_YES;
1345 1345 PI_PROVUNLOCK(tp->tim_provinfo);
1346 1346 }
1347 1347
1348 1348 ASSERT(tp->tim_mymaxlen >= 0);
1349 1349 if (tp->tim_mymaxlen != 0) {
1350 1350 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
1351 1351 tp->tim_myname = NULL;
1352 1352 tp->tim_mymaxlen = 0;
1353 1353 }
1354 1354 /* tim_iocsave may already be overwritten. */
1355 1355 if (tp->tim_saved_prim == -1) {
1356 1356 freemsg(tp->tim_iocsave);
1357 1357 tp->tim_iocsave = NULL;
1358 1358 }
1359 1359 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1360 1360 boolean_t clearit;
1361 1361
1362 1362 /*
1363 1363 * Transport provider supports this ioctl,
1364 1364 * so I don't have to.
1365 1365 */
1366 1366 if ((tp->tim_flags & DO_PEERNAME) != 0) {
1367 1367 tp->tim_flags &= ~DO_PEERNAME;
1368 1368 PI_PROVLOCK(tp->tim_provinfo);
1369 1369 tp->tim_provinfo->tpi_peername = PI_YES;
1370 1370 PI_PROVUNLOCK(tp->tim_provinfo);
1371 1371 }
1372 1372
1373 1373 mutex_enter(&tp->tim_mutex);
1374 1374 ASSERT(tp->tim_peermaxlen >= 0);
1375 1375 clearit = tp->tim_peermaxlen != 0;
1376 1376 if (clearit) {
1377 1377 kmem_free(tp->tim_peername, tp->tim_peermaxlen);
1378 1378 tp->tim_peername = NULL;
1379 1379 tp->tim_peermaxlen = 0;
1380 1380 tp->tim_peerlen = 0;
1381 1381 }
1382 1382 mutex_exit(&tp->tim_mutex);
1383 1383 if (clearit) {
1384 1384 mblk_t *bp;
1385 1385
1386 1386 bp = tp->tim_consave;
1387 1387 while (bp != NULL) {
1388 1388 nbp = bp->b_next;
1389 1389 bp->b_next = NULL;
1390 1390 freemsg(bp);
1391 1391 bp = nbp;
1392 1392 }
1393 1393 tp->tim_consave = NULL;
1394 1394 }
1395 1395 /* tim_iocsave may already be overwritten. */
1396 1396 if (tp->tim_saved_prim == -1) {
1397 1397 freemsg(tp->tim_iocsave);
1398 1398 tp->tim_iocsave = NULL;
1399 1399 }
1400 1400 }
1401 1401 putnext(q, mp);
1402 1402 break;
1403 1403
1404 1404 case M_IOCNAK:
1405 1405
1406 1406 tilog("timodrproc: Got M_IOCNAK\n", 0);
1407 1407
1408 1408 iocbp = (struct iocblk *)mp->b_rptr;
1409 1409 if (((iocbp->ioc_cmd == TI_GETMYNAME) ||
1410 1410 (iocbp->ioc_cmd == TI_GETPEERNAME)) &&
1411 1411 ((iocbp->ioc_error == EINVAL) || (iocbp->ioc_error == 0))) {
1412 1412 PI_PROVLOCK(tp->tim_provinfo);
1413 1413 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1414 1414 if (tp->tim_provinfo->tpi_myname == PI_DONTKNOW)
1415 1415 tp->tim_provinfo->tpi_myname = PI_NO;
1416 1416 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1417 1417 if (tp->tim_provinfo->tpi_peername == PI_DONTKNOW)
1418 1418 tp->tim_provinfo->tpi_peername = PI_NO;
1419 1419 }
1420 1420 PI_PROVUNLOCK(tp->tim_provinfo);
1421 1421 /* tim_iocsave may already be overwritten. */
1422 1422 if ((tp->tim_iocsave != NULL) &&
1423 1423 (tp->tim_saved_prim == -1)) {
1424 1424 freemsg(mp);
1425 1425 mp = tp->tim_iocsave;
1426 1426 tp->tim_iocsave = NULL;
1427 1427 tp->tim_flags |= NAMEPROC;
1428 1428 if (ti_doname(WR(q), mp) != DONAME_CONT) {
1429 1429 tp->tim_flags &= ~NAMEPROC;
1430 1430 }
1431 1431 break;
1432 1432 }
1433 1433 }
1434 1434 putnext(q, mp);
1435 1435 break;
1436 1436 }
1437 1437
1438 1438 return (0);
1439 1439 }
1440 1440
1441 1441 /*
1442 1442 * timodwput - Module write put procedure. This is called from
1443 1443 * the module, driver, or stream head upstream/downstream.
1444 1444 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_REQ,
1445 1445 * and T_UNITDATA_REQ) messages. All others are queued to
1446 1446 * be handled by the service procedures.
1447 1447 */
1448 1448
1449 1449 static void
1450 1450 timodwput(queue_t *q, mblk_t *mp)
1451 1451 {
1452 1452 union T_primitives *pptr;
1453 1453 struct tim_tim *tp;
1454 1454 struct iocblk *iocbp;
1455 1455
1456 1456 /*
1457 1457 * Enqueue normal-priority messages if our queue already
1458 1458 * holds some messages for deferred processing but don't
1459 1459 * enqueue those M_IOCTLs which will result in an
 1460 1460 * M_PCPROTO (i.e., high-priority) message being created.
1461 1461 */
1462 1462 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
1463 1463 if (mp->b_datap->db_type == M_IOCTL) {
1464 1464 iocbp = (struct iocblk *)mp->b_rptr;
1465 1465 switch (iocbp->ioc_cmd) {
1466 1466 default:
1467 1467 (void) putq(q, mp);
1468 1468 return;
1469 1469
1470 1470 case TI_GETINFO:
1471 1471 case TI_SYNC:
1472 1472 case TI_CAPABILITY:
1473 1473 break;
1474 1474 }
1475 1475 } else {
1476 1476 (void) putq(q, mp);
1477 1477 return;
1478 1478 }
1479 1479 }
1480 1480 /*
1481 1481 * Inline processing of data (to avoid additional procedure call).
1482 1482 * Rest is handled in timodwproc.
1483 1483 */
1484 1484
1485 1485 switch (mp->b_datap->db_type) {
1486 1486 case M_DATA:
1487 1487 tp = (struct tim_tim *)q->q_ptr;
1488 1488 ASSERT(tp);
1489 1489 if (tp->tim_flags & CLTS) {
1490 1490 mblk_t *tmp;
1491 1491
1492 1492 if ((tmp = tim_filladdr(q, mp, B_FALSE)) == NULL) {
1493 1493 (void) putq(q, mp);
1494 1494 break;
1495 1495 } else {
1496 1496 mp = tmp;
1497 1497 }
1498 1498 }
1499 1499 if (bcanputnext(q, mp->b_band))
1500 1500 putnext(q, mp);
1501 1501 else
1502 1502 (void) putq(q, mp);
1503 1503 break;
1504 1504 case M_PROTO:
1505 1505 case M_PCPROTO:
1506 1506 pptr = (union T_primitives *)mp->b_rptr;
1507 1507 switch (pptr->type) {
1508 1508 case T_UNITDATA_REQ:
1509 1509 tp = (struct tim_tim *)q->q_ptr;
1510 1510 ASSERT(tp);
1511 1511 if (tp->tim_flags & CLTS) {
1512 1512 mblk_t *tmp;
1513 1513
1514 1514 tmp = tim_filladdr(q, mp, B_FALSE);
1515 1515 if (tmp == NULL) {
1516 1516 (void) putq(q, mp);
1517 1517 break;
1518 1518 } else {
1519 1519 mp = tmp;
1520 1520 }
1521 1521 }
1522 1522 if (bcanputnext(q, mp->b_band))
1523 1523 putnext(q, mp);
1524 1524 else
1525 1525 (void) putq(q, mp);
1526 1526 break;
1527 1527
1528 1528 case T_DATA_REQ:
1529 1529 case T_EXDATA_REQ:
1530 1530 if (bcanputnext(q, mp->b_band))
1531 1531 putnext(q, mp);
1532 1532 else
1533 1533 (void) putq(q, mp);
1534 1534 break;
1535 1535 default:
1536 1536 (void) timodwproc(q, mp);
1537 1537 break;
1538 1538 }
1539 1539 break;
1540 1540 default:
1541 1541 (void) timodwproc(q, mp);
1542 1542 break;
1543 1543 }
1544 1544 }
1545 1545 /*
1546 1546 * timodwsrv - Module write queue service procedure.
1547 1547 * This is called when messages are placed on an empty queue,
1548 1548 * when high priority messages are placed on the queue, and
1549 1549 * when flow control restrictions subside. This code used to
1550 1550 * be included in a put procedure, but it was moved to a
1551 1551 * service procedure because several points were added where
1552 1552 * memory allocation could fail, and there is no reasonable
1553 1553 * recovery mechanism from the put procedure.
1554 1554 */
1555 1555 static void
1556 1556 timodwsrv(queue_t *q)
1557 1557 {
1558 1558 mblk_t *mp;
1559 1559
1560 1560 ASSERT(q != NULL);
1561 1561 if (q->q_ptr == NULL)
1562 1562 return;
1563 1563
1564 1564 while ((mp = getq(q)) != NULL) {
1565 1565 if (timodwproc(q, mp)) {
1566 1566 /*
1567 1567 * timodwproc did a putbq - stop processing
1568 1568 * messages.
1569 1569 */
1570 1570 return;
1571 1571 }
1572 1572 }
1573 1573 }
1574 1574
1575 1575 /*
1576 1576 * Common routine to process write side messages
1577 1577 */
1578 1578
1579 1579 static int
1580 1580 timodwproc(queue_t *q, mblk_t *mp)
1581 1581 {
1582 1582 union T_primitives *pptr;
1583 1583 struct tim_tim *tp;
1584 1584 uint32_t auditing = AU_AUDITING();
1585 1585 mblk_t *tmp;
1586 1586 struct iocblk *iocbp;
1587 1587 int error;
1588 1588
1589 1589 tp = (struct tim_tim *)q->q_ptr;
1590 1590
1591 1591 switch (mp->b_datap->db_type) {
1592 1592 default:
1593 1593 putnext(q, mp);
1594 1594 break;
1595 1595
1596 1596 case M_DATA:
1597 1597 if (tp->tim_flags & CLTS) {
1598 1598 if ((tmp = tim_filladdr(q, mp, B_TRUE)) == NULL) {
1599 1599 return (1);
1600 1600 } else {
1601 1601 mp = tmp;
1602 1602 }
1603 1603 }
1604 1604 if (!bcanputnext(q, mp->b_band)) {
1605 1605 (void) putbq(q, mp);
1606 1606 return (1);
1607 1607 }
1608 1608 putnext(q, mp);
1609 1609 break;
1610 1610
1611 1611 case M_IOCTL:
1612 1612
1613 1613 iocbp = (struct iocblk *)mp->b_rptr;
1614 1614 TILOG("timodwproc: Got M_IOCTL(%d)\n", iocbp->ioc_cmd);
1615 1615
1616 1616 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
1617 1617
1618 1618 /*
 1619 1619 * TPI requires that we await the response to a previously sent message
 1620 1620 * before handling another, so put it back on the head of the queue.
1621 1621 * Since putbq() may see QWANTR unset when called from the
1622 1622 * service procedure, the queue must be explicitly scheduled
1623 1623 * for service, as no backenable will occur for this case.
1624 1624 * tim_ioctl_retry() sets a timer to handle the qenable.
1625 1625 */
1626 1626 if (tp->tim_flags & WAITIOCACK) {
1627 1627 TILOG("timodwproc: putbq M_IOCTL(%d)\n",
1628 1628 iocbp->ioc_cmd);
1629 1629 (void) putbq(q, mp);
1630 1630 /* Called from timodwsrv() and messages on queue */
1631 1631 if (!(q->q_flag & QWANTR))
1632 1632 tim_ioctl_retry(q);
1633 1633 return (1);
1634 1634 }
1635 1635
1636 1636 switch (iocbp->ioc_cmd) {
1637 1637 default:
1638 1638 putnext(q, mp);
1639 1639 break;
1640 1640
1641 1641 case _I_GETPEERCRED:
1642 1642 if ((tp->tim_flags & COTS) == 0) {
1643 1643 miocnak(q, mp, 0, ENOTSUP);
1644 1644 } else {
1645 1645 mblk_t *cmp = mp->b_cont;
1646 1646 k_peercred_t *kp = NULL;
1647 1647
1648 1648 mutex_enter(&tp->tim_mutex);
1649 1649 if (cmp != NULL &&
1650 1650 iocbp->ioc_flag == IOC_NATIVE &&
1651 1651 (tp->tim_flags &
1652 1652 (CONNWAIT|LOCORDREL|REMORDREL)) == 0 &&
1653 1653 tp->tim_peercred != NULL &&
1654 1654 DB_TYPE(cmp) == M_DATA &&
1655 1655 MBLKL(cmp) == sizeof (k_peercred_t)) {
1656 1656 kp = (k_peercred_t *)cmp->b_rptr;
1657 1657 crhold(kp->pc_cr = tp->tim_peercred);
1658 1658 kp->pc_cpid = tp->tim_cpid;
1659 1659 }
1660 1660 mutex_exit(&tp->tim_mutex);
1661 1661 if (kp != NULL)
1662 1662 miocack(q, mp, sizeof (*kp), 0);
1663 1663 else
1664 1664 miocnak(q, mp, 0, ENOTCONN);
1665 1665 }
1666 1666 break;
1667 1667 case TI_BIND:
1668 1668 case TI_UNBIND:
1669 1669 case TI_OPTMGMT:
1670 1670 case TI_GETADDRS:
1671 1671 TILOG("timodwproc: TI_{BIND|UNBIND|OPTMGMT|GETADDRS}"
1672 1672 "\n", 0);
1673 1673
1674 1674 /*
1675 1675 * We know that tim_send_ioctl_tpi_msg() is only
1676 1676 * going to examine the `type' field, so we only
1677 1677 * check that we can access that much data.
1678 1678 */
1679 1679 error = miocpullup(mp, sizeof (t_scalar_t));
1680 1680 if (error != 0) {
1681 1681 miocnak(q, mp, 0, error);
1682 1682 break;
1683 1683 }
1684 1684 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1685 1685 break;
1686 1686
1687 1687 case TI_GETINFO:
1688 1688 TILOG("timodwproc: TI_GETINFO\n", 0);
1689 1689 error = miocpullup(mp, sizeof (struct T_info_req));
1690 1690 if (error != 0) {
1691 1691 miocnak(q, mp, 0, error);
1692 1692 break;
1693 1693 }
1694 1694 tp->tim_flags |= WAIT_IOCINFOACK;
1695 1695 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1696 1696 break;
1697 1697
1698 1698 case TI_SYNC: {
1699 1699 mblk_t *tsr_mp;
1700 1700 struct ti_sync_req *tsr;
1701 1701 uint32_t tsr_flags;
1702 1702
1703 1703 error = miocpullup(mp, sizeof (struct ti_sync_req));
1704 1704 if (error != 0) {
1705 1705 miocnak(q, mp, 0, error);
1706 1706 break;
1707 1707 }
1708 1708
1709 1709 tsr_mp = mp->b_cont;
1710 1710 tsr = (struct ti_sync_req *)tsr_mp->b_rptr;
1711 1711 TILOG("timodwproc: TI_SYNC(%x)\n", tsr->tsr_flags);
1712 1712
1713 1713 /*
1714 1714 * Save out the value of tsr_flags, in case we
1715 1715 * reallocb() tsr_mp (below).
1716 1716 */
1717 1717 tsr_flags = tsr->tsr_flags;
1718 1718 if ((tsr_flags & TSRF_INFO_REQ) == 0) {
1719 1719 mblk_t *ack_mp = reallocb(tsr_mp,
1720 1720 sizeof (struct ti_sync_ack), 0);
1721 1721
1722 1722 /* Can reply immediately. */
1723 1723 mp->b_cont = NULL;
1724 1724 if (ack_mp == NULL) {
1725 1725 tilog("timodwproc: allocb failed no "
1726 1726 "recovery attempt\n", 0);
1727 1727 freemsg(tsr_mp);
1728 1728 miocnak(q, mp, 0, ENOMEM);
1729 1729 } else {
1730 1730 tim_answer_ti_sync(q, mp, tp,
1731 1731 ack_mp, tsr_flags);
1732 1732 }
1733 1733 break;
1734 1734 }
1735 1735
1736 1736 /*
1737 1737 * This code is retained for compatibility with
1738 1738 * old statically linked applications. New code
1739 1739 * should use TI_CAPABILITY for all TPI
 1740 1740 * information and should not use the TSRF_INFO_REQ
 1741 1741 * flag.
 1742 1742 *
 1743 1743 * Defer the necessary processing to the rput procedure,
 1744 1744 * as we need to get information from the transport
 1745 1745 * driver. Set flags that will tell the read
1746 1746 * side the work needed on this request.
1747 1747 */
1748 1748
1749 1749 if (tsr_flags & TSRF_IS_EXP_IN_RCVBUF)
1750 1750 tp->tim_flags |= PEEK_RDQ_EXPIND;
1751 1751
1752 1752 /*
1753 1753 * Convert message to a T_INFO_REQ message; relies
1754 1754 * on sizeof (struct ti_sync_req) >= sizeof (struct
 1755 1755 * T_info_req).
1756 1756 */
1757 1757 ASSERT(MBLKL(tsr_mp) >= sizeof (struct T_info_req));
1758 1758
1759 1759 ((struct T_info_req *)tsr_mp->b_rptr)->PRIM_type =
1760 1760 T_INFO_REQ;
1761 1761 tsr_mp->b_wptr = tsr_mp->b_rptr +
1762 1762 sizeof (struct T_info_req);
1763 1763 tp->tim_flags |= WAIT_IOCINFOACK;
1764 1764 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1765 1765 }
1766 1766 break;
1767 1767
1768 1768 case TI_CAPABILITY: {
1769 1769 mblk_t *tcsr_mp;
1770 1770 struct T_capability_req *tcr;
1771 1771
1772 1772 error = miocpullup(mp, sizeof (*tcr));
1773 1773 if (error != 0) {
1774 1774 miocnak(q, mp, 0, error);
1775 1775 break;
1776 1776 }
1777 1777
1778 1778 tcsr_mp = mp->b_cont;
1779 1779 tcr = (struct T_capability_req *)tcsr_mp->b_rptr;
1780 1780 TILOG("timodwproc: TI_CAPABILITY(CAP_bits1 = %x)\n",
1781 1781 tcr->CAP_bits1);
1782 1782
1783 1783 if (tcr->PRIM_type != T_CAPABILITY_REQ) {
1784 1784 TILOG("timodwproc: invalid msg type %d\n",
1785 1785 tcr->PRIM_type);
1786 1786 miocnak(q, mp, 0, EPROTO);
1787 1787 break;
1788 1788 }
1789 1789
1790 1790 switch (tp->tim_provinfo->tpi_capability) {
1791 1791 case PI_YES:
1792 1792 /* Just send T_CAPABILITY_REQ down */
1793 1793 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1794 1794 break;
1795 1795
1796 1796 case PI_DONTKNOW:
1797 1797 /*
1798 1798 * It is unknown yet whether transport provides
1799 1799 * T_CAPABILITY_REQ or not. Send message down
1800 1800 * and wait for reply.
1801 1801 */
1802 1802
1803 1803 ASSERT(tp->tim_tcap_timoutid == 0);
1804 1804 if ((tcr->CAP_bits1 & TC1_INFO) == 0) {
1805 1805 tp->tim_flags |= TI_CAP_RECVD;
1806 1806 } else {
1807 1807 tp->tim_flags |= (TI_CAP_RECVD |
1808 1808 CAP_WANTS_INFO);
1809 1809 }
1810 1810
1811 1811 tp->tim_tcap_timoutid = qtimeout(q,
1812 1812 tim_tcap_timer, q, tim_tcap_wait * hz);
1813 1813 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1814 1814 break;
1815 1815
1816 1816 case PI_NO:
1817 1817 /*
1818 1818 * Transport doesn't support T_CAPABILITY_REQ.
1819 1819 * Either reply immediately or send T_INFO_REQ
1820 1820 * if needed.
1821 1821 */
1822 1822 if ((tcr->CAP_bits1 & TC1_INFO) != 0) {
1823 1823 tp->tim_flags |= (TI_CAP_RECVD |
1824 1824 CAP_WANTS_INFO | WAIT_IOCINFOACK);
1825 1825 TILOG("timodwproc: sending down "
1826 1826 "T_INFO_REQ, flags = %x\n",
1827 1827 tp->tim_flags);
1828 1828
1829 1829 /*
1830 1830 * Generate T_INFO_REQ message and send
1831 1831 * it down
1832 1832 */
1833 1833 ((struct T_info_req *)tcsr_mp->b_rptr)->
1834 1834 PRIM_type = T_INFO_REQ;
1835 1835 tcsr_mp->b_wptr = tcsr_mp->b_rptr +
1836 1836 sizeof (struct T_info_req);
1837 1837 tim_send_ioctl_tpi_msg(q, mp, tp,
1838 1838 iocbp);
1839 1839 break;
1840 1840 }
1841 1841
1842 1842
1843 1843 /*
1844 1844 * Can reply immediately. Just send back
1845 1845 * T_CAPABILITY_ACK with CAP_bits1 set to 0.
1846 1846 */
1847 1847 mp->b_cont = tcsr_mp = tpi_ack_alloc(mp->b_cont,
1848 1848 sizeof (struct T_capability_ack), M_PCPROTO,
1849 1849 T_CAPABILITY_ACK);
1850 1850
1851 1851 if (tcsr_mp == NULL) {
1852 1852 tilog("timodwproc: allocb failed no "
1853 1853 "recovery attempt\n", 0);
1854 1854 miocnak(q, mp, 0, ENOMEM);
1855 1855 break;
1856 1856 }
1857 1857
1858 1858 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
1859 1859 WAIT_IOCINFOACK | CAP_WANTS_INFO);
1860 1860 ((struct T_capability_ack *)
1861 1861 tcsr_mp->b_rptr)->CAP_bits1 = 0;
1862 1862 tim_ioctl_send_reply(q, mp, tcsr_mp);
1863 1863
1864 1864 /*
1865 1865 * It could happen when timod is awaiting ack
1866 1866 * for TI_GETPEERNAME/TI_GETMYNAME.
1867 1867 */
1868 1868 if (tp->tim_iocsave != NULL) {
1869 1869 freemsg(tp->tim_iocsave);
1870 1870 tp->tim_iocsave = NULL;
1871 1871 tp->tim_saved_prim = -1;
1872 1872 }
1873 1873 break;
1874 1874
1875 1875 default:
1876 1876 cmn_err(CE_PANIC,
1877 1877 "timodwproc: unknown tpi_capability value "
1878 1878 "%d\n", tp->tim_provinfo->tpi_capability);
1879 1879 break;
1880 1880 }
1881 1881 }
1882 1882 break;
1883 1883
1884 1884 case TI_GETMYNAME:
1885 1885
1886 1886 tilog("timodwproc: Got TI_GETMYNAME\n", 0);
1887 1887
1888 1888 if (tp->tim_provinfo->tpi_myname == PI_YES) {
1889 1889 putnext(q, mp);
1890 1890 break;
1891 1891 }
1892 1892 goto getname;
1893 1893
1894 1894 case TI_GETPEERNAME:
1895 1895
1896 1896 tilog("timodwproc: Got TI_GETPEERNAME\n", 0);
1897 1897
1898 1898 if (tp->tim_provinfo->tpi_peername == PI_YES) {
1899 1899 putnext(q, mp);
1900 1900 break;
1901 1901 }
1902 1902 getname:
1903 1903 if ((tmp = copymsg(mp)) == NULL) {
1904 1904 tim_recover(q, mp, msgsize(mp));
1905 1905 return (1);
1906 1906 }
1907 1907 /*
1908 1908 * tim_iocsave may be non-NULL when timod is awaiting
1909 1909 * ack for another TI_GETPEERNAME/TI_GETMYNAME.
1910 1910 */
1911 1911 freemsg(tp->tim_iocsave);
1912 1912 tp->tim_iocsave = mp;
1913 1913 tp->tim_saved_prim = -1;
1914 1914 putnext(q, tmp);
1915 1915 break;
1916 1916 }
1917 1917 break;
1918 1918
1919 1919 case M_IOCDATA:
1920 1920
1921 1921 if (tp->tim_flags & NAMEPROC) {
1922 1922 if (ti_doname(q, mp) != DONAME_CONT) {
1923 1923 tp->tim_flags &= ~NAMEPROC;
1924 1924 }
1925 1925 } else
1926 1926 putnext(q, mp);
1927 1927 break;
1928 1928
1929 1929 case M_PROTO:
1930 1930 case M_PCPROTO:
1931 1931 if (MBLKL(mp) < sizeof (t_scalar_t)) {
1932 1932 merror(q, mp, EPROTO);
1933 1933 return (1);
1934 1934 }
1935 1935
1936 1936 pptr = (union T_primitives *)mp->b_rptr;
1937 1937 switch (pptr->type) {
1938 1938 default:
1939 1939 putnext(q, mp);
1940 1940 break;
1941 1941
1942 1942 case T_EXDATA_REQ:
1943 1943 case T_DATA_REQ:
1944 1944 if (pptr->type == T_EXDATA_REQ)
1945 1945 tilog("timodwproc: Got T_EXDATA_REQ\n", 0);
1946 1946
1947 1947 if (!bcanputnext(q, mp->b_band)) {
1948 1948 (void) putbq(q, mp);
1949 1949 return (1);
1950 1950 }
1951 1951 putnext(q, mp);
1952 1952 break;
1953 1953
1954 1954 case T_UNITDATA_REQ:
1955 1955 if (tp->tim_flags & CLTS) {
1956 1956 tmp = tim_filladdr(q, mp, B_TRUE);
1957 1957 if (tmp == NULL) {
1958 1958 return (1);
1959 1959 } else {
1960 1960 mp = tmp;
1961 1961 }
1962 1962 }
1963 1963 if (auditing)
1964 1964 audit_sock(T_UNITDATA_REQ, q, mp, TIMOD_ID);
1965 1965 if (!bcanputnext(q, mp->b_band)) {
1966 1966 (void) putbq(q, mp);
1967 1967 return (1);
1968 1968 }
1969 1969 putnext(q, mp);
1970 1970 break;
1971 1971
1972 1972 case T_CONN_REQ: {
1973 1973 struct T_conn_req *reqp = (struct T_conn_req *)
1974 1974 mp->b_rptr;
1975 1975 void *p;
1976 1976
1977 1977 tilog("timodwproc: Got T_CONN_REQ\n", 0);
1978 1978
1979 1979 if (MBLKL(mp) < sizeof (struct T_conn_req)) {
1980 1980 merror(q, mp, EPROTO);
1981 1981 return (1);
1982 1982 }
1983 1983
1984 1984 if (tp->tim_flags & DO_PEERNAME) {
1985 1985 if (!MBLKIN(mp, reqp->DEST_offset,
1986 1986 reqp->DEST_length)) {
1987 1987 merror(q, mp, EPROTO);
1988 1988 return (1);
1989 1989 }
1990 1990 ASSERT(reqp->DEST_length >= 0);
1991 1991 mutex_enter(&tp->tim_mutex);
1992 1992 if (reqp->DEST_length > tp->tim_peermaxlen) {
1993 1993 p = kmem_alloc(reqp->DEST_length,
1994 1994 KM_NOSLEEP);
1995 1995 if (p == NULL) {
1996 1996 mutex_exit(&tp->tim_mutex);
1997 1997 tilog("timodwproc: kmem_alloc "
1998 1998 "failed, attempting "
1999 1999 "recovery\n", 0);
2000 2000 tim_recover(q, mp,
2001 2001 reqp->DEST_length);
2002 2002 return (1);
2003 2003 }
2004 2004 if (tp->tim_peermaxlen)
2005 2005 kmem_free(tp->tim_peername,
2006 2006 tp->tim_peermaxlen);
2007 2007 tp->tim_peername = p;
2008 2008 tp->tim_peermaxlen = reqp->DEST_length;
2009 2009 }
2010 2010 tp->tim_peerlen = reqp->DEST_length;
2011 2011 p = mp->b_rptr + reqp->DEST_offset;
2012 2012 bcopy(p, tp->tim_peername, tp->tim_peerlen);
2013 2013 mutex_exit(&tp->tim_mutex);
2014 2014 }
2015 2015 if (tp->tim_flags & COTS)
2016 2016 tp->tim_flags |= CONNWAIT;
2017 2017 if (auditing)
2018 2018 audit_sock(T_CONN_REQ, q, mp, TIMOD_ID);
2019 2019 putnext(q, mp);
2020 2020 break;
2021 2021 }
2022 2022
2023 2023 case O_T_CONN_RES:
2024 2024 case T_CONN_RES: {
2025 2025 struct T_conn_res *resp;
2026 2026 struct T_conn_ind *indp;
2027 2027 mblk_t *pmp = NULL;
2028 2028 mblk_t *nbp;
2029 2029
2030 2030 if (MBLKL(mp) < sizeof (struct T_conn_res) ||
2031 2031 (tp->tim_flags & WAITIOCACK)) {
2032 2032 merror(q, mp, EPROTO);
2033 2033 return (1);
2034 2034 }
2035 2035
2036 2036 resp = (struct T_conn_res *)mp->b_rptr;
2037 2037 for (tmp = tp->tim_consave; tmp != NULL;
2038 2038 tmp = tmp->b_next) {
2039 2039 indp = (struct T_conn_ind *)tmp->b_rptr;
2040 2040 if (indp->SEQ_number == resp->SEQ_number)
2041 2041 break;
2042 2042 pmp = tmp;
2043 2043 }
2044 2044 if (tmp == NULL)
2045 2045 goto cresout;
2046 2046
2047 2047 if ((nbp = dupb(mp)) == NULL &&
2048 2048 (nbp = copyb(mp)) == NULL) {
2049 2049 tim_recover(q, mp, msgsize(mp));
2050 2050 return (1);
2051 2051 }
2052 2052
2053 2053 if (pmp != NULL)
2054 2054 pmp->b_next = tmp->b_next;
2055 2055 else
2056 2056 tp->tim_consave = tmp->b_next;
2057 2057 tmp->b_next = NULL;
2058 2058
2059 2059 /*
2060 2060 * Construct a list with:
2061 2061 * nbp - copy of user's original request
2062 2062 * tmp - the extracted T_conn_ind
2063 2063 */
2064 2064 nbp->b_cont = tmp;
2065 2065 /*
2066 2066 * tim_iocsave may be non-NULL when timod is awaiting
2067 2067 * ack for TI_GETPEERNAME/TI_GETMYNAME.
2068 2068 */
2069 2069 freemsg(tp->tim_iocsave);
2070 2070 tp->tim_iocsave = nbp;
2071 2071 tp->tim_saved_prim = pptr->type;
2072 2072 tp->tim_flags |= WAIT_CONNRESACK | WAITIOCACK;
2073 2073
2074 2074 cresout:
2075 2075 putnext(q, mp);
2076 2076 break;
2077 2077 }
2078 2078
2079 2079 case T_DISCON_REQ: {
2080 2080 struct T_discon_req *disp;
2081 2081 struct T_conn_ind *conp;
2082 2082 mblk_t *pmp = NULL;
2083 2083
2084 2084 if (MBLKL(mp) < sizeof (struct T_discon_req)) {
2085 2085 merror(q, mp, EPROTO);
2086 2086 return (1);
2087 2087 }
2088 2088
2089 2089 disp = (struct T_discon_req *)mp->b_rptr;
2090 2090 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
2091 2091 tim_clear_peer(tp);
2092 2092
2093 2093 /*
2094 2094 * If we are already connected, there won't
2095 2095 * be any messages on tim_consave.
2096 2096 */
2097 2097 for (tmp = tp->tim_consave; tmp; tmp = tmp->b_next) {
2098 2098 conp = (struct T_conn_ind *)tmp->b_rptr;
2099 2099 if (conp->SEQ_number == disp->SEQ_number)
2100 2100 break;
2101 2101 pmp = tmp;
2102 2102 }
2103 2103 if (tmp) {
2104 2104 if (pmp)
2105 2105 pmp->b_next = tmp->b_next;
2106 2106 else
2107 2107 tp->tim_consave = tmp->b_next;
2108 2108 tmp->b_next = NULL;
2109 2109 freemsg(tmp);
2110 2110 }
2111 2111 putnext(q, mp);
2112 2112 break;
2113 2113 }
2114 2114
2115 2115 case T_ORDREL_REQ:
2116 2116 if (tp->tim_flags & REMORDREL) {
2117 2117 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
2118 2118 tim_clear_peer(tp);
2119 2119 } else {
2120 2120 tp->tim_flags |= LOCORDREL;
2121 2121 }
2122 2122 putnext(q, mp);
2123 2123 break;
2124 2124
2125 2125 case T_CAPABILITY_REQ:
2126 2126 tilog("timodwproc: Got T_CAPABILITY_REQ\n", 0);
2127 2127 /*
2128 2128 * XXX: We may know at this point whether transport
2129 2129 * provides T_CAPABILITY_REQ or not and we may utilise
2130 2130 * this knowledge here.
2131 2131 */
2132 2132 putnext(q, mp);
2133 2133 break;
2134 2134 }
2135 2135 break;
2136 2136 case M_FLUSH:
2137 2137
2138 2138 tilog("timodwproc: Got M_FLUSH\n", 0);
2139 2139
2140 2140 if (*mp->b_rptr & FLUSHW) {
2141 2141 if (*mp->b_rptr & FLUSHBAND)
2142 2142 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
2143 2143 else
2144 2144 flushq(q, FLUSHDATA);
2145 2145 }
2146 2146 putnext(q, mp);
2147 2147 break;
2148 2148 }
2149 2149
2150 2150 return (0);
2151 2151 }
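/*
 * Editorial note (inferred from the code above, not part of the webrev
 * change): timodwproc() returns 1 whenever the caller must stop touching
 * the message, i.e. when it was put back on the queue with putbq(), handed
 * to the allocation-recovery path via tim_recover(), or rejected with
 * merror(); it returns 0 once the message has been fully dispatched.
 */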
2152 2152
2153 2153 static void
2154 2154 tilog(char *str, t_scalar_t arg)
2155 2155 {
2156 2156 if (dotilog) {
2157 2157 if (dotilog & 2)
2158 2158 cmn_err(CE_CONT, str, arg);
2159 2159 if (dotilog & 4)
2160 2160 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2161 2161 str, arg);
2162 2162 else
2163 2163 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2164 2164 }
2165 2165 }
2166 2166
2167 2167 static void
2168 2168 tilogp(char *str, uintptr_t arg)
2169 2169 {
2170 2170 if (dotilog) {
2171 2171 if (dotilog & 2)
2172 2172 cmn_err(CE_CONT, str, arg);
2173 2173 if (dotilog & 4)
2174 2174 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2175 2175 str, arg);
2176 2176 else
2177 2177 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2178 2178 }
2179 2179 }
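/*
 * Editorial note: the two helpers above gate all tracing on the `dotilog'
 * flag word.  Bit 0x2 additionally echoes the message to the console via
 * cmn_err(CE_CONT), bit 0x4 logs it through strlog() at SL_TRACE | SL_ERROR,
 * and any other non-zero value logs it at SL_TRACE only; strlog() output
 * can be collected with the STREAMS trace logger, strace(1M).
 */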
2180 2180
2181 2181
2182 2182 /*
2183 2183 * Process the TI_GETNAME ioctl. If no name exists, return len = 0
2184 2184 * in strbuf structures. The state transitions are determined by what
2185 2185 * is hung off cq_private (cp_private) in the copyreq (copyresp) structure.
2186 2186 * The high-level steps in the ioctl processing are as follows:
2187 2187 *
2188 2188 * 1) we receive a transparent M_IOCTL with the arg in the second message
2189 2189 * block of the message.
2190 2190 * 2) we send up an M_COPYIN request for the strbuf structure pointed to
2191 2191 * by arg. The block containing arg is hung off cq_private.
2192 2192 * 3) we receive an M_IOCDATA response with cp->cp_private->b_cont == NULL.
2193 2193 * This means that the strbuf structure is found in the message block
2194 2194 * mp->b_cont.
2195 2195 * 4) we send up an M_COPYOUT request with the strbuf message hung off
2196 2196 * cq_private->b_cont. The address we are copying to is strbuf.buf.
2197 2197 * we set strbuf.len to 0 to indicate that we should copy the strbuf
2198 2198 * structure the next time. The message mp->b_cont contains the
2199 2199 * address info.
2200 2200 * 5) we receive an M_IOCDATA with cp_private->b_cont != NULL and
2201 2201 * strbuf.len == 0. Restore strbuf.len to either tp->tim_mylen or
2202 2202 * tp->tim_peerlen.
2203 2203 * 6) we send up an M_COPYOUT request with a copy of the strbuf message
2204 2204 * hung off mp->b_cont. In the strbuf structure in the message hung
2205 2205 * off cq_private->b_cont, we set strbuf.len to 0 and strbuf.maxlen
2206 2206 * to 0. This means that the next step is to ACK the ioctl.
2207 2207 * 7) we receive an M_IOCDATA message with cp_private->b_cont != NULL and
2208 2208 * strbuf.len == 0 and strbuf.maxlen == 0. Free up cp_private and
2209 2209 * send an M_IOCACK upstream, and we are done.
2210 2210 *
2211 2211 */
2212 2212 static int
2213 2213 ti_doname(
2214 2214 queue_t *q, /* queue message arrived at */
2215 2215 mblk_t *mp) /* M_IOCTL or M_IOCDATA message only */
2216 2216 {
2217 2217 struct iocblk *iocp;
2218 2218 struct copyreq *cqp;
2219 2219 STRUCT_HANDLE(strbuf, sb);
2220 2220 struct copyresp *csp;
2221 2221 int ret;
2222 2222 mblk_t *bp;
2223 2223 struct tim_tim *tp = q->q_ptr;
2224 2224 boolean_t getpeer;
2225 2225
2226 2226 switch (mp->b_datap->db_type) {
2227 2227 case M_IOCTL:
2228 2228 iocp = (struct iocblk *)mp->b_rptr;
2229 2229 if ((iocp->ioc_cmd != TI_GETMYNAME) &&
2230 2230 (iocp->ioc_cmd != TI_GETPEERNAME)) {
2231 2231 tilog("ti_doname: bad M_IOCTL command\n", 0);
2232 2232 miocnak(q, mp, 0, EINVAL);
2233 2233 ret = DONAME_FAIL;
2234 2234 break;
2235 2235 }
2236 2236 if ((iocp->ioc_count != TRANSPARENT)) {
2237 2237 miocnak(q, mp, 0, EINVAL);
2238 2238 ret = DONAME_FAIL;
2239 2239 break;
2240 2240 }
2241 2241
2242 2242 cqp = (struct copyreq *)mp->b_rptr;
2243 2243 cqp->cq_private = mp->b_cont;
2244 2244 cqp->cq_addr = (caddr_t)*(intptr_t *)mp->b_cont->b_rptr;
2245 2245 mp->b_cont = NULL;
2246 2246 cqp->cq_size = SIZEOF_STRUCT(strbuf, iocp->ioc_flag);
2247 2247 cqp->cq_flag = 0;
2248 2248 mp->b_datap->db_type = M_COPYIN;
2249 2249 mp->b_wptr = mp->b_rptr + sizeof (struct copyreq);
2250 2250 qreply(q, mp);
2251 2251 ret = DONAME_CONT;
2252 2252 break;
2253 2253
2254 2254 case M_IOCDATA:
2255 2255 csp = (struct copyresp *)mp->b_rptr;
2256 2256 iocp = (struct iocblk *)mp->b_rptr;
2257 2257 cqp = (struct copyreq *)mp->b_rptr;
2258 2258 if ((csp->cp_cmd != TI_GETMYNAME) &&
2259 2259 (csp->cp_cmd != TI_GETPEERNAME)) {
2260 2260 cmn_err(CE_WARN, "ti_doname: bad M_IOCDATA command\n");
2261 2261 miocnak(q, mp, 0, EINVAL);
2262 2262 ret = DONAME_FAIL;
2263 2263 break;
2264 2264 }
2265 2265 if (csp->cp_rval) { /* error */
2266 2266 freemsg(csp->cp_private);
2267 2267 freemsg(mp);
2268 2268 ret = DONAME_FAIL;
2269 2269 break;
2270 2270 }
2271 2271 ASSERT(csp->cp_private != NULL);
2272 2272 getpeer = csp->cp_cmd == TI_GETPEERNAME;
2273 2273 if (getpeer)
2274 2274 mutex_enter(&tp->tim_mutex);
2275 2275 if (csp->cp_private->b_cont == NULL) { /* got strbuf */
2276 2276 ASSERT(mp->b_cont);
2277 2277 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2278 2278 (void *)mp->b_cont->b_rptr);
2279 2279 if (getpeer) {
2280 2280 if (tp->tim_peerlen == 0) {
2281 2281 /* copy just strbuf */
2282 2282 STRUCT_FSET(sb, len, 0);
2283 2283 } else if (tp->tim_peerlen >
2284 2284 STRUCT_FGET(sb, maxlen)) {
2285 2285 mutex_exit(&tp->tim_mutex);
2286 2286 miocnak(q, mp, 0, ENAMETOOLONG);
2287 2287 ret = DONAME_FAIL;
2288 2288 break;
2289 2289 } else {
2290 2290 /* copy buffer */
2291 2291 STRUCT_FSET(sb, len, tp->tim_peerlen);
2292 2292 }
2293 2293 } else {
2294 2294 if (tp->tim_mylen == 0) {
2295 2295 /* copy just strbuf */
2296 2296 STRUCT_FSET(sb, len, 0);
2297 2297 } else if (tp->tim_mylen >
2298 2298 STRUCT_FGET(sb, maxlen)) {
2299 2299 freemsg(csp->cp_private);
2300 2300 miocnak(q, mp, 0, ENAMETOOLONG);
2301 2301 ret = DONAME_FAIL;
2302 2302 break;
2303 2303 } else {
2304 2304 /* copy buffer */
2305 2305 STRUCT_FSET(sb, len, tp->tim_mylen);
2306 2306 }
2307 2307 }
2308 2308 csp->cp_private->b_cont = mp->b_cont;
2309 2309 mp->b_cont = NULL;
2310 2310 }
2311 2311 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2312 2312 (void *)csp->cp_private->b_cont->b_rptr);
2313 2313 if (STRUCT_FGET(sb, len) == 0) {
2314 2314 /*
2315 2315 * restore strbuf.len
2316 2316 */
2317 2317 if (getpeer)
2318 2318 STRUCT_FSET(sb, len, tp->tim_peerlen);
2319 2319 else
2320 2320 STRUCT_FSET(sb, len, tp->tim_mylen);
2321 2321
2322 2322 if (getpeer)
2323 2323 mutex_exit(&tp->tim_mutex);
2324 2324 if (STRUCT_FGET(sb, maxlen) == 0) {
2325 2325
2326 2326 /*
2327 2327 * ack the ioctl
2328 2328 */
2329 2329 freemsg(csp->cp_private);
2330 2330 tim_ioctl_send_reply(q, mp, NULL);
2331 2331 ret = DONAME_DONE;
2332 2332 break;
2333 2333 }
2334 2334
2335 2335 if ((bp = allocb(STRUCT_SIZE(sb), BPRI_MED)) == NULL) {
2336 2336
2337 2337 tilog(
2338 2338 "ti_doname: allocb failed no recovery attempt\n", 0);
2339 2339
2340 2340 freemsg(csp->cp_private);
2341 2341 miocnak(q, mp, 0, EAGAIN);
2342 2342 ret = DONAME_FAIL;
2343 2343 break;
2344 2344 }
2345 2345 bp->b_wptr += STRUCT_SIZE(sb);
2346 2346 bcopy(STRUCT_BUF(sb), bp->b_rptr, STRUCT_SIZE(sb));
2347 2347 cqp->cq_addr =
2348 2348 (caddr_t)*(intptr_t *)csp->cp_private->b_rptr;
2349 2349 cqp->cq_size = STRUCT_SIZE(sb);
2350 2350 cqp->cq_flag = 0;
2351 2351 mp->b_datap->db_type = M_COPYOUT;
2352 2352 mp->b_cont = bp;
2353 2353 STRUCT_FSET(sb, len, 0);
2354 2354 STRUCT_FSET(sb, maxlen, 0); /* ack next time around */
2355 2355 qreply(q, mp);
2356 2356 ret = DONAME_CONT;
2357 2357 break;
2358 2358 }
2359 2359
2360 2360 /*
2361 2361 * copy the address to the user
2362 2362 */
2363 2363 if ((bp = allocb((size_t)STRUCT_FGET(sb, len), BPRI_MED))
2364 2364 == NULL) {
2365 2365 if (getpeer)
2366 2366 mutex_exit(&tp->tim_mutex);
2367 2367
2368 2368 tilog("ti_doname: allocb failed no recovery attempt\n",
2369 2369 0);
2370 2370
2371 2371 freemsg(csp->cp_private);
2372 2372 miocnak(q, mp, 0, EAGAIN);
2373 2373 ret = DONAME_FAIL;
2374 2374 break;
2375 2375 }
2376 2376 bp->b_wptr += STRUCT_FGET(sb, len);
2377 2377 if (getpeer) {
2378 2378 bcopy(tp->tim_peername, bp->b_rptr,
2379 2379 STRUCT_FGET(sb, len));
2380 2380 mutex_exit(&tp->tim_mutex);
2381 2381 } else {
2382 2382 bcopy(tp->tim_myname, bp->b_rptr, STRUCT_FGET(sb, len));
2383 2383 }
2384 2384 cqp->cq_addr = (caddr_t)STRUCT_FGETP(sb, buf);
2385 2385 cqp->cq_size = STRUCT_FGET(sb, len);
2386 2386 cqp->cq_flag = 0;
2387 2387 mp->b_datap->db_type = M_COPYOUT;
2388 2388 mp->b_cont = bp;
2389 2389 STRUCT_FSET(sb, len, 0); /* copy the strbuf next time around */
2390 2390 qreply(q, mp);
2391 2391 ret = DONAME_CONT;
2392 2392 break;
2393 2393
2394 2394 default:
2395 2395 tilog("ti_doname: freeing bad message type = %d\n",
2396 2396 mp->b_datap->db_type);
2397 2397 freemsg(mp);
2398 2398 ret = DONAME_FAIL;
2399 2399 break;
2400 2400 }
2401 2401 return (ret);
2402 2402 }
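/*
 * Illustrative sketch (editorial, hedged): the M_COPYIN/M_COPYOUT exchange
 * that ti_doname() drives corresponds to nothing more than a transparent
 * ioctl(2) issued by a user process on a descriptor that has timod pushed.
 * The wrapper below is an assumption about a typical caller, not code from
 * this file; the step numbers refer to the block comment above ti_doname().
 */
#include <unistd.h>
#include <stropts.h>
#include <sys/timod.h>

int
example_getmyname(int fd, void *addrbuf, int buflen)
{
	struct strbuf sb;

	sb.maxlen = buflen;	/* steps 2-3: timod copies this strbuf in */
	sb.len = 0;
	sb.buf = addrbuf;	/* steps 4-6: address, then strbuf, copied out */

	/* step 7: timod acks the ioctl; sb.len now holds the address length */
	return (ioctl(fd, TI_GETMYNAME, &sb));
}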
2403 2403
2404 2404
2405 2405 /*
2406 2406 * Fill in the address of a connectionless data packet if a connect
2407 2407 * had been done on this endpoint.
2408 2408 */
2409 2409 static mblk_t *
2410 2410 tim_filladdr(queue_t *q, mblk_t *mp, boolean_t dorecover)
2411 2411 {
2412 2412 mblk_t *bp;
2413 2413 struct tim_tim *tp;
2414 2414 struct T_unitdata_req *up;
2415 2415 struct T_unitdata_req *nup;
2416 2416 size_t plen;
2417 2417
2418 2418 tp = (struct tim_tim *)q->q_ptr;
2419 2419 if (mp->b_datap->db_type == M_DATA) {
2420 2420 mutex_enter(&tp->tim_mutex);
2421 2421 bp = allocb(sizeof (struct T_unitdata_req) + tp->tim_peerlen,
2422 2422 BPRI_MED);
2423 2423 if (bp != NULL) {
2424 2424 bp->b_datap->db_type = M_PROTO;
2425 2425 up = (struct T_unitdata_req *)bp->b_rptr;
2426 2426 up->PRIM_type = T_UNITDATA_REQ;
2427 2427 up->DEST_length = tp->tim_peerlen;
2428 2428 bp->b_wptr += sizeof (struct T_unitdata_req);
2429 2429 up->DEST_offset = sizeof (struct T_unitdata_req);
2430 2430 up->OPT_length = 0;
2431 2431 up->OPT_offset = 0;
2432 2432 if (tp->tim_peerlen > 0) {
2433 2433 bcopy(tp->tim_peername, bp->b_wptr,
2434 2434 tp->tim_peerlen);
2435 2435 bp->b_wptr += tp->tim_peerlen;
2436 2436 }
2437 2437 bp->b_cont = mp;
2438 2438 }
2439 2439 } else {
2440 2440 ASSERT(mp->b_datap->db_type == M_PROTO);
2441 2441 up = (struct T_unitdata_req *)mp->b_rptr;
2442 2442 ASSERT(up->PRIM_type == T_UNITDATA_REQ);
2443 2443 if (up->DEST_length != 0)
2444 2444 return (mp);
2445 2445 mutex_enter(&tp->tim_mutex);
2446 2446 bp = allocb(sizeof (struct T_unitdata_req) + up->OPT_length +
2447 2447 tp->tim_peerlen, BPRI_MED);
2448 2448 if (bp != NULL) {
2449 2449 bp->b_datap->db_type = M_PROTO;
2450 2450 nup = (struct T_unitdata_req *)bp->b_rptr;
2451 2451 nup->PRIM_type = T_UNITDATA_REQ;
2452 2452 nup->DEST_length = plen = tp->tim_peerlen;
2453 2453 bp->b_wptr += sizeof (struct T_unitdata_req);
2454 2454 nup->DEST_offset = sizeof (struct T_unitdata_req);
2455 2455 if (plen > 0) {
2456 2456 bcopy(tp->tim_peername, bp->b_wptr, plen);
2457 2457 bp->b_wptr += plen;
2458 2458 }
2459 2459 mutex_exit(&tp->tim_mutex);
2460 2460 if (up->OPT_length == 0) {
2461 2461 nup->OPT_length = 0;
2462 2462 nup->OPT_offset = 0;
2463 2463 } else {
2464 2464 nup->OPT_length = up->OPT_length;
2465 2465 nup->OPT_offset =
2466 2466 sizeof (struct T_unitdata_req) + plen;
2467 2467 bcopy((mp->b_wptr + up->OPT_offset), bp->b_wptr,
2468 2468 up->OPT_length);
2469 2469 bp->b_wptr += up->OPT_length;
2470 2470 }
2471 2471 bp->b_cont = mp->b_cont;
2472 2472 mp->b_cont = NULL;
2473 2473 freeb(mp);
2474 2474 return (bp);
2475 2475 }
2476 2476 }
2477 2477 ASSERT(MUTEX_HELD(&tp->tim_mutex));
2478 2478 if (bp == NULL && dorecover) {
2479 2479 tim_recover(q, mp,
2480 2480 sizeof (struct T_unitdata_req) + tp->tim_peerlen);
2481 2481 }
2482 2482 mutex_exit(&tp->tim_mutex);
2483 2483 return (bp);
2484 2484 }
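/*
 * Editorial sketch (not part of the original source): for an M_DATA block
 * on a connected CLTS endpoint, tim_filladdr() above returns
 *
 *   b_rptr -> struct T_unitdata_req  PRIM_type   = T_UNITDATA_REQ
 *                                    DEST_length = tim_peerlen
 *                                    DEST_offset = sizeof (T_unitdata_req)
 *                                    OPT_length  = 0, OPT_offset = 0
 *             followed by tim_peerlen bytes copied from tim_peername
 *   b_cont -> the caller's original M_DATA message
 *
 * In the M_PROTO case any options present in the caller's T_UNITDATA_REQ
 * are appended after the address, with OPT_offset set to
 * sizeof (struct T_unitdata_req) + DEST_length.
 */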
2485 2485
2486 2486 static void
2487 2487 tim_addlink(struct tim_tim *tp)
2488 2488 {
2489 2489 struct tim_tim **tpp;
2490 2490 struct tim_tim *next;
2491 2491
2492 2492 tpp = &tim_hash[TIM_HASH(tp->tim_acceptor)];
2493 2493 rw_enter(&tim_list_rwlock, RW_WRITER);
2494 2494
2495 2495 if ((next = *tpp) != NULL)
2496 2496 next->tim_ptpn = &tp->tim_next;
2497 2497 tp->tim_next = next;
2498 2498 tp->tim_ptpn = tpp;
2499 2499 *tpp = tp;
2500 2500
2501 2501 tim_cnt++;
2502 2502
2503 2503 rw_exit(&tim_list_rwlock);
2504 2504 }
2505 2505
2506 2506 static void
2507 2507 tim_dellink(struct tim_tim *tp)
2508 2508 {
2509 2509 struct tim_tim *next;
2510 2510
2511 2511 rw_enter(&tim_list_rwlock, RW_WRITER);
2512 2512
2513 2513 if ((next = tp->tim_next) != NULL)
2514 2514 next->tim_ptpn = tp->tim_ptpn;
2515 2515 *(tp->tim_ptpn) = next;
2516 2516
2517 2517 tim_cnt--;
2518 2518
2519 2519 rw_exit(&tim_list_rwlock);
2520 2520 }
2521 2521
2522 2522 static struct tim_tim *
2523 2523 tim_findlink(t_uscalar_t id)
2524 2524 {
2525 2525 struct tim_tim *tp;
2526 2526
2527 2527 ASSERT(rw_lock_held(&tim_list_rwlock));
2528 2528
2529 2529 for (tp = tim_hash[TIM_HASH(id)]; tp != NULL; tp = tp->tim_next) {
2530 2530 if (tp->tim_acceptor == id) {
2531 2531 break;
2532 2532 }
2533 2533 }
2534 2534 return (tp);
2535 2535 }
2536 2536
2537 2537 static void
2538 2538 tim_recover(queue_t *q, mblk_t *mp, t_scalar_t size)
2539 2539 {
2540 2540 struct tim_tim *tp;
2541 2541 bufcall_id_t bid;
2542 2542 timeout_id_t tid;
2543 2543
2544 2544 tp = (struct tim_tim *)q->q_ptr;
2545 2545
2546 2546 /*
2547 2547 * Avoid re-enabling the queue.
2548 2548 */
2549 2549 if (mp->b_datap->db_type == M_PCPROTO)
2550 2550 mp->b_datap->db_type = M_PROTO;
2551 2551 noenable(q);
2552 2552 (void) putbq(q, mp);
2553 2553
2554 2554 /*
2555 2555 * Make sure there is at most one outstanding request per queue.
2556 2556 */
2557 2557 if (q->q_flag & QREADR) {
2558 2558 if (tp->tim_rtimoutid || tp->tim_rbufcid)
2559 2559 return;
2560 2560 } else {
2561 2561 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2562 2562 return;
2563 2563 }
2564 2564 if (!(bid = qbufcall(RD(q), (size_t)size, BPRI_MED, tim_buffer, q))) {
2565 2565 tid = qtimeout(RD(q), tim_timer, q, TIMWAIT);
2566 2566 if (q->q_flag & QREADR)
2567 2567 tp->tim_rtimoutid = tid;
2568 2568 else
2569 2569 tp->tim_wtimoutid = tid;
2570 2570 } else {
2571 2571 if (q->q_flag & QREADR)
2572 2572 tp->tim_rbufcid = bid;
2573 2573 else
2574 2574 tp->tim_wbufcid = bid;
2575 2575 }
2576 2576 }
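/*
 * Editorial sketch (hedged): the callbacks registered above, tim_buffer()
 * and tim_timer(), live outside this hunk.  A recovery callback of this
 * kind conventionally just clears the saved id and re-enables the queue so
 * that the service routine retries the message queued by tim_recover();
 * the hypothetical routine below only illustrates that general STREAMS
 * idiom and is not code from this file.
 */
static void
example_recovery_callback(void *arg)
{
	queue_t *q = (queue_t *)arg;

	enableok(q);	/* undo the noenable() done in tim_recover() */
	qenable(q);	/* reschedule the queue's service routine */
}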
2577 2577
2578 2578 /*
2579 2579 * Timod is waiting on a downstream ioctl reply; come back soon
2580 2580 * to reschedule the write side service routine, which will check
2581 2581 * if the ioctl is done and another can proceed.
2582 2582 */
2583 2583 static void
2584 2584 tim_ioctl_retry(queue_t *q)
2585 2585 {
2586 2586 struct tim_tim *tp;
2587 2587
2588 2588 tp = (struct tim_tim *)q->q_ptr;
2589 2589
2590 2590 /*
2591 2591 * Make sure there is at most one outstanding request per wqueue.
2592 2592 */
2593 2593 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2594 2594 return;
2595 2595
2596 2596 tp->tim_wtimoutid = qtimeout(RD(q), tim_timer, q, TIMIOCWAIT);
2597 2597 }
2598 2598
2599 2599 /*
2600 2600 * Inspect the data on the read queues, starting from the read queue
2601 2601 * passed as a parameter (the timod read queue), and traverse until
2602 2602 * q_next is NULL (stream head). Look for a TPI T_EXDATA_IND message;
2603 2603 * return 1 if found, 0 if not found.
2604 2604 */
2605 2605 static int
2606 2606 ti_expind_on_rdqueues(queue_t *rq)
2607 2607 {
2608 2608 mblk_t *bp;
2609 2609 queue_t *q;
2610 2610
2611 2611 q = rq;
2612 2612 /*
2613 2613 * We are going to walk q_next, so protect stream from plumbing
2614 2614 * changes.
2615 2615 */
2616 2616 claimstr(q);
2617 2617 do {
2618 2618 /*
2619 2619 * Hold QLOCK while referencing data on queues
2620 2620 */
2621 2621 mutex_enter(QLOCK(rq));
2622 2622 bp = rq->q_first;
2623 2623 while (bp != NULL) {
2624 2624 /*
2625 2625 * Walk the messages on the queue looking
2626 2626 * for a possible T_EXDATA_IND
2627 2627 */
2628 2628 if ((bp->b_datap->db_type == M_PROTO) &&
2629 2629 ((bp->b_wptr - bp->b_rptr) >=
2630 2630 sizeof (struct T_exdata_ind)) &&
2631 2631 (((struct T_exdata_ind *)bp->b_rptr)->PRIM_type
2632 2632 == T_EXDATA_IND)) {
2633 2633 /* bp is T_EXDATA_IND */
2634 2634 mutex_exit(QLOCK(rq));
2635 2635 releasestr(q); /* decrement sd_refcnt */
2636 2636 return (1); /* expdata is on a read queue */
2637 2637 }
2638 2638 bp = bp->b_next; /* next message */
2639 2639 }
2640 2640 mutex_exit(QLOCK(rq));
2641 2641 rq = rq->q_next; /* next upstream queue */
2642 2642 } while (rq != NULL);
2643 2643 releasestr(q);
2644 2644 return (0); /* no expdata on read queues */
2645 2645 }
2646 2646
2647 2647 static void
2648 2648 tim_tcap_timer(void *q_ptr)
2649 2649 {
2650 2650 queue_t *q = (queue_t *)q_ptr;
2651 2651 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
2652 2652
2653 2653 ASSERT(tp != NULL && tp->tim_tcap_timoutid != 0);
2654 2654 ASSERT((tp->tim_flags & TI_CAP_RECVD) != 0);
2655 2655
2656 2656 tp->tim_tcap_timoutid = 0;
2657 2657 TILOG("tim_tcap_timer: fired\n", 0);
2658 2658 tim_tcap_genreply(q, tp);
2659 2659 }
2660 2660
2661 2661 /*
2662 2662 * tim_tcap_genreply() is called either from timeout routine or when
2663 2663 * T_ERROR_ACK is received. In both cases it means that underlying
2664 2664 * transport doesn't provide T_CAPABILITY_REQ.
2665 2665 */
2666 2666 static void
2667 2667 tim_tcap_genreply(queue_t *q, struct tim_tim *tp)
2668 2668 {
2669 2669 mblk_t *mp = tp->tim_iocsave;
2670 2670 struct iocblk *iocbp;
2671 2671
2672 2672 TILOG("timodrproc: tim_tcap_genreply\n", 0);
2673 2673
2674 2674 ASSERT(tp == (struct tim_tim *)q->q_ptr);
2675 2675 ASSERT(mp != NULL);
2676 2676
2677 2677 iocbp = (struct iocblk *)mp->b_rptr;
2678 2678 ASSERT(iocbp != NULL);
2679 2679 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
2680 2680 ASSERT(iocbp->ioc_cmd == TI_CAPABILITY);
2681 2681 ASSERT(mp->b_cont == NULL);
2682 2682
2683 2683 /* Save this information permanently in the module */
2684 2684 PI_PROVLOCK(tp->tim_provinfo);
2685 2685 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
2686 2686 tp->tim_provinfo->tpi_capability = PI_NO;
2687 2687 PI_PROVUNLOCK(tp->tim_provinfo);
2688 2688
2689 2689 if (tp->tim_tcap_timoutid != 0) {
2690 2690 (void) quntimeout(q, tp->tim_tcap_timoutid);
2691 2691 tp->tim_tcap_timoutid = 0;
2692 2692 }
2693 2693
2694 2694 if ((tp->tim_flags & CAP_WANTS_INFO) != 0) {
2695 2695 /* Send T_INFO_REQ down */
2696 2696 mblk_t *tirmp = tpi_ack_alloc(NULL,
2697 2697 sizeof (struct T_info_req), M_PCPROTO, T_INFO_REQ);
2698 2698
2699 2699 if (tirmp != NULL) {
2700 2700 /* Emulate TC1_INFO */
2701 2701 TILOG("emulate_tcap_ioc_req: sending T_INFO_REQ\n", 0);
2702 2702 tp->tim_flags |= WAIT_IOCINFOACK;
2703 2703 putnext(WR(q), tirmp);
2704 2704 } else {
2705 2705 tilog("emulate_tcap_req: allocb fail, "
2706 2706 "no recovery attempt\n", 0);
2707 2707 tp->tim_iocsave = NULL;
2708 2708 tp->tim_saved_prim = -1;
2709 2709 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2710 2710 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2711 2711 miocnak(q, mp, 0, ENOMEM);
2712 2712 }
2713 2713 } else {
2714 2714 /* Reply immediately */
2715 2715 mblk_t *ackmp = tpi_ack_alloc(NULL,
2716 2716 sizeof (struct T_capability_ack), M_PCPROTO,
2717 2717 T_CAPABILITY_ACK);
2718 2718
2719 2719 mp->b_cont = ackmp;
2720 2720
2721 2721 if (ackmp != NULL) {
2722 2722 ((struct T_capability_ack *)
2723 2723 ackmp->b_rptr)->CAP_bits1 = 0;
2724 2724 tim_ioctl_send_reply(q, mp, ackmp);
2725 2725 tp->tim_iocsave = NULL;
2726 2726 tp->tim_saved_prim = -1;
2727 2727 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2728 2728 TI_CAP_RECVD | CAP_WANTS_INFO);
2729 2729 } else {
2730 2730 tilog("timodwproc:allocb failed no "
2731 2731 "recovery attempt\n", 0);
2732 2732 tp->tim_iocsave = NULL;
2733 2733 tp->tim_saved_prim = -1;
2734 2734 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2735 2735 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2736 2736 miocnak(q, mp, 0, ENOMEM);
2737 2737 }
2738 2738 }
2739 2739 }
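/*
 * Illustrative sketch (editorial assumption, modelled on the way libnsl
 * drives timod): from user space TI_CAPABILITY is typically issued as an
 * I_STR ioctl whose data buffer carries a T_capability_req on the way down
 * and is overwritten with the T_capability_ack on the way back, whether
 * that ack comes from the transport or is synthesized by
 * tim_tcap_genreply() above.  The names below are hypothetical.
 */
#define	_SUN_TPI_VERSION	2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <stropts.h>
#include <unistd.h>

int
example_ti_capability(int fd, struct T_capability_ack *ackp)
{
	struct strioctl ic;
	union {
		struct T_capability_req req;
		struct T_capability_ack ack;
	} buf;

	buf.req.PRIM_type = T_CAPABILITY_REQ;
	buf.req.CAP_bits1 = TC1_INFO;	/* also request T_info_ack contents */

	ic.ic_cmd = TI_CAPABILITY;
	ic.ic_timout = -1;
	ic.ic_len = sizeof (buf);	/* leave room for the larger ack */
	ic.ic_dp = (char *)&buf;

	if (ioctl(fd, I_STR, &ic) < 0)
		return (-1);

	*ackp = buf.ack;	/* reply data replaced the request */
	return (0);
}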
2740 2740
2741 2741
2742 2742 static void
2743 2743 tim_ioctl_send_reply(queue_t *q, mblk_t *ioc_mp, mblk_t *mp)
2744 2744 {
2745 2745 struct iocblk *iocbp;
2746 2746
2747 2747 ASSERT(q != NULL && ioc_mp != NULL);
2748 2748
2749 2749 ioc_mp->b_datap->db_type = M_IOCACK;
2750 2750 if (mp != NULL)
2751 2751 mp->b_datap->db_type = M_DATA;
2752 2752
2753 2753 if (ioc_mp->b_cont != mp) {
2754 2754 /* It is safe to call freemsg for NULL pointers */
2755 2755 freemsg(ioc_mp->b_cont);
2756 2756 ioc_mp->b_cont = mp;
2757 2757 }
2758 2758 iocbp = (struct iocblk *)ioc_mp->b_rptr;
2759 2759 iocbp->ioc_error = 0;
2760 2760 iocbp->ioc_rval = 0;
2761 2761 /*
2762 2762 * All ioctls may return more data than was specified by the
2763 2763 * count arg. For TI_CAPABILITY, count is treated as the maximum data size.
2764 2764 */
2765 2765 if (mp == NULL)
2766 2766 iocbp->ioc_count = 0;
2767 2767 else if (iocbp->ioc_cmd != TI_CAPABILITY)
2768 2768 iocbp->ioc_count = msgsize(mp);
2769 2769 else {
2770 2770 iocbp->ioc_count = MIN(MBLKL(mp), iocbp->ioc_count);
2771 2771 /* Truncate message if too large */
2772 2772 mp->b_wptr = mp->b_rptr + iocbp->ioc_count;
2773 2773 }
2774 2774
2775 2775 TILOG("iosendreply: ioc_cmd = %d, ", iocbp->ioc_cmd);
2776 2776 putnext(RD(q), ioc_mp);
2777 2777 }
2778 2778
2779 2779 /*
2780 2780 * Send M_IOCACK for errors.
2781 2781 */
2782 2782 static void
2783 2783 tim_send_ioc_error_ack(queue_t *q, struct tim_tim *tp, mblk_t *mp)
2784 2784 {
2785 2785 struct T_error_ack *tea = (struct T_error_ack *)mp->b_rptr;
2786 2786 t_scalar_t error_prim;
2787 2787
2788 2788 mp->b_wptr = mp->b_rptr + sizeof (struct T_error_ack);
2789 2789 ASSERT(mp->b_wptr <= mp->b_datap->db_lim);
2790 2790 error_prim = tea->ERROR_prim;
2791 2791
2792 2792 ASSERT(tp->tim_iocsave != NULL);
2793 2793 ASSERT(tp->tim_iocsave->b_cont != mp);
2794 2794
2795 2795 /* Always send this to the read side of the queue */
2796 2796 q = RD(q);
2797 2797
2798 2798 TILOG("tim_send_ioc_error_ack: prim = %d\n", tp->tim_saved_prim);
2799 2799
2800 2800 if (tp->tim_saved_prim != error_prim) {
2801 2801 putnext(q, mp);
2802 2802 } else if (error_prim == T_CAPABILITY_REQ) {
2803 2803 TILOG("timodrproc: T_ERROR_ACK/T_CAPABILITY_REQ\n", 0);
2804 2804 ASSERT(tp->tim_iocsave->b_cont == NULL);
2805 2805
2806 2806 tim_tcap_genreply(q, tp);
2807 2807 freemsg(mp);
2808 2808 } else {
2809 2809 struct iocblk *iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
2810 2810
2811 2811 TILOG("tim_send_ioc_error_ack: T_ERROR_ACK: prim %d\n",
2812 2812 error_prim);
2813 2813 ASSERT(tp->tim_iocsave->b_cont == NULL);
2814 2814
2815 2815 switch (error_prim) {
2816 2816 default:
2817 2817 TILOG("timodrproc: Unknown T_ERROR_ACK: tlierror %d\n",
2818 2818 tea->TLI_error);
2819 2819
2820 2820 putnext(q, mp);
2821 2821 break;
2822 2822
2823 2823 case T_INFO_REQ:
2824 2824 case T_SVR4_OPTMGMT_REQ:
2825 2825 case T_OPTMGMT_REQ:
2826 2826 case O_T_BIND_REQ:
2827 2827 case T_BIND_REQ:
2828 2828 case T_UNBIND_REQ:
2829 2829 case T_ADDR_REQ:
2830 2830 case T_CAPABILITY_REQ:
2831 2831
2832 2832 TILOG("ioc_err_ack: T_ERROR_ACK: tlierror %x\n",
2833 2833 tea->TLI_error);
2834 2834
2835 2835 /* get saved ioctl msg and set values */
2836 2836 iocbp->ioc_count = 0;
2837 2837 iocbp->ioc_error = 0;
2838 2838 iocbp->ioc_rval = tea->TLI_error;
2839 2839 if (iocbp->ioc_rval == TSYSERR)
2840 2840 iocbp->ioc_rval |= tea->UNIX_error << 8;
2841 2841 tp->tim_iocsave->b_datap->db_type = M_IOCACK;
2842 2842 freemsg(mp);
2843 2843 putnext(q, tp->tim_iocsave);
2844 2844 tp->tim_iocsave = NULL;
2845 2845 tp->tim_saved_prim = -1;
2846 2846 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
2847 2847 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2848 2848 break;
2849 2849 }
2850 2850 }
2851 2851 }
2852 2852
2853 2853 /*
2854 2854 * Send a reply to a regular TPI message or an ioctl message upstream.
2855 2855 * Should be called from the read side only.
2856 2856 */
2857 2857 static void
2858 2858 tim_send_reply(queue_t *q, mblk_t *mp, struct tim_tim *tp, t_scalar_t prim)
2859 2859 {
2860 2860 ASSERT(mp != NULL && q != NULL && tp != NULL);
2861 2861 ASSERT(q == RD(q));
2862 2862
2863 2863 /* Restore db_type - recover() might have changed it */
2864 2864 mp->b_datap->db_type = M_PCPROTO;
2865 2865
2866 2866 if (((tp->tim_flags & WAITIOCACK) == 0) || (tp->tim_saved_prim != prim))
2867 2867 putnext(q, mp);
2868 2868 else {
2869 2869 ASSERT(tp->tim_iocsave != NULL);
2870 2870 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
2871 2871 tp->tim_iocsave = NULL;
2872 2872 tp->tim_saved_prim = -1;
2873 2873 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2874 2874 TI_CAP_RECVD | CAP_WANTS_INFO);
2875 2875 }
2876 2876 }
2877 2877
2878 2878 /*
2879 2879 * Reply to a TI_SYNC request without sending anything downstream.
2880 2880 */
2881 2881 static void
2882 2882 tim_answer_ti_sync(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2883 2883 mblk_t *ackmp, uint32_t tsr_flags)
2884 2884 {
2885 2885 struct ti_sync_ack *tsap;
2886 2886
2887 2887 ASSERT(q != NULL && q == WR(q) && ackmp != NULL);
2888 2888
2889 2889 tsap = (struct ti_sync_ack *)ackmp->b_rptr;
2890 2890 bzero(tsap, sizeof (struct ti_sync_ack));
2891 2891 ackmp->b_wptr = ackmp->b_rptr + sizeof (struct ti_sync_ack);
2892 2892
2893 2893 if (tsr_flags == 0 ||
2894 2894 (tsr_flags & ~(TSRF_QLEN_REQ | TSRF_IS_EXP_IN_RCVBUF)) != 0) {
2895 2895 /*
2896 2896 * unsupported/bad flag setting
2897 2897 * or no flag set.
2898 2898 */
2899 2899 TILOG("timodwproc: unsupported/bad flag setting %x\n",
2900 2900 tsr_flags);
2901 2901 freemsg(ackmp);
2902 2902 miocnak(q, mp, 0, EINVAL);
2903 2903 return;
2904 2904 }
2905 2905
2906 2906 if ((tsr_flags & TSRF_QLEN_REQ) != 0)
2907 2907 tsap->tsa_qlen = tp->tim_backlog;
2908 2908
2909 2909 if ((tsr_flags & TSRF_IS_EXP_IN_RCVBUF) != 0 &&
2910 2910 ti_expind_on_rdqueues(RD(q))) {
2911 2911 /*
2912 2912 * Expedited data is queued on
2913 2913 * the stream read side
2914 2914 */
2915 2915 tsap->tsa_flags |= TSAF_EXP_QUEUED;
2916 2916 }
2917 2917
2918 2918 tim_ioctl_send_reply(q, mp, ackmp);
2919 2919 tp->tim_iocsave = NULL;
2920 2920 tp->tim_saved_prim = -1;
2921 2921 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2922 2922 TI_CAP_RECVD | CAP_WANTS_INFO);
2923 2923 }
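/*
 * Editorial note: when the caller sets only TSRF_QLEN_REQ and/or
 * TSRF_IS_EXP_IN_RCVBUF, the routine above answers the TI_SYNC ioctl
 * entirely from state timod already holds, returning the saved tim_backlog
 * value in tsa_qlen and setting TSAF_EXP_QUEUED if a T_EXDATA_IND is
 * sitting on one of the stream's read queues; nothing is sent to the
 * transport.
 */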
2924 2924
2925 2925 /*
2926 2926 * Send TPI message from IOCTL message, save original ioctl header and TPI
2927 2927 * message type. Should be called from write side only.
2928 2928 */
2929 2929 static void
2930 2930 tim_send_ioctl_tpi_msg(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2931 2931 struct iocblk *iocb)
2932 2932 {
2933 2933 mblk_t *tmp;
2934 2934 int ioc_cmd = iocb->ioc_cmd;
2935 2935
2936 2936 ASSERT(q != NULL && mp != NULL && tp != NULL);
2937 2937 ASSERT(q == WR(q));
2938 2938 ASSERT(mp->b_cont != NULL);
2939 2939
2940 2940 tp->tim_iocsave = mp;
2941 2941 tmp = mp->b_cont;
2942 2942
2943 2943 mp->b_cont = NULL;
2944 2944 tp->tim_flags |= WAITIOCACK;
2945 2945 tp->tim_saved_prim = ((union T_primitives *)tmp->b_rptr)->type;
2946 2946
2947 2947 /*
2948 2948 * For TI_GETINFO, the attached message is a T_INFO_REQ
2949 2949 * For TI_SYNC, we generate the T_INFO_REQ message above
2950 2950 * For TI_CAPABILITY the attached message is either
2951 2951 * T_CAPABILITY_REQ or T_INFO_REQ.
2952 2952 * Among TPI request messages possible,
2953 2953 * T_INFO_REQ/T_CAPABILITY_REQ messages are M_PCPROTO, the rest
2954 2954 * are M_PROTO.
2955 2955 */
2956 2956 if (ioc_cmd == TI_GETINFO || ioc_cmd == TI_SYNC ||
2957 2957 ioc_cmd == TI_CAPABILITY) {
2958 2958 tmp->b_datap->db_type = M_PCPROTO;
2959 2959 } else {
2960 2960 tmp->b_datap->db_type = M_PROTO;
2961 2961 }
2962 2962
2963 2963 /* Verify credentials in STREAM */
2964 2964 ASSERT(iocb->ioc_cr == NULL || iocb->ioc_cr == DB_CRED(tmp));
2965 2965
2966 2966 ASSERT(DB_CRED(tmp) != NULL);
2967 2967
2968 2968 TILOG("timodwproc: sending down %d\n", tp->tim_saved_prim);
2969 2969 putnext(q, tmp);
2970 2970 }
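/*
 * Editorial note: the routine above is the write-side half of the ioctl
 * emulation handshake.  It parks the original M_IOCTL in tim_iocsave,
 * records the TPI primitive in tim_saved_prim and raises WAITIOCACK; the
 * read side later matches the arriving ack against tim_saved_prim (see
 * tim_send_reply() and tim_send_ioc_error_ack() above) and turns it back
 * into an M_IOCACK/M_IOCNAK via tim_ioctl_send_reply().
 */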
2971 2971
2972 2972 static void
2973 2973 tim_clear_peer(struct tim_tim *tp)
2974 2974 {
2975 2975 mutex_enter(&tp->tim_mutex);
2976 2976 if (tp->tim_peercred != NULL) {
2977 2977 crfree(tp->tim_peercred);
2978 2978 tp->tim_peercred = NULL;
2979 2979 }
2980 2980 tp->tim_peerlen = 0;
2981 2981 mutex_exit(&tp->tim_mutex);
2982 2982 }
2895 lines elided