12694 race between write() and shutdown() for unix sockets
--- old/usr/src/uts/common/io/tl.c
+++ new/usr/src/uts/common/io/tl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 - * Copyright (c) 2018, Joyent, Inc.
28 + * Copyright 2020 Joyent, Inc.
29 29 */
30 30
31 31 /*
32 32 * Multithreaded STREAMS Local Transport Provider.
33 33 *
34 34 * OVERVIEW
35 35 * ========
36 36 *
37 37 * This driver provides TLI as well as socket semantics. It provides
38 38 * connectionless, connection oriented, and connection oriented with orderly
39 39 * release transports for TLI and sockets. Each transport type has separate name
40 40 * spaces (i.e. it is not possible to connect from a socket to a TLI endpoint) -
41 41 * this removes any name space conflicts when binding to socket style transport
42 42 * addresses.
43 43 *
44 44 * NOTE: There is one exception: Socket ticots and ticotsord transports share
45 45 * the same namespace. In fact, sockets always use ticotsord type transport.
46 46 *
47 47 * The driver mode is specified during open() by the minor number used for
48 48 * open.
49 49 *
50 50 * The sockets in addition have the following semantic differences:
51 51 * No support for passing up credentials (TL_SET[U]CRED).
52 52 *
53 53 * Options are passed through transparently on T_CONN_REQ to T_CONN_IND,
54 54 * from T_UNITDATA_REQ to T_UNITDATA_IND, and from T_OPTDATA_REQ to
55 55 * T_OPTDATA_IND.
56 56 *
57 57 * The T_CONN_CON is generated when processing the T_CONN_REQ i.e. before
58 58 * a T_CONN_RES is received from the acceptor. This means that a socket
59 59 * connect will complete before the peer has called accept.
60 60 *
61 61 *
62 62 * MULTITHREADING
63 63 * ==============
64 64 *
65 65 * The driver does not use STREAMS protection mechanisms. Instead it uses a
66 66 * generic "serializer" abstraction. Most of the operations are executed behind
67 67 * the serializer and are essentially single-threaded. All functions executed
68 68 * behind the same serializer are strictly serialized. So if one thread calls
69 69 * serializer_enter(serializer, foo, mp1, arg1); and another thread calls
70 70 * serializer_enter(serializer, bar, mp2, arg2); then (depending on which one
71 71 * was called first) the actual sequence will be foo(mp1, arg1); bar(mp2, arg2)
72 72 * or bar(mp2, arg2); foo(mp1, arg1); but foo() and bar() will never run at the
73 73 * same time.
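For illustration, a minimal sketch of that guarantee (foo, bar, ser, mp1, and mp2 are hypothetical; serializer_enter() is the primitive from <sys/serializer.h> used throughout this file):

	static void foo(mblk_t *mp, void *arg) { /* serialized work */ }
	static void bar(mblk_t *mp, void *arg) { /* serialized work */ }

	/* Thread A: */ serializer_enter(ser, foo, mp1, arg1);
	/* Thread B: */ serializer_enter(ser, bar, mp2, arg2);

	/*
	 * foo(mp1, arg1) and bar(mp2, arg2) run in whichever order the
	 * serializer was entered first, but never concurrently.
	 */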
74 74 *
75 75 * Connectionless transports use a single serializer per transport type (one
76 76 * for TLI and one for sockets). Connection-oriented transports use finer-grained
77 77 * serializers.
78 78 *
79 79 * All COTS-type endpoints start their life with private serializers. During
80 80 * connection request processing the endpoint serializer is switched to the
81 81 * listener's serializer and the rest of T_CONN_REQ processing is done on the
82 82 * listener serializer. During T_CONN_RES processing the eager serializer is
83 83 * switched from listener to acceptor serializer and after that point all
84 84 * processing for eager and acceptor happens on this serializer. To avoid races
85 85 * with endpoint closes while its serializer may be changing, closes are blocked
86 86 * while serializers are manipulated.
87 87 *
88 88 * Reference accounting
89 89 * ---------------------
90 90 *
91 91 * Endpoints are reference counted and freed when the last reference is
92 92 * dropped. Functions within the serializer may access endpoint state even
93 93 * after the endpoint has closed. The te_closing flag being set on the endpoint
94 94 * indicates that the endpoint has entered its close routine.
95 95 *
96 96 * One reference is held for each opened endpoint instance. The reference
97 97 * counter is incremented when the endpoint is linked to another endpoint and
98 98 * decremented when the link disappears. It is also incremented when the
99 99 * endpoint is found by the hash table lookup. This increment is atomic with the
100 100 * lookup itself and happens while the hash table read lock is held.
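A hedged sketch of that atomic lookup-plus-refhold, using mod_hash's callback form (tl_find_callback() and tl_refrele() appear later in this file; "key" stands for the address being looked up):

	mod_hash_val_t hv;

	/*
	 * tl_find_callback() performs the tl_refhold() while mod_hash still
	 * holds its read lock, so the endpoint cannot be freed between the
	 * lookup and the refhold.
	 */
	if (mod_hash_find_cb(tep->te_addrhash, key, &hv,
	    tl_find_callback) == 0) {
		tl_endpt_t *peer = (tl_endpt_t *)hv;
		/* ... safe to examine peer state, e.g. peer->te_closing ... */
		tl_refrele(peer);
	}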
101 101 *
102 102 * Close synchronization
103 103 * ---------------------
104 104 *
105 105 * During close the endpoint is marked as closing using the te_closing flag. It
106 106 * is usually enough to check the te_closing flag since all other state changes
107 107 * happen after this flag is set and close has entered the serializer.
108 108 * Immediately after setting the te_closing flag, tl_close() enters the
109 109 * serializer and waits until the callback finishes. This allows all functions
110 110 * called within the serializer to simply check te_closing without any locks.
111 111 *
112 112 * Serializer management
113 113 * ---------------------
114 114 *
115 115 * For COTS transports serializers are created when the endpoint is constructed
116 116 * and destroyed when the endpoint is destructed. CLTS transports use global
117 117 * serializers - one for sockets and one for TLI.
118 118 *
119 119 * COTS serializers have separate reference counts to deal with several
120 120 * endpoints sharing the same serializer. There is a subtle problem related to
121 121 * the serializer destruction. The serializer should never be destroyed by any
122 122 * function executed inside the serializer. This means that close has to wait
123 123 * until all serializer activity for this endpoint is finished before it can
124 124 * drop the last reference on the endpoint (which may in turn free the
125 125 * serializer). This is only relevant for COTS transports, which manage
126 126 * serializers dynamically. For CLTS transports close may complete without
127 127 * waiting for all serializer activity to finish, since the serializer is only
128 128 * destroyed at driver detach time.
129 129 *
130 130 * COTS endpoints keep track of the number of outstanding requests on the
131 131 * serializer for the endpoint. The code handling accept() avoids changing
132 132 * the client serializer if it has any pending messages on the serializer and
133 133 * instead moves the acceptor to the listener's serializer.
134 134 *
135 135 *
136 136 * Use of hash tables
137 137 * ------------------
138 138 *
139 139 * The driver uses modhash hash table implementation. Each transport uses two
140 140 * hash tables - one for finding endpoints by acceptor ID and another one for
141 141 * finding endpoints by address. For sockets TICOTS and TICOTSORD share the same
142 142 * pair of hash tables since sockets only use TICOTSORD.
143 143 *
144 144 * All hash tables lookups increment a reference count for returned endpoints,
145 145 * so we may safely check the endpoint state even when the endpoint is removed
146 146 * from the hash by another thread immediately after it is found.
147 147 *
148 148 *
149 149 * CLOSE processing
150 150 * ================
151 151 *
152 152 * The driver enters serializer twice on close(). The close sequence is the
153 153 * following:
154 154 *
155 155 * 1) Wait until closing is safe (te_closewait becomes zero).
156 156 *    This step is needed to prevent close during serializer switches. In most
157 157 *    cases (close happening after connection establishment) te_closewait is
158 158 *    zero.
159 159 * 2) Set te_closing.
160 160 * 3) Call tl_close_ser() within the serializer and wait for it to complete.
161 161 *
162 162 *    tl_close_ser() simply marks the endpoint and wakes up the waiting tl_close().
163 163 * It also needs to clear write-side q_next pointers - this should be done
164 164 * before qprocsoff().
165 165 *
166 166 * This synchronous serializer entry during close is needed to ensure that
167 167 * the queue is valid everywhere inside the serializer.
168 168 *
169 169 * Note that in many cases close will execute tl_close_ser() synchronously,
170 170 * so it will not wait at all.
171 171 *
172 172 * 4) Call qprocsoff().
173 173 * 5) Call tl_close_finish_ser() within the serializer and wait for it to
174 174 *    complete (for COTS transports). For CLTS transports there is no wait.
175 175 *
176 176 *    tl_close_finish_ser() finishes the close process and wakes up the waiting
177 177 * close if there is any.
178 178 *
179 179 *    Note that in most cases close will enter tl_close_finish_ser()
180 180 * synchronously and will not wait at all.
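Condensed into a sketch (simplified from tl_close(); "rq" is the endpoint's read queue, and locking details and error paths are omitted):

	mutex_enter(&tep->te_closelock);
	while (tep->te_closewait != 0)			/* step 1 */
		cv_wait(&tep->te_closecv, &tep->te_closelock);
	tep->te_closing = B_TRUE;			/* step 2 */
	mutex_exit(&tep->te_closelock);

	tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);	/* 3 */
	/* ... wait for tl_close_ser() to wake us ... */
	qprocsoff(rq);							/* 4 */
	tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp); /* 5 */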
181 181 *
182 182 *
183 183 * Flow Control
184 184 * ============
185 185 *
186 186 * The driver implements both read and write side service routines. No one calls
187 187 * putq() on the read queue. The read side service routine tl_rsrv() is called
188 188 * when the read side stream is back-enabled. It enters serializer synchronously
189 189 * (waits until serializer processing is complete). Within the serializer it
190 190 * back-enables all endpoints blocked by the queue for connection-less
191 191 * transports and enables write side service processing for the peer for
192 192 * connection-oriented transports.
193 193 *
194 194 * Read and write side service routines use special mblk-sized space in the
195 195 * endpoint structure (te_rsrvmp and te_wsrvmp) to enter the serializer.
196 196 *
197 197 * Write-side flow control
198 198 * -----------------------
199 199 *
200 200 * Write side flow control is a bit tricky. The driver needs to deal with two
201 201 * message queues - the explicit STREAMS message queue maintained by
202 202 * putq()/getq()/putbq() and the implicit queue within the serializer. These two
203 203 * queues should be synchronized to preserve message ordering and should
204 204 * maintain a single order determined by the order in which messages enter
205 205 * tl_wput(). In order to maintain the ordering between these two queues the
206 206 * STREAMS queue is only manipulated within the serializer, so the ordering is
207 207 * provided by the serializer.
208 208 *
209 209 * Functions called from the tl_wsrv() sometimes may call putbq(). To
210 210 * immediately stop any further processing of the STREAMS message queues the
211 211 * code calling putbq() also sets the te_nowsrv flag in the endpoint. The write
212 212 * side service processing stops when the flag is set.
213 213 *
214 214 * The tl_wsrv() function enters serializer synchronously and waits for it to
215 215 * complete. The serializer call-back tl_wsrv_ser() either drains all messages
216 216 * on the STREAMS queue or terminates when it notices the te_nowsrv flag
217 217 * set. Note that the maximum number of messages processed by tl_wsrv_ser() is
218 218 * always bounded by the number of messages on the STREAMS queue at the time
219 219 * tl_wsrv_ser() is entered. Any new messages may only appear on the STREAMS
220 220 * queue from another serialized entry, which can't happen in parallel. This
221 221 * guarantees that tl_wsrv_ser() completes in bounded time (there is no risk
222 222 * of it draining forever while a writer places new messages on the STREAMS
223 223 * queue).
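A condensed sketch of that drain loop (simplified; the real tl_wsrv_ser() also wakes up the waiting tl_wsrv() when it is done):

	static void
	tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
	{
		mblk_t *mp;

		/* Drain at most what was queued before we entered. */
		while (!tep->te_nowsrv && (mp = getq(tep->te_wq)) != NULL) {
			/*
			 * The handler may TL_PUTBQ() the message, which puts
			 * it back and sets te_nowsrv, terminating this loop.
			 */
			tl_wput_common_ser(mp, tep);
		}
		tl_serializer_exit(tep);
	}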
224 224 *
225 225 * Note that a closing endpoint never sets te_nowsrv and never calls putbq().
226 226 *
227 227 *
228 228 * Unix Domain Sockets
229 229 * ===================
230 230 *
231 231 * The driver knows the structure of Unix Domain sockets addresses and treats
232 232 * them differently from generic TLI addresses. For sockets implicit binds are
233 233 * requested by setting SOU_MAGIC_IMPLICIT in the soua_magic part of the address
234 234 * instead of using an address length of zero. Explicit binds specify
235 235 * SOU_MAGIC_EXPLICIT as magic.
236 236 *
237 237 * For implicit binds we always use the minor number as the soua_vp part of the
238 238 * address and avoid any hash table lookups. This saves two hash table lookups per
239 239 * anonymous bind.
240 240 *
241 241 * For explicit addresses we hash the vnode pointer instead of hashing the
242 242 * full address+zone+length. Hashing by pointer is more efficient than
243 243 * hashing by the full address.
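For reference, a sketch of how the two bind flavors fill in the address (assuming the so_ux_addr layout from <sys/socketvar.h>; "vp" is a hypothetical vnode pointer of the bound file):

	soux_addr_t ux;

	/* Implicit bind: the endpoint's minor number is the address. */
	ux.soua_magic = SOU_MAGIC_IMPLICIT;
	ux.soua_vp = (void *)(uintptr_t)tep->te_minor;

	/* Explicit bind: sockfs supplies the vnode of the bound file. */
	ux.soua_magic = SOU_MAGIC_EXPLICIT;
	ux.soua_vp = vp;		/* this pointer is what gets hashed */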
244 244 *
245 245 * For unix domain sockets te_ap always points to the te_uxaddr part of the
246 246 * tep structure, so it should never be freed.
247 247 *
248 248 * Also, for sockets the driver always uses the minor number as the acceptor id.
249 249 *
250 250 * TPI VIOLATIONS
251 251 * --------------
252 252 *
253 253 * This driver violates TPI in several respects for Unix Domain Sockets:
254 254 *
255 255 * 1) It treats O_T_BIND_REQ as T_BIND_REQ and refuses bind if an explicit bind
256 256 * is requested and the endpoint is already in use. There is no point in
257 257 * generating an unused address since this address will be rejected by
258 258 * sockfs anyway. For implicit binds it always generates a new address
259 259 * (sets soua_vp to its minor number).
260 260 *
261 261 * 2) It always uses minor number as acceptor ID and never uses queue
262 262 * pointer. It is ok since sockets get acceptor ID from T_CAPABILITY_REQ
263 263 * message and they do not use the queue pointer.
264 264 *
265 265 * 3) For listener sockets the usual sequence is to issue bind() with zero
266 266 *    backlog followed by listen(). The listen() should be issued with a
267 267 *    non-zero backlog, so sotpi_listen() issues an unbind request followed by
268 268 *    a bind request to the same address but with a non-zero qlen value. Both
269 269 * tl_bind() and tl_unbind() require write lock on the hash table to
270 270 * insert/remove the address. The driver does not remove the address from
271 271 * the hash for endpoints that are bound to the explicit address and have
272 272 * backlog of zero. During T_BIND_REQ processing if the address requested
273 273 * is equal to the address the endpoint already has it updates the backlog
274 274 * without reinserting the address in the hash table. This optimization
275 275 * avoids two hash table updates for each listener created. It also
276 276 * avoids the problem of a "stolen" address, where another listener may take
277 277 * the same address between the unbind and bind, and listen() suddenly fails
278 278 * because the address is in use even though the bind() succeeded.
279 279 *
280 280 *
281 281 * CONNECTIONLESS TRANSPORTS
282 282 * =========================
283 283 *
284 284 * Connectionless transports share a single serializer per transport style (one
285 285 * for TLI and one for sockets). Functions executing behind the serializer can
286 286 * check or modify the state of any endpoint.
287 287 *
288 288 * When endpoint X talks to another endpoint Y it caches the pointer to Y in the
289 289 * te_lastep field. The next time X talks to some address A it checks whether A
290 290 * is the same as Y's address and if it is there is no need to lookup Y. If the
291 291 * is the same as Y's address and, if it is, there is no need to look up Y. If
292 292 * the address is different or the state of Y is not appropriate (e.g. closed or
293 293 * not idle) X does a lookup using tl_find_peer() and caches the new address.
294 294 * NOTE: tl_find_peer() never returns a closing endpoint and it places a refhold
295 295 *
296 296 * During close, endpoint Y doesn't try to remove itself from other
297 297 * endpoints' caches. They will detect that Y is gone and will search for the
298 298 * peer endpoint again.
299 299 *
300 300 * Flow Control Handling
301 301 * ----------------------
302 302 *
303 303 * Each connectionless endpoint keeps a list of endpoints which are
304 304 * flow-controlled by its queue. It also keeps a pointer to the queue which
305 305 * flow-controls itself. Whenever flow control is released for endpoint X, it
306 306 * enables all queues from the list. During close it also back-enables everyone
307 307 * in the list. If X is flow-controlled when it is closing, it removes itself
308 308 * from its peer's list.
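A simplified sketch of that back-enabling walk (close in spirit to tl_cl_backenable(), declared below):

	tl_endpt_t *elp;

	/* Wake every endpoint that queued behind us. */
	while ((elp = list_head(&tep->te_flowlist)) != NULL) {
		ASSERT(tep == elp->te_flowq);
		list_remove(&tep->te_flowlist, elp);
		elp->te_flowq = NULL;
		TL_QENABLE(elp);	/* restart the peer's write service */
	}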
309 309 *
310 310 * DATA STRUCTURES
311 311 * ===============
312 312 *
313 313 * Each endpoint is represented by the tl_endpt_t structure which keeps all the
314 314 * endpoint state. For connection-oriented transports it keeps a list
315 315 * of pending connections (tl_icon_t). For connectionless transports it keeps a
316 316 * list of endpoints flow controlled by this one.
317 317 *
318 318 * Each transport type is represented by a per-transport data structure
319 319 * tl_transport_state_t. It contains pointers to the acceptor ID hash and the
320 320 * endpoint address hash tables for each transport. It also contains a pointer
321 321 * to the transport serializer for connectionless transports.
322 322 *
323 323 * Each endpoint keeps a link to its transport structure, so the code can find
324 324 * all per-transport information quickly.
325 325 */
326 326
327 327 #include <sys/types.h>
328 328 #include <sys/inttypes.h>
329 329 #include <sys/stream.h>
330 330 #include <sys/stropts.h>
331 331 #define _SUN_TPI_VERSION 2
332 332 #include <sys/tihdr.h>
333 333 #include <sys/strlog.h>
334 334 #include <sys/debug.h>
335 335 #include <sys/cred.h>
336 336 #include <sys/errno.h>
337 337 #include <sys/kmem.h>
338 338 #include <sys/id_space.h>
339 339 #include <sys/modhash.h>
340 340 #include <sys/mkdev.h>
341 341 #include <sys/tl.h>
342 342 #include <sys/stat.h>
343 343 #include <sys/conf.h>
344 344 #include <sys/modctl.h>
345 345 #include <sys/strsun.h>
346 346 #include <sys/socket.h>
347 347 #include <sys/socketvar.h>
348 348 #include <sys/sysmacros.h>
349 349 #include <sys/xti_xtiopt.h>
350 350 #include <sys/ddi.h>
351 351 #include <sys/sunddi.h>
352 352 #include <sys/zone.h>
353 353 #include <inet/common.h> /* typedef int (*pfi_t)() for inet/optcom.h */
354 354 #include <inet/optcom.h>
355 355 #include <sys/strsubr.h>
356 356 #include <sys/ucred.h>
357 357 #include <sys/suntpi.h>
358 358 #include <sys/list.h>
359 359 #include <sys/serializer.h>
360 360
361 361 /*
362 362 * TBD List
363 363 * 14. Eliminate state changes through table
364 364 * 16. AF_UNIX socket options
365 365 * 17. connect() for ticlts
366 366 * 18. support for "netstat" to show AF_UNIX plus TLI local
367 367 * transport connections
368 368 * 21. sanity check to flushing on sending M_ERROR
369 369 */
370 370
371 371 /*
372 372 * CONSTANT DECLARATIONS
373 373 * --------------------
374 374 */
375 375
376 376 /*
377 377 * Local declarations
378 378 */
379 -#define NEXTSTATE(EV, ST) ti_statetbl[EV][ST]
380 -
381 379 #define BADSEQNUM (-1) /* initial seq number used by T_DISCON_IND */
382 380 #define TL_BUFWAIT (10000) /* usecs to wait for allocb buffer timeout */
383 381 #define TL_TIDUSZ (64*1024) /* tidu size when "strmsgz" is unlimited (0) */
384 382 /*
385 383 * Hash tables size.
386 384 */
387 385 #define TL_HASH_SIZE 311
388 386
389 387 /*
390 388 * Definitions for module_info
391 389 */
392 390 #define TL_ID (104) /* module ID number */
393 391 #define TL_NAME "tl" /* module name */
394 392 #define TL_MINPSZ (0) /* min packet size */
395 393 #define TL_MAXPSZ INFPSZ /* max packet size ZZZ */
396 394 #define TL_HIWAT (16*1024) /* hi water mark */
397 395 #define TL_LOWAT (256) /* lo water mark */
398 396 /*
399 397 * Definition of minor numbers/modes for new transport provider modes.
400 398 * We view the socket use as a separate mode to get a separate name space.
401 399 */
402 400 #define TL_TICOTS 0 /* connection oriented transport */
403 401 #define TL_TICOTSORD 1 /* COTS w/ orderly release */
404 402 #define TL_TICLTS 2 /* connectionless transport */
405 403 #define TL_UNUSED 3
406 404 #define TL_SOCKET 4 /* Socket */
407 405 #define TL_SOCK_COTS (TL_SOCKET | TL_TICOTS)
408 406 #define TL_SOCK_COTSORD (TL_SOCKET | TL_TICOTSORD)
409 407 #define TL_SOCK_CLTS (TL_SOCKET | TL_TICLTS)
410 408
411 409 #define TL_MINOR_MASK 0x7
412 410 #define TL_MINOR_START (TL_TICLTS + 1)
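For example, a sketch of the open-time decoding (mirroring tl_open() below): opening the ticotsord node yields minor TL_TICOTSORD (1); the same open issued by sockfs with SO_SOCKSTR set ORs in TL_SOCKET, giving TL_SOCK_COTSORD (5).

	minor_t minor = getminor(*devp);	/* TL_TICOTS..TL_TICLTS */

	if (oflag & SO_SOCKSTR)
		minor |= TL_SOCKET;		/* separate socket name space */

	tep->te_flag = minor & TL_MINOR_MASK;
	tep->te_transport = &tl_transports[minor];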
413 411
414 412 /*
415 413 * LOCAL MACROS
416 414 */
417 415 #define T_ALIGN(p) P2ROUNDUP((p), sizeof (t_scalar_t))
418 416
419 417 /*
420 - * EXTERNAL VARIABLE DECLARATIONS
421 - * -----------------------------
422 - */
423 -/*
424 - * state table defined in the OS space.c
425 - */
426 -extern char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];
427 -
428 -/*
429 418 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
430 419 */
431 420 static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
432 421 static int tl_close(queue_t *, int, cred_t *);
433 422 static int tl_wput(queue_t *, mblk_t *);
434 423 static int tl_wsrv(queue_t *);
435 424 static int tl_rsrv(queue_t *);
436 425
437 426 static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
438 427 static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
439 428 static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
440 429
441 430
442 431 /*
443 432 * GLOBAL DATA STRUCTURES AND VARIABLES
444 433 * -----------------------------------
445 434 */
446 435
447 436 /*
448 437 * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ
449 438 * For now, we only manage the SO_RECVUCRED option but we also have
450 439 * harmless dummy options to make things work with some common code we access.
451 440 */
452 441 opdes_t tl_opt_arr[] = {
453 442 /* The SO_TYPE is needed for the hack below */
454 443 {
455 444 SO_TYPE,
456 445 SOL_SOCKET,
457 446 OA_R,
458 447 OA_R,
459 448 OP_NP,
460 449 0,
461 450 sizeof (t_scalar_t),
462 451 0
463 452 },
464 453 {
465 454 SO_RECVUCRED,
466 455 SOL_SOCKET,
467 456 OA_RW,
468 457 OA_RW,
469 458 OP_NP,
470 459 0,
471 460 sizeof (int),
472 461 0
473 462 }
474 463 };
475 464
476 465 /*
477 466 * Table of all supported levels
478 467 * Note: Some levels (e.g. XTI_GENERIC) may be valid but may not have
479 468 * any supported options so we need this info separately.
480 469 *
481 470 * This is needed only for topmost tpi providers.
482 471 */
483 472 optlevel_t tl_valid_levels_arr[] = {
484 473 XTI_GENERIC,
485 474 SOL_SOCKET,
486 475 TL_PROT_LEVEL
487 476 };
488 477
489 478 #define TL_VALID_LEVELS_CNT A_CNT(tl_valid_levels_arr)
490 479 /*
491 480 * Current upper bound on the amount of space needed to return all options.
492 481 * Additional options with data size of sizeof(long) are handled automatically.
493 482 * Others need to be handled by hand.
494 483 */
495 484 #define TL_MAX_OPT_BUF_LEN \
496 485 ((A_CNT(tl_opt_arr) << 2) + \
497 486 (A_CNT(tl_opt_arr) * sizeof (struct opthdr)) + \
498 487 + 64 + sizeof (struct T_optmgmt_ack))
499 488
500 489 #define TL_OPT_ARR_CNT A_CNT(tl_opt_arr)
501 490
502 491 /*
503 492 * transport addr structure
504 493 */
505 494 typedef struct tl_addr {
506 495 zoneid_t ta_zoneid; /* Zone scope of address */
507 496 t_scalar_t ta_alen; /* length of abuf */
508 497 void *ta_abuf; /* the addr itself */
509 498 } tl_addr_t;
510 499
511 500 /*
512 501 * Refcounted version of serializer.
513 502 */
514 503 typedef struct tl_serializer {
515 504 uint_t ts_refcnt;
516 505 serializer_t *ts_serializer;
517 506 } tl_serializer_t;
518 507
519 508 /*
520 509 * Per-transport state.
521 510 * Each transport type has a separate instance of this structure.
522 511 */
523 512 typedef struct tl_transport_state {
524 513 char *tr_name;
525 514 minor_t tr_minor;
526 515 uint32_t tr_defaddr;
527 516 mod_hash_t *tr_ai_hash;
528 517 mod_hash_t *tr_addr_hash;
529 518 tl_serializer_t *tr_serializer;
530 519 } tl_transport_state_t;
531 520
532 521 #define TL_DFADDR 0x1000
533 522
534 523 static tl_transport_state_t tl_transports[] = {
535 524 { "ticots", TL_TICOTS, TL_DFADDR, NULL, NULL, NULL },
536 525 { "ticotsord", TL_TICOTSORD, TL_DFADDR, NULL, NULL, NULL },
537 526 { "ticlts", TL_TICLTS, TL_DFADDR, NULL, NULL, NULL },
538 527 { "undefined", TL_UNUSED, TL_DFADDR, NULL, NULL, NULL },
539 528 { "sticots", TL_SOCK_COTS, TL_DFADDR, NULL, NULL, NULL },
540 529 { "sticotsord", TL_SOCK_COTSORD, TL_DFADDR, NULL, NULL, NULL },
541 530 { "sticlts", TL_SOCK_CLTS, TL_DFADDR, NULL, NULL, NULL }
542 531 };
543 532
544 533 #define TL_MAXTRANSPORT A_CNT(tl_transports)
545 534
546 535 struct tl_endpt;
547 536 typedef struct tl_endpt tl_endpt_t;
548 537
549 538 typedef void (tlproc_t)(mblk_t *, tl_endpt_t *);
550 539
551 540 /*
552 541 * Data structure used to represent pending connects.
553 542 * Records enough information so that the connecting peer can close
554 543 * before the connection gets accepted.
555 544 */
556 545 typedef struct tl_icon {
557 546 list_node_t ti_node;
558 547 struct tl_endpt *ti_tep; /* NULL if peer has already closed */
559 548 mblk_t *ti_mp; /* b_next list of data + ordrel_ind */
560 549 t_scalar_t ti_seqno; /* Sequence number */
561 550 } tl_icon_t;
562 551
563 552 typedef struct so_ux_addr soux_addr_t;
564 553 #define TL_SOUX_ADDRLEN sizeof (soux_addr_t)
565 554
566 555 /*
567 556 * Maximum number of unaccepted connection indications allowed per listener.
568 557 */
569 558 #define TL_MAXQLEN 4096
570 559 int tl_maxqlen = TL_MAXQLEN;
571 560
572 561 /*
573 562 * transport endpoint structure
574 563 */
575 564 struct tl_endpt {
576 565 queue_t *te_rq; /* stream read queue */
577 566 queue_t *te_wq; /* stream write queue */
578 567 uint32_t te_refcnt;
579 568 int32_t te_state; /* TPI state of endpoint */
580 569 minor_t te_minor; /* minor number */
581 570 #define te_seqno te_minor
582 571 uint_t te_flag; /* flag field */
583 572 boolean_t te_nowsrv;
584 573 tl_serializer_t *te_ser; /* Serializer to use */
585 574 #define te_serializer te_ser->ts_serializer
586 575
587 576 soux_addr_t te_uxaddr; /* Socket address */
588 577 #define te_magic te_uxaddr.soua_magic
589 578 #define te_vp te_uxaddr.soua_vp
590 579 tl_addr_t te_ap; /* addr bound to this endpt */
591 580 #define te_zoneid te_ap.ta_zoneid
592 581 #define te_alen te_ap.ta_alen
593 582 #define te_abuf te_ap.ta_abuf
594 583
595 584 tl_transport_state_t *te_transport;
596 585 #define te_addrhash te_transport->tr_addr_hash
597 586 #define te_aihash te_transport->tr_ai_hash
598 587 #define te_defaddr te_transport->tr_defaddr
599 588 cred_t *te_credp; /* endpoint user credentials */
600 589 mod_hash_hndl_t te_hash_hndl; /* Handle for address hash */
601 590
602 591 /*
603 592 * State specific for connection-oriented and connectionless transports.
604 593 */
605 594 union {
606 595 /* Connection-oriented state. */
607 596 struct {
608 597 t_uscalar_t _te_nicon; /* count of conn requests */
609 598 t_uscalar_t _te_qlen; /* max conn requests */
610 599 tl_endpt_t *_te_oconp; /* conn request pending */
611 600 tl_endpt_t *_te_conp; /* connected endpt */
612 601 #ifndef _ILP32
613 602 void *_te_pad;
614 603 #endif
615 604 list_t _te_iconp; /* list of conn ind. pending */
616 605 } _te_cots_state;
617 606 /* Connection-less state. */
618 607 struct {
619 608 tl_endpt_t *_te_lastep; /* last dest. endpoint */
620 609 tl_endpt_t *_te_flowq; /* flow controlled on whom */
621 610 list_node_t _te_flows; /* lists of connections */
622 611 list_t _te_flowlist; /* Who flowcontrols on me */
623 612 } _te_clts_state;
624 613 } _te_transport_state;
625 614 #define te_nicon _te_transport_state._te_cots_state._te_nicon
626 615 #define te_qlen _te_transport_state._te_cots_state._te_qlen
627 616 #define te_oconp _te_transport_state._te_cots_state._te_oconp
628 617 #define te_conp _te_transport_state._te_cots_state._te_conp
629 618 #define te_iconp _te_transport_state._te_cots_state._te_iconp
630 619 #define te_lastep _te_transport_state._te_clts_state._te_lastep
631 620 #define te_flowq _te_transport_state._te_clts_state._te_flowq
632 621 #define te_flowlist _te_transport_state._te_clts_state._te_flowlist
633 622 #define te_flows _te_transport_state._te_clts_state._te_flows
634 623
635 624 bufcall_id_t te_bufcid; /* outstanding bufcall id */
636 625 timeout_id_t te_timoutid; /* outstanding timeout id */
637 626 pid_t te_cpid; /* cached pid of endpoint */
638 627 t_uscalar_t te_acceptor_id; /* acceptor id for T_CONN_RES */
639 628 /*
640 629 * Pieces of the endpoint state needed for closing.
641 630 */
642 631 kmutex_t te_closelock;
643 632 kcondvar_t te_closecv;
644 633 uint8_t te_closing; /* The endpoint started closing */
645 634 uint8_t te_closewait; /* Wait in close until zero */
646 635 mblk_t te_closemp; /* for entering serializer on close */
647 636 mblk_t te_rsrvmp; /* for entering serializer on rsrv */
648 637 mblk_t te_wsrvmp; /* for entering serializer on wsrv */
649 638 kmutex_t te_srv_lock;
650 639 kcondvar_t te_srv_cv;
651 640 uint8_t te_rsrv_active; /* Running in tl_rsrv() */
652 641 uint8_t te_wsrv_active; /* Running in tl_wsrv() */
653 642 /*
654 643 * Pieces of the endpoint state needed for serializer transitions.
655 644 */
656 645 kmutex_t te_ser_lock; /* Protects the count below */
657 646 uint_t te_ser_count; /* Number of messages on serializer */
658 647 };
659 648
660 649 /*
661 650 * Flag values. Lower 4 bits specify the transport used.
662 651 * TL_LISTENER, TL_ACCEPTOR, TL_ACCEPTED and TL_EAGER are for debugging only;
663 652 * they make it easier to identify the endpoint.
664 653 */
665 654 #define TL_LISTENER 0x00010 /* the listener endpoint */
666 655 #define TL_ACCEPTOR 0x00020 /* the accepting endpoint */
667 656 #define TL_EAGER 0x00040 /* connecting endpoint */
668 657 #define TL_ACCEPTED 0x00080 /* accepted connection */
669 658 #define TL_SETCRED 0x00100 /* flag to indicate sending of credentials */
670 659 #define TL_SETUCRED 0x00200 /* flag to indicate sending of ucred */
671 660 #define TL_SOCKUCRED 0x00400 /* flag to indicate sending of SCM_UCRED */
672 661 #define TL_ADDRHASHED 0x01000 /* Endpoint address is stored in te_addrhash */
673 662 #define TL_CLOSE_SER 0x10000 /* Endpoint close has entered the serializer */
674 663 /*
675 664 * Boolean checks for the endpoint type.
676 665 */
677 666 #define IS_CLTS(x) (((x)->te_flag & TL_TICLTS) != 0)
678 667 #define IS_COTS(x) (((x)->te_flag & TL_TICLTS) == 0)
679 668 #define IS_COTSORD(x) (((x)->te_flag & TL_TICOTSORD) != 0)
680 669 #define IS_SOCKET(x) (((x)->te_flag & TL_SOCKET) != 0)
681 670
682 671 /*
683 672 * Certain operations are always used together. These macros reduce the chance
684 673 * of missing a part of a combination.
685 674 */
686 675 #define TL_UNCONNECT(x) { tl_refrele(x); x = NULL; }
687 676 #define TL_REMOVE_PEER(x) { if ((x) != NULL) TL_UNCONNECT(x) }
688 677
689 678 #define TL_PUTBQ(x, mp) { \
690 679 ASSERT(!((x)->te_flag & TL_CLOSE_SER)); \
691 680 (x)->te_nowsrv = B_TRUE; \
692 681 (void) putbq((x)->te_wq, mp); \
693 682 }
694 683
695 684 #define TL_QENABLE(x) { (x)->te_nowsrv = B_FALSE; qenable((x)->te_wq); }
696 685 #define TL_PUTQ(x, mp) { (x)->te_nowsrv = B_FALSE; (void)putq((x)->te_wq, mp); }
697 686
698 687 /*
699 688 * STREAMS driver glue data structures.
700 689 */
701 690 static struct module_info tl_minfo = {
702 691 TL_ID, /* mi_idnum */
703 692 TL_NAME, /* mi_idname */
704 693 TL_MINPSZ, /* mi_minpsz */
705 694 TL_MAXPSZ, /* mi_maxpsz */
706 695 TL_HIWAT, /* mi_hiwat */
707 696 TL_LOWAT /* mi_lowat */
708 697 };
709 698
710 699 static struct qinit tl_rinit = {
711 700 NULL, /* qi_putp */
712 701 tl_rsrv, /* qi_srvp */
713 702 tl_open, /* qi_qopen */
714 703 tl_close, /* qi_qclose */
715 704 NULL, /* qi_qadmin */
716 705 &tl_minfo, /* qi_minfo */
717 706 NULL /* qi_mstat */
718 707 };
719 708
720 709 static struct qinit tl_winit = {
721 710 tl_wput, /* qi_putp */
722 711 tl_wsrv, /* qi_srvp */
723 712 NULL, /* qi_qopen */
724 713 NULL, /* qi_qclose */
725 714 NULL, /* qi_qadmin */
726 715 &tl_minfo, /* qi_minfo */
727 716 NULL /* qi_mstat */
728 717 };
729 718
730 719 static struct streamtab tlinfo = {
731 720 &tl_rinit, /* st_rdinit */
732 721 &tl_winit, /* st_wrinit */
733 722 NULL, /* st_muxrinit */
734 723 NULL /* st_muxwrinit */
735 724 };
736 725
737 726 DDI_DEFINE_STREAM_OPS(tl_devops, nulldev, nulldev, tl_attach, tl_detach,
738 727 nulldev, tl_info, D_MP, &tlinfo, ddi_quiesce_not_supported);
739 728
740 729 static struct modldrv modldrv = {
741 730 &mod_driverops, /* Type of module -- pseudo driver here */
742 731 "TPI Local Transport (tl)",
743 732 &tl_devops, /* driver ops */
744 733 };
745 734
746 735 /*
747 736 * Module linkage information for the kernel.
748 737 */
749 738 static struct modlinkage modlinkage = {
750 739 MODREV_1,
751 740 &modldrv,
752 741 NULL
753 742 };
754 743
755 744 /*
756 745 * Templates for response to info request
757 746 * Check sanity of unlimited connect data etc.
758 747 */
759 748
760 749 #define TL_CLTS_PROVIDER_FLAG (XPG4_1 | SENDZERO)
761 750 #define TL_COTS_PROVIDER_FLAG (XPG4_1 | SENDZERO)
762 751
763 752 static struct T_info_ack tl_cots_info_ack =
764 753 {
765 754 T_INFO_ACK, /* PRIM_type -always T_INFO_ACK */
766 755 T_INFINITE, /* TSDU size */
767 756 T_INFINITE, /* ETSDU size */
768 757 T_INFINITE, /* CDATA_size */
769 758 T_INFINITE, /* DDATA_size */
770 759 T_INFINITE, /* ADDR_size */
771 760 T_INFINITE, /* OPT_size */
772 761 0, /* TIDU_size - fill at run time */
773 762 T_COTS, /* SERV_type */
774 763 -1, /* CURRENT_state */
775 764 TL_COTS_PROVIDER_FLAG /* PROVIDER_flag */
776 765 };
777 766
778 767 static struct T_info_ack tl_clts_info_ack =
779 768 {
780 769 T_INFO_ACK, /* PRIM_type - always T_INFO_ACK */
781 770 0, /* TSDU_size - fill at run time */
782 771 -2, /* ETSDU_size -2 => not supported */
783 772 -2, /* CDATA_size -2 => not supported */
784 773 -2, /* DDATA_size -2 => not supported */
785 774 -1, /* ADDR_size -1 => infinite */
786 775 -1, /* OPT_size */
787 776 0, /* TIDU_size - fill at run time */
788 777 T_CLTS, /* SERV_type */
789 778 -1, /* CURRENT_state */
790 779 TL_CLTS_PROVIDER_FLAG /* PROVIDER_flag */
791 780 };
792 781
793 782 /*
794 783 * private copy of devinfo pointer used in tl_info
795 784 */
796 785 static dev_info_t *tl_dip;
797 786
798 787 /*
799 788 * Endpoints cache.
800 789 */
801 790 static kmem_cache_t *tl_cache;
802 791 /*
803 792 * Minor number space.
804 793 */
805 794 static id_space_t *tl_minors;
806 795
807 796 /*
808 797 * Default Data Unit size.
809 798 */
810 799 static t_scalar_t tl_tidusz;
811 800
812 801 /*
813 802 * Size of hash tables.
814 803 */
815 804 static size_t tl_hash_size = TL_HASH_SIZE;
816 805
817 806 /*
818 807 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
819 808 * for sockets.
820 809 */
821 810 static int tl_disable_early_connect = 0;
822 811 static int tl_client_closing_when_accepting;
823 812
824 813 static int tl_serializer_noswitch;
825 814
815 +#define nr 127 /* not reachable */
816 +
817 +#define TE_NOEVENTS 28
818 +
819 +static char nextstate[TE_NOEVENTS][TS_NOSTATES] = {
820 + /* STATES */
821 + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
822 +
823 +/* Initialization events */
824 +
825 +#define TE_BIND_REQ 0 /* bind request */
826 + { 1, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
827 +#define TE_UNBIND_REQ 1 /* unbind request */
828 + {nr, nr, nr, 2, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
829 +#define TE_OPTMGMT_REQ 2 /* manage options req */
830 + {nr, nr, nr, 4, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
831 +#define TE_BIND_ACK 3 /* bind acknowledgment */
832 + {nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
833 +#define TE_OPTMGMT_ACK 4 /* manage options ack */
834 + {nr, nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
835 +#define TE_ERROR_ACK 5 /* error acknowledgment */
836 + {nr, 0, 3, nr, 3, 3, nr, nr, 7, nr, nr, nr, 6, 7, 9, 10, 11},
837 +#define TE_OK_ACK1 6 /* ok ack seqcnt == 0 */
838 + {nr, nr, 0, nr, nr, 6, nr, nr, nr, nr, nr, nr, 3, nr, 3, 3, 3},
839 +#define TE_OK_ACK2 7 /* ok ack seqcnt == 1, q == resq */
840 + {nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, nr, nr, nr, 3, nr, nr, nr},
841 +#define TE_OK_ACK3 8 /* ok ack seqcnt == 1, q != resq */
842 + {nr, nr, nr, nr, nr, nr, nr, nr, 3, nr, nr, nr, nr, 3, nr, nr, nr},
843 +#define TE_OK_ACK4 9 /* ok ack seqcnt > 1 */
844 + {nr, nr, nr, nr, nr, nr, nr, nr, 7, nr, nr, nr, nr, 7, nr, nr, nr},
845 +
846 +/* Connection oriented events */
847 +#define TE_CONN_REQ 10 /* connection request */
848 + {nr, nr, nr, 5, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
849 +#define TE_CONN_RES 11 /* connection response */
850 + {nr, nr, nr, nr, nr, nr, nr, 8, nr, nr, nr, nr, nr, nr, nr, nr, nr},
851 +#define TE_DISCON_REQ 12 /* disconnect request */
852 + {nr, nr, nr, nr, nr, nr, 12, 13, nr, 14, 15, 16, nr, nr, nr, nr, nr},
853 +#define TE_DATA_REQ 13 /* data request */
854 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, 11, nr, nr, nr, nr, nr},
855 +#define TE_EXDATA_REQ 14 /* expedited data request */
856 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, 11, nr, nr, nr, nr, nr},
857 +#define TE_ORDREL_REQ 15 /* orderly release req */
858 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 10, nr, 3, nr, nr, nr, nr, nr},
859 +#define TE_CONN_IND 16 /* connection indication */
860 + {nr, nr, nr, 7, nr, nr, nr, 7, nr, nr, nr, nr, nr, nr, nr, nr, nr},
861 +#define TE_CONN_CON 17 /* connection confirmation */
862 + {nr, nr, nr, nr, nr, nr, 9, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
863 +#define TE_DATA_IND 18 /* data indication */
864 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, 10, nr, nr, nr, nr, nr, nr},
865 +#define TE_EXDATA_IND 19 /* expedited data indication */
866 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, 10, nr, nr, nr, nr, nr, nr},
867 +#define TE_ORDREL_IND 20 /* orderly release ind */
868 + {nr, nr, nr, nr, nr, nr, nr, nr, nr, 11, 3, nr, nr, nr, nr, nr, nr},
869 +#define TE_DISCON_IND1 21 /* disconnect indication seq == 0 */
870 + {nr, nr, nr, nr, nr, nr, 3, nr, nr, 3, 3, 3, nr, nr, nr, nr, nr},
871 +#define TE_DISCON_IND2 22 /* disconnect indication seq == 1 */
872 + {nr, nr, nr, nr, nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr},
873 +#define TE_DISCON_IND3 23 /* disconnect indication seq > 1 */
874 + {nr, nr, nr, nr, nr, nr, nr, 7, nr, nr, nr, nr, nr, nr, nr, nr, nr},
875 +#define TE_PASS_CONN 24 /* pass connection */
876 + {nr, nr, nr, 9, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
877 +
878 +
879 +/* Unit data events */
880 +
881 +#define TE_UNITDATA_REQ 25 /* unitdata request */
882 + {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
883 +#define TE_UNITDATA_IND 26 /* unitdata indication */
884 + {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
885 +#define TE_UDERROR_IND 27 /* unitdata error indication */
886 + {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
887 +};
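A sketch of how the table is meant to be consulted (this mirrors the NEXTSTATE(EV, ST) macro that the removed ti_statetbl import provided; a result of nr means the event is invalid in the current state):

	/* T_BIND_REQ in TS_UNBND (0) moves the endpoint to TS_WACK_BREQ (1). */
	tep->te_state = nextstate[TE_BIND_REQ][tep->te_state];

	/* ... bind succeeds; TE_BIND_ACK moves TS_WACK_BREQ (1) to TS_IDLE (3). */
	tep->te_state = nextstate[TE_BIND_ACK][tep->te_state];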
888 +
889 +
890 +
826 891 /*
827 892 * LOCAL FUNCTION PROTOTYPES
828 893 * -------------------------
829 894 */
830 895 static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
831 896 static void tl_do_proto(mblk_t *, tl_endpt_t *);
832 897 static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
833 898 static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
834 899 static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
835 900 t_scalar_t);
836 901 static void tl_bind(mblk_t *, tl_endpt_t *);
837 902 static void tl_bind_ser(mblk_t *, tl_endpt_t *);
838 903 static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
839 904 static void tl_unbind(mblk_t *, tl_endpt_t *);
840 905 static void tl_optmgmt(queue_t *, mblk_t *);
841 906 static void tl_conn_req(queue_t *, mblk_t *);
842 907 static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
843 908 static void tl_conn_res(mblk_t *, tl_endpt_t *);
844 909 static void tl_discon_req(mblk_t *, tl_endpt_t *);
845 910 static void tl_capability_req(mblk_t *, tl_endpt_t *);
846 911 static void tl_info_req_ser(mblk_t *, tl_endpt_t *);
847 912 static void tl_addr_req_ser(mblk_t *, tl_endpt_t *);
848 913 static void tl_info_req(mblk_t *, tl_endpt_t *);
849 914 static void tl_addr_req(mblk_t *, tl_endpt_t *);
850 915 static void tl_connected_cots_addr_req(mblk_t *, tl_endpt_t *);
851 916 static void tl_data(mblk_t *, tl_endpt_t *);
852 917 static void tl_exdata(mblk_t *, tl_endpt_t *);
853 918 static void tl_ordrel(mblk_t *, tl_endpt_t *);
854 919 static void tl_unitdata(mblk_t *, tl_endpt_t *);
855 920 static void tl_unitdata_ser(mblk_t *, tl_endpt_t *);
856 921 static void tl_uderr(queue_t *, mblk_t *, t_scalar_t);
857 922 static tl_endpt_t *tl_find_peer(tl_endpt_t *, tl_addr_t *);
858 923 static tl_endpt_t *tl_sock_find_peer(tl_endpt_t *, struct so_ux_addr *);
859 924 static boolean_t tl_get_any_addr(tl_endpt_t *, tl_addr_t *);
860 925 static void tl_cl_backenable(tl_endpt_t *);
861 926 static void tl_co_unconnect(tl_endpt_t *);
862 927 static mblk_t *tl_resizemp(mblk_t *, ssize_t);
863 928 static void tl_discon_ind(tl_endpt_t *, uint32_t);
864 929 static mblk_t *tl_discon_ind_alloc(uint32_t, t_scalar_t);
865 930 static mblk_t *tl_ordrel_ind_alloc(void);
866 931 static tl_icon_t *tl_icon_find(tl_endpt_t *, t_scalar_t);
867 932 static void tl_icon_queuemsg(tl_endpt_t *, t_scalar_t, mblk_t *);
868 933 static boolean_t tl_icon_hasprim(tl_endpt_t *, t_scalar_t, t_scalar_t);
869 934 static void tl_icon_sendmsgs(tl_endpt_t *, mblk_t **);
870 935 static void tl_icon_freemsgs(mblk_t **);
871 936 static void tl_merror(queue_t *, mblk_t *, int);
872 937 static void tl_fill_option(uchar_t *, cred_t *, pid_t, int, cred_t *);
873 938 static int tl_default_opt(queue_t *, int, int, uchar_t *);
874 939 static int tl_get_opt(queue_t *, int, int, uchar_t *);
875 940 static int tl_set_opt(queue_t *, uint_t, int, int, uint_t, uchar_t *, uint_t *,
876 941 uchar_t *, void *, cred_t *);
877 942 static void tl_memrecover(queue_t *, mblk_t *, size_t);
878 943 static void tl_freetip(tl_endpt_t *, tl_icon_t *);
879 944 static void tl_free(tl_endpt_t *);
880 945 static int tl_constructor(void *, void *, int);
881 946 static void tl_destructor(void *, void *);
882 947 static void tl_find_callback(mod_hash_key_t, mod_hash_val_t);
883 948 static tl_serializer_t *tl_serializer_alloc(int);
884 949 static void tl_serializer_refhold(tl_serializer_t *);
885 950 static void tl_serializer_refrele(tl_serializer_t *);
886 951 static void tl_serializer_enter(tl_endpt_t *, tlproc_t, mblk_t *);
887 952 static void tl_serializer_exit(tl_endpt_t *);
888 953 static boolean_t tl_noclose(tl_endpt_t *);
889 954 static void tl_closeok(tl_endpt_t *);
890 955 static void tl_refhold(tl_endpt_t *);
891 956 static void tl_refrele(tl_endpt_t *);
892 957 static int tl_hash_cmp_addr(mod_hash_key_t, mod_hash_key_t);
893 958 static uint_t tl_hash_by_addr(void *, mod_hash_key_t);
894 959 static void tl_close_ser(mblk_t *, tl_endpt_t *);
895 960 static void tl_close_finish_ser(mblk_t *, tl_endpt_t *);
896 961 static void tl_wput_data_ser(mblk_t *, tl_endpt_t *);
897 962 static void tl_proto_ser(mblk_t *, tl_endpt_t *);
898 963 static void tl_putq_ser(mblk_t *, tl_endpt_t *);
899 964 static void tl_wput_common_ser(mblk_t *, tl_endpt_t *);
900 965 static void tl_wput_ser(mblk_t *, tl_endpt_t *);
901 966 static void tl_wsrv_ser(mblk_t *, tl_endpt_t *);
902 967 static void tl_rsrv_ser(mblk_t *, tl_endpt_t *);
903 968 static void tl_addr_unbind(tl_endpt_t *);
904 969
905 970 /*
906 971 * Initialize option database object for TL
907 972 */
908 973
909 974 optdb_obj_t tl_opt_obj = {
910 975 tl_default_opt, /* TL default value function pointer */
911 976 tl_get_opt, /* TL get function pointer */
912 977 tl_set_opt, /* TL set function pointer */
913 978 TL_OPT_ARR_CNT, /* TL option database count of entries */
914 979 tl_opt_arr, /* TL option database */
915 980 TL_VALID_LEVELS_CNT, /* TL valid level count of entries */
916 981 tl_valid_levels_arr /* TL valid level array */
917 982 };
918 983
919 984 /*
920 985 * LOCAL FUNCTIONS AND DRIVER ENTRY POINTS
921 986 * ---------------------------------------
922 987 */
923 988
924 989 /*
925 990 * Loadable module routines
926 991 */
927 992 int
928 993 _init(void)
929 994 {
930 995 return (mod_install(&modlinkage));
931 996 }
932 997
933 998 int
934 999 _fini(void)
935 1000 {
936 1001 return (mod_remove(&modlinkage));
937 1002 }
938 1003
939 1004 int
940 1005 _info(struct modinfo *modinfop)
941 1006 {
942 1007 return (mod_info(&modlinkage, modinfop));
943 1008 }
944 1009
945 1010 /*
946 1011 * Driver Entry Points and Other routines
947 1012 */
948 1013 static int
949 1014 tl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
950 1015 {
951 1016 int i;
952 1017 char name[32];
953 1018
954 1019 /*
955 1020 * Resume from a checkpoint state.
956 1021 */
957 1022 if (cmd == DDI_RESUME)
958 1023 return (DDI_SUCCESS);
959 1024
960 1025 if (cmd != DDI_ATTACH)
961 1026 return (DDI_FAILURE);
962 1027
963 1028 /*
964 1029 * Deduce TIDU size to use. Note: "strmsgsz" being 0 has semantics that
965 1030 * streams message sizes can be unlimited. We use a defined constant
966 1031 * instead.
967 1032 */
968 1033 tl_tidusz = strmsgsz != 0 ? (t_scalar_t)strmsgsz : TL_TIDUSZ;
969 1034
970 1035 /*
971 1036 * Create subdevices for each transport.
972 1037 */
973 1038 for (i = 0; i < TL_UNUSED; i++) {
974 1039 if (ddi_create_minor_node(devi,
975 1040 tl_transports[i].tr_name,
976 1041 S_IFCHR, tl_transports[i].tr_minor,
977 1042 DDI_PSEUDO, 0) == DDI_FAILURE) {
978 1043 ddi_remove_minor_node(devi, NULL);
979 1044 return (DDI_FAILURE);
980 1045 }
981 1046 }
982 1047
983 1048 tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t),
984 1049 0, tl_constructor, tl_destructor, NULL, NULL, NULL, 0);
985 1050
986 1051 if (tl_cache == NULL) {
987 1052 ddi_remove_minor_node(devi, NULL);
988 1053 return (DDI_FAILURE);
989 1054 }
990 1055
991 1056 tl_minors = id_space_create("tl_minor_space",
992 1057 TL_MINOR_START, MAXMIN32 - TL_MINOR_START + 1);
993 1058
994 1059 /*
995 1060 * Initialize the per-transport hash tables and serializers.
996 1061 */
997 1062 for (i = 0; i < TL_MAXTRANSPORT; i++) {
998 1063 tl_transport_state_t *t = &tl_transports[i];
999 1064
1000 1065 if (i == TL_UNUSED)
1001 1066 continue;
1002 1067
1003 1068 /* Socket COTSORD shares namespace with COTS */
1004 1069 if (i == TL_SOCK_COTSORD) {
1005 1070 t->tr_ai_hash =
1006 1071 tl_transports[TL_SOCK_COTS].tr_ai_hash;
1007 1072 ASSERT(t->tr_ai_hash != NULL);
1008 1073 t->tr_addr_hash =
1009 1074 tl_transports[TL_SOCK_COTS].tr_addr_hash;
1010 1075 ASSERT(t->tr_addr_hash != NULL);
1011 1076 continue;
1012 1077 }
1013 1078
1014 1079 /*
1015 1080 * Create hash tables.
1016 1081 */
1017 1082 (void) snprintf(name, sizeof (name), "%s_ai_hash",
1018 1083 t->tr_name);
1019 1084 #ifdef _ILP32
1020 1085 if (i & TL_SOCKET)
1021 1086 t->tr_ai_hash =
1022 1087 mod_hash_create_idhash(name, tl_hash_size - 1,
1023 1088 mod_hash_null_valdtor);
1024 1089 else
1025 1090 t->tr_ai_hash =
1026 1091 mod_hash_create_ptrhash(name, tl_hash_size,
1027 1092 mod_hash_null_valdtor, sizeof (queue_t));
1028 1093 #else
1029 1094 t->tr_ai_hash =
1030 1095 mod_hash_create_idhash(name, tl_hash_size - 1,
1031 1096 mod_hash_null_valdtor);
1032 1097 #endif /* _ILP32 */
1033 1098
1034 1099 if (i & TL_SOCKET) {
1035 1100 (void) snprintf(name, sizeof (name), "%s_sockaddr_hash",
1036 1101 t->tr_name);
1037 1102 t->tr_addr_hash = mod_hash_create_ptrhash(name,
1038 1103 tl_hash_size, mod_hash_null_valdtor,
1039 1104 sizeof (uintptr_t));
1040 1105 } else {
1041 1106 (void) snprintf(name, sizeof (name), "%s_addr_hash",
1042 1107 t->tr_name);
1043 1108 t->tr_addr_hash = mod_hash_create_extended(name,
1044 1109 tl_hash_size, mod_hash_null_keydtor,
1045 1110 mod_hash_null_valdtor,
1046 1111 tl_hash_by_addr, NULL, tl_hash_cmp_addr, KM_SLEEP);
1047 1112 }
1048 1113
1049 1114 /* Create serializer for connectionless transports. */
1050 1115 if (i & TL_TICLTS)
1051 1116 t->tr_serializer = tl_serializer_alloc(KM_SLEEP);
1052 1117 }
1053 1118
1054 1119 tl_dip = devi;
1055 1120
1056 1121 return (DDI_SUCCESS);
1057 1122 }
1058 1123
1059 1124 static int
1060 1125 tl_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1061 1126 {
1062 1127 int i;
1063 1128
1064 1129 if (cmd == DDI_SUSPEND)
1065 1130 return (DDI_SUCCESS);
1066 1131
1067 1132 if (cmd != DDI_DETACH)
1068 1133 return (DDI_FAILURE);
1069 1134
1070 1135 /*
1071 1136 * Destroy arenas and hash tables.
1072 1137 */
1073 1138 for (i = 0; i < TL_MAXTRANSPORT; i++) {
1074 1139 tl_transport_state_t *t = &tl_transports[i];
1075 1140
1076 1141 if ((i == TL_UNUSED) || (i == TL_SOCK_COTSORD))
1077 1142 continue;
1078 1143
1079 1144 EQUIV(i & TL_TICLTS, t->tr_serializer != NULL);
1080 1145 if (t->tr_serializer != NULL) {
1081 1146 tl_serializer_refrele(t->tr_serializer);
1082 1147 t->tr_serializer = NULL;
1083 1148 }
1084 1149
1085 1150 #ifdef _ILP32
1086 1151 if (i & TL_SOCKET)
1087 1152 mod_hash_destroy_idhash(t->tr_ai_hash);
1088 1153 else
1089 1154 mod_hash_destroy_ptrhash(t->tr_ai_hash);
1090 1155 #else
1091 1156 mod_hash_destroy_idhash(t->tr_ai_hash);
1092 1157 #endif /* _ILP32 */
1093 1158 t->tr_ai_hash = NULL;
1094 1159 if (i & TL_SOCKET)
1095 1160 mod_hash_destroy_ptrhash(t->tr_addr_hash);
1096 1161 else
1097 1162 mod_hash_destroy_hash(t->tr_addr_hash);
1098 1163 t->tr_addr_hash = NULL;
1099 1164 }
1100 1165
1101 1166 kmem_cache_destroy(tl_cache);
1102 1167 tl_cache = NULL;
1103 1168 id_space_destroy(tl_minors);
1104 1169 tl_minors = NULL;
1105 1170 ddi_remove_minor_node(devi, NULL);
1106 1171 return (DDI_SUCCESS);
1107 1172 }
1108 1173
1109 1174 /* ARGSUSED */
1110 1175 static int
1111 1176 tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1112 1177 {
1113 1178
1114 1179 int retcode = DDI_FAILURE;
1115 1180
1116 1181 switch (infocmd) {
1117 1182
1118 1183 case DDI_INFO_DEVT2DEVINFO:
1119 1184 if (tl_dip != NULL) {
1120 1185 *result = (void *)tl_dip;
1121 1186 retcode = DDI_SUCCESS;
1122 1187 }
1123 1188 break;
1124 1189
1125 1190 case DDI_INFO_DEVT2INSTANCE:
1126 1191 *result = NULL;
1127 1192 retcode = DDI_SUCCESS;
1128 1193 break;
1129 1194
1130 1195 default:
1131 1196 break;
1132 1197 }
1133 1198 return (retcode);
1134 1199 }
1135 1200
1136 1201 /*
1137 1202 * Endpoint reference management.
1138 1203 */
1139 1204 static void
1140 1205 tl_refhold(tl_endpt_t *tep)
1141 1206 {
1142 1207 atomic_inc_32(&tep->te_refcnt);
1143 1208 }
1144 1209
1145 1210 static void
1146 1211 tl_refrele(tl_endpt_t *tep)
1147 1212 {
1148 1213 ASSERT(tep->te_refcnt != 0);
1149 1214
1150 1215 if (atomic_dec_32_nv(&tep->te_refcnt) == 0)
1151 1216 tl_free(tep);
1152 1217 }
1153 1218
1154 1219 /*ARGSUSED*/
1155 1220 static int
1156 1221 tl_constructor(void *buf, void *cdrarg, int kmflags)
1157 1222 {
1158 1223 tl_endpt_t *tep = buf;
1159 1224
1160 1225 bzero(tep, sizeof (tl_endpt_t));
1161 1226 mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL);
1162 1227 cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL);
1163 1228 mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL);
1164 1229 cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL);
1165 1230 mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL);
1166 1231
1167 1232 return (0);
1168 1233 }
1169 1234
1170 1235 /*ARGSUSED*/
1171 1236 static void
1172 1237 tl_destructor(void *buf, void *cdrarg)
1173 1238 {
1174 1239 tl_endpt_t *tep = buf;
1175 1240
1176 1241 mutex_destroy(&tep->te_closelock);
1177 1242 cv_destroy(&tep->te_closecv);
1178 1243 mutex_destroy(&tep->te_srv_lock);
1179 1244 cv_destroy(&tep->te_srv_cv);
1180 1245 mutex_destroy(&tep->te_ser_lock);
1181 1246 }
1182 1247
1183 1248 static void
1184 1249 tl_free(tl_endpt_t *tep)
1185 1250 {
1186 1251 ASSERT(tep->te_refcnt == 0);
1187 1252 ASSERT(tep->te_transport != NULL);
1188 1253 ASSERT(tep->te_rq == NULL);
1189 1254 ASSERT(tep->te_wq == NULL);
1190 1255 ASSERT(tep->te_ser != NULL);
1191 1256 ASSERT(tep->te_ser_count == 0);
1192 1257 ASSERT(!(tep->te_flag & TL_ADDRHASHED));
1193 1258
1194 1259 if (IS_SOCKET(tep)) {
1195 1260 ASSERT(tep->te_alen == TL_SOUX_ADDRLEN);
1196 1261 ASSERT(tep->te_abuf == &tep->te_uxaddr);
1197 1262 ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor);
1198 1263 ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT);
1199 1264 } else if (tep->te_abuf != NULL) {
1200 1265 kmem_free(tep->te_abuf, tep->te_alen);
1201 1266 tep->te_alen = -1; /* uninitialized */
1202 1267 tep->te_abuf = NULL;
1203 1268 } else {
1204 1269 ASSERT(tep->te_alen == -1);
1205 1270 }
1206 1271
1207 1272 id_free(tl_minors, tep->te_minor);
1208 1273 ASSERT(tep->te_credp == NULL);
1209 1274
1210 1275 if (tep->te_hash_hndl != NULL)
1211 1276 mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl);
1212 1277
1213 1278 if (IS_COTS(tep)) {
1214 1279 TL_REMOVE_PEER(tep->te_conp);
1215 1280 TL_REMOVE_PEER(tep->te_oconp);
1216 1281 tl_serializer_refrele(tep->te_ser);
1217 1282 tep->te_ser = NULL;
1218 1283 ASSERT(tep->te_nicon == 0);
1219 1284 ASSERT(list_head(&tep->te_iconp) == NULL);
1220 1285 } else {
1221 1286 ASSERT(tep->te_lastep == NULL);
1222 1287 ASSERT(list_head(&tep->te_flowlist) == NULL);
1223 1288 ASSERT(tep->te_flowq == NULL);
1224 1289 }
1225 1290
1226 1291 ASSERT(tep->te_bufcid == 0);
1227 1292 ASSERT(tep->te_timoutid == 0);
1228 1293 bzero(&tep->te_ap, sizeof (tep->te_ap));
1229 1294 tep->te_acceptor_id = 0;
1230 1295
1231 1296 ASSERT(tep->te_closewait == 0);
1232 1297 ASSERT(!tep->te_rsrv_active);
1233 1298 ASSERT(!tep->te_wsrv_active);
1234 1299 tep->te_closing = 0;
1235 1300 tep->te_nowsrv = B_FALSE;
1236 1301 tep->te_flag = 0;
1237 1302
1238 1303 kmem_cache_free(tl_cache, tep);
1239 1304 }
1240 1305
1241 1306 /*
1242 1307 * Allocate/free reference-counted wrappers for serializers.
1243 1308 */
1244 1309 static tl_serializer_t *
1245 1310 tl_serializer_alloc(int flags)
1246 1311 {
1247 1312 tl_serializer_t *s = kmem_alloc(sizeof (tl_serializer_t), flags);
1248 1313 serializer_t *ser;
1249 1314
1250 1315 if (s == NULL)
1251 1316 return (NULL);
1252 1317
1253 1318 ser = serializer_create(flags);
1254 1319
1255 1320 if (ser == NULL) {
1256 1321 kmem_free(s, sizeof (tl_serializer_t));
1257 1322 return (NULL);
1258 1323 }
1259 1324
1260 1325 s->ts_refcnt = 1;
1261 1326 s->ts_serializer = ser;
1262 1327 return (s);
1263 1328 }
1264 1329
1265 1330 static void
1266 1331 tl_serializer_refhold(tl_serializer_t *s)
1267 1332 {
1268 1333 atomic_inc_32(&s->ts_refcnt);
1269 1334 }
1270 1335
1271 1336 static void
1272 1337 tl_serializer_refrele(tl_serializer_t *s)
1273 1338 {
1274 1339 if (atomic_dec_32_nv(&s->ts_refcnt) == 0) {
1275 1340 serializer_destroy(s->ts_serializer);
1276 1341 kmem_free(s, sizeof (tl_serializer_t));
1277 1342 }
1278 1343 }
1279 1344
1280 1345 /*
1281 1346 * Post a request on the endpoint serializer. For COTS transports keep track of
1282 1347 * the number of pending requests.
1283 1348 */
1284 1349 static void
1285 1350 tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp)
1286 1351 {
1287 1352 if (IS_COTS(tep)) {
1288 1353 mutex_enter(&tep->te_ser_lock);
1289 1354 tep->te_ser_count++;
1290 1355 mutex_exit(&tep->te_ser_lock);
1291 1356 }
1292 1357 serializer_enter(tep->te_serializer, (srproc_t *)tlproc, mp, tep);
1293 1358 }
1294 1359
1295 1360 /*
1296 1361 * Complete processing the request on the serializer. Decrement the counter for
1297 1362 * pending requests for COTS transports.
1298 1363 */
1299 1364 static void
1300 1365 tl_serializer_exit(tl_endpt_t *tep)
1301 1366 {
1302 1367 if (IS_COTS(tep)) {
1303 1368 mutex_enter(&tep->te_ser_lock);
1304 1369 ASSERT(tep->te_ser_count != 0);
1305 1370 tep->te_ser_count--;
1306 1371 mutex_exit(&tep->te_ser_lock);
1307 1372 }
1308 1373 }
1309 1374
1310 1375 /*
1311 1376 * Hash management functions.
1312 1377 */
1313 1378
1314 1379 /*
1315 1380 * Return TRUE if two addresses are equal, false otherwise.
1316 1381 */
1317 1382 static boolean_t
1318 1383 tl_eqaddr(tl_addr_t *ap1, tl_addr_t *ap2)
1319 1384 {
1320 1385 return ((ap1->ta_alen > 0) &&
1321 1386 (ap1->ta_alen == ap2->ta_alen) &&
1322 1387 (ap1->ta_zoneid == ap2->ta_zoneid) &&
1323 1388 (bcmp(ap1->ta_abuf, ap2->ta_abuf, ap1->ta_alen) == 0));
1324 1389 }
1325 1390
1326 1391 /*
1327 1392 * This function is called whenever an endpoint is found in the hash table.
1328 1393 */
1329 1394 /* ARGSUSED0 */
1330 1395 static void
1331 1396 tl_find_callback(mod_hash_key_t key, mod_hash_val_t val)
1332 1397 {
1333 1398 tl_refhold((tl_endpt_t *)val);
1334 1399 }
1335 1400
1336 1401 /*
1337 1402 * Address hash function.
1338 1403 */
1339 1404 /* ARGSUSED */
1340 1405 static uint_t
1341 1406 tl_hash_by_addr(void *hash_data, mod_hash_key_t key)
1342 1407 {
1343 1408 tl_addr_t *ap = (tl_addr_t *)key;
1344 1409 size_t len = ap->ta_alen;
1345 1410 uchar_t *p = ap->ta_abuf;
1346 1411 uint_t i, g;
1347 1412
1348 1413 ASSERT((len > 0) && (p != NULL));
1349 1414
1350 1415 for (i = ap->ta_zoneid; len-- != 0; p++) {
1351 1416 i = (i << 4) + (*p);
1352 1417 if ((g = (i & 0xf0000000U)) != 0) {
1353 1418 i ^= (g >> 24);
1354 1419 i ^= g;
1355 1420 }
1356 1421 }
1357 1422 return (i);
1358 1423 }
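
This is the classic PJW/ELF string hash, seeded with the zone ID so that identical addresses in different zones fall into different buckets. The same computation in stand-alone form (a sketch for reference, not driver code):

    #include <stddef.h>

    static unsigned int
    pjw_hash(unsigned int seed, const unsigned char *p, size_t len)
    {
            unsigned int i = seed, g;

            while (len-- != 0) {
                    i = (i << 4) + *p++;
                    if ((g = (i & 0xf0000000U)) != 0) {
                            i ^= (g >> 24);
                            i ^= g;
                    }
            }
            return (i);
    }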
1359 1424
1360 1425 /*
1361 1426 * This function is used by hash lookups. It compares two generic addresses.
1362 1427 */
1363 1428 static int
1364 1429 tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2)
1365 1430 {
1366 1431 #ifdef DEBUG
1367 1432 tl_addr_t *ap1 = (tl_addr_t *)key1;
1368 1433 tl_addr_t *ap2 = (tl_addr_t *)key2;
1369 1434
1370 1435 ASSERT(key1 != NULL);
1371 1436 ASSERT(key2 != NULL);
1372 1437
1373 1438 ASSERT(ap1->ta_abuf != NULL);
1374 1439 ASSERT(ap2->ta_abuf != NULL);
1375 1440 ASSERT(ap1->ta_alen > 0);
1376 1441 ASSERT(ap2->ta_alen > 0);
1377 1442 #endif
1378 1443
1379 1444 return (!tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2));
1380 1445 }
1381 1446
1382 1447 /*
1383 1448 * Prevent endpoint from closing if possible.
1384 1449 * Return B_TRUE on success, B_FALSE on failure.
1385 1450 */
1386 1451 static boolean_t
1387 1452 tl_noclose(tl_endpt_t *tep)
1388 1453 {
1389 1454 boolean_t rc = B_FALSE;
1390 1455
1391 1456 mutex_enter(&tep->te_closelock);
1392 1457 if (!tep->te_closing) {
1393 1458 ASSERT(tep->te_closewait == 0);
1394 1459 tep->te_closewait++;
1395 1460 rc = B_TRUE;
1396 1461 }
1397 1462 mutex_exit(&tep->te_closelock);
1398 1463 return (rc);
1399 1464 }
1400 1465
1401 1466 /*
1402 1467 * Allow endpoint to close if needed.
1403 1468 */
1404 1469 static void
1405 1470 tl_closeok(tl_endpt_t *tep)
1406 1471 {
1407 1472 ASSERT(tep->te_closewait > 0);
1408 1473 mutex_enter(&tep->te_closelock);
1409 1474 ASSERT(tep->te_closewait == 1);
1410 1475 tep->te_closewait--;
1411 1476 cv_signal(&tep->te_closecv);
1412 1477 mutex_exit(&tep->te_closelock);
1413 1478 }
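
tl_noclose() and tl_closeok() bracket any window in which a thread holds a raw endpoint pointer and must not let tl_close() complete underneath it, e.g. while tl_conn_req() switches serializers below. The usage pattern, sketched:

    if (!tl_noclose(tep)) {
            /* endpoint is already closing - fail the operation */
            tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
            return;
    }
    /* ... tep cannot finish closing here ... */
    tl_closeok(tep);        /* let a pending tl_close() proceed */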
1414 1479
1415 1480 /*
1416 1481 * STREAMS open entry point.
1417 1482 */
1418 1483 /* ARGSUSED */
1419 1484 static int
1420 1485 tl_open(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp)
1421 1486 {
1422 1487 tl_endpt_t *tep;
1423 1488 minor_t minor = getminor(*devp);
1424 1489
1425 1490 /*
1426 1491 * Driver is called directly. Both CLONEOPEN and MODOPEN
1427 1492 * are illegal.
1428 1493 */
1429 1494 if ((sflag == CLONEOPEN) || (sflag == MODOPEN))
1430 1495 return (ENXIO);
1431 1496
1432 1497 if (rq->q_ptr != NULL)
1433 1498 return (0);
1434 1499
1435 1500 /* Minor number should specify the mode used for the driver. */
1436 1501 if (minor >= TL_UNUSED)
1437 1502 return (ENXIO);
1438 1503
1439 1504 if (oflag & SO_SOCKSTR) {
1440 1505 minor |= TL_SOCKET;
1441 1506 }
1442 1507
1443 1508 tep = kmem_cache_alloc(tl_cache, KM_SLEEP);
1444 1509 tep->te_refcnt = 1;
1445 1510 tep->te_cpid = curproc->p_pid;
1446 1511 rq->q_ptr = WR(rq)->q_ptr = tep;
1447 1512 tep->te_state = TS_UNBND;
1448 1513 tep->te_credp = credp;
1449 1514 crhold(credp);
1450 1515 tep->te_zoneid = getzoneid();
1451 1516
1452 1517 tep->te_flag = minor & TL_MINOR_MASK;
1453 1518 tep->te_transport = &tl_transports[minor];
1454 1519
1455 1520 /* Allocate a unique minor number for this instance. */
1456 1521 tep->te_minor = (minor_t)id_alloc(tl_minors);
1457 1522
1458 1523 /* Reserve hash handle for bind(). */
1459 1524 (void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl);
1460 1525
1461 1526 /* Transport-specific initialization */
1462 1527 if (IS_COTS(tep)) {
1463 1528 /* Use private serializer */
1464 1529 tep->te_ser = tl_serializer_alloc(KM_SLEEP);
1465 1530
1466 1531 /* Create list for pending connections */
1467 1532 list_create(&tep->te_iconp, sizeof (tl_icon_t),
1468 1533 offsetof(tl_icon_t, ti_node));
1469 1534 tep->te_qlen = 0;
1470 1535 tep->te_nicon = 0;
1471 1536 tep->te_oconp = NULL;
1472 1537 tep->te_conp = NULL;
1473 1538 } else {
1474 1539 /* Use shared serializer */
1475 1540 tep->te_ser = tep->te_transport->tr_serializer;
1476 1541 bzero(&tep->te_flows, sizeof (list_node_t));
1477 1542 /* Create list for flow control */
1478 1543 list_create(&tep->te_flowlist, sizeof (tl_endpt_t),
1479 1544 offsetof(tl_endpt_t, te_flows));
1480 1545 tep->te_flowq = NULL;
1481 1546 tep->te_lastep = NULL;
1482 1547
1483 1548 }
1484 1549
1485 1550 /* Initialize endpoint address */
1486 1551 if (IS_SOCKET(tep)) {
1487 1552 /* Socket-specific address handling. */
1488 1553 tep->te_alen = TL_SOUX_ADDRLEN;
1489 1554 tep->te_abuf = &tep->te_uxaddr;
1490 1555 tep->te_vp = (void *)(uintptr_t)tep->te_minor;
1491 1556 tep->te_magic = SOU_MAGIC_IMPLICIT;
1492 1557 } else {
1493 1558 tep->te_alen = -1;
1494 1559 tep->te_abuf = NULL;
1495 1560 }
1496 1561
1497 1562 /* clone the driver */
1498 1563 *devp = makedevice(getmajor(*devp), tep->te_minor);
1499 1564
1500 1565 tep->te_rq = rq;
1501 1566 tep->te_wq = WR(rq);
1502 1567
1503 1568 #ifdef _ILP32
1504 1569 if (IS_SOCKET(tep))
1505 1570 tep->te_acceptor_id = tep->te_minor;
1506 1571 else
1507 1572 tep->te_acceptor_id = (t_uscalar_t)rq;
1508 1573 #else
1509 1574 tep->te_acceptor_id = tep->te_minor;
1510 1575 #endif /* _ILP32 */
1511 1576
1512 1577
1513 1578 qprocson(rq);
1514 1579
1515 1580 /*
1516 1581 * Insert acceptor ID in the hash. The AI hash always sleeps on
1517 1582 * insertion so insertion can't fail.
1518 1583 */
1519 1584 (void) mod_hash_insert(tep->te_transport->tr_ai_hash,
1520 1585 (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1521 1586 (mod_hash_val_t)tep);
1522 1587
1523 1588 return (0);
1524 1589 }
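
From user land the mode selection is implicit: the minor of the node opened (/dev/ticlts, /dev/ticots or /dev/ticotsord) picks the transport, and sockfs passes SO_SOCKSTR so the same minors also back AF_UNIX sockets. A minimal TLI sketch (error handling elided):

    #include <tiuser.h>
    #include <fcntl.h>

    static void
    demo_open(void)
    {
            int fd = t_open("/dev/ticotsord", O_RDWR, NULL);

            if (fd >= 0) {
                    /* endpoint stays in TS_UNBND until t_bind() */
                    (void) t_close(fd);
            }
    }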
1525 1590
1526 1591 /* ARGSUSED1 */
1527 1592 static int
1528 1593 tl_close(queue_t *rq, int flag, cred_t *credp)
1529 1594 {
1530 1595 tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
1531 1596 tl_endpt_t *elp = NULL;
1532 1597 queue_t *wq = tep->te_wq;
1533 1598 int rc;
1534 1599
1535 1600 ASSERT(wq == WR(rq));
1536 1601
1537 1602 /*
1538 1603 * Remove the endpoint from acceptor hash.
1539 1604 */
1540 1605 rc = mod_hash_remove(tep->te_transport->tr_ai_hash,
1541 1606 (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1542 1607 (mod_hash_val_t *)&elp);
1543 1608 ASSERT(rc == 0 && tep == elp);
1544 1609 if ((rc != 0) || (tep != elp)) {
1545 1610 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1546 1611 SL_TRACE | SL_ERROR,
1547 1612 "tl_close:inconsistency in AI hash"));
1548 1613 }
1549 1614
1550 1615 /*
1551 1616 * Wait till close is safe, then mark endpoint as closing.
1552 1617 */
1553 1618 mutex_enter(&tep->te_closelock);
1554 1619 while (tep->te_closewait)
1555 1620 cv_wait(&tep->te_closecv, &tep->te_closelock);
1556 1621 tep->te_closing = B_TRUE;
1557 1622 /*
1558 1623 * Will wait for the serializer part of the close to finish, so set
1559 1624 * te_closewait now.
1560 1625 */
1561 1626 tep->te_closewait = 1;
1562 1627 tep->te_nowsrv = B_FALSE;
1563 1628 mutex_exit(&tep->te_closelock);
1564 1629
1565 1630 /*
1566 1631 * tl_close_ser doesn't drop reference, so no need to tl_refhold.
1567 1632 * It is safe because close will wait for tl_close_ser to finish.
1568 1633 */
1569 1634 tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);
1570 1635
1571 1636 /*
1572 1637 * Wait for the first phase of close to complete before qprocsoff().
1573 1638 */
1574 1639 mutex_enter(&tep->te_closelock);
1575 1640 while (tep->te_closewait)
1576 1641 cv_wait(&tep->te_closecv, &tep->te_closelock);
1577 1642 mutex_exit(&tep->te_closelock);
1578 1643
1579 1644 qprocsoff(rq);
1580 1645
1581 1646 if (tep->te_bufcid) {
1582 1647 qunbufcall(rq, tep->te_bufcid);
1583 1648 tep->te_bufcid = 0;
1584 1649 }
1585 1650 if (tep->te_timoutid) {
1586 1651 (void) quntimeout(rq, tep->te_timoutid);
1587 1652 tep->te_timoutid = 0;
1588 1653 }
1589 1654
1590 1655 /*
1591 1656 * Finish close behind serializer.
1592 1657 *
1593 1658 * For a CLTS endpoint increase a refcount and continue close processing
1594 1659 * with serializer protection. This processing may happen asynchronously
1595 1660 * with the completion of tl_close().
1596 1661 *
1597 1662 * For a COTS endpoint, wait before destroying tep since the serializer
1598 1663 * may go away together with tep, and we need to destroy the serializer
1599 1664 * outside of serializer context.
1600 1665 */
1601 1666 ASSERT(tep->te_closewait == 0);
1602 1667 if (IS_COTS(tep))
1603 1668 tep->te_closewait = 1;
1604 1669 else
1605 1670 tl_refhold(tep);
1606 1671
1607 1672 tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp);
1608 1673
1609 1674 /*
1610 1675 * For connection-oriented transports wait for all serializer activity
1611 1676 * to settle down.
1612 1677 */
1613 1678 if (IS_COTS(tep)) {
1614 1679 mutex_enter(&tep->te_closelock);
1615 1680 while (tep->te_closewait)
1616 1681 cv_wait(&tep->te_closecv, &tep->te_closelock);
1617 1682 mutex_exit(&tep->te_closelock);
1618 1683 }
1619 1684
1620 1685 crfree(tep->te_credp);
1621 1686 tep->te_credp = NULL;
1622 1687 tep->te_wq = NULL;
1623 1688 tl_refrele(tep);
1624 1689 /*
1625 1690 * tep is likely to be destroyed now, so can't reference it any more.
1626 1691 */
1627 1692
1628 1693 rq->q_ptr = wq->q_ptr = NULL;
1629 1694 return (0);
1630 1695 }
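
The close path above is easiest to follow as a series of hand-offs between tl_close() and jobs running behind the serializer; a condensed outline of the code above (comment only):

    /*
     * tl_close()                          serializer
     * ----------                          ----------
     * wait until te_closewait == 0
     * te_closing = 1, te_closewait = 1
     * post tl_close_ser        --------->  drain queue, unbind address,
     *                                      clear q_next, tl_closeok()
     * wait, then qprocsoff()
     * COTS: te_closewait = 1
     * CLTS: tl_refhold()
     * post tl_close_finish_ser --------->  disconnect / backenable;
     *                                      COTS: tl_closeok()
     *                                      CLTS: tl_refrele()
     * COTS: wait until te_closewait == 0
     * tl_refrele() - tep may now be freed
     */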
1631 1696
1632 1697 /*
1633 1698 * First phase of close processing done behind the serializer.
1634 1699 *
1635 1700 * Do not drop the reference in the end - tl_close() wants this reference to
1636 1701 * stay.
1637 1702 */
1638 1703 /* ARGSUSED0 */
1639 1704 static void
1640 1705 tl_close_ser(mblk_t *mp, tl_endpt_t *tep)
1641 1706 {
1642 1707 ASSERT(tep->te_closing);
1643 1708 ASSERT(tep->te_closewait == 1);
1644 1709 ASSERT(!(tep->te_flag & TL_CLOSE_SER));
1645 1710
1646 1711 tep->te_flag |= TL_CLOSE_SER;
1647 1712
1648 1713 /*
1649 1714 * Drain out all messages on queue except for TL_TICOTS where the
1650 1715 * abortive release semantics permit discarding of data on close
1651 1716 */
1652 1717 if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) {
1653 1718 tl_wsrv_ser(NULL, tep);
1654 1719 }
1655 1720
1656 1721 /* Remove address from hash table. */
1657 1722 tl_addr_unbind(tep);
1658 1723 /*
1659 1724 * qprocsoff() gets confused when q->q_next is not NULL on the write
1660 1725 * queue of the driver, so clear these before qprocsoff() is called.
1661 1726 * Also clear q_next for the peer since this queue is going away.
1662 1727 */
1663 1728 if (IS_COTS(tep) && !IS_SOCKET(tep)) {
1664 1729 tl_endpt_t *peer_tep = tep->te_conp;
1665 1730
1666 1731 tep->te_wq->q_next = NULL;
1667 1732 if ((peer_tep != NULL) && !peer_tep->te_closing)
1668 1733 peer_tep->te_wq->q_next = NULL;
1669 1734 }
1670 1735
1671 1736 tep->te_rq = NULL;
1672 1737
1673 1738 /* wake up tl_close() */
1674 1739 tl_closeok(tep);
1675 1740 tl_serializer_exit(tep);
1676 1741 }
1677 1742
1678 1743 /*
1679 1744 * Second phase of tl_close(). Should wakeup tl_close() for COTS mode and drop
1680 1745 * the reference for CLTS.
1681 1746 *
1682 1747 * Called from serializer. Should drop reference count for CLTS only.
1683 1748 */
1684 1749 /* ARGSUSED0 */
1685 1750 static void
1686 1751 tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep)
1687 1752 {
1688 1753 ASSERT(tep->te_closing);
1689 1754 IMPLY(IS_CLTS(tep), tep->te_closewait == 0);
1690 1755 IMPLY(IS_COTS(tep), tep->te_closewait == 1);
1691 1756
1692 1757 tep->te_state = -1; /* Uninitialized */
1693 1758 if (IS_COTS(tep)) {
1694 1759 tl_co_unconnect(tep);
1695 1760 } else {
1696 1761 /* Connectionless specific cleanup */
1697 1762 TL_REMOVE_PEER(tep->te_lastep);
1698 1763 /*
1699 1764 * Backenable anybody that is flow controlled waiting for
1700 1765 * this endpoint.
1701 1766 */
1702 1767 tl_cl_backenable(tep);
1703 1768 if (tep->te_flowq != NULL) {
1704 1769 list_remove(&(tep->te_flowq->te_flowlist), tep);
1705 1770 tep->te_flowq = NULL;
1706 1771 }
1707 1772 }
1708 1773
1709 1774 tl_serializer_exit(tep);
1710 1775 if (IS_COTS(tep))
1711 1776 tl_closeok(tep);
1712 1777 else
1713 1778 tl_refrele(tep);
1714 1779 }
1715 1780
1716 1781 /*
1717 1782 * STREAMS write-side put procedure.
1718 1783 * Enter serializer for most of the processing.
1719 1784 *
1720 1785 * The T_CONN_REQ is processed outside of serializer.
1721 1786 */
1722 1787 static int
1723 1788 tl_wput(queue_t *wq, mblk_t *mp)
1724 1789 {
1725 1790 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
1726 1791 ssize_t msz = MBLKL(mp);
1727 1792 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
1728 1793 tlproc_t *tl_proc = NULL;
1729 1794
1730 1795 switch (DB_TYPE(mp)) {
1731 1796 case M_DATA:
1732 1797 /* Only valid for connection-oriented transports */
1733 1798 if (IS_CLTS(tep)) {
1734 1799 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1735 1800 SL_TRACE | SL_ERROR,
1736 1801 "tl_wput:M_DATA invalid for ticlts driver"));
1737 1802 tl_merror(wq, mp, EPROTO);
1738 1803 return (0);
1739 1804 }
1740 1805 tl_proc = tl_wput_data_ser;
1741 1806 break;
1742 1807
1743 1808 case M_IOCTL:
1744 1809 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1745 1810 case TL_IOC_CREDOPT:
1746 1811 /* FALLTHROUGH */
1747 1812 case TL_IOC_UCREDOPT:
1748 1813 /*
1749 1814 * Serialize endpoint state change.
1750 1815 */
1751 1816 tl_proc = tl_do_ioctl_ser;
1752 1817 break;
1753 1818
1754 1819 default:
1755 1820 miocnak(wq, mp, 0, EINVAL);
1756 1821 return (0);
1757 1822 }
1758 1823 break;
1759 1824
1760 1825 case M_FLUSH:
1761 1826 /*
1762 1827 * do canonical M_FLUSH processing
1763 1828 */
1764 1829 if (*mp->b_rptr & FLUSHW) {
1765 1830 flushq(wq, FLUSHALL);
1766 1831 *mp->b_rptr &= ~FLUSHW;
1767 1832 }
1768 1833 if (*mp->b_rptr & FLUSHR) {
1769 1834 flushq(RD(wq), FLUSHALL);
1770 1835 qreply(wq, mp);
1771 1836 } else {
1772 1837 freemsg(mp);
1773 1838 }
1774 1839 return (0);
1775 1840
1776 1841 case M_PROTO:
1777 1842 if (msz < sizeof (prim->type)) {
1778 1843 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1779 1844 SL_TRACE | SL_ERROR,
1780 1845 "tl_wput:M_PROTO data too short"));
1781 1846 tl_merror(wq, mp, EPROTO);
1782 1847 return (0);
1783 1848 }
1784 1849 switch (prim->type) {
1785 1850 case T_OPTMGMT_REQ:
1786 1851 case T_SVR4_OPTMGMT_REQ:
1787 1852 /*
1788 1853 * Process TPI option management requests immediately
1789 1854 * in put procedure regardless of in-order processing
1790 1855 * of already queued messages.
1791 1856 * (Note: This driver supports AF_UNIX socket
1792 1857 * implementation. Unless we implement this processing,
1793 1858 * setsockopt() on socket endpoint will block on flow
1794 1859 * controlled endpoints which it should not. That is
1795 1860 * required for successful execution of VSU socket tests
1796 1861 * and is consistent with BSD socket behavior).
1797 1862 */
1798 1863 tl_optmgmt(wq, mp);
1799 1864 return (0);
1800 1865 case O_T_BIND_REQ:
1801 1866 case T_BIND_REQ:
1802 1867 tl_proc = tl_bind_ser;
1803 1868 break;
1804 1869 case T_CONN_REQ:
1805 1870 if (IS_CLTS(tep)) {
1806 1871 tl_merror(wq, mp, EPROTO);
1807 1872 return (0);
1808 1873 }
1809 1874 tl_conn_req(wq, mp);
1810 1875 return (0);
1811 1876 case T_DATA_REQ:
1812 1877 case T_OPTDATA_REQ:
1813 1878 case T_EXDATA_REQ:
1814 1879 case T_ORDREL_REQ:
1815 1880 tl_proc = tl_putq_ser;
1816 1881 break;
1817 1882 case T_UNITDATA_REQ:
1818 1883 if (IS_COTS(tep) ||
1819 1884 (msz < sizeof (struct T_unitdata_req))) {
1820 1885 tl_merror(wq, mp, EPROTO);
1821 1886 return (0);
1822 1887 }
1823 1888 if ((tep->te_state == TS_IDLE) && !wq->q_first) {
1824 1889 tl_proc = tl_unitdata_ser;
1825 1890 } else {
1826 1891 tl_proc = tl_putq_ser;
1827 1892 }
1828 1893 break;
1829 1894 default:
1830 1895 /*
1831 1896 * process in service procedure if message already
1832 1897 * queued (maintain in-order processing)
1833 1898 */
1834 1899 if (wq->q_first != NULL) {
1835 1900 tl_proc = tl_putq_ser;
1836 1901 } else {
1837 1902 tl_proc = tl_wput_ser;
1838 1903 }
1839 1904 break;
1840 1905 }
1841 1906 break;
1842 1907
1843 1908 case M_PCPROTO:
1844 1909 /*
1845 1910 * Check that the message has enough data to figure out TPI
1846 1911 * primitive.
1847 1912 */
1848 1913 if (msz < sizeof (prim->type)) {
1849 1914 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1850 1915 SL_TRACE | SL_ERROR,
1851 1916 "tl_wput:M_PCPROTO data too short"));
1852 1917 tl_merror(wq, mp, EPROTO);
1853 1918 return (0);
1854 1919 }
1855 1920 switch (prim->type) {
1856 1921 case T_CAPABILITY_REQ:
1857 1922 tl_capability_req(mp, tep);
1858 1923 return (0);
1859 1924 case T_INFO_REQ:
1860 1925 tl_proc = tl_info_req_ser;
1861 1926 break;
1862 1927 case T_ADDR_REQ:
1863 1928 tl_proc = tl_addr_req_ser;
1864 1929 break;
1865 1930
1866 1931 default:
1867 1932 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1868 1933 SL_TRACE | SL_ERROR,
1869 1934 "tl_wput:unknown TPI msg primitive"));
1870 1935 tl_merror(wq, mp, EPROTO);
1871 1936 return (0);
1872 1937 }
1873 1938 break;
1874 1939 default:
1875 1940 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
1876 1941 "tl_wput:default:unexpected Streams message"));
1877 1942 freemsg(mp);
1878 1943 return (0);
1879 1944 }
1880 1945
1881 1946 /*
1882 1947 * Continue processing via serializer.
1883 1948 */
1884 1949 ASSERT(tl_proc != NULL);
1885 1950 tl_refhold(tep);
1886 1951 tl_serializer_enter(tep, tl_proc, mp);
1887 1952 return (0);
1888 1953 }
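
Everything tl_wput() dispatches on arrives as a STREAMS message; e.g. data sent on a COTS endpoint shows up as the M_DATA or M_PROTO/T_DATA_REQ case above. A sketch of handing a T_DATA_REQ down with putmsg(2); the fd is assumed to be a connected tl endpoint:

    #include <stropts.h>
    #include <sys/tihdr.h>

    static void
    send_data(int fd)       /* fd: connected tl endpoint (assumption) */
    {
            struct T_data_req req = { .PRIM_type = T_DATA_REQ, .MORE_flag = 0 };
            struct strbuf ctl = { .len = sizeof (req), .buf = (char *)&req };
            struct strbuf dat = { .len = 5, .buf = "hello" };

            (void) putmsg(fd, &ctl, &dat, 0);   /* arrives above as M_PROTO */
    }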
1889 1954
1890 1955 /*
1891 1956 * Place message on the queue while preserving order.
1892 1957 */
1893 1958 static void
1894 1959 tl_putq_ser(mblk_t *mp, tl_endpt_t *tep)
1895 1960 {
1896 1961 if (tep->te_closing) {
1897 1962 tl_wput_ser(mp, tep);
1898 1963 } else {
1899 1964 TL_PUTQ(tep, mp);
1900 1965 tl_serializer_exit(tep);
1901 1966 tl_refrele(tep);
1902 1967 }
1903 1968
1904 1969 }
1905 1970
1906 1971 static void
1907 1972 tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep)
1908 1973 {
1909 1974 ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
1910 1975
1911 1976 switch (DB_TYPE(mp)) {
1912 1977 case M_DATA:
1913 1978 tl_data(mp, tep);
1914 1979 break;
1915 1980 case M_PROTO:
1916 1981 tl_do_proto(mp, tep);
1917 1982 break;
1918 1983 default:
1919 1984 freemsg(mp);
1920 1985 break;
1921 1986 }
1922 1987 }
1923 1988
1924 1989 /*
1925 1990 * Write side put procedure called from serializer.
1926 1991 */
1927 1992 static void
1928 1993 tl_wput_ser(mblk_t *mp, tl_endpt_t *tep)
1929 1994 {
1930 1995 tl_wput_common_ser(mp, tep);
1931 1996 tl_serializer_exit(tep);
1932 1997 tl_refrele(tep);
1933 1998 }
1934 1999
1935 2000 /*
1936 2001 * M_DATA processing. Called from serializer.
1937 2002 */
1938 2003 static void
1939 2004 tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
1940 2005 {
1941 2006 tl_endpt_t *peer_tep = tep->te_conp;
1942 2007 queue_t *peer_rq;
1943 2008
1944 2009 ASSERT(DB_TYPE(mp) == M_DATA);
1945 2010 ASSERT(IS_COTS(tep));
1946 2011
1947 2012 IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer);
1948 2013
1949 2014 /*
1950 2015 * fastpath for data. Ignore flow control if tep is closing.
1951 2016 */
1952 2017 if ((peer_tep != NULL) &&
1953 2018 !peer_tep->te_closing &&
1954 2019 ((tep->te_state == TS_DATA_XFER) ||
1955 2020 (tep->te_state == TS_WREQ_ORDREL)) &&
1956 2021 (tep->te_wq != NULL) &&
1957 2022 (tep->te_wq->q_first == NULL) &&
1958 - ((peer_tep->te_state == TS_DATA_XFER) ||
1959 - (peer_tep->te_state == TS_WREQ_ORDREL)) &&
2023 + (peer_tep->te_state == TS_DATA_XFER ||
2024 + peer_tep->te_state == TS_WIND_ORDREL ||
2025 + peer_tep->te_state == TS_WREQ_ORDREL) &&
1960 2026 ((peer_rq = peer_tep->te_rq) != NULL) &&
1961 2027 (canputnext(peer_rq) || tep->te_closing)) {
1962 2028 putnext(peer_rq, mp);
1963 2029 } else if (tep->te_closing) {
1964 2030 /*
1965 2031 * It is possible that by the time we got here tep started to
1966 2032 * close. If the write queue is not empty, and the state is
1967 2033 * TS_DATA_XFER the data should be delivered in order, so we
1968 2034 * call putq() instead of freeing the data.
1969 2035 */
1970 2036 if ((tep->te_wq != NULL) &&
1971 2037 ((tep->te_state == TS_DATA_XFER) ||
1972 2038 (tep->te_state == TS_WREQ_ORDREL))) {
1973 2039 TL_PUTQ(tep, mp);
1974 2040 } else {
1975 2041 freemsg(mp);
1976 2042 }
1977 2043 } else {
1978 2044 TL_PUTQ(tep, mp);
1979 2045 }
1980 2046
1981 2047 tl_serializer_exit(tep);
1982 2048 tl_refrele(tep);
1983 2049 }
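
The widened peer-state check above is the substance of this fix: after shutdown(SHUT_WR) the peer endpoint sits in TS_WIND_ORDREL (its orderly release has been sent and it is waiting for ours), but its read side is still open, so a racing write() must keep flowing to it instead of being rejected by the fastpath. The user-visible contract, sketched with AF_UNIX sockets:

    #include <sys/socket.h>
    #include <unistd.h>

    static void
    demo_race(void)
    {
            int sv[2];
            char c;

            (void) socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
            (void) write(sv[0], "x", 1);       /* may race with shutdown below */
            (void) shutdown(sv[1], SHUT_WR);   /* sv[1] gives up its write side */
            (void) read(sv[1], &c, 1);         /* read side open: byte arrives */
    }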
1984 2050
1985 2051 /*
1986 2052 * Write side service routine.
1987 2053 *
1988 2054 * All actual processing happens within serializer which is entered
1989 2055 * synchronously. It is possible that by the time tl_wsrv() wakes up, some new
1990 2056 * messages that need processing may have arrived, so tl_wsrv repeats until
1991 2057 * queue is empty or te_nowsrv is set.
1992 2058 */
1993 2059 static int
1994 2060 tl_wsrv(queue_t *wq)
1995 2061 {
1996 2062 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
1997 2063
1998 2064 while ((wq->q_first != NULL) && !tep->te_nowsrv) {
1999 2065 mutex_enter(&tep->te_srv_lock);
2000 2066 ASSERT(tep->te_wsrv_active == B_FALSE);
2001 2067 tep->te_wsrv_active = B_TRUE;
2002 2068 mutex_exit(&tep->te_srv_lock);
2003 2069
2004 2070 tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp);
2005 2071
2006 2072 /*
2007 2073 * Wait for serializer job to complete.
2008 2074 */
2009 2075 mutex_enter(&tep->te_srv_lock);
2010 2076 while (tep->te_wsrv_active) {
2011 2077 cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2012 2078 }
2013 2079 cv_signal(&tep->te_srv_cv);
2014 2080 mutex_exit(&tep->te_srv_lock);
2015 2081 }
2016 2082 return (0);
2017 2083 }
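
The loop above is a standard flag-plus-condition-variable handshake: set the busy flag, post the job, sleep until the job clears the flag and signals. The same shape in portable pthreads form (illustrative only, not driver code):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static int active;

    /* worker side: cf. tl_wsrv_ser() */
    static void *
    job(void *arg)
    {
            /* ... the serialized work would run here ... */
            pthread_mutex_lock(&lock);
            active = 0;
            pthread_cond_signal(&done);
            pthread_mutex_unlock(&lock);
            return (arg);
    }

    /* poster side: cf. tl_wsrv() */
    void
    wait_for_job(void)
    {
            pthread_t t;

            pthread_mutex_lock(&lock);
            active = 1;                     /* cf. te_wsrv_active */
            pthread_mutex_unlock(&lock);

            (void) pthread_create(&t, NULL, job, NULL);

            pthread_mutex_lock(&lock);
            while (active)
                    pthread_cond_wait(&done, &lock);
            pthread_mutex_unlock(&lock);
            (void) pthread_join(t, NULL);
    }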
2018 2084
2019 2085 /*
2020 2086 * Serialized write side processing of the STREAMS queue.
2021 2087 * May be called either from tl_wsrv() or from tl_close() in which case ser_mp
2022 2088 * is NULL.
2023 2089 */
2024 2090 static void
2025 2091 tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
2026 2092 {
2027 2093 mblk_t *mp;
2028 2094 queue_t *wq = tep->te_wq;
2029 2095
2030 2096 ASSERT(wq != NULL);
2031 2097 while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) {
2032 2098 tl_wput_common_ser(mp, tep);
2033 2099 }
2034 2100
2035 2101 /*
2036 2102 * Wakeup service routine unless called from close.
2037 2103 * If ser_mp is specified, the caller is tl_wsrv().
2038 2104 * Otherwise, the caller is tl_close_ser(). Since tl_close_ser() doesn't
2039 2105 * call tl_serializer_enter() before calling tl_wsrv_ser(), there should
2040 2106 * be no matching tl_serializer_exit() in this case.
2041 2107 * Also, there is no need to wakeup anyone since tl_close_ser() is not
2042 2108 * waiting on te_srv_cv.
2043 2109 */
2044 2110 if (ser_mp != NULL) {
2045 2111 /*
2046 2112 * We are called from tl_wsrv.
2047 2113 */
2048 2114 mutex_enter(&tep->te_srv_lock);
2049 2115 ASSERT(tep->te_wsrv_active);
2050 2116 tep->te_wsrv_active = B_FALSE;
2051 2117 cv_signal(&tep->te_srv_cv);
2052 2118 mutex_exit(&tep->te_srv_lock);
2053 2119 tl_serializer_exit(tep);
2054 2120 }
2055 2121 }
2056 2122
2057 2123 /*
2058 2124 * Called when the stream is backenabled. Enter serializer and qenable everyone
2059 2125 * flow controlled by tep.
2060 2126 *
2061 2127 * NOTE: The service routine should enter serializer synchronously. Otherwise it
2062 2128 * is possible that two instances of tl_rsrv will be running, reusing the same
2063 2129 * rsrv mblk.
2064 2130 */
2065 2131 static int
2066 2132 tl_rsrv(queue_t *rq)
2067 2133 {
2068 2134 tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
2069 2135
2070 2136 ASSERT(rq->q_first == NULL);
2071 2137 ASSERT(tep->te_rsrv_active == 0);
2072 2138
2073 2139 tep->te_rsrv_active = B_TRUE;
2074 2140 tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp);
2075 2141 /*
2076 2142 * Wait for serializer job to complete.
2077 2143 */
2078 2144 mutex_enter(&tep->te_srv_lock);
2079 2145 while (tep->te_rsrv_active) {
2080 2146 cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2081 2147 }
2082 2148 cv_signal(&tep->te_srv_cv);
2083 2149 mutex_exit(&tep->te_srv_lock);
2084 2150 return (0);
2085 2151 }
2086 2152
2087 2153 /* ARGSUSED */
2088 2154 static void
2089 2155 tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep)
2090 2156 {
2091 2157 tl_endpt_t *peer_tep;
2092 2158
2093 2159 if (IS_CLTS(tep) && tep->te_state == TS_IDLE) {
2094 2160 tl_cl_backenable(tep);
2095 2161 } else if (
2096 2162 IS_COTS(tep) &&
2097 2163 ((peer_tep = tep->te_conp) != NULL) &&
2098 2164 !peer_tep->te_closing &&
2099 2165 ((tep->te_state == TS_DATA_XFER) ||
2100 2166 (tep->te_state == TS_WIND_ORDREL) ||
2101 2167 (tep->te_state == TS_WREQ_ORDREL))) {
2102 2168 TL_QENABLE(peer_tep);
2103 2169 }
2104 2170
2105 2171 /*
2106 2172 * Wakeup read side service routine.
2107 2173 */
2108 2174 mutex_enter(&tep->te_srv_lock);
2109 2175 ASSERT(tep->te_rsrv_active);
2110 2176 tep->te_rsrv_active = B_FALSE;
2111 2177 cv_signal(&tep->te_srv_cv);
2112 2178 mutex_exit(&tep->te_srv_lock);
2113 2179 tl_serializer_exit(tep);
2114 2180 }
2115 2181
2116 2182 /*
2117 2183 * process M_PROTO messages. Always called from serializer.
2118 2184 */
2119 2185 static void
2120 2186 tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
2121 2187 {
2122 2188 ssize_t msz = MBLKL(mp);
2123 2189 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
2124 2190
2125 2191 /* Message size was validated by tl_wput(). */
2126 2192 ASSERT(msz >= sizeof (prim->type));
2127 2193
2128 2194 switch (prim->type) {
2129 2195 case T_UNBIND_REQ:
2130 2196 tl_unbind(mp, tep);
2131 2197 break;
2132 2198
2133 2199 case T_ADDR_REQ:
2134 2200 tl_addr_req(mp, tep);
2135 2201 break;
2136 2202
2137 2203 case O_T_CONN_RES:
2138 2204 case T_CONN_RES:
2139 2205 if (IS_CLTS(tep)) {
2140 2206 tl_merror(tep->te_wq, mp, EPROTO);
2141 2207 break;
2142 2208 }
2143 2209 tl_conn_res(mp, tep);
2144 2210 break;
2145 2211
2146 2212 case T_DISCON_REQ:
2147 2213 if (IS_CLTS(tep)) {
2148 2214 tl_merror(tep->te_wq, mp, EPROTO);
2149 2215 break;
2150 2216 }
2151 2217 tl_discon_req(mp, tep);
2152 2218 break;
2153 2219
2154 2220 case T_DATA_REQ:
2155 2221 if (IS_CLTS(tep)) {
2156 2222 tl_merror(tep->te_wq, mp, EPROTO);
2157 2223 break;
2158 2224 }
2159 2225 tl_data(mp, tep);
2160 2226 break;
2161 2227
2162 2228 case T_OPTDATA_REQ:
2163 2229 if (IS_CLTS(tep)) {
2164 2230 tl_merror(tep->te_wq, mp, EPROTO);
2165 2231 break;
2166 2232 }
2167 2233 tl_data(mp, tep);
2168 2234 break;
2169 2235
2170 2236 case T_EXDATA_REQ:
2171 2237 if (IS_CLTS(tep)) {
2172 2238 tl_merror(tep->te_wq, mp, EPROTO);
2173 2239 break;
2174 2240 }
2175 2241 tl_exdata(mp, tep);
2176 2242 break;
2177 2243
2178 2244 case T_ORDREL_REQ:
2179 2245 if (!IS_COTSORD(tep)) {
2180 2246 tl_merror(tep->te_wq, mp, EPROTO);
2181 2247 break;
2182 2248 }
2183 2249 tl_ordrel(mp, tep);
2184 2250 break;
2185 2251
2186 2252 case T_UNITDATA_REQ:
2187 2253 if (IS_COTS(tep)) {
2188 2254 tl_merror(tep->te_wq, mp, EPROTO);
2189 2255 break;
2190 2256 }
2191 2257 tl_unitdata(mp, tep);
2192 2258 break;
2193 2259
2194 2260 default:
2195 2261 tl_merror(tep->te_wq, mp, EPROTO);
2196 2262 break;
2197 2263 }
2198 2264 }
2199 2265
2200 2266 /*
2201 2267 * Process ioctl from serializer.
2202 2268 * This is a wrapper around tl_do_ioctl().
2203 2269 */
2204 2270 static void
2205 2271 tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep)
2206 2272 {
2207 2273 if (!tep->te_closing)
2208 2274 tl_do_ioctl(mp, tep);
2209 2275 else
2210 2276 freemsg(mp);
2211 2277
2212 2278 tl_serializer_exit(tep);
2213 2279 tl_refrele(tep);
2214 2280 }
2215 2281
2216 2282 static void
2217 2283 tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep)
2218 2284 {
2219 2285 struct iocblk *iocbp = (struct iocblk *)mp->b_rptr;
2220 2286 int cmd = iocbp->ioc_cmd;
2221 2287 queue_t *wq = tep->te_wq;
2222 2288 int error;
2223 2289 int thisopt, otheropt;
2224 2290
2225 2291 ASSERT((cmd == TL_IOC_CREDOPT) || (cmd == TL_IOC_UCREDOPT));
2226 2292
2227 2293 switch (cmd) {
2228 2294 case TL_IOC_CREDOPT:
2229 2295 if (cmd == TL_IOC_CREDOPT) {
2230 2296 thisopt = TL_SETCRED;
2231 2297 otheropt = TL_SETUCRED;
2232 2298 } else {
2233 2299 /* FALLTHROUGH */
2234 2300 case TL_IOC_UCREDOPT:
2235 2301 thisopt = TL_SETUCRED;
2236 2302 otheropt = TL_SETCRED;
2237 2303 }
2238 2304 /*
2239 2305 * The credentials passing does not apply to sockets.
2240 2306 * Only one of the cred options can be set at a given time.
2241 2307 */
2242 2308 if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) {
2243 2309 miocnak(wq, mp, 0, EINVAL);
2244 2310 return;
2245 2311 }
2246 2312
2247 2313 /*
2248 2314 * Turn on generation of credential options for
2249 2315 * T_conn_req, T_conn_con, T_unidata_ind.
2250 2316 */
2251 2317 error = miocpullup(mp, sizeof (uint32_t));
2252 2318 if (error != 0) {
2253 2319 miocnak(wq, mp, 0, error);
2254 2320 return;
2255 2321 }
2256 2322 if (!IS_P2ALIGNED(mp->b_cont->b_rptr, sizeof (uint32_t))) {
2257 2323 miocnak(wq, mp, 0, EINVAL);
2258 2324 return;
2259 2325 }
2260 2326
2261 2327 if (*(uint32_t *)mp->b_cont->b_rptr)
2262 2328 tep->te_flag |= thisopt;
2263 2329 else
2264 2330 tep->te_flag &= ~thisopt;
2265 2331
2266 2332 miocack(wq, mp, 0, 0);
2267 2333 break;
2268 2334
2269 2335 default:
2270 2336 /* Should not be here */
2271 2337 miocnak(wq, mp, 0, EINVAL);
2272 2338 break;
2273 2339 }
2274 2340 }
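
These are STREAMS ioctls, so a TLI consumer enables credential passing with an I_STR ioctl carrying a uint32_t on/off flag. A hedged sketch (error handling elided; assumes the TL_IOC_* definitions come from <sys/tl.h> and fd is an open tl endpoint):

    #include <stropts.h>
    #include <stdint.h>
    #include <sys/tl.h>

    static void
    enable_ucred(int fd)    /* fd: tl endpoint, e.g. /dev/ticlts (assumption) */
    {
            uint32_t on = 1;
            struct strioctl si = {
                    .ic_cmd = TL_IOC_UCREDOPT,  /* ucred on T_UNITDATA_IND etc. */
                    .ic_timout = 0,
                    .ic_len = sizeof (on),
                    .ic_dp = (char *)&on,
            };

            (void) ioctl(fd, I_STR, &si);
    }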
2275 2341
2276 2342
2277 2343 /*
2278 2344 * send T_ERROR_ACK
2279 2345 * Note: assumes enough memory or caller passed big enough mp
2280 2346 * - no recovery from allocb failures
2281 2347 */
2282 2348
2283 2349 static void
2284 2350 tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err,
2285 2351 t_scalar_t unix_err, t_scalar_t type)
2286 2352 {
2287 2353 struct T_error_ack *err_ack;
2288 2354 mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
2289 2355 M_PCPROTO, T_ERROR_ACK);
2290 2356
2291 2357 if (ackmp == NULL) {
2292 2358 (void) (STRLOG(TL_ID, 0, 1, SL_TRACE | SL_ERROR,
2293 2359 "tl_error_ack:out of mblk memory"));
2294 2360 tl_merror(wq, NULL, ENOSR);
2295 2361 return;
2296 2362 }
2297 2363 err_ack = (struct T_error_ack *)ackmp->b_rptr;
2298 2364 err_ack->ERROR_prim = type;
2299 2365 err_ack->TLI_error = tli_err;
2300 2366 err_ack->UNIX_error = unix_err;
2301 2367
2302 2368 /*
2303 2369 * send error ack message
2304 2370 */
2305 2371 qreply(wq, ackmp);
2306 2372 }
2307 2373
2308 2374
2309 2375
2310 2376 /*
2311 2377 * send T_OK_ACK
2312 2378 * Note: assumes enough memory or caller passed big enough mp
2313 2379 * - no recovery from allocb failures
2314 2380 */
2315 2381 static void
2316 2382 tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type)
2317 2383 {
2318 2384 struct T_ok_ack *ok_ack;
2319 2385 mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_ok_ack),
2320 2386 M_PCPROTO, T_OK_ACK);
2321 2387
2322 2388 if (ackmp == NULL) {
2323 2389 tl_merror(wq, NULL, ENOMEM);
2324 2390 return;
2325 2391 }
2326 2392
2327 2393 ok_ack = (struct T_ok_ack *)ackmp->b_rptr;
2328 2394 ok_ack->CORRECT_prim = type;
2329 2395
2330 2396 (void) qreply(wq, ackmp);
2331 2397 }
2332 2398
2333 2399 /*
2334 2400 * Process T_BIND_REQ and O_T_BIND_REQ from serializer.
2335 2401 * This is a wrapper around tl_bind().
2336 2402 */
2337 2403 static void
2338 2404 tl_bind_ser(mblk_t *mp, tl_endpt_t *tep)
2339 2405 {
2340 2406 if (!tep->te_closing)
2341 2407 tl_bind(mp, tep);
2342 2408 else
2343 2409 freemsg(mp);
2344 2410
2345 2411 tl_serializer_exit(tep);
2346 2412 tl_refrele(tep);
2347 2413 }
2348 2414
2349 2415 /*
2350 2416 * Process T_BIND_REQ and O_T_BIND_REQ TPI requests.
2351 2417 * Assumes that the endpoint is in the unbound state.
2352 2418 */
2353 2419 static void
2354 2420 tl_bind(mblk_t *mp, tl_endpt_t *tep)
2355 2421 {
2356 2422 queue_t *wq = tep->te_wq;
2357 2423 struct T_bind_ack *b_ack;
2358 2424 struct T_bind_req *bind = (struct T_bind_req *)mp->b_rptr;
2359 2425 mblk_t *ackmp, *bamp;
2360 2426 soux_addr_t ux_addr;
2361 2427 t_uscalar_t qlen = 0;
2362 2428 t_scalar_t alen, aoff;
2363 2429 tl_addr_t addr_req;
2364 2430 void *addr_startp;
2365 2431 ssize_t msz = MBLKL(mp), basize;
2366 2432 t_scalar_t tli_err = 0, unix_err = 0;
2367 2433 t_scalar_t save_prim_type = bind->PRIM_type;
2368 2434 t_scalar_t save_state = tep->te_state;
2369 2435
2370 2436 if (tep->te_state != TS_UNBND) {
2371 2437 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2372 2438 SL_TRACE | SL_ERROR,
2373 2439 "tl_wput:bind_request:out of state, state=%d",
2374 2440 tep->te_state));
2375 2441 tli_err = TOUTSTATE;
2376 2442 goto error;
2377 2443 }
2378 2444
2379 2445 if (msz < sizeof (struct T_bind_req)) {
2380 2446 tli_err = TSYSERR;
2381 2447 unix_err = EINVAL;
2382 2448 goto error;
2383 2449 }
2384 2450
2385 - tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state);
2451 + tep->te_state = nextstate[TE_BIND_REQ][tep->te_state];
2386 2452
2387 2453 ASSERT((bind->PRIM_type == O_T_BIND_REQ) ||
2388 2454 (bind->PRIM_type == T_BIND_REQ));
2389 2455
2390 2456 alen = bind->ADDR_length;
2391 2457 aoff = bind->ADDR_offset;
2392 2458
2393 2459 /* negotiate max conn req pending */
2394 2460 if (IS_COTS(tep)) {
2395 2461 qlen = bind->CONIND_number;
2396 2462 if (qlen > tl_maxqlen)
2397 2463 qlen = tl_maxqlen;
2398 2464 }
2399 2465
2400 2466 /*
2401 2467 * Reserve hash handle. It can only be NULL if the endpoint is unbound
2402 2468 * and bound again.
2403 2469 */
2404 2470 if ((tep->te_hash_hndl == NULL) &&
2405 2471 ((tep->te_flag & TL_ADDRHASHED) == 0) &&
2406 2472 mod_hash_reserve_nosleep(tep->te_addrhash,
2407 2473 &tep->te_hash_hndl) != 0) {
2408 2474 tli_err = TSYSERR;
2409 2475 unix_err = ENOSR;
2410 2476 goto error;
2411 2477 }
2412 2478
2413 2479 /*
2414 2480 * Verify address correctness.
2415 2481 */
2416 2482 if (IS_SOCKET(tep)) {
2417 2483 ASSERT(bind->PRIM_type == O_T_BIND_REQ);
2418 2484
2419 2485 if ((alen != TL_SOUX_ADDRLEN) ||
2420 2486 (aoff < 0) ||
2421 2487 (aoff + alen > msz)) {
2422 2488 (void) (STRLOG(TL_ID, tep->te_minor,
2423 2489 1, SL_TRACE | SL_ERROR,
2424 2490 "tl_bind: invalid socket addr"));
2425 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2491 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2426 2492 tli_err = TSYSERR;
2427 2493 unix_err = EINVAL;
2428 2494 goto error;
2429 2495 }
2430 2496 /* Copy address from message to local buffer. */
2431 2497 bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr));
2432 2498 /*
2433 2499 * Check that we got correct address from sockets
2434 2500 */
2435 2501 if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
2436 2502 (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
2437 2503 (void) (STRLOG(TL_ID, tep->te_minor,
2438 2504 1, SL_TRACE | SL_ERROR,
2439 2505 "tl_bind: invalid socket magic"));
2440 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2506 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2441 2507 tli_err = TSYSERR;
2442 2508 unix_err = EINVAL;
2443 2509 goto error;
2444 2510 }
2445 2511 if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
2446 2512 (ux_addr.soua_vp != NULL)) {
2447 2513 (void) (STRLOG(TL_ID, tep->te_minor,
2448 2514 1, SL_TRACE | SL_ERROR,
2449 2515 "tl_bind: implicit addr non-empty"));
2450 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2516 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2451 2517 tli_err = TSYSERR;
2452 2518 unix_err = EINVAL;
2453 2519 goto error;
2454 2520 }
2455 2521 if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
2456 2522 (ux_addr.soua_vp == NULL)) {
2457 2523 (void) (STRLOG(TL_ID, tep->te_minor,
2458 2524 1, SL_TRACE | SL_ERROR,
2459 2525 "tl_bind: explicit addr empty"));
2460 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2526 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2461 2527 tli_err = TSYSERR;
2462 2528 unix_err = EINVAL;
2463 2529 goto error;
2464 2530 }
2465 2531 } else {
2466 2532 if ((alen > 0) && ((aoff < 0) ||
2467 2533 ((ssize_t)(aoff + alen) > msz) ||
2468 2534 ((aoff + alen) < 0))) {
2469 2535 (void) (STRLOG(TL_ID, tep->te_minor,
2470 2536 1, SL_TRACE | SL_ERROR,
2471 2537 "tl_bind: invalid message"));
2472 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2538 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2473 2539 tli_err = TSYSERR;
2474 2540 unix_err = EINVAL;
2475 2541 goto error;
2476 2542 }
2477 2543 if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
2478 2544 (void) (STRLOG(TL_ID, tep->te_minor,
2479 2545 1, SL_TRACE | SL_ERROR,
2480 2546 "tl_bind: bad addr in message"));
2481 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2547 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2482 2548 tli_err = TBADADDR;
2483 2549 goto error;
2484 2550 }
2485 2551 #ifdef DEBUG
2486 2552 /*
2487 2553 * Mild form of ASSERT()ion to detect broken TPI apps.
2488 2554 * if (!assertion)
2489 2555 * log warning;
2490 2556 */
2491 2557 if (!((alen == 0 && aoff == 0) ||
2492 2558 (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
2493 2559 (void) (STRLOG(TL_ID, tep->te_minor,
2494 2560 3, SL_TRACE | SL_ERROR,
2495 2561 "tl_bind: addr overlaps TPI message"));
2496 2562 }
2497 2563 #endif
2498 2564 }
2499 2565
2500 2566 /*
2501 2567 * Bind the address provided or allocate one if requested.
2502 2568 * Allow rebinds with a new qlen value.
2503 2569 */
2504 2570 if (IS_SOCKET(tep)) {
2505 2571 /*
2506 2572 * For anonymous requests the te_ap is already set up properly
2507 2573 * so use minor number as an address.
2508 2574 * For explicit requests need to check whether the address is
2509 2575 * already in use.
2510 2576 */
2511 2577 if (ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) {
2512 2578 int rc;
2513 2579
2514 2580 if (tep->te_flag & TL_ADDRHASHED) {
2515 2581 ASSERT(IS_COTS(tep) && tep->te_qlen == 0);
2516 2582 if (tep->te_vp == ux_addr.soua_vp)
2517 2583 goto skip_addr_bind;
2518 2584 else /* Rebind to a new address. */
2519 2585 tl_addr_unbind(tep);
2520 2586 }
2521 2587 /*
2522 2588 * Insert address in the hash if it is not already
2523 2589 * there. Since we use preallocated handle, the insert
2524 2590 * can fail only if the key is already present.
2525 2591 */
2526 2592 rc = mod_hash_insert_reserve(tep->te_addrhash,
2527 2593 (mod_hash_key_t)ux_addr.soua_vp,
2528 2594 (mod_hash_val_t)tep, tep->te_hash_hndl);
2529 2595
2530 2596 if (rc != 0) {
2531 2597 ASSERT(rc == MH_ERR_DUPLICATE);
2532 2598 /*
2533 2599 * Violate O_T_BIND_REQ semantics and fail with
2534 2600 * TADDRBUSY - sockets will not use any address
2535 2601 * other than supplied one for explicit binds.
2536 2602 */
2537 2603 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2538 2604 SL_TRACE | SL_ERROR,
2539 2605 "tl_bind:requested addr %p is busy",
2540 2606 ux_addr.soua_vp));
2541 2607 tli_err = TADDRBUSY;
2542 2608 unix_err = 0;
2543 2609 goto error;
2544 2610 }
2545 2611 tep->te_uxaddr = ux_addr;
2546 2612 tep->te_flag |= TL_ADDRHASHED;
2547 2613 tep->te_hash_hndl = NULL;
2548 2614 }
2549 2615 } else if (alen == 0) {
2550 2616 /*
2551 2617 * assign any free address
2552 2618 */
2553 2619 if (!tl_get_any_addr(tep, NULL)) {
2554 2620 (void) (STRLOG(TL_ID, tep->te_minor,
2555 2621 1, SL_TRACE | SL_ERROR,
2556 2622 "tl_bind:failed to get buffer for any "
2557 2623 "address"));
2558 2624 tli_err = TSYSERR;
2559 2625 unix_err = ENOSR;
2560 2626 goto error;
2561 2627 }
2562 2628 } else {
2563 2629 addr_req.ta_alen = alen;
2564 2630 addr_req.ta_abuf = (mp->b_rptr + aoff);
2565 2631 addr_req.ta_zoneid = tep->te_zoneid;
2566 2632
2567 2633 tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
2568 2634 if (tep->te_abuf == NULL) {
2569 2635 tli_err = TSYSERR;
2570 2636 unix_err = ENOSR;
2571 2637 goto error;
2572 2638 }
2573 2639 bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
2574 2640 tep->te_alen = alen;
2575 2641
2576 2642 if (mod_hash_insert_reserve(tep->te_addrhash,
2577 2643 (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
2578 2644 tep->te_hash_hndl) != 0) {
2579 2645 if (save_prim_type == T_BIND_REQ) {
2580 2646 /*
2581 2647 * The bind semantics for this primitive
2582 2648 * require a failure if the exact address
2583 2649 * requested is busy
2584 2650 */
2585 2651 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2586 2652 SL_TRACE | SL_ERROR,
2587 2653 "tl_bind:requested addr is busy"));
2588 2654 tli_err = TADDRBUSY;
2589 2655 unix_err = 0;
2590 2656 goto error;
2591 2657 }
2592 2658
2593 2659 /*
2594 2660 * O_T_BIND_REQ semantics say that if the requested
2595 2661 * address is busy, bind to any available free address
2596 2662 */
2597 2663 if (!tl_get_any_addr(tep, &addr_req)) {
2598 2664 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2599 2665 SL_TRACE | SL_ERROR,
2600 2666 "tl_bind:unable to get any addr buf"));
2601 2667 tli_err = TSYSERR;
2602 2668 unix_err = ENOMEM;
2603 2669 goto error;
2604 2670 }
2605 2671 } else {
2606 2672 tep->te_flag |= TL_ADDRHASHED;
2607 2673 tep->te_hash_hndl = NULL;
2608 2674 }
2609 2675 }
2610 2676
2611 2677 ASSERT(tep->te_alen >= 0);
2612 2678
2613 2679 skip_addr_bind:
2614 2680 /*
2615 2681 * prepare T_BIND_ACK TPI message
2616 2682 */
2617 2683 basize = sizeof (struct T_bind_ack) + tep->te_alen;
2618 2684 bamp = reallocb(mp, basize, 0);
2619 2685 if (bamp == NULL) {
2620 2686 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2621 2687 "tl_wput:tl_bind: allocb failed"));
2622 2688 /*
2623 2689 * roll back state changes
2624 2690 */
2625 2691 tl_addr_unbind(tep);
2626 2692 tep->te_state = TS_UNBND;
2627 2693 tl_memrecover(wq, mp, basize);
2628 2694 return;
2629 2695 }
2630 2696
2631 2697 DB_TYPE(bamp) = M_PCPROTO;
2632 2698 bamp->b_wptr = bamp->b_rptr + basize;
2633 2699 b_ack = (struct T_bind_ack *)bamp->b_rptr;
2634 2700 b_ack->PRIM_type = T_BIND_ACK;
2635 2701 b_ack->CONIND_number = qlen;
2636 2702 b_ack->ADDR_length = tep->te_alen;
2637 2703 b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack);
2638 2704 addr_startp = bamp->b_rptr + b_ack->ADDR_offset;
2639 2705 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
2640 2706
2641 2707 if (IS_COTS(tep)) {
2642 2708 tep->te_qlen = qlen;
2643 2709 if (qlen > 0)
2644 2710 tep->te_flag |= TL_LISTENER;
2645 2711 }
2646 2712
2647 - tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state);
2713 + tep->te_state = nextstate[TE_BIND_ACK][tep->te_state];
2648 2714 /*
2649 2715 * send T_BIND_ACK message
2650 2716 */
2651 2717 (void) qreply(wq, bamp);
2652 2718 return;
2653 2719
2654 2720 error:
2655 2721 ackmp = reallocb(mp, sizeof (struct T_error_ack), 0);
2656 2722 if (ackmp == NULL) {
2657 2723 /*
2658 2724 * roll back state changes
2659 2725 */
2660 2726 tep->te_state = save_state;
2661 2727 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2662 2728 return;
2663 2729 }
2664 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2730 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2665 2731 tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type);
2666 2732 }
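
For TLI endpoints the request handled above is what t_bind(3NSL) generates; socket endpoints instead arrive from sockfs with the fixed-size soux_addr_t checked earlier. A minimal TLI-side sketch (fd from t_open() as above; error handling elided):

    #include <tiuser.h>

    static void
    demo_bind(int fd)       /* fd from t_open() (assumption) */
    {
            struct t_bind req, ret;
            char addr[] = "my-endpoint";    /* arbitrary flex-address bytes */
            char raddr[64];

            req.addr.maxlen = req.addr.len = sizeof (addr);
            req.addr.buf = addr;
            req.qlen = 5;                   /* CONIND_number: listen backlog */

            ret.addr.maxlen = sizeof (raddr);
            ret.addr.buf = raddr;

            if (t_bind(fd, &req, &ret) < 0)
                    t_error("t_bind");
    }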
2667 2733
2668 2734 /*
2669 2735 * Process T_UNBIND_REQ.
2670 2736 * Called from serializer.
2671 2737 */
2672 2738 static void
2673 2739 tl_unbind(mblk_t *mp, tl_endpt_t *tep)
2674 2740 {
2675 2741 queue_t *wq;
2676 2742 mblk_t *ackmp;
2677 2743
2678 2744 if (tep->te_closing) {
2679 2745 freemsg(mp);
2680 2746 return;
2681 2747 }
2682 2748
2683 2749 wq = tep->te_wq;
2684 2750
2685 2751 /*
2686 2752 * preallocate memory for max of T_OK_ACK and T_ERROR_ACK
2687 2753 * ==> allocate for T_ERROR_ACK (known max)
2688 2754 */
2689 2755 if ((ackmp = reallocb(mp, sizeof (struct T_error_ack), 0)) == NULL) {
2690 2756 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2691 2757 return;
2692 2758 }
2693 2759 /*
2694 2760 * memory resources committed
2695 2761 * Note: no message validation. T_UNBIND_REQ message is
2696 2762 * same size as PRIM_type field so already verified earlier.
2697 2763 */
2698 2764
2699 2765 /*
2700 2766 * validate state
2701 2767 */
2702 2768 if (tep->te_state != TS_IDLE) {
2703 2769 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2704 2770 SL_TRACE | SL_ERROR,
2705 2771 "tl_wput:T_UNBIND_REQ:out of state, state=%d",
2706 2772 tep->te_state));
2707 2773 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
2708 2774 return;
2709 2775 }
2710 - tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state);
2776 + tep->te_state = nextstate[TE_UNBIND_REQ][tep->te_state];
2711 2777
2712 2778 /*
2713 2779 * TPI says on T_UNBIND_REQ:
2714 2780 * send up a M_FLUSH to flush both
2715 2781 * read and write queues
2716 2782 */
2717 2783 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
2718 2784
2719 2785 if (!IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
2720 2786 tep->te_magic != SOU_MAGIC_EXPLICIT) {
2721 2787
2722 2788 /*
2723 2789 * Sockets use bind with qlen==0 followed by bind() to
2724 2790 * the same address with qlen > 0 for listeners.
2725 2791 * We allow rebind with a new qlen value.
2726 2792 */
2727 2793 tl_addr_unbind(tep);
2728 2794 }
2729 2795
2730 - tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2796 + tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
2731 2797 /*
2732 2798 * send T_OK_ACK
2733 2799 */
2734 2800 tl_ok_ack(wq, ackmp, T_UNBIND_REQ);
2735 2801 }
2736 2802
2737 2803
2738 2804 /*
2739 2805 * Option management code from drv/ip is used here
2740 2806 * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr
2741 2807 * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ.
2742 2808 * However, that is what we want as that option is 'unorthodox'
2743 2809 * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND
2744 2810 * and not in T_SVR4_OPTMGMT_REQ/ACK
2745 2811 * Note2: use of optcom_req means this routine is an exception to
2746 2812 * recovery from allocb() failures.
2747 2813 */
2748 2814
2749 2815 static void
2750 2816 tl_optmgmt(queue_t *wq, mblk_t *mp)
2751 2817 {
2752 2818 tl_endpt_t *tep;
2753 2819 mblk_t *ackmp;
2754 2820 union T_primitives *prim;
2755 2821 cred_t *cr;
2756 2822
2757 2823 tep = (tl_endpt_t *)wq->q_ptr;
2758 2824 prim = (union T_primitives *)mp->b_rptr;
2759 2825
2760 2826 /*
2761 2827 * All Solaris components should pass a db_credp
2762 2828 * for this TPI message, hence we ASSERT.
2763 2829 * But in case there is some other M_PROTO that looks
2764 2830 * like a TPI message sent by some other kernel
2765 2831 * component, we check and return an error.
2766 2832 */
2767 2833 cr = msg_getcred(mp, NULL);
2768 2834 ASSERT(cr != NULL);
2769 2835 if (cr == NULL) {
2770 2836 tl_error_ack(wq, mp, TSYSERR, EINVAL, prim->type);
2771 2837 return;
2772 2838 }
2773 2839
2774 2840 /* all states OK for AF_UNIX options? */
2775 2841 if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE &&
2776 2842 prim->type == T_SVR4_OPTMGMT_REQ) {
2777 2843 /*
2778 2844 * Broken TLI semantics that options can only be managed
2779 2845 * in TS_IDLE state. Needed for Sparc ABI test suite that
2780 2846 * tests this TLI (mis)feature using this device driver.
2781 2847 */
2782 2848 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2783 2849 SL_TRACE | SL_ERROR,
2784 2850 "tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d",
2785 2851 tep->te_state));
2786 2852 /*
2787 2853 * preallocate memory for T_ERROR_ACK
2788 2854 */
2789 2855 ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
2790 2856 if (ackmp == NULL) {
2791 2857 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2792 2858 return;
2793 2859 }
2794 2860
2795 2861 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_SVR4_OPTMGMT_REQ);
2796 2862 freemsg(mp);
2797 2863 return;
2798 2864 }
2799 2865
2800 2866 /*
2801 2867 * call common option management routine from drv/ip
2802 2868 */
2803 2869 if (prim->type == T_SVR4_OPTMGMT_REQ) {
2804 2870 svr4_optcom_req(wq, mp, cr, &tl_opt_obj);
2805 2871 } else {
2806 2872 ASSERT(prim->type == T_OPTMGMT_REQ);
2807 2873 tpi_optcom_req(wq, mp, cr, &tl_opt_obj);
2808 2874 }
2809 2875 }
2810 2876
2811 2877 /*
2812 2878 * Handle T_conn_req - the driver part of accept().
2813 2879 * If TL_SET[U]CRED generate the credentials options.
2814 2880 * If this is a socket pass through options unmodified.
2815 2881 * For sockets generate the T_CONN_CON here instead of
2816 2882 * waiting for the T_CONN_RES.
2817 2883 */
2818 2884 static void
2819 2885 tl_conn_req(queue_t *wq, mblk_t *mp)
2820 2886 {
2821 2887 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
2822 2888 struct T_conn_req *creq = (struct T_conn_req *)mp->b_rptr;
2823 2889 ssize_t msz = MBLKL(mp);
2824 2890 t_scalar_t alen, aoff, olen, ooff, err = 0;
2825 2891 tl_endpt_t *peer_tep = NULL;
2826 2892 mblk_t *ackmp;
2827 2893 mblk_t *dimp;
2828 2894 struct T_discon_ind *di;
2829 2895 soux_addr_t ux_addr;
2830 2896 tl_addr_t dst;
2831 2897
2832 2898 ASSERT(IS_COTS(tep));
2833 2899
2834 2900 if (tep->te_closing) {
2835 2901 freemsg(mp);
2836 2902 return;
2837 2903 }
2838 2904
2839 2905 /*
2840 2906 * preallocate memory for:
2841 2907 * 1. max of T_ERROR_ACK and T_OK_ACK
2842 2908 * ==> known max T_ERROR_ACK
2843 2909 * 2. max of T_DISCON_IND and T_CONN_IND
2844 2910 */
2845 2911 ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
2846 2912 if (ackmp == NULL) {
2847 2913 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2848 2914 return;
2849 2915 }
2850 2916 /*
2851 2917 * memory committed for T_OK_ACK/T_ERROR_ACK now
2852 2918 * will be committed for T_DISCON_IND/T_CONN_IND later
2853 2919 */
2854 2920
2855 2921 if (tep->te_state != TS_IDLE) {
2856 2922 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2857 2923 SL_TRACE | SL_ERROR,
2858 2924 "tl_wput:T_CONN_REQ:out of state, state=%d",
2859 2925 tep->te_state));
2860 2926 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
2861 2927 freemsg(mp);
2862 2928 return;
2863 2929 }
2864 2930
2865 2931 /*
2866 2932 * validate the message
2867 2933 * Note: dereference fields in struct inside message only
2868 2934 * after validating the message length.
2869 2935 */
2870 2936 if (msz < sizeof (struct T_conn_req)) {
2871 2937 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2872 2938 "tl_conn_req:invalid message length"));
2873 2939 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2874 2940 freemsg(mp);
2875 2941 return;
2876 2942 }
2877 2943 alen = creq->DEST_length;
2878 2944 aoff = creq->DEST_offset;
2879 2945 olen = creq->OPT_length;
2880 2946 ooff = creq->OPT_offset;
2881 2947 if (olen == 0)
2882 2948 ooff = 0;
2883 2949
2884 2950 if (IS_SOCKET(tep)) {
2885 2951 if ((alen != TL_SOUX_ADDRLEN) ||
2886 2952 (aoff < 0) ||
2887 2953 (aoff + alen > msz) ||
2888 2954 (alen > msz - sizeof (struct T_conn_req))) {
2889 2955 (void) (STRLOG(TL_ID, tep->te_minor,
2890 2956 1, SL_TRACE | SL_ERROR,
2891 2957 "tl_conn_req: invalid socket addr"));
2892 2958 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2893 2959 freemsg(mp);
2894 2960 return;
2895 2961 }
2896 2962 bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
2897 2963 if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
2898 2964 (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
2899 2965 (void) (STRLOG(TL_ID, tep->te_minor,
2900 2966 1, SL_TRACE | SL_ERROR,
2901 2967 "tl_conn_req: invalid socket magic"));
2902 2968 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2903 2969 freemsg(mp);
2904 2970 return;
2905 2971 }
2906 2972 } else {
2907 2973 if ((alen > 0 && ((aoff + alen) > msz || aoff + alen < 0)) ||
2908 2974 (olen > 0 && ((ssize_t)(ooff + olen) > msz ||
2909 2975 ooff + olen < 0)) ||
2910 2976 olen < 0 || ooff < 0) {
2911 2977 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2912 2978 SL_TRACE | SL_ERROR,
2913 2979 "tl_conn_req:invalid message"));
2914 2980 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2915 2981 freemsg(mp);
2916 2982 return;
2917 2983 }
2918 2984
2919 2985 if (alen <= 0 || aoff < 0 ||
2920 2986 (ssize_t)alen > msz - sizeof (struct T_conn_req)) {
2921 2987 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2922 2988 SL_TRACE | SL_ERROR,
2923 2989 "tl_conn_req:bad addr in message, "
2924 2990 "alen=%d, msz=%ld",
2925 2991 alen, msz));
2926 2992 tl_error_ack(wq, ackmp, TBADADDR, 0, T_CONN_REQ);
2927 2993 freemsg(mp);
2928 2994 return;
2929 2995 }
2930 2996 #ifdef DEBUG
2931 2997 /*
2932 2998 * Mild form of ASSERT()ion to detect broken TPI apps.
2933 2999 * if (!assertion)
2934 3000 * log warning;
2935 3001 */
2936 3002 if (!(aoff >= (t_scalar_t)sizeof (struct T_conn_req))) {
2937 3003 (void) (STRLOG(TL_ID, tep->te_minor, 3,
2938 3004 SL_TRACE | SL_ERROR,
2939 3005 "tl_conn_req: addr overlaps TPI message"));
2940 3006 }
2941 3007 #endif
2942 3008 if (olen) {
2943 3009 /*
2944 3010 * no opts in connect req
2945 3011 * supported in this provider except for sockets.
2946 3012 */
2947 3013 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2948 3014 SL_TRACE | SL_ERROR,
2949 3015 "tl_conn_req:options not supported "
2950 3016 "in message"));
2951 3017 tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
2952 3018 freemsg(mp);
2953 3019 return;
2954 3020 }
2955 3021 }
2956 3022
2957 3023 /*
2958 3024 * Prevent tep from closing on us.
2959 3025 */
2960 3026 if (!tl_noclose(tep)) {
2961 3027 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2962 3028 "tl_conn_req:endpoint is closing"));
2963 3029 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
2964 3030 freemsg(mp);
2965 3031 return;
2966 3032 }
2967 3033
2968 - tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state);
3034 + tep->te_state = nextstate[TE_CONN_REQ][tep->te_state];
2969 3035 /*
2970 3036 * get endpoint to connect to
2971 3037 * check that peer with DEST addr is bound to addr
2972 3038 * and has CONIND_number > 0
2973 3039 */
2974 3040 dst.ta_alen = alen;
2975 3041 dst.ta_abuf = mp->b_rptr + aoff;
2976 3042 dst.ta_zoneid = tep->te_zoneid;
2977 3043
2978 3044 /*
2979 3045 * Verify if remote addr is in use
2980 3046 */
2981 3047 peer_tep = (IS_SOCKET(tep) ?
2982 3048 tl_sock_find_peer(tep, &ux_addr) :
2983 3049 tl_find_peer(tep, &dst));
2984 3050
2985 3051 if (peer_tep == NULL) {
2986 3052 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2987 3053 "tl_conn_req:no one at connect address"));
2988 3054 err = ECONNREFUSED;
2989 3055 } else if (peer_tep->te_nicon >= peer_tep->te_qlen) {
2990 3056 /*
2991 3057 * validate that number of incoming connection is
2992 3058 * not to capacity on destination endpoint
2993 3059 */
2994 3060 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
2995 3061 "tl_conn_req: qlen overflow connection refused"));
2996 3062 err = ECONNREFUSED;
2997 3063 }
2998 3064
2999 3065 /*
3000 3066 * Send T_DISCON_IND in case of error
3001 3067 */
3002 3068 if (err != 0) {
3003 3069 if (peer_tep != NULL)
3004 3070 tl_refrele(peer_tep);
3005 3071 /* We are still expected to send T_OK_ACK */
3006 - tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3072 + tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
3007 3073 tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
3008 3074 tl_closeok(tep);
3009 3075 dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind),
3010 3076 M_PROTO, T_DISCON_IND);
3011 3077 if (dimp == NULL) {
3012 3078 tl_merror(wq, NULL, ENOSR);
3013 3079 return;
3014 3080 }
3015 3081 di = (struct T_discon_ind *)dimp->b_rptr;
3016 3082 di->DISCON_reason = err;
3017 3083 di->SEQ_number = BADSEQNUM;
3018 3084
3019 3085 tep->te_state = TS_IDLE;
3020 3086 /*
3021 3087 * send T_DISCON_IND message
3022 3088 */
3023 3089 putnext(tep->te_rq, dimp);
3024 3090 return;
3025 3091 }
3026 3092
3027 3093 ASSERT(IS_COTS(peer_tep));
3028 3094
3029 3095 /*
3030 3096 * Found the listener. At this point processing will continue on
3031 3097 * listener serializer. Close of the endpoint should be blocked while we
3032 3098 * switch serializers.
3033 3099 */
3034 3100 tl_serializer_refhold(peer_tep->te_ser);
3035 3101 tl_serializer_refrele(tep->te_ser);
3036 3102 tep->te_ser = peer_tep->te_ser;
3037 3103 ASSERT(tep->te_oconp == NULL);
3038 3104 tep->te_oconp = peer_tep;
3039 3105
3040 3106 /*
3041 3107 * It is safe to close now. Close may continue on listener serializer.
3042 3108 */
3043 3109 tl_closeok(tep);
3044 3110
3045 3111 /*
3046 3112 * Pass ackmp to tl_conn_req_ser. Note that mp->b_cont may contain user
3047 3113 * data, so we link mp to ackmp.
3048 3114 */
3049 3115 ackmp->b_cont = mp;
3050 3116 mp = ackmp;
3051 3117
3052 3118 tl_refhold(tep);
3053 3119 tl_serializer_enter(tep, tl_conn_req_ser, mp);
3054 3120 }
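
Because a socket endpoint generates the T_CONN_CON here instead of waiting for the T_CONN_RES (unless tl_disable_early_connect overrides it), an AF_UNIX connect() succeeds as soon as the T_CONN_IND is queued on the listener, before any accept() runs. Sketched:

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <string.h>

    static void
    demo_early_connect(void)
    {
            struct sockaddr_un sun;
            int ls = socket(AF_UNIX, SOCK_STREAM, 0);
            int cs = socket(AF_UNIX, SOCK_STREAM, 0);

            (void) memset(&sun, 0, sizeof (sun));
            sun.sun_family = AF_UNIX;
            (void) strlcpy(sun.sun_path, "/tmp/tl_demo", sizeof (sun.sun_path));

            (void) bind(ls, (struct sockaddr *)&sun, sizeof (sun));
            (void) listen(ls, 5);   /* qlen > 0: listener takes T_CONN_IND */
            /* succeeds before any accept() on ls: the early T_CONN_CON */
            (void) connect(cs, (struct sockaddr *)&sun, sizeof (sun));
    }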
3055 3121
3056 3122 /*
3057 3123 * Finish T_CONN_REQ processing on listener serializer.
3058 3124 */
3059 3125 static void
3060 3126 tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
3061 3127 {
3062 3128 queue_t *wq;
3063 3129 tl_endpt_t *peer_tep = tep->te_oconp;
3064 3130 mblk_t *confmp, *cimp, *indmp;
3065 3131 void *opts = NULL;
3066 3132 mblk_t *ackmp = mp;
3067 3133 struct T_conn_req *creq = (struct T_conn_req *)mp->b_cont->b_rptr;
3068 3134 struct T_conn_ind *ci;
3069 3135 tl_icon_t *tip;
3070 3136 void *addr_startp;
3071 3137 t_scalar_t olen = creq->OPT_length;
3072 3138 t_scalar_t ooff = creq->OPT_offset;
3073 3139 size_t ci_msz;
3074 3140 size_t size;
3075 3141 cred_t *cr = NULL;
3076 3142 pid_t cpid;
3077 3143
3078 3144 if (tep->te_closing) {
3079 3145 TL_UNCONNECT(tep->te_oconp);
3080 3146 tl_serializer_exit(tep);
3081 3147 tl_refrele(tep);
3082 3148 freemsg(mp);
3083 3149 return;
3084 3150 }
3085 3151
3086 3152 wq = tep->te_wq;
3087 3153 tep->te_flag |= TL_EAGER;
3088 3154
3089 3155 /*
3090 3156 * Extract preallocated ackmp from mp.
3091 3157 */
3092 3158 mp = mp->b_cont;
3093 3159 ackmp->b_cont = NULL;
3094 3160
3095 3161 if (olen == 0)
3096 3162 ooff = 0;
3097 3163
3098 3164 if (peer_tep->te_closing ||
3099 3165 !((peer_tep->te_state == TS_IDLE) ||
3100 3166 (peer_tep->te_state == TS_WRES_CIND))) {
3101 3167 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3102 3168 "tl_conn_req:peer in bad state (%d)",
3103 3169 peer_tep->te_state));
3104 3170 TL_UNCONNECT(tep->te_oconp);
3105 3171 tl_error_ack(wq, mp, TSYSERR, ECONNREFUSED, T_CONN_REQ);
3106 3172 freemsg(ackmp);
3107 3173 tl_serializer_exit(tep);
3108 3174 tl_refrele(tep);
3109 3175 return;
3110 3176 }
3111 3177
3112 3178 /*
3113 3179 * preallocate now for T_DISCON_IND or T_CONN_IND
3114 3180 */
3115 3181 /*
3116 3182 * calculate length of T_CONN_IND message
3117 3183 */
3118 3184 if (peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED)) {
3119 3185 cr = msg_getcred(mp, &cpid);
3120 3186 ASSERT(cr != NULL);
3121 3187 if (peer_tep->te_flag & TL_SETCRED) {
3122 3188 ooff = 0;
3123 3189 olen = (t_scalar_t) sizeof (struct opthdr) +
3124 3190 OPTLEN(sizeof (tl_credopt_t));
3125 3191 /* 1 option only */
3126 3192 } else {
3127 3193 ooff = 0;
3128 3194 olen = (t_scalar_t)sizeof (struct opthdr) +
3129 3195 OPTLEN(ucredminsize(cr));
3130 3196 /* 1 option only */
3131 3197 }
3132 3198 }
3133 3199 ci_msz = sizeof (struct T_conn_ind) + tep->te_alen;
3134 3200 ci_msz = T_ALIGN(ci_msz) + olen;
3135 3201 size = max(ci_msz, sizeof (struct T_discon_ind));
3136 3202
3137 3203 /*
3138 3204 * Save options from mp - we'll need them for T_CONN_IND.
3139 3205 */
3140 3206 if (ooff != 0) {
3141 3207 opts = kmem_alloc(olen, KM_NOSLEEP);
3142 3208 if (opts == NULL) {
3143 3209 /*
3144 3210 * roll back state changes
3145 3211 */
3146 3212 tep->te_state = TS_IDLE;
3147 3213 tl_memrecover(wq, mp, size);
3148 3214 freemsg(ackmp);
3149 3215 TL_UNCONNECT(tep->te_oconp);
3150 3216 tl_serializer_exit(tep);
3151 3217 tl_refrele(tep);
3152 3218 return;
3153 3219 }
3154 3220 /* Copy options to a temp buffer */
3155 3221 bcopy(mp->b_rptr + ooff, opts, olen);
3156 3222 }
3157 3223
3158 3224 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
3159 3225 /*
3160 3226 * Generate a T_CONN_CON that has the identical address
3161 3227 * (and options) as the T_CONN_REQ.
3162 3228 * NOTE: assumes that the T_conn_req and T_conn_con structures
3163 3229 * are isomorphic.
3164 3230 */
3165 3231 confmp = copyb(mp);
3166 3232 if (confmp == NULL) {
3167 3233 /*
3168 3234 * roll back state changes
3169 3235 */
3170 3236 tep->te_state = TS_IDLE;
3171 3237 tl_memrecover(wq, mp, mp->b_wptr - mp->b_rptr);
3172 3238 freemsg(ackmp);
3173 3239 if (opts != NULL)
3174 3240 kmem_free(opts, olen);
3175 3241 TL_UNCONNECT(tep->te_oconp);
3176 3242 tl_serializer_exit(tep);
3177 3243 tl_refrele(tep);
3178 3244 return;
3179 3245 }
3180 3246 ((struct T_conn_con *)(confmp->b_rptr))->PRIM_type =
3181 3247 T_CONN_CON;
3182 3248 } else {
3183 3249 confmp = NULL;
3184 3250 }
3185 3251 if ((indmp = reallocb(mp, size, 0)) == NULL) {
3186 3252 /*
3187 3253 * roll back state changes
3188 3254 */
3189 3255 tep->te_state = TS_IDLE;
3190 3256 tl_memrecover(wq, mp, size);
3191 3257 freemsg(ackmp);
3192 3258 if (opts != NULL)
3193 3259 kmem_free(opts, olen);
3194 3260 freemsg(confmp);
3195 3261 TL_UNCONNECT(tep->te_oconp);
3196 3262 tl_serializer_exit(tep);
3197 3263 tl_refrele(tep);
3198 3264 return;
3199 3265 }
3200 3266
3201 3267 tip = kmem_zalloc(sizeof (*tip), KM_NOSLEEP);
3202 3268 if (tip == NULL) {
3203 3269 /*
3204 3270 * roll back state changes
3205 3271 */
3206 3272 tep->te_state = TS_IDLE;
3207 3273 tl_memrecover(wq, indmp, sizeof (*tip));
3208 3274 freemsg(ackmp);
3209 3275 if (opts != NULL)
3210 3276 kmem_free(opts, olen);
3211 3277 freemsg(confmp);
3212 3278 TL_UNCONNECT(tep->te_oconp);
3213 3279 tl_serializer_exit(tep);
3214 3280 tl_refrele(tep);
3215 3281 return;
3216 3282 }
3217 3283 tip->ti_mp = NULL;
3218 3284
3219 3285 /*
3220 3286 * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON
3221 3287 * and tl_icon_t cell.
3222 3288 */
3223 3289
3224 3290 /*
3225 3291 * ack validity of request and send the peer credential in the ACK.
3226 3292 */
3227 - tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3293 + tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
3228 3294
3229 3295 if (peer_tep != NULL && peer_tep->te_credp != NULL &&
3230 3296 confmp != NULL) {
3231 3297 mblk_setcred(confmp, peer_tep->te_credp, peer_tep->te_cpid);
3232 3298 }
3233 3299
3234 3300 tl_ok_ack(wq, ackmp, T_CONN_REQ);
3235 3301
3236 3302 /*
3237 3303 * prepare message to send T_CONN_IND
3238 3304 */
3239 3305 /*
3240 3306 * allocate the message - original data blocks retained
3241 3307 * in the returned mblk
3242 3308 */
3243 3309 cimp = tl_resizemp(indmp, size);
3244 3310 if (cimp == NULL) {
3245 3311 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3246 3312 "tl_conn_req:con_ind:allocb failure"));
3247 3313 tl_merror(wq, indmp, ENOMEM);
3248 3314 TL_UNCONNECT(tep->te_oconp);
3249 3315 tl_serializer_exit(tep);
3250 3316 tl_refrele(tep);
3251 3317 if (opts != NULL)
3252 3318 kmem_free(opts, olen);
3253 3319 freemsg(confmp);
3254 3320 ASSERT(tip->ti_mp == NULL);
3255 3321 kmem_free(tip, sizeof (*tip));
3256 3322 return;
3257 3323 }
3258 3324
3259 3325 DB_TYPE(cimp) = M_PROTO;
3260 3326 ci = (struct T_conn_ind *)cimp->b_rptr;
3261 3327 ci->PRIM_type = T_CONN_IND;
3262 3328 ci->SRC_offset = (t_scalar_t)sizeof (struct T_conn_ind);
3263 3329 ci->SRC_length = tep->te_alen;
3264 3330 ci->SEQ_number = tep->te_seqno;
3265 3331
3266 3332 addr_startp = cimp->b_rptr + ci->SRC_offset;
3267 3333 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
3268 3334 if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
3269 3335
3270 3336 ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
3271 3337 ci->SRC_length);
3272 3338 ci->OPT_length = olen; /* because only 1 option */
3273 3339 tl_fill_option(cimp->b_rptr + ci->OPT_offset,
3274 3340 cr, cpid,
3275 3341 peer_tep->te_flag, peer_tep->te_credp);
3276 3342 } else if (ooff != 0) {
3277 3343 /* Copy option from T_CONN_REQ */
3278 3344 ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
3279 3345 ci->SRC_length);
3280 3346 ci->OPT_length = olen;
3281 3347 ASSERT(opts != NULL);
3282 3348 bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen);
3283 3349 } else {
3284 3350 ci->OPT_offset = 0;
3285 3351 ci->OPT_length = 0;
3286 3352 }
3287 3353 if (opts != NULL)
3288 3354 kmem_free(opts, olen);
3289 3355
3290 3356 /*
3291 3357 * register connection request with server peer
3292 3358 * append to list of incoming connections
3293 3359 * increment references for both peer_tep and tep: peer_tep is placed on
3294 3360 * te_oconp and tep is placed on listeners queue.
3295 3361 */
3296 3362 tip->ti_tep = tep;
3297 3363 tip->ti_seqno = tep->te_seqno;
3298 3364 list_insert_tail(&peer_tep->te_iconp, tip);
3299 3365 peer_tep->te_nicon++;
3300 3366
3301 - peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state);
3367 + peer_tep->te_state = nextstate[TE_CONN_IND][peer_tep->te_state];
3302 3368 /*
3303 3369 * send the T_CONN_IND message
3304 3370 */
3305 3371 putnext(peer_tep->te_rq, cimp);
3306 3372
3307 3373 /*
3308 3374 * Send a T_CONN_CON message for sockets.
3309 3375 * Disable the queues until we have reached the correct state!
3310 3376 */
3311 3377 if (confmp != NULL) {
3312 - tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state);
3378 + tep->te_state = nextstate[TE_CONN_CON][tep->te_state];
3313 3379 noenable(wq);
3314 3380 putnext(tep->te_rq, confmp);
3315 3381 }
3316 3382 /*
3317 3383 * Now we need to increment tep reference because tep is referenced by
3318 3384 * server list of pending connections. We also need to decrement
3319 3385 * reference before exiting serializer. Two operations void each other
3320 3386	 * reference before exiting the serializer. The two operations cancel
3321 3387	 * each other out, so we don't modify the reference at all.
3322 3388 ASSERT(tep->te_refcnt >= 2);
3323 3389 ASSERT(peer_tep->te_refcnt >= 2);
3324 3390 tl_serializer_exit(tep);
3325 3391 }
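
The T_CONN_IND sizing in tl_conn_req_ser() places the peer address right
after the fixed header and rounds the option offset up with T_ALIGN()
before appending the options. A small sketch of that offset arithmetic,
using a stand-in ALIGN4() macro rather than the real T_ALIGN() definition,
with made-up sizes:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for T_ALIGN(): round up to a 4-byte boundary. */
#define	ALIGN4(x)	(((x) + 3UL) & ~3UL)

int
main(void)
{
	size_t hdr_sz = 16;	/* pretend fixed header size */
	size_t addr_len = 10;	/* pretend address length */
	size_t opt_len = 8;	/* pretend option length */

	/* Address follows the header; options start on an aligned offset. */
	size_t addr_off = hdr_sz;
	size_t opt_off = ALIGN4(addr_off + addr_len);
	size_t total = opt_off + opt_len;

	printf("addr@%zu opt@%zu total=%zu bytes\n", addr_off, opt_off, total);
	return (0);
}
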
3326 3392
3327 3393
3328 3394
3329 3395 /*
3330 3396  * Handle T_conn_res on the listener stream. Called on the
3331 3397  * listener serializer.
3332 3398  * No one accesses the acceptor at this point, so it is safe to
3333 3399  * modify the acceptor.
3334 3400  * Switch the eager's serializer to the acceptor's.
3335 3401  *
3336 3402  * If TL_SET[U]CRED, generate the credentials options.
3337 3403  * For sockets, tl_conn_req has already generated the T_CONN_CON.
3338 3404  */
3339 3405 static void
3340 3406 tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
3341 3407 {
3342 3408 queue_t *wq;
3343 3409 struct T_conn_res *cres = (struct T_conn_res *)mp->b_rptr;
3344 3410 ssize_t msz = MBLKL(mp);
3345 3411 t_scalar_t olen, ooff, err = 0;
3346 3412 t_scalar_t prim = cres->PRIM_type;
3347 3413 uchar_t *addr_startp;
3348 3414 tl_endpt_t *acc_ep = NULL, *cl_ep = NULL;
3349 3415 tl_icon_t *tip;
3350 3416 size_t size;
3351 3417 mblk_t *ackmp, *respmp;
3352 3418 mblk_t *dimp, *ccmp = NULL;
3353 3419 struct T_discon_ind *di;
3354 3420 struct T_conn_con *cc;
3355 3421 boolean_t client_noclose_set = B_FALSE;
3356 3422 boolean_t switch_client_serializer = B_TRUE;
3357 3423
3358 3424 ASSERT(IS_COTS(tep));
3359 3425
3360 3426 if (tep->te_closing) {
3361 3427 freemsg(mp);
3362 3428 return;
3363 3429 }
3364 3430
3365 3431 wq = tep->te_wq;
3366 3432
3367 3433 /*
3368 3434 * preallocate memory for:
3369 3435 * 1. max of T_ERROR_ACK and T_OK_ACK
3370 3436 * ==> known max T_ERROR_ACK
3371 3437 * 2. max of T_DISCON_IND and T_CONN_CON
3372 3438 */
3373 3439 ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
3374 3440 if (ackmp == NULL) {
3375 3441 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
3376 3442 return;
3377 3443 }
3378 3444 /*
3379 3445 * memory committed for T_OK_ACK/T_ERROR_ACK now
3380 3446 * will be committed for T_DISCON_IND/T_CONN_CON later
3381 3447 */
3382 3448
3383 3449
3384 3450 ASSERT(prim == T_CONN_RES || prim == O_T_CONN_RES);
3385 3451
3386 3452 /*
3387 3453 * validate state
3388 3454 */
3389 3455 if (tep->te_state != TS_WRES_CIND) {
3390 3456 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3391 3457 SL_TRACE | SL_ERROR,
3392 3458 "tl_wput:T_CONN_RES:out of state, state=%d",
3393 3459 tep->te_state));
3394 3460 tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3395 3461 freemsg(mp);
3396 3462 return;
3397 3463 }
3398 3464
3399 3465 /*
3400 3466 * validate the message
3401 3467 * Note: dereference fields in struct inside message only
3402 3468 * after validating the message length.
3403 3469 */
3404 3470 if (msz < sizeof (struct T_conn_res)) {
3405 3471 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3406 3472 "tl_conn_res:invalid message length"));
3407 3473 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3408 3474 freemsg(mp);
3409 3475 return;
3410 3476 }
3411 3477 olen = cres->OPT_length;
3412 3478 ooff = cres->OPT_offset;
3413 3479 if (((olen > 0) && ((ooff + olen) > msz))) {
3414 3480 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3415 3481 "tl_conn_res:invalid message"));
3416 3482 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3417 3483 freemsg(mp);
3418 3484 return;
3419 3485 }
3420 3486 if (olen) {
3421 3487 /*
3422 3488 * no opts in connect res
3423 3489 * supported in this provider
3424 3490 */
3425 3491 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3426 3492 "tl_conn_res:options not supported in message"));
3427 3493 tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
3428 3494 freemsg(mp);
3429 3495 return;
3430 3496 }
3431 3497
3432 - tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state);
3498 + tep->te_state = nextstate[TE_CONN_RES][tep->te_state];
3433 3499 ASSERT(tep->te_state == TS_WACK_CRES);
3434 3500
3435 3501 if (cres->SEQ_number < TL_MINOR_START &&
3436 3502 cres->SEQ_number >= BADSEQNUM) {
3437 3503 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3438 3504 "tl_conn_res:remote endpoint sequence number bad"));
3439 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3505 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3440 3506 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3441 3507 freemsg(mp);
3442 3508 return;
3443 3509 }
3444 3510
3445 3511 /*
3446 3512 * find accepting endpoint. Will have extra reference if found.
3447 3513 */
3448 3514 if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
3449 3515 (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
3450 3516 (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
3451 3517 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3452 3518 "tl_conn_res:bad accepting endpoint"));
3453 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3519 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3454 3520 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3455 3521 freemsg(mp);
3456 3522 return;
3457 3523 }
3458 3524
3459 3525 /*
3460 3526 * Prevent acceptor from closing.
3461 3527 */
3462 3528 if (!tl_noclose(acc_ep)) {
3463 3529 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3464 3530 "tl_conn_res:bad accepting endpoint"));
3465 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3531 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3466 3532 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3467 3533 tl_refrele(acc_ep);
3468 3534 freemsg(mp);
3469 3535 return;
3470 3536 }
3471 3537
3472 3538 acc_ep->te_flag |= TL_ACCEPTOR;
3473 3539
3474 3540 /*
3475 3541	 * validate that the accepting endpoint, if different from the
3476 3542	 * listening one, has an address bound => state is TS_IDLE
3477 3543 * TROUBLE in XPG4 !!?
3478 3544 */
3479 3545 if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
3480 3546 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3481 3547		    "tl_conn_res:accepting endpoint has no address bound, "
3482 3548 "state=%d", acc_ep->te_state));
3483 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3549 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3484 3550 tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3485 3551 freemsg(mp);
3486 3552 tl_closeok(acc_ep);
3487 3553 tl_refrele(acc_ep);
3488 3554 return;
3489 3555 }
3490 3556
3491 3557 /*
3492 3558	 * validate that if the accepting endpoint is the same as the
3493 3559	 * listening one, no other incoming connection is on the queue
3494 3560 */
3495 3561
3496 3562 if ((tep == acc_ep) && (tep->te_nicon > 1)) {
3497 3563 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3498 3564 "tl_conn_res: > 1 conn_ind on listener-acceptor"));
3499 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3565 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3500 3566 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3501 3567 freemsg(mp);
3502 3568 tl_closeok(acc_ep);
3503 3569 tl_refrele(acc_ep);
3504 3570 return;
3505 3571 }
3506 3572
3507 3573 /*
3508 3574	 * Mark for deletion the entry corresponding to the client
3509 3575	 * on the list of pending connections made by the listener;
3510 3576	 * search the list to see if the client is one of those
3511 3577	 * recorded by the listener.
3512 3578 */
3513 3579 tip = tl_icon_find(tep, cres->SEQ_number);
3514 3580 if (tip == NULL) {
3515 3581 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3516 3582 "tl_conn_res:no client in listener list"));
3517 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3583 + tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3518 3584 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3519 3585 freemsg(mp);
3520 3586 tl_closeok(acc_ep);
3521 3587 tl_refrele(acc_ep);
3522 3588 return;
3523 3589 }
3524 3590
3525 3591 /*
3526 3592 * If ti_tep is NULL the client has already closed. In this case
3527 3593 * the code below will avoid any action on the client side
3528 3594 * but complete the server and acceptor state transitions.
3529 3595 */
3530 3596 ASSERT(tip->ti_tep == NULL ||
3531 3597 tip->ti_tep->te_seqno == cres->SEQ_number);
3532 3598 cl_ep = tip->ti_tep;
3533 3599
3534 3600 /*
3535 3601 * If the client is present it is switched from listener's to acceptor's
3536 3602 * serializer. We should block client closes while serializers are
3537 3603 * being switched.
3538 3604 *
3539 3605 * It is possible that the client is present but is currently being
3540 3606 * closed. There are two possible cases:
3541 3607 *
3542 3608 * 1) The client has already entered tl_close_finish_ser() and sent
3543 3609 * T_ORDREL_IND. In this case we can just ignore the client (but we
3544 3610 * still need to send all messages from tip->ti_mp to the acceptor).
3545 3611 *
3546 3612 * 2) The client started the close but has not entered
3547 3613 * tl_close_finish_ser() yet. In this case, the client is already
3548 3614 * proceeding asynchronously on the listener's serializer, so we're
3549 3615 * forced to change the acceptor to use the listener's serializer to
3550 3616 * ensure that any operations on the acceptor are serialized with
3551 3617 * respect to the close that's in-progress.
3552 3618 */
3553 3619 if (cl_ep != NULL) {
3554 3620 if (tl_noclose(cl_ep)) {
3555 3621 client_noclose_set = B_TRUE;
3556 3622 } else {
3557 3623 /*
3558 3624			 * Client is closing. If it has sent the
3559 3625 * T_ORDREL_IND, we can simply ignore it - otherwise,
3560 3626			 * we have to let the client continue until it is
3561 3627 * sent.
3562 3628 *
3563 3629 * If we do continue using the client, acceptor will
3564 3630 * switch to client's serializer which is used by client
3565 3631 * for its close.
3566 3632 */
3567 3633 tl_client_closing_when_accepting++;
3568 3634 switch_client_serializer = B_FALSE;
3569 3635 if (!IS_SOCKET(cl_ep) || tl_disable_early_connect ||
3570 3636 cl_ep->te_state == -1)
3571 3637 cl_ep = NULL;
3572 3638 }
3573 3639 }
3574 3640
3575 3641 if (cl_ep != NULL) {
3576 3642 /*
3577 3643 * validate client state to be TS_WCON_CREQ or TS_DATA_XFER
3578 3644 * (latter for sockets only)
3579 3645 */
3580 3646 if (cl_ep->te_state != TS_WCON_CREQ &&
3581 3647 (cl_ep->te_state != TS_DATA_XFER &&
3582 3648 IS_SOCKET(cl_ep))) {
3583 3649 err = ECONNREFUSED;
3584 3650 /*
3585 3651 * T_DISCON_IND sent later after committing memory
3586 3652 * and acking validity of request
3587 3653 */
3588 3654 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
3589 3655 "tl_conn_res:peer in bad state"));
3590 3656 }
3591 3657
3592 3658 /*
3593 3659		 * preallocate now for T_DISCON_IND or T_CONN_CON
3594 3660 * ack validity of request (T_OK_ACK) after memory committed
3595 3661 */
3596 3662
3597 3663 if (err) {
3598 3664 size = sizeof (struct T_discon_ind);
3599 3665 } else {
3600 3666 /*
3601 3667 * calculate length of T_CONN_CON message
3602 3668 */
3603 3669 olen = 0;
3604 3670 if (cl_ep->te_flag & TL_SETCRED) {
3605 3671 olen = (t_scalar_t)sizeof (struct opthdr) +
3606 3672 OPTLEN(sizeof (tl_credopt_t));
3607 3673 } else if (cl_ep->te_flag & TL_SETUCRED) {
3608 3674 olen = (t_scalar_t)sizeof (struct opthdr) +
3609 3675 OPTLEN(ucredminsize(acc_ep->te_credp));
3610 3676 }
3611 3677 size = T_ALIGN(sizeof (struct T_conn_con) +
3612 3678 acc_ep->te_alen) + olen;
3613 3679 }
3614 3680 if ((respmp = reallocb(mp, size, 0)) == NULL) {
3615 3681 /*
3616 3682 * roll back state changes
3617 3683 */
3618 3684 tep->te_state = TS_WRES_CIND;
3619 3685 tl_memrecover(wq, mp, size);
3620 3686 freemsg(ackmp);
3621 3687 if (client_noclose_set)
3622 3688 tl_closeok(cl_ep);
3623 3689 tl_closeok(acc_ep);
3624 3690 tl_refrele(acc_ep);
3625 3691 return;
3626 3692 }
3627 3693 mp = NULL;
3628 3694 }
3629 3695
3630 3696 /*
3631 3697 * Now ack validity of request
3632 3698 */
3633 3699 if (tep->te_nicon == 1) {
3634 3700 if (tep == acc_ep)
3635 - tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
3701 + tep->te_state = nextstate[TE_OK_ACK2][tep->te_state];
3636 3702 else
3637 - tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
3703 + tep->te_state = nextstate[TE_OK_ACK3][tep->te_state];
3638 3704 } else {
3639 - tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
3705 + tep->te_state = nextstate[TE_OK_ACK4][tep->te_state];
3640 3706 }
3641 3707
3642 3708 /*
3643 3709 * send T_DISCON_IND now if client state validation failed earlier
3644 3710 */
3645 3711 if (err) {
3646 3712 tl_ok_ack(wq, ackmp, prim);
3647 3713 /*
3648 3714 * flush the queues - why always ?
3649 3715 */
3650 3716 (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
3651 3717
3652 3718 dimp = tl_resizemp(respmp, size);
3653 3719 if (dimp == NULL) {
3654 3720 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3655 3721 SL_TRACE | SL_ERROR,
3656 3722 "tl_conn_res:con_ind:allocb failure"));
3657 3723 tl_merror(wq, respmp, ENOMEM);
3658 3724 tl_closeok(acc_ep);
3659 3725 if (client_noclose_set)
3660 3726 tl_closeok(cl_ep);
3661 3727 tl_refrele(acc_ep);
3662 3728 return;
3663 3729 }
3664 3730 if (dimp->b_cont) {
3665 3731 /* no user data in provider generated discon ind */
3666 3732 freemsg(dimp->b_cont);
3667 3733 dimp->b_cont = NULL;
3668 3734 }
3669 3735
3670 3736 DB_TYPE(dimp) = M_PROTO;
3671 3737 di = (struct T_discon_ind *)dimp->b_rptr;
3672 3738 di->PRIM_type = T_DISCON_IND;
3673 3739 di->DISCON_reason = err;
3674 3740 di->SEQ_number = BADSEQNUM;
3675 3741
3676 3742 tep->te_state = TS_IDLE;
3677 3743 /*
3678 3744 * send T_DISCON_IND message
3679 3745 */
3680 3746 putnext(acc_ep->te_rq, dimp);
3681 3747 if (client_noclose_set)
3682 3748 tl_closeok(cl_ep);
3683 3749 tl_closeok(acc_ep);
3684 3750 tl_refrele(acc_ep);
3685 3751 return;
3686 3752 }
3687 3753
3688 3754 /*
3689 3755 * now start connecting the accepting endpoint
3690 3756 */
3691 3757 if (tep != acc_ep)
3692 - acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state);
3758 + acc_ep->te_state = nextstate[TE_PASS_CONN][acc_ep->te_state];
3693 3759
3694 3760 if (cl_ep == NULL) {
3695 3761 /*
3696 3762 * The client has already closed. Send up any queued messages
3697 3763 * and change the state accordingly.
3698 3764 */
3699 3765 tl_ok_ack(wq, ackmp, prim);
3700 3766 tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3701 3767
3702 3768 /*
3703 3769 * remove endpoint from incoming connection
3704 3770 * delete client from list of incoming connections
3705 3771 */
3706 3772 tl_freetip(tep, tip);
3707 3773 freemsg(mp);
3708 3774 tl_closeok(acc_ep);
3709 3775 tl_refrele(acc_ep);
3710 3776 return;
3711 3777 } else if (tip->ti_mp != NULL) {
3712 3778 /*
3713 3779 * The client could have queued a T_DISCON_IND which needs
3714 3780 * to be sent up.
3715 3781		 * Note that t_discon_req cannot operate the same as
3716 3782 * t_data_req since it is not possible for it to putbq
3717 3783 * the message and return -1 due to the use of qwriter.
3718 3784 */
3719 3785 tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3720 3786 }
3721 3787
3722 3788 /*
3723 3789 * prepare connect confirm T_CONN_CON message
3724 3790 */
3725 3791
3726 3792 /*
3727 3793 * allocate the message - original data blocks
3728 3794 * retained in the returned mblk
3729 3795 */
3730 3796 if (!IS_SOCKET(cl_ep) || tl_disable_early_connect) {
3731 3797 ccmp = tl_resizemp(respmp, size);
3732 3798 if (ccmp == NULL) {
3733 3799 tl_ok_ack(wq, ackmp, prim);
3734 3800 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3735 3801 SL_TRACE | SL_ERROR,
3736 3802 "tl_conn_res:conn_con:allocb failure"));
3737 3803 tl_merror(wq, respmp, ENOMEM);
3738 3804 tl_closeok(acc_ep);
3739 3805 if (client_noclose_set)
3740 3806 tl_closeok(cl_ep);
3741 3807 tl_refrele(acc_ep);
3742 3808 return;
3743 3809 }
3744 3810
3745 3811 DB_TYPE(ccmp) = M_PROTO;
3746 3812 cc = (struct T_conn_con *)ccmp->b_rptr;
3747 3813 cc->PRIM_type = T_CONN_CON;
3748 3814 cc->RES_offset = (t_scalar_t)sizeof (struct T_conn_con);
3749 3815 cc->RES_length = acc_ep->te_alen;
3750 3816 addr_startp = ccmp->b_rptr + cc->RES_offset;
3751 3817 bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen);
3752 3818 if (cl_ep->te_flag & (TL_SETCRED | TL_SETUCRED)) {
3753 3819 cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset +
3754 3820 cc->RES_length);
3755 3821 cc->OPT_length = olen;
3756 3822 tl_fill_option(ccmp->b_rptr + cc->OPT_offset,
3757 3823 acc_ep->te_credp, acc_ep->te_cpid, cl_ep->te_flag,
3758 3824 cl_ep->te_credp);
3759 3825 } else {
3760 3826 cc->OPT_offset = 0;
3761 3827 cc->OPT_length = 0;
3762 3828 }
3763 3829 /*
3764 3830 * Forward the credential in the packet so it can be picked up
3765 3831 * at the higher layers for more complete credential processing
3766 3832 */
3767 3833 mblk_setcred(ccmp, acc_ep->te_credp, acc_ep->te_cpid);
3768 3834 } else {
3769 3835 freemsg(respmp);
3770 3836 respmp = NULL;
3771 3837 }
3772 3838
3773 3839 /*
3774 3840 * make connection linking
3775 3841 * accepting and client endpoints
3776 3842 * No need to increment references:
3777 3843 * on client: it should already have one from tip->ti_tep linkage.
3778 3844	 * on acceptor: it should already have one from the table lookup.
3779 3845 *
3780 3846 * At this point both client and acceptor can't close. Set client
3781 3847 * serializer to acceptor's.
3782 3848 */
3783 3849 ASSERT(cl_ep->te_refcnt >= 2);
3784 3850 ASSERT(acc_ep->te_refcnt >= 2);
3785 3851 ASSERT(cl_ep->te_conp == NULL);
3786 3852 ASSERT(acc_ep->te_conp == NULL);
3787 3853 cl_ep->te_conp = acc_ep;
3788 3854 acc_ep->te_conp = cl_ep;
3789 3855 ASSERT(cl_ep->te_ser == tep->te_ser);
3790 3856 if (switch_client_serializer) {
3791 3857 mutex_enter(&cl_ep->te_ser_lock);
3792 3858 if (cl_ep->te_ser_count > 0) {
3793 3859 switch_client_serializer = B_FALSE;
3794 3860 tl_serializer_noswitch++;
3795 3861 } else {
3796 3862 /*
3797 3863 * Move client to the acceptor's serializer.
3798 3864 */
3799 3865 tl_serializer_refhold(acc_ep->te_ser);
3800 3866 tl_serializer_refrele(cl_ep->te_ser);
3801 3867 cl_ep->te_ser = acc_ep->te_ser;
3802 3868 }
3803 3869 mutex_exit(&cl_ep->te_ser_lock);
3804 3870 }
3805 3871 if (!switch_client_serializer) {
3806 3872 /*
3807 3873 * It is not possible to switch client to use acceptor's.
3808 3874 * Move acceptor to client's serializer (which is the same as
3809 3875 * listener's).
3810 3876 */
3811 3877 tl_serializer_refhold(cl_ep->te_ser);
3812 3878 tl_serializer_refrele(acc_ep->te_ser);
3813 3879 acc_ep->te_ser = cl_ep->te_ser;
3814 3880 }
3815 3881
3816 3882 TL_REMOVE_PEER(cl_ep->te_oconp);
3817 3883 TL_REMOVE_PEER(acc_ep->te_oconp);
3818 3884
3819 3885 /*
3820 3886 * remove endpoint from incoming connection
3821 3887 * delete client from list of incoming connections
3822 3888 */
3823 3889 tip->ti_tep = NULL;
3824 3890 tl_freetip(tep, tip);
3825 3891 tl_ok_ack(wq, ackmp, prim);
3826 3892
3827 3893 /*
3828 3894 * data blocks already linked in reallocb()
3829 3895 */
3830 3896
3831 3897 /*
3832 3898 * link queues so that I_SENDFD will work
3833 3899 */
3834 3900 if (!IS_SOCKET(tep)) {
3835 3901 acc_ep->te_wq->q_next = cl_ep->te_rq;
3836 3902 cl_ep->te_wq->q_next = acc_ep->te_rq;
3837 3903 }
3838 3904
3839 3905 /*
3840 3906 * send T_CONN_CON up on client side unless it was already
3841 3907	 * done (for a socket). In case any data or ordrel req has been
3842 3908	 * queued, make sure that the service procedure runs.
3843 3909 */
3844 3910 if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) {
3845 3911 enableok(cl_ep->te_wq);
3846 3912 TL_QENABLE(cl_ep);
3847 3913 if (ccmp != NULL)
3848 3914 freemsg(ccmp);
3849 3915 } else {
3850 3916 /*
3851 3917 * change client state on TE_CONN_CON event
3852 3918 */
3853 - cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state);
3919 + cl_ep->te_state = nextstate[TE_CONN_CON][cl_ep->te_state];
3854 3920 putnext(cl_ep->te_rq, ccmp);
3855 3921 }
3856 3922
3857 3923	/* Mark both endpoints as accepted */
3858 3924 cl_ep->te_flag |= TL_ACCEPTED;
3859 3925 acc_ep->te_flag |= TL_ACCEPTED;
3860 3926
3861 3927 /*
3862 3928 * Allow client and acceptor to close.
3863 3929 */
3864 3930 tl_closeok(acc_ep);
3865 3931 if (client_noclose_set)
3866 3932 tl_closeok(cl_ep);
3867 3933 }
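
Both tl_conn_req() and tl_conn_res() migrate an endpoint onto its peer's
serializer by taking a hold on the new serializer before releasing the hold
on the old one, so neither can be freed mid-switch. Below is a toy
refcounted handle demonstrating that hold-new-then-release-old ordering;
the ser_t type and helpers are invented for the sketch and, unlike the
driver's serializers, are single-threaded:

#include <stdio.h>
#include <stdlib.h>

typedef struct ser {
	int	s_refcnt;
} ser_t;

static ser_t *
ser_hold(ser_t *s)
{
	s->s_refcnt++;
	return (s);
}

static void
ser_rele(ser_t *s)
{
	if (--s->s_refcnt == 0)
		free(s);
}

int
main(void)
{
	ser_t *a = calloc(1, sizeof (*a));	/* client's serializer */
	ser_t *b = calloc(1, sizeof (*b));	/* acceptor's serializer */
	ser_t *client_ser;

	if (a == NULL || b == NULL)
		return (1);
	a->s_refcnt = b->s_refcnt = 1;
	client_ser = ser_hold(a);

	/* Switch: hold the new serializer before releasing the old one. */
	ser_hold(b);
	ser_rele(client_ser);
	client_ser = b;

	printf("a refs %d, b refs %d\n", a->s_refcnt, b->s_refcnt);

	ser_rele(client_ser);	/* drop client's reference on b */
	ser_rele(b);		/* drop b's original reference; frees b */
	ser_rele(a);		/* drop a's original reference; frees a */
	return (0);
}
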
3868 3934
3869 3935
3870 3936
3871 3937
3872 3938 static void
3873 3939 tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
3874 3940 {
3875 3941 queue_t *wq;
3876 3942 struct T_discon_req *dr;
3877 3943 ssize_t msz;
3878 3944 tl_endpt_t *peer_tep = tep->te_conp;
3879 3945 tl_endpt_t *srv_tep = tep->te_oconp;
3880 3946 tl_icon_t *tip;
3881 3947 size_t size;
3882 3948 mblk_t *ackmp, *dimp, *respmp;
3883 3949 struct T_discon_ind *di;
3884 3950 t_scalar_t save_state, new_state;
3885 3951
3886 3952 if (tep->te_closing) {
3887 3953 freemsg(mp);
3888 3954 return;
3889 3955 }
3890 3956
3891 3957 if ((peer_tep != NULL) && peer_tep->te_closing) {
3892 3958 TL_UNCONNECT(tep->te_conp);
3893 3959 peer_tep = NULL;
3894 3960 }
3895 3961 if ((srv_tep != NULL) && srv_tep->te_closing) {
3896 3962 TL_UNCONNECT(tep->te_oconp);
3897 3963 srv_tep = NULL;
3898 3964 }
3899 3965
3900 3966 wq = tep->te_wq;
3901 3967
3902 3968 /*
3903 3969 * preallocate memory for:
3904 3970 * 1. max of T_ERROR_ACK and T_OK_ACK
3905 3971 * ==> known max T_ERROR_ACK
3906 3972 * 2. for T_DISCON_IND
3907 3973 */
3908 3974 ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
3909 3975 if (ackmp == NULL) {
3910 3976 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
3911 3977 return;
3912 3978 }
3913 3979 /*
3914 3980 * memory committed for T_OK_ACK/T_ERROR_ACK now
3915 3981 * will be committed for T_DISCON_IND later
3916 3982 */
3917 3983
3918 3984 dr = (struct T_discon_req *)mp->b_rptr;
3919 3985 msz = MBLKL(mp);
3920 3986
3921 3987 /*
3922 3988 * validate the state
3923 3989 */
3924 3990 save_state = new_state = tep->te_state;
3925 3991 if (!(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
3926 3992 !(save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
3927 3993 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3928 3994 SL_TRACE | SL_ERROR,
3929 3995 "tl_wput:T_DISCON_REQ:out of state, state=%d",
3930 3996 tep->te_state));
3931 3997 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
3932 3998 freemsg(mp);
3933 3999 return;
3934 4000 }
3935 4001 /*
3936 4002 * Defer committing the state change until it is determined if
3937 4003 * the message will be queued with the tl_icon or not.
3938 4004 */
3939 - new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state);
4005 + new_state = nextstate[TE_DISCON_REQ][tep->te_state];
3940 4006
3941 4007 /* validate the message */
3942 4008 if (msz < sizeof (struct T_discon_req)) {
3943 4009 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3944 4010 "tl_discon_req:invalid message"));
3945 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
4011 + tep->te_state = nextstate[TE_ERROR_ACK][new_state];
3946 4012 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
3947 4013 freemsg(mp);
3948 4014 return;
3949 4015 }
3950 4016
3951 4017 /*
3952 4018 * if server, then validate that client exists
3953 4019 * by connection sequence number etc.
3954 4020 */
3955 4021 if (tep->te_nicon > 0) { /* server */
3956 4022
3957 4023 /*
3958 4024 * search server list for disconnect client
3959 4025 */
3960 4026 tip = tl_icon_find(tep, dr->SEQ_number);
3961 4027 if (tip == NULL) {
3962 4028 (void) (STRLOG(TL_ID, tep->te_minor, 2,
3963 4029 SL_TRACE | SL_ERROR,
3964 4030 "tl_discon_req:no disconnect endpoint"));
3965 - tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
4031 + tep->te_state = nextstate[TE_ERROR_ACK][new_state];
3966 4032 tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
3967 4033 freemsg(mp);
3968 4034 return;
3969 4035 }
3970 4036 /*
3971 4037 * If ti_tep is NULL the client has already closed. In this case
3972 4038 * the code below will avoid any action on the client side.
3973 4039 */
3974 4040
3975 4041 IMPLY(tip->ti_tep != NULL,
3976 4042 tip->ti_tep->te_seqno == dr->SEQ_number);
3977 4043 peer_tep = tip->ti_tep;
3978 4044 }
3979 4045
3980 4046 /*
3981 4047 * preallocate now for T_DISCON_IND
3982 4048 * ack validity of request (T_OK_ACK) after memory committed
3983 4049 */
3984 4050 size = sizeof (struct T_discon_ind);
3985 4051 if ((respmp = reallocb(mp, size, 0)) == NULL) {
3986 4052 tl_memrecover(wq, mp, size);
3987 4053 freemsg(ackmp);
3988 4054 return;
3989 4055 }
3990 4056
3991 4057 /*
3992 4058 * prepare message to ack validity of request
3993 4059 */
3994 4060 if (tep->te_nicon == 0) {
3995 - new_state = NEXTSTATE(TE_OK_ACK1, new_state);
4061 + new_state = nextstate[TE_OK_ACK1][new_state];
3996 4062 } else {
3997 4063 if (tep->te_nicon == 1)
3998 - new_state = NEXTSTATE(TE_OK_ACK2, new_state);
4064 + new_state = nextstate[TE_OK_ACK2][new_state];
3999 4065 else
4000 - new_state = NEXTSTATE(TE_OK_ACK4, new_state);
4066 + new_state = nextstate[TE_OK_ACK4][new_state];
4001 4067 }
4002 4068
4003 4069 /*
4004 4070 * Flushing queues according to TPI. Using the old state.
4005 4071 */
4006 4072 if ((tep->te_nicon <= 1) &&
4007 4073 ((save_state == TS_DATA_XFER) ||
4008 4074 (save_state == TS_WIND_ORDREL) ||
4009 4075 (save_state == TS_WREQ_ORDREL)))
4010 4076 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
4011 4077
4012 4078 /* send T_OK_ACK up */
4013 4079 tl_ok_ack(wq, ackmp, T_DISCON_REQ);
4014 4080
4015 4081 /*
4016 4082 * now do disconnect business
4017 4083 */
4018 4084 if (tep->te_nicon > 0) { /* listener */
4019 4085 if (peer_tep != NULL && !peer_tep->te_closing) {
4020 4086 /*
4021 4087 * disconnect incoming connect request pending to tep
4022 4088 */
4023 4089 if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4024 4090 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4025 4091 SL_TRACE | SL_ERROR,
4026 4092 "tl_discon_req: reallocb failed"));
4027 4093 tep->te_state = new_state;
4028 4094 tl_merror(wq, respmp, ENOMEM);
4029 4095 return;
4030 4096 }
4031 4097 di = (struct T_discon_ind *)dimp->b_rptr;
4032 4098 di->SEQ_number = BADSEQNUM;
4033 4099 save_state = peer_tep->te_state;
4034 4100 peer_tep->te_state = TS_IDLE;
4035 4101
4036 4102 TL_REMOVE_PEER(peer_tep->te_oconp);
4037 4103 enableok(peer_tep->te_wq);
4038 4104 TL_QENABLE(peer_tep);
4039 4105 } else {
4040 4106 freemsg(respmp);
4041 4107 dimp = NULL;
4042 4108 }
4043 4109
4044 4110 /*
4045 4111 * remove endpoint from incoming connection list
4046 4112 * - remove disconnect client from list on server
4047 4113 */
4048 4114 tl_freetip(tep, tip);
4049 4115 } else if ((peer_tep = tep->te_oconp) != NULL) { /* client */
4050 4116 /*
4051 4117 * disconnect an outgoing request pending from tep
4052 4118 */
4053 4119
4054 4120 if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4055 4121 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4056 4122 SL_TRACE | SL_ERROR,
4057 4123 "tl_discon_req: reallocb failed"));
4058 4124 tep->te_state = new_state;
4059 4125 tl_merror(wq, respmp, ENOMEM);
4060 4126 return;
4061 4127 }
4062 4128 di = (struct T_discon_ind *)dimp->b_rptr;
4063 4129 DB_TYPE(dimp) = M_PROTO;
4064 4130 di->PRIM_type = T_DISCON_IND;
4065 4131 di->DISCON_reason = ECONNRESET;
4066 4132 di->SEQ_number = tep->te_seqno;
4067 4133
4068 4134 /*
4069 4135 * If this is a socket the T_DISCON_IND is queued with
4070 4136 * the T_CONN_IND. Otherwise the T_CONN_IND is removed
4071 4137 * from the list of pending connections.
4072 4138 * Note that when te_oconp is set the peer better have
4073 4139 * a t_connind_t for the client.
4074 4140 */
4075 4141 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
4076 4142 /*
4077 4143 * No need to check that
4078 4144 * ti_tep == NULL since the T_DISCON_IND
4079 4145 * takes precedence over other queued
4080 4146 * messages.
4081 4147 */
4082 4148 tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
4083 4149 peer_tep = NULL;
4084 4150 dimp = NULL;
4085 4151 /*
4086 4152 * Can't clear te_oconp since tl_co_unconnect needs
4087 4153 * it as a hint not to free the tep.
4088 4154 * Keep the state unchanged since tl_conn_res inspects
4089 4155 * it.
4090 4156 */
4091 4157 new_state = tep->te_state;
4092 4158 } else {
4093 4159 /* Found - delete it */
4094 4160 tip = tl_icon_find(peer_tep, tep->te_seqno);
4095 4161 if (tip != NULL) {
4096 4162 ASSERT(tep == tip->ti_tep);
4097 4163 save_state = peer_tep->te_state;
4098 4164 if (peer_tep->te_nicon == 1)
4099 4165 peer_tep->te_state =
4100 - NEXTSTATE(TE_DISCON_IND2,
4101 - peer_tep->te_state);
4166 + nextstate[TE_DISCON_IND2]
4167 + [peer_tep->te_state];
4102 4168 else
4103 4169 peer_tep->te_state =
4104 - NEXTSTATE(TE_DISCON_IND3,
4105 - peer_tep->te_state);
4170 + nextstate[TE_DISCON_IND3]
4171 + [peer_tep->te_state];
4106 4172 tl_freetip(peer_tep, tip);
4107 4173 }
4108 4174 ASSERT(tep->te_oconp != NULL);
4109 4175 TL_UNCONNECT(tep->te_oconp);
4110 4176 }
4111 4177 } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
4112 4178 if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4113 4179 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4114 4180 SL_TRACE | SL_ERROR,
4115 4181 "tl_discon_req: reallocb failed"));
4116 4182 tep->te_state = new_state;
4117 4183 tl_merror(wq, respmp, ENOMEM);
4118 4184 return;
4119 4185 }
4120 4186 di = (struct T_discon_ind *)dimp->b_rptr;
4121 4187 di->SEQ_number = BADSEQNUM;
4122 4188
4123 4189 save_state = peer_tep->te_state;
4124 4190 peer_tep->te_state = TS_IDLE;
4125 4191 } else {
4126 4192 /* Not connected */
4127 4193 tep->te_state = new_state;
4128 4194 freemsg(respmp);
4129 4195 return;
4130 4196 }
4131 4197
4132 4198 /* Commit state changes */
4133 4199 tep->te_state = new_state;
4134 4200
4135 4201 if (peer_tep == NULL) {
4136 4202 ASSERT(dimp == NULL);
4137 4203 goto done;
4138 4204 }
4139 4205 /*
4140 4206 * Flush queues on peer before sending up
4141 4207 * T_DISCON_IND according to TPI
4142 4208 */
4143 4209
4144 4210 if ((save_state == TS_DATA_XFER) ||
4145 4211 (save_state == TS_WIND_ORDREL) ||
4146 4212 (save_state == TS_WREQ_ORDREL))
4147 4213 (void) putnextctl1(peer_tep->te_rq, M_FLUSH, FLUSHRW);
4148 4214
4149 4215 DB_TYPE(dimp) = M_PROTO;
4150 4216 di->PRIM_type = T_DISCON_IND;
4151 4217 di->DISCON_reason = ECONNRESET;
4152 4218
4153 4219 /*
4154 4220 * data blocks already linked into dimp by reallocb()
4155 4221 */
4156 4222 /*
4157 4223 * send indication message to peer user module
4158 4224 */
4159 4225 ASSERT(dimp != NULL);
4160 4226 putnext(peer_tep->te_rq, dimp);
4161 4227 done:
4162 4228 if (tep->te_conp) { /* disconnect pointers if connected */
4163 4229 ASSERT(!peer_tep->te_closing);
4164 4230
4165 4231 /*
4166 4232 * Messages may be queued on peer's write queue
4167 4233 * waiting to be processed by its write service
4168 4234 * procedure. Before the pointer to the peer transport
4169 4235 * structure is set to NULL, qenable the peer's write
4170 4236 * queue so that the queued up messages are processed.
4171 4237 */
4172 4238 if ((save_state == TS_DATA_XFER) ||
4173 4239 (save_state == TS_WIND_ORDREL) ||
4174 4240 (save_state == TS_WREQ_ORDREL))
4175 4241 TL_QENABLE(peer_tep);
4176 4242 ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL);
4177 4243 TL_UNCONNECT(peer_tep->te_conp);
4178 4244 if (!IS_SOCKET(tep)) {
4179 4245 /*
4180 4246 * unlink the streams
4181 4247 */
4182 4248 tep->te_wq->q_next = NULL;
4183 4249 peer_tep->te_wq->q_next = NULL;
4184 4250 }
4185 4251 TL_UNCONNECT(tep->te_conp);
4186 4252 }
4187 4253 }
4188 4254
4189 4255 static void
4190 4256 tl_addr_req_ser(mblk_t *mp, tl_endpt_t *tep)
4191 4257 {
4192 4258 if (!tep->te_closing)
4193 4259 tl_addr_req(mp, tep);
4194 4260 else
4195 4261 freemsg(mp);
4196 4262
4197 4263 tl_serializer_exit(tep);
4198 4264 tl_refrele(tep);
4199 4265 }
4200 4266
4201 4267 static void
4202 4268 tl_addr_req(mblk_t *mp, tl_endpt_t *tep)
4203 4269 {
4204 4270 queue_t *wq;
4205 4271 size_t ack_sz;
4206 4272 mblk_t *ackmp;
4207 4273 struct T_addr_ack *taa;
4208 4274
4209 4275 if (tep->te_closing) {
4210 4276 freemsg(mp);
4211 4277 return;
4212 4278 }
4213 4279
4214 4280 wq = tep->te_wq;
4215 4281
4216 4282 /*
4217 4283 * Note: T_ADDR_REQ message has only PRIM_type field
4218 4284 * so it is already validated earlier.
4219 4285 */
4220 4286
4221 4287 if (IS_CLTS(tep) ||
4222 4288 (tep->te_state > TS_WREQ_ORDREL) ||
4223 4289 (tep->te_state < TS_DATA_XFER)) {
4224 4290 /*
4225 4291 * Either connectionless or connection oriented but not
4226 4292 * in connected data transfer state or half-closed states.
4227 4293 */
4228 4294 ack_sz = sizeof (struct T_addr_ack);
4229 4295 if (tep->te_state >= TS_IDLE)
4230 4296 /* is bound */
4231 4297 ack_sz += tep->te_alen;
4232 4298 ackmp = reallocb(mp, ack_sz, 0);
4233 4299 if (ackmp == NULL) {
4234 4300 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4235 4301 SL_TRACE | SL_ERROR,
4236 4302 "tl_addr_req: reallocb failed"));
4237 4303 tl_memrecover(wq, mp, ack_sz);
4238 4304 return;
4239 4305 }
4240 4306
4241 4307 taa = (struct T_addr_ack *)ackmp->b_rptr;
4242 4308
4243 4309 bzero(taa, sizeof (struct T_addr_ack));
4244 4310
4245 4311 taa->PRIM_type = T_ADDR_ACK;
4246 4312 ackmp->b_datap->db_type = M_PCPROTO;
4247 4313 ackmp->b_wptr = (uchar_t *)&taa[1];
4248 4314
4249 4315 if (tep->te_state >= TS_IDLE) {
4250 4316 /* endpoint is bound */
4251 4317 taa->LOCADDR_length = tep->te_alen;
4252 4318 taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
4253 4319
4254 4320 bcopy(tep->te_abuf, ackmp->b_wptr,
4255 4321 tep->te_alen);
4256 4322 ackmp->b_wptr += tep->te_alen;
4257 4323 ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
4258 4324 }
4259 4325
4260 4326 (void) qreply(wq, ackmp);
4261 4327 } else {
4262 4328 ASSERT(tep->te_state == TS_DATA_XFER ||
4263 4329 tep->te_state == TS_WIND_ORDREL ||
4264 4330 tep->te_state == TS_WREQ_ORDREL);
4265 4331 /* connection oriented in data transfer */
4266 4332 tl_connected_cots_addr_req(mp, tep);
4267 4333 }
4268 4334 }
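
tl_addr_req() lays the T_ADDR_ACK out as the fixed struct followed
immediately by the raw bound address, records the placement in
LOCADDR_offset/LOCADDR_length, and advances b_wptr past the copy. A
userland sketch of the same header-plus-trailing-bytes layout; the
addr_ack struct is an invented stand-in for struct T_addr_ack:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr_ack {		/* invented stand-in for struct T_addr_ack */
	int	prim;
	int	loc_len;
	int	loc_off;
};

int
main(void)
{
	const char addr[] = "local-addr";	/* pretend bound address */
	size_t alen = sizeof (addr);
	size_t sz = sizeof (struct addr_ack) + alen;
	unsigned char *buf = malloc(sz);
	struct addr_ack *ack;

	if (buf == NULL)
		return (1);
	ack = (struct addr_ack *)buf;
	ack->prim = 0;
	ack->loc_len = (int)alen;
	ack->loc_off = (int)sizeof (*ack);	/* address follows header */
	memcpy(buf + ack->loc_off, addr, alen);

	printf("%zu-byte ack, address at offset %d\n", sz, ack->loc_off);
	free(buf);
	return (0);
}
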
4269 4335
4270 4336
4271 4337 static void
4272 4338 tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep)
4273 4339 {
4274 4340 tl_endpt_t *peer_tep = tep->te_conp;
4275 4341 size_t ack_sz;
4276 4342 mblk_t *ackmp;
4277 4343 struct T_addr_ack *taa;
4278 4344 uchar_t *addr_startp;
4279 4345
4280 4346 if (tep->te_closing) {
4281 4347 freemsg(mp);
4282 4348 return;
4283 4349 }
4284 4350
4285 4351 if (peer_tep == NULL || peer_tep->te_closing) {
4286 4352 tl_error_ack(tep->te_wq, mp, TSYSERR, ECONNRESET, T_ADDR_REQ);
4287 4353 return;
4288 4354 }
4289 4355
4290 4356 ASSERT(tep->te_state >= TS_IDLE);
4291 4357
4292 4358 ack_sz = sizeof (struct T_addr_ack);
4293 4359 ack_sz += T_ALIGN(tep->te_alen);
4294 4360 ack_sz += peer_tep->te_alen;
4295 4361
4296 4362 ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK);
4297 4363 if (ackmp == NULL) {
4298 4364 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4299 4365 "tl_connected_cots_addr_req: reallocb failed"));
4300 4366 tl_memrecover(tep->te_wq, mp, ack_sz);
4301 4367 return;
4302 4368 }
4303 4369
4304 4370 taa = (struct T_addr_ack *)ackmp->b_rptr;
4305 4371
4306 4372 /* endpoint is bound */
4307 4373 taa->LOCADDR_length = tep->te_alen;
4308 4374 taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
4309 4375
4310 4376 addr_startp = (uchar_t *)&taa[1];
4311 4377
4312 4378 bcopy(tep->te_abuf, addr_startp,
4313 4379 tep->te_alen);
4314 4380
4315 4381 taa->REMADDR_length = peer_tep->te_alen;
4316 4382 taa->REMADDR_offset = (t_scalar_t)T_ALIGN(taa->LOCADDR_offset +
4317 4383 taa->LOCADDR_length);
4318 4384 addr_startp = ackmp->b_rptr + taa->REMADDR_offset;
4319 4385 bcopy(peer_tep->te_abuf, addr_startp,
4320 4386 peer_tep->te_alen);
4321 4387 ackmp->b_wptr = (uchar_t *)ackmp->b_rptr +
4322 4388 taa->REMADDR_offset + peer_tep->te_alen;
4323 4389 ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
4324 4390
4325 4391 putnext(tep->te_rq, ackmp);
4326 4392 }
4327 4393
4328 4394 static void
4329 4395 tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep)
4330 4396 {
4331 4397 if (IS_CLTS(tep)) {
4332 4398 *ia = tl_clts_info_ack;
4333 4399 ia->TSDU_size = tl_tidusz; /* TSDU and TIDU size are same */
4334 4400 } else {
4335 4401 *ia = tl_cots_info_ack;
4336 4402 if (IS_COTSORD(tep))
4337 4403 ia->SERV_type = T_COTS_ORD;
4338 4404 }
4339 4405 ia->TIDU_size = tl_tidusz;
4340 4406 ia->CURRENT_state = tep->te_state;
4341 4407 }
4342 4408
4343 4409 /*
4344 4410 * This routine responds to T_CAPABILITY_REQ messages. It is called by
4345 4411 * tl_wput.
4346 4412 */
4347 4413 static void
4348 4414 tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
4349 4415 {
4350 4416 mblk_t *ackmp;
4351 4417 t_uscalar_t cap_bits1;
4352 4418 struct T_capability_ack *tcap;
4353 4419
4354 4420 if (tep->te_closing) {
4355 4421 freemsg(mp);
4356 4422 return;
4357 4423 }
4358 4424
4359 4425 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
4360 4426
4361 4427 ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
4362 4428 M_PCPROTO, T_CAPABILITY_ACK);
4363 4429 if (ackmp == NULL) {
4364 4430 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4365 4431 "tl_capability_req: reallocb failed"));
4366 4432 tl_memrecover(tep->te_wq, mp,
4367 4433 sizeof (struct T_capability_ack));
4368 4434 return;
4369 4435 }
4370 4436
4371 4437 tcap = (struct T_capability_ack *)ackmp->b_rptr;
4372 4438 tcap->CAP_bits1 = 0;
4373 4439
4374 4440 if (cap_bits1 & TC1_INFO) {
4375 4441 tl_copy_info(&tcap->INFO_ack, tep);
4376 4442 tcap->CAP_bits1 |= TC1_INFO;
4377 4443 }
4378 4444
4379 4445 if (cap_bits1 & TC1_ACCEPTOR_ID) {
4380 4446 tcap->ACCEPTOR_id = tep->te_acceptor_id;
4381 4447 tcap->CAP_bits1 |= TC1_ACCEPTOR_ID;
4382 4448 }
4383 4449
4384 4450 putnext(tep->te_rq, ackmp);
4385 4451 }
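
tl_capability_req() answers only the CAP_bits1 requests it understands
(TC1_INFO and TC1_ACCEPTOR_ID) and leaves every other bit clear in the
ack, so an unknown capability request is simply not acknowledged. The
negotiation reduces to the mask handling below; the CAP_* values are
invented:

#include <stdio.h>

#define	CAP_INFO	0x01	/* invented stand-in for TC1_INFO */
#define	CAP_ACCEPTOR	0x02	/* invented stand-in for TC1_ACCEPTOR_ID */
#define	CAP_UNKNOWN	0x80	/* a bit this provider never sets */

int
main(void)
{
	unsigned int req = CAP_INFO | CAP_UNKNOWN;
	unsigned int ack = 0;

	if (req & CAP_INFO)
		ack |= CAP_INFO;	/* would also fill the info payload */
	if (req & CAP_ACCEPTOR)
		ack |= CAP_ACCEPTOR;	/* would also set the acceptor id */

	/* CAP_UNKNOWN stays clear: unsupported requests are ignored. */
	printf("requested 0x%x, acked 0x%x\n", req, ack);
	return (0);
}
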
4386 4452
4387 4453 static void
4388 4454 tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep)
4389 4455 {
4390 4456 if (!tep->te_closing)
4391 4457 tl_info_req(mp, tep);
4392 4458 else
4393 4459 freemsg(mp);
4394 4460
4395 4461 tl_serializer_exit(tep);
4396 4462 tl_refrele(tep);
4397 4463 }
4398 4464
4399 4465 static void
4400 4466 tl_info_req(mblk_t *mp, tl_endpt_t *tep)
4401 4467 {
4402 4468 mblk_t *ackmp;
4403 4469
4404 4470 ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack),
4405 4471 M_PCPROTO, T_INFO_ACK);
4406 4472 if (ackmp == NULL) {
4407 4473 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4408 4474 "tl_info_req: reallocb failed"));
4409 4475 tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack));
4410 4476 return;
4411 4477 }
4412 4478
4413 4479 /*
4414 4480 * fill in T_INFO_ACK contents
4415 4481 */
4416 4482 tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep);
4417 4483
4418 4484 /*
4419 4485 * send ack message
4420 4486 */
4421 4487 putnext(tep->te_rq, ackmp);
4422 4488 }
4423 4489
4424 4490 /*
4425 4491 * Handle M_DATA, T_data_req and T_optdata_req.
4426 4492 * If this is a socket pass through T_optdata_req options unmodified.
4427 4493 */
4428 4494 static void
4429 4495 tl_data(mblk_t *mp, tl_endpt_t *tep)
4430 4496 {
4431 4497 queue_t *wq = tep->te_wq;
4432 4498 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4433 4499 ssize_t msz = MBLKL(mp);
4434 4500 tl_endpt_t *peer_tep;
4435 4501 queue_t *peer_rq;
4436 4502 boolean_t closing = tep->te_closing;
4437 4503
4438 4504 if (IS_CLTS(tep)) {
4439 4505 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4440 4506 SL_TRACE | SL_ERROR,
4441 4507 "tl_wput:clts:unattached M_DATA"));
4442 4508 if (!closing) {
4443 4509 tl_merror(wq, mp, EPROTO);
4444 4510 } else {
4445 4511 freemsg(mp);
4446 4512 }
4447 4513 return;
4448 4514 }
4449 4515
4450 4516 /*
4451 4517 * If the endpoint is closing it should still forward any data to the
4452 4518 * peer (if it has one). If it is not allowed to forward it can just
4453 4519 * free the message.
4454 4520 */
4455 4521 if (closing &&
4456 4522 (tep->te_state != TS_DATA_XFER) &&
4457 4523 (tep->te_state != TS_WREQ_ORDREL)) {
4458 4524 freemsg(mp);
4459 4525 return;
4460 4526 }
4461 4527
4462 4528 if (DB_TYPE(mp) == M_PROTO) {
4463 4529 if (prim->type == T_DATA_REQ &&
4464 4530 msz < sizeof (struct T_data_req)) {
4465 4531 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4466 4532 SL_TRACE | SL_ERROR,
4467 4533 "tl_data:T_DATA_REQ:invalid message"));
4468 4534 if (!closing) {
4469 4535 tl_merror(wq, mp, EPROTO);
4470 4536 } else {
4471 4537 freemsg(mp);
4472 4538 }
4473 4539 return;
4474 4540 } else if (prim->type == T_OPTDATA_REQ &&
4475 4541 (msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) {
4476 4542 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4477 4543 SL_TRACE | SL_ERROR,
4478 4544 "tl_data:T_OPTDATA_REQ:invalid message"));
4479 4545 if (!closing) {
4480 4546 tl_merror(wq, mp, EPROTO);
4481 4547 } else {
4482 4548 freemsg(mp);
4483 4549 }
4484 4550 return;
4485 4551 }
4486 4552 }
4487 4553
4488 4554 /*
4489 4555 * connection oriented provider
4490 4556 */
4491 4557 switch (tep->te_state) {
4492 4558 case TS_IDLE:
4493 4559 /*
4494 4560 * Other end not here - do nothing.
4495 4561 */
4496 4562 freemsg(mp);
4497 4563 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
4498 4564 "tl_data:cots with endpoint idle"));
4499 4565 return;
4500 4566
4501 4567 case TS_DATA_XFER:
4502 4568 /* valid states */
4503 4569 if (tep->te_conp != NULL)
4504 4570 break;
4505 4571
4506 4572 if (tep->te_oconp == NULL) {
4507 4573 if (!closing) {
4508 4574 tl_merror(wq, mp, EPROTO);
4509 4575 } else {
4510 4576 freemsg(mp);
4511 4577 }
4512 4578 return;
4513 4579 }
4514 4580 /*
4515 4581 * For a socket the T_CONN_CON is sent early thus
4516 4582 * the peer might not yet have accepted the connection.
4517 4583		 * If we are closing, queue the packet with the T_CONN_IND.
4518 4584 * Otherwise defer processing the packet until the peer
4519 4585 * accepts the connection.
4520 4586 * Note that the queue is noenabled when we go into this
4521 4587 * state.
4522 4588 */
4523 4589 if (!closing) {
4524 4590 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4525 4591 SL_TRACE | SL_ERROR,
4526 4592 "tl_data: ocon"));
4527 4593 TL_PUTBQ(tep, mp);
4528 4594 return;
4529 4595 }
4530 4596 if (DB_TYPE(mp) == M_PROTO) {
4531 4597 if (msz < sizeof (t_scalar_t)) {
4532 4598 freemsg(mp);
4533 4599 return;
4534 4600 }
4535 4601 /* reuse message block - just change REQ to IND */
4536 4602 if (prim->type == T_DATA_REQ)
4537 4603 prim->type = T_DATA_IND;
4538 4604 else
4539 4605 prim->type = T_OPTDATA_IND;
4540 4606 }
4541 4607 tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4542 4608 return;
4543 4609
4544 4610 case TS_WREQ_ORDREL:
4545 4611 if (tep->te_conp == NULL) {
4546 4612 /*
4547 4613 * Other end closed - generate discon_ind
4548 4614 * with reason 0 to cause an EPIPE but no
4549 4615 * read side error on AF_UNIX sockets.
4550 4616 */
4551 4617 freemsg(mp);
4552 4618 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4553 4619 SL_TRACE | SL_ERROR,
4554 4620 "tl_data: WREQ_ORDREL and no peer"));
4555 4621 tl_discon_ind(tep, 0);
4556 4622 return;
4557 4623 }
4558 4624 break;
4559 4625
4560 4626 default:
4561 4627 /* invalid state for event TE_DATA_REQ */
4562 4628 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4563 4629 "tl_data:cots:out of state"));
4564 4630 tl_merror(wq, mp, EPROTO);
4565 4631 return;
4566 4632 }
4567 4633 /*
4568 - * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
4634 + * tep->te_state = nextstate[TE_DATA_REQ][tep->te_state];
4569 4635 * (State stays same on this event)
4570 4636 */
4571 4637
4572 4638 /*
4573 4639 * get connected endpoint
4574 4640 */
4575 4641 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4576 4642 freemsg(mp);
4577 4643 /* Peer closed */
4578 4644 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4579 4645 "tl_data: peer gone"));
4580 4646 return;
4581 4647 }
4582 4648
4583 4649 ASSERT(tep->te_serializer == peer_tep->te_serializer);
4584 4650 peer_rq = peer_tep->te_rq;
4585 4651
4586 4652 /*
4587 4653 * Put it back if flow controlled
4588 4654	 * Note: Messages already on the queue when we are closing are bounded,
4589 4655 * so we can ignore flow control.
4590 4656 */
4591 4657 if (!canputnext(peer_rq) && !closing) {
4592 4658 TL_PUTBQ(tep, mp);
4593 4659 return;
4594 4660 }
4595 4661
4596 4662 /*
4597 4663 * validate peer state
4598 4664 */
4599 4665 switch (peer_tep->te_state) {
4600 4666 case TS_DATA_XFER:
4601 4667 case TS_WIND_ORDREL:
4602 4668 /* valid states */
4603 4669 break;
4604 4670 default:
4605 4671 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4606 4672 "tl_data:rx side:invalid state"));
4607 4673 tl_merror(peer_tep->te_wq, mp, EPROTO);
4608 4674 return;
4609 4675 }
4610 4676 if (DB_TYPE(mp) == M_PROTO) {
4611 4677 /* reuse message block - just change REQ to IND */
4612 4678 if (prim->type == T_DATA_REQ)
4613 4679 prim->type = T_DATA_IND;
4614 4680 else
4615 4681 prim->type = T_OPTDATA_IND;
4616 4682 }
4617 4683 /*
4618 - * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4684 + * peer_tep->te_state = nextstate[TE_DATA_IND][peer_tep->te_state];
4619 4685 * (peer state stays same on this event)
4620 4686 */
4621 4687 /*
4622 4688 * send data to connected peer
4623 4689 */
4624 4690 putnext(peer_rq, mp);
4625 4691 }
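
tl_data() forwards only when canputnext() succeeds on the peer's read
queue; if the peer is flow controlled the message goes back on the write
queue via TL_PUTBQ() instead of being dropped, and closing endpoints skip
the check because their backlog is bounded. A toy high-watermark loop
showing the forward-or-requeue decision; HIWAT, the depth counter, and
the message count are invented:

#include <stdio.h>
#include <stdbool.h>

#define	HIWAT	4	/* invented peer-queue high watermark */

static int peer_depth;

static bool
can_put_next(void)
{
	return (peer_depth < HIWAT);
}

int
main(void)
{
	int sent = 0, requeued = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (!can_put_next()) {
			requeued++;	/* the driver would TL_PUTBQ() here */
			continue;
		}
		peer_depth++;		/* the driver would putnext() here */
		sent++;
	}
	printf("sent %d, requeued %d\n", sent, requeued);
	return (0);
}
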
4626 4692
4627 4693
4628 4694
4629 4695 static void
4630 4696 tl_exdata(mblk_t *mp, tl_endpt_t *tep)
4631 4697 {
4632 4698 queue_t *wq = tep->te_wq;
4633 4699 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4634 4700 ssize_t msz = MBLKL(mp);
4635 4701 tl_endpt_t *peer_tep;
4636 4702 queue_t *peer_rq;
4637 4703 boolean_t closing = tep->te_closing;
4638 4704
4639 4705 if (msz < sizeof (struct T_exdata_req)) {
4640 4706 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4641 4707 "tl_exdata:invalid message"));
4642 4708 if (!closing) {
4643 4709 tl_merror(wq, mp, EPROTO);
4644 4710 } else {
4645 4711 freemsg(mp);
4646 4712 }
4647 4713 return;
4648 4714 }
4649 4715
4650 4716 /*
4651 4717 * If the endpoint is closing it should still forward any data to the
4652 4718 * peer (if it has one). If it is not allowed to forward it can just
4653 4719 * free the message.
4654 4720 */
4655 4721 if (closing &&
4656 4722 (tep->te_state != TS_DATA_XFER) &&
4657 4723 (tep->te_state != TS_WREQ_ORDREL)) {
4658 4724 freemsg(mp);
4659 4725 return;
4660 4726 }
4661 4727
4662 4728 /*
4663 4729 * validate state
4664 4730 */
4665 4731 switch (tep->te_state) {
4666 4732 case TS_IDLE:
4667 4733 /*
4668 4734 * Other end not here - do nothing.
4669 4735 */
4670 4736 freemsg(mp);
4671 4737 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
4672 4738 "tl_exdata:cots with endpoint idle"));
4673 4739 return;
4674 4740
4675 4741 case TS_DATA_XFER:
4676 4742 /* valid states */
4677 4743 if (tep->te_conp != NULL)
4678 4744 break;
4679 4745
4680 4746 if (tep->te_oconp == NULL) {
4681 4747 if (!closing) {
4682 4748 tl_merror(wq, mp, EPROTO);
4683 4749 } else {
4684 4750 freemsg(mp);
4685 4751 }
4686 4752 return;
4687 4753 }
4688 4754 /*
4689 4755 * For a socket the T_CONN_CON is sent early thus
4690 4756 * the peer might not yet have accepted the connection.
4691 4757		 * If we are closing, queue the packet with the T_CONN_IND.
4692 4758 * Otherwise defer processing the packet until the peer
4693 4759 * accepts the connection.
4694 4760 * Note that the queue is noenabled when we go into this
4695 4761 * state.
4696 4762 */
4697 4763 if (!closing) {
4698 4764 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4699 4765 SL_TRACE | SL_ERROR,
4700 4766 "tl_exdata: ocon"));
4701 4767 TL_PUTBQ(tep, mp);
4702 4768 return;
4703 4769 }
4704 4770 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4705 4771 "tl_exdata: closing socket ocon"));
4706 4772 prim->type = T_EXDATA_IND;
4707 4773 tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4708 4774 return;
4709 4775
4710 4776 case TS_WREQ_ORDREL:
4711 4777 if (tep->te_conp == NULL) {
4712 4778 /*
4713 4779 * Other end closed - generate discon_ind
4714 4780 * with reason 0 to cause an EPIPE but no
4715 4781 * read side error on AF_UNIX sockets.
4716 4782 */
4717 4783 freemsg(mp);
4718 4784 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4719 4785 SL_TRACE | SL_ERROR,
4720 4786 "tl_exdata: WREQ_ORDREL and no peer"));
4721 4787 tl_discon_ind(tep, 0);
4722 4788 return;
4723 4789 }
4724 4790 break;
4725 4791
4726 4792 default:
4727 4793 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4728 4794 SL_TRACE | SL_ERROR,
4729 4795 "tl_wput:T_EXDATA_REQ:out of state, state=%d",
4730 4796 tep->te_state));
4731 4797 tl_merror(wq, mp, EPROTO);
4732 4798 return;
4733 4799 }
4734 4800 /*
4735 - * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state);
4801 + * tep->te_state = nextstate[TE_EXDATA_REQ][tep->te_state];
4736 4802 * (state stays same on this event)
4737 4803 */
4738 4804
4739 4805 /*
4740 4806 * get connected endpoint
4741 4807 */
4742 4808 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4743 4809 freemsg(mp);
4744 4810 /* Peer closed */
4745 4811 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4746 4812 "tl_exdata: peer gone"));
4747 4813 return;
4748 4814 }
4749 4815
4750 4816 peer_rq = peer_tep->te_rq;
4751 4817
4752 4818 /*
4753 4819 * Put it back if flow controlled
4754 4820	 * Note: the number of messages already on the queue when we are
4755 4821	 * closing is bounded, so we can ignore flow control.
4756 4822 */
4757 4823 if (!canputnext(peer_rq) && !closing) {
4758 4824 TL_PUTBQ(tep, mp);
4759 4825 return;
4760 4826 }
4761 4827
4762 4828 /*
4763 4829 * validate state on peer
4764 4830 */
4765 4831 switch (peer_tep->te_state) {
4766 4832 case TS_DATA_XFER:
4767 4833 case TS_WIND_ORDREL:
4768 4834 /* valid states */
4769 4835 break;
4770 4836 default:
4771 4837 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4772 4838 "tl_exdata:rx side:invalid state"));
4773 4839 tl_merror(peer_tep->te_wq, mp, EPROTO);
4774 4840 return;
4775 4841 }
4776 4842 /*
4777 - * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4843 + * peer_tep->te_state = nextstate[TE_DATA_IND][peer_tep->te_state];
4778 4844 * (peer state stays same on this event)
4779 4845 */
4780 4846 /*
4781 4847 * reuse message block
4782 4848 */
4783 4849 prim->type = T_EXDATA_IND;
4784 4850
4785 4851 /*
4786 4852 * send data to connected peer
4787 4853 */
4788 4854 putnext(peer_rq, mp);
4789 4855 }
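
The hunks above belong to this changeset's mechanical conversion from the NEXTSTATE() macro to a direct lookup in a two-dimensional nextstate table. A minimal model of the two spellings, with a toy table standing in for the real TPI state matrix (sizes, types, and contents here are illustrative, not the tl.c definitions):

    #define	TE_NOEVENTS	2	/* toy sizes; the driver's table is larger */
    #define	TS_NOSTATES	3

    static const int nextstate[TE_NOEVENTS][TS_NOSTATES] = {
    	{ 0, 1, 2 },		/* event 0: state is unchanged */
    	{ 2, 2, 2 },		/* event 1: every state moves to state 2 */
    };

    /* Old spelling: the macro hides the indexing. */
    #define	NEXTSTATE(EV, ST)	nextstate[(EV)][(ST)]

    /* New spelling used throughout this diff: index the table directly. */
    int
    advance(int event, int state)
    {
    	return (nextstate[event][state]);
    }
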
4790 4856
4791 4857
4792 4858
4793 4859 static void
4794 4860 tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
4795 4861 {
4796 4862 queue_t *wq = tep->te_wq;
4797 4863 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4798 4864 ssize_t msz = MBLKL(mp);
4799 4865 tl_endpt_t *peer_tep;
4800 4866 queue_t *peer_rq;
4801 4867 boolean_t closing = tep->te_closing;
4802 4868
4803 4869 if (msz < sizeof (struct T_ordrel_req)) {
4804 4870 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4805 4871 "tl_ordrel:invalid message"));
4806 4872 if (!closing) {
4807 4873 tl_merror(wq, mp, EPROTO);
4808 4874 } else {
4809 4875 freemsg(mp);
4810 4876 }
4811 4877 return;
4812 4878 }
4813 4879
4814 4880 /*
4815 4881 * validate state
4816 4882 */
4817 4883 switch (tep->te_state) {
4818 4884 case TS_DATA_XFER:
4819 4885 case TS_WREQ_ORDREL:
4820 4886 /* valid states */
4821 4887 if (tep->te_conp != NULL)
4822 4888 break;
4823 4889
4824 4890 if (tep->te_oconp == NULL)
4825 4891 break;
4826 4892
4827 4893 /*
4828 4894	 * For a socket the T_CONN_CON is sent early, thus
4829 4895	 * the peer might not yet have accepted the connection.
4830 4896	 * If we are closing, queue the packet with the T_CONN_IND;
4831 4897	 * otherwise defer processing the packet until the peer
4832 4898	 * accepts the connection.
4833 4899 * Note that the queue is noenabled when we go into this
4834 4900 * state.
4835 4901 */
4836 4902 if (!closing) {
4837 4903 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4838 4904 SL_TRACE | SL_ERROR,
4839 4905	    "tl_ordrel: ocon"));
4840 4906 TL_PUTBQ(tep, mp);
4841 4907 return;
4842 4908 }
4843 4909 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4844 4910	    "tl_ordrel: closing socket ocon"));
4845 4911 prim->type = T_ORDREL_IND;
4846 4912 (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4847 4913 return;
4848 4914
4849 4915 default:
4850 4916 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4851 4917 SL_TRACE | SL_ERROR,
4852 4918 "tl_wput:T_ORDREL_REQ:out of state, state=%d",
4853 4919 tep->te_state));
4854 4920 if (!closing) {
4855 4921 tl_merror(wq, mp, EPROTO);
4856 4922 } else {
4857 4923 freemsg(mp);
4858 4924 }
4859 4925 return;
4860 4926 }
4861 - tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state);
4927 + tep->te_state = nextstate[TE_ORDREL_REQ][tep->te_state];
4862 4928
4863 4929 /*
4864 4930 * get connected endpoint
4865 4931 */
4866 4932 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4867 4933 /* Peer closed */
4868 4934 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4869 4935 "tl_ordrel: peer gone"));
4870 4936 freemsg(mp);
4871 4937 return;
4872 4938 }
4873 4939
4874 4940 peer_rq = peer_tep->te_rq;
4875 4941
4876 4942 /*
4877 4943 * Put it back if flow controlled except when we are closing.
4878 4944	 * Note: the number of messages already on the queue when we are
4879 4945	 * closing is bounded, so we can ignore flow control.
4880 4946 */
4881 4947 if (!canputnext(peer_rq) && !closing) {
4882 4948 TL_PUTBQ(tep, mp);
4883 4949 return;
4884 4950 }
4885 4951
4886 4952 /*
4887 4953 * validate state on peer
4888 4954 */
4889 4955 switch (peer_tep->te_state) {
4890 4956 case TS_DATA_XFER:
4891 4957 case TS_WIND_ORDREL:
4892 4958 /* valid states */
4893 4959 break;
4894 4960 default:
4895 4961 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4896 4962 "tl_ordrel:rx side:invalid state"));
4897 4963 tl_merror(peer_tep->te_wq, mp, EPROTO);
4898 4964 return;
4899 4965 }
4900 - peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
4966 + peer_tep->te_state = nextstate[TE_ORDREL_IND][peer_tep->te_state];
4901 4967
4902 4968 /*
4903 4969 * reuse message block
4904 4970 */
4905 4971 prim->type = T_ORDREL_IND;
4906 4972 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4907 4973 "tl_ordrel: send ordrel_ind"));
4908 4974
4909 4975 /*
4910 4976 * send data to connected peer
4911 4977 */
4912 4978 putnext(peer_rq, mp);
4913 4979 }
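
tl_ordrel() is the driver half of what a sockets program observes as shutdown(SHUT_WR) on an AF_UNIX stream socket, and it sits on the path involved in the write()-versus-shutdown race this change addresses. A rough userland sketch of the sequence (illustrative; error handling omitted):

    #include <sys/socket.h>
    #include <unistd.h>

    int
    main(void)
    {
    	int sv[2];
    	char c;

    	(void) socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
    	(void) write(sv[0], "x", 1);	 /* data message toward the peer */
    	(void) shutdown(sv[0], SHUT_WR); /* becomes a T_ORDREL_REQ */
    	(void) read(sv[1], &c, 1);	 /* returns the queued byte */
    	return ((int)read(sv[1], &c, 1)); /* then 0: ordrel became EOF */
    }
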
4914 4980
4915 4981
4916 4982 /*
4917 4983 * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space.
4918 4984 */
4919 4985 static void
4920 4986 tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
4921 4987 {
4922 4988 size_t err_sz;
4923 4989 tl_endpt_t *tep;
4924 4990 struct T_unitdata_req *udreq;
4925 4991 mblk_t *err_mp;
4926 4992 t_scalar_t alen;
4927 4993 t_scalar_t olen;
4928 4994 struct T_uderror_ind *uderr;
4929 4995 uchar_t *addr_startp;
4930 4996
4931 4997 err_sz = sizeof (struct T_uderror_ind);
4932 4998 tep = (tl_endpt_t *)wq->q_ptr;
4933 4999 udreq = (struct T_unitdata_req *)mp->b_rptr;
4934 5000 alen = udreq->DEST_length;
4935 5001 olen = udreq->OPT_length;
4936 5002
4937 5003 if (alen > 0)
4938 5004 err_sz = T_ALIGN(err_sz + alen);
4939 5005 if (olen > 0)
4940 5006 err_sz += olen;
4941 5007
4942 5008 err_mp = allocb(err_sz, BPRI_MED);
4943 5009 if (err_mp == NULL) {
4944 5010 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
4945 5011 "tl_uderr:allocb failure"));
4946 5012 /*
4947 5013 * Note: no rollback of state needed as it does
4948 5014 * not change in connectionless transport
4949 5015 */
4950 5016 tl_memrecover(wq, mp, err_sz);
4951 5017 return;
4952 5018 }
4953 5019
4954 5020 DB_TYPE(err_mp) = M_PROTO;
4955 5021 err_mp->b_wptr = err_mp->b_rptr + err_sz;
4956 5022 uderr = (struct T_uderror_ind *)err_mp->b_rptr;
4957 5023 uderr->PRIM_type = T_UDERROR_IND;
4958 5024 uderr->ERROR_type = err;
4959 5025 uderr->DEST_length = alen;
4960 5026 uderr->OPT_length = olen;
4961 5027 if (alen <= 0) {
4962 5028 uderr->DEST_offset = 0;
4963 5029 } else {
4964 5030 uderr->DEST_offset =
4965 5031 (t_scalar_t)sizeof (struct T_uderror_ind);
4966 5032 addr_startp = mp->b_rptr + udreq->DEST_offset;
4967 5033 bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
4968 5034 (size_t)alen);
4969 5035 }
4970 5036 if (olen <= 0) {
4971 5037 uderr->OPT_offset = 0;
4972 5038 } else {
4973 5039 uderr->OPT_offset =
4974 5040 (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
4975 5041 uderr->DEST_length);
4976 5042 addr_startp = mp->b_rptr + udreq->OPT_offset;
4977 5043 bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
4978 5044 (size_t)olen);
4979 5045 }
4980 5046 freemsg(mp);
4981 5047
4982 5048 /*
4983 5049 * send indication message
4984 5050 */
4985 - tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state);
5051 + tep->te_state = nextstate[TE_UDERROR_IND][tep->te_state];
4986 5052
4987 5053 qreply(wq, err_mp);
4988 5054 }
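
A worked instance of the err_sz arithmetic above, assuming T_ALIGN() rounds up to a 4-byte t_scalar_t boundary and a hypothetical 24-byte struct T_uderror_ind (the real size is ABI-dependent): with alen = 10 and olen = 8,

    err_sz      = T_ALIGN(24 + 10) + 8 = 36 + 8 = 44
    DEST_offset = 24
    OPT_offset  = T_ALIGN(24 + 10) = 36

so the destination address is padded to keep the options t_scalar_t-aligned, mirroring how OPT_offset is computed when the message is filled in.
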
4989 5055
4990 5056 static void
4991 5057 tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
4992 5058 {
4993 5059 queue_t *wq = tep->te_wq;
4994 5060
4995 5061 if (!tep->te_closing && (wq->q_first != NULL)) {
4996 5062 TL_PUTQ(tep, mp);
4997 5063 } else {
4998 5064 if (tep->te_rq != NULL)
4999 5065 tl_unitdata(mp, tep);
5000 5066 else
5001 5067 freemsg(mp);
5002 5068 }
5003 5069
5004 5070 tl_serializer_exit(tep);
5005 5071 tl_refrele(tep);
5006 5072 }
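
tl_unitdata_ser() relies on a dispatch contract: the caller takes a reference and enters the serializer before the handler runs, so every handler exit path must pair tl_serializer_exit() with tl_refrele(), as the function above does on all branches. An illustrative model of that contract (names and shapes here are assumptions for illustration, not tl.c code):

    typedef struct ep ep_t;
    typedef void (*handler_t)(void *msg, ep_t *ep);

    extern void ref_hold(ep_t *);
    extern void ser_enter(ep_t *, handler_t, void *);

    /* Dispatcher side: hold a reference, run handler behind serializer. */
    void
    dispatch(ep_t *ep, handler_t h, void *msg)
    {
    	ref_hold(ep);
    	ser_enter(ep, h, msg);	/* handler must exit + release */
    }
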
5007 5073
5008 5074 /*
5009 5075 * Handle T_unitdata_req.
5010 5076 * If TL_SET[U]CRED or TL_SOCKUCRED generate the credentials options.
5011 5077 * If this is a socket pass through options unmodified.
5012 5078 */
5013 5079 static void
5014 5080 tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
5015 5081 {
5016 5082 queue_t *wq = tep->te_wq;
5017 5083 soux_addr_t ux_addr;
5018 5084 tl_addr_t destaddr;
5019 5085 uchar_t *addr_startp;
5020 5086 tl_endpt_t *peer_tep;
5021 5087 struct T_unitdata_ind *udind;
5022 5088 struct T_unitdata_req *udreq;
5023 5089 ssize_t msz, ui_sz, reuse_mb_sz;
5024 5090 t_scalar_t alen, aoff, olen, ooff;
5025 5091 t_scalar_t oldolen = 0;
5026 5092 cred_t *cr = NULL;
5027 5093 pid_t cpid;
5028 5094
5029 5095 udreq = (struct T_unitdata_req *)mp->b_rptr;
5030 5096 msz = MBLKL(mp);
5031 5097
5032 5098 /*
5033 5099 * validate the state
5034 5100 */
5035 5101 if (tep->te_state != TS_IDLE) {
5036 5102 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5037 5103 SL_TRACE | SL_ERROR,
5038 5104	    "tl_unitdata:T_UNITDATA_REQ:out of state"));
5039 5105 tl_merror(wq, mp, EPROTO);
5040 5106 return;
5041 5107 }
5042 5108 /*
5043 - * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state);
5109 + * tep->te_state = nextstate[TE_UNITDATA_REQ][tep->te_state];
5044 5110 * (state does not change on this event)
5045 5111 */
5046 5112
5047 5113 /*
5048 5114 * validate the message
5049 5115 * Note: dereference fields in struct inside message only
5050 5116 * after validating the message length.
5051 5117 */
5052 5118 if (msz < sizeof (struct T_unitdata_req)) {
5053 5119 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
5054 5120 "tl_unitdata:invalid message length"));
5055 5121 tl_merror(wq, mp, EINVAL);
5056 5122 return;
5057 5123 }
5058 5124 alen = udreq->DEST_length;
5059 5125 aoff = udreq->DEST_offset;
5060 5126 oldolen = olen = udreq->OPT_length;
5061 5127 ooff = udreq->OPT_offset;
5062 5128 if (olen == 0)
5063 5129 ooff = 0;
5064 5130
5065 5131 if (IS_SOCKET(tep)) {
5066 5132 if ((alen != TL_SOUX_ADDRLEN) ||
5067 5133 (aoff < 0) ||
5068 5134 (aoff + alen > msz) ||
5069 5135 (olen < 0) || (ooff < 0) ||
5070 5136 ((olen > 0) && ((ooff + olen) > msz))) {
5071 5137 (void) (STRLOG(TL_ID, tep->te_minor,
5072 5138 1, SL_TRACE | SL_ERROR,
5073 5139 "tl_unitdata_req: invalid socket addr "
5074 5140 "(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)",
5075 5141 (int)msz, alen, aoff, olen, ooff));
5076 5142 tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
5077 5143 return;
5078 5144 }
5079 5145 bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
5080 5146
5081 5147 if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
5082 5148 (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
5083 5149 (void) (STRLOG(TL_ID, tep->te_minor,
5084 5150 1, SL_TRACE | SL_ERROR,
5085 5151	    "tl_unitdata: invalid socket magic"));
5086 5152 tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
5087 5153 return;
5088 5154 }
5089 5155 } else {
5090 5156 if ((alen < 0) ||
5091 5157 (aoff < 0) ||
5092 5158 ((alen > 0) && ((aoff + alen) > msz)) ||
5093 5159 ((ssize_t)alen > (msz - sizeof (struct T_unitdata_req))) ||
5094 5160 ((aoff + alen) < 0) ||
5095 5161 ((olen > 0) && ((ooff + olen) > msz)) ||
5096 5162 (olen < 0) ||
5097 5163 (ooff < 0) ||
5098 5164 ((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) {
5099 5165 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5100 5166 SL_TRACE | SL_ERROR,
5101 5167 "tl_unitdata:invalid unit data message"));
5102 5168 tl_merror(wq, mp, EINVAL);
5103 5169 return;
5104 5170 }
5105 5171 }
5106 5172
5107 5173 /* Options not supported unless it's a socket */
5108 5174 if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) {
5109 5175 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
5110 5176 "tl_unitdata:option use(unsupported) or zero len addr"));
5111 5177 tl_uderr(wq, mp, EPROTO);
5112 5178 return;
5113 5179 }
5114 5180 #ifdef DEBUG
5115 5181 /*
5116 5182 * Mild form of ASSERT()ion to detect broken TPI apps.
5117 5183 * if (!assertion)
5118 5184 * log warning;
5119 5185 */
5120 5186 if (!(aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) {
5121 5187 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
5122 5188 "tl_unitdata:addr overlaps TPI message"));
5123 5189 }
5124 5190 #endif
5125 5191 /*
5126 5192 * get destination endpoint
5127 5193 */
5128 5194 destaddr.ta_alen = alen;
5129 5195 destaddr.ta_abuf = mp->b_rptr + aoff;
5130 5196 destaddr.ta_zoneid = tep->te_zoneid;
5131 5197
5132 5198 /*
5133 5199 * Check whether the destination is the same that was used previously
5134 5200 * and the destination endpoint is in the right state. If something is
5135 5201 * wrong, find destination again and cache it.
5136 5202 */
5137 5203 peer_tep = tep->te_lastep;
5138 5204
5139 5205 if ((peer_tep == NULL) || peer_tep->te_closing ||
5140 5206 (peer_tep->te_state != TS_IDLE) ||
5141 5207 !tl_eqaddr(&destaddr, &peer_tep->te_ap)) {
5142 5208 /*
5143 5209	 * Not the same as the cached destination; need to find the right
5144 5210 * destination.
5145 5211 */
5146 5212 peer_tep = (IS_SOCKET(tep) ?
5147 5213 tl_sock_find_peer(tep, &ux_addr) :
5148 5214 tl_find_peer(tep, &destaddr));
5149 5215
5150 5216 if (peer_tep == NULL) {
5151 5217 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5152 5218 SL_TRACE | SL_ERROR,
5153 5219 "tl_unitdata:no one at destination address"));
5154 5220 tl_uderr(wq, mp, ECONNRESET);
5155 5221 return;
5156 5222 }
5157 5223
5158 5224 /*
5159 5225 * Cache the new peer.
5160 5226 */
5161 5227 if (tep->te_lastep != NULL)
5162 5228 tl_refrele(tep->te_lastep);
5163 5229
5164 5230 tep->te_lastep = peer_tep;
5165 5231 }
5166 5232
5167 5233 if (peer_tep->te_state != TS_IDLE) {
5168 5234 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
5169 5235 "tl_unitdata:provider in invalid state"));
5170 5236 tl_uderr(wq, mp, EPROTO);
5171 5237 return;
5172 5238 }
5173 5239
5174 5240 ASSERT(peer_tep->te_rq != NULL);
5175 5241
5176 5242 /*
5177 5243 * Put it back if flow controlled except when we are closing.
5178 5244	 * Note: the number of messages already on the queue when we are
5179 5245	 * closing is bounded, so we can ignore flow control.
5180 5246 */
5181 5247 if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) {
5182 5248 /* record what we are flow controlled on */
5183 5249 if (tep->te_flowq != NULL) {
5184 5250 list_remove(&tep->te_flowq->te_flowlist, tep);
5185 5251 }
5186 5252 list_insert_head(&peer_tep->te_flowlist, tep);
5187 5253 tep->te_flowq = peer_tep;
5188 5254 TL_PUTBQ(tep, mp);
5189 5255 return;
5190 5256 }
5191 5257 /*
5192 5258 * prepare indication message
5193 5259 */
5194 5260
5195 5261 /*
5196 5262 * calculate length of message
5197 5263 */
5198 5264 if (peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
5199 5265 cr = msg_getcred(mp, &cpid);
5200 5266 ASSERT(cr != NULL);
5201 5267
5202 5268 if (peer_tep->te_flag & TL_SETCRED) {
5203 5269 ASSERT(olen == 0);
5204 5270 olen = (t_scalar_t)sizeof (struct opthdr) +
5205 5271 OPTLEN(sizeof (tl_credopt_t));
5206 5272 /* 1 option only */
5207 5273 } else if (peer_tep->te_flag & TL_SETUCRED) {
5208 5274 ASSERT(olen == 0);
5209 5275 olen = (t_scalar_t)sizeof (struct opthdr) +
5210 5276 OPTLEN(ucredminsize(cr));
5211 5277 /* 1 option only */
5212 5278 } else {
5213 5279 /* Possibly more than one option */
5214 5280 olen += (t_scalar_t)sizeof (struct T_opthdr) +
5215 5281 OPTLEN(ucredminsize(cr));
5216 5282 }
5217 5283 }
5218 5284
5219 5285 ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) + olen;
5220 5286 reuse_mb_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + alen) + olen;
5221 5287
5222 5288 /*
5223 5289 * If the unitdata_ind fits and we are not adding options
5224 5290 * reuse the udreq mblk.
5225 5291 *
5226 5292 * Otherwise, it is possible we need to append an option if one of the
5227 5293 * te_flag bits is set. This requires extra space in the data block for
5228 5294 * the additional option but the traditional technique used below to
5229 5295 * allocate a new block and copy into it will not work when there is a
5230 5296 * message block with a free pointer (since we don't know anything
5231 5297 * about the layout of the data, pointers referencing or within the
5232 5298 * data, etc.). To handle this possibility the upper layers may have
5233 5299 * preallocated some space to use for appending an option. We check the
5234 5300 * overall mblock size against the size we need ('reuse_mb_sz' with the
5235 5301 * original address length [alen] to ensure we won't overrun the
5236 5302 * current mblk data size) to see if there is free space and thus
5237 5303 * avoid allocating a new message block.
5238 5304 */
5239 5305 if (msz >= ui_sz && alen >= tep->te_alen &&
5240 5306 !(peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED))) {
5241 5307 /*
5242 5308 * Reuse the original mblk. Leave options in place.
5243 5309 */
5244 5310 udind = (struct T_unitdata_ind *)mp->b_rptr;
5245 5311 udind->PRIM_type = T_UNITDATA_IND;
5246 5312 udind->SRC_length = tep->te_alen;
5247 5313 addr_startp = mp->b_rptr + udind->SRC_offset;
5248 5314 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5249 5315
5250 5316 } else if (MBLKSIZE(mp) >= reuse_mb_sz && alen >= tep->te_alen &&
5251 5317 mp->b_datap->db_frtnp != NULL) {
5252 5318 /*
5253 5319 * We have a message block with a free pointer, but extra space
5254 5320 * has been pre-allocated for us in case we need to append an
5255 5321 * option. Reuse the original mblk, leaving existing options in
5256 5322 * place.
5257 5323 */
5258 5324 udind = (struct T_unitdata_ind *)mp->b_rptr;
5259 5325 udind->PRIM_type = T_UNITDATA_IND;
5260 5326 udind->SRC_length = tep->te_alen;
5261 5327 addr_startp = mp->b_rptr + udind->SRC_offset;
5262 5328 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5263 5329
5264 5330 if (peer_tep->te_flag &
5265 5331 (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
5266 5332 ASSERT(cr != NULL);
5267 5333 /*
5268 5334 * We're appending one new option here after the
5269 5335 * original ones.
5270 5336 */
5271 5337 tl_fill_option(mp->b_rptr + udind->OPT_offset + oldolen,
5272 5338 cr, cpid, peer_tep->te_flag, peer_tep->te_credp);
5273 5339 }
5274 5340
5275 5341 } else if (mp->b_datap->db_frtnp != NULL) {
5276 5342 /*
5277 5343 * The next block creates a new mp and tries to copy the data
5278 5344 * block into it, but that cannot handle a message with a free
5279 5345 * pointer (for more details see the comment in kstrputmsg()
5280 5346 * where dupmsg() is called). Since we can never properly
5281 5347 * duplicate the mp while also extending the data, just error
5282 5348 * out now.
5283 5349 */
5284 5350 tl_uderr(wq, mp, EPROTO);
5285 5351 return;
5286 5352 } else {
5287 5353 /* Allocate a new T_unitdata_ind message */
5288 5354 mblk_t *ui_mp;
5289 5355
5290 5356 ui_mp = allocb(ui_sz, BPRI_MED);
5291 5357 if (ui_mp == NULL) {
5292 5358 (void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE,
5293 5359 "tl_unitdata:allocb failure:message queued"));
5294 5360 tl_memrecover(wq, mp, ui_sz);
5295 5361 return;
5296 5362 }
5297 5363
5298 5364 /*
5299 5365 * fill in T_UNITDATA_IND contents
5300 5366 */
5301 5367 DB_TYPE(ui_mp) = M_PROTO;
5302 5368 ui_mp->b_wptr = ui_mp->b_rptr + ui_sz;
5303 5369 udind = (struct T_unitdata_ind *)ui_mp->b_rptr;
5304 5370 udind->PRIM_type = T_UNITDATA_IND;
5305 5371 udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind);
5306 5372 udind->SRC_length = tep->te_alen;
5307 5373 addr_startp = ui_mp->b_rptr + udind->SRC_offset;
5308 5374 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5309 5375 udind->OPT_offset =
5310 5376 (t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length);
5311 5377 udind->OPT_length = olen;
5312 5378 if (peer_tep->te_flag &
5313 5379 (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
5314 5380
5315 5381 if (oldolen != 0) {
5316 5382 bcopy((void *)((uintptr_t)udreq + ooff),
5317 5383 (void *)((uintptr_t)udind +
5318 5384 udind->OPT_offset),
5319 5385 oldolen);
5320 5386 }
5321 5387 ASSERT(cr != NULL);
5322 5388
5323 5389 tl_fill_option(ui_mp->b_rptr + udind->OPT_offset +
5324 5390 oldolen, cr, cpid,
5325 5391 peer_tep->te_flag, peer_tep->te_credp);
5326 5392 } else {
5327 5393 bcopy((void *)((uintptr_t)udreq + ooff),
5328 5394 (void *)((uintptr_t)udind + udind->OPT_offset),
5329 5395 olen);
5330 5396 }
5331 5397
5332 5398 /*
5333 5399 * relink data blocks from mp to ui_mp
5334 5400 */
5335 5401 ui_mp->b_cont = mp->b_cont;
5336 5402 freeb(mp);
5337 5403 mp = ui_mp;
5338 5404 }
5339 5405 /*
5340 5406 * send indication message
5341 5407 */
5342 - peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state);
5408 + peer_tep->te_state = nextstate[TE_UNITDATA_IND][peer_tep->te_state];
5343 5409 putnext(peer_tep->te_rq, mp);
5344 5410 }
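
The delivery path above picks one of four outcomes. A condensed model with the tests reduced to booleans (all names here are illustrative stand-ins, not tl.c identifiers):

    #include <stdbool.h>

    /* Returns which of the four T_unitdata_ind strategies applies. */
    static int
    unitdata_strategy(bool msg_fits, bool addr_fits, bool add_cred_opt,
        bool has_free_rtn, bool slack_fits)
    {
    	if (msg_fits && addr_fits && !add_cred_opt)
    		return (1);	/* rewrite the request in place */
    	if (slack_fits && addr_fits && has_free_rtn)
    		return (2);	/* reuse block, append cred option in slack */
    	if (has_free_rtn)
    		return (3);	/* uncopyable free-routine block: EPROTO */
    	return (4);		/* allocb() a fresh indication, relink b_cont */
    }
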
5345 5411
5346 5412
5347 5413
5348 5414 /*
5349 5415 * Check if a given addr is in use.
5350 5416 * Endpoint ptr returned or NULL if not found.
5351 5417 * The name space is separate for each mode. This implies that
5352 5418 * sockets get their own name space.
5353 5419 */
5354 5420 static tl_endpt_t *
5355 5421 tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
5356 5422 {
5357 5423 tl_endpt_t *peer_tep = NULL;
5358 5424 int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
5359 5425 (mod_hash_val_t *)&peer_tep, tl_find_callback);
5360 5426
5361 5427 ASSERT(!IS_SOCKET(tep));
5362 5428
5363 5429 ASSERT(ap != NULL && ap->ta_alen > 0);
5364 5430 ASSERT(ap->ta_zoneid == tep->te_zoneid);
5365 5431 ASSERT(ap->ta_abuf != NULL);
5366 5432 EQUIV(rc == 0, peer_tep != NULL);
5367 5433 IMPLY(rc == 0,
5368 5434 (tep->te_zoneid == peer_tep->te_zoneid) &&
5369 5435 (tep->te_transport == peer_tep->te_transport));
5370 5436
5371 5437 if ((rc == 0) && (peer_tep->te_closing)) {
5372 5438 tl_refrele(peer_tep);
5373 5439 peer_tep = NULL;
5374 5440 }
5375 5441
5376 5442 return (peer_tep);
5377 5443 }
5378 5444
5379 5445 /*
5380 5446 * Find peer for a socket based on unix domain address.
5381 5447	 * For implicit addresses our peer can be found by minor number in the
5382 5448	 * ai hash. For explicit binds we look up the vnode address in the addr hash.
5383 5449 */
5384 5450 static tl_endpt_t *
5385 5451 tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr)
5386 5452 {
5387 5453 tl_endpt_t *peer_tep = NULL;
5388 5454 mod_hash_t *hash = ux_addr->soua_magic == SOU_MAGIC_IMPLICIT ?
5389 5455 tep->te_aihash : tep->te_addrhash;
5390 5456 int rc = mod_hash_find_cb(hash, (mod_hash_key_t)ux_addr->soua_vp,
5391 5457 (mod_hash_val_t *)&peer_tep, tl_find_callback);
5392 5458
5393 5459 ASSERT(IS_SOCKET(tep));
5394 5460 EQUIV(rc == 0, peer_tep != NULL);
5395 5461 IMPLY(rc == 0, (tep->te_transport == peer_tep->te_transport));
5396 5462
5397 5463 if (peer_tep != NULL) {
5398 5464 /* Don't attempt to use closing peer. */
5399 5465 if (peer_tep->te_closing)
5400 5466 goto errout;
5401 5467
5402 5468 /*
5403 5469 * Cross-zone unix sockets are permitted, but for Trusted
5404 5470 * Extensions only, the "server" for these must be in the
5405 5471 * global zone.
5406 5472 */
5407 5473 if ((peer_tep->te_zoneid != tep->te_zoneid) &&
5408 5474 is_system_labeled() &&
5409 5475 (peer_tep->te_zoneid != GLOBAL_ZONEID))
5410 5476 goto errout;
5411 5477 }
5412 5478
5413 5479 return (peer_tep);
5414 5480
5415 5481 errout:
5416 5482 tl_refrele(peer_tep);
5417 5483 return (NULL);
5418 5484 }
5419 5485
5420 5486 /*
5421 5487	 * Generate a free addr and return it in the struct pointed to by req,
5422 5488	 * allocating space for the address buffer.
5423 5489	 * The generated address will be at least 4 bytes long and, if req->ta_alen
5424 5490	 * exceeds 4 bytes, be req->ta_alen bytes long.
5425 5491	 *
5426 5492	 * If an address is found it will be inserted in the hash.
5427 5493 *
5428 5494 * If req->ta_alen is larger than the default alen (4 bytes) the last
5429 5495 * alen-4 bytes will always be the same as in req.
5430 5496 *
5431 5497 * Return 0 for failure.
5432 5498 * Return non-zero for success.
5433 5499 */
5434 5500 static boolean_t
5435 5501 tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
5436 5502 {
5437 5503 t_scalar_t alen;
5438 5504 uint32_t loopcnt; /* Limit loop to 2^32 */
5439 5505
5440 5506 ASSERT(tep->te_hash_hndl != NULL);
5441 5507 ASSERT(!IS_SOCKET(tep));
5442 5508
5443 5509 if (tep->te_hash_hndl == NULL)
5444 5510 return (B_FALSE);
5445 5511
5446 5512 /*
5447 5513 * check if default addr is in use
5448 5514 * if it is - bump it and try again
5449 5515 */
5450 5516 if (req == NULL) {
5451 5517 alen = sizeof (uint32_t);
5452 5518 } else {
5453 5519 alen = max(req->ta_alen, sizeof (uint32_t));
5454 5520 ASSERT(tep->te_zoneid == req->ta_zoneid);
5455 5521 }
5456 5522
5457 5523 if (tep->te_alen < alen) {
5458 5524 void *abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
5459 5525
5460 5526 /*
5461 5527 * Not enough space in tep->ta_ap to hold the address,
5462 5528 * allocate a bigger space.
5463 5529 */
5464 5530 if (abuf == NULL)
5465 5531 return (B_FALSE);
5466 5532
5467 5533 if (tep->te_alen > 0)
5468 5534 kmem_free(tep->te_abuf, tep->te_alen);
5469 5535
5470 5536 tep->te_alen = alen;
5471 5537 tep->te_abuf = abuf;
5472 5538 }
5473 5539
5474 5540 /* Copy in the address in req */
5475 5541 if (req != NULL) {
5476 5542 ASSERT(alen >= req->ta_alen);
5477 5543 bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen);
5478 5544 }
5479 5545
5480 5546 /*
5481 5547 * First try minor number then try default addresses.
5482 5548 */
5483 5549 bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t));
5484 5550
5485 5551 for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) {
5486 5552 if (mod_hash_insert_reserve(tep->te_addrhash,
5487 5553 (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
5488 5554 tep->te_hash_hndl) == 0) {
5489 5555 /*
5490 5556 * found free address
5491 5557 */
5492 5558 tep->te_flag |= TL_ADDRHASHED;
5493 5559 tep->te_hash_hndl = NULL;
5494 5560
5495 5561 return (B_TRUE); /* successful return */
5496 5562 }
5497 5563 /*
5498 5564 * Use default address.
5499 5565 */
5500 5566 bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t));
5501 5567 atomic_inc_32(&tep->te_defaddr);
5502 5568 }
5503 5569
5504 5570 /*
5505 5571 * Failed to find anything.
5506 5572 */
5507 5573 (void) (STRLOG(TL_ID, -1, 1, SL_ERROR,
5508 5574 "tl_get_any_addr:looped 2^32 times"));
5509 5575 return (B_FALSE);
5510 5576 }
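
A compact model of the search loop above: the first candidate address is the endpoint's minor number, after which a shared counter supplies candidates until an insert succeeds or 2^32 attempts have been made. try_insert() stands in for mod_hash_insert_reserve() (illustrative, not tl.c code):

    #include <stdbool.h>
    #include <stdint.h>

    extern bool try_insert(uint32_t addr);	/* stand-in for the addr hash */
    extern uint32_t defaddr;		/* shared counter (te_defaddr) */

    bool
    get_any_addr(uint32_t minor)
    {
    	uint32_t addr = minor;		/* first try the minor number */
    	uint32_t loopcnt;

    	for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) {
    		if (try_insert(addr))
    			return (true);
    		addr = defaddr++;	/* atomic_inc_32 in the driver */
    	}
    	return (false);			/* looped 2^32 times */
    }
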
5511 5577
5512 5578 /*
5513 5579 * reallocb + set r/w ptrs to reflect size.
5514 5580 */
5515 5581 static mblk_t *
5516 5582 tl_resizemp(mblk_t *mp, ssize_t new_size)
5517 5583 {
5518 5584 if ((mp = reallocb(mp, new_size, 0)) == NULL)
5519 5585 return (NULL);
5520 5586
5521 5587 mp->b_rptr = DB_BASE(mp);
5522 5588 mp->b_wptr = mp->b_rptr + new_size;
5523 5589 return (mp);
5524 5590 }
5525 5591
5526 5592 static void
5527 5593 tl_cl_backenable(tl_endpt_t *tep)
5528 5594 {
5529 5595 list_t *l = &tep->te_flowlist;
5530 5596 tl_endpt_t *elp;
5531 5597
5532 5598 ASSERT(IS_CLTS(tep));
5533 5599
5534 5600 for (elp = list_head(l); elp != NULL; elp = list_head(l)) {
5535 5601 ASSERT(tep->te_ser == elp->te_ser);
5536 5602 ASSERT(elp->te_flowq == tep);
5537 5603 if (!elp->te_closing)
5538 5604 TL_QENABLE(elp);
5539 5605 elp->te_flowq = NULL;
5540 5606 list_remove(l, elp);
5541 5607 }
5542 5608 }
5543 5609
5544 5610 /*
5545 5611 * Unconnect endpoints.
5546 5612 */
5547 5613 static void
5548 5614 tl_co_unconnect(tl_endpt_t *tep)
5549 5615 {
5550 5616 tl_endpt_t *peer_tep = tep->te_conp;
5551 5617 tl_endpt_t *srv_tep = tep->te_oconp;
5552 5618 list_t *l;
5553 5619 tl_icon_t *tip;
5554 5620 tl_endpt_t *cl_tep;
5555 5621 mblk_t *d_mp;
5556 5622
5557 5623 ASSERT(IS_COTS(tep));
5558 5624 /*
5559 5625 * If our peer is closing, don't use it.
5560 5626 */
5561 5627 if ((peer_tep != NULL) && peer_tep->te_closing) {
5562 5628 TL_UNCONNECT(tep->te_conp);
5563 5629 peer_tep = NULL;
5564 5630 }
5565 5631 if ((srv_tep != NULL) && srv_tep->te_closing) {
5566 5632 TL_UNCONNECT(tep->te_oconp);
5567 5633 srv_tep = NULL;
5568 5634 }
5569 5635
5570 5636 if (tep->te_nicon > 0) {
5571 5637 l = &tep->te_iconp;
5572 5638 /*
5573 5639	 * If incoming requests are pending, change the state
5574 5640	 * of the clients on the disconnect ind event and send a
5575 5641	 * discon_ind pdu to the modules above them;
5576 5642	 * for a server, all clients get disconnected.
5577 5643 */
5578 5644
5579 5645 while (tep->te_nicon > 0) {
5580 5646 tip = list_head(l);
5581 5647 cl_tep = tip->ti_tep;
5582 5648
5583 5649 if (cl_tep == NULL) {
5584 5650 tl_freetip(tep, tip);
5585 5651 continue;
5586 5652 }
5587 5653
5588 5654 if (cl_tep->te_oconp != NULL) {
5589 5655 ASSERT(cl_tep != cl_tep->te_oconp);
5590 5656 TL_UNCONNECT(cl_tep->te_oconp);
5591 5657 }
5592 5658
5593 5659 if (cl_tep->te_closing) {
5594 5660 tl_freetip(tep, tip);
5595 5661 continue;
5596 5662 }
5597 5663
5598 5664 enableok(cl_tep->te_wq);
5599 5665 TL_QENABLE(cl_tep);
5600 5666 d_mp = tl_discon_ind_alloc(ECONNREFUSED, BADSEQNUM);
5601 5667 if (d_mp != NULL) {
5602 5668 cl_tep->te_state = TS_IDLE;
5603 5669 putnext(cl_tep->te_rq, d_mp);
5604 5670 } else {
5605 5671 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5606 5672 SL_TRACE | SL_ERROR,
5607 5673 "tl_co_unconnect:icmng: "
5608 5674 "allocb failure"));
5609 5675 }
5610 5676 tl_freetip(tep, tip);
5611 5677 }
5612 5678 } else if (srv_tep != NULL) {
5613 5679 /*
5614 5680	 * If an outgoing request is pending, change the state
5615 5681	 * of the server on the discon ind event.
5616 5682 */
5617 5683
5618 5684 if (IS_SOCKET(tep) && !tl_disable_early_connect &&
5619 5685 IS_COTSORD(srv_tep) &&
5620 5686 !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) {
5621 5687 /*
5622 5688 * Queue ordrel_ind for server to be picked up
5623 5689 * when the connection is accepted.
5624 5690 */
5625 5691 d_mp = tl_ordrel_ind_alloc();
5626 5692 } else {
5627 5693 /*
5628 5694 * send discon_ind to server
5629 5695 */
5630 5696 d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno);
5631 5697 }
5632 5698 if (d_mp == NULL) {
5633 5699 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5634 5700 SL_TRACE | SL_ERROR,
5635 5701 "tl_co_unconnect:outgoing:allocb failure"));
5636 5702 TL_UNCONNECT(tep->te_oconp);
5637 5703 goto discon_peer;
5638 5704 }
5639 5705
5640 5706 /*
5641 5707 * If this is a socket the T_DISCON_IND is queued with
5642 5708 * the T_CONN_IND. Otherwise the T_CONN_IND is removed
5643 5709 * from the list of pending connections.
5644 5710 * Note that when te_oconp is set the peer better have
5645 5711 * a t_connind_t for the client.
5646 5712 */
5647 5713 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
5648 5714 /*
5649 5715 * Queue the disconnection message.
5650 5716 */
5651 5717 tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
5652 5718 } else {
5653 5719 tip = tl_icon_find(srv_tep, tep->te_seqno);
5654 5720 if (tip == NULL) {
5655 5721 freemsg(d_mp);
5656 5722 } else {
5657 5723 ASSERT(tep == tip->ti_tep);
5658 5724 ASSERT(tep->te_ser == srv_tep->te_ser);
5659 5725 /*
5660 5726 * Delete tip from the server list.
5661 5727 */
5662 5728 if (srv_tep->te_nicon == 1) {
5663 5729 srv_tep->te_state =
5664 - NEXTSTATE(TE_DISCON_IND2,
5665 - srv_tep->te_state);
5730 + nextstate[TE_DISCON_IND2]
5731 + [srv_tep->te_state];
5666 5732 } else {
5667 5733 srv_tep->te_state =
5668 - NEXTSTATE(TE_DISCON_IND3,
5669 - srv_tep->te_state);
5734 + nextstate[TE_DISCON_IND3]
5735 + [srv_tep->te_state];
5670 5736 }
5671 5737 ASSERT(*(uint32_t *)(d_mp->b_rptr) ==
5672 5738 T_DISCON_IND);
5673 5739 putnext(srv_tep->te_rq, d_mp);
5674 5740 tl_freetip(srv_tep, tip);
5675 5741 }
5676 5742 TL_UNCONNECT(tep->te_oconp);
5677 5743 srv_tep = NULL;
5678 5744 }
5679 5745 } else if (peer_tep != NULL) {
5680 5746 /*
5681 5747 * unconnect existing connection
5682 5748 * If connected, change state of peer on
5683 5749 * discon ind event and send discon ind pdu
5684 5750 * to module above it
5685 5751 */
5686 5752
5687 5753 ASSERT(tep->te_ser == peer_tep->te_ser);
5688 5754 if (IS_COTSORD(peer_tep) &&
5689 5755 (peer_tep->te_state == TS_WIND_ORDREL ||
5690 5756 peer_tep->te_state == TS_DATA_XFER)) {
5691 5757 /*
5692 5758 * send ordrel ind
5693 5759 */
5694 5760 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
5695 5761 "tl_co_unconnect:connected: ordrel_ind state %d->%d",
5696 5762 peer_tep->te_state,
5697 - NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state)));
5763 + nextstate[TE_ORDREL_IND][peer_tep->te_state]));
5698 5764 d_mp = tl_ordrel_ind_alloc();
5699 5765 if (d_mp == NULL) {
5700 5766 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5701 5767 SL_TRACE | SL_ERROR,
5702 5768 "tl_co_unconnect:connected:"
5703 5769 "allocb failure"));
5704 5770 /*
5705 5771 * Continue with cleaning up peer as
5706 5772 * this side may go away with the close
5707 5773 */
5708 5774 TL_QENABLE(peer_tep);
5709 5775 goto discon_peer;
5710 5776 }
5711 5777 peer_tep->te_state =
5712 - NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
5778 + nextstate[TE_ORDREL_IND][peer_tep->te_state];
5713 5779
5714 5780 putnext(peer_tep->te_rq, d_mp);
5715 5781 /*
5716 5782 * Handle flow control case. This will generate
5717 5783 * a t_discon_ind message with reason 0 if there
5718 5784 * is data queued on the write side.
5719 5785 */
5720 5786 TL_QENABLE(peer_tep);
5721 5787 } else if (IS_COTSORD(peer_tep) &&
5722 5788 peer_tep->te_state == TS_WREQ_ORDREL) {
5723 5789 /*
5724 5790	 * Sent an ordrel_ind. We send a discon
5725 5791	 * with error 0 to inform that the peer is gone.
5726 5792 */
5727 5793 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5728 5794 SL_TRACE | SL_ERROR,
5729 5795 "tl_co_unconnect: discon in state %d",
5730 5796 tep->te_state));
5731 5797 tl_discon_ind(peer_tep, 0);
5732 5798 } else {
5733 5799 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5734 5800 SL_TRACE | SL_ERROR,
5735 5801 "tl_co_unconnect: state %d", tep->te_state));
5736 5802 tl_discon_ind(peer_tep, ECONNRESET);
5737 5803 }
5738 5804
5739 5805 discon_peer:
5740 5806 /*
5741 5807 * Disconnect cross-pointers only for close
5742 5808 */
5743 5809 if (tep->te_closing) {
5744 5810 peer_tep = tep->te_conp;
5745 5811 TL_REMOVE_PEER(peer_tep->te_conp);
5746 5812 TL_REMOVE_PEER(tep->te_conp);
5747 5813 }
5748 5814 }
5749 5815 }
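
In summary, tl_co_unconnect() handles three mutually exclusive situations in order: a closing server with queued T_CONN_INDs (te_nicon > 0), where every waiting client is re-enabled and sent T_DISCON_IND with ECONNREFUSED; a closing client whose connect is still pending on a server (te_oconp != NULL), where a T_ORDREL_IND or a T_DISCON_IND with ECONNRESET is queued or delivered against the client's sequence number; and an established connection (te_conp != NULL), where a COTSORD peer in data transfer gets T_ORDREL_IND while the remaining cases degrade to tl_discon_ind() with reason 0 or ECONNRESET.
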
5750 5816
5751 5817 /*
5752 5818 * Note: The following routine does not recover from allocb()
5753 5819 * failures
5754 5820 * The reason should be from the <sys/errno.h> space.
5755 5821 */
5756 5822 static void
5757 5823 tl_discon_ind(tl_endpt_t *tep, uint32_t reason)
5758 5824 {
5759 5825 mblk_t *d_mp;
5760 5826
5761 5827 if (tep->te_closing)
5762 5828 return;
5763 5829
5764 5830 /*
5765 5831 * flush the queues.
5766 5832 */
5767 5833 flushq(tep->te_rq, FLUSHDATA);
5768 5834 (void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW);
5769 5835
5770 5836 /*
5771 5837 * send discon ind
5772 5838 */
5773 5839 d_mp = tl_discon_ind_alloc(reason, tep->te_seqno);
5774 5840 if (d_mp == NULL) {
5775 5841 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
5776 5842 "tl_discon_ind:allocb failure"));
5777 5843 return;
5778 5844 }
5779 5845 tep->te_state = TS_IDLE;
5780 5846 putnext(tep->te_rq, d_mp);
5781 5847 }
5782 5848
5783 5849 /*
5784 5850 * Note: The following routine does not recover from allocb()
5785 5851 * failures
5786 5852 * The reason should be from the <sys/errno.h> space.
5787 5853 */
5788 5854 static mblk_t *
5789 5855 tl_discon_ind_alloc(uint32_t reason, t_scalar_t seqnum)
5790 5856 {
5791 5857 mblk_t *mp;
5792 5858 struct T_discon_ind *tdi;
5793 5859
5794 5860 if (mp = allocb(sizeof (struct T_discon_ind), BPRI_MED)) {
5795 5861 DB_TYPE(mp) = M_PROTO;
5796 5862 mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind);
5797 5863 tdi = (struct T_discon_ind *)mp->b_rptr;
5798 5864 tdi->PRIM_type = T_DISCON_IND;
5799 5865 tdi->DISCON_reason = reason;
5800 5866 tdi->SEQ_number = seqnum;
5801 5867 }
5802 5868 return (mp);
5803 5869 }
5804 5870
5805 5871
5806 5872 /*
5807 5873 * Note: The following routine does not recover from allocb()
5808 5874 * failures
5809 5875 */
5810 5876 static mblk_t *
5811 5877 tl_ordrel_ind_alloc(void)
5812 5878 {
5813 5879 mblk_t *mp;
5814 5880 struct T_ordrel_ind *toi;
5815 5881
5816 5882 if (mp = allocb(sizeof (struct T_ordrel_ind), BPRI_MED)) {
5817 5883 DB_TYPE(mp) = M_PROTO;
5818 5884 mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_ind);
5819 5885 toi = (struct T_ordrel_ind *)mp->b_rptr;
5820 5886 toi->PRIM_type = T_ORDREL_IND;
5821 5887 }
5822 5888 return (mp);
5823 5889 }
5824 5890
5825 5891
5826 5892 /*
5827 5893 * Lookup the seqno in the list of queued connections.
5828 5894 */
5829 5895 static tl_icon_t *
5830 5896 tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno)
5831 5897 {
5832 5898 list_t *l = &tep->te_iconp;
5833 5899 tl_icon_t *tip = list_head(l);
5834 5900
5835 5901 ASSERT(seqno != 0);
5836 5902
5837 5903 for (; tip != NULL && (tip->ti_seqno != seqno); tip = list_next(l, tip))
5838 5904 ;
5839 5905
5840 5906 return (tip);
5841 5907 }
5842 5908
5843 5909 /*
5844 5910 * Queue data for a given T_CONN_IND while verifying that redundant
5845 5911 * messages, such as a T_ORDREL_IND after a T_DISCON_IND, are not queued.
5846 5912 * Used when the originator of the connection closes.
5847 5913 */
5848 5914 static void
5849 5915 tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp)
5850 5916 {
5851 5917 tl_icon_t *tip;
5852 5918 mblk_t **mpp, *mp;
5853 5919 int prim, nprim;
5854 5920
5855 5921 if (nmp->b_datap->db_type == M_PROTO)
5856 5922 nprim = ((union T_primitives *)nmp->b_rptr)->type;
5857 5923 else
5858 5924 nprim = -1; /* M_DATA */
5859 5925
5860 5926 tip = tl_icon_find(tep, seqno);
5861 5927 if (tip == NULL) {
5862 5928 freemsg(nmp);
5863 5929 return;
5864 5930 }
5865 5931
5866 5932 ASSERT(tip->ti_seqno != 0);
5867 5933 mpp = &tip->ti_mp;
5868 5934 while (*mpp != NULL) {
5869 5935 mp = *mpp;
5870 5936
5871 5937 if (mp->b_datap->db_type == M_PROTO)
5872 5938 prim = ((union T_primitives *)mp->b_rptr)->type;
5873 5939 else
5874 5940 prim = -1; /* M_DATA */
5875 5941
5876 5942 /*
5877 5943 * Allow nothing after a T_DISCON_IND
5878 5944 */
5879 5945 if (prim == T_DISCON_IND) {
5880 5946 freemsg(nmp);
5881 5947 return;
5882 5948 }
5883 5949 /*
5884 5950 * Only allow a T_DISCON_IND after an T_ORDREL_IND
5885 5951 */
5886 5952 if (prim == T_ORDREL_IND && nprim != T_DISCON_IND) {
5887 5953 freemsg(nmp);
5888 5954 return;
5889 5955 }
5890 5956 mpp = &(mp->b_next);
5891 5957 }
5892 5958 *mpp = nmp;
5893 5959 }
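
The two checks above amount to a small grammar over each pending connection's b_next chain, where data stands for M_DATA and the data-bearing *_IND primitives:

    data* [ T_ORDREL_IND ] [ T_DISCON_IND ]

Nothing may be queued after a T_DISCON_IND, and the only message allowed after a T_ORDREL_IND is a T_DISCON_IND.
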
5894 5960
5895 5961 /*
5896 5962 * Verify if a certain TPI primitive exists on the connind queue.
5897 5963 * Use prim -1 for M_DATA.
5898 5964 * Return non-zero if found.
5899 5965 */
5900 5966 static boolean_t
5901 5967 tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim)
5902 5968 {
5903 5969 tl_icon_t *tip = tl_icon_find(tep, seqno);
5904 5970 boolean_t found = B_FALSE;
5905 5971
5906 5972 if (tip != NULL) {
5907 5973 mblk_t *mp;
5908 5974 for (mp = tip->ti_mp; !found && mp != NULL; mp = mp->b_next) {
5909 5975 found = (DB_TYPE(mp) == M_PROTO &&
5910 5976 ((union T_primitives *)mp->b_rptr)->type == prim);
5911 5977 }
5912 5978 }
5913 5979 return (found);
5914 5980 }
5915 5981
5916 5982 /*
5917 5983 * Send the b_next mblk chain that has accumulated before the connection
5918 5984 * was accepted. Perform the necessary state transitions.
5919 5985 */
5920 5986 static void
5921 5987 tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp)
5922 5988 {
5923 5989 mblk_t *mp;
5924 5990 union T_primitives *primp;
5925 5991
5926 5992 if (tep->te_closing) {
5927 5993 tl_icon_freemsgs(mpp);
5928 5994 return;
5929 5995 }
5930 5996
5931 5997 ASSERT(tep->te_state == TS_DATA_XFER);
5932 5998 ASSERT(tep->te_rq->q_first == NULL);
5933 5999
5934 6000 while ((mp = *mpp) != NULL) {
5935 6001 *mpp = mp->b_next;
5936 6002 mp->b_next = NULL;
5937 6003
5938 6004 ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
5939 6005 switch (DB_TYPE(mp)) {
5940 6006 default:
5941 6007 freemsg(mp);
5942 6008 break;
5943 6009 case M_DATA:
5944 6010 putnext(tep->te_rq, mp);
5945 6011 break;
5946 6012 case M_PROTO:
5947 6013 primp = (union T_primitives *)mp->b_rptr;
5948 6014 switch (primp->type) {
5949 6015 case T_UNITDATA_IND:
5950 6016 case T_DATA_IND:
5951 6017 case T_OPTDATA_IND:
5952 6018 case T_EXDATA_IND:
5953 6019 putnext(tep->te_rq, mp);
5954 6020 break;
5955 6021 case T_ORDREL_IND:
5956 - tep->te_state = NEXTSTATE(TE_ORDREL_IND,
5957 - tep->te_state);
6022 + tep->te_state = nextstate[TE_ORDREL_IND]
6023 + [tep->te_state];
5958 6024 putnext(tep->te_rq, mp);
5959 6025 break;
5960 6026 case T_DISCON_IND:
5961 6027 tep->te_state = TS_IDLE;
5962 6028 putnext(tep->te_rq, mp);
5963 6029 break;
5964 6030 default:
5965 6031 #ifdef DEBUG
5966 6032 cmn_err(CE_PANIC,
5967 6033 "tl_icon_sendmsgs: unknown primitive");
5968 6034 #endif /* DEBUG */
5969 6035 freemsg(mp);
5970 6036 break;
5971 6037 }
5972 6038 break;
5973 6039 }
5974 6040 }
5975 6041 }
5976 6042
5977 6043 /*
5978 6044 * Free the b_next mblk chain that has accumulated before the connection
5979 6045 * was accepted.
5980 6046 */
5981 6047 static void
5982 6048 tl_icon_freemsgs(mblk_t **mpp)
5983 6049 {
5984 6050 mblk_t *mp;
5985 6051
5986 6052 while ((mp = *mpp) != NULL) {
5987 6053 *mpp = mp->b_next;
5988 6054 mp->b_next = NULL;
5989 6055 freemsg(mp);
5990 6056 }
5991 6057 }
5992 6058
5993 6059 /*
5994 6060 * Send M_ERROR
5995 6061 * Note: assumes caller ensured enough space in mp or enough
5996 6062 * memory available. Does not attempt recovery from allocb()
5997 6063 * failures
5998 6064 */
5999 6065
6000 6066 static void
6001 6067 tl_merror(queue_t *wq, mblk_t *mp, int error)
6002 6068 {
6003 6069 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6004 6070
6005 6071 if (tep->te_closing) {
6006 6072 freemsg(mp);
6007 6073 return;
6008 6074 }
6009 6075
6010 6076 (void) (STRLOG(TL_ID, tep->te_minor, 1,
6011 6077 SL_TRACE | SL_ERROR,
6012 6078 "tl_merror: tep=%p, err=%d", (void *)tep, error));
6013 6079
6014 6080 /*
6015 6081 * flush all messages on queue. we are shutting
6016 6082 * the stream down on fatal error
6017 6083 */
6018 6084 flushq(wq, FLUSHALL);
6019 6085 if (IS_COTS(tep)) {
6020 6086 /* connection oriented - unconnect endpoints */
6021 6087 tl_co_unconnect(tep);
6022 6088 }
6023 6089 if (mp->b_cont) {
6024 6090 freemsg(mp->b_cont);
6025 6091 mp->b_cont = NULL;
6026 6092 }
6027 6093
6028 6094 if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) {
6029 6095 freemsg(mp);
6030 6096 mp = allocb(1, BPRI_HI);
6031 6097 if (mp == NULL) {
6032 6098 (void) (STRLOG(TL_ID, tep->te_minor, 1,
6033 6099 SL_TRACE | SL_ERROR,
6034 6100 "tl_merror:M_PROTO: out of memory"));
6035 6101 return;
6036 6102 }
6037 6103 }
6038 6104 if (mp) {
6039 6105 DB_TYPE(mp) = M_ERROR;
6040 6106 mp->b_rptr = DB_BASE(mp);
6041 6107 *mp->b_rptr = (char)error;
6042 6108 mp->b_wptr = mp->b_rptr + sizeof (char);
6043 6109 qreply(wq, mp);
6044 6110 } else {
6045 6111 (void) putnextctl1(tep->te_rq, M_ERROR, error);
6046 6112 }
6047 6113 }
6048 6114
6049 6115 static void
6050 6116 tl_fill_option(uchar_t *buf, cred_t *cr, pid_t cpid, int flag, cred_t *pcr)
6051 6117 {
6052 6118 ASSERT(cr != NULL);
6053 6119
6054 6120 if (flag & TL_SETCRED) {
6055 6121 struct opthdr *opt = (struct opthdr *)buf;
6056 6122 tl_credopt_t *tlcred;
6057 6123
6058 6124 opt->level = TL_PROT_LEVEL;
6059 6125 opt->name = TL_OPT_PEER_CRED;
6060 6126 opt->len = (t_uscalar_t)OPTLEN(sizeof (tl_credopt_t));
6061 6127
6062 6128 tlcred = (tl_credopt_t *)(opt + 1);
6063 6129 tlcred->tc_uid = crgetuid(cr);
6064 6130 tlcred->tc_gid = crgetgid(cr);
6065 6131 tlcred->tc_ruid = crgetruid(cr);
6066 6132 tlcred->tc_rgid = crgetrgid(cr);
6067 6133 tlcred->tc_suid = crgetsuid(cr);
6068 6134 tlcred->tc_sgid = crgetsgid(cr);
6069 6135 tlcred->tc_ngroups = crgetngroups(cr);
6070 6136 } else if (flag & TL_SETUCRED) {
6071 6137 struct opthdr *opt = (struct opthdr *)buf;
6072 6138
6073 6139 opt->level = TL_PROT_LEVEL;
6074 6140 opt->name = TL_OPT_PEER_UCRED;
6075 6141 opt->len = (t_uscalar_t)OPTLEN(ucredminsize(cr));
6076 6142
6077 6143 (void) cred2ucred(cr, cpid, (void *)(opt + 1), pcr);
6078 6144 } else {
6079 6145 struct T_opthdr *topt = (struct T_opthdr *)buf;
6080 6146 ASSERT(flag & TL_SOCKUCRED);
6081 6147
6082 6148 topt->level = SOL_SOCKET;
6083 6149 topt->name = SCM_UCRED;
6084 6150 topt->len = ucredminsize(cr) + sizeof (*topt);
6085 6151 topt->status = 0;
6086 6152 (void) cred2ucred(cr, cpid, (void *)(topt + 1), pcr);
6087 6153 }
6088 6154 }
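
tl_fill_option() thus emits one of three encodings, selected by the peer's flag bits:

    TL_SETCRED	struct opthdr, level TL_PROT_LEVEL, name TL_OPT_PEER_CRED, body tl_credopt_t
    TL_SETUCRED	struct opthdr, level TL_PROT_LEVEL, name TL_OPT_PEER_UCRED, body from cred2ucred()
    TL_SOCKUCRED	struct T_opthdr, level SOL_SOCKET, name SCM_UCRED, body from cred2ucred()
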
6089 6155
6090 6156 /* ARGSUSED */
6091 6157 static int
6092 6158 tl_default_opt(queue_t *wq, int level, int name, uchar_t *ptr)
6093 6159 {
6094 6160 /* no default value processed in protocol specific code currently */
6095 6161 return (-1);
6096 6162 }
6097 6163
6098 6164 /* ARGSUSED */
6099 6165 static int
6100 6166 tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
6101 6167 {
6102 6168 int len;
6103 6169 tl_endpt_t *tep;
6104 6170 int *valp;
6105 6171
6106 6172 tep = (tl_endpt_t *)wq->q_ptr;
6107 6173
6108 6174 len = 0;
6109 6175
6110 6176 /*
6111 6177 * Assumes: option level and name sanity check done elsewhere
6112 6178 */
6113 6179
6114 6180 switch (level) {
6115 6181 case SOL_SOCKET:
6116 6182 if (!IS_SOCKET(tep))
6117 6183 break;
6118 6184 switch (name) {
6119 6185 case SO_RECVUCRED:
6120 6186 len = sizeof (int);
6121 6187 valp = (int *)ptr;
6122 6188 *valp = (tep->te_flag & TL_SOCKUCRED) != 0;
6123 6189 break;
6124 6190 default:
6125 6191 break;
6126 6192 }
6127 6193 break;
6128 6194 case TL_PROT_LEVEL:
6129 6195 switch (name) {
6130 6196 case TL_OPT_PEER_CRED:
6131 6197 case TL_OPT_PEER_UCRED:
6132 6198 /*
6133 6199	 * This option is not supposed to be retrieved directly.
6134 6200	 * It is only sent in T_CONN_{IND,CON} and T_UNITDATA_IND
6135 6201	 * when certain internal flags are set by other options.
6136 6202	 * Direct retrieval is always designed to fail (be ignored)
6137 6203	 * for this option.
6138 6204 */
6139 6205 break;
6140 6206 }
6141 6207 }
6142 6208 return (len);
6143 6209 }
6144 6210
6145 6211 /* ARGSUSED */
6146 6212 static int
6147 6213 tl_set_opt(queue_t *wq, uint_t mgmt_flags, int level, int name, uint_t inlen,
6148 6214 uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, void *thisdg_attrs,
6149 6215 cred_t *cr)
6150 6216 {
6151 6217 int error;
6152 6218 tl_endpt_t *tep;
6153 6219
6154 6220 tep = (tl_endpt_t *)wq->q_ptr;
6155 6221
6156 6222 error = 0; /* NOERROR */
6157 6223
6158 6224 /*
6159 6225 * Assumes: option level and name sanity checks done elsewhere
6160 6226 */
6161 6227
6162 6228 switch (level) {
6163 6229 case SOL_SOCKET:
6164 6230 if (!IS_SOCKET(tep)) {
6165 6231 error = EINVAL;
6166 6232 break;
6167 6233 }
6168 6234 /*
6169 6235 * TBD: fill in other AF_UNIX socket options and then stop
6170 6236 * returning error.
6171 6237 */
6172 6238 switch (name) {
6173 6239 case SO_RECVUCRED:
6174 6240 /*
6175 6241 * We only support this for datagram sockets;
6176 6242 * getpeerucred handles the connection oriented
6177 6243 * transports.
6178 6244 */
6179 6245 if (!IS_CLTS(tep)) {
6180 6246 error = EINVAL;
6181 6247 break;
6182 6248 }
6183 6249 if (*(int *)invalp == 0)
6184 6250 tep->te_flag &= ~TL_SOCKUCRED;
6185 6251 else
6186 6252 tep->te_flag |= TL_SOCKUCRED;
6187 6253 break;
6188 6254 default:
6189 6255 error = EINVAL;
6190 6256 break;
6191 6257 }
6192 6258 break;
6193 6259 case TL_PROT_LEVEL:
6194 6260 switch (name) {
6195 6261 case TL_OPT_PEER_CRED:
6196 6262 case TL_OPT_PEER_UCRED:
6197 6263 /*
6198 6264	 * This option is not supposed to be set directly.
6199 6265	 * Its value is initialized for each endpoint at
6200 6266	 * driver open time.
6201 6267	 * Direct setting is always designed to fail for this
6202 6268 * option.
6203 6269 */
6204 6270 (void) (STRLOG(TL_ID, tep->te_minor, 1,
6205 6271 SL_TRACE | SL_ERROR,
6206 6272 "tl_set_opt: option is not supported"));
6207 6273 error = EPROTO;
6208 6274 break;
6209 6275 }
6210 6276 }
6211 6277 return (error);
6212 6278 }
6213 6279
6214 6280
6215 6281 static void
6216 6282 tl_timer(void *arg)
6217 6283 {
6218 6284 queue_t *wq = arg;
6219 6285 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6220 6286
6221 6287 ASSERT(tep);
6222 6288
6223 6289 tep->te_timoutid = 0;
6224 6290
6225 6291 enableok(wq);
6226 6292 /*
6227 6293 * Note: can call wsrv directly here and save context switch
6228 6294 * Consider change when qtimeout (not timeout) is active
6229 6295 */
6230 6296 qenable(wq);
6231 6297 }
6232 6298
6233 6299 static void
6234 6300 tl_buffer(void *arg)
6235 6301 {
6236 6302 queue_t *wq = arg;
6237 6303 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6238 6304
6239 6305 ASSERT(tep);
6240 6306
6241 6307 tep->te_bufcid = 0;
6242 6308 tep->te_nowsrv = B_FALSE;
6243 6309
6244 6310 enableok(wq);
6245 6311 /*
6246 6312 * Note: can call wsrv directly here and save context switch
6247 6313 * Consider change when qbufcall (not bufcall) is active
6248 6314 */
6249 6315 qenable(wq);
6250 6316 }
6251 6317
6252 6318 static void
6253 6319 tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
6254 6320 {
6255 6321 tl_endpt_t *tep;
6256 6322
6257 6323 tep = (tl_endpt_t *)wq->q_ptr;
6258 6324
6259 6325 if (tep->te_closing) {
6260 6326 freemsg(mp);
6261 6327 return;
6262 6328 }
6263 6329 noenable(wq);
6264 6330
6265 6331 (void) insq(wq, wq->q_first, mp);
6266 6332
6267 6333 if (tep->te_bufcid || tep->te_timoutid) {
6268 6334 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
6269 6335 "tl_memrecover:recover %p pending", (void *)wq));
6270 6336 return;
6271 6337 }
6272 6338
6273 6339 tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq);
6274 6340 if (tep->te_bufcid == NULL) {
6275 6341 tep->te_timoutid = qtimeout(wq, tl_timer, wq,
6276 6342 drv_usectohz(TL_BUFWAIT));
6277 6343 }
6278 6344 }
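
tl_memrecover() is the allocation-recovery half of the tl_buffer()/tl_timer() pair above: the message is parked back at the head of the write queue with the queue noenable()d, then qbufcall() is registered so the stream is restarted when size bytes become allocatable; if even the bufcall cannot be registered, a qtimeout() of TL_BUFWAIT microseconds serves as a timed fallback. Either callback clears its id, re-enables the queue, and qenable()s it so the service routine retries the parked message.
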
6279 6345
6280 6346 static void
6281 6347 tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
6282 6348 {
6283 6349 ASSERT(tip->ti_seqno != 0);
6284 6350
6285 6351 if (tip->ti_mp != NULL) {
6286 6352 tl_icon_freemsgs(&tip->ti_mp);
6287 6353 tip->ti_mp = NULL;
6288 6354 }
6289 6355 if (tip->ti_tep != NULL) {
6290 6356 tl_refrele(tip->ti_tep);
6291 6357 tip->ti_tep = NULL;
6292 6358 }
6293 6359 list_remove(&tep->te_iconp, tip);
6294 6360 kmem_free(tip, sizeof (tl_icon_t));
6295 6361 tep->te_nicon--;
6296 6362 }
6297 6363
6298 6364 /*
6299 6365 * Remove address from address hash.
6300 6366 */
6301 6367 static void
6302 6368 tl_addr_unbind(tl_endpt_t *tep)
6303 6369 {
6304 6370 tl_endpt_t *elp;
6305 6371
6306 6372 if (tep->te_flag & TL_ADDRHASHED) {
6307 6373 if (IS_SOCKET(tep)) {
6308 6374 (void) mod_hash_remove(tep->te_addrhash,
6309 6375 (mod_hash_key_t)tep->te_vp,
6310 6376 (mod_hash_val_t *)&elp);
6311 6377 tep->te_vp = (void *)(uintptr_t)tep->te_minor;
6312 6378 tep->te_magic = SOU_MAGIC_IMPLICIT;
6313 6379 } else {
6314 6380 (void) mod_hash_remove(tep->te_addrhash,
6315 6381 (mod_hash_key_t)&tep->te_ap,
6316 6382 (mod_hash_val_t *)&elp);
6317 6383 (void) kmem_free(tep->te_abuf, tep->te_alen);
6318 6384 tep->te_alen = -1;
6319 6385 tep->te_abuf = NULL;
6320 6386 }
6321 6387 tep->te_flag &= ~TL_ADDRHASHED;
6322 6388 }
6323 6389 }