8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 * Copyright (c) 2018, Joyent, Inc.
29 */
30
31 /*
32 * Multithreaded STREAMS Local Transport Provider.
33 *
34 * OVERVIEW
35 * ========
36 *
37 * This driver provides TLI as well as socket semantics. It provides
38 * connectionless, connection oriented, and connection oriented with orderly
39 * release transports for TLI and sockets. Each transport type has separate name
40 * spaces (i.e. it is not possible to connect from a socket to a TLI endpoint) -
41 * this removes any name space conflicts when binding to socket style transport
42 * addresses.
43 *
44 * NOTE: There is one exception: Socket ticots and ticotsord transports share
45 * the same namespace. In fact, sockets always use ticotsord type transport.
46 *
47 * The driver mode is specified during open() by the minor number used for
48 * open.
359 #include <sys/serializer.h>
360
361 /*
362 * TBD List
 * 14. Eliminate state changes through table
364 * 16. AF_UNIX socket options
365 * 17. connect() for ticlts
366 * 18. support for "netstat" to show AF_UNIX plus TLI local
367 * transport connections
368 * 21. sanity check to flushing on sending M_ERROR
369 */
370
371 /*
372 * CONSTANT DECLARATIONS
373 * --------------------
374 */
375
376 /*
377 * Local declarations
378 */
379 #define NEXTSTATE(EV, ST) ti_statetbl[EV][ST]
380
381 #define BADSEQNUM (-1) /* initial seq number used by T_DISCON_IND */
382 #define TL_BUFWAIT (10000) /* usecs to wait for allocb buffer timeout */
383 #define TL_TIDUSZ (64*1024) /* tidu size when "strmsgz" is unlimited (0) */
384 /*
385 * Hash tables size.
386 */
387 #define TL_HASH_SIZE 311
388
389 /*
390 * Definitions for module_info
391 */
392 #define TL_ID (104) /* module ID number */
393 #define TL_NAME "tl" /* module name */
394 #define TL_MINPSZ (0) /* min packet size */
395 #define TL_MAXPSZ INFPSZ /* max packet size ZZZ */
396 #define TL_HIWAT (16*1024) /* hi water mark */
397 #define TL_LOWAT (256) /* lo water mark */
398 /*
399 * Definition of minor numbers/modes for new transport provider modes.
400 * We view the socket use as a separate mode to get a separate name space.
401 */
402 #define TL_TICOTS 0 /* connection oriented transport */
403 #define TL_TICOTSORD 1 /* COTS w/ orderly release */
404 #define TL_TICLTS 2 /* connectionless transport */
405 #define TL_UNUSED 3
406 #define TL_SOCKET 4 /* Socket */
407 #define TL_SOCK_COTS (TL_SOCKET | TL_TICOTS)
408 #define TL_SOCK_COTSORD (TL_SOCKET | TL_TICOTSORD)
409 #define TL_SOCK_CLTS (TL_SOCKET | TL_TICLTS)
410
411 #define TL_MINOR_MASK 0x7
412 #define TL_MINOR_START (TL_TICLTS + 1)
413
414 /*
415 * LOCAL MACROS
416 */
417 #define T_ALIGN(p) P2ROUNDUP((p), sizeof (t_scalar_t))
418
419 /*
420 * EXTERNAL VARIABLE DECLARATIONS
421 * -----------------------------
422 */
423 /*
424 * state table defined in the OS space.c
425 */
426 extern char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];
427
428 /*
429 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
430 */
431 static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
432 static int tl_close(queue_t *, int, cred_t *);
433 static int tl_wput(queue_t *, mblk_t *);
434 static int tl_wsrv(queue_t *);
435 static int tl_rsrv(queue_t *);
436
437 static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
438 static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
439 static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
440
441
442 /*
443 * GLOBAL DATA STRUCTURES AND VARIABLES
444 * -----------------------------------
445 */
446
447 /*
448 * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ
806
807 /*
808 * Default Data Unit size.
809 */
810 static t_scalar_t tl_tidusz;
811
812 /*
813 * Size of hash tables.
814 */
815 static size_t tl_hash_size = TL_HASH_SIZE;
816
817 /*
818 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
819 * for sockets.
820 */
821 static int tl_disable_early_connect = 0;
822 static int tl_client_closing_when_accepting;
823
824 static int tl_serializer_noswitch;
825
826 /*
827 * LOCAL FUNCTION PROTOTYPES
828 * -------------------------
829 */
830 static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
831 static void tl_do_proto(mblk_t *, tl_endpt_t *);
832 static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
833 static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
834 static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
835 t_scalar_t);
836 static void tl_bind(mblk_t *, tl_endpt_t *);
837 static void tl_bind_ser(mblk_t *, tl_endpt_t *);
838 static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
839 static void tl_unbind(mblk_t *, tl_endpt_t *);
840 static void tl_optmgmt(queue_t *, mblk_t *);
841 static void tl_conn_req(queue_t *, mblk_t *);
842 static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
843 static void tl_conn_res(mblk_t *, tl_endpt_t *);
844 static void tl_discon_req(mblk_t *, tl_endpt_t *);
845 static void tl_capability_req(mblk_t *, tl_endpt_t *);
1938 static void
1939 tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
1940 {
1941 tl_endpt_t *peer_tep = tep->te_conp;
1942 queue_t *peer_rq;
1943
1944 ASSERT(DB_TYPE(mp) == M_DATA);
1945 ASSERT(IS_COTS(tep));
1946
1947 IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer);
1948
1949 /*
1950 * fastpath for data. Ignore flow control if tep is closing.
1951 */
1952 if ((peer_tep != NULL) &&
1953 !peer_tep->te_closing &&
1954 ((tep->te_state == TS_DATA_XFER) ||
1955 (tep->te_state == TS_WREQ_ORDREL)) &&
1956 (tep->te_wq != NULL) &&
1957 (tep->te_wq->q_first == NULL) &&
1958 ((peer_tep->te_state == TS_DATA_XFER) ||
1959 (peer_tep->te_state == TS_WREQ_ORDREL)) &&
1960 ((peer_rq = peer_tep->te_rq) != NULL) &&
1961 (canputnext(peer_rq) || tep->te_closing)) {
1962 putnext(peer_rq, mp);
1963 } else if (tep->te_closing) {
1964 /*
1965 * It is possible that by the time we got here tep started to
1966 * close. If the write queue is not empty, and the state is
1967 * TS_DATA_XFER the data should be delivered in order, so we
1968 * call putq() instead of freeing the data.
1969 */
1970 if ((tep->te_wq != NULL) &&
1971 ((tep->te_state == TS_DATA_XFER) ||
1972 (tep->te_state == TS_WREQ_ORDREL))) {
1973 TL_PUTQ(tep, mp);
1974 } else {
1975 freemsg(mp);
1976 }
1977 } else {
1978 TL_PUTQ(tep, mp);
1979 }
2365 ssize_t msz = MBLKL(mp), basize;
2366 t_scalar_t tli_err = 0, unix_err = 0;
2367 t_scalar_t save_prim_type = bind->PRIM_type;
2368 t_scalar_t save_state = tep->te_state;
2369
2370 if (tep->te_state != TS_UNBND) {
2371 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2372 SL_TRACE | SL_ERROR,
2373 "tl_wput:bind_request:out of state, state=%d",
2374 tep->te_state));
2375 tli_err = TOUTSTATE;
2376 goto error;
2377 }
2378
2379 if (msz < sizeof (struct T_bind_req)) {
2380 tli_err = TSYSERR;
2381 unix_err = EINVAL;
2382 goto error;
2383 }
2384
2385 tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state);
2386
2387 ASSERT((bind->PRIM_type == O_T_BIND_REQ) ||
2388 (bind->PRIM_type == T_BIND_REQ));
2389
2390 alen = bind->ADDR_length;
2391 aoff = bind->ADDR_offset;
2392
2393 /* negotiate max conn req pending */
2394 if (IS_COTS(tep)) {
2395 qlen = bind->CONIND_number;
2396 if (qlen > tl_maxqlen)
2397 qlen = tl_maxqlen;
2398 }
2399
2400 /*
2401 * Reserve hash handle. It can only be NULL if the endpoint is unbound
2402 * and bound again.
2403 */
2404 if ((tep->te_hash_hndl == NULL) &&
2405 ((tep->te_flag & TL_ADDRHASHED) == 0) &&
2406 mod_hash_reserve_nosleep(tep->te_addrhash,
2407 &tep->te_hash_hndl) != 0) {
2408 tli_err = TSYSERR;
2409 unix_err = ENOSR;
2410 goto error;
2411 }
2412
2413 /*
2414 * Verify address correctness.
2415 */
2416 if (IS_SOCKET(tep)) {
2417 ASSERT(bind->PRIM_type == O_T_BIND_REQ);
2418
2419 if ((alen != TL_SOUX_ADDRLEN) ||
2420 (aoff < 0) ||
2421 (aoff + alen > msz)) {
2422 (void) (STRLOG(TL_ID, tep->te_minor,
2423 1, SL_TRACE | SL_ERROR,
2424 "tl_bind: invalid socket addr"));
2425 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2426 tli_err = TSYSERR;
2427 unix_err = EINVAL;
2428 goto error;
2429 }
2430 /* Copy address from message to local buffer. */
2431 bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr));
2432 /*
2433 * Check that we got correct address from sockets
2434 */
2435 if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
2436 (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
2437 (void) (STRLOG(TL_ID, tep->te_minor,
2438 1, SL_TRACE | SL_ERROR,
2439 "tl_bind: invalid socket magic"));
2440 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2441 tli_err = TSYSERR;
2442 unix_err = EINVAL;
2443 goto error;
2444 }
2445 if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
2446 (ux_addr.soua_vp != NULL)) {
2447 (void) (STRLOG(TL_ID, tep->te_minor,
2448 1, SL_TRACE | SL_ERROR,
2449 "tl_bind: implicit addr non-empty"));
2450 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2451 tli_err = TSYSERR;
2452 unix_err = EINVAL;
2453 goto error;
2454 }
2455 if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
2456 (ux_addr.soua_vp == NULL)) {
2457 (void) (STRLOG(TL_ID, tep->te_minor,
2458 1, SL_TRACE | SL_ERROR,
2459 "tl_bind: explicit addr empty"));
2460 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2461 tli_err = TSYSERR;
2462 unix_err = EINVAL;
2463 goto error;
2464 }
2465 } else {
2466 if ((alen > 0) && ((aoff < 0) ||
2467 ((ssize_t)(aoff + alen) > msz) ||
2468 ((aoff + alen) < 0))) {
2469 (void) (STRLOG(TL_ID, tep->te_minor,
2470 1, SL_TRACE | SL_ERROR,
2471 "tl_bind: invalid message"));
2472 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2473 tli_err = TSYSERR;
2474 unix_err = EINVAL;
2475 goto error;
2476 }
2477 if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
2478 (void) (STRLOG(TL_ID, tep->te_minor,
2479 1, SL_TRACE | SL_ERROR,
2480 "tl_bind: bad addr in message"));
2481 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2482 tli_err = TBADADDR;
2483 goto error;
2484 }
2485 #ifdef DEBUG
2486 /*
2487 * Mild form of ASSERT()ion to detect broken TPI apps.
2488 * if (!assertion)
2489 * log warning;
2490 */
2491 if (!((alen == 0 && aoff == 0) ||
2492 (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
2493 (void) (STRLOG(TL_ID, tep->te_minor,
2494 3, SL_TRACE | SL_ERROR,
2495 "tl_bind: addr overlaps TPI message"));
2496 }
2497 #endif
2498 }
2499
2500 /*
2501 * Bind the address provided or allocate one if requested.
2627 tl_memrecover(wq, mp, basize);
2628 return;
2629 }
2630
2631 DB_TYPE(bamp) = M_PCPROTO;
2632 bamp->b_wptr = bamp->b_rptr + basize;
2633 b_ack = (struct T_bind_ack *)bamp->b_rptr;
2634 b_ack->PRIM_type = T_BIND_ACK;
2635 b_ack->CONIND_number = qlen;
2636 b_ack->ADDR_length = tep->te_alen;
2637 b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack);
2638 addr_startp = bamp->b_rptr + b_ack->ADDR_offset;
2639 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
2640
2641 if (IS_COTS(tep)) {
2642 tep->te_qlen = qlen;
2643 if (qlen > 0)
2644 tep->te_flag |= TL_LISTENER;
2645 }
2646
2647 tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state);
2648 /*
2649 * send T_BIND_ACK message
2650 */
2651 (void) qreply(wq, bamp);
2652 return;
2653
2654 error:
2655 ackmp = reallocb(mp, sizeof (struct T_error_ack), 0);
2656 if (ackmp == NULL) {
2657 /*
2658 * roll back state changes
2659 */
2660 tep->te_state = save_state;
2661 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2662 return;
2663 }
2664 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2665 tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type);
2666 }
2667
2668 /*
2669 * Process T_UNBIND_REQ.
2670 * Called from serializer.
2671 */
2672 static void
2673 tl_unbind(mblk_t *mp, tl_endpt_t *tep)
2674 {
2675 queue_t *wq;
2676 mblk_t *ackmp;
2677
2678 if (tep->te_closing) {
2679 freemsg(mp);
2680 return;
2681 }
2682
2683 wq = tep->te_wq;
2684
2690 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2691 return;
2692 }
2693 /*
2694 * memory resources committed
2695 * Note: no message validation. T_UNBIND_REQ message is
2696 * same size as PRIM_type field so already verified earlier.
2697 */
2698
2699 /*
2700 * validate state
2701 */
2702 if (tep->te_state != TS_IDLE) {
2703 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2704 SL_TRACE | SL_ERROR,
2705 "tl_wput:T_UNBIND_REQ:out of state, state=%d",
2706 tep->te_state));
2707 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
2708 return;
2709 }
2710 tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state);
2711
2712 /*
2713 * TPI says on T_UNBIND_REQ:
2714 * send up a M_FLUSH to flush both
2715 * read and write queues
2716 */
2717 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
2718
2719 if (!IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
2720 tep->te_magic != SOU_MAGIC_EXPLICIT) {
2721
2722 /*
2723 * Sockets use bind with qlen==0 followed by bind() to
2724 * the same address with qlen > 0 for listeners.
2725 * We allow rebind with a new qlen value.
2726 */
2727 tl_addr_unbind(tep);
2728 }
2729
2730 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2731 /*
2732 * send T_OK_ACK
2733 */
2734 tl_ok_ack(wq, ackmp, T_UNBIND_REQ);
2735 }
2736
2737
2738 /*
2739 * Option management code from drv/ip is used here
2740 * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr
2741 * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ.
2742 * However, that is what we want as that option is 'unorthodox'
2743 * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND
2744 * and not in T_SVR4_OPTMGMT_REQ/ACK
2745 * Note2: use of optcom_req means this routine is an exception to
2746 * recovery from allocb() failures.
2747 */
2748
2749 static void
2750 tl_optmgmt(queue_t *wq, mblk_t *mp)
2948 SL_TRACE | SL_ERROR,
2949 "tl_conn_req:options not supported "
2950 "in message"));
2951 tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
2952 freemsg(mp);
2953 return;
2954 }
2955 }
2956
2957 /*
2958 * Prevent tep from closing on us.
2959 */
2960 if (!tl_noclose(tep)) {
2961 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2962 "tl_conn_req:endpoint is closing"));
2963 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
2964 freemsg(mp);
2965 return;
2966 }
2967
2968 tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state);
2969 /*
2970 * get endpoint to connect to
2971 * check that peer with DEST addr is bound to addr
2972 * and has CONIND_number > 0
2973 */
2974 dst.ta_alen = alen;
2975 dst.ta_abuf = mp->b_rptr + aoff;
2976 dst.ta_zoneid = tep->te_zoneid;
2977
2978 /*
2979 * Verify if remote addr is in use
2980 */
2981 peer_tep = (IS_SOCKET(tep) ?
2982 tl_sock_find_peer(tep, &ux_addr) :
2983 tl_find_peer(tep, &dst));
2984
2985 if (peer_tep == NULL) {
2986 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
2987 "tl_conn_req:no one at connect address"));
2988 err = ECONNREFUSED;
2989 } else if (peer_tep->te_nicon >= peer_tep->te_qlen) {
2990 /*
2991 * validate that number of incoming connection is
2992 * not to capacity on destination endpoint
2993 */
2994 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
2995 "tl_conn_req: qlen overflow connection refused"));
2996 err = ECONNREFUSED;
2997 }
2998
2999 /*
3000 * Send T_DISCON_IND in case of error
3001 */
3002 if (err != 0) {
3003 if (peer_tep != NULL)
3004 tl_refrele(peer_tep);
3005 /* We are still expected to send T_OK_ACK */
3006 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3007 tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
3008 tl_closeok(tep);
3009 dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind),
3010 M_PROTO, T_DISCON_IND);
3011 if (dimp == NULL) {
3012 tl_merror(wq, NULL, ENOSR);
3013 return;
3014 }
3015 di = (struct T_discon_ind *)dimp->b_rptr;
3016 di->DISCON_reason = err;
3017 di->SEQ_number = BADSEQNUM;
3018
3019 tep->te_state = TS_IDLE;
3020 /*
3021 * send T_DISCON_IND message
3022 */
3023 putnext(tep->te_rq, dimp);
3024 return;
3025 }
3026
3207 tl_memrecover(wq, indmp, sizeof (*tip));
3208 freemsg(ackmp);
3209 if (opts != NULL)
3210 kmem_free(opts, olen);
3211 freemsg(confmp);
3212 TL_UNCONNECT(tep->te_oconp);
3213 tl_serializer_exit(tep);
3214 tl_refrele(tep);
3215 return;
3216 }
3217 tip->ti_mp = NULL;
3218
3219 /*
3220 * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON
3221 * and tl_icon_t cell.
3222 */
3223
3224 /*
3225 * ack validity of request and send the peer credential in the ACK.
3226 */
3227 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3228
3229 if (peer_tep != NULL && peer_tep->te_credp != NULL &&
3230 confmp != NULL) {
3231 mblk_setcred(confmp, peer_tep->te_credp, peer_tep->te_cpid);
3232 }
3233
3234 tl_ok_ack(wq, ackmp, T_CONN_REQ);
3235
3236 /*
3237 * prepare message to send T_CONN_IND
3238 */
3239 /*
3240 * allocate the message - original data blocks retained
3241 * in the returned mblk
3242 */
3243 cimp = tl_resizemp(indmp, size);
3244 if (cimp == NULL) {
3245 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3246 "tl_conn_req:con_ind:allocb failure"));
3247 tl_merror(wq, indmp, ENOMEM);
3281 ASSERT(opts != NULL);
3282 bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen);
3283 } else {
3284 ci->OPT_offset = 0;
3285 ci->OPT_length = 0;
3286 }
3287 if (opts != NULL)
3288 kmem_free(opts, olen);
3289
3290 /*
3291 * register connection request with server peer
3292 * append to list of incoming connections
3293 * increment references for both peer_tep and tep: peer_tep is placed on
3294 * te_oconp and tep is placed on listeners queue.
3295 */
3296 tip->ti_tep = tep;
3297 tip->ti_seqno = tep->te_seqno;
3298 list_insert_tail(&peer_tep->te_iconp, tip);
3299 peer_tep->te_nicon++;
3300
3301 peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state);
3302 /*
3303 * send the T_CONN_IND message
3304 */
3305 putnext(peer_tep->te_rq, cimp);
3306
3307 /*
3308 * Send a T_CONN_CON message for sockets.
3309 * Disable the queues until we have reached the correct state!
3310 */
3311 if (confmp != NULL) {
3312 tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state);
3313 noenable(wq);
3314 putnext(tep->te_rq, confmp);
3315 }
3316 /*
3317 * Now we need to increment tep reference because tep is referenced by
3318 * server list of pending connections. We also need to decrement
3319 * reference before exiting serializer. Two operations void each other
3320 * so we don't modify reference at all.
3321 */
3322 ASSERT(tep->te_refcnt >= 2);
3323 ASSERT(peer_tep->te_refcnt >= 2);
3324 tl_serializer_exit(tep);
3325 }
3326
3327
3328
3329 /*
3330 * Handle T_conn_res on listener stream. Called on listener serializer.
3331 * tl_conn_req has already generated the T_CONN_CON.
3332 * tl_conn_res is called on listener serializer.
3412 ooff = cres->OPT_offset;
3413 if (((olen > 0) && ((ooff + olen) > msz))) {
3414 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3415 "tl_conn_res:invalid message"));
3416 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3417 freemsg(mp);
3418 return;
3419 }
3420 if (olen) {
3421 /*
3422 * no opts in connect res
3423 * supported in this provider
3424 */
3425 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3426 "tl_conn_res:options not supported in message"));
3427 tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
3428 freemsg(mp);
3429 return;
3430 }
3431
3432 tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state);
3433 ASSERT(tep->te_state == TS_WACK_CRES);
3434
3435 if (cres->SEQ_number < TL_MINOR_START &&
3436 cres->SEQ_number >= BADSEQNUM) {
3437 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3438 "tl_conn_res:remote endpoint sequence number bad"));
3439 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3440 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3441 freemsg(mp);
3442 return;
3443 }
3444
3445 /*
3446 * find accepting endpoint. Will have extra reference if found.
3447 */
3448 if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
3449 (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
3450 (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
3451 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3452 "tl_conn_res:bad accepting endpoint"));
3453 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3454 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3455 freemsg(mp);
3456 return;
3457 }
3458
3459 /*
3460 * Prevent acceptor from closing.
3461 */
3462 if (!tl_noclose(acc_ep)) {
3463 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3464 "tl_conn_res:bad accepting endpoint"));
3465 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3466 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3467 tl_refrele(acc_ep);
3468 freemsg(mp);
3469 return;
3470 }
3471
3472 acc_ep->te_flag |= TL_ACCEPTOR;
3473
3474 /*
3475 * validate that accepting endpoint, if different from listening
3476 * has address bound => state is TS_IDLE
3477 * TROUBLE in XPG4 !!?
3478 */
3479 if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
3480 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3481 "tl_conn_res:accepting endpoint has no address bound,"
3482 "state=%d", acc_ep->te_state));
3483 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3484 tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3485 freemsg(mp);
3486 tl_closeok(acc_ep);
3487 tl_refrele(acc_ep);
3488 return;
3489 }
3490
3491 /*
3492 * validate if accepting endpt same as listening, then
3493 * no other incoming connection should be on the queue
3494 */
3495
3496 if ((tep == acc_ep) && (tep->te_nicon > 1)) {
3497 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3498 "tl_conn_res: > 1 conn_ind on listener-acceptor"));
3499 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3500 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3501 freemsg(mp);
3502 tl_closeok(acc_ep);
3503 tl_refrele(acc_ep);
3504 return;
3505 }
3506
3507 /*
3508 * Mark for deletion, the entry corresponding to client
3509 * on list of pending connections made by the listener
3510 * search list to see if client is one of the
3511 * recorded as a listener.
3512 */
3513 tip = tl_icon_find(tep, cres->SEQ_number);
3514 if (tip == NULL) {
3515 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3516 "tl_conn_res:no client in listener list"));
3517 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3518 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3519 freemsg(mp);
3520 tl_closeok(acc_ep);
3521 tl_refrele(acc_ep);
3522 return;
3523 }
3524
3525 /*
3526 * If ti_tep is NULL the client has already closed. In this case
3527 * the code below will avoid any action on the client side
3528 * but complete the server and acceptor state transitions.
3529 */
3530 ASSERT(tip->ti_tep == NULL ||
3531 tip->ti_tep->te_seqno == cres->SEQ_number);
3532 cl_ep = tip->ti_tep;
3533
3534 /*
3535 * If the client is present it is switched from listener's to acceptor's
3536 * serializer. We should block client closes while serializers are
3537 * being switched.
3615 /*
3616 * roll back state changes
3617 */
3618 tep->te_state = TS_WRES_CIND;
3619 tl_memrecover(wq, mp, size);
3620 freemsg(ackmp);
3621 if (client_noclose_set)
3622 tl_closeok(cl_ep);
3623 tl_closeok(acc_ep);
3624 tl_refrele(acc_ep);
3625 return;
3626 }
3627 mp = NULL;
3628 }
3629
3630 /*
3631 * Now ack validity of request
3632 */
3633 if (tep->te_nicon == 1) {
3634 if (tep == acc_ep)
3635 tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
3636 else
3637 tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
3638 } else {
3639 tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
3640 }
3641
3642 /*
3643 * send T_DISCON_IND now if client state validation failed earlier
3644 */
3645 if (err) {
3646 tl_ok_ack(wq, ackmp, prim);
3647 /*
3648 * flush the queues - why always ?
3649 */
3650 (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
3651
3652 dimp = tl_resizemp(respmp, size);
3653 if (dimp == NULL) {
3654 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3655 SL_TRACE | SL_ERROR,
3656 "tl_conn_res:con_ind:allocb failure"));
3657 tl_merror(wq, respmp, ENOMEM);
3658 tl_closeok(acc_ep);
3659 if (client_noclose_set)
3672 di->PRIM_type = T_DISCON_IND;
3673 di->DISCON_reason = err;
3674 di->SEQ_number = BADSEQNUM;
3675
3676 tep->te_state = TS_IDLE;
3677 /*
3678 * send T_DISCON_IND message
3679 */
3680 putnext(acc_ep->te_rq, dimp);
3681 if (client_noclose_set)
3682 tl_closeok(cl_ep);
3683 tl_closeok(acc_ep);
3684 tl_refrele(acc_ep);
3685 return;
3686 }
3687
3688 /*
3689 * now start connecting the accepting endpoint
3690 */
3691 if (tep != acc_ep)
3692 acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state);
3693
3694 if (cl_ep == NULL) {
3695 /*
3696 * The client has already closed. Send up any queued messages
3697 * and change the state accordingly.
3698 */
3699 tl_ok_ack(wq, ackmp, prim);
3700 tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3701
3702 /*
3703 * remove endpoint from incoming connection
3704 * delete client from list of incoming connections
3705 */
3706 tl_freetip(tep, tip);
3707 freemsg(mp);
3708 tl_closeok(acc_ep);
3709 tl_refrele(acc_ep);
3710 return;
3711 } else if (tip->ti_mp != NULL) {
3712 /*
3833 */
3834 if (!IS_SOCKET(tep)) {
3835 acc_ep->te_wq->q_next = cl_ep->te_rq;
3836 cl_ep->te_wq->q_next = acc_ep->te_rq;
3837 }
3838
3839 /*
3840 * send T_CONN_CON up on client side unless it was already
3841 * done (for a socket). In cases any data or ordrel req has been
3842 * queued make sure that the service procedure runs.
3843 */
3844 if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) {
3845 enableok(cl_ep->te_wq);
3846 TL_QENABLE(cl_ep);
3847 if (ccmp != NULL)
3848 freemsg(ccmp);
3849 } else {
3850 /*
3851 * change client state on TE_CONN_CON event
3852 */
3853 cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state);
3854 putnext(cl_ep->te_rq, ccmp);
3855 }
3856
3857 /* Mark the both endpoints as accepted */
3858 cl_ep->te_flag |= TL_ACCEPTED;
3859 acc_ep->te_flag |= TL_ACCEPTED;
3860
3861 /*
3862 * Allow client and acceptor to close.
3863 */
3864 tl_closeok(acc_ep);
3865 if (client_noclose_set)
3866 tl_closeok(cl_ep);
3867 }
3868
3869
3870
3871
3872 static void
3873 tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
3919 msz = MBLKL(mp);
3920
3921 /*
3922 * validate the state
3923 */
3924 save_state = new_state = tep->te_state;
3925 if (!(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
3926 !(save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
3927 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3928 SL_TRACE | SL_ERROR,
3929 "tl_wput:T_DISCON_REQ:out of state, state=%d",
3930 tep->te_state));
3931 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
3932 freemsg(mp);
3933 return;
3934 }
3935 /*
3936 * Defer committing the state change until it is determined if
3937 * the message will be queued with the tl_icon or not.
3938 */
3939 new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state);
3940
3941 /* validate the message */
3942 if (msz < sizeof (struct T_discon_req)) {
3943 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3944 "tl_discon_req:invalid message"));
3945 tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3946 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
3947 freemsg(mp);
3948 return;
3949 }
3950
3951 /*
3952 * if server, then validate that client exists
3953 * by connection sequence number etc.
3954 */
3955 if (tep->te_nicon > 0) { /* server */
3956
3957 /*
3958 * search server list for disconnect client
3959 */
3960 tip = tl_icon_find(tep, dr->SEQ_number);
3961 if (tip == NULL) {
3962 (void) (STRLOG(TL_ID, tep->te_minor, 2,
3963 SL_TRACE | SL_ERROR,
3964 "tl_discon_req:no disconnect endpoint"));
3965 tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3966 tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
3967 freemsg(mp);
3968 return;
3969 }
3970 /*
3971 * If ti_tep is NULL the client has already closed. In this case
3972 * the code below will avoid any action on the client side.
3973 */
3974
3975 IMPLY(tip->ti_tep != NULL,
3976 tip->ti_tep->te_seqno == dr->SEQ_number);
3977 peer_tep = tip->ti_tep;
3978 }
3979
3980 /*
3981 * preallocate now for T_DISCON_IND
3982 * ack validity of request (T_OK_ACK) after memory committed
3983 */
3984 size = sizeof (struct T_discon_ind);
3985 if ((respmp = reallocb(mp, size, 0)) == NULL) {
3986 tl_memrecover(wq, mp, size);
3987 freemsg(ackmp);
3988 return;
3989 }
3990
3991 /*
3992 * prepare message to ack validity of request
3993 */
3994 if (tep->te_nicon == 0) {
3995 new_state = NEXTSTATE(TE_OK_ACK1, new_state);
3996 } else {
3997 if (tep->te_nicon == 1)
3998 new_state = NEXTSTATE(TE_OK_ACK2, new_state);
3999 else
4000 new_state = NEXTSTATE(TE_OK_ACK4, new_state);
4001 }
4002
4003 /*
4004 * Flushing queues according to TPI. Using the old state.
4005 */
4006 if ((tep->te_nicon <= 1) &&
4007 ((save_state == TS_DATA_XFER) ||
4008 (save_state == TS_WIND_ORDREL) ||
4009 (save_state == TS_WREQ_ORDREL)))
4010 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
4011
4012 /* send T_OK_ACK up */
4013 tl_ok_ack(wq, ackmp, T_DISCON_REQ);
4014
4015 /*
4016 * now do disconnect business
4017 */
4018 if (tep->te_nicon > 0) { /* listener */
4019 if (peer_tep != NULL && !peer_tep->te_closing) {
4020 /*
4080 * messages.
4081 */
4082 tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
4083 peer_tep = NULL;
4084 dimp = NULL;
4085 /*
4086 * Can't clear te_oconp since tl_co_unconnect needs
4087 * it as a hint not to free the tep.
4088 * Keep the state unchanged since tl_conn_res inspects
4089 * it.
4090 */
4091 new_state = tep->te_state;
4092 } else {
4093 /* Found - delete it */
4094 tip = tl_icon_find(peer_tep, tep->te_seqno);
4095 if (tip != NULL) {
4096 ASSERT(tep == tip->ti_tep);
4097 save_state = peer_tep->te_state;
4098 if (peer_tep->te_nicon == 1)
4099 peer_tep->te_state =
4100 NEXTSTATE(TE_DISCON_IND2,
4101 peer_tep->te_state);
4102 else
4103 peer_tep->te_state =
4104 NEXTSTATE(TE_DISCON_IND3,
4105 peer_tep->te_state);
4106 tl_freetip(peer_tep, tip);
4107 }
4108 ASSERT(tep->te_oconp != NULL);
4109 TL_UNCONNECT(tep->te_oconp);
4110 }
4111 } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
4112 if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4113 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4114 SL_TRACE | SL_ERROR,
4115 "tl_discon_req: reallocb failed"));
4116 tep->te_state = new_state;
4117 tl_merror(wq, respmp, ENOMEM);
4118 return;
4119 }
4120 di = (struct T_discon_ind *)dimp->b_rptr;
4121 di->SEQ_number = BADSEQNUM;
4122
4123 save_state = peer_tep->te_state;
4124 peer_tep->te_state = TS_IDLE;
4125 } else {
4548 * with reason 0 to cause an EPIPE but no
4549 * read side error on AF_UNIX sockets.
4550 */
4551 freemsg(mp);
4552 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4553 SL_TRACE | SL_ERROR,
4554 "tl_data: WREQ_ORDREL and no peer"));
4555 tl_discon_ind(tep, 0);
4556 return;
4557 }
4558 break;
4559
4560 default:
4561 /* invalid state for event TE_DATA_REQ */
4562 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4563 "tl_data:cots:out of state"));
4564 tl_merror(wq, mp, EPROTO);
4565 return;
4566 }
4567 /*
4568 * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
4569 * (State stays same on this event)
4570 */
4571
4572 /*
4573 * get connected endpoint
4574 */
4575 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4576 freemsg(mp);
4577 /* Peer closed */
4578 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4579 "tl_data: peer gone"));
4580 return;
4581 }
4582
4583 ASSERT(tep->te_serializer == peer_tep->te_serializer);
4584 peer_rq = peer_tep->te_rq;
4585
4586 /*
4587 * Put it back if flow controlled
4588 * Note: Messages already on queue when we are closing is bounded
4598 */
4599 switch (peer_tep->te_state) {
4600 case TS_DATA_XFER:
4601 case TS_WIND_ORDREL:
4602 /* valid states */
4603 break;
4604 default:
4605 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4606 "tl_data:rx side:invalid state"));
4607 tl_merror(peer_tep->te_wq, mp, EPROTO);
4608 return;
4609 }
4610 if (DB_TYPE(mp) == M_PROTO) {
4611 /* reuse message block - just change REQ to IND */
4612 if (prim->type == T_DATA_REQ)
4613 prim->type = T_DATA_IND;
4614 else
4615 prim->type = T_OPTDATA_IND;
4616 }
4617 /*
4618 * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4619 * (peer state stays same on this event)
4620 */
4621 /*
4622 * send data to connected peer
4623 */
4624 putnext(peer_rq, mp);
4625 }
4626
4627
4628
4629 static void
4630 tl_exdata(mblk_t *mp, tl_endpt_t *tep)
4631 {
4632 queue_t *wq = tep->te_wq;
4633 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4634 ssize_t msz = MBLKL(mp);
4635 tl_endpt_t *peer_tep;
4636 queue_t *peer_rq;
4637 boolean_t closing = tep->te_closing;
4638
4715 * read side error on AF_UNIX sockets.
4716 */
4717 freemsg(mp);
4718 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4719 SL_TRACE | SL_ERROR,
4720 "tl_exdata: WREQ_ORDREL and no peer"));
4721 tl_discon_ind(tep, 0);
4722 return;
4723 }
4724 break;
4725
4726 default:
4727 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4728 SL_TRACE | SL_ERROR,
4729 "tl_wput:T_EXDATA_REQ:out of state, state=%d",
4730 tep->te_state));
4731 tl_merror(wq, mp, EPROTO);
4732 return;
4733 }
4734 /*
4735 * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state);
4736 * (state stays same on this event)
4737 */
4738
4739 /*
4740 * get connected endpoint
4741 */
4742 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4743 freemsg(mp);
4744 /* Peer closed */
4745 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4746 "tl_exdata: peer gone"));
4747 return;
4748 }
4749
4750 peer_rq = peer_tep->te_rq;
4751
4752 /*
4753 * Put it back if flow controlled
4754 * Note: Messages already on queue when we are closing is bounded
4755 * so we can ignore flow control.
4757 if (!canputnext(peer_rq) && !closing) {
4758 TL_PUTBQ(tep, mp);
4759 return;
4760 }
4761
4762 /*
4763 * validate state on peer
4764 */
4765 switch (peer_tep->te_state) {
4766 case TS_DATA_XFER:
4767 case TS_WIND_ORDREL:
4768 /* valid states */
4769 break;
4770 default:
4771 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4772 "tl_exdata:rx side:invalid state"));
4773 tl_merror(peer_tep->te_wq, mp, EPROTO);
4774 return;
4775 }
4776 /*
4777 * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4778 * (peer state stays same on this event)
4779 */
4780 /*
4781 * reuse message block
4782 */
4783 prim->type = T_EXDATA_IND;
4784
4785 /*
4786 * send data to connected peer
4787 */
4788 putnext(peer_rq, mp);
4789 }
4790
4791
4792
4793 static void
4794 tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
4795 {
4796 queue_t *wq = tep->te_wq;
4797 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4841 return;
4842 }
4843 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4844 "tl_ordlrel: closing socket ocon"));
4845 prim->type = T_ORDREL_IND;
4846 (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4847 return;
4848
4849 default:
4850 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4851 SL_TRACE | SL_ERROR,
4852 "tl_wput:T_ORDREL_REQ:out of state, state=%d",
4853 tep->te_state));
4854 if (!closing) {
4855 tl_merror(wq, mp, EPROTO);
4856 } else {
4857 freemsg(mp);
4858 }
4859 return;
4860 }
4861 tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state);
4862
4863 /*
4864 * get connected endpoint
4865 */
4866 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4867 /* Peer closed */
4868 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4869 "tl_ordrel: peer gone"));
4870 freemsg(mp);
4871 return;
4872 }
4873
4874 peer_rq = peer_tep->te_rq;
4875
4876 /*
4877 * Put it back if flow controlled except when we are closing.
4878 * Note: Messages already on queue when we are closing is bounded
4879 * so we can ignore flow control.
4880 */
4881 if (!canputnext(peer_rq) && !closing) {
4882 TL_PUTBQ(tep, mp);
4883 return;
4884 }
4885
4886 /*
4887 * validate state on peer
4888 */
4889 switch (peer_tep->te_state) {
4890 case TS_DATA_XFER:
4891 case TS_WIND_ORDREL:
4892 /* valid states */
4893 break;
4894 default:
4895 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4896 "tl_ordrel:rx side:invalid state"));
4897 tl_merror(peer_tep->te_wq, mp, EPROTO);
4898 return;
4899 }
4900 peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
4901
4902 /*
4903 * reuse message block
4904 */
4905 prim->type = T_ORDREL_IND;
4906 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4907 "tl_ordrel: send ordrel_ind"));
4908
4909 /*
4910 * send data to connected peer
4911 */
4912 putnext(peer_rq, mp);
4913 }
4914
4915
4916 /*
4917 * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space.
4918 */
4919 static void
4920 tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
4965 (t_scalar_t)sizeof (struct T_uderror_ind);
4966 addr_startp = mp->b_rptr + udreq->DEST_offset;
4967 bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
4968 (size_t)alen);
4969 }
4970 if (olen <= 0) {
4971 uderr->OPT_offset = 0;
4972 } else {
4973 uderr->OPT_offset =
4974 (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
4975 uderr->DEST_length);
4976 addr_startp = mp->b_rptr + udreq->OPT_offset;
4977 bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
4978 (size_t)olen);
4979 }
4980 freemsg(mp);
4981
4982 /*
4983 * send indication message
4984 */
4985 tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state);
4986
4987 qreply(wq, err_mp);
4988 }
4989
4990 static void
4991 tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
4992 {
4993 queue_t *wq = tep->te_wq;
4994
4995 if (!tep->te_closing && (wq->q_first != NULL)) {
4996 TL_PUTQ(tep, mp);
4997 } else {
4998 if (tep->te_rq != NULL)
4999 tl_unitdata(mp, tep);
5000 else
5001 freemsg(mp);
5002 }
5003
5004 tl_serializer_exit(tep);
5005 tl_refrele(tep);
5023 ssize_t msz, ui_sz, reuse_mb_sz;
5024 t_scalar_t alen, aoff, olen, ooff;
5025 t_scalar_t oldolen = 0;
5026 cred_t *cr = NULL;
5027 pid_t cpid;
5028
5029 udreq = (struct T_unitdata_req *)mp->b_rptr;
5030 msz = MBLKL(mp);
5031
5032 /*
5033 * validate the state
5034 */
5035 if (tep->te_state != TS_IDLE) {
5036 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5037 SL_TRACE | SL_ERROR,
5038 "tl_wput:T_CONN_REQ:out of state"));
5039 tl_merror(wq, mp, EPROTO);
5040 return;
5041 }
5042 /*
5043 * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state);
5044 * (state does not change on this event)
5045 */
5046
5047 /*
5048 * validate the message
5049 * Note: dereference fields in struct inside message only
5050 * after validating the message length.
5051 */
5052 if (msz < sizeof (struct T_unitdata_req)) {
5053 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
5054 "tl_unitdata:invalid message length"));
5055 tl_merror(wq, mp, EINVAL);
5056 return;
5057 }
5058 alen = udreq->DEST_length;
5059 aoff = udreq->DEST_offset;
5060 oldolen = olen = udreq->OPT_length;
5061 ooff = udreq->OPT_offset;
5062 if (olen == 0)
5063 ooff = 0;
5322
5323 tl_fill_option(ui_mp->b_rptr + udind->OPT_offset +
5324 oldolen, cr, cpid,
5325 peer_tep->te_flag, peer_tep->te_credp);
5326 } else {
5327 bcopy((void *)((uintptr_t)udreq + ooff),
5328 (void *)((uintptr_t)udind + udind->OPT_offset),
5329 olen);
5330 }
5331
5332 /*
5333 * relink data blocks from mp to ui_mp
5334 */
5335 ui_mp->b_cont = mp->b_cont;
5336 freeb(mp);
5337 mp = ui_mp;
5338 }
5339 /*
5340 * send indication message
5341 */
5342 peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state);
5343 putnext(peer_tep->te_rq, mp);
5344 }
5345
5346
5347
5348 /*
5349 * Check if a given addr is in use.
5350 * Endpoint ptr returned or NULL if not found.
5351 * The name space is separate for each mode. This implies that
5352 * sockets get their own name space.
5353 */
5354 static tl_endpt_t *
5355 tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
5356 {
5357 tl_endpt_t *peer_tep = NULL;
5358 int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
5359 (mod_hash_val_t *)&peer_tep, tl_find_callback);
5360
5361 ASSERT(!IS_SOCKET(tep));
5362
5644 * Note that when te_oconp is set the peer better have
5645 * a t_connind_t for the client.
5646 */
5647 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
5648 /*
5649 * Queue the disconnection message.
5650 */
5651 tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
5652 } else {
5653 tip = tl_icon_find(srv_tep, tep->te_seqno);
5654 if (tip == NULL) {
5655 freemsg(d_mp);
5656 } else {
5657 ASSERT(tep == tip->ti_tep);
5658 ASSERT(tep->te_ser == srv_tep->te_ser);
5659 /*
5660 * Delete tip from the server list.
5661 */
5662 if (srv_tep->te_nicon == 1) {
5663 srv_tep->te_state =
5664 NEXTSTATE(TE_DISCON_IND2,
5665 srv_tep->te_state);
5666 } else {
5667 srv_tep->te_state =
5668 NEXTSTATE(TE_DISCON_IND3,
5669 srv_tep->te_state);
5670 }
5671 ASSERT(*(uint32_t *)(d_mp->b_rptr) ==
5672 T_DISCON_IND);
5673 putnext(srv_tep->te_rq, d_mp);
5674 tl_freetip(srv_tep, tip);
5675 }
5676 TL_UNCONNECT(tep->te_oconp);
5677 srv_tep = NULL;
5678 }
5679 } else if (peer_tep != NULL) {
5680 /*
5681 * unconnect existing connection
5682 * If connected, change state of peer on
5683 * discon ind event and send discon ind pdu
5684 * to module above it
5685 */
5686
5687 ASSERT(tep->te_ser == peer_tep->te_ser);
5688 if (IS_COTSORD(peer_tep) &&
5689 (peer_tep->te_state == TS_WIND_ORDREL ||
5690 peer_tep->te_state == TS_DATA_XFER)) {
5691 /*
5692 * send ordrel ind
5693 */
5694 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
5695 "tl_co_unconnect:connected: ordrel_ind state %d->%d",
5696 peer_tep->te_state,
5697 NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state)));
5698 d_mp = tl_ordrel_ind_alloc();
5699 if (d_mp == NULL) {
5700 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5701 SL_TRACE | SL_ERROR,
5702 "tl_co_unconnect:connected:"
5703 "allocb failure"));
5704 /*
5705 * Continue with cleaning up peer as
5706 * this side may go away with the close
5707 */
5708 TL_QENABLE(peer_tep);
5709 goto discon_peer;
5710 }
5711 peer_tep->te_state =
5712 NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
5713
5714 putnext(peer_tep->te_rq, d_mp);
5715 /*
5716 * Handle flow control case. This will generate
5717 * a t_discon_ind message with reason 0 if there
5718 * is data queued on the write side.
5719 */
5720 TL_QENABLE(peer_tep);
5721 } else if (IS_COTSORD(peer_tep) &&
5722 peer_tep->te_state == TS_WREQ_ORDREL) {
5723 /*
5724 * Sent an ordrel_ind. We send a discon with
5725 * with error 0 to inform that the peer is gone.
5726 */
5727 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5728 SL_TRACE | SL_ERROR,
5729 "tl_co_unconnect: discon in state %d",
5730 tep->te_state));
5731 tl_discon_ind(peer_tep, 0);
5732 } else {
5936 mp->b_next = NULL;
5937
5938 ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
5939 switch (DB_TYPE(mp)) {
5940 default:
5941 freemsg(mp);
5942 break;
5943 case M_DATA:
5944 putnext(tep->te_rq, mp);
5945 break;
5946 case M_PROTO:
5947 primp = (union T_primitives *)mp->b_rptr;
5948 switch (primp->type) {
5949 case T_UNITDATA_IND:
5950 case T_DATA_IND:
5951 case T_OPTDATA_IND:
5952 case T_EXDATA_IND:
5953 putnext(tep->te_rq, mp);
5954 break;
5955 case T_ORDREL_IND:
5956 tep->te_state = NEXTSTATE(TE_ORDREL_IND,
5957 tep->te_state);
5958 putnext(tep->te_rq, mp);
5959 break;
5960 case T_DISCON_IND:
5961 tep->te_state = TS_IDLE;
5962 putnext(tep->te_rq, mp);
5963 break;
5964 default:
5965 #ifdef DEBUG
5966 cmn_err(CE_PANIC,
5967 "tl_icon_sendmsgs: unknown primitive");
5968 #endif /* DEBUG */
5969 freemsg(mp);
5970 break;
5971 }
5972 break;
5973 }
5974 }
5975 }
5976
5977 /*
|
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 * Copyright 2020 Joyent, Inc.
29 */
30
31 /*
32 * Multithreaded STREAMS Local Transport Provider.
33 *
34 * OVERVIEW
35 * ========
36 *
37 * This driver provides TLI as well as socket semantics. It provides
38 * connectionless, connection oriented, and connection oriented with orderly
39 * release transports for TLI and sockets. Each transport type has separate name
40 * spaces (i.e. it is not possible to connect from a socket to a TLI endpoint) -
41 * this removes any name space conflicts when binding to socket style transport
42 * addresses.
43 *
44 * NOTE: There is one exception: Socket ticots and ticotsord transports share
45 * the same namespace. In fact, sockets always use ticotsord type transport.
46 *
47 * The driver mode is specified during open() by the minor number used for
48 * open.
359 #include <sys/serializer.h>
360
361 /*
362 * TBD List
363 * 14. Eliminate state changes through table
364 * 16. AF_UNIX socket options
365 * 17. connect() for ticlts
366 * 18. support for "netstat" to show AF_UNIX plus TLI local
367 * transport connections
368 * 21. sanity check to flushing on sending M_ERROR
369 */
370
371 /*
372 * CONSTANT DECLARATIONS
373 * --------------------
374 */
375
376 /*
377 * Local declarations
378 */
379 #define BADSEQNUM (-1) /* initial seq number used by T_DISCON_IND */
380 #define TL_BUFWAIT (10000) /* usecs to wait for allocb buffer timeout */
381 #define TL_TIDUSZ (64*1024) /* tidu size when "strmsgz" is unlimited (0) */
382 /*
383 * Hash tables size.
384 */
385 #define TL_HASH_SIZE 311
386
387 /*
388 * Definitions for module_info
389 */
390 #define TL_ID (104) /* module ID number */
391 #define TL_NAME "tl" /* module name */
392 #define TL_MINPSZ (0) /* min packet size */
393 #define TL_MAXPSZ INFPSZ /* max packet size ZZZ */
394 #define TL_HIWAT (16*1024) /* hi water mark */
395 #define TL_LOWAT (256) /* lo water mark */
396 /*
397 * Definition of minor numbers/modes for new transport provider modes.
398 * We view the socket use as a separate mode to get a separate name space.
399 */
400 #define TL_TICOTS 0 /* connection oriented transport */
401 #define TL_TICOTSORD 1 /* COTS w/ orderly release */
402 #define TL_TICLTS 2 /* connectionless transport */
403 #define TL_UNUSED 3
404 #define TL_SOCKET 4 /* Socket */
405 #define TL_SOCK_COTS (TL_SOCKET | TL_TICOTS)
406 #define TL_SOCK_COTSORD (TL_SOCKET | TL_TICOTSORD)
407 #define TL_SOCK_CLTS (TL_SOCKET | TL_TICLTS)
408
409 #define TL_MINOR_MASK 0x7
410 #define TL_MINOR_START (TL_TICLTS + 1)
411
412 /*
413 * LOCAL MACROS
414 */
415 #define T_ALIGN(p) P2ROUNDUP((p), sizeof (t_scalar_t))
416
417 /*
418 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
419 */
420 static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
421 static int tl_close(queue_t *, int, cred_t *);
422 static int tl_wput(queue_t *, mblk_t *);
423 static int tl_wsrv(queue_t *);
424 static int tl_rsrv(queue_t *);
425
426 static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
427 static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
428 static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
429
430
431 /*
432 * GLOBAL DATA STRUCTURES AND VARIABLES
433 * -----------------------------------
434 */
435
436 /*
437 * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ
795
796 /*
797 * Default Data Unit size.
798 */
799 static t_scalar_t tl_tidusz;
800
801 /*
802 * Size of hash tables.
803 */
804 static size_t tl_hash_size = TL_HASH_SIZE;
805
806 /*
807 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
808 * for sockets.
809 */
810 static int tl_disable_early_connect = 0;
811 static int tl_client_closing_when_accepting;
812
813 static int tl_serializer_noswitch;
814
815 #define nr 127 /* not reachable */
816
817 #define TE_NOEVENTS 28
818
819 static char nextstate[TE_NOEVENTS][TS_NOSTATES] = {
820 /* STATES */
821 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
822
823 /* Initialization events */
824
825 #define TE_BIND_REQ 0 /* bind request */
826 { 1, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
827 #define TE_UNBIND_REQ 1 /* unbind request */
828 {nr, nr, nr, 2, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
829 #define TE_OPTMGMT_REQ 2 /* manage options req */
830 {nr, nr, nr, 4, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
831 #define TE_BIND_ACK 3 /* bind acknowledment */
832 {nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
833 #define TE_OPTMGMT_ACK 4 /* manage options ack */
834 {nr, nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
835 #define TE_ERROR_ACK 5 /* error acknowledgment */
836 {nr, 0, 3, nr, 3, 3, nr, nr, 7, nr, nr, nr, 6, 7, 9, 10, 11},
837 #define TE_OK_ACK1 6 /* ok ack seqcnt == 0 */
838 {nr, nr, 0, nr, nr, 6, nr, nr, nr, nr, nr, nr, 3, nr, 3, 3, 3},
839 #define TE_OK_ACK2 7 /* ok ack seqcnt == 1, q == resq */
840 {nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, nr, nr, nr, 3, nr, nr, nr},
841 #define TE_OK_ACK3 8 /* ok ack seqcnt == 1, q != resq */
842 {nr, nr, nr, nr, nr, nr, nr, nr, 3, nr, nr, nr, nr, 3, nr, nr, nr},
843 #define TE_OK_ACK4 9 /* ok ack seqcnt > 1 */
844 {nr, nr, nr, nr, nr, nr, nr, nr, 7, nr, nr, nr, nr, 7, nr, nr, nr},
845
846 /* Connection oriented events */
847 #define TE_CONN_REQ 10 /* connection request */
848 {nr, nr, nr, 5, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
849 #define TE_CONN_RES 11 /* connection response */
850 {nr, nr, nr, nr, nr, nr, nr, 8, nr, nr, nr, nr, nr, nr, nr, nr, nr},
851 #define TE_DISCON_REQ 12 /* disconnect request */
852 {nr, nr, nr, nr, nr, nr, 12, 13, nr, 14, 15, 16, nr, nr, nr, nr, nr},
853 #define TE_DATA_REQ 13 /* data request */
854 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, 11, nr, nr, nr, nr, nr},
855 #define TE_EXDATA_REQ 14 /* expedited data request */
856 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, nr, 11, nr, nr, nr, nr, nr},
857 #define TE_ORDREL_REQ 15 /* orderly release req */
858 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 10, nr, 3, nr, nr, nr, nr, nr},
859 #define TE_CONN_IND 16 /* connection indication */
860 {nr, nr, nr, 7, nr, nr, nr, 7, nr, nr, nr, nr, nr, nr, nr, nr, nr},
861 #define TE_CONN_CON 17 /* connection confirmation */
862 {nr, nr, nr, nr, nr, nr, 9, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
863 #define TE_DATA_IND 18 /* data indication */
864 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, 10, nr, nr, nr, nr, nr, nr},
865 #define TE_EXDATA_IND 19 /* expedited data indication */
866 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 9, 10, nr, nr, nr, nr, nr, nr},
867 #define TE_ORDREL_IND 20 /* orderly release ind */
868 {nr, nr, nr, nr, nr, nr, nr, nr, nr, 11, 3, nr, nr, nr, nr, nr, nr},
869 #define TE_DISCON_IND1 21 /* disconnect indication seq == 0 */
870 {nr, nr, nr, nr, nr, nr, 3, nr, nr, 3, 3, 3, nr, nr, nr, nr, nr},
871 #define TE_DISCON_IND2 22 /* disconnect indication seq == 1 */
872 {nr, nr, nr, nr, nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr},
873 #define TE_DISCON_IND3 23 /* disconnect indication seq > 1 */
874 {nr, nr, nr, nr, nr, nr, nr, 7, nr, nr, nr, nr, nr, nr, nr, nr, nr},
875 #define TE_PASS_CONN 24 /* pass connection */
876 {nr, nr, nr, 9, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
877
878
879 /* Unit data events */
880
881 #define TE_UNITDATA_REQ 25 /* unitdata request */
882 {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
883 #define TE_UNITDATA_IND 26 /* unitdata indication */
884 {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
885 #define TE_UDERROR_IND 27 /* unitdata error indication */
886 {nr, nr, nr, 3, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr, nr},
887 };
888
889
890
891 /*
892 * LOCAL FUNCTION PROTOTYPES
893 * -------------------------
894 */
895 static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
896 static void tl_do_proto(mblk_t *, tl_endpt_t *);
897 static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
898 static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
899 static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
900 t_scalar_t);
901 static void tl_bind(mblk_t *, tl_endpt_t *);
902 static void tl_bind_ser(mblk_t *, tl_endpt_t *);
903 static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
904 static void tl_unbind(mblk_t *, tl_endpt_t *);
905 static void tl_optmgmt(queue_t *, mblk_t *);
906 static void tl_conn_req(queue_t *, mblk_t *);
907 static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
908 static void tl_conn_res(mblk_t *, tl_endpt_t *);
909 static void tl_discon_req(mblk_t *, tl_endpt_t *);
910 static void tl_capability_req(mblk_t *, tl_endpt_t *);
2003 static void
2004 tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
2005 {
2006 tl_endpt_t *peer_tep = tep->te_conp;
2007 queue_t *peer_rq;
2008
2009 ASSERT(DB_TYPE(mp) == M_DATA);
2010 ASSERT(IS_COTS(tep));
2011
2012 IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer);
2013
2014 /*
2015 * fastpath for data. Ignore flow control if tep is closing.
2016 */
2017 if ((peer_tep != NULL) &&
2018 !peer_tep->te_closing &&
2019 ((tep->te_state == TS_DATA_XFER) ||
2020 (tep->te_state == TS_WREQ_ORDREL)) &&
2021 (tep->te_wq != NULL) &&
2022 (tep->te_wq->q_first == NULL) &&
2023 (peer_tep->te_state == TS_DATA_XFER ||
2024 peer_tep->te_state == TS_WIND_ORDREL ||
2025 peer_tep->te_state == TS_WREQ_ORDREL) &&
2026 ((peer_rq = peer_tep->te_rq) != NULL) &&
2027 (canputnext(peer_rq) || tep->te_closing)) {
2028 putnext(peer_rq, mp);
2029 } else if (tep->te_closing) {
2030 /*
2031 * It is possible that by the time we got here tep started to
2032 * close. If the write queue is not empty, and the state is
2033 * TS_DATA_XFER the data should be delivered in order, so we
2034 * call putq() instead of freeing the data.
2035 */
2036 if ((tep->te_wq != NULL) &&
2037 ((tep->te_state == TS_DATA_XFER) ||
2038 (tep->te_state == TS_WREQ_ORDREL))) {
2039 TL_PUTQ(tep, mp);
2040 } else {
2041 freemsg(mp);
2042 }
2043 } else {
2044 TL_PUTQ(tep, mp);
2045 }
2431 ssize_t msz = MBLKL(mp), basize;
2432 t_scalar_t tli_err = 0, unix_err = 0;
2433 t_scalar_t save_prim_type = bind->PRIM_type;
2434 t_scalar_t save_state = tep->te_state;
2435
2436 if (tep->te_state != TS_UNBND) {
2437 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2438 SL_TRACE | SL_ERROR,
2439 "tl_wput:bind_request:out of state, state=%d",
2440 tep->te_state));
2441 tli_err = TOUTSTATE;
2442 goto error;
2443 }
2444
2445 if (msz < sizeof (struct T_bind_req)) {
2446 tli_err = TSYSERR;
2447 unix_err = EINVAL;
2448 goto error;
2449 }
2450
2451 tep->te_state = nextstate[TE_BIND_REQ][tep->te_state];
2452
2453 ASSERT((bind->PRIM_type == O_T_BIND_REQ) ||
2454 (bind->PRIM_type == T_BIND_REQ));
2455
2456 alen = bind->ADDR_length;
2457 aoff = bind->ADDR_offset;
2458
2459 /* negotiate max conn req pending */
2460 if (IS_COTS(tep)) {
2461 qlen = bind->CONIND_number;
2462 if (qlen > tl_maxqlen)
2463 qlen = tl_maxqlen;
2464 }
2465
2466 /*
2467 * Reserve hash handle. It can only be NULL if the endpoint is unbound
2468 * and bound again.
2469 */
2470 if ((tep->te_hash_hndl == NULL) &&
2471 ((tep->te_flag & TL_ADDRHASHED) == 0) &&
2472 mod_hash_reserve_nosleep(tep->te_addrhash,
2473 &tep->te_hash_hndl) != 0) {
2474 tli_err = TSYSERR;
2475 unix_err = ENOSR;
2476 goto error;
2477 }
2478
2479 /*
2480 * Verify address correctness.
2481 */
2482 if (IS_SOCKET(tep)) {
2483 ASSERT(bind->PRIM_type == O_T_BIND_REQ);
2484
2485 if ((alen != TL_SOUX_ADDRLEN) ||
2486 (aoff < 0) ||
2487 (aoff + alen > msz)) {
2488 (void) (STRLOG(TL_ID, tep->te_minor,
2489 1, SL_TRACE | SL_ERROR,
2490 "tl_bind: invalid socket addr"));
2491 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2492 tli_err = TSYSERR;
2493 unix_err = EINVAL;
2494 goto error;
2495 }
2496 /* Copy address from message to local buffer. */
2497 bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr));
2498 /*
2499 * Check that we got correct address from sockets
2500 */
2501 if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
2502 (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
2503 (void) (STRLOG(TL_ID, tep->te_minor,
2504 1, SL_TRACE | SL_ERROR,
2505 "tl_bind: invalid socket magic"));
2506 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2507 tli_err = TSYSERR;
2508 unix_err = EINVAL;
2509 goto error;
2510 }
2511 if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
2512 (ux_addr.soua_vp != NULL)) {
2513 (void) (STRLOG(TL_ID, tep->te_minor,
2514 1, SL_TRACE | SL_ERROR,
2515 "tl_bind: implicit addr non-empty"));
2516 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2517 tli_err = TSYSERR;
2518 unix_err = EINVAL;
2519 goto error;
2520 }
2521 if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
2522 (ux_addr.soua_vp == NULL)) {
2523 (void) (STRLOG(TL_ID, tep->te_minor,
2524 1, SL_TRACE | SL_ERROR,
2525 "tl_bind: explicit addr empty"));
2526 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2527 tli_err = TSYSERR;
2528 unix_err = EINVAL;
2529 goto error;
2530 }
2531 } else {
2532 if ((alen > 0) && ((aoff < 0) ||
2533 ((ssize_t)(aoff + alen) > msz) ||
2534 ((aoff + alen) < 0))) {
2535 (void) (STRLOG(TL_ID, tep->te_minor,
2536 1, SL_TRACE | SL_ERROR,
2537 "tl_bind: invalid message"));
2538 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2539 tli_err = TSYSERR;
2540 unix_err = EINVAL;
2541 goto error;
2542 }
2543 if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
2544 (void) (STRLOG(TL_ID, tep->te_minor,
2545 1, SL_TRACE | SL_ERROR,
2546 "tl_bind: bad addr in message"));
2547 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2548 tli_err = TBADADDR;
2549 goto error;
2550 }
2551 #ifdef DEBUG
2552 /*
2553 * Mild form of ASSERT()ion to detect broken TPI apps.
2554 * if (!assertion)
2555 * log warning;
2556 */
2557 if (!((alen == 0 && aoff == 0) ||
2558 (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
2559 (void) (STRLOG(TL_ID, tep->te_minor,
2560 3, SL_TRACE | SL_ERROR,
2561 "tl_bind: addr overlaps TPI message"));
2562 }
2563 #endif
2564 }
2565
2566 /*
2567 * Bind the address provided or allocate one if requested.
2693 tl_memrecover(wq, mp, basize);
2694 return;
2695 }
2696
2697 DB_TYPE(bamp) = M_PCPROTO;
2698 bamp->b_wptr = bamp->b_rptr + basize;
2699 b_ack = (struct T_bind_ack *)bamp->b_rptr;
2700 b_ack->PRIM_type = T_BIND_ACK;
2701 b_ack->CONIND_number = qlen;
2702 b_ack->ADDR_length = tep->te_alen;
2703 b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack);
2704 addr_startp = bamp->b_rptr + b_ack->ADDR_offset;
2705 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
2706
2707 if (IS_COTS(tep)) {
2708 tep->te_qlen = qlen;
2709 if (qlen > 0)
2710 tep->te_flag |= TL_LISTENER;
2711 }
2712
2713 tep->te_state = nextstate[TE_BIND_ACK][tep->te_state];
2714 /*
2715 * send T_BIND_ACK message
2716 */
2717 (void) qreply(wq, bamp);
2718 return;
2719
2720 error:
2721 ackmp = reallocb(mp, sizeof (struct T_error_ack), 0);
2722 if (ackmp == NULL) {
2723 /*
2724 * roll back state changes
2725 */
2726 tep->te_state = save_state;
2727 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2728 return;
2729 }
2730 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
2731 tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type);
2732 }
2733
2734 /*
2735 * Process T_UNBIND_REQ.
2736 * Called from serializer.
2737 */
2738 static void
2739 tl_unbind(mblk_t *mp, tl_endpt_t *tep)
2740 {
2741 queue_t *wq;
2742 mblk_t *ackmp;
2743
2744 if (tep->te_closing) {
2745 freemsg(mp);
2746 return;
2747 }
2748
2749 wq = tep->te_wq;
2750
2756 tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2757 return;
2758 }
2759 /*
2760 * memory resources committed
2761 * Note: no message validation. T_UNBIND_REQ message is
2762 * same size as PRIM_type field so already verified earlier.
2763 */
2764
2765 /*
2766 * validate state
2767 */
2768 if (tep->te_state != TS_IDLE) {
2769 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2770 SL_TRACE | SL_ERROR,
2771 "tl_wput:T_UNBIND_REQ:out of state, state=%d",
2772 tep->te_state));
2773 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
2774 return;
2775 }
2776 tep->te_state = nextstate[TE_UNBIND_REQ][tep->te_state];
2777
2778 /*
2779 * TPI says on T_UNBIND_REQ:
2780 * send up a M_FLUSH to flush both
2781 * read and write queues
2782 */
2783 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
2784
2785 if (!IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
2786 tep->te_magic != SOU_MAGIC_EXPLICIT) {
2787
2788 /*
2789 * Sockets use bind with qlen==0 followed by bind() to
2790 * the same address with qlen > 0 for listeners.
2791 * We allow rebind with a new qlen value.
2792 */
2793 tl_addr_unbind(tep);
2794 }
2795
2796 tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
2797 /*
2798 * send T_OK_ACK
2799 */
2800 tl_ok_ack(wq, ackmp, T_UNBIND_REQ);
2801 }
2802
2803
2804 /*
2805 * Option management code from drv/ip is used here
2806 * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr
2807 * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ.
2808 * However, that is what we want as that option is 'unorthodox'
2809 * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND
2810 * and not in T_SVR4_OPTMGMT_REQ/ACK
2811 * Note2: use of optcom_req means this routine is an exception to
2812 * recovery from allocb() failures.
2813 */
2814
2815 static void
2816 tl_optmgmt(queue_t *wq, mblk_t *mp)
3014 SL_TRACE | SL_ERROR,
3015 "tl_conn_req:options not supported "
3016 "in message"));
3017 tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
3018 freemsg(mp);
3019 return;
3020 }
3021 }
3022
3023 /*
3024 * Prevent tep from closing on us.
3025 */
3026 if (!tl_noclose(tep)) {
3027 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3028 "tl_conn_req:endpoint is closing"));
3029 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
3030 freemsg(mp);
3031 return;
3032 }
3033
3034 tep->te_state = nextstate[TE_CONN_REQ][tep->te_state];
3035 /*
3036 * get endpoint to connect to
3037 * check that peer with DEST addr is bound to addr
3038 * and has CONIND_number > 0
3039 */
3040 dst.ta_alen = alen;
3041 dst.ta_abuf = mp->b_rptr + aoff;
3042 dst.ta_zoneid = tep->te_zoneid;
3043
3044 /*
3045 * Verify if remote addr is in use
3046 */
3047 peer_tep = (IS_SOCKET(tep) ?
3048 tl_sock_find_peer(tep, &ux_addr) :
3049 tl_find_peer(tep, &dst));
3050
3051 if (peer_tep == NULL) {
3052 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3053 "tl_conn_req:no one at connect address"));
3054 err = ECONNREFUSED;
3055 } else if (peer_tep->te_nicon >= peer_tep->te_qlen) {
3056 /*
3057 * validate that number of incoming connection is
3058 * not to capacity on destination endpoint
3059 */
3060 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
3061 "tl_conn_req: qlen overflow connection refused"));
3062 err = ECONNREFUSED;
3063 }
3064
3065 /*
3066 * Send T_DISCON_IND in case of error
3067 */
3068 if (err != 0) {
3069 if (peer_tep != NULL)
3070 tl_refrele(peer_tep);
3071 /* We are still expected to send T_OK_ACK */
3072 tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
3073 tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
3074 tl_closeok(tep);
3075 dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind),
3076 M_PROTO, T_DISCON_IND);
3077 if (dimp == NULL) {
3078 tl_merror(wq, NULL, ENOSR);
3079 return;
3080 }
3081 di = (struct T_discon_ind *)dimp->b_rptr;
3082 di->DISCON_reason = err;
3083 di->SEQ_number = BADSEQNUM;
3084
3085 tep->te_state = TS_IDLE;
3086 /*
3087 * send T_DISCON_IND message
3088 */
3089 putnext(tep->te_rq, dimp);
3090 return;
3091 }
3092
3273 tl_memrecover(wq, indmp, sizeof (*tip));
3274 freemsg(ackmp);
3275 if (opts != NULL)
3276 kmem_free(opts, olen);
3277 freemsg(confmp);
3278 TL_UNCONNECT(tep->te_oconp);
3279 tl_serializer_exit(tep);
3280 tl_refrele(tep);
3281 return;
3282 }
3283 tip->ti_mp = NULL;
3284
3285 /*
3286 * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON
3287 * and tl_icon_t cell.
3288 */
3289
3290 /*
3291 * ack validity of request and send the peer credential in the ACK.
3292 */
3293 tep->te_state = nextstate[TE_OK_ACK1][tep->te_state];
3294
3295 if (peer_tep != NULL && peer_tep->te_credp != NULL &&
3296 confmp != NULL) {
3297 mblk_setcred(confmp, peer_tep->te_credp, peer_tep->te_cpid);
3298 }
3299
3300 tl_ok_ack(wq, ackmp, T_CONN_REQ);
3301
3302 /*
3303 * prepare message to send T_CONN_IND
3304 */
3305 /*
3306 * allocate the message - original data blocks retained
3307 * in the returned mblk
3308 */
3309 cimp = tl_resizemp(indmp, size);
3310 if (cimp == NULL) {
3311 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3312 "tl_conn_req:con_ind:allocb failure"));
3313 tl_merror(wq, indmp, ENOMEM);
3347 ASSERT(opts != NULL);
3348 bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen);
3349 } else {
3350 ci->OPT_offset = 0;
3351 ci->OPT_length = 0;
3352 }
3353 if (opts != NULL)
3354 kmem_free(opts, olen);
3355
3356 /*
3357 * register connection request with server peer
3358 * append to list of incoming connections
3359 * increment references for both peer_tep and tep: peer_tep is placed on
3360 * te_oconp and tep is placed on listeners queue.
3361 */
3362 tip->ti_tep = tep;
3363 tip->ti_seqno = tep->te_seqno;
3364 list_insert_tail(&peer_tep->te_iconp, tip);
3365 peer_tep->te_nicon++;
3366
3367 peer_tep->te_state = nextstate[TE_CONN_IND][peer_tep->te_state];
3368 /*
3369 * send the T_CONN_IND message
3370 */
3371 putnext(peer_tep->te_rq, cimp);
3372
3373 /*
3374 * Send a T_CONN_CON message for sockets.
3375 * Disable the queues until we have reached the correct state!
3376 */
3377 if (confmp != NULL) {
3378 tep->te_state = nextstate[TE_CONN_CON][tep->te_state];
3379 noenable(wq);
3380 putnext(tep->te_rq, confmp);
3381 }
3382 /*
3383 * Now we need to increment tep reference because tep is referenced by
3384 * server list of pending connections. We also need to decrement
3385 * reference before exiting serializer. Two operations void each other
3386 * so we don't modify reference at all.
3387 */
3388 ASSERT(tep->te_refcnt >= 2);
3389 ASSERT(peer_tep->te_refcnt >= 2);
3390 tl_serializer_exit(tep);
3391 }
3392
3393
3394
3395 /*
3396 * Handle T_conn_res on listener stream. Called on listener serializer.
3397 * tl_conn_req has already generated the T_CONN_CON.
3398 * tl_conn_res is called on listener serializer.
3478 ooff = cres->OPT_offset;
3479 if (((olen > 0) && ((ooff + olen) > msz))) {
3480 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3481 "tl_conn_res:invalid message"));
3482 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3483 freemsg(mp);
3484 return;
3485 }
3486 if (olen) {
3487 /*
3488 * no opts in connect res
3489 * supported in this provider
3490 */
3491 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
3492 "tl_conn_res:options not supported in message"));
3493 tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
3494 freemsg(mp);
3495 return;
3496 }
3497
3498 tep->te_state = nextstate[TE_CONN_RES][tep->te_state];
3499 ASSERT(tep->te_state == TS_WACK_CRES);
3500
3501 if (cres->SEQ_number < TL_MINOR_START &&
3502 cres->SEQ_number >= BADSEQNUM) {
3503 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3504 "tl_conn_res:remote endpoint sequence number bad"));
3505 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3506 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3507 freemsg(mp);
3508 return;
3509 }
3510
3511 /*
3512 * find accepting endpoint. Will have extra reference if found.
3513 */
3514 if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
3515 (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
3516 (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
3517 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3518 "tl_conn_res:bad accepting endpoint"));
3519 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3520 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3521 freemsg(mp);
3522 return;
3523 }
3524
3525 /*
3526 * Prevent acceptor from closing.
3527 */
3528 if (!tl_noclose(acc_ep)) {
3529 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3530 "tl_conn_res:bad accepting endpoint"));
3531 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3532 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3533 tl_refrele(acc_ep);
3534 freemsg(mp);
3535 return;
3536 }
3537
3538 acc_ep->te_flag |= TL_ACCEPTOR;
3539
3540 /*
3541 * validate that accepting endpoint, if different from listening
3542 * has address bound => state is TS_IDLE
3543 * TROUBLE in XPG4 !!?
3544 */
3545 if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
3546 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3547 "tl_conn_res:accepting endpoint has no address bound,"
3548 "state=%d", acc_ep->te_state));
3549 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3550 tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3551 freemsg(mp);
3552 tl_closeok(acc_ep);
3553 tl_refrele(acc_ep);
3554 return;
3555 }
3556
3557 /*
3558 * validate if accepting endpt same as listening, then
3559 * no other incoming connection should be on the queue
3560 */
3561
3562 if ((tep == acc_ep) && (tep->te_nicon > 1)) {
3563 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
3564 "tl_conn_res: > 1 conn_ind on listener-acceptor"));
3565 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3566 tl_error_ack(wq, ackmp, TBADF, 0, prim);
3567 freemsg(mp);
3568 tl_closeok(acc_ep);
3569 tl_refrele(acc_ep);
3570 return;
3571 }
3572
3573 /*
3574 * Mark for deletion, the entry corresponding to client
3575 * on list of pending connections made by the listener
3576 * search list to see if client is one of the
3577 * recorded as a listener.
3578 */
3579 tip = tl_icon_find(tep, cres->SEQ_number);
3580 if (tip == NULL) {
3581 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3582 "tl_conn_res:no client in listener list"));
3583 tep->te_state = nextstate[TE_ERROR_ACK][tep->te_state];
3584 tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3585 freemsg(mp);
3586 tl_closeok(acc_ep);
3587 tl_refrele(acc_ep);
3588 return;
3589 }
3590
3591 /*
3592 * If ti_tep is NULL the client has already closed. In this case
3593 * the code below will avoid any action on the client side
3594 * but complete the server and acceptor state transitions.
3595 */
3596 ASSERT(tip->ti_tep == NULL ||
3597 tip->ti_tep->te_seqno == cres->SEQ_number);
3598 cl_ep = tip->ti_tep;
3599
3600 /*
3601 * If the client is present it is switched from listener's to acceptor's
3602 * serializer. We should block client closes while serializers are
3603 * being switched.
3681 /*
3682 * roll back state changes
3683 */
3684 tep->te_state = TS_WRES_CIND;
3685 tl_memrecover(wq, mp, size);
3686 freemsg(ackmp);
3687 if (client_noclose_set)
3688 tl_closeok(cl_ep);
3689 tl_closeok(acc_ep);
3690 tl_refrele(acc_ep);
3691 return;
3692 }
3693 mp = NULL;
3694 }
3695
3696 /*
3697 * Now ack validity of request
3698 */
3699 if (tep->te_nicon == 1) {
3700 if (tep == acc_ep)
3701 tep->te_state = nextstate[TE_OK_ACK2][tep->te_state];
3702 else
3703 tep->te_state = nextstate[TE_OK_ACK3][tep->te_state];
3704 } else {
3705 tep->te_state = nextstate[TE_OK_ACK4][tep->te_state];
3706 }
3707
3708 /*
3709 * send T_DISCON_IND now if client state validation failed earlier
3710 */
3711 if (err) {
3712 tl_ok_ack(wq, ackmp, prim);
3713 /*
3714 * flush the queues - why always ?
3715 */
3716 (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
3717
3718 dimp = tl_resizemp(respmp, size);
3719 if (dimp == NULL) {
3720 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3721 SL_TRACE | SL_ERROR,
3722 "tl_conn_res:con_ind:allocb failure"));
3723 tl_merror(wq, respmp, ENOMEM);
3724 tl_closeok(acc_ep);
3725 if (client_noclose_set)
3738 di->PRIM_type = T_DISCON_IND;
3739 di->DISCON_reason = err;
3740 di->SEQ_number = BADSEQNUM;
3741
3742 tep->te_state = TS_IDLE;
3743 /*
3744 * send T_DISCON_IND message
3745 */
3746 putnext(acc_ep->te_rq, dimp);
3747 if (client_noclose_set)
3748 tl_closeok(cl_ep);
3749 tl_closeok(acc_ep);
3750 tl_refrele(acc_ep);
3751 return;
3752 }
3753
3754 /*
3755 * now start connecting the accepting endpoint
3756 */
3757 if (tep != acc_ep)
3758 acc_ep->te_state = nextstate[TE_PASS_CONN][acc_ep->te_state];
3759
3760 if (cl_ep == NULL) {
3761 /*
3762 * The client has already closed. Send up any queued messages
3763 * and change the state accordingly.
3764 */
3765 tl_ok_ack(wq, ackmp, prim);
3766 tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3767
3768 /*
3769 * remove endpoint from incoming connection
3770 * delete client from list of incoming connections
3771 */
3772 tl_freetip(tep, tip);
3773 freemsg(mp);
3774 tl_closeok(acc_ep);
3775 tl_refrele(acc_ep);
3776 return;
3777 } else if (tip->ti_mp != NULL) {
3778 /*
3899 */
3900 if (!IS_SOCKET(tep)) {
3901 acc_ep->te_wq->q_next = cl_ep->te_rq;
3902 cl_ep->te_wq->q_next = acc_ep->te_rq;
3903 }
3904
3905 /*
3906 * send T_CONN_CON up on client side unless it was already
	 * done (for a socket). In case any data or an ordrel req has been
	 * queued, make sure that the service procedure runs.
3909 */
3910 if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) {
3911 enableok(cl_ep->te_wq);
3912 TL_QENABLE(cl_ep);
3913 if (ccmp != NULL)
3914 freemsg(ccmp);
3915 } else {
3916 /*
3917 * change client state on TE_CONN_CON event
3918 */
3919 cl_ep->te_state = nextstate[TE_CONN_CON][cl_ep->te_state];
3920 putnext(cl_ep->te_rq, ccmp);
3921 }
3922
	/* Mark both endpoints as accepted */
3924 cl_ep->te_flag |= TL_ACCEPTED;
3925 acc_ep->te_flag |= TL_ACCEPTED;
3926
3927 /*
3928 * Allow client and acceptor to close.
3929 */
3930 tl_closeok(acc_ep);
3931 if (client_noclose_set)
3932 tl_closeok(cl_ep);
3933 }
3934
3935
3936
3937
3938 static void
3939 tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
3985 msz = MBLKL(mp);
3986
3987 /*
3988 * validate the state
3989 */
3990 save_state = new_state = tep->te_state;
3991 if (!(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
3992 !(save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
3993 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3994 SL_TRACE | SL_ERROR,
3995 "tl_wput:T_DISCON_REQ:out of state, state=%d",
3996 tep->te_state));
3997 tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
3998 freemsg(mp);
3999 return;
4000 }
4001 /*
4002 * Defer committing the state change until it is determined if
4003 * the message will be queued with the tl_icon or not.
4004 */
4005 new_state = nextstate[TE_DISCON_REQ][tep->te_state];
4006
4007 /* validate the message */
4008 if (msz < sizeof (struct T_discon_req)) {
4009 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4010 "tl_discon_req:invalid message"));
4011 tep->te_state = nextstate[TE_ERROR_ACK][new_state];
4012 tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
4013 freemsg(mp);
4014 return;
4015 }
4016
4017 /*
4018 * if server, then validate that client exists
4019 * by connection sequence number etc.
4020 */
4021 if (tep->te_nicon > 0) { /* server */
4022
4023 /*
4024 * search server list for disconnect client
4025 */
4026 tip = tl_icon_find(tep, dr->SEQ_number);
4027 if (tip == NULL) {
4028 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4029 SL_TRACE | SL_ERROR,
4030 "tl_discon_req:no disconnect endpoint"));
4031 tep->te_state = nextstate[TE_ERROR_ACK][new_state];
4032 tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
4033 freemsg(mp);
4034 return;
4035 }
4036 /*
4037 * If ti_tep is NULL the client has already closed. In this case
4038 * the code below will avoid any action on the client side.
4039 */
4040
4041 IMPLY(tip->ti_tep != NULL,
4042 tip->ti_tep->te_seqno == dr->SEQ_number);
4043 peer_tep = tip->ti_tep;
4044 }
4045
4046 /*
4047 * preallocate now for T_DISCON_IND
4048 * ack validity of request (T_OK_ACK) after memory committed
4049 */
4050 size = sizeof (struct T_discon_ind);
4051 if ((respmp = reallocb(mp, size, 0)) == NULL) {
4052 tl_memrecover(wq, mp, size);
4053 freemsg(ackmp);
4054 return;
4055 }
4056
4057 /*
4058 * prepare message to ack validity of request
4059 */
4060 if (tep->te_nicon == 0) {
4061 new_state = nextstate[TE_OK_ACK1][new_state];
4062 } else {
4063 if (tep->te_nicon == 1)
4064 new_state = nextstate[TE_OK_ACK2][new_state];
4065 else
4066 new_state = nextstate[TE_OK_ACK4][new_state];
4067 }
4068
4069 /*
4070 * Flushing queues according to TPI. Using the old state.
4071 */
4072 if ((tep->te_nicon <= 1) &&
4073 ((save_state == TS_DATA_XFER) ||
4074 (save_state == TS_WIND_ORDREL) ||
4075 (save_state == TS_WREQ_ORDREL)))
4076 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
4077
4078 /* send T_OK_ACK up */
4079 tl_ok_ack(wq, ackmp, T_DISCON_REQ);
4080
4081 /*
4082 * now do disconnect business
4083 */
4084 if (tep->te_nicon > 0) { /* listener */
4085 if (peer_tep != NULL && !peer_tep->te_closing) {
4086 /*
4146 * messages.
4147 */
4148 tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
4149 peer_tep = NULL;
4150 dimp = NULL;
4151 /*
4152 * Can't clear te_oconp since tl_co_unconnect needs
4153 * it as a hint not to free the tep.
4154 * Keep the state unchanged since tl_conn_res inspects
4155 * it.
4156 */
4157 new_state = tep->te_state;
4158 } else {
4159 /* Found - delete it */
4160 tip = tl_icon_find(peer_tep, tep->te_seqno);
4161 if (tip != NULL) {
4162 ASSERT(tep == tip->ti_tep);
4163 save_state = peer_tep->te_state;
4164 if (peer_tep->te_nicon == 1)
4165 peer_tep->te_state =
4166 nextstate[TE_DISCON_IND2]
4167 [peer_tep->te_state];
4168 else
4169 peer_tep->te_state =
4170 nextstate[TE_DISCON_IND3]
4171 [peer_tep->te_state];
4172 tl_freetip(peer_tep, tip);
4173 }
4174 ASSERT(tep->te_oconp != NULL);
4175 TL_UNCONNECT(tep->te_oconp);
4176 }
4177 } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
4178 if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4179 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4180 SL_TRACE | SL_ERROR,
4181 "tl_discon_req: reallocb failed"));
4182 tep->te_state = new_state;
4183 tl_merror(wq, respmp, ENOMEM);
4184 return;
4185 }
4186 di = (struct T_discon_ind *)dimp->b_rptr;
4187 di->SEQ_number = BADSEQNUM;
4188
4189 save_state = peer_tep->te_state;
4190 peer_tep->te_state = TS_IDLE;
4191 } else {
4614 * with reason 0 to cause an EPIPE but no
4615 * read side error on AF_UNIX sockets.
4616 */
4617 freemsg(mp);
4618 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4619 SL_TRACE | SL_ERROR,
4620 "tl_data: WREQ_ORDREL and no peer"));
4621 tl_discon_ind(tep, 0);
4622 return;
4623 }
4624 break;
4625
4626 default:
4627 /* invalid state for event TE_DATA_REQ */
4628 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4629 "tl_data:cots:out of state"));
4630 tl_merror(wq, mp, EPROTO);
4631 return;
4632 }
4633 /*
4634 * tep->te_state = nextstate[TE_DATA_REQ][tep->te_state];
4635 * (State stays same on this event)
4636 */
4637
4638 /*
4639 * get connected endpoint
4640 */
4641 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4642 freemsg(mp);
4643 /* Peer closed */
4644 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4645 "tl_data: peer gone"));
4646 return;
4647 }
4648
4649 ASSERT(tep->te_serializer == peer_tep->te_serializer);
4650 peer_rq = peer_tep->te_rq;
4651
4652 /*
4653 * Put it back if flow controlled
	 * Note: Messages already on the queue when we are closing are bounded
4664 */
4665 switch (peer_tep->te_state) {
4666 case TS_DATA_XFER:
4667 case TS_WIND_ORDREL:
4668 /* valid states */
4669 break;
4670 default:
4671 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4672 "tl_data:rx side:invalid state"));
4673 tl_merror(peer_tep->te_wq, mp, EPROTO);
4674 return;
4675 }
4676 if (DB_TYPE(mp) == M_PROTO) {
4677 /* reuse message block - just change REQ to IND */
4678 if (prim->type == T_DATA_REQ)
4679 prim->type = T_DATA_IND;
4680 else
4681 prim->type = T_OPTDATA_IND;
4682 }
4683 /*
4684 * peer_tep->te_state = nextstate[TE_DATA_IND][peer_tep->te_state];
4685 * (peer state stays same on this event)
4686 */
4687 /*
4688 * send data to connected peer
4689 */
4690 putnext(peer_rq, mp);
4691 }
4692
4693
4694
4695 static void
4696 tl_exdata(mblk_t *mp, tl_endpt_t *tep)
4697 {
4698 queue_t *wq = tep->te_wq;
4699 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4700 ssize_t msz = MBLKL(mp);
4701 tl_endpt_t *peer_tep;
4702 queue_t *peer_rq;
4703 boolean_t closing = tep->te_closing;
4704
4781 * read side error on AF_UNIX sockets.
4782 */
4783 freemsg(mp);
4784 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4785 SL_TRACE | SL_ERROR,
4786 "tl_exdata: WREQ_ORDREL and no peer"));
4787 tl_discon_ind(tep, 0);
4788 return;
4789 }
4790 break;
4791
4792 default:
4793 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4794 SL_TRACE | SL_ERROR,
4795 "tl_wput:T_EXDATA_REQ:out of state, state=%d",
4796 tep->te_state));
4797 tl_merror(wq, mp, EPROTO);
4798 return;
4799 }
4800 /*
4801 * tep->te_state = nextstate[TE_EXDATA_REQ][tep->te_state];
4802 * (state stays same on this event)
4803 */
4804
4805 /*
4806 * get connected endpoint
4807 */
4808 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4809 freemsg(mp);
4810 /* Peer closed */
4811 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4812 "tl_exdata: peer gone"));
4813 return;
4814 }
4815
4816 peer_rq = peer_tep->te_rq;
4817
4818 /*
4819 * Put it back if flow controlled
	 * Note: Messages already on the queue when we are closing are bounded
4821 * so we can ignore flow control.
4823 if (!canputnext(peer_rq) && !closing) {
4824 TL_PUTBQ(tep, mp);
4825 return;
4826 }
4827
4828 /*
4829 * validate state on peer
4830 */
4831 switch (peer_tep->te_state) {
4832 case TS_DATA_XFER:
4833 case TS_WIND_ORDREL:
4834 /* valid states */
4835 break;
4836 default:
4837 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4838 "tl_exdata:rx side:invalid state"));
4839 tl_merror(peer_tep->te_wq, mp, EPROTO);
4840 return;
4841 }
4842 /*
4843 * peer_tep->te_state = nextstate[TE_DATA_IND][peer_tep->te_state];
4844 * (peer state stays same on this event)
4845 */
4846 /*
4847 * reuse message block
4848 */
4849 prim->type = T_EXDATA_IND;
4850
4851 /*
4852 * send data to connected peer
4853 */
4854 putnext(peer_rq, mp);
4855 }
4856
4857
4858
4859 static void
4860 tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
4861 {
4862 queue_t *wq = tep->te_wq;
4863 union T_primitives *prim = (union T_primitives *)mp->b_rptr;
4907 return;
4908 }
4909 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4910 "tl_ordlrel: closing socket ocon"));
4911 prim->type = T_ORDREL_IND;
4912 (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4913 return;
4914
4915 default:
4916 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4917 SL_TRACE | SL_ERROR,
4918 "tl_wput:T_ORDREL_REQ:out of state, state=%d",
4919 tep->te_state));
4920 if (!closing) {
4921 tl_merror(wq, mp, EPROTO);
4922 } else {
4923 freemsg(mp);
4924 }
4925 return;
4926 }
4927 tep->te_state = nextstate[TE_ORDREL_REQ][tep->te_state];
4928
4929 /*
4930 * get connected endpoint
4931 */
4932 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4933 /* Peer closed */
4934 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4935 "tl_ordrel: peer gone"));
4936 freemsg(mp);
4937 return;
4938 }
4939
4940 peer_rq = peer_tep->te_rq;
4941
4942 /*
4943 * Put it back if flow controlled except when we are closing.
	 * Note: Messages already on the queue when we are closing are bounded
4945 * so we can ignore flow control.
4946 */
4947 if (!canputnext(peer_rq) && !closing) {
4948 TL_PUTBQ(tep, mp);
4949 return;
4950 }
4951
4952 /*
4953 * validate state on peer
4954 */
4955 switch (peer_tep->te_state) {
4956 case TS_DATA_XFER:
4957 case TS_WIND_ORDREL:
4958 /* valid states */
4959 break;
4960 default:
4961 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
4962 "tl_ordrel:rx side:invalid state"));
4963 tl_merror(peer_tep->te_wq, mp, EPROTO);
4964 return;
4965 }
4966 peer_tep->te_state = nextstate[TE_ORDREL_IND][peer_tep->te_state];
4967
4968 /*
4969 * reuse message block
4970 */
4971 prim->type = T_ORDREL_IND;
4972 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4973 "tl_ordrel: send ordrel_ind"));
4974
4975 /*
4976 * send data to connected peer
4977 */
4978 putnext(peer_rq, mp);
4979 }
4980
4981
4982 /*
4983 * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space.
4984 */
4985 static void
4986 tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
5031 (t_scalar_t)sizeof (struct T_uderror_ind);
5032 addr_startp = mp->b_rptr + udreq->DEST_offset;
5033 bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
5034 (size_t)alen);
5035 }
5036 if (olen <= 0) {
5037 uderr->OPT_offset = 0;
5038 } else {
5039 uderr->OPT_offset =
5040 (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
5041 uderr->DEST_length);
5042 addr_startp = mp->b_rptr + udreq->OPT_offset;
5043 bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
5044 (size_t)olen);
5045 }
5046 freemsg(mp);
5047
5048 /*
5049 * send indication message
5050 */
5051 tep->te_state = nextstate[TE_UDERROR_IND][tep->te_state];
5052
5053 qreply(wq, err_mp);
5054 }
5055
/*
 * Serializer entry point for a T_UNITDATA_REQ message.  Called with the
 * endpoint's serializer held; the caller has taken a reference on tep
 * which is dropped here on exit.
 */
static void
tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
{
	queue_t *wq = tep->te_wq;	/* endpoint's write queue */

	/*
	 * Preserve message ordering: if earlier messages are still queued
	 * on the write queue (and we are not closing), queue this message
	 * behind them rather than processing it out of order.
	 */
	if (!tep->te_closing && (wq->q_first != NULL)) {
		TL_PUTQ(tep, mp);
	} else {
		/*
		 * Process the datagram now if the stream is still intact;
		 * if the read queue is already gone, just drop the message.
		 */
		if (tep->te_rq != NULL)
			tl_unitdata(mp, tep);
		else
			freemsg(mp);
	}

	/*
	 * Leave the serializer and drop the reference taken when this
	 * job was dispatched (NOTE(review): confirm against dispatch site).
	 */
	tl_serializer_exit(tep);
	tl_refrele(tep);
5089 ssize_t msz, ui_sz, reuse_mb_sz;
5090 t_scalar_t alen, aoff, olen, ooff;
5091 t_scalar_t oldolen = 0;
5092 cred_t *cr = NULL;
5093 pid_t cpid;
5094
5095 udreq = (struct T_unitdata_req *)mp->b_rptr;
5096 msz = MBLKL(mp);
5097
5098 /*
5099 * validate the state
5100 */
5101 if (tep->te_state != TS_IDLE) {
5102 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5103 SL_TRACE | SL_ERROR,
5104 "tl_wput:T_CONN_REQ:out of state"));
5105 tl_merror(wq, mp, EPROTO);
5106 return;
5107 }
5108 /*
5109 * tep->te_state = nextstate[TE_UNITDATA_REQ][tep->te_state];
5110 * (state does not change on this event)
5111 */
5112
5113 /*
5114 * validate the message
5115 * Note: dereference fields in struct inside message only
5116 * after validating the message length.
5117 */
5118 if (msz < sizeof (struct T_unitdata_req)) {
5119 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
5120 "tl_unitdata:invalid message length"));
5121 tl_merror(wq, mp, EINVAL);
5122 return;
5123 }
5124 alen = udreq->DEST_length;
5125 aoff = udreq->DEST_offset;
5126 oldolen = olen = udreq->OPT_length;
5127 ooff = udreq->OPT_offset;
5128 if (olen == 0)
5129 ooff = 0;
5388
5389 tl_fill_option(ui_mp->b_rptr + udind->OPT_offset +
5390 oldolen, cr, cpid,
5391 peer_tep->te_flag, peer_tep->te_credp);
5392 } else {
5393 bcopy((void *)((uintptr_t)udreq + ooff),
5394 (void *)((uintptr_t)udind + udind->OPT_offset),
5395 olen);
5396 }
5397
5398 /*
5399 * relink data blocks from mp to ui_mp
5400 */
5401 ui_mp->b_cont = mp->b_cont;
5402 freeb(mp);
5403 mp = ui_mp;
5404 }
5405 /*
5406 * send indication message
5407 */
5408 peer_tep->te_state = nextstate[TE_UNITDATA_IND][peer_tep->te_state];
5409 putnext(peer_tep->te_rq, mp);
5410 }
5411
5412
5413
5414 /*
5415 * Check if a given addr is in use.
5416 * Endpoint ptr returned or NULL if not found.
5417 * The name space is separate for each mode. This implies that
5418 * sockets get their own name space.
5419 */
5420 static tl_endpt_t *
5421 tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
5422 {
5423 tl_endpt_t *peer_tep = NULL;
5424 int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
5425 (mod_hash_val_t *)&peer_tep, tl_find_callback);
5426
5427 ASSERT(!IS_SOCKET(tep));
5428
5710 * Note that when te_oconp is set the peer better have
5711 * a t_connind_t for the client.
5712 */
5713 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
5714 /*
5715 * Queue the disconnection message.
5716 */
5717 tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
5718 } else {
5719 tip = tl_icon_find(srv_tep, tep->te_seqno);
5720 if (tip == NULL) {
5721 freemsg(d_mp);
5722 } else {
5723 ASSERT(tep == tip->ti_tep);
5724 ASSERT(tep->te_ser == srv_tep->te_ser);
5725 /*
5726 * Delete tip from the server list.
5727 */
5728 if (srv_tep->te_nicon == 1) {
5729 srv_tep->te_state =
5730 nextstate[TE_DISCON_IND2]
5731 [srv_tep->te_state];
5732 } else {
5733 srv_tep->te_state =
5734 nextstate[TE_DISCON_IND3]
5735 [srv_tep->te_state];
5736 }
5737 ASSERT(*(uint32_t *)(d_mp->b_rptr) ==
5738 T_DISCON_IND);
5739 putnext(srv_tep->te_rq, d_mp);
5740 tl_freetip(srv_tep, tip);
5741 }
5742 TL_UNCONNECT(tep->te_oconp);
5743 srv_tep = NULL;
5744 }
5745 } else if (peer_tep != NULL) {
5746 /*
5747 * unconnect existing connection
5748 * If connected, change state of peer on
5749 * discon ind event and send discon ind pdu
5750 * to module above it
5751 */
5752
5753 ASSERT(tep->te_ser == peer_tep->te_ser);
5754 if (IS_COTSORD(peer_tep) &&
5755 (peer_tep->te_state == TS_WIND_ORDREL ||
5756 peer_tep->te_state == TS_DATA_XFER)) {
5757 /*
5758 * send ordrel ind
5759 */
5760 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
5761 "tl_co_unconnect:connected: ordrel_ind state %d->%d",
5762 peer_tep->te_state,
5763 nextstate[TE_ORDREL_IND][peer_tep->te_state]));
5764 d_mp = tl_ordrel_ind_alloc();
5765 if (d_mp == NULL) {
5766 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5767 SL_TRACE | SL_ERROR,
5768 "tl_co_unconnect:connected:"
5769 "allocb failure"));
5770 /*
5771 * Continue with cleaning up peer as
5772 * this side may go away with the close
5773 */
5774 TL_QENABLE(peer_tep);
5775 goto discon_peer;
5776 }
5777 peer_tep->te_state =
5778 nextstate[TE_ORDREL_IND][peer_tep->te_state];
5779
5780 putnext(peer_tep->te_rq, d_mp);
5781 /*
5782 * Handle flow control case. This will generate
5783 * a t_discon_ind message with reason 0 if there
5784 * is data queued on the write side.
5785 */
5786 TL_QENABLE(peer_tep);
5787 } else if (IS_COTSORD(peer_tep) &&
5788 peer_tep->te_state == TS_WREQ_ORDREL) {
5789 /*
				 * Sent an ordrel_ind. We send a discon
				 * with error 0 to inform that the peer is gone.
5792 */
5793 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5794 SL_TRACE | SL_ERROR,
5795 "tl_co_unconnect: discon in state %d",
5796 tep->te_state));
5797 tl_discon_ind(peer_tep, 0);
5798 } else {
6002 mp->b_next = NULL;
6003
6004 ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
6005 switch (DB_TYPE(mp)) {
6006 default:
6007 freemsg(mp);
6008 break;
6009 case M_DATA:
6010 putnext(tep->te_rq, mp);
6011 break;
6012 case M_PROTO:
6013 primp = (union T_primitives *)mp->b_rptr;
6014 switch (primp->type) {
6015 case T_UNITDATA_IND:
6016 case T_DATA_IND:
6017 case T_OPTDATA_IND:
6018 case T_EXDATA_IND:
6019 putnext(tep->te_rq, mp);
6020 break;
6021 case T_ORDREL_IND:
6022 tep->te_state = nextstate[TE_ORDREL_IND]
6023 [tep->te_state];
6024 putnext(tep->te_rq, mp);
6025 break;
6026 case T_DISCON_IND:
6027 tep->te_state = TS_IDLE;
6028 putnext(tep->te_rq, mp);
6029 break;
6030 default:
6031 #ifdef DEBUG
6032 cmn_err(CE_PANIC,
6033 "tl_icon_sendmsgs: unknown primitive");
6034 #endif /* DEBUG */
6035 freemsg(mp);
6036 break;
6037 }
6038 break;
6039 }
6040 }
6041 }
6042
6043 /*
|