/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/* This file contains all TCP output processing functions. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/timod.h>
#include <sys/pattr.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/sockio.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/snmpcom.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>
#include <inet/ip_ndp.h>

static mblk_t   *tcp_get_seg_mp(tcp_t *, uint32_t, int32_t *);
static void     tcp_wput_cmdblk(queue_t *, mblk_t *);
static void     tcp_wput_flush(tcp_t *, mblk_t *);
static void     tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int      tcp_xmit_end(tcp_t *);
static int      tcp_send(tcp_t *, const int, const int, const int,
                    const int, int *, uint32_t *, int *, mblk_t **, mblk_t *);
static void     tcp_xmit_early_reset(char *, mblk_t *, uint32_t, uint32_t,
                    int, ip_recv_attr_t *, ip_stack_t *, conn_t *);
static boolean_t        tcp_send_rst_chk(tcp_stack_t *);
static void     tcp_process_shrunk_swnd(tcp_t *, uint32_t);
static void     tcp_fill_header(tcp_t *, uchar_t *, int);

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
static void     tcp_wput_nondata(void *, mblk_t *, void *, ip_recv_attr_t *);
static void     tcp_wput_ioctl(void *, mblk_t *, void *, ip_recv_attr_t *);
static void     tcp_wput_proto(void *, mblk_t *, void *, ip_recv_attr_t *);

/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
static int tcp_tx_pull_len = 16;
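
/*
 * Illustrative sketch (not part of the original code, hypothetical
 * names): with tcp_tx_pull_len == 16, a 10-byte write arriving while
 * the tail mblk is unshared (db_ref == 1) and has at least 10 bytes of
 * room between b_wptr and db_lim is copied into that mblk rather than
 * chained as a new one, roughly:
 *
 *	if (len < tcp_tx_pull_len &&
 *	    tail->b_datap->db_ref == 1 &&
 *	    tail->b_datap->db_lim - tail->b_wptr >= len) {
 *		bcopy(mp->b_rptr, tail->b_wptr, len);
 *		tail->b_wptr += len;
 *	}
 *
 * The real pull-up, including the zero-copy notify bookkeeping, is in
 * tcp_wput_data() below.
 */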

int
tcp_wput(queue_t *q, mblk_t *mp)
{
        conn_t  *connp = Q_TO_CONN(q);
        tcp_t   *tcp;
        void (*output_proc)();
        t_scalar_t type;
        uchar_t *rptr;
        struct iocblk   *iocp;
        size_t size;

        ASSERT(connp->conn_ref >= 2);

        switch (DB_TYPE(mp)) {
        case M_DATA:
                tcp = connp->conn_tcp;
                ASSERT(tcp != NULL);

                size = msgdsize(mp);

                mutex_enter(&tcp->tcp_non_sq_lock);
                tcp->tcp_squeue_bytes += size;
                if (TCP_UNSENT_BYTES(tcp) > connp->conn_sndbuf) {
                        tcp_setqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);

                CONN_INC_REF(connp);
                SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
                    NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
                return (0);

        case M_CMD:
                tcp_wput_cmdblk(q, mp);
                return (0);

        case M_PROTO:
        case M_PCPROTO:
                /*
                 * If it is an SNMP message, don't get behind the squeue.
                 */
                tcp = connp->conn_tcp;
                rptr = mp->b_rptr;
                if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
                        type = ((union T_primitives *)rptr)->type;
                } else {
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_ERROR|SL_TRACE,
                                    "tcp_wput_proto, dropping one...");
                        }
                        freemsg(mp);
                        return (0);
                }
                if (type == T_SVR4_OPTMGMT_REQ) {
                        /*
                         * All Solaris components should pass a db_credp
                         * for this TPI message, hence we ASSERT.
                         * But in case there is some other M_PROTO that looks
                         * like a TPI message sent by some other kernel
                         * component, we check and return an error.
                         */
                        cred_t  *cr = msg_getcred(mp, NULL);

                        ASSERT(cr != NULL);
                        if (cr == NULL) {
                                tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
                                return (0);
                        }
                        if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get,
                            cr)) {
                                /*
                                 * This was an SNMP request.
                                 */
                                return (0);
                        } else {
                                output_proc = tcp_wput_proto;
                        }
                } else {
                        output_proc = tcp_wput_proto;
                }
                break;
        case M_IOCTL:
                /*
                 * Most ioctls can be processed right away without going via
                 * squeues - process them right here.  Those that do require
                 * a squeue (currently _SIOCSOCKFALLBACK) are processed by
                 * tcp_wput_ioctl().
                 */
                iocp = (struct iocblk *)mp->b_rptr;
                tcp = connp->conn_tcp;

                switch (iocp->ioc_cmd) {
                case TCP_IOC_ABORT_CONN:
                        tcp_ioctl_abort_conn(q, mp);
                        return (0);
                case TI_GETPEERNAME:
                case TI_GETMYNAME:
                        mi_copyin(q, mp, NULL,
                            SIZEOF_STRUCT(strbuf, iocp->ioc_flag));
                        return (0);

                default:
                        output_proc = tcp_wput_ioctl;
                        break;
                }
                break;
        default:
                output_proc = tcp_wput_nondata;
                break;
        }

        CONN_INC_REF(connp);
        SQUEUE_ENTER_ONE(connp->conn_sqp, mp, output_proc, connp,
            NULL, tcp_squeue_flag, SQTAG_TCP_WPUT_OTHER);
        return (0);
}

/*
 * The TCP normal data output path.
 * NOTE: the logic of the fast path is duplicated from this function.
 */
void
tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent)
{
        int             len;
        mblk_t          *local_time;
        mblk_t          *mp1;
        uint32_t        snxt;
        int             tail_unsent;
        int             tcpstate;
        int             usable = 0;
        mblk_t          *xmit_tail;
        int32_t         mss;
        int32_t         num_sack_blk = 0;
        int32_t         total_hdr_len;
        int32_t         tcp_hdr_len;
        int             rc;
        tcp_stack_t     *tcps = tcp->tcp_tcps;
        conn_t          *connp = tcp->tcp_connp;
        clock_t         now = LBOLT_FASTPATH;

        tcpstate = tcp->tcp_state;
        if (mp == NULL) {
                /*
                 * tcp_wput_data() with NULL mp should only be called when
                 * there is unsent data.
                 */
                ASSERT(tcp->tcp_unsent > 0);
                /* Really tacky... but we need this for detached closes. */
                len = tcp->tcp_unsent;
                goto data_null;
        }

        ASSERT(mp->b_datap->db_type == M_DATA);
        /*
         * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ,
         * or before a connection attempt has begun.
         */
        if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT ||
            (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
                if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
#ifdef DEBUG
                        cmn_err(CE_WARN,
                            "tcp_wput_data: data after ordrel, %s",
                            tcp_display(tcp, NULL,
                            DISP_ADDR_AND_PORT));
#else
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_TRACE|SL_ERROR,
                                    "tcp_wput_data: data after ordrel, %s\n",
                                    tcp_display(tcp, NULL,
                                    DISP_ADDR_AND_PORT));
                        }
#endif /* DEBUG */
                }
                if (tcp->tcp_snd_zcopy_aware &&
                    (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
                        tcp_zcopy_notify(tcp);
                freemsg(mp);
                mutex_enter(&tcp->tcp_non_sq_lock);
                if (tcp->tcp_flow_stopped &&
                    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                        tcp_clrqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);
                return;
        }

        /* Strip empties */
        for (;;) {
                ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
                    (uintptr_t)INT_MAX);
                len = (int)(mp->b_wptr - mp->b_rptr);
                if (len > 0)
                        break;
                mp1 = mp;
                mp = mp->b_cont;
                freeb(mp1);
                if (mp == NULL) {
                        return;
                }
        }

        /* If we are the first on the list ... */
        if (tcp->tcp_xmit_head == NULL) {
                tcp->tcp_xmit_head = mp;
                tcp->tcp_xmit_tail = mp;
                tcp->tcp_xmit_tail_unsent = len;
        } else {
                /* If tiny tx and room in txq tail, pullup to save mblks. */
                struct datab *dp;

                mp1 = tcp->tcp_xmit_last;
                if (len < tcp_tx_pull_len &&
                    (dp = mp1->b_datap)->db_ref == 1 &&
                    dp->db_lim - mp1->b_wptr >= len) {
                        ASSERT(len > 0);
                        ASSERT(!mp1->b_cont);
                        if (len == 1) {
                                *mp1->b_wptr++ = *mp->b_rptr;
                        } else {
                                bcopy(mp->b_rptr, mp1->b_wptr, len);
                                mp1->b_wptr += len;
                        }
                        if (mp1 == tcp->tcp_xmit_tail)
                                tcp->tcp_xmit_tail_unsent += len;
                        mp1->b_cont = mp->b_cont;
                        if (tcp->tcp_snd_zcopy_aware &&
                            (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
                                mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
                        freeb(mp);
                        mp = mp1;
                } else {
                        tcp->tcp_xmit_last->b_cont = mp;
                }
                len += tcp->tcp_unsent;
        }

        /* Tack on however many more positive length mblks we have */
        if ((mp1 = mp->b_cont) != NULL) {
                do {
                        int tlen;
                        ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
                            (uintptr_t)INT_MAX);
                        tlen = (int)(mp1->b_wptr - mp1->b_rptr);
                        if (tlen <= 0) {
                                mp->b_cont = mp1->b_cont;
                                freeb(mp1);
                        } else {
                                len += tlen;
                                mp = mp1;
                        }
                } while ((mp1 = mp->b_cont) != NULL);
        }
        tcp->tcp_xmit_last = mp;
        tcp->tcp_unsent = len;

        if (urgent)
                usable = 1;

data_null:
        snxt = tcp->tcp_snxt;
        xmit_tail = tcp->tcp_xmit_tail;
        tail_unsent = tcp->tcp_xmit_tail_unsent;

        /*
         * Note that tcp_mss has been adjusted to take into account the
         * timestamp option if applicable.  Because SACK options do not
         * appear in every TCP segment and are of variable length, they
         * cannot be included in tcp_mss.  Thus we need to calculate the
         * actual segment length when we need to send a segment which
         * includes SACK options.
         */
        if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
                int32_t opt_len;

                num_sack_blk = MIN(tcp->tcp_max_sack_blk,
                    tcp->tcp_num_sack_blk);
                opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
                    2 + TCPOPT_HEADER_LEN;
                mss = tcp->tcp_mss - opt_len;
                total_hdr_len = connp->conn_ht_iphc_len + opt_len;
                tcp_hdr_len = connp->conn_ht_ulp_len + opt_len;
        } else {
                mss = tcp->tcp_mss;
                total_hdr_len = connp->conn_ht_iphc_len;
                tcp_hdr_len = connp->conn_ht_ulp_len;
        }
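
        /*
         * Worked example with illustrative numbers: three SACK blocks of
         * 8 bytes each plus two NOPs and the 2-byte option header give
         * opt_len = 24 + 2 + 2 = 28, so a 1460-byte tcp_mss leaves
         * mss = 1432 bytes of payload in segments carrying SACK options.
         */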

        if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
            (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
                TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
        }
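
        /*
         * Example with hypothetical numbers (assuming TCP_SET_INIT_CWND
         * applies its last argument as the initial window in MSS units):
         * after an idle period of at least one RTO with everything acked,
         * a cwnd that had grown to 40 * mss drops back to
         * tcps_slow_start_after_idle * mss and slow start begins anew.
         */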
        if (tcpstate == TCPS_SYN_RCVD) {
                /*
                 * The three-way connection establishment handshake is not
                 * complete yet.  We want to queue the data for transmission
                 * after entering ESTABLISHED state (RFC 793).  A jump to
                 * the "done" label effectively leaves data on the queue.
                 */
                goto done;
        } else {
                int usable_r;

                /*
                 * In the special case when cwnd is zero, which can only
                 * happen if the connection is ECN capable, return now.
                 * New segments are sent using tcp_timer().  The timer
                 * is set in tcp_input_data().
                 */
                if (tcp->tcp_cwnd == 0) {
                        /*
                         * Note that tcp_cwnd is 0 before the 3-way handshake
                         * is finished.
                         */
                        ASSERT(tcp->tcp_ecn_ok ||
                            tcp->tcp_state < TCPS_ESTABLISHED);
                        return;
                }

                /* NOTE: trouble if xmitting while SYN not acked? */
                usable_r = snxt - tcp->tcp_suna;
                usable_r = tcp->tcp_swnd - usable_r;

                /*
                 * Check if the receiver has shrunk the window.  If
                 * tcp_wput_data() with NULL mp is called, tcp_fin_sent
                 * cannot be set as there is unsent data, so FIN cannot
                 * be sent out.  Otherwise, we need to take the FIN into
                 * account as it consumes an "invisible" sequence number.
                 */
                ASSERT(tcp->tcp_fin_sent == 0);
                if (usable_r < 0) {
                        /*
                         * The receiver has shrunk the window and we have sent
                         * -usable_r bytes of data beyond the window,
                         * re-adjust.
                         *
                         * If TCP window scaling is enabled, there can be a
                         * round-down error as the advertised receive window
                         * is actually right shifted n bits.  This means that
                         * the information in the lower n bits is wiped out.
                         * It will look like the window has shrunk.  Do a
                         * check here to see if the shrunk amount is actually
                         * within the error in the window calculation.  If it
                         * is, just return.  Note that this check is inside
                         * the shrunk window check.  This makes sure that even
                         * though tcp_process_shrunk_swnd() is not called,
                         * we will stop further processing.
                         */
                        if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
                                tcp_process_shrunk_swnd(tcp, -usable_r);
                        }
                        return;
                }
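
                /*
                 * Worked example with illustrative numbers: with
                 * tcp_snd_ws == 3 the advertised window is a multiple
                 * of 8, so usable_r == -5 gives (-usable_r >> 3) == 0
                 * and is treated as window-scale rounding error, while
                 * usable_r == -24 gives 3 and calls
                 * tcp_process_shrunk_swnd().
                 */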

                /* usable = MIN(swnd, cwnd) - unacked_bytes */
                if (tcp->tcp_swnd > tcp->tcp_cwnd)
                        usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;

                /* usable = MIN(usable, unsent) */
                if (usable_r > len)
                        usable_r = len;

                /* usable = MAX(usable, {1 for urgent, 0 for data}) */
                if (usable_r > 0) {
                        usable = usable_r;
                } else {
                        /* Bypass all other unnecessary processing. */
                        goto done;
                }
        }

        local_time = (mblk_t *)(intptr_t)gethrtime();

        /*
         * "Our" Nagle Algorithm.  This is not the same as in the old
         * BSD.  This is more in line with the true intent of Nagle.
         *
         * The conditions are:
         * 1. The amount of unsent data (or amount of data which can be
         *    sent, whichever is smaller) is less than the Nagle limit.
         * 2. The last sent size is also less than the Nagle limit.
         * 3. There is unack'ed data.
         * 4. Urgent pointer is not set.  Urgent data is sent ignoring
         *    the Nagle algorithm.  This reduces the probability that
         *    urgent bytes get "merged" together.
         * 5. The app has not closed the connection.  This eliminates the
         *    wait time of the receiving side waiting for the last piece of
         *    (small) data.
         *
         * If all are satisfied, exit without sending anything.  Note
         * that the Nagle limit can be smaller than 1 MSS.  The Nagle
         * limit is the smaller of 1 MSS and the global tcp_naglim_def
         * (which defaults to 4095).
         */
        if (usable < (int)tcp->tcp_naglim &&
            tcp->tcp_naglim > tcp->tcp_last_sent_len &&
            snxt != tcp->tcp_suna &&
            !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
            !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
                goto done;
        }
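
        /*
         * Worked example with illustrative numbers: usable == 100,
         * tcp_naglim == 1460, tcp_last_sent_len == 512, unacked data
         * outstanding (snxt != tcp_suna), and no URG or FIN pending:
         * all five conditions hold, so the small segment is held until
         * the outstanding data is acked or more data accumulates.
         */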
        /*
         * If tcp_zero_win_probe is not set and the tcp->tcp_cork option
         * is set, then we have to force TCP not to send a partial segment
         * (smaller than MSS bytes).  We calculate the usable window based
         * on full-MSS segments now and save the rest of the remaining data
         * for later.  When tcp_zero_win_probe is set, TCP needs to send
         * out something to do a zero window probe.
         */
        if (tcp->tcp_cork && !tcp->tcp_zero_win_probe) {
                if (usable < mss)
                        goto done;
                usable = (usable / mss) * mss;
        }
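
        /*
         * Example with hypothetical numbers: with TCP_CORK set,
         * mss == 1460 and usable == 5000, usable is rounded down to
         * (5000 / 1460) * 1460 == 4380 and the trailing 620 bytes stay
         * queued until more data arrives or the cork is removed.
         */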

        /* Update the latest receive window size in TCP header. */
        tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
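
        /*
         * Example with hypothetical numbers: tcp_rwnd == 131072 with
         * tcp_rcv_ws == 2 advertises htons(131072 >> 2) == 32768; the
         * peer shifts it left by the same amount to recover the window.
         */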

        /* Send the packet. */
        rc = tcp_send(tcp, mss, total_hdr_len, tcp_hdr_len,
            num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
            local_time);

        /* Pretend that all we were trying to send really got sent */
        if (rc < 0 && tail_unsent < 0) {
                do {
                        xmit_tail = xmit_tail->b_cont;
                        xmit_tail->b_prev = local_time;
                        ASSERT((uintptr_t)(xmit_tail->b_wptr -
                            xmit_tail->b_rptr) <= (uintptr_t)INT_MAX);
                        tail_unsent += (int)(xmit_tail->b_wptr -
                            xmit_tail->b_rptr);
                } while (tail_unsent < 0);
        }
done:;
        tcp->tcp_xmit_tail = xmit_tail;
        tcp->tcp_xmit_tail_unsent = tail_unsent;
        len = tcp->tcp_snxt - snxt;
        if (len) {
                /*
                 * If new data was sent, we need to update the notsack
                 * list, which is, after all, data blocks that have
                 * not been sack'ed by the receiver.  New data is
                 * not sack'ed.
                 */
                if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
                        /* len is a negative value. */
                        tcp->tcp_pipe -= len;
                        tcp_notsack_update(&(tcp->tcp_notsack_list),
                            tcp->tcp_snxt, snxt,
                            &(tcp->tcp_num_notsack_blk),
                            &(tcp->tcp_cnt_notsack_list));
                }
                tcp->tcp_snxt = snxt + tcp->tcp_fin_sent;
                tcp->tcp_rack = tcp->tcp_rnxt;
                tcp->tcp_rack_cnt = 0;
                if ((snxt + len) == tcp->tcp_suna) {
                        TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
                }
        } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) {
                /*
                 * Didn't send anything. Make sure the timer is running
                 * so that we will probe a zero window.
                 */
                TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
        }
        /* Note that len is the amount we just sent but with a negative sign */
        tcp->tcp_unsent += len;
        mutex_enter(&tcp->tcp_non_sq_lock);
        if (tcp->tcp_flow_stopped) {
                if (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                        tcp_clrqfull(tcp);
                }
        } else if (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf) {
                if (!(tcp->tcp_detached))
                        tcp_setqfull(tcp);
        }
        mutex_exit(&tcp->tcp_non_sq_lock);
}

/*
 * Initial STREAMS write side put() procedure for sockets. It tries to
 * handle the T_CAPABILITY_REQ which sockfs sends down while setting
 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages
 * are handled by tcp_wput() as usual.
 *
 * All further messages will also be handled by tcp_wput() because we cannot
 * be sure that the above short cut is safe later.
 */
int
tcp_wput_sock(queue_t *wq, mblk_t *mp)
{
        conn_t                  *connp = Q_TO_CONN(wq);
        tcp_t                   *tcp = connp->conn_tcp;
        struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr;

        ASSERT(wq->q_qinfo == &tcp_sock_winit);
        wq->q_qinfo = &tcp_winit;

        ASSERT(IPCL_IS_TCP(connp));
        ASSERT(TCP_IS_SOCKET(tcp));

        if (DB_TYPE(mp) == M_PCPROTO &&
            MBLKL(mp) == sizeof (struct T_capability_req) &&
            car->PRIM_type == T_CAPABILITY_REQ) {
                tcp_capability_req(tcp, mp);
                return (0);
        }

        tcp_wput(wq, mp);
        return (0);
}

/* ARGSUSED */
int
tcp_wput_fallback(queue_t *wq, mblk_t *mp)
{
#ifdef DEBUG
        cmn_err(CE_CONT, "tcp_wput_fallback: Message during fallback \n");
#endif
        freemsg(mp);
        return (0);
}

/*
 * Called by tcp_wput() to handle miscellaneous non-M_DATA messages.
 */
/* ARGSUSED */
static void
tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t  *connp = (conn_t *)arg;
        tcp_t   *tcp = connp->conn_tcp;

        ASSERT(DB_TYPE(mp) != M_IOCTL);
        /*
         * TCP is D_MP and qprocsoff() is done towards the end of tcp_close().
         * Once the close starts, streamhead and sockfs will not let any data
         * packets come down (close ensures that there are no threads using the
         * queue and no new threads will come down) but since qprocsoff()
         * hasn't happened yet, an M_FLUSH or some non-data message might
         * get reflected back (in response to our own FLUSHRW) and get
         * processed after tcp_close() is done. The conn would still be valid
         * because a ref would have been added, but we need to check the state
         * before actually processing the packet.
         */
        if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) {
                freemsg(mp);
                return;
        }

        switch (DB_TYPE(mp)) {
        case M_IOCDATA:
                tcp_wput_iocdata(tcp, mp);
                break;
        case M_FLUSH:
                tcp_wput_flush(tcp, mp);
                break;
        default:
                ip_wput_nondata(connp->conn_wq, mp);
                break;
        }
}

/* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
static void
tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
{
        uchar_t fval = *mp->b_rptr;
        mblk_t  *tail;
        conn_t  *connp = tcp->tcp_connp;
        queue_t *q = connp->conn_wq;

        /* TODO: How should flush interact with urgent data? */
        if ((fval & FLUSHW) && tcp->tcp_xmit_head != NULL &&
            !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
                /*
                 * Flush only data that has not yet been put on the wire.  If
                 * we flush data that we have already transmitted, life, as we
                 * know it, may come to an end.
                 */
                tail = tcp->tcp_xmit_tail;
                tail->b_wptr -= tcp->tcp_xmit_tail_unsent;
                tcp->tcp_xmit_tail_unsent = 0;
                tcp->tcp_unsent = 0;
                if (tail->b_wptr != tail->b_rptr)
                        tail = tail->b_cont;
                if (tail) {
                        mblk_t **excess = &tcp->tcp_xmit_head;
                        for (;;) {
                                mblk_t *mp1 = *excess;
                                if (mp1 == tail)
                                        break;
                                tcp->tcp_xmit_tail = mp1;
                                tcp->tcp_xmit_last = mp1;
                                excess = &mp1->b_cont;
                        }
                        *excess = NULL;
                        tcp_close_mpp(&tail);
                        if (tcp->tcp_snd_zcopy_aware)
                                tcp_zcopy_notify(tcp);
                }
                /*
                 * We have no unsent data, so unsent must be less than
                 * conn_sndlowat, so re-enable flow.
                 */
                mutex_enter(&tcp->tcp_non_sq_lock);
                if (tcp->tcp_flow_stopped) {
                        tcp_clrqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);
        }
        /*
         * TODO: you can't just flush these, you have to increase rwnd for one
         * thing.  For another, how should urgent data interact?
         */
        if (fval & FLUSHR) {
                *mp->b_rptr = fval & ~FLUSHW;
                /* XXX */
                qreply(q, mp);
                return;
        }
        freemsg(mp);
}

/*
 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA
 * messages.
 */
static void
tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp)
{
        mblk_t          *mp1;
        struct iocblk   *iocp = (struct iocblk *)mp->b_rptr;
        STRUCT_HANDLE(strbuf, sb);
        uint_t          addrlen;
        conn_t          *connp = tcp->tcp_connp;
        queue_t         *q = connp->conn_wq;

        /* Make sure it is one of ours. */
        switch (iocp->ioc_cmd) {
        case TI_GETMYNAME:
        case TI_GETPEERNAME:
                break;
        default:
                /*
                 * If the conn is closing, then error the ioctl here. Otherwise
                 * use the CONN_IOCTLREF_* macros to hold off tcp_close until
                 * we're done here.
                 */
                mutex_enter(&connp->conn_lock);
                if (connp->conn_state_flags & CONN_CLOSING) {
                        mutex_exit(&connp->conn_lock);
                        iocp->ioc_error = EINVAL;
                        mp->b_datap->db_type = M_IOCNAK;
                        iocp->ioc_count = 0;
                        qreply(q, mp);
                        return;
                }

                CONN_INC_IOCTLREF_LOCKED(connp);
                ip_wput_nondata(q, mp);
                CONN_DEC_IOCTLREF(connp);
                return;
        }
        switch (mi_copy_state(q, mp, &mp1)) {
        case -1:
                return;
        case MI_COPY_CASE(MI_COPY_IN, 1):
                break;
        case MI_COPY_CASE(MI_COPY_OUT, 1):
                /* Copy out the strbuf. */
                mi_copyout(q, mp);
                return;
        case MI_COPY_CASE(MI_COPY_OUT, 2):
                /* All done. */
                mi_copy_done(q, mp, 0);
                return;
        default:
                mi_copy_done(q, mp, EPROTO);
                return;
        }
        /* Check alignment of the strbuf */
        if (!OK_32PTR(mp1->b_rptr)) {
                mi_copy_done(q, mp, EINVAL);
                return;
        }

        STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr);

        if (connp->conn_family == AF_INET)
                addrlen = sizeof (sin_t);
        else
                addrlen = sizeof (sin6_t);

        if (STRUCT_FGET(sb, maxlen) < addrlen) {
                mi_copy_done(q, mp, EINVAL);
                return;
        }

        switch (iocp->ioc_cmd) {
        case TI_GETMYNAME:
                break;
        case TI_GETPEERNAME:
                if (tcp->tcp_state < TCPS_SYN_RCVD) {
                        mi_copy_done(q, mp, ENOTCONN);
                        return;
                }
                break;
        }
        mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE);
        if (!mp1)
                return;

        STRUCT_FSET(sb, len, addrlen);
        switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
        case TI_GETMYNAME:
                (void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr,
                    &addrlen);
                break;
        case TI_GETPEERNAME:
                (void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr,
                    &addrlen);
                break;
        }
        mp1->b_wptr += addrlen;
        /* Copy out the address */
        mi_copyout(q, mp);
}

/*
 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL
 * messages.
 */
/* ARGSUSED */
static void
tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        queue_t         *q = connp->conn_wq;
        struct iocblk   *iocp;

        ASSERT(DB_TYPE(mp) == M_IOCTL);
        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on the write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in the classifier hash list, there should be an
         * extra ref for that (we check both possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        iocp = (struct iocblk *)mp->b_rptr;
        switch (iocp->ioc_cmd) {
        case _SIOCSOCKFALLBACK:
                /*
                 * Either sockmod is about to be popped and the socket
                 * would now be treated as a plain stream, or a module
                 * is about to be pushed so we could no longer use read-
                 * side synchronous streams for fused loopback tcp.
                 * Drain any queued data and disable the direct sockfs
                 * interface from now on.
                 */
                if (!tcp->tcp_issocket) {
                        DB_TYPE(mp) = M_IOCNAK;
                        iocp->ioc_error = EINVAL;
                } else {
                        tcp_use_pure_tpi(tcp);
                        DB_TYPE(mp) = M_IOCACK;
                        iocp->ioc_error = 0;
                }
                iocp->ioc_count = 0;
                iocp->ioc_rval = 0;
                qreply(q, mp);
                return;
        }

        /*
         * If the conn is closing, then error the ioctl here. Otherwise bump
         * the conn_ioctlref to hold off tcp_close until we're done here.
         */
        mutex_enter(&(connp)->conn_lock);
        if ((connp)->conn_state_flags & CONN_CLOSING) {
                mutex_exit(&(connp)->conn_lock);
                iocp->ioc_error = EINVAL;
                mp->b_datap->db_type = M_IOCNAK;
                iocp->ioc_count = 0;
                qreply(q, mp);
                return;
        }

        CONN_INC_IOCTLREF_LOCKED(connp);
        ip_wput_nondata(q, mp);
        CONN_DEC_IOCTLREF(connp);
}

/*
 * This routine is called by tcp_wput() to handle all TPI requests.
 */
/* ARGSUSED */
static void
tcp_wput_proto(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        union T_primitives *tprim = (union T_primitives *)mp->b_rptr;
        uchar_t         *rptr;
        t_scalar_t      type;
        cred_t          *cr;

        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on the write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in the classifier hash list, there should be an
         * extra ref for that (we check both possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        rptr = mp->b_rptr;
        ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
        if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
                type = ((union T_primitives *)rptr)->type;
                if (type == T_EXDATA_REQ) {
                        tcp_output_urgent(connp, mp, arg2, NULL);
                } else if (type != T_DATA_REQ) {
                        goto non_urgent_data;
                } else {
                        /* TODO: options, flags, ... from user */
                        /* Set length to zero for reclamation below */
                        tcp_wput_data(tcp, mp->b_cont, B_TRUE);
                        freeb(mp);
                }
                return;
        } else {
                if (connp->conn_debug) {
                        (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
                            "tcp_wput_proto, dropping one...");
                }
                freemsg(mp);
                return;
        }

non_urgent_data:

        switch ((int)tprim->type) {
        case O_T_BIND_REQ:      /* bind request */
        case T_BIND_REQ:        /* new semantics bind request */
                tcp_tpi_bind(tcp, mp);
                break;
        case T_UNBIND_REQ:      /* unbind request */
                tcp_tpi_unbind(tcp, mp);
                break;
        case O_T_CONN_RES:      /* old connection response XXX */
        case T_CONN_RES:        /* connection response */
                tcp_tli_accept(tcp, mp);
                break;
        case T_CONN_REQ:        /* connection request */
                tcp_tpi_connect(tcp, mp);
                break;
        case T_DISCON_REQ:      /* disconnect request */
                tcp_disconnect(tcp, mp);
                break;
        case T_CAPABILITY_REQ:
                tcp_capability_req(tcp, mp);    /* capability request */
                break;
        case T_INFO_REQ:        /* information request */
                tcp_info_req(tcp, mp);
                break;
        case T_SVR4_OPTMGMT_REQ:        /* manage options req */
        case T_OPTMGMT_REQ:
                /*
                 * Note:  no support for snmpcom_req() through new
                 * T_OPTMGMT_REQ. See comments in ip.c
                 */

                /*
                 * All Solaris components should pass a db_credp
                 * for this TPI message, hence we ASSERT.
                 * But in case there is some other M_PROTO that looks
                 * like a TPI message sent by some other kernel
                 * component, we check and return an error.
                 */
                cr = msg_getcred(mp, NULL);
                ASSERT(cr != NULL);
                if (cr == NULL) {
                        tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
                        return;
                }
                /*
                 * If EINPROGRESS is returned, the request has been queued
                 * for subsequent processing by ip_restart_optmgmt(), which
                 * will do the CONN_DEC_REF().
                 */
                if ((int)tprim->type == T_SVR4_OPTMGMT_REQ) {
                        svr4_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
                } else {
                        tpi_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
                }
                break;

        case T_UNITDATA_REQ:    /* unitdata request */
                tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
                break;
        case T_ORDREL_REQ:      /* orderly release req */
                freemsg(mp);

                if (tcp->tcp_fused)
                        tcp_unfuse(tcp);

                if (tcp_xmit_end(tcp) != 0) {
                        /*
                         * We were crossing FINs and got a reset from
                         * the other side. Just ignore it.
                         */
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_ERROR|SL_TRACE,
                                    "tcp_wput_proto, T_ORDREL_REQ out of "
                                    "state %s",
                                    tcp_display(tcp, NULL,
                                    DISP_ADDR_AND_PORT));
                        }
                }
                break;
        case T_ADDR_REQ:
                tcp_addr_req(tcp, mp);
                break;
        default:
                if (connp->conn_debug) {
                        (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
                            "tcp_wput_proto, bogus TPI msg, type %d",
                            tprim->type);
                }
                /*
                 * We used to M_ERROR.  Sending TNOTSUPPORT gives the user
                 * a chance to recover.
                 */
                tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
                break;
        }
}

/*
 * Handle special out-of-band ioctl requests (see PSARC/2008/265).
 */
static void
tcp_wput_cmdblk(queue_t *q, mblk_t *mp)
{
        void    *data;
        mblk_t  *datamp = mp->b_cont;
        conn_t  *connp = Q_TO_CONN(q);
        tcp_t   *tcp = connp->conn_tcp;
        cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr;

        if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) {
                cmdp->cb_error = EPROTO;
                qreply(q, mp);
                return;
        }

        data = datamp->b_rptr;

        switch (cmdp->cb_cmd) {
        case TI_GETPEERNAME:
                if (tcp->tcp_state < TCPS_SYN_RCVD)
                        cmdp->cb_error = ENOTCONN;
                else
                        cmdp->cb_error = conn_getpeername(connp, data,
                            &cmdp->cb_len);
                break;
        case TI_GETMYNAME:
                cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len);
                break;
        default:
                cmdp->cb_error = EINVAL;
                break;
        }

        qreply(q, mp);
}

/*
 * The TCP fast path write put procedure.
 * NOTE: the logic of the fast path is duplicated from tcp_wput_data()
 */
/* ARGSUSED */
void
tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        int             len;
        int             hdrlen;
        int             plen;
        mblk_t          *mp1;
        uchar_t         *rptr;
        uint32_t        snxt;
        tcpha_t         *tcpha;
        struct datab    *db;
        uint32_t        suna;
        uint32_t        mss;
        ipaddr_t        *dst;
        ipaddr_t        *src;
        uint32_t        sum;
        int             usable;
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        uint32_t        msize;
        tcp_stack_t     *tcps = tcp->tcp_tcps;
        ip_xmit_attr_t  *ixa;
        clock_t         now;

        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on the write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in the classifier hash list, there should be an
         * extra ref for that (we check both possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        ASSERT(DB_TYPE(mp) == M_DATA);
        msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);

        mutex_enter(&tcp->tcp_non_sq_lock);
        tcp->tcp_squeue_bytes -= msize;
        mutex_exit(&tcp->tcp_non_sq_lock);

        /* Bypass tcp protocol for fused tcp loopback */
        if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
                return;

        mss = tcp->tcp_mss;
        /*
         * If ZEROCOPY has been turned off, try not to send any zero-copy
         * message down.  Back off now.
         */
        if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on)
                mp = tcp_zcopy_backoff(tcp, mp, B_FALSE);

        ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
        len = (int)(mp->b_wptr - mp->b_rptr);

        /*
         * Criteria for fast path:
         *
         *   1. no unsent data
         *   2. single mblk in request
         *   3. connection established
         *   4. data in mblk
         *   5. len <= mss
         *   6. no tcp_valid bits
         */
        if ((tcp->tcp_unsent != 0) ||
            (tcp->tcp_cork) ||
            (mp->b_cont != NULL) ||
            (tcp->tcp_state != TCPS_ESTABLISHED) ||
            (len == 0) ||
            (len > mss) ||
            (tcp->tcp_valid_bits != 0)) {
                tcp_wput_data(tcp, mp, B_FALSE);
                return;
        }

        ASSERT(tcp->tcp_xmit_tail_unsent == 0);
        ASSERT(tcp->tcp_fin_sent == 0);

        /* queue new packet onto retransmission queue */
        if (tcp->tcp_xmit_head == NULL) {
                tcp->tcp_xmit_head = mp;
        } else {
                tcp->tcp_xmit_last->b_cont = mp;
        }
        tcp->tcp_xmit_last = mp;
        tcp->tcp_xmit_tail = mp;

        /* find out how much we can send */
        /* BEGIN CSTYLED */
        /*
         *    un-acked     usable
         *  |--------------|-----------------|
         *  tcp_suna       tcp_snxt       tcp_suna+tcp_swnd
         */
        /* END CSTYLED */

        /* start sending from tcp_snxt */
        snxt = tcp->tcp_snxt;

        /*
         * Check to see if this connection has been idle for some time and no
         * ACK is expected. If so, then the congestion window size is no longer
         * meaningfully tied to current network conditions.
         *
         * We reinitialize tcp_cwnd, and slow start again to get back the
         * connection's "self-clock" as described in Van Jacobson's 1988 paper
         * "Congestion avoidance and control".
         */
        now = LBOLT_FASTPATH;
        if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
            (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
                TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
        }

        usable = tcp->tcp_swnd;              /* tcp window size */
        if (usable > tcp->tcp_cwnd)
                usable = tcp->tcp_cwnd;      /* congestion window smaller */
        usable -= snxt;         /* subtract stuff already sent */
        suna = tcp->tcp_suna;
        usable += suna;
        /* usable can be < 0 if the congestion window is smaller */
        if (len > usable) {
                /* Can't send complete M_DATA in one shot */
                goto slow;
        }
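
        /*
         * Worked example with illustrative numbers: suna == 1000,
         * snxt == 1400, swnd == 8192, cwnd == 2920.  Then usable ==
         * min(8192, 2920) - (1400 - 1000) == 2520, so any single mblk
         * of up to 2520 bytes may use the fast path; larger writes
         * take the slow path through tcp_wput_data().
         */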

        mutex_enter(&tcp->tcp_non_sq_lock);
        if (tcp->tcp_flow_stopped &&
            TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                tcp_clrqfull(tcp);
        }
        mutex_exit(&tcp->tcp_non_sq_lock);

        /*
         * determine if anything to send (Nagle).
         *
         *   1. len < tcp_mss (i.e. small)
         *   2. unacknowledged data present
         *   3. len < nagle limit
         *   4. last packet sent < nagle limit (previous packet sent)
         */
        if ((len < mss) && (snxt != suna) &&
            (len < (int)tcp->tcp_naglim) &&
            (tcp->tcp_last_sent_len < tcp->tcp_naglim)) {
                /*
                 * This was the first unsent packet and normally
                 * mss < xmit_hiwater so there is no need to worry
                 * about flow control. The next packet will go
                 * through the flow control check in tcp_wput_data().
                 */
                /* leftover work from above */
                tcp->tcp_unsent = len;
                tcp->tcp_xmit_tail_unsent = len;

                return;
        }

        /*
         * len <= tcp->tcp_mss && len == unsent so no sender silly window.  Can
         * send now.
         */

        if (snxt == suna) {
                TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
        }

        /* we have always sent something */
        tcp->tcp_rack_cnt = 0;

        tcp->tcp_snxt = snxt + len;
        tcp->tcp_rack = tcp->tcp_rnxt;

        if ((mp1 = dupb(mp)) == NULL)
                goto no_memory;
        mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
        mp->b_next = (mblk_t *)(uintptr_t)snxt;

        /* adjust tcp header information */
        tcpha = tcp->tcp_tcpha;
        tcpha->tha_flags = (TH_ACK|TH_PUSH);

        sum = len + connp->conn_ht_ulp_len + connp->conn_sum;
        sum = (sum >> 16) + (sum & 0xFFFF);
        tcpha->tha_sum = htons(sum);
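
        /*
         * Example with hypothetical numbers: conn_sum caches the
         * pseudo-header sum.  With len == 1400, conn_ht_ulp_len == 32
         * and conn_sum == 0xFE50, sum == 0x103E8 before folding and
         * (0x1 + 0x03E8) == 0x03E9 after; IP later completes the
         * checksum over the payload itself.
         */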

        tcpha->tha_seq = htonl(snxt);

        TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
        TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
        BUMP_LOCAL(tcp->tcp_obsegs);

        /* Update the latest receive window size in TCP header. */
        tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);

        tcp->tcp_last_sent_len = (ushort_t)len;

        plen = len + connp->conn_ht_iphc_len;

        ixa = connp->conn_ixa;
        ixa->ixa_pktlen = plen;

        if (ixa->ixa_flags & IXAF_IS_IPV4) {
                tcp->tcp_ipha->ipha_length = htons(plen);
        } else {
                tcp->tcp_ip6h->ip6_plen = htons(plen - IPV6_HDR_LEN);
        }

        /* see if we need to allocate a mblk for the headers */
        hdrlen = connp->conn_ht_iphc_len;
        rptr = mp1->b_rptr - hdrlen;
        db = mp1->b_datap;
        if ((db->db_ref != 2) || rptr < db->db_base ||
            (!OK_32PTR(rptr))) {
                /* NOTE: we assume allocb returns an OK_32PTR */
                mp = allocb(hdrlen + tcps->tcps_wroff_xtra, BPRI_MED);
                if (!mp) {
                        freemsg(mp1);
                        goto no_memory;
                }
                mp->b_cont = mp1;
                mp1 = mp;
                /* Leave room for Link Level header */
                rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra];
                mp1->b_wptr = &rptr[hdrlen];
        }
        mp1->b_rptr = rptr;

        /* Fill in the timestamp option. */
        if (tcp->tcp_snd_ts_ok) {
                U32_TO_BE32(now,
                    (char *)tcpha + TCP_MIN_HEADER_LENGTH + 4);
                U32_TO_BE32(tcp->tcp_ts_recent,
                    (char *)tcpha + TCP_MIN_HEADER_LENGTH + 8);
        } else {
                ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
        }
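
        /*
         * Layout note with an example: the timestamp option is carried
         * as NOP, NOP, TSopt (kind 8, length 10) right after the fixed
         * header, which is why TSval sits at TCP_MIN_HEADER_LENGTH + 4
         * and TSecr at TCP_MIN_HEADER_LENGTH + 8; e.g. now == 0x12345678
         * lands big-endian in bytes 24-27 of the TCP header.
         */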

        /* copy header into outgoing packet */
        dst = (ipaddr_t *)rptr;
        src = (ipaddr_t *)connp->conn_ht_iphc;
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        if (hdrlen -= 40) {
                hdrlen >>= 2;
                dst += 10;
                src += 10;
                do {
                        *dst++ = *src++;
                } while (--hdrlen);
        }
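
        /*
         * The ten-word unrolled copy above moves 40 bytes, the size of
         * a minimal IPv4 plus TCP header (20 + 20); only when options
         * push conn_ht_iphc_len past 40 does the residual loop run.
         * Example with illustrative numbers: with timestamps the header
         * is 52 bytes, so the loop copies (52 - 40) >> 2 == 3 more words.
         */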
1345 
1346         /*
1347          * Set the ECN info in the TCP header.  Note that this
1348          * is not the template header.
1349          */
1350         if (tcp->tcp_ecn_ok) {
1351                 TCP_SET_ECT(tcp, rptr);
1352 
1353                 tcpha = (tcpha_t *)(rptr + ixa->ixa_ip_hdr_length);
1354                 if (tcp->tcp_ecn_echo_on)
1355                         tcpha->tha_flags |= TH_ECE;
1356                 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
1357                         tcpha->tha_flags |= TH_CWR;
1358                         tcp->tcp_ecn_cwr_sent = B_TRUE;
1359                 }
1360         }
1361 
1362         if (tcp->tcp_ip_forward_progress) {
1363                 tcp->tcp_ip_forward_progress = B_FALSE;
1364                 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
1365         } else {
1366                 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
1367         }
1368         tcp_send_data(tcp, mp1);
1369         return;
1370 
1371         /*
1372          * If we ran out of memory, we pretend to have sent the packet
1373          * and that it was lost on the wire.
1374          */
1375 no_memory:
1376         return;
1377 
1378 slow:
1379         /* leftover work from above */
1380         tcp->tcp_unsent = len;
1381         tcp->tcp_xmit_tail_unsent = len;
1382         tcp_wput_data(tcp, NULL, B_FALSE);
1383 }
1384 
1385 /* ARGSUSED2 */
1386 void
1387 tcp_output_urgent(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1388 {
1389         int len;
1390         uint32_t msize;
1391         conn_t *connp = (conn_t *)arg;
1392         tcp_t *tcp = connp->conn_tcp;
1393 
1394         msize = msgdsize(mp);
1395 
1396         len = msize - 1;
1397         if (len < 0) {
1398                 freemsg(mp);
1399                 return;
1400         }
1401 
1402         /*
1403          * Try to force urgent data out on the wire. Even if we have unsent
1404          * data this will at least send the urgent flag.
1405          * XXX does not handle more flag correctly.
1406          */
1407         len += tcp->tcp_unsent;
1408         len += tcp->tcp_snxt;
1409         tcp->tcp_urg = len;
1410         tcp->tcp_valid_bits |= TCP_URG_VALID;
1411 
1412         /* Bypass tcp protocol for fused tcp loopback */
1413         if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
1414                 return;
1415 
1416         /* Strip off the T_EXDATA_REQ if the data is from TPI */
1417         if (DB_TYPE(mp) != M_DATA) {
1418                 mblk_t *mp1 = mp;
1419                 ASSERT(!IPCL_IS_NONSTR(connp));
1420                 mp = mp->b_cont;
1421                 freeb(mp1);
1422         }
1423         tcp_wput_data(tcp, mp, B_TRUE);
1424 }
1425 
1426 /*
1427  * Called by streams close routine via squeues when our client blows off its
1428  * descriptor, we take this to mean: "close the stream state NOW, close the tcp
1429  * connection politely" When SO_LINGER is set (with a non-zero linger time and
1430  * it is not a nonblocking socket) then this routine sleeps until the FIN is
1431  * acked.
1432  *
1433  * NOTE: tcp_close potentially returns error when lingering.
1434  * However, the stream head currently does not pass these errors
1435  * to the application. 4.4BSD only returns EINTR and EWOULDBLOCK
1436  * errors to the application (from tsleep()) and not errors
1437  * like ECONNRESET caused by receiving a reset packet.
1438  */
1439 
1440 /* ARGSUSED */
1441 void
1442 tcp_close_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1443 {
1444         char    *msg;
1445         conn_t  *connp = (conn_t *)arg;
1446         tcp_t   *tcp = connp->conn_tcp;
1447         clock_t delta = 0;
1448         tcp_stack_t     *tcps = tcp->tcp_tcps;
1449 
1450         /*
1451          * When a non-STREAMS socket is being closed, it does not always
1452          * stick around waiting for tcp_close_output to run and can therefore
1453          * have dropped a reference already. So adjust the asserts accordingly.
1454          */
1455         ASSERT((connp->conn_fanout != NULL &&
1456             connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 3 : 4)) ||
1457             (connp->conn_fanout == NULL &&
1458             connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3)));
1459 
1460         mutex_enter(&tcp->tcp_eager_lock);
1461         if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
1462                 /*
1463                  * Cleanup for listener. For non-STREAM sockets sockfs will
1464                  * close all the eagers on 'q', so in that case only deal
1465                  * with 'q0'.
1466                  */
		tcp_eager_cleanup(tcp,
		    IPCL_IS_NONSTR(connp) ? B_TRUE : B_FALSE);
1468                 tcp->tcp_wait_for_eagers = 1;
1469         }
1470         mutex_exit(&tcp->tcp_eager_lock);
1471 
1472         tcp->tcp_lso = B_FALSE;
1473 
1474         msg = NULL;
1475         switch (tcp->tcp_state) {
1476         case TCPS_CLOSED:
1477         case TCPS_IDLE:
1478                 break;
1479         case TCPS_BOUND:
1480                 if (tcp->tcp_listener != NULL) {
1481                         ASSERT(IPCL_IS_NONSTR(connp));
1482                         /*
1483                          * Unlink from the listener and drop the reference
1484                          * put on it by the eager. tcp_closei_local will not
1485                          * do it because tcp_tconnind_started is TRUE.
1486                          */
1487                         mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1488                         tcp_eager_unlink(tcp);
1489                         mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1490                         CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1491                 }
1492                 break;
1493         case TCPS_LISTEN:
1494                 break;
1495         case TCPS_SYN_SENT:
1496                 msg = "tcp_close, during connect";
1497                 break;
1498         case TCPS_SYN_RCVD:
1499                 /*
1500                  * Close during the connect 3-way handshake
1501                  * but here there may or may not be pending data
1502                  * already on queue. Process almost same as in
1503                  * the ESTABLISHED state.
1504                  */
1505                 /* FALLTHRU */
1506         default:
1507                 if (tcp->tcp_fused)
1508                         tcp_unfuse(tcp);
1509 
1510                 /*
1511                  * If SO_LINGER has set a zero linger time, abort the
1512                  * connection with a reset.
1513                  */
1514                 if (connp->conn_linger && connp->conn_lingertime == 0) {
1515                         msg = "tcp_close, zero lingertime";
1516                         break;
1517                 }
1518 
1519                 /*
1520                  * Abort connection if there is unread data queued.
1521                  */
1522                 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) {
1523                         msg = "tcp_close, unread data";
1524                         break;
1525                 }
1526 
1527                 /*
1528                  * Abort connection if it is being closed without first
1529                  * being accepted. This can happen if a listening non-STREAM
1530                  * socket wants to get rid of the socket, for example, if the
1531                  * listener is closing.
1532                  */
1533                 if (tcp->tcp_listener != NULL) {
1534                         ASSERT(IPCL_IS_NONSTR(connp));
1535                         msg = "tcp_close, close before accept";
1536 
1537                         /*
1538                          * Unlink from the listener and drop the reference
1539                          * put on it by the eager. tcp_closei_local will not
1540                          * do it because tcp_tconnind_started is TRUE.
1541                          */
1542                         mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1543                         tcp_eager_unlink(tcp);
1544                         mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1545                         CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1546                         break;
1547                 }
1548 
1549                 /*
1550                  * Transmit the FIN before detaching the tcp_t.
1551                  * After tcp_detach returns this queue/perimeter
1552                  * no longer owns the tcp_t thus others can modify it.
1553                  */
1554                 (void) tcp_xmit_end(tcp);
1555 
1556                 /*
1557                  * If lingering on close then wait until the fin is acked,
1558                  * the SO_LINGER time passes, or a reset is sent/received.
1559                  */
1560                 if (connp->conn_linger && connp->conn_lingertime > 0 &&
1561                     !(tcp->tcp_fin_acked) &&
1562                     tcp->tcp_state >= TCPS_ESTABLISHED) {
1563                         if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) {
1564                                 tcp->tcp_client_errno = EWOULDBLOCK;
1565                         } else if (tcp->tcp_client_errno == 0) {
1566 
1567                                 ASSERT(tcp->tcp_linger_tid == 0);
1568 
1569                                 /* conn_lingertime is in sec. */
1570                                 tcp->tcp_linger_tid = TCP_TIMER(tcp,
1571                                     tcp_close_linger_timeout,
1572                                     connp->conn_lingertime * MILLISEC);
1573 
1574                                 /* tcp_close_linger_timeout will finish close */
1575                                 if (tcp->tcp_linger_tid == 0)
1576                                         tcp->tcp_client_errno = ENOSR;
1577                                 else
1578                                         return;
1579                         }
1580 
1581                         /*
1582                          * Check if we need to detach or just close
1583                          * the instance.
1584                          */
1585                         if (tcp->tcp_state <= TCPS_LISTEN)
1586                                 break;
1587                 }
1588 
1589                 /*
1590                  * Make sure that no other thread will access the conn_rq of
1591                  * this instance (through lookups etc.) as conn_rq will go
1592                  * away shortly.
1593                  */
1594                 tcp_acceptor_hash_remove(tcp);
1595 
1596                 mutex_enter(&tcp->tcp_non_sq_lock);
1597                 if (tcp->tcp_flow_stopped) {
1598                         tcp_clrqfull(tcp);
1599                 }
1600                 mutex_exit(&tcp->tcp_non_sq_lock);
1601 
1602                 if (tcp->tcp_timer_tid != 0) {
1603                         delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
1604                         tcp->tcp_timer_tid = 0;
1605                 }
1606                 /*
1607                  * Need to cancel those timers which will not be used when
1608                  * TCP is detached.  This has to be done before the conn_wq
1609                  * is set to NULL.
1610                  */
1611                 tcp_timers_stop(tcp);
1612 
1613                 tcp->tcp_detached = B_TRUE;
1614                 if (tcp->tcp_state == TCPS_TIME_WAIT) {
1615                         tcp_time_wait_append(tcp);
1616                         TCP_DBGSTAT(tcps, tcp_detach_time_wait);
1617                         ASSERT(connp->conn_ref >=
1618                             (IPCL_IS_NONSTR(connp) ? 2 : 3));
1619                         goto finish;
1620                 }
1621 
1622                 /*
1623                  * If delta is zero the timer event wasn't executed and was
1624                  * successfully canceled. In this case we need to restart it
1625                  * with the minimal delta possible.
1626                  */
1627                 if (delta >= 0)
1628                         tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1629                             delta ? delta : 1);
1630 
1631                 ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3));
1632                 goto finish;
1633         }
1634 
1635         /* Detach did not complete. Still need to remove q from stream. */
1636         if (msg) {
1637                 if (tcp->tcp_state == TCPS_ESTABLISHED ||
1638                     tcp->tcp_state == TCPS_CLOSE_WAIT)
1639                         TCPS_BUMP_MIB(tcps, tcpEstabResets);
1640                 if (tcp->tcp_state == TCPS_SYN_SENT ||
1641                     tcp->tcp_state == TCPS_SYN_RCVD)
1642                         TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1643                 tcp_xmit_ctl(msg, tcp,  tcp->tcp_snxt, 0, TH_RST);
1644         }
1645 
1646         tcp_closei_local(tcp);
1647         CONN_DEC_REF(connp);
1648         ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 1 : 2));
1649 
1650 finish:
1651         /*
1652          * Don't change the queues in the case of a listener that has
1653          * eagers in its q or q0. It could surprise the eagers.
1654          * Instead wait for the eagers outside the squeue.
1655          *
1656          * For non-STREAMS sockets tcp_wait_for_eagers implies that
1657          * we should delay the su_closed upcall until all eagers have
1658          * dropped their references.
1659          */
1660         if (!tcp->tcp_wait_for_eagers) {
1661                 tcp->tcp_detached = B_TRUE;
1662                 connp->conn_rq = NULL;
1663                 connp->conn_wq = NULL;
1664 
1665                 /* non-STREAM socket, release the upper handle */
1666                 if (IPCL_IS_NONSTR(connp)) {
1667                         ASSERT(connp->conn_upper_handle != NULL);
1668                         (*connp->conn_upcalls->su_closed)
1669                             (connp->conn_upper_handle);
1670                         connp->conn_upper_handle = NULL;
1671                         connp->conn_upcalls = NULL;
1672                 }
1673         }
1674 
1675         /* Signal tcp_close() to finish closing. */
1676         mutex_enter(&tcp->tcp_closelock);
1677         tcp->tcp_closed = 1;
1678         cv_signal(&tcp->tcp_closecv);
1679         mutex_exit(&tcp->tcp_closelock);
1680 }
1681 
1682 /* ARGSUSED */
1683 void
1684 tcp_shutdown_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1685 {
1686         conn_t  *connp = (conn_t *)arg;
1687         tcp_t   *tcp = connp->conn_tcp;
1688 
1689         freemsg(mp);
1690 
1691         if (tcp->tcp_fused)
1692                 tcp_unfuse(tcp);
1693 
1694         if (tcp_xmit_end(tcp) != 0) {
1695                 /*
1696                  * We were crossing FINs and got a reset from
1697                  * the other side. Just ignore it.
1698                  */
1699                 if (connp->conn_debug) {
1700                         (void) strlog(TCP_MOD_ID, 0, 1,
1701                             SL_ERROR|SL_TRACE,
1702                             "tcp_shutdown_output() out of state %s",
1703                             tcp_display(tcp, NULL, DISP_ADDR_AND_PORT));
1704                 }
1705         }
1706 }
1707 
1708 #pragma inline(tcp_send_data)
1709 
1710 void
1711 tcp_send_data(tcp_t *tcp, mblk_t *mp)
1712 {
1713         conn_t          *connp = tcp->tcp_connp;
1714 
1715         /*
1716          * Check here to avoid sending zero-copy message down to IP when
1717          * ZEROCOPY capability has turned off. We only need to deal with
1718          * the race condition between sockfs and the notification here.
1719          * Since we have tried to backoff the tcp_xmit_head when turning
1720          * zero-copy off and new messages in tcp_output(), we simply drop
1721          * the dup'ed packet here and let tcp retransmit, if tcp_xmit_zc_clean
1722          * is not true.
1723          */
1724         if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on &&
1725             !tcp->tcp_xmit_zc_clean) {
1726                 ip_drop_output("TCP ZC was disabled but not clean", mp, NULL);
1727                 freemsg(mp);
1728                 return;
1729         }
1730 
1731         DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa,
1732             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, tcp,
1733             __dtrace_tcp_tcph_t *,
1734             &mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
1735 
1736         ASSERT(connp->conn_ixa->ixa_notify_cookie == connp->conn_tcp);
1737         (void) conn_ip_output(mp, connp->conn_ixa);
1738 }
1739 
1740 /* ARGSUSED2 */
1741 void
1742 tcp_send_synack(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1743 {
1744         conn_t  *econnp = (conn_t *)arg;
1745         tcp_t   *tcp = econnp->conn_tcp;
1746         ip_xmit_attr_t *ixa = econnp->conn_ixa;
1747 
1748         /* Guard against a RST having blown it away while on the squeue */
1749         if (tcp->tcp_state == TCPS_CLOSED) {
1750                 freemsg(mp);
1751                 return;
1752         }
1753 
1754         /*
1755          * In the off-chance that the eager received and responded to
1756          * some other packet while the SYN|ACK was queued, we recalculate
1757          * the ixa_pktlen. It would be better to fix the SYN/accept
1758          * multithreading scheme to avoid this complexity.
1759          */
1760         ixa->ixa_pktlen = msgdsize(mp);
1761         (void) conn_ip_output(mp, ixa);
1762 }
1763 
1764 /*
1765  * tcp_send() is called by tcp_wput_data() and returns one of the following:
1766  *
1767  * -1 = failed allocation.
1768  *  0 = We've either successfully sent data, or our usable send window is too
1769  *      small and we'd rather wait until later before sending again.
1770  */
1771 static int
1772 tcp_send(tcp_t *tcp, const int mss, const int total_hdr_len,
1773     const int tcp_hdr_len, const int num_sack_blk, int *usable,
1774     uint32_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time)
1775 {
1776         int             num_lso_seg = 1;
1777         uint_t          lso_usable;
1778         boolean_t       do_lso_send = B_FALSE;
1779         tcp_stack_t     *tcps = tcp->tcp_tcps;
1780         conn_t          *connp = tcp->tcp_connp;
1781         ip_xmit_attr_t  *ixa = connp->conn_ixa;
1782 
1783         /*
1784          * Check LSO possibility. The value of tcp->tcp_lso indicates whether
1785          * the underlying connection is LSO capable. Will check whether having
1786          * enough available data to initiate LSO transmission in the for(){}
1787          * loops.
1788          */
1789         if (tcp->tcp_lso && (tcp->tcp_valid_bits & ~TCP_FSS_VALID) == 0)
1790                 do_lso_send = B_TRUE;
1791 
1792         for (;;) {
1793                 struct datab    *db;
1794                 tcpha_t         *tcpha;
1795                 uint32_t        sum;
1796                 mblk_t          *mp, *mp1;
1797                 uchar_t         *rptr;
1798                 int             len;
1799 
1800                 /*
1801                  * Calculate the maximum payload length we can send at one
1802                  * time.
1803                  */
1804                 if (do_lso_send) {
1805                         /*
1806                          * Determine whether or not it's possible to do LSO,
1807                          * and if so, how much data we can send.
1808                          */
1809                         if ((*usable - 1) / mss >= 1) {
1810                                 lso_usable = MIN(tcp->tcp_lso_max, *usable);
1811                                 num_lso_seg = lso_usable / mss;
1812                                 if (lso_usable % mss) {
1813                                         num_lso_seg++;
1814                                         tcp->tcp_last_sent_len = (ushort_t)
1815                                             (lso_usable % mss);
1816                                 } else {
1817                                         tcp->tcp_last_sent_len = (ushort_t)mss;
1818                                 }
1819                         } else {
1820                                 do_lso_send = B_FALSE;
1821                                 num_lso_seg = 1;
1822                                 lso_usable = mss;
1823                         }
1824                 }
1825 
1826                 ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
1827 
1828                 len = mss;
1829                 if (len > *usable) {
1830                         ASSERT(do_lso_send == B_FALSE);
1831 
1832                         len = *usable;
1833                         if (len <= 0) {
1834                                 /* Terminate the loop */
1835                                 break;  /* success; too small */
1836                         }
1837                         /*
1838                          * Sender silly-window avoidance.
1839                          * Ignore this if we are going to send a
1840                          * zero window probe out.
1841                          *
1842                          * TODO: force data into microscopic window?
1843                          *      ==> (!pushed || (unsent > usable))
1844                          */
1845                         if (len < (tcp->tcp_max_swnd >> 1) &&
1846                             (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
1847                             !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
1848                             len == 1) && (! tcp->tcp_zero_win_probe)) {
1849                                 /*
1850                                  * If the retransmit timer is not running
1851                                  * we start it so that we will retransmit
1852                                  * in the case when the receiver has
1853                                  * decremented the window.
1854                                  */
1855                                 if (*snxt == tcp->tcp_snxt &&
1856                                     *snxt == tcp->tcp_suna) {
1857                                         /*
1858                                          * We are not supposed to send
1859                                          * anything.  So let's wait a little
1860                                          * bit longer before breaking SWS
1861                                          * avoidance.
1862                                          *
1863                                          * What should the value be?
1864                                          * Suggestion: MAX(init rexmit time,
1865                                          * tcp->tcp_rto)
1866                                          */
1867                                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
1868                                 }
1869                                 break;  /* success; too small */
1870                         }
1871                 }
1872 
1873                 tcpha = tcp->tcp_tcpha;
1874 
1875                 /*
1876                  * The reason to adjust len here is that we need to set flags
1877                  * and calculate checksum.
1878                  */
1879                 if (do_lso_send)
1880                         len = lso_usable;
1881 
1882                 *usable -= len; /* Approximate - can be adjusted later */
1883                 if (*usable > 0)
1884                         tcpha->tha_flags = TH_ACK;
1885                 else
1886                         tcpha->tha_flags = (TH_ACK | TH_PUSH);
1887 
1888                 /*
1889                  * Prime pump for IP's checksumming on our behalf.
1890                  * Include the adjustment for a source route if any.
1891                  * In case of LSO, the partial pseudo-header checksum should
1892                  * exclusive TCP length, so zero tha_sum before IP calculate
1893                  * pseudo-header checksum for partial checksum offload.
1894                  */
1895                 if (do_lso_send) {
1896                         sum = 0;
1897                 } else {
1898                         sum = len + tcp_hdr_len + connp->conn_sum;
1899                         sum = (sum >> 16) + (sum & 0xFFFF);
1900                 }
1901                 tcpha->tha_sum = htons(sum);
1902                 tcpha->tha_seq = htonl(*snxt);
1903 
1904                 /*
1905                  * Branch off to tcp_xmit_mp() if any of the VALID bits is
1906                  * set.  For the case when TCP_FSS_VALID is the only valid
1907                  * bit (normal active close), branch off only when we think
1908                  * that the FIN flag needs to be set.  Note for this case,
1909                  * that (snxt + len) may not reflect the actual seg_len,
1910                  * as len may be further reduced in tcp_xmit_mp().  If len
1911                  * gets modified, we will end up here again.
1912                  */
1913                 if (tcp->tcp_valid_bits != 0 &&
1914                     (tcp->tcp_valid_bits != TCP_FSS_VALID ||
1915                     ((*snxt + len) == tcp->tcp_fss))) {
1916                         uchar_t         *prev_rptr;
1917                         uint32_t        prev_snxt = tcp->tcp_snxt;
1918 
1919                         if (*tail_unsent == 0) {
1920                                 ASSERT((*xmit_tail)->b_cont != NULL);
1921                                 *xmit_tail = (*xmit_tail)->b_cont;
1922                                 prev_rptr = (*xmit_tail)->b_rptr;
1923                                 *tail_unsent = (int)((*xmit_tail)->b_wptr -
1924                                     (*xmit_tail)->b_rptr);
1925                         } else {
1926                                 prev_rptr = (*xmit_tail)->b_rptr;
1927                                 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr -
1928                                     *tail_unsent;
1929                         }
1930                         mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL,
1931                             *snxt, B_FALSE, (uint32_t *)&len, B_FALSE);
1932                         /* Restore tcp_snxt so we get amount sent right. */
1933                         tcp->tcp_snxt = prev_snxt;
1934                         if (prev_rptr == (*xmit_tail)->b_rptr) {
1935                                 /*
1936                                  * If the previous timestamp is still in use,
1937                                  * don't stomp on it.
1938                                  */
1939                                 if ((*xmit_tail)->b_next == NULL) {
1940                                         (*xmit_tail)->b_prev = local_time;
1941                                         (*xmit_tail)->b_next =
1942                                             (mblk_t *)(uintptr_t)(*snxt);
1943                                 }
1944                         } else
1945                                 (*xmit_tail)->b_rptr = prev_rptr;
1946 
1947                         if (mp == NULL) {
1948                                 return (-1);
1949                         }
1950                         mp1 = mp->b_cont;
1951 
1952                         if (len <= mss) /* LSO is unusable (!do_lso_send) */
1953                                 tcp->tcp_last_sent_len = (ushort_t)len;
1954                         while (mp1->b_cont) {
1955                                 *xmit_tail = (*xmit_tail)->b_cont;
1956                                 (*xmit_tail)->b_prev = local_time;
1957                                 (*xmit_tail)->b_next =
1958                                     (mblk_t *)(uintptr_t)(*snxt);
1959                                 mp1 = mp1->b_cont;
1960                         }
1961                         *snxt += len;
1962                         *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr;
1963                         BUMP_LOCAL(tcp->tcp_obsegs);
1964                         TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1965                         TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1966                         tcp_send_data(tcp, mp);
1967                         continue;
1968                 }
1969 
1970                 *snxt += len;   /* Adjust later if we don't send all of len */
1971                 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1972                 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1973 
1974                 if (*tail_unsent) {
1975                         /* Are the bytes above us in flight? */
1976                         rptr = (*xmit_tail)->b_wptr - *tail_unsent;
1977                         if (rptr != (*xmit_tail)->b_rptr) {
1978                                 *tail_unsent -= len;
1979                                 if (len <= mss) /* LSO is unusable */
1980                                         tcp->tcp_last_sent_len = (ushort_t)len;
1981                                 len += total_hdr_len;
1982                                 ixa->ixa_pktlen = len;
1983 
1984                                 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1985                                         tcp->tcp_ipha->ipha_length = htons(len);
1986                                 } else {
1987                                         tcp->tcp_ip6h->ip6_plen =
1988                                             htons(len - IPV6_HDR_LEN);
1989                                 }
1990 
1991                                 mp = dupb(*xmit_tail);
1992                                 if (mp == NULL) {
1993                                         return (-1);    /* out_of_mem */
1994                                 }
1995                                 mp->b_rptr = rptr;
1996                                 /*
1997                                  * If the old timestamp is no longer in use,
1998                                  * sample a new timestamp now.
1999                                  */
2000                                 if ((*xmit_tail)->b_next == NULL) {
2001                                         (*xmit_tail)->b_prev = local_time;
2002                                         (*xmit_tail)->b_next =
2003                                             (mblk_t *)(uintptr_t)(*snxt-len);
2004                                 }
2005                                 goto must_alloc;
2006                         }
2007                 } else {
2008                         *xmit_tail = (*xmit_tail)->b_cont;
2009                         ASSERT((uintptr_t)((*xmit_tail)->b_wptr -
2010                             (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX);
2011                         *tail_unsent = (int)((*xmit_tail)->b_wptr -
2012                             (*xmit_tail)->b_rptr);
2013                 }
2014 
2015                 (*xmit_tail)->b_prev = local_time;
2016                 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len);
2017 
2018                 *tail_unsent -= len;
2019                 if (len <= mss) /* LSO is unusable (!do_lso_send) */
2020                         tcp->tcp_last_sent_len = (ushort_t)len;
2021 
2022                 len += total_hdr_len;
2023                 ixa->ixa_pktlen = len;
2024 
2025                 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2026                         tcp->tcp_ipha->ipha_length = htons(len);
2027                 } else {
2028                         tcp->tcp_ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2029                 }
2030 
2031                 mp = dupb(*xmit_tail);
2032                 if (mp == NULL) {
2033                         return (-1);    /* out_of_mem */
2034                 }
2035 
2036                 len = total_hdr_len;
2037                 /*
2038                  * There are four reasons to allocate a new hdr mblk:
2039                  *  1) The bytes above us are in use by another packet
2040                  *  2) We don't have good alignment
2041                  *  3) The mblk is being shared
2042                  *  4) We don't have enough room for a header
2043                  */
2044                 rptr = mp->b_rptr - len;
2045                 if (!OK_32PTR(rptr) ||
2046                     ((db = mp->b_datap), db->db_ref != 2) ||
2047                     rptr < db->db_base) {
2048                         /* NOTE: we assume allocb returns an OK_32PTR */
2049 
2050                 must_alloc:;
2051                         mp1 = allocb(connp->conn_ht_iphc_allocated +
2052                             tcps->tcps_wroff_xtra, BPRI_MED);
2053                         if (mp1 == NULL) {
2054                                 freemsg(mp);
2055                                 return (-1);    /* out_of_mem */
2056                         }
2057                         mp1->b_cont = mp;
2058                         mp = mp1;
2059                         /* Leave room for Link Level header */
2060                         len = total_hdr_len;
2061                         rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2062                         mp->b_wptr = &rptr[len];
2063                 }
2064 
2065                 /*
2066                  * Fill in the header using the template header, and add
2067                  * options such as time-stamp, ECN and/or SACK, as needed.
2068                  */
2069                 tcp_fill_header(tcp, rptr, num_sack_blk);
2070 
2071                 mp->b_rptr = rptr;
2072 
2073                 if (*tail_unsent) {
2074                         int spill = *tail_unsent;
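			/*
			 * spill keeps *tail_unsent's sign convention: a
			 * negative value means the dup'ed tail is short by
			 * -spill bytes, so further mblks are chained on
			 * below; a positive value is surplus that is
			 * trimmed off the last mblk afterwards.
			 */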
2075 
2076                         mp1 = mp->b_cont;
2077                         if (mp1 == NULL)
2078                                 mp1 = mp;
2079 
2080                         /*
2081                          * If we're a little short, tack on more mblks until
2082                          * there is no more spillover.
2083                          */
2084                         while (spill < 0) {
2085                                 mblk_t *nmp;
2086                                 int nmpsz;
2087 
2088                                 nmp = (*xmit_tail)->b_cont;
2089                                 nmpsz = MBLKL(nmp);
2090 
2091                                 /*
2092                                  * Excess data in mblk; can we split it?
2093                                  * If LSO is enabled for the connection,
2094                                  * keep on splitting as this is a transient
2095                                  * send path.
2096                                  */
2097                                 if (!do_lso_send && (spill + nmpsz > 0)) {
2098                                         /*
2099                                          * Don't split if stream head was
2100                                          * told to break up larger writes
2101                                          * into smaller ones.
2102                                          */
2103                                         if (tcp->tcp_maxpsz_multiplier > 0)
2104                                                 break;
2105 
2106                                         /*
2107                                          * Next mblk is less than SMSS/2
2108                                          * rounded up to nearest 64-byte;
2109                                          * let it get sent as part of the
2110                                          * next segment.
2111                                          */
2112                                         if (tcp->tcp_localnet &&
2113                                             !tcp->tcp_cork &&
2114                                             (nmpsz < roundup((mss >> 1), 64)))
2115                                                 break;
2116                                 }
2117 
2118                                 *xmit_tail = nmp;
2119                                 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX);
2120                                 /* Stash for rtt use later */
2121                                 (*xmit_tail)->b_prev = local_time;
2122                                 (*xmit_tail)->b_next =
2123                                     (mblk_t *)(uintptr_t)(*snxt - len);
2124                                 mp1->b_cont = dupb(*xmit_tail);
2125                                 mp1 = mp1->b_cont;
2126 
2127                                 spill += nmpsz;
2128                                 if (mp1 == NULL) {
2129                                         *tail_unsent = spill;
2130                                         freemsg(mp);
2131                                         return (-1);    /* out_of_mem */
2132                                 }
2133                         }
2134 
2135                         /* Trim back any surplus on the last mblk */
2136                         if (spill >= 0) {
2137                                 mp1->b_wptr -= spill;
2138                                 *tail_unsent = spill;
2139                         } else {
2140                                 /*
2141                                  * We did not send everything we could in
2142                                  * order to remain within the b_cont limit.
2143                                  */
2144                                 *usable -= spill;
2145                                 *snxt += spill;
2146                                 tcp->tcp_last_sent_len += spill;
2147                                 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, spill);
2148                                 /*
2149                                  * Adjust the checksum
2150                                  */
2151                                 tcpha = (tcpha_t *)(rptr +
2152                                     ixa->ixa_ip_hdr_length);
2153                                 sum += spill;
2154                                 sum = (sum >> 16) + (sum & 0xFFFF);
2155                                 tcpha->tha_sum = htons(sum);
2156                                 if (connp->conn_ipversion == IPV4_VERSION) {
2157                                         sum = ntohs(
2158                                             ((ipha_t *)rptr)->ipha_length) +
2159                                             spill;
2160                                         ((ipha_t *)rptr)->ipha_length =
2161                                             htons(sum);
2162                                 } else {
2163                                         sum = ntohs(
2164                                             ((ip6_t *)rptr)->ip6_plen) +
2165                                             spill;
2166                                         ((ip6_t *)rptr)->ip6_plen =
2167                                             htons(sum);
2168                                 }
2169                                 ixa->ixa_pktlen += spill;
2170                                 *tail_unsent = 0;
2171                         }
2172                 }
2173                 if (tcp->tcp_ip_forward_progress) {
2174                         tcp->tcp_ip_forward_progress = B_FALSE;
2175                         ixa->ixa_flags |= IXAF_REACH_CONF;
2176                 } else {
2177                         ixa->ixa_flags &= ~IXAF_REACH_CONF;
2178                 }
2179 
2180                 if (do_lso_send) {
2181                         /* Append LSO information to the mp. */
2182                         lso_info_set(mp, mss, HW_LSO);
2183                         ixa->ixa_fragsize = IP_MAXPACKET;
2184                         ixa->ixa_extra_ident = num_lso_seg - 1;
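			/*
			 * ixa_extra_ident reserves additional IPv4 ident
			 * values for the extra segments the NIC will
			 * generate, so that each LSO-produced packet gets
			 * a distinct ident.
			 */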
2185 
2186                         DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg,
2187                             boolean_t, B_TRUE);
2188 
2189                         tcp_send_data(tcp, mp);
2190 
2191                         /*
2192                          * Restore values of ixa_fragsize and ixa_extra_ident.
2193                          */
2194                         ixa->ixa_fragsize = ixa->ixa_pmtu;
2195                         ixa->ixa_extra_ident = 0;
2196                         tcp->tcp_obsegs += num_lso_seg;
2197                         TCP_STAT(tcps, tcp_lso_times);
2198                         TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
2199                 } else {
2200                         /*
2201                          * Make sure to clean up LSO information. Wherever a
2202                          * new mp uses the prepended header room after dupb(),
2203                          * lso_info_cleanup() should be called.
2204                          */
2205                         lso_info_cleanup(mp);
2206                         tcp_send_data(tcp, mp);
2207                         BUMP_LOCAL(tcp->tcp_obsegs);
2208                 }
2209         }
2210 
2211         return (0);
2212 }
2213 
2214 /*
2215  * Initiate closedown sequence on an active connection.  (May be called as
2216  * writer.)  Return value zero for OK return, non-zero for error return.
2217  */
2218 static int
2219 tcp_xmit_end(tcp_t *tcp)
2220 {
2221         mblk_t          *mp;
2222         tcp_stack_t     *tcps = tcp->tcp_tcps;
2223         iulp_t          uinfo;
2224         ip_stack_t      *ipst = tcps->tcps_netstack->netstack_ip;
2225         conn_t          *connp = tcp->tcp_connp;
2226 
2227         if (tcp->tcp_state < TCPS_SYN_RCVD ||
2228             tcp->tcp_state > TCPS_CLOSE_WAIT) {
2229                 /*
2230                  * Invalid state, only states TCPS_SYN_RCVD,
2231                  * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
2232                  */
2233                 return (-1);
2234         }
2235 
2236         tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
2237         tcp->tcp_valid_bits |= TCP_FSS_VALID;
2238         /*
2239          * If there is nothing more unsent, send the FIN now.
2240          * Otherwise, it will go out with the last segment.
2241          */
2242         if (tcp->tcp_unsent == 0) {
2243                 mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
2244                     tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
2245 
2246                 if (mp) {
2247                         tcp_send_data(tcp, mp);
2248                 } else {
2249                         /*
2250                          * Couldn't allocate msg.  Pretend we got it out.
2251                          * Wait for rexmit timeout.
2252                          */
2253                         tcp->tcp_snxt = tcp->tcp_fss + 1;
2254                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
2255                 }
2256 
2257                 /*
2258                  * If needed, update tcp_rexmit_snxt as tcp_snxt is
2259                  * changed.
2260                  */
2261                 if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
2262                         tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
2263                 }
2264         } else {
2265                 /*
2266                  * If tcp->tcp_cork is set, then the data will not get sent,
2267                  * so we have to check that and unset it first.
2268                  */
2269                 if (tcp->tcp_cork)
2270                         tcp->tcp_cork = B_FALSE;
2271                 tcp_wput_data(tcp, NULL, B_FALSE);
2272         }
2273 
2274         /*
2275          * If TCP does not get enough samples of RTT or tcp_rtt_updates
2276          * is 0, don't update the cache.
2277          */
2278         if (tcps->tcps_rtt_updates == 0 ||
2279             tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
2280                 return (0);
2281 
2282         /*
2283          * We do not have a good algorithm to update ssthresh at this time.
2284          * So don't do any update.
2285          */
2286         bzero(&uinfo, sizeof (uinfo));
2287         uinfo.iulp_rtt = NSEC2MSEC(tcp->tcp_rtt_sa);
2288         uinfo.iulp_rtt_sd = NSEC2MSEC(tcp->tcp_rtt_sd);
2289 
2290         /*
2291          * Note that uinfo is kept for conn_faddr in the DCE. Could update even
2292          * if source routed but we don't.
2293          */
2294         if (connp->conn_ipversion == IPV4_VERSION) {
2295                 if (connp->conn_faddr_v4 !=  tcp->tcp_ipha->ipha_dst) {
2296                         return (0);
2297                 }
2298                 (void) dce_update_uinfo_v4(connp->conn_faddr_v4, &uinfo, ipst);
2299         } else {
2300                 uint_t ifindex;
2301 
2302                 if (!(IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6,
2303                     &tcp->tcp_ip6h->ip6_dst))) {
2304                         return (0);
2305                 }
2306                 ifindex = 0;
2307                 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6)) {
2308                         ip_xmit_attr_t *ixa = connp->conn_ixa;
2309 
2310                         /*
2311                          * If we are going to create a DCE we'd better have
2312                          * an ifindex
2313                          */
2314                         if (ixa->ixa_nce != NULL) {
2315                                 ifindex = ixa->ixa_nce->nce_common->ncec_ill->
2316                                     ill_phyint->phyint_ifindex;
2317                         } else {
2318                                 return (0);
2319                         }
2320                 }
2321 
2322                 (void) dce_update_uinfo(&connp->conn_faddr_v6, ifindex, &uinfo,
2323                     ipst);
2324         }
2325         return (0);
2326 }
2327 
2328 /*
2329  * Send out a control packet on the tcp connection specified.  This routine
2330  * is typically called where we need a simple ACK or RST generated.
2331  */
2332 void
2333 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl)
2334 {
2335         uchar_t         *rptr;
2336         tcpha_t         *tcpha;
2337         ipha_t          *ipha = NULL;
2338         ip6_t           *ip6h = NULL;
2339         uint32_t        sum;
2340         int             total_hdr_len;
2341         int             ip_hdr_len;
2342         mblk_t          *mp;
2343         tcp_stack_t     *tcps = tcp->tcp_tcps;
2344         conn_t          *connp = tcp->tcp_connp;
2345         ip_xmit_attr_t  *ixa = connp->conn_ixa;
2346 
2347         /*
2348          * Save sum for use in source route later.
2349          */
2350         sum = connp->conn_ht_ulp_len + connp->conn_sum;
2351         total_hdr_len = connp->conn_ht_iphc_len;
2352         ip_hdr_len = ixa->ixa_ip_hdr_length;
2353 
2354         /* If a text string is passed in with the request, pass it to strlog. */
2355         if (str != NULL && connp->conn_debug) {
2356                 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2357                     "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x",
2358                     str, seq, ack, ctl);
2359         }
2360         mp = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
2361             BPRI_MED);
2362         if (mp == NULL) {
2363                 return;
2364         }
2365         rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2366         mp->b_rptr = rptr;
2367         mp->b_wptr = &rptr[total_hdr_len];
2368         bcopy(connp->conn_ht_iphc, rptr, total_hdr_len);
2369 
2370         ixa->ixa_pktlen = total_hdr_len;
2371 
2372         if (ixa->ixa_flags & IXAF_IS_IPV4) {
2373                 ipha = (ipha_t *)rptr;
2374                 ipha->ipha_length = htons(total_hdr_len);
2375         } else {
2376                 ip6h = (ip6_t *)rptr;
2377                 ip6h->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
2378         }
2379         tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2380         tcpha->tha_flags = (uint8_t)ctl;
2381         if (ctl & TH_RST) {
2382                 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2383                 TCPS_BUMP_MIB(tcps, tcpOutControl);
2384                 /*
2385                  * Don't send TSopt w/ TH_RST packets per RFC 1323.
2386                  */
2387                 if (tcp->tcp_snd_ts_ok &&
2388                     tcp->tcp_state > TCPS_SYN_SENT) {
2389                         mp->b_wptr = &rptr[total_hdr_len - TCPOPT_REAL_TS_LEN];
2390                         *(mp->b_wptr) = TCPOPT_EOL;
2391 
2392                         ixa->ixa_pktlen = total_hdr_len - TCPOPT_REAL_TS_LEN;
2393 
2394                         if (connp->conn_ipversion == IPV4_VERSION) {
2395                                 ipha->ipha_length = htons(total_hdr_len -
2396                                     TCPOPT_REAL_TS_LEN);
2397                         } else {
2398                                 ip6h->ip6_plen = htons(total_hdr_len -
2399                                     IPV6_HDR_LEN - TCPOPT_REAL_TS_LEN);
2400                         }
2401                         tcpha->tha_offset_and_reserved -= (3 << 4);
2402                         sum -= TCPOPT_REAL_TS_LEN;
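			/*
			 * TCPOPT_REAL_TS_LEN (NOP, NOP plus the 10-byte
			 * timestamp option) is 12 bytes, i.e. three 32-bit
			 * words, hence the 3 << 4 data-offset adjustment
			 * above and the matching reduction of the stored
			 * checksum length.
			 */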
2403                 }
2404         }
2405         if (ctl & TH_ACK) {
2406                 if (tcp->tcp_snd_ts_ok) {
2407                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2408 
2409                         U32_TO_BE32(llbolt,
2410                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2411                         U32_TO_BE32(tcp->tcp_ts_recent,
2412                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2413                 }
2414 
2415                 /* Update the latest receive window size in TCP header. */
2416                 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2417                 /* Track what we sent to the peer */
2418                 tcp->tcp_tcpha->tha_win = tcpha->tha_win;
2419                 tcp->tcp_rack = ack;
2420                 tcp->tcp_rack_cnt = 0;
2421                 TCPS_BUMP_MIB(tcps, tcpOutAck);
2422         }
2423         BUMP_LOCAL(tcp->tcp_obsegs);
2424         tcpha->tha_seq = htonl(seq);
2425         tcpha->tha_ack = htonl(ack);
2426         /*
2427          * Include the adjustment for a source route if any.
2428          */
2429         sum = (sum >> 16) + (sum & 0xFFFF);
2430         tcpha->tha_sum = htons(sum);
2431         tcp_send_data(tcp, mp);
2432 }
2433 
2434 /*
2435  * Generate a reset based on an inbound packet, connp is set by caller
2436  * when RST is in response to an unexpected inbound packet for which
2437  * there is active tcp state in the system.
2438  *
2439  * IPSEC NOTE : Try to send the reply with the same protection as it came
2440  * in.  We have the ip_recv_attr_t which is reversed to form the ip_xmit_attr_t.
2441  * That way the packet will go out at the same level of protection as it
2442  * came in with.
2443  */
2444 static void
2445 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, uint32_t ack, int ctl,
2446     ip_recv_attr_t *ira, ip_stack_t *ipst, conn_t *connp)
2447 {
2448         ipha_t          *ipha = NULL;
2449         ip6_t           *ip6h = NULL;
2450         ushort_t        len;
2451         tcpha_t         *tcpha;
2452         int             i;
2453         ipaddr_t        v4addr;
2454         in6_addr_t      v6addr;
2455         netstack_t      *ns = ipst->ips_netstack;
2456         tcp_stack_t     *tcps = ns->netstack_tcp;
2457         ip_xmit_attr_t  ixas, *ixa;
2458         uint_t          ip_hdr_len = ira->ira_ip_hdr_length;
2459         boolean_t       need_refrele = B_FALSE;         /* ixa_refrele(ixa) */
2460         ushort_t        port;
2461 
2462         if (!tcp_send_rst_chk(tcps)) {
2463                 TCP_STAT(tcps, tcp_rst_unsent);
2464                 freemsg(mp);
2465                 return;
2466         }
2467 
2468         /*
2469          * If connp != NULL we use conn_ixa to keep IP_NEXTHOP and other
2470          * options from the listener. In that case the caller must ensure that
2471          * we are running on the listener = connp squeue.
2472          *
2473          * We get a safe copy of conn_ixa so we don't need to restore anything
2474          * we or ip_output_simple might change in the ixa.
2475          */
2476         if (connp != NULL) {
2477                 ASSERT(connp->conn_on_sqp);
2478 
2479                 ixa = conn_get_ixa_exclusive(connp);
2480                 if (ixa == NULL) {
2481                         TCP_STAT(tcps, tcp_rst_unsent);
2482                         freemsg(mp);
2483                         return;
2484                 }
2485                 need_refrele = B_TRUE;
2486         } else {
2487                 bzero(&ixas, sizeof (ixas));
2488                 ixa = &ixas;
2489                 /*
2490                  * IXAF_VERIFY_SOURCE is overkill since we know the
2491                  * packet was for us.
2492                  */
2493                 ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE;
2494                 ixa->ixa_protocol = IPPROTO_TCP;
2495                 ixa->ixa_zoneid = ira->ira_zoneid;
2496                 ixa->ixa_ifindex = 0;
2497                 ixa->ixa_ipst = ipst;
2498                 ixa->ixa_cred = kcred;
2499                 ixa->ixa_cpid = NOPID;
2500         }
2501 
2502         if (str && tcps->tcps_dbg) {
2503                 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2504                     "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, "
2505                     "flags 0x%x",
2506                     str, seq, ack, ctl);
2507         }
2508         if (mp->b_datap->db_ref != 1) {
2509                 mblk_t *mp1 = copyb(mp);
2510                 freemsg(mp);
2511                 mp = mp1;
2512                 if (mp == NULL)
2513                         goto done;
2514         } else if (mp->b_cont) {
2515                 freemsg(mp->b_cont);
2516                 mp->b_cont = NULL;
2517                 DB_CKSUMFLAGS(mp) = 0;
2518         }
2519         /*
2520          * We skip reversing source route here.
2521          * (for now we replace all IP options with EOL)
2522          */
2523         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2524                 ipha = (ipha_t *)mp->b_rptr;
2525                 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
2526                         mp->b_rptr[i] = IPOPT_EOL;
2527                 /*
2528                  * Make sure that src address isn't flagrantly invalid.
2529                  * Not all broadcast address checking for the src address
2530                  * is possible, since we don't know the netmask of the src
2531                  * addr.  No check for destination address is done, since
2532                  * IP will not pass up a packet with a broadcast dest
2533                  * address to TCP.  Similar checks are done below for IPv6.
2534                  */
2535                 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST ||
2536                     CLASSD(ipha->ipha_src)) {
2537                         BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
2538                         ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2539                         freemsg(mp);
2540                         goto done;
2541                 }
2542         } else {
2543                 ip6h = (ip6_t *)mp->b_rptr;
2544 
2545                 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) ||
2546                     IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) {
2547                         BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards);
2548                         ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2549                         freemsg(mp);
2550                         goto done;
2551                 }
2552 
		/*
		 * Remove any extension headers by sliding the basic IPv6
		 * header forward over them (ovbcopy handles the overlap)
		 * so that it immediately precedes the TCP header.
		 */
2554                 if (ip_hdr_len > IPV6_HDR_LEN) {
2555                         uint8_t *to;
2556 
2557                         to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
2558                         ovbcopy(ip6h, to, IPV6_HDR_LEN);
2559                         mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
2560                         ip_hdr_len = IPV6_HDR_LEN;
2561                         ip6h = (ip6_t *)mp->b_rptr;
2562                         ip6h->ip6_nxt = IPPROTO_TCP;
2563                 }
2564         }
2565         tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
2566         if (tcpha->tha_flags & TH_RST) {
2567                 freemsg(mp);
2568                 goto done;
2569         }
2570         tcpha->tha_offset_and_reserved = (5 << 4);
2571         len = ip_hdr_len + sizeof (tcpha_t);
2572         mp->b_wptr = &mp->b_rptr[len];
2573         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2574                 ipha->ipha_length = htons(len);
2575                 /* Swap addresses */
2576                 v4addr = ipha->ipha_src;
2577                 ipha->ipha_src = ipha->ipha_dst;
2578                 ipha->ipha_dst = v4addr;
2579                 ipha->ipha_ident = 0;
2580                 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl;
2581                 ixa->ixa_flags |= IXAF_IS_IPV4;
2582                 ixa->ixa_ip_hdr_length = ip_hdr_len;
2583         } else {
2584                 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2585                 /* Swap addresses */
2586                 v6addr = ip6h->ip6_src;
2587                 ip6h->ip6_src = ip6h->ip6_dst;
2588                 ip6h->ip6_dst = v6addr;
2589                 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit;
2590                 ixa->ixa_flags &= ~IXAF_IS_IPV4;
2591 
2592                 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
2593                         ixa->ixa_flags |= IXAF_SCOPEID_SET;
2594                         ixa->ixa_scopeid = ira->ira_ruifindex;
2595                 }
2596                 ixa->ixa_ip_hdr_length = IPV6_HDR_LEN;
2597         }
2598         ixa->ixa_pktlen = len;
2599 
2600         /* Swap the ports */
2601         port = tcpha->tha_fport;
2602         tcpha->tha_fport = tcpha->tha_lport;
2603         tcpha->tha_lport = port;
2604 
2605         tcpha->tha_ack = htonl(ack);
2606         tcpha->tha_seq = htonl(seq);
2607         tcpha->tha_win = 0;
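             /*
              * Seed the checksum with the TCP length portion of the
              * pseudo-header (20 bytes for this bare header); the IP
              * output path folds in the addresses and computes the
              * final one's-complement sum.
              */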
2608         tcpha->tha_sum = htons(sizeof (tcpha_t));
2609         tcpha->tha_flags = (uint8_t)ctl;
2610         if (ctl & TH_RST) {
2611                 if (ctl & TH_ACK) {
2612                         /*
2613                          * Probe connection rejection here.
2614                          * tcp_xmit_listeners_reset() drops non-SYN segments
2615                          * that do not specify TH_ACK in their flags without
2616                          * calling this function.  As a consequence, if this
2617                          * function is called with a TH_RST|TH_ACK ctl argument,
2618                          * it is being called in response to a SYN segment
2619                          * and thus the tcp:::accept-refused probe point
2620                          * is valid here.
2621                          */
2622                         DTRACE_TCP5(accept__refused, mblk_t *, NULL,
2623                             void, NULL, void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2624                             tcph_t *, tcpha);
2625                 }
2626                 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2627                 TCPS_BUMP_MIB(tcps, tcpOutControl);
2628         }
2629 
2630         /* Discard any old label */
2631         if (ixa->ixa_free_flags & IXA_FREE_TSL) {
2632                 ASSERT(ixa->ixa_tsl != NULL);
2633                 label_rele(ixa->ixa_tsl);
2634                 ixa->ixa_free_flags &= ~IXA_FREE_TSL;
2635         }
2636         ixa->ixa_tsl = ira->ira_tsl;      /* Behave as a multi-level responder */
2637 
2638         if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2639                 /*
2640                  * Apply IPsec based on how IPsec was applied to
2641                  * the packet that caused the RST.
2642                  */
2643                 if (!ipsec_in_to_out(ira, ixa, mp, ipha, ip6h)) {
2644                         BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2645                         /* Note: mp already consumed and ip_drop_packet done */
2646                         goto done;
2647                 }
2648         } else {
2649                 /*
2650                  * This is in clear. The RST message we are building
2651                  * here should go out in clear, independent of our policy.
2652                  */
2653                 ixa->ixa_flags |= IXAF_NO_IPSEC;
2654         }
2655 
2656         DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa,
2657             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2658             __dtrace_tcp_tcph_t *, tcpha);
2659 
2660         /*
2661          * NOTE:  one might consider tracing a TCP packet here, but
2662          * this function has no active TCP state and no tcp structure
2663          * that has a trace buffer.  If we traced here, we would have
2664          * to keep a local trace buffer in tcp_record_trace().
2665          */
2666 
2667         (void) ip_output_simple(mp, ixa);
2668 done:
2669         ixa_cleanup(ixa);
2670         if (need_refrele) {
2671                 ASSERT(ixa != &ixas);
2672                 ixa_refrele(ixa);
2673         }
2674 }
2675 
2676 /*
2677  * Generate a "no listener here" RST in response to an "unknown" segment.
2678  * connp is set by the caller when the RST answers an unexpected
2679  * inbound packet for which there is active tcp state in the system.
2680  * Note that we are reusing the incoming mp to construct the outgoing RST.
2681  */
2682 void
2683 tcp_xmit_listeners_reset(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst,
2684     conn_t *connp)
2685 {
2686         uchar_t         *rptr;
2687         uint32_t        seg_len;
2688         tcpha_t         *tcpha;
2689         uint32_t        seg_seq;
2690         uint32_t        seg_ack;
2691         uint_t          flags;
2692         ipha_t          *ipha;
2693         ip6_t           *ip6h;
2694         boolean_t       policy_present;
2695         netstack_t      *ns = ipst->ips_netstack;
2696         tcp_stack_t     *tcps = ns->netstack_tcp;
2697         ipsec_stack_t   *ipss = tcps->tcps_netstack->netstack_ipsec;
2698         uint_t          ip_hdr_len = ira->ira_ip_hdr_length;
2699 
2700         TCP_STAT(tcps, tcp_no_listener);
2701 
2702         /*
2703          * DTrace this "unknown" segment as a tcp:::receive, as we did
2704          * just receive something that was TCP.
2705          */
2706         DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, NULL,
2707             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2708             __dtrace_tcp_tcph_t *, &mp->b_rptr[ip_hdr_len]);
2709 
2710         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2711                 policy_present = ipss->ipsec_inbound_v4_policy_present;
2712                 ipha = (ipha_t *)mp->b_rptr;
2713                 ip6h = NULL;
2714         } else {
2715                 policy_present = ipss->ipsec_inbound_v6_policy_present;
2716                 ipha = NULL;
2717                 ip6h = (ip6_t *)mp->b_rptr;
2718         }
2719 
2720         if (policy_present) {
2721                 /*
2722                  * The conn_t parameter is NULL because we already know
2723                  * nobody's home.
2724                  */
2725                 mp = ipsec_check_global_policy(mp, (conn_t *)NULL, ipha, ip6h,
2726                     ira, ns);
2727                 if (mp == NULL)
2728                         return;
2729         }
2730         if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
2731                 DTRACE_PROBE2(
2732                     tx__ip__log__error__nolistener__tcp,
2733                     char *, "Could not reply with RST to mp(1)",
2734                     mblk_t *, mp);
2735                 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n"));
2736                 freemsg(mp);
2737                 return;
2738         }
2739 
2740         rptr = mp->b_rptr;
2741 
2742         tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2743         seg_seq = ntohl(tcpha->tha_seq);
2744         seg_ack = ntohl(tcpha->tha_ack);
2745         flags = tcpha->tha_flags;
2746 
2747         seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcpha) + ip_hdr_len);
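             /*
              * Per RFC 793, a segment carrying an ACK is answered with a
              * bare RST whose sequence number is the incoming ACK value;
              * otherwise the RST also ACKs everything the offending
              * segment occupied in sequence space (a SYN counts as one
              * octet, hence the seg_len++ below).
              */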
2748         if (flags & TH_RST) {
2749                 freemsg(mp);
2750         } else if (flags & TH_ACK) {
2751                 tcp_xmit_early_reset("no tcp, reset", mp, seg_ack, 0, TH_RST,
2752                     ira, ipst, connp);
2753         } else {
2754                 if (flags & TH_SYN) {
2755                         seg_len++;
2756                 } else {
2757                         /*
2758                          * Here we violate the RFC.  Note that a normal
2759                          * TCP will never send a segment without the ACK
2760                          * flag, except for RST or SYN segments.  This
2761                          * segment is neither.  Just drop it on the
2762                          * floor.
2763                          */
2764                         freemsg(mp);
2765                         TCP_STAT(tcps, tcp_rst_unsent);
2766                         return;
2767                 }
2768 
2769                 tcp_xmit_early_reset("no tcp, reset/ack", mp, 0,
2770                     seg_seq + seg_len, TH_RST | TH_ACK, ira, ipst, connp);
2771         }
2772 }
2773 
2774 /*
2775  * Helper function for tcp_xmit_mp() in handling connection set up flag
2776  * options setting.
2777  */
2778 static void
2779 tcp_xmit_mp_aux_iss(tcp_t *tcp, conn_t *connp, tcpha_t *tcpha, mblk_t *mp,
2780     uint_t *flags)
2781 {
2782         uint32_t u1;
2783         uint8_t *wptr = mp->b_wptr;
2784         tcp_stack_t *tcps = tcp->tcp_tcps;
2785         boolean_t add_sack = B_FALSE;
2786 
2787         /*
2788          * If TCP_ISS_VALID and the seq number is tcp_iss,
2789          * TCP can only be in SYN-SENT, SYN-RCVD or
2790          * FIN-WAIT-1 state.  It can be FIN-WAIT-1 if
2791          * our SYN is not ack'ed but the app closes this
2792          * TCP connection.
2793          */
2794         ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
2795             tcp->tcp_state == TCPS_SYN_RCVD ||
2796             tcp->tcp_state == TCPS_FIN_WAIT_1);
2797 
2798         /*
2799          * Tack on the MSS option.  It is always needed
2800          * for both active and passive open.
2801          *
2802          * The MSS option value should be the interface MTU
2803          * minus the minimum TCP/IP header, per RFC 793, as
2804          * it is the maximum segment size TCP can receive.  But
2805          * to get around some broken middle boxes/end hosts
2806          * out there, we allow the option value to be the
2807          * same as the MSS option value on the peer side.
2808          * In this way, the other side will not send
2809          * anything larger than they can receive.
2810          *
2811          * Note that for SYN_SENT state, the ndd param
2812          * tcp_use_smss_as_mss_opt has no effect as we
2813          * don't know the peer's MSS option value. So
2814          * the only case we need to take care of is in
2815          * SYN_RCVD state, which is done later.
2816          */
2817         wptr[0] = TCPOPT_MAXSEG;
2818         wptr[1] = TCPOPT_MAXSEG_LEN;
2819         wptr += 2;
2820         u1 = tcp->tcp_initial_pmtu - (connp->conn_ipversion == IPV4_VERSION ?
2821             IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) - TCP_MIN_HEADER_LENGTH;
2822         U16_TO_BE16(u1, wptr);
2823         wptr += 2;
2824 
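             /*
              * On the wire the option now reads, for an assumed 1460-byte
              * MSS: kind=2 (TCPOPT_MAXSEG), len=4, value=0x05b4 -- exactly
              * one 32-bit word, hence the offset bump below.
              */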
2825         /* Update the offset to cover the additional word */
2826         tcpha->tha_offset_and_reserved += (1 << 4);
2827 
2828         switch (tcp->tcp_state) {
2829         case TCPS_SYN_SENT:
2830                 *flags = TH_SYN;
2831 
2832                 if (tcp->tcp_snd_sack_ok)
2833                         add_sack = B_TRUE;
2834 
2835                 if (tcp->tcp_snd_ts_ok) {
2836                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2837 
2838                         if (add_sack) {
2839                                 wptr[0] = TCPOPT_SACK_PERMITTED;
2840                                 wptr[1] = TCPOPT_SACK_OK_LEN;
2841                                 add_sack = B_FALSE;
2842                         } else {
2843                                 wptr[0] = TCPOPT_NOP;
2844                                 wptr[1] = TCPOPT_NOP;
2845                         }
2846                         wptr[2] = TCPOPT_TSTAMP;
2847                         wptr[3] = TCPOPT_TSTAMP_LEN;
2848                         wptr += 4;
2849                         U32_TO_BE32(llbolt, wptr);
2850                         wptr += 4;
2851                         ASSERT(tcp->tcp_ts_recent == 0);
2852                         U32_TO_BE32(0L, wptr);
2853                         wptr += 4;
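                             /*
                              * SACK-permitted (or two NOPs) plus the
                              * 10-byte timestamp option pads to 12 bytes,
                              * i.e. three 32-bit words of options.
                              */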
2854                         tcpha->tha_offset_and_reserved += (3 << 4);
2855                 }
2856 
2857                 /*
2858                  * Set up all the bits to tell other side
2859                  * we are ECN capable.
2860                  */
2861                 if (tcp->tcp_ecn_ok)
2862                         *flags |= (TH_ECE | TH_CWR);
2863 
2864                 break;
2865 
2866         case TCPS_SYN_RCVD:
2867                 *flags |= TH_SYN;
2868 
2869                 /*
2870                  * Reset the MSS option value to be SMSS.
2871                  * We should probably add back the bytes
2872                  * for timestamp option and IPsec.  We
2873                  * don't do that as this is a workaround
2874                  * for broken middle boxes/end hosts, it
2875                  * is better for us to be more cautious.
2876                  * They may not take these things into
2877                  * account in their SMSS calculation.  Thus
2878                  * the peer's calculated SMSS may be smaller
2879                  * than what it can be.  This should be OK.
2880                  */
2881                 if (tcps->tcps_use_smss_as_mss_opt) {
2882                         u1 = tcp->tcp_mss;
2883                         /*
2884                          * Note that wptr points just past the MSS
2885                          * option value.
2886                          */
2887                         U16_TO_BE16(u1, wptr - 2);
2888                 }
2889 
2890                 /*
2891                  * tcp_snd_ts_ok can only be set in TCPS_SYN_RCVD
2892                  * when the peer also uses timestamps option.  And
2893                  * the TCP header template must have already been
2894                  * updated to include the timestamps option.
2895                  */
2896                 if (tcp->tcp_snd_sack_ok) {
2897                         if (tcp->tcp_snd_ts_ok) {
2898                                 uint8_t *tmp_wptr;
2899 
2900                                 /*
2901                                  * Use the NOP in the header just
2902                                  * before the timestamps option.
2903                                  */
2904                                 tmp_wptr = (uint8_t *)tcpha +
2905                                     TCP_MIN_HEADER_LENGTH;
2906                                 ASSERT(tmp_wptr[0] == TCPOPT_NOP &&
2907                                     tmp_wptr[1] == TCPOPT_NOP);
2908                                 tmp_wptr[0] = TCPOPT_SACK_PERMITTED;
2909                                 tmp_wptr[1] = TCPOPT_SACK_OK_LEN;
2910                         } else {
2911                                 add_sack = B_TRUE;
2912                         }
2913                 }
2914 
2916                 /*
2917                  * If the other side is ECN capable, reply
2918                  * that we are also ECN capable.
2919                  */
2920                 if (tcp->tcp_ecn_ok)
2921                         *flags |= TH_ECE;
2922                 break;
2923 
2924         default:
2925                 /*
2926                  * The above ASSERT() makes sure that this
2927                  * must be FIN-WAIT-1 state.  Our SYN has
2928                  * not been ack'ed so retransmit it.
2929                  */
2930                 *flags |= TH_SYN;
2931                 break;
2932         }
2933 
2934         if (add_sack) {
2935                 wptr[0] = TCPOPT_NOP;
2936                 wptr[1] = TCPOPT_NOP;
2937                 wptr[2] = TCPOPT_SACK_PERMITTED;
2938                 wptr[3] = TCPOPT_SACK_OK_LEN;
2939                 wptr += TCPOPT_REAL_SACK_OK_LEN;
2940                 tcpha->tha_offset_and_reserved += (1 << 4);
2941         }
2942 
2943         if (tcp->tcp_snd_ws_ok) {
2944                 wptr[0] =  TCPOPT_NOP;
2945                 wptr[1] =  TCPOPT_WSCALE;
2946                 wptr[2] =  TCPOPT_WS_LEN;
2947                 wptr[3] = (uchar_t)tcp->tcp_rcv_ws;
2948                 wptr += TCPOPT_REAL_WS_LEN;
2949                 tcpha->tha_offset_and_reserved += (1 << 4);
2950         }
2951 
2952         mp->b_wptr = wptr;
2953         u1 = (int)(mp->b_wptr - mp->b_rptr);
2954         /*
2955          * Get IP set to checksum on our behalf
2956          * Include the adjustment for a source route if any.
2957          */
2958         u1 += connp->conn_sum;
2959         u1 = (u1 >> 16) + (u1 & 0xFFFF);
2960         tcpha->tha_sum = htons(u1);
2961         TCPS_BUMP_MIB(tcps, tcpOutControl);
2962 }
2963 
2964 /*
2965  * Helper function for tcp_xmit_mp() in handling connection tear down
2966  * flag setting and state changes.
2967  */
2968 static void
2969 tcp_xmit_mp_aux_fss(tcp_t *tcp, ip_xmit_attr_t *ixa, uint_t *flags)
2970 {
2971         if (!tcp->tcp_fin_acked) {
2972                 *flags |= TH_FIN;
2973                 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutControl);
2974         }
2975         if (!tcp->tcp_fin_sent) {
2976                 tcp->tcp_fin_sent = B_TRUE;
2977                 switch (tcp->tcp_state) {
2978                 case TCPS_SYN_RCVD:
2979                         tcp->tcp_state = TCPS_FIN_WAIT_1;
2980                         DTRACE_TCP6(state__change, void, NULL,
2981                             ip_xmit_attr_t *, ixa, void, NULL,
2982                             tcp_t *, tcp, void, NULL,
2983                             int32_t, TCPS_SYN_RCVD);
2984                         break;
2985                 case TCPS_ESTABLISHED:
2986                         tcp->tcp_state = TCPS_FIN_WAIT_1;
2987                         DTRACE_TCP6(state__change, void, NULL,
2988                             ip_xmit_attr_t *, ixa, void, NULL,
2989                             tcp_t *, tcp, void, NULL,
2990                             int32_t, TCPS_ESTABLISHED);
2991                         break;
2992                 case TCPS_CLOSE_WAIT:
2993                         tcp->tcp_state = TCPS_LAST_ACK;
2994                         DTRACE_TCP6(state__change, void, NULL,
2995                             ip_xmit_attr_t *, ixa, void, NULL,
2996                             tcp_t *, tcp, void, NULL,
2997                             int32_t, TCPS_CLOSE_WAIT);
2998                         break;
2999                 }
3000                 if (tcp->tcp_suna == tcp->tcp_snxt)
3001                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3002                 tcp->tcp_snxt = tcp->tcp_fss + 1;
3003         }
3004 }
3005 
3006 /*
3007  * tcp_xmit_mp is called to return a pointer to an mblk chain complete with
3008  * ip and tcp header ready to pass down to IP.  If the mp passed in is
3009  * non-NULL, then up to max_to_send bytes of data will be dup'ed off that
3010  * mblk.  (If sendall is not set, the dup'ing will stop at an mblk
3011  * boundary; otherwise it will dup partial mblks.)
3012  * Otherwise, an appropriate ACK packet will be generated.  This
3013  * routine is not usually called to send new data for the first time.  It
3014  * is mostly called out of the timer for retransmits, and to generate ACKs.
3015  *
3016  * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
3017  * be adjusted by *offset.  And after dupb(), the offset and the ending mblk
3018  * of the original mblk chain will be returned in *offset and *end_mp.
3019  */
3020 mblk_t *
3021 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
3022     mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len,
3023     boolean_t rexmit)
3024 {
3025         int     data_length;
3026         int32_t off = 0;
3027         uint_t  flags;
3028         mblk_t  *mp1;
3029         mblk_t  *mp2;
3030         uchar_t *rptr;
3031         tcpha_t *tcpha;
3032         int32_t num_sack_blk = 0;
3033         int32_t sack_opt_len = 0;
3034         tcp_stack_t     *tcps = tcp->tcp_tcps;
3035         conn_t          *connp = tcp->tcp_connp;
3036         ip_xmit_attr_t  *ixa = connp->conn_ixa;
3037 
3038         /* Allocate for our maximum TCP header + link-level */
3039         mp1 = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
3040             BPRI_MED);
3041         if (mp1 == NULL)
3042                 return (NULL);
3043         data_length = 0;
3044 
3045         /*
3046          * Note that tcp_mss has been adjusted to take into account the
3047          * timestamp option if applicable.  Because SACK options do not
3048  * appear in every TCP segment and they are of variable length,
3049          * they cannot be included in tcp_mss.  Thus we need to calculate
3050          * the actual segment length when we need to send a segment which
3051          * includes SACK options.
3052          */
3053         if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
3054                 num_sack_blk = MIN(tcp->tcp_max_sack_blk,
3055                     tcp->tcp_num_sack_blk);
3056                 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
3057                     TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
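                     /*
                      * For example, two SACK blocks take 2 * 8 bytes of
                      * sequence edges plus two NOPs plus two bytes of
                      * kind/len, i.e. 20 bytes of options.
                      */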
3058                 if (max_to_send + sack_opt_len > tcp->tcp_mss)
3059                         max_to_send -= sack_opt_len;
3060         }
3061 
3062         if (offset != NULL) {
3063                 off = *offset;
3064                 /* We use offset as an indicator that end_mp is not NULL. */
3065                 *end_mp = NULL;
3066         }
3067         for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) {
3068                 /* This could be faster with cooperation from downstream */
3069                 if (mp2 != mp1 && !sendall &&
3070                     data_length + (int)(mp->b_wptr - mp->b_rptr) >
3071                     max_to_send)
3072                         /*
3073                          * Don't send the next mblk since the whole mblk
3074                          * does not fit.
3075                          */
3076                         break;
3077                 mp2->b_cont = dupb(mp);
3078                 mp2 = mp2->b_cont;
3079                 if (!mp2) {
3080                         freemsg(mp1);
3081                         return (NULL);
3082                 }
3083                 mp2->b_rptr += off;
3084                 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
3085                     (uintptr_t)INT_MAX);
3086 
3087                 data_length += (int)(mp2->b_wptr - mp2->b_rptr);
3088                 if (data_length > max_to_send) {
3089                         mp2->b_wptr -= data_length - max_to_send;
3090                         data_length = max_to_send;
3091                         off = mp2->b_wptr - mp->b_rptr;
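                             /*
                              * Leave off pointing at the first byte of mp
                              * that was not taken, so a later call can
                              * resume from there.
                              */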
3092                         break;
3093                 } else {
3094                         off = 0;
3095                 }
3096         }
3097         if (offset != NULL) {
3098                 *offset = off;
3099                 *end_mp = mp;
3100         }
3101         if (seg_len != NULL) {
3102                 *seg_len = data_length;
3103         }
3104 
3105         /* Update the latest receive window size in TCP header. */
3106         tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3107 
3108         rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
3109         mp1->b_rptr = rptr;
3110         mp1->b_wptr = rptr + connp->conn_ht_iphc_len + sack_opt_len;
3111         bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
3112         tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
3113         tcpha->tha_seq = htonl(seq);
3114 
3115         /*
3116          * Using tcp_unsent to decide whether to set the PUSH bit assumes
3117          * that this function was called from tcp_wput_data().  Thus, when
3118          * called to retransmit data, the setting of the PUSH bit may appear
3119          * somewhat random in that it might get set when it should not.  This
3120          * should not pose any performance issues.
3121          */
3122         if (data_length != 0 && (tcp->tcp_unsent == 0 ||
3123             tcp->tcp_unsent == data_length)) {
3124                 flags = TH_ACK | TH_PUSH;
3125         } else {
3126                 flags = TH_ACK;
3127         }
3128 
3129         if (tcp->tcp_ecn_ok) {
3130                 if (tcp->tcp_ecn_echo_on)
3131                         flags |= TH_ECE;
3132 
3133                 /*
3134                  * Only set ECT bit and ECN_CWR if a segment contains new data.
3135                  * There is no TCP flow control for non-data segments, and
3136          * only data segments are transmitted reliably.
3137                  */
3138                 if (data_length > 0 && !rexmit) {
3139                         TCP_SET_ECT(tcp, rptr);
3140                         if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3141                                 flags |= TH_CWR;
3142                                 tcp->tcp_ecn_cwr_sent = B_TRUE;
3143                         }
3144                 }
3145         }
3146 
3147         /* Check if there is any special processing needs to be done. */
3148         if (tcp->tcp_valid_bits) {
3149                 uint32_t u1;
3150 
3151                 /* We don't allow having SYN and FIN in the same segment... */
3152                 if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
3153                     seq == tcp->tcp_iss) {
3154                         /* Need to do connection set up processing. */
3155                         tcp_xmit_mp_aux_iss(tcp, connp, tcpha, mp1, &flags);
3156                 } else if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
3157                     (seq + data_length) == tcp->tcp_fss) {
3158                         /* Need to do connection tear down processing. */
3159                         tcp_xmit_mp_aux_fss(tcp, ixa, &flags);
3160                 }
3161 
3162                 /*
3163                  * Need to do urgent pointer processing.
3164                  *
3165                  * Note the trick here.  u1 is unsigned.  When tcp_urg
3166                  * is smaller than seq, u1 wraps around to a huge value,
3167                  * so the comparison will fail.  Also note that the urgent
3168                  * pointer (tcp_urp) should be positive; see RFC 793 page 17.
3169                  */
3170                 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION;
3171                 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 &&
3172                     u1 < (uint32_t)(64 * 1024)) {
3173                         flags |= TH_URG;
3174                         TCPS_BUMP_MIB(tcps, tcpOutUrg);
3175                         tcpha->tha_urp = htons(u1);
3176                 }
3177         }
3178         tcpha->tha_flags = (uchar_t)flags;
3179         tcp->tcp_rack = tcp->tcp_rnxt;
3180         tcp->tcp_rack_cnt = 0;
3181 
3182         /* Fill in the current value of timestamps option. */
3183         if (tcp->tcp_snd_ts_ok) {
3184                 if (tcp->tcp_state != TCPS_SYN_SENT) {
3185                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
3186 
3187                         U32_TO_BE32(llbolt,
3188                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
3189                         U32_TO_BE32(tcp->tcp_ts_recent,
3190                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
3191                 }
3192         }
3193 
3194         /* Fill in the SACK blocks. */
3195         if (num_sack_blk > 0) {
3196                 uchar_t *wptr = (uchar_t *)tcpha + connp->conn_ht_ulp_len;
3197                 sack_blk_t *tmp;
3198                 int32_t i;
3199 
3200                 wptr[0] = TCPOPT_NOP;
3201                 wptr[1] = TCPOPT_NOP;
3202                 wptr[2] = TCPOPT_SACK;
3203                 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3204                     sizeof (sack_blk_t);
3205                 wptr += TCPOPT_REAL_SACK_LEN;
3206 
3207                 tmp = tcp->tcp_sack_list;
3208                 for (i = 0; i < num_sack_blk; i++) {
3209                         U32_TO_BE32(tmp[i].begin, wptr);
3210                         wptr += sizeof (tcp_seq);
3211                         U32_TO_BE32(tmp[i].end, wptr);
3212                         wptr += sizeof (tcp_seq);
3213                 }
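                     /*
                      * Each SACK block contributes two 32-bit words (begin
                      * and end) plus, for all blocks together, one word of
                      * NOP/NOP/kind/len preamble.
                      */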
3214                 tcpha->tha_offset_and_reserved += ((num_sack_blk * 2 + 1) << 4);
3215         }
3216         ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX);
3217         data_length += (int)(mp1->b_wptr - rptr);
3218 
3219         ixa->ixa_pktlen = data_length;
3220 
3221         if (ixa->ixa_flags & IXAF_IS_IPV4) {
3222                 ((ipha_t *)rptr)->ipha_length = htons(data_length);
3223         } else {
3224                 ip6_t *ip6 = (ip6_t *)rptr;
3225 
3226                 ip6->ip6_plen = htons(data_length - IPV6_HDR_LEN);
3227         }
3228 
3229         /*
3230          * Prime pump for IP
3231          * Include the adjustment for a source route if any.
3232          */
3233         data_length -= ixa->ixa_ip_hdr_length;
3234         data_length += connp->conn_sum;
3235         data_length = (data_length >> 16) + (data_length & 0xFFFF);
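             /*
              * Only a partial pseudo-header sum is stored here (the ULP
              * length plus any source-route adjustment in conn_sum); the
              * IP output path or offload hardware adds the addresses and
              * produces the final one's-complement checksum.
              */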
3236         tcpha->tha_sum = htons(data_length);
3237         if (tcp->tcp_ip_forward_progress) {
3238                 tcp->tcp_ip_forward_progress = B_FALSE;
3239                 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
3240         } else {
3241                 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
3242         }
3243         return (mp1);
3244 }
3245 
3246 /*
3247  * If this routine returns B_TRUE, TCP can generate a RST in response
3248  * to a segment.  If it returns B_FALSE, TCP should not respond.
3249  */
3250 static boolean_t
3251 tcp_send_rst_chk(tcp_stack_t *tcps)
3252 {
3253         int64_t now;
3254 
3255         /*
3256          * TCP needs to protect itself from generating too many RSTs.
3257          * This can be a DoS attack by sending us random segments
3258          * soliciting RSTs.
3259          *
3260          * What we do here is to have a limit of tcp_rst_sent_rate RSTs
3261          * in each 1-second interval.  In this way, TCP still generates
3262          * RSTs in normal cases but when under attack, the impact is
3263          * limited.
3264          */
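             /*
              * For example, with tcps_rst_sent_rate at an assumed value
              * of 40, at most 40 RSTs are generated in any one-second
              * interval; further candidates within that interval are
              * silently suppressed.
              */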
3265         if (tcps->tcps_rst_sent_rate_enabled != 0) {
3266                 now = ddi_get_lbolt64();
3267                 if (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
3268                     1*SECONDS) {
3269                         tcps->tcps_last_rst_intrvl = now;
3270                         tcps->tcps_rst_cnt = 1;
3271                 } else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
3272                         return (B_FALSE);
3273                 }
3274         }
3275         return (B_TRUE);
3276 }
3277 
3278 /*
3279  * This function handles all retransmissions if SACK is enabled for this
3280  * connection.  First it calculates how many segments can be retransmitted
3281  * based on tcp_pipe.  Then it goes thru the notsack list to find eligible
3282  * segments.  A segment is eligible if sack_cnt for that segment is greater
3283  * than or equal to tcp_dupack_fast_retransmit.  After it has retransmitted
3284  * all eligible segments, it checks to see if TCP can send some new segments
3285  * (fast recovery).  If it can, set the appropriate flag for tcp_input_data().
3286  *
3287  * Parameters:
3288  *      tcp_t *tcp: the tcp structure of the connection.
3289  *      uint_t *flags: in return, appropriate value will be set for
3290  *      tcp_input_data().
3291  */
3292 void
3293 tcp_sack_rexmit(tcp_t *tcp, uint_t *flags)
3294 {
3295         notsack_blk_t   *notsack_blk;
3296         int32_t         usable_swnd;
3297         int32_t         mss;
3298         uint32_t        seg_len;
3299         mblk_t          *xmit_mp;
3300         tcp_stack_t     *tcps = tcp->tcp_tcps;
3301 
3302         ASSERT(tcp->tcp_notsack_list != NULL);
3303         ASSERT(tcp->tcp_rexmit == B_FALSE);
3304 
3305         /* Defensive coding in case there is a bug... */
3306         if (tcp->tcp_notsack_list == NULL) {
3307                 return;
3308         }
3309         notsack_blk = tcp->tcp_notsack_list;
3310         mss = tcp->tcp_mss;
3311 
3312         /*
3313          * Limit the num of outstanding data in the network to be
3314          * tcp_cwnd_ssthresh, which is half of the original congestion wnd.
3315          */
3316         usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3317 
3318         /* At least retransmit 1 MSS of data. */
3319         if (usable_swnd <= 0) {
3320                 usable_swnd = mss;
3321         }
3322 
3323         /* Make sure no new RTT samples will be taken. */
3324         tcp->tcp_csuna = tcp->tcp_snxt;
3325 
3326         notsack_blk = tcp->tcp_notsack_list;
3327         while (usable_swnd > 0) {
3328                 mblk_t          *snxt_mp, *tmp_mp;
3329                 tcp_seq         begin = tcp->tcp_sack_snxt;
3330                 tcp_seq         end;
3331                 int32_t         off;
3332 
3333                 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) {
3334                         if (SEQ_GT(notsack_blk->end, begin) &&
3335                             (notsack_blk->sack_cnt >=
3336                             tcps->tcps_dupack_fast_retransmit)) {
3337                                 end = notsack_blk->end;
3338                                 if (SEQ_LT(begin, notsack_blk->begin)) {
3339                                         begin = notsack_blk->begin;
3340                                 }
3341                                 break;
3342                         }
3343                 }
3344                 /*
3345                  * All holes are filled.  Manipulate tcp_cwnd to send more
3346                  * if we can.  Note that after the SACK recovery, tcp_cwnd is
3347                  * set to tcp_cwnd_ssthresh.
3348                  */
3349                 if (notsack_blk == NULL) {
3350                         usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3351                         if (usable_swnd <= 0 || tcp->tcp_unsent == 0) {
3352                                 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna;
3353                                 ASSERT(tcp->tcp_cwnd > 0);
3354                                 return;
3355                         } else {
3356                                 usable_swnd = usable_swnd / mss;
3357                                 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna +
3358                                     MAX(usable_swnd * mss, mss);
3359                                 *flags |= TH_XMIT_NEEDED;
3360                                 return;
3361                         }
3362                 }
3363 
3364                 /*
3365                  * Note that we may send more than usable_swnd allows here
3366                  * because of rounding, but no more than 1 MSS of data.
3367                  */
3368                 seg_len = end - begin;
3369                 if (seg_len > mss)
3370                         seg_len = mss;
3371                 snxt_mp = tcp_get_seg_mp(tcp, begin, &off);
3372                 ASSERT(snxt_mp != NULL);
3373                 /* This should not happen.  Defensive coding again... */
3374                 if (snxt_mp == NULL) {
3375                         return;
3376                 }
3377 
3378                 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
3379                     &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
3380                 if (xmit_mp == NULL)
3381                         return;
3382 
3383                 usable_swnd -= seg_len;
3384                 tcp->tcp_pipe += seg_len;
3385                 tcp->tcp_sack_snxt = begin + seg_len;
3386 
3387                 tcp_send_data(tcp, xmit_mp);
3388 
3389                 /*
3390                  * Update the send timestamp to avoid false retransmission.
3391                  */
3392                 snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3393 
3394                 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3395                 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, seg_len);
3396                 TCPS_BUMP_MIB(tcps, tcpOutSackRetransSegs);
3397                 /*
3398                  * Update tcp_rexmit_max to extend this SACK recovery phase.
3399                  * This happens when new data sent during fast recovery is
3400                  * also lost.  If TCP retransmits those new data, it needs
3401                  * to extend the SACK recovery phase to avoid starting another
3402                  * fast retransmit/recovery unnecessarily.
3403                  */
3404                 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
3405                         tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
3406                 }
3407         }
3408 }
3409 
3410 /*
3411  * tcp_ss_rexmit() is called to do slow start retransmission after a timeout
3412  * or ICMP errors.
3413  */
3414 void
3415 tcp_ss_rexmit(tcp_t *tcp)
3416 {
3417         uint32_t        snxt;
3418         uint32_t        smax;
3419         int32_t         win;
3420         int32_t         mss;
3421         int32_t         off;
3422         mblk_t          *snxt_mp;
3423         tcp_stack_t     *tcps = tcp->tcp_tcps;
3424 
3425         /*
3426          * Note that tcp_rexmit can be set even though TCP has retransmitted
3427          * all unack'ed segments.
3428          */
3429         if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
3430                 smax = tcp->tcp_rexmit_max;
3431                 snxt = tcp->tcp_rexmit_nxt;
3432                 if (SEQ_LT(snxt, tcp->tcp_suna)) {
3433                         snxt = tcp->tcp_suna;
3434                 }
3435                 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
3436                 win -= snxt - tcp->tcp_suna;
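                     /*
                      * win is what remains of the effective send window
                      * once the segments between suna and the retransmit
                      * point are counted as in flight.
                      */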
3437                 mss = tcp->tcp_mss;
3438                 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);
3439 
3440                 while (SEQ_LT(snxt, smax) && (win > 0) && (snxt_mp != NULL)) {
3441                         mblk_t  *xmit_mp;
3442                         mblk_t  *old_snxt_mp = snxt_mp;
3443                         uint32_t cnt = mss;
3444 
3445                         if (win < cnt) {
3446                                 cnt = win;
3447                         }
3448                         if (SEQ_GT(snxt + cnt, smax)) {
3449                                 cnt = smax - snxt;
3450                         }
3451                         xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
3452                             &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
3453                         if (xmit_mp == NULL)
3454                                 return;
3455 
3456                         tcp_send_data(tcp, xmit_mp);
3457 
3458                         snxt += cnt;
3459                         win -= cnt;
3460                         /*
3461                          * Update the send timestamp to avoid false
3462                          * retransmission.
3463                          */
3464                         old_snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3465                         TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3466                         TCPS_UPDATE_MIB(tcps, tcpRetransBytes, cnt);
3467 
3468                         tcp->tcp_rexmit_nxt = snxt;
3469                 }
3470                 /*
3471                  * If we have transmitted all we have at the time
3472                  * we started the retransmission, we can leave
3473                  * the rest of the job to tcp_wput_data().  But we
3474                  * need to check the send window first.  If the
3475                  * win is not 0, go on with tcp_wput_data().
3476                  */
3477                 if (SEQ_LT(snxt, smax) || win == 0) {
3478                         return;
3479                 }
3480         }
3481         /* Only call tcp_wput_data() if there is data to be sent. */
3482         if (tcp->tcp_unsent) {
3483                 tcp_wput_data(tcp, NULL, B_FALSE);
3484         }
3485 }
3486 
3487 /*
3488  * Do slow start retransmission after ICMP errors due to PMTU changes.
3489  */
3490 void
3491 tcp_rexmit_after_error(tcp_t *tcp)
3492 {
3493         /*
3494          * If all sent data has been acknowledged, or there is no data left
3495          * to send, just return.
3496          */
3497         if (!SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) ||
3498             (tcp->tcp_xmit_head == NULL))
3499                 return;
3500 
3501         if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && (tcp->tcp_unsent == 0))
3502                 tcp->tcp_rexmit_max = tcp->tcp_fss;
3503         else
3504                 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3505 
3506         tcp->tcp_rexmit_nxt = tcp->tcp_suna;
3507         tcp->tcp_rexmit = B_TRUE;
3508         tcp->tcp_dupack_cnt = 0;
3509         tcp_ss_rexmit(tcp);
3510 }
3511 
3512 /*
3513  * tcp_get_seg_mp() is called to get the pointer to a segment in the
3514  * send queue which starts at the given sequence number. If the given
3515  * sequence number is equal to the last valid sequence number (tcp_snxt), the
3516  * returned mblk is the last valid mblk, and off is set to the length of
3517  * that mblk.
3518  *
3521  * Parameters:
3522  *      tcp_t *tcp: the tcp instance pointer.
3523  *      uint32_t seq: the starting seq. no of the requested segment.
3524  *      int32_t *off: after the execution, *off will be the offset to
3525  *              the returned mblk which points to the requested seq no.
3526  *              It is the caller's responsibility to send in a non-null off.
3527  *
3528  * Return:
3529  *      A mblk_t pointer pointing to the requested segment in send queue.
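      *
      *      For example (hypothetical numbers): with tcp_suna == 1000 and
      *      two 500-byte mblks on the send queue, seq == 1200 returns the
      *      first mblk with *off == 200, while seq == 1600 returns the
      *      second mblk with *off == 100.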
3530  */
3531 static mblk_t *
3532 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off)
3533 {
3534         int32_t cnt;
3535         mblk_t  *mp;
3536 
3537         /* Defensive coding.  Make sure we don't send incorrect data. */
3538         if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GT(seq, tcp->tcp_snxt))
3539                 return (NULL);
3540 
3541         cnt = seq - tcp->tcp_suna;
3542         mp = tcp->tcp_xmit_head;
3543         while (cnt > 0 && mp != NULL) {
3544                 cnt -= mp->b_wptr - mp->b_rptr;
3545                 if (cnt <= 0) {
3546                         cnt += mp->b_wptr - mp->b_rptr;
3547                         break;
3548                 }
3549                 mp = mp->b_cont;
3550         }
3551         ASSERT(mp != NULL);
3552         *off = cnt;
3553         return (mp);
3554 }
3555 
3556 /*
3557  * This routine adjusts next-to-send sequence number variables, in the
3558  * case where the receiver has shrunk its window.
3559  */
3560 void
3561 tcp_update_xmit_tail(tcp_t *tcp, uint32_t snxt)
3562 {
3563         mblk_t *xmit_tail;
3564         int32_t offset;
3565 
3566         tcp->tcp_snxt = snxt;
3567 
3568         /* Get the mblk, and the offset in it, as per the shrunk window */
3569         xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
3570         ASSERT(xmit_tail != NULL);
3571         tcp->tcp_xmit_tail = xmit_tail;
3572         tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr -
3573             xmit_tail->b_rptr - offset;
3574 }
3575 
3576 /*
3577  * This handles the case when the receiver has shrunk its window.  Per
3578  * RFC 1122, if the receiver shrinks the window, i.e. moves the right
3579  * edge to the left, we should not send new data, but should retransmit
3580  * the old unacked data between suna and suna + swnd normally.  We might
3581  * have sent data that is now outside the new window; pretend we didn't.
3582  */
3583 static void
3584 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
3585 {
3586         uint32_t        snxt = tcp->tcp_snxt;
3587 
3588         ASSERT(shrunk_count > 0);
3589 
3590         if (!tcp->tcp_is_wnd_shrnk) {
3591                 tcp->tcp_snxt_shrunk = snxt;
3592                 tcp->tcp_is_wnd_shrnk = B_TRUE;
3593         } else if (SEQ_GT(snxt, tcp->tcp_snxt_shrunk)) {
3594                 tcp->tcp_snxt_shrunk = snxt;
3595         }
3596 
3597         /* Pretend we didn't send the data outside the window */
3598         snxt -= shrunk_count;
3599 
3600         /* Reset all the values per the now shrunk window */
3601         tcp_update_xmit_tail(tcp, snxt);
3602         tcp->tcp_unsent += shrunk_count;
3603 
3604         /*
3605          * If the SACK option is set, delete the entire list of
3606          * notsack'ed blocks.
3607          */
3608         TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
3609 
3610         if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
3611                 /*
3612                  * Make sure the timer is running so that we will probe a zero
3613                  * window.
3614                  */
3615                 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3616 }
3617 
3618 /*
3619  * tcp_fill_header is called by tcp_send() to fill the outgoing TCP header
3620  * with the template header, as well as other options such as time-stamp,
3621  * ECN and/or SACK.
3622  */
3623 static void
3624 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, int num_sack_blk)
3625 {
3626         tcpha_t *tcp_tmpl, *tcpha;
3627         uint32_t *dst, *src;
3628         int hdrlen;
3629         conn_t *connp = tcp->tcp_connp;
3630 
3631         ASSERT(OK_32PTR(rptr));
3632 
3633         /* Template header */
3634         tcp_tmpl = tcp->tcp_tcpha;
3635 
3636         /* Header of outgoing packet */
3637         tcpha = (tcpha_t *)(rptr + connp->conn_ixa->ixa_ip_hdr_length);
3638 
3639         /* dst and src are opaque 32-bit fields, used for copying */
3640         dst = (uint32_t *)rptr;
3641         src = (uint32_t *)connp->conn_ht_iphc;
3642         hdrlen = connp->conn_ht_iphc_len;
3643 
3644         /* Fill time-stamp option if needed */
3645         if (tcp->tcp_snd_ts_ok) {
3646                 U32_TO_BE32(LBOLT_FASTPATH,
3647                     (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4);
3648                 U32_TO_BE32(tcp->tcp_ts_recent,
3649                     (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8);
3650         } else {
3651                 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
3652         }
3653 
3654         /*
3655          * Copy the template header; is this really more efficient than
3656          * calling bcopy()?  For simple IPv4/TCP, it may be the case,
3657          * but perhaps not for other scenarios.
3658          */
3659         dst[0] = src[0];
3660         dst[1] = src[1];
3661         dst[2] = src[2];
3662         dst[3] = src[3];
3663         dst[4] = src[4];
3664         dst[5] = src[5];
3665         dst[6] = src[6];
3666         dst[7] = src[7];
3667         dst[8] = src[8];
3668         dst[9] = src[9];
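             /*
              * Ten words cover the common 40-byte case: a minimal IPv4
              * header plus a minimal TCP header (or an IPv6 base header
              * alone).  Any remaining header bytes (TCP options, or the
              * TCP header itself in the IPv6 case) are copied by the word
              * loop below; note that the "if" both subtracts 40 and tests
              * the remainder.
              */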
3669         if (hdrlen -= 40) {
3670                 hdrlen >>= 2;
3671                 dst += 10;
3672                 src += 10;
3673                 do {
3674                         *dst++ = *src++;
3675                 } while (--hdrlen);
3676         }
3677 
3678         /*
3679          * Set the ECN info in the TCP header if it is not a zero
3680          * window probe.  Zero window probe is only sent in
3681          * tcp_wput_data() and tcp_timer().
3682          */
3683         if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
3684                 TCP_SET_ECT(tcp, rptr);
3685 
3686                 if (tcp->tcp_ecn_echo_on)
3687                         tcpha->tha_flags |= TH_ECE;
3688                 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3689                         tcpha->tha_flags |= TH_CWR;
3690                         tcp->tcp_ecn_cwr_sent = B_TRUE;
3691                 }
3692         }
3693 
3694         /* Fill in SACK options */
3695         if (num_sack_blk > 0) {
3696                 uchar_t *wptr = rptr + connp->conn_ht_iphc_len;
3697                 sack_blk_t *tmp;
3698                 int32_t i;
3699 
3700                 wptr[0] = TCPOPT_NOP;
3701                 wptr[1] = TCPOPT_NOP;
3702                 wptr[2] = TCPOPT_SACK;
3703                 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3704                     sizeof (sack_blk_t);
3705                 wptr += TCPOPT_REAL_SACK_LEN;
3706 
3707                 tmp = tcp->tcp_sack_list;
3708                 for (i = 0; i < num_sack_blk; i++) {
3709                         U32_TO_BE32(tmp[i].begin, wptr);
3710                         wptr += sizeof (tcp_seq);
3711                         U32_TO_BE32(tmp[i].end, wptr);
3712                         wptr += sizeof (tcp_seq);
3713                 }
3714                 tcpha->tha_offset_and_reserved +=
3715                     ((num_sack_blk * 2 + 1) << 4);
3716         }
3717 }