1256 tcp->tcp_snxt = snxt + len;
1257 tcp->tcp_rack = tcp->tcp_rnxt;
1258
1259 if ((mp1 = dupb(mp)) == NULL)
1260 goto no_memory;
1261 mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
1262 mp->b_next = (mblk_t *)(uintptr_t)snxt;
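/*
 * For data on the transmit list, b_prev and b_next are not used as
 * queue linkage: b_prev is overloaded to hold the transmit timestamp
 * and b_next the starting sequence number of the mblk, which the RTT
 * estimation and retransmission paths read back later.
 */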
1263
1264 /* adjust tcp header information */
1265 tcpha = tcp->tcp_tcpha;
1266 tcpha->tha_flags = (TH_ACK|TH_PUSH);
1267
1268 sum = len + connp->conn_ht_ulp_len + connp->conn_sum;
1269 sum = (sum >> 16) + (sum & 0xFFFF);
1270 tcpha->tha_sum = htons(sum);
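/*
 * The TCP length plus the precomputed pseudo-header sum (conn_sum)
 * is folded to 16 bits and stored as a partial checksum; the sum
 * over the payload itself is completed later, in software or by
 * hardware checksum offload.  A hypothetical helper showing the
 * fold (csum_fold is not part of this file):
 *
 *	uint16_t
 *	csum_fold(uint32_t sum)
 *	{
 *		sum = (sum >> 16) + (sum & 0xffff);
 *		return ((uint16_t)sum);
 *	}
 *
 * For example, 0x1A2B3 folds to 0x1 + 0xA2B3 = 0xA2B4.
 */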
1271
1272 tcpha->tha_seq = htonl(snxt);
1273
1274 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1275 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1276 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1277 tcp->tcp_cs.tcp_out_data_segs++;
1278 tcp->tcp_cs.tcp_out_data_bytes += len;
1279
1280 /* Update the latest receive window size in TCP header. */
1281 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
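/*
 * The advertised window is shifted down by the receive window scale
 * factor negotiated at connection setup; e.g. with tcp_rwnd at
 * 1048576 and tcp_rcv_ws at 5, the header carries 1048576 >> 5 =
 * 32768, and the peer scales it back up by the same factor.
 */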
1282
1283 tcp->tcp_last_sent_len = (ushort_t)len;
1284
1285 plen = len + connp->conn_ht_iphc_len;
1286
1287 ixa = connp->conn_ixa;
1288 ixa->ixa_pktlen = plen;
1289
1290 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1291 tcp->tcp_ipha->ipha_length = htons(plen);
1292 } else {
1293 tcp->tcp_ip6h->ip6_plen = htons(plen - IPV6_HDR_LEN);
1294 }
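/*
 * Note the differing semantics: the IPv4 total length includes the
 * IP header, while the IPv6 payload length excludes the fixed
 * 40-byte IPv6 header, hence the IPV6_HDR_LEN adjustment.
 */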
1295
1296 /* see if we need to allocate an mblk for the headers */
1297 hdrlen = connp->conn_ht_iphc_len;
1298 rptr = mp1->b_rptr - hdrlen;
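/*
 * rptr points at where the ip/tcp header template (conn_ht_iphc_len
 * bytes) would begin if prepended in mp1's existing headroom; if
 * that headroom is too small or shared, a separate header mblk is
 * allocated instead.
 */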
1945 }
1946 } else
1947 (*xmit_tail)->b_rptr = prev_rptr;
1948
1949 if (mp == NULL) {
1950 return (-1);
1951 }
1952 mp1 = mp->b_cont;
1953
1954 if (len <= mss) /* LSO is unusable (!do_lso_send) */
1955 tcp->tcp_last_sent_len = (ushort_t)len;
1956 while (mp1->b_cont) {
1957 *xmit_tail = (*xmit_tail)->b_cont;
1958 (*xmit_tail)->b_prev = local_time;
1959 (*xmit_tail)->b_next =
1960 (mblk_t *)(uintptr_t)(*snxt);
1961 mp1 = mp1->b_cont;
1962 }
1963 *snxt += len;
1964 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr;
1965 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1966 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1967 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1968 tcp->tcp_cs.tcp_out_data_segs++;
1969 tcp->tcp_cs.tcp_out_data_bytes += len;
1970 tcp_send_data(tcp, mp);
1971 continue;
1972 }
1973
1974 *snxt += len; /* Adjust later if we don't send all of len */
1975 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1976 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1977 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1978 tcp->tcp_cs.tcp_out_data_segs++;
1979 tcp->tcp_cs.tcp_out_data_bytes += len;
1980
1981 if (*tail_unsent) {
1982 /* Are the bytes above us in flight? */
1983 rptr = (*xmit_tail)->b_wptr - *tail_unsent;
1984 if (rptr != (*xmit_tail)->b_rptr) {
1985 *tail_unsent -= len;
1986 if (len <= mss) /* LSO is unusable */
1987 tcp->tcp_last_sent_len = (ushort_t)len;
1988 len += total_hdr_len;
1989 ixa->ixa_pktlen = len;
1990
1991 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1992 tcp->tcp_ipha->ipha_length = htons(len);
1993 } else {
1994 tcp->tcp_ip6h->ip6_plen =
1995 htons(len - IPV6_HDR_LEN);
1996 }
1997
1998 mp = dupb(*xmit_tail);
1999 if (mp == NULL) {
2135 if (mp1 == NULL) {
2136 *tail_unsent = spill;
2137 freemsg(mp);
2138 return (-1); /* out_of_mem */
2139 }
2140 }
2141
2142 /* Trim back any surplus on the last mblk */
2143 if (spill >= 0) {
2144 mp1->b_wptr -= spill;
2145 *tail_unsent = spill;
2146 } else {
2147 /*
2148 * We did not send everything we could in
2149 * order to remain within the b_cont limit.
2150 */
2151 *usable -= spill;
2152 *snxt += spill;
2153 tcp->tcp_last_sent_len += spill;
2154 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, spill);
2155 tcp->tcp_cs.tcp_out_data_bytes += spill;
2156 /*
2157 * Adjust the checksum
2158 */
2159 tcpha = (tcpha_t *)(rptr +
2160 ixa->ixa_ip_hdr_length);
2161 sum += spill;
2162 sum = (sum >> 16) + (sum & 0xFFFF);
2163 tcpha->tha_sum = htons(sum);
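/*
 * The adjustment above works incrementally: `sum` still holds the
 * unfolded partial sum computed when the header was built, so
 * adding spill (negative here) corrects the TCP-length term and
 * refolding yields the new partial checksum without walking the
 * payload again.
 */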
2164 if (connp->conn_ipversion == IPV4_VERSION) {
2165 sum = ntohs(
2166 ((ipha_t *)rptr)->ipha_length) +
2167 spill;
2168 ((ipha_t *)rptr)->ipha_length =
2169 htons(sum);
2170 } else {
2171 sum = ntohs(
2172 ((ip6_t *)rptr)->ip6_plen) +
2173 spill;
2174 ((ip6_t *)rptr)->ip6_plen =
2175 htons(sum);
2184 } else {
2185 ixa->ixa_flags &= ~IXAF_REACH_CONF;
2186 }
2187
2188 if (do_lso_send) {
2189 /* Append LSO information to the mp. */
2190 lso_info_set(mp, mss, HW_LSO);
2191 ixa->ixa_fragsize = IP_MAXPACKET;
2192 ixa->ixa_extra_ident = num_lso_seg - 1;
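/*
 * For LSO a single oversized packet is handed down and the NIC
 * slices it into mss-sized segments.  ixa_fragsize is raised to
 * IP_MAXPACKET so IP will not fragment it, and ixa_extra_ident
 * reserves num_lso_seg - 1 additional IPv4 ident values so each
 * hardware-generated segment gets a unique ID; e.g. a 14600-byte
 * send with an mss of 1460 yields 10 segments and reserves 9
 * extra idents.
 */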
2193
2194 DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg,
2195 boolean_t, B_TRUE);
2196
2197 tcp_send_data(tcp, mp);
2198
2199 /*
2200  * Restore ixa_fragsize (to the path MTU) and ixa_extra_ident for later non-LSO sends.
2201  */
2202 ixa->ixa_fragsize = ixa->ixa_pmtu;
2203 ixa->ixa_extra_ident = 0;
2204 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2205 TCP_STAT(tcps, tcp_lso_times);
2206 TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
2207 } else {
2208 /*
2209 * Make sure to clean up LSO information. Wherever a
2210 * new mp uses the prepended header room after dupb(),
2211 * lso_info_cleanup() should be called.
2212 */
2213 lso_info_cleanup(mp);
2214 tcp_send_data(tcp, mp);
2215 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2216 }
2217 }
2218
2219 return (0);
2220 }
2221
2222 /*
2223  * Initiate the closedown sequence on an active connection.  (May be
2224  * called as writer.)  Returns zero on success, non-zero on error.
2225  */
2226 static int
2227 tcp_xmit_end(tcp_t *tcp)
2228 {
2229 mblk_t *mp;
2230 tcp_stack_t *tcps = tcp->tcp_tcps;
2231 iulp_t uinfo;
2232 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
2233 conn_t *connp = tcp->tcp_connp;
2234
2235 if (tcp->tcp_state < TCPS_SYN_RCVD ||
2411 }
2412 }
2413 if (ctl & TH_ACK) {
2414 if (tcp->tcp_snd_ts_ok) {
2415 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2416
2417 U32_TO_BE32(llbolt,
2418 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2419 U32_TO_BE32(tcp->tcp_ts_recent,
2420 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2421 }
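/*
 * The two stores above assume the standard <nop,nop,TSopt> layout
 * in the header template, which places TSval 4 bytes and TSecr 8
 * bytes past the 20-byte minimal TCP header.
 */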
2422
2423 /* Update the latest receive window size in TCP header. */
2424 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2425 /* Track what we sent to the peer */
2426 tcp->tcp_tcpha->tha_win = tcpha->tha_win;
2427 tcp->tcp_rack = ack;
2428 tcp->tcp_rack_cnt = 0;
2429 TCPS_BUMP_MIB(tcps, tcpOutAck);
2430 }
2431 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2432 tcpha->tha_seq = htonl(seq);
2433 tcpha->tha_ack = htonl(ack);
2434 /*
2435 * Include the adjustment for a source route if any.
2436 */
2437 sum = (sum >> 16) + (sum & 0xFFFF);
2438 tcpha->tha_sum = htons(sum);
2439 tcp_send_data(tcp, mp);
2440 }
2441
2442 /*
2443  * Generate a reset based on an inbound packet; connp is set by the
2444  * caller when the RST is in response to an unexpected inbound packet
2445  * for which there is active tcp state in the system.
2446  *
2447  * IPSEC NOTE: Try to send the reply with the same protection as the
2448  * packet came in with.  We have the ip_recv_attr_t, which is reversed
2449  * to form the ip_xmit_attr_t, so the reply will go out at the same
2450  * level of protection as the inbound packet.
2451  */
3385
3386 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
3387 &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
3388 if (xmit_mp == NULL)
3389 return;
3390
3391 usable_swnd -= seg_len;
3392 tcp->tcp_pipe += seg_len;
3393 tcp->tcp_sack_snxt = begin + seg_len;
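/*
 * tcp_pipe is the SACK sender's estimate of the bytes outstanding
 * in the network (the "pipe" of conservative SACK-based loss
 * recovery); each retransmitted chunk inflates it, and
 * tcp_sack_snxt records where the next SACK-driven retransmission
 * resumes.
 */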
3394
3395 tcp_send_data(tcp, xmit_mp);
3396
3397 /*
3398 * Update the send timestamp to avoid false retransmission.
3399 */
3400 snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3401
3402 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3403 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, seg_len);
3404 TCPS_BUMP_MIB(tcps, tcpOutSackRetransSegs);
3405 tcp->tcp_cs.tcp_out_retrans_segs++;
3406 tcp->tcp_cs.tcp_out_retrans_bytes += seg_len;
3407 /*
3408  * Update tcp_rexmit_max to extend this SACK recovery phase.
3409  * This happens when new data sent during fast recovery is
3410  * also lost.  If TCP retransmits that new data, it needs to
3411  * extend the SACK recovery phase to avoid starting another
3412  * fast retransmit/recovery unnecessarily.
3413  */
3414 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
3415 tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
3416 }
3417 }
3418 }
3419
3420 /*
3421  * tcp_ss_rexmit() is called to do slow-start retransmission after a
3422  * timeout or an ICMP error.
3423  */
3424 void
3425 tcp_ss_rexmit(tcp_t *tcp)
3426 {
3457 }
3458 if (SEQ_GT(snxt + cnt, smax)) {
3459 cnt = smax - snxt;
3460 }
3461 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
3462 &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
3463 if (xmit_mp == NULL)
3464 return;
3465
3466 tcp_send_data(tcp, xmit_mp);
3467
3468 snxt += cnt;
3469 win -= cnt;
3470 /*
3471 * Update the send timestamp to avoid false
3472 * retransmission.
3473 */
3474 old_snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3475 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3476 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, cnt);
3477 tcp->tcp_cs.tcp_out_retrans_segs++;
3478 tcp->tcp_cs.tcp_out_retrans_bytes += cnt;
3479
3480 tcp->tcp_rexmit_nxt = snxt;
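/*
 * tcp_rexmit_nxt records how far this slow-start retransmission
 * has progressed; the surrounding loop keeps resending from here
 * until it reaches tcp_rexmit_max or runs out of send window.
 */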
3481 }
3482 /*
3483  * If we have transmitted everything we had at the time
3484  * the retransmission started, we can leave the rest of
3485  * the job to tcp_wput_data().  But we need to check the
3486  * send window first: if it is not 0, go on with
3487  * tcp_wput_data().
3488  */
3489 if (SEQ_LT(snxt, smax) || win == 0) {
3490 return;
3491 }
3492 }
3493 /* Only call tcp_wput_data() if there is data to be sent. */
3494 if (tcp->tcp_unsent) {
3495 tcp_wput_data(tcp, NULL, B_FALSE);
3496 }
3497 }
3498