/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 */

/* This file contains all TCP input processing functions. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>

/*
 * RFC7323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
    (TCPOPT_TSTAMP << 8) | 10)
#else
#define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
    (TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif
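
/*
 * With TCPOPT_NOP == 1 and TCPOPT_TSTAMP == 8, both variants above encode
 * the wire byte sequence 01 01 08 0a (NOP, NOP, kind 8, length 10) as a
 * single native-endian 32-bit word, which is what lets tcp_parse_options()
 * recognize the common "timestamps only" layout with one aligned word
 * compare.
 */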

/*
 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days.
 */
#define PAWS_TIMEOUT ((clock_t)(24*24*60*60*hz))

/*
 * Since tcp_listener is not cleared atomically with tcp_detached
 * being cleared, we need this extra bit to tell a detached connection
 * apart from one that is in the process of being accepted.
 */
#define TCP_IS_DETACHED_NONEAGER(tcp) \
    (TCP_IS_DETACHED(tcp) && \
    (!(tcp)->tcp_hard_binding))

/*
 * Steps to do when a tcp_t moves to TIME-WAIT state.
 *
 * This connection is done, we don't need to account for it. Decrement
 * the listener connection counter if needed.
 *
 * Decrement the connection counter of the stack. Note that this counter
 * is per CPU. So the total number of connections in a stack is the sum of all
 * of them. Since there is no lock for handling all of them exclusively, the
 * resulting sum is only an approximation.
 *
 * Unconditionally clear the exclusive binding bit so this TIME-WAIT
 * connection won't interfere with new ones.
 *
 * Start the TIME-WAIT timer. If the upper layer has not closed the
 * connection, the timer is handled within the context of this tcp_t. When
 * the timer fires, tcp_clean_death() is called. If the upper layer closes
 * the connection during this period, tcp_time_wait_append() will be called
 * to add this tcp_t to the global TIME-WAIT list. Note that this means that
 * the actual wait time in TIME-WAIT state will be longer than the
 * tcps_time_wait_interval, since the period before the upper layer closes
 * the connection is not accounted for when tcp_time_wait_append() is called.
 *
 * If the upper layer has closed the connection, call tcp_time_wait_append()
 * directly.
 */
#define SET_TIME_WAIT(tcps, tcp, connp) \
{ \
    (tcp)->tcp_state = TCPS_TIME_WAIT; \
    if ((tcp)->tcp_listen_cnt != NULL) \
        TCP_DECR_LISTEN_CNT(tcp); \
    atomic_dec_64( \
        (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \
    (connp)->conn_exclbind = 0; \
    if (!TCP_IS_DETACHED(tcp)) { \
        TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \
    } else { \
        tcp_time_wait_append(tcp); \
        TCP_DBGSTAT(tcps, tcp_rput_time_wait); \
    } \
}

/*
 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
 * data, TCP will not respond with an ACK. RFC 793 requires that
 * TCP respond with an ACK for such a bogus ACK. By not following
 * the RFC, we prevent TCP from getting into an ACK storm if somehow
 * an attacker successfully spoofs an acceptable segment to our
 * peer; or when our peer is "confused."
 */
static uint32_t tcp_drop_ack_unsent_cnt = 10;

/*
 * To protect TCP against an attacker using a small window and requesting
 * a large amount of data (a DoS attack by consuming memory), TCP checks the
 * window advertised in the last ACK of the 3-way handshake. TCP uses
 * the tcp_mss (the size of one packet) value for comparison. The window
 * should be larger than tcp_mss. But while a sane TCP should advertise
 * a receive window larger than or equal to 4*MSS to avoid stop and go
 * traffic, not all TCP stacks do that. This is especially true when
 * tcp_mss is a big value.
 *
 * To work around this issue, an additional fixed value for comparison
 * is also used. If the advertised window is smaller than both tcp_mss
 * and tcp_init_wnd_chk, the ACK is considered invalid. So for a large
 * tcp_mss value (say, 8K), a window larger than tcp_init_wnd_chk but
 * smaller than 8K is still considered to be OK.
 */
static uint32_t tcp_init_wnd_chk = 4096;
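
/*
 * Example: with tcp_mss = 8192, a final handshake ACK advertising a
 * 6000 byte window is still accepted, because 6000 >= tcp_init_wnd_chk
 * even though 6000 < tcp_mss. A window of, say, 512 bytes fails both
 * checks and the ACK is treated as invalid.
 */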

/* Process ICMP source quench message or not. */
static boolean_t tcp_icmp_source_quench = B_FALSE;

static boolean_t tcp_outbound_squeue_switch = B_FALSE;

static mblk_t *tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *,
    ip_recv_attr_t *);
static mblk_t *tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *,
    ip_recv_attr_t *);
static boolean_t tcp_drop_q0(tcp_t *);
static void tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
static mblk_t *tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *,
    ip_recv_attr_t *);
static void tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *);
static void tcp_process_options(tcp_t *, tcpha_t *);
static mblk_t *tcp_reass(tcp_t *, mblk_t *, uint32_t);
static void tcp_reass_elim_overlap(tcp_t *, mblk_t *);
static void tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void tcp_set_rto(tcp_t *, time_t);
static void tcp_setcred_data(mblk_t *, ip_recv_attr_t *);

/*
 * Set the MSS associated with a particular tcp based on its current value,
 * and a new one passed in. Observe minimums and maximums, and reset other
 * state variables that we want to view as multiples of MSS.
 *
 * The value of MSS could be either increased or decreased.
 */
void
tcp_mss_set(tcp_t *tcp, uint32_t mss)
{
    uint32_t mss_max;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    conn_t *connp = tcp->tcp_connp;

    if (connp->conn_ipversion == IPV4_VERSION)
        mss_max = tcps->tcps_mss_max_ipv4;
    else
        mss_max = tcps->tcps_mss_max_ipv6;

    if (mss < tcps->tcps_mss_min)
        mss = tcps->tcps_mss_min;
    if (mss > mss_max)
        mss = mss_max;
    /*
     * Unless naglim has been set by our client to
     * a non-mss value, force naglim to track mss.
     * This can help to aggregate small writes.
     */
    if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
        tcp->tcp_naglim = mss;
    /*
     * TCP should be able to buffer at least 4 MSS of data for obvious
     * performance reasons.
     */
    if ((mss << 2) > connp->conn_sndbuf)
        connp->conn_sndbuf = mss << 2;

    /*
     * Set the send lowater to at least twice the MSS.
     */
    if ((mss << 1) > connp->conn_sndlowat)
        connp->conn_sndlowat = mss << 1;

    /*
     * Update tcp_cwnd according to the new value of MSS. Keep the
     * previous ratio to preserve the transmit rate.
     */
    tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
    tcp->tcp_cwnd_cnt = 0;

    tcp->tcp_mss = mss;
    (void) tcp_maxpsz_set(tcp, B_TRUE);
}
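
/*
 * Example of the cwnd rescaling above: if tcp_cwnd was 8 * 1460 and the
 * MSS drops from 1460 to 1400, tcp_cwnd becomes 8 * 1400. The window is
 * preserved as a segment count rather than a byte count, so the connection
 * can still keep 8 segments in flight at the new segment size.
 */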

/*
 * Extract option values from a tcp header. We put any found values into the
 * tcpopt struct and return a bitmask saying which options were found.
 */
int
tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
{
    uchar_t *endp;
    int len;
    uint32_t mss;
    uchar_t *up = (uchar_t *)tcpha;
    int found = 0;
    int32_t sack_len;
    tcp_seq sack_begin, sack_end;
    tcp_t *tcp;

    endp = up + TCP_HDR_LENGTH(tcpha);
    up += TCP_MIN_HEADER_LENGTH;
    /*
     * If the timestamp option is aligned as recommended in RFC 7323
     * Appendix A, and is the only option, return quickly.
     */
    if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
        TCPOPT_REAL_TS_LEN &&
        OK_32PTR(up) &&
        *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
        tcpopt->tcp_opt_ts_val = ABE32_TO_U32((up+4));
        tcpopt->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));

        return (TCP_OPT_TSTAMP_PRESENT);
    }
    while (up < endp) {
        len = endp - up;
        switch (*up) {
        case TCPOPT_EOL:
            break;

        case TCPOPT_NOP:
            up++;
            continue;

        case TCPOPT_MAXSEG:
            if (len < TCPOPT_MAXSEG_LEN ||
                up[1] != TCPOPT_MAXSEG_LEN)
                break;

            mss = BE16_TO_U16(up+2);
            /* Caller must handle tcp_mss_min and tcp_mss_max_* */
            tcpopt->tcp_opt_mss = mss;
            found |= TCP_OPT_MSS_PRESENT;

            up += TCPOPT_MAXSEG_LEN;
            continue;

        case TCPOPT_WSCALE:
            if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
                break;

            if (up[2] > TCP_MAX_WINSHIFT)
                tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
            else
                tcpopt->tcp_opt_wscale = up[2];
            found |= TCP_OPT_WSCALE_PRESENT;

            up += TCPOPT_WS_LEN;
            continue;

        case TCPOPT_SACK_PERMITTED:
            if (len < TCPOPT_SACK_OK_LEN ||
                up[1] != TCPOPT_SACK_OK_LEN)
                break;
            found |= TCP_OPT_SACK_OK_PRESENT;
            up += TCPOPT_SACK_OK_LEN;
            continue;

        case TCPOPT_SACK:
            if (len <= 2 || up[1] <= 2 || len < up[1])
                break;

            /* If TCP is not interested in SACK blks... */
            if ((tcp = tcpopt->tcp) == NULL) {
                up += up[1];
                continue;
            }
            sack_len = up[1] - TCPOPT_HEADER_LEN;
            up += TCPOPT_HEADER_LEN;

            /*
             * If the list is empty, allocate one and assume
             * nothing is sack'ed.
             */
            if (tcp->tcp_notsack_list == NULL) {
                tcp_notsack_update(&(tcp->tcp_notsack_list),
                    tcp->tcp_suna, tcp->tcp_snxt,
                    &(tcp->tcp_num_notsack_blk),
                    &(tcp->tcp_cnt_notsack_list));

                /*
                 * Make sure tcp_notsack_list is not NULL.
                 * This happens when kmem_alloc(KM_NOSLEEP)
                 * returns NULL.
                 */
                if (tcp->tcp_notsack_list == NULL) {
                    up += sack_len;
                    continue;
                }
                tcp->tcp_fack = tcp->tcp_suna;
            }

            while (sack_len > 0) {
                if (up + 8 > endp) {
                    up = endp;
                    break;
                }
                sack_begin = BE32_TO_U32(up);
                up += 4;
                sack_end = BE32_TO_U32(up);
                up += 4;
                sack_len -= 8;
                /*
                 * Bounds checking. Make sure the SACK
                 * info is within tcp_suna and tcp_snxt.
                 * If this SACK blk is out of bound, ignore
                 * it but continue to parse the following
                 * blks.
                 */
                if (SEQ_LEQ(sack_end, sack_begin) ||
                    SEQ_LT(sack_begin, tcp->tcp_suna) ||
                    SEQ_GT(sack_end, tcp->tcp_snxt)) {
                    continue;
                }
                tcp_notsack_insert(&(tcp->tcp_notsack_list),
                    sack_begin, sack_end,
                    &(tcp->tcp_num_notsack_blk),
                    &(tcp->tcp_cnt_notsack_list));
                if (SEQ_GT(sack_end, tcp->tcp_fack)) {
                    tcp->tcp_fack = sack_end;
                }
            }
            found |= TCP_OPT_SACK_PRESENT;
            continue;

        case TCPOPT_TSTAMP:
            if (len < TCPOPT_TSTAMP_LEN ||
                up[1] != TCPOPT_TSTAMP_LEN)
                break;

            tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
            tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);

            found |= TCP_OPT_TSTAMP_PRESENT;

            up += TCPOPT_TSTAMP_LEN;
            continue;

        default:
            if (len <= 1 || len < (int)up[1] || up[1] == 0)
                break;
            up += up[1];
            continue;
        }
        break;
    }
    return (found);
}
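
/*
 * A typical caller clears tcpopt.tcp (so SACK blocks are skipped rather
 * than recorded) and then tests the returned bitmask, e.g.:
 *
 *	tcpopt.tcp = NULL;
 *	options = tcp_parse_options(tcpha, &tcpopt);
 *	if (options & TCP_OPT_TSTAMP_PRESENT)
 *		use tcpopt.tcp_opt_ts_val and tcpopt.tcp_opt_ts_ecr;
 *
 * tcp_process_options() below follows this pattern.
 */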

/*
 * Process all TCP options in the SYN segment. Note that this function should
 * be called after tcp_set_destination() is called, so that the necessary
 * info from the IRE is already set in the tcp structure.
 *
 * This function sets up the correct tcp_mss value according to the
 * MSS option value and our header size. It also sets up the window scale
 * and timestamp values, and initializes the SACK info blocks. But it does
 * not change the receive window size after setting the tcp_mss value. The
 * caller should do the appropriate change.
 */
static void
tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
{
    int options;
    tcp_opt_t tcpopt;
    uint32_t mss_max;
    char *tmp_tcph;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    conn_t *connp = tcp->tcp_connp;

    tcpopt.tcp = NULL;
    options = tcp_parse_options(tcpha, &tcpopt);

    /*
     * Process the MSS option. Note that the MSS option value does not
     * account for IP or TCP options. This means that it is equal to
     * MTU - minimum IP+TCP header size, which is 40 bytes for IPv4 and
     * 60 bytes for IPv6.
     */
    if (!(options & TCP_OPT_MSS_PRESENT)) {
        if (connp->conn_ipversion == IPV4_VERSION)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
        else
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
    } else {
        if (connp->conn_ipversion == IPV4_VERSION)
            mss_max = tcps->tcps_mss_max_ipv4;
        else
            mss_max = tcps->tcps_mss_max_ipv6;
        if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
        else if (tcpopt.tcp_opt_mss > mss_max)
            tcpopt.tcp_opt_mss = mss_max;
    }

    /* Process the Window Scale option. */
    if (options & TCP_OPT_WSCALE_PRESENT) {
        tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
        tcp->tcp_snd_ws_ok = B_TRUE;
    } else {
        tcp->tcp_snd_ws = B_FALSE;
        tcp->tcp_snd_ws_ok = B_FALSE;
        tcp->tcp_rcv_ws = B_FALSE;
    }

    /* Process the Timestamp option. */
    if ((options & TCP_OPT_TSTAMP_PRESENT) &&
        (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
        tmp_tcph = (char *)tcp->tcp_tcpha;

        tcp->tcp_snd_ts_ok = B_TRUE;
        tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
        tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
        ASSERT(OK_32PTR(tmp_tcph));
        ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);

        /* Fill in our template header with basic timestamp option. */
        tmp_tcph += connp->conn_ht_ulp_len;
        tmp_tcph[0] = TCPOPT_NOP;
        tmp_tcph[1] = TCPOPT_NOP;
        tmp_tcph[2] = TCPOPT_TSTAMP;
        tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
        connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
        connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
        tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
    } else {
        tcp->tcp_snd_ts_ok = B_FALSE;
    }

    /*
     * Process SACK options. If SACK is enabled for this connection,
     * then allocate the SACK info structure. Note the following ways
     * in which tcp_snd_sack_ok can be set to B_TRUE:
     *
     * For an active connection: in tcp_set_destination() called in
     * tcp_connect().
     *
     * For a passive connection: in tcp_set_destination() called in
     * tcp_input_listener().
     *
     * That's the reason why the extra TCP_IS_DETACHED() check is there.
     * That check makes sure that if we did not send a SACK OK option,
     * we will not enable SACK for this connection even though the other
     * side sends us the SACK OK option. For an active connection, the
     * SACK info structure has already been allocated. So we need to free
     * it if SACK is disabled.
     */
    if ((options & TCP_OPT_SACK_OK_PRESENT) &&
        (tcp->tcp_snd_sack_ok ||
        (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
        ASSERT(tcp->tcp_num_sack_blk == 0);
        ASSERT(tcp->tcp_notsack_list == NULL);

        tcp->tcp_snd_sack_ok = B_TRUE;
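        /*
         * A SACK option carries 2 bytes of header plus 8 bytes per
         * block, and the TCP option space is 40 bytes. When timestamps
         * are in use they consume 12 of those 40 bytes, leaving room
         * for only 3 SACK blocks (2 + 3 * 8 = 26); otherwise 4 blocks
         * (2 + 4 * 8 = 34) fit.
         */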
        if (tcp->tcp_snd_ts_ok) {
            tcp->tcp_max_sack_blk = 3;
        } else {
            tcp->tcp_max_sack_blk = 4;
        }
    } else if (tcp->tcp_snd_sack_ok) {
        /*
         * Resetting tcp_snd_sack_ok to B_FALSE so that
         * no SACK info will be used for this
         * connection. This assumes that SACK usage
         * permission is negotiated. This may need
         * to be changed once this is clarified.
         */
        ASSERT(tcp->tcp_num_sack_blk == 0);
        ASSERT(tcp->tcp_notsack_list == NULL);
        tcp->tcp_snd_sack_ok = B_FALSE;
    }

    /*
     * Now that we know the exact TCP/IP header length, subtract
     * it from tcp_mss to get our side's MSS.
     */
    tcp->tcp_mss -= connp->conn_ht_iphc_len;

    /*
     * Here we assume that the other side's header size will be equal to
     * our header size. We calculate the real MSS accordingly. We also
     * need to account for the additional overhead IPsec adds.
     *
     * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
     */
    tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
        tcp->tcp_ipsec_overhead -
        ((connp->conn_ipversion == IPV4_VERSION ?
        IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);
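
    /*
     * Worked example of the adjustment above, assuming IPv4, no IP
     * options and no IPsec: with only the 12 byte timestamp option in
     * our template header, conn_ht_iphc_len is 20 + 20 + 12 = 52 while
     * the minimum header size is 40, so a peer-advertised MSS of 1460
     * is reduced to 1448.
     */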

    /*
     * Set MSS to the smaller one of both ends of the connection.
     * We should not have called tcp_mss_set() before, but our
     * side of the MSS should have been set to a proper value
     * by tcp_set_destination(). tcp_mss_set() will also set up the
     * STREAM head parameters properly.
     *
     * If we have a larger-than-16-bit window but the other side
     * didn't want to do window scale, tcp_rwnd_set() will take
     * care of that.
     */
    tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));

    /*
     * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been
     * updated properly.
     */
    TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
}

/*
 * Add a new piece to the tcp reassembly queue. If the gap at the beginning
 * is filled, return as much as we can. The message passed in may be
 * multi-part, chained using b_cont. "start" is the starting sequence
 * number for this piece.
 */
static mblk_t *
tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
{
    uint32_t end;
    mblk_t *mp1;
    mblk_t *mp2;
    mblk_t *next_mp;
    uint32_t u1;
    tcp_stack_t *tcps = tcp->tcp_tcps;


    /* Walk through all the new pieces. */
    do {
        ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
            (uintptr_t)INT_MAX);
        end = start + (int)(mp->b_wptr - mp->b_rptr);
        next_mp = mp->b_cont;
        if (start == end) {
            /* Empty.  Blast it. */
            freeb(mp);
            continue;
        }
        mp->b_cont = NULL;
        TCP_REASS_SET_SEQ(mp, start);
        TCP_REASS_SET_END(mp, end);
        mp1 = tcp->tcp_reass_tail;
        if (!mp1) {
            tcp->tcp_reass_tail = mp;
            tcp->tcp_reass_head = mp;
            TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
                end - start);
            continue;
        }
        /* New stuff completely beyond tail? */
        if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
            /* Link it on end. */
            mp1->b_cont = mp;
            tcp->tcp_reass_tail = mp;
            TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
                end - start);
            continue;
        }
        mp1 = tcp->tcp_reass_head;
        u1 = TCP_REASS_SEQ(mp1);
        /* New stuff at the front? */
        if (SEQ_LT(start, u1)) {
            /* Yes... Check for overlap. */
            mp->b_cont = mp1;
            tcp->tcp_reass_head = mp;
            tcp_reass_elim_overlap(tcp, mp);
            continue;
        }
        /*
         * The new piece fits somewhere between the head and tail.
         * We find our slot, where mp1 precedes us and mp2 trails.
         */
        for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
            u1 = TCP_REASS_SEQ(mp2);
            if (SEQ_LEQ(start, u1))
                break;
        }
        /* Link ourselves in */
        mp->b_cont = mp2;
        mp1->b_cont = mp;

        /* Trim overlap with following mblk(s) first */
        tcp_reass_elim_overlap(tcp, mp);

        /* Trim overlap with preceding mblk */
        tcp_reass_elim_overlap(tcp, mp1);

    } while (start = end, mp = next_mp);
    mp1 = tcp->tcp_reass_head;
    /* Anything ready to go? */
    if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
        return (NULL);
    /* Eat what we can off the queue */
    for (;;) {
        mp = mp1->b_cont;
        end = TCP_REASS_END(mp1);
        TCP_REASS_SET_SEQ(mp1, 0);
        TCP_REASS_SET_END(mp1, 0);
        if (!mp) {
            tcp->tcp_reass_tail = NULL;
            break;
        }
        if (end != TCP_REASS_SEQ(mp)) {
            mp1->b_cont = NULL;
            break;
        }
        mp1 = mp;
    }
    mp1 = tcp->tcp_reass_head;
    tcp->tcp_reass_head = mp;
    return (mp1);
}
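
/*
 * Example: with tcp_rnxt == 1000, queueing a segment covering
 * [1100, 1200) returns NULL since the gap at 1000 is still open. When a
 * segment covering [1000, 1100) later arrives, tcp_reass() links it at
 * the head and returns both mblks, ready to be passed upstream.
 */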

/* Eliminate any overlap that mp may have over later mblks */
static void
tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
{
    uint32_t end;
    mblk_t *mp1;
    uint32_t u1;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    end = TCP_REASS_END(mp);
    while ((mp1 = mp->b_cont) != NULL) {
        u1 = TCP_REASS_SEQ(mp1);
        if (!SEQ_GT(end, u1))
            break;
        if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
            mp->b_wptr -= end - u1;
            TCP_REASS_SET_END(mp, u1);
            TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes,
                end - u1);
            break;
        }
        mp->b_cont = mp1->b_cont;
        TCP_REASS_SET_SEQ(mp1, 0);
        TCP_REASS_SET_END(mp1, 0);
        freeb(mp1);
        TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
        TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1);
    }
    if (!mp1)
        tcp->tcp_reass_tail = mp;
}

/*
 * This function does the PAWS protection check, per RFC 7323 section 5.
 * It requires that the timestamp options have already been processed into
 * tcpoptp. Returns B_TRUE if the segment passes the PAWS test, else
 * returns B_FALSE.
 */
boolean_t
tcp_paws_check(tcp_t *tcp, const tcp_opt_t *tcpoptp)
{
    if (TSTMP_LT(tcpoptp->tcp_opt_ts_val,
        tcp->tcp_ts_recent)) {
        if (LBOLT_FASTPATH64 <
            (tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
            /* This segment is not acceptable. */
            return (B_FALSE);
        } else {
            /*
             * The connection has been idle for
             * too long. Reset the timestamp.
             */
            tcp->tcp_ts_recent =
                tcpoptp->tcp_opt_ts_val;
        }
    }
    return (B_TRUE);
}
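
/*
 * Example: if tcp_ts_recent is 1000 and a segment arrives carrying
 * ts_val 900, it is rejected as a stale duplicate, unless the connection
 * has been idle for more than PAWS_TIMEOUT (24 days), in which case
 * tcp_ts_recent itself is considered stale and is resynchronized to the
 * incoming value instead.
 */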

/*
 * Defense for the SYN attack -
 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
 *    one from the list of droppable eagers. This list is a subset of q0.
 *    See comments before the definition of MAKE_DROPPABLE().
 * 2. Don't drop a SYN request before its first timeout. This gives every
 *    request at least until the first timeout to complete its 3-way
 *    handshake.
 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
 *    requests currently on the queue have timed out. This will be used
 *    as an indicator of whether an attack is under way, so that appropriate
 *    actions can be taken. (It's incremented in tcp_timer() and decremented
 *    either when the eager goes into ESTABLISHED, or gets freed up.)
 * 4. The current threshold is: # of timeouts > q0len/4 => SYN alert on;
 *    # of timeouts drops back to <= q0len/32 => SYN alert off.
 */
static boolean_t
tcp_drop_q0(tcp_t *tcp)
{
    tcp_t *eager;
    mblk_t *mp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
    ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);

    /* Pick the oldest eager from the list of droppable eagers */
    eager = tcp->tcp_eager_prev_drop_q0;

    /* If the list is empty, return B_FALSE */
    if (eager == tcp) {
        return (B_FALSE);
    }

    /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
    if ((mp = allocb(0, BPRI_HI)) == NULL)
        return (B_FALSE);

    /*
     * Take this eager out from the list of droppable eagers since we are
     * going to drop it.
     */
    MAKE_UNDROPPABLE(eager);

    if (tcp->tcp_connp->conn_debug) {
        (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
            "tcp_drop_q0: listen half-open queue (max=%d) overflow"
            " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
            tcp->tcp_conn_req_cnt_q0,
            tcp_display(tcp, NULL, DISP_PORT_ONLY));
    }

    TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop);

    /* Put a reference on the conn as we are enqueueing it in the squeue */
    CONN_INC_REF(eager->tcp_connp);

    SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
        tcp_clean_death_wrapper, eager->tcp_connp, NULL,
        SQ_FILL, SQTAG_TCP_DROP_Q0);

    return (B_TRUE);
}

/*
 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
 */
static mblk_t *
tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
    tcp_t *ltcp = lconnp->conn_tcp;
    tcp_t *tcp = connp->conn_tcp;
    mblk_t *tpi_mp;
    ipha_t *ipha;
    ip6_t *ip6h;
    sin6_t sin6;
    uint_t ifindex = ira->ira_ruifindex;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    if (ira->ira_flags & IRAF_IS_IPV4) {
        ipha = (ipha_t *)mp->b_rptr;

        connp->conn_ipversion = IPV4_VERSION;
        IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
        IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
        connp->conn_saddr_v6 = connp->conn_laddr_v6;

        sin6 = sin6_null;
        sin6.sin6_addr = connp->conn_faddr_v6;
        sin6.sin6_port = connp->conn_fport;
        sin6.sin6_family = AF_INET6;
        sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
            IPCL_ZONEID(lconnp), tcps->tcps_netstack);

        if (connp->conn_recv_ancillary.crb_recvdstaddr) {
            sin6_t sin6d;

            sin6d = sin6_null;
            sin6d.sin6_addr = connp->conn_laddr_v6;
            sin6d.sin6_port = connp->conn_lport;
            sin6d.sin6_family = AF_INET;
            tpi_mp = mi_tpi_extconn_ind(NULL,
                (char *)&sin6d, sizeof (sin6_t),
                (char *)&tcp,
                (t_scalar_t)sizeof (intptr_t),
                (char *)&sin6d, sizeof (sin6_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        } else {
            tpi_mp = mi_tpi_conn_ind(NULL,
                (char *)&sin6, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        }
    } else {
        ip6h = (ip6_t *)mp->b_rptr;

        connp->conn_ipversion = IPV6_VERSION;
        connp->conn_laddr_v6 = ip6h->ip6_dst;
        connp->conn_faddr_v6 = ip6h->ip6_src;
        connp->conn_saddr_v6 = connp->conn_laddr_v6;

        sin6 = sin6_null;
        sin6.sin6_addr = connp->conn_faddr_v6;
        sin6.sin6_port = connp->conn_fport;
        sin6.sin6_family = AF_INET6;
        sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
        sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
            IPCL_ZONEID(lconnp), tcps->tcps_netstack);

        if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
            /* Pass up the scope_id of the remote addr */
            sin6.sin6_scope_id = ifindex;
        } else {
            sin6.sin6_scope_id = 0;
        }
        if (connp->conn_recv_ancillary.crb_recvdstaddr) {
            sin6_t sin6d;

            sin6d = sin6_null;
            sin6d.sin6_addr = connp->conn_laddr_v6;
            sin6d.sin6_port = connp->conn_lport;
            sin6d.sin6_family = AF_INET6;
            if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
                sin6d.sin6_scope_id = ifindex;

            tpi_mp = mi_tpi_extconn_ind(NULL,
                (char *)&sin6d, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (char *)&sin6d, sizeof (sin6_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        } else {
            tpi_mp = mi_tpi_conn_ind(NULL,
                (char *)&sin6, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        }
    }

    tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
    return (tpi_mp);
}

/* Handle a SYN on an AF_INET socket */
static mblk_t *
tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
    tcp_t *ltcp = lconnp->conn_tcp;
    tcp_t *tcp = connp->conn_tcp;
    sin_t sin;
    mblk_t *tpi_mp = NULL;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    ipha_t *ipha;

    ASSERT(ira->ira_flags & IRAF_IS_IPV4);
    ipha = (ipha_t *)mp->b_rptr;

    connp->conn_ipversion = IPV4_VERSION;
    IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
    IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
    connp->conn_saddr_v6 = connp->conn_laddr_v6;

    sin = sin_null;
    sin.sin_addr.s_addr = connp->conn_faddr_v4;
    sin.sin_port = connp->conn_fport;
    sin.sin_family = AF_INET;
    if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
        sin_t sind;

        sind = sin_null;
        sind.sin_addr.s_addr = connp->conn_laddr_v4;
        sind.sin_port = connp->conn_lport;
        sind.sin_family = AF_INET;
        tpi_mp = mi_tpi_extconn_ind(NULL,
            (char *)&sind, sizeof (sin_t), (char *)&tcp,
            (t_scalar_t)sizeof (intptr_t), (char *)&sind,
            sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
    } else {
        tpi_mp = mi_tpi_conn_ind(NULL,
            (char *)&sin, sizeof (sin_t),
            (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
            (t_scalar_t)ltcp->tcp_conn_req_seqnum);
    }

    tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
    return (tpi_mp);
}
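
/*
 * Note that tcp_conn_create_v4() and tcp_conn_create_v6() only build the
 * T_CONN_IND (or T_EXTCONN_IND) TPI message. tcp_input_listener() stashes
 * it in tcp_eager_conn_ind, and it is delivered to the listener only once
 * the 3-way handshake completes (see the ACCEPT path comment below).
 */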

/*
 * Called via squeue to get on to the eager's perimeter. It sends a
 * TH_RST if the eager is in the fanout table. The listener wants the
 * eager to disappear either by means of tcp_eager_blowoff() or
 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
 * called (via squeue) if the eager cannot be inserted in the
 * fanout table in tcp_input_listener().
 */
/* ARGSUSED */
void
tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
    conn_t *econnp = (conn_t *)arg;
    tcp_t *eager = econnp->conn_tcp;
    tcp_t *listener = eager->tcp_listener;

    /*
     * We could be called because the listener is closing. Since
     * the eager was using the listener's queues, we avoid
     * using them from now on.
     */
    ASSERT(eager->tcp_detached);
    econnp->conn_rq = NULL;
    econnp->conn_wq = NULL;

    /*
     * An eager's conn_fanout will be NULL if it's a duplicate
     * for an existing 4-tuple in the conn fanout table.
     * We don't want to send an RST out in such a case.
     */
    if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
        tcp_xmit_ctl("tcp_eager_kill, can't wait",
            eager, eager->tcp_snxt, 0, TH_RST);
    }

    /* We are here because the listener wants this eager gone */
    if (listener != NULL) {
        mutex_enter(&listener->tcp_eager_lock);
        tcp_eager_unlink(eager);
        if (eager->tcp_tconnind_started) {
            /*
             * The eager has sent a conn_ind up to the
             * listener but the listener decides to close
             * instead. We need to drop the extra ref
             * placed on the eager in tcp_input_data() before
             * sending the conn_ind to the listener.
             */
            CONN_DEC_REF(econnp);
        }
        mutex_exit(&listener->tcp_eager_lock);
        CONN_DEC_REF(listener->tcp_connp);
    }

    if (eager->tcp_state != TCPS_CLOSED)
        tcp_close_detached(eager);
}

/*
 * Reset any eager connection hanging off this listener marked
 * with 'seqnum' and then reclaim its resources.
 */
boolean_t
tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum)
{
    tcp_t *eager;
    mblk_t *mp;

    eager = listener;
    mutex_enter(&listener->tcp_eager_lock);
    do {
        eager = eager->tcp_eager_next_q;
        if (eager == NULL) {
            mutex_exit(&listener->tcp_eager_lock);
            return (B_FALSE);
        }
    } while (eager->tcp_conn_req_seqnum != seqnum);

    if (eager->tcp_closemp_used) {
        mutex_exit(&listener->tcp_eager_lock);
        return (B_TRUE);
    }
    eager->tcp_closemp_used = B_TRUE;
    TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
    CONN_INC_REF(eager->tcp_connp);
    mutex_exit(&listener->tcp_eager_lock);
    mp = &eager->tcp_closemp;
    SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
        eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
    return (B_TRUE);
}
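
/*
 * tcp_closemp is a preallocated mblk embedded in the tcp_t, so it can be
 * enqueued on an squeue only once; the tcp_closemp_used flag checked above
 * (and in tcp_eager_cleanup() below) is what prevents queueing the same
 * eager for destruction twice.
 */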

/*
 * Reset any eager connection hanging off this listener
 * and then reclaim its resources.
 */
void
tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
{
    tcp_t *eager;
    mblk_t *mp;
    tcp_stack_t *tcps = listener->tcp_tcps;

    ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));

    if (!q0_only) {
        /* First cleanup q */
        TCP_STAT(tcps, tcp_eager_blowoff_q);
        eager = listener->tcp_eager_next_q;
        while (eager != NULL) {
            if (!eager->tcp_closemp_used) {
                eager->tcp_closemp_used = B_TRUE;
                TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
                CONN_INC_REF(eager->tcp_connp);
                mp = &eager->tcp_closemp;
                SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
                    tcp_eager_kill, eager->tcp_connp, NULL,
                    SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
            }
            eager = eager->tcp_eager_next_q;
        }
    }
    /* Then cleanup q0 */
    TCP_STAT(tcps, tcp_eager_blowoff_q0);
    eager = listener->tcp_eager_next_q0;
    while (eager != listener) {
        if (!eager->tcp_closemp_used) {
            eager->tcp_closemp_used = B_TRUE;
            TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
            CONN_INC_REF(eager->tcp_connp);
            mp = &eager->tcp_closemp;
            SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
                tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
                SQTAG_TCP_EAGER_CLEANUP_Q0);
        }
        eager = eager->tcp_eager_next_q0;
    }
}

/*
 * If we are an eager connection hanging off a listener that hasn't
 * formally accepted the connection yet, get off its list and blow off
 * any data that we have accumulated.
 */
void
tcp_eager_unlink(tcp_t *tcp)
{
    tcp_t *listener = tcp->tcp_listener;

    ASSERT(listener != NULL);
    ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
    if (tcp->tcp_eager_next_q0 != NULL) {
        ASSERT(tcp->tcp_eager_prev_q0 != NULL);

        /* Remove the eager tcp from q0 */
        tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
            tcp->tcp_eager_prev_q0;
        tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
            tcp->tcp_eager_next_q0;
        ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
        listener->tcp_conn_req_cnt_q0--;

        tcp->tcp_eager_next_q0 = NULL;
        tcp->tcp_eager_prev_q0 = NULL;

        /*
         * Take the eager out, if it is in the list of droppable
         * eagers.
         */
        MAKE_UNDROPPABLE(tcp);

        if (tcp->tcp_syn_rcvd_timeout != 0) {
            /* we have timed out before */
            ASSERT(listener->tcp_syn_rcvd_timeout > 0);
            listener->tcp_syn_rcvd_timeout--;
        }
    } else {
        tcp_t **tcpp = &listener->tcp_eager_next_q;
        tcp_t *prev = NULL;

        for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
            if (tcpp[0] == tcp) {
                if (listener->tcp_eager_last_q == tcp) {
                    /*
                     * If we are unlinking the last
                     * element on the list, adjust
                     * tail pointer. Set tail pointer
                     * to nil when list is empty.
                     */
                    ASSERT(tcp->tcp_eager_next_q == NULL);
                    if (listener->tcp_eager_last_q ==
                        listener->tcp_eager_next_q) {
                        listener->tcp_eager_last_q =
                            NULL;
                    } else {
                        /*
                         * We won't get here if there
                         * is only one eager in the
                         * list.
                         */
                        ASSERT(prev != NULL);
                        listener->tcp_eager_last_q =
                            prev;
                    }
                }
                tcpp[0] = tcp->tcp_eager_next_q;
                tcp->tcp_eager_next_q = NULL;
                tcp->tcp_eager_last_q = NULL;
                ASSERT(listener->tcp_conn_req_cnt_q > 0);
                listener->tcp_conn_req_cnt_q--;
                break;
            }
            prev = tcpp[0];
        }
    }
    tcp->tcp_listener = NULL;
}
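
/*
 * Note the asymmetry handled above: q0 (half-open connections) is a
 * doubly linked circular list with the listener itself as the sentinel,
 * so unlinking is O(1), while q (connected, pending accept) is singly
 * linked with a separate tcp_eager_last_q tail pointer, which is why
 * unlinking from q requires the list walk.
 */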

/* BEGIN CSTYLED */
/*
 *
 * The sockfs ACCEPT path:
 * =======================
 *
 * The eager is now established in its own perimeter as soon as the SYN is
 * received in tcp_input_listener(). When sockfs receives the conn_ind, it
 * completes the accept processing on the acceptor STREAM. Sending the
 * conn_ind is common to both a sockfs listener and a TLI/XTI
 * listener, but a TLI/XTI listener completes the accept processing
 * on the listener perimeter.
 *
 * Common control flow for 3 way handshake:
 * ----------------------------------------
 *
 * incoming SYN (listener perimeter)	-> tcp_input_listener()
 *
 * incoming SYN-ACK-ACK (eager perim)	-> tcp_input_data()
 * send T_CONN_IND (listener perim)	-> tcp_send_conn_ind()
 *
 * Sockfs ACCEPT Path:
 * -------------------
 *
 * open acceptor stream (tcp_open allocates tcp_tli_accept()
 * as STREAM entry point)
 *
 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept()
 *
 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager
 * association (we are not behind the eager's squeue, but sockfs is
 * protecting us and no one knows about this stream yet). The STREAMS entry
 * point q->q_info is changed to point at tcp_wput().
 *
 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to
 * the listener (done on the listener's perimeter).
 *
 * tcp_tli_accept() calls tcp_accept_finish() on the eager's perimeter to
 * finish the accept.
 *
 * TLI/XTI client ACCEPT path:
 * ---------------------------
 *
 * soaccept() sends T_CONN_RES on the listener STREAM.
 *
 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send
 * an M_SETOPS mblk to the eager perimeter to finish the accept
 * (tcp_accept_finish()).
 *
 * Locks:
 * ======
 *
 * listener->tcp_eager_lock protects the listener's tcp_eager_next_q0 and
 * tcp_eager_next_q lists.
 *
 * Referencing:
 * ============
 *
 * 1) We start out in tcp_input_listener by the eager placing a ref on
 * the listener and the listener adding the eager to tcp_eager_next_q0.
 *
 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to the listener.
 * Before doing so we place a ref on the eager. This ref is finally dropped
 * at the end of tcp_accept_finish() while unwinding from the squeue, i.e.
 * the reference is dropped by the squeue framework.
 *
 * 3) The ref on the listener placed in 1 above is dropped in
 * tcp_accept_finish().
 *
 * The reference must be released by the same entity that added the
 * reference. In the above scheme, the eager is the entity that adds and
 * releases the references. Note that tcp_accept_finish executes in the
 * squeue of the eager (albeit after it is attached to the acceptor stream).
 * Though step 1 executes in the listener's squeue, the eager is nascent at
 * this point and the reference can be considered to have been added on
 * behalf of the eager.
 *
 * Eager getting a Reset or listener closing:
 * ==========================================
 *
 * Once the listener and eager are linked, the listener never does the
 * unlink. If the listener needs to close, tcp_eager_cleanup() is called,
 * which queues a message on each eager's perimeter. The eager then does the
 * unlink, clears any pointers to the listener's queue and drops the
 * reference to the listener. The listener waits in tcp_close outside the
 * squeue until its refcount has dropped to 1. This ensures that the
 * listener has waited for all eagers to clear their association with the
 * listener.
 *
 * Similarly, if the eager decides to go away, it can unlink itself and
 * close. When the T_CONN_RES comes down, we check if the eager has closed.
 * Note that the reference to the eager is still valid because of the extra
 * ref we put in tcp_send_conn_ind.
 *
 * The listener can always locate the eager under the protection
 * of listener->tcp_eager_lock, and then do a refhold
 * on the eager during the accept processing.
 *
 * The acceptor stream accesses the eager in the accept processing
 * based on the ref placed on the eager before sending T_conn_ind.
 * The only entity that can negate this refhold is a listener close,
 * which is mutually exclusive with an active acceptor stream.
 *
 * Eager's reference on the listener
 * ===================================
 *
 * If the accept happens (even on a closed eager) the eager drops its
 * reference on the listener at the start of tcp_accept_finish. If the
 * eager is killed due to an incoming RST before the T_conn_ind is sent up,
 * the reference is dropped in tcp_closei_local. If the listener closes,
 * the reference is dropped in tcp_eager_kill. In all cases the reference
 * is dropped while executing in the eager's context (squeue).
 */
/* END CSTYLED */

/* Process the SYN packet, mp, directed at the listener 'tcp' */

/*
 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
 * tcp_input_data will not see any packets for listeners since the listener
 * has conn_recv set to tcp_input_listener.
 */
/* ARGSUSED */
static void
tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
    tcpha_t *tcpha;
    uint32_t seg_seq;
    tcp_t *eager;
    int err;
    conn_t *econnp = NULL;
    squeue_t *new_sqp;
    mblk_t *mp1;
    uint_t ip_hdr_len;
    conn_t *lconnp = (conn_t *)arg;
    tcp_t *listener = lconnp->conn_tcp;
    tcp_stack_t *tcps = listener->tcp_tcps;
    ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
    uint_t flags;
    mblk_t *tpi_mp;
    uint_t ifindex = ira->ira_ruifindex;
    boolean_t tlc_set = B_FALSE;

    ip_hdr_len = ira->ira_ip_hdr_length;
    tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
    flags = (unsigned int)tcpha->tha_flags & 0xFF;

    DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa,
        __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener,
        __dtrace_tcp_tcph_t *, tcpha);
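
    /*
     * A non-SYN segment reaching a listener has no matching connection:
     * inbound RSTs and anything with URG set are silently dropped, while
     * a bare ACK is answered with a RST by tcp_xmit_listeners_reset(),
     * as RFC 793 prescribes for segments addressed to a non-existent
     * connection.
     */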
    if (!(flags & TH_SYN)) {
        if ((flags & TH_RST) || (flags & TH_URG)) {
            freemsg(mp);
            return;
        }
        if (flags & TH_ACK) {
            /* Note this executes in the listener's squeue */
            tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
            return;
        }

        freemsg(mp);
        return;
    }

    if (listener->tcp_state != TCPS_LISTEN)
        goto error2;

    ASSERT(IPCL_IS_BOUND(lconnp));

    mutex_enter(&listener->tcp_eager_lock);

    /*
     * The system is under memory pressure, so we need to do our part
     * to relieve the pressure. So we only accept new requests if there
     * is nothing waiting to be accepted or waiting to complete the 3-way
     * handshake. This means that a busy listener will not get too many
     * new requests that it cannot handle in time, while a non-busy
     * listener is still functioning properly.
     */
    if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 ||
        listener->tcp_conn_req_cnt_q0 > 0)) {
        mutex_exit(&listener->tcp_eager_lock);
        TCP_STAT(tcps, tcp_listen_mem_drop);
        goto error2;
    }

    if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
        mutex_exit(&listener->tcp_eager_lock);
        TCP_STAT(tcps, tcp_listendrop);
        TCPS_BUMP_MIB(tcps, tcpListenDrop);
        if (lconnp->conn_debug) {
            (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
                "tcp_input_listener: listen backlog (max=%d) "
                "overflow (%d pending) on %s",
                listener->tcp_conn_req_max,
                listener->tcp_conn_req_cnt_q,
                tcp_display(listener, NULL, DISP_PORT_ONLY));
        }
        goto error2;
    }

    if (listener->tcp_conn_req_cnt_q0 >=
        listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
        /*
         * Q0 is full. Drop a pending half-open req from the queue
         * to make room for the new SYN req. Also mark the time we
         * drop a SYN.
         *
         * A more aggressive defense against SYN attacks would be
         * to set the "tcp_syn_defense" flag now.
         */
        TCP_STAT(tcps, tcp_listendropq0);
        listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
        if (!tcp_drop_q0(listener)) {
            mutex_exit(&listener->tcp_eager_lock);
            TCPS_BUMP_MIB(tcps, tcpListenDropQ0);
            if (lconnp->conn_debug) {
                (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
                    "tcp_input_listener: listen half-open "
                    "queue (max=%d) full (%d pending) on %s",
                    tcps->tcps_conn_req_max_q0,
                    listener->tcp_conn_req_cnt_q0,
                    tcp_display(listener, NULL,
                    DISP_PORT_ONLY));
            }
            goto error2;
        }
    }

    /*
     * Enforce the limit set on the number of connections per listener.
     * Note that tlc_cnt starts with 1. So we need to add 1 to tlc_max
     * for the comparison.
     */
    if (listener->tcp_listen_cnt != NULL) {
        tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
        int64_t now;

        if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max + 1) {
            mutex_exit(&listener->tcp_eager_lock);
            now = ddi_get_lbolt64();
            atomic_dec_32(&tlc->tlc_cnt);
            TCP_STAT(tcps, tcp_listen_cnt_drop);
            tlc->tlc_drop++;
            if (now - tlc->tlc_report_time >
                MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
                zcmn_err(lconnp->conn_zoneid, CE_WARN,
                    "Listener (port %d) connection max (%u) "
                    "reached: %u attempts dropped total\n",
                    ntohs(listener->tcp_connp->conn_lport),
                    tlc->tlc_max, tlc->tlc_drop);
                tlc->tlc_report_time = now;
            }
            goto error2;
        }
        tlc_set = B_TRUE;
    }
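
    /*
     * (With tlc_max = N the check above lets tlc_cnt reach N + 1: one
     * slot for the listener itself, which tlc_cnt starts at, plus N
     * connections.)
     */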

    mutex_exit(&listener->tcp_eager_lock);

    /*
     * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
     * or based on the ring (for packets from GLD). Otherwise it is
     * set based on lbolt, i.e., a somewhat random number.
     */
    ASSERT(ira->ira_sqp != NULL);
    new_sqp = ira->ira_sqp;

    econnp = (conn_t *)tcp_get_conn(arg2, tcps);
    if (econnp == NULL)
        goto error2;

    ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
    econnp->conn_sqp = new_sqp;
    econnp->conn_initial_sqp = new_sqp;
    econnp->conn_ixa->ixa_sqp = new_sqp;

    econnp->conn_fport = tcpha->tha_lport;
    econnp->conn_lport = tcpha->tha_fport;

    err = conn_inherit_parent(lconnp, econnp);
    if (err != 0)
        goto error3;

    /* We already know the laddr of the new connection is ours */
    econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation;

    ASSERT(OK_32PTR(mp->b_rptr));
    ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
        IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);

    if (lconnp->conn_family == AF_INET) {
        ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
        tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
    } else {
        tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
    }

    if (tpi_mp == NULL)
        goto error3;

    eager = econnp->conn_tcp;
    eager->tcp_detached = B_TRUE;
    SOCK_CONNID_INIT(eager->tcp_connid);

    /*
     * Initialize the eager's tcp_t and inherit some parameters from
     * the listener.
     */
    tcp_init_values(eager, listener);

    ASSERT((econnp->conn_ixa->ixa_flags &
        (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
        IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
        (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
        IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));

    if (!tcps->tcps_dev_flow_ctl)
        econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

    /* Prepare for diffing against previous packets */
    eager->tcp_recvifindex = 0;
    eager->tcp_recvhops = 0xffffffffU;

    if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
        if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
            IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
            econnp->conn_incoming_ifindex = ifindex;
            econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
            econnp->conn_ixa->ixa_scopeid = ifindex;
        }
    }

    if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
        (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
        tcps->tcps_rev_src_routes) {
        ipha_t *ipha = (ipha_t *)mp->b_rptr;
        ip_pkt_t *ipp = &econnp->conn_xmit_ipp;

        /* Source routing option copyover (reverse it) */
        err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
        if (err != 0) {
            freemsg(tpi_mp);
            goto error3;
        }
        ip_pkt_source_route_reverse_v4(ipp);
    }

    ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
    ASSERT(!eager->tcp_tconnind_started);
    /*
     * If the SYN came with a credential, it's a loopback packet or a
     * labeled packet; attach the credential to the TPI message.
     */
    if (ira->ira_cred != NULL)
        mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);

    eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;
    ASSERT(eager->tcp_ordrel_mp == NULL);

    /* Inherit the listener's non-STREAMS flag */
    if (IPCL_IS_NONSTR(lconnp)) {
        econnp->conn_flags |= IPCL_NONSTR;
        /* All non-STREAMS tcp_ts are sockets */
        eager->tcp_issocket = B_TRUE;
    } else {
        /*
         * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
         * at close time, we will always have that to send up.
         * Otherwise, we need to do special handling in case the
         * allocation fails at that time.
         */
        if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
            goto error3;
    }
    /*
     * Now that the IP addresses and ports are set up in econnp we
     * can do the IPsec policy work.
     */
    if (ira->ira_flags & IRAF_IPSEC_SECURE) {
        if (lconnp->conn_policy != NULL) {
            /*
             * Inherit the policy from the listener; use
             * actions from ira
             */
            if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
                CONN_DEC_REF(econnp);
                freemsg(mp);
                goto error3;
            }
        }
    }

    /*
     * tcp_set_destination() may set tcp_rwnd according to the route
     * metrics. If it does not, the eager's receive window will be set
     * to the listener's receive window later in this function.
     */
    eager->tcp_rwnd = 0;

    if (is_system_labeled()) {
        ip_xmit_attr_t *ixa = econnp->conn_ixa;

        ASSERT(ira->ira_tsl != NULL);
        /* Discard any old label */
        if (ixa->ixa_free_flags & IXA_FREE_TSL) {
            ASSERT(ixa->ixa_tsl != NULL);
            label_rele(ixa->ixa_tsl);
            ixa->ixa_free_flags &= ~IXA_FREE_TSL;
            ixa->ixa_tsl = NULL;
        }
        if ((lconnp->conn_mlp_type != mlptSingle ||
            lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
            ira->ira_tsl != NULL) {
            /*
             * If this is an MLP connection or a MAC-Exempt
             * connection with an unlabeled node, packets are to be
             * exchanged using the security label of the received
             * SYN packet instead of the server application's label.
             * tsol_check_dest called from ip_set_destination
             * might later update TSF_UNLABELED by replacing
             * ixa_tsl with a new label.
             */
            label_hold(ira->ira_tsl);
            ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
            DTRACE_PROBE2(mlp_syn_accept, conn_t *,
                econnp, ts_label_t *, ixa->ixa_tsl)
        } else {
            ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
            DTRACE_PROBE2(syn_accept, conn_t *,
                econnp, ts_label_t *, ixa->ixa_tsl)
        }
        /*
         * conn_connect() called from tcp_set_destination will verify
         * the destination is allowed to receive packets at the
         * security label of the SYN-ACK we are generating. As part of
         * that, tsol_check_dest() may create a new effective label for
         * this connection.
         * Finally conn_connect() will call conn_update_label.
         * All that remains for TCP to do is to call
         * conn_build_hdr_template which is done as part of
         * tcp_set_destination.
         */
    }

    /*
     * Since we will clear tcp_listener before we clear tcp_detached
     * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
     * so we can tell a TCP_IS_DETACHED_NONEAGER apart.
     */
    eager->tcp_hard_binding = B_TRUE;

    tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
        TCP_BIND_HASH(econnp->conn_lport)], eager, 0);

    CL_INET_CONNECT(econnp, B_FALSE, err);
    if (err != 0) {
        tcp_bind_hash_remove(eager);
        goto error3;
    }

    SOCK_CONNID_BUMP(eager->tcp_connid);

    /*
     * Adapt our mss, ttl, ... based on the remote address.
     */

    if (tcp_set_destination(eager) != 0) {
        TCPS_BUMP_MIB(tcps, tcpAttemptFails);
        /* Undo the bind_hash_insert */
        tcp_bind_hash_remove(eager);
        goto error3;
    }

    /* Process all TCP options. */
    tcp_process_options(eager, tcpha);

    /* Is the other end ECN capable? */
    if (tcps->tcps_ecn_permitted >= 1 &&
        (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
        eager->tcp_ecn_ok = B_TRUE;
    }
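
    /*
     * (An ECN-setup SYN per RFC 3168 has both ECE and CWR set, which is
     * what the check above looks for; a plain SYN, or one with only one
     * of the two bits set, leaves ECN disabled for this connection.)
     */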

    /*
     * The listener's conn_rcvbuf should be the default window size or a
     * window size changed via the SO_RCVBUF option. First round up the
     * eager's tcp_rwnd to the nearest MSS. Then find out the window
     * scale option value if needed. Call tcp_rwnd_set() to finish the
     * setting.
     *
     * Note if there is a rpipe metric associated with the remote host,
     * we should not inherit the receive window size from the listener.
     */
    eager->tcp_rwnd = MSS_ROUNDUP(
        (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
        eager->tcp_rwnd), eager->tcp_mss);
    if (eager->tcp_snd_ws_ok)
        tcp_set_ws_value(eager);
    /*
     * Note that this is the only place tcp_rwnd_set() is called for
     * accepting a connection. We need to call it here instead of
     * after the 3-way handshake because we need to tell the other
     * side our rwnd in the SYN-ACK segment.
     */
    (void) tcp_rwnd_set(eager, eager->tcp_rwnd);

    ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
        eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);

    ASSERT(econnp->conn_rcvbuf != 0 &&
        econnp->conn_rcvbuf == eager->tcp_rwnd);

    /* Put a ref on the listener for the eager. */
    CONN_INC_REF(lconnp);
    mutex_enter(&listener->tcp_eager_lock);
    listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
    eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
    listener->tcp_eager_next_q0 = eager;
    eager->tcp_eager_prev_q0 = listener;
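
    /*
     * (The four assignments above insert the eager at the head of the
     * circular q0 list, so q0 is ordered newest to oldest; tcp_drop_q0()
     * relies on this when it picks its victim from the tail of the
     * droppable subset.)
     */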
Since this is the first packet 1731 * being sent on the wire for the eager, our initial tcp_rto 1732 * is at least tcp_rexmit_interval_min, which is a fairly 1733 * large value to allow the algorithm to adjust slowly to large 1734 * fluctuations of RTT during the first few transmissions. 1735 * 1736 * Starting the timer first and then sending the packet in this 1737 * case shouldn't make much difference since tcp_rexmit_interval_min 1738 * is on the order of several hundred milliseconds, and starting 1739 * the timer first and then sending the packet will result in a 1740 * difference of only a few microseconds. 1741 * 1742 * Without this optimization, we are forced to hold the fanout 1743 * lock across the ipcl_bind_insert() and sending the packet 1744 * so that we don't race against an incoming packet (maybe RST) 1745 * for this eager. 1746 * 1747 * It is necessary to acquire an extra reference on the eager 1748 * at this point and hold it until after tcp_send_data() to 1749 * ensure against an eager close race. 1750 */ 1751 1752 CONN_INC_REF(econnp); 1753 1754 TCP_TIMER_RESTART(eager, eager->tcp_rto); 1755 1756 /* 1757 * Insert the eager in its own perimeter now. We are ready to deal 1758 * with any packets on eager. 1759 */ 1760 if (ipcl_conn_insert(econnp) != 0) 1761 goto error; 1762 1763 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp); 1764 freemsg(mp); 1765 /* 1766 * Send the SYN-ACK. Use the right squeue so that conn_ixa is 1767 * only used by one thread at a time. 1768 */ 1769 if (econnp->conn_sqp == lconnp->conn_sqp) { 1770 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, 1771 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr, 1772 tcp_t *, eager, __dtrace_tcp_tcph_t *, 1773 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]); 1774 (void) conn_ip_output(mp1, econnp->conn_ixa); 1775 CONN_DEC_REF(econnp); 1776 } else { 1777 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack, 1778 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK); 1779 } 1780 return; 1781 error: 1782 freemsg(mp1); 1783 eager->tcp_closemp_used = B_TRUE; 1784 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1785 mp1 = &eager->tcp_closemp; 1786 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill, 1787 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2); 1788 1789 /* 1790 * If a connection already exists, send the mp to that connection so 1791 * that it can be appropriately dealt with. 1792 */ 1793 ipst = tcps->tcps_netstack->netstack_ip; 1794 1795 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) { 1796 if (!IPCL_IS_CONNECTED(econnp)) { 1797 /* 1798 * Something bad happened. ipcl_conn_insert() 1799 * failed because a connection already existed 1800 * in the connected hash but we can't find it 1801 * anymore (someone blew it away). Just 1802 * free this message and hopefully the remote 1803 * will retransmit, at which time the SYN can be 1804 * treated as a new connection or dealt with 1805 * by a TH_RST if a connection already exists. 1806 */ 1807 CONN_DEC_REF(econnp); 1808 freemsg(mp); 1809 } else { 1810 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data, 1811 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1); 1812 } 1813 } else { 1814 /* Nobody wants this packet */ 1815 freemsg(mp); 1816 } 1817 return; 1818 error3: 1819 CONN_DEC_REF(econnp); 1820 error2: 1821 freemsg(mp); 1822 if (tlc_set) 1823 atomic_dec_32(&listener->tcp_listen_cnt->tlc_cnt); 1824 } 1825 1826 /* 1827 * In the ideal case of vertical partitioning in a NUMA architecture, it's 1828 * beneficial to have the listener and all the incoming connections 1829 * tied to the same squeue.
The other constraint is that incoming 1830 * connections should be tied to the squeue attached to the interrupted 1831 * CPU for obvious locality reasons, so this leaves the listener to 1832 * be tied to the same squeue. Our only problem is that when the 1833 * listener is binding, the CPU that will get interrupted by the NIC whose 1834 * IP address the listener is binding to is not even known. So 1835 * the code below allows us to change that binding at the time the 1836 * CPU is interrupted by virtue of the incoming connection's squeue. 1837 * 1838 * This is useful only in the case of a listener bound to a specific IP 1839 * address. Other kinds of listeners get bound the 1840 * very first time and there is no attempt to rebind them. 1841 */ 1842 void 1843 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2, 1844 ip_recv_attr_t *ira) 1845 { 1846 conn_t *connp = (conn_t *)arg; 1847 squeue_t *sqp = (squeue_t *)arg2; 1848 squeue_t *new_sqp; 1849 uint32_t conn_flags; 1850 1851 /* 1852 * IP sets ira_sqp to either the sender's conn_sqp (for loopback) 1853 * or based on the ring (for packets from GLD). Otherwise it is 1854 * set based on lbolt, i.e., a somewhat random number. 1855 */ 1856 ASSERT(ira->ira_sqp != NULL); 1857 new_sqp = ira->ira_sqp; 1858 1859 if (connp->conn_fanout == NULL) 1860 goto done; 1861 1862 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 1863 mutex_enter(&connp->conn_fanout->connf_lock); 1864 mutex_enter(&connp->conn_lock); 1865 /* 1866 * No one from the read or write side can access us now 1867 * except for already queued packets on this squeue. 1868 * But since we haven't changed the squeue yet, they 1869 * can't execute. If they are processed after we have 1870 * changed the squeue, they are sent back to the 1871 * correct squeue down below. 1872 * But a listener close can race with the processing of an 1873 * incoming SYN. If incoming SYN processing changes 1874 * the squeue, then the listener close, which is waiting 1875 * to enter the squeue, would operate on the wrong 1876 * squeue. Hence we don't change the squeue here unless 1877 * the refcount is exactly the minimum refcount. The 1878 * minimum refcount of 4 is counted as: 1 each for 1879 * TCP and IP, 1 for being in the classifier hash, and 1880 * 1 for the mblk being processed. 1881 */ 1882 1883 if (connp->conn_ref != 4 || 1884 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 1885 mutex_exit(&connp->conn_lock); 1886 mutex_exit(&connp->conn_fanout->connf_lock); 1887 goto done; 1888 } 1889 if (connp->conn_sqp != new_sqp) { 1890 while (connp->conn_sqp != new_sqp) 1891 (void) atomic_cas_ptr(&connp->conn_sqp, sqp, 1892 new_sqp); 1893 /* No special MT issues for outbound ixa_sqp hint */ 1894 connp->conn_ixa->ixa_sqp = new_sqp; 1895 } 1896 1897 do { 1898 conn_flags = connp->conn_flags; 1899 conn_flags |= IPCL_FULLY_BOUND; 1900 (void) atomic_cas_32(&connp->conn_flags, 1901 connp->conn_flags, conn_flags); 1902 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 1903 1904 mutex_exit(&connp->conn_fanout->connf_lock); 1905 mutex_exit(&connp->conn_lock); 1906 1907 /* 1908 * Assume we have picked a good squeue for the listener. Make 1909 * subsequent SYNs not try to change the squeue.
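* (Resetting conn_recv below takes tcp_input_listener_unbound out of
* the picture for this listener: later SYNs dispatched via conn_recv
* go straight to tcp_input_listener on the squeue chosen here.)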
1910 */ 1911 connp->conn_recv = tcp_input_listener; 1912 } 1913 1914 done: 1915 if (connp->conn_sqp != sqp) { 1916 CONN_INC_REF(connp); 1917 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp, 1918 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND); 1919 } else { 1920 tcp_input_listener(connp, mp, sqp, ira); 1921 } 1922 } 1923 1924 /* 1925 * Send up all messages queued on tcp_rcv_list. 1926 */ 1927 uint_t 1928 tcp_rcv_drain(tcp_t *tcp) 1929 { 1930 mblk_t *mp; 1931 uint_t ret = 0; 1932 #ifdef DEBUG 1933 uint_t cnt = 0; 1934 #endif 1935 queue_t *q = tcp->tcp_connp->conn_rq; 1936 1937 /* Can't drain on an eager connection */ 1938 if (tcp->tcp_listener != NULL) 1939 return (ret); 1940 1941 /* Can't be a non-STREAMS connection */ 1942 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp)); 1943 1944 /* No need for the push timer now. */ 1945 if (tcp->tcp_push_tid != 0) { 1946 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 1947 tcp->tcp_push_tid = 0; 1948 } 1949 1950 /* 1951 * Handle two cases here: we are currently fused or we were 1952 * previously fused and have some urgent data to be delivered 1953 * upstream. The latter happens because we either ran out of 1954 * memory or were detached and therefore sending the SIGURG was 1955 * deferred until this point. In either case we pass control 1956 * over to tcp_fuse_rcv_drain() since it may need to complete 1957 * some work. 1958 */ 1959 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 1960 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 1961 &tcp->tcp_fused_sigurg_mp)) 1962 return (ret); 1963 } 1964 1965 while ((mp = tcp->tcp_rcv_list) != NULL) { 1966 tcp->tcp_rcv_list = mp->b_next; 1967 mp->b_next = NULL; 1968 #ifdef DEBUG 1969 cnt += msgdsize(mp); 1970 #endif 1971 putnext(q, mp); 1972 } 1973 #ifdef DEBUG 1974 ASSERT(cnt == tcp->tcp_rcv_cnt); 1975 #endif 1976 tcp->tcp_rcv_last_head = NULL; 1977 tcp->tcp_rcv_last_tail = NULL; 1978 tcp->tcp_rcv_cnt = 0; 1979 1980 if (canputnext(q)) 1981 return (tcp_rwnd_reopen(tcp)); 1982 1983 return (ret); 1984 } 1985 1986 /* 1987 * Queue data on tcp_rcv_list which is a b_next chain. 1988 * tcp_rcv_last_head/tail is the last element of this chain. 1989 * Each element of the chain is a b_cont chain. 1990 * 1991 * M_DATA messages are added to the current element. 1992 * Other messages are added as new (b_next) elements. 1993 */ 1994 void 1995 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr) 1996 { 1997 ASSERT(seg_len == msgdsize(mp)); 1998 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 1999 2000 if (is_system_labeled()) { 2001 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL); 2002 /* 2003 * Provide for protocols above TCP such as RPC. NOPID leaves 2004 * db_cpid unchanged. 2005 * The cred could have already been set. 
2006 */ 2007 if (cr != NULL) 2008 mblk_setcred(mp, cr, NOPID); 2009 } 2010 2011 if (tcp->tcp_rcv_list == NULL) { 2012 ASSERT(tcp->tcp_rcv_last_head == NULL); 2013 tcp->tcp_rcv_list = mp; 2014 tcp->tcp_rcv_last_head = mp; 2015 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 2016 tcp->tcp_rcv_last_tail->b_cont = mp; 2017 } else { 2018 tcp->tcp_rcv_last_head->b_next = mp; 2019 tcp->tcp_rcv_last_head = mp; 2020 } 2021 2022 while (mp->b_cont) 2023 mp = mp->b_cont; 2024 2025 tcp->tcp_rcv_last_tail = mp; 2026 tcp->tcp_rcv_cnt += seg_len; 2027 tcp->tcp_rwnd -= seg_len; 2028 } 2029 2030 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 2031 mblk_t * 2032 tcp_ack_mp(tcp_t *tcp) 2033 { 2034 uint32_t seq_no; 2035 tcp_stack_t *tcps = tcp->tcp_tcps; 2036 conn_t *connp = tcp->tcp_connp; 2037 2038 /* 2039 * There are a few cases to be considered while setting the sequence no. 2040 * Essentially, we can come here while processing an unacceptable pkt 2041 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 2042 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 2043 * If we are here for a zero window probe, stick with suna. In all 2044 * other cases, we check if suna + swnd encompasses snxt and set 2045 * the sequence number to snxt, if so. If snxt falls outside the 2046 * window (the receiver probably shrunk its window), we will go with 2047 * suna + swnd, otherwise the sequence no will be unacceptable to the 2048 * receiver. 2049 */ 2050 if (tcp->tcp_zero_win_probe) { 2051 seq_no = tcp->tcp_suna; 2052 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 2053 ASSERT(tcp->tcp_swnd == 0); 2054 seq_no = tcp->tcp_snxt; 2055 } else { 2056 seq_no = SEQ_GT(tcp->tcp_snxt, 2057 (tcp->tcp_suna + tcp->tcp_swnd)) ? 2058 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 2059 } 2060 2061 if (tcp->tcp_valid_bits) { 2062 /* 2063 * For the complex case where we have to send some 2064 * controls (FIN or SYN), let tcp_xmit_mp do it. 2065 */ 2066 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 2067 NULL, B_FALSE)); 2068 } else { 2069 /* Generate a simple ACK */ 2070 int data_length; 2071 uchar_t *rptr; 2072 tcpha_t *tcpha; 2073 mblk_t *mp1; 2074 int32_t total_hdr_len; 2075 int32_t tcp_hdr_len; 2076 int32_t num_sack_blk = 0; 2077 int32_t sack_opt_len; 2078 ip_xmit_attr_t *ixa = connp->conn_ixa; 2079 2080 /* 2081 * Allocate space for TCP + IP headers 2082 * and link-level header 2083 */ 2084 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 2085 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 2086 tcp->tcp_num_sack_blk); 2087 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 2088 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 2089 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len; 2090 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len; 2091 } else { 2092 total_hdr_len = connp->conn_ht_iphc_len; 2093 tcp_hdr_len = connp->conn_ht_ulp_len; 2094 } 2095 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 2096 if (!mp1) 2097 return (NULL); 2098 2099 /* Update the latest receive window size in TCP header. */ 2100 tcp->tcp_tcpha->tha_win = 2101 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws); 2102 /* copy in prototype TCP + IP header */ 2103 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 2104 mp1->b_rptr = rptr; 2105 mp1->b_wptr = rptr + total_hdr_len; 2106 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len); 2107 2108 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length]; 2109 2110 /* Set the TCP sequence number. 
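(Worked example with illustrative numbers, not from the original source: with tcp_suna = 1000, tcp_swnd = 500 and tcp_snxt = 1600, snxt lies beyond suna + swnd, so the ACK goes out with sequence number 1500; a zero window probe would instead use suna, i.e. 1000.)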
*/ 2111 tcpha->tha_seq = htonl(seq_no); 2112 2113 /* Set up the TCP flag field. */ 2114 tcpha->tha_flags = (uchar_t)TH_ACK; 2115 if (tcp->tcp_ecn_echo_on) 2116 tcpha->tha_flags |= TH_ECE; 2117 2118 tcp->tcp_rack = tcp->tcp_rnxt; 2119 tcp->tcp_rack_cnt = 0; 2120 2121 /* fill in timestamp option if in use */ 2122 if (tcp->tcp_snd_ts_ok) { 2123 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH; 2124 2125 U32_TO_BE32(llbolt, 2126 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4); 2127 U32_TO_BE32(tcp->tcp_ts_recent, 2128 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8); 2129 } 2130 2131 /* Fill in SACK options */ 2132 if (num_sack_blk > 0) { 2133 uchar_t *wptr = (uchar_t *)tcpha + 2134 connp->conn_ht_ulp_len; 2135 sack_blk_t *tmp; 2136 int32_t i; 2137 2138 wptr[0] = TCPOPT_NOP; 2139 wptr[1] = TCPOPT_NOP; 2140 wptr[2] = TCPOPT_SACK; 2141 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 2142 sizeof (sack_blk_t); 2143 wptr += TCPOPT_REAL_SACK_LEN; 2144 2145 tmp = tcp->tcp_sack_list; 2146 for (i = 0; i < num_sack_blk; i++) { 2147 U32_TO_BE32(tmp[i].begin, wptr); 2148 wptr += sizeof (tcp_seq); 2149 U32_TO_BE32(tmp[i].end, wptr); 2150 wptr += sizeof (tcp_seq); 2151 } 2152 tcpha->tha_offset_and_reserved += 2153 ((num_sack_blk * 2 + 1) << 4); 2154 } 2155 2156 ixa->ixa_pktlen = total_hdr_len; 2157 2158 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2159 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len); 2160 } else { 2161 ip6_t *ip6 = (ip6_t *)rptr; 2162 2163 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN); 2164 } 2165 2166 /* 2167 * Prime pump for checksum calculation in IP. Include the 2168 * adjustment for a source route if any. 2169 */ 2170 data_length = tcp_hdr_len + connp->conn_sum; 2171 data_length = (data_length >> 16) + (data_length & 0xFFFF); 2172 tcpha->tha_sum = htons(data_length); 2173 2174 if (tcp->tcp_ip_forward_progress) { 2175 tcp->tcp_ip_forward_progress = B_FALSE; 2176 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF; 2177 } else { 2178 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF; 2179 } 2180 return (mp1); 2181 } 2182 } 2183 2184 /* 2185 * Dummy socket upcalls for if/when the conn_t gets detached from a 2186 * direct-callback sonode via a user-driven close(). Easy to catch with 2187 * DTrace FBT, and should be mostly harmless. 2188 */ 2189 2190 /* ARGSUSED */ 2191 static sock_upper_handle_t 2192 tcp_dummy_newconn(sock_upper_handle_t x, sock_lower_handle_t y, 2193 sock_downcalls_t *z, cred_t *cr, pid_t pid, sock_upcalls_t **ignored) 2194 { 2195 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2196 return (NULL); 2197 } 2198 2199 /* ARGSUSED */ 2200 static void 2201 tcp_dummy_connected(sock_upper_handle_t x, sock_connid_t y, cred_t *cr, 2202 pid_t pid) 2203 { 2204 ASSERT(x == NULL); 2205 /* Normally we'd crhold(cr) and attach it to socket state. */ 2206 /* LINTED */ 2207 } 2208 2209 /* ARGSUSED */ 2210 static int 2211 tcp_dummy_disconnected(sock_upper_handle_t x, sock_connid_t y, int blah) 2212 { 2213 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2214 return (-1); 2215 } 2216 2217 /* ARGSUSED */ 2218 static void 2219 tcp_dummy_opctl(sock_upper_handle_t x, sock_opctl_action_t y, uintptr_t blah) 2220 { 2221 ASSERT(x == NULL); 2222 /* We really want this one to be a harmless NOP for now. */ 2223 /* LINTED */ 2224 } 2225 2226 /* ARGSUSED */ 2227 static ssize_t 2228 tcp_dummy_recv(sock_upper_handle_t x, mblk_t *mp, size_t len, int flags, 2229 int *error, boolean_t *push) 2230 { 2231 ASSERT(x == NULL); 2232 2233 /* 2234 * Consume the message, set ESHUTDOWN, and return an error. 2235 * Nobody's home! 
2236 */ 2237 freemsg(mp); 2238 *error = ESHUTDOWN; 2239 return (-1); 2240 } 2241 2242 /* ARGSUSED */ 2243 static void 2244 tcp_dummy_set_proto_props(sock_upper_handle_t x, struct sock_proto_props *y) 2245 { 2246 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2247 } 2248 2249 /* ARGSUSED */ 2250 static void 2251 tcp_dummy_txq_full(sock_upper_handle_t x, boolean_t y) 2252 { 2253 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2254 } 2255 2256 /* ARGSUSED */ 2257 static void 2258 tcp_dummy_signal_oob(sock_upper_handle_t x, ssize_t len) 2259 { 2260 ASSERT(x == NULL); 2261 /* Otherwise, this would signal socket state about OOB data. */ 2262 } 2263 2264 /* ARGSUSED */ 2265 static void 2266 tcp_dummy_set_error(sock_upper_handle_t x, int err) 2267 { 2268 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2269 } 2270 2271 /* ARGSUSED */ 2272 static void 2273 tcp_dummy_onearg(sock_upper_handle_t x) 2274 { 2275 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2276 } 2277 2278 static sock_upcalls_t tcp_dummy_upcalls = { 2279 tcp_dummy_newconn, 2280 tcp_dummy_connected, 2281 tcp_dummy_disconnected, 2282 tcp_dummy_opctl, 2283 tcp_dummy_recv, 2284 tcp_dummy_set_proto_props, 2285 tcp_dummy_txq_full, 2286 tcp_dummy_signal_oob, 2287 tcp_dummy_onearg, 2288 tcp_dummy_set_error, 2289 tcp_dummy_onearg 2290 }; 2291 2292 /* 2293 * Handle M_DATA messages from IP. Its called directly from IP via 2294 * squeue for received IP packets. 2295 * 2296 * The first argument is always the connp/tcp to which the mp belongs. 2297 * There are no exceptions to this rule. The caller has already put 2298 * a reference on this connp/tcp and once tcp_input_data() returns, 2299 * the squeue will do the refrele. 2300 * 2301 * The TH_SYN for the listener directly go to tcp_input_listener via 2302 * squeue. ICMP errors go directly to tcp_icmp_input(). 2303 * 2304 * sqp: NULL = recursive, sqp != NULL means called from squeue 2305 */ 2306 void 2307 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2308 { 2309 int32_t bytes_acked; 2310 int32_t gap; 2311 mblk_t *mp1; 2312 uint_t flags; 2313 uint32_t new_swnd = 0; 2314 uchar_t *iphdr; 2315 uchar_t *rptr; 2316 int32_t rgap; 2317 uint32_t seg_ack; 2318 int seg_len; 2319 uint_t ip_hdr_len; 2320 uint32_t seg_seq; 2321 tcpha_t *tcpha; 2322 int urp; 2323 tcp_opt_t tcpopt; 2324 ip_pkt_t ipp; 2325 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 2326 uint32_t cwnd; 2327 uint32_t add; 2328 int npkt; 2329 int mss; 2330 conn_t *connp = (conn_t *)arg; 2331 squeue_t *sqp = (squeue_t *)arg2; 2332 tcp_t *tcp = connp->conn_tcp; 2333 tcp_stack_t *tcps = tcp->tcp_tcps; 2334 sock_upcalls_t *sockupcalls; 2335 2336 /* 2337 * RST from fused tcp loopback peer should trigger an unfuse. 2338 */ 2339 if (tcp->tcp_fused) { 2340 TCP_STAT(tcps, tcp_fusion_aborted); 2341 tcp_unfuse(tcp); 2342 } 2343 2344 iphdr = mp->b_rptr; 2345 rptr = mp->b_rptr; 2346 ASSERT(OK_32PTR(rptr)); 2347 2348 ip_hdr_len = ira->ira_ip_hdr_length; 2349 if (connp->conn_recv_ancillary.crb_all != 0) { 2350 /* 2351 * Record packet information in the ip_pkt_t 2352 */ 2353 ipp.ipp_fields = 0; 2354 if (ira->ira_flags & IRAF_IS_IPV4) { 2355 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 2356 B_FALSE); 2357 } else { 2358 uint8_t nexthdrp; 2359 2360 /* 2361 * IPv6 packets can only be received by applications 2362 * that are prepared to receive IPv6 addresses. 2363 * The IP fanout must ensure this. 
2364 */ 2365 ASSERT(connp->conn_family == AF_INET6); 2366 2367 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 2368 &nexthdrp); 2369 ASSERT(nexthdrp == IPPROTO_TCP); 2370 2371 /* Could have caused a pullup? */ 2372 iphdr = mp->b_rptr; 2373 rptr = mp->b_rptr; 2374 } 2375 } 2376 ASSERT(DB_TYPE(mp) == M_DATA); 2377 ASSERT(mp->b_next == NULL); 2378 2379 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2380 seg_seq = ntohl(tcpha->tha_seq); 2381 seg_ack = ntohl(tcpha->tha_ack); 2382 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 2383 seg_len = (int)(mp->b_wptr - rptr) - 2384 (ip_hdr_len + TCP_HDR_LENGTH(tcpha)); 2385 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 2386 do { 2387 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 2388 (uintptr_t)INT_MAX); 2389 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 2390 } while ((mp1 = mp1->b_cont) != NULL && 2391 mp1->b_datap->db_type == M_DATA); 2392 } 2393 2394 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2395 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp, 2396 __dtrace_tcp_tcph_t *, tcpha); 2397 2398 if (tcp->tcp_state == TCPS_TIME_WAIT) { 2399 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 2400 seg_len, tcpha, ira); 2401 return; 2402 } 2403 2404 if (sqp != NULL) { 2405 /* 2406 * This is the correct place to update tcp_last_recv_time. Note 2407 * that it is also updated for tcp structure that belongs to 2408 * global and listener queues which do not really need updating. 2409 * But that should not cause any harm. And it is updated for 2410 * all kinds of incoming segments, not only for data segments. 2411 */ 2412 tcp->tcp_last_recv_time = LBOLT_FASTPATH; 2413 } 2414 2415 flags = (unsigned int)tcpha->tha_flags & 0xFF; 2416 2417 BUMP_LOCAL(tcp->tcp_ibsegs); 2418 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2419 2420 if ((flags & TH_URG) && sqp != NULL) { 2421 /* 2422 * TCP can't handle urgent pointers that arrive before 2423 * the connection has been accept()ed since it can't 2424 * buffer OOB data. Discard segment if this happens. 2425 * 2426 * We can't just rely on a non-null tcp_listener to indicate 2427 * that the accept() has completed since unlinking of the 2428 * eager and completion of the accept are not atomic. 2429 * tcp_detached, when it is not set (B_FALSE) indicates 2430 * that the accept() has completed. 2431 * 2432 * Nor can it reassemble urgent pointers, so discard 2433 * if it's not the next segment expected. 2434 * 2435 * Otherwise, collapse chain into one mblk (discard if 2436 * that fails). This makes sure the headers, retransmitted 2437 * data, and new data all are in the same mblk. 2438 */ 2439 ASSERT(mp != NULL); 2440 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 2441 freemsg(mp); 2442 return; 2443 } 2444 /* Update pointers into message */ 2445 iphdr = rptr = mp->b_rptr; 2446 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2447 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 2448 /* 2449 * Since we can't handle any data with this urgent 2450 * pointer that is out of sequence, we expunge 2451 * the data. This allows us to still register 2452 * the urgent mark and generate the M_PCSIG, 2453 * which we can do. 2454 */ 2455 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2456 seg_len = 0; 2457 } 2458 } 2459 2460 sockupcalls = connp->conn_upcalls; 2461 /* A conn_t may have belonged to a now-closed socket. Be careful. 
*/ 2462 if (sockupcalls == NULL) 2463 sockupcalls = &tcp_dummy_upcalls; 2464 2465 switch (tcp->tcp_state) { 2466 case TCPS_SYN_SENT: 2467 if (connp->conn_final_sqp == NULL && 2468 tcp_outbound_squeue_switch && sqp != NULL) { 2469 ASSERT(connp->conn_initial_sqp == connp->conn_sqp); 2470 connp->conn_final_sqp = sqp; 2471 if (connp->conn_final_sqp != connp->conn_sqp) { 2472 DTRACE_PROBE1(conn__final__sqp__switch, 2473 conn_t *, connp); 2474 CONN_INC_REF(connp); 2475 SQUEUE_SWITCH(connp, connp->conn_final_sqp); 2476 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2477 tcp_input_data, connp, ira, ip_squeue_flag, 2478 SQTAG_CONNECT_FINISH); 2479 return; 2480 } 2481 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp); 2482 } 2483 if (flags & TH_ACK) { 2484 /* 2485 * Note that our stack cannot send data before a 2486 * connection is established; therefore the 2487 * following check is valid. Otherwise, it would 2488 * have to be changed. 2489 */ 2490 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 2491 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2492 freemsg(mp); 2493 if (flags & TH_RST) 2494 return; 2495 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 2496 tcp, seg_ack, 0, TH_RST); 2497 return; 2498 } 2499 ASSERT(tcp->tcp_suna + 1 == seg_ack); 2500 } 2501 if (flags & TH_RST) { 2502 if (flags & TH_ACK) { 2503 DTRACE_TCP5(connect__refused, mblk_t *, NULL, 2504 ip_xmit_attr_t *, connp->conn_ixa, 2505 void_ip_t *, iphdr, tcp_t *, tcp, 2506 tcph_t *, tcpha); 2507 (void) tcp_clean_death(tcp, ECONNREFUSED); 2508 } 2509 freemsg(mp); 2510 return; 2511 } 2512 if (!(flags & TH_SYN)) { 2513 freemsg(mp); 2514 return; 2515 } 2516 2517 /* Process all TCP options. */ 2518 tcp_process_options(tcp, tcpha); 2519 /* 2520 * The following changes our rwnd to be a multiple of the 2521 * MIN(peer MSS, our MSS) for performance reasons. 2522 */ 2523 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf, 2524 tcp->tcp_mss)); 2525 2526 /* Is the other end ECN capable? */ 2527 if (tcp->tcp_ecn_ok) { 2528 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 2529 tcp->tcp_ecn_ok = B_FALSE; 2530 } 2531 } 2532 /* 2533 * Clear the ECN flags because they may interfere with later 2534 * processing. 2535 */ 2536 flags &= ~(TH_ECE|TH_CWR); 2537 2538 tcp->tcp_irs = seg_seq; 2539 tcp->tcp_rack = seg_seq; 2540 tcp->tcp_rnxt = seg_seq + 1; 2541 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2542 if (!TCP_IS_DETACHED(tcp)) { 2543 /* Allocate room for SACK options if needed. */ 2544 connp->conn_wroff = connp->conn_ht_iphc_len; 2545 if (tcp->tcp_snd_sack_ok) 2546 connp->conn_wroff += TCPOPT_MAX_SACK_LEN; 2547 if (!tcp->tcp_loopback) 2548 connp->conn_wroff += tcps->tcps_wroff_xtra; 2549 2550 (void) proto_set_tx_wroff(connp->conn_rq, connp, 2551 connp->conn_wroff); 2552 } 2553 if (flags & TH_ACK) { 2554 /* 2555 * If we can't get the confirmation upstream, pretend 2556 * we didn't even see this one. 2557 * 2558 * XXX: how can we pretend we didn't see it if we 2559 * have updated rnxt et al.? 2560 * 2561 * For loopback we defer sending up the T_CONN_CON 2562 * until after some checks below. 2563 */ 2564 mp1 = NULL; 2565 /* 2566 * tcp_sendmsg() checks tcp_state without entering 2567 * the squeue, so tcp_state should be updated before 2568 * sending up the connection confirmation. Probe the 2569 * state change below when we are sure the connection 2570 * confirmation has been sent. 2571 */ 2572 tcp->tcp_state = TCPS_ESTABLISHED; 2573 if (!tcp_conn_con(tcp, iphdr, mp, 2574 tcp->tcp_loopback ?
&mp1 : NULL, ira)) { 2575 tcp->tcp_state = TCPS_SYN_SENT; 2576 freemsg(mp); 2577 return; 2578 } 2579 TCPS_CONN_INC(tcps); 2580 /* SYN was acked - making progress */ 2581 tcp->tcp_ip_forward_progress = B_TRUE; 2582 2583 /* One for the SYN */ 2584 tcp->tcp_suna = tcp->tcp_iss + 1; 2585 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 2586 2587 /* 2588 * If SYN was retransmitted, need to reset all 2589 * retransmission info. This is because this 2590 * segment will be treated as a dup ACK. 2591 */ 2592 if (tcp->tcp_rexmit) { 2593 tcp->tcp_rexmit = B_FALSE; 2594 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 2595 tcp->tcp_rexmit_max = tcp->tcp_snxt; 2596 tcp->tcp_ms_we_have_waited = 0; 2597 2598 /* 2599 * Set tcp_cwnd back to 1 MSS, per 2600 * recommendation from 2601 * draft-floyd-incr-init-win-01.txt, 2602 * Increasing TCP's Initial Window. 2603 */ 2604 tcp->tcp_cwnd = tcp->tcp_mss; 2605 } 2606 2607 tcp->tcp_swl1 = seg_seq; 2608 tcp->tcp_swl2 = seg_ack; 2609 2610 new_swnd = ntohs(tcpha->tha_win); 2611 tcp->tcp_swnd = new_swnd; 2612 if (new_swnd > tcp->tcp_max_swnd) 2613 tcp->tcp_max_swnd = new_swnd; 2614 2615 /* 2616 * Always send the three-way handshake ack immediately 2617 * in order to make the connection complete as soon as 2618 * possible on the accepting host. 2619 */ 2620 flags |= TH_ACK_NEEDED; 2621 2622 /* 2623 * Trace connect-established here. 2624 */ 2625 DTRACE_TCP5(connect__established, mblk_t *, NULL, 2626 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa, 2627 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha); 2628 2629 /* Trace change from SYN_SENT -> ESTABLISHED here */ 2630 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2631 connp->conn_ixa, void, NULL, tcp_t *, tcp, 2632 void, NULL, int32_t, TCPS_SYN_SENT); 2633 2634 /* 2635 * Special case for loopback. At this point we have 2636 * received SYN-ACK from the remote endpoint. In 2637 * order to ensure that both endpoints reach the 2638 * fused state prior to any data exchange, the final 2639 * ACK needs to be sent before we indicate T_CONN_CON 2640 * to the module upstream. 2641 */ 2642 if (tcp->tcp_loopback) { 2643 mblk_t *ack_mp; 2644 2645 ASSERT(!tcp->tcp_unfusable); 2646 ASSERT(mp1 != NULL); 2647 /* 2648 * For loopback, we always get a pure SYN-ACK 2649 * and only need to send back the final ACK 2650 * with no data (this is because the other 2651 * tcp is ours and we don't do T/TCP). This 2652 * final ACK triggers the passive side to 2653 * perform fusion in ESTABLISHED state. 2654 */ 2655 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 2656 if (tcp->tcp_ack_tid != 0) { 2657 (void) TCP_TIMER_CANCEL(tcp, 2658 tcp->tcp_ack_tid); 2659 tcp->tcp_ack_tid = 0; 2660 } 2661 tcp_send_data(tcp, ack_mp); 2662 BUMP_LOCAL(tcp->tcp_obsegs); 2663 TCPS_BUMP_MIB(tcps, tcpOutAck); 2664 2665 if (!IPCL_IS_NONSTR(connp)) { 2666 /* Send up T_CONN_CON */ 2667 if (ira->ira_cred != NULL) { 2668 mblk_setcred(mp1, 2669 ira->ira_cred, 2670 ira->ira_cpid); 2671 } 2672 putnext(connp->conn_rq, mp1); 2673 } else { 2674 (*sockupcalls->su_connected) 2675 (connp->conn_upper_handle, 2676 tcp->tcp_connid, 2677 ira->ira_cred, 2678 ira->ira_cpid); 2679 freemsg(mp1); 2680 } 2681 2682 freemsg(mp); 2683 return; 2684 } 2685 /* 2686 * Forget fusion; we need to handle more 2687 * complex cases below. Send the deferred 2688 * T_CONN_CON message upstream and proceed 2689 * as usual. Mark this tcp as not capable 2690 * of fusion. 
2691 */ 2692 TCP_STAT(tcps, tcp_fusion_unfusable); 2693 tcp->tcp_unfusable = B_TRUE; 2694 if (!IPCL_IS_NONSTR(connp)) { 2695 if (ira->ira_cred != NULL) { 2696 mblk_setcred(mp1, ira->ira_cred, 2697 ira->ira_cpid); 2698 } 2699 putnext(connp->conn_rq, mp1); 2700 } else { 2701 (*sockupcalls->su_connected) 2702 (connp->conn_upper_handle, 2703 tcp->tcp_connid, ira->ira_cred, 2704 ira->ira_cpid); 2705 freemsg(mp1); 2706 } 2707 } 2708 2709 /* 2710 * Check to see if there is data to be sent. If 2711 * yes, set the transmit flag. Then check to see 2712 * if received data processing needs to be done. 2713 * If not, go straight to xmit_check. This short 2714 * cut is OK as we don't support T/TCP. 2715 */ 2716 if (tcp->tcp_unsent) 2717 flags |= TH_XMIT_NEEDED; 2718 2719 if (seg_len == 0 && !(flags & TH_URG)) { 2720 freemsg(mp); 2721 goto xmit_check; 2722 } 2723 2724 flags &= ~TH_SYN; 2725 seg_seq++; 2726 break; 2727 } 2728 tcp->tcp_state = TCPS_SYN_RCVD; 2729 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2730 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp, 2731 tcph_t *, NULL, int32_t, TCPS_SYN_SENT); 2732 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 2733 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 2734 if (mp1 != NULL) { 2735 tcp_send_data(tcp, mp1); 2736 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 2737 } 2738 freemsg(mp); 2739 return; 2740 case TCPS_SYN_RCVD: 2741 if (flags & TH_ACK) { 2742 uint32_t pinit_wnd; 2743 2744 /* 2745 * In this state, a SYN|ACK packet is either bogus 2746 * because the other side must be ACKing our SYN which 2747 * indicates it has seen the ACK for their SYN and 2748 * shouldn't retransmit it or we're crossing SYNs 2749 * on active open. 2750 */ 2751 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 2752 freemsg(mp); 2753 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 2754 tcp, seg_ack, 0, TH_RST); 2755 return; 2756 } 2757 /* 2758 * NOTE: RFC 793 pg. 72 says this should be 2759 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 2760 * but that would mean we have an ack that ignored 2761 * our SYN. 2762 */ 2763 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 2764 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2765 freemsg(mp); 2766 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 2767 tcp, seg_ack, 0, TH_RST); 2768 return; 2769 } 2770 /* 2771 * No sane TCP stack will send such a small window 2772 * without receiving any data. Just drop this invalid 2773 * ACK. We also shorten the abort timeout in case 2774 * this is an attack. 2775 */ 2776 pinit_wnd = ntohs(tcpha->tha_win) << tcp->tcp_snd_ws; 2777 if (pinit_wnd < tcp->tcp_mss && 2778 pinit_wnd < tcp_init_wnd_chk) { 2779 freemsg(mp); 2780 TCP_STAT(tcps, tcp_zwin_ack_syn); 2781 tcp->tcp_second_ctimer_threshold = 2782 tcp_early_abort * SECONDS; 2783 return; 2784 } 2785 } 2786 break; 2787 case TCPS_LISTEN: 2788 /* 2789 * Only a TLI listener can come through this path when a 2790 * acceptor is going back to be a listener and a packet 2791 * for the acceptor hits the classifier. For a socket 2792 * listener, this can never happen because a listener 2793 * can never accept connection on itself and hence a 2794 * socket acceptor can not go back to being a listener. 2795 */ 2796 ASSERT(!TCP_IS_SOCKET(tcp)); 2797 /*FALLTHRU*/ 2798 case TCPS_CLOSED: 2799 case TCPS_BOUND: { 2800 conn_t *new_connp; 2801 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2802 2803 /* 2804 * Don't accept any input on a closed tcp as this TCP logically 2805 * does not exist on the system. Don't proceed further with 2806 * this TCP. 
For instance, this packet could trigger another 2807 * close of this tcp, which would be disastrous for tcp_refcnt. 2808 * tcp_close_detached / tcp_clean_death / tcp_closei_local must 2809 * be called at most once on a TCP. In this case we need to 2810 * refeed the packet into the classifier and figure out where 2811 * the packet should go. 2812 */ 2813 new_connp = ipcl_classify(mp, ira, ipst); 2814 if (new_connp != NULL) { 2815 /* Drops ref on new_connp */ 2816 tcp_reinput(new_connp, mp, ira, ipst); 2817 return; 2818 } 2819 /* We failed to classify. For now just drop the packet */ 2820 freemsg(mp); 2821 return; 2822 } 2823 case TCPS_IDLE: 2824 /* 2825 * Handle the case where tcp_clean_death() has happened 2826 * on a connection (the application hasn't closed yet) but a packet 2827 * was already queued on the squeue before tcp_clean_death() 2828 * was processed. Calling tcp_clean_death() twice on the same 2829 * connection can result in weird behavior. 2830 */ 2831 freemsg(mp); 2832 return; 2833 default: 2834 break; 2835 } 2836 2837 /* 2838 * Already on the correct queue/perimeter. 2839 * If this is a detached connection and not an eager 2840 * connection hanging off a listener, then new data 2841 * (past the FIN) will cause a reset. 2842 * We do a special check here, out of the 2843 * main line, rather than checking whether 2844 * we are detached every time we see new 2845 * data down below. 2846 */ 2847 if (TCP_IS_DETACHED_NONEAGER(tcp) && 2848 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 2849 TCPS_BUMP_MIB(tcps, tcpInClosed); 2850 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2851 freemsg(mp); 2852 tcp_xmit_ctl("new data when detached", tcp, 2853 tcp->tcp_snxt, 0, TH_RST); 2854 (void) tcp_clean_death(tcp, EPROTO); 2855 return; 2856 } 2857 2858 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2859 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION; 2860 new_swnd = ntohs(tcpha->tha_win) << 2861 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws); 2862 2863 /* 2864 * We are interested in two TCP options if they were negotiated: 2865 * timestamps and SACK. Skip option parsing if neither was negotiated. 2866 */ 2867 if (tcp->tcp_snd_ts_ok || tcp->tcp_snd_sack_ok) { 2868 int options; 2869 if (tcp->tcp_snd_sack_ok) 2870 tcpopt.tcp = tcp; 2871 else 2872 tcpopt.tcp = NULL; 2873 options = tcp_parse_options(tcpha, &tcpopt); 2874 /* 2875 * RST segments must not be subject to PAWS and are not 2876 * required to have timestamps. 2877 * We do not drop keepalive segments without timestamps, 2878 * to maintain compatibility with legacy TCP stacks. 2879 */ 2880 boolean_t keepalive = (seg_len == 0 || seg_len == 1) && 2881 (seg_seq + 1 == tcp->tcp_rnxt); 2882 if (tcp->tcp_snd_ts_ok && !(flags & TH_RST) && !keepalive) { 2883 /* 2884 * Per RFC 7323 section 3.2, silently drop non-RST 2885 * segments without the expected TSopt. This is a 'SHOULD' 2886 * requirement. 2887 * We accept keepalives without TSopt to maintain 2888 * interoperability with tcp implementations that omit 2889 * the TSopt on these. Keepalive data is discarded, so 2890 * there is no risk of corrupting data by accepting them. 2891 */ 2892 if (!(options & TCP_OPT_TSTAMP_PRESENT)) { 2893 /* 2894 * Leave a breadcrumb for people to detect this 2895 * behavior. 2896 */ 2897 DTRACE_TCP1(droppedtimestamp, tcp_t *, tcp); 2898 freemsg(mp); 2899 return; 2900 } 2901 2902 if (!tcp_paws_check(tcp, &tcpopt)) { 2903 /* 2904 * This segment is not acceptable. 2905 * Drop it and send back an ACK.
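* (Illustrative PAWS failure, numbers not from the original source:
* if the segment carries TSval 100 while tcp_ts_recent is 250, it is
* presumed per RFC 7323 to be an old duplicate from a previous
* sequence-number wrap and is dropped; the ACK we send back lets the
* peer resynchronize.)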
2906 */ 2907 freemsg(mp); 2908 flags |= TH_ACK_NEEDED; 2909 goto ack_check; 2910 } 2911 } 2912 } 2913 try_again:; 2914 mss = tcp->tcp_mss; 2915 gap = seg_seq - tcp->tcp_rnxt; 2916 rgap = tcp->tcp_rwnd - (gap + seg_len); 2917 /* 2918 * gap is the amount of sequence space between what we expect to see 2919 * and what we got for seg_seq. A positive value for gap means 2920 * something got lost. A negative value means we got some old stuff. 2921 */ 2922 if (gap < 0) { 2923 /* Old stuff present. Is the SYN in there? */ 2924 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 2925 (seg_len != 0)) { 2926 flags &= ~TH_SYN; 2927 seg_seq++; 2928 urp--; 2929 /* Recompute the gaps after noting the SYN. */ 2930 goto try_again; 2931 } 2932 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 2933 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, 2934 (seg_len > -gap ? -gap : seg_len)); 2935 /* Remove the old stuff from seg_len. */ 2936 seg_len += gap; 2937 /* 2938 * Anything left? 2939 * Make sure to check for unack'd FIN when rest of data 2940 * has been previously ack'd. 2941 */ 2942 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 2943 /* 2944 * Resets are only valid if they lie within our offered 2945 * window. If the RST bit is set, we just ignore this 2946 * segment. 2947 */ 2948 if (flags & TH_RST) { 2949 freemsg(mp); 2950 return; 2951 } 2952 2953 /* 2954 * The arriving of dup data packets indicate that we 2955 * may have postponed an ack for too long, or the other 2956 * side's RTT estimate is out of shape. Start acking 2957 * more often. 2958 */ 2959 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 2960 tcp->tcp_rack_cnt >= 1 && 2961 tcp->tcp_rack_abs_max > 2) { 2962 tcp->tcp_rack_abs_max--; 2963 } 2964 tcp->tcp_rack_cur_max = 1; 2965 2966 /* 2967 * This segment is "unacceptable". None of its 2968 * sequence space lies within our advertized window. 2969 * 2970 * Adjust seg_len to the original value for tracing. 2971 */ 2972 seg_len -= gap; 2973 if (connp->conn_debug) { 2974 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 2975 "tcp_rput: unacceptable, gap %d, rgap %d, " 2976 "flags 0x%x, seg_seq %u, seg_ack %u, " 2977 "seg_len %d, rnxt %u, snxt %u, %s", 2978 gap, rgap, flags, seg_seq, seg_ack, 2979 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 2980 tcp_display(tcp, NULL, 2981 DISP_ADDR_AND_PORT)); 2982 } 2983 2984 /* 2985 * Arrange to send an ACK in response to the 2986 * unacceptable segment per RFC 793 page 69. There 2987 * is only one small difference between ours and the 2988 * acceptability test in the RFC - we accept ACK-only 2989 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 2990 * will be generated. 2991 * 2992 * Note that we have to ACK an ACK-only packet at least 2993 * for stacks that send 0-length keep-alives with 2994 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 2995 * section 4.2.3.6. As long as we don't ever generate 2996 * an unacceptable packet in response to an incoming 2997 * packet that is unacceptable, it should not cause 2998 * "ACK wars". 2999 */ 3000 flags |= TH_ACK_NEEDED; 3001 3002 /* 3003 * Continue processing this segment in order to use the 3004 * ACK information it contains, but skip all other 3005 * sequence-number processing. Processing the ACK 3006 * information is necessary in order to 3007 * re-synchronize connections that may have lost 3008 * synchronization. 3009 * 3010 * We clear seg_len and flag fields related to 3011 * sequence number processing as they are not 3012 * to be trusted for an unacceptable segment. 
3013 */ 3014 seg_len = 0; 3015 flags &= ~(TH_SYN | TH_FIN | TH_URG); 3016 goto process_ack; 3017 } 3018 3019 /* Fix seg_seq, and chew the gap off the front. */ 3020 seg_seq = tcp->tcp_rnxt; 3021 urp += gap; 3022 do { 3023 mblk_t *mp2; 3024 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3025 (uintptr_t)UINT_MAX); 3026 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 3027 if (gap > 0) { 3028 mp->b_rptr = mp->b_wptr - gap; 3029 break; 3030 } 3031 mp2 = mp; 3032 mp = mp->b_cont; 3033 freeb(mp2); 3034 } while (gap < 0); 3035 /* 3036 * If the urgent data has already been acknowledged, we 3037 * should ignore TH_URG below 3038 */ 3039 if (urp < 0) 3040 flags &= ~TH_URG; 3041 } 3042 /* 3043 * rgap is the amount of stuff received out of window. A negative 3044 * value is the amount out of window. 3045 */ 3046 if (rgap < 0) { 3047 mblk_t *mp2; 3048 3049 if (tcp->tcp_rwnd == 0) { 3050 TCPS_BUMP_MIB(tcps, tcpInWinProbe); 3051 } else { 3052 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs); 3053 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap); 3054 } 3055 3056 /* 3057 * seg_len does not include the FIN, so if more than 3058 * just the FIN is out of window, we act like we don't 3059 * see it. (If just the FIN is out of window, rgap 3060 * will be zero and we will go ahead and acknowledge 3061 * the FIN.) 3062 */ 3063 flags &= ~TH_FIN; 3064 3065 /* Fix seg_len and make sure there is something left. */ 3066 seg_len += rgap; 3067 if (seg_len <= 0) { 3068 /* 3069 * Resets are only valid if they lie within our offered 3070 * window. If the RST bit is set, we just ignore this 3071 * segment. 3072 */ 3073 if (flags & TH_RST) { 3074 freemsg(mp); 3075 return; 3076 } 3077 3078 /* Per RFC 793, we need to send back an ACK. */ 3079 flags |= TH_ACK_NEEDED; 3080 3081 /* 3082 * Send SIGURG as soon as possible i.e. even 3083 * if the TH_URG was delivered in a window probe 3084 * packet (which will be unacceptable). 3085 * 3086 * We generate a signal if none has been generated 3087 * for this connection or if this is a new urgent 3088 * byte. Also send a zero-length "unmarked" message 3089 * to inform SIOCATMARK that this is not the mark. 3090 * 3091 * tcp_urp_last_valid is cleared when the T_exdata_ind 3092 * is sent up. This plus the check for old data 3093 * (gap >= 0) handles the wraparound of the sequence 3094 * number space without having to always track the 3095 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 3096 * this max in its rcv_up variable). 3097 * 3098 * This prevents duplicate SIGURGS due to a "late" 3099 * zero-window probe when the T_EXDATA_IND has already 3100 * been sent up. 3101 */ 3102 if ((flags & TH_URG) && 3103 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 3104 tcp->tcp_urp_last))) { 3105 if (IPCL_IS_NONSTR(connp)) { 3106 if (!TCP_IS_DETACHED(tcp)) { 3107 (*sockupcalls->su_signal_oob) 3108 (connp->conn_upper_handle, 3109 urp); 3110 } 3111 } else { 3112 mp1 = allocb(0, BPRI_MED); 3113 if (mp1 == NULL) { 3114 freemsg(mp); 3115 return; 3116 } 3117 if (!TCP_IS_DETACHED(tcp) && 3118 !putnextctl1(connp->conn_rq, 3119 M_PCSIG, SIGURG)) { 3120 /* Try again on the rexmit. */ 3121 freemsg(mp1); 3122 freemsg(mp); 3123 return; 3124 } 3125 /* 3126 * If the next byte would be the mark 3127 * then mark with MARKNEXT else mark 3128 * with NOTMARKNEXT. 
3129 */ 3130 if (gap == 0 && urp == 0) 3131 mp1->b_flag |= MSGMARKNEXT; 3132 else 3133 mp1->b_flag |= MSGNOTMARKNEXT; 3134 freemsg(tcp->tcp_urp_mark_mp); 3135 tcp->tcp_urp_mark_mp = mp1; 3136 flags |= TH_SEND_URP_MARK; 3137 } 3138 tcp->tcp_urp_last_valid = B_TRUE; 3139 tcp->tcp_urp_last = urp + seg_seq; 3140 } 3141 /* 3142 * If this is a zero window probe, continue to 3143 * process the ACK part. But we need to set seg_len 3144 * to 0 to avoid data processing. Otherwise just 3145 * drop the segment and send back an ACK. 3146 */ 3147 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 3148 flags &= ~(TH_SYN | TH_URG); 3149 seg_len = 0; 3150 goto process_ack; 3151 } else { 3152 freemsg(mp); 3153 goto ack_check; 3154 } 3155 } 3156 /* Pitch out of window stuff off the end. */ 3157 rgap = seg_len; 3158 mp2 = mp; 3159 do { 3160 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 3161 (uintptr_t)INT_MAX); 3162 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 3163 if (rgap < 0) { 3164 mp2->b_wptr += rgap; 3165 if ((mp1 = mp2->b_cont) != NULL) { 3166 mp2->b_cont = NULL; 3167 freemsg(mp1); 3168 } 3169 break; 3170 } 3171 } while ((mp2 = mp2->b_cont) != NULL); 3172 } 3173 ok:; 3174 /* 3175 * TCP should check ECN info for segments inside the window only. 3176 * Therefore the check should be done here. 3177 */ 3178 if (tcp->tcp_ecn_ok) { 3179 if (flags & TH_CWR) { 3180 tcp->tcp_ecn_echo_on = B_FALSE; 3181 } 3182 /* 3183 * Note that both ECN_CE and CWR can be set in the 3184 * same segment. In this case, we once again turn 3185 * on ECN_ECHO. 3186 */ 3187 if (connp->conn_ipversion == IPV4_VERSION) { 3188 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 3189 3190 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 3191 tcp->tcp_ecn_echo_on = B_TRUE; 3192 } 3193 } else { 3194 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 3195 3196 if ((vcf & htonl(IPH_ECN_CE << 20)) == 3197 htonl(IPH_ECN_CE << 20)) { 3198 tcp->tcp_ecn_echo_on = B_TRUE; 3199 } 3200 } 3201 } 3202 3203 /* 3204 * Check whether we can update tcp_ts_recent. This test is from RFC 3205 * 7323, section 5.3. 3206 */ 3207 if (tcp->tcp_snd_ts_ok && !(flags & TH_RST) && 3208 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 3209 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 3210 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 3211 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64; 3212 } 3213 3214 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 3215 /* 3216 * FIN in an out of order segment. We record this in 3217 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 3218 * Clear the FIN so that any check on FIN flag will fail. 3219 * Remember that FIN also counts in the sequence number 3220 * space. So we need to ack out of order FIN only segments. 3221 */ 3222 if (flags & TH_FIN) { 3223 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 3224 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 3225 flags &= ~TH_FIN; 3226 flags |= TH_ACK_NEEDED; 3227 } 3228 if (seg_len > 0) { 3229 /* Fill in the SACK blk list. */ 3230 if (tcp->tcp_snd_sack_ok) { 3231 tcp_sack_insert(tcp->tcp_sack_list, 3232 seg_seq, seg_seq + seg_len, 3233 &(tcp->tcp_num_sack_blk)); 3234 } 3235 3236 /* 3237 * Attempt reassembly and see if we have something 3238 * ready to go. 3239 */ 3240 mp = tcp_reass(tcp, mp, seg_seq); 3241 /* Always ack out of order packets */ 3242 flags |= TH_ACK_NEEDED | TH_PUSH; 3243 if (mp) { 3244 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3245 (uintptr_t)INT_MAX); 3246 seg_len = mp->b_cont ? 
msgdsize(mp) : 3247 (int)(mp->b_wptr - mp->b_rptr); 3248 seg_seq = tcp->tcp_rnxt; 3249 /* 3250 * If a gap is filled and the seq num and len 3251 * of the gap match those of a previously 3252 * received FIN, put the FIN flag back in. 3253 */ 3254 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3255 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3256 flags |= TH_FIN; 3257 tcp->tcp_valid_bits &= 3258 ~TCP_OFO_FIN_VALID; 3259 } 3260 if (tcp->tcp_reass_tid != 0) { 3261 (void) TCP_TIMER_CANCEL(tcp, 3262 tcp->tcp_reass_tid); 3263 /* 3264 * Restart the timer if there is still 3265 * data in the reassembly queue. 3266 */ 3267 if (tcp->tcp_reass_head != NULL) { 3268 tcp->tcp_reass_tid = TCP_TIMER( 3269 tcp, tcp_reass_timer, 3270 tcps->tcps_reass_timeout); 3271 } else { 3272 tcp->tcp_reass_tid = 0; 3273 } 3274 } 3275 } else { 3276 /* 3277 * Keep going even with NULL mp. 3278 * There may be a useful ACK or something else 3279 * we don't want to miss. 3280 * 3281 * But TCP should not perform fast retransmit 3282 * based on the ack number. TCP uses 3283 * seg_len == 0 to determine whether a segment 3284 * is a pure ACK, and this is not a pure ACK. 3285 */ 3286 seg_len = 0; 3287 ofo_seg = B_TRUE; 3288 3289 if (tcps->tcps_reass_timeout != 0 && 3290 tcp->tcp_reass_tid == 0) { 3291 tcp->tcp_reass_tid = TCP_TIMER(tcp, 3292 tcp_reass_timer, 3293 tcps->tcps_reass_timeout); 3294 } 3295 } 3296 } 3297 } else if (seg_len > 0) { 3298 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs); 3299 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len); 3300 /* 3301 * If an out of order FIN was received before, and the seq 3302 * num and len of the new segment match those of the FIN, 3303 * put the FIN flag back in. 3304 */ 3305 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3306 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3307 flags |= TH_FIN; 3308 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 3309 } 3310 } 3311 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 3312 if (flags & TH_RST) { 3313 freemsg(mp); 3314 switch (tcp->tcp_state) { 3315 case TCPS_SYN_RCVD: 3316 (void) tcp_clean_death(tcp, ECONNREFUSED); 3317 break; 3318 case TCPS_ESTABLISHED: 3319 case TCPS_FIN_WAIT_1: 3320 case TCPS_FIN_WAIT_2: 3321 case TCPS_CLOSE_WAIT: 3322 (void) tcp_clean_death(tcp, ECONNRESET); 3323 break; 3324 case TCPS_CLOSING: 3325 case TCPS_LAST_ACK: 3326 (void) tcp_clean_death(tcp, 0); 3327 break; 3328 default: 3329 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3330 (void) tcp_clean_death(tcp, ENXIO); 3331 break; 3332 } 3333 return; 3334 } 3335 if (flags & TH_SYN) { 3336 /* 3337 * See RFC 793, Page 71 3338 * 3339 * The seq number must be in the window as it should 3340 * be "fixed" above. If it is outside the window, it 3341 * should already have been rejected. Note that we allow 3342 * seg_seq to be rnxt + rwnd to accept a 0 window probe. 3343 */ 3344 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 3345 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 3346 freemsg(mp); 3347 /* 3348 * If the ACK flag is not set, just use our snxt as the 3349 * seq number of the RST segment. 3350 */ 3351 if (!(flags & TH_ACK)) { 3352 seg_ack = tcp->tcp_snxt; 3353 } 3354 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 3355 TH_RST|TH_ACK); 3356 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3357 (void) tcp_clean_death(tcp, ECONNRESET); 3358 return; 3359 } 3360 /* 3361 * urp could be -1 when the urp field in the packet is 0 3362 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 3363 * byte was at seg_seq - 1, in which case we ignore the urgent flag.
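* (For instance, a tha_urp of 0 yields a negative urp once
* TCP_OLD_URP_INTERPRETATION is subtracted above, so the TH_URG test
* below is skipped.)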
3364 */ 3365 if (flags & TH_URG && urp >= 0) { 3366 if (!tcp->tcp_urp_last_valid || 3367 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 3368 /* 3369 * Non-STREAMS sockets handle the urgent data a litte 3370 * differently from STREAMS based sockets. There is no 3371 * need to mark any mblks with the MSG{NOT,}MARKNEXT 3372 * flags to keep SIOCATMARK happy. Instead a 3373 * su_signal_oob upcall is made to update the mark. 3374 * Neither is a T_EXDATA_IND mblk needed to be 3375 * prepended to the urgent data. The urgent data is 3376 * delivered using the su_recv upcall, where we set 3377 * the MSG_OOB flag to indicate that it is urg data. 3378 * 3379 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED 3380 * are used by non-STREAMS sockets. 3381 */ 3382 if (IPCL_IS_NONSTR(connp)) { 3383 if (!TCP_IS_DETACHED(tcp)) { 3384 (*sockupcalls->su_signal_oob) 3385 (connp->conn_upper_handle, urp); 3386 } 3387 } else { 3388 /* 3389 * If we haven't generated the signal yet for 3390 * this urgent pointer value, do it now. Also, 3391 * send up a zero-length M_DATA indicating 3392 * whether or not this is the mark. The latter 3393 * is not needed when a T_EXDATA_IND is sent up. 3394 * However, if there are allocation failures 3395 * this code relies on the sender retransmitting 3396 * and the socket code for determining the mark 3397 * should not block waiting for the peer to 3398 * transmit. Thus, for simplicity we always 3399 * send up the mark indication. 3400 */ 3401 mp1 = allocb(0, BPRI_MED); 3402 if (mp1 == NULL) { 3403 freemsg(mp); 3404 return; 3405 } 3406 if (!TCP_IS_DETACHED(tcp) && 3407 !putnextctl1(connp->conn_rq, M_PCSIG, 3408 SIGURG)) { 3409 /* Try again on the rexmit. */ 3410 freemsg(mp1); 3411 freemsg(mp); 3412 return; 3413 } 3414 /* 3415 * Mark with NOTMARKNEXT for now. 3416 * The code below will change this to MARKNEXT 3417 * if we are at the mark. 3418 * 3419 * If there are allocation failures (e.g. in 3420 * dupmsg below) the next time tcp_input_data 3421 * sees the urgent segment it will send up the 3422 * MSGMARKNEXT message. 3423 */ 3424 mp1->b_flag |= MSGNOTMARKNEXT; 3425 freemsg(tcp->tcp_urp_mark_mp); 3426 tcp->tcp_urp_mark_mp = mp1; 3427 flags |= TH_SEND_URP_MARK; 3428 #ifdef DEBUG 3429 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3430 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 3431 "last %x, %s", 3432 seg_seq, urp, tcp->tcp_urp_last, 3433 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3434 #endif /* DEBUG */ 3435 } 3436 tcp->tcp_urp_last_valid = B_TRUE; 3437 tcp->tcp_urp_last = urp + seg_seq; 3438 } else if (tcp->tcp_urp_mark_mp != NULL) { 3439 /* 3440 * An allocation failure prevented the previous 3441 * tcp_input_data from sending up the allocated 3442 * MSG*MARKNEXT message - send it up this time 3443 * around. 3444 */ 3445 flags |= TH_SEND_URP_MARK; 3446 } 3447 3448 /* 3449 * If the urgent byte is in this segment, make sure that it is 3450 * all by itself. This makes it much easier to deal with the 3451 * possibility of an allocation failure on the T_exdata_ind. 3452 * Note that seg_len is the number of bytes in the segment, and 3453 * urp is the offset into the segment of the urgent byte. 3454 * urp < seg_len means that the urgent byte is in this segment. 3455 */ 3456 if (urp < seg_len) { 3457 if (seg_len != 1) { 3458 uint32_t tmp_rnxt; 3459 /* 3460 * Break it up and feed it back in. 3461 * Re-attach the IP header. 3462 */ 3463 mp->b_rptr = iphdr; 3464 if (urp > 0) { 3465 /* 3466 * There is stuff before the urgent 3467 * byte. 
3468 */ 3469 mp1 = dupmsg(mp); 3470 if (!mp1) { 3471 /* 3472 * Trim from urgent byte on. 3473 * The rest will come back. 3474 */ 3475 (void) adjmsg(mp, 3476 urp - seg_len); 3477 tcp_input_data(connp, 3478 mp, NULL, ira); 3479 return; 3480 } 3481 (void) adjmsg(mp1, urp - seg_len); 3482 /* Feed this piece back in. */ 3483 tmp_rnxt = tcp->tcp_rnxt; 3484 tcp_input_data(connp, mp1, NULL, ira); 3485 /* 3486 * If the data passed back in was not 3487 * processed (ie: bad ACK) sending 3488 * the remainder back in will cause a 3489 * loop. In this case, drop the 3490 * packet and let the sender try 3491 * sending a good packet. 3492 */ 3493 if (tmp_rnxt == tcp->tcp_rnxt) { 3494 freemsg(mp); 3495 return; 3496 } 3497 } 3498 if (urp != seg_len - 1) { 3499 uint32_t tmp_rnxt; 3500 /* 3501 * There is stuff after the urgent 3502 * byte. 3503 */ 3504 mp1 = dupmsg(mp); 3505 if (!mp1) { 3506 /* 3507 * Trim everything beyond the 3508 * urgent byte. The rest will 3509 * come back. 3510 */ 3511 (void) adjmsg(mp, 3512 urp + 1 - seg_len); 3513 tcp_input_data(connp, 3514 mp, NULL, ira); 3515 return; 3516 } 3517 (void) adjmsg(mp1, urp + 1 - seg_len); 3518 tmp_rnxt = tcp->tcp_rnxt; 3519 tcp_input_data(connp, mp1, NULL, ira); 3520 /* 3521 * If the data passed back in was not 3522 * processed (ie: bad ACK) sending 3523 * the remainder back in will cause a 3524 * loop. In this case, drop the 3525 * packet and let the sender try 3526 * sending a good packet. 3527 */ 3528 if (tmp_rnxt == tcp->tcp_rnxt) { 3529 freemsg(mp); 3530 return; 3531 } 3532 } 3533 tcp_input_data(connp, mp, NULL, ira); 3534 return; 3535 } 3536 /* 3537 * This segment contains only the urgent byte. We 3538 * have to allocate the T_exdata_ind, if we can. 3539 */ 3540 if (IPCL_IS_NONSTR(connp)) { 3541 int error; 3542 3543 (*sockupcalls->su_recv) 3544 (connp->conn_upper_handle, mp, seg_len, 3545 MSG_OOB, &error, NULL); 3546 /* 3547 * We should never be in middle of a 3548 * fallback, the squeue guarantees that. 3549 */ 3550 ASSERT(error != EOPNOTSUPP); 3551 mp = NULL; 3552 goto update_ack; 3553 } else if (!tcp->tcp_urp_mp) { 3554 struct T_exdata_ind *tei; 3555 mp1 = allocb(sizeof (struct T_exdata_ind), 3556 BPRI_MED); 3557 if (!mp1) { 3558 /* 3559 * Sigh... It'll be back. 3560 * Generate any MSG*MARK message now. 3561 */ 3562 freemsg(mp); 3563 seg_len = 0; 3564 if (flags & TH_SEND_URP_MARK) { 3565 3566 3567 ASSERT(tcp->tcp_urp_mark_mp); 3568 tcp->tcp_urp_mark_mp->b_flag &= 3569 ~MSGNOTMARKNEXT; 3570 tcp->tcp_urp_mark_mp->b_flag |= 3571 MSGMARKNEXT; 3572 } 3573 goto ack_check; 3574 } 3575 mp1->b_datap->db_type = M_PROTO; 3576 tei = (struct T_exdata_ind *)mp1->b_rptr; 3577 tei->PRIM_type = T_EXDATA_IND; 3578 tei->MORE_flag = 0; 3579 mp1->b_wptr = (uchar_t *)&tei[1]; 3580 tcp->tcp_urp_mp = mp1; 3581 #ifdef DEBUG 3582 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3583 "tcp_rput: allocated exdata_ind %s", 3584 tcp_display(tcp, NULL, 3585 DISP_PORT_ONLY)); 3586 #endif /* DEBUG */ 3587 /* 3588 * There is no need to send a separate MSG*MARK 3589 * message since the T_EXDATA_IND will be sent 3590 * now. 3591 */ 3592 flags &= ~TH_SEND_URP_MARK; 3593 freemsg(tcp->tcp_urp_mark_mp); 3594 tcp->tcp_urp_mark_mp = NULL; 3595 } 3596 /* 3597 * Now we are all set. On the next putnext upstream, 3598 * tcp_urp_mp will be non-NULL and will get prepended 3599 * to what has to be this piece containing the urgent 3600 * byte. If for any reason we abort this segment below, 3601 * if it comes back, we will have this ready, or it 3602 * will get blown off in close. 
3603 */
3604 } else if (urp == seg_len) {
3605 /*
3606 * The urgent byte is the next byte after this sequence
3607 * number. If this endpoint is non-STREAMS, then there
3608 * is nothing to do here since the socket has already
3609 * been notified about the urg pointer by the
3610 * su_signal_oob call above.
3611 *
3612 * In case of STREAMS, some more work might be needed.
3613 * If there is data it is marked with MSGMARKNEXT,
3614 * and any tcp_urp_mark_mp is discarded since it is not
3615 * needed. Otherwise, if the code above just allocated
3616 * a zero-length tcp_urp_mark_mp message, that message
3617 * is tagged with MSGMARKNEXT. Sending up these
3618 * MSGMARKNEXT messages makes SIOCATMARK work correctly
3619 * even though the T_EXDATA_IND will not be sent up
3620 * until the urgent byte arrives.
3621 */
3622 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
3623 if (seg_len != 0) {
3624 flags |= TH_MARKNEXT_NEEDED;
3625 freemsg(tcp->tcp_urp_mark_mp);
3626 tcp->tcp_urp_mark_mp = NULL;
3627 flags &= ~TH_SEND_URP_MARK;
3628 } else if (tcp->tcp_urp_mark_mp != NULL) {
3629 flags |= TH_SEND_URP_MARK;
3630 tcp->tcp_urp_mark_mp->b_flag &=
3631 ~MSGNOTMARKNEXT;
3632 tcp->tcp_urp_mark_mp->b_flag |=
3633 MSGMARKNEXT;
3634 }
3635 }
3636 #ifdef DEBUG
3637 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3638 "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
3639 seg_len, flags,
3640 tcp_display(tcp, NULL, DISP_PORT_ONLY));
3641 #endif /* DEBUG */
3642 }
3643 #ifdef DEBUG
3644 else {
3645 /* Data left until we hit mark */
3646 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3647 "tcp_rput: URP %d bytes left, %s",
3648 urp - seg_len, tcp_display(tcp, NULL,
3649 DISP_PORT_ONLY));
3650 }
3651 #endif /* DEBUG */
3652 }
3653
3654 process_ack:
3655 if (!(flags & TH_ACK)) {
3656 freemsg(mp);
3657 goto xmit_check;
3658 }
3659 }
3660 bytes_acked = (int)(seg_ack - tcp->tcp_suna);
3661
3662 if (bytes_acked > 0)
3663 tcp->tcp_ip_forward_progress = B_TRUE;
3664 if (tcp->tcp_state == TCPS_SYN_RCVD) {
3665 /*
3666 * tcp_sendmsg() checks tcp_state without entering
3667 * the squeue so tcp_state should be updated before
3668 * sending up a connection confirmation or a new
3669 * connection indication.
3670 */
3671 tcp->tcp_state = TCPS_ESTABLISHED;
3672
3673 /*
3674 * We are seeing the final ACK in the three-way
3675 * handshake of an actively opened connection,
3676 * so we must send up a T_CONN_CON.
3677 */
3678 if (tcp->tcp_active_open) {
3679 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) {
3680 freemsg(mp);
3681 tcp->tcp_state = TCPS_SYN_RCVD;
3682 return;
3683 }
3684 /*
3685 * Don't fuse the loopback endpoints for
3686 * simultaneous active opens.
3687 */
3688 if (tcp->tcp_loopback) {
3689 TCP_STAT(tcps, tcp_fusion_unfusable);
3690 tcp->tcp_unfusable = B_TRUE;
3691 }
3692 /*
3693 * For simultaneous active open, trace receipt of final
3694 * ACK as tcp:::connect-established.
3695 */
3696 DTRACE_TCP5(connect__established, mblk_t *, NULL,
3697 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3698 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3699 } else if (IPCL_IS_NONSTR(connp)) {
3700 /*
3701 * 3-way handshake has completed, so notify socket
3702 * of the new connection.
3703 *
3704 * Being here means the eager is fine, but it can
3705 * still get a TH_RST at any point between now and
3706 * when accept completes, and then disappear. We
3707 * need to ensure that the reference to the eager
3708 * is valid after we get out of the eager's
3709 * perimeter. So we do an extra refhold.
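 * (The extra reference is dropped again below if the notification
 * cannot be delivered.)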
3710 */
3711 CONN_INC_REF(connp);
3712
3713 if (!tcp_newconn_notify(tcp, ira)) {
3714 /*
3715 * The state-change probe for SYN_RCVD ->
3716 * ESTABLISHED has not fired yet. We reset
3717 * the state to SYN_RCVD so that future
3718 * state-change probes report correct state
3719 * transitions.
3720 */
3721 tcp->tcp_state = TCPS_SYN_RCVD;
3722 freemsg(mp);
3723 /* notification did not go up, so drop ref */
3724 CONN_DEC_REF(connp);
3725 /* ... and close the eager */
3726 ASSERT(TCP_IS_DETACHED(tcp));
3727 (void) tcp_close_detached(tcp);
3728 return;
3729 }
3730 /*
3731 * tcp_newconn_notify() changes conn_upcalls and
3732 * connp->conn_upper_handle. Fix things now, in case
3733 * there's data attached to this ack.
3734 */
3735 if (connp->conn_upcalls != NULL)
3736 sockupcalls = connp->conn_upcalls;
3737 /*
3738 * For passive open, trace receipt of final ACK as
3739 * tcp:::accept-established.
3740 */
3741 DTRACE_TCP5(accept__established, mblk_t *, NULL,
3742 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3743 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3744 } else {
3745 /*
3746 * 3-way handshake complete - this is a STREAMS based
3747 * socket, so pass up the T_CONN_IND.
3748 */
3749 tcp_t *listener = tcp->tcp_listener;
3750 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind;
3751
3752 tcp->tcp_tconnind_started = B_TRUE;
3753 tcp->tcp_conn.tcp_eager_conn_ind = NULL;
3754 ASSERT(mp != NULL);
3755 /*
3756 * Being here means the eager is fine, but it can
3757 * still get a TH_RST at any point between now and
3758 * when accept completes, and then disappear. We
3759 * need to ensure that the reference to the eager
3760 * is valid after we get out of the eager's
3761 * perimeter. So we do an extra refhold.
3762 */
3763 CONN_INC_REF(connp);
3764
3765 /*
3766 * The listener also exists because of the refhold
3767 * done in tcp_input_listener. It's possible that it
3768 * might have closed. We will check that once we
3769 * get inside the listener's context.
3770 */
3771 CONN_INC_REF(listener->tcp_connp);
3772 if (listener->tcp_connp->conn_sqp ==
3773 connp->conn_sqp) {
3774 /*
3775 * We optimize by not calling an SQUEUE_ENTER
3776 * on the listener since we know that the
3777 * listener and eager squeues are the same.
3778 * We are able to make this check safely only
3779 * because neither the eager nor the listener
3780 * can change its squeue. Only an active connect
3781 * can change its squeue.
3782 */
3783 tcp_send_conn_ind(listener->tcp_connp, mp,
3784 listener->tcp_connp->conn_sqp);
3785 CONN_DEC_REF(listener->tcp_connp);
3786 } else if (!tcp->tcp_loopback) {
3787 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3788 mp, tcp_send_conn_ind,
3789 listener->tcp_connp, NULL, SQ_FILL,
3790 SQTAG_TCP_CONN_IND);
3791 } else {
3792 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3793 mp, tcp_send_conn_ind,
3794 listener->tcp_connp, NULL, SQ_NODRAIN,
3795 SQTAG_TCP_CONN_IND);
3796 }
3797 /*
3798 * For passive open, trace receipt of final ACK as
3799 * tcp:::accept-established.
3800 */
3801 DTRACE_TCP5(accept__established, mblk_t *, NULL,
3802 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3803 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3804 }
3805 TCPS_CONN_INC(tcps);
3806
3807 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */
3808 bytes_acked--;
3809 /* SYN was acked - making progress */
3810 tcp->tcp_ip_forward_progress = B_TRUE;
3811
3812 /*
3813 * If the SYN was retransmitted, we need to reset all
3814 * retransmission info, as this segment will be
3815 * treated as a dup ACK.
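 * (Below, tcp_rexmit is cleared, tcp_rexmit_nxt and tcp_rexmit_max
 * are snapped back to tcp_snxt, and cwnd restarts at one MSS.)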
3816 */
3817 if (tcp->tcp_rexmit) {
3818 tcp->tcp_rexmit = B_FALSE;
3819 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
3820 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3821 tcp->tcp_ms_we_have_waited = 0;
3822 tcp->tcp_cwnd = mss;
3823 }
3824
3825 /*
3826 * We set the send window to zero here.
3827 * This is needed if there is data to be
3828 * processed already on the queue.
3829 * Later (at the swnd_update label), when the
3830 * "new_swnd > tcp_swnd" condition is satisfied,
3831 * the XMIT_NEEDED flag is set in the current
3832 * (SYN_RCVD) state. This ensures tcp_wput_data() is
3833 * called if there is already data on the queue in
3834 * this state.
3835 */
3836 tcp->tcp_swnd = 0;
3837
3838 if (new_swnd > tcp->tcp_max_swnd)
3839 tcp->tcp_max_swnd = new_swnd;
3840 tcp->tcp_swl1 = seg_seq;
3841 tcp->tcp_swl2 = seg_ack;
3842 tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
3843
3844 /* Trace change from SYN_RCVD -> ESTABLISHED here */
3845 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
3846 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
3847 int32_t, TCPS_SYN_RCVD);
3848
3849 /* Fuse when both sides are in ESTABLISHED state */
3850 if (tcp->tcp_loopback && do_tcp_fusion)
3851 tcp_fuse(tcp, iphdr, tcpha);
3852
3853 }
3854 /* This code follows 4.4BSD-Lite2 mostly. */
3855 if (bytes_acked < 0)
3856 goto est;
3857
3858 /*
3859 * If TCP is ECN capable and the congestion experience bit is
3860 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be
3861 * done once per window (or more loosely, per RTT).
3862 */
3863 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
3864 tcp->tcp_cwr = B_FALSE;
3865 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
3866 if (!tcp->tcp_cwr) {
3867 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
3868 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
3869 tcp->tcp_cwnd = npkt * mss;
3870 /*
3871 * If the cwnd is 0, use the timer to clock out
3872 * new segments. This is required by the ECN spec.
3873 */
3874 if (npkt == 0) {
3875 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3876 /*
3877 * This makes sure that when the ACK comes
3878 * back, we will increase tcp_cwnd by 1 MSS.
3879 */
3880 tcp->tcp_cwnd_cnt = 0;
3881 }
3882 tcp->tcp_cwr = B_TRUE;
3883 /*
3884 * This marks the end of the current window of in
3885 * flight data. That is why we don't use
3886 * tcp_suna + tcp_swnd. Only data in flight can
3887 * provide ECN info.
3888 */
3889 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
3890 tcp->tcp_ecn_cwr_sent = B_FALSE;
3891 }
3892 }
3893
3894 mp1 = tcp->tcp_xmit_head;
3895 if (bytes_acked == 0) {
3896 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
3897 int dupack_cnt;
3898
3899 TCPS_BUMP_MIB(tcps, tcpInDupAck);
3900 /*
3901 * Fast retransmit. When we have seen exactly three
3902 * identical ACKs while we have unacked data
3903 * outstanding we take it as a hint that our peer
3904 * dropped something.
3905 *
3906 * If TCP is retransmitting, don't do fast retransmit.
3907 */
3908 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt &&
3909 ! tcp->tcp_rexmit) {
3910 /* Do Limited Transmit */
3911 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
3912 tcps->tcps_dupack_fast_retransmit) {
3913 /*
3914 * RFC 3042
3915 *
3916 * What we need to do is temporarily
3917 * increase tcp_cwnd so that new
3918 * data can be sent if it is allowed
3919 * by the receive window (tcp_rwnd).
3920 * tcp_wput_data() will take care of
3921 * the rest.
3922 *
3923 * If the connection is SACK capable,
3924 * only do limited xmit when there
3925 * is SACK info.
3926 *
3927 * Note how tcp_cwnd is incremented.
3928 * The first dup ACK will increase
3929 * it by 1 MSS. The second dup ACK
3930 * will increase it by 2 MSS. This
3931 * means that only 1 new segment will
3932 * be sent for each dup ACK.
3933 */
3934 if (tcp->tcp_unsent > 0 &&
3935 (!tcp->tcp_snd_sack_ok ||
3936 (tcp->tcp_snd_sack_ok &&
3937 tcp->tcp_notsack_list != NULL))) {
3938 tcp->tcp_cwnd += mss <<
3939 (tcp->tcp_dupack_cnt - 1);
3940 flags |= TH_LIMIT_XMIT;
3941 }
3942 } else if (dupack_cnt ==
3943 tcps->tcps_dupack_fast_retransmit) {
3944
3945 /*
3946 * If we have reduced tcp_ssthresh
3947 * because of ECN, do not reduce it again
3948 * unless it is already one window of data
3949 * away. After one window of data, tcp_cwr
3950 * should then be cleared. Note that for a
3951 * non-ECN-capable connection, tcp_cwr
3952 * should always be false.
3953 *
3954 * Adjust cwnd since the duplicate
3955 * ack indicates that a packet was
3956 * dropped (due to congestion.)
3957 */
3958 if (!tcp->tcp_cwr) {
3959 npkt = ((tcp->tcp_snxt -
3960 tcp->tcp_suna) >> 1) / mss;
3961 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
3962 mss;
3963 tcp->tcp_cwnd = (npkt +
3964 tcp->tcp_dupack_cnt) * mss;
3965 }
3966 if (tcp->tcp_ecn_ok) {
3967 tcp->tcp_cwr = B_TRUE;
3968 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
3969 tcp->tcp_ecn_cwr_sent = B_FALSE;
3970 }
3971
3972 /*
3973 * We do Hoe's algorithm. Refer to her
3974 * paper "Improving the Start-up Behavior
3975 * of a Congestion Control Scheme for TCP,"
3976 * which appeared in SIGCOMM '96.
3977 *
3978 * Save the highest seq no we have sent so far.
3979 * Be careful about the invisible FIN byte.
3980 */
3981 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
3982 (tcp->tcp_unsent == 0)) {
3983 tcp->tcp_rexmit_max = tcp->tcp_fss;
3984 } else {
3985 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3986 }
3987
3988 /*
3989 * For SACK:
3990 * Calculate tcp_pipe, which is the
3991 * estimated number of bytes in the
3992 * network.
3993 *
3994 * tcp_fack is the highest sack'ed seq num
3995 * TCP has received.
3996 *
3997 * tcp_pipe is explained in Fall and
3998 * Floyd's paper. tcp_fack is
3999 * explained in Mathis and Mahdavi's
4000 * "Forward Acknowledgment: Refining TCP
4001 * Congestion Control" in SIGCOMM '96.
4002 */
4003 if (tcp->tcp_snd_sack_ok) {
4004 if (tcp->tcp_notsack_list != NULL) {
4005 tcp->tcp_pipe = tcp->tcp_snxt -
4006 tcp->tcp_fack;
4007 tcp->tcp_sack_snxt = seg_ack;
4008 flags |= TH_NEED_SACK_REXMIT;
4009 } else {
4010 /*
4011 * Always initialize tcp_pipe
4012 * even though we don't have
4013 * any SACK info. If later
4014 * we get SACK info and
4015 * tcp_pipe is not initialized,
4016 * funny things will happen.
4017 */
4018 tcp->tcp_pipe =
4019 tcp->tcp_cwnd_ssthresh;
4020 }
4021 } else {
4022 flags |= TH_REXMIT_NEEDED;
4023 } /* tcp_snd_sack_ok */
4024
4025 } else {
4026 /*
4027 * Here we perform congestion
4028 * avoidance, but NOT slow start.
4029 * This is known as the Fast
4030 * Recovery Algorithm.
4031 */
4032 if (tcp->tcp_snd_sack_ok &&
4033 tcp->tcp_notsack_list != NULL) {
4034 flags |= TH_NEED_SACK_REXMIT;
4035 tcp->tcp_pipe -= mss;
4036 if (tcp->tcp_pipe < 0)
4037 tcp->tcp_pipe = 0;
4038 } else {
4039 /*
4040 * We know that one more packet has
4041 * left the pipe, thus we can update
4042 * cwnd.
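 * For illustration: with mss = 1460, each additional dup ACK beyond
 * the third inflates cwnd by one mss (capped at tcp_cwnd_max below),
 * clocking out roughly one new segment per returning ACK.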
4043 */
4044 cwnd = tcp->tcp_cwnd + mss;
4045 if (cwnd > tcp->tcp_cwnd_max)
4046 cwnd = tcp->tcp_cwnd_max;
4047 tcp->tcp_cwnd = cwnd;
4048 if (tcp->tcp_unsent > 0)
4049 flags |= TH_XMIT_NEEDED;
4050 }
4051 }
4052 }
4053 } else if (tcp->tcp_zero_win_probe) {
4054 /*
4055 * If the window has opened, we need to arrange
4056 * to send additional data.
4057 */
4058 if (new_swnd != 0) {
4059 /* tcp_suna != tcp_snxt */
4060 /* Packet contains a window update */
4061 TCPS_BUMP_MIB(tcps, tcpInWinUpdate);
4062 tcp->tcp_zero_win_probe = 0;
4063 tcp->tcp_timer_backoff = 0;
4064 tcp->tcp_ms_we_have_waited = 0;
4065
4066 /*
4067 * Transmit starting with tcp_suna since
4068 * the one byte probe is not ack'ed.
4069 * If TCP has sent more than one identical
4070 * probe, tcp_rexmit will be set. That means
4071 * tcp_ss_rexmit() will send out the one
4072 * byte along with new data. Otherwise,
4073 * fake the retransmission.
4074 */
4075 flags |= TH_XMIT_NEEDED;
4076 if (!tcp->tcp_rexmit) {
4077 tcp->tcp_rexmit = B_TRUE;
4078 tcp->tcp_dupack_cnt = 0;
4079 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
4080 tcp->tcp_rexmit_max = tcp->tcp_suna + 1;
4081 }
4082 }
4083 }
4084 goto swnd_update;
4085 }
4086
4087 /*
4088 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
4089 * If the ACK value acks something that we have not yet sent, it might
4090 * be an old duplicate segment. Send an ACK to re-synchronize the
4091 * other side.
4092 * Note: reset in response to unacceptable ACK in SYN_RECEIVE
4093 * state is handled above, so we can always just drop the segment and
4094 * send an ACK here.
4095 *
4096 * In the case where the peer shrinks the window, we see the new window
4097 * update, but all the data sent previously is queued up by the peer.
4098 * To account for this, in tcp_process_shrunk_swnd(), the sequence
4099 * number, which was already sent, and within window, is recorded.
4100 * tcp_snxt is then updated.
4101 *
4102 * If the window has previously shrunk, and an ACK for data not yet
4103 * sent, according to tcp_snxt, is received, it may still be valid. If
4104 * the ACK is for data within the window at the time the window was
4105 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to
4106 * the sequence number ACK'ed.
4107 *
4108 * If the ACK covers all the data sent at the time the window was
4109 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE.
4110 *
4111 * Should we send ACKs in response to ACK only segments?
4112 */
4113
4114 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) {
4115 if ((tcp->tcp_is_wnd_shrnk) &&
4116 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) {
4117 uint32_t data_acked_ahead_snxt;
4118
4119 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt;
4120 tcp_update_xmit_tail(tcp, seg_ack);
4121 tcp->tcp_unsent -= data_acked_ahead_snxt;
4122 } else {
4123 TCPS_BUMP_MIB(tcps, tcpInAckUnsent);
4124 /* drop the received segment */
4125 freemsg(mp);
4126
4127 /*
4128 * Send back an ACK. If tcp_drop_ack_unsent_cnt is
4129 * greater than 0, check if the number of such
4130 * bogus ACKs is greater than that count. If yes,
4131 * don't send back any ACK. This prevents TCP from
4132 * getting into an ACK storm if somehow an attacker
4133 * successfully spoofs an acceptable segment to our
4134 * peer. If this continues (count > 2 X threshold),
4135 * we should abort this connection.
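 * For illustration, with the default tcp_drop_ack_unsent_cnt of 10:
 * the first 10 such bogus ACKs are answered normally, the 11th through
 * 20th are dropped without a reply, and the 21st aborts the connection
 * with EPROTO.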
4136 */
4137 if (tcp_drop_ack_unsent_cnt > 0 &&
4138 ++tcp->tcp_in_ack_unsent >
4139 tcp_drop_ack_unsent_cnt) {
4140 TCP_STAT(tcps, tcp_in_ack_unsent_drop);
4141 if (tcp->tcp_in_ack_unsent > 2 *
4142 tcp_drop_ack_unsent_cnt) {
4143 (void) tcp_clean_death(tcp, EPROTO);
4144 }
4145 return;
4146 }
4147 mp = tcp_ack_mp(tcp);
4148 if (mp != NULL) {
4149 BUMP_LOCAL(tcp->tcp_obsegs);
4150 TCPS_BUMP_MIB(tcps, tcpOutAck);
4151 tcp_send_data(tcp, mp);
4152 }
4153 return;
4154 }
4155 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack,
4156 tcp->tcp_snxt_shrunk)) {
4157 tcp->tcp_is_wnd_shrnk = B_FALSE;
4158 }
4159
4160 /*
4161 * TCP gets a new ACK, update the notsack'ed list to delete those
4162 * blocks that are covered by this ACK.
4163 */
4164 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
4165 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
4166 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
4167 }
4168
4169 /*
4170 * If we got an ACK after fast retransmit, check to see
4171 * if it is a partial ACK. If it is not and the congestion
4172 * window was inflated to account for the other side's
4173 * cached packets, retract it. If it is, do Hoe's algorithm.
4174 */
4175 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
4176 ASSERT(tcp->tcp_rexmit == B_FALSE);
4177 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
4178 tcp->tcp_dupack_cnt = 0;
4179 /*
4180 * Restore the original tcp_cwnd_ssthresh after
4181 * the fast retransmit phase.
4182 */
4183 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
4184 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
4185 }
4186 tcp->tcp_rexmit_max = seg_ack;
4187 tcp->tcp_cwnd_cnt = 0;
4188
4189 /*
4190 * Remove all notsack info to avoid confusion with
4191 * the next fast retransmit/recovery phase.
4192 */
4193 if (tcp->tcp_snd_sack_ok) {
4194 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
4195 tcp);
4196 }
4197 } else {
4198 if (tcp->tcp_snd_sack_ok &&
4199 tcp->tcp_notsack_list != NULL) {
4200 flags |= TH_NEED_SACK_REXMIT;
4201 tcp->tcp_pipe -= mss;
4202 if (tcp->tcp_pipe < 0)
4203 tcp->tcp_pipe = 0;
4204 } else {
4205 /*
4206 * Hoe's algorithm:
4207 *
4208 * Retransmit the unack'ed segment and
4209 * restart fast recovery. Note that we
4210 * need to scale back tcp_cwnd to the
4211 * original value when we started fast
4212 * recovery. This is to prevent overly
4213 * aggressive behaviour in sending new
4214 * segments.
4215 */
4216 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
4217 tcps->tcps_dupack_fast_retransmit * mss;
4218 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
4219 flags |= TH_REXMIT_NEEDED;
4220 }
4221 }
4222 } else {
4223 tcp->tcp_dupack_cnt = 0;
4224 if (tcp->tcp_rexmit) {
4225 /*
4226 * TCP is retransmitting. If the ACK ack's all
4227 * outstanding data, update tcp_rexmit_max and
4228 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
4229 * to the correct value.
4230 *
4231 * Note that SEQ_LEQ() is used. This is to avoid
4232 * unnecessary fast retransmit caused by dup ACKs
4233 * received when TCP does slow start retransmission
4234 * after a time out. During this phase, TCP may
4235 * send out segments which are already received.
4236 * This causes dup ACKs to be sent back.
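 * So while SEQ_LEQ(seg_ack, tcp_rexmit_max) holds we are still inside
 * the retransmission phase; once the ACK passes tcp_rexmit_max,
 * tcp_rexmit is cleared below and normal transmission resumes.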
4237 */
4238 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
4239 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
4240 tcp->tcp_rexmit_nxt = seg_ack;
4241 }
4242 if (seg_ack != tcp->tcp_rexmit_max) {
4243 flags |= TH_XMIT_NEEDED;
4244 }
4245 } else {
4246 tcp->tcp_rexmit = B_FALSE;
4247 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
4248 }
4249 tcp->tcp_ms_we_have_waited = 0;
4250 }
4251 }
4252
4253 TCPS_BUMP_MIB(tcps, tcpInAckSegs);
4254 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked);
4255 tcp->tcp_suna = seg_ack;
4256 if (tcp->tcp_zero_win_probe != 0) {
4257 tcp->tcp_zero_win_probe = 0;
4258 tcp->tcp_timer_backoff = 0;
4259 }
4260
4261 /*
4262 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
4263 * Note that it cannot be the SYN being ack'ed. The code flow
4264 * will not reach here.
4265 */
4266 if (mp1 == NULL) {
4267 goto fin_acked;
4268 }
4269
4270 /*
4271 * Update the congestion window.
4272 *
4273 * If TCP is not ECN capable or TCP is ECN capable but the
4274 * congestion experience bit is not set, increase the tcp_cwnd as
4275 * usual.
4276 */
4277 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
4278 cwnd = tcp->tcp_cwnd;
4279 add = mss;
4280
4281 if (cwnd >= tcp->tcp_cwnd_ssthresh) {
4282 /*
4283 * This is to prevent an increase of less than 1 MSS of
4284 * tcp_cwnd. With partial increase, tcp_wput_data()
4285 * may send out tinygrams in order to preserve mblk
4286 * boundaries.
4287 *
4288 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
4289 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
4290 * increased by 1 MSS for every RTT.
4291 */
4292 if (tcp->tcp_cwnd_cnt <= 0) {
4293 tcp->tcp_cwnd_cnt = cwnd + add;
4294 } else {
4295 tcp->tcp_cwnd_cnt -= add;
4296 add = 0;
4297 }
4298 }
4299 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
4300 }
4301
4302 /* See if the latest urgent data has been acknowledged */
4303 if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
4304 SEQ_GT(seg_ack, tcp->tcp_urg))
4305 tcp->tcp_valid_bits &= ~TCP_URG_VALID;
4306
4307 /* Can we update the RTT estimates? */
4308 if (tcp->tcp_snd_ts_ok) {
4309 /* Ignore zero timestamp echo-reply. */
4310 if (tcpopt.tcp_opt_ts_ecr != 0) {
4311 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
4312 (int32_t)tcpopt.tcp_opt_ts_ecr);
4313 }
4314
4315 /* If needed, restart the timer. */
4316 if (tcp->tcp_set_timer == 1) {
4317 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4318 tcp->tcp_set_timer = 0;
4319 }
4320 /*
4321 * Update tcp_csuna in case the other side stops sending
4322 * us timestamps.
4323 */
4324 tcp->tcp_csuna = tcp->tcp_snxt;
4325 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
4326 /*
4327 * An ACK sequence we haven't seen before, so get the RTT
4328 * and update the RTO. But first check if the timestamp is
4329 * valid to use.
4330 */
4331 if ((mp1->b_next != NULL) &&
4332 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
4333 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
4334 (int32_t)(intptr_t)mp1->b_prev);
4335 else
4336 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);
4337
4338 /* Remember the last sequence to be ACKed */
4339 tcp->tcp_csuna = seg_ack;
4340 if (tcp->tcp_set_timer == 1) {
4341 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4342 tcp->tcp_set_timer = 0;
4343 }
4344 } else {
4345 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);
4346 }
4347
4348 /* Eat acknowledged bytes off the xmit queue.
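 * Each mblk on tcp_xmit_head covers b_wptr - b_rptr bytes of
 * transmitted data; fully acked mblks are freed, while a partially
 * acked mblk merely has its b_rptr advanced.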
*/
4349 for (;;) {
4350 mblk_t *mp2;
4351 uchar_t *wptr;
4352
4353 wptr = mp1->b_wptr;
4354 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
4355 bytes_acked -= (int)(wptr - mp1->b_rptr);
4356 if (bytes_acked < 0) {
4357 mp1->b_rptr = wptr + bytes_acked;
4358 /*
4359 * Set a new timestamp if all the bytes timed by the
4360 * old timestamp have been ack'ed.
4361 */
4362 if (SEQ_GT(seg_ack,
4363 (uint32_t)(uintptr_t)(mp1->b_next))) {
4364 mp1->b_prev =
4365 (mblk_t *)(uintptr_t)LBOLT_FASTPATH;
4366 mp1->b_next = NULL;
4367 }
4368 break;
4369 }
4370 mp1->b_next = NULL;
4371 mp1->b_prev = NULL;
4372 mp2 = mp1;
4373 mp1 = mp1->b_cont;
4374
4375 /*
4376 * This notification is required for some zero-copy
4377 * clients to maintain a copy semantic. After the data
4378 * is ack'ed, the client is safe to modify or reuse the buffer.
4379 */
4380 if (tcp->tcp_snd_zcopy_aware &&
4381 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
4382 tcp_zcopy_notify(tcp);
4383 freeb(mp2);
4384 if (bytes_acked == 0) {
4385 if (mp1 == NULL) {
4386 /* Everything is ack'ed, clear the tail. */
4387 tcp->tcp_xmit_tail = NULL;
4388 /*
4389 * Cancel the timer unless we are still
4390 * waiting for an ACK for the FIN packet.
4391 */
4392 if (tcp->tcp_timer_tid != 0 &&
4393 tcp->tcp_snxt == tcp->tcp_suna) {
4394 (void) TCP_TIMER_CANCEL(tcp,
4395 tcp->tcp_timer_tid);
4396 tcp->tcp_timer_tid = 0;
4397 }
4398 goto pre_swnd_update;
4399 }
4400 if (mp2 != tcp->tcp_xmit_tail)
4401 break;
4402 tcp->tcp_xmit_tail = mp1;
4403 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
4404 (uintptr_t)INT_MAX);
4405 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
4406 mp1->b_rptr);
4407 break;
4408 }
4409 if (mp1 == NULL) {
4410 /*
4411 * More was acked but there is nothing more
4412 * outstanding. This means that the FIN was
4413 * just acked or that we're talking to a clown.
4414 */
4415 fin_acked:
4416 ASSERT(tcp->tcp_fin_sent);
4417 tcp->tcp_xmit_tail = NULL;
4418 if (tcp->tcp_fin_sent) {
4419 /* FIN was acked - making progress */
4420 if (!tcp->tcp_fin_acked)
4421 tcp->tcp_ip_forward_progress = B_TRUE;
4422 tcp->tcp_fin_acked = B_TRUE;
4423 if (tcp->tcp_linger_tid != 0 &&
4424 TCP_TIMER_CANCEL(tcp,
4425 tcp->tcp_linger_tid) >= 0) {
4426 tcp_stop_lingering(tcp);
4427 freemsg(mp);
4428 mp = NULL;
4429 }
4430 } else {
4431 /*
4432 * We should never get here because
4433 * we have already checked that the
4434 * number of bytes ack'ed should be
4435 * smaller than or equal to what we
4436 * have sent so far (it is the
4437 * acceptability check of the ACK).
4438 * We can only get here if the send
4439 * queue is corrupted.
4440 *
4441 * Terminate the connection and
4442 * panic the system. It is better
4443 * for us to panic instead of
4444 * continuing, to avoid further disaster.
4445 */
4446 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
4447 tcp->tcp_rnxt, TH_RST|TH_ACK);
4448 panic("Memory corruption "
4449 "detected for connection %s.",
4450 tcp_display(tcp, NULL,
4451 DISP_ADDR_AND_PORT));
4452 /*NOTREACHED*/
4453 }
4454 goto pre_swnd_update;
4455 }
4456 ASSERT(mp2 != tcp->tcp_xmit_tail);
4457 }
4458 if (tcp->tcp_unsent) {
4459 flags |= TH_XMIT_NEEDED;
4460 }
4461 pre_swnd_update:
4462 tcp->tcp_xmit_head = mp1;
4463 swnd_update:
4464 /*
4465 * The following check is different from most other implementations.
4466 * For bi-directional transfer, when segments are dropped, the
4467 * "normal" check will not accept a window update in those
4468 * retransmitted segments.
If such updates are not accepted, TCP may send out
4469 * segments which are outside the receiver's window. As TCP accepts
4470 * the ack in those retransmitted segments, if the window update in
4471 * the same segment is not accepted, TCP will incorrectly calculate
4472 * that it can send more segments. This can create a deadlock
4473 * with the receiver if its window becomes zero.
4474 */
4475 if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
4476 SEQ_LT(tcp->tcp_swl1, seg_seq) ||
4477 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
4478 /*
4479 * The criteria for update is:
4480 *
4481 * 1. the segment acknowledges some data. Or
4482 * 2. the segment is new, i.e. it has a higher seq num. Or
4483 * 3. the segment is not old and the advertised window is
4484 * larger than the previous advertised window.
4485 */
4486 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
4487 flags |= TH_XMIT_NEEDED;
4488 tcp->tcp_swnd = new_swnd;
4489 if (new_swnd > tcp->tcp_max_swnd)
4490 tcp->tcp_max_swnd = new_swnd;
4491 tcp->tcp_swl1 = seg_seq;
4492 tcp->tcp_swl2 = seg_ack;
4493 }
4494 est:
4495 if (tcp->tcp_state > TCPS_ESTABLISHED) {
4496
4497 switch (tcp->tcp_state) {
4498 case TCPS_FIN_WAIT_1:
4499 if (tcp->tcp_fin_acked) {
4500 tcp->tcp_state = TCPS_FIN_WAIT_2;
4501 DTRACE_TCP6(state__change, void, NULL,
4502 ip_xmit_attr_t *, connp->conn_ixa,
4503 void, NULL, tcp_t *, tcp, void, NULL,
4504 int32_t, TCPS_FIN_WAIT_1);
4505 /*
4506 * We implement the non-standard BSD/SunOS
4507 * FIN_WAIT_2 flushing algorithm.
4508 * If there is no user attached to this
4509 * TCP endpoint, then this TCP struct
4510 * could hang around forever in FIN_WAIT_2
4511 * state if the peer forgets to send us
4512 * a FIN. To prevent this, we wait only
4513 * 2*MSL (a convenient time value) for
4514 * the FIN to arrive. If it doesn't show up,
4515 * we flush the TCP endpoint. This algorithm,
4516 * though a violation of RFC-793, has worked
4517 * for over 10 years in BSD systems.
4518 * Note: SunOS 4.x waits 675 seconds before
4519 * flushing the FIN_WAIT_2 connection.
4520 */
4521 TCP_TIMER_RESTART(tcp,
4522 tcp->tcp_fin_wait_2_flush_interval);
4523 }
4524 break;
4525 case TCPS_FIN_WAIT_2:
4526 break; /* Shutdown hook? */
4527 case TCPS_LAST_ACK:
4528 freemsg(mp);
4529 if (tcp->tcp_fin_acked) {
4530 (void) tcp_clean_death(tcp, 0);
4531 return;
4532 }
4533 goto xmit_check;
4534 case TCPS_CLOSING:
4535 if (tcp->tcp_fin_acked) {
4536 SET_TIME_WAIT(tcps, tcp, connp);
4537 DTRACE_TCP6(state__change, void, NULL,
4538 ip_xmit_attr_t *, connp->conn_ixa, void,
4539 NULL, tcp_t *, tcp, void, NULL, int32_t,
4540 TCPS_CLOSING);
4541 }
4542 /*FALLTHRU*/
4543 case TCPS_CLOSE_WAIT:
4544 freemsg(mp);
4545 goto xmit_check;
4546 default:
4547 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
4548 break;
4549 }
4550 }
4551 if (flags & TH_FIN) {
4552 /* Make sure we ack the fin */
4553 flags |= TH_ACK_NEEDED;
4554 if (!tcp->tcp_fin_rcvd) {
4555 tcp->tcp_fin_rcvd = B_TRUE;
4556 tcp->tcp_rnxt++;
4557 tcpha = tcp->tcp_tcpha;
4558 tcpha->tha_ack = htonl(tcp->tcp_rnxt);
4559
4560 /*
4561 * Generate the ordrel_ind at the end unless the
4562 * conn is detached or it is a STREAMS based eager.
4563 * In the eager case we defer the notification until
4564 * tcp_accept_finish has run.
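 * The condition below therefore reads: not detached, and either a
 * non-STREAMS socket or a STREAMS endpoint that is not an eager still
 * awaiting accept.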
4565 */ 4566 if (!TCP_IS_DETACHED(tcp) && (IPCL_IS_NONSTR(connp) || 4567 (tcp->tcp_listener == NULL && 4568 !tcp->tcp_hard_binding))) 4569 flags |= TH_ORDREL_NEEDED; 4570 switch (tcp->tcp_state) { 4571 case TCPS_SYN_RCVD: 4572 tcp->tcp_state = TCPS_CLOSE_WAIT; 4573 DTRACE_TCP6(state__change, void, NULL, 4574 ip_xmit_attr_t *, connp->conn_ixa, 4575 void, NULL, tcp_t *, tcp, void, NULL, 4576 int32_t, TCPS_SYN_RCVD); 4577 /* Keepalive? */ 4578 break; 4579 case TCPS_ESTABLISHED: 4580 tcp->tcp_state = TCPS_CLOSE_WAIT; 4581 DTRACE_TCP6(state__change, void, NULL, 4582 ip_xmit_attr_t *, connp->conn_ixa, 4583 void, NULL, tcp_t *, tcp, void, NULL, 4584 int32_t, TCPS_ESTABLISHED); 4585 /* Keepalive? */ 4586 break; 4587 case TCPS_FIN_WAIT_1: 4588 if (!tcp->tcp_fin_acked) { 4589 tcp->tcp_state = TCPS_CLOSING; 4590 DTRACE_TCP6(state__change, void, NULL, 4591 ip_xmit_attr_t *, connp->conn_ixa, 4592 void, NULL, tcp_t *, tcp, void, 4593 NULL, int32_t, TCPS_FIN_WAIT_1); 4594 break; 4595 } 4596 /* FALLTHRU */ 4597 case TCPS_FIN_WAIT_2: 4598 SET_TIME_WAIT(tcps, tcp, connp); 4599 DTRACE_TCP6(state__change, void, NULL, 4600 ip_xmit_attr_t *, connp->conn_ixa, void, 4601 NULL, tcp_t *, tcp, void, NULL, int32_t, 4602 TCPS_FIN_WAIT_2); 4603 if (seg_len) { 4604 /* 4605 * implies data piggybacked on FIN. 4606 * break to handle data. 4607 */ 4608 break; 4609 } 4610 freemsg(mp); 4611 goto ack_check; 4612 } 4613 } 4614 } 4615 if (mp == NULL) 4616 goto xmit_check; 4617 if (seg_len == 0) { 4618 freemsg(mp); 4619 goto xmit_check; 4620 } 4621 if (mp->b_rptr == mp->b_wptr) { 4622 /* 4623 * The header has been consumed, so we remove the 4624 * zero-length mblk here. 4625 */ 4626 mp1 = mp; 4627 mp = mp->b_cont; 4628 freeb(mp1); 4629 } 4630 update_ack: 4631 tcpha = tcp->tcp_tcpha; 4632 tcp->tcp_rack_cnt++; 4633 { 4634 uint32_t cur_max; 4635 4636 cur_max = tcp->tcp_rack_cur_max; 4637 if (tcp->tcp_rack_cnt >= cur_max) { 4638 /* 4639 * We have more unacked data than we should - send 4640 * an ACK now. 4641 */ 4642 flags |= TH_ACK_NEEDED; 4643 cur_max++; 4644 if (cur_max > tcp->tcp_rack_abs_max) 4645 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 4646 else 4647 tcp->tcp_rack_cur_max = cur_max; 4648 } else if (TCP_IS_DETACHED(tcp)) { 4649 /* We don't have an ACK timer for detached TCP. */ 4650 flags |= TH_ACK_NEEDED; 4651 } else if (seg_len < mss) { 4652 /* 4653 * If we get a segment that is less than an mss, and we 4654 * already have unacknowledged data, and the amount 4655 * unacknowledged is not a multiple of mss, then we 4656 * better generate an ACK now. Otherwise, this may be 4657 * the tail piece of a transaction, and we would rather 4658 * wait for the response. 4659 */ 4660 uint32_t udif; 4661 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 4662 (uintptr_t)INT_MAX); 4663 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 4664 if (udif && (udif % mss)) 4665 flags |= TH_ACK_NEEDED; 4666 else 4667 flags |= TH_ACK_TIMER_NEEDED; 4668 } else { 4669 /* Start delayed ack timer */ 4670 flags |= TH_ACK_TIMER_NEEDED; 4671 } 4672 } 4673 tcp->tcp_rnxt += seg_len; 4674 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4675 4676 if (mp == NULL) 4677 goto xmit_check; 4678 4679 /* Update SACK list */ 4680 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 4681 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 4682 &(tcp->tcp_num_sack_blk)); 4683 } 4684 4685 if (tcp->tcp_urp_mp) { 4686 tcp->tcp_urp_mp->b_cont = mp; 4687 mp = tcp->tcp_urp_mp; 4688 tcp->tcp_urp_mp = NULL; 4689 /* Ready for a new signal. 
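 * Clearing tcp_urp_last_valid below means the next urgent pointer,
 * even one carrying the same value, will generate a fresh signal.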
*/
4690 tcp->tcp_urp_last_valid = B_FALSE;
4691 #ifdef DEBUG
4692 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
4693 "tcp_rput: sending exdata_ind %s",
4694 tcp_display(tcp, NULL, DISP_PORT_ONLY));
4695 #endif /* DEBUG */
4696 }
4697
4698 /*
4699 * Check for ancillary data changes compared to last segment.
4700 */
4701 if (connp->conn_recv_ancillary.crb_all != 0) {
4702 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira);
4703 if (mp == NULL)
4704 return;
4705 }
4706
4707 if (IPCL_IS_NONSTR(connp)) {
4708 /*
4709 * Non-STREAMS socket
4710 */
4711 boolean_t push = flags & (TH_PUSH|TH_FIN);
4712 int error;
4713
4714 if ((*sockupcalls->su_recv)(connp->conn_upper_handle,
4715 mp, seg_len, 0, &error, &push) <= 0) {
4716 /*
4717 * We should never be in middle of a
4718 * fallback, the squeue guarantees that.
4719 */
4720 ASSERT(error != EOPNOTSUPP);
4721 if (error == ENOSPC)
4722 tcp->tcp_rwnd -= seg_len;
4723 } else if (push) {
4724 /* PUSH bit set and sockfs is not flow controlled */
4725 flags |= tcp_rwnd_reopen(tcp);
4726 }
4727 } else if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) {
4728 /*
4729 * Side queue inbound data until the accept happens.
4730 * tcp_accept/tcp_rput drains this when the accept happens.
4731 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
4732 * T_EXDATA_IND) it is queued on b_next.
4733 * XXX Make urgent data use this. Requires:
4734 * Removing tcp_listener check for TH_URG
4735 * Making M_PCPROTO and MARK messages skip the eager case
4736 */
4737
4738 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
4739 } else {
4740 /* Active STREAMS socket */
4741 if (mp->b_datap->db_type != M_DATA ||
4742 (flags & TH_MARKNEXT_NEEDED)) {
4743 if (tcp->tcp_rcv_list != NULL) {
4744 flags |= tcp_rcv_drain(tcp);
4745 }
4746 ASSERT(tcp->tcp_rcv_list == NULL ||
4747 tcp->tcp_fused_sigurg);
4748
4749 if (flags & TH_MARKNEXT_NEEDED) {
4750 #ifdef DEBUG
4751 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
4752 "tcp_rput: sending MSGMARKNEXT %s",
4753 tcp_display(tcp, NULL,
4754 DISP_PORT_ONLY));
4755 #endif /* DEBUG */
4756 mp->b_flag |= MSGMARKNEXT;
4757 flags &= ~TH_MARKNEXT_NEEDED;
4758 }
4759
4760 if (is_system_labeled())
4761 tcp_setcred_data(mp, ira);
4762
4763 putnext(connp->conn_rq, mp);
4764 if (!canputnext(connp->conn_rq))
4765 tcp->tcp_rwnd -= seg_len;
4766 } else if ((flags & (TH_PUSH|TH_FIN)) ||
4767 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) {
4768 if (tcp->tcp_rcv_list != NULL) {
4769 /*
4770 * Enqueue the new segment first and then
4771 * call tcp_rcv_drain() to send all data
4772 * up. The other way to do this is to
4773 * send all queued data up and then call
4774 * putnext() to send the new segment up.
4775 * That would make the else part below
4776 * unnecessary.
4777 *
4778 * We don't do it that way to avoid one more
4779 * call to canputnext(), as tcp_rcv_drain()
4780 * needs to call canputnext() anyway.
4781 */
4782 tcp_rcv_enqueue(tcp, mp, seg_len,
4783 ira->ira_cred);
4784 flags |= tcp_rcv_drain(tcp);
4785 } else {
4786 if (is_system_labeled())
4787 tcp_setcred_data(mp, ira);
4788
4789 putnext(connp->conn_rq, mp);
4790 if (!canputnext(connp->conn_rq))
4791 tcp->tcp_rwnd -= seg_len;
4792 }
4793 } else {
4794 /*
4795 * Enqueue all packets when processing an mblk
4796 * from the co queue and also enqueue normal packets.
4797 */
4798 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
4799 }
4800 /*
4801 * Make sure the timer is running if we have data waiting
4802 * for a push bit.
This provides resiliency against 4803 * implementations that do not correctly generate push bits. 4804 */ 4805 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 4806 /* 4807 * The connection may be closed at this point, so don't 4808 * do anything for a detached tcp. 4809 */ 4810 if (!TCP_IS_DETACHED(tcp)) 4811 tcp->tcp_push_tid = TCP_TIMER(tcp, 4812 tcp_push_timer, 4813 tcps->tcps_push_timer_interval); 4814 } 4815 } 4816 4817 xmit_check: 4818 /* Is there anything left to do? */ 4819 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4820 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 4821 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 4822 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4823 goto done; 4824 4825 /* Any transmit work to do and a non-zero window? */ 4826 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 4827 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 4828 if (flags & TH_REXMIT_NEEDED) { 4829 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 4830 4831 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans); 4832 if (snd_size > mss) 4833 snd_size = mss; 4834 if (snd_size > tcp->tcp_swnd) 4835 snd_size = tcp->tcp_swnd; 4836 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 4837 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 4838 B_TRUE); 4839 4840 if (mp1 != NULL) { 4841 tcp->tcp_xmit_head->b_prev = 4842 (mblk_t *)LBOLT_FASTPATH; 4843 tcp->tcp_csuna = tcp->tcp_snxt; 4844 TCPS_BUMP_MIB(tcps, tcpRetransSegs); 4845 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, 4846 snd_size); 4847 tcp_send_data(tcp, mp1); 4848 } 4849 } 4850 if (flags & TH_NEED_SACK_REXMIT) { 4851 tcp_sack_rexmit(tcp, &flags); 4852 } 4853 /* 4854 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 4855 * out new segment. Note that tcp_rexmit should not be 4856 * set, otherwise TH_LIMIT_XMIT should not be set. 4857 */ 4858 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 4859 if (!tcp->tcp_rexmit) { 4860 tcp_wput_data(tcp, NULL, B_FALSE); 4861 } else { 4862 tcp_ss_rexmit(tcp); 4863 } 4864 } 4865 /* 4866 * Adjust tcp_cwnd back to normal value after sending 4867 * new data segments. 4868 */ 4869 if (flags & TH_LIMIT_XMIT) { 4870 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 4871 /* 4872 * This will restart the timer. Restarting the 4873 * timer is used to avoid a timeout before the 4874 * limited transmitted segment's ACK gets back. 4875 */ 4876 if (tcp->tcp_xmit_head != NULL) 4877 tcp->tcp_xmit_head->b_prev = 4878 (mblk_t *)LBOLT_FASTPATH; 4879 } 4880 4881 /* Anything more to do? */ 4882 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 4883 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4884 goto done; 4885 } 4886 ack_check: 4887 if (flags & TH_SEND_URP_MARK) { 4888 ASSERT(tcp->tcp_urp_mark_mp); 4889 ASSERT(!IPCL_IS_NONSTR(connp)); 4890 /* 4891 * Send up any queued data and then send the mark message 4892 */ 4893 if (tcp->tcp_rcv_list != NULL) { 4894 flags |= tcp_rcv_drain(tcp); 4895 4896 } 4897 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4898 mp1 = tcp->tcp_urp_mark_mp; 4899 tcp->tcp_urp_mark_mp = NULL; 4900 if (is_system_labeled()) 4901 tcp_setcred_data(mp1, ira); 4902 4903 putnext(connp->conn_rq, mp1); 4904 #ifdef DEBUG 4905 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4906 "tcp_rput: sending zero-length %s %s", 4907 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 4908 "MSGNOTMARKNEXT"), 4909 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4910 #endif /* DEBUG */ 4911 flags &= ~TH_SEND_URP_MARK; 4912 } 4913 if (flags & TH_ACK_NEEDED) { 4914 /* 4915 * Time to send an ack for some reason. 
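 * tcp_ack_mp() below builds the bare ACK segment, and any pending
 * delayed-ACK timer is cancelled since this immediate ACK supersedes
 * it.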
4916 */
4917 mp1 = tcp_ack_mp(tcp);
4918
4919 if (mp1 != NULL) {
4920 tcp_send_data(tcp, mp1);
4921 BUMP_LOCAL(tcp->tcp_obsegs);
4922 TCPS_BUMP_MIB(tcps, tcpOutAck);
4923 }
4924 if (tcp->tcp_ack_tid != 0) {
4925 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid);
4926 tcp->tcp_ack_tid = 0;
4927 }
4928 }
4929 if (flags & TH_ACK_TIMER_NEEDED) {
4930 /*
4931 * Arrange for deferred ACK or push wait timeout.
4932 * Start timer if it is not already running.
4933 */
4934 if (tcp->tcp_ack_tid == 0) {
4935 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer,
4936 tcp->tcp_localnet ?
4937 tcps->tcps_local_dack_interval :
4938 tcps->tcps_deferred_ack_interval);
4939 }
4940 }
4941 if (flags & TH_ORDREL_NEEDED) {
4942 /*
4943 * Notify upper layer about an orderly release. If this is
4944 * a non-STREAMS socket, then just make an upcall. For STREAMS
4945 * we send up an ordrel_ind, unless this is an eager, in which
4946 * case the ordrel will be sent when tcp_accept_finish runs.
4947 * Note that for non-STREAMS we make an upcall even if it is an
4948 * eager, because we have an upper handle to send it to.
4949 */
4950 ASSERT(IPCL_IS_NONSTR(connp) || tcp->tcp_listener == NULL);
4951 ASSERT(!tcp->tcp_detached);
4952
4953 if (IPCL_IS_NONSTR(connp)) {
4954 ASSERT(tcp->tcp_ordrel_mp == NULL);
4955 tcp->tcp_ordrel_done = B_TRUE;
4956 (*sockupcalls->su_opctl)(connp->conn_upper_handle,
4957 SOCK_OPCTL_SHUT_RECV, 0);
4958 goto done;
4959 }
4960
4961 if (tcp->tcp_rcv_list != NULL) {
4962 /*
4963 * Push any mblk(s) enqueued from co processing.
4964 */
4965 flags |= tcp_rcv_drain(tcp);
4966 }
4967 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
4968
4969 mp1 = tcp->tcp_ordrel_mp;
4970 tcp->tcp_ordrel_mp = NULL;
4971 tcp->tcp_ordrel_done = B_TRUE;
4972 putnext(connp->conn_rq, mp1);
4973 }
4974 done:
4975 ASSERT(!(flags & TH_MARKNEXT_NEEDED));
4976 }
4977
4978 /*
4979 * Attach ancillary data to a received TCP segment for the
4980 * ancillary pieces requested by the application that are
4981 * different from what they were in the previous data segment.
4982 *
4983 * Save the "current" values once memory allocation is ok so that
4984 * when memory allocation fails we can just wait for the next data segment.
4985 */
4986 static mblk_t *
4987 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp,
4988 ip_recv_attr_t *ira)
4989 {
4990 struct T_optdata_ind *todi;
4991 int optlen;
4992 uchar_t *optptr;
4993 struct T_opthdr *toh;
4994 crb_t addflag; /* Which pieces to add */
4995 mblk_t *mp1;
4996 conn_t *connp = tcp->tcp_connp;
4997
4998 optlen = 0;
4999 addflag.crb_all = 0;
5000 /* If app asked for pktinfo and the index has changed ... */
5001 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
5002 ira->ira_ruifindex != tcp->tcp_recvifindex) {
5003 optlen += sizeof (struct T_opthdr) +
5004 sizeof (struct in6_pktinfo);
5005 addflag.crb_ip_recvpktinfo = 1;
5006 }
5007 /* If app asked for hoplimit and it has changed ... */
5008 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
5009 ipp->ipp_hoplimit != tcp->tcp_recvhops) {
5010 optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
5011 addflag.crb_ipv6_recvhoplimit = 1;
5012 }
5013 /* If app asked for tclass and it has changed ... */
5014 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
5015 ipp->ipp_tclass != tcp->tcp_recvtclass) {
5016 optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
5017 addflag.crb_ipv6_recvtclass = 1;
5018 }
5019 /*
5020 * If app asked for hopbyhop headers and it has changed ...
5021 * For security labels, note that (1) security labels can't change on 5022 * a connected socket at all, (2) we're connected to at most one peer, 5023 * (3) if anything changes, then it must be some other extra option. 5024 */ 5025 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts && 5026 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 5027 (ipp->ipp_fields & IPPF_HOPOPTS), 5028 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 5029 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen; 5030 addflag.crb_ipv6_recvhopopts = 1; 5031 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 5032 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 5033 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 5034 return (mp); 5035 } 5036 /* If app asked for dst headers before routing headers ... */ 5037 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts && 5038 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen, 5039 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5040 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) { 5041 optlen += sizeof (struct T_opthdr) + 5042 ipp->ipp_rthdrdstoptslen; 5043 addflag.crb_ipv6_recvrthdrdstopts = 1; 5044 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts, 5045 &tcp->tcp_rthdrdstoptslen, 5046 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5047 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) 5048 return (mp); 5049 } 5050 /* If app asked for routing headers and it has changed ... */ 5051 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr && 5052 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 5053 (ipp->ipp_fields & IPPF_RTHDR), 5054 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 5055 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 5056 addflag.crb_ipv6_recvrthdr = 1; 5057 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 5058 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 5059 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 5060 return (mp); 5061 } 5062 /* If app asked for dest headers and it has changed ... */ 5063 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts || 5064 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) && 5065 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 5066 (ipp->ipp_fields & IPPF_DSTOPTS), 5067 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 5068 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 5069 addflag.crb_ipv6_recvdstopts = 1; 5070 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 5071 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 5072 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 5073 return (mp); 5074 } 5075 5076 if (optlen == 0) { 5077 /* Nothing to add */ 5078 return (mp); 5079 } 5080 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 5081 if (mp1 == NULL) { 5082 /* 5083 * Defer sending ancillary data until the next TCP segment 5084 * arrives. 5085 */ 5086 return (mp); 5087 } 5088 mp1->b_cont = mp; 5089 mp = mp1; 5090 mp->b_wptr += sizeof (*todi) + optlen; 5091 mp->b_datap->db_type = M_PROTO; 5092 todi = (struct T_optdata_ind *)mp->b_rptr; 5093 todi->PRIM_type = T_OPTDATA_IND; 5094 todi->DATA_flag = 1; /* MORE data */ 5095 todi->OPT_length = optlen; 5096 todi->OPT_offset = sizeof (*todi); 5097 optptr = (uchar_t *)&todi[1]; 5098 /* 5099 * If app asked for pktinfo and the index has changed ... 5100 * Note that the local address never changes for the connection. 
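 * Each option below is encoded as a struct T_opthdr immediately
 * followed by its value, e.g. for pktinfo:
 * [T_opthdr IPPROTO_IPV6/IPV6_PKTINFO][struct in6_pktinfo]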
5101 */ 5102 if (addflag.crb_ip_recvpktinfo) { 5103 struct in6_pktinfo *pkti; 5104 uint_t ifindex; 5105 5106 ifindex = ira->ira_ruifindex; 5107 toh = (struct T_opthdr *)optptr; 5108 toh->level = IPPROTO_IPV6; 5109 toh->name = IPV6_PKTINFO; 5110 toh->len = sizeof (*toh) + sizeof (*pkti); 5111 toh->status = 0; 5112 optptr += sizeof (*toh); 5113 pkti = (struct in6_pktinfo *)optptr; 5114 pkti->ipi6_addr = connp->conn_laddr_v6; 5115 pkti->ipi6_ifindex = ifindex; 5116 optptr += sizeof (*pkti); 5117 ASSERT(OK_32PTR(optptr)); 5118 /* Save as "last" value */ 5119 tcp->tcp_recvifindex = ifindex; 5120 } 5121 /* If app asked for hoplimit and it has changed ... */ 5122 if (addflag.crb_ipv6_recvhoplimit) { 5123 toh = (struct T_opthdr *)optptr; 5124 toh->level = IPPROTO_IPV6; 5125 toh->name = IPV6_HOPLIMIT; 5126 toh->len = sizeof (*toh) + sizeof (uint_t); 5127 toh->status = 0; 5128 optptr += sizeof (*toh); 5129 *(uint_t *)optptr = ipp->ipp_hoplimit; 5130 optptr += sizeof (uint_t); 5131 ASSERT(OK_32PTR(optptr)); 5132 /* Save as "last" value */ 5133 tcp->tcp_recvhops = ipp->ipp_hoplimit; 5134 } 5135 /* If app asked for tclass and it has changed ... */ 5136 if (addflag.crb_ipv6_recvtclass) { 5137 toh = (struct T_opthdr *)optptr; 5138 toh->level = IPPROTO_IPV6; 5139 toh->name = IPV6_TCLASS; 5140 toh->len = sizeof (*toh) + sizeof (uint_t); 5141 toh->status = 0; 5142 optptr += sizeof (*toh); 5143 *(uint_t *)optptr = ipp->ipp_tclass; 5144 optptr += sizeof (uint_t); 5145 ASSERT(OK_32PTR(optptr)); 5146 /* Save as "last" value */ 5147 tcp->tcp_recvtclass = ipp->ipp_tclass; 5148 } 5149 if (addflag.crb_ipv6_recvhopopts) { 5150 toh = (struct T_opthdr *)optptr; 5151 toh->level = IPPROTO_IPV6; 5152 toh->name = IPV6_HOPOPTS; 5153 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen; 5154 toh->status = 0; 5155 optptr += sizeof (*toh); 5156 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 5157 optptr += ipp->ipp_hopoptslen; 5158 ASSERT(OK_32PTR(optptr)); 5159 /* Save as last value */ 5160 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 5161 (ipp->ipp_fields & IPPF_HOPOPTS), 5162 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 5163 } 5164 if (addflag.crb_ipv6_recvrthdrdstopts) { 5165 toh = (struct T_opthdr *)optptr; 5166 toh->level = IPPROTO_IPV6; 5167 toh->name = IPV6_RTHDRDSTOPTS; 5168 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen; 5169 toh->status = 0; 5170 optptr += sizeof (*toh); 5171 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen); 5172 optptr += ipp->ipp_rthdrdstoptslen; 5173 ASSERT(OK_32PTR(optptr)); 5174 /* Save as last value */ 5175 ip_savebuf((void **)&tcp->tcp_rthdrdstopts, 5176 &tcp->tcp_rthdrdstoptslen, 5177 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5178 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen); 5179 } 5180 if (addflag.crb_ipv6_recvrthdr) { 5181 toh = (struct T_opthdr *)optptr; 5182 toh->level = IPPROTO_IPV6; 5183 toh->name = IPV6_RTHDR; 5184 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 5185 toh->status = 0; 5186 optptr += sizeof (*toh); 5187 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 5188 optptr += ipp->ipp_rthdrlen; 5189 ASSERT(OK_32PTR(optptr)); 5190 /* Save as last value */ 5191 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 5192 (ipp->ipp_fields & IPPF_RTHDR), 5193 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 5194 } 5195 if (addflag.crb_ipv6_recvdstopts) { 5196 toh = (struct T_opthdr *)optptr; 5197 toh->level = IPPROTO_IPV6; 5198 toh->name = IPV6_DSTOPTS; 5199 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 5200 toh->status = 0; 5201 optptr += sizeof 
(*toh);
5202 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
5203 optptr += ipp->ipp_dstoptslen;
5204 ASSERT(OK_32PTR(optptr));
5205 /* Save as last value */
5206 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen,
5207 (ipp->ipp_fields & IPPF_DSTOPTS),
5208 ipp->ipp_dstopts, ipp->ipp_dstoptslen);
5209 }
5210 ASSERT(optptr == mp->b_wptr);
5211 return (mp);
5212 }
5213
5214 /* The minimum smoothed mean deviation used in the RTO calculation. */
5215 #define TCP_SD_MIN 400
5216
5217 /*
5218 * Set RTO for this connection. The formula is from Jacobson and Karels'
5219 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names
5220 * are the same as those in Appendix A.2 of that paper.
5221 *
5222 * m = new measurement
5223 * sa = smoothed RTT average (8 * average estimates).
5224 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
5225 */
5226 static void
5227 tcp_set_rto(tcp_t *tcp, clock_t rtt)
5228 {
5229 long m = TICK_TO_MSEC(rtt);
5230 clock_t sa = tcp->tcp_rtt_sa;
5231 clock_t sv = tcp->tcp_rtt_sd;
5232 clock_t rto;
5233 tcp_stack_t *tcps = tcp->tcp_tcps;
5234
5235 TCPS_BUMP_MIB(tcps, tcpRttUpdate);
5236 tcp->tcp_rtt_update++;
5237
5238 /* A nonzero tcp_rtt_sa means this is not the first sample. */
5239 if (sa != 0) {
5240 /*
5241 * Update average estimator:
5242 * new rtt = 7/8 old rtt + 1/8 Error
5243 */
5244
5245 /* m is now Error in estimate. */
5246 m -= sa >> 3;
5247 if ((sa += m) <= 0) {
5248 /*
5249 * Don't allow the smoothed average to be negative.
5250 * We use 0 to denote reinitialization of the
5251 * variables.
5252 */
5253 sa = 1;
5254 }
5255
5256 /*
5257 * Update deviation estimator:
5258 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
5259 */
5260 if (m < 0)
5261 m = -m;
5262 m -= sv >> 2;
5263 sv += m;
5264 } else {
5265 /*
5266 * This follows BSD's implementation. So the reinitialized
5267 * RTO is 3 * m. We cannot go less than 2 because if the
5268 * link is bandwidth dominated, doubling the window size
5269 * during slow start means doubling the RTT. We want to be
5270 * more conservative when we reinitialize our estimates. 3
5271 * is just a convenient number.
5272 */
5273 sa = m << 3;
5274 sv = m << 1;
5275 }
5276 if (sv < TCP_SD_MIN) {
5277 /*
5278 * We do not know whether sa captures the delayed ACK
5279 * effect: in a long train of segments, a receiver
5280 * does not delay its ACKs. So set the minimum of sv
5281 * to TCP_SD_MIN, which defaults to 400 ms, twice
5282 * BSD's DATO. That means the minimum mean
5283 * deviation is 100 ms.
5284 *
5285 */
5286 sv = TCP_SD_MIN;
5287 }
5288 tcp->tcp_rtt_sa = sa;
5289 tcp->tcp_rtt_sd = sv;
5290 /*
5291 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
5292 *
5293 * Add tcp_rexmit_interval extra in case of extreme environments
5294 * where the algorithm fails to work. The default value of
5295 * tcp_rexmit_interval_extra should be 0.
5296 *
5297 * As we use a finer grained clock than BSD and update the
5298 * RTO for every ACK, add in another .25 of RTT to the
5299 * deviation of RTO to accommodate burstiness of 1/4 of
5300 * window size.
5301 */
5302 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5);
5303
5304 TCP_SET_RTO(tcp, rto);
5305
5306 /* Now, we can reset tcp_timer_backoff to use the new RTO... */
5307 tcp->tcp_timer_backoff = 0;
5308 }
5309
5310 /*
5311 * On a labeled system we have some protocols above TCP, such as RPC, which
5312 * appear to assume that every mblk in a chain has a db_credp.
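 *
 * (An aside on tcp_set_rto() above, as a worked example of its formula
 * with assumed numbers: a steady RTT of m = 100 ms drives sa toward
 * 8 * 100 = 800 while sv is floored at TCP_SD_MIN (400), so
 * rto = (sa >> 3) + sv + tcps_rexmit_interval_extra + (sa >> 5)
 * = 100 + 400 + 0 + 25 = 525 ms, before TCP_SET_RTO() applies the
 * stack's minimum and maximum bounds.)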
5313 */
5314 static void
5315 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira)
5316 {
5317 ASSERT(is_system_labeled());
5318 ASSERT(ira->ira_cred != NULL);
5319
5320 while (mp != NULL) {
5321 mblk_setcred(mp, ira->ira_cred, NOPID);
5322 mp = mp->b_cont;
5323 }
5324 }
5325
5326 uint_t
5327 tcp_rwnd_reopen(tcp_t *tcp)
5328 {
5329 uint_t ret = 0;
5330 uint_t thwin;
5331 conn_t *connp = tcp->tcp_connp;
5332
5333 /* Learn the latest rwnd information that we sent to the other side. */
5334 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win))
5335 << tcp->tcp_rcv_ws;
5336 /* This is the peer's calculated send window (our receive window). */
5337 thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
5338 /*
5339 * Increase the receive window to the max. But we need to do receiver
5340 * SWS avoidance. This means that we need to check that the increase
5341 * of the receive window is at least 1 MSS.
5342 */
5343 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) {
5344 /*
5345 * If the window that the other side knows is less than max
5346 * deferred ACKs' worth of segments, send an update immediately.
5347 */
5348 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
5349 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate);
5350 ret = TH_ACK_NEEDED;
5351 }
5352 tcp->tcp_rwnd = connp->conn_rcvbuf;
5353 }
5354 return (ret);
5355 }
5356
5357 /*
5358 * Handle a packet that has been reclassified by TCP.
5359 * This function drops the ref on connp that the caller had.
5360 */
5361 void
5362 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
5363 {
5364 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5365
5366 if (connp->conn_incoming_ifindex != 0 &&
5367 connp->conn_incoming_ifindex != ira->ira_ruifindex) {
5368 freemsg(mp);
5369 CONN_DEC_REF(connp);
5370 return;
5371 }
5372
5373 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) ||
5374 (ira->ira_flags & IRAF_IPSEC_SECURE)) {
5375 ip6_t *ip6h;
5376 ipha_t *ipha;
5377
5378 if (ira->ira_flags & IRAF_IS_IPV4) {
5379 ipha = (ipha_t *)mp->b_rptr;
5380 ip6h = NULL;
5381 } else {
5382 ipha = NULL;
5383 ip6h = (ip6_t *)mp->b_rptr;
5384 }
5385 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira);
5386 if (mp == NULL) {
5387 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
5388 /* Note that mp is NULL */
5389 ip_drop_input("ipIfStatsInDiscards", mp, NULL);
5390 CONN_DEC_REF(connp);
5391 return;
5392 }
5393 }
5394
5395 if (IPCL_IS_TCP(connp)) {
5396 /*
5397 * Do not drain; certain use cases can blow
5398 * the stack.
5399 */
5400 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
5401 connp->conn_recv, connp, ira,
5402 SQ_NODRAIN, SQTAG_IP_TCP_INPUT);
5403 } else {
5404 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
5405 (connp->conn_recv)(connp, mp, NULL,
5406 ira);
5407 CONN_DEC_REF(connp);
5408 }
5409
5410 }
5411
5412 /* ARGSUSED */
5413 static void
5414 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
5415 {
5416 conn_t *connp = (conn_t *)arg;
5417 tcp_t *tcp = connp->conn_tcp;
5418 queue_t *q = connp->conn_rq;
5419
5420 ASSERT(!IPCL_IS_NONSTR(connp));
5421 mutex_enter(&tcp->tcp_rsrv_mp_lock);
5422 tcp->tcp_rsrv_mp = mp;
5423 mutex_exit(&tcp->tcp_rsrv_mp_lock);
5424
5425 if (TCP_IS_DETACHED(tcp) || q == NULL) {
5426 return;
5427 }
5428
5429 if (tcp->tcp_fused) {
5430 tcp_fuse_backenable(tcp);
5431 return;
5432 }
5433
5434 if (canputnext(q)) {
5435 /* Not flow-controlled, open rwnd */
5436 tcp->tcp_rwnd = connp->conn_rcvbuf;
5437
5438 /*
5439 * Send back a window update immediately if TCP is above
5440 * ESTABLISHED state and the increase of the rcv window
5441
/*
 * Handle a packet that has been reclassified by TCP.
 * This function drops the ref on connp that the caller had.
 */
void
tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
{
	ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;

	if (connp->conn_incoming_ifindex != 0 &&
	    connp->conn_incoming_ifindex != ira->ira_ruifindex) {
		freemsg(mp);
		CONN_DEC_REF(connp);
		return;
	}

	if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) ||
	    (ira->ira_flags & IRAF_IPSEC_SECURE)) {
		ip6_t *ip6h;
		ipha_t *ipha;

		if (ira->ira_flags & IRAF_IS_IPV4) {
			ipha = (ipha_t *)mp->b_rptr;
			ip6h = NULL;
		} else {
			ipha = NULL;
			ip6h = (ip6_t *)mp->b_rptr;
		}
		mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira);
		if (mp == NULL) {
			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
			/* Note that mp is NULL */
			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
			CONN_DEC_REF(connp);
			return;
		}
	}

	if (IPCL_IS_TCP(connp)) {
		/*
		 * Do not drain; certain use cases can blow
		 * the stack.
		 */
		SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
		    connp->conn_recv, connp, ira,
		    SQ_NODRAIN, SQTAG_IP_TCP_INPUT);
	} else {
		/* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
		(connp->conn_recv)(connp, mp, NULL, ira);
		CONN_DEC_REF(connp);
	}
}

/* ARGSUSED */
static void
tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;
	queue_t	*q = connp->conn_rq;

	ASSERT(!IPCL_IS_NONSTR(connp));
	mutex_enter(&tcp->tcp_rsrv_mp_lock);
	tcp->tcp_rsrv_mp = mp;
	mutex_exit(&tcp->tcp_rsrv_mp_lock);

	if (TCP_IS_DETACHED(tcp) || q == NULL) {
		return;
	}

	if (tcp->tcp_fused) {
		tcp_fuse_backenable(tcp);
		return;
	}

	if (canputnext(q)) {
		/* Not flow-controlled, open rwnd */
		tcp->tcp_rwnd = connp->conn_rcvbuf;

		/*
		 * Send back a window update immediately if TCP is above
		 * ESTABLISHED state and the increase of the rcv window
		 * that the other side knows is at least 1 MSS after flow
		 * control is lifted.
		 */
		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
		    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
			tcp_xmit_ctl(NULL, tcp,
			    (tcp->tcp_swnd == 0) ? tcp->tcp_suna :
			    tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
		}
	}
}

/*
 * The read side service routine is called mostly when we get back-enabled as a
 * result of flow control relief.  Since we don't actually queue anything in
 * TCP, we have no data to send out of here.  What we do is re-open the receive
 * window and send out a window update.
 */
int
tcp_rsrv(queue_t *q)
{
	conn_t *connp = Q_TO_CONN(q);
	tcp_t *tcp = connp->conn_tcp;
	mblk_t *mp;

	/* No code does a putq on the read side */
	ASSERT(q->q_first == NULL);

	/*
	 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already
	 * been run.  So just return.
	 */
	mutex_enter(&tcp->tcp_rsrv_mp_lock);
	if ((mp = tcp->tcp_rsrv_mp) == NULL) {
		mutex_exit(&tcp->tcp_rsrv_mp_lock);
		return (0);
	}
	tcp->tcp_rsrv_mp = NULL;
	mutex_exit(&tcp->tcp_rsrv_mp_lock);

	CONN_INC_REF(connp);
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp,
	    NULL, SQ_PROCESS, SQTAG_TCP_RSRV);
	return (0);
}
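/*
 * Editor's sketch of the back-enable handoff implied by the two routines
 * above (where tcp_rsrv_mp is first allocated is assumed, not shown in
 * this file section):
 *
 *	stream back-enabled
 *	  -> tcp_rsrv()		takes tcp_rsrv_mp under tcp_rsrv_mp_lock;
 *				a NULL value means an earlier tcp_rsrv()
 *				call is still in flight, so just return
 *	  -> SQUEUE_ENTER_ONE()	uses that reserved mblk to run
 *				tcp_rsrv_input() in squeue context
 *	  -> tcp_rsrv_input()	puts the mblk back in tcp_rsrv_mp, then
 *				re-opens the receive window if flow
 *				control has really been lifted
 *
 * Keeping one dedicated, pre-reserved mblk means the service routine
 * never has to allocate memory in order to enter the squeue.
 */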
/* At minimum we need 8 bytes in the TCP header for the lookup */
#define	ICMP_MIN_TCP_HDR	8

/*
 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages
 * passed up by IP. The message is always received on the correct tcp_t.
 * Assumes that IP has pulled up everything up to and including the ICMP header.
 */
/* ARGSUSED2 */
void
tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t		*connp = (conn_t *)arg1;
	icmph_t		*icmph;
	ipha_t		*ipha;
	int		iph_hdr_length;
	tcpha_t		*tcpha;
	uint32_t	seg_seq;
	tcp_t		*tcp = connp->conn_tcp;

	/* Assume IP provides aligned packets */
	ASSERT(OK_32PTR(mp->b_rptr));
	ASSERT((MBLKL(mp) >= sizeof (ipha_t)));

	/*
	 * It's possible we have a closed, but not yet destroyed, TCP
	 * connection. Several fields (e.g. conn_ixa->ixa_ire) are invalid
	 * in the closed state, so don't take any chances and drop the packet.
	 */
	if (tcp->tcp_state == TCPS_CLOSED) {
		freemsg(mp);
		return;
	}

	/*
	 * Verify the IP version. Anything other than an IPv4 or IPv6 packet
	 * is sent upstream. ICMPv6 is handled in tcp_icmp_error_ipv6.
	 */
	if (!(ira->ira_flags & IRAF_IS_IPV4)) {
		tcp_icmp_error_ipv6(tcp, mp, ira);
		return;
	}

	/* Skip past the outer IP and ICMP headers */
	iph_hdr_length = ira->ira_ip_hdr_length;
	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
	/*
	 * If we don't have the correct outer IP header length,
	 * or if we don't have a complete inner IP header,
	 * drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
noticmpv4:
		freemsg(mp);
		return;
	}
	ipha = (ipha_t *)&icmph[1];

	/* Skip past the inner IP and find the ULP header */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length);
	/*
	 * If we don't have the correct inner IP header length, or if the ULP
	 * is not IPPROTO_TCP, or if we don't have at least ICMP_MIN_TCP_HDR
	 * bytes of TCP header, drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    ipha->ipha_protocol != IPPROTO_TCP ||
	    (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) {
		goto noticmpv4;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmph->icmph_type) {
	case ICMP_DEST_UNREACHABLE:
		switch (icmph->icmph_code) {
		case ICMP_FRAGMENTATION_NEEDED:
			/*
			 * Update Path MTU, then try to send something out.
			 */
			tcp_update_pmtu(tcp, B_TRUE);
			tcp_rexmit_after_error(tcp);
			break;
		case ICMP_PORT_UNREACHABLE:
		case ICMP_PROTOCOL_UNREACHABLE:
			switch (tcp->tcp_state) {
			case TCPS_SYN_SENT:
			case TCPS_SYN_RCVD:
				/*
				 * ICMP can snipe away incipient
				 * TCP connections as long as the
				 * seq number is the same as the
				 * initial send seq number.
				 */
				if (seg_seq == tcp->tcp_iss) {
					(void) tcp_clean_death(tcp,
					    ECONNREFUSED);
				}
				break;
			}
			break;
		case ICMP_HOST_UNREACHABLE:
		case ICMP_NET_UNREACHABLE:
			/* Record the error in case we finally time out. */
			if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
				tcp->tcp_client_errno = EHOSTUNREACH;
			else
				tcp->tcp_client_errno = ENETUNREACH;
			if (tcp->tcp_state == TCPS_SYN_RCVD) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP_SOURCE_QUENCH: {
		/*
		 * Use a global boolean to control
		 * whether TCP should respond to ICMP_SOURCE_QUENCH.
		 * The default is false.
		 */
		if (tcp_icmp_source_quench) {
			/*
			 * Reduce the sending rate as if we got a
			 * retransmit timeout.
			 */
			uint32_t npkt;

			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
			    tcp->tcp_mss;
			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
			tcp->tcp_cwnd = tcp->tcp_mss;
			tcp->tcp_cwnd_cnt = 0;
		}
		break;
	}
	}
	freemsg(mp);
}
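/*
 * Editor's note: the layout of the ICMPv4 error message validated by
 * tcp_icmp_input() above:
 *
 *	| outer IP hdr      | ICMP hdr | inner IP hdr   | TCP hdr bytes    |
 *	  ira_ip_hdr_length   8 bytes    IPH_HDR_LENGTH   ICMP_MIN_TCP_HDR
 *
 * The first ICMP_MIN_TCP_HDR (8) bytes of the quoted TCP header carry the
 * two ports and the sequence number (tha_seq), which is all that the
 * checks in this file need.
 */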
/*
 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
{
	icmp6_t		*icmp6;
	ip6_t		*ip6h;
	uint16_t	iph_hdr_length = ira->ira_ip_hdr_length;
	tcpha_t		*tcpha;
	uint8_t		*nexthdrp;
	uint32_t	seg_seq;

	/*
	 * Verify that we have a complete IP header.
	 */
	ASSERT((MBLKL(mp) >= sizeof (ip6_t)));

	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify that we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
noticmpv6:
		freemsg(mp);
		return;
	}

	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
		goto noticmpv6;
	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
	/*
	 * Validate the inner header. If the ULP is not IPPROTO_TCP or if we
	 * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop the
	 * packet.
	 */
	if ((*nexthdrp != IPPROTO_TCP) ||
	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
		goto noticmpv6;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmp6->icmp6_type) {
	case ICMP6_PACKET_TOO_BIG:
		/*
		 * Update Path MTU, then try to send something out.
		 */
		tcp_update_pmtu(tcp, B_TRUE);
		tcp_rexmit_after_error(tcp);
		break;
	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Record the error in case we finally time out. */
			tcp->tcp_client_errno = EHOSTUNREACH;
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			if (tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		}
		break;

	case ICMP6_TIME_EXCEEDED:
	default:
		break;
	}
	freemsg(mp);
}
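/*
 * Editor's note: an example of the ICMP6_PARAM_PROB case above.  If the
 * offending packet carried no extension headers, nexthdrp points at the
 * ip6_nxt field of the quoted inner header, which sits at offset 6.  A
 * peer whose stack does not recognize next header 6 (TCP) reports
 * ICMP6_PARAMPROB_NEXTHEADER with icmp6_pptr = 6, so
 *
 *	(uchar_t *)ip6h + icmp6->icmp6_pptr == (uchar_t *)nexthdrp
 *
 * holds, and the error is treated as the IPv6 analogue of
 * ICMP_PROTOCOL_UNREACHABLE: an incipient connection is torn down with
 * ECONNREFUSED.
 */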
/*
 * CALLED OUTSIDE OF SQUEUE! It cannot follow any pointers that tcp might
 * change. But it can refer to fields like tcp_suna and tcp_snxt.
 *
 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
 * error messages received by IP. The message is always received on the correct
 * tcp_t.
 */
/* ARGSUSED */
boolean_t
tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
    ip_recv_attr_t *ira)
{
	tcpha_t *tcpha = (tcpha_t *)arg2;
	uint32_t seq = ntohl(tcpha->tha_seq);
	tcp_t *tcp = connp->conn_tcp;

	/*
	 * The TCP sequence number contained in the payload of the ICMP error
	 * message should be within the range SND.UNA <= SEG.SEQ < SND.NXT.
	 * Otherwise, the message is either a stale ICMP error, or an attack
	 * from the network. Fail the verification.
	 */
	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
		return (B_FALSE);

	/* For "too big" we also check the ignore flag */
	if (ira->ira_flags & IRAF_IS_IPV4) {
		ASSERT(icmph != NULL);
		if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
		    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	} else {
		ASSERT(icmp6 != NULL);
		if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	}
	return (B_TRUE);
}
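/*
 * Editor's note: a worked example of the sequence check in tcp_verifyicmp()
 * above, with hypothetical numbers.  With tcp_suna = 1000 and
 * tcp_snxt = 5000, only errors quoting a sequence number in [1000, 5000)
 * can refer to data currently in flight: an error quoting 900 is stale
 * (that data is already ACKed) and one quoting 6000 cannot have been
 * triggered by anything we sent, so both fail verification.
 * SEQ_LT()/SEQ_GEQ() compare modulo 2^32, so the test also behaves
 * correctly across sequence-number wraparound.
 */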