/*
 * tcp_time_wait_processing() handles processing of incoming packets when
 * the tcp_t is in the TIME_WAIT state.
 *
 * A TIME_WAIT tcp_t that has an associated open TCP end point (not in
 * detached state) is never put on the time wait list.
 */
void
tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq,
    uint32_t seg_ack, int seg_len, tcpha_t *tcpha, ip_recv_attr_t *ira)
{
    int32_t bytes_acked;
    int32_t gap;
    int32_t rgap;
    tcp_opt_t tcpopt;
    uint_t flags;
    uint32_t new_swnd = 0;
    conn_t *nconnp;
    conn_t *connp = tcp->tcp_connp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

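    /* Count the incoming segment in the stack-wide MIB statistics. */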
    TCPS_BUMP_MIB(tcps, tcpHCInSegs);
    DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);

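    /*
     * Extract the TCP flags and the peer's advertised window. The window
     * scale option is never applied to segments that carry SYN (for
     * example, a raw window of 1024 with a scale shift of 7 advertises
     * 131072 bytes).
     */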
    flags = (unsigned int)tcpha->tha_flags & 0xFF;
    new_swnd = ntohs(tcpha->tha_win) <<
        ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws);

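    /*
     * A keepalive probe (zero or one byte of data, sequence number one
     * below what we expect next) is exempt from the timestamp and PAWS
     * checks below; when timestamps have been negotiated on this
     * connection, any other segment that lacks a timestamp option is
     * dropped.
     */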
    boolean_t keepalive = (seg_len == 0 || seg_len == 1) &&
        (seg_seq + 1 == tcp->tcp_rnxt);
    if (tcp->tcp_snd_ts_ok && !(flags & TH_RST) && !keepalive) {
        int options;
        if (tcp->tcp_snd_sack_ok)
            tcpopt.tcp = tcp;
        else
            tcpopt.tcp = NULL;
        options = tcp_parse_options(tcpha, &tcpopt);
        if (!(options & TCP_OPT_TSTAMP_PRESENT)) {
            DTRACE_TCP1(droppedtimestamp, tcp_t *, tcp);
            goto done;
        } else if (!tcp_paws_check(tcp, &tcpopt)) {
            tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt,
    /* ... lines 632-776 of the original listing are omitted here ... */
    }
    /*
     * Check whether we can update tcp_ts_recent. This test is from RFC
     * 7323, section 5.3.
     */
    if (tcp->tcp_snd_ts_ok && !(flags & TH_RST) &&
        TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
        SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
        tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
        tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
    }

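    /*
     * In TIME_WAIT there is no application left to consume data.
     * Out-of-order data is discarded and only triggers an ACK; in-order
     * data is merely counted in the stack-wide and per-connection
     * statistics.
     */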
    if (seg_seq != tcp->tcp_rnxt && seg_len > 0) {
        /* Always ack out of order packets */
        flags |= TH_ACK_NEEDED;
        seg_len = 0;
    } else if (seg_len > 0) {
        TCPS_BUMP_MIB(tcps, tcpInClosed);
        TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs);
        TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len);
        tcp->tcp_cs.tcp_in_data_inorder_segs++;
        tcp->tcp_cs.tcp_in_data_inorder_bytes += seg_len;
    }
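    /* An incoming RST terminates the TIME_WAIT connection immediately. */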
    if (flags & TH_RST) {
        (void) tcp_clean_death(tcp, 0);
        goto done;
    }
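    /*
     * A SYN that reaches this point is answered with RST|ACK, but the
     * TIME_WAIT state itself is preserved (see the comment below).
     */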
    if (flags & TH_SYN) {
        tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
            TH_RST|TH_ACK);
        /*
         * Do not delete the TCP structure if it is in
         * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13.
         */
        goto done;
    }
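    /*
     * ACK processing: bytes_acked is how much new data the peer has
     * acknowledged beyond the oldest unacknowledged byte (tcp_suna).
     */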
process_ack:
    if (flags & TH_ACK) {
        bytes_acked = (int)(seg_ack - tcp->tcp_suna);
        if (bytes_acked <= 0) {
            if (bytes_acked == 0 && seg_len == 0 &&
                new_swnd == tcp->tcp_swnd)