11553 Want pluggable TCP congestion control algorithms
Portions contributed by: Cody Peter Mello <cody.mello@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Robert Mustacchi <robert.mustacchi@joyent.com>
--- old/usr/src/uts/common/inet/tcp/tcp_output.c
+++ new/usr/src/uts/common/inet/tcp/tcp_output.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 - * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
24 + * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
25 25 * Copyright 2019 Joyent, Inc.
26 26 */
27 27
28 28 /* This file contains all TCP output processing functions. */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/stream.h>
32 32 #include <sys/strsun.h>
33 33 #include <sys/strsubr.h>
34 34 #include <sys/stropts.h>
35 35 #include <sys/strlog.h>
36 36 #define _SUN_TPI_VERSION 2
37 37 #include <sys/tihdr.h>
38 38 #include <sys/suntpi.h>
39 39 #include <sys/xti_inet.h>
40 40 #include <sys/timod.h>
41 41 #include <sys/pattr.h>
42 42 #include <sys/squeue_impl.h>
43 43 #include <sys/squeue.h>
44 44 #include <sys/sockio.h>
45 45 #include <sys/tsol/tnet.h>
46 46
47 47 #include <inet/common.h>
48 48 #include <inet/ip.h>
49 49 #include <inet/tcp.h>
50 50 #include <inet/tcp_impl.h>
51 51 #include <inet/snmpcom.h>
52 52 #include <inet/proto_set.h>
53 53 #include <inet/ipsec_impl.h>
54 54 #include <inet/ip_ndp.h>
55 55
56 56 static mblk_t *tcp_get_seg_mp(tcp_t *, uint32_t, int32_t *);
57 57 static void tcp_wput_cmdblk(queue_t *, mblk_t *);
58 58 static void tcp_wput_flush(tcp_t *, mblk_t *);
59 59 static void tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
60 60 static int tcp_xmit_end(tcp_t *);
61 61 static int tcp_send(tcp_t *, const int, const int, const int,
62 62 const int, int *, uint32_t *, int *, mblk_t **, mblk_t *);
63 63 static void tcp_xmit_early_reset(char *, mblk_t *, uint32_t, uint32_t,
64 64 int, ip_recv_attr_t *, ip_stack_t *, conn_t *);
65 65 static boolean_t tcp_send_rst_chk(tcp_stack_t *);
66 66 static void tcp_process_shrunk_swnd(tcp_t *, uint32_t);
67 67 static void tcp_fill_header(tcp_t *, uchar_t *, int);
68 68
69 69 /*
70 70 * Functions called directly via squeue having a prototype of edesc_t.
71 71 */
72 72 static void tcp_wput_nondata(void *, mblk_t *, void *, ip_recv_attr_t *);
73 73 static void tcp_wput_ioctl(void *, mblk_t *, void *, ip_recv_attr_t *);
74 74 static void tcp_wput_proto(void *, mblk_t *, void *, ip_recv_attr_t *);
75 75
76 76 /*
77 77 * This controls how tiny a write must be before we try to copy it
78 78 * into the mblk on the tail of the transmit queue. Not much
79 79 * speedup is observed for values larger than sixteen. Zero will
80 80 * disable the optimisation.
81 81 */
82 82 static int tcp_tx_pull_len = 16;
83 83
84 +static void
85 +cc_after_idle(tcp_t *tcp)
86 +{
87 + uint32_t old_cwnd = tcp->tcp_cwnd;
88 +
89 + if (CC_ALGO(tcp)->after_idle != NULL)
90 + CC_ALGO(tcp)->after_idle(&tcp->tcp_ccv);
91 +
92 + DTRACE_PROBE3(cwnd__cc__after__idle, tcp_t *, tcp, uint32_t, old_cwnd,
93 + uint32_t, tcp->tcp_cwnd);
94 +}
95 +
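/*
 * Reviewer sketch, not part of this webrev: cc_after_idle() above hands
 * the idle-restart decision to whichever congestion control module the
 * connection has registered, instead of unconditionally resetting cwnd
 * the way TCP_SET_INIT_CWND() did.  The module side of that hook could
 * look roughly like the code below.  struct cc_algo, struct cc_var and
 * CCV() mirror the FreeBSD-derived cc framework this change pulls in,
 * but the field names and the restart-window math are illustrative
 * assumptions rather than the shipped modules under uts/common/inet/cc/.
 */
#include <inet/tcp_impl.h>	/* assumed to pull in the cc interfaces */

static void
example_after_idle(struct cc_var *ccv)
{
	/* Assumed restart window: roughly four segments' worth of data. */
	uint32_t restart_cwnd = 4 * CCV(ccv, tcp_mss);

	/* Collapse an inflated cwnd once the connection has sat idle. */
	if (CCV(ccv, tcp_cwnd) > restart_cwnd)
		CCV(ccv, tcp_cwnd) = restart_cwnd;
}

static struct cc_algo example_cc_algo = {
	.name		= "example",
	.after_idle	= example_after_idle,
};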
84 96 int
85 97 tcp_wput(queue_t *q, mblk_t *mp)
86 98 {
87 99 conn_t *connp = Q_TO_CONN(q);
88 100 tcp_t *tcp;
89 101 void (*output_proc)();
90 102 t_scalar_t type;
91 103 uchar_t *rptr;
92 104 struct iocblk *iocp;
93 105 size_t size;
94 106
95 107 ASSERT(connp->conn_ref >= 2);
96 108
97 109 switch (DB_TYPE(mp)) {
98 110 case M_DATA:
99 111 tcp = connp->conn_tcp;
100 112 ASSERT(tcp != NULL);
101 113
102 114 size = msgdsize(mp);
103 115
104 116 mutex_enter(&tcp->tcp_non_sq_lock);
105 117 tcp->tcp_squeue_bytes += size;
106 118 if (TCP_UNSENT_BYTES(tcp) > connp->conn_sndbuf) {
107 119 tcp_setqfull(tcp);
108 120 }
109 121 mutex_exit(&tcp->tcp_non_sq_lock);
110 122
111 123 CONN_INC_REF(connp);
112 124 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
113 125 NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
114 126 return (0);
115 127
116 128 case M_CMD:
117 129 tcp_wput_cmdblk(q, mp);
118 130 return (0);
119 131
120 132 case M_PROTO:
121 133 case M_PCPROTO:
122 134 /*
123 135 			 * if it is an SNMP message, don't get behind the squeue
124 136 */
125 137 tcp = connp->conn_tcp;
126 138 rptr = mp->b_rptr;
127 139 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
128 140 type = ((union T_primitives *)rptr)->type;
129 141 } else {
130 142 if (connp->conn_debug) {
131 143 (void) strlog(TCP_MOD_ID, 0, 1,
132 144 SL_ERROR|SL_TRACE,
133 145 "tcp_wput_proto, dropping one...");
134 146 }
135 147 freemsg(mp);
136 148 return (0);
137 149 }
138 150 if (type == T_SVR4_OPTMGMT_REQ) {
139 151 /*
140 152 * All Solaris components should pass a db_credp
141 153 * for this TPI message, hence we ASSERT.
142 154 * But in case there is some other M_PROTO that looks
143 155 * like a TPI message sent by some other kernel
144 156 * component, we check and return an error.
145 157 */
146 158 cred_t *cr = msg_getcred(mp, NULL);
147 159
148 160 ASSERT(cr != NULL);
149 161 if (cr == NULL) {
150 162 tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
151 163 return (0);
152 164 }
153 165 if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get,
154 166 cr)) {
155 167 /*
156 168 * This was a SNMP request
157 169 */
158 170 return (0);
159 171 } else {
160 172 output_proc = tcp_wput_proto;
161 173 }
162 174 } else {
163 175 output_proc = tcp_wput_proto;
164 176 }
165 177 break;
166 178 case M_IOCTL:
167 179 /*
168 180 * Most ioctls can be processed right away without going via
169 181 * squeues - process them right here. Those that do require
170 182 * squeue (currently _SIOCSOCKFALLBACK)
171 183 * are processed by tcp_wput_ioctl().
172 184 */
173 185 iocp = (struct iocblk *)mp->b_rptr;
174 186 tcp = connp->conn_tcp;
175 187
176 188 switch (iocp->ioc_cmd) {
177 189 case TCP_IOC_ABORT_CONN:
178 190 tcp_ioctl_abort_conn(q, mp);
179 191 return (0);
180 192 case TI_GETPEERNAME:
181 193 case TI_GETMYNAME:
182 194 mi_copyin(q, mp, NULL,
183 195 SIZEOF_STRUCT(strbuf, iocp->ioc_flag));
184 196 return (0);
185 197
186 198 default:
187 199 output_proc = tcp_wput_ioctl;
188 200 break;
189 201 }
190 202 break;
191 203 default:
192 204 output_proc = tcp_wput_nondata;
193 205 break;
194 206 }
195 207
196 208 CONN_INC_REF(connp);
197 209 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, output_proc, connp,
198 210 NULL, tcp_squeue_flag, SQTAG_TCP_WPUT_OTHER);
199 211 return (0);
200 212 }
201 213
202 214 /*
203 215 * The TCP normal data output path.
204 216 * NOTE: the logic of the fast path is duplicated from this function.
205 217 */
206 218 void
207 219 tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent)
208 220 {
209 221 int len;
210 222 mblk_t *local_time;
211 223 mblk_t *mp1;
212 224 uint32_t snxt;
213 225 int tail_unsent;
214 226 int tcpstate;
215 227 int usable = 0;
216 228 mblk_t *xmit_tail;
217 229 int32_t mss;
218 230 int32_t num_sack_blk = 0;
219 231 int32_t total_hdr_len;
220 232 int32_t tcp_hdr_len;
221 233 int rc;
222 - tcp_stack_t *tcps = tcp->tcp_tcps;
223 234 conn_t *connp = tcp->tcp_connp;
224 235 clock_t now = LBOLT_FASTPATH;
225 236
226 237 tcpstate = tcp->tcp_state;
227 238 if (mp == NULL) {
228 239 /*
229 240 * tcp_wput_data() with NULL mp should only be called when
230 241 * there is unsent data.
231 242 */
232 243 ASSERT(tcp->tcp_unsent > 0);
233 244 /* Really tacky... but we need this for detached closes. */
234 245 len = tcp->tcp_unsent;
235 246 goto data_null;
236 247 }
237 248
238 249 ASSERT(mp->b_datap->db_type == M_DATA);
239 250 /*
240 251 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ,
241 252 * or before a connection attempt has begun.
242 253 */
243 254 if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT ||
244 255 (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
245 256 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
246 257 #ifdef DEBUG
247 258 cmn_err(CE_WARN,
248 259 "tcp_wput_data: data after ordrel, %s",
249 260 tcp_display(tcp, NULL,
250 261 DISP_ADDR_AND_PORT));
251 262 #else
252 263 if (connp->conn_debug) {
253 264 (void) strlog(TCP_MOD_ID, 0, 1,
254 265 SL_TRACE|SL_ERROR,
255 266 "tcp_wput_data: data after ordrel, %s\n",
256 267 tcp_display(tcp, NULL,
257 268 DISP_ADDR_AND_PORT));
258 269 }
259 270 #endif /* DEBUG */
260 271 }
261 272 if (tcp->tcp_snd_zcopy_aware &&
262 273 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
263 274 tcp_zcopy_notify(tcp);
264 275 freemsg(mp);
265 276 mutex_enter(&tcp->tcp_non_sq_lock);
266 277 if (tcp->tcp_flow_stopped &&
267 278 TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
268 279 tcp_clrqfull(tcp);
269 280 }
270 281 mutex_exit(&tcp->tcp_non_sq_lock);
271 282 return;
272 283 }
273 284
274 285 /* Strip empties */
275 286 for (;;) {
276 287 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
277 288 (uintptr_t)INT_MAX);
278 289 len = (int)(mp->b_wptr - mp->b_rptr);
279 290 if (len > 0)
280 291 break;
281 292 mp1 = mp;
282 293 mp = mp->b_cont;
283 294 freeb(mp1);
284 295 if (mp == NULL) {
285 296 return;
286 297 }
287 298 }
288 299
289 300 /* If we are the first on the list ... */
290 301 if (tcp->tcp_xmit_head == NULL) {
291 302 tcp->tcp_xmit_head = mp;
292 303 tcp->tcp_xmit_tail = mp;
293 304 tcp->tcp_xmit_tail_unsent = len;
294 305 } else {
295 306 /* If tiny tx and room in txq tail, pullup to save mblks. */
296 307 struct datab *dp;
297 308
298 309 mp1 = tcp->tcp_xmit_last;
299 310 if (len < tcp_tx_pull_len &&
300 311 (dp = mp1->b_datap)->db_ref == 1 &&
301 312 dp->db_lim - mp1->b_wptr >= len) {
302 313 ASSERT(len > 0);
303 314 ASSERT(!mp1->b_cont);
304 315 if (len == 1) {
305 316 *mp1->b_wptr++ = *mp->b_rptr;
306 317 } else {
307 318 bcopy(mp->b_rptr, mp1->b_wptr, len);
308 319 mp1->b_wptr += len;
309 320 }
310 321 if (mp1 == tcp->tcp_xmit_tail)
311 322 tcp->tcp_xmit_tail_unsent += len;
312 323 mp1->b_cont = mp->b_cont;
313 324 if (tcp->tcp_snd_zcopy_aware &&
314 325 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
315 326 mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
316 327 freeb(mp);
317 328 mp = mp1;
318 329 } else {
319 330 tcp->tcp_xmit_last->b_cont = mp;
320 331 }
321 332 len += tcp->tcp_unsent;
322 333 }
323 334
324 335 /* Tack on however many more positive length mblks we have */
325 336 if ((mp1 = mp->b_cont) != NULL) {
326 337 do {
327 338 int tlen;
328 339 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
329 340 (uintptr_t)INT_MAX);
330 341 tlen = (int)(mp1->b_wptr - mp1->b_rptr);
331 342 if (tlen <= 0) {
332 343 mp->b_cont = mp1->b_cont;
333 344 freeb(mp1);
334 345 } else {
335 346 len += tlen;
336 347 mp = mp1;
337 348 }
338 349 } while ((mp1 = mp->b_cont) != NULL);
339 350 }
340 351 tcp->tcp_xmit_last = mp;
341 352 tcp->tcp_unsent = len;
342 353
343 354 if (urgent)
344 355 usable = 1;
345 356
346 357 data_null:
347 358 snxt = tcp->tcp_snxt;
348 359 xmit_tail = tcp->tcp_xmit_tail;
349 360 tail_unsent = tcp->tcp_xmit_tail_unsent;
350 361
351 362 /*
352 363 * Note that tcp_mss has been adjusted to take into account the
353 364 * timestamp option if applicable. Because SACK options do not
354 365 	 * appear in every TCP segment and they are of variable length,
355 366 * they cannot be included in tcp_mss. Thus we need to calculate
356 367 * the actual segment length when we need to send a segment which
357 368 * includes SACK options.
358 369 */
359 370 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
360 371 int32_t opt_len;
361 372
362 373 num_sack_blk = MIN(tcp->tcp_max_sack_blk,
363 374 tcp->tcp_num_sack_blk);
364 375 opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
365 376 2 + TCPOPT_HEADER_LEN;
366 377 mss = tcp->tcp_mss - opt_len;
367 378 total_hdr_len = connp->conn_ht_iphc_len + opt_len;
368 379 tcp_hdr_len = connp->conn_ht_ulp_len + opt_len;
369 380 } else {
370 381 mss = tcp->tcp_mss;
371 382 total_hdr_len = connp->conn_ht_iphc_len;
372 383 tcp_hdr_len = connp->conn_ht_ulp_len;
373 384 }
374 385
375 386 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
376 387 (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
377 - TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
388 + cc_after_idle(tcp);
378 389 }
379 390 if (tcpstate == TCPS_SYN_RCVD) {
380 391 /*
381 392 * The three-way connection establishment handshake is not
382 393 * complete yet. We want to queue the data for transmission
383 394 * after entering ESTABLISHED state (RFC793). A jump to
384 395 * "done" label effectively leaves data on the queue.
385 396 */
386 397 goto done;
387 398 } else {
388 399 int usable_r;
389 400
390 401 /*
391 402 * In the special case when cwnd is zero, which can only
392 403 * happen if the connection is ECN capable, return now.
393 404 		 * New segments are sent using tcp_timer(). The timer
394 405 * is set in tcp_input_data().
395 406 */
396 407 if (tcp->tcp_cwnd == 0) {
397 408 /*
398 409 * Note that tcp_cwnd is 0 before 3-way handshake is
399 410 * finished.
400 411 */
401 412 ASSERT(tcp->tcp_ecn_ok ||
402 413 tcp->tcp_state < TCPS_ESTABLISHED);
403 414 return;
404 415 }
405 416
406 417 /* NOTE: trouble if xmitting while SYN not acked? */
407 418 usable_r = snxt - tcp->tcp_suna;
408 419 usable_r = tcp->tcp_swnd - usable_r;
409 420
410 421 /*
411 422 * Check if the receiver has shrunk the window. If
412 423 * tcp_wput_data() with NULL mp is called, tcp_fin_sent
413 424 * cannot be set as there is unsent data, so FIN cannot
414 425 		 * be sent out. Otherwise, we need to take the FIN into
415 426 		 * account as it consumes an "invisible" sequence number.
416 427 */
417 428 ASSERT(tcp->tcp_fin_sent == 0);
418 429 if (usable_r < 0) {
419 430 /*
420 431 * The receiver has shrunk the window and we have sent
421 432 			 * -usable_r data beyond the window, re-adjust.
422 433 *
423 434 * If TCP window scaling is enabled, there can be
424 435 			 * a round-down error as the advertised receive window
425 436 * is actually right shifted n bits. This means that
426 437 * the lower n bits info is wiped out. It will look
427 438 * like the window is shrunk. Do a check here to
428 439 * see if the shrunk amount is actually within the
429 440 * error in window calculation. If it is, just
430 441 * return. Note that this check is inside the
431 442 * shrunk window check. This makes sure that even
432 443 * though tcp_process_shrunk_swnd() is not called,
433 444 * we will stop further processing.
434 445 */
435 446 if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
436 447 tcp_process_shrunk_swnd(tcp, -usable_r);
437 448 }
438 449 return;
439 450 }
440 451
441 452 /* usable = MIN(swnd, cwnd) - unacked_bytes */
442 453 if (tcp->tcp_swnd > tcp->tcp_cwnd)
443 454 usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;
444 455
445 456 /* usable = MIN(usable, unsent) */
446 457 if (usable_r > len)
447 458 usable_r = len;
448 459
449 460 /* usable = MAX(usable, {1 for urgent, 0 for data}) */
450 461 if (usable_r > 0) {
451 462 usable = usable_r;
452 463 } else {
453 464 /* Bypass all other unnecessary processing. */
454 465 goto done;
455 466 }
456 467 }
457 468
458 469 local_time = (mblk_t *)(intptr_t)gethrtime();
459 470
460 471 /*
461 472 * "Our" Nagle Algorithm. This is not the same as in the old
462 473 * BSD. This is more in line with the true intent of Nagle.
463 474 *
464 475 * The conditions are:
465 476 * 1. The amount of unsent data (or amount of data which can be
466 477 * sent, whichever is smaller) is less than Nagle limit.
467 478 * 2. The last sent size is also less than Nagle limit.
468 479 * 3. There is unack'ed data.
469 480 * 4. Urgent pointer is not set. Send urgent data ignoring the
470 481 * Nagle algorithm. This reduces the probability that urgent
471 482 * bytes get "merged" together.
472 483 * 5. The app has not closed the connection. This eliminates the
473 484 * wait time of the receiving side waiting for the last piece of
474 485 * (small) data.
475 486 *
476 487 	 * If all are satisfied, exit without sending anything. Note
477 488 * that Nagle limit can be smaller than 1 MSS. Nagle limit is
478 489 * the smaller of 1 MSS and global tcp_naglim_def (default to be
479 490 * 4095).
480 491 */
481 492 if (usable < (int)tcp->tcp_naglim &&
482 493 tcp->tcp_naglim > tcp->tcp_last_sent_len &&
483 494 snxt != tcp->tcp_suna &&
484 495 !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
485 496 !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
486 497 goto done;
487 498 }
488 499
489 500 /*
490 501 * If tcp_zero_win_probe is not set and the tcp->tcp_cork option
491 502 	 * is set, then we have to force TCP not to send a partial segment
492 503 	 * (smaller than MSS bytes). We are calculating the usable now
493 504 	 * based on full mss and will save the rest of the remaining data for
494 505 	 * later. When tcp_zero_win_probe is set, TCP needs to send out
495 506 	 * something to do a zero window probe.
496 507 */
497 508 if (tcp->tcp_cork && !tcp->tcp_zero_win_probe) {
498 509 if (usable < mss)
499 510 goto done;
500 511 usable = (usable / mss) * mss;
501 512 }
502 513
503 514 /* Update the latest receive window size in TCP header. */
504 515 tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
505 516
506 517 /* Send the packet. */
507 518 rc = tcp_send(tcp, mss, total_hdr_len, tcp_hdr_len,
508 519 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
509 520 local_time);
510 521
511 522 /* Pretend that all we were trying to send really got sent */
512 523 if (rc < 0 && tail_unsent < 0) {
513 524 do {
514 525 xmit_tail = xmit_tail->b_cont;
515 526 xmit_tail->b_prev = local_time;
516 527 ASSERT((uintptr_t)(xmit_tail->b_wptr -
517 528 xmit_tail->b_rptr) <= (uintptr_t)INT_MAX);
518 529 tail_unsent += (int)(xmit_tail->b_wptr -
519 530 xmit_tail->b_rptr);
520 531 } while (tail_unsent < 0);
521 532 }
522 533 done:;
523 534 tcp->tcp_xmit_tail = xmit_tail;
524 535 tcp->tcp_xmit_tail_unsent = tail_unsent;
525 536 len = tcp->tcp_snxt - snxt;
526 537 if (len) {
527 538 /*
528 539 * If new data was sent, need to update the notsack
529 540 		 * list, which is, after all, data blocks that have
530 541 * not been sack'ed by the receiver. New data is
531 542 * not sack'ed.
532 543 */
533 544 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
534 545 /* len is a negative value. */
535 546 tcp->tcp_pipe -= len;
536 547 tcp_notsack_update(&(tcp->tcp_notsack_list),
537 548 tcp->tcp_snxt, snxt,
538 549 &(tcp->tcp_num_notsack_blk),
539 550 &(tcp->tcp_cnt_notsack_list));
540 551 }
541 552 tcp->tcp_snxt = snxt + tcp->tcp_fin_sent;
542 553 tcp->tcp_rack = tcp->tcp_rnxt;
543 554 tcp->tcp_rack_cnt = 0;
544 555 if ((snxt + len) == tcp->tcp_suna) {
545 556 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
546 557 }
547 558 } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) {
548 559 /*
549 560 * Didn't send anything. Make sure the timer is running
550 561 * so that we will probe a zero window.
551 562 */
552 563 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
553 564 }
554 565 /* Note that len is the amount we just sent but with a negative sign */
555 566 tcp->tcp_unsent += len;
556 567 mutex_enter(&tcp->tcp_non_sq_lock);
557 568 if (tcp->tcp_flow_stopped) {
558 569 if (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
559 570 tcp_clrqfull(tcp);
560 571 }
561 572 } else if (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf) {
562 573 if (!(tcp->tcp_detached))
563 574 tcp_setqfull(tcp);
564 575 }
565 576 mutex_exit(&tcp->tcp_non_sq_lock);
566 577 }
567 578
568 579 /*
569 580 * Initial STREAMS write side put() procedure for sockets. It tries to
570 581 * handle the T_CAPABILITY_REQ which sockfs sends down while setting
571 582 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages
572 583 * are handled by tcp_wput() as usual.
573 584 *
574 585 * All further messages will also be handled by tcp_wput() because we cannot
575 586 * be sure that the above short cut is safe later.
576 587 */
577 588 int
578 589 tcp_wput_sock(queue_t *wq, mblk_t *mp)
579 590 {
580 591 conn_t *connp = Q_TO_CONN(wq);
581 592 tcp_t *tcp = connp->conn_tcp;
582 593 struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr;
583 594
584 595 ASSERT(wq->q_qinfo == &tcp_sock_winit);
585 596 wq->q_qinfo = &tcp_winit;
586 597
587 598 ASSERT(IPCL_IS_TCP(connp));
588 599 ASSERT(TCP_IS_SOCKET(tcp));
589 600
590 601 if (DB_TYPE(mp) == M_PCPROTO &&
591 602 MBLKL(mp) == sizeof (struct T_capability_req) &&
592 603 car->PRIM_type == T_CAPABILITY_REQ) {
593 604 tcp_capability_req(tcp, mp);
594 605 return (0);
595 606 }
596 607
597 608 tcp_wput(wq, mp);
598 609 return (0);
599 610 }
600 611
601 612 /* ARGSUSED */
602 613 int
603 614 tcp_wput_fallback(queue_t *wq, mblk_t *mp)
604 615 {
605 616 #ifdef DEBUG
606 617 cmn_err(CE_CONT, "tcp_wput_fallback: Message during fallback \n");
607 618 #endif
608 619 freemsg(mp);
609 620 return (0);
610 621 }
611 622
612 623 /*
613 624  * Called by tcp_wput() to handle misc non-M_DATA messages.
614 625 */
615 626 /* ARGSUSED */
616 627 static void
617 628 tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
618 629 {
619 630 conn_t *connp = (conn_t *)arg;
620 631 tcp_t *tcp = connp->conn_tcp;
621 632
622 633 ASSERT(DB_TYPE(mp) != M_IOCTL);
623 634 /*
624 635 	 * TCP is D_MP and qprocsoff() is done towards the end of tcp_close().
625 636 * Once the close starts, streamhead and sockfs will not let any data
626 637 * packets come down (close ensures that there are no threads using the
627 638 * queue and no new threads will come down) but since qprocsoff()
628 639 * hasn't happened yet, a M_FLUSH or some non data message might
629 640 * get reflected back (in response to our own FLUSHRW) and get
630 641 * processed after tcp_close() is done. The conn would still be valid
631 642 	 * because a ref would have been added, but we need to check the state
632 643 * before actually processing the packet.
633 644 */
634 645 if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) {
635 646 freemsg(mp);
636 647 return;
637 648 }
638 649
639 650 switch (DB_TYPE(mp)) {
640 651 case M_IOCDATA:
641 652 tcp_wput_iocdata(tcp, mp);
642 653 break;
643 654 case M_FLUSH:
644 655 tcp_wput_flush(tcp, mp);
645 656 break;
646 657 default:
647 658 ip_wput_nondata(connp->conn_wq, mp);
648 659 break;
649 660 }
650 661 }
651 662
652 663 /* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
653 664 static void
654 665 tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
655 666 {
656 667 uchar_t fval = *mp->b_rptr;
657 668 mblk_t *tail;
658 669 conn_t *connp = tcp->tcp_connp;
659 670 queue_t *q = connp->conn_wq;
660 671
661 672 /* TODO: How should flush interact with urgent data? */
662 673 if ((fval & FLUSHW) && tcp->tcp_xmit_head != NULL &&
663 674 !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
664 675 /*
665 676 * Flush only data that has not yet been put on the wire. If
666 677 * we flush data that we have already transmitted, life, as we
667 678 * know it, may come to an end.
668 679 */
669 680 tail = tcp->tcp_xmit_tail;
670 681 tail->b_wptr -= tcp->tcp_xmit_tail_unsent;
671 682 tcp->tcp_xmit_tail_unsent = 0;
672 683 tcp->tcp_unsent = 0;
673 684 if (tail->b_wptr != tail->b_rptr)
674 685 tail = tail->b_cont;
675 686 if (tail) {
676 687 mblk_t **excess = &tcp->tcp_xmit_head;
677 688 for (;;) {
678 689 mblk_t *mp1 = *excess;
679 690 if (mp1 == tail)
680 691 break;
681 692 tcp->tcp_xmit_tail = mp1;
682 693 tcp->tcp_xmit_last = mp1;
683 694 excess = &mp1->b_cont;
684 695 }
685 696 *excess = NULL;
686 697 tcp_close_mpp(&tail);
687 698 if (tcp->tcp_snd_zcopy_aware)
688 699 tcp_zcopy_notify(tcp);
689 700 }
690 701 /*
691 702 * We have no unsent data, so unsent must be less than
692 703 * conn_sndlowat, so re-enable flow.
693 704 */
694 705 mutex_enter(&tcp->tcp_non_sq_lock);
695 706 if (tcp->tcp_flow_stopped) {
696 707 tcp_clrqfull(tcp);
697 708 }
698 709 mutex_exit(&tcp->tcp_non_sq_lock);
699 710 }
700 711 /*
701 712 * TODO: you can't just flush these, you have to increase rwnd for one
702 713 * thing. For another, how should urgent data interact?
703 714 */
704 715 if (fval & FLUSHR) {
705 716 *mp->b_rptr = fval & ~FLUSHW;
706 717 /* XXX */
707 718 qreply(q, mp);
708 719 return;
709 720 }
710 721 freemsg(mp);
711 722 }
712 723
713 724 /*
714 725 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA
715 726 * messages.
716 727 */
717 728 static void
718 729 tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp)
719 730 {
720 731 mblk_t *mp1;
721 732 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
722 733 STRUCT_HANDLE(strbuf, sb);
723 734 uint_t addrlen;
724 735 conn_t *connp = tcp->tcp_connp;
725 736 queue_t *q = connp->conn_wq;
726 737
727 738 /* Make sure it is one of ours. */
728 739 switch (iocp->ioc_cmd) {
729 740 case TI_GETMYNAME:
730 741 case TI_GETPEERNAME:
731 742 break;
732 743 default:
733 744 /*
734 745 * If the conn is closing, then error the ioctl here. Otherwise
735 746 * use the CONN_IOCTLREF_* macros to hold off tcp_close until
736 747 * we're done here.
737 748 */
738 749 mutex_enter(&connp->conn_lock);
739 750 if (connp->conn_state_flags & CONN_CLOSING) {
740 751 mutex_exit(&connp->conn_lock);
741 752 iocp->ioc_error = EINVAL;
742 753 mp->b_datap->db_type = M_IOCNAK;
743 754 iocp->ioc_count = 0;
744 755 qreply(q, mp);
745 756 return;
746 757 }
747 758
748 759 CONN_INC_IOCTLREF_LOCKED(connp);
749 760 ip_wput_nondata(q, mp);
750 761 CONN_DEC_IOCTLREF(connp);
751 762 return;
752 763 }
753 764 switch (mi_copy_state(q, mp, &mp1)) {
754 765 case -1:
755 766 return;
756 767 case MI_COPY_CASE(MI_COPY_IN, 1):
757 768 break;
758 769 case MI_COPY_CASE(MI_COPY_OUT, 1):
759 770 /* Copy out the strbuf. */
760 771 mi_copyout(q, mp);
761 772 return;
762 773 case MI_COPY_CASE(MI_COPY_OUT, 2):
763 774 /* All done. */
764 775 mi_copy_done(q, mp, 0);
765 776 return;
766 777 default:
767 778 mi_copy_done(q, mp, EPROTO);
768 779 return;
769 780 }
770 781 /* Check alignment of the strbuf */
771 782 if (!OK_32PTR(mp1->b_rptr)) {
772 783 mi_copy_done(q, mp, EINVAL);
773 784 return;
774 785 }
775 786
776 787 STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr);
777 788
778 789 if (connp->conn_family == AF_INET)
779 790 addrlen = sizeof (sin_t);
780 791 else
781 792 addrlen = sizeof (sin6_t);
782 793
783 794 if (STRUCT_FGET(sb, maxlen) < addrlen) {
784 795 mi_copy_done(q, mp, EINVAL);
785 796 return;
786 797 }
787 798
788 799 switch (iocp->ioc_cmd) {
789 800 case TI_GETMYNAME:
790 801 break;
791 802 case TI_GETPEERNAME:
792 803 if (tcp->tcp_state < TCPS_SYN_RCVD) {
793 804 mi_copy_done(q, mp, ENOTCONN);
794 805 return;
795 806 }
796 807 break;
797 808 }
798 809 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE);
799 810 if (!mp1)
800 811 return;
801 812
802 813 STRUCT_FSET(sb, len, addrlen);
803 814 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
804 815 case TI_GETMYNAME:
805 816 (void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr,
806 817 &addrlen);
807 818 break;
808 819 case TI_GETPEERNAME:
809 820 (void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr,
810 821 &addrlen);
811 822 break;
812 823 }
813 824 mp1->b_wptr += addrlen;
814 825 /* Copy out the address */
815 826 mi_copyout(q, mp);
816 827 }
817 828
818 829 /*
819 830 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL
820 831 * messages.
821 832 */
822 833 /* ARGSUSED */
823 834 static void
824 835 tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
825 836 {
826 837 conn_t *connp = (conn_t *)arg;
827 838 tcp_t *tcp = connp->conn_tcp;
828 839 queue_t *q = connp->conn_wq;
829 840 struct iocblk *iocp;
830 841
831 842 ASSERT(DB_TYPE(mp) == M_IOCTL);
832 843 /*
833 844 * Try and ASSERT the minimum possible references on the
834 845 * conn early enough. Since we are executing on write side,
835 846 * the connection is obviously not detached and that means
836 847 * there is a ref each for TCP and IP. Since we are behind
837 848 * the squeue, the minimum references needed are 3. If the
838 849 * conn is in classifier hash list, there should be an
839 850 * extra ref for that (we check both the possibilities).
840 851 */
841 852 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
842 853 (connp->conn_fanout == NULL && connp->conn_ref >= 3));
843 854
844 855 iocp = (struct iocblk *)mp->b_rptr;
845 856 switch (iocp->ioc_cmd) {
846 857 case _SIOCSOCKFALLBACK:
847 858 /*
848 859 * Either sockmod is about to be popped and the socket
849 860 * would now be treated as a plain stream, or a module
850 861 * is about to be pushed so we could no longer use read-
851 862 * side synchronous streams for fused loopback tcp.
852 863 * Drain any queued data and disable direct sockfs
853 864 * interface from now on.
854 865 */
855 866 if (!tcp->tcp_issocket) {
856 867 DB_TYPE(mp) = M_IOCNAK;
857 868 iocp->ioc_error = EINVAL;
858 869 } else {
859 870 tcp_use_pure_tpi(tcp);
860 871 DB_TYPE(mp) = M_IOCACK;
861 872 iocp->ioc_error = 0;
862 873 }
863 874 iocp->ioc_count = 0;
864 875 iocp->ioc_rval = 0;
865 876 qreply(q, mp);
866 877 return;
867 878 }
868 879
869 880 /*
870 881 * If the conn is closing, then error the ioctl here. Otherwise bump the
871 882 * conn_ioctlref to hold off tcp_close until we're done here.
872 883 */
873 884 mutex_enter(&(connp)->conn_lock);
874 885 if ((connp)->conn_state_flags & CONN_CLOSING) {
875 886 mutex_exit(&(connp)->conn_lock);
876 887 iocp->ioc_error = EINVAL;
877 888 mp->b_datap->db_type = M_IOCNAK;
878 889 iocp->ioc_count = 0;
879 890 qreply(q, mp);
880 891 return;
881 892 }
882 893
883 894 CONN_INC_IOCTLREF_LOCKED(connp);
884 895 ip_wput_nondata(q, mp);
885 896 CONN_DEC_IOCTLREF(connp);
886 897 }
887 898
888 899 /*
889 900 * This routine is called by tcp_wput() to handle all TPI requests.
890 901 */
891 902 /* ARGSUSED */
892 903 static void
893 904 tcp_wput_proto(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
894 905 {
895 906 conn_t *connp = (conn_t *)arg;
896 907 tcp_t *tcp = connp->conn_tcp;
897 908 union T_primitives *tprim = (union T_primitives *)mp->b_rptr;
898 909 uchar_t *rptr;
899 910 t_scalar_t type;
900 911 cred_t *cr;
901 912
902 913 /*
903 914 * Try and ASSERT the minimum possible references on the
904 915 * conn early enough. Since we are executing on write side,
905 916 * the connection is obviously not detached and that means
906 917 * there is a ref each for TCP and IP. Since we are behind
907 918 * the squeue, the minimum references needed are 3. If the
908 919 * conn is in classifier hash list, there should be an
909 920 * extra ref for that (we check both the possibilities).
910 921 */
911 922 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
912 923 (connp->conn_fanout == NULL && connp->conn_ref >= 3));
913 924
914 925 rptr = mp->b_rptr;
915 926 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
916 927 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
917 928 type = ((union T_primitives *)rptr)->type;
918 929 if (type == T_EXDATA_REQ) {
919 930 tcp_output_urgent(connp, mp, arg2, NULL);
920 931 } else if (type != T_DATA_REQ) {
921 932 goto non_urgent_data;
922 933 } else {
923 934 /* TODO: options, flags, ... from user */
924 935 /* Set length to zero for reclamation below */
925 936 tcp_wput_data(tcp, mp->b_cont, B_TRUE);
926 937 freeb(mp);
927 938 }
928 939 return;
929 940 } else {
930 941 if (connp->conn_debug) {
931 942 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
932 943 "tcp_wput_proto, dropping one...");
933 944 }
934 945 freemsg(mp);
935 946 return;
936 947 }
937 948
938 949 non_urgent_data:
939 950
940 951 switch ((int)tprim->type) {
941 952 case O_T_BIND_REQ: /* bind request */
942 953 case T_BIND_REQ: /* new semantics bind request */
943 954 tcp_tpi_bind(tcp, mp);
944 955 break;
945 956 case T_UNBIND_REQ: /* unbind request */
946 957 tcp_tpi_unbind(tcp, mp);
947 958 break;
948 959 case O_T_CONN_RES: /* old connection response XXX */
949 960 case T_CONN_RES: /* connection response */
950 961 tcp_tli_accept(tcp, mp);
951 962 break;
952 963 case T_CONN_REQ: /* connection request */
953 964 tcp_tpi_connect(tcp, mp);
954 965 break;
955 966 case T_DISCON_REQ: /* disconnect request */
956 967 tcp_disconnect(tcp, mp);
957 968 break;
958 969 case T_CAPABILITY_REQ:
959 970 tcp_capability_req(tcp, mp); /* capability request */
960 971 break;
961 972 case T_INFO_REQ: /* information request */
962 973 tcp_info_req(tcp, mp);
963 974 break;
964 975 case T_SVR4_OPTMGMT_REQ: /* manage options req */
965 976 case T_OPTMGMT_REQ:
966 977 /*
967 978 * Note: no support for snmpcom_req() through new
968 979 * T_OPTMGMT_REQ. See comments in ip.c
969 980 */
970 981
971 982 /*
972 983 * All Solaris components should pass a db_credp
973 984 * for this TPI message, hence we ASSERT.
974 985 * But in case there is some other M_PROTO that looks
975 986 * like a TPI message sent by some other kernel
976 987 * component, we check and return an error.
977 988 */
978 989 cr = msg_getcred(mp, NULL);
979 990 ASSERT(cr != NULL);
980 991 if (cr == NULL) {
981 992 tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
982 993 return;
983 994 }
984 995 /*
985 996 * If EINPROGRESS is returned, the request has been queued
986 997 * for subsequent processing by ip_restart_optmgmt(), which
987 998 * will do the CONN_DEC_REF().
988 999 */
989 1000 if ((int)tprim->type == T_SVR4_OPTMGMT_REQ) {
990 1001 svr4_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
991 1002 } else {
992 1003 tpi_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
993 1004 }
994 1005 break;
995 1006
996 1007 case T_UNITDATA_REQ: /* unitdata request */
997 1008 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
998 1009 break;
999 1010 case T_ORDREL_REQ: /* orderly release req */
1000 1011 freemsg(mp);
1001 1012
1002 1013 if (tcp->tcp_fused)
1003 1014 tcp_unfuse(tcp);
1004 1015
1005 1016 if (tcp_xmit_end(tcp) != 0) {
1006 1017 /*
1007 1018 * We were crossing FINs and got a reset from
1008 1019 * the other side. Just ignore it.
1009 1020 */
1010 1021 if (connp->conn_debug) {
1011 1022 (void) strlog(TCP_MOD_ID, 0, 1,
1012 1023 SL_ERROR|SL_TRACE,
1013 1024 "tcp_wput_proto, T_ORDREL_REQ out of "
1014 1025 "state %s",
1015 1026 tcp_display(tcp, NULL,
1016 1027 DISP_ADDR_AND_PORT));
1017 1028 }
1018 1029 }
1019 1030 break;
1020 1031 case T_ADDR_REQ:
1021 1032 tcp_addr_req(tcp, mp);
1022 1033 break;
1023 1034 default:
1024 1035 if (connp->conn_debug) {
1025 1036 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
1026 1037 "tcp_wput_proto, bogus TPI msg, type %d",
1027 1038 tprim->type);
1028 1039 }
1029 1040 /*
1030 1041 		 * We used to M_ERROR. Sending TNOTSUPPORT gives the user a chance
1031 1042 * to recover.
1032 1043 */
1033 1044 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
1034 1045 break;
1035 1046 }
1036 1047 }
1037 1048
1038 1049 /*
1039 1050 * Handle special out-of-band ioctl requests (see PSARC/2008/265).
1040 1051 */
1041 1052 static void
1042 1053 tcp_wput_cmdblk(queue_t *q, mblk_t *mp)
1043 1054 {
1044 1055 void *data;
1045 1056 mblk_t *datamp = mp->b_cont;
1046 1057 conn_t *connp = Q_TO_CONN(q);
1047 1058 tcp_t *tcp = connp->conn_tcp;
1048 1059 cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr;
1049 1060
1050 1061 if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) {
1051 1062 cmdp->cb_error = EPROTO;
1052 1063 qreply(q, mp);
1053 1064 return;
1054 1065 }
1055 1066
1056 1067 data = datamp->b_rptr;
1057 1068
1058 1069 switch (cmdp->cb_cmd) {
1059 1070 case TI_GETPEERNAME:
1060 1071 if (tcp->tcp_state < TCPS_SYN_RCVD)
1061 1072 cmdp->cb_error = ENOTCONN;
1062 1073 else
1063 1074 cmdp->cb_error = conn_getpeername(connp, data,
1064 1075 &cmdp->cb_len);
1065 1076 break;
1066 1077 case TI_GETMYNAME:
1067 1078 cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len);
1068 1079 break;
1069 1080 default:
1070 1081 cmdp->cb_error = EINVAL;
1071 1082 break;
1072 1083 }
1073 1084
1074 1085 qreply(q, mp);
1075 1086 }
1076 1087
1077 1088 /*
1078 1089 * The TCP fast path write put procedure.
1079 1090 * NOTE: the logic of the fast path is duplicated from tcp_wput_data()
1080 1091 */
1081 1092 /* ARGSUSED */
1082 1093 void
1083 1094 tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1084 1095 {
1085 1096 int len;
1086 1097 int hdrlen;
1087 1098 int plen;
1088 1099 mblk_t *mp1;
1089 1100 uchar_t *rptr;
1090 1101 uint32_t snxt;
1091 1102 tcpha_t *tcpha;
1092 1103 struct datab *db;
1093 1104 uint32_t suna;
1094 1105 uint32_t mss;
1095 1106 ipaddr_t *dst;
1096 1107 ipaddr_t *src;
1097 1108 uint32_t sum;
1098 1109 int usable;
1099 1110 conn_t *connp = (conn_t *)arg;
1100 1111 tcp_t *tcp = connp->conn_tcp;
1101 1112 uint32_t msize;
1102 1113 tcp_stack_t *tcps = tcp->tcp_tcps;
1103 1114 ip_xmit_attr_t *ixa;
1104 1115 clock_t now;
1105 1116
1106 1117 /*
1107 1118 * Try and ASSERT the minimum possible references on the
1108 1119 * conn early enough. Since we are executing on write side,
1109 1120 * the connection is obviously not detached and that means
1110 1121 * there is a ref each for TCP and IP. Since we are behind
1111 1122 * the squeue, the minimum references needed are 3. If the
1112 1123 * conn is in classifier hash list, there should be an
1113 1124 * extra ref for that (we check both the possibilities).
1114 1125 */
1115 1126 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
1116 1127 (connp->conn_fanout == NULL && connp->conn_ref >= 3));
1117 1128
1118 1129 ASSERT(DB_TYPE(mp) == M_DATA);
1119 1130 msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
1120 1131
1121 1132 mutex_enter(&tcp->tcp_non_sq_lock);
1122 1133 tcp->tcp_squeue_bytes -= msize;
1123 1134 mutex_exit(&tcp->tcp_non_sq_lock);
1124 1135
1125 1136 /* Bypass tcp protocol for fused tcp loopback */
1126 1137 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
1127 1138 return;
1128 1139
1129 1140 mss = tcp->tcp_mss;
1130 1141 /*
1131 1142 	 * If ZEROCOPY has been turned off, try not to send any zero-copy message
1132 1143 	 * down. Back off now.
1133 1144 */
1134 1145 if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on)
1135 1146 mp = tcp_zcopy_backoff(tcp, mp, B_FALSE);
1136 1147
1137 1148
1138 1149 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
1139 1150 len = (int)(mp->b_wptr - mp->b_rptr);
1140 1151
1141 1152 /*
1142 1153 * Criteria for fast path:
1143 1154 *
1144 1155 * 1. no unsent data
1145 1156 * 2. single mblk in request
1146 1157 * 3. connection established
1147 1158 * 4. data in mblk
1148 1159 * 5. len <= mss
1149 1160 * 6. no tcp_valid bits
1150 1161 */
1151 1162 if ((tcp->tcp_unsent != 0) ||
1152 1163 (tcp->tcp_cork) ||
1153 1164 (mp->b_cont != NULL) ||
1154 1165 (tcp->tcp_state != TCPS_ESTABLISHED) ||
1155 1166 (len == 0) ||
1156 1167 (len > mss) ||
1157 1168 (tcp->tcp_valid_bits != 0)) {
1158 1169 tcp_wput_data(tcp, mp, B_FALSE);
1159 1170 return;
1160 1171 }
1161 1172
1162 1173 ASSERT(tcp->tcp_xmit_tail_unsent == 0);
1163 1174 ASSERT(tcp->tcp_fin_sent == 0);
1164 1175
1165 1176 /* queue new packet onto retransmission queue */
1166 1177 if (tcp->tcp_xmit_head == NULL) {
1167 1178 tcp->tcp_xmit_head = mp;
1168 1179 } else {
1169 1180 tcp->tcp_xmit_last->b_cont = mp;
1170 1181 }
1171 1182 tcp->tcp_xmit_last = mp;
1172 1183 tcp->tcp_xmit_tail = mp;
1173 1184
1174 1185 /* find out how much we can send */
1175 1186 /* BEGIN CSTYLED */
1176 1187 /*
1177 1188 * un-acked usable
1178 1189 * |--------------|-----------------|
1179 1190 * tcp_suna tcp_snxt tcp_suna+tcp_swnd
1180 1191 */
1181 1192 /* END CSTYLED */
1182 1193
1183 1194 /* start sending from tcp_snxt */
1184 1195 snxt = tcp->tcp_snxt;
1185 1196
1186 1197 /*
1187 1198 * Check to see if this connection has been idle for some time and no
1188 1199 * ACK is expected. If so, then the congestion window size is no longer
1189 1200 * meaningfully tied to current network conditions.
1190 1201 *
1191 1202 * We reinitialize tcp_cwnd, and slow start again to get back the
1192 1203 * connection's "self-clock" as described in Van Jacobson's 1988 paper
1193 1204 * "Congestion avoidance and control".
1194 1205 */
1195 1206 now = LBOLT_FASTPATH;
1196 1207 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
1197 1208 (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
1198 - TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
1209 + cc_after_idle(tcp);
1199 1210 }
1200 1211
1201 1212 usable = tcp->tcp_swnd; /* tcp window size */
1202 1213 if (usable > tcp->tcp_cwnd)
1203 1214 usable = tcp->tcp_cwnd; /* congestion window smaller */
1204 1215 usable -= snxt; /* subtract stuff already sent */
1205 1216 suna = tcp->tcp_suna;
1206 1217 usable += suna;
1207 1218 /* usable can be < 0 if the congestion window is smaller */
1208 1219 if (len > usable) {
1209 1220 /* Can't send complete M_DATA in one shot */
1210 1221 goto slow;
1211 1222 }
1212 1223
1213 1224 mutex_enter(&tcp->tcp_non_sq_lock);
1214 1225 if (tcp->tcp_flow_stopped &&
1215 1226 TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
1216 1227 tcp_clrqfull(tcp);
1217 1228 }
1218 1229 mutex_exit(&tcp->tcp_non_sq_lock);
1219 1230
1220 1231 /*
1221 1232 * determine if anything to send (Nagle).
1222 1233 *
1223 1234 * 1. len < tcp_mss (i.e. small)
1224 1235 * 2. unacknowledged data present
1225 1236 * 3. len < nagle limit
1226 1237 * 4. last packet sent < nagle limit (previous packet sent)
1227 1238 */
1228 1239 if ((len < mss) && (snxt != suna) &&
1229 1240 (len < (int)tcp->tcp_naglim) &&
1230 1241 (tcp->tcp_last_sent_len < tcp->tcp_naglim)) {
1231 1242 /*
1232 1243 * This was the first unsent packet and normally
1233 1244 * mss < xmit_hiwater so there is no need to worry
1234 1245 * about flow control. The next packet will go
1235 1246 * through the flow control check in tcp_wput_data().
1236 1247 */
1237 1248 /* leftover work from above */
1238 1249 tcp->tcp_unsent = len;
1239 1250 tcp->tcp_xmit_tail_unsent = len;
1240 1251
1241 1252 return;
1242 1253 }
1243 1254
1244 1255 /*
1245 1256 * len <= tcp->tcp_mss && len == unsent so no sender silly window. Can
1246 1257 * send now.
1247 1258 */
1248 1259
1249 1260 if (snxt == suna) {
1250 1261 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
1251 1262 }
1252 1263
1253 1264 /* we have always sent something */
1254 1265 tcp->tcp_rack_cnt = 0;
1255 1266
1256 1267 tcp->tcp_snxt = snxt + len;
1257 1268 tcp->tcp_rack = tcp->tcp_rnxt;
1258 1269
1259 1270 if ((mp1 = dupb(mp)) == 0)
1260 1271 goto no_memory;
1261 1272 mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
1262 1273 mp->b_next = (mblk_t *)(uintptr_t)snxt;
1263 1274
1264 1275 /* adjust tcp header information */
1265 1276 tcpha = tcp->tcp_tcpha;
1266 1277 tcpha->tha_flags = (TH_ACK|TH_PUSH);
1267 1278
1268 1279 sum = len + connp->conn_ht_ulp_len + connp->conn_sum;
1269 1280 sum = (sum >> 16) + (sum & 0xFFFF);
1270 1281 tcpha->tha_sum = htons(sum);
1271 1282
1272 1283 tcpha->tha_seq = htonl(snxt);
1273 1284
1274 1285 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1275 1286 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1276 1287 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1277 1288 tcp->tcp_cs.tcp_out_data_segs++;
1278 1289 tcp->tcp_cs.tcp_out_data_bytes += len;
1279 1290
1280 1291 /* Update the latest receive window size in TCP header. */
1281 1292 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
1282 1293
1283 1294 tcp->tcp_last_sent_len = (ushort_t)len;
1284 1295
1285 1296 plen = len + connp->conn_ht_iphc_len;
1286 1297
1287 1298 ixa = connp->conn_ixa;
1288 1299 ixa->ixa_pktlen = plen;
1289 1300
1290 1301 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1291 1302 tcp->tcp_ipha->ipha_length = htons(plen);
1292 1303 } else {
1293 1304 tcp->tcp_ip6h->ip6_plen = htons(plen - IPV6_HDR_LEN);
1294 1305 }
1295 1306
1296 1307 /* see if we need to allocate a mblk for the headers */
1297 1308 hdrlen = connp->conn_ht_iphc_len;
1298 1309 rptr = mp1->b_rptr - hdrlen;
1299 1310 db = mp1->b_datap;
1300 1311 if ((db->db_ref != 2) || rptr < db->db_base ||
1301 1312 (!OK_32PTR(rptr))) {
1302 1313 /* NOTE: we assume allocb returns an OK_32PTR */
1303 1314 mp = allocb(hdrlen + tcps->tcps_wroff_xtra, BPRI_MED);
1304 1315 if (!mp) {
1305 1316 freemsg(mp1);
1306 1317 goto no_memory;
1307 1318 }
1308 1319 mp->b_cont = mp1;
1309 1320 mp1 = mp;
1310 1321 /* Leave room for Link Level header */
1311 1322 rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra];
1312 1323 mp1->b_wptr = &rptr[hdrlen];
1313 1324 }
1314 1325 mp1->b_rptr = rptr;
1315 1326
1316 1327 /* Fill in the timestamp option. */
1317 1328 if (tcp->tcp_snd_ts_ok) {
1318 1329 U32_TO_BE32(now,
1319 1330 (char *)tcpha + TCP_MIN_HEADER_LENGTH + 4);
1320 1331 U32_TO_BE32(tcp->tcp_ts_recent,
1321 1332 (char *)tcpha + TCP_MIN_HEADER_LENGTH + 8);
1322 1333 } else {
1323 1334 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
1324 1335 }
1325 1336
1326 1337 /* copy header into outgoing packet */
1327 1338 dst = (ipaddr_t *)rptr;
1328 1339 src = (ipaddr_t *)connp->conn_ht_iphc;
1329 1340 dst[0] = src[0];
1330 1341 dst[1] = src[1];
1331 1342 dst[2] = src[2];
1332 1343 dst[3] = src[3];
1333 1344 dst[4] = src[4];
1334 1345 dst[5] = src[5];
1335 1346 dst[6] = src[6];
1336 1347 dst[7] = src[7];
1337 1348 dst[8] = src[8];
1338 1349 dst[9] = src[9];
1339 1350 if (hdrlen -= 40) {
1340 1351 hdrlen >>= 2;
1341 1352 dst += 10;
1342 1353 src += 10;
1343 1354 do {
1344 1355 *dst++ = *src++;
1345 1356 } while (--hdrlen);
1346 1357 }
1347 1358
1348 1359 /*
1349 1360 * Set the ECN info in the TCP header. Note that this
1350 1361 * is not the template header.
1351 1362 */
1352 1363 if (tcp->tcp_ecn_ok) {
1353 1364 TCP_SET_ECT(tcp, rptr);
1354 1365
1355 1366 tcpha = (tcpha_t *)(rptr + ixa->ixa_ip_hdr_length);
1356 1367 if (tcp->tcp_ecn_echo_on)
1357 1368 tcpha->tha_flags |= TH_ECE;
1358 1369 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
1359 1370 tcpha->tha_flags |= TH_CWR;
1360 1371 tcp->tcp_ecn_cwr_sent = B_TRUE;
1361 1372 }
1362 1373 }
1363 1374
1364 1375 if (tcp->tcp_ip_forward_progress) {
1365 1376 tcp->tcp_ip_forward_progress = B_FALSE;
1366 1377 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
1367 1378 } else {
1368 1379 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
1369 1380 }
1370 1381 tcp_send_data(tcp, mp1);
1371 1382 return;
1372 1383
1373 1384 /*
1374 1385 * If we ran out of memory, we pretend to have sent the packet
1375 1386 * and that it was lost on the wire.
1376 1387 */
1377 1388 no_memory:
1378 1389 return;
1379 1390
1380 1391 slow:
1381 1392 /* leftover work from above */
1382 1393 tcp->tcp_unsent = len;
1383 1394 tcp->tcp_xmit_tail_unsent = len;
1384 1395 tcp_wput_data(tcp, NULL, B_FALSE);
1385 1396 }
1386 1397
1387 1398 /* ARGSUSED2 */
1388 1399 void
1389 1400 tcp_output_urgent(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1390 1401 {
1391 1402 int len;
1392 1403 uint32_t msize;
1393 1404 conn_t *connp = (conn_t *)arg;
1394 1405 tcp_t *tcp = connp->conn_tcp;
1395 1406
1396 1407 msize = msgdsize(mp);
1397 1408
1398 1409 len = msize - 1;
1399 1410 if (len < 0) {
1400 1411 freemsg(mp);
1401 1412 return;
1402 1413 }
1403 1414
1404 1415 /*
1405 1416 * Try to force urgent data out on the wire. Even if we have unsent
1406 1417 * data this will at least send the urgent flag.
1407 1418 * XXX does not handle more flag correctly.
1408 1419 */
1409 1420 len += tcp->tcp_unsent;
1410 1421 len += tcp->tcp_snxt;
1411 1422 tcp->tcp_urg = len;
1412 1423 tcp->tcp_valid_bits |= TCP_URG_VALID;
1413 1424
1414 1425 /* Bypass tcp protocol for fused tcp loopback */
1415 1426 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
1416 1427 return;
1417 1428
1418 1429 /* Strip off the T_EXDATA_REQ if the data is from TPI */
1419 1430 if (DB_TYPE(mp) != M_DATA) {
1420 1431 mblk_t *mp1 = mp;
1421 1432 ASSERT(!IPCL_IS_NONSTR(connp));
1422 1433 mp = mp->b_cont;
1423 1434 freeb(mp1);
1424 1435 }
1425 1436 tcp_wput_data(tcp, mp, B_TRUE);
1426 1437 }
1427 1438
1428 1439 /*
1429 1440  * Called by the streams close routine via squeues when our client blows off its
1430 1441  * descriptor; we take this to mean: "close the stream state NOW, close the tcp
1431 1442  * connection politely". When SO_LINGER is set (with a non-zero linger time and
1432 1443 * it is not a nonblocking socket) then this routine sleeps until the FIN is
1433 1444 * acked.
1434 1445 *
1435 1446 * NOTE: tcp_close potentially returns error when lingering.
1436 1447 * However, the stream head currently does not pass these errors
1437 1448 * to the application. 4.4BSD only returns EINTR and EWOULDBLOCK
1438 1449 * errors to the application (from tsleep()) and not errors
1439 1450 * like ECONNRESET caused by receiving a reset packet.
1440 1451 */
1441 1452
1442 1453 /* ARGSUSED */
1443 1454 void
1444 1455 tcp_close_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1445 1456 {
1446 1457 char *msg;
1447 1458 conn_t *connp = (conn_t *)arg;
1448 1459 tcp_t *tcp = connp->conn_tcp;
1449 1460 clock_t delta = 0;
1450 1461 tcp_stack_t *tcps = tcp->tcp_tcps;
1451 1462
1452 1463 /*
1453 1464 * When a non-STREAMS socket is being closed, it does not always
1454 1465 * stick around waiting for tcp_close_output to run and can therefore
1455 1466 * have dropped a reference already. So adjust the asserts accordingly.
1456 1467 */
1457 1468 ASSERT((connp->conn_fanout != NULL &&
1458 1469 connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 3 : 4)) ||
1459 1470 (connp->conn_fanout == NULL &&
1460 1471 connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3)));
1461 1472
1462 1473 mutex_enter(&tcp->tcp_eager_lock);
1463 1474 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
1464 1475 /*
1465 1476 * Cleanup for listener. For non-STREAM sockets sockfs will
1466 1477 * close all the eagers on 'q', so in that case only deal
1467 1478 * with 'q0'.
1468 1479 */
1469 1480 tcp_eager_cleanup(tcp, IPCL_IS_NONSTR(connp) ? 1 : 0);
1470 1481 tcp->tcp_wait_for_eagers = 1;
1471 1482 }
1472 1483 mutex_exit(&tcp->tcp_eager_lock);
1473 1484
1474 1485 tcp->tcp_lso = B_FALSE;
1475 1486
1476 1487 msg = NULL;
1477 1488 switch (tcp->tcp_state) {
1478 1489 case TCPS_CLOSED:
1479 1490 case TCPS_IDLE:
1480 1491 break;
1481 1492 case TCPS_BOUND:
1482 1493 if (tcp->tcp_listener != NULL) {
1483 1494 ASSERT(IPCL_IS_NONSTR(connp));
1484 1495 /*
1485 1496 * Unlink from the listener and drop the reference
1486 1497 * put on it by the eager. tcp_closei_local will not
1487 1498 * do it because tcp_tconnind_started is TRUE.
1488 1499 */
1489 1500 mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1490 1501 tcp_eager_unlink(tcp);
1491 1502 mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1492 1503 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1493 1504 }
1494 1505 break;
1495 1506 case TCPS_LISTEN:
1496 1507 break;
1497 1508 case TCPS_SYN_SENT:
1498 1509 msg = "tcp_close, during connect";
1499 1510 break;
1500 1511 case TCPS_SYN_RCVD:
1501 1512 /*
1502 1513 * Close during the connect 3-way handshake
1503 1514 * but here there may or may not be pending data
1504 1515 * already on queue. Process almost same as in
1505 1516 * the ESTABLISHED state.
1506 1517 */
1507 1518 /* FALLTHRU */
1508 1519 default:
1509 1520 if (tcp->tcp_fused)
1510 1521 tcp_unfuse(tcp);
1511 1522
1512 1523 /*
1513 1524 * If SO_LINGER has set a zero linger time, abort the
1514 1525 * connection with a reset.
1515 1526 */
1516 1527 if (connp->conn_linger && connp->conn_lingertime == 0) {
1517 1528 msg = "tcp_close, zero lingertime";
1518 1529 break;
1519 1530 }
1520 1531
1521 1532 /*
1522 1533 * Abort connection if there is unread data queued.
1523 1534 */
1524 1535 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) {
1525 1536 msg = "tcp_close, unread data";
1526 1537 break;
1527 1538 }
1528 1539
1529 1540 /*
1530 1541 * Abort connection if it is being closed without first
1531 1542 * being accepted. This can happen if a listening non-STREAM
1532 1543 * socket wants to get rid of the socket, for example, if the
1533 1544 * listener is closing.
1534 1545 */
1535 1546 if (tcp->tcp_listener != NULL) {
1536 1547 ASSERT(IPCL_IS_NONSTR(connp));
1537 1548 msg = "tcp_close, close before accept";
1538 1549
1539 1550 /*
1540 1551 * Unlink from the listener and drop the reference
1541 1552 * put on it by the eager. tcp_closei_local will not
1542 1553 * do it because tcp_tconnind_started is TRUE.
1543 1554 */
1544 1555 mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1545 1556 tcp_eager_unlink(tcp);
1546 1557 mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1547 1558 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1548 1559 break;
1549 1560 }
1550 1561
1551 1562 /*
1552 1563 * Transmit the FIN before detaching the tcp_t.
1553 1564 * After tcp_detach returns this queue/perimeter
1554 1565 * no longer owns the tcp_t thus others can modify it.
1555 1566 */
1556 1567 (void) tcp_xmit_end(tcp);
1557 1568
1558 1569 /*
1559 1570 * If lingering on close then wait until the fin is acked,
1560 1571 * the SO_LINGER time passes, or a reset is sent/received.
1561 1572 */
1562 1573 if (connp->conn_linger && connp->conn_lingertime > 0 &&
1563 1574 !(tcp->tcp_fin_acked) &&
1564 1575 tcp->tcp_state >= TCPS_ESTABLISHED) {
1565 1576 if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) {
1566 1577 tcp->tcp_client_errno = EWOULDBLOCK;
1567 1578 } else if (tcp->tcp_client_errno == 0) {
1568 1579
1569 1580 ASSERT(tcp->tcp_linger_tid == 0);
1570 1581
1571 1582 /* conn_lingertime is in sec. */
1572 1583 tcp->tcp_linger_tid = TCP_TIMER(tcp,
1573 1584 tcp_close_linger_timeout,
1574 1585 connp->conn_lingertime * MILLISEC);
1575 1586
1576 1587 /* tcp_close_linger_timeout will finish close */
1577 1588 if (tcp->tcp_linger_tid == 0)
1578 1589 tcp->tcp_client_errno = ENOSR;
1579 1590 else
1580 1591 return;
1581 1592 }
1582 1593
1583 1594 /*
1584 1595 * Check if we need to detach or just close
1585 1596 * the instance.
1586 1597 */
1587 1598 if (tcp->tcp_state <= TCPS_LISTEN)
1588 1599 break;
1589 1600 }
1590 1601
1591 1602 /*
1592 1603 * Make sure that no other thread will access the conn_rq of
1593 1604 * this instance (through lookups etc.) as conn_rq will go
1594 1605 * away shortly.
1595 1606 */
1596 1607 tcp_acceptor_hash_remove(tcp);
1597 1608
1598 1609 mutex_enter(&tcp->tcp_non_sq_lock);
1599 1610 if (tcp->tcp_flow_stopped) {
1600 1611 tcp_clrqfull(tcp);
1601 1612 }
1602 1613 mutex_exit(&tcp->tcp_non_sq_lock);
1603 1614
1604 1615 if (tcp->tcp_timer_tid != 0) {
1605 1616 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
1606 1617 tcp->tcp_timer_tid = 0;
1607 1618 }
1608 1619 /*
1609 1620 * Need to cancel those timers which will not be used when
1610 1621 * TCP is detached. This has to be done before the conn_wq
1611 1622 * is set to NULL.
1612 1623 */
1613 1624 tcp_timers_stop(tcp);
1614 1625
1615 1626 tcp->tcp_detached = B_TRUE;
1616 1627 if (tcp->tcp_state == TCPS_TIME_WAIT) {
1617 1628 tcp_time_wait_append(tcp);
1618 1629 TCP_DBGSTAT(tcps, tcp_detach_time_wait);
1619 1630 ASSERT(connp->conn_ref >=
1620 1631 (IPCL_IS_NONSTR(connp) ? 2 : 3));
1621 1632 goto finish;
1622 1633 }
1623 1634
1624 1635 /*
1625 1636 * If delta is zero the timer event wasn't executed and was
1626 1637 * successfully canceled. In this case we need to restart it
1627 1638 * with the minimal delta possible.
1628 1639 */
1629 1640 if (delta >= 0)
1630 1641 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1631 1642 delta ? delta : 1);
1632 1643
1633 1644 ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3));
1634 1645 goto finish;
1635 1646 }
1636 1647
1637 1648 /* Detach did not complete. Still need to remove q from stream. */
1638 1649 if (msg) {
1639 1650 if (tcp->tcp_state == TCPS_ESTABLISHED ||
1640 1651 tcp->tcp_state == TCPS_CLOSE_WAIT)
1641 1652 TCPS_BUMP_MIB(tcps, tcpEstabResets);
1642 1653 if (tcp->tcp_state == TCPS_SYN_SENT ||
1643 1654 tcp->tcp_state == TCPS_SYN_RCVD)
1644 1655 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1645 1656 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST);
1646 1657 }
1647 1658
1648 1659 tcp_closei_local(tcp);
1649 1660 CONN_DEC_REF(connp);
1650 1661 ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 1 : 2));
1651 1662
1652 1663 finish:
1653 1664 /*
1654 1665 * Don't change the queues in the case of a listener that has
1655 1666 * eagers in its q or q0. It could surprise the eagers.
1656 1667 * Instead wait for the eagers outside the squeue.
1657 1668 *
1658 1669 * For non-STREAMS sockets tcp_wait_for_eagers implies that
1659 1670 * we should delay the su_closed upcall until all eagers have
1660 1671 * dropped their references.
1661 1672 */
1662 1673 if (!tcp->tcp_wait_for_eagers) {
1663 1674 tcp->tcp_detached = B_TRUE;
1664 1675 connp->conn_rq = NULL;
1665 1676 connp->conn_wq = NULL;
1666 1677
1667 1678 /* non-STREAM socket, release the upper handle */
1668 1679 if (IPCL_IS_NONSTR(connp)) {
1669 1680 ASSERT(connp->conn_upper_handle != NULL);
1670 1681 (*connp->conn_upcalls->su_closed)
1671 1682 (connp->conn_upper_handle);
1672 1683 connp->conn_upper_handle = NULL;
1673 1684 connp->conn_upcalls = NULL;
1674 1685 }
1675 1686 }
1676 1687
1677 1688 /* Signal tcp_close() to finish closing. */
1678 1689 mutex_enter(&tcp->tcp_closelock);
1679 1690 tcp->tcp_closed = 1;
1680 1691 cv_signal(&tcp->tcp_closecv);
1681 1692 mutex_exit(&tcp->tcp_closelock);
1682 1693 }
1683 1694
1684 1695 /* ARGSUSED */
1685 1696 void
1686 1697 tcp_shutdown_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1687 1698 {
1688 1699 conn_t *connp = (conn_t *)arg;
1689 1700 tcp_t *tcp = connp->conn_tcp;
1690 1701
1691 1702 freemsg(mp);
1692 1703
1693 1704 if (tcp->tcp_fused)
1694 1705 tcp_unfuse(tcp);
1695 1706
1696 1707 if (tcp_xmit_end(tcp) != 0) {
1697 1708 /*
1698 1709 * We were crossing FINs and got a reset from
1699 1710 * the other side. Just ignore it.
1700 1711 */
1701 1712 if (connp->conn_debug) {
1702 1713 (void) strlog(TCP_MOD_ID, 0, 1,
1703 1714 SL_ERROR|SL_TRACE,
1704 1715 "tcp_shutdown_output() out of state %s",
1705 1716 tcp_display(tcp, NULL, DISP_ADDR_AND_PORT));
1706 1717 }
1707 1718 }
1708 1719 }
1709 1720
1710 1721 #pragma inline(tcp_send_data)
1711 1722
1712 1723 void
1713 1724 tcp_send_data(tcp_t *tcp, mblk_t *mp)
1714 1725 {
1715 1726 conn_t *connp = tcp->tcp_connp;
1716 1727
1717 1728 /*
1718 1729 	 * Check here to avoid sending a zero-copy message down to IP when
1719 1730 	 * the ZEROCOPY capability has been turned off. We only need to deal
1720 1731 	 * with the race condition between sockfs and the notification here.
1721 1732 	 * Since we back off tcp_xmit_head when zero-copy is turned off, and
1722 1733 	 * new messages are handled in tcp_output(), we simply drop the
1723 1734 	 * dup'ed packet here and let TCP retransmit if tcp_xmit_zc_clean
1724 1735 * is not true.
1725 1736 */
1726 1737 if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on &&
1727 1738 !tcp->tcp_xmit_zc_clean) {
1728 1739 ip_drop_output("TCP ZC was disabled but not clean", mp, NULL);
1729 1740 freemsg(mp);
1730 1741 return;
1731 1742 }
1732 1743
1733 1744 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa,
1734 1745 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, tcp,
1735 1746 __dtrace_tcp_tcph_t *,
1736 1747 &mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
1737 1748
1738 1749 ASSERT(connp->conn_ixa->ixa_notify_cookie == connp->conn_tcp);
1739 1750 (void) conn_ip_output(mp, connp->conn_ixa);
1740 1751 }
1741 1752
1742 1753 /* ARGSUSED2 */
1743 1754 void
1744 1755 tcp_send_synack(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1745 1756 {
1746 1757 conn_t *econnp = (conn_t *)arg;
1747 1758 tcp_t *tcp = econnp->conn_tcp;
1748 1759 ip_xmit_attr_t *ixa = econnp->conn_ixa;
1749 1760
1750 1761 /* Guard against a RST having blown it away while on the squeue */
1751 1762 if (tcp->tcp_state == TCPS_CLOSED) {
1752 1763 freemsg(mp);
1753 1764 return;
1754 1765 }
1755 1766
1756 1767 /*
1757 1768 * In the off-chance that the eager received and responded to
1758 1769 * some other packet while the SYN|ACK was queued, we recalculate
1759 1770 * the ixa_pktlen. It would be better to fix the SYN/accept
1760 1771 * multithreading scheme to avoid this complexity.
1761 1772 */
1762 1773 ixa->ixa_pktlen = msgdsize(mp);
1763 1774 (void) conn_ip_output(mp, ixa);
1764 1775 }
1765 1776
1766 1777 /*
1767 1778 * tcp_send() is called by tcp_wput_data() and returns one of the following:
1768 1779 *
1769 1780 * -1 = failed allocation.
1770 1781 * 0 = We've either successfully sent data, or our usable send window is too
1771 1782 * small and we'd rather wait until later before sending again.
1772 1783 */
1773 1784 static int
1774 1785 tcp_send(tcp_t *tcp, const int mss, const int total_hdr_len,
1775 1786 const int tcp_hdr_len, const int num_sack_blk, int *usable,
1776 1787 uint32_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time)
1777 1788 {
1778 1789 int num_lso_seg = 1;
1779 1790 uint_t lso_usable;
1780 1791 boolean_t do_lso_send = B_FALSE;
1781 1792 tcp_stack_t *tcps = tcp->tcp_tcps;
1782 1793 conn_t *connp = tcp->tcp_connp;
1783 1794 ip_xmit_attr_t *ixa = connp->conn_ixa;
1784 1795
1785 1796 /*
1786 1797 * Check LSO possibility. The value of tcp->tcp_lso indicates whether
1787 1798 * the underlying connection is LSO capable. Will check whether having
1788 1799 * enough available data to initiate LSO transmission in the for(){}
1789 1800 * loops.
1790 1801 */
1791 1802 if (tcp->tcp_lso && (tcp->tcp_valid_bits & ~TCP_FSS_VALID) == 0)
1792 1803 do_lso_send = B_TRUE;
1793 1804
1794 1805 for (;;) {
1795 1806 struct datab *db;
1796 1807 tcpha_t *tcpha;
1797 1808 uint32_t sum;
1798 1809 mblk_t *mp, *mp1;
1799 1810 uchar_t *rptr;
1800 1811 int len;
1801 1812
1802 1813 /*
1803 1814 * Calculate the maximum payload length we can send at one
1804 1815 * time.
1805 1816 */
1806 1817 if (do_lso_send) {
1807 1818 /*
1808 1819 * Determine whether or not it's possible to do LSO,
1809 1820 * and if so, how much data we can send.
1810 1821 */
1811 1822 if ((*usable - 1) / mss >= 1) {
1812 1823 lso_usable = MIN(tcp->tcp_lso_max, *usable);
1813 1824 num_lso_seg = lso_usable / mss;
1814 1825 if (lso_usable % mss) {
1815 1826 num_lso_seg++;
1816 1827 tcp->tcp_last_sent_len = (ushort_t)
1817 1828 (lso_usable % mss);
1818 1829 } else {
1819 1830 tcp->tcp_last_sent_len = (ushort_t)mss;
1820 1831 }
1821 1832 } else {
1822 1833 do_lso_send = B_FALSE;
1823 1834 num_lso_seg = 1;
1824 1835 lso_usable = mss;
1825 1836 }
1826 1837 }
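		/*
		 * Illustrative example of the arithmetic above: with
		 * *usable = 10000 bytes and an MSS of 1460, lso_usable is
		 * MIN(tcp_lso_max, 10000); if that is 10000 we get 6 full
		 * segments plus a 1240-byte runt, so num_lso_seg is 7 and
		 * tcp_last_sent_len records 1240.
		 */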
1827 1838
1828 1839 ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
1829 1840
1830 1841 len = mss;
1831 1842 if (len > *usable) {
1832 1843 ASSERT(do_lso_send == B_FALSE);
1833 1844
1834 1845 len = *usable;
1835 1846 if (len <= 0) {
1836 1847 /* Terminate the loop */
1837 1848 break; /* success; too small */
1838 1849 }
1839 1850 /*
1840 1851 * Sender silly-window avoidance.
1841 1852 * Ignore this if we are going to send a
1842 1853 * zero window probe out.
1843 1854 *
1844 1855 * TODO: force data into microscopic window?
1845 1856 * ==> (!pushed || (unsent > usable))
1846 1857 */
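			/*
			 * For example, if the peer has ever advertised a
			 * 64 KB window (tcp_max_swnd), a segment shorter
			 * than 32 KB is deferred here while more than a
			 * segment's worth of data remains unsent, unless it
			 * is a one-byte urgent send or a zero window probe.
			 */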
1847 1858 if (len < (tcp->tcp_max_swnd >> 1) &&
1848 1859 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
1849 1860 !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
1850 1861 len == 1) && (! tcp->tcp_zero_win_probe)) {
1851 1862 /*
1852 1863 * If the retransmit timer is not running
1853 1864 * we start it so that we will retransmit
1854 1865 * in the case when the receiver has
1855 1866 * decremented the window.
1856 1867 */
1857 1868 if (*snxt == tcp->tcp_snxt &&
1858 1869 *snxt == tcp->tcp_suna) {
1859 1870 /*
1860 1871 * We are not supposed to send
1861 1872 * anything. So let's wait a little
1862 1873 * bit longer before breaking SWS
1863 1874 * avoidance.
1864 1875 *
1865 1876 * What should the value be?
1866 1877 * Suggestion: MAX(init rexmit time,
1867 1878 * tcp->tcp_rto)
1868 1879 */
1869 1880 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
1870 1881 }
1871 1882 break; /* success; too small */
1872 1883 }
1873 1884 }
1874 1885
1875 1886 tcpha = tcp->tcp_tcpha;
1876 1887
1877 1888 /*
1878 1889 * The reason to adjust len here is that we need to set flags
1879 1890 * and calculate checksum.
1880 1891 */
1881 1892 if (do_lso_send)
1882 1893 len = lso_usable;
1883 1894
1884 1895 *usable -= len; /* Approximate - can be adjusted later */
1885 1896 if (*usable > 0)
1886 1897 tcpha->tha_flags = TH_ACK;
1887 1898 else
1888 1899 tcpha->tha_flags = (TH_ACK | TH_PUSH);
1889 1900
1890 1901 /*
1891 1902 * Prime pump for IP's checksumming on our behalf.
1892 1903 * Include the adjustment for a source route if any.
1893 1904 	 * In the LSO case the partial pseudo-header checksum must
1894 1905 	 * exclude the TCP length, so zero tha_sum before IP calculates
1895 1906 	 * the pseudo-header checksum for partial checksum offload.
1896 1907 */
1897 1908 if (do_lso_send) {
1898 1909 sum = 0;
1899 1910 } else {
1900 1911 sum = len + tcp_hdr_len + connp->conn_sum;
1901 1912 sum = (sum >> 16) + (sum & 0xFFFF);
1902 1913 }
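		/*
		 * Illustrative figures: len = 1460, tcp_hdr_len = 32 and
		 * conn_sum = 0xfff0 give 0x105c4; folding the carry back in
		 * leaves 0x05c5 in tha_sum.
		 */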
1903 1914 tcpha->tha_sum = htons(sum);
1904 1915 tcpha->tha_seq = htonl(*snxt);
1905 1916
1906 1917 /*
1907 1918 * Branch off to tcp_xmit_mp() if any of the VALID bits is
1908 1919 * set. For the case when TCP_FSS_VALID is the only valid
1909 1920 * bit (normal active close), branch off only when we think
1910 1921 * that the FIN flag needs to be set. Note for this case,
1911 1922 * that (snxt + len) may not reflect the actual seg_len,
1912 1923 * as len may be further reduced in tcp_xmit_mp(). If len
1913 1924 * gets modified, we will end up here again.
1914 1925 */
1915 1926 if (tcp->tcp_valid_bits != 0 &&
1916 1927 (tcp->tcp_valid_bits != TCP_FSS_VALID ||
1917 1928 ((*snxt + len) == tcp->tcp_fss))) {
1918 1929 uchar_t *prev_rptr;
1919 1930 uint32_t prev_snxt = tcp->tcp_snxt;
1920 1931
1921 1932 if (*tail_unsent == 0) {
1922 1933 ASSERT((*xmit_tail)->b_cont != NULL);
1923 1934 *xmit_tail = (*xmit_tail)->b_cont;
1924 1935 prev_rptr = (*xmit_tail)->b_rptr;
1925 1936 *tail_unsent = (int)((*xmit_tail)->b_wptr -
1926 1937 (*xmit_tail)->b_rptr);
1927 1938 } else {
1928 1939 prev_rptr = (*xmit_tail)->b_rptr;
1929 1940 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr -
1930 1941 *tail_unsent;
1931 1942 }
1932 1943 mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL,
1933 1944 *snxt, B_FALSE, (uint32_t *)&len, B_FALSE);
1934 1945 /* Restore tcp_snxt so we get amount sent right. */
1935 1946 tcp->tcp_snxt = prev_snxt;
1936 1947 if (prev_rptr == (*xmit_tail)->b_rptr) {
1937 1948 /*
1938 1949 * If the previous timestamp is still in use,
1939 1950 * don't stomp on it.
1940 1951 */
1941 1952 if ((*xmit_tail)->b_next == NULL) {
1942 1953 (*xmit_tail)->b_prev = local_time;
1943 1954 (*xmit_tail)->b_next =
1944 1955 (mblk_t *)(uintptr_t)(*snxt);
1945 1956 }
1946 1957 } else
1947 1958 (*xmit_tail)->b_rptr = prev_rptr;
1948 1959
1949 1960 if (mp == NULL) {
1950 1961 return (-1);
1951 1962 }
1952 1963 mp1 = mp->b_cont;
1953 1964
1954 1965 if (len <= mss) /* LSO is unusable (!do_lso_send) */
1955 1966 tcp->tcp_last_sent_len = (ushort_t)len;
1956 1967 while (mp1->b_cont) {
1957 1968 *xmit_tail = (*xmit_tail)->b_cont;
1958 1969 (*xmit_tail)->b_prev = local_time;
1959 1970 (*xmit_tail)->b_next =
1960 1971 (mblk_t *)(uintptr_t)(*snxt);
1961 1972 mp1 = mp1->b_cont;
1962 1973 }
1963 1974 *snxt += len;
1964 1975 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr;
1965 1976 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1966 1977 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1967 1978 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1968 1979 tcp->tcp_cs.tcp_out_data_segs++;
1969 1980 tcp->tcp_cs.tcp_out_data_bytes += len;
1970 1981 tcp_send_data(tcp, mp);
1971 1982 continue;
1972 1983 }
1973 1984
1974 1985 *snxt += len; /* Adjust later if we don't send all of len */
1975 1986 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
1976 1987 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1977 1988 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1978 1989 tcp->tcp_cs.tcp_out_data_segs++;
1979 1990 tcp->tcp_cs.tcp_out_data_bytes += len;
1980 1991
1981 1992 if (*tail_unsent) {
1982 1993 /* Are the bytes above us in flight? */
1983 1994 rptr = (*xmit_tail)->b_wptr - *tail_unsent;
1984 1995 if (rptr != (*xmit_tail)->b_rptr) {
1985 1996 *tail_unsent -= len;
1986 1997 if (len <= mss) /* LSO is unusable */
1987 1998 tcp->tcp_last_sent_len = (ushort_t)len;
1988 1999 len += total_hdr_len;
1989 2000 ixa->ixa_pktlen = len;
1990 2001
1991 2002 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1992 2003 tcp->tcp_ipha->ipha_length = htons(len);
1993 2004 } else {
1994 2005 tcp->tcp_ip6h->ip6_plen =
1995 2006 htons(len - IPV6_HDR_LEN);
1996 2007 }
1997 2008
1998 2009 mp = dupb(*xmit_tail);
1999 2010 if (mp == NULL) {
2000 2011 return (-1); /* out_of_mem */
2001 2012 }
2002 2013 mp->b_rptr = rptr;
2003 2014 /*
2004 2015 * If the old timestamp is no longer in use,
2005 2016 * sample a new timestamp now.
2006 2017 */
2007 2018 if ((*xmit_tail)->b_next == NULL) {
2008 2019 (*xmit_tail)->b_prev = local_time;
2009 2020 (*xmit_tail)->b_next =
2010 2021 (mblk_t *)(uintptr_t)(*snxt-len);
2011 2022 }
2012 2023 goto must_alloc;
2013 2024 }
2014 2025 } else {
2015 2026 *xmit_tail = (*xmit_tail)->b_cont;
2016 2027 ASSERT((uintptr_t)((*xmit_tail)->b_wptr -
2017 2028 (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX);
2018 2029 *tail_unsent = (int)((*xmit_tail)->b_wptr -
2019 2030 (*xmit_tail)->b_rptr);
2020 2031 }
2021 2032
2022 2033 (*xmit_tail)->b_prev = local_time;
2023 2034 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len);
2024 2035
2025 2036 *tail_unsent -= len;
2026 2037 if (len <= mss) /* LSO is unusable (!do_lso_send) */
2027 2038 tcp->tcp_last_sent_len = (ushort_t)len;
2028 2039
2029 2040 len += total_hdr_len;
2030 2041 ixa->ixa_pktlen = len;
2031 2042
2032 2043 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2033 2044 tcp->tcp_ipha->ipha_length = htons(len);
2034 2045 } else {
2035 2046 tcp->tcp_ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2036 2047 }
2037 2048
2038 2049 mp = dupb(*xmit_tail);
2039 2050 if (mp == NULL) {
2040 2051 return (-1); /* out_of_mem */
2041 2052 }
2042 2053
2043 2054 len = total_hdr_len;
2044 2055 /*
2045 2056 * There are four reasons to allocate a new hdr mblk:
2046 2057 * 1) The bytes above us are in use by another packet
2047 2058 * 2) We don't have good alignment
2048 2059 * 3) The mblk is being shared
2049 2060 * 4) We don't have enough room for a header
2050 2061 */
2051 2062 rptr = mp->b_rptr - len;
2052 2063 if (!OK_32PTR(rptr) ||
2053 2064 ((db = mp->b_datap), db->db_ref != 2) ||
2054 2065 rptr < db->db_base) {
2055 2066 /* NOTE: we assume allocb returns an OK_32PTR */
2056 2067
2057 2068 must_alloc:;
2058 2069 mp1 = allocb(connp->conn_ht_iphc_allocated +
2059 2070 tcps->tcps_wroff_xtra, BPRI_MED);
2060 2071 if (mp1 == NULL) {
2061 2072 freemsg(mp);
2062 2073 return (-1); /* out_of_mem */
2063 2074 }
2064 2075 mp1->b_cont = mp;
2065 2076 mp = mp1;
2066 2077 /* Leave room for Link Level header */
2067 2078 len = total_hdr_len;
2068 2079 rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2069 2080 mp->b_wptr = &rptr[len];
2070 2081 }
2071 2082
2072 2083 /*
2073 2084 * Fill in the header using the template header, and add
2074 2085 * options such as time-stamp, ECN and/or SACK, as needed.
2075 2086 */
2076 2087 tcp_fill_header(tcp, rptr, num_sack_blk);
2077 2088
2078 2089 mp->b_rptr = rptr;
2079 2090
2080 2091 if (*tail_unsent) {
2081 2092 int spill = *tail_unsent;
2082 2093
2083 2094 mp1 = mp->b_cont;
2084 2095 if (mp1 == NULL)
2085 2096 mp1 = mp;
2086 2097
2087 2098 /*
2088 2099 * If we're a little short, tack on more mblks until
2089 2100 * there is no more spillover.
2090 2101 */
2091 2102 while (spill < 0) {
2092 2103 mblk_t *nmp;
2093 2104 int nmpsz;
2094 2105
2095 2106 nmp = (*xmit_tail)->b_cont;
2096 2107 nmpsz = MBLKL(nmp);
2097 2108
2098 2109 /*
2099 2110 * Excess data in mblk; can we split it?
2100 2111 * If LSO is enabled for the connection,
2101 2112 * keep on splitting as this is a transient
2102 2113 * send path.
2103 2114 */
2104 2115 if (!do_lso_send && (spill + nmpsz > 0)) {
2105 2116 /*
2106 2117 * Don't split if stream head was
2107 2118 * told to break up larger writes
2108 2119 * into smaller ones.
2109 2120 */
2110 2121 if (tcp->tcp_maxpsz_multiplier > 0)
2111 2122 break;
2112 2123
2113 2124 /*
2114 2125 * Next mblk is less than SMSS/2
2115 2126 				 * rounded up to the nearest 64 bytes;
2116 2127 * let it get sent as part of the
2117 2128 * next segment.
2118 2129 */
2119 2130 if (tcp->tcp_localnet &&
2120 2131 !tcp->tcp_cork &&
2121 2132 (nmpsz < roundup((mss >> 1), 64)))
2122 2133 break;
2123 2134 }
2124 2135
2125 2136 *xmit_tail = nmp;
2126 2137 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX);
2127 2138 /* Stash for rtt use later */
2128 2139 (*xmit_tail)->b_prev = local_time;
2129 2140 (*xmit_tail)->b_next =
2130 2141 (mblk_t *)(uintptr_t)(*snxt - len);
2131 2142 mp1->b_cont = dupb(*xmit_tail);
2132 2143 mp1 = mp1->b_cont;
2133 2144
2134 2145 spill += nmpsz;
2135 2146 if (mp1 == NULL) {
2136 2147 *tail_unsent = spill;
2137 2148 freemsg(mp);
2138 2149 return (-1); /* out_of_mem */
2139 2150 }
2140 2151 }
2141 2152
2142 2153 /* Trim back any surplus on the last mblk */
2143 2154 if (spill >= 0) {
2144 2155 mp1->b_wptr -= spill;
2145 2156 *tail_unsent = spill;
2146 2157 } else {
2147 2158 /*
2148 2159 * We did not send everything we could in
2149 2160 * order to remain within the b_cont limit.
2150 2161 */
2151 2162 *usable -= spill;
2152 2163 *snxt += spill;
2153 2164 tcp->tcp_last_sent_len += spill;
2154 2165 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, spill);
2155 2166 tcp->tcp_cs.tcp_out_data_bytes += spill;
2156 2167 /*
2157 2168 * Adjust the checksum
2158 2169 */
2159 2170 tcpha = (tcpha_t *)(rptr +
2160 2171 ixa->ixa_ip_hdr_length);
2161 2172 sum += spill;
2162 2173 sum = (sum >> 16) + (sum & 0xFFFF);
2163 2174 tcpha->tha_sum = htons(sum);
2164 2175 if (connp->conn_ipversion == IPV4_VERSION) {
2165 2176 sum = ntohs(
2166 2177 ((ipha_t *)rptr)->ipha_length) +
2167 2178 spill;
2168 2179 ((ipha_t *)rptr)->ipha_length =
2169 2180 htons(sum);
2170 2181 } else {
2171 2182 sum = ntohs(
2172 2183 ((ip6_t *)rptr)->ip6_plen) +
2173 2184 spill;
2174 2185 ((ip6_t *)rptr)->ip6_plen =
2175 2186 htons(sum);
2176 2187 }
2177 2188 ixa->ixa_pktlen += spill;
2178 2189 *tail_unsent = 0;
2179 2190 }
2180 2191 }
2181 2192 if (tcp->tcp_ip_forward_progress) {
2182 2193 tcp->tcp_ip_forward_progress = B_FALSE;
2183 2194 ixa->ixa_flags |= IXAF_REACH_CONF;
2184 2195 } else {
2185 2196 ixa->ixa_flags &= ~IXAF_REACH_CONF;
2186 2197 }
2187 2198
2188 2199 if (do_lso_send) {
2189 2200 /* Append LSO information to the mp. */
2190 2201 lso_info_set(mp, mss, HW_LSO);
2191 2202 ixa->ixa_fragsize = IP_MAXPACKET;
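			/*
			 * ixa_extra_ident reserves IPv4 ident space for the
			 * additional packets the hardware will produce when
			 * it splits this LSO buffer into num_lso_seg
			 * segments.
			 */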
2192 2203 ixa->ixa_extra_ident = num_lso_seg - 1;
2193 2204
2194 2205 DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg,
2195 2206 boolean_t, B_TRUE);
2196 2207
2197 2208 tcp_send_data(tcp, mp);
2198 2209
2199 2210 /*
2200 2211 * Restore values of ixa_fragsize and ixa_extra_ident.
2201 2212 */
2202 2213 ixa->ixa_fragsize = ixa->ixa_pmtu;
2203 2214 ixa->ixa_extra_ident = 0;
2204 2215 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2205 2216 TCP_STAT(tcps, tcp_lso_times);
2206 2217 TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
2207 2218 } else {
2208 2219 /*
2209 2220 * Make sure to clean up LSO information. Wherever a
2210 2221 * new mp uses the prepended header room after dupb(),
2211 2222 * lso_info_cleanup() should be called.
2212 2223 */
2213 2224 lso_info_cleanup(mp);
2214 2225 tcp_send_data(tcp, mp);
2215 2226 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2216 2227 }
2217 2228 }
2218 2229
2219 2230 return (0);
2220 2231 }
2221 2232
2222 2233 /*
2223 2234 * Initiate closedown sequence on an active connection. (May be called as
2224 2235 * writer.) Return value zero for OK return, non-zero for error return.
2225 2236 */
2226 2237 static int
2227 2238 tcp_xmit_end(tcp_t *tcp)
2228 2239 {
2229 2240 mblk_t *mp;
2230 2241 tcp_stack_t *tcps = tcp->tcp_tcps;
2231 2242 iulp_t uinfo;
2232 2243 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
2233 2244 conn_t *connp = tcp->tcp_connp;
2234 2245
2235 2246 if (tcp->tcp_state < TCPS_SYN_RCVD ||
2236 2247 tcp->tcp_state > TCPS_CLOSE_WAIT) {
2237 2248 /*
2238 2249 * Invalid state, only states TCPS_SYN_RCVD,
2239 2250 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
2240 2251 */
2241 2252 return (-1);
2242 2253 }
2243 2254
2244 2255 tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
2245 2256 tcp->tcp_valid_bits |= TCP_FSS_VALID;
2246 2257 /*
2247 2258 * If there is nothing more unsent, send the FIN now.
2248 2259 * Otherwise, it will go out with the last segment.
2249 2260 */
2250 2261 if (tcp->tcp_unsent == 0) {
2251 2262 mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
2252 2263 tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
2253 2264
2254 2265 if (mp) {
2255 2266 tcp_send_data(tcp, mp);
2256 2267 } else {
2257 2268 /*
2258 2269 * Couldn't allocate msg. Pretend we got it out.
2259 2270 * Wait for rexmit timeout.
2260 2271 */
2261 2272 tcp->tcp_snxt = tcp->tcp_fss + 1;
2262 2273 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
2263 2274 }
2264 2275
2265 2276 /*
2266 2277 * If needed, update tcp_rexmit_snxt as tcp_snxt is
2267 2278 * changed.
2268 2279 */
2269 2280 if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
2270 2281 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
2271 2282 }
2272 2283 } else {
2273 2284 /*
2274 2285 * If tcp->tcp_cork is set, then the data will not get sent,
2275 2286 * so we have to check that and unset it first.
2276 2287 */
2277 2288 if (tcp->tcp_cork)
2278 2289 tcp->tcp_cork = B_FALSE;
2279 2290 tcp_wput_data(tcp, NULL, B_FALSE);
2280 2291 }
2281 2292
2282 2293 /*
2283 2294 * If TCP does not get enough samples of RTT or tcp_rtt_updates
2284 2295 * is 0, don't update the cache.
2285 2296 */
2286 2297 if (tcps->tcps_rtt_updates == 0 ||
2287 2298 tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
2288 2299 return (0);
2289 2300
2290 2301 /*
2291 2302 * We do not have a good algorithm to update ssthresh at this time.
2292 2303 * So don't do any update.
2293 2304 */
2294 2305 bzero(&uinfo, sizeof (uinfo));
2295 2306 uinfo.iulp_rtt = NSEC2MSEC(tcp->tcp_rtt_sa);
2296 2307 uinfo.iulp_rtt_sd = NSEC2MSEC(tcp->tcp_rtt_sd);
2297 2308
2298 2309 /*
2299 2310 * Note that uinfo is kept for conn_faddr in the DCE. Could update even
2300 2311 	 * if the route is source routed, but we don't.
2301 2312 */
2302 2313 if (connp->conn_ipversion == IPV4_VERSION) {
2303 2314 if (connp->conn_faddr_v4 != tcp->tcp_ipha->ipha_dst) {
2304 2315 return (0);
2305 2316 }
2306 2317 (void) dce_update_uinfo_v4(connp->conn_faddr_v4, &uinfo, ipst);
2307 2318 } else {
2308 2319 uint_t ifindex;
2309 2320
2310 2321 if (!(IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6,
2311 2322 &tcp->tcp_ip6h->ip6_dst))) {
2312 2323 return (0);
2313 2324 }
2314 2325 ifindex = 0;
2315 2326 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6)) {
2316 2327 ip_xmit_attr_t *ixa = connp->conn_ixa;
2317 2328
2318 2329 /*
2319 2330 * If we are going to create a DCE we'd better have
2320 2331 * an ifindex
2321 2332 */
2322 2333 if (ixa->ixa_nce != NULL) {
2323 2334 ifindex = ixa->ixa_nce->nce_common->ncec_ill->
2324 2335 ill_phyint->phyint_ifindex;
2325 2336 } else {
2326 2337 return (0);
2327 2338 }
2328 2339 }
2329 2340
2330 2341 (void) dce_update_uinfo(&connp->conn_faddr_v6, ifindex, &uinfo,
2331 2342 ipst);
2332 2343 }
2333 2344 return (0);
2334 2345 }
2335 2346
2336 2347 /*
2337 2348 * Send out a control packet on the tcp connection specified. This routine
2338 2349 * is typically called where we need a simple ACK or RST generated.
2339 2350 */
2340 2351 void
2341 2352 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl)
2342 2353 {
2343 2354 uchar_t *rptr;
2344 2355 tcpha_t *tcpha;
2345 2356 ipha_t *ipha = NULL;
2346 2357 ip6_t *ip6h = NULL;
2347 2358 uint32_t sum;
2348 2359 int total_hdr_len;
2349 2360 int ip_hdr_len;
2350 2361 mblk_t *mp;
2351 2362 tcp_stack_t *tcps = tcp->tcp_tcps;
2352 2363 conn_t *connp = tcp->tcp_connp;
2353 2364 ip_xmit_attr_t *ixa = connp->conn_ixa;
2354 2365
2355 2366 /*
2356 2367 * Save sum for use in source route later.
2357 2368 */
2358 2369 sum = connp->conn_ht_ulp_len + connp->conn_sum;
2359 2370 total_hdr_len = connp->conn_ht_iphc_len;
2360 2371 ip_hdr_len = ixa->ixa_ip_hdr_length;
2361 2372
2362 2373 /* If a text string is passed in with the request, pass it to strlog. */
2363 2374 if (str != NULL && connp->conn_debug) {
2364 2375 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2365 2376 "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x",
2366 2377 str, seq, ack, ctl);
2367 2378 }
2368 2379 mp = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
2369 2380 BPRI_MED);
2370 2381 if (mp == NULL) {
2371 2382 return;
2372 2383 }
2373 2384 rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2374 2385 mp->b_rptr = rptr;
2375 2386 mp->b_wptr = &rptr[total_hdr_len];
2376 2387 bcopy(connp->conn_ht_iphc, rptr, total_hdr_len);
2377 2388
2378 2389 ixa->ixa_pktlen = total_hdr_len;
2379 2390
2380 2391 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2381 2392 ipha = (ipha_t *)rptr;
2382 2393 ipha->ipha_length = htons(total_hdr_len);
2383 2394 } else {
2384 2395 ip6h = (ip6_t *)rptr;
2385 2396 ip6h->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
2386 2397 }
2387 2398 tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2388 2399 tcpha->tha_flags = (uint8_t)ctl;
2389 2400 if (ctl & TH_RST) {
2390 2401 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2391 2402 TCPS_BUMP_MIB(tcps, tcpOutControl);
2392 2403 /*
2393 2404 * Don't send TSopt w/ TH_RST packets per RFC 1323.
2394 2405 */
2395 2406 if (tcp->tcp_snd_ts_ok &&
2396 2407 tcp->tcp_state > TCPS_SYN_SENT) {
2397 2408 mp->b_wptr = &rptr[total_hdr_len - TCPOPT_REAL_TS_LEN];
2398 2409 *(mp->b_wptr) = TCPOPT_EOL;
2399 2410
2400 2411 ixa->ixa_pktlen = total_hdr_len - TCPOPT_REAL_TS_LEN;
2401 2412
2402 2413 if (connp->conn_ipversion == IPV4_VERSION) {
2403 2414 ipha->ipha_length = htons(total_hdr_len -
2404 2415 TCPOPT_REAL_TS_LEN);
2405 2416 } else {
2406 2417 ip6h->ip6_plen = htons(total_hdr_len -
2407 2418 IPV6_HDR_LEN - TCPOPT_REAL_TS_LEN);
2408 2419 }
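			/*
			 * The TCP data offset sits in the high nibble, so
			 * subtracting (3 << 4) removes the three 32-bit
			 * words (TCPOPT_REAL_TS_LEN bytes) that held the
			 * NOP/NOP/timestamp option.
			 */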
2409 2420 tcpha->tha_offset_and_reserved -= (3 << 4);
2410 2421 sum -= TCPOPT_REAL_TS_LEN;
2411 2422 }
2412 2423 }
2413 2424 if (ctl & TH_ACK) {
2414 2425 if (tcp->tcp_snd_ts_ok) {
2415 2426 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2416 2427
2417 2428 U32_TO_BE32(llbolt,
2418 2429 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2419 2430 U32_TO_BE32(tcp->tcp_ts_recent,
2420 2431 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2421 2432 }
2422 2433
2423 2434 /* Update the latest receive window size in TCP header. */
2424 2435 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2425 2436 /* Track what we sent to the peer */
2426 2437 tcp->tcp_tcpha->tha_win = tcpha->tha_win;
2427 2438 tcp->tcp_rack = ack;
2428 2439 tcp->tcp_rack_cnt = 0;
2429 2440 TCPS_BUMP_MIB(tcps, tcpOutAck);
2430 2441 }
2431 2442 TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
2432 2443 tcpha->tha_seq = htonl(seq);
2433 2444 tcpha->tha_ack = htonl(ack);
2434 2445 /*
2435 2446 * Include the adjustment for a source route if any.
2436 2447 */
2437 2448 sum = (sum >> 16) + (sum & 0xFFFF);
2438 2449 tcpha->tha_sum = htons(sum);
2439 2450 tcp_send_data(tcp, mp);
2440 2451 }
2441 2452
2442 2453 /*
2443 2454 * Generate a reset based on an inbound packet, connp is set by caller
2444 2455 * when RST is in response to an unexpected inbound packet for which
2445 2456 * there is active tcp state in the system.
2446 2457 *
2447 2458 * IPSEC NOTE : Try to send the reply with the same protection as it came
2448 2459 * in. We have the ip_recv_attr_t which is reversed to form the ip_xmit_attr_t.
2449 2460 * That way the packet will go out at the same level of protection as it
2450 2461 * came in with.
2451 2462 */
2452 2463 static void
2453 2464 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, uint32_t ack, int ctl,
2454 2465 ip_recv_attr_t *ira, ip_stack_t *ipst, conn_t *connp)
2455 2466 {
2456 2467 ipha_t *ipha = NULL;
2457 2468 ip6_t *ip6h = NULL;
2458 2469 ushort_t len;
2459 2470 tcpha_t *tcpha;
2460 2471 int i;
2461 2472 ipaddr_t v4addr;
2462 2473 in6_addr_t v6addr;
2463 2474 netstack_t *ns = ipst->ips_netstack;
2464 2475 tcp_stack_t *tcps = ns->netstack_tcp;
2465 2476 ip_xmit_attr_t ixas, *ixa;
2466 2477 uint_t ip_hdr_len = ira->ira_ip_hdr_length;
2467 2478 boolean_t need_refrele = B_FALSE; /* ixa_refrele(ixa) */
2468 2479 ushort_t port;
2469 2480
2470 2481 if (!tcp_send_rst_chk(tcps)) {
2471 2482 TCP_STAT(tcps, tcp_rst_unsent);
2472 2483 freemsg(mp);
2473 2484 return;
2474 2485 }
2475 2486
2476 2487 /*
2477 2488 * If connp != NULL we use conn_ixa to keep IP_NEXTHOP and other
2478 2489 * options from the listener. In that case the caller must ensure that
2479 2490 	 * we are running on the listener's (connp) squeue.
2480 2491 *
2481 2492 * We get a safe copy of conn_ixa so we don't need to restore anything
2482 2493 * we or ip_output_simple might change in the ixa.
2483 2494 */
2484 2495 if (connp != NULL) {
2485 2496 ASSERT(connp->conn_on_sqp);
2486 2497
2487 2498 ixa = conn_get_ixa_exclusive(connp);
2488 2499 if (ixa == NULL) {
2489 2500 TCP_STAT(tcps, tcp_rst_unsent);
2490 2501 freemsg(mp);
2491 2502 return;
2492 2503 }
2493 2504 need_refrele = B_TRUE;
2494 2505 } else {
2495 2506 bzero(&ixas, sizeof (ixas));
2496 2507 ixa = &ixas;
2497 2508 /*
2498 2509 * IXAF_VERIFY_SOURCE is overkill since we know the
2499 2510 * packet was for us.
2500 2511 */
2501 2512 ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE;
2502 2513 ixa->ixa_protocol = IPPROTO_TCP;
2503 2514 ixa->ixa_zoneid = ira->ira_zoneid;
2504 2515 ixa->ixa_ifindex = 0;
2505 2516 ixa->ixa_ipst = ipst;
2506 2517 ixa->ixa_cred = kcred;
2507 2518 ixa->ixa_cpid = NOPID;
2508 2519 }
2509 2520
2510 2521 if (str && tcps->tcps_dbg) {
2511 2522 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2512 2523 "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, "
2513 2524 "flags 0x%x",
2514 2525 str, seq, ack, ctl);
2515 2526 }
2516 2527 if (mp->b_datap->db_ref != 1) {
2517 2528 mblk_t *mp1 = copyb(mp);
2518 2529 freemsg(mp);
2519 2530 mp = mp1;
2520 2531 if (mp == NULL)
2521 2532 goto done;
2522 2533 } else if (mp->b_cont) {
2523 2534 freemsg(mp->b_cont);
2524 2535 mp->b_cont = NULL;
2525 2536 DB_CKSUMFLAGS(mp) = 0;
2526 2537 }
2527 2538 /*
2528 2539 * We skip reversing source route here.
2529 2540 * (for now we replace all IP options with EOL)
2530 2541 */
2531 2542 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2532 2543 ipha = (ipha_t *)mp->b_rptr;
2533 2544 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
2534 2545 mp->b_rptr[i] = IPOPT_EOL;
2535 2546 /*
2536 2547 * Make sure that src address isn't flagrantly invalid.
2537 2548 * Not all broadcast address checking for the src address
2538 2549 * is possible, since we don't know the netmask of the src
2539 2550 * addr. No check for destination address is done, since
2540 2551 * IP will not pass up a packet with a broadcast dest
2541 2552 * address to TCP. Similar checks are done below for IPv6.
2542 2553 */
2543 2554 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST ||
2544 2555 CLASSD(ipha->ipha_src)) {
2545 2556 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
2546 2557 ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2547 2558 freemsg(mp);
2548 2559 goto done;
2549 2560 }
2550 2561 } else {
2551 2562 ip6h = (ip6_t *)mp->b_rptr;
2552 2563
2553 2564 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) ||
2554 2565 IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) {
2555 2566 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards);
2556 2567 ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2557 2568 freemsg(mp);
2558 2569 goto done;
2559 2570 }
2560 2571
2561 2572 /* Remove any extension headers assuming partial overlay */
2562 2573 if (ip_hdr_len > IPV6_HDR_LEN) {
2563 2574 uint8_t *to;
2564 2575
2565 2576 to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
2566 2577 ovbcopy(ip6h, to, IPV6_HDR_LEN);
2567 2578 mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
2568 2579 ip_hdr_len = IPV6_HDR_LEN;
2569 2580 ip6h = (ip6_t *)mp->b_rptr;
2570 2581 ip6h->ip6_nxt = IPPROTO_TCP;
2571 2582 }
2572 2583 }
2573 2584 tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
2574 2585 if (tcpha->tha_flags & TH_RST) {
2575 2586 freemsg(mp);
2576 2587 goto done;
2577 2588 }
2578 2589 tcpha->tha_offset_and_reserved = (5 << 4);
2579 2590 len = ip_hdr_len + sizeof (tcpha_t);
2580 2591 mp->b_wptr = &mp->b_rptr[len];
2581 2592 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2582 2593 ipha->ipha_length = htons(len);
2583 2594 /* Swap addresses */
2584 2595 v4addr = ipha->ipha_src;
2585 2596 ipha->ipha_src = ipha->ipha_dst;
2586 2597 ipha->ipha_dst = v4addr;
2587 2598 ipha->ipha_ident = 0;
2588 2599 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl;
2589 2600 ixa->ixa_flags |= IXAF_IS_IPV4;
2590 2601 ixa->ixa_ip_hdr_length = ip_hdr_len;
2591 2602 } else {
2592 2603 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2593 2604 /* Swap addresses */
2594 2605 v6addr = ip6h->ip6_src;
2595 2606 ip6h->ip6_src = ip6h->ip6_dst;
2596 2607 ip6h->ip6_dst = v6addr;
2597 2608 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit;
2598 2609 ixa->ixa_flags &= ~IXAF_IS_IPV4;
2599 2610
2600 2611 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
2601 2612 ixa->ixa_flags |= IXAF_SCOPEID_SET;
2602 2613 ixa->ixa_scopeid = ira->ira_ruifindex;
2603 2614 }
2604 2615 ixa->ixa_ip_hdr_length = IPV6_HDR_LEN;
2605 2616 }
2606 2617 ixa->ixa_pktlen = len;
2607 2618
2608 2619 /* Swap the ports */
2609 2620 port = tcpha->tha_fport;
2610 2621 tcpha->tha_fport = tcpha->tha_lport;
2611 2622 tcpha->tha_lport = port;
2612 2623
2613 2624 tcpha->tha_ack = htonl(ack);
2614 2625 tcpha->tha_seq = htonl(seq);
2615 2626 tcpha->tha_win = 0;
2616 2627 tcpha->tha_sum = htons(sizeof (tcpha_t));
2617 2628 tcpha->tha_flags = (uint8_t)ctl;
2618 2629 if (ctl & TH_RST) {
2619 2630 if (ctl & TH_ACK) {
2620 2631 /*
2621 2632 * Probe connection rejection here.
2622 2633 * tcp_xmit_listeners_reset() drops non-SYN segments
2623 2634 * that do not specify TH_ACK in their flags without
2624 2635 * calling this function. As a consequence, if this
2625 2636 * function is called with a TH_RST|TH_ACK ctl argument,
2626 2637 * it is being called in response to a SYN segment
2627 2638 * and thus the tcp:::accept-refused probe point
2628 2639 * is valid here.
2629 2640 */
2630 2641 DTRACE_TCP5(accept__refused, mblk_t *, NULL,
2631 2642 void, NULL, void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2632 2643 tcph_t *, tcpha);
2633 2644 }
2634 2645 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2635 2646 TCPS_BUMP_MIB(tcps, tcpOutControl);
2636 2647 }
2637 2648
2638 2649 /* Discard any old label */
2639 2650 if (ixa->ixa_free_flags & IXA_FREE_TSL) {
2640 2651 ASSERT(ixa->ixa_tsl != NULL);
2641 2652 label_rele(ixa->ixa_tsl);
2642 2653 ixa->ixa_free_flags &= ~IXA_FREE_TSL;
2643 2654 }
2644 2655 ixa->ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
2645 2656
2646 2657 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2647 2658 /*
2648 2659 * Apply IPsec based on how IPsec was applied to
2649 2660 * the packet that caused the RST.
2650 2661 */
2651 2662 if (!ipsec_in_to_out(ira, ixa, mp, ipha, ip6h)) {
2652 2663 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2653 2664 /* Note: mp already consumed and ip_drop_packet done */
2654 2665 goto done;
2655 2666 }
2656 2667 } else {
2657 2668 /*
2658 2669 * This is in clear. The RST message we are building
2659 2670 * here should go out in clear, independent of our policy.
2660 2671 */
2661 2672 ixa->ixa_flags |= IXAF_NO_IPSEC;
2662 2673 }
2663 2674
2664 2675 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa,
2665 2676 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2666 2677 __dtrace_tcp_tcph_t *, tcpha);
2667 2678
2668 2679 /*
2669 2680 * NOTE: one might consider tracing a TCP packet here, but
2670 2681 * this function has no active TCP state and no tcp structure
2671 2682 * that has a trace buffer. If we traced here, we would have
2672 2683 * to keep a local trace buffer in tcp_record_trace().
2673 2684 */
2674 2685
2675 2686 (void) ip_output_simple(mp, ixa);
2676 2687 done:
2677 2688 ixa_cleanup(ixa);
2678 2689 if (need_refrele) {
2679 2690 ASSERT(ixa != &ixas);
2680 2691 ixa_refrele(ixa);
2681 2692 }
2682 2693 }
2683 2694
2684 2695 /*
2685 2696 * Generate a "no listener here" RST in response to an "unknown" segment.
2686 2697 * connp is set by caller when RST is in response to an unexpected
2687 2698 * inbound packet for which there is active tcp state in the system.
2688 2699 * Note that we are reusing the incoming mp to construct the outgoing RST.
2689 2700 */
2690 2701 void
2691 2702 tcp_xmit_listeners_reset(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst,
2692 2703 conn_t *connp)
2693 2704 {
2694 2705 uchar_t *rptr;
2695 2706 uint32_t seg_len;
2696 2707 tcpha_t *tcpha;
2697 2708 uint32_t seg_seq;
2698 2709 uint32_t seg_ack;
2699 2710 uint_t flags;
2700 2711 ipha_t *ipha;
2701 2712 ip6_t *ip6h;
2702 2713 boolean_t policy_present;
2703 2714 netstack_t *ns = ipst->ips_netstack;
2704 2715 tcp_stack_t *tcps = ns->netstack_tcp;
2705 2716 ipsec_stack_t *ipss = tcps->tcps_netstack->netstack_ipsec;
2706 2717 uint_t ip_hdr_len = ira->ira_ip_hdr_length;
2707 2718
2708 2719 TCP_STAT(tcps, tcp_no_listener);
2709 2720
2710 2721 /*
2711 2722 * DTrace this "unknown" segment as a tcp:::receive, as we did
2712 2723 * just receive something that was TCP.
2713 2724 */
2714 2725 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, NULL,
2715 2726 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2716 2727 __dtrace_tcp_tcph_t *, &mp->b_rptr[ip_hdr_len]);
2717 2728
2718 2729 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2719 2730 policy_present = ipss->ipsec_inbound_v4_policy_present;
2720 2731 ipha = (ipha_t *)mp->b_rptr;
2721 2732 ip6h = NULL;
2722 2733 } else {
2723 2734 policy_present = ipss->ipsec_inbound_v6_policy_present;
2724 2735 ipha = NULL;
2725 2736 ip6h = (ip6_t *)mp->b_rptr;
2726 2737 }
2727 2738
2728 2739 if (policy_present) {
2729 2740 /*
2730 2741 * The conn_t parameter is NULL because we already know
2731 2742 * nobody's home.
2732 2743 */
2733 2744 mp = ipsec_check_global_policy(mp, (conn_t *)NULL, ipha, ip6h,
2734 2745 ira, ns);
2735 2746 if (mp == NULL)
2736 2747 return;
2737 2748 }
2738 2749 if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
2739 2750 DTRACE_PROBE2(
2740 2751 tx__ip__log__error__nolistener__tcp,
2741 2752 char *, "Could not reply with RST to mp(1)",
2742 2753 mblk_t *, mp);
2743 2754 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n"));
2744 2755 freemsg(mp);
2745 2756 return;
2746 2757 }
2747 2758
2748 2759 rptr = mp->b_rptr;
2749 2760
2750 2761 tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2751 2762 seg_seq = ntohl(tcpha->tha_seq);
2752 2763 seg_ack = ntohl(tcpha->tha_ack);
2753 2764 flags = tcpha->tha_flags;
2754 2765
2755 2766 seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcpha) + ip_hdr_len);
2756 2767 if (flags & TH_RST) {
2757 2768 freemsg(mp);
2758 2769 } else if (flags & TH_ACK) {
2759 2770 tcp_xmit_early_reset("no tcp, reset", mp, seg_ack, 0, TH_RST,
2760 2771 ira, ipst, connp);
2761 2772 } else {
2762 2773 if (flags & TH_SYN) {
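			/* A SYN occupies one sequence number; ack past it. */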
2763 2774 seg_len++;
2764 2775 } else {
2765 2776 /*
2766 2777 * Here we violate the RFC. Note that a normal
2767 2778 * TCP will never send a segment without the ACK
2768 2779 * flag, except for RST or SYN segment. This
2769 2780 * segment is neither. Just drop it on the
2770 2781 * floor.
2771 2782 */
2772 2783 freemsg(mp);
2773 2784 TCP_STAT(tcps, tcp_rst_unsent);
2774 2785 return;
2775 2786 }
2776 2787
2777 2788 tcp_xmit_early_reset("no tcp, reset/ack", mp, 0,
2778 2789 seg_seq + seg_len, TH_RST | TH_ACK, ira, ipst, connp);
2779 2790 }
2780 2791 }
2781 2792
2782 2793 /*
2783 2794  * Helper function for tcp_xmit_mp() that handles flag and option
2784 2795  * setting for connection setup.
2785 2796 */
2786 2797 static void
2787 2798 tcp_xmit_mp_aux_iss(tcp_t *tcp, conn_t *connp, tcpha_t *tcpha, mblk_t *mp,
2788 2799 uint_t *flags)
2789 2800 {
2790 2801 uint32_t u1;
2791 2802 uint8_t *wptr = mp->b_wptr;
2792 2803 tcp_stack_t *tcps = tcp->tcp_tcps;
2793 2804 boolean_t add_sack = B_FALSE;
2794 2805
2795 2806 /*
2796 2807 * If TCP_ISS_VALID and the seq number is tcp_iss,
2797 2808 * TCP can only be in SYN-SENT, SYN-RCVD or
2798 2809 * FIN-WAIT-1 state. It can be FIN-WAIT-1 if
2799 2810 * our SYN is not ack'ed but the app closes this
2800 2811 * TCP connection.
2801 2812 */
2802 2813 ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
2803 2814 tcp->tcp_state == TCPS_SYN_RCVD ||
2804 2815 tcp->tcp_state == TCPS_FIN_WAIT_1);
2805 2816
2806 2817 /*
2807 2818 * Tack on the MSS option. It is always needed
2808 2819 * for both active and passive open.
2809 2820 *
2810 2821 * MSS option value should be interface MTU - MIN
2811 2822 * TCP/IP header according to RFC 793 as it means
2812 2823 * the maximum segment size TCP can receive. But
2813 2824 * to get around some broken middle boxes/end hosts
2814 2825 * out there, we allow the option value to be the
2815 2826 * same as the MSS option size on the peer side.
2816 2827 * In this way, the other side will not send
2817 2828 * anything larger than they can receive.
2818 2829 *
2819 2830 * Note that for SYN_SENT state, the ndd param
2820 2831 * tcp_use_smss_as_mss_opt has no effect as we
2821 2832 * don't know the peer's MSS option value. So
2822 2833 * the only case we need to take care of is in
2823 2834 * SYN_RCVD state, which is done later.
2824 2835 */
2825 2836 wptr[0] = TCPOPT_MAXSEG;
2826 2837 wptr[1] = TCPOPT_MAXSEG_LEN;
2827 2838 wptr += 2;
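	/*
	 * For example, a typical Ethernet tcp_initial_pmtu of 1500 yields an
	 * advertised MSS of 1500 - 20 - 20 = 1460 for IPv4, or
	 * 1500 - 40 - 20 = 1440 for IPv6.
	 */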
2828 2839 u1 = tcp->tcp_initial_pmtu - (connp->conn_ipversion == IPV4_VERSION ?
2829 2840 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) - TCP_MIN_HEADER_LENGTH;
2830 2841 U16_TO_BE16(u1, wptr);
2831 2842 wptr += 2;
2832 2843
2833 2844 /* Update the offset to cover the additional word */
2834 2845 tcpha->tha_offset_and_reserved += (1 << 4);
2835 2846
2836 2847 switch (tcp->tcp_state) {
2837 2848 case TCPS_SYN_SENT:
2838 2849 *flags = TH_SYN;
2839 2850
2840 2851 if (tcp->tcp_snd_sack_ok)
2841 2852 add_sack = B_TRUE;
2842 2853
2843 2854 if (tcp->tcp_snd_ts_ok) {
2844 2855 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2845 2856
2846 2857 if (add_sack) {
2847 2858 wptr[0] = TCPOPT_SACK_PERMITTED;
2848 2859 wptr[1] = TCPOPT_SACK_OK_LEN;
2849 2860 add_sack = B_FALSE;
2850 2861 } else {
2851 2862 wptr[0] = TCPOPT_NOP;
2852 2863 wptr[1] = TCPOPT_NOP;
2853 2864 }
2854 2865 wptr[2] = TCPOPT_TSTAMP;
2855 2866 wptr[3] = TCPOPT_TSTAMP_LEN;
2856 2867 wptr += 4;
2857 2868 U32_TO_BE32(llbolt, wptr);
2858 2869 wptr += 4;
2859 2870 ASSERT(tcp->tcp_ts_recent == 0);
2860 2871 U32_TO_BE32(0L, wptr);
2861 2872 wptr += 4;
2862 2873 tcpha->tha_offset_and_reserved += (3 << 4);
2863 2874 }
2864 2875
2865 2876 /*
2866 2877 		 * Set up all the bits to tell the other side
2867 2878 * we are ECN capable.
2868 2879 */
2869 2880 if (tcp->tcp_ecn_ok)
2870 2881 *flags |= (TH_ECE | TH_CWR);
2871 2882
2872 2883 break;
2873 2884
2874 2885 case TCPS_SYN_RCVD:
2875 2886 *flags |= TH_SYN;
2876 2887
2877 2888 /*
2878 2889 * Reset the MSS option value to be SMSS
2879 2890 * We should probably add back the bytes
2880 2891 * for timestamp option and IPsec. We
2881 2892 * don't do that as this is a workaround
2882 2893 * for broken middle boxes/end hosts, it
2883 2894 * is better for us to be more cautious.
2884 2895 * They may not take these things into
2885 2896 * account in their SMSS calculation. Thus
2886 2897 * the peer's calculated SMSS may be smaller
2887 2898 * than what it can be. This should be OK.
2888 2899 */
2889 2900 if (tcps->tcps_use_smss_as_mss_opt) {
2890 2901 u1 = tcp->tcp_mss;
2891 2902 /*
2892 2903 * Note that wptr points just past the MSS
2893 2904 * option value.
2894 2905 */
2895 2906 U16_TO_BE16(u1, wptr - 2);
2896 2907 }
2897 2908
2898 2909 /*
2899 2910 * tcp_snd_ts_ok can only be set in TCPS_SYN_RCVD
2900 2911 * when the peer also uses timestamps option. And
2901 2912 * the TCP header template must have already been
2902 2913 * updated to include the timestamps option.
2903 2914 */
2904 2915 if (tcp->tcp_snd_sack_ok) {
2905 2916 if (tcp->tcp_snd_ts_ok) {
2906 2917 uint8_t *tmp_wptr;
2907 2918
2908 2919 /*
2909 2920 * Use the NOP in the header just
2910 2921 				 * before the timestamps option.
2911 2922 */
2912 2923 tmp_wptr = (uint8_t *)tcpha +
2913 2924 TCP_MIN_HEADER_LENGTH;
2914 2925 ASSERT(tmp_wptr[0] == TCPOPT_NOP &&
2915 2926 tmp_wptr[1] == TCPOPT_NOP);
2916 2927 tmp_wptr[0] = TCPOPT_SACK_PERMITTED;
2917 2928 tmp_wptr[1] = TCPOPT_SACK_OK_LEN;
2918 2929 } else {
2919 2930 add_sack = B_TRUE;
2920 2931 }
2921 2932 }
2922 2933
2923 2934
2924 2935 /*
2925 2936 * If the other side is ECN capable, reply
2926 2937 * that we are also ECN capable.
2927 2938 */
2928 2939 if (tcp->tcp_ecn_ok)
2929 2940 *flags |= TH_ECE;
2930 2941 break;
2931 2942
2932 2943 default:
2933 2944 /*
2934 2945 * The above ASSERT() makes sure that this
2935 2946 * must be FIN-WAIT-1 state. Our SYN has
2936 2947 * not been ack'ed so retransmit it.
2937 2948 */
2938 2949 *flags |= TH_SYN;
2939 2950 break;
2940 2951 }
2941 2952
2942 2953 if (add_sack) {
2943 2954 wptr[0] = TCPOPT_NOP;
2944 2955 wptr[1] = TCPOPT_NOP;
2945 2956 wptr[2] = TCPOPT_SACK_PERMITTED;
2946 2957 wptr[3] = TCPOPT_SACK_OK_LEN;
2947 2958 wptr += TCPOPT_REAL_SACK_OK_LEN;
2948 2959 tcpha->tha_offset_and_reserved += (1 << 4);
2949 2960 }
2950 2961
2951 2962 if (tcp->tcp_snd_ws_ok) {
2952 2963 wptr[0] = TCPOPT_NOP;
2953 2964 wptr[1] = TCPOPT_WSCALE;
2954 2965 wptr[2] = TCPOPT_WS_LEN;
2955 2966 wptr[3] = (uchar_t)tcp->tcp_rcv_ws;
2956 2967 wptr += TCPOPT_REAL_WS_LEN;
2957 2968 tcpha->tha_offset_and_reserved += (1 << 4);
2958 2969 }
2959 2970
2960 2971 mp->b_wptr = wptr;
2961 2972 u1 = (int)(mp->b_wptr - mp->b_rptr);
2962 2973 /*
2963 2974 * Get IP set to checksum on our behalf
2964 2975 * Include the adjustment for a source route if any.
2965 2976 */
2966 2977 u1 += connp->conn_sum;
2967 2978 u1 = (u1 >> 16) + (u1 & 0xFFFF);
2968 2979 tcpha->tha_sum = htons(u1);
2969 2980 TCPS_BUMP_MIB(tcps, tcpOutControl);
2970 2981 }
2971 2982
2972 2983 /*
2973 2984  * Helper function for tcp_xmit_mp() that handles connection tear down
2974 2985 * flag setting and state changes.
2975 2986 */
2976 2987 static void
2977 2988 tcp_xmit_mp_aux_fss(tcp_t *tcp, ip_xmit_attr_t *ixa, uint_t *flags)
2978 2989 {
2979 2990 if (!tcp->tcp_fin_acked) {
2980 2991 *flags |= TH_FIN;
2981 2992 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutControl);
2982 2993 }
2983 2994 if (!tcp->tcp_fin_sent) {
2984 2995 tcp->tcp_fin_sent = B_TRUE;
2985 2996 switch (tcp->tcp_state) {
2986 2997 case TCPS_SYN_RCVD:
2987 2998 tcp->tcp_state = TCPS_FIN_WAIT_1;
2988 2999 DTRACE_TCP6(state__change, void, NULL,
2989 3000 ip_xmit_attr_t *, ixa, void, NULL,
2990 3001 tcp_t *, tcp, void, NULL,
2991 3002 int32_t, TCPS_SYN_RCVD);
2992 3003 break;
2993 3004 case TCPS_ESTABLISHED:
2994 3005 tcp->tcp_state = TCPS_FIN_WAIT_1;
2995 3006 DTRACE_TCP6(state__change, void, NULL,
2996 3007 ip_xmit_attr_t *, ixa, void, NULL,
2997 3008 tcp_t *, tcp, void, NULL,
2998 3009 int32_t, TCPS_ESTABLISHED);
2999 3010 break;
3000 3011 case TCPS_CLOSE_WAIT:
3001 3012 tcp->tcp_state = TCPS_LAST_ACK;
3002 3013 DTRACE_TCP6(state__change, void, NULL,
3003 3014 ip_xmit_attr_t *, ixa, void, NULL,
3004 3015 tcp_t *, tcp, void, NULL,
3005 3016 int32_t, TCPS_CLOSE_WAIT);
3006 3017 break;
3007 3018 }
3008 3019 if (tcp->tcp_suna == tcp->tcp_snxt)
3009 3020 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3010 3021 tcp->tcp_snxt = tcp->tcp_fss + 1;
3011 3022 }
3012 3023 }
3013 3024
3014 3025 /*
3015 3026 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with
3016 3027 * ip and tcp header ready to pass down to IP. If the mp passed in is
3017 3028 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that
3018 3029 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary
3019 3030  * mblk. (If sendall is not set, the dup'ing will stop at an mblk
3020 3031  * boundary; otherwise it will dup partial mblks.)
3021 3032 * routine is not usually called to send new data for the first time. It
3022 3033 * is mostly called out of the timer for retransmits, and to generate ACKs.
3023 3034 *
3024 3035 * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
3025 3036 * be adjusted by *offset. And after dupb(), the offset and the ending mblk
3026 3037 * of the original mblk chain will be returned in *offset and *end_mp.
3027 3038 */
3028 3039 mblk_t *
3029 3040 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
3030 3041 mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len,
3031 3042 boolean_t rexmit)
3032 3043 {
3033 3044 int data_length;
3034 3045 int32_t off = 0;
3035 3046 uint_t flags;
3036 3047 mblk_t *mp1;
3037 3048 mblk_t *mp2;
3038 3049 uchar_t *rptr;
3039 3050 tcpha_t *tcpha;
3040 3051 int32_t num_sack_blk = 0;
3041 3052 int32_t sack_opt_len = 0;
3042 3053 tcp_stack_t *tcps = tcp->tcp_tcps;
3043 3054 conn_t *connp = tcp->tcp_connp;
3044 3055 ip_xmit_attr_t *ixa = connp->conn_ixa;
3045 3056
3046 3057 /* Allocate for our maximum TCP header + link-level */
3047 3058 mp1 = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
3048 3059 BPRI_MED);
3049 3060 if (mp1 == NULL)
3050 3061 return (NULL);
3051 3062 data_length = 0;
3052 3063
3053 3064 /*
3054 3065 * Note that tcp_mss has been adjusted to take into account the
3055 3066 * timestamp option if applicable. Because SACK options do not
3056 3067 	 * appear in every TCP segment and they are of variable length,
3057 3068 * they cannot be included in tcp_mss. Thus we need to calculate
3058 3069 * the actual segment length when we need to send a segment which
3059 3070 * includes SACK options.
3060 3071 */
3061 3072 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
3062 3073 num_sack_blk = MIN(tcp->tcp_max_sack_blk,
3063 3074 tcp->tcp_num_sack_blk);
3064 3075 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
3065 3076 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
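		/*
		 * For example, three 8-byte SACK blocks plus two NOPs and
		 * the two-byte kind/length header give sack_opt_len = 28.
		 */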
3066 3077 if (max_to_send + sack_opt_len > tcp->tcp_mss)
3067 3078 max_to_send -= sack_opt_len;
3068 3079 }
3069 3080
3070 3081 if (offset != NULL) {
3071 3082 off = *offset;
3072 3083 /* We use offset as an indicator that end_mp is not NULL. */
3073 3084 *end_mp = NULL;
3074 3085 }
3075 3086 for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) {
3076 3087 /* This could be faster with cooperation from downstream */
3077 3088 if (mp2 != mp1 && !sendall &&
3078 3089 data_length + (int)(mp->b_wptr - mp->b_rptr) >
3079 3090 max_to_send)
3080 3091 /*
3081 3092 * Don't send the next mblk since the whole mblk
3082 3093 * does not fit.
3083 3094 */
3084 3095 break;
3085 3096 mp2->b_cont = dupb(mp);
3086 3097 mp2 = mp2->b_cont;
3087 3098 if (!mp2) {
3088 3099 freemsg(mp1);
3089 3100 return (NULL);
3090 3101 }
3091 3102 mp2->b_rptr += off;
3092 3103 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
3093 3104 (uintptr_t)INT_MAX);
3094 3105
3095 3106 data_length += (int)(mp2->b_wptr - mp2->b_rptr);
3096 3107 if (data_length > max_to_send) {
3097 3108 mp2->b_wptr -= data_length - max_to_send;
3098 3109 data_length = max_to_send;
3099 3110 off = mp2->b_wptr - mp->b_rptr;
3100 3111 break;
3101 3112 } else {
3102 3113 off = 0;
3103 3114 }
3104 3115 }
3105 3116 if (offset != NULL) {
3106 3117 *offset = off;
3107 3118 *end_mp = mp;
3108 3119 }
3109 3120 if (seg_len != NULL) {
3110 3121 *seg_len = data_length;
3111 3122 }
3112 3123
3113 3124 /* Update the latest receive window size in TCP header. */
3114 3125 tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3115 3126
3116 3127 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
3117 3128 mp1->b_rptr = rptr;
3118 3129 mp1->b_wptr = rptr + connp->conn_ht_iphc_len + sack_opt_len;
3119 3130 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
3120 3131 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
3121 3132 tcpha->tha_seq = htonl(seq);
3122 3133
3123 3134 /*
3124 3135 	 * Using tcp_unsent to determine whether to set the PUSH bit assumes
3125 3136 	 * that this function was called from tcp_wput_data. Thus, when called
3126 3137 	 * to retransmit data, the setting of the PUSH bit may appear somewhat
3127 3138 	 * random in that it might get set when it should not. This
3128 3139 * should not pose any performance issues.
3129 3140 */
3130 3141 if (data_length != 0 && (tcp->tcp_unsent == 0 ||
3131 3142 tcp->tcp_unsent == data_length)) {
3132 3143 flags = TH_ACK | TH_PUSH;
3133 3144 } else {
3134 3145 flags = TH_ACK;
3135 3146 }
3136 3147
3137 3148 if (tcp->tcp_ecn_ok) {
3138 3149 if (tcp->tcp_ecn_echo_on)
3139 3150 flags |= TH_ECE;
3140 3151
3141 3152 /*
3142 3153 * Only set ECT bit and ECN_CWR if a segment contains new data.
3143 3154 * There is no TCP flow control for non-data segments, and
3144 3155 		 * only data segments are transmitted reliably.
3145 3156 */
3146 3157 if (data_length > 0 && !rexmit) {
3147 3158 TCP_SET_ECT(tcp, rptr);
3148 3159 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3149 3160 flags |= TH_CWR;
3150 3161 tcp->tcp_ecn_cwr_sent = B_TRUE;
3151 3162 }
3152 3163 }
3153 3164 }
3154 3165
3155 3166 /* Check if there is any special processing needs to be done. */
3156 3167 if (tcp->tcp_valid_bits) {
3157 3168 uint32_t u1;
3158 3169
3159 3170 /* We don't allow having SYN and FIN in the same segment... */
3160 3171 if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
3161 3172 seq == tcp->tcp_iss) {
3162 3173 /* Need to do connection set up processing. */
3163 3174 tcp_xmit_mp_aux_iss(tcp, connp, tcpha, mp1, &flags);
3164 3175 } else if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
3165 3176 (seq + data_length) == tcp->tcp_fss) {
3166 3177 /* Need to do connection tear down processing. */
3167 3178 tcp_xmit_mp_aux_fss(tcp, ixa, &flags);
3168 3179 }
3169 3180
3170 3181 /*
3171 3182 * Need to do urgent pointer processing.
3172 3183 *
3173 3184 * Note the trick here. u1 is unsigned. When tcp_urg
3174 3185 		 * is smaller than seq, u1 wraps around to a very large value,
3175 3186 		 * so the comparison fails. Also note that tcp_urp
3176 3187 * should be positive, see RFC 793 page 17.
3177 3188 */
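		/*
		 * For example, with seq = 1000 and tcp_urg = 900 the
		 * subtraction wraps to roughly 2^32 - 100, far above the
		 * 64K limit below, so TH_URG is not set.
		 */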
3178 3189 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION;
3179 3190 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 &&
3180 3191 u1 < (uint32_t)(64 * 1024)) {
3181 3192 flags |= TH_URG;
3182 3193 TCPS_BUMP_MIB(tcps, tcpOutUrg);
3183 3194 tcpha->tha_urp = htons(u1);
3184 3195 }
3185 3196 }
3186 3197 tcpha->tha_flags = (uchar_t)flags;
3187 3198 tcp->tcp_rack = tcp->tcp_rnxt;
3188 3199 tcp->tcp_rack_cnt = 0;
3189 3200
3190 3201 /* Fill in the current value of timestamps option. */
3191 3202 if (tcp->tcp_snd_ts_ok) {
3192 3203 if (tcp->tcp_state != TCPS_SYN_SENT) {
3193 3204 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
3194 3205
3195 3206 U32_TO_BE32(llbolt,
3196 3207 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
3197 3208 U32_TO_BE32(tcp->tcp_ts_recent,
3198 3209 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
3199 3210 }
3200 3211 }
3201 3212
3202 3213 /* Fill in the SACK blocks. */
3203 3214 if (num_sack_blk > 0) {
3204 3215 uchar_t *wptr = (uchar_t *)tcpha + connp->conn_ht_ulp_len;
3205 3216 sack_blk_t *tmp;
3206 3217 int32_t i;
3207 3218
3208 3219 wptr[0] = TCPOPT_NOP;
3209 3220 wptr[1] = TCPOPT_NOP;
3210 3221 wptr[2] = TCPOPT_SACK;
3211 3222 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3212 3223 sizeof (sack_blk_t);
3213 3224 wptr += TCPOPT_REAL_SACK_LEN;
3214 3225
3215 3226 tmp = tcp->tcp_sack_list;
3216 3227 for (i = 0; i < num_sack_blk; i++) {
3217 3228 U32_TO_BE32(tmp[i].begin, wptr);
3218 3229 wptr += sizeof (tcp_seq);
3219 3230 U32_TO_BE32(tmp[i].end, wptr);
3220 3231 wptr += sizeof (tcp_seq);
3221 3232 }
3222 3233 tcpha->tha_offset_and_reserved += ((num_sack_blk * 2 + 1) << 4);
3223 3234 }
3224 3235 ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX);
3225 3236 data_length += (int)(mp1->b_wptr - rptr);
3226 3237
3227 3238 ixa->ixa_pktlen = data_length;
3228 3239
3229 3240 if (ixa->ixa_flags & IXAF_IS_IPV4) {
3230 3241 ((ipha_t *)rptr)->ipha_length = htons(data_length);
3231 3242 } else {
3232 3243 ip6_t *ip6 = (ip6_t *)rptr;
3233 3244
3234 3245 ip6->ip6_plen = htons(data_length - IPV6_HDR_LEN);
3235 3246 }
3236 3247
3237 3248 /*
3238 3249 * Prime pump for IP
3239 3250 * Include the adjustment for a source route if any.
3240 3251 */
3241 3252 data_length -= ixa->ixa_ip_hdr_length;
3242 3253 data_length += connp->conn_sum;
3243 3254 data_length = (data_length >> 16) + (data_length & 0xFFFF);
3244 3255 tcpha->tha_sum = htons(data_length);
3245 3256 if (tcp->tcp_ip_forward_progress) {
3246 3257 tcp->tcp_ip_forward_progress = B_FALSE;
3247 3258 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
3248 3259 } else {
3249 3260 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
3250 3261 }
3251 3262 return (mp1);
3252 3263 }
3253 3264
3254 3265 /*
3255 3266 * If this routine returns B_TRUE, TCP can generate a RST in response
3256 3267 * to a segment. If it returns B_FALSE, TCP should not respond.
3257 3268 */
3258 3269 static boolean_t
3259 3270 tcp_send_rst_chk(tcp_stack_t *tcps)
3260 3271 {
3261 3272 int64_t now;
3262 3273
3263 3274 /*
3264 3275 * TCP needs to protect itself from generating too many RSTs.
3265 3276 	 * An attacker can mount a DoS attack by sending us random segments
3266 3277 * soliciting RSTs.
3267 3278 *
3268 3279 * What we do here is to have a limit of tcp_rst_sent_rate RSTs
3269 3280 * in each 1 second interval. In this way, TCP still generate
3270 3281 * RSTs in normal cases but when under attack, the impact is
3271 3282 * limited.
3272 3283 */
3273 3284 if (tcps->tcps_rst_sent_rate_enabled != 0) {
3274 3285 now = ddi_get_lbolt64();
3275 3286 if (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
3276 3287 1*SECONDS) {
3277 3288 tcps->tcps_last_rst_intrvl = now;
3278 3289 tcps->tcps_rst_cnt = 1;
3279 3290 } else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
3280 3291 return (B_FALSE);
3281 3292 }
3282 3293 }
3283 3294 return (B_TRUE);
3284 3295 }
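tcp_send_rst_chk() above allows at most tcps_rst_sent_rate RSTs per one-second interval, restarting the count whenever a new interval begins. A simplified user-level sketch of the same bookkeeping, with illustrative names and a plain millisecond clock standing in for lbolt:

#include <stdint.h>
#include <stdbool.h>

static uint32_t rst_sent_rate = 40;	/* illustrative cap per interval */
static int64_t	last_interval_ms;	/* start of the current interval */
static uint32_t rst_cnt;		/* RSTs counted in this interval */

/* Return true if another RST may be generated now. */
static bool
rst_rate_ok(int64_t now_ms)
{
	if (now_ms - last_interval_ms > 1000) {
		/* A new one-second interval has begun; restart the count. */
		last_interval_ms = now_ms;
		rst_cnt = 1;
	} else if (++rst_cnt > rst_sent_rate) {
		return (false);		/* over the cap: suppress the RST */
	}
	return (true);
}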
3285 3296
3286 3297 /*
3287 3298 * This function handles all retransmissions if SACK is enabled for this
3288 3299 * connection. First it calculates how many segments can be retransmitted
3289 3300 * based on tcp_pipe. Then it goes through the notsack list to find eligible
3290 3301 * segments. A segment is eligible if sack_cnt for that segment is greater
3291 3302 * than or equal to tcp_dupack_fast_retransmit. After it has retransmitted
3292 3303 * all eligible segments, it checks to see if TCP can send some new segments
3293 3304 * (fast recovery). If it can, set the appropriate flag for tcp_input_data().
3294 3305 *
3295 3306 * Parameters:
3296 3307 * tcp_t *tcp: the tcp structure of the connection.
3297 3308 * uint_t *flags: on return, the appropriate value will be set for
3298 3309 * tcp_input_data().
3299 3310 */
3300 3311 void
3301 3312 tcp_sack_rexmit(tcp_t *tcp, uint_t *flags)
3302 3313 {
3303 3314 notsack_blk_t *notsack_blk;
3304 3315 int32_t usable_swnd;
3305 3316 int32_t mss;
3306 3317 uint32_t seg_len;
3307 3318 mblk_t *xmit_mp;
3308 3319 tcp_stack_t *tcps = tcp->tcp_tcps;
3309 3320
3310 3321 ASSERT(tcp->tcp_notsack_list != NULL);
3311 3322 ASSERT(tcp->tcp_rexmit == B_FALSE);
3312 3323
3313 3324 /* Defensive coding in case there is a bug... */
3314 3325 if (tcp->tcp_notsack_list == NULL) {
3315 3326 return;
3316 3327 }
3317 3328 notsack_blk = tcp->tcp_notsack_list;
3318 3329 mss = tcp->tcp_mss;
3319 3330
3320 3331 /*
3321 3332 * Limit the amount of outstanding data in the network to
3322 3333 * tcp_cwnd_ssthresh, which is half of the original congestion window.
3323 3334 */
3324 3335 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3325 3336
3326 3337 /* At least retransmit 1 MSS of data. */
3327 3338 if (usable_swnd <= 0) {
3328 3339 usable_swnd = mss;
3329 3340 }
3330 3341
3331 3342 /* Make sure no new RTT samples will be taken. */
3332 3343 tcp->tcp_csuna = tcp->tcp_snxt;
3333 3344
3334 3345 notsack_blk = tcp->tcp_notsack_list;
3335 3346 while (usable_swnd > 0) {
3336 3347 mblk_t *snxt_mp, *tmp_mp;
3337 3348 tcp_seq begin = tcp->tcp_sack_snxt;
3338 3349 tcp_seq end;
3339 3350 int32_t off;
3340 3351
3341 3352 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) {
3342 3353 if (SEQ_GT(notsack_blk->end, begin) &&
3343 3354 (notsack_blk->sack_cnt >=
3344 3355 tcps->tcps_dupack_fast_retransmit)) {
3345 3356 end = notsack_blk->end;
3346 3357 if (SEQ_LT(begin, notsack_blk->begin)) {
3347 3358 begin = notsack_blk->begin;
3348 3359 }
3349 3360 break;
3350 3361 }
3351 3362 }
3352 3363 /*
3353 3364 * All holes are filled. Manipulate tcp_cwnd to send more
3354 3365 * if we can. Note that after the SACK recovery, tcp_cwnd is
3355 3366 * set to tcp_cwnd_ssthresh.
3356 3367 */
3357 3368 if (notsack_blk == NULL) {
3358 3369 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3359 3370 if (usable_swnd <= 0 || tcp->tcp_unsent == 0) {
3360 3371 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna;
3361 3372 ASSERT(tcp->tcp_cwnd > 0);
3362 3373 return;
3363 3374 } else {
3364 3375 usable_swnd = usable_swnd / mss;
3365 3376 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna +
3366 3377 MAX(usable_swnd * mss, mss);
3367 3378 *flags |= TH_XMIT_NEEDED;
3368 3379 return;
3369 3380 }
3370 3381 }
3371 3382
3372 3383 /*
3373 3384 * Note that we may send more than usable_swnd allows here
3374 3385 * because of round off, but no more than 1 MSS of data.
3375 3386 */
3376 3387 seg_len = end - begin;
3377 3388 if (seg_len > mss)
3378 3389 seg_len = mss;
3379 3390 snxt_mp = tcp_get_seg_mp(tcp, begin, &off);
3380 3391 ASSERT(snxt_mp != NULL);
3381 3392 /* This should not happen. Defensive coding again... */
3382 3393 if (snxt_mp == NULL) {
3383 3394 return;
3384 3395 }
3385 3396
3386 3397 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
3387 3398 &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
3388 3399 if (xmit_mp == NULL)
3389 3400 return;
3390 3401
3391 3402 usable_swnd -= seg_len;
3392 3403 tcp->tcp_pipe += seg_len;
3393 3404 tcp->tcp_sack_snxt = begin + seg_len;
3394 3405
3395 3406 tcp_send_data(tcp, xmit_mp);
3396 3407
3397 3408 /*
3398 3409 * Update the send timestamp to avoid false retransmission.
3399 3410 */
3400 3411 snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3401 3412
3402 3413 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3403 3414 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, seg_len);
3404 3415 TCPS_BUMP_MIB(tcps, tcpOutSackRetransSegs);
3405 3416 tcp->tcp_cs.tcp_out_retrans_segs++;
3406 3417 tcp->tcp_cs.tcp_out_retrans_bytes += seg_len;
3407 3418 /*
3408 3419 * Update tcp_rexmit_max to extend this SACK recovery phase.
3409 3420 * This happens when new data sent during fast recovery is
3410 3421 * also lost. If TCP retransmits that new data, it needs
3411 3422 * to extend the SACK recovery phase to avoid starting another
3412 3423 * fast retransmit/recovery unnecessarily.
3413 3424 */
3414 3425 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
3415 3426 tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
3416 3427 }
3417 3428 }
3418 3429 }
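The scan above relies on a wraparound-safe sequence comparison (SEQ_GT, with SEQ_LT as its companion used to clamp the start of the hole) to pick the first not-yet-SACKed block that has been reported missing often enough. A minimal sketch of that eligibility test over a simplified list; the struct, macro, and function names here are illustrative stand-ins, not the real TCP definitions:

#include <stdint.h>
#include <stddef.h>

/* Wraparound-safe "a is after b" comparison, in the usual TCP style. */
#define	SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)

/* Simplified stand-in for one notsack block. */
typedef struct nsblk {
	uint32_t	begin;		/* first missing sequence number */
	uint32_t	end;		/* one past the last missing byte */
	uint32_t	sack_cnt;	/* times SACKs have implied the loss */
	struct nsblk	*next;
} nsblk_t;

/*
 * Return the first block ending after 'snxt' that has been reported
 * missing at least 'dupthresh' times, mirroring the eligibility scan
 * in tcp_sack_rexmit() above; NULL means every hole is filled.
 */
static nsblk_t *
find_eligible(nsblk_t *list, uint32_t snxt, uint32_t dupthresh)
{
	nsblk_t *blk;

	for (blk = list; blk != NULL; blk = blk->next) {
		if (SEQ_GT(blk->end, snxt) && blk->sack_cnt >= dupthresh)
			return (blk);
	}
	return (NULL);
}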
3419 3430
3420 3431 /*
3421 3432 * tcp_ss_rexmit() is called to do slow start retransmission after a timeout
3422 3433 * or ICMP errors.
3423 3434 */
3424 3435 void
3425 3436 tcp_ss_rexmit(tcp_t *tcp)
3426 3437 {
3427 3438 uint32_t snxt;
3428 3439 uint32_t smax;
3429 3440 int32_t win;
3430 3441 int32_t mss;
3431 3442 int32_t off;
3432 3443 mblk_t *snxt_mp;
3433 3444 tcp_stack_t *tcps = tcp->tcp_tcps;
3434 3445
3435 3446 /*
3436 3447 * Note that tcp_rexmit can be set even though TCP has retransmitted
3437 3448 * all unack'ed segments.
3438 3449 */
3439 3450 if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
3440 3451 smax = tcp->tcp_rexmit_max;
3441 3452 snxt = tcp->tcp_rexmit_nxt;
3442 3453 if (SEQ_LT(snxt, tcp->tcp_suna)) {
3443 3454 snxt = tcp->tcp_suna;
3444 3455 }
3445 3456 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
3446 3457 win -= snxt - tcp->tcp_suna;
3447 3458 mss = tcp->tcp_mss;
3448 3459 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);
3449 3460
3450 3461 while (SEQ_LT(snxt, smax) && (win > 0) && (snxt_mp != NULL)) {
3451 3462 mblk_t *xmit_mp;
3452 3463 mblk_t *old_snxt_mp = snxt_mp;
3453 3464 uint32_t cnt = mss;
3454 3465
3455 3466 if (win < cnt) {
3456 3467 cnt = win;
3457 3468 }
3458 3469 if (SEQ_GT(snxt + cnt, smax)) {
3459 3470 cnt = smax - snxt;
3460 3471 }
3461 3472 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
3462 3473 &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
3463 3474 if (xmit_mp == NULL)
3464 3475 return;
3465 3476
3466 3477 tcp_send_data(tcp, xmit_mp);
3467 3478
3468 3479 snxt += cnt;
3469 3480 win -= cnt;
3470 3481 /*
3471 3482 * Update the send timestamp to avoid false
3472 3483 * retransmission.
3473 3484 */
3474 3485 old_snxt_mp->b_prev = (mblk_t *)(intptr_t)gethrtime();
3475 3486 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3476 3487 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, cnt);
3477 3488 tcp->tcp_cs.tcp_out_retrans_segs++;
3478 3489 tcp->tcp_cs.tcp_out_retrans_bytes += cnt;
3479 3490
3480 3491 tcp->tcp_rexmit_nxt = snxt;
3481 3492 }
3482 3493 /*
3483 3494 * If we have transmitted all we have at the time
3484 3495 * we started the retransmission, we can leave
3485 3496 * the rest of the job to tcp_wput_data(). But we
3486 3497 * need to check the send window first. If the
3487 3498 * win is not 0, go on with tcp_wput_data().
3488 3499 */
3489 3500 if (SEQ_LT(snxt, smax) || win == 0) {
3490 3501 return;
3491 3502 }
3492 3503 }
3493 3504 /* Only call tcp_wput_data() if there is data to be sent. */
3494 3505 if (tcp->tcp_unsent) {
3495 3506 tcp_wput_data(tcp, NULL, B_FALSE);
3496 3507 }
3497 3508 }
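Each pass of the loop above clamps the retransmitted chunk to the smaller of one MSS, the remaining usable window, and the distance left to tcp_rexmit_max. A compact standalone sketch of just that per-pass sizing decision, with illustrative names:

#include <stdint.h>

/*
 * Size one retransmitted chunk: at most one MSS, no more than the
 * remaining usable window, and never past the retransmit high mark.
 * Simplified: assumes snxt <= smax without sequence wraparound.
 */
static uint32_t
rexmit_chunk(uint32_t snxt, uint32_t smax, uint32_t win, uint32_t mss)
{
	uint32_t cnt = mss;

	if (cnt > win)
		cnt = win;
	if (snxt + cnt > smax)
		cnt = smax - snxt;
	return (cnt);
}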
3498 3509
3499 3510 /*
3500 3511 * Do slow start retransmission after ICMP errors indicating PMTU changes.
3501 3512 */
3502 3513 void
3503 3514 tcp_rexmit_after_error(tcp_t *tcp)
3504 3515 {
3505 3516 /*
3506 3517 * If all sent data has been acknowledged or there is no data left
3507 3518 * to send, just return.
3508 3519 */
3509 3520 if (!SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) ||
3510 3521 (tcp->tcp_xmit_head == NULL))
3511 3522 return;
3512 3523
3513 3524 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && (tcp->tcp_unsent == 0))
3514 3525 tcp->tcp_rexmit_max = tcp->tcp_fss;
3515 3526 else
3516 3527 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3517 3528
3518 3529 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
3519 3530 tcp->tcp_rexmit = B_TRUE;
3520 3531 tcp->tcp_dupack_cnt = 0;
3521 3532 tcp_ss_rexmit(tcp);
3522 3533 }
3523 3534
3524 3535 /*
3525 3536 * tcp_get_seg_mp() is called to get the pointer to a segment in the
3526 3537 * send queue which starts at the given sequence number. If the given
3527 3538 * sequence number is equal to the last valid sequence number (tcp_snxt), the
3528 3539 * returned mblk is the last valid mblk, and off is set to the length of
3529 3540 * that mblk.
3530 3541 *
3533 3544 * Parameters:
3534 3545 * tcp_t *tcp: the tcp instance pointer.
3535 3546 * uint32_t seq: the starting seq. no of the requested segment.
3536 3547 * int32_t *off: on return, *off will be the offset into
3537 3548 * the returned mblk at which the requested seq no starts.
3538 3549 * It is the caller's responsibility to pass in a non-NULL off.
3539 3550 *
3540 3551 * Return:
3541 3552 * An mblk_t pointer to the requested segment in the send queue.
3542 3553 */
3543 3554 static mblk_t *
3544 3555 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off)
3545 3556 {
3546 3557 int32_t cnt;
3547 3558 mblk_t *mp;
3548 3559
3549 3560 /* Defensive coding. Make sure we don't send incorrect data. */
3550 3561 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GT(seq, tcp->tcp_snxt))
3551 3562 return (NULL);
3552 3563
3553 3564 cnt = seq - tcp->tcp_suna;
3554 3565 mp = tcp->tcp_xmit_head;
3555 3566 while (cnt > 0 && mp != NULL) {
3556 3567 cnt -= mp->b_wptr - mp->b_rptr;
3557 3568 if (cnt <= 0) {
3558 3569 cnt += mp->b_wptr - mp->b_rptr;
3559 3570 break;
3560 3571 }
3561 3572 mp = mp->b_cont;
3562 3573 }
3563 3574 ASSERT(mp != NULL);
3564 3575 *off = cnt;
3565 3576 return (mp);
3566 3577 }
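tcp_get_seg_mp() walks the transmit list one mblk at a time, subtracting each block's length from the remaining byte count until it reaches the block that holds the requested sequence number; the leftover count becomes the offset into that block. A simplified sketch over a generic buffer chain (the struct and helper here are stand-ins, not the STREAMS mblk_t):

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for a chained data block. */
typedef struct blk {
	uint8_t		*rptr;	/* first valid byte */
	uint8_t		*wptr;	/* one past the last valid byte */
	struct blk	*next;	/* next block in the chain */
} blk_t;

/*
 * Return the block containing byte 'offset' of the chain, with *off
 * set to the offset within that block, mirroring the walk above.
 * Returns NULL if the chain is shorter than 'offset'.
 */
static blk_t *
chain_seek(blk_t *head, int32_t offset, int32_t *off)
{
	int32_t cnt = offset;
	blk_t *bp = head;

	while (cnt > 0 && bp != NULL) {
		cnt -= (int32_t)(bp->wptr - bp->rptr);
		if (cnt <= 0) {
			/* This block holds the target byte; restore cnt. */
			cnt += (int32_t)(bp->wptr - bp->rptr);
			break;
		}
		bp = bp->next;
	}
	*off = cnt;
	return (bp);
}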
3567 3578
3568 3579 /*
3569 3580 * This routine adjusts next-to-send sequence number variables, in the
3570 3581 * case where the receiver has shrunk its window.
3571 3582 */
3572 3583 void
3573 3584 tcp_update_xmit_tail(tcp_t *tcp, uint32_t snxt)
3574 3585 {
3575 3586 mblk_t *xmit_tail;
3576 3587 int32_t offset;
3577 3588
3578 3589 tcp->tcp_snxt = snxt;
3579 3590
3580 3591 /* Get the mblk, and the offset in it, as per the shrunk window */
3581 3592 xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
3582 3593 ASSERT(xmit_tail != NULL);
3583 3594 tcp->tcp_xmit_tail = xmit_tail;
3584 3595 tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr -
3585 3596 xmit_tail->b_rptr - offset;
3586 3597 }
3587 3598
3588 3599 /*
3589 3600 * This handles the case when the receiver has shrunk its window. Per RFC 1122,
3590 3601 * if the receiver shrinks the window, i.e. moves the right edge of the window
3591 3602 * to the left, we should not send new data, but should retransmit normally the
3592 3603 * old unacked data between suna and suna + swnd. We might have sent data that
3593 3604 * is now outside the new window, so pretend that we didn't send it.
3594 3605 */
3595 3606 static void
3596 3607 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
3597 3608 {
3598 3609 uint32_t snxt = tcp->tcp_snxt;
3599 3610
3600 3611 ASSERT(shrunk_count > 0);
3601 3612
3602 3613 if (!tcp->tcp_is_wnd_shrnk) {
3603 3614 tcp->tcp_snxt_shrunk = snxt;
3604 3615 tcp->tcp_is_wnd_shrnk = B_TRUE;
3605 3616 } else if (SEQ_GT(snxt, tcp->tcp_snxt_shrunk)) {
3606 3617 tcp->tcp_snxt_shrunk = snxt;
3607 3618 }
3608 3619
3609 3620 /* Pretend we didn't send the data outside the window */
3610 3621 snxt -= shrunk_count;
3611 3622
3612 3623 /* Reset all the values per the now shrunk window */
3613 3624 tcp_update_xmit_tail(tcp, snxt);
3614 3625 tcp->tcp_unsent += shrunk_count;
3615 3626
3616 3627 /*
3617 3628 * If the SACK option is set, delete the entire list of
3618 3629 * notsack'ed blocks.
3619 3630 */
3620 3631 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
3621 3632
3622 3633 if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
3623 3634 /*
3624 3635 * Make sure the timer is running so that we will probe a zero
3625 3636 * window.
3626 3637 */
3627 3638 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3628 3639 }
3629 3640
3630 3641 /*
3631 3642 * tcp_fill_header is called by tcp_send() to fill the outgoing TCP header
3632 3643 * with the template header, as well as other options such as time-stamp,
3633 3644 * ECN and/or SACK.
3634 3645 */
3635 3646 static void
3636 3647 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, int num_sack_blk)
3637 3648 {
3638 3649 tcpha_t *tcp_tmpl, *tcpha;
3639 3650 uint32_t *dst, *src;
3640 3651 int hdrlen;
3641 3652 conn_t *connp = tcp->tcp_connp;
3642 3653
3643 3654 ASSERT(OK_32PTR(rptr));
3644 3655
3645 3656 /* Template header */
3646 3657 tcp_tmpl = tcp->tcp_tcpha;
3647 3658
3648 3659 /* Header of outgoing packet */
3649 3660 tcpha = (tcpha_t *)(rptr + connp->conn_ixa->ixa_ip_hdr_length);
3650 3661
3651 3662 /* dst and src are opaque 32-bit fields, used for copying */
3652 3663 dst = (uint32_t *)rptr;
3653 3664 src = (uint32_t *)connp->conn_ht_iphc;
3654 3665 hdrlen = connp->conn_ht_iphc_len;
3655 3666
3656 3667 /* Fill time-stamp option if needed */
3657 3668 if (tcp->tcp_snd_ts_ok) {
3658 3669 U32_TO_BE32(LBOLT_FASTPATH,
3659 3670 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4);
3660 3671 U32_TO_BE32(tcp->tcp_ts_recent,
3661 3672 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8);
3662 3673 } else {
3663 3674 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
3664 3675 }
3665 3676
3666 3677 /*
3667 3678 * Copy the template header; is this really more efficient than
3668 3679 * calling bcopy()? For simple IPv4/TCP, it may be the case,
3669 3680 * but perhaps not for other scenarios.
3670 3681 */
3671 3682 dst[0] = src[0];
3672 3683 dst[1] = src[1];
3673 3684 dst[2] = src[2];
3674 3685 dst[3] = src[3];
3675 3686 dst[4] = src[4];
3676 3687 dst[5] = src[5];
3677 3688 dst[6] = src[6];
3678 3689 dst[7] = src[7];
3679 3690 dst[8] = src[8];
3680 3691 dst[9] = src[9];
3681 3692 if (hdrlen -= 40) {
3682 3693 hdrlen >>= 2;
3683 3694 dst += 10;
3684 3695 src += 10;
3685 3696 do {
3686 3697 *dst++ = *src++;
3687 3698 } while (--hdrlen);
3688 3699 }
3689 3700
3690 3701 /*
3691 3702 * Set the ECN info in the TCP header if it is not a zero
3692 3703 * window probe. A zero window probe is only sent in
3693 3704 * tcp_wput_data() and tcp_timer().
3694 3705 */
3695 3706 if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
3696 3707 TCP_SET_ECT(tcp, rptr);
3697 3708
3698 3709 if (tcp->tcp_ecn_echo_on)
3699 3710 tcpha->tha_flags |= TH_ECE;
3700 3711 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3701 3712 tcpha->tha_flags |= TH_CWR;
3702 3713 tcp->tcp_ecn_cwr_sent = B_TRUE;
3703 3714 }
3704 3715 }
3705 3716
3706 3717 /* Fill in SACK options */
3707 3718 if (num_sack_blk > 0) {
3708 3719 uchar_t *wptr = rptr + connp->conn_ht_iphc_len;
3709 3720 sack_blk_t *tmp;
3710 3721 int32_t i;
3711 3722
3712 3723 wptr[0] = TCPOPT_NOP;
3713 3724 wptr[1] = TCPOPT_NOP;
3714 3725 wptr[2] = TCPOPT_SACK;
3715 3726 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3716 3727 sizeof (sack_blk_t);
3717 3728 wptr += TCPOPT_REAL_SACK_LEN;
3718 3729
3719 3730 tmp = tcp->tcp_sack_list;
3720 3731 for (i = 0; i < num_sack_blk; i++) {
3721 3732 U32_TO_BE32(tmp[i].begin, wptr);
3722 3733 wptr += sizeof (tcp_seq);
3723 3734 U32_TO_BE32(tmp[i].end, wptr);
3724 3735 wptr += sizeof (tcp_seq);
3725 3736 }
3726 3737 tcpha->tha_offset_and_reserved +=
3727 3738 ((num_sack_blk * 2 + 1) << 4);
3728 3739 }
3729 3740 }
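The SACK option emitted above occupies 2 + 8 * n option bytes, padded to a 32-bit boundary with two leading NOPs, and the data offset field is bumped by (2 * n + 1) 32-bit words to cover it. A small sketch that lays the same bytes into a plain buffer; the option kind/length values follow RFC 2018, while the helper and block struct are illustrative:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define	OPT_NOP		1	/* TCP option: no-operation */
#define	OPT_SACK	5	/* TCP option: SACK blocks (RFC 2018) */

typedef struct sblk {
	uint32_t begin;		/* first sequence number SACKed */
	uint32_t end;		/* one past the last sequence number SACKed */
} sblk_t;

/*
 * Append 'n' SACK blocks at 'wptr': two NOPs for alignment, then
 * kind, length, and the begin/end pairs in network byte order.
 * Returns the number of option bytes written (4 + 8 * n).
 */
static int
fill_sack_opt(uint8_t *wptr, const sblk_t *blk, int n)
{
	int i;

	wptr[0] = OPT_NOP;
	wptr[1] = OPT_NOP;
	wptr[2] = OPT_SACK;
	wptr[3] = (uint8_t)(2 + 8 * n);		/* kind + len + blocks */
	wptr += 4;
	for (i = 0; i < n; i++) {
		uint32_t v;

		v = htonl(blk[i].begin);
		memcpy(wptr, &v, 4);
		wptr += 4;
		v = htonl(blk[i].end);
		memcpy(wptr, &v, 4);
		wptr += 4;
	}
	return (4 + 8 * n);
}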
↓ open down ↓ |
2521 lines elided |
↑ open up ↑ |