11849 listen of IPv6 address fails with EAFNOSUPPORT
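For context, the synopsis describes listen() on an IPv6 socket failing with EAFNOSUPPORT. A minimal user-level sketch of the kind of listener involved is shown below; it is illustrative only (the port is arbitrary and this is not the exact reproducer from the bug report).

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	struct sockaddr_in6 sin6;
	int s = socket(AF_INET6, SOCK_STREAM, 0);

	if (s == -1) {
		perror("socket");
		return (1);
	}
	(void) memset(&sin6, 0, sizeof (sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(8080);		/* arbitrary port */
	sin6.sin6_addr = in6addr_any;		/* or a specific IPv6 address */
	if (bind(s, (struct sockaddr *)&sin6, sizeof (sin6)) == -1) {
		perror("bind");
		return (1);
	}
	if (listen(s, 5) == -1) {
		perror("listen");		/* the reported EAFNOSUPPORT failure */
		return (1);
	}
	return (0);
}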
--- old/usr/src/uts/common/inet/tcp/tcp.c
+++ new/usr/src/uts/common/inet/tcp/tcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 - * Copyright (c) 2011, Joyent Inc. All rights reserved.
24 + * Copyright 2019 Joyent, Inc.
25 25 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
26 26 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
27 27 * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
28 28 */
29 29 /* Copyright (c) 1990 Mentat Inc. */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/stream.h>
33 33 #include <sys/strsun.h>
34 34 #include <sys/strsubr.h>
35 35 #include <sys/stropts.h>
36 36 #include <sys/strlog.h>
37 37 #define _SUN_TPI_VERSION 2
38 38 #include <sys/tihdr.h>
39 39 #include <sys/timod.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/sunddi.h>
42 42 #include <sys/suntpi.h>
43 43 #include <sys/xti_inet.h>
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/sdt.h>
47 47 #include <sys/vtrace.h>
48 48 #include <sys/kmem.h>
49 49 #include <sys/ethernet.h>
50 50 #include <sys/cpuvar.h>
51 51 #include <sys/dlpi.h>
52 52 #include <sys/pattr.h>
53 53 #include <sys/policy.h>
54 54 #include <sys/priv.h>
55 55 #include <sys/zone.h>
56 56 #include <sys/sunldi.h>
57 57
58 58 #include <sys/errno.h>
59 59 #include <sys/signal.h>
60 60 #include <sys/socket.h>
61 61 #include <sys/socketvar.h>
62 62 #include <sys/sockio.h>
63 63 #include <sys/isa_defs.h>
64 64 #include <sys/md5.h>
65 65 #include <sys/random.h>
66 66 #include <sys/uio.h>
67 67 #include <sys/systm.h>
68 68 #include <netinet/in.h>
69 69 #include <netinet/tcp.h>
70 70 #include <netinet/ip6.h>
71 71 #include <netinet/icmp6.h>
72 72 #include <net/if.h>
73 73 #include <net/route.h>
74 74 #include <inet/ipsec_impl.h>
75 75
76 76 #include <inet/common.h>
77 77 #include <inet/cc.h>
78 78 #include <inet/ip.h>
79 79 #include <inet/ip_impl.h>
80 80 #include <inet/ip6.h>
81 81 #include <inet/ip_ndp.h>
82 82 #include <inet/proto_set.h>
83 83 #include <inet/mib2.h>
84 84 #include <inet/optcom.h>
85 85 #include <inet/snmpcom.h>
86 86 #include <inet/kstatcom.h>
87 87 #include <inet/tcp.h>
88 88 #include <inet/tcp_impl.h>
89 89 #include <inet/tcp_cluster.h>
90 90 #include <inet/udp_impl.h>
91 91 #include <net/pfkeyv2.h>
92 92 #include <inet/ipdrop.h>
93 93
94 94 #include <inet/ipclassifier.h>
95 95 #include <inet/ip_ire.h>
96 96 #include <inet/ip_ftable.h>
97 97 #include <inet/ip_if.h>
98 98 #include <inet/ipp_common.h>
99 99 #include <inet/ip_rts.h>
100 100 #include <inet/ip_netinfo.h>
101 101 #include <sys/squeue_impl.h>
102 102 #include <sys/squeue.h>
103 103 #include <sys/tsol/label.h>
104 104 #include <sys/tsol/tnet.h>
105 105 #include <rpc/pmap_prot.h>
106 106 #include <sys/callo.h>
107 107
108 108 /*
109 109 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
110 110 *
111 111 * (Read the detailed design doc in PSARC case directory)
112 112 *
113 113 * The entire tcp state is contained in tcp_t and conn_t structure
114 114 * which are allocated in tandem using ipcl_conn_create() and passing
115 115 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
116 116 * the references on the tcp_t. The tcp_t structure is never compressed
117 117 * and packets always land on the correct TCP perimeter from the time
118 118 * eager is created till the time tcp_t dies (as such the old mentat
119 119 * TCP global queue is not used for detached state and no IPSEC checking
120 120 * is required). The global queue is still allocated to send out resets
121 121 * for connections which have no listeners and IP directly calls
122 122 * tcp_xmit_listeners_reset() which does any policy check.
123 123 *
124 124 * Protection and Synchronisation mechanism:
125 125 *
126 126 * The tcp data structure does not use any kind of lock for protecting
127 127 * its state but instead uses 'squeues' for mutual exclusion from various
128 128 * read and write side threads. To access a tcp member, the thread should
129 129 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
130 130 * or SQ_NODRAIN). Since the squeues allow a direct function call, caller
131 131 * can pass any tcp function having prototype of edesc_t as argument
132 132 * (different from traditional STREAMs model where packets come in only
133 133 * designated entry points). The functions that can be directly
134 134 * called via squeue are listed before the usual function prototypes.
135 135 *
136 136 * Referencing:
137 137 *
138 138 * TCP is MT-Hot and we use a reference based scheme to make sure that the
139 139 * tcp structure doesn't disappear when it's needed. When the application
140 140 * creates an outgoing connection or accepts an incoming connection, we
141 141 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
142 142 * The IP reference is just a symbolic reference since ip_tcpclose()
143 143 * looks at tcp structure after tcp_close_output() returns which could
144 144 * have dropped the last TCP reference. So as long as the connection is
145 145 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
146 146 * conn_t. The classifier puts its own reference when the connection is
147 147 * inserted in listen or connected hash. Anytime a thread needs to enter
148 148 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
149 149 * on write side or by doing a classify on read side and then puts a
150 150 * reference on the conn before doing squeue_enter/tryenter/fill. For
151 151 * read side, the classifier itself puts the reference under fanout lock
152 152 * to make sure that tcp can't disappear before it gets processed. The
153 153 * squeue will drop this reference automatically so the called function
154 154 * doesn't have to do a DEC_REF.
155 155 *
156 156 * Opening a new connection:
157 157 *
158 158 * The outgoing connection open is pretty simple. tcp_open() does the
159 159 * work in creating the conn/tcp structure and initializing it. The
160 160 * squeue assignment is done based on the CPU the application
161 161 * is running on. So for outbound connections, processing is always done
162 162 * on application CPU which might be different from the incoming CPU
163 163 * being interrupted by the NIC. An optimal way would be to figure out
164 164 * the NIC <-> CPU binding at listen time, and assign the outgoing
165 165 * connection to the squeue attached to the CPU that will be interrupted
166 166 * for incoming packets (we know the NIC based on the bind IP address).
167 167 * This might seem like a problem if more data is going out but the
168 168 * fact is that in most cases the transmit is ACK driven transmit where
169 169 * the outgoing data normally sits on TCP's xmit queue waiting to be
170 170 * transmitted.
171 171 *
172 172 * Accepting a connection:
173 173 *
174 174 * This is a more interesting case because of various races involved in
175 175 * establishing an eager in its own perimeter. Read the meta comment on
176 176 * top of tcp_input_listener(). But briefly, the squeue is picked by
177 177 * ip_fanout based on the ring or the sender (if loopback).
178 178 *
179 179 * Closing a connection:
180 180 *
181 181 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
182 182 * via squeue to do the close and mark the tcp as detached if the connection
183 183 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
184 184 * reference but tcp_close() always drops IP's reference. So if tcp was
185 185 * not killed, it is sitting in the time_wait list with 2 references - 1 for TCP
186 186 * and 1 because it is in the classifier's connected hash. This is the condition
187 187 * we use to determine that it's OK to clean up the tcp outside of the squeue
188 188 * when time wait expires (check the ref under fanout and conn_lock and
189 189 * if it is 2, remove it from fanout hash and kill it).
190 190 *
191 191 * Although close just drops the necessary references and marks the
192 192 * tcp_detached state, tcp_close needs to know the tcp_detached has been
193 193 * set (under squeue) before letting the STREAM go away (because an
194 194 * inbound packet might attempt to go up the STREAM while the close
195 195 * has happened and tcp_detached is not set). So a special lock and
196 196 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
197 197 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
198 198 * tcp_detached.
199 199 *
200 200 * Special provisions and fast paths:
201 201 *
202 202 * We make special provisions for sockfs by marking tcp_issocket
203 203 * whenever we have only sockfs on top of TCP. This allows us to skip
204 204 * putting the tcp in acceptor hash since a sockfs listener can never
205 205 * become acceptor and also avoid allocating a tcp_t for acceptor STREAM
206 206 * since eager has already been allocated and the accept now happens
207 207 * on acceptor STREAM. There is a big blob of comment on top of
208 208 * tcp_input_listener explaining the new accept. When socket is POP'd,
209 209 * sockfs sends us an ioctl to mark the fact and we go back to old
210 210 * behaviour. Once tcp_issocket is unset, it's never set for the
211 211 * life of that connection.
212 212 *
213 213 * IPsec notes :
214 214 *
215 215 * Since a packet is always executed on the correct TCP perimeter
216 216 * all IPsec processing is deferred to IP including checking new
217 217 * connections and setting IPSEC policies for new connections. The
218 218 * only exception is tcp_xmit_listeners_reset() which is called
219 219 * directly from IP and needs to do a policy check to see if TH_RST
220 220 * can be sent out.
221 221 */
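The reference-then-enter pattern described above recurs throughout this file (tcp_close_common() below is one concrete instance). A condensed sketch of the pattern follows; my_tcp_work is a hypothetical callback name, and the squeue tag is borrowed from the close path purely for illustration.

/*
 * Sketch only: a callback with the edesc_t-style prototype used by the
 * squeue entry points listed further below.
 */
static void
my_tcp_work(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;

	/* tcp_t state may be touched freely here: we are inside the squeue. */
}

	/* Caller side: take a reference, then hand the work to the squeue. */
	CONN_INC_REF(connp);
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, my_tcp_work, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
	/*
	 * See the Referencing notes above for how this reference is
	 * released once the callback has run.
	 */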
222 222
223 223 /*
224 224 * Values for squeue switch:
225 225 * 1: SQ_NODRAIN
226 226 * 2: SQ_PROCESS
227 227 * 3: SQ_FILL
228 228 */
229 229 int tcp_squeue_wput = 2; /* /etc/system */
230 230 int tcp_squeue_flag;
231 231
232 232 /*
233 233 * To prevent memory hog, limit the number of entries in tcp_free_list
234 234 * to 1% of available memory / number of cpus
235 235 */
236 236 uint_t tcp_free_list_max_cnt = 0;
237 237
238 238 #define TIDUSZ 4096 /* transport interface data unit size */
239 239
240 240 /*
241 241 * Size of acceptor hash list. It has to be a power of 2 for hashing.
242 242 */
243 243 #define TCP_ACCEPTOR_FANOUT_SIZE 512
244 244
245 245 #ifdef _ILP32
246 246 #define TCP_ACCEPTOR_HASH(accid) \
247 247 (((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
248 248 #else
249 249 #define TCP_ACCEPTOR_HASH(accid) \
250 250 ((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
251 251 #endif /* _ILP32 */
252 252
253 253 /*
254 254 * Minimum number of connections which can be created per listener. Used
255 255 * when the listener connection count is in effect.
256 256 */
257 257 static uint32_t tcp_min_conn_listener = 2;
258 258
259 259 uint32_t tcp_early_abort = 30;
260 260
261 261 /* TCP Timer control structure */
262 262 typedef struct tcpt_s {
263 263 pfv_t tcpt_pfv; /* The routine we are to call */
264 264 tcp_t *tcpt_tcp; /* The parameter we are to pass in */
265 265 } tcpt_t;
266 266
267 267 /*
268 268 * Functions called directly via squeue having a prototype of edesc_t.
269 269 */
270 270 void tcp_input_data(void *arg, mblk_t *mp, void *arg2,
271 271 ip_recv_attr_t *ira);
272 272 static void tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
273 273 ip_recv_attr_t *dummy);
274 274
275 275
276 276 /* Prototype for TCP functions */
277 277 static void tcp_random_init(void);
278 278 int tcp_random(void);
279 279 static int tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
280 280 in_port_t dstport, uint_t srcid);
281 281 static int tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
282 282 in_port_t dstport, uint32_t flowinfo,
283 283 uint_t srcid, uint32_t scope_id);
284 284 static void tcp_iss_init(tcp_t *tcp);
285 285 static void tcp_reinit(tcp_t *tcp);
286 286 static void tcp_reinit_values(tcp_t *tcp);
287 287
288 288 static int tcp_wsrv(queue_t *q);
289 289 static void tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
290 290 static void tcp_update_zcopy(tcp_t *tcp);
291 291 static void tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
292 292 ixa_notify_arg_t);
293 293 static void *tcp_stack_init(netstackid_t stackid, netstack_t *ns);
294 294 static void tcp_stack_fini(netstackid_t stackid, void *arg);
295 295
296 296 static int tcp_squeue_switch(int);
297 297
298 298 static int tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
299 299 static int tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
300 300 static int tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
301 301
302 302 static void tcp_squeue_add(squeue_t *);
303 303
304 304 struct module_info tcp_rinfo = {
305 305 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
306 306 };
307 307
308 308 static struct module_info tcp_winfo = {
309 309 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
310 310 };
311 311
312 312 /*
313 313 * Entry points for TCP as a device. The normal case which supports
314 314 * the TCP functionality.
315 315 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
316 316 */
317 317 struct qinit tcp_rinitv4 = {
318 318 NULL, tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
319 319 };
320 320
321 321 struct qinit tcp_rinitv6 = {
322 322 NULL, tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
323 323 };
324 324
325 325 struct qinit tcp_winit = {
326 326 tcp_wput, tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
327 327 };
328 328
329 329 /* Initial entry point for TCP in socket mode. */
330 330 struct qinit tcp_sock_winit = {
331 331 tcp_wput_sock, tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
332 332 };
333 333
334 334 /* TCP entry point during fallback */
335 335 struct qinit tcp_fallback_sock_winit = {
336 336 tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
337 337 };
338 338
339 339 /*
340 340 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
341 341 * an accept. Avoid allocating data structures since eager has already
342 342 * been created.
343 343 */
344 344 struct qinit tcp_acceptor_rinit = {
345 345 NULL, tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
346 346 };
347 347
348 348 struct qinit tcp_acceptor_winit = {
349 349 tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
350 350 };
351 351
352 352 /* For AF_INET aka /dev/tcp */
353 353 struct streamtab tcpinfov4 = {
354 354 &tcp_rinitv4, &tcp_winit
355 355 };
356 356
357 357 /* For AF_INET6 aka /dev/tcp6 */
358 358 struct streamtab tcpinfov6 = {
359 359 &tcp_rinitv6, &tcp_winit
360 360 };
361 361
362 362 /*
363 363 * Following assumes TPI alignment requirements stay along 32 bit
364 364 * boundaries
365 365 */
366 366 #define ROUNDUP32(x) \
367 367 (((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
368 368
369 369 /* Template for response to info request. */
370 370 struct T_info_ack tcp_g_t_info_ack = {
371 371 T_INFO_ACK, /* PRIM_type */
372 372 0, /* TSDU_size */
373 373 T_INFINITE, /* ETSDU_size */
374 374 T_INVALID, /* CDATA_size */
375 375 T_INVALID, /* DDATA_size */
376 376 sizeof (sin_t), /* ADDR_size */
377 377 0, /* OPT_size - not initialized here */
378 378 TIDUSZ, /* TIDU_size */
379 379 T_COTS_ORD, /* SERV_type */
380 380 TCPS_IDLE, /* CURRENT_state */
381 381 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
382 382 };
383 383
384 384 struct T_info_ack tcp_g_t_info_ack_v6 = {
385 385 T_INFO_ACK, /* PRIM_type */
386 386 0, /* TSDU_size */
387 387 T_INFINITE, /* ETSDU_size */
388 388 T_INVALID, /* CDATA_size */
389 389 T_INVALID, /* DDATA_size */
390 390 sizeof (sin6_t), /* ADDR_size */
391 391 0, /* OPT_size - not initialized here */
392 392 TIDUSZ, /* TIDU_size */
393 393 T_COTS_ORD, /* SERV_type */
394 394 TCPS_IDLE, /* CURRENT_state */
395 395 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
396 396 };
397 397
398 398 /*
399 399 * TCP tunables related declarations. Definitions are in tcp_tunables.c
400 400 */
401 401 extern mod_prop_info_t tcp_propinfo_tbl[];
402 402 extern int tcp_propinfo_count;
403 403
404 404 #define IS_VMLOANED_MBLK(mp) \
405 405 (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
406 406
407 407 uint32_t do_tcpzcopy = 1; /* 0: disable, 1: enable, 2: force */
408 408
409 409 /*
410 410 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
411 411 * tunable settable via NDD. Otherwise, the per-connection behavior is
412 412 * determined dynamically during tcp_set_destination(), which is the default.
413 413 */
414 414 boolean_t tcp_static_maxpsz = B_FALSE;
415 415
416 416 /*
417 417 * If the receive buffer size is changed, this function is called to update
418 418 * the upper socket layer on the new delayed receive wake up threshold.
419 419 */
420 420 static void
421 421 tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
422 422 {
423 423 uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;
424 424
425 425 if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
426 426 conn_t *connp = tcp->tcp_connp;
427 427 struct sock_proto_props sopp;
428 428
429 429 /*
430 430 * only increase rcvthresh up to default_threshold
431 431 */
432 432 if (new_rcvthresh > default_threshold)
433 433 new_rcvthresh = default_threshold;
434 434
435 435 sopp.sopp_flags = SOCKOPT_RCVTHRESH;
436 436 sopp.sopp_rcvthresh = new_rcvthresh;
437 437
438 438 (*connp->conn_upcalls->su_set_proto_props)
439 439 (connp->conn_upper_handle, &sopp);
440 440 }
441 441 }
442 442
443 443 /*
444 444 * Figure out the value of the window scale option. Note that the rwnd is
445 445 * ASSUMED to be rounded up to the nearest MSS before the calculation.
446 446 * We cannot find the scale value and then do a round up of tcp_rwnd
447 447 * because the scale value may not be correct after that.
448 448 *
449 449 * Set the compiler flag to make this function inline.
450 450 */
451 451 void
452 452 tcp_set_ws_value(tcp_t *tcp)
453 453 {
454 454 int i;
455 455 uint32_t rwnd = tcp->tcp_rwnd;
456 456
457 457 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
458 458 i++, rwnd >>= 1)
459 459 ;
460 460 tcp->tcp_rcv_ws = i;
461 461 }
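As a worked illustration of the loop above (a standalone user-level sketch, not kernel code; TCP_MAXWIN and TCP_MAX_WINSHIFT are assumed to carry their usual values of 65535 and 14):

#include <stdio.h>

#define	TCP_MAXWIN		65535	/* assumed value, for illustration */
#define	TCP_MAX_WINSHIFT	14	/* RFC 1323 limit, for illustration */

int
main(void)
{
	unsigned int rwnd = 1048576;	/* e.g. a 1 MB receive window */
	int i;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	/* Prints "ws = 5": 1048576 >> 5 == 32768, which fits in 16 bits. */
	(void) printf("ws = %d\n", i);
	return (0);
}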
462 462
463 463 /*
464 464 * Remove cached/latched IPsec references.
465 465 */
466 466 void
467 467 tcp_ipsec_cleanup(tcp_t *tcp)
468 468 {
469 469 conn_t *connp = tcp->tcp_connp;
470 470
471 471 ASSERT(connp->conn_flags & IPCL_TCPCONN);
472 472
473 473 if (connp->conn_latch != NULL) {
474 474 IPLATCH_REFRELE(connp->conn_latch);
475 475 connp->conn_latch = NULL;
476 476 }
477 477 if (connp->conn_latch_in_policy != NULL) {
478 478 IPPOL_REFRELE(connp->conn_latch_in_policy);
479 479 connp->conn_latch_in_policy = NULL;
480 480 }
481 481 if (connp->conn_latch_in_action != NULL) {
482 482 IPACT_REFRELE(connp->conn_latch_in_action);
483 483 connp->conn_latch_in_action = NULL;
484 484 }
485 485 if (connp->conn_policy != NULL) {
486 486 IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
487 487 connp->conn_policy = NULL;
488 488 }
489 489 }
490 490
491 491 /*
492 492 * Cleanup before placing on free list.
493 493 * Disassociate from the netstack/tcp_stack_t since the freelist
494 494 * is per squeue and not per netstack.
495 495 */
496 496 void
497 497 tcp_cleanup(tcp_t *tcp)
498 498 {
499 499 mblk_t *mp;
500 500 conn_t *connp = tcp->tcp_connp;
501 501 tcp_stack_t *tcps = tcp->tcp_tcps;
502 502 netstack_t *ns = tcps->tcps_netstack;
503 503 mblk_t *tcp_rsrv_mp;
504 504
505 505 tcp_bind_hash_remove(tcp);
506 506
507 507 /* Cleanup that which needs the netstack first */
508 508 tcp_ipsec_cleanup(tcp);
509 509 ixa_cleanup(connp->conn_ixa);
510 510
511 511 if (connp->conn_ht_iphc != NULL) {
512 512 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
513 513 connp->conn_ht_iphc = NULL;
514 514 connp->conn_ht_iphc_allocated = 0;
515 515 connp->conn_ht_iphc_len = 0;
516 516 connp->conn_ht_ulp = NULL;
517 517 connp->conn_ht_ulp_len = 0;
518 518 tcp->tcp_ipha = NULL;
519 519 tcp->tcp_ip6h = NULL;
520 520 tcp->tcp_tcpha = NULL;
521 521 }
522 522
523 523 /* We clear any IP_OPTIONS and extension headers */
524 524 ip_pkt_free(&connp->conn_xmit_ipp);
525 525
526 526 tcp_free(tcp);
527 527
528 528 /*
529 529 * Since we will bzero the entire structure, we need to
530 530 * remove it and reinsert it in global hash list. We
531 531 * know the walkers can't get to this conn because we
532 532 * had set CONDEMNED flag earlier and checked reference
533 533 * under conn_lock so a walker won't pick it, and when we
534 534 * do the ipcl_globalhash_remove() below, no walker
535 535 * can get to it.
536 536 */
537 537 ipcl_globalhash_remove(connp);
538 538
539 539 /* Save some state */
540 540 mp = tcp->tcp_timercache;
541 541
542 542 tcp_rsrv_mp = tcp->tcp_rsrv_mp;
543 543
544 544 if (connp->conn_cred != NULL) {
545 545 crfree(connp->conn_cred);
546 546 connp->conn_cred = NULL;
547 547 }
548 548 ipcl_conn_cleanup(connp);
549 549 connp->conn_flags = IPCL_TCPCONN;
550 550
551 551 /*
552 552 * Now it is safe to decrement the reference counts.
553 553 * This might be the last reference on the netstack
554 554 * in which case it will cause the freeing of the IP Instance.
555 555 */
556 556 connp->conn_netstack = NULL;
557 557 connp->conn_ixa->ixa_ipst = NULL;
558 558 netstack_rele(ns);
559 559 ASSERT(tcps != NULL);
560 560 tcp->tcp_tcps = NULL;
561 561
562 562 bzero(tcp, sizeof (tcp_t));
563 563
564 564 /* restore the state */
565 565 tcp->tcp_timercache = mp;
566 566
567 567 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
568 568
569 569 tcp->tcp_connp = connp;
570 570
571 571 ASSERT(connp->conn_tcp == tcp);
572 572 ASSERT(connp->conn_flags & IPCL_TCPCONN);
573 573 connp->conn_state_flags = CONN_INCIPIENT;
574 574 ASSERT(connp->conn_proto == IPPROTO_TCP);
575 575 ASSERT(connp->conn_ref == 1);
576 576 }
577 577
578 578 /*
579 579 * Adapt to the information, such as rtt and rtt_sd, provided from the
580 580 * DCE and IRE maintained by IP.
581 581 *
582 582 * Checks for multicast and broadcast destination address.
583 583 * Returns zero if ok; an errno on failure.
584 584 *
585 585 * Note that the MSS calculation here is based on the info given in
586 586 * the DCE and IRE. We do not do any calculation based on TCP options. They
587 587 * will be handled in tcp_input_data() when TCP knows which options to use.
588 588 *
589 589 * Note on how TCP gets its parameters for a connection.
590 590 *
591 591 * When a tcp_t structure is allocated, it gets all the default parameters.
592 592 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
593 593 * spipe, rpipe, ... from the route metrics. Route metric overrides the
594 594 * default.
595 595 *
596 596 * An incoming SYN with a multicast or broadcast destination address is dropped
597 597 * in ip_fanout_v4/v6.
598 598 *
599 599 * An incoming SYN with a multicast or broadcast source address is always
600 600 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
601 601 * conn_connect.
602 602 * The same logic in tcp_set_destination also serves to
603 603 * reject an attempt to connect to a broadcast or multicast (destination)
604 604 * address.
605 605 */
606 606 int
607 607 tcp_set_destination(tcp_t *tcp)
608 608 {
609 609 uint32_t mss_max;
610 610 uint32_t mss;
611 611 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
612 612 conn_t *connp = tcp->tcp_connp;
613 613 tcp_stack_t *tcps = tcp->tcp_tcps;
614 614 iulp_t uinfo;
615 615 int error;
616 616 uint32_t flags;
617 617
618 618 flags = IPDF_LSO | IPDF_ZCOPY;
619 619 /*
620 620 * Make sure we have a dce for the destination to avoid dce_ident
621 621 * contention for connected sockets.
622 622 */
623 623 flags |= IPDF_UNIQUE_DCE;
624 624
625 625 if (!tcps->tcps_ignore_path_mtu)
626 626 connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;
627 627
628 628 /* Use conn_lock to satisfy ASSERT; tcp is already serialized */
629 629 mutex_enter(&connp->conn_lock);
630 630 error = conn_connect(connp, &uinfo, flags);
631 631 mutex_exit(&connp->conn_lock);
632 632 if (error != 0)
633 633 return (error);
634 634
635 635 error = tcp_build_hdrs(tcp);
636 636 if (error != 0)
637 637 return (error);
638 638
639 639 tcp->tcp_localnet = uinfo.iulp_localnet;
640 640
641 641 if (uinfo.iulp_rtt != 0) {
642 642 tcp->tcp_rtt_sa = MSEC2NSEC(uinfo.iulp_rtt);
643 643 tcp->tcp_rtt_sd = MSEC2NSEC(uinfo.iulp_rtt_sd);
644 644 tcp->tcp_rto = tcp_calculate_rto(tcp, tcps, 0);
645 645 }
646 646 if (uinfo.iulp_ssthresh != 0)
647 647 tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
648 648 else
649 649 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
650 650 if (uinfo.iulp_spipe > 0) {
651 651 connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
652 652 tcps->tcps_max_buf);
653 653 if (tcps->tcps_snd_lowat_fraction != 0) {
654 654 connp->conn_sndlowat = connp->conn_sndbuf /
655 655 tcps->tcps_snd_lowat_fraction;
656 656 }
657 657 (void) tcp_maxpsz_set(tcp, B_TRUE);
658 658 }
659 659 /*
660 660 * Note that up till now, acceptor always inherits receive
661 661 * window from the listener. But if there is a metric
662 662 * associated with a host, we should use that instead of
663 663 * inheriting it from listener. Thus we need to pass this
664 664 * info back to the caller.
665 665 */
666 666 if (uinfo.iulp_rpipe > 0) {
667 667 tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
668 668 tcps->tcps_max_buf);
669 669 }
670 670
671 671 if (uinfo.iulp_rtomax > 0) {
672 672 tcp->tcp_second_timer_threshold =
673 673 uinfo.iulp_rtomax;
674 674 }
675 675
676 676 /*
677 677 * Use the metric option settings, iulp_tstamp_ok and
678 678 * iulp_wscale_ok, only for active open. What this means
679 679 * is that if the other side uses timestamp or window
680 680 * scale option, TCP will also use those options. That
681 681 * is for passive open. If the application sets a
682 682 * large window, window scale is enabled regardless of
683 683 * the value in iulp_wscale_ok. This is the behavior
684 684 * since 2.6. So we keep it.
685 685 * The only case left in passive open processing is the
686 686 * check for SACK.
687 687 * For ECN, it should probably be like SACK. But the
688 688 * current value is binary, so we treat it like the other
689 689 * cases. The metric only controls active open. For passive
690 690 * open, the ndd param, tcp_ecn_permitted, controls the
691 691 * behavior.
692 692 */
693 693 if (!tcp_detached) {
694 694 /*
695 695 * The if check means that the following can only
696 696 * be turned on by the metrics only IRE, but not off.
697 697 */
698 698 if (uinfo.iulp_tstamp_ok)
699 699 tcp->tcp_snd_ts_ok = B_TRUE;
700 700 if (uinfo.iulp_wscale_ok)
701 701 tcp->tcp_snd_ws_ok = B_TRUE;
702 702 if (uinfo.iulp_sack == 2)
703 703 tcp->tcp_snd_sack_ok = B_TRUE;
704 704 if (uinfo.iulp_ecn_ok)
705 705 tcp->tcp_ecn_ok = B_TRUE;
706 706 } else {
707 707 /*
708 708 * Passive open.
709 709 *
710 710 * As above, the if check means that SACK can only be
711 711 * turned on by the metric only IRE.
712 712 */
713 713 if (uinfo.iulp_sack > 0) {
714 714 tcp->tcp_snd_sack_ok = B_TRUE;
715 715 }
716 716 }
717 717
718 718 /*
719 719 * XXX Note that currently, iulp_mtu can be as small as 68
720 720 * because of PMTUd. So tcp_mss may go to negative if combined
721 721 * length of all those options exceeds 28 bytes. But because
722 722 * of the tcp_mss_min check below, we may not have a problem if
723 723 * tcp_mss_min is of a reasonable value. The default is 1 so
724 724 * the negative problem still exists. And the check defeats PMTUd.
725 725 * In fact, if PMTUd finds that the MSS should be smaller than
726 726 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
727 727 * value.
728 728 *
729 729 * We do not deal with that now. All those problems related to
730 730 * PMTUd will be fixed later.
731 731 */
732 732 ASSERT(uinfo.iulp_mtu != 0);
733 733 mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;
734 734
735 735 /* Sanity check for MSS value. */
736 736 if (connp->conn_ipversion == IPV4_VERSION)
737 737 mss_max = tcps->tcps_mss_max_ipv4;
738 738 else
739 739 mss_max = tcps->tcps_mss_max_ipv6;
740 740
741 741 if (tcp->tcp_ipsec_overhead == 0)
742 742 tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);
743 743
744 744 mss -= tcp->tcp_ipsec_overhead;
745 745
746 746 if (mss < tcps->tcps_mss_min)
747 747 mss = tcps->tcps_mss_min;
748 748 if (mss > mss_max)
749 749 mss = mss_max;
750 750
751 751 /* Note that this is the maximum MSS, excluding all options. */
752 752 tcp->tcp_mss = mss;
753 753
754 754 /*
755 755 * Update the tcp connection with LSO capability.
756 756 */
757 757 tcp_update_lso(tcp, connp->conn_ixa);
758 758
759 759 /*
760 760 * Initialize the ISS here now that we have the full connection ID.
761 761 * The RFC 1948 method of initial sequence number generation requires
762 762 * knowledge of the full connection ID before setting the ISS.
763 763 */
764 764 tcp_iss_init(tcp);
765 765
766 766 tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);
767 767
768 768 /*
769 769 * Make sure that conn is not marked incipient
770 770 * for incoming connections. A blind
771 771 * removal of incipient flag is cheaper than
772 772 * check and removal.
773 773 */
774 774 mutex_enter(&connp->conn_lock);
775 775 connp->conn_state_flags &= ~CONN_INCIPIENT;
776 776 mutex_exit(&connp->conn_lock);
777 777 return (0);
778 778 }
779 779
780 780 /*
781 781 * tcp_clean_death / tcp_close_detached must not be called more than once
782 782 * on a tcp. Thus every function that potentially calls tcp_clean_death
783 783 * must check for the tcp state before calling tcp_clean_death.
784 784 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
785 785 * tcp_timer_handler, all check for the tcp state.
786 786 */
787 787 /* ARGSUSED */
788 788 void
789 789 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
790 790 ip_recv_attr_t *dummy)
791 791 {
792 792 tcp_t *tcp = ((conn_t *)arg)->conn_tcp;
793 793
794 794 freemsg(mp);
795 795 if (tcp->tcp_state > TCPS_BOUND)
796 796 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
797 797 }
798 798
799 799 /*
800 800 * We are dying for some reason. Try to do it gracefully. (May be called
801 801 * as writer.)
802 802 *
803 803 * Return -1 if the structure was not cleaned up (if the cleanup had to be
804 804 * done by a service procedure).
805 805 * TBD - Should the return value distinguish between the tcp_t being
806 806 * freed and it being reinitialized?
807 807 */
808 808 int
809 809 tcp_clean_death(tcp_t *tcp, int err)
810 810 {
811 811 mblk_t *mp;
812 812 queue_t *q;
813 813 conn_t *connp = tcp->tcp_connp;
814 814 tcp_stack_t *tcps = tcp->tcp_tcps;
815 815
816 816 if (tcp->tcp_fused)
817 817 tcp_unfuse(tcp);
818 818
819 819 if (tcp->tcp_linger_tid != 0 &&
820 820 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
821 821 tcp_stop_lingering(tcp);
822 822 }
823 823
824 824 ASSERT(tcp != NULL);
825 825 ASSERT((connp->conn_family == AF_INET &&
826 826 connp->conn_ipversion == IPV4_VERSION) ||
827 827 (connp->conn_family == AF_INET6 &&
828 828 (connp->conn_ipversion == IPV4_VERSION ||
829 829 connp->conn_ipversion == IPV6_VERSION)));
830 830
831 831 if (TCP_IS_DETACHED(tcp)) {
832 832 if (tcp->tcp_hard_binding) {
833 833 /*
834 834 * It's an eager that we are dealing with. We close the
835 835 * eager but in case a conn_ind has already gone to the
836 836 * listener, let tcp_accept_finish() send a discon_ind
837 837 * to the listener and drop the last reference. If the
838 838 * listener doesn't even know about the eager i.e. the
839 839 * conn_ind hasn't gone up, blow away the eager and drop
840 840 * the last reference as well. If the conn_ind has gone
841 841 * up, state should be BOUND. tcp_accept_finish
842 842 * will figure out that the connection has received a
843 843 * RST and will send a DISCON_IND to the application.
844 844 */
845 845 tcp_closei_local(tcp);
846 846 if (!tcp->tcp_tconnind_started) {
847 847 CONN_DEC_REF(connp);
848 848 } else {
849 849 tcp->tcp_state = TCPS_BOUND;
850 850 DTRACE_TCP6(state__change, void, NULL,
851 851 ip_xmit_attr_t *, connp->conn_ixa,
852 852 void, NULL, tcp_t *, tcp, void, NULL,
853 853 int32_t, TCPS_CLOSED);
854 854 }
855 855 } else {
856 856 tcp_close_detached(tcp);
857 857 }
858 858 return (0);
859 859 }
860 860
861 861 TCP_STAT(tcps, tcp_clean_death_nondetached);
862 862
863 863 /*
864 864 * The connection is dead. Decrement listener connection counter if
865 865 * necessary.
866 866 */
867 867 if (tcp->tcp_listen_cnt != NULL)
868 868 TCP_DECR_LISTEN_CNT(tcp);
869 869
870 870 /*
871 871 * When a connection is moved to TIME_WAIT state, the connection
872 872 * counter is already decremented. So no need to decrement here
873 873 * again. See SET_TIME_WAIT() macro.
874 874 */
875 875 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
876 876 tcp->tcp_state < TCPS_TIME_WAIT) {
877 877 TCPS_CONN_DEC(tcps);
878 878 }
879 879
880 880 q = connp->conn_rq;
881 881
882 882 /* Trash all inbound data */
883 883 if (!IPCL_IS_NONSTR(connp)) {
884 884 ASSERT(q != NULL);
885 885 flushq(q, FLUSHALL);
886 886 }
887 887
888 888 /*
889 889 * If we are at least part way open and there is an error
890 890 * (err==0 implies no error)
891 891 * notify our client by a T_DISCON_IND.
892 892 */
893 893 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
894 894 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
895 895 !TCP_IS_SOCKET(tcp)) {
896 896 /*
897 897 * Send M_FLUSH according to TPI. Because sockets will
898 898 * (and must) ignore FLUSHR we do that only for TPI
899 899 * endpoints and sockets in STREAMS mode.
900 900 */
901 901 (void) putnextctl1(q, M_FLUSH, FLUSHR);
902 902 }
903 903 if (connp->conn_debug) {
904 904 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
905 905 "tcp_clean_death: discon err %d", err);
906 906 }
907 907 if (IPCL_IS_NONSTR(connp)) {
908 908 /* Direct socket, use upcall */
909 909 (*connp->conn_upcalls->su_disconnected)(
910 910 connp->conn_upper_handle, tcp->tcp_connid, err);
911 911 } else {
912 912 mp = mi_tpi_discon_ind(NULL, err, 0);
913 913 if (mp != NULL) {
914 914 putnext(q, mp);
915 915 } else {
916 916 if (connp->conn_debug) {
917 917 (void) strlog(TCP_MOD_ID, 0, 1,
918 918 SL_ERROR|SL_TRACE,
919 919 "tcp_clean_death, sending M_ERROR");
920 920 }
921 921 (void) putnextctl1(q, M_ERROR, EPROTO);
922 922 }
923 923 }
924 924 if (tcp->tcp_state <= TCPS_SYN_RCVD) {
925 925 /* SYN_SENT or SYN_RCVD */
926 926 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
927 927 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
928 928 /* ESTABLISHED or CLOSE_WAIT */
929 929 TCPS_BUMP_MIB(tcps, tcpEstabResets);
930 930 }
931 931 }
932 932
933 933 /*
934 934 * ESTABLISHED non-STREAMS eagers are not 'detached' because
935 935 * an upper handle is obtained when the SYN-ACK comes in. So it
936 936 * should receive the 'disconnected' upcall, but tcp_reinit should
937 937 * not be called since this is an eager.
938 938 */
939 939 if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
940 940 tcp_closei_local(tcp);
941 941 tcp->tcp_state = TCPS_BOUND;
942 942 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
943 943 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
944 944 int32_t, TCPS_CLOSED);
945 945 return (0);
946 946 }
947 947
948 948 tcp_reinit(tcp);
949 949 if (IPCL_IS_NONSTR(connp))
950 950 (void) tcp_do_unbind(connp);
951 951
952 952 return (-1);
953 953 }
954 954
955 955 /*
956 956 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
957 957 * to expire, stop the wait and finish the close.
958 958 */
959 959 void
960 960 tcp_stop_lingering(tcp_t *tcp)
961 961 {
962 962 clock_t delta = 0;
963 963 tcp_stack_t *tcps = tcp->tcp_tcps;
964 964 conn_t *connp = tcp->tcp_connp;
965 965
966 966 tcp->tcp_linger_tid = 0;
967 967 if (tcp->tcp_state > TCPS_LISTEN) {
968 968 tcp_acceptor_hash_remove(tcp);
969 969 mutex_enter(&tcp->tcp_non_sq_lock);
970 970 if (tcp->tcp_flow_stopped) {
971 971 tcp_clrqfull(tcp);
972 972 }
973 973 mutex_exit(&tcp->tcp_non_sq_lock);
974 974
975 975 if (tcp->tcp_timer_tid != 0) {
976 976 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
977 977 tcp->tcp_timer_tid = 0;
978 978 }
979 979 /*
980 980 * Need to cancel those timers which will not be used when
981 981 * TCP is detached. This has to be done before the conn_wq
982 982 * is cleared.
983 983 */
984 984 tcp_timers_stop(tcp);
985 985
986 986 tcp->tcp_detached = B_TRUE;
987 987 connp->conn_rq = NULL;
988 988 connp->conn_wq = NULL;
989 989
990 990 if (tcp->tcp_state == TCPS_TIME_WAIT) {
991 991 tcp_time_wait_append(tcp);
992 992 TCP_DBGSTAT(tcps, tcp_detach_time_wait);
993 993 goto finish;
994 994 }
995 995
996 996 /*
997 997 * If delta is zero the timer event wasn't executed and was
998 998 * successfully canceled. In this case we need to restart it
999 999 * with the minimal delta possible.
1000 1000 */
1001 1001 if (delta >= 0) {
1002 1002 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1003 1003 delta ? delta : 1);
1004 1004 }
1005 1005 } else {
1006 1006 tcp_closei_local(tcp);
1007 1007 CONN_DEC_REF(connp);
1008 1008 }
1009 1009 finish:
1010 1010 tcp->tcp_detached = B_TRUE;
1011 1011 connp->conn_rq = NULL;
1012 1012 connp->conn_wq = NULL;
1013 1013
1014 1014 /* Signal closing thread that it can complete close */
1015 1015 mutex_enter(&tcp->tcp_closelock);
1016 1016 tcp->tcp_closed = 1;
1017 1017 cv_signal(&tcp->tcp_closecv);
1018 1018 mutex_exit(&tcp->tcp_closelock);
1019 1019
1020 1020 /* If we have an upper handle (socket), release it */
1021 1021 if (IPCL_IS_NONSTR(connp)) {
1022 1022 ASSERT(connp->conn_upper_handle != NULL);
1023 1023 (*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
1024 1024 connp->conn_upper_handle = NULL;
1025 1025 connp->conn_upcalls = NULL;
1026 1026 }
1027 1027 }
1028 1028
1029 1029 void
1030 1030 tcp_close_common(conn_t *connp, int flags)
1031 1031 {
1032 1032 tcp_t *tcp = connp->conn_tcp;
1033 1033 mblk_t *mp = &tcp->tcp_closemp;
1034 1034 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
1035 1035 mblk_t *bp;
1036 1036
1037 1037 ASSERT(connp->conn_ref >= 2);
1038 1038
1039 1039 /*
1040 1040 * Mark the conn as closing. ipsq_pending_mp_add will not
1041 1041 * add any mp to the pending mp list, after this conn has
1042 1042 * started closing.
1043 1043 */
1044 1044 mutex_enter(&connp->conn_lock);
1045 1045 connp->conn_state_flags |= CONN_CLOSING;
1046 1046 if (connp->conn_oper_pending_ill != NULL)
1047 1047 conn_ioctl_cleanup_reqd = B_TRUE;
1048 1048 CONN_INC_REF_LOCKED(connp);
1049 1049 mutex_exit(&connp->conn_lock);
1050 1050 tcp->tcp_closeflags = (uint8_t)flags;
1051 1051 ASSERT(connp->conn_ref >= 3);
1052 1052
1053 1053 /*
1054 1054 * tcp_closemp_used is used below without any protection of a lock
1055 1055 * as we don't expect any one else to use it concurrently at this
1056 1056 * point otherwise it would be a major defect.
1057 1057 */
1058 1058
1059 1059 if (mp->b_prev == NULL)
1060 1060 tcp->tcp_closemp_used = B_TRUE;
1061 1061 else
1062 1062 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
1063 1063 "connp %p tcp %p\n", (void *)connp, (void *)tcp);
1064 1064
1065 1065 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
1066 1066
1067 1067 /*
1068 1068 * Cleanup any queued ioctls here. This must be done before the wq/rq
1069 1069 * are re-written by tcp_close_output().
1070 1070 */
1071 1071 if (conn_ioctl_cleanup_reqd)
1072 1072 conn_ioctl_cleanup(connp);
1073 1073
1074 1074 /*
1075 1075 * As CONN_CLOSING is set, no further ioctls should be passed down to
1076 1076 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
1077 1077 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
1078 1078 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
1079 1079 * was still in flight at the time, we wait for it here. See comments
1080 1080 * for CONN_INC_IOCTLREF in ip.h for details.
1081 1081 */
1082 1082 mutex_enter(&connp->conn_lock);
1083 1083 while (connp->conn_ioctlref > 0)
1084 1084 cv_wait(&connp->conn_cv, &connp->conn_lock);
1085 1085 ASSERT(connp->conn_ioctlref == 0);
1086 1086 ASSERT(connp->conn_oper_pending_ill == NULL);
1087 1087 mutex_exit(&connp->conn_lock);
1088 1088
1089 1089 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
1090 1090 NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1091 1091
1092 1092 /*
1093 1093 * For non-STREAMS sockets, the normal case is that the conn makes
1094 1094 * an upcall when it's finally closed, so there is no need to wait
1095 1095 * in the protocol. But in case of SO_LINGER the thread sleeps here
1096 1096 * so it can properly deal with the thread being interrupted.
1097 1097 */
1098 1098 if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
1099 1099 goto nowait;
1100 1100
1101 1101 mutex_enter(&tcp->tcp_closelock);
1102 1102 while (!tcp->tcp_closed) {
1103 1103 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
1104 1104 /*
1105 1105 * The cv_wait_sig() was interrupted. We now do the
1106 1106 * following:
1107 1107 *
1108 1108 * 1) If the endpoint was lingering, we allow this
1109 1109 * to be interrupted by cancelling the linger timeout
1110 1110 * and closing normally.
1111 1111 *
1112 1112 * 2) Revert to calling cv_wait()
1113 1113 *
1114 1114 * We revert to using cv_wait() to avoid an
1115 1115 * infinite loop which can occur if the calling
1116 1116 * thread is higher priority than the squeue worker
1117 1117 * thread and is bound to the same cpu.
1118 1118 */
1119 1119 if (connp->conn_linger && connp->conn_lingertime > 0) {
1120 1120 mutex_exit(&tcp->tcp_closelock);
1121 1121 /* Entering squeue, bump ref count. */
1122 1122 CONN_INC_REF(connp);
1123 1123 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
1124 1124 SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
1125 1125 tcp_linger_interrupted, connp, NULL,
1126 1126 tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1127 1127 mutex_enter(&tcp->tcp_closelock);
1128 1128 }
1129 1129 break;
1130 1130 }
1131 1131 }
1132 1132 while (!tcp->tcp_closed)
1133 1133 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
1134 1134 mutex_exit(&tcp->tcp_closelock);
1135 1135
1136 1136 /*
1137 1137 * In the case of listener streams that have eagers in the q or q0
1138 1138 * we wait for the eagers to drop their reference to us. conn_rq and
1139 1139 * conn_wq of the eagers point to our queues. By waiting for the
1140 1140 * refcnt to drop to 1, we are sure that the eagers have cleaned
1141 1141 * up their queue pointers and also dropped their references to us.
1142 1142 *
1143 1143 * For non-STREAMS sockets we do not have to wait here; the
1144 1144 * listener will instead make a su_closed upcall when the last
1145 1145 * reference is dropped.
1146 1146 */
1147 1147 if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
1148 1148 mutex_enter(&connp->conn_lock);
1149 1149 while (connp->conn_ref != 1) {
1150 1150 cv_wait(&connp->conn_cv, &connp->conn_lock);
1151 1151 }
1152 1152 mutex_exit(&connp->conn_lock);
1153 1153 }
1154 1154
1155 1155 nowait:
1156 1156 connp->conn_cpid = NOPID;
1157 1157 }
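The SO_LINGER wait handled above is driven by an ordinary setsockopt() from user land. A minimal application-side sketch (sock is assumed to be a connected TCP socket; the timeout value is arbitrary):

#include <sys/socket.h>
#include <unistd.h>

	struct linger	lg;

	lg.l_onoff = 1;		/* linger on close */
	lg.l_linger = 10;	/* seconds; arbitrary for illustration */
	(void) setsockopt(sock, SOL_SOCKET, SO_LINGER, &lg, sizeof (lg));

	/*
	 * With lingering enabled, close() blocks (up to the timeout) while
	 * the kernel waits in tcp_close_common() above; a signal delivered
	 * to the thread is handled by tcp_linger_interrupted() below.
	 */
	(void) close(sock);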
1158 1158
1159 1159 /*
1160 1160 * Called by tcp_close() routine via squeue when lingering is
1161 1161 * interrupted by a signal.
1162 1162 */
1163 1163
1164 1164 /* ARGSUSED */
1165 1165 static void
1166 1166 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1167 1167 {
1168 1168 conn_t *connp = (conn_t *)arg;
1169 1169 tcp_t *tcp = connp->conn_tcp;
1170 1170
1171 1171 freeb(mp);
1172 1172 if (tcp->tcp_linger_tid != 0 &&
1173 1173 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
1174 1174 tcp_stop_lingering(tcp);
1175 1175 tcp->tcp_client_errno = EINTR;
1176 1176 }
1177 1177 }
1178 1178
1179 1179 /*
1180 1180 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
1181 1181 * Some stream heads get upset if they see these later on as anything but NULL.
1182 1182 */
1183 1183 void
1184 1184 tcp_close_mpp(mblk_t **mpp)
1185 1185 {
1186 1186 mblk_t *mp;
1187 1187
1188 1188 if ((mp = *mpp) != NULL) {
1189 1189 do {
1190 1190 mp->b_next = NULL;
1191 1191 mp->b_prev = NULL;
1192 1192 } while ((mp = mp->b_cont) != NULL);
1193 1193
1194 1194 mp = *mpp;
1195 1195 *mpp = NULL;
1196 1196 freemsg(mp);
1197 1197 }
1198 1198 }
1199 1199
1200 1200 /* Do detached close. */
1201 1201 void
1202 1202 tcp_close_detached(tcp_t *tcp)
1203 1203 {
1204 1204 if (tcp->tcp_fused)
1205 1205 tcp_unfuse(tcp);
1206 1206
1207 1207 /*
1208 1208 * Clustering code serializes TCP disconnect callbacks and
1209 1209 * cluster tcp list walks by blocking a TCP disconnect callback
1210 1210 * if a cluster tcp list walk is in progress. This ensures
1211 1211 * accurate accounting of TCPs in the cluster code even though
1212 1212 * the TCP list walk itself is not atomic.
1213 1213 */
1214 1214 tcp_closei_local(tcp);
1215 1215 CONN_DEC_REF(tcp->tcp_connp);
1216 1216 }
1217 1217
1218 1218 /*
1219 1219 * The tcp_t is going away. Remove it from all lists and set it
1220 1220 * to TCPS_CLOSED. The freeing up of memory is deferred until
1221 1221 * tcp_inactive. This is needed since a thread in tcp_rput might have
1222 1222 * done a CONN_INC_REF on this structure before it was removed from the
1223 1223 * hashes.
1224 1224 */
1225 1225 void
1226 1226 tcp_closei_local(tcp_t *tcp)
1227 1227 {
1228 1228 conn_t *connp = tcp->tcp_connp;
1229 1229 tcp_stack_t *tcps = tcp->tcp_tcps;
1230 1230 int32_t oldstate;
1231 1231
1232 1232 if (!TCP_IS_SOCKET(tcp))
1233 1233 tcp_acceptor_hash_remove(tcp);
1234 1234
1235 1235 /*
1236 1236 * This can be called via tcp_time_wait_processing() if TCP gets a
1237 1237 * SYN with sequence number outside the TIME-WAIT connection's
1238 1238 * window. So we need to check for TIME-WAIT state here as the
1239 1239 * connection counter is already decremented. See SET_TIME_WAIT()
1240 1240 * macro
1241 1241 */
1242 1242 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
1243 1243 tcp->tcp_state < TCPS_TIME_WAIT) {
1244 1244 TCPS_CONN_DEC(tcps);
1245 1245 }
1246 1246
1247 1247 /*
1248 1248 * If we are an eager connection hanging off a listener that
1249 1249 * hasn't formally accepted the connection yet, get off its
1250 1250 * list and blow off any data that we have accumulated.
1251 1251 */
1252 1252 if (tcp->tcp_listener != NULL) {
1253 1253 tcp_t *listener = tcp->tcp_listener;
1254 1254 mutex_enter(&listener->tcp_eager_lock);
1255 1255 /*
1256 1256 * tcp_tconnind_started == B_TRUE means that the
1257 1257 * conn_ind has already gone to listener. At
1258 1258 * this point, eager will be closed but we
1259 1259 * leave it in listeners eager list so that
1260 1260 * if listener decides to close without doing
1261 1261 * accept, we can clean this up. In tcp_tli_accept
1262 1262 * we take care of the case of accept on closed
1263 1263 * eager.
1264 1264 */
1265 1265 if (!tcp->tcp_tconnind_started) {
1266 1266 tcp_eager_unlink(tcp);
1267 1267 mutex_exit(&listener->tcp_eager_lock);
1268 1268 /*
1269 1269 * We don't want to have any pointers to the
1270 1270 * listener queue, after we have released our
1271 1271 * reference on the listener
1272 1272 */
1273 1273 ASSERT(tcp->tcp_detached);
1274 1274 connp->conn_rq = NULL;
1275 1275 connp->conn_wq = NULL;
1276 1276 CONN_DEC_REF(listener->tcp_connp);
1277 1277 } else {
1278 1278 mutex_exit(&listener->tcp_eager_lock);
1279 1279 }
1280 1280 }
1281 1281
1282 1282 /* Stop all the timers */
1283 1283 tcp_timers_stop(tcp);
1284 1284
1285 1285 if (tcp->tcp_state == TCPS_LISTEN) {
1286 1286 if (tcp->tcp_ip_addr_cache) {
1287 1287 kmem_free((void *)tcp->tcp_ip_addr_cache,
1288 1288 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
1289 1289 tcp->tcp_ip_addr_cache = NULL;
1290 1290 }
1291 1291 }
1292 1292
1293 1293 /* Decrement listener connection counter if necessary. */
1294 1294 if (tcp->tcp_listen_cnt != NULL)
1295 1295 TCP_DECR_LISTEN_CNT(tcp);
1296 1296
1297 1297 mutex_enter(&tcp->tcp_non_sq_lock);
1298 1298 if (tcp->tcp_flow_stopped)
1299 1299 tcp_clrqfull(tcp);
1300 1300 mutex_exit(&tcp->tcp_non_sq_lock);
1301 1301
1302 1302 tcp_bind_hash_remove(tcp);
1303 1303 /*
1304 1304 * If the tcp_time_wait_collector (which runs outside the squeue)
1305 1305 * is trying to remove this tcp from the time wait list, we will
1306 1306 * block in tcp_time_wait_remove while trying to acquire the
1307 1307 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
1308 1308 * requires the ipcl_hash_remove to be ordered after the
1309 1309 * tcp_time_wait_remove for the refcnt checks to work correctly.
1310 1310 */
1311 1311 if (tcp->tcp_state == TCPS_TIME_WAIT)
1312 1312 (void) tcp_time_wait_remove(tcp, NULL);
1313 1313 CL_INET_DISCONNECT(connp);
1314 1314 ipcl_hash_remove(connp);
1315 1315 oldstate = tcp->tcp_state;
1316 1316 tcp->tcp_state = TCPS_CLOSED;
1317 1317 /* Need to probe before ixa_cleanup() is called */
1318 1318 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1319 1319 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
1320 1320 int32_t, oldstate);
1321 1321 ixa_cleanup(connp->conn_ixa);
1322 1322
1323 1323 /*
1324 1324 * Mark the conn as CONDEMNED
1325 1325 */
1326 1326 mutex_enter(&connp->conn_lock);
1327 1327 connp->conn_state_flags |= CONN_CONDEMNED;
1328 1328 mutex_exit(&connp->conn_lock);
1329 1329
1330 1330 ASSERT(tcp->tcp_time_wait_next == NULL);
1331 1331 ASSERT(tcp->tcp_time_wait_prev == NULL);
1332 1332 ASSERT(tcp->tcp_time_wait_expire == 0);
1333 1333
1334 1334 tcp_ipsec_cleanup(tcp);
1335 1335 }
1336 1336
1337 1337 /*
1338 1338 * tcp is dying (called from ipcl_conn_destroy and error cases).
1339 1339 * Free the tcp_t in either case.
1340 1340 */
1341 1341 void
1342 1342 tcp_free(tcp_t *tcp)
1343 1343 {
1344 1344 mblk_t *mp;
1345 1345 conn_t *connp = tcp->tcp_connp;
1346 1346
1347 1347 ASSERT(tcp != NULL);
1348 1348 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);
1349 1349
1350 1350 connp->conn_rq = NULL;
1351 1351 connp->conn_wq = NULL;
1352 1352
1353 1353 tcp_close_mpp(&tcp->tcp_xmit_head);
1354 1354 tcp_close_mpp(&tcp->tcp_reass_head);
1355 1355 if (tcp->tcp_rcv_list != NULL) {
1356 1356 /* Free b_next chain */
1357 1357 tcp_close_mpp(&tcp->tcp_rcv_list);
1358 1358 }
1359 1359 if ((mp = tcp->tcp_urp_mp) != NULL) {
1360 1360 freemsg(mp);
1361 1361 }
1362 1362 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1363 1363 freemsg(mp);
1364 1364 }
1365 1365
1366 1366 if (tcp->tcp_fused_sigurg_mp != NULL) {
1367 1367 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1368 1368 freeb(tcp->tcp_fused_sigurg_mp);
1369 1369 tcp->tcp_fused_sigurg_mp = NULL;
1370 1370 }
1371 1371
1372 1372 if (tcp->tcp_ordrel_mp != NULL) {
1373 1373 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1374 1374 freeb(tcp->tcp_ordrel_mp);
1375 1375 tcp->tcp_ordrel_mp = NULL;
1376 1376 }
1377 1377
1378 1378 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
1379 1379 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
1380 1380
1381 1381 if (tcp->tcp_hopopts != NULL) {
1382 1382 mi_free(tcp->tcp_hopopts);
1383 1383 tcp->tcp_hopopts = NULL;
1384 1384 tcp->tcp_hopoptslen = 0;
1385 1385 }
1386 1386 ASSERT(tcp->tcp_hopoptslen == 0);
1387 1387 if (tcp->tcp_dstopts != NULL) {
1388 1388 mi_free(tcp->tcp_dstopts);
1389 1389 tcp->tcp_dstopts = NULL;
1390 1390 tcp->tcp_dstoptslen = 0;
1391 1391 }
1392 1392 ASSERT(tcp->tcp_dstoptslen == 0);
1393 1393 if (tcp->tcp_rthdrdstopts != NULL) {
1394 1394 mi_free(tcp->tcp_rthdrdstopts);
1395 1395 tcp->tcp_rthdrdstopts = NULL;
1396 1396 tcp->tcp_rthdrdstoptslen = 0;
1397 1397 }
1398 1398 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
1399 1399 if (tcp->tcp_rthdr != NULL) {
1400 1400 mi_free(tcp->tcp_rthdr);
1401 1401 tcp->tcp_rthdr = NULL;
1402 1402 tcp->tcp_rthdrlen = 0;
1403 1403 }
1404 1404 ASSERT(tcp->tcp_rthdrlen == 0);
1405 1405
1406 1406 /*
1407 1407 * The following is really blowing away a union.
1408 1408 * It happens to have exactly two members of identical size,
1409 1409 * so the following code is enough.
1410 1410 */
1411 1411 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1412 1412
1413 1413 /* Allow the CC algorithm to clean up after itself. */
1414 1414 if (tcp->tcp_cc_algo != NULL && tcp->tcp_cc_algo->cb_destroy != NULL)
1415 1415 tcp->tcp_cc_algo->cb_destroy(&tcp->tcp_ccv);
1416 1416
1417 1417 /*
1418 1418 * If this is a non-STREAM socket still holding on to an upper
1419 1419 * handle, release it. As a result of fallback we might also see
1420 1420 * STREAMS based conns with upper handles, in which case there is
1421 1421 * nothing to do other than clearing the field.
1422 1422 */
1423 1423 if (connp->conn_upper_handle != NULL) {
1424 1424 if (IPCL_IS_NONSTR(connp)) {
1425 1425 (*connp->conn_upcalls->su_closed)(
1426 1426 connp->conn_upper_handle);
1427 1427 tcp->tcp_detached = B_TRUE;
1428 1428 }
1429 1429 connp->conn_upper_handle = NULL;
1430 1430 connp->conn_upcalls = NULL;
1431 1431 }
1432 1432 }
1433 1433
1434 1434 /*
1435 1435 * tcp_get_conn/tcp_free_conn
1436 1436 *
1437 1437 * tcp_get_conn is used to get a clean tcp connection structure.
1438 1438 * It tries to reuse the connections put on the freelist by the
1439 1439 * time_wait_collector failing which it goes to kmem_cache. This
1440 1440 * way has two benefits compared to just allocating from and
1441 1441 * freeing to kmem_cache.
1442 1442 * 1) The time_wait_collector can free (which includes the cleanup)
1443 1443 * outside the squeue. So when the interrupt comes, we have a clean
1444 1444 * connection sitting in the freelist. Obviously, this buys us
1445 1445 * performance.
1446 1446 *
1447 1447 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
1448 1448 * has multiple disadvantages - tying up the squeue during alloc.
1449 1449 * But allocating the conn/tcp in IP land is also not the best since
1450 1450 * we can't check the 'q' and 'q0' which are protected by squeue and
1451 1451 * blindly allocate memory which might have to be freed here if we are
1452 1452 * not allowed to accept the connection. By using the freelist and
1453 1453 * putting the conn/tcp back in freelist, we don't pay a penalty for
1454 1454 * allocating memory without checking 'q/q0' and freeing it if we can't
1455 1455 * accept the connection.
1456 1456 *
1457 1457 * Care should be taken to put the conn back in the same squeue's freelist
1458 1458 * from which it was allocated. Best results are obtained if conn is
1459 1459 * allocated from listener's squeue and freed to the same. Time wait
1460 1460 * collector will free up the freelist if the connection ends up sitting
1461 1461 * there for too long.
1462 1462 */
1463 1463 conn_t *
1464 1464 tcp_get_conn(void *arg, tcp_stack_t *tcps)
1465 1465 {
1466 1466 tcp_t *tcp = NULL;
1467 1467 conn_t *connp = NULL;
1468 1468 squeue_t *sqp = (squeue_t *)arg;
1469 1469 tcp_squeue_priv_t *tcp_time_wait;
1470 1470 netstack_t *ns;
1471 1471 mblk_t *tcp_rsrv_mp = NULL;
1472 1472
1473 1473 tcp_time_wait =
1474 1474 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
1475 1475
1476 1476 mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1477 1477 tcp = tcp_time_wait->tcp_free_list;
1478 1478 ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
1479 1479 if (tcp != NULL) {
1480 1480 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
1481 1481 tcp_time_wait->tcp_free_list_cnt--;
1482 1482 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1483 1483 tcp->tcp_time_wait_next = NULL;
1484 1484 connp = tcp->tcp_connp;
1485 1485 connp->conn_flags |= IPCL_REUSED;
1486 1486
1487 1487 ASSERT(tcp->tcp_tcps == NULL);
1488 1488 ASSERT(connp->conn_netstack == NULL);
1489 1489 ASSERT(tcp->tcp_rsrv_mp != NULL);
1490 1490 ns = tcps->tcps_netstack;
1491 1491 netstack_hold(ns);
1492 1492 connp->conn_netstack = ns;
1493 1493 connp->conn_ixa->ixa_ipst = ns->netstack_ip;
1494 1494 tcp->tcp_tcps = tcps;
1495 1495 ipcl_globalhash_insert(connp);
1496 1496
1497 1497 connp->conn_ixa->ixa_notify_cookie = tcp;
1498 1498 ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
1499 1499 connp->conn_recv = tcp_input_data;
1500 1500 ASSERT(connp->conn_recvicmp == tcp_icmp_input);
1501 1501 ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
1502 1502 return (connp);
1503 1503 }
1504 1504 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1505 1505 /*
1506 1506 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
1507 1507 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
1508 1508 */
1509 1509 tcp_rsrv_mp = allocb(0, BPRI_HI);
1510 1510 if (tcp_rsrv_mp == NULL)
1511 1511 return (NULL);
1512 1512
1513 1513 if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
1514 1514 tcps->tcps_netstack)) == NULL) {
1515 1515 freeb(tcp_rsrv_mp);
1516 1516 return (NULL);
1517 1517 }
1518 1518
1519 1519 tcp = connp->conn_tcp;
1520 1520 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
1521 1521 mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);
1522 1522
1523 1523 tcp->tcp_tcps = tcps;
1524 1524
1525 1525 connp->conn_recv = tcp_input_data;
1526 1526 connp->conn_recvicmp = tcp_icmp_input;
1527 1527 connp->conn_verifyicmp = tcp_verifyicmp;
1528 1528
1529 1529 /*
1530 1530 * Register tcp_notify to listen to capability changes detected by IP.
1531 1531 * This upcall is made in the context of the call to conn_ip_output
1532 1532 * thus it is inside the squeue.
1533 1533 */
1534 1534 connp->conn_ixa->ixa_notify = tcp_notify;
1535 1535 connp->conn_ixa->ixa_notify_cookie = tcp;
1536 1536
1537 1537 return (connp);
1538 1538 }
1539 1539
1540 1540 /*
1541 1541 * Handle connect to IPv4 destinations, including connections for AF_INET6
1542 1542 * sockets connecting to IPv4 mapped IPv6 destinations.
1543 1543 * Returns zero if OK, a positive errno, or a negative TLI error.
1544 1544 */
1545 1545 static int
1546 1546 tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
1547 1547 uint_t srcid)
1548 1548 {
1549 1549 ipaddr_t dstaddr = *dstaddrp;
1550 1550 uint16_t lport;
1551 1551 conn_t *connp = tcp->tcp_connp;
1552 1552 tcp_stack_t *tcps = tcp->tcp_tcps;
1553 1553 int error;
1554 1554
1555 1555 ASSERT(connp->conn_ipversion == IPV4_VERSION);
1556 1556
1557 1557 /* Check for attempt to connect to INADDR_ANY */
1558 1558 if (dstaddr == INADDR_ANY) {
1559 1559 /*
1560 1560 * SunOS 4.x and 4.3 BSD allow an application
1561 1561 * to connect a TCP socket to INADDR_ANY.
1562 1562 * When they do this, the kernel picks the
1563 1563 * address of one interface and uses it
1564 1564 * instead. The kernel usually ends up
1565 1565 * picking the address of the loopback
1566 1566 * interface. This is an undocumented feature.
1567 1567 * However, we provide the same thing here
1568 1568 * in order to have source and binary
1569 1569 * compatibility with SunOS 4.x.
1570 1570 * Update the T_CONN_REQ (sin/sin6) since it is used to
1571 1571 * generate the T_CONN_CON.
1572 1572 */
1573 1573 dstaddr = htonl(INADDR_LOOPBACK);
1574 1574 *dstaddrp = dstaddr;
1575 1575 }
1576 1576
1577 1577 /* Handle __sin6_src_id if socket not bound to an IP address */
1578 1578 if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
1579 1579 if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1580 1580 IPCL_ZONEID(connp), B_TRUE, tcps->tcps_netstack)) {
1581 1581 /* Mismatch - conn_laddr_v6 would be v6 address. */
1582 1582 return (EADDRNOTAVAIL);
1583 1583 }
1584 1584 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1585 1585 }
1586 1586
1587 1587 IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
1588 1588 connp->conn_fport = dstport;
1589 1589
1590 1590 /*
1591 1591 * At this point the remote destination address and remote port fields
1592 1592 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1593 1593 * have to see which state tcp was in so we can take appropriate action.
1594 1594 */
1595 1595 if (tcp->tcp_state == TCPS_IDLE) {
1596 1596 /*
1597 1597 * We support a quick connect capability here, allowing
1598 1598 		 * clients to transition directly from IDLE to SYN_SENT.
1599 1599 * tcp_bindi will pick an unused port, insert the connection
1600 1600 * in the bind hash and transition to BOUND state.
1601 1601 */
1602 1602 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1603 1603 tcp, B_TRUE);
1604 1604 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1605 1605 B_FALSE, B_FALSE);
1606 1606 if (lport == 0)
1607 1607 return (-TNOADDR);
1608 1608 }
1609 1609
1610 1610 /*
1611 1611 	 * Look up the route to determine a source address and the uinfo.
1612 1612 	 * Set up TCP parameters based on the metrics/DCE.
1613 1613 */
1614 1614 error = tcp_set_destination(tcp);
1615 1615 if (error != 0)
1616 1616 return (error);
1617 1617
1618 1618 /*
1619 1619 * Don't let an endpoint connect to itself.
1620 1620 */
1621 1621 if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
1622 1622 connp->conn_fport == connp->conn_lport)
1623 1623 return (-TBADADDR);
1624 1624
1625 1625 tcp->tcp_state = TCPS_SYN_SENT;
1626 1626
1627 1627 return (ipcl_conn_insert_v4(connp));
1628 1628 }
1629 1629
1630 1630 /*
1631 1631 * Handle connect to IPv6 destinations.
1632 1632 * Returns zero if OK, a positive errno, or a negative TLI error.
1633 1633 */
1634 1634 static int
1635 1635 tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
1636 1636 uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
1637 1637 {
1638 1638 uint16_t lport;
1639 1639 conn_t *connp = tcp->tcp_connp;
1640 1640 tcp_stack_t *tcps = tcp->tcp_tcps;
1641 1641 int error;
1642 1642
1643 1643 ASSERT(connp->conn_family == AF_INET6);
1644 1644
1645 1645 /*
1646 1646 * If we're here, it means that the destination address is a native
1647 1647 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
1648 1648 * reason why it might not be IPv6 is if the socket was bound to an
1649 1649 * IPv4-mapped IPv6 address.
1650 1650 */
1651 1651 if (connp->conn_ipversion != IPV6_VERSION)
1652 1652 return (-TBADADDR);
1653 1653
1654 1654 /*
1655 1655 * Interpret a zero destination to mean loopback.
1656 1656 * Update the T_CONN_REQ (sin/sin6) since it is used to
1657 1657 * generate the T_CONN_CON.
1658 1658 */
1659 1659 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
1660 1660 *dstaddrp = ipv6_loopback;
1661 1661
1662 1662 /* Handle __sin6_src_id if socket not bound to an IP address */
1663 1663 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
1664 1664 if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1665 1665 IPCL_ZONEID(connp), B_FALSE, tcps->tcps_netstack)) {
1666 1666 /* Mismatch - conn_laddr_v6 would be v4-mapped. */
1667 1667 return (EADDRNOTAVAIL);
1668 1668 }
1669 1669 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1670 1670 }
1671 1671
1672 1672 /*
1673 1673 * Take care of the scope_id now.
1674 1674 */
1675 1675 if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
1676 1676 connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
1677 1677 connp->conn_ixa->ixa_scopeid = scope_id;
1678 1678 } else {
1679 1679 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
1680 1680 }
1681 1681
1682 1682 connp->conn_flowinfo = flowinfo;
1683 1683 connp->conn_faddr_v6 = *dstaddrp;
1684 1684 connp->conn_fport = dstport;
1685 1685
1686 1686 /*
1687 1687 * At this point the remote destination address and remote port fields
1688 1688 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1689 1689 * have to see which state tcp was in so we can take appropriate action.
1690 1690 */
1691 1691 if (tcp->tcp_state == TCPS_IDLE) {
1692 1692 /*
1693 1693 * We support a quick connect capability here, allowing
1694 1694 		 * clients to transition directly from IDLE to SYN_SENT.
1695 1695 * tcp_bindi will pick an unused port, insert the connection
1696 1696 * in the bind hash and transition to BOUND state.
1697 1697 */
1698 1698 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1699 1699 tcp, B_TRUE);
1700 1700 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1701 1701 B_FALSE, B_FALSE);
1702 1702 if (lport == 0)
1703 1703 return (-TNOADDR);
1704 1704 }
1705 1705
1706 1706 /*
1707 1707 	 * Look up the route to determine a source address and the uinfo.
1708 1708 	 * Set up TCP parameters based on the metrics/DCE.
1709 1709 */
1710 1710 error = tcp_set_destination(tcp);
1711 1711 if (error != 0)
1712 1712 return (error);
1713 1713
1714 1714 /*
1715 1715 * Don't let an endpoint connect to itself.
1716 1716 */
1717 1717 if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
1718 1718 connp->conn_fport == connp->conn_lport)
1719 1719 return (-TBADADDR);
1720 1720
1721 1721 tcp->tcp_state = TCPS_SYN_SENT;
1722 1722
1723 1723 return (ipcl_conn_insert_v6(connp));
1724 1724 }
1725 1725
1726 1726 /*
1727 1727 * Disconnect
1728 1728 * Note that unlike other functions this returns a positive tli error
1729 1729 * when it fails; it never returns an errno.
1730 1730 */
1731 1731 static int
1732 1732 tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
1733 1733 {
1734 1734 conn_t *lconnp;
1735 1735 tcp_stack_t *tcps = tcp->tcp_tcps;
1736 1736 conn_t *connp = tcp->tcp_connp;
1737 1737
1738 1738 /*
1739 1739 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
1740 1740 * when the stream is in BOUND state. Do not send a reset,
1741 1741 * since the destination IP address is not valid, and it can
1742 1742 * be the initialized value of all zeros (broadcast address).
1743 1743 */
1744 1744 if (tcp->tcp_state <= TCPS_BOUND) {
1745 1745 if (connp->conn_debug) {
1746 1746 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
1747 1747 "tcp_disconnect: bad state, %d", tcp->tcp_state);
1748 1748 }
1749 1749 return (TOUTSTATE);
1750 1750 } else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1751 1751 TCPS_CONN_DEC(tcps);
1752 1752 }
1753 1753
1754 1754 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
1755 1755
1756 1756 /*
1757 1757 * According to TPI, for non-listeners, ignore seqnum
1758 1758 * and disconnect.
1759 1759 		 * The following interpretation of a -1 seqnum is historical
1760 1760 		 * and arguably implied by TPI (TPI only states that for T_CONN_IND,
1761 1761 * a valid seqnum should not be -1).
1762 1762 *
1763 1763 * -1 means disconnect everything
1764 1764 * regardless even on a listener.
1765 1765 */
1766 1766
1767 1767 int old_state = tcp->tcp_state;
1768 1768 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
1769 1769
1770 1770 /*
1771 1771 * The connection can't be on the tcp_time_wait_head list
1772 1772 * since it is not detached.
1773 1773 */
1774 1774 ASSERT(tcp->tcp_time_wait_next == NULL);
1775 1775 ASSERT(tcp->tcp_time_wait_prev == NULL);
1776 1776 ASSERT(tcp->tcp_time_wait_expire == 0);
1777 1777 /*
1778 1778 * If it used to be a listener, check to make sure no one else
1779 1779 * has taken the port before switching back to LISTEN state.
1780 1780 */
1781 1781 if (connp->conn_ipversion == IPV4_VERSION) {
1782 1782 lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
1783 1783 connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
1784 1784 } else {
1785 1785 uint_t ifindex = 0;
1786 1786
1787 1787 if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
1788 1788 ifindex = connp->conn_ixa->ixa_scopeid;
1789 1789
1790 1790 /* Allow conn_bound_if listeners? */
1791 1791 lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
1792 1792 &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
1793 1793 ipst);
1794 1794 }
1795 1795 if (tcp->tcp_conn_req_max && lconnp == NULL) {
1796 1796 tcp->tcp_state = TCPS_LISTEN;
1797 1797 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1798 1798 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1799 1799 NULL, int32_t, old_state);
1800 1800 } else if (old_state > TCPS_BOUND) {
1801 1801 tcp->tcp_conn_req_max = 0;
1802 1802 tcp->tcp_state = TCPS_BOUND;
1803 1803 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1804 1804 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1805 1805 NULL, int32_t, old_state);
1806 1806
1807 1807 /*
1808 1808 * If this end point is not going to become a listener,
1809 1809 * decrement the listener connection count if
1810 1810 * necessary. Note that we do not do this if it is
1811 1811 		 * going to be a listener (the above if case) since
1812 1812 * then it may remove the counter struct.
1813 1813 */
1814 1814 if (tcp->tcp_listen_cnt != NULL)
1815 1815 TCP_DECR_LISTEN_CNT(tcp);
1816 1816 }
1817 1817 if (lconnp != NULL)
1818 1818 CONN_DEC_REF(lconnp);
1819 1819 switch (old_state) {
1820 1820 case TCPS_SYN_SENT:
1821 1821 case TCPS_SYN_RCVD:
1822 1822 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1823 1823 break;
1824 1824 case TCPS_ESTABLISHED:
1825 1825 case TCPS_CLOSE_WAIT:
1826 1826 TCPS_BUMP_MIB(tcps, tcpEstabResets);
1827 1827 break;
1828 1828 }
1829 1829
1830 1830 if (tcp->tcp_fused)
1831 1831 tcp_unfuse(tcp);
1832 1832
1833 1833 mutex_enter(&tcp->tcp_eager_lock);
1834 1834 if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
1835 1835 (tcp->tcp_conn_req_cnt_q != 0)) {
1836 1836 tcp_eager_cleanup(tcp, 0);
1837 1837 }
1838 1838 mutex_exit(&tcp->tcp_eager_lock);
1839 1839
1840 1840 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
1841 1841 tcp->tcp_rnxt, TH_RST | TH_ACK);
1842 1842
1843 1843 tcp_reinit(tcp);
1844 1844
1845 1845 return (0);
1846 1846 } else if (!tcp_eager_blowoff(tcp, seqnum)) {
1847 1847 return (TBADSEQ);
1848 1848 }
1849 1849 return (0);
1850 1850 }
1851 1851
1852 1852 /*
1853 1853 * Our client hereby directs us to reject the connection request
1854 1854 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
1855 1855 * of sending the appropriate RST, not an ICMP error.
1856 1856 */
1857 1857 void
1858 1858 tcp_disconnect(tcp_t *tcp, mblk_t *mp)
1859 1859 {
1860 1860 t_scalar_t seqnum;
1861 1861 int error;
1862 1862 conn_t *connp = tcp->tcp_connp;
1863 1863
1864 1864 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
1865 1865 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
1866 1866 tcp_err_ack(tcp, mp, TPROTO, 0);
1867 1867 return;
1868 1868 }
1869 1869 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
1870 1870 error = tcp_disconnect_common(tcp, seqnum);
1871 1871 if (error != 0)
1872 1872 tcp_err_ack(tcp, mp, error, 0);
1873 1873 else {
1874 1874 if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1875 1875 /* Send M_FLUSH according to TPI */
1876 1876 (void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
1877 1877 }
1878 1878 mp = mi_tpi_ok_ack_alloc(mp);
1879 1879 if (mp != NULL)
1880 1880 putnext(connp->conn_rq, mp);
1881 1881 }
1882 1882 }
1883 1883
1884 1884 /*
1885 1885 * Handle reinitialization of a tcp structure.
1886 1886 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
1887 1887 */
1888 1888 static void
1889 1889 tcp_reinit(tcp_t *tcp)
1890 1890 {
1891 1891 mblk_t *mp;
1892 1892 tcp_stack_t *tcps = tcp->tcp_tcps;
1893 1893 conn_t *connp = tcp->tcp_connp;
1894 1894 int32_t oldstate;
1895 1895
1896 1896 /* tcp_reinit should never be called for detached tcp_t's */
1897 1897 ASSERT(tcp->tcp_listener == NULL);
1898 1898 ASSERT((connp->conn_family == AF_INET &&
1899 1899 connp->conn_ipversion == IPV4_VERSION) ||
1900 1900 (connp->conn_family == AF_INET6 &&
1901 1901 (connp->conn_ipversion == IPV4_VERSION ||
1902 1902 connp->conn_ipversion == IPV6_VERSION)));
1903 1903
1904 1904 /* Cancel outstanding timers */
1905 1905 tcp_timers_stop(tcp);
1906 1906
1907 1907 tcp_close_mpp(&tcp->tcp_xmit_head);
1908 1908 if (tcp->tcp_snd_zcopy_aware)
1909 1909 tcp_zcopy_notify(tcp);
1910 1910 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
1911 1911 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
1912 1912 mutex_enter(&tcp->tcp_non_sq_lock);
1913 1913 if (tcp->tcp_flow_stopped &&
1914 1914 TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
1915 1915 tcp_clrqfull(tcp);
1916 1916 }
1917 1917 mutex_exit(&tcp->tcp_non_sq_lock);
1918 1918 tcp_close_mpp(&tcp->tcp_reass_head);
1919 1919 tcp->tcp_reass_tail = NULL;
1920 1920 if (tcp->tcp_rcv_list != NULL) {
1921 1921 /* Free b_next chain */
1922 1922 tcp_close_mpp(&tcp->tcp_rcv_list);
1923 1923 tcp->tcp_rcv_last_head = NULL;
1924 1924 tcp->tcp_rcv_last_tail = NULL;
1925 1925 tcp->tcp_rcv_cnt = 0;
1926 1926 }
1927 1927 tcp->tcp_rcv_last_tail = NULL;
1928 1928
1929 1929 if ((mp = tcp->tcp_urp_mp) != NULL) {
1930 1930 freemsg(mp);
1931 1931 tcp->tcp_urp_mp = NULL;
1932 1932 }
1933 1933 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1934 1934 freemsg(mp);
1935 1935 tcp->tcp_urp_mark_mp = NULL;
1936 1936 }
1937 1937 if (tcp->tcp_fused_sigurg_mp != NULL) {
1938 1938 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1939 1939 freeb(tcp->tcp_fused_sigurg_mp);
1940 1940 tcp->tcp_fused_sigurg_mp = NULL;
1941 1941 }
1942 1942 if (tcp->tcp_ordrel_mp != NULL) {
1943 1943 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1944 1944 freeb(tcp->tcp_ordrel_mp);
1945 1945 tcp->tcp_ordrel_mp = NULL;
1946 1946 }
1947 1947
1948 1948 /*
1949 1949 * Following is a union with two members which are
1950 1950 * identical types and size so the following cleanup
1951 1951 * is enough.
1952 1952 */
1953 1953 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1954 1954
1955 1955 CL_INET_DISCONNECT(connp);
1956 1956
1957 1957 /*
1958 1958 * The connection can't be on the tcp_time_wait_head list
1959 1959 * since it is not detached.
1960 1960 */
1961 1961 ASSERT(tcp->tcp_time_wait_next == NULL);
1962 1962 ASSERT(tcp->tcp_time_wait_prev == NULL);
1963 1963 ASSERT(tcp->tcp_time_wait_expire == 0);
1964 1964
1965 1965 /*
1966 1966 * Reset/preserve other values
1967 1967 */
1968 1968 tcp_reinit_values(tcp);
1969 1969 ipcl_hash_remove(connp);
1970 1970 /* Note that ixa_cred gets cleared in ixa_cleanup */
1971 1971 ixa_cleanup(connp->conn_ixa);
1972 1972 tcp_ipsec_cleanup(tcp);
1973 1973
1974 1974 connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
1975 1975 connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
1976 1976 oldstate = tcp->tcp_state;
1977 1977
1978 1978 if (tcp->tcp_conn_req_max != 0) {
1979 1979 /*
1980 1980 * This is the case when a TLI program uses the same
1981 1981 * transport end point to accept a connection. This
1982 1982 * makes the TCP both a listener and acceptor. When
1983 1983 * this connection is closed, we need to set the state
1984 1984 * back to TCPS_LISTEN. Make sure that the eager list
1985 1985 * is reinitialized.
1986 1986 *
1987 1987 * Note that this stream is still bound to the four
1988 1988 * tuples of the previous connection in IP. If a new
1989 1989 * SYN with different foreign address comes in, IP will
1990 1990 * not find it and will send it to the global queue. In
1991 1991 * the global queue, TCP will do a tcp_lookup_listener()
1992 1992 * to find this stream. This works because this stream
1993 1993 * is only removed from connected hash.
1994 1994 *
1995 1995 */
1996 1996 tcp->tcp_state = TCPS_LISTEN;
1997 1997 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
1998 1998 tcp->tcp_eager_next_drop_q0 = tcp;
1999 1999 tcp->tcp_eager_prev_drop_q0 = tcp;
2000 2000 /*
2001 2001 * Initially set conn_recv to tcp_input_listener_unbound to try
2002 2002 * to pick a good squeue for the listener when the first SYN
2003 2003 * arrives. tcp_input_listener_unbound sets it to
2004 2004 * tcp_input_listener on that first SYN.
2005 2005 */
2006 2006 connp->conn_recv = tcp_input_listener_unbound;
2007 2007
2008 2008 connp->conn_proto = IPPROTO_TCP;
2009 2009 connp->conn_faddr_v6 = ipv6_all_zeros;
2010 2010 connp->conn_fport = 0;
2011 2011
2012 2012 (void) ipcl_bind_insert(connp);
2013 2013 } else {
2014 2014 tcp->tcp_state = TCPS_BOUND;
2015 2015 }
2016 2016
2017 2017 /*
2018 2018 * Initialize to default values
2019 2019 */
2020 2020 tcp_init_values(tcp, NULL);
2021 2021
2022 2022 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
2023 2023 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
2024 2024 int32_t, oldstate);
2025 2025
2026 2026 ASSERT(tcp->tcp_ptpbhn != NULL);
2027 2027 tcp->tcp_rwnd = connp->conn_rcvbuf;
2028 2028 tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
2029 2029 tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
2030 2030 }
2031 2031
2032 2032 /*
2033 2033  * Force values to zero that need to be zero.
2034 2034  * Do not touch values associated with the BOUND or LISTEN state
2035 2035 * since the connection will end up in that state after the reinit.
2036 2036 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
2037 2037 * structure!
2038 2038 */
2039 2039 static void
2040 2040 tcp_reinit_values(tcp_t *tcp)
2041 2041 {
2042 2042 tcp_stack_t *tcps = tcp->tcp_tcps;
2043 2043 conn_t *connp = tcp->tcp_connp;
2044 2044
2045 2045 #ifndef lint
2046 2046 #define DONTCARE(x)
2047 2047 #define PRESERVE(x)
2048 2048 #else
2049 2049 #define DONTCARE(x) ((x) = (x))
2050 2050 #define PRESERVE(x) ((x) = (x))
2051 2051 #endif /* lint */
2052 2052
2053 2053 PRESERVE(tcp->tcp_bind_hash_port);
2054 2054 PRESERVE(tcp->tcp_bind_hash);
2055 2055 PRESERVE(tcp->tcp_ptpbhn);
2056 2056 PRESERVE(tcp->tcp_acceptor_hash);
2057 2057 PRESERVE(tcp->tcp_ptpahn);
2058 2058
2059 2059 /* Should be ASSERT NULL on these with new code! */
2060 2060 ASSERT(tcp->tcp_time_wait_next == NULL);
2061 2061 ASSERT(tcp->tcp_time_wait_prev == NULL);
2062 2062 ASSERT(tcp->tcp_time_wait_expire == 0);
2063 2063 PRESERVE(tcp->tcp_state);
2064 2064 PRESERVE(connp->conn_rq);
2065 2065 PRESERVE(connp->conn_wq);
2066 2066
2067 2067 ASSERT(tcp->tcp_xmit_head == NULL);
2068 2068 ASSERT(tcp->tcp_xmit_last == NULL);
2069 2069 ASSERT(tcp->tcp_unsent == 0);
2070 2070 ASSERT(tcp->tcp_xmit_tail == NULL);
2071 2071 ASSERT(tcp->tcp_xmit_tail_unsent == 0);
2072 2072
2073 2073 tcp->tcp_snxt = 0; /* Displayed in mib */
2074 2074 tcp->tcp_suna = 0; /* Displayed in mib */
2075 2075 tcp->tcp_swnd = 0;
2076 2076 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_process_options */
2077 2077
2078 2078 if (connp->conn_ht_iphc != NULL) {
2079 2079 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
2080 2080 connp->conn_ht_iphc = NULL;
2081 2081 connp->conn_ht_iphc_allocated = 0;
2082 2082 connp->conn_ht_iphc_len = 0;
2083 2083 connp->conn_ht_ulp = NULL;
2084 2084 connp->conn_ht_ulp_len = 0;
2085 2085 tcp->tcp_ipha = NULL;
2086 2086 tcp->tcp_ip6h = NULL;
2087 2087 tcp->tcp_tcpha = NULL;
2088 2088 }
2089 2089
2090 2090 /* We clear any IP_OPTIONS and extension headers */
2091 2091 ip_pkt_free(&connp->conn_xmit_ipp);
2092 2092
2093 2093 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */
2094 2094 DONTCARE(tcp->tcp_ipha);
2095 2095 DONTCARE(tcp->tcp_ip6h);
2096 2096 DONTCARE(tcp->tcp_tcpha);
2097 2097 tcp->tcp_valid_bits = 0;
2098 2098
2099 2099 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */
2100 2100 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */
2101 2101 tcp->tcp_last_rcv_lbolt = 0;
2102 2102
2103 2103 tcp->tcp_init_cwnd = 0;
2104 2104
2105 2105 tcp->tcp_urp_last_valid = 0;
2106 2106 tcp->tcp_hard_binding = 0;
2107 2107
2108 2108 tcp->tcp_fin_acked = 0;
2109 2109 tcp->tcp_fin_rcvd = 0;
2110 2110 tcp->tcp_fin_sent = 0;
2111 2111 tcp->tcp_ordrel_done = 0;
2112 2112
2113 2113 tcp->tcp_detached = 0;
2114 2114
2115 2115 tcp->tcp_snd_ws_ok = B_FALSE;
2116 2116 tcp->tcp_snd_ts_ok = B_FALSE;
2117 2117 tcp->tcp_zero_win_probe = 0;
2118 2118
2119 2119 tcp->tcp_loopback = 0;
2120 2120 tcp->tcp_localnet = 0;
2121 2121 tcp->tcp_syn_defense = 0;
2122 2122 tcp->tcp_set_timer = 0;
2123 2123
2124 2124 tcp->tcp_active_open = 0;
2125 2125 tcp->tcp_rexmit = B_FALSE;
2126 2126 tcp->tcp_xmit_zc_clean = B_FALSE;
2127 2127
2128 2128 tcp->tcp_snd_sack_ok = B_FALSE;
2129 2129 tcp->tcp_hwcksum = B_FALSE;
2130 2130
2131 2131 DONTCARE(tcp->tcp_maxpsz_multiplier); /* Init in tcp_init_values */
2132 2132
2133 2133 tcp->tcp_conn_def_q0 = 0;
2134 2134 tcp->tcp_ip_forward_progress = B_FALSE;
2135 2135 tcp->tcp_ecn_ok = B_FALSE;
2136 2136
2137 2137 tcp->tcp_cwr = B_FALSE;
2138 2138 tcp->tcp_ecn_echo_on = B_FALSE;
2139 2139 tcp->tcp_is_wnd_shrnk = B_FALSE;
2140 2140
2141 2141 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
2142 2142 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
2143 2143
2144 2144 tcp->tcp_rcv_ws = 0;
2145 2145 tcp->tcp_snd_ws = 0;
2146 2146 tcp->tcp_ts_recent = 0;
2147 2147 tcp->tcp_rnxt = 0; /* Displayed in mib */
2148 2148 DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */
2149 2149 tcp->tcp_initial_pmtu = 0;
2150 2150
2151 2151 ASSERT(tcp->tcp_reass_head == NULL);
2152 2152 ASSERT(tcp->tcp_reass_tail == NULL);
2153 2153
2154 2154 tcp->tcp_cwnd_cnt = 0;
2155 2155
2156 2156 ASSERT(tcp->tcp_rcv_list == NULL);
2157 2157 ASSERT(tcp->tcp_rcv_last_head == NULL);
2158 2158 ASSERT(tcp->tcp_rcv_last_tail == NULL);
2159 2159 ASSERT(tcp->tcp_rcv_cnt == 0);
2160 2160
2161 2161 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
2162 2162 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */
2163 2163 tcp->tcp_csuna = 0;
2164 2164
2165 2165 tcp->tcp_rto = 0; /* Displayed in MIB */
2166 2166 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */
2167 2167 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */
2168 2168 tcp->tcp_rtt_update = 0;
2169 2169 tcp->tcp_rtt_sum = 0;
2170 2170 tcp->tcp_rtt_cnt = 0;
2171 2171
2172 2172 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2173 2173 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2174 2174
2175 2175 tcp->tcp_rack = 0; /* Displayed in mib */
2176 2176 tcp->tcp_rack_cnt = 0;
2177 2177 tcp->tcp_rack_cur_max = 0;
2178 2178 tcp->tcp_rack_abs_max = 0;
2179 2179
2180 2180 tcp->tcp_max_swnd = 0;
2181 2181
2182 2182 ASSERT(tcp->tcp_listener == NULL);
2183 2183
2184 2184 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */
2185 2185 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */
2186 2186 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */
2187 2187 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */
2188 2188
2189 2189 ASSERT(tcp->tcp_conn_req_cnt_q == 0);
2190 2190 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
2191 2191 PRESERVE(tcp->tcp_conn_req_max);
2192 2192 PRESERVE(tcp->tcp_conn_req_seqnum);
2193 2193
2194 2194 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
2195 2195 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
2196 2196 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
2197 2197 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */
2198 2198
2199 2199 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */
2200 2200 ASSERT(tcp->tcp_urp_mp == NULL);
2201 2201 ASSERT(tcp->tcp_urp_mark_mp == NULL);
2202 2202 ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
2203 2203
2204 2204 ASSERT(tcp->tcp_eager_next_q == NULL);
2205 2205 ASSERT(tcp->tcp_eager_last_q == NULL);
2206 2206 ASSERT((tcp->tcp_eager_next_q0 == NULL &&
2207 2207 tcp->tcp_eager_prev_q0 == NULL) ||
2208 2208 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
2209 2209 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);
2210 2210
2211 2211 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
2212 2212 tcp->tcp_eager_prev_drop_q0 == NULL) ||
2213 2213 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);
2214 2214
2215 2215 DONTCARE(tcp->tcp_ka_rinterval); /* Init in tcp_init_values */
2216 2216 DONTCARE(tcp->tcp_ka_abort_thres); /* Init in tcp_init_values */
2217 2217 DONTCARE(tcp->tcp_ka_cnt); /* Init in tcp_init_values */
2218 2218
2219 2219 tcp->tcp_client_errno = 0;
2220 2220
2221 2221 DONTCARE(connp->conn_sum); /* Init in tcp_init_values */
2222 2222
2223 2223 connp->conn_faddr_v6 = ipv6_all_zeros; /* Displayed in MIB */
2224 2224
2225 2225 PRESERVE(connp->conn_bound_addr_v6);
2226 2226 tcp->tcp_last_sent_len = 0;
2227 2227 tcp->tcp_dupack_cnt = 0;
2228 2228
2229 2229 connp->conn_fport = 0; /* Displayed in MIB */
2230 2230 PRESERVE(connp->conn_lport);
2231 2231
2232 2232 PRESERVE(tcp->tcp_acceptor_lockp);
2233 2233
2234 2234 ASSERT(tcp->tcp_ordrel_mp == NULL);
2235 2235 PRESERVE(tcp->tcp_acceptor_id);
2236 2236 DONTCARE(tcp->tcp_ipsec_overhead);
2237 2237
2238 2238 PRESERVE(connp->conn_family);
2239 2239 /* Remove any remnants of mapped address binding */
2240 2240 if (connp->conn_family == AF_INET6) {
2241 2241 connp->conn_ipversion = IPV6_VERSION;
2242 2242 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2243 2243 } else {
2244 2244 connp->conn_ipversion = IPV4_VERSION;
2245 2245 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2246 2246 }
2247 2247
2248 2248 connp->conn_bound_if = 0;
2249 2249 connp->conn_recv_ancillary.crb_all = 0;
2250 2250 tcp->tcp_recvifindex = 0;
2251 2251 tcp->tcp_recvhops = 0;
2252 2252 tcp->tcp_closed = 0;
2253 2253 if (tcp->tcp_hopopts != NULL) {
2254 2254 mi_free(tcp->tcp_hopopts);
2255 2255 tcp->tcp_hopopts = NULL;
2256 2256 tcp->tcp_hopoptslen = 0;
2257 2257 }
2258 2258 ASSERT(tcp->tcp_hopoptslen == 0);
2259 2259 if (tcp->tcp_dstopts != NULL) {
2260 2260 mi_free(tcp->tcp_dstopts);
2261 2261 tcp->tcp_dstopts = NULL;
2262 2262 tcp->tcp_dstoptslen = 0;
2263 2263 }
2264 2264 ASSERT(tcp->tcp_dstoptslen == 0);
2265 2265 if (tcp->tcp_rthdrdstopts != NULL) {
2266 2266 mi_free(tcp->tcp_rthdrdstopts);
2267 2267 tcp->tcp_rthdrdstopts = NULL;
2268 2268 tcp->tcp_rthdrdstoptslen = 0;
2269 2269 }
2270 2270 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
2271 2271 if (tcp->tcp_rthdr != NULL) {
2272 2272 mi_free(tcp->tcp_rthdr);
2273 2273 tcp->tcp_rthdr = NULL;
2274 2274 tcp->tcp_rthdrlen = 0;
2275 2275 }
2276 2276 ASSERT(tcp->tcp_rthdrlen == 0);
2277 2277
2278 2278 /* Reset fusion-related fields */
2279 2279 tcp->tcp_fused = B_FALSE;
2280 2280 tcp->tcp_unfusable = B_FALSE;
2281 2281 tcp->tcp_fused_sigurg = B_FALSE;
2282 2282 tcp->tcp_loopback_peer = NULL;
2283 2283
2284 2284 tcp->tcp_lso = B_FALSE;
2285 2285
2286 2286 tcp->tcp_in_ack_unsent = 0;
2287 2287 tcp->tcp_cork = B_FALSE;
2288 2288 tcp->tcp_tconnind_started = B_FALSE;
2289 2289
2290 2290 PRESERVE(tcp->tcp_squeue_bytes);
2291 2291
2292 2292 tcp->tcp_closemp_used = B_FALSE;
2293 2293
2294 2294 PRESERVE(tcp->tcp_rsrv_mp);
2295 2295 PRESERVE(tcp->tcp_rsrv_mp_lock);
2296 2296
2297 2297 #ifdef DEBUG
2298 2298 DONTCARE(tcp->tcmp_stk[0]);
2299 2299 #endif
2300 2300
2301 2301 PRESERVE(tcp->tcp_connid);
2302 2302
2303 2303 ASSERT(tcp->tcp_listen_cnt == NULL);
2304 2304 ASSERT(tcp->tcp_reass_tid == 0);
2305 2305
2306 2306 /* Allow the CC algorithm to clean up after itself. */
2307 2307 if (tcp->tcp_cc_algo->cb_destroy != NULL)
2308 2308 tcp->tcp_cc_algo->cb_destroy(&tcp->tcp_ccv);
2309 2309 tcp->tcp_cc_algo = NULL;
2310 2310
2311 2311 #undef DONTCARE
2312 2312 #undef PRESERVE
2313 2313 }
2314 2314
2315 2315 /*
2316 2316 * Initialize the various fields in tcp_t. If parent (the listener) is non
2317 2317  * NULL, certain values will be inherited from it.
2318 2318 */
2319 2319 void
2320 2320 tcp_init_values(tcp_t *tcp, tcp_t *parent)
2321 2321 {
2322 2322 tcp_stack_t *tcps = tcp->tcp_tcps;
2323 2323 conn_t *connp = tcp->tcp_connp;
2324 2324
2325 2325 ASSERT((connp->conn_family == AF_INET &&
2326 2326 connp->conn_ipversion == IPV4_VERSION) ||
2327 2327 (connp->conn_family == AF_INET6 &&
2328 2328 (connp->conn_ipversion == IPV4_VERSION ||
2329 2329 connp->conn_ipversion == IPV6_VERSION)));
2330 2330
2331 2331 tcp->tcp_ccv.type = IPPROTO_TCP;
2332 2332 tcp->tcp_ccv.ccvc.tcp = tcp;
2333 2333
2334 2334 if (parent == NULL) {
2335 2335 tcp->tcp_cc_algo = tcps->tcps_default_cc_algo;
2336 2336
2337 2337 tcp->tcp_naglim = tcps->tcps_naglim_def;
2338 2338
2339 2339 tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
2340 2340 tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
2341 2341 tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;
2342 2342
2343 2343 tcp->tcp_first_ctimer_threshold =
2344 2344 tcps->tcps_ip_notify_cinterval;
2345 2345 tcp->tcp_second_ctimer_threshold =
2346 2346 tcps->tcps_ip_abort_cinterval;
2347 2347 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
2348 2348 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;
2349 2349
2350 2350 tcp->tcp_fin_wait_2_flush_interval =
2351 2351 tcps->tcps_fin_wait_2_flush_interval;
2352 2352
2353 2353 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
2354 2354 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
2355 2355 tcp->tcp_ka_cnt = 0;
2356 2356 tcp->tcp_ka_rinterval = 0;
2357 2357
2358 2358 /*
2359 2359 * Default value of tcp_init_cwnd is 0, so no need to set here
2360 2360 * if parent is NULL. But we need to inherit it from parent.
2361 2361 */
2362 2362 } else {
2363 2363 /* Inherit various TCP parameters from the parent. */
2364 2364 tcp->tcp_cc_algo = parent->tcp_cc_algo;
2365 2365
2366 2366 tcp->tcp_naglim = parent->tcp_naglim;
2367 2367
2368 2368 tcp->tcp_rto_initial = parent->tcp_rto_initial;
2369 2369 tcp->tcp_rto_min = parent->tcp_rto_min;
2370 2370 tcp->tcp_rto_max = parent->tcp_rto_max;
2371 2371
2372 2372 tcp->tcp_first_ctimer_threshold =
2373 2373 parent->tcp_first_ctimer_threshold;
2374 2374 tcp->tcp_second_ctimer_threshold =
2375 2375 parent->tcp_second_ctimer_threshold;
2376 2376 tcp->tcp_first_timer_threshold =
2377 2377 parent->tcp_first_timer_threshold;
2378 2378 tcp->tcp_second_timer_threshold =
2379 2379 parent->tcp_second_timer_threshold;
2380 2380
2381 2381 tcp->tcp_fin_wait_2_flush_interval =
2382 2382 parent->tcp_fin_wait_2_flush_interval;
2383 2383
2384 2384 tcp->tcp_ka_interval = parent->tcp_ka_interval;
2385 2385 tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;
2386 2386 tcp->tcp_ka_cnt = parent->tcp_ka_cnt;
2387 2387 tcp->tcp_ka_rinterval = parent->tcp_ka_rinterval;
2388 2388
2389 2389 tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
2390 2390 }
2391 2391
2392 2392 if (tcp->tcp_cc_algo->cb_init != NULL)
2393 2393 VERIFY(tcp->tcp_cc_algo->cb_init(&tcp->tcp_ccv) == 0);
2394 2394
2395 2395 /*
2396 2396 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
2397 2397 * will be close to tcp_rexmit_interval_initial. By doing this, we
2398 2398 * allow the algorithm to adjust slowly to large fluctuations of RTT
2399 2399 	 * during the first few transmissions of a connection as seen in slow
2400 2400 * links.
2401 2401 */
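	/*
	 * A rough sketch of why these seed values work, assuming the usual
	 * scaled form in which the RTO is computed as approximately
	 * (tcp_rtt_sa >> 3) + tcp_rtt_sd plus small fixed terms:
	 *
	 *	tcp_rtt_sa = 4 * rto_initial, so tcp_rtt_sa >> 3 = rto_initial / 2
	 *	tcp_rtt_sd = rto_initial / 2
	 *	RTO ~= rto_initial / 2 + rto_initial / 2 = rto_initial
	 *
	 * which is why the first computed RTO comes out close to
	 * tcp_rto_initial, as intended above.
	 */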
2402 2402 tcp->tcp_rtt_sa = MSEC2NSEC(tcp->tcp_rto_initial) << 2;
2403 2403 tcp->tcp_rtt_sd = MSEC2NSEC(tcp->tcp_rto_initial) >> 1;
2404 2404 tcp->tcp_rto = tcp_calculate_rto(tcp, tcps,
2405 2405 tcps->tcps_conn_grace_period);
2406 2406
2407 2407 tcp->tcp_timer_backoff = 0;
2408 2408 tcp->tcp_ms_we_have_waited = 0;
2409 2409 tcp->tcp_last_recv_time = ddi_get_lbolt();
2410 2410 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
2411 2411 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
2412 2412
2413 2413 tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;
2414 2414
2415 2415 /* NOTE: ISS is now set in tcp_set_destination(). */
2416 2416
2417 2417 /* Reset fusion-related fields */
2418 2418 tcp->tcp_fused = B_FALSE;
2419 2419 tcp->tcp_unfusable = B_FALSE;
2420 2420 tcp->tcp_fused_sigurg = B_FALSE;
2421 2421 tcp->tcp_loopback_peer = NULL;
2422 2422
2423 2423 /* We rebuild the header template on the next connect/conn_request */
2424 2424
2425 2425 connp->conn_mlp_type = mlptSingle;
2426 2426
2427 2427 /*
2428 2428 * Init the window scale to the max so tcp_rwnd_set() won't pare
2429 2429 * down tcp_rwnd. tcp_set_destination() will set the right value later.
2430 2430 */
2431 2431 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
2432 2432 tcp->tcp_rwnd = connp->conn_rcvbuf;
2433 2433
2434 2434 tcp->tcp_cork = B_FALSE;
2435 2435 /*
2436 2436 * Init the tcp_debug option if it wasn't already set. This value
2437 2437 * determines whether TCP
2438 2438 * calls strlog() to print out debug messages. Doing this
2439 2439 	 * initialization here means that this value is not inherited through
2440 2440 * tcp_reinit().
2441 2441 */
2442 2442 if (!connp->conn_debug)
2443 2443 connp->conn_debug = tcps->tcps_dbg;
2444 2444 }
2445 2445
2446 2446 /*
2447 2447 * Update the TCP connection according to change of PMTU.
2448 2448 *
2449 2449  * Path MTU might have changed by either increase or decrease, so we need to
2450 2450 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
2451 2451 * or negative MSS, since tcp_mss_set() will do it.
2452 2452 */
2453 2453 void
2454 2454 tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
2455 2455 {
2456 2456 uint32_t pmtu;
2457 2457 int32_t mss;
2458 2458 conn_t *connp = tcp->tcp_connp;
2459 2459 ip_xmit_attr_t *ixa = connp->conn_ixa;
2460 2460 iaflags_t ixaflags;
2461 2461
2462 2462 if (tcp->tcp_tcps->tcps_ignore_path_mtu)
2463 2463 return;
2464 2464
2465 2465 if (tcp->tcp_state < TCPS_ESTABLISHED)
2466 2466 return;
2467 2467
2468 2468 /*
2469 2469 * Always call ip_get_pmtu() to make sure that IP has updated
2470 2470 * ixa_flags properly.
2471 2471 */
2472 2472 pmtu = ip_get_pmtu(ixa);
2473 2473 ixaflags = ixa->ixa_flags;
2474 2474
2475 2475 /*
2476 2476 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
2477 2477 * IPsec overhead if applied. Make sure to use the most recent
2478 2478 * IPsec information.
2479 2479 */
2480 2480 mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);
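	/*
	 * Illustrative example, assuming a plain IPv4 connection with no IP
	 * or TCP options and no IPsec: conn_ht_iphc_len is then 40 (20-byte
	 * IPv4 header plus 20-byte TCP header), so a 1500-byte PMTU yields
	 * the familiar 1460-byte MSS.
	 */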
2481 2481
2482 2482 /*
2483 2483 * Nothing to change, so just return.
2484 2484 */
2485 2485 if (mss == tcp->tcp_mss)
2486 2486 return;
2487 2487
2488 2488 /*
2489 2489 * Currently, for ICMP errors, only PMTU decrease is handled.
2490 2490 */
2491 2491 if (mss > tcp->tcp_mss && decrease_only)
2492 2492 return;
2493 2493
2494 2494 DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);
2495 2495
2496 2496 /*
2497 2497 * Update ixa_fragsize and ixa_pmtu.
2498 2498 */
2499 2499 ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;
2500 2500
2501 2501 /*
2502 2502 * Adjust MSS and all relevant variables.
2503 2503 */
2504 2504 tcp_mss_set(tcp, mss);
2505 2505
2506 2506 /*
2507 2507 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
2508 2508 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
2509 2509 * has a (potentially different) min size we do the same. Make sure to
2510 2510 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
2511 2511 * fragment the packet.
2512 2512 *
2513 2513 	 * LSO over IPv6 cannot be fragmented, so LSO needs to be disabled
2514 2514 * when IPv6 fragmentation is needed.
2515 2515 */
2516 2516 if (mss < tcp->tcp_tcps->tcps_mss_min)
2517 2517 ixaflags |= IXAF_PMTU_TOO_SMALL;
2518 2518
2519 2519 if (ixaflags & IXAF_PMTU_TOO_SMALL)
2520 2520 ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);
2521 2521
2522 2522 if ((connp->conn_ipversion == IPV4_VERSION) &&
2523 2523 !(ixaflags & IXAF_PMTU_IPV4_DF)) {
2524 2524 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
2525 2525 }
2526 2526 ixa->ixa_flags = ixaflags;
2527 2527 }
2528 2528
2529 2529 int
2530 2530 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
2531 2531 {
2532 2532 conn_t *connp = tcp->tcp_connp;
2533 2533 queue_t *q = connp->conn_rq;
2534 2534 int32_t mss = tcp->tcp_mss;
2535 2535 int maxpsz;
2536 2536
2537 2537 if (TCP_IS_DETACHED(tcp))
2538 2538 return (mss);
2539 2539 if (tcp->tcp_fused) {
2540 2540 maxpsz = tcp_fuse_maxpsz(tcp);
2541 2541 mss = INFPSZ;
2542 2542 } else if (tcp->tcp_maxpsz_multiplier == 0) {
2543 2543 /*
2544 2544 * Set the sd_qn_maxpsz according to the socket send buffer
2545 2545 * size, and sd_maxblk to INFPSZ (-1). This will essentially
2546 2546 * instruct the stream head to copyin user data into contiguous
2547 2547 * kernel-allocated buffers without breaking it up into smaller
2548 2548 * chunks. We round up the buffer size to the nearest SMSS.
2549 2549 */
2550 2550 maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
2551 2551 mss = INFPSZ;
2552 2552 } else {
2553 2553 /*
2554 2554 		 * Set sd_qn_maxpsz to approx half the (receiver's) buffer
2555 2555 * (and a multiple of the mss). This instructs the stream
2556 2556 * head to break down larger than SMSS writes into SMSS-
2557 2557 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
2558 2558 */
2559 2559 maxpsz = tcp->tcp_maxpsz_multiplier * mss;
2560 2560 if (maxpsz > connp->conn_sndbuf / 2) {
2561 2561 maxpsz = connp->conn_sndbuf / 2;
2562 2562 /* Round up to nearest mss */
2563 2563 maxpsz = MSS_ROUNDUP(maxpsz, mss);
2564 2564 }
2565 2565 }
2566 2566
2567 2567 (void) proto_set_maxpsz(q, connp, maxpsz);
2568 2568 if (!(IPCL_IS_NONSTR(connp)))
2569 2569 connp->conn_wq->q_maxpsz = maxpsz;
2570 2570 if (set_maxblk)
2571 2571 (void) proto_set_tx_maxblk(q, connp, mss);
2572 2572 return (mss);
2573 2573 }
2574 2574
2575 2575 /* For /dev/tcp aka AF_INET open */
2576 2576 static int
2577 2577 tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2578 2578 {
2579 2579 return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
2580 2580 }
2581 2581
2582 2582 /* For /dev/tcp6 aka AF_INET6 open */
2583 2583 static int
2584 2584 tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2585 2585 {
2586 2586 return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
2587 2587 }
2588 2588
2589 2589 conn_t *
2590 2590 tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
2591 2591 int *errorp)
2592 2592 {
2593 2593 tcp_t *tcp = NULL;
2594 2594 conn_t *connp;
2595 2595 zoneid_t zoneid;
2596 2596 tcp_stack_t *tcps;
2597 2597 squeue_t *sqp;
2598 2598
2599 2599 ASSERT(errorp != NULL);
2600 2600 /*
2601 2601 * Find the proper zoneid and netstack.
2602 2602 */
2603 2603 /*
2604 2604 * Special case for install: miniroot needs to be able to
2605 2605 * access files via NFS as though it were always in the
2606 2606 * global zone.
2607 2607 */
2608 2608 if (credp == kcred && nfs_global_client_only != 0) {
2609 2609 zoneid = GLOBAL_ZONEID;
2610 2610 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
2611 2611 netstack_tcp;
2612 2612 ASSERT(tcps != NULL);
2613 2613 } else {
2614 2614 netstack_t *ns;
2615 2615 int err;
2616 2616
2617 2617 if ((err = secpolicy_basic_net_access(credp)) != 0) {
2618 2618 *errorp = err;
2619 2619 return (NULL);
2620 2620 }
2621 2621
2622 2622 ns = netstack_find_by_cred(credp);
2623 2623 ASSERT(ns != NULL);
2624 2624 tcps = ns->netstack_tcp;
2625 2625 ASSERT(tcps != NULL);
2626 2626
2627 2627 /*
2628 2628 * For exclusive stacks we set the zoneid to zero
2629 2629 * to make TCP operate as if in the global zone.
2630 2630 */
2631 2631 if (tcps->tcps_netstack->netstack_stackid !=
2632 2632 GLOBAL_NETSTACKID)
2633 2633 zoneid = GLOBAL_ZONEID;
2634 2634 else
2635 2635 zoneid = crgetzoneid(credp);
2636 2636 }
2637 2637
2638 2638 sqp = IP_SQUEUE_GET((uint_t)gethrtime());
2639 2639 connp = tcp_get_conn(sqp, tcps);
2640 2640 /*
2641 2641 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
2642 2642 * so we drop it by one.
2643 2643 */
2644 2644 netstack_rele(tcps->tcps_netstack);
2645 2645 if (connp == NULL) {
2646 2646 *errorp = ENOSR;
2647 2647 return (NULL);
2648 2648 }
2649 2649 ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);
2650 2650
2651 2651 connp->conn_sqp = sqp;
2652 2652 connp->conn_initial_sqp = connp->conn_sqp;
2653 2653 connp->conn_ixa->ixa_sqp = connp->conn_sqp;
2654 2654 tcp = connp->conn_tcp;
2655 2655
2656 2656 /*
2657 2657 * Besides asking IP to set the checksum for us, have conn_ip_output
2658 2658 	 * do the following checks when necessary:
2659 2659 *
2660 2660 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
2661 2661 * IXAF_VERIFY_PMTU: verify PMTU changes
2662 2662 * IXAF_VERIFY_LSO: verify LSO capability changes
2663 2663 */
2664 2664 connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
2665 2665 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;
2666 2666
2667 2667 if (!tcps->tcps_dev_flow_ctl)
2668 2668 connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
2669 2669
2670 2670 if (isv6) {
2671 2671 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
2672 2672 connp->conn_ipversion = IPV6_VERSION;
2673 2673 connp->conn_family = AF_INET6;
2674 2674 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2675 2675 connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
2676 2676 } else {
2677 2677 connp->conn_ipversion = IPV4_VERSION;
2678 2678 connp->conn_family = AF_INET;
2679 2679 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2680 2680 connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
2681 2681 }
2682 2682 connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;
2683 2683
2684 2684 crhold(credp);
2685 2685 connp->conn_cred = credp;
2686 2686 connp->conn_cpid = curproc->p_pid;
2687 2687 connp->conn_open_time = ddi_get_lbolt64();
2688 2688
2689 2689 /* Cache things in the ixa without any refhold */
2690 2690 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
2691 2691 connp->conn_ixa->ixa_cred = credp;
2692 2692 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
2693 2693
2694 2694 connp->conn_zoneid = zoneid;
2695 2695 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */
2696 2696 connp->conn_ixa->ixa_zoneid = zoneid;
2697 2697 connp->conn_mlp_type = mlptSingle;
2698 2698 ASSERT(connp->conn_netstack == tcps->tcps_netstack);
2699 2699 ASSERT(tcp->tcp_tcps == tcps);
2700 2700
2701 2701 /*
2702 2702 * If the caller has the process-wide flag set, then default to MAC
2703 2703 * exempt mode. This allows read-down to unlabeled hosts.
2704 2704 */
2705 2705 if (getpflags(NET_MAC_AWARE, credp) != 0)
2706 2706 connp->conn_mac_mode = CONN_MAC_AWARE;
2707 2707
2708 2708 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
2709 2709
2710 2710 if (issocket) {
2711 2711 tcp->tcp_issocket = 1;
2712 2712 }
2713 2713
2714 2714 connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
2715 2715 connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
2716 2716 if (tcps->tcps_snd_lowat_fraction != 0) {
2717 2717 connp->conn_sndlowat = connp->conn_sndbuf /
2718 2718 tcps->tcps_snd_lowat_fraction;
2719 2719 } else {
2720 2720 connp->conn_sndlowat = tcps->tcps_xmit_lowat;
2721 2721 }
2722 2722 connp->conn_so_type = SOCK_STREAM;
2723 2723 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2724 2724 tcps->tcps_wroff_xtra;
2725 2725
2726 2726 SOCK_CONNID_INIT(tcp->tcp_connid);
2727 2727 /* DTrace ignores this - it isn't a tcp:::state-change */
2728 2728 tcp->tcp_state = TCPS_IDLE;
2729 2729 tcp_init_values(tcp, NULL);
2730 2730 return (connp);
2731 2731 }
2732 2732
2733 2733 static int
2734 2734 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
2735 2735 boolean_t isv6)
2736 2736 {
2737 2737 tcp_t *tcp = NULL;
2738 2738 conn_t *connp = NULL;
2739 2739 int err;
2740 2740 vmem_t *minor_arena = NULL;
2741 2741 dev_t conn_dev;
2742 2742 boolean_t issocket;
2743 2743
2744 2744 if (q->q_ptr != NULL)
2745 2745 return (0);
2746 2746
2747 2747 if (sflag == MODOPEN)
2748 2748 return (EINVAL);
2749 2749
2750 2750 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
2751 2751 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
2752 2752 minor_arena = ip_minor_arena_la;
2753 2753 } else {
2754 2754 /*
2755 2755 * Either minor numbers in the large arena were exhausted
2756 2756 		 * or a non-socket application is doing the open.
2757 2757 * Try to allocate from the small arena.
2758 2758 */
2759 2759 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
2760 2760 return (EBUSY);
2761 2761 }
2762 2762 minor_arena = ip_minor_arena_sa;
2763 2763 }
2764 2764
2765 2765 ASSERT(minor_arena != NULL);
2766 2766
2767 2767 *devp = makedevice(getmajor(*devp), (minor_t)conn_dev);
2768 2768
2769 2769 if (flag & SO_FALLBACK) {
2770 2770 /*
2771 2771 		 * A non-STREAMS socket needs a stream to fall back to
2772 2772 */
2773 2773 RD(q)->q_ptr = (void *)conn_dev;
2774 2774 WR(q)->q_qinfo = &tcp_fallback_sock_winit;
2775 2775 WR(q)->q_ptr = (void *)minor_arena;
2776 2776 qprocson(q);
2777 2777 return (0);
2778 2778 } else if (flag & SO_ACCEPTOR) {
2779 2779 q->q_qinfo = &tcp_acceptor_rinit;
2780 2780 /*
2781 2781 * the conn_dev and minor_arena will be subsequently used by
2782 2782 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
2783 2783 * the minor device number for this connection from the q_ptr.
2784 2784 */
2785 2785 RD(q)->q_ptr = (void *)conn_dev;
2786 2786 WR(q)->q_qinfo = &tcp_acceptor_winit;
2787 2787 WR(q)->q_ptr = (void *)minor_arena;
2788 2788 qprocson(q);
2789 2789 return (0);
2790 2790 }
2791 2791
2792 2792 issocket = flag & SO_SOCKSTR;
2793 2793 connp = tcp_create_common(credp, isv6, issocket, &err);
2794 2794
2795 2795 if (connp == NULL) {
2796 2796 inet_minor_free(minor_arena, conn_dev);
2797 2797 q->q_ptr = WR(q)->q_ptr = NULL;
2798 2798 return (err);
2799 2799 }
2800 2800
2801 2801 connp->conn_rq = q;
2802 2802 connp->conn_wq = WR(q);
2803 2803 q->q_ptr = WR(q)->q_ptr = connp;
2804 2804
2805 2805 connp->conn_dev = conn_dev;
2806 2806 connp->conn_minor_arena = minor_arena;
2807 2807
2808 2808 ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
2809 2809 ASSERT(WR(q)->q_qinfo == &tcp_winit);
2810 2810
2811 2811 tcp = connp->conn_tcp;
2812 2812
2813 2813 if (issocket) {
2814 2814 WR(q)->q_qinfo = &tcp_sock_winit;
2815 2815 } else {
2816 2816 #ifdef _ILP32
2817 2817 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
2818 2818 #else
2819 2819 tcp->tcp_acceptor_id = conn_dev;
2820 2820 #endif /* _ILP32 */
2821 2821 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
2822 2822 }
2823 2823
2824 2824 /*
2825 2825 * Put the ref for TCP. Ref for IP was already put
2826 2826 	 * by ipcl_conn_create. Also make the conn_t globally
2827 2827 * visible to walkers
2828 2828 */
2829 2829 mutex_enter(&connp->conn_lock);
2830 2830 CONN_INC_REF_LOCKED(connp);
2831 2831 ASSERT(connp->conn_ref == 2);
2832 2832 connp->conn_state_flags &= ~CONN_INCIPIENT;
2833 2833 mutex_exit(&connp->conn_lock);
2834 2834
2835 2835 qprocson(q);
2836 2836 return (0);
2837 2837 }
2838 2838
2839 2839 /*
2840 2840 * Build/update the tcp header template (in conn_ht_iphc) based on
2841 2841 * conn_xmit_ipp. The headers include ip6_t, any extension
2842 2842 * headers, and the maximum size tcp header (to avoid reallocation
2843 2843 * on the fly for additional tcp options).
2844 2844 *
2845 2845 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
2846 2846 * Returns failure if can't allocate memory.
2847 2847 */
2848 2848 int
2849 2849 tcp_build_hdrs(tcp_t *tcp)
2850 2850 {
2851 2851 tcp_stack_t *tcps = tcp->tcp_tcps;
2852 2852 conn_t *connp = tcp->tcp_connp;
2853 2853 char buf[TCP_MAX_HDR_LENGTH];
2854 2854 uint_t buflen;
2855 2855 uint_t ulplen = TCP_MIN_HEADER_LENGTH;
2856 2856 uint_t extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
2857 2857 tcpha_t *tcpha;
2858 2858 uint32_t cksum;
2859 2859 int error;
2860 2860
2861 2861 /*
2862 2862 * We might be called after the connection is set up, and we might
2863 2863 * have TS options already in the TCP header. Thus we save any
2864 2864 * existing tcp header.
2865 2865 */
2866 2866 buflen = connp->conn_ht_ulp_len;
2867 2867 if (buflen != 0) {
2868 2868 bcopy(connp->conn_ht_ulp, buf, buflen);
2869 2869 extralen -= buflen - ulplen;
2870 2870 ulplen = buflen;
2871 2871 }
2872 2872
2873 2873 /* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
2874 2874 mutex_enter(&connp->conn_lock);
2875 2875 error = conn_build_hdr_template(connp, ulplen, extralen,
2876 2876 &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
2877 2877 mutex_exit(&connp->conn_lock);
2878 2878 if (error != 0)
2879 2879 return (error);
2880 2880
2881 2881 /*
2882 2882 * Any routing header/option has been massaged. The checksum difference
2883 2883 * is stored in conn_sum for later use.
2884 2884 */
2885 2885 tcpha = (tcpha_t *)connp->conn_ht_ulp;
2886 2886 tcp->tcp_tcpha = tcpha;
2887 2887
2888 2888 /* restore any old tcp header */
2889 2889 if (buflen != 0) {
2890 2890 bcopy(buf, connp->conn_ht_ulp, buflen);
2891 2891 } else {
2892 2892 tcpha->tha_sum = 0;
2893 2893 tcpha->tha_urp = 0;
2894 2894 tcpha->tha_ack = 0;
2895 2895 tcpha->tha_offset_and_reserved = (5 << 4);
2896 2896 tcpha->tha_lport = connp->conn_lport;
2897 2897 tcpha->tha_fport = connp->conn_fport;
2898 2898 }
2899 2899
2900 2900 /*
2901 2901 * IP wants our header length in the checksum field to
2902 2902 * allow it to perform a single pseudo-header+checksum
2903 2903 * calculation on behalf of TCP.
2904 2904 * Include the adjustment for a source route once IP_OPTIONS is set.
2905 2905 */
2906 2906 cksum = sizeof (tcpha_t) + connp->conn_sum;
2907 2907 cksum = (cksum >> 16) + (cksum & 0xFFFF);
2908 2908 ASSERT(cksum < 0x10000);
2909 2909 tcpha->tha_sum = htons(cksum);
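	/*
	 * For example, when there is no source route conn_sum is typically
	 * zero, so the seed stored here is just htons(sizeof (tcpha_t)),
	 * i.e. htons(20): the fixed TCP header length that the pseudo-header
	 * checksum needs to account for.
	 */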
2910 2910
2911 2911 if (connp->conn_ipversion == IPV4_VERSION)
2912 2912 tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
2913 2913 else
2914 2914 tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;
2915 2915
2916 2916 if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
2917 2917 connp->conn_wroff) {
2918 2918 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2919 2919 tcps->tcps_wroff_xtra;
2920 2920 (void) proto_set_tx_wroff(connp->conn_rq, connp,
2921 2921 connp->conn_wroff);
2922 2922 }
2923 2923 return (0);
2924 2924 }
2925 2925
2926 2926 /*
2927 2927 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
2928 2928 * We do not allow the receive window to shrink. After setting rwnd,
2929 2929 * set the flow control hiwat of the stream.
2930 2930 *
2931 2931 * This function is called in 2 cases:
2932 2932 *
2933 2933 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
2934 2934 * connection (passive open) and in tcp_input_data() for active connect.
2935 2935 * This is called after tcp_mss_set() when the desired MSS value is known.
2936 2936  * This makes sure that our window size is a multiple of the other side's
2937 2937 * MSS.
2938 2938 * 2) Handling SO_RCVBUF option.
2939 2939 *
2940 2940 * It is ASSUMED that the requested size is a multiple of the current MSS.
2941 2941 *
2942 2942 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
2943 2943 * user requests so.
2944 2944 */
2945 2945 int
2946 2946 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
2947 2947 {
2948 2948 uint32_t mss = tcp->tcp_mss;
2949 2949 uint32_t old_max_rwnd;
2950 2950 uint32_t max_transmittable_rwnd;
2951 2951 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
2952 2952 tcp_stack_t *tcps = tcp->tcp_tcps;
2953 2953 conn_t *connp = tcp->tcp_connp;
2954 2954
2955 2955 /*
2956 2956 * Insist on a receive window that is at least
2957 2957 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
2958 2958 	 * funny TCP interactions between the Nagle algorithm, SWS avoidance
2959 2959 * and delayed acknowledgement.
2960 2960 */
2961 2961 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
2962 2962
2963 2963 if (tcp->tcp_fused) {
2964 2964 size_t sth_hiwat;
2965 2965 tcp_t *peer_tcp = tcp->tcp_loopback_peer;
2966 2966
2967 2967 ASSERT(peer_tcp != NULL);
2968 2968 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
2969 2969 if (!tcp_detached) {
2970 2970 (void) proto_set_rx_hiwat(connp->conn_rq, connp,
2971 2971 sth_hiwat);
2972 2972 tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
2973 2973 }
2974 2974
2975 2975 /* Caller could have changed tcp_rwnd; update tha_win */
2976 2976 if (tcp->tcp_tcpha != NULL) {
2977 2977 tcp->tcp_tcpha->tha_win =
2978 2978 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2979 2979 }
2980 2980 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
2981 2981 tcp->tcp_cwnd_max = rwnd;
2982 2982
2983 2983 /*
2984 2984 * In the fusion case, the maxpsz stream head value of
2985 2985 * our peer is set according to its send buffer size
2986 2986 * and our receive buffer size; since the latter may
2987 2987 * have changed we need to update the peer's maxpsz.
2988 2988 */
2989 2989 (void) tcp_maxpsz_set(peer_tcp, B_TRUE);
2990 2990 return (sth_hiwat);
2991 2991 }
2992 2992
2993 2993 if (tcp_detached)
2994 2994 old_max_rwnd = tcp->tcp_rwnd;
2995 2995 else
2996 2996 old_max_rwnd = connp->conn_rcvbuf;
2997 2997
2998 2998
2999 2999 /*
3000 3000 * If window size info has already been exchanged, TCP should not
3001 3001 * shrink the window. Shrinking window is doable if done carefully.
3002 3002 * We may add that support later. But so far there is not a real
3003 3003 * need to do that.
3004 3004 */
3005 3005 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
3006 3006 /* MSS may have changed, do a round up again. */
3007 3007 rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
3008 3008 }
3009 3009
3010 3010 /*
3011 3011 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
3012 3012 * can be applied even before the window scale option is decided.
3013 3013 */
3014 3014 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
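	/*
	 * For scale: TCP_MAXWIN is 65535 and the largest window shift
	 * (TCP_MAX_WINSHIFT) is 14, so the ceiling computed here is at most
	 * 65535 << 14, i.e. just under 1 GB, per the RFC 1323 window scale
	 * option.
	 */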
3015 3015 if (rwnd > max_transmittable_rwnd) {
3016 3016 rwnd = max_transmittable_rwnd -
3017 3017 (max_transmittable_rwnd % mss);
3018 3018 if (rwnd < mss)
3019 3019 rwnd = max_transmittable_rwnd;
3020 3020 /*
3021 3021 * If we're over the limit we may have to back down tcp_rwnd.
3022 3022 * The increment below won't work for us. So we set all three
3023 3023 * here and the increment below will have no effect.
3024 3024 */
3025 3025 tcp->tcp_rwnd = old_max_rwnd = rwnd;
3026 3026 }
3027 3027 if (tcp->tcp_localnet) {
3028 3028 tcp->tcp_rack_abs_max =
3029 3029 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
3030 3030 } else {
3031 3031 /*
3032 3032 * For a remote host on a different subnet (through a router),
3033 3033 * we ack every other packet to be conforming to RFC1122.
3034 3034 * tcp_deferred_acks_max defaults to 2.
3035 3035 */
3036 3036 tcp->tcp_rack_abs_max =
3037 3037 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
3038 3038 }
3039 3039 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
3040 3040 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
3041 3041 else
3042 3042 tcp->tcp_rack_cur_max = 0;
3043 3043 /*
3044 3044 * Increment the current rwnd by the amount the maximum grew (we
3045 3045 * cannot overwrite it since we might be in the middle of a
3046 3046 * connection.)
3047 3047 */
3048 3048 tcp->tcp_rwnd += rwnd - old_max_rwnd;
3049 3049 connp->conn_rcvbuf = rwnd;
3050 3050
3051 3051 /* Are we already connected? */
3052 3052 if (tcp->tcp_tcpha != NULL) {
3053 3053 tcp->tcp_tcpha->tha_win =
3054 3054 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3055 3055 }
3056 3056
3057 3057 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
3058 3058 tcp->tcp_cwnd_max = rwnd;
3059 3059
3060 3060 if (tcp_detached)
3061 3061 return (rwnd);
3062 3062
3063 3063 tcp_set_recv_threshold(tcp, rwnd >> 3);
3064 3064
3065 3065 (void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
3066 3066 return (rwnd);
3067 3067 }
3068 3068
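The sizing logic above is easier to see in isolation. Below is a minimal user-level C sketch (not part of this change; the helper name and tunable values are illustrative) of the round-to-MSS, minimum-window floor, and window-scale clamp steps performed by tcp_rwnd_set():

#include <stdio.h>
#include <stdint.h>

#define TCP_MAXWIN	65535U			/* largest unscaled window */
#define MSS_ROUNDUP(x, mss)	((((x) - 1) / (mss) + 1) * (mss))

/* Illustrative stand-in for the sizing done in tcp_rwnd_set(). */
static uint32_t
rwnd_for(uint32_t req, uint32_t mss, uint32_t rcv_ws, uint32_t minmss)
{
	uint32_t rwnd = MSS_ROUNDUP(req, mss);
	uint32_t max_transmittable = TCP_MAXWIN << rcv_ws;

	if (rwnd < minmss * mss)		/* recv_hiwat_minmss floor */
		rwnd = minmss * mss;
	if (rwnd > max_transmittable) {		/* window-scale ceiling */
		rwnd = max_transmittable - (max_transmittable % mss);
		if (rwnd < mss)
			rwnd = max_transmittable;
	}
	return (rwnd);
}

int
main(void)
{
	/* 1 MB request, 1460-byte MSS, window scale 3, 4-MSS floor. */
	printf("%u\n", rwnd_for(1024 * 1024, 1460, 3, 4));	/* prints 524140 */
	return (0);
}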
3069 3069 int
3070 3070 tcp_do_unbind(conn_t *connp)
3071 3071 {
3072 3072 tcp_t *tcp = connp->conn_tcp;
3073 3073 int32_t oldstate;
3074 3074
3075 3075 switch (tcp->tcp_state) {
3076 3076 case TCPS_BOUND:
3077 3077 case TCPS_LISTEN:
3078 3078 break;
3079 3079 default:
3080 3080 return (-TOUTSTATE);
3081 3081 }
3082 3082
3083 3083 /*
3084 3084 * Need to clean up all the eagers since after the unbind, segments
3085 3085 * will no longer be delivered to this listener stream.
3086 3086 */
3087 3087 mutex_enter(&tcp->tcp_eager_lock);
3088 3088 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
3089 3089 tcp_eager_cleanup(tcp, 0);
3090 3090 }
3091 3091 mutex_exit(&tcp->tcp_eager_lock);
3092 3092
3093 3093 /* Clean up the listener connection counter if necessary. */
3094 3094 if (tcp->tcp_listen_cnt != NULL)
3095 3095 TCP_DECR_LISTEN_CNT(tcp);
3096 3096 connp->conn_laddr_v6 = ipv6_all_zeros;
3097 3097 connp->conn_saddr_v6 = ipv6_all_zeros;
3098 3098 tcp_bind_hash_remove(tcp);
3099 3099 oldstate = tcp->tcp_state;
3100 3100 tcp->tcp_state = TCPS_IDLE;
3101 3101 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
3102 3102 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
3103 3103 int32_t, oldstate);
3104 3104
3105 3105 ip_unbind(connp);
3106 3106 bzero(&connp->conn_ports, sizeof (connp->conn_ports));
3107 3107
3108 3108 return (0);
3109 3109 }
3110 3110
3111 3111 /*
3112 3112 * Collect protocol properties to send to the upper handle.
3113 3113 */
3114 3114 void
3115 3115 tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
3116 3116 {
3117 3117 conn_t *connp = tcp->tcp_connp;
3118 3118
3119 3119 sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
3120 3120 sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
3121 3121
3122 3122 sopp->sopp_rxhiwat = tcp->tcp_fused ?
3123 3123 tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
3124 3124 connp->conn_rcvbuf;
3125 3125 /*
3126 3126 * Determine what write offset value to use depending on SACK and
3127 3127 * whether the endpoint is fused or not.
3128 3128 */
3129 3129 if (tcp->tcp_fused) {
3130 3130 ASSERT(tcp->tcp_loopback);
3131 3131 ASSERT(tcp->tcp_loopback_peer != NULL);
3132 3132 /*
3133 3133 * For fused tcp loopback, set the stream head's write
3134 3134 * offset value to zero since we won't be needing any room
3135 3135 * for TCP/IP headers. This would also improve performance
3136 3136 * since it would reduce the amount of work done by kmem.
3137 3137 * Non-fused tcp loopback case is handled separately below.
3138 3138 */
3139 3139 sopp->sopp_wroff = 0;
3140 3140 /*
3141 3141 * Update the peer's transmit parameters according to
3142 3142 * our recently calculated high water mark value.
3143 3143 */
3144 3144 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
3145 3145 } else if (tcp->tcp_snd_sack_ok) {
3146 3146 sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
3147 3147 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3148 3148 } else {
3149 3149 sopp->sopp_wroff = connp->conn_ht_iphc_len +
3150 3150 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3151 3151 }
3152 3152
3153 3153 if (tcp->tcp_loopback) {
3154 3154 sopp->sopp_flags |= SOCKOPT_LOOPBACK;
3155 3155 sopp->sopp_loopback = B_TRUE;
3156 3156 }
3157 3157 }
3158 3158
3159 3159 /*
3160 3160 * Check the usability of ZEROCOPY; in practice this checks the flag set by IP.
3161 3161 */
3162 3162 boolean_t
3163 3163 tcp_zcopy_check(tcp_t *tcp)
3164 3164 {
3165 3165 conn_t *connp = tcp->tcp_connp;
3166 3166 ip_xmit_attr_t *ixa = connp->conn_ixa;
3167 3167 boolean_t zc_enabled = B_FALSE;
3168 3168 tcp_stack_t *tcps = tcp->tcp_tcps;
3169 3169
3170 3170 if (do_tcpzcopy == 2)
3171 3171 zc_enabled = B_TRUE;
3172 3172 else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
3173 3173 zc_enabled = B_TRUE;
3174 3174
3175 3175 tcp->tcp_snd_zcopy_on = zc_enabled;
3176 3176 if (!TCP_IS_DETACHED(tcp)) {
3177 3177 if (zc_enabled) {
3178 3178 ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
3179 3179 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3180 3180 ZCVMSAFE);
3181 3181 TCP_STAT(tcps, tcp_zcopy_on);
3182 3182 } else {
3183 3183 ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
3184 3184 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3185 3185 ZCVMUNSAFE);
3186 3186 TCP_STAT(tcps, tcp_zcopy_off);
3187 3187 }
3188 3188 }
3189 3189 return (zc_enabled);
3190 3190 }
3191 3191
3192 3192 /*
3193 3193 * Backoff from a zero-copy message by copying data to a new allocated
3194 3194 * message and freeing the original desballoca'ed segmapped message.
3195 3195 *
3196 3196 * This function is called by the following two callers:
3197 3197 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
3198 3198 * the original desballoca'ed message and notify sockfs. This is the re-
3199 3199 * transmit state.
3200 3200 * 2. tcp_output: fix_xmitlist is set to B_FALSE. The STRUIO_ZCNOTIFY flag
3201 3201 * needs to be copied to the new message.
3202 3202 */
3203 3203 mblk_t *
3204 3204 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
3205 3205 {
3206 3206 mblk_t *nbp;
3207 3207 mblk_t *head = NULL;
3208 3208 mblk_t *tail = NULL;
3209 3209 tcp_stack_t *tcps = tcp->tcp_tcps;
3210 3210
3211 3211 ASSERT(bp != NULL);
3212 3212 while (bp != NULL) {
3213 3213 if (IS_VMLOANED_MBLK(bp)) {
3214 3214 TCP_STAT(tcps, tcp_zcopy_backoff);
3215 3215 if ((nbp = copyb(bp)) == NULL) {
3216 3216 tcp->tcp_xmit_zc_clean = B_FALSE;
3217 3217 if (tail != NULL)
3218 3218 tail->b_cont = bp;
3219 3219 return ((head == NULL) ? bp : head);
3220 3220 }
3221 3221
3222 3222 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
3223 3223 if (fix_xmitlist)
3224 3224 tcp_zcopy_notify(tcp);
3225 3225 else
3226 3226 nbp->b_datap->db_struioflag |=
3227 3227 STRUIO_ZCNOTIFY;
3228 3228 }
3229 3229 nbp->b_cont = bp->b_cont;
3230 3230
3231 3231 /*
3232 3232 * Copy saved information and adjust tcp_xmit_tail
3233 3233 * if needed.
3234 3234 */
3235 3235 if (fix_xmitlist) {
3236 3236 nbp->b_prev = bp->b_prev;
3237 3237 nbp->b_next = bp->b_next;
3238 3238
3239 3239 if (tcp->tcp_xmit_tail == bp)
3240 3240 tcp->tcp_xmit_tail = nbp;
3241 3241 }
3242 3242
3243 3243 /* Free the original message. */
3244 3244 bp->b_prev = NULL;
3245 3245 bp->b_next = NULL;
3246 3246 freeb(bp);
3247 3247
3248 3248 bp = nbp;
3249 3249 }
3250 3250
3251 3251 if (head == NULL) {
3252 3252 head = bp;
3253 3253 }
3254 3254 if (tail == NULL) {
3255 3255 tail = bp;
3256 3256 } else {
3257 3257 tail->b_cont = bp;
3258 3258 tail = bp;
3259 3259 }
3260 3260
3261 3261 /* Move forward. */
3262 3262 bp = bp->b_cont;
3263 3263 }
3264 3264
3265 3265 if (fix_xmitlist) {
3266 3266 tcp->tcp_xmit_last = tail;
3267 3267 tcp->tcp_xmit_zc_clean = B_TRUE;
3268 3268 }
3269 3269
3270 3270 return (head);
3271 3271 }
3272 3272
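The loop above follows a common pattern: walk a chained message, substitute private copies for loaned blocks, and keep the chain intact even if an allocation fails partway through. A hedged user-level sketch of the same pattern, with a toy block type standing in for mblk_t (names and structure are illustrative only):

#include <stdlib.h>
#include <string.h>

/* A toy stand-in for an mblk: a data area plus a "loaned" flag. */
typedef struct blk {
	struct blk *next;
	char data[64];
	int loaned;
} blk_t;

/*
 * Replace every loaned block in the chain with a private copy, preserving
 * chain order; on allocation failure, splice the remaining original blocks
 * back on and return whatever has been built so far, the same shape as
 * tcp_zcopy_backoff().
 */
static blk_t *
backoff(blk_t *bp)
{
	blk_t *head = NULL, *tail = NULL, *nbp;

	while (bp != NULL) {
		if (bp->loaned) {
			if ((nbp = malloc(sizeof (*nbp))) == NULL) {
				if (tail != NULL)
					tail->next = bp;
				return (head == NULL ? bp : head);
			}
			memcpy(nbp->data, bp->data, sizeof (nbp->data));
			nbp->loaned = 0;
			nbp->next = bp->next;
			free(bp);		/* give the loaned block back */
			bp = nbp;
		}
		if (head == NULL)
			head = bp;
		if (tail == NULL)
			tail = bp;
		else {
			tail->next = bp;
			tail = bp;
		}
		bp = bp->next;
	}
	return (head);
}

int
main(void)
{
	blk_t *b1 = calloc(1, sizeof (*b1));
	blk_t *b2 = calloc(1, sizeof (*b2));

	if (b1 == NULL || b2 == NULL)
		return (1);
	b1->loaned = 1;		/* pretend b1's buffer is loaned out */
	b1->next = b2;
	b1 = backoff(b1);	/* b1 is replaced by a private copy */
	free(b1->next);
	free(b1);
	return (0);
}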
3273 3273 void
3274 3274 tcp_zcopy_notify(tcp_t *tcp)
3275 3275 {
3276 3276 struct stdata *stp;
3277 3277 conn_t *connp;
3278 3278
3279 3279 if (tcp->tcp_detached)
3280 3280 return;
3281 3281 connp = tcp->tcp_connp;
3282 3282 if (IPCL_IS_NONSTR(connp)) {
3283 3283 (*connp->conn_upcalls->su_zcopy_notify)
3284 3284 (connp->conn_upper_handle);
3285 3285 return;
3286 3286 }
3287 3287 stp = STREAM(connp->conn_rq);
3288 3288 mutex_enter(&stp->sd_lock);
3289 3289 stp->sd_flag |= STZCNOTIFY;
3290 3290 cv_broadcast(&stp->sd_zcopy_wait);
3291 3291 mutex_exit(&stp->sd_lock);
3292 3292 }
3293 3293
3294 3294 /*
3295 3295 * Update the TCP connection according to change of LSO capability.
3296 3296 */
3297 3297 static void
3298 3298 tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
3299 3299 {
3300 3300 /*
3301 3301 * We check against IPv4 header length to preserve the old behavior
3302 3302 * of only enabling LSO when there are no IP options.
3303 3303 * But this restriction might not be necessary at all. Before removing
3304 3304 * it, need to verify how LSO is handled for source routing case, with
3305 3305 * which IP does software checksum.
3306 3306 *
3307 3307 * For IPv6, whenever any extension header is needed, LSO is suppressed.
3308 3308 */
3309 3309 if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
3310 3310 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
3311 3311 return;
3312 3312
3313 3313 /*
3314 3314 * Either the LSO capability newly became usable, or it has changed.
3315 3315 */
3316 3316 if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
3317 3317 ill_lso_capab_t *lsoc = &ixa->ixa_lso_capab;
3318 3318
3319 3319 ASSERT(lsoc->ill_lso_max > 0);
3320 3320 tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);
3321 3321
3322 3322 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3323 3323 boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);
3324 3324
3325 3325 /*
3326 3326 * If LSO is to be enabled, notify the stream head to use a larger
3327 3327 * data block.
3328 3328 */
3329 3329 if (!tcp->tcp_lso)
3330 3330 tcp->tcp_maxpsz_multiplier = 0;
3331 3331
3332 3332 tcp->tcp_lso = B_TRUE;
3333 3333 TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
3334 3334 } else { /* LSO capability is not usable any more. */
3335 3335 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3336 3336 boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);
3337 3337
3338 3338 /*
3339 3339 * If LSO is to be disabled, notify the stream head to use a smaller
3340 3340 * data block, and restore fragsize to the PMTU.
3341 3341 */
3342 3342 if (tcp->tcp_lso) {
3343 3343 tcp->tcp_maxpsz_multiplier =
3344 3344 tcp->tcp_tcps->tcps_maxpsz_multiplier;
3345 3345 ixa->ixa_fragsize = ixa->ixa_pmtu;
3346 3346 tcp->tcp_lso = B_FALSE;
3347 3347 TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
3348 3348 }
3349 3349 }
3350 3350
3351 3351 (void) tcp_maxpsz_set(tcp, B_TRUE);
3352 3352 }
3353 3353
3354 3354 /*
3355 3355 * Update the TCP connection according to change of ZEROCOPY capability.
3356 3356 */
3357 3357 static void
3358 3358 tcp_update_zcopy(tcp_t *tcp)
3359 3359 {
3360 3360 conn_t *connp = tcp->tcp_connp;
3361 3361 tcp_stack_t *tcps = tcp->tcp_tcps;
3362 3362
3363 3363 if (tcp->tcp_snd_zcopy_on) {
3364 3364 tcp->tcp_snd_zcopy_on = B_FALSE;
3365 3365 if (!TCP_IS_DETACHED(tcp)) {
3366 3366 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3367 3367 ZCVMUNSAFE);
3368 3368 TCP_STAT(tcps, tcp_zcopy_off);
3369 3369 }
3370 3370 } else {
3371 3371 tcp->tcp_snd_zcopy_on = B_TRUE;
3372 3372 if (!TCP_IS_DETACHED(tcp)) {
3373 3373 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3374 3374 ZCVMSAFE);
3375 3375 TCP_STAT(tcps, tcp_zcopy_on);
3376 3376 }
3377 3377 }
3378 3378 }
3379 3379
3380 3380 /*
3381 3381 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
3382 3382 * so it's safe to update the TCP connection.
3383 3383 */
3384 3384 /* ARGSUSED1 */
3385 3385 static void
3386 3386 tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
3387 3387 ixa_notify_arg_t narg)
3388 3388 {
3389 3389 tcp_t *tcp = (tcp_t *)arg;
3390 3390 conn_t *connp = tcp->tcp_connp;
3391 3391
3392 3392 switch (ntype) {
3393 3393 case IXAN_LSO:
3394 3394 tcp_update_lso(tcp, connp->conn_ixa);
3395 3395 break;
3396 3396 case IXAN_PMTU:
3397 3397 tcp_update_pmtu(tcp, B_FALSE);
3398 3398 break;
3399 3399 case IXAN_ZCOPY:
3400 3400 tcp_update_zcopy(tcp);
3401 3401 break;
3402 3402 default:
3403 3403 break;
3404 3404 }
3405 3405 }
3406 3406
3407 3407 /*
3408 3408 * The TCP write service routine should never be called...
3409 3409 */
3410 3410 /* ARGSUSED */
3411 3411 static int
3412 3412 tcp_wsrv(queue_t *q)
3413 3413 {
3414 3414 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
3415 3415
3416 3416 TCP_STAT(tcps, tcp_wsrv_called);
3417 3417 return (0);
3418 3418 }
3419 3419
3420 3420 /*
3421 3421 * Hash list lookup routine for tcp_t structures.
3422 3422 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
3423 3423 */
3424 3424 tcp_t *
3425 3425 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
3426 3426 {
3427 3427 tf_t *tf;
3428 3428 tcp_t *tcp;
3429 3429
3430 3430 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3431 3431 mutex_enter(&tf->tf_lock);
3432 3432 for (tcp = tf->tf_tcp; tcp != NULL;
3433 3433 tcp = tcp->tcp_acceptor_hash) {
3434 3434 if (tcp->tcp_acceptor_id == id) {
3435 3435 CONN_INC_REF(tcp->tcp_connp);
3436 3436 mutex_exit(&tf->tf_lock);
3437 3437 return (tcp);
3438 3438 }
3439 3439 }
3440 3440 mutex_exit(&tf->tf_lock);
3441 3441 return (NULL);
3442 3442 }
3443 3443
3444 3444 /*
3445 3445 * Hash list insertion routine for tcp_t structures.
3446 3446 */
3447 3447 void
3448 3448 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
3449 3449 {
3450 3450 tf_t *tf;
3451 3451 tcp_t **tcpp;
3452 3452 tcp_t *tcpnext;
3453 3453 tcp_stack_t *tcps = tcp->tcp_tcps;
3454 3454
3455 3455 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3456 3456
3457 3457 if (tcp->tcp_ptpahn != NULL)
3458 3458 tcp_acceptor_hash_remove(tcp);
3459 3459 tcpp = &tf->tf_tcp;
3460 3460 mutex_enter(&tf->tf_lock);
3461 3461 tcpnext = tcpp[0];
3462 3462 if (tcpnext)
3463 3463 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
3464 3464 tcp->tcp_acceptor_hash = tcpnext;
3465 3465 tcp->tcp_ptpahn = tcpp;
3466 3466 tcpp[0] = tcp;
3467 3467 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */
3468 3468 mutex_exit(&tf->tf_lock);
3469 3469 }
3470 3470
3471 3471 /*
3472 3472 * Hash list removal routine for tcp_t structures.
3473 3473 */
3474 3474 void
3475 3475 tcp_acceptor_hash_remove(tcp_t *tcp)
3476 3476 {
3477 3477 tcp_t *tcpnext;
3478 3478 kmutex_t *lockp;
3479 3479
3480 3480 /*
3481 3481 * Extract the lock pointer in case there are concurrent
3482 3482 * hash_remove's for this instance.
3483 3483 */
3484 3484 lockp = tcp->tcp_acceptor_lockp;
3485 3485
3486 3486 if (tcp->tcp_ptpahn == NULL)
3487 3487 return;
3488 3488
3489 3489 ASSERT(lockp != NULL);
3490 3490 mutex_enter(lockp);
3491 3491 if (tcp->tcp_ptpahn) {
3492 3492 tcpnext = tcp->tcp_acceptor_hash;
3493 3493 if (tcpnext) {
3494 3494 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
3495 3495 tcp->tcp_acceptor_hash = NULL;
3496 3496 }
3497 3497 *tcp->tcp_ptpahn = tcpnext;
3498 3498 tcp->tcp_ptpahn = NULL;
3499 3499 }
3500 3500 mutex_exit(lockp);
3501 3501 tcp->tcp_acceptor_lockp = NULL;
3502 3502 }
3503 3503
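tcp_ptpahn holds the address of whichever pointer currently points at this tcp_t (the bucket head or the previous element's tcp_acceptor_hash), which is what lets removal run in O(1) on a singly linked chain. A minimal user-level sketch of that idiom, with made-up names (not part of this change):

#include <stdio.h>
#include <stddef.h>

typedef struct node {
	int id;
	struct node *next;
	struct node **ptpn;	/* &previous->next, or &bucket head */
} node_t;

static void
insert_head(node_t **bucket, node_t *n)
{
	node_t *first = *bucket;

	if (first != NULL)
		first->ptpn = &n->next;
	n->next = first;
	n->ptpn = bucket;
	*bucket = n;
}

static void
remove_node(node_t *n)
{
	if (n->ptpn == NULL)
		return;			/* not on any list */
	if (n->next != NULL)
		n->next->ptpn = n->ptpn;
	*n->ptpn = n->next;		/* unlink without knowing the predecessor */
	n->next = NULL;
	n->ptpn = NULL;
}

int
main(void)
{
	node_t *bucket = NULL;
	node_t a = { 1 }, b = { 2 }, c = { 3 };

	insert_head(&bucket, &a);
	insert_head(&bucket, &b);
	insert_head(&bucket, &c);
	remove_node(&b);		/* O(1) removal from the middle */
	for (node_t *p = bucket; p != NULL; p = p->next)
		printf("%d\n", p->id);	/* prints 3 then 1 */
	return (0);
}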
3504 3504 /*
3505 3505 * Type three generator adapted from the random() function in 4.4 BSD:
3506 3506 */
3507 3507
3508 3508 /*
3509 3509 * Copyright (c) 1983, 1993
3510 3510 * The Regents of the University of California. All rights reserved.
3511 3511 *
3512 3512 * Redistribution and use in source and binary forms, with or without
3513 3513 * modification, are permitted provided that the following conditions
3514 3514 * are met:
3515 3515 * 1. Redistributions of source code must retain the above copyright
3516 3516 * notice, this list of conditions and the following disclaimer.
3517 3517 * 2. Redistributions in binary form must reproduce the above copyright
3518 3518 * notice, this list of conditions and the following disclaimer in the
3519 3519 * documentation and/or other materials provided with the distribution.
3520 3520 * 3. All advertising materials mentioning features or use of this software
3521 3521 * must display the following acknowledgement:
3522 3522 * This product includes software developed by the University of
3523 3523 * California, Berkeley and its contributors.
3524 3524 * 4. Neither the name of the University nor the names of its contributors
3525 3525 * may be used to endorse or promote products derived from this software
3526 3526 * without specific prior written permission.
3527 3527 *
3528 3528 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
3529 3529 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3530 3530 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3531 3531 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3532 3532 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3533 3533 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3534 3534 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3535 3535 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3536 3536 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3537 3537 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3538 3538 * SUCH DAMAGE.
3539 3539 */
3540 3540
3541 3541 /* Type 3 -- x**31 + x**3 + 1 */
3542 3542 #define DEG_3 31
3543 3543 #define SEP_3 3
3544 3544
3545 3545
3546 3546 /* Protected by tcp_random_lock */
3547 3547 static int tcp_randtbl[DEG_3 + 1];
3548 3548
3549 3549 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
3550 3550 static int *tcp_random_rptr = &tcp_randtbl[1];
3551 3551
3552 3552 static int *tcp_random_state = &tcp_randtbl[1];
3553 3553 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];
3554 3554
3555 3555 kmutex_t tcp_random_lock;
3556 3556
3557 3557 void
3558 3558 tcp_random_init(void)
3559 3559 {
3560 3560 int i;
3561 3561 hrtime_t hrt;
3562 3562 time_t wallclock;
3563 3563 uint64_t result;
3564 3564
3565 3565 /*
3566 3566 * Use high-res timer and current time for seed. Gethrtime() returns
3567 3567 * a longlong, which may contain resolution down to nanoseconds.
3568 3568 * The current time will either be a 32-bit or a 64-bit quantity.
3569 3569 * XOR the two together in a 64-bit result variable.
3570 3570 * Convert the result to a 32-bit value by multiplying the high-order
3571 3571 * 32-bits by the low-order 32-bits.
3572 3572 */
3573 3573
3574 3574 hrt = gethrtime();
3575 3575 (void) drv_getparm(TIME, &wallclock);
3576 3576 result = (uint64_t)wallclock ^ (uint64_t)hrt;
3577 3577 mutex_enter(&tcp_random_lock);
3578 3578 tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
3579 3579 (result & 0xffffffff);
3580 3580
3581 3581 for (i = 1; i < DEG_3; i++)
3582 3582 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
3583 3583 + 12345;
3584 3584 tcp_random_fptr = &tcp_random_state[SEP_3];
3585 3585 tcp_random_rptr = &tcp_random_state[0];
3586 3586 mutex_exit(&tcp_random_lock);
3587 3587 for (i = 0; i < 10 * DEG_3; i++)
3588 3588 (void) tcp_random();
3589 3589 }
3590 3590
3591 3591 /*
3592 3592 * tcp_random: Return a random number in the range [1, 128K + 1].
3593 3593 * This range is selected to be approximately centered on TCP_ISS / 2,
3594 3594 * and easy to compute. We get this value by generating a 32-bit random
3595 3595 * number, selecting out the high-order 17 bits, and then adding one so
3596 3596 * that we never return zero.
3597 3597 */
3598 3598 int
3599 3599 tcp_random(void)
3600 3600 {
3601 3601 int i;
3602 3602
3603 3603 mutex_enter(&tcp_random_lock);
3604 3604 *tcp_random_fptr += *tcp_random_rptr;
3605 3605
3606 3606 /*
3607 3607 * The high-order bits are more random than the low-order bits,
3608 3608 * so we select out the high-order 17 bits and add one so that
3609 3609 * we never return zero.
3610 3610 */
3611 3611 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
3612 3612 if (++tcp_random_fptr >= tcp_random_end_ptr) {
3613 3613 tcp_random_fptr = tcp_random_state;
3614 3614 ++tcp_random_rptr;
3615 3615 } else if (++tcp_random_rptr >= tcp_random_end_ptr)
3616 3616 tcp_random_rptr = tcp_random_state;
3617 3617
3618 3618 mutex_exit(&tcp_random_lock);
3619 3619 return (i);
3620 3620 }
3621 3621
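For readers unfamiliar with the 4.4BSD type-3 generator, a self-contained user-level rendition of the same x**31 + x**3 + 1 additive scheme is sketched below. It is illustrative only: unsigned arithmetic and a fixed seed are used here, whereas the kernel version above seeds from hrtime and wall-clock time and serializes with tcp_random_lock.

#include <stdio.h>

#define DEG_3	31	/* trinomial: x**31 + x**3 + 1 */
#define SEP_3	3

static unsigned int state[DEG_3];
static unsigned int *fptr = &state[SEP_3];
static unsigned int *rptr = &state[0];
static unsigned int *end_ptr = &state[DEG_3];

static int
rand_next(void)
{
	int r;

	*fptr += *rptr;
	/* The high-order bits are the most random: keep 17 of them, plus 1. */
	r = (int)((*fptr >> 15) & 0x1ffff) + 1;
	if (++fptr >= end_ptr) {
		fptr = state;
		++rptr;
	} else if (++rptr >= end_ptr) {
		rptr = state;
	}
	return (r);
}

static void
rand_init(unsigned int seed)
{
	int i;

	state[0] = seed;
	for (i = 1; i < DEG_3; i++)
		state[i] = 1103515245u * state[i - 1] + 12345u;
	fptr = &state[SEP_3];
	rptr = &state[0];
	for (i = 0; i < 10 * DEG_3; i++)	/* churn, as tcp_random_init() does */
		(void) rand_next();
}

int
main(void)
{
	rand_init(12345);
	for (int i = 0; i < 5; i++)
		printf("%d\n", rand_next());	/* each value in [1, 128K] */
	return (0);
}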
3622 3622 /*
3623 3623 * Split this function out so that if the secret changes, I'm okay.
3624 3624 *
3625 3625 * Initialize the tcp_iss_cookie and tcp_iss_key.
3626 3626 */
3627 3627
3628 3628 #define PASSWD_SIZE 16 /* MUST be multiple of 4 */
3629 3629
3630 3630 void
3631 3631 tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
3632 3632 {
3633 3633 struct {
3634 3634 int32_t current_time;
3635 3635 uint32_t randnum;
3636 3636 uint16_t pad;
3637 3637 uint8_t ether[6];
3638 3638 uint8_t passwd[PASSWD_SIZE];
3639 3639 } tcp_iss_cookie;
3640 3640 time_t t;
3641 3641
3642 3642 /*
3643 3643 * Start with the current absolute time.
3644 3644 */
3645 3645 (void) drv_getparm(TIME, &t);
3646 3646 tcp_iss_cookie.current_time = t;
3647 3647
3648 3648 /*
3649 3649 * XXX - Need a more random number per RFC 1750, not this crap.
3650 3650 * OTOH, if what follows is pretty random, then I'm in better shape.
3651 3651 */
3652 3652 tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
3653 3653 tcp_iss_cookie.pad = 0x365c; /* Picked from HMAC pad values. */
3654 3654
3655 3655 /*
3656 3656 * The cpu_type_info is pretty non-random. Ugggh. It does serve
3657 3657 * as a good template.
3658 3658 */
3659 3659 bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
3660 3660 min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));
3661 3661
3662 3662 /*
3663 3663 * The pass-phrase. Normally this is supplied by user-called NDD.
3664 3664 */
3665 3665 bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));
3666 3666
3667 3667 /*
3668 3668 * See 4010593 if this section becomes a problem again,
3669 3669 * but the local ethernet address is useful here.
3670 3670 */
3671 3671 (void) localetheraddr(NULL,
3672 3672 (struct ether_addr *)&tcp_iss_cookie.ether);
3673 3673
3674 3674 /*
3675 3675 * Hash 'em all together. The MD5Final is called per-connection.
3676 3676 */
3677 3677 mutex_enter(&tcps->tcps_iss_key_lock);
3678 3678 MD5Init(&tcps->tcps_iss_key);
3679 3679 MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
3680 3680 sizeof (tcp_iss_cookie));
3681 3681 mutex_exit(&tcps->tcps_iss_key_lock);
3682 3682 }
3683 3683
3684 3684 /*
3685 3685 * Called by IP when IP is loaded into the kernel
3686 3686 */
3687 3687 void
3688 3688 tcp_ddi_g_init(void)
3689 3689 {
3690 3690 tcp_timercache = kmem_cache_create("tcp_timercache",
3691 3691 sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
3692 3692 NULL, NULL, NULL, NULL, NULL, 0);
3693 3693
3694 3694 tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
3695 3695 sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
3696 3696
3697 3697 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);
3698 3698
3699 3699 /* Initialize the random number generator */
3700 3700 tcp_random_init();
3701 3701
3702 3702 /* A single callback independently of how many netstacks we have */
3703 3703 ip_squeue_init(tcp_squeue_add);
3704 3704
3705 3705 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);
3706 3706
3707 3707 tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);
3708 3708
3709 3709 /*
3710 3710 * We want to be informed each time a stack is created or
3711 3711 * destroyed in the kernel, so we can maintain the
3712 3712 * set of tcp_stack_t's.
3713 3713 */
3714 3714 netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
3715 3715 }
3716 3716
3717 3717
3718 3718 #define INET_NAME "ip"
3719 3719
3720 3720 /*
3721 3721 * Initialize the TCP stack instance.
3722 3722 */
3723 3723 static void *
3724 3724 tcp_stack_init(netstackid_t stackid, netstack_t *ns)
3725 3725 {
3726 3726 tcp_stack_t *tcps;
3727 3727 int i;
3728 3728 int error = 0;
3729 3729 major_t major;
3730 3730 size_t arrsz;
3731 3731
3732 3732 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
3733 3733 tcps->tcps_netstack = ns;
3734 3734
3735 3735 /* Initialize locks */
3736 3736 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
3737 3737 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);
3738 3738
3739 3739 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
3740 3740 tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
3741 3741 tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
3742 3742 tcps->tcps_min_anonpriv_port = 512;
3743 3743
3744 3744 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
3745 3745 TCP_BIND_FANOUT_SIZE, KM_SLEEP);
3746 3746 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
3747 3747 TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);
3748 3748
3749 3749 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3750 3750 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
3751 3751 MUTEX_DEFAULT, NULL);
3752 3752 }
3753 3753
3754 3754 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3755 3755 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
3756 3756 MUTEX_DEFAULT, NULL);
3757 3757 }
3758 3758
3759 3759 /* TCP's IPsec code calls the packet dropper. */
3760 3760 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");
3761 3761
3762 3762 arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
3763 3763 tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
3764 3764 KM_SLEEP);
3765 3765 bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);
3766 3766
3767 3767 /*
3768 3768 * Note: To really walk the device tree you need the devinfo
3769 3769 * pointer to your device which is only available after probe/attach.
3770 3770 * The following is safe only because it uses ddi_root_node()
3771 3771 */
3772 3772 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
3773 3773 tcp_opt_obj.odb_opt_arr_cnt);
3774 3774
3775 3775 /*
3776 3776 * Initialize RFC 1948 secret values. This will probably be reset once
3777 3777 * by the boot scripts.
3778 3778 *
3779 3779 * Use NULL name, as the name is caught by the new lockstats.
3780 3780 *
3781 3781 * Initialize with some random, non-guessable string, like the global
3782 3782 * T_INFO_ACK.
3783 3783 */
3784 3784
3785 3785 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
3786 3786 sizeof (tcp_g_t_info_ack), tcps);
3787 3787
3788 3788 tcps->tcps_kstat = tcp_kstat2_init(stackid);
3789 3789 tcps->tcps_mibkp = tcp_kstat_init(stackid);
3790 3790
3791 3791 major = mod_name_to_major(INET_NAME);
3792 3792 error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
3793 3793 ASSERT(error == 0);
3794 3794 tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
3795 3795 ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
3796 3796 cv_init(&tcps->tcps_ixa_cleanup_ready_cv, NULL, CV_DEFAULT, NULL);
3797 3797 cv_init(&tcps->tcps_ixa_cleanup_done_cv, NULL, CV_DEFAULT, NULL);
3798 3798 mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);
3799 3799
3800 3800 mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
3801 3801 tcps->tcps_reclaim = B_FALSE;
3802 3802 tcps->tcps_reclaim_tid = 0;
3803 3803 tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;
3804 3804
3805 3805 /*
3806 3806 * ncpus is the current number of CPUs, which can be bigger than
3807 3807 * boot_ncpus. But we don't want to use ncpus to allocate all the
3808 3808 * tcp_stats_cpu_t at system boot up time since it will be 1. While
3809 3809 * we handle adding CPU in tcp_cpu_update(), it will be slow if
3810 3810 * there are many CPUs as we will be adding them 1 by 1.
3811 3811 *
3812 3812 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
3813 3813 * are not freed until the stack is going away. So there is no need
3814 3814 * to grab a lock to access the per CPU tcps_sc[x] pointer.
3815 3815 */
3816 3816 mutex_enter(&cpu_lock);
3817 3817 tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
3818 3818 mutex_exit(&cpu_lock);
3819 3819 tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
3820 3820 KM_SLEEP);
3821 3821 for (i = 0; i < tcps->tcps_sc_cnt; i++) {
3822 3822 tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
3823 3823 KM_SLEEP);
3824 3824 }
3825 3825
3826 3826 mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
3827 3827 list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
3828 3828 offsetof(tcp_listener_t, tl_link));
3829 3829
3830 3830 tcps->tcps_default_cc_algo = cc_load_algo(CC_DEFAULT_ALGO_NAME);
3831 3831 VERIFY3P(tcps->tcps_default_cc_algo, !=, NULL);
3832 3832
3833 3833 return (tcps);
3834 3834 }
3835 3835
3836 3836 /*
3837 3837 * Called when the IP module is about to be unloaded.
3838 3838 */
3839 3839 void
3840 3840 tcp_ddi_g_destroy(void)
3841 3841 {
3842 3842 tcp_g_kstat_fini(tcp_g_kstat);
3843 3843 tcp_g_kstat = NULL;
3844 3844 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));
3845 3845
3846 3846 mutex_destroy(&tcp_random_lock);
3847 3847
3848 3848 kmem_cache_destroy(tcp_timercache);
3849 3849 kmem_cache_destroy(tcp_notsack_blk_cache);
3850 3850
3851 3851 netstack_unregister(NS_TCP);
3852 3852 }
3853 3853
3854 3854 /*
3855 3855 * Free the TCP stack instance.
3856 3856 */
3857 3857 static void
3858 3858 tcp_stack_fini(netstackid_t stackid, void *arg)
3859 3859 {
3860 3860 tcp_stack_t *tcps = (tcp_stack_t *)arg;
3861 3861 int i;
3862 3862
3863 3863 freeb(tcps->tcps_ixa_cleanup_mp);
3864 3864 tcps->tcps_ixa_cleanup_mp = NULL;
3865 3865 cv_destroy(&tcps->tcps_ixa_cleanup_ready_cv);
3866 3866 cv_destroy(&tcps->tcps_ixa_cleanup_done_cv);
3867 3867 mutex_destroy(&tcps->tcps_ixa_cleanup_lock);
3868 3868
3869 3869 /*
3870 3870 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to restart
3871 3871 * the timer.
3872 3872 */
3873 3873 mutex_enter(&tcps->tcps_reclaim_lock);
3874 3874 tcps->tcps_reclaim = B_FALSE;
3875 3875 mutex_exit(&tcps->tcps_reclaim_lock);
3876 3876 if (tcps->tcps_reclaim_tid != 0)
3877 3877 (void) untimeout(tcps->tcps_reclaim_tid);
3878 3878 mutex_destroy(&tcps->tcps_reclaim_lock);
3879 3879
3880 3880 tcp_listener_conf_cleanup(tcps);
3881 3881
3882 3882 for (i = 0; i < tcps->tcps_sc_cnt; i++)
3883 3883 kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
3884 3884 kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));
3885 3885
3886 3886 kmem_free(tcps->tcps_propinfo_tbl,
3887 3887 tcp_propinfo_count * sizeof (mod_prop_info_t));
3888 3888 tcps->tcps_propinfo_tbl = NULL;
3889 3889
3890 3890 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3891 3891 ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
3892 3892 mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
3893 3893 }
3894 3894
3895 3895 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3896 3896 ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
3897 3897 mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
3898 3898 }
3899 3899
3900 3900 kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
3901 3901 tcps->tcps_bind_fanout = NULL;
3902 3902
3903 3903 kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
3904 3904 TCP_ACCEPTOR_FANOUT_SIZE);
3905 3905 tcps->tcps_acceptor_fanout = NULL;
3906 3906
3907 3907 mutex_destroy(&tcps->tcps_iss_key_lock);
3908 3908 mutex_destroy(&tcps->tcps_epriv_port_lock);
3909 3909
3910 3910 ip_drop_unregister(&tcps->tcps_dropper);
3911 3911
3912 3912 tcp_kstat2_fini(stackid, tcps->tcps_kstat);
3913 3913 tcps->tcps_kstat = NULL;
3914 3914
3915 3915 tcp_kstat_fini(stackid, tcps->tcps_mibkp);
3916 3916 tcps->tcps_mibkp = NULL;
3917 3917
3918 3918 ldi_ident_release(tcps->tcps_ldi_ident);
3919 3919 kmem_free(tcps, sizeof (*tcps));
3920 3920 }
3921 3921
3922 3922 /*
3923 3923 * Generate the ISS, taking into account that NDD changes may happen halfway through.
3924 3924 * (If the iss is not zero, set it.)
3925 3925 */
3926 3926
3927 3927 static void
3928 3928 tcp_iss_init(tcp_t *tcp)
3929 3929 {
3930 3930 MD5_CTX context;
3931 3931 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
3932 3932 uint32_t answer[4];
3933 3933 tcp_stack_t *tcps = tcp->tcp_tcps;
3934 3934 conn_t *connp = tcp->tcp_connp;
3935 3935
3936 3936 tcps->tcps_iss_incr_extra += (tcps->tcps_iss_incr >> 1);
3937 3937 tcp->tcp_iss = tcps->tcps_iss_incr_extra;
3938 3938 switch (tcps->tcps_strong_iss) {
3939 3939 case 2:
3940 3940 mutex_enter(&tcps->tcps_iss_key_lock);
3941 3941 context = tcps->tcps_iss_key;
3942 3942 mutex_exit(&tcps->tcps_iss_key_lock);
3943 3943 arg.ports = connp->conn_ports;
3944 3944 arg.src = connp->conn_laddr_v6;
3945 3945 arg.dst = connp->conn_faddr_v6;
3946 3946 MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
3947 3947 MD5Final((uchar_t *)answer, &context);
3948 3948 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
3949 3949 /*
3950 3950 * Now that we've hashed into a unique per-connection sequence
3951 3951 * space, add a random increment per strong_iss == 1. So I
3952 3952 * guess we'll have to...
3953 3953 */
3954 3954 /* FALLTHRU */
3955 3955 case 1:
3956 3956 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
3957 3957 break;
3958 3958 default:
3959 3959 tcp->tcp_iss += (uint32_t)gethrestime_sec() *
3960 3960 tcps->tcps_iss_incr;
3961 3961 break;
3962 3962 }
3963 3963 tcp->tcp_valid_bits = TCP_ISS_VALID;
3964 3964 tcp->tcp_fss = tcp->tcp_iss - 1;
3965 3965 tcp->tcp_suna = tcp->tcp_iss;
3966 3966 tcp->tcp_snxt = tcp->tcp_iss + 1;
3967 3967 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
3968 3968 tcp->tcp_csuna = tcp->tcp_snxt;
3969 3969 }
3970 3970
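The strong-ISS case (tcps_strong_iss == 2) follows the RFC 1948 idea: start from a slowly advancing clock component and add a keyed hash of the connection 4-tuple so each connection gets its own sequence space. Below is a hedged user-level sketch of that structure only; it uses a stand-in FNV-1a mix and an illustrative clock rate instead of the kernel's MD5-over-secret-cookie, and every name in it is made up for illustration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the keyed hash; the kernel hashes with MD5 over a secret cookie. */
static uint32_t
mix(const void *buf, size_t len, uint32_t h)
{
	const uint8_t *p = buf;

	while (len-- > 0) {
		h ^= *p++;
		h *= 16777619u;		/* FNV-1a prime */
	}
	return (h);
}

static uint32_t
iss_for(uint32_t secret, uint32_t laddr, uint16_t lport,
    uint32_t faddr, uint16_t fport)
{
	uint32_t h = 2166136261u ^ secret;

	h = mix(&laddr, sizeof (laddr), h);
	h = mix(&lport, sizeof (lport), h);
	h = mix(&faddr, sizeof (faddr), h);
	h = mix(&fport, sizeof (fport), h);

	/* Slowly advancing clock component keeps each sequence space moving. */
	return ((uint32_t)time(NULL) * 250000u + h);
}

int
main(void)
{
	printf("%u\n", iss_for(0xdeadbeef, 0x0a000001, 33000,
	    0x0a000002, 80));
	return (0);
}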
3971 3971 /*
3972 3972 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
3973 3973 * on the specified backing STREAMS q. Note that the caller may make the
3974 3974 * decision to call based on the tcp_t.tcp_flow_stopped value, which
3975 3975 * when checked outside the q's lock is only an advisory check ...
3976 3976 */
3977 3977 void
3978 3978 tcp_setqfull(tcp_t *tcp)
3979 3979 {
3980 3980 tcp_stack_t *tcps = tcp->tcp_tcps;
3981 3981 conn_t *connp = tcp->tcp_connp;
3982 3982
3983 3983 if (tcp->tcp_closed)
3984 3984 return;
3985 3985
3986 3986 conn_setqfull(connp, &tcp->tcp_flow_stopped);
3987 3987 if (tcp->tcp_flow_stopped)
3988 3988 TCP_STAT(tcps, tcp_flwctl_on);
3989 3989 }
3990 3990
3991 3991 void
3992 3992 tcp_clrqfull(tcp_t *tcp)
3993 3993 {
3994 3994 conn_t *connp = tcp->tcp_connp;
3995 3995
3996 3996 if (tcp->tcp_closed)
3997 3997 return;
3998 3998 conn_clrqfull(connp, &tcp->tcp_flow_stopped);
3999 3999 }
4000 4000
4001 4001 static int
4002 4002 tcp_squeue_switch(int val)
4003 4003 {
4004 4004 int rval = SQ_FILL;
4005 4005
4006 4006 switch (val) {
4007 4007 case 1:
4008 4008 rval = SQ_NODRAIN;
4009 4009 break;
4010 4010 case 2:
4011 4011 rval = SQ_PROCESS;
4012 4012 break;
4013 4013 default:
4014 4014 break;
4015 4015 }
4016 4016 return (rval);
4017 4017 }
4018 4018
4019 4019 /*
4020 4020 * This is called once for each squeue - globally for all stack
4021 4021 * instances.
4022 4022 */
4023 4023 static void
4024 4024 tcp_squeue_add(squeue_t *sqp)
4025 4025 {
4026 4026 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
4027 4027 sizeof (tcp_squeue_priv_t), KM_SLEEP);
4028 4028
4029 4029 *squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
4030 4030 if (tcp_free_list_max_cnt == 0) {
4031 4031 int tcp_ncpus = ((boot_max_ncpus == -1) ?
4032 4032 max_ncpus : boot_max_ncpus);
4033 4033
4034 4034 /*
4035 4035 * Limit the number of entries to 1% of available memory / tcp_ncpus
4036 4036 */
4037 4037 tcp_free_list_max_cnt = (freemem * PAGESIZE) /
4038 4038 (tcp_ncpus * sizeof (tcp_t) * 100);
4039 4039 }
4040 4040 tcp_time_wait->tcp_free_list_cnt = 0;
4041 4041 }
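The cap computed above is roughly 1% of free physical memory divided evenly across CPUs, expressed in tcp_t-sized entries. A worked user-level sketch of that arithmetic with assumed figures (all numbers are illustrative, not system values):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Assumed figures, for illustration only. */
	uint64_t freemem_pages = 4UL * 1024 * 1024;	/* ~16 GB of 4 KB pages */
	uint64_t pagesize = 4096;
	uint64_t ncpus = 16;
	uint64_t sizeof_tcp_t = 4096;			/* rough stand-in */

	/* 1% of free memory, split across CPUs, counted in tcp_t entries. */
	uint64_t max_cnt = (freemem_pages * pagesize) /
	    (ncpus * sizeof_tcp_t * 100);

	printf("per-squeue free list cap: %llu entries\n",
	    (unsigned long long)max_cnt);		/* prints 2621 */
	return (0);
}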
4042 4042 /*
4043 4043 * Return a Unix error if the TLI error is TSYSERR; otherwise return a negative
4044 4044 * TLI error.
4045 4045 */
4046 4046 int
4047 4047 tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
4048 4048 boolean_t bind_to_req_port_only)
4049 4049 {
4050 4050 int error;
4051 4051 tcp_t *tcp = connp->conn_tcp;
4052 4052
4053 4053 if (tcp->tcp_state >= TCPS_BOUND) {
4054 4054 if (connp->conn_debug) {
4055 4055 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4056 4056 "tcp_bind: bad state, %d", tcp->tcp_state);
4057 4057 }
4058 4058 return (-TOUTSTATE);
4059 4059 }
4060 4060
4061 4061 error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
4062 4062 if (error != 0)
4063 4063 return (error);
4064 4064
4065 4065 ASSERT(tcp->tcp_state == TCPS_BOUND);
4066 4066 tcp->tcp_conn_req_max = 0;
4067 4067 return (0);
4068 4068 }
4069 4069
4070 4070 /*
4071 4071 * If the return value from this function is positive, it's a UNIX error.
4072 4072 * Otherwise, if it's negative, then the absolute value is a TLI error.
4073 4073 * The TPI routine tcp_tpi_connect() is a wrapper function for this.
4074 4074 */
4075 4075 int
4076 4076 tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
4077 4077 cred_t *cr, pid_t pid)
4078 4078 {
4079 4079 tcp_t *tcp = connp->conn_tcp;
4080 4080 sin_t *sin = (sin_t *)sa;
4081 4081 sin6_t *sin6 = (sin6_t *)sa;
4082 4082 ipaddr_t *dstaddrp;
4083 4083 in_port_t dstport;
4084 4084 uint_t srcid;
4085 4085 int error;
4086 4086 uint32_t mss;
4087 4087 mblk_t *syn_mp;
4088 4088 tcp_stack_t *tcps = tcp->tcp_tcps;
4089 4089 int32_t oldstate;
4090 4090 ip_xmit_attr_t *ixa = connp->conn_ixa;
4091 4091
4092 4092 oldstate = tcp->tcp_state;
4093 4093
4094 4094 switch (len) {
4095 4095 default:
4096 4096 /*
4097 4097 * Should never happen
4098 4098 */
4099 4099 return (EINVAL);
4100 4100
4101 4101 case sizeof (sin_t):
4102 4102 sin = (sin_t *)sa;
4103 4103 if (sin->sin_port == 0) {
4104 4104 return (-TBADADDR);
4105 4105 }
4106 4106 if (connp->conn_ipv6_v6only) {
4107 4107 return (EAFNOSUPPORT);
4108 4108 }
4109 4109 break;
4110 4110
4111 4111 case sizeof (sin6_t):
4112 4112 sin6 = (sin6_t *)sa;
4113 4113 if (sin6->sin6_port == 0) {
4114 4114 return (-TBADADDR);
4115 4115 }
4116 4116 break;
4117 4117 }
4118 4118 /*
4119 4119 * If we're connecting to an IPv4-mapped IPv6 address, we need to
4120 4120 * make sure that the conn_ipversion is IPV4_VERSION. We
4121 4121 * need to do this before we call tcp_bindi() so that the port lookup
4122 4122 * code will look for ports in the correct port space (IPv4 and
4123 4123 * IPv6 have separate port spaces).
4124 4124 */
4125 4125 if (connp->conn_family == AF_INET6 &&
4126 4126 connp->conn_ipversion == IPV6_VERSION &&
4127 4127 IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4128 4128 if (connp->conn_ipv6_v6only)
4129 4129 return (EADDRNOTAVAIL);
4130 4130
4131 4131 connp->conn_ipversion = IPV4_VERSION;
4132 4132 }
4133 4133
4134 4134 switch (tcp->tcp_state) {
4135 4135 case TCPS_LISTEN:
4136 4136 /*
4137 4137 * Listening sockets are not allowed to issue connect().
4138 4138 */
4139 4139 if (IPCL_IS_NONSTR(connp))
4140 4140 return (EOPNOTSUPP);
4141 4141 /* FALLTHRU */
4142 4142 case TCPS_IDLE:
4143 4143 /*
4144 4144 * We support quick connect, refer to comments in
4145 4145 * tcp_connect_*()
4146 4146 */
4147 4147 /* FALLTHRU */
4148 4148 case TCPS_BOUND:
4149 4149 break;
4150 4150 default:
4151 4151 return (-TOUTSTATE);
4152 4152 }
4153 4153
4154 4154 /*
4155 4155 * We update our cred/cpid based on the caller of connect
4156 4156 */
4157 4157 if (connp->conn_cred != cr) {
4158 4158 crhold(cr);
4159 4159 crfree(connp->conn_cred);
4160 4160 connp->conn_cred = cr;
4161 4161 }
4162 4162 connp->conn_cpid = pid;
4163 4163
4164 4164 /* Cache things in the ixa without any refhold */
4165 4165 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
4166 4166 ixa->ixa_cred = cr;
4167 4167 ixa->ixa_cpid = pid;
4168 4168 if (is_system_labeled()) {
4169 4169 /* We need to restart with a label based on the cred */
4170 4170 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
4171 4171 }
4172 4172
4173 4173 if (connp->conn_family == AF_INET6) {
4174 4174 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4175 4175 error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
4176 4176 sin6->sin6_port, sin6->sin6_flowinfo,
4177 4177 sin6->__sin6_src_id, sin6->sin6_scope_id);
4178 4178 } else {
4179 4179 /*
4180 4180 * Destination address is an IPv4-mapped IPv6 address.
4181 4181 * The bound source address should be unspecified or an
4182 4182 * IPv4-mapped address as well.
4183 4183 */
4184 4184 if (!IN6_IS_ADDR_UNSPECIFIED(
4185 4185 &connp->conn_bound_addr_v6) &&
4186 4186 !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
4187 4187 return (EADDRNOTAVAIL);
4188 4188 }
4189 4189 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
4190 4190 dstport = sin6->sin6_port;
4191 4191 srcid = sin6->__sin6_src_id;
4192 4192 error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
4193 4193 srcid);
4194 4194 }
4195 4195 } else {
4196 4196 dstaddrp = &sin->sin_addr.s_addr;
4197 4197 dstport = sin->sin_port;
4198 4198 srcid = 0;
4199 4199 error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
4200 4200 }
4201 4201
4202 4202 if (error != 0)
4203 4203 goto connect_failed;
4204 4204
4205 4205 CL_INET_CONNECT(connp, B_TRUE, error);
4206 4206 if (error != 0)
4207 4207 goto connect_failed;
4208 4208
4209 4209 /* connect succeeded */
4210 4210 TCPS_BUMP_MIB(tcps, tcpActiveOpens);
4211 4211 tcp->tcp_active_open = 1;
4212 4212
4213 4213 /*
4214 4214 * tcp_set_destination() does not adjust for TCP/IP header length.
4215 4215 */
4216 4216 mss = tcp->tcp_mss - connp->conn_ht_iphc_len;
4217 4217
4218 4218 /*
4219 4219 * Just make sure our rwnd is at least tcps_recv_hiwat_minmss * MSS large, and round up
4220 4220 * to the nearest MSS.
4221 4221 *
4222 4222 * We do the round up here because we need to get the interface MTU
4223 4223 * first before we can do the round up.
4224 4224 */
4225 4225 tcp->tcp_rwnd = connp->conn_rcvbuf;
4226 4226 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
4227 4227 tcps->tcps_recv_hiwat_minmss * mss);
4228 4228 connp->conn_rcvbuf = tcp->tcp_rwnd;
4229 4229 tcp_set_ws_value(tcp);
4230 4230 tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
4231 4231 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
4232 4232 tcp->tcp_snd_ws_ok = B_TRUE;
4233 4233
4234 4234 /*
4235 4235 * Set tcp_snd_ts_ok to true
4236 4236 * so that tcp_xmit_mp will
4237 4237 * include the timestamp
4238 4238 * option in the SYN segment.
4239 4239 */
4240 4240 if (tcps->tcps_tstamp_always ||
4241 4241 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
4242 4242 tcp->tcp_snd_ts_ok = B_TRUE;
4243 4243 }
4244 4244
4245 4245 /*
4246 4246 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
4247 4247 * the SACK metric is set. So here we just check the per stack SACK
4248 4248 * permitted param.
4249 4249 */
4250 4250 if (tcps->tcps_sack_permitted == 2) {
4251 4251 ASSERT(tcp->tcp_num_sack_blk == 0);
4252 4252 ASSERT(tcp->tcp_notsack_list == NULL);
4253 4253 tcp->tcp_snd_sack_ok = B_TRUE;
4254 4254 }
4255 4255
4256 4256 /*
4257 4257 * Should we use ECN? Note that the current
4258 4258 * default value (SunOS 5.9) of tcp_ecn_permitted
4259 4259 * is 1. The reason for doing this is that there
4260 4260 * is equipment out there that will drop ECN-
4261 4261 * enabled IP packets. Setting it to 1 avoids
4262 4262 * compatibility problems.
4263 4263 */
4264 4264 if (tcps->tcps_ecn_permitted == 2)
4265 4265 tcp->tcp_ecn_ok = B_TRUE;
4266 4266
4267 4267 /* Trace change from BOUND -> SYN_SENT here */
4268 4268 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4269 4269 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4270 4270 int32_t, TCPS_BOUND);
4271 4271
4272 4272 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4273 4273 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
4274 4274 tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
4275 4275 if (syn_mp != NULL) {
4276 4276 /*
4277 4277 * We must bump the generation before sending the syn
4278 4278 * to ensure that we use the right generation in case
4279 4279 * this thread issues a "connected" up call.
4280 4280 */
4281 4281 SOCK_CONNID_BUMP(tcp->tcp_connid);
4282 4282 /*
4283 4283 * DTrace sending the first SYN as a
4284 4284 * tcp:::connect-request event.
4285 4285 */
4286 4286 DTRACE_TCP5(connect__request, mblk_t *, NULL,
4287 4287 ip_xmit_attr_t *, connp->conn_ixa,
4288 4288 void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
4289 4289 tcph_t *,
4290 4290 &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
4291 4291 tcp_send_data(tcp, syn_mp);
4292 4292 }
4293 4293
4294 4294 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4295 4295 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4296 4296 return (0);
4297 4297
4298 4298 connect_failed:
4299 4299 connp->conn_faddr_v6 = ipv6_all_zeros;
4300 4300 connp->conn_fport = 0;
4301 4301 tcp->tcp_state = oldstate;
4302 4302 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4303 4303 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4304 4304 return (error);
4305 4305 }
4306 4306
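The IPv4-mapped handling above is what an AF_INET6 socket exercises when it connects to an IPv4 destination: the connection is downgraded to IPV4_VERSION unless IPV6_V6ONLY is set, in which case tcp_do_connect() returns EADDRNOTAVAIL. A hedged userland sketch using the standard sockets API (the address is from the TEST-NET range and purely illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int
main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct sockaddr_in6 sin6;

	if (fd < 0) {
		perror("socket");
		return (1);
	}

	memset(&sin6, 0, sizeof (sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(80);
	/* ::ffff:192.0.2.1 - an IPv4 destination reached via an AF_INET6 socket */
	if (inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr) != 1) {
		fprintf(stderr, "inet_pton failed\n");
		(void) close(fd);
		return (1);
	}

	/*
	 * With IPV6_V6ONLY set on the socket this connect is rejected;
	 * otherwise the kernel switches the connection to the IPv4 path
	 * as shown in tcp_do_connect() above.
	 */
	if (connect(fd, (struct sockaddr *)&sin6, sizeof (sin6)) != 0)
		perror("connect");

	(void) close(fd);
	return (0);
}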
4307 4307 int
4308 4308 tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
4309 4309 int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
4310 4310 {
4311 4311 tcp_t *tcp = connp->conn_tcp;
4312 4312 int error = 0;
4313 4313 tcp_stack_t *tcps = tcp->tcp_tcps;
4314 4314 int32_t oldstate;
4315 4315
4316 4316 /* All Solaris components should pass a cred for this operation. */
4317 4317 ASSERT(cr != NULL);
4318 4318
4319 4319 if (tcp->tcp_state >= TCPS_BOUND) {
4320 4320 if ((tcp->tcp_state == TCPS_BOUND ||
4321 4321 tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
4322 4322 /*
4323 4323 * Handle listen() increasing backlog.
4324 4324 * This is more "liberal" than what the TPI spec
4325 4325 * requires but is needed to avoid a t_unbind
4326 4326 * when handling listen() since the port number
4327 4327 * might be "stolen" between the unbind and bind.
4328 4328 */
4329 4329 goto do_listen;
4330 4330 }
4331 4331 if (connp->conn_debug) {
4332 4332 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4333 4333 "tcp_listen: bad state, %d", tcp->tcp_state);
4334 4334 }
4335 4335 return (-TOUTSTATE);
4336 4336 } else {
4337 - if (sa == NULL) {
4338 - sin6_t addr;
4339 - sin_t *sin;
4340 - sin6_t *sin6;
4337 + sin6_t addr;
4338 + sin_t *sin;
4339 + sin6_t *sin6;
4341 4340
4341 + if (sa == NULL) {
4342 4342 ASSERT(IPCL_IS_NONSTR(connp));
4343 4343 /* Do an implicit bind: Request for a generic port. */
4344 4344 if (connp->conn_family == AF_INET) {
4345 4345 len = sizeof (sin_t);
4346 4346 sin = (sin_t *)&addr;
4347 4347 *sin = sin_null;
4348 4348 sin->sin_family = AF_INET;
4349 4349 } else {
4350 4350 ASSERT(connp->conn_family == AF_INET6);
4351 4351 len = sizeof (sin6_t);
4352 4352 sin6 = (sin6_t *)&addr;
4353 4353 *sin6 = sin6_null;
4354 4354 sin6->sin6_family = AF_INET6;
4355 4355 }
4356 4356 sa = (struct sockaddr *)&addr;
4357 4357 }
4358 4358
4359 4359 error = tcp_bind_check(connp, sa, len, cr,
4360 4360 bind_to_req_port_only);
4361 4361 if (error)
4362 4362 return (error);
4363 4363 /* Fall through and do the fanout insertion */
4364 4364 }
4365 4365
4366 4366 do_listen:
4367 4367 ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
4368 4368 tcp->tcp_conn_req_max = backlog;
4369 4369 if (tcp->tcp_conn_req_max) {
4370 4370 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
4371 4371 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
4372 4372 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
4373 4373 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;
4374 4374 /*
4375 4375 * If this is a listener, do not reset the eager list
4376 4376 * and other state. Note that we don't check if the
4377 4377 * existing eager list meets the new tcp_conn_req_max
4378 4378 * requirement.
4379 4379 */
4380 4380 if (tcp->tcp_state != TCPS_LISTEN) {
4381 4381 tcp->tcp_state = TCPS_LISTEN;
4382 4382 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4383 4383 connp->conn_ixa, void, NULL, tcp_t *, tcp,
4384 4384 void, NULL, int32_t, TCPS_BOUND);
4385 4385 /* Initialize the chain. Don't need the eager_lock */
4386 4386 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
4387 4387 tcp->tcp_eager_next_drop_q0 = tcp;
4388 4388 tcp->tcp_eager_prev_drop_q0 = tcp;
4389 4389 tcp->tcp_second_ctimer_threshold =
4390 4390 tcps->tcps_ip_abort_linterval;
4391 4391 }
4392 4392 }
4393 4393
4394 4394 /*
4395 4395 * We need to make sure that the conn_recv is set to a non-null
4396 4396 * value before we insert the conn into the classifier table.
4397 4397 * This is to avoid a race with an incoming packet which does an
4398 4398 * ipcl_classify().
4399 4399 * We initially set it to tcp_input_listener_unbound to try to
4400 4400 * pick a good squeue for the listener when the first SYN arrives.
4401 4401 * tcp_input_listener_unbound sets it to tcp_input_listener on that
4402 4402 * first SYN.
4403 4403 */
4404 4404 connp->conn_recv = tcp_input_listener_unbound;
4405 4405
4406 4406 /* Insert the listener in the classifier table */
4407 4407 error = ip_laddr_fanout_insert(connp);
4408 4408 if (error != 0) {
4409 4409 /* Undo the bind - release the port number */
4410 4410 oldstate = tcp->tcp_state;
4411 4411 tcp->tcp_state = TCPS_IDLE;
4412 4412 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4413 4413 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4414 4414 int32_t, oldstate);
4415 4415 connp->conn_bound_addr_v6 = ipv6_all_zeros;
4416 4416
4417 4417 connp->conn_laddr_v6 = ipv6_all_zeros;
4418 4418 connp->conn_saddr_v6 = ipv6_all_zeros;
4419 4419 connp->conn_ports = 0;
4420 4420
4421 4421 if (connp->conn_anon_port) {
4422 4422 zone_t *zone;
4423 4423
4424 4424 zone = crgetzone(cr);
4425 4425 connp->conn_anon_port = B_FALSE;
4426 4426 (void) tsol_mlp_anon(zone, connp->conn_mlp_type,
4427 4427 connp->conn_proto, connp->conn_lport, B_FALSE);
4428 4428 }
4429 4429 connp->conn_mlp_type = mlptSingle;
4430 4430
4431 4431 tcp_bind_hash_remove(tcp);
4432 4432 return (error);
4433 4433 } else {
4434 4434 /*
4435 4435 * If there is a connection limit, allocate and initialize
4436 4436 * the counter struct. Note that since listen can be called
4437 4437 * multiple times, the struct may have been already allocated.
4438 4438 */
4439 4439 if (!list_is_empty(&tcps->tcps_listener_conf) &&
4440 4440 tcp->tcp_listen_cnt == NULL) {
4441 4441 tcp_listen_cnt_t *tlc;
4442 4442 uint32_t ratio;
4443 4443
4444 4444 ratio = tcp_find_listener_conf(tcps,
4445 4445 ntohs(connp->conn_lport));
4446 4446 if (ratio != 0) {
4447 4447 uint32_t mem_ratio, tot_buf;
4448 4448
4449 4449 tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
4450 4450 KM_SLEEP);
4451 4451 /*
4452 4452 * Calculate the connection limit based on
4453 4453 * the configured ratio and maxusers. Maxusers
4454 4454 * is calculated based on memory size,
4455 4455 * ~ 1 user per MB. Note that the conn_rcvbuf
4456 4456 * and conn_sndbuf may change after a
4457 4457 * connection is accepted. So what we have
4458 4458 * is only an approximation.
4459 4459 */
4460 4460 if ((tot_buf = connp->conn_rcvbuf +
4461 4461 connp->conn_sndbuf) < MB) {
4462 4462 mem_ratio = MB / tot_buf;
4463 4463 tlc->tlc_max = maxusers / ratio *
4464 4464 mem_ratio;
4465 4465 } else {
4466 4466 mem_ratio = tot_buf / MB;
4467 4467 tlc->tlc_max = maxusers / ratio /
4468 4468 mem_ratio;
4469 4469 }
4470 4470 /* At least we should allow two connections! */
4471 4471 if (tlc->tlc_max <= tcp_min_conn_listener)
4472 4472 tlc->tlc_max = tcp_min_conn_listener;
4473 4473 tlc->tlc_cnt = 1;
4474 4474 tlc->tlc_drop = 0;
4475 4475 tcp->tcp_listen_cnt = tlc;
4476 4476 }
4477 4477 }
4478 4478 }
4479 4479 return (error);
4480 4480 }
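The per-listener connection limit above scales maxusers by the configured ratio and by how the connection's combined send and receive buffers compare to 1 MB. A worked user-level sketch of the same arithmetic (all inputs are assumed, illustrative figures):

#include <stdio.h>
#include <stdint.h>

#define MB	(1024 * 1024)

static uint32_t
tlc_max_for(uint32_t maxusers, uint32_t ratio, uint32_t rcvbuf,
    uint32_t sndbuf, uint32_t min_listener)
{
	uint32_t tot_buf = rcvbuf + sndbuf;
	uint32_t mem_ratio, max;

	if (tot_buf < MB) {		/* small buffers allow more connections */
		mem_ratio = MB / tot_buf;
		max = maxusers / ratio * mem_ratio;
	} else {			/* large buffers allow fewer */
		mem_ratio = tot_buf / MB;
		max = maxusers / ratio / mem_ratio;
	}
	if (max <= min_listener)	/* always allow at least the minimum */
		max = min_listener;
	return (max);
}

int
main(void)
{
	/* e.g. maxusers of 16384, ratio 4, 64 KB buffers each way. */
	printf("%u\n", tlc_max_for(16384, 4, 65536, 65536, 2));	/* 32768 */
	return (0);
}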
129 lines elided