/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_stats.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>

static int	tcp_snmp_state(tcp_t *);
static int	tcp_kstat_update(kstat_t *, int);
static int	tcp_kstat2_update(kstat_t *, int);
static void	tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);

static void	tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
static void	tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
static void	tcp_clr_stats(tcp_stat_t *);

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;

/* Translate TCP state to MIB2 TCP state. */
static int
tcp_snmp_state(tcp_t *tcp)
{
	if (tcp == NULL)
		return (0);

	switch (tcp->tcp_state) {
	case TCPS_CLOSED:
	case TCPS_IDLE:	/* RFC1213 doesn't have analogue for IDLE & BOUND */
	case TCPS_BOUND:
		return (MIB2_TCP_closed);
	case TCPS_LISTEN:
		return (MIB2_TCP_listen);
	case TCPS_SYN_SENT:
		return (MIB2_TCP_synSent);
	case TCPS_SYN_RCVD:
		return (MIB2_TCP_synReceived);
	case TCPS_ESTABLISHED:
		return (MIB2_TCP_established);
	case TCPS_CLOSE_WAIT:
		return (MIB2_TCP_closeWait);
	case TCPS_FIN_WAIT_1:
		return (MIB2_TCP_finWait1);
	case TCPS_CLOSING:
		return (MIB2_TCP_closing);
	case TCPS_LAST_ACK:
		return (MIB2_TCP_lastAck);
	case TCPS_FIN_WAIT_2:
		return (MIB2_TCP_finWait2);
	case TCPS_TIME_WAIT:
		return (MIB2_TCP_timeWait);
	default:
		return (0);
	}
}

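/*
 * tcp_snmp_get() below reports the TCP MIB: one fixed-size mib2_tcp_t plus
 * four tables (IPv4 connections, IPv6 connections and the two MLP attribute
 * tables).  Each piece is sent upstream with qreply() as a separate message
 * whose opthdr identifies the table, and a copy of the original control
 * message (mp2ctl) is handed back to the caller.
 */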

/*
 * Return SNMP data in the mpdata buffer (mpctl->b_cont).
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t		ispriv;
	zoneid_t		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;
	size_t			tcp_mib_size, tce_size, tce6_size;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	if (legacy_req) {
		tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
		tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
		tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
	} else {
		tcp_mib_size = sizeof (mib2_tcp_t);
		tce_size = sizeof (mib2_tcpConnEntry_t);
		tce6_size = sizeof (mib2_tcp6ConnEntry_t);
	}

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);	/* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
				tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
				tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
				tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
				tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
				if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
					tce6.tcp6ConnIfIndex =
					    connp->conn_ixa->ixa_scopeid;
				} else {
					tce6.tcp6ConnIfIndex = connp->conn_bound_if;
				}
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce6.tcp6ConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce6.tcp6ConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes. How to fix?
					 * Why not compute the difference only?
					 */
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_suna = 0;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce6.tcp6ConnEntryInfo.ce_rack = 0;
				}

				tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

				tce6.tcp6ConnCreationProcess =
				    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce6.tcp6ConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
				    &mp6_conn_tail, (char *)&tce6, tce6_size);

				mlp.tme_connidx = v6_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(mp6_attr_ctl->b_cont,
					    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes. How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, tce_size);

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
	SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);

	/*
	 * Synchronize 32- and 64-bit counters.  Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP.  The new 64-bit
	 * counters are used instead, so the old counters' values in
	 * tcp_sc_mib are always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}

/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
/* ARGSUSED */
int
tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
{
	mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;

	switch (level) {
	case MIB2_TCP:
		switch (name) {
		case 13:	/* tcpConnEntry (the tcpConnTable) */
			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
				return (0);
			/* TODO: delete entry defined by tce */
			return (1);
		default:
			return (0);
		}
	default:
		return (1);
	}
}

/*
 * TCP Kstats implementation
 */
void *
tcp_kstat_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
		{ "maxConn",		KSTAT_DATA_INT32, 0 },
		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
		{ "outAck",		KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
		{ "outControl",		KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

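/*
 * ks_update callback for the MIB2 named kstat created by tcp_kstat_init().
 * It walks the global conn hash to recompute currEstab and then folds the
 * per-CPU MIB counters together via tcp_sum_mib().
 */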
static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t		*tcp;
	connf_t		*connfp;
	conn_t		*connp;
	int		i;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	ip_stack_t	*ipst;
	mib2_tcp_t	tcp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;
	tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}
	bzero(&tcp_mib, sizeof (tcp_mib));
	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
	SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));

	tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}

/*
 * kstats related to squeues i.e. not per IP instance
 */
void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t	*ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

void *
tcp_kstat2_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_clean_death_nondetached", KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_no_listener",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendrop",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendropq0",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_wsrv_called",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_flwctl_on",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_on",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_off",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_backoff",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_flowctl",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_urg",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_putnext",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_aborted",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_sock_fallback",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_enabled",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_disabled",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_times",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_pkt_out",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_cnt_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_mem_drop",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_ack_syn",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_rst_unsent",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_reclaim_cnt",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_reass_timeout",		KSTAT_DATA_UINT64, 0 },
#ifdef TCP_DEBUG_COUNTER
		{ "tcp_time_wait",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_rput_time_wait",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_calls",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timermp_freed",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_push_timer_cnt",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_ack_timer_cnt",		KSTAT_DATA_UINT64, 0 },
#endif
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
	    stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_private = (void *)(uintptr_t)stackid;
	ksp->ks_update = tcp_kstat2_update;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Sum up all per CPU tcp_stat_t kstat counters.
 */
static int
tcp_kstat2_update(kstat_t *kp, int rw)
{
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	tcp_stat_t	*stats;
	int		i;
	int		cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	stats = (tcp_stat_t *)kp->ks_data;
	tcp_clr_stats(stats);

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * To add stats from one mib2_tcp_t to another.  Static fields are not added.
 * The caller should set them up properly.
 */
static void
tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
{
	to->tcpActiveOpens += from->tcpActiveOpens;
	to->tcpPassiveOpens += from->tcpPassiveOpens;
	to->tcpAttemptFails += from->tcpAttemptFails;
	to->tcpEstabResets += from->tcpEstabResets;
	to->tcpInSegs += from->tcpInSegs;
	to->tcpOutSegs += from->tcpOutSegs;
	to->tcpRetransSegs += from->tcpRetransSegs;
	to->tcpOutRsts += from->tcpOutRsts;

	to->tcpOutDataSegs += from->tcpOutDataSegs;
	to->tcpOutDataBytes += from->tcpOutDataBytes;
	to->tcpRetransBytes += from->tcpRetransBytes;
	to->tcpOutAck += from->tcpOutAck;
	to->tcpOutAckDelayed += from->tcpOutAckDelayed;
	to->tcpOutUrg += from->tcpOutUrg;
	to->tcpOutWinUpdate += from->tcpOutWinUpdate;
	to->tcpOutWinProbe += from->tcpOutWinProbe;
	to->tcpOutControl += from->tcpOutControl;
	to->tcpOutFastRetrans += from->tcpOutFastRetrans;

	to->tcpInAckBytes += from->tcpInAckBytes;
	to->tcpInDupAck += from->tcpInDupAck;
	to->tcpInAckUnsent += from->tcpInAckUnsent;
	to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
	to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
	to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
	to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
	to->tcpInDataDupSegs += from->tcpInDataDupSegs;
	to->tcpInDataDupBytes += from->tcpInDataDupBytes;
	to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
	to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
	to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
	to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
	to->tcpInWinProbe += from->tcpInWinProbe;
	to->tcpInWinUpdate += from->tcpInWinUpdate;
	to->tcpInClosed += from->tcpInClosed;

	to->tcpRttNoUpdate += from->tcpRttNoUpdate;
	to->tcpRttUpdate += from->tcpRttUpdate;
	to->tcpTimRetrans += from->tcpTimRetrans;
	to->tcpTimRetransDrop += from->tcpTimRetransDrop;
	to->tcpTimKeepalive += from->tcpTimKeepalive;
	to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
	to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
	to->tcpListenDrop += from->tcpListenDrop;
	to->tcpListenDropQ0 += from->tcpListenDropQ0;
	to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
	to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
	to->tcpHCInSegs += from->tcpHCInSegs;
	to->tcpHCOutSegs += from->tcpHCOutSegs;
}

/*
 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats.  The
 * caller should initialize the target mib2_tcp_t properly as this function
 * just adds up all the per CPU stats.
 */
static void
tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
{
	int i;
	int cnt;

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
}

/*
 * To set all tcp_stat_t counters to 0.
 */
static void
tcp_clr_stats(tcp_stat_t *stats)
{
	stats->tcp_time_wait_syn_success.value.ui64 = 0;
	stats->tcp_clean_death_nondetached.value.ui64 = 0;
	stats->tcp_eager_blowoff_q.value.ui64 = 0;
	stats->tcp_eager_blowoff_q0.value.ui64 = 0;
	stats->tcp_no_listener.value.ui64 = 0;
	stats->tcp_listendrop.value.ui64 = 0;
	stats->tcp_listendropq0.value.ui64 = 0;
	stats->tcp_wsrv_called.value.ui64 = 0;
	stats->tcp_flwctl_on.value.ui64 = 0;
	stats->tcp_timer_fire_early.value.ui64 = 0;
	stats->tcp_timer_fire_miss.value.ui64 = 0;
	stats->tcp_zcopy_on.value.ui64 = 0;
	stats->tcp_zcopy_off.value.ui64 = 0;
	stats->tcp_zcopy_backoff.value.ui64 = 0;
	stats->tcp_fusion_flowctl.value.ui64 = 0;
	stats->tcp_fusion_backenabled.value.ui64 = 0;
	stats->tcp_fusion_urg.value.ui64 = 0;
	stats->tcp_fusion_putnext.value.ui64 = 0;
	stats->tcp_fusion_unfusable.value.ui64 = 0;
	stats->tcp_fusion_aborted.value.ui64 = 0;
	stats->tcp_fusion_unqualified.value.ui64 = 0;
	stats->tcp_fusion_rrw_busy.value.ui64 = 0;
	stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
	stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
	stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
	stats->tcp_sock_fallback.value.ui64 = 0;
	stats->tcp_lso_enabled.value.ui64 = 0;
	stats->tcp_lso_disabled.value.ui64 = 0;
	stats->tcp_lso_times.value.ui64 = 0;
	stats->tcp_lso_pkt_out.value.ui64 = 0;
	stats->tcp_listen_cnt_drop.value.ui64 = 0;
	stats->tcp_listen_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_ack_syn.value.ui64 = 0;
	stats->tcp_rst_unsent.value.ui64 = 0;
	stats->tcp_reclaim_cnt.value.ui64 = 0;
	stats->tcp_reass_timeout.value.ui64 = 0;

#ifdef TCP_DEBUG_COUNTER
	stats->tcp_time_wait.value.ui64 = 0;
	stats->tcp_rput_time_wait.value.ui64 = 0;
	stats->tcp_detach_time_wait.value.ui64 = 0;
	stats->tcp_timeout_calls.value.ui64 = 0;
	stats->tcp_timeout_cached_alloc.value.ui64 = 0;
	stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
	stats->tcp_timeout_canceled.value.ui64 = 0;
	stats->tcp_timermp_freed.value.ui64 = 0;
	stats->tcp_push_timer_cnt.value.ui64 = 0;
	stats->tcp_ack_timer_cnt.value.ui64 = 0;
#endif
}

/*
 * To add counters from the per CPU tcp_stat_counter_t to the stack
 * tcp_stat_t.
 */
static void
tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
{
	to->tcp_time_wait_syn_success.value.ui64 +=
	    from->tcp_time_wait_syn_success;
	to->tcp_clean_death_nondetached.value.ui64 +=
	    from->tcp_clean_death_nondetached;
	to->tcp_eager_blowoff_q.value.ui64 +=
	    from->tcp_eager_blowoff_q;
	to->tcp_eager_blowoff_q0.value.ui64 +=
	    from->tcp_eager_blowoff_q0;
	to->tcp_no_listener.value.ui64 +=
	    from->tcp_no_listener;
	to->tcp_listendrop.value.ui64 +=
	    from->tcp_listendrop;
	to->tcp_listendropq0.value.ui64 +=
	    from->tcp_listendropq0;
	to->tcp_wsrv_called.value.ui64 +=
	    from->tcp_wsrv_called;
	to->tcp_flwctl_on.value.ui64 +=
	    from->tcp_flwctl_on;
	to->tcp_timer_fire_early.value.ui64 +=
	    from->tcp_timer_fire_early;
	to->tcp_timer_fire_miss.value.ui64 +=
	    from->tcp_timer_fire_miss;
	to->tcp_zcopy_on.value.ui64 +=
	    from->tcp_zcopy_on;
	to->tcp_zcopy_off.value.ui64 +=
	    from->tcp_zcopy_off;
	to->tcp_zcopy_backoff.value.ui64 +=
	    from->tcp_zcopy_backoff;
	to->tcp_fusion_flowctl.value.ui64 +=
	    from->tcp_fusion_flowctl;
	to->tcp_fusion_backenabled.value.ui64 +=
	    from->tcp_fusion_backenabled;
	to->tcp_fusion_urg.value.ui64 +=
	    from->tcp_fusion_urg;
	to->tcp_fusion_putnext.value.ui64 +=
	    from->tcp_fusion_putnext;
	to->tcp_fusion_unfusable.value.ui64 +=
	    from->tcp_fusion_unfusable;
	to->tcp_fusion_aborted.value.ui64 +=
	    from->tcp_fusion_aborted;
	to->tcp_fusion_unqualified.value.ui64 +=
	    from->tcp_fusion_unqualified;
	to->tcp_fusion_rrw_busy.value.ui64 +=
	    from->tcp_fusion_rrw_busy;
	to->tcp_fusion_rrw_msgcnt.value.ui64 +=
	    from->tcp_fusion_rrw_msgcnt;
	to->tcp_fusion_rrw_plugged.value.ui64 +=
	    from->tcp_fusion_rrw_plugged;
	to->tcp_in_ack_unsent_drop.value.ui64 +=
	    from->tcp_in_ack_unsent_drop;
	to->tcp_sock_fallback.value.ui64 +=
	    from->tcp_sock_fallback;
	to->tcp_lso_enabled.value.ui64 +=
	    from->tcp_lso_enabled;
	to->tcp_lso_disabled.value.ui64 +=
	    from->tcp_lso_disabled;
	to->tcp_lso_times.value.ui64 +=
	    from->tcp_lso_times;
	to->tcp_lso_pkt_out.value.ui64 +=
	    from->tcp_lso_pkt_out;
	to->tcp_listen_cnt_drop.value.ui64 +=
	    from->tcp_listen_cnt_drop;
	to->tcp_listen_mem_drop.value.ui64 +=
	    from->tcp_listen_mem_drop;
	to->tcp_zwin_mem_drop.value.ui64 +=
	    from->tcp_zwin_mem_drop;
	to->tcp_zwin_ack_syn.value.ui64 +=
	    from->tcp_zwin_ack_syn;
	to->tcp_rst_unsent.value.ui64 +=
	    from->tcp_rst_unsent;
	to->tcp_reclaim_cnt.value.ui64 +=
	    from->tcp_reclaim_cnt;
	to->tcp_reass_timeout.value.ui64 +=
	    from->tcp_reass_timeout;

#ifdef TCP_DEBUG_COUNTER
	to->tcp_time_wait.value.ui64 +=
	    from->tcp_time_wait;
	to->tcp_rput_time_wait.value.ui64 +=
	    from->tcp_rput_time_wait;
	to->tcp_detach_time_wait.value.ui64 +=
	    from->tcp_detach_time_wait;
	to->tcp_timeout_calls.value.ui64 +=
	    from->tcp_timeout_calls;
	to->tcp_timeout_cached_alloc.value.ui64 +=
	    from->tcp_timeout_cached_alloc;
	to->tcp_timeout_cancel_reqs.value.ui64 +=
	    from->tcp_timeout_cancel_reqs;
	to->tcp_timeout_canceled.value.ui64 +=
	    from->tcp_timeout_canceled;
	to->tcp_timermp_freed.value.ui64 +=
	    from->tcp_timermp_freed;
	to->tcp_push_timer_cnt.value.ui64 +=
	    from->tcp_push_timer_cnt;
	to->tcp_ack_timer_cnt.value.ui64 +=
	    from->tcp_ack_timer_cnt;
#endif
}