/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>
#include <sys/kstat.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_stats.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>

static int tcp_kstat_update(kstat_t *, int);
static int tcp_kstat2_update(kstat_t *, int);
static void tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);

static void tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
static void tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
static void tcp_clr_stats(tcp_stat_t *);

tcp_g_stat_t tcp_g_statistics;
kstat_t *tcp_g_kstat;

/* Translate TCP state to MIB2 TCP state. */
static int
tcp_snmp_state(tcp_t *tcp)
{
	if (tcp == NULL)
		return (0);

	switch (tcp->tcp_state) {
	case TCPS_CLOSED:
	case TCPS_IDLE:	/* RFC1213 doesn't have analogue for IDLE & BOUND */
	case TCPS_BOUND:
		return (MIB2_TCP_closed);
	case TCPS_LISTEN:
		return (MIB2_TCP_listen);
	case TCPS_SYN_SENT:
		return (MIB2_TCP_synSent);
	case TCPS_SYN_RCVD:
		return (MIB2_TCP_synReceived);
	case TCPS_ESTABLISHED:
		return (MIB2_TCP_established);
	case TCPS_CLOSE_WAIT:
		return (MIB2_TCP_closeWait);
	case TCPS_FIN_WAIT_1:
		return (MIB2_TCP_finWait1);
	case TCPS_CLOSING:
		return (MIB2_TCP_closing);
	case TCPS_LAST_ACK:
		return (MIB2_TCP_lastAck);
	case TCPS_FIN_WAIT_2:
		return (MIB2_TCP_finWait2);
	case TCPS_TIME_WAIT:
		return (MIB2_TCP_timeWait);
	default:
		return (0);
	}
}

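/*
 * Fill in the connection-specific portion of a tcpConnEntry/tcp6ConnEntry.
 * Raw sequence numbers are only reported to privileged callers; everyone
 * else just gets the send/receive queue sizes derived from them.
 */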
static void
tcp_set_conninfo(tcp_t *tcp, struct tcpConnEntryInfo_s *tcei, boolean_t ispriv)
{
	/* Don't want just anybody seeing these... */
	if (ispriv) {
		tcei->ce_snxt = tcp->tcp_snxt;
		tcei->ce_suna = tcp->tcp_suna;
		tcei->ce_rnxt = tcp->tcp_rnxt;
		tcei->ce_rack = tcp->tcp_rack;
	} else {
		/*
		 * Netstat, unfortunately, uses this to get send/receive queue
		 * sizes. How to fix? Why not compute the difference only?
		 */
		tcei->ce_snxt = tcp->tcp_snxt - tcp->tcp_suna;
		tcei->ce_suna = 0;
		tcei->ce_rnxt = tcp->tcp_rnxt - tcp->tcp_rack;
		tcei->ce_rack = 0;
	}

	tcei->ce_in_data_inorder_bytes = tcp->tcp_cs.tcp_in_data_inorder_bytes;
	tcei->ce_in_data_inorder_segs = tcp->tcp_cs.tcp_in_data_inorder_segs;
	tcei->ce_in_data_unorder_bytes = tcp->tcp_cs.tcp_in_data_unorder_bytes;
	tcei->ce_in_data_unorder_segs = tcp->tcp_cs.tcp_in_data_unorder_segs;
	tcei->ce_in_zwnd_probes = tcp->tcp_cs.tcp_in_zwnd_probes;

	tcei->ce_out_data_bytes = tcp->tcp_cs.tcp_out_data_bytes;
	tcei->ce_out_data_segs = tcp->tcp_cs.tcp_out_data_segs;
	tcei->ce_out_retrans_bytes = tcp->tcp_cs.tcp_out_retrans_bytes;
	tcei->ce_out_retrans_segs = tcp->tcp_cs.tcp_out_retrans_segs;
	tcei->ce_out_zwnd_probes = tcp->tcp_cs.tcp_out_zwnd_probes;

	tcei->ce_unsent = tcp->tcp_unsent;
	tcei->ce_swnd = tcp->tcp_swnd;
	tcei->ce_cwnd = tcp->tcp_cwnd;
	tcei->ce_rwnd = tcp->tcp_rwnd;
	tcei->ce_rto = tcp->tcp_rto;
	tcei->ce_mss = tcp->tcp_mss;
	tcei->ce_state = tcp->tcp_state;
	tcei->ce_rtt_sa = NSEC2USEC(tcp->tcp_rtt_sa >> 3);
	tcei->ce_rtt_sum = NSEC2USEC(tcp->tcp_rtt_sum);
	tcei->ce_rtt_cnt = tcp->tcp_rtt_cnt;
}

/*
 * Return TCP MIB2 SNMP data in the buffer attached to mpdata.
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t *mpdata;
	mblk_t *mp_conn_ctl = NULL;
	mblk_t *mp_conn_tail;
	mblk_t *mp_attr_ctl = NULL;
	mblk_t *mp_attr_tail;
	mblk_t *mp6_conn_ctl = NULL;
	mblk_t *mp6_conn_tail;
	mblk_t *mp6_attr_ctl = NULL;
	mblk_t *mp6_attr_tail;
	struct opthdr *optp;
	mib2_tcpConnEntry_t tce;
	mib2_tcp6ConnEntry_t tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t *connfp;
	int i;
	boolean_t ispriv;
	zoneid_t zoneid;
	int v4_conn_idx;
	int v6_conn_idx;
	conn_t *connp = Q_TO_CONN(q);
	tcp_stack_t *tcps;
	ip_stack_t *ipst;
	mblk_t *mp2ctl;
	mib2_tcp_t tcp_mib;
	size_t tcp_mib_size, tce_size, tce6_size;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	if (legacy_req) {
		tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
		tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
		tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
	} else {
		tcp_mib_size = sizeof (mib2_tcp_t);
		tce_size = sizeof (mib2_tcpConnEntry_t);
		tce6_size = sizeof (mib2_tcp6ConnEntry_t);
	}

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);	/* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
				tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
				tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
				tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
				tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
				if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
					tce6.tcp6ConnIfIndex =
					    connp->conn_ixa->ixa_scopeid;
				} else {
					tce6.tcp6ConnIfIndex = connp->conn_bound_if;
				}

				tcp_set_conninfo(tcp, &tce6.tcp6ConnEntryInfo,
				    ispriv);

				tce6.tcp6ConnCreationProcess =
				    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce6.tcp6ConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
				    &mp6_conn_tail, (char *)&tce6, tce6_size);

				mlp.tme_connidx = v6_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(mp6_attr_ctl->b_cont,
					    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);

				tcp_set_conninfo(tcp, &tce.tcpConnEntryInfo,
				    ispriv);

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, tce_size);

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
	SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);

	/*
	 * Synchronize the 32- and 64-bit counters.  Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP; only the 64-bit
	 * counters are maintained, so the old 32-bit counters in tcp_sc_mib
	 * are always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}

/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
/* ARGSUSED */
int
tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
{
	mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;

	switch (level) {
	case MIB2_TCP:
		switch (name) {
		case 13:
			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
				return (0);
			/* TODO: delete entry defined by tce */
			return (1);
		default:
			return (0);
		}
	default:
		return (1);
	}
}

/*
 * TCP Kstats implementation
 */
void *
tcp_kstat_init(netstackid_t stackid)
{
	kstat_t *ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm", KSTAT_DATA_INT32, 0 },
		{ "rtoMin", KSTAT_DATA_INT32, 0 },
		{ "rtoMax", KSTAT_DATA_INT32, 0 },
		{ "maxConn", KSTAT_DATA_INT32, 0 },
		{ "activeOpens", KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens", KSTAT_DATA_UINT32, 0 },
		{ "attemptFails", KSTAT_DATA_UINT32, 0 },
		{ "estabResets", KSTAT_DATA_UINT32, 0 },
		{ "currEstab", KSTAT_DATA_UINT32, 0 },
		{ "inSegs", KSTAT_DATA_UINT64, 0 },
		{ "outSegs", KSTAT_DATA_UINT64, 0 },
		{ "retransSegs", KSTAT_DATA_UINT32, 0 },
		{ "connTableSize", KSTAT_DATA_INT32, 0 },
		{ "outRsts", KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs", KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes", KSTAT_DATA_UINT32, 0 },
		{ "retransBytes", KSTAT_DATA_UINT32, 0 },
		{ "outAck", KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed", KSTAT_DATA_UINT32, 0 },
		{ "outUrg", KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate", KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe", KSTAT_DATA_UINT32, 0 },
		{ "outControl", KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans", KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs", KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes", KSTAT_DATA_UINT32, 0 },
		{ "inDupAck", KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent", KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs", KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes", KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs", KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes", KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe", KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate", KSTAT_DATA_UINT32, 0 },
		{ "inClosed", KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate", KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate", KSTAT_DATA_UINT32, 0 },
		{ "timRetrans", KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop", KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive", KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 },
		{ "listenDrop", KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0", KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop", KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs", KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6", KSTAT_DATA_INT32, 0 }
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	/*
	 * If this is an exclusive netstack for a local zone, the global zone
	 * should still be able to read the kstat.
	 */
	if (stackid != GLOBAL_NETSTACKID)
		kstat_zone_add(ksp, GLOBAL_ZONEID);

	kstat_install(ksp);
	return (ksp);
}

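/*
 * Tear down the per-stack MIB2 kstat created by tcp_kstat_init().
 */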
void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

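/*
 * Update routine for the per-stack MIB2 kstat.  Recomputes tcpCurrEstab by
 * walking the connection hash table and refreshes the remaining counters
 * from the summed per-CPU MIB statistics.
 */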
static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t *tcp;
	connf_t *connfp;
	conn_t *connp;
	int i;
	netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t *ns;
	tcp_stack_t *tcps;
	ip_stack_t *ipst;
	mib2_tcp_t tcp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;
	tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}
	bzero(&tcp_mib, sizeof (tcp_mib));
	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
	SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));

	tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}

/*
 * kstats related to squeues, i.e. not per IP instance
 */
void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t *ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced", KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail", KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup", KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

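/*
 * Remove the global TCP kstat created by tcp_g_kstat_init().
 */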
void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

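/*
 * Create the per-stack "tcpstat" kstat, which exposes the tcp_stat_t
 * counters that are kept per CPU and summed on each kstat read.
 */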
void *
tcp_kstat2_init(netstackid_t stackid)
{
	kstat_t *ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait_syn_success", KSTAT_DATA_UINT64, 0 },
		{ "tcp_clean_death_nondetached", KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q", KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64, 0 },
		{ "tcp_no_listener", KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendrop", KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendropq0", KSTAT_DATA_UINT64, 0 },
		{ "tcp_wsrv_called", KSTAT_DATA_UINT64, 0 },
		{ "tcp_flwctl_on", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_early", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_miss", KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_on", KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_off", KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_backoff", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_flowctl", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_backenabled", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_urg", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_putnext", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unfusable", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_aborted", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unqualified", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64, 0 },
		{ "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64, 0 },
		{ "tcp_sock_fallback", KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_enabled", KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_disabled", KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_times", KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_pkt_out", KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_cnt_drop", KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_mem_drop", KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_mem_drop", KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_ack_syn", KSTAT_DATA_UINT64, 0 },
		{ "tcp_rst_unsent", KSTAT_DATA_UINT64, 0 },
		{ "tcp_reclaim_cnt", KSTAT_DATA_UINT64, 0 },
		{ "tcp_reass_timeout", KSTAT_DATA_UINT64, 0 },
#ifdef TCP_DEBUG_COUNTER
		{ "tcp_time_wait", KSTAT_DATA_UINT64, 0 },
		{ "tcp_rput_time_wait", KSTAT_DATA_UINT64, 0 },
		{ "tcp_detach_time_wait", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_calls", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_canceled", KSTAT_DATA_UINT64, 0 },
		{ "tcp_timermp_freed", KSTAT_DATA_UINT64, 0 },
		{ "tcp_push_timer_cnt", KSTAT_DATA_UINT64, 0 },
		{ "tcp_ack_timer_cnt", KSTAT_DATA_UINT64, 0 },
#endif
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
	    stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_private = (void *)(uintptr_t)stackid;
	ksp->ks_update = tcp_kstat2_update;

	/*
	 * If this is an exclusive netstack for a local zone, the global zone
	 * should still be able to read the kstat.
	 */
	if (stackid != GLOBAL_NETSTACKID)
		kstat_zone_add(ksp, GLOBAL_ZONEID);

	kstat_install(ksp);
	return (ksp);
}

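/*
 * Tear down the per-stack "tcpstat" kstat.
 */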
void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Sum up all per CPU tcp_stat_t kstat counters.
 */
static int
tcp_kstat2_update(kstat_t *kp, int rw)
{
	netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t *ns;
	tcp_stack_t *tcps;
	tcp_stat_t *stats;
	int i;
	int cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	stats = (tcp_stat_t *)kp->ks_data;
	tcp_clr_stats(stats);

	/*
	 * tcps_sc_cnt may change in the middle of the loop. It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * Add stats from one mib2_tcp_t to another. Static fields are not added;
 * the caller should set them up properly.
 */
static void
tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
{
	to->tcpActiveOpens += from->tcpActiveOpens;
	to->tcpPassiveOpens += from->tcpPassiveOpens;
	to->tcpAttemptFails += from->tcpAttemptFails;
	to->tcpEstabResets += from->tcpEstabResets;
	to->tcpInSegs += from->tcpInSegs;
	to->tcpOutSegs += from->tcpOutSegs;
	to->tcpRetransSegs += from->tcpRetransSegs;
	to->tcpOutRsts += from->tcpOutRsts;

	to->tcpOutDataSegs += from->tcpOutDataSegs;
	to->tcpOutDataBytes += from->tcpOutDataBytes;
	to->tcpRetransBytes += from->tcpRetransBytes;
	to->tcpOutAck += from->tcpOutAck;
	to->tcpOutAckDelayed += from->tcpOutAckDelayed;
	to->tcpOutUrg += from->tcpOutUrg;
	to->tcpOutWinUpdate += from->tcpOutWinUpdate;
	to->tcpOutWinProbe += from->tcpOutWinProbe;
	to->tcpOutControl += from->tcpOutControl;
	to->tcpOutFastRetrans += from->tcpOutFastRetrans;

	to->tcpInAckBytes += from->tcpInAckBytes;
	to->tcpInDupAck += from->tcpInDupAck;
	to->tcpInAckUnsent += from->tcpInAckUnsent;
	to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
	to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
	to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
	to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
	to->tcpInDataDupSegs += from->tcpInDataDupSegs;
	to->tcpInDataDupBytes += from->tcpInDataDupBytes;
	to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
	to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
	to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
	to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
	to->tcpInWinProbe += from->tcpInWinProbe;
	to->tcpInWinUpdate += from->tcpInWinUpdate;
	to->tcpInClosed += from->tcpInClosed;

	to->tcpRttNoUpdate += from->tcpRttNoUpdate;
	to->tcpRttUpdate += from->tcpRttUpdate;
	to->tcpTimRetrans += from->tcpTimRetrans;
	to->tcpTimRetransDrop += from->tcpTimRetransDrop;
	to->tcpTimKeepalive += from->tcpTimKeepalive;
	to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
	to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
	to->tcpListenDrop += from->tcpListenDrop;
	to->tcpListenDropQ0 += from->tcpListenDropQ0;
	to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
	to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
	to->tcpHCInSegs += from->tcpHCInSegs;
	to->tcpHCOutSegs += from->tcpHCOutSegs;
}

/*
 * Sum up all MIB2 stats for a tcp_stack_t from all per CPU stats. The
 * caller should initialize the target mib2_tcp_t properly, as this function
 * just adds up all the per CPU stats.
 */
static void
tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
{
	int i;
	int cnt;

	/*
	 * tcps_sc_cnt may change in the middle of the loop. It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
}

/*
 * Set all tcp_stat_t counters to 0.
 */
static void
tcp_clr_stats(tcp_stat_t *stats)
{
	stats->tcp_time_wait_syn_success.value.ui64 = 0;
	stats->tcp_clean_death_nondetached.value.ui64 = 0;
	stats->tcp_eager_blowoff_q.value.ui64 = 0;
	stats->tcp_eager_blowoff_q0.value.ui64 = 0;
	stats->tcp_no_listener.value.ui64 = 0;
	stats->tcp_listendrop.value.ui64 = 0;
	stats->tcp_listendropq0.value.ui64 = 0;
	stats->tcp_wsrv_called.value.ui64 = 0;
	stats->tcp_flwctl_on.value.ui64 = 0;
	stats->tcp_timer_fire_early.value.ui64 = 0;
	stats->tcp_timer_fire_miss.value.ui64 = 0;
	stats->tcp_zcopy_on.value.ui64 = 0;
	stats->tcp_zcopy_off.value.ui64 = 0;
	stats->tcp_zcopy_backoff.value.ui64 = 0;
	stats->tcp_fusion_flowctl.value.ui64 = 0;
	stats->tcp_fusion_backenabled.value.ui64 = 0;
	stats->tcp_fusion_urg.value.ui64 = 0;
	stats->tcp_fusion_putnext.value.ui64 = 0;
	stats->tcp_fusion_unfusable.value.ui64 = 0;
	stats->tcp_fusion_aborted.value.ui64 = 0;
	stats->tcp_fusion_unqualified.value.ui64 = 0;
	stats->tcp_fusion_rrw_busy.value.ui64 = 0;
	stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
	stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
	stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
	stats->tcp_sock_fallback.value.ui64 = 0;
	stats->tcp_lso_enabled.value.ui64 = 0;
	stats->tcp_lso_disabled.value.ui64 = 0;
	stats->tcp_lso_times.value.ui64 = 0;
	stats->tcp_lso_pkt_out.value.ui64 = 0;
	stats->tcp_listen_cnt_drop.value.ui64 = 0;
	stats->tcp_listen_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_ack_syn.value.ui64 = 0;
	stats->tcp_rst_unsent.value.ui64 = 0;
	stats->tcp_reclaim_cnt.value.ui64 = 0;
	stats->tcp_reass_timeout.value.ui64 = 0;

#ifdef TCP_DEBUG_COUNTER
	stats->tcp_time_wait.value.ui64 = 0;
	stats->tcp_rput_time_wait.value.ui64 = 0;
	stats->tcp_detach_time_wait.value.ui64 = 0;
	stats->tcp_timeout_calls.value.ui64 = 0;
	stats->tcp_timeout_cached_alloc.value.ui64 = 0;
	stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
	stats->tcp_timeout_canceled.value.ui64 = 0;
	stats->tcp_timermp_freed.value.ui64 = 0;
	stats->tcp_push_timer_cnt.value.ui64 = 0;
	stats->tcp_ack_timer_cnt.value.ui64 = 0;
#endif
}

/*
 * Add counters from the per CPU tcp_stat_counter_t to the stack
 * tcp_stat_t.
 */
static void
tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
{
	to->tcp_time_wait_syn_success.value.ui64 +=
	    from->tcp_time_wait_syn_success;
	to->tcp_clean_death_nondetached.value.ui64 +=
	    from->tcp_clean_death_nondetached;
	to->tcp_eager_blowoff_q.value.ui64 +=
	    from->tcp_eager_blowoff_q;
	to->tcp_eager_blowoff_q0.value.ui64 +=
	    from->tcp_eager_blowoff_q0;
	to->tcp_no_listener.value.ui64 +=
	    from->tcp_no_listener;
	to->tcp_listendrop.value.ui64 +=
	    from->tcp_listendrop;
	to->tcp_listendropq0.value.ui64 +=
	    from->tcp_listendropq0;
	to->tcp_wsrv_called.value.ui64 +=
	    from->tcp_wsrv_called;
	to->tcp_flwctl_on.value.ui64 +=
	    from->tcp_flwctl_on;
	to->tcp_timer_fire_early.value.ui64 +=
	    from->tcp_timer_fire_early;
	to->tcp_timer_fire_miss.value.ui64 +=
	    from->tcp_timer_fire_miss;
	to->tcp_zcopy_on.value.ui64 +=
	    from->tcp_zcopy_on;
	to->tcp_zcopy_off.value.ui64 +=
	    from->tcp_zcopy_off;
	to->tcp_zcopy_backoff.value.ui64 +=
	    from->tcp_zcopy_backoff;
	to->tcp_fusion_flowctl.value.ui64 +=
	    from->tcp_fusion_flowctl;
	to->tcp_fusion_backenabled.value.ui64 +=
	    from->tcp_fusion_backenabled;
	to->tcp_fusion_urg.value.ui64 +=
	    from->tcp_fusion_urg;
	to->tcp_fusion_putnext.value.ui64 +=
	    from->tcp_fusion_putnext;
	to->tcp_fusion_unfusable.value.ui64 +=
	    from->tcp_fusion_unfusable;
	to->tcp_fusion_aborted.value.ui64 +=
	    from->tcp_fusion_aborted;
	to->tcp_fusion_unqualified.value.ui64 +=
	    from->tcp_fusion_unqualified;
	to->tcp_fusion_rrw_busy.value.ui64 +=
	    from->tcp_fusion_rrw_busy;
	to->tcp_fusion_rrw_msgcnt.value.ui64 +=
	    from->tcp_fusion_rrw_msgcnt;
	to->tcp_fusion_rrw_plugged.value.ui64 +=
	    from->tcp_fusion_rrw_plugged;
	to->tcp_in_ack_unsent_drop.value.ui64 +=
	    from->tcp_in_ack_unsent_drop;
	to->tcp_sock_fallback.value.ui64 +=
	    from->tcp_sock_fallback;
	to->tcp_lso_enabled.value.ui64 +=
	    from->tcp_lso_enabled;
	to->tcp_lso_disabled.value.ui64 +=
	    from->tcp_lso_disabled;
	to->tcp_lso_times.value.ui64 +=
	    from->tcp_lso_times;
	to->tcp_lso_pkt_out.value.ui64 +=
	    from->tcp_lso_pkt_out;
	to->tcp_listen_cnt_drop.value.ui64 +=
	    from->tcp_listen_cnt_drop;
	to->tcp_listen_mem_drop.value.ui64 +=
	    from->tcp_listen_mem_drop;
	to->tcp_zwin_mem_drop.value.ui64 +=
	    from->tcp_zwin_mem_drop;
	to->tcp_zwin_ack_syn.value.ui64 +=
	    from->tcp_zwin_ack_syn;
	to->tcp_rst_unsent.value.ui64 +=
	    from->tcp_rst_unsent;
	to->tcp_reclaim_cnt.value.ui64 +=
	    from->tcp_reclaim_cnt;
	to->tcp_reass_timeout.value.ui64 +=
	    from->tcp_reass_timeout;

#ifdef TCP_DEBUG_COUNTER
	to->tcp_time_wait.value.ui64 +=
	    from->tcp_time_wait;
	to->tcp_rput_time_wait.value.ui64 +=
	    from->tcp_rput_time_wait;
	to->tcp_detach_time_wait.value.ui64 +=
	    from->tcp_detach_time_wait;
	to->tcp_timeout_calls.value.ui64 +=
	    from->tcp_timeout_calls;
	to->tcp_timeout_cached_alloc.value.ui64 +=
	    from->tcp_timeout_cached_alloc;
	to->tcp_timeout_cancel_reqs.value.ui64 +=
	    from->tcp_timeout_cancel_reqs;
	to->tcp_timeout_canceled.value.ui64 +=
	    from->tcp_timeout_canceled;
	to->tcp_timermp_freed.value.ui64 +=
	    from->tcp_timermp_freed;
	to->tcp_push_timer_cnt.value.ui64 +=
	    from->tcp_push_timer_cnt;
	to->tcp_ack_timer_cnt.value.ui64 +=
	    from->tcp_ack_timer_cnt;
#endif
}