11547 Want connstat(1M) command to display per-connection TCP statistics
Portions contributed by: Cody Peter Mello <cody.mello@joyent.com>
Portions contributed by: Ahmed G <ahmedg@delphix.com>
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
--- old/usr/src/uts/common/inet/tcp/tcp_stats.c
+++ new/usr/src/uts/common/inet/tcp/tcp_stats.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
[ 14 lines elided ]
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2011, Joyent Inc. All rights reserved.
25 + * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
25 26 */
26 27
27 28 #include <sys/types.h>
28 29 #include <sys/tihdr.h>
29 30 #include <sys/policy.h>
30 31 #include <sys/tsol/tnet.h>
31 32 #include <sys/kstat.h>
32 33
33 34 #include <inet/common.h>
34 35 #include <inet/ip.h>
35 36 #include <inet/tcp.h>
36 37 #include <inet/tcp_impl.h>
37 38 #include <inet/tcp_stats.h>
38 39 #include <inet/kstatcom.h>
39 40 #include <inet/snmpcom.h>
40 41
41 42 static int tcp_kstat_update(kstat_t *, int);
42 43 static int tcp_kstat2_update(kstat_t *, int);
43 44 static void tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);
44 45
45 46 static void tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
46 47 static void tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
47 48 static void tcp_clr_stats(tcp_stat_t *);
48 49
49 50 tcp_g_stat_t tcp_g_statistics;
50 51 kstat_t *tcp_g_kstat;
51 52
52 53 /* Translate TCP state to MIB2 TCP state. */
53 54 static int
54 55 tcp_snmp_state(tcp_t *tcp)
55 56 {
56 57 if (tcp == NULL)
57 58 return (0);
58 59
59 60 switch (tcp->tcp_state) {
60 61 case TCPS_CLOSED:
61 62 case TCPS_IDLE: /* RFC1213 doesn't have analogue for IDLE & BOUND */
62 63 case TCPS_BOUND:
63 64 return (MIB2_TCP_closed);
64 65 case TCPS_LISTEN:
65 66 return (MIB2_TCP_listen);
66 67 case TCPS_SYN_SENT:
67 68 return (MIB2_TCP_synSent);
68 69 case TCPS_SYN_RCVD:
69 70 return (MIB2_TCP_synReceived);
70 71 case TCPS_ESTABLISHED:
71 72 return (MIB2_TCP_established);
72 73 case TCPS_CLOSE_WAIT:
73 74 return (MIB2_TCP_closeWait);
74 75 case TCPS_FIN_WAIT_1:
75 76 return (MIB2_TCP_finWait1);
76 77 case TCPS_CLOSING:
77 78 return (MIB2_TCP_closing);
78 79 case TCPS_LAST_ACK:
[ 44 lines elided ]
79 80 return (MIB2_TCP_lastAck);
80 81 case TCPS_FIN_WAIT_2:
81 82 return (MIB2_TCP_finWait2);
82 83 case TCPS_TIME_WAIT:
83 84 return (MIB2_TCP_timeWait);
84 85 default:
85 86 return (0);
86 87 }
87 88 }
88 89
90 +static void
91 +tcp_set_conninfo(tcp_t *tcp, struct tcpConnEntryInfo_s *tcei, boolean_t ispriv)
92 +{
93 + /* Don't want just anybody seeing these... */
94 + if (ispriv) {
95 + tcei->ce_snxt = tcp->tcp_snxt;
96 + tcei->ce_suna = tcp->tcp_suna;
97 + tcei->ce_rnxt = tcp->tcp_rnxt;
98 + tcei->ce_rack = tcp->tcp_rack;
99 + } else {
100 + /*
101 + * Netstat, unfortunately, uses this to get send/receive queue
102 + * sizes. How to fix? Why not compute the difference only?
103 + */
104 + tcei->ce_snxt = tcp->tcp_snxt - tcp->tcp_suna;
105 + tcei->ce_suna = 0;
106 + tcei->ce_rnxt = tcp->tcp_rnxt - tcp->tcp_rack;
107 + tcei->ce_rack = 0;
108 + }
109 +
110 + tcei->ce_in_data_inorder_bytes = tcp->tcp_cs.tcp_in_data_inorder_bytes;
111 + tcei->ce_in_data_inorder_segs = tcp->tcp_cs.tcp_in_data_inorder_segs;
112 + tcei->ce_in_data_unorder_bytes = tcp->tcp_cs.tcp_in_data_unorder_bytes;
113 + tcei->ce_in_data_unorder_segs = tcp->tcp_cs.tcp_in_data_unorder_segs;
114 + tcei->ce_in_zwnd_probes = tcp->tcp_cs.tcp_in_zwnd_probes;
115 +
116 + tcei->ce_out_data_bytes = tcp->tcp_cs.tcp_out_data_bytes;
117 + tcei->ce_out_data_segs = tcp->tcp_cs.tcp_out_data_segs;
118 + tcei->ce_out_retrans_bytes = tcp->tcp_cs.tcp_out_retrans_bytes;
119 + tcei->ce_out_retrans_segs = tcp->tcp_cs.tcp_out_retrans_segs;
120 + tcei->ce_out_zwnd_probes = tcp->tcp_cs.tcp_out_zwnd_probes;
121 +
122 + tcei->ce_unsent = tcp->tcp_unsent;
123 + tcei->ce_swnd = tcp->tcp_swnd;
124 + tcei->ce_cwnd = tcp->tcp_cwnd;
125 + tcei->ce_rwnd = tcp->tcp_rwnd;
126 + tcei->ce_rto = tcp->tcp_rto;
127 + tcei->ce_mss = tcp->tcp_mss;
128 + tcei->ce_state = tcp->tcp_state;
129 + tcei->ce_rtt_sa = NSEC2USEC(tcp->tcp_rtt_sa >> 3);
130 + tcei->ce_rtt_sum = NSEC2USEC(tcp->tcp_rtt_sum);
131 + tcei->ce_rtt_cnt = tcp->tcp_rtt_cnt;
132 +}
133 +
89 134 /*
90 135 * Return SNMP stuff in buffer in mpdata.
91 136 */
92 137 mblk_t *
93 138 tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
94 139 {
95 140 mblk_t *mpdata;
96 141 mblk_t *mp_conn_ctl = NULL;
97 142 mblk_t *mp_conn_tail;
98 143 mblk_t *mp_attr_ctl = NULL;
99 144 mblk_t *mp_attr_tail;
100 145 mblk_t *mp6_conn_ctl = NULL;
101 146 mblk_t *mp6_conn_tail;
102 147 mblk_t *mp6_attr_ctl = NULL;
103 148 mblk_t *mp6_attr_tail;
104 149 struct opthdr *optp;
105 150 mib2_tcpConnEntry_t tce;
106 151 mib2_tcp6ConnEntry_t tce6;
107 152 mib2_transportMLPEntry_t mlp;
108 153 connf_t *connfp;
109 154 int i;
110 155 boolean_t ispriv;
111 156 zoneid_t zoneid;
112 157 int v4_conn_idx;
113 158 int v6_conn_idx;
114 159 conn_t *connp = Q_TO_CONN(q);
115 160 tcp_stack_t *tcps;
116 161 ip_stack_t *ipst;
117 162 mblk_t *mp2ctl;
118 163 mib2_tcp_t tcp_mib;
119 164 size_t tcp_mib_size, tce_size, tce6_size;
120 165
121 166 /*
122 167 * make a copy of the original message
123 168 */
124 169 mp2ctl = copymsg(mpctl);
125 170
126 171 if (mpctl == NULL ||
127 172 (mpdata = mpctl->b_cont) == NULL ||
128 173 (mp_conn_ctl = copymsg(mpctl)) == NULL ||
129 174 (mp_attr_ctl = copymsg(mpctl)) == NULL ||
130 175 (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
131 176 (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
132 177 freemsg(mp_conn_ctl);
133 178 freemsg(mp_attr_ctl);
134 179 freemsg(mp6_conn_ctl);
135 180 freemsg(mp6_attr_ctl);
136 181 freemsg(mpctl);
137 182 freemsg(mp2ctl);
138 183 return (NULL);
139 184 }
140 185
141 186 ipst = connp->conn_netstack->netstack_ip;
142 187 tcps = connp->conn_netstack->netstack_tcp;
143 188
144 189 if (legacy_req) {
145 190 tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
146 191 tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
147 192 tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
148 193 } else {
149 194 tcp_mib_size = sizeof (mib2_tcp_t);
150 195 tce_size = sizeof (mib2_tcpConnEntry_t);
151 196 tce6_size = sizeof (mib2_tcp6ConnEntry_t);
152 197 }
153 198
154 199 bzero(&tcp_mib, sizeof (tcp_mib));
155 200
156 201 /* build table of connections -- need count in fixed part */
157 202 SET_MIB(tcp_mib.tcpRtoAlgorithm, 4); /* vanj */
158 203 SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
159 204 SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
160 205 SET_MIB(tcp_mib.tcpMaxConn, -1);
161 206 SET_MIB(tcp_mib.tcpCurrEstab, 0);
162 207
163 208 ispriv =
164 209 secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
165 210 zoneid = Q_TO_CONN(q)->conn_zoneid;
166 211
167 212 v4_conn_idx = v6_conn_idx = 0;
168 213 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
169 214
170 215 for (i = 0; i < CONN_G_HASH_SIZE; i++) {
171 216 ipst = tcps->tcps_netstack->netstack_ip;
172 217
173 218 connfp = &ipst->ips_ipcl_globalhash_fanout[i];
174 219
175 220 connp = NULL;
[ 77 lines elided ]
176 221
177 222 while ((connp =
178 223 ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
179 224 tcp_t *tcp;
180 225 boolean_t needattr;
181 226
182 227 if (connp->conn_zoneid != zoneid)
183 228 continue; /* not in this zone */
184 229
185 230 tcp = connp->conn_tcp;
186 - TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
187 - tcp->tcp_ibsegs = 0;
188 - TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
189 - tcp->tcp_obsegs = 0;
190 -
191 231 tce6.tcp6ConnState = tce.tcpConnState =
192 232 tcp_snmp_state(tcp);
193 233 if (tce.tcpConnState == MIB2_TCP_established ||
194 234 tce.tcpConnState == MIB2_TCP_closeWait)
195 235 BUMP_MIB(&tcp_mib, tcpCurrEstab);
196 236
197 237 needattr = B_FALSE;
198 238 bzero(&mlp, sizeof (mlp));
199 239 if (connp->conn_mlp_type != mlptSingle) {
200 240 if (connp->conn_mlp_type == mlptShared ||
201 241 connp->conn_mlp_type == mlptBoth)
202 242 mlp.tme_flags |= MIB2_TMEF_SHARED;
203 243 if (connp->conn_mlp_type == mlptPrivate ||
204 244 connp->conn_mlp_type == mlptBoth)
205 245 mlp.tme_flags |= MIB2_TMEF_PRIVATE;
206 246 needattr = B_TRUE;
207 247 }
208 248 if (connp->conn_anon_mlp) {
209 249 mlp.tme_flags |= MIB2_TMEF_ANONMLP;
210 250 needattr = B_TRUE;
211 251 }
212 252 switch (connp->conn_mac_mode) {
213 253 case CONN_MAC_DEFAULT:
214 254 break;
215 255 case CONN_MAC_AWARE:
216 256 mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
217 257 needattr = B_TRUE;
218 258 break;
219 259 case CONN_MAC_IMPLICIT:
220 260 mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
221 261 needattr = B_TRUE;
222 262 break;
223 263 }
224 264 if (connp->conn_ixa->ixa_tsl != NULL) {
225 265 ts_label_t *tsl;
226 266
227 267 tsl = connp->conn_ixa->ixa_tsl;
228 268 mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
229 269 mlp.tme_doi = label2doi(tsl);
230 270 mlp.tme_label = *label2bslabel(tsl);
231 271 needattr = B_TRUE;
232 272 }
233 273
234 274 /* Create a message to report on IPv6 entries */
235 275 if (connp->conn_ipversion == IPV6_VERSION) {
[ 35 lines elided ]
236 276 tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
237 277 tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
238 278 tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
239 279 tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
240 280 if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
241 281 tce6.tcp6ConnIfIndex =
242 282 connp->conn_ixa->ixa_scopeid;
243 283 } else {
244 284 tce6.tcp6ConnIfIndex = connp->conn_bound_if;
245 285 }
246 - /* Don't want just anybody seeing these... */
247 - if (ispriv) {
248 - tce6.tcp6ConnEntryInfo.ce_snxt =
249 - tcp->tcp_snxt;
250 - tce6.tcp6ConnEntryInfo.ce_suna =
251 - tcp->tcp_suna;
252 - tce6.tcp6ConnEntryInfo.ce_rnxt =
253 - tcp->tcp_rnxt;
254 - tce6.tcp6ConnEntryInfo.ce_rack =
255 - tcp->tcp_rack;
256 - } else {
257 - /*
258 - * Netstat, unfortunately, uses this to
259 - * get send/receive queue sizes. How to fix?
260 - * Why not compute the difference only?
261 - */
262 - tce6.tcp6ConnEntryInfo.ce_snxt =
263 - tcp->tcp_snxt - tcp->tcp_suna;
264 - tce6.tcp6ConnEntryInfo.ce_suna = 0;
265 - tce6.tcp6ConnEntryInfo.ce_rnxt =
266 - tcp->tcp_rnxt - tcp->tcp_rack;
267 - tce6.tcp6ConnEntryInfo.ce_rack = 0;
268 - }
269 286
270 - tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
271 - tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
272 - tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto;
273 - tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss;
274 - tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;
287 + tcp_set_conninfo(tcp, &tce6.tcp6ConnEntryInfo,
288 + ispriv);
275 289
276 290 tce6.tcp6ConnCreationProcess =
277 291 (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
278 292 connp->conn_cpid;
279 293 tce6.tcp6ConnCreationTime = connp->conn_open_time;
280 294
281 295 (void) snmp_append_data2(mp6_conn_ctl->b_cont,
282 296 &mp6_conn_tail, (char *)&tce6, tce6_size);
283 297
284 298 mlp.tme_connidx = v6_conn_idx++;
285 299 if (needattr)
286 300 (void) snmp_append_data2(mp6_attr_ctl->b_cont,
287 301 &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
288 302 }
289 303 /*
290 304 * Create an IPv4 table entry for IPv4 entries and also
291 305 * for IPv6 entries which are bound to in6addr_any
292 306 * but don't have IPV6_V6ONLY set.
293 307 * (i.e. anything an IPv4 peer could connect to)
294 308 */
295 309 if (connp->conn_ipversion == IPV4_VERSION ||
296 310 (tcp->tcp_state <= TCPS_LISTEN &&
297 311 !connp->conn_ipv6_v6only &&
298 312 IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
299 313 if (connp->conn_ipversion == IPV6_VERSION) {
[ 15 lines elided ]
300 314 tce.tcpConnRemAddress = INADDR_ANY;
301 315 tce.tcpConnLocalAddress = INADDR_ANY;
302 316 } else {
303 317 tce.tcpConnRemAddress =
304 318 connp->conn_faddr_v4;
305 319 tce.tcpConnLocalAddress =
306 320 connp->conn_laddr_v4;
307 321 }
308 322 tce.tcpConnLocalPort = ntohs(connp->conn_lport);
309 323 tce.tcpConnRemPort = ntohs(connp->conn_fport);
310 - /* Don't want just anybody seeing these... */
311 - if (ispriv) {
312 - tce.tcpConnEntryInfo.ce_snxt =
313 - tcp->tcp_snxt;
314 - tce.tcpConnEntryInfo.ce_suna =
315 - tcp->tcp_suna;
316 - tce.tcpConnEntryInfo.ce_rnxt =
317 - tcp->tcp_rnxt;
318 - tce.tcpConnEntryInfo.ce_rack =
319 - tcp->tcp_rack;
320 - } else {
321 - /*
322 - * Netstat, unfortunately, uses this to
323 - * get send/receive queue sizes. How
324 - * to fix?
325 - * Why not compute the difference only?
326 - */
327 - tce.tcpConnEntryInfo.ce_snxt =
328 - tcp->tcp_snxt - tcp->tcp_suna;
329 - tce.tcpConnEntryInfo.ce_suna = 0;
330 - tce.tcpConnEntryInfo.ce_rnxt =
331 - tcp->tcp_rnxt - tcp->tcp_rack;
332 - tce.tcpConnEntryInfo.ce_rack = 0;
333 - }
334 324
335 - tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
336 - tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
337 - tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto;
338 - tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss;
339 - tce.tcpConnEntryInfo.ce_state =
340 - tcp->tcp_state;
325 + tcp_set_conninfo(tcp, &tce.tcpConnEntryInfo,
326 + ispriv);
341 327
342 328 tce.tcpConnCreationProcess =
343 329 (connp->conn_cpid < 0) ?
344 330 MIB2_UNKNOWN_PROCESS :
345 331 connp->conn_cpid;
346 332 tce.tcpConnCreationTime = connp->conn_open_time;
347 333
348 334 (void) snmp_append_data2(mp_conn_ctl->b_cont,
349 335 &mp_conn_tail, (char *)&tce, tce_size);
350 336
351 337 mlp.tme_connidx = v4_conn_idx++;
352 338 if (needattr)
353 339 (void) snmp_append_data2(
354 340 mp_attr_ctl->b_cont,
355 341 &mp_attr_tail, (char *)&mlp,
356 342 sizeof (mlp));
357 343 }
358 344 }
359 345 }
360 346
361 347 tcp_sum_mib(tcps, &tcp_mib);
362 348
363 349 /* Fixed length structure for IPv4 and IPv6 counters */
364 350 SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
365 351 SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);
366 352
367 353 /*
368 354 * Synchronize 32- and 64-bit counters. Note that tcpInSegs and
369 355 * tcpOutSegs are not updated anywhere in TCP. The new 64 bits
370 356 * counters are used. Hence the old counters' values in tcp_sc_mib
371 357 * are always 0.
372 358 */
373 359 SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
374 360 SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);
375 361
376 362 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
377 363 optp->level = MIB2_TCP;
378 364 optp->name = 0;
379 365 (void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
380 366 optp->len = msgdsize(mpdata);
381 367 qreply(q, mpctl);
382 368
383 369 /* table of connections... */
384 370 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
385 371 sizeof (struct T_optmgmt_ack)];
386 372 optp->level = MIB2_TCP;
387 373 optp->name = MIB2_TCP_CONN;
388 374 optp->len = msgdsize(mp_conn_ctl->b_cont);
389 375 qreply(q, mp_conn_ctl);
390 376
391 377 /* table of MLP attributes... */
392 378 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
393 379 sizeof (struct T_optmgmt_ack)];
394 380 optp->level = MIB2_TCP;
395 381 optp->name = EXPER_XPORT_MLP;
396 382 optp->len = msgdsize(mp_attr_ctl->b_cont);
397 383 if (optp->len == 0)
398 384 freemsg(mp_attr_ctl);
399 385 else
400 386 qreply(q, mp_attr_ctl);
401 387
402 388 /* table of IPv6 connections... */
403 389 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
404 390 sizeof (struct T_optmgmt_ack)];
405 391 optp->level = MIB2_TCP6;
406 392 optp->name = MIB2_TCP6_CONN;
407 393 optp->len = msgdsize(mp6_conn_ctl->b_cont);
408 394 qreply(q, mp6_conn_ctl);
409 395
410 396 /* table of IPv6 MLP attributes... */
411 397 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
412 398 sizeof (struct T_optmgmt_ack)];
413 399 optp->level = MIB2_TCP6;
414 400 optp->name = EXPER_XPORT_MLP;
415 401 optp->len = msgdsize(mp6_attr_ctl->b_cont);
416 402 if (optp->len == 0)
417 403 freemsg(mp6_attr_ctl);
418 404 else
419 405 qreply(q, mp6_attr_ctl);
420 406 return (mp2ctl);
421 407 }
422 408
423 409 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
424 410 /* ARGSUSED */
425 411 int
426 412 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
427 413 {
428 414 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;
429 415
430 416 switch (level) {
431 417 case MIB2_TCP:
432 418 switch (name) {
433 419 case 13:
434 420 if (tce->tcpConnState != MIB2_TCP_deleteTCB)
435 421 return (0);
436 422 /* TODO: delete entry defined by tce */
437 423 return (1);
438 424 default:
439 425 return (0);
440 426 }
441 427 default:
442 428 return (1);
443 429 }
444 430 }
445 431
446 432 /*
447 433 * TCP Kstats implementation
448 434 */
449 435 void *
450 436 tcp_kstat_init(netstackid_t stackid)
451 437 {
452 438 kstat_t *ksp;
453 439
454 440 tcp_named_kstat_t template = {
455 441 { "rtoAlgorithm", KSTAT_DATA_INT32, 0 },
456 442 { "rtoMin", KSTAT_DATA_INT32, 0 },
457 443 { "rtoMax", KSTAT_DATA_INT32, 0 },
458 444 { "maxConn", KSTAT_DATA_INT32, 0 },
459 445 { "activeOpens", KSTAT_DATA_UINT32, 0 },
460 446 { "passiveOpens", KSTAT_DATA_UINT32, 0 },
461 447 { "attemptFails", KSTAT_DATA_UINT32, 0 },
462 448 { "estabResets", KSTAT_DATA_UINT32, 0 },
463 449 { "currEstab", KSTAT_DATA_UINT32, 0 },
464 450 { "inSegs", KSTAT_DATA_UINT64, 0 },
465 451 { "outSegs", KSTAT_DATA_UINT64, 0 },
466 452 { "retransSegs", KSTAT_DATA_UINT32, 0 },
467 453 { "connTableSize", KSTAT_DATA_INT32, 0 },
468 454 { "outRsts", KSTAT_DATA_UINT32, 0 },
469 455 { "outDataSegs", KSTAT_DATA_UINT32, 0 },
470 456 { "outDataBytes", KSTAT_DATA_UINT32, 0 },
471 457 { "retransBytes", KSTAT_DATA_UINT32, 0 },
472 458 { "outAck", KSTAT_DATA_UINT32, 0 },
473 459 { "outAckDelayed", KSTAT_DATA_UINT32, 0 },
474 460 { "outUrg", KSTAT_DATA_UINT32, 0 },
475 461 { "outWinUpdate", KSTAT_DATA_UINT32, 0 },
476 462 { "outWinProbe", KSTAT_DATA_UINT32, 0 },
477 463 { "outControl", KSTAT_DATA_UINT32, 0 },
478 464 { "outFastRetrans", KSTAT_DATA_UINT32, 0 },
479 465 { "inAckSegs", KSTAT_DATA_UINT32, 0 },
480 466 { "inAckBytes", KSTAT_DATA_UINT32, 0 },
481 467 { "inDupAck", KSTAT_DATA_UINT32, 0 },
482 468 { "inAckUnsent", KSTAT_DATA_UINT32, 0 },
483 469 { "inDataInorderSegs", KSTAT_DATA_UINT32, 0 },
484 470 { "inDataInorderBytes", KSTAT_DATA_UINT32, 0 },
485 471 { "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 },
486 472 { "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 },
487 473 { "inDataDupSegs", KSTAT_DATA_UINT32, 0 },
488 474 { "inDataDupBytes", KSTAT_DATA_UINT32, 0 },
489 475 { "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 },
490 476 { "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 },
491 477 { "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 },
492 478 { "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 },
493 479 { "inWinProbe", KSTAT_DATA_UINT32, 0 },
494 480 { "inWinUpdate", KSTAT_DATA_UINT32, 0 },
495 481 { "inClosed", KSTAT_DATA_UINT32, 0 },
496 482 { "rttUpdate", KSTAT_DATA_UINT32, 0 },
497 483 { "rttNoUpdate", KSTAT_DATA_UINT32, 0 },
498 484 { "timRetrans", KSTAT_DATA_UINT32, 0 },
499 485 { "timRetransDrop", KSTAT_DATA_UINT32, 0 },
500 486 { "timKeepalive", KSTAT_DATA_UINT32, 0 },
501 487 { "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 },
502 488 { "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 },
503 489 { "listenDrop", KSTAT_DATA_UINT32, 0 },
504 490 { "listenDropQ0", KSTAT_DATA_UINT32, 0 },
505 491 { "halfOpenDrop", KSTAT_DATA_UINT32, 0 },
506 492 { "outSackRetransSegs", KSTAT_DATA_UINT32, 0 },
507 493 { "connTableSize6", KSTAT_DATA_INT32, 0 }
508 494 };
509 495
510 496 ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, TCP_MOD_NAME, "mib2",
511 497 KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);
512 498
513 499 if (ksp == NULL)
514 500 return (NULL);
515 501
516 502 template.rtoAlgorithm.value.ui32 = 4;
517 503 template.maxConn.value.i32 = -1;
518 504
519 505 bcopy(&template, ksp->ks_data, sizeof (template));
520 506 ksp->ks_update = tcp_kstat_update;
521 507 ksp->ks_private = (void *)(uintptr_t)stackid;
522 508
523 509 /*
524 510 * If this is an exclusive netstack for a local zone, the global zone
525 511 * should still be able to read the kstat.
526 512 */
527 513 if (stackid != GLOBAL_NETSTACKID)
528 514 kstat_zone_add(ksp, GLOBAL_ZONEID);
529 515
530 516 kstat_install(ksp);
531 517 return (ksp);
532 518 }
533 519
534 520 void
535 521 tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
536 522 {
537 523 if (ksp != NULL) {
538 524 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
539 525 kstat_delete_netstack(ksp, stackid);
540 526 }
541 527 }
542 528
543 529 static int
544 530 tcp_kstat_update(kstat_t *kp, int rw)
545 531 {
546 532 tcp_named_kstat_t *tcpkp;
547 533 tcp_t *tcp;
548 534 connf_t *connfp;
549 535 conn_t *connp;
550 536 int i;
551 537 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
552 538 netstack_t *ns;
553 539 tcp_stack_t *tcps;
554 540 ip_stack_t *ipst;
555 541 mib2_tcp_t tcp_mib;
556 542
557 543 if (rw == KSTAT_WRITE)
558 544 return (EACCES);
559 545
560 546 ns = netstack_find_by_stackid(stackid);
561 547 if (ns == NULL)
562 548 return (-1);
563 549 tcps = ns->netstack_tcp;
564 550 if (tcps == NULL) {
565 551 netstack_rele(ns);
566 552 return (-1);
567 553 }
568 554
569 555 tcpkp = (tcp_named_kstat_t *)kp->ks_data;
570 556
571 557 tcpkp->currEstab.value.ui32 = 0;
572 558 tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
573 559 tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;
574 560
575 561 ipst = ns->netstack_ip;
576 562
577 563 for (i = 0; i < CONN_G_HASH_SIZE; i++) {
578 564 connfp = &ipst->ips_ipcl_globalhash_fanout[i];
579 565 connp = NULL;
580 566 while ((connp =
581 567 ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
582 568 tcp = connp->conn_tcp;
583 569 switch (tcp_snmp_state(tcp)) {
584 570 case MIB2_TCP_established:
585 571 case MIB2_TCP_closeWait:
586 572 tcpkp->currEstab.value.ui32++;
587 573 break;
588 574 }
589 575 }
590 576 }
591 577 bzero(&tcp_mib, sizeof (tcp_mib));
592 578 tcp_sum_mib(tcps, &tcp_mib);
593 579
594 580 /* Fixed length structure for IPv4 and IPv6 counters */
595 581 SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
596 582 SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));
597 583
598 584 tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
599 585 tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
600 586 tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
601 587 tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
602 588 tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
603 589 tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
604 590 tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
605 591 tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
606 592 tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
607 593 tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
608 594 tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
609 595 tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
610 596 tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
611 597 tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
612 598 tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
613 599 tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
614 600 tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
615 601 tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
616 602 tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
617 603 tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
618 604 tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
619 605 tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
620 606 tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
621 607 tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
622 608 tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
623 609 tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
624 610 tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
625 611 tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
626 612 tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
627 613 tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
628 614 tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
629 615 tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
630 616 tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
631 617 tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
632 618 tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
633 619 tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
634 620 tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
635 621 tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
636 622 tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
637 623 tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
638 624 tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
639 625 tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
640 626 tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
641 627 tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
642 628 tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
643 629 tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
644 630 tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
645 631 tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;
646 632
647 633 netstack_rele(ns);
648 634 return (0);
649 635 }
650 636
651 637 /*
652 638 * kstats related to squeues i.e. not per IP instance
653 639 */
654 640 void *
655 641 tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
656 642 {
657 643 kstat_t *ksp;
658 644
659 645 tcp_g_stat_t template = {
660 646 { "tcp_timermp_alloced", KSTAT_DATA_UINT64 },
661 647 { "tcp_timermp_allocfail", KSTAT_DATA_UINT64 },
662 648 { "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 },
663 649 { "tcp_freelist_cleanup", KSTAT_DATA_UINT64 },
664 650 };
665 651
666 652 ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
667 653 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
668 654 KSTAT_FLAG_VIRTUAL);
669 655
670 656 if (ksp == NULL)
671 657 return (NULL);
672 658
673 659 bcopy(&template, tcp_g_statp, sizeof (template));
674 660 ksp->ks_data = (void *)tcp_g_statp;
675 661
676 662 kstat_install(ksp);
677 663 return (ksp);
678 664 }
679 665
680 666 void
681 667 tcp_g_kstat_fini(kstat_t *ksp)
682 668 {
683 669 if (ksp != NULL) {
684 670 kstat_delete(ksp);
685 671 }
686 672 }
687 673
688 674 void *
689 675 tcp_kstat2_init(netstackid_t stackid)
690 676 {
691 677 kstat_t *ksp;
692 678
693 679 tcp_stat_t template = {
694 680 { "tcp_time_wait_syn_success", KSTAT_DATA_UINT64, 0 },
695 681 { "tcp_clean_death_nondetached", KSTAT_DATA_UINT64, 0 },
696 682 { "tcp_eager_blowoff_q", KSTAT_DATA_UINT64, 0 },
697 683 { "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64, 0 },
698 684 { "tcp_no_listener", KSTAT_DATA_UINT64, 0 },
699 685 { "tcp_listendrop", KSTAT_DATA_UINT64, 0 },
700 686 { "tcp_listendropq0", KSTAT_DATA_UINT64, 0 },
701 687 { "tcp_wsrv_called", KSTAT_DATA_UINT64, 0 },
702 688 { "tcp_flwctl_on", KSTAT_DATA_UINT64, 0 },
703 689 { "tcp_timer_fire_early", KSTAT_DATA_UINT64, 0 },
704 690 { "tcp_timer_fire_miss", KSTAT_DATA_UINT64, 0 },
705 691 { "tcp_zcopy_on", KSTAT_DATA_UINT64, 0 },
706 692 { "tcp_zcopy_off", KSTAT_DATA_UINT64, 0 },
707 693 { "tcp_zcopy_backoff", KSTAT_DATA_UINT64, 0 },
708 694 { "tcp_fusion_flowctl", KSTAT_DATA_UINT64, 0 },
709 695 { "tcp_fusion_backenabled", KSTAT_DATA_UINT64, 0 },
710 696 { "tcp_fusion_urg", KSTAT_DATA_UINT64, 0 },
711 697 { "tcp_fusion_putnext", KSTAT_DATA_UINT64, 0 },
712 698 { "tcp_fusion_unfusable", KSTAT_DATA_UINT64, 0 },
713 699 { "tcp_fusion_aborted", KSTAT_DATA_UINT64, 0 },
714 700 { "tcp_fusion_unqualified", KSTAT_DATA_UINT64, 0 },
715 701 { "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64, 0 },
716 702 { "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64, 0 },
717 703 { "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64, 0 },
718 704 { "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64, 0 },
719 705 { "tcp_sock_fallback", KSTAT_DATA_UINT64, 0 },
720 706 { "tcp_lso_enabled", KSTAT_DATA_UINT64, 0 },
721 707 { "tcp_lso_disabled", KSTAT_DATA_UINT64, 0 },
722 708 { "tcp_lso_times", KSTAT_DATA_UINT64, 0 },
723 709 { "tcp_lso_pkt_out", KSTAT_DATA_UINT64, 0 },
724 710 { "tcp_listen_cnt_drop", KSTAT_DATA_UINT64, 0 },
725 711 { "tcp_listen_mem_drop", KSTAT_DATA_UINT64, 0 },
726 712 { "tcp_zwin_mem_drop", KSTAT_DATA_UINT64, 0 },
727 713 { "tcp_zwin_ack_syn", KSTAT_DATA_UINT64, 0 },
728 714 { "tcp_rst_unsent", KSTAT_DATA_UINT64, 0 },
729 715 { "tcp_reclaim_cnt", KSTAT_DATA_UINT64, 0 },
730 716 { "tcp_reass_timeout", KSTAT_DATA_UINT64, 0 },
731 717 #ifdef TCP_DEBUG_COUNTER
732 718 { "tcp_time_wait", KSTAT_DATA_UINT64, 0 },
733 719 { "tcp_rput_time_wait", KSTAT_DATA_UINT64, 0 },
734 720 { "tcp_detach_time_wait", KSTAT_DATA_UINT64, 0 },
735 721 { "tcp_timeout_calls", KSTAT_DATA_UINT64, 0 },
736 722 { "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64, 0 },
737 723 { "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64, 0 },
738 724 { "tcp_timeout_canceled", KSTAT_DATA_UINT64, 0 },
739 725 { "tcp_timermp_freed", KSTAT_DATA_UINT64, 0 },
740 726 { "tcp_push_timer_cnt", KSTAT_DATA_UINT64, 0 },
741 727 { "tcp_ack_timer_cnt", KSTAT_DATA_UINT64, 0 },
742 728 #endif
743 729 };
744 730
745 731 ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, "tcpstat", "net",
746 732 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
747 733 stackid);
748 734
749 735 if (ksp == NULL)
750 736 return (NULL);
751 737
752 738 bcopy(&template, ksp->ks_data, sizeof (template));
753 739 ksp->ks_private = (void *)(uintptr_t)stackid;
754 740 ksp->ks_update = tcp_kstat2_update;
755 741
756 742 /*
757 743 * If this is an exclusive netstack for a local zone, the global zone
758 744 * should still be able to read the kstat.
759 745 */
760 746 if (stackid != GLOBAL_NETSTACKID)
761 747 kstat_zone_add(ksp, GLOBAL_ZONEID);
762 748
763 749 kstat_install(ksp);
764 750 return (ksp);
765 751 }
766 752
767 753 void
768 754 tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
769 755 {
770 756 if (ksp != NULL) {
771 757 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
772 758 kstat_delete_netstack(ksp, stackid);
773 759 }
774 760 }
775 761
776 762 /*
777 763 * Sum up all per CPU tcp_stat_t kstat counters.
778 764 */
779 765 static int
780 766 tcp_kstat2_update(kstat_t *kp, int rw)
781 767 {
782 768 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
783 769 netstack_t *ns;
784 770 tcp_stack_t *tcps;
785 771 tcp_stat_t *stats;
786 772 int i;
787 773 int cnt;
788 774
789 775 if (rw == KSTAT_WRITE)
790 776 return (EACCES);
791 777
792 778 ns = netstack_find_by_stackid(stackid);
793 779 if (ns == NULL)
794 780 return (-1);
795 781 tcps = ns->netstack_tcp;
796 782 if (tcps == NULL) {
797 783 netstack_rele(ns);
798 784 return (-1);
799 785 }
800 786
801 787 stats = (tcp_stat_t *)kp->ks_data;
802 788 tcp_clr_stats(stats);
803 789
804 790 /*
805 791 * tcps_sc_cnt may change in the middle of the loop. It is better
806 792 * to get its value first.
807 793 */
808 794 cnt = tcps->tcps_sc_cnt;
809 795 for (i = 0; i < cnt; i++)
810 796 tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);
811 797
812 798 netstack_rele(ns);
813 799 return (0);
814 800 }
815 801
816 802 /*
817 803 * To add stats from one mib2_tcp_t to another. Static fields are not added.
 818 804  * The caller should set them up properly.
819 805 */
820 806 static void
821 807 tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
822 808 {
823 809 to->tcpActiveOpens += from->tcpActiveOpens;
824 810 to->tcpPassiveOpens += from->tcpPassiveOpens;
825 811 to->tcpAttemptFails += from->tcpAttemptFails;
826 812 to->tcpEstabResets += from->tcpEstabResets;
827 813 to->tcpInSegs += from->tcpInSegs;
828 814 to->tcpOutSegs += from->tcpOutSegs;
829 815 to->tcpRetransSegs += from->tcpRetransSegs;
830 816 to->tcpOutRsts += from->tcpOutRsts;
831 817
832 818 to->tcpOutDataSegs += from->tcpOutDataSegs;
833 819 to->tcpOutDataBytes += from->tcpOutDataBytes;
834 820 to->tcpRetransBytes += from->tcpRetransBytes;
835 821 to->tcpOutAck += from->tcpOutAck;
836 822 to->tcpOutAckDelayed += from->tcpOutAckDelayed;
837 823 to->tcpOutUrg += from->tcpOutUrg;
838 824 to->tcpOutWinUpdate += from->tcpOutWinUpdate;
839 825 to->tcpOutWinProbe += from->tcpOutWinProbe;
840 826 to->tcpOutControl += from->tcpOutControl;
841 827 to->tcpOutFastRetrans += from->tcpOutFastRetrans;
842 828
843 829 to->tcpInAckBytes += from->tcpInAckBytes;
844 830 to->tcpInDupAck += from->tcpInDupAck;
845 831 to->tcpInAckUnsent += from->tcpInAckUnsent;
846 832 to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
847 833 to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
848 834 to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
849 835 to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
850 836 to->tcpInDataDupSegs += from->tcpInDataDupSegs;
851 837 to->tcpInDataDupBytes += from->tcpInDataDupBytes;
852 838 to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
853 839 to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
854 840 to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
855 841 to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
856 842 to->tcpInWinProbe += from->tcpInWinProbe;
857 843 to->tcpInWinUpdate += from->tcpInWinUpdate;
858 844 to->tcpInClosed += from->tcpInClosed;
859 845
860 846 to->tcpRttNoUpdate += from->tcpRttNoUpdate;
861 847 to->tcpRttUpdate += from->tcpRttUpdate;
862 848 to->tcpTimRetrans += from->tcpTimRetrans;
863 849 to->tcpTimRetransDrop += from->tcpTimRetransDrop;
864 850 to->tcpTimKeepalive += from->tcpTimKeepalive;
865 851 to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
866 852 to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
867 853 to->tcpListenDrop += from->tcpListenDrop;
868 854 to->tcpListenDropQ0 += from->tcpListenDropQ0;
869 855 to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
870 856 to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
871 857 to->tcpHCInSegs += from->tcpHCInSegs;
872 858 to->tcpHCOutSegs += from->tcpHCOutSegs;
873 859 }
874 860
875 861 /*
876 862 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats. The
877 863 * caller should initialize the target mib2_tcp_t properly as this function
878 864 * just adds up all the per CPU stats.
879 865 */
880 866 static void
881 867 tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
882 868 {
883 869 int i;
884 870 int cnt;
885 871
886 872 /*
887 873 * tcps_sc_cnt may change in the middle of the loop. It is better
888 874 * to get its value first.
889 875 */
890 876 cnt = tcps->tcps_sc_cnt;
891 877 for (i = 0; i < cnt; i++)
892 878 tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
893 879 }
894 880
895 881 /*
896 882 * To set all tcp_stat_t counters to 0.
897 883 */
898 884 static void
899 885 tcp_clr_stats(tcp_stat_t *stats)
900 886 {
901 887 stats->tcp_time_wait_syn_success.value.ui64 = 0;
902 888 stats->tcp_clean_death_nondetached.value.ui64 = 0;
903 889 stats->tcp_eager_blowoff_q.value.ui64 = 0;
904 890 stats->tcp_eager_blowoff_q0.value.ui64 = 0;
905 891 stats->tcp_no_listener.value.ui64 = 0;
906 892 stats->tcp_listendrop.value.ui64 = 0;
907 893 stats->tcp_listendropq0.value.ui64 = 0;
908 894 stats->tcp_wsrv_called.value.ui64 = 0;
909 895 stats->tcp_flwctl_on.value.ui64 = 0;
910 896 stats->tcp_timer_fire_early.value.ui64 = 0;
911 897 stats->tcp_timer_fire_miss.value.ui64 = 0;
912 898 stats->tcp_zcopy_on.value.ui64 = 0;
913 899 stats->tcp_zcopy_off.value.ui64 = 0;
914 900 stats->tcp_zcopy_backoff.value.ui64 = 0;
915 901 stats->tcp_fusion_flowctl.value.ui64 = 0;
916 902 stats->tcp_fusion_backenabled.value.ui64 = 0;
917 903 stats->tcp_fusion_urg.value.ui64 = 0;
918 904 stats->tcp_fusion_putnext.value.ui64 = 0;
919 905 stats->tcp_fusion_unfusable.value.ui64 = 0;
920 906 stats->tcp_fusion_aborted.value.ui64 = 0;
921 907 stats->tcp_fusion_unqualified.value.ui64 = 0;
922 908 stats->tcp_fusion_rrw_busy.value.ui64 = 0;
923 909 stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
924 910 stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
925 911 stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
926 912 stats->tcp_sock_fallback.value.ui64 = 0;
927 913 stats->tcp_lso_enabled.value.ui64 = 0;
928 914 stats->tcp_lso_disabled.value.ui64 = 0;
929 915 stats->tcp_lso_times.value.ui64 = 0;
930 916 stats->tcp_lso_pkt_out.value.ui64 = 0;
931 917 stats->tcp_listen_cnt_drop.value.ui64 = 0;
932 918 stats->tcp_listen_mem_drop.value.ui64 = 0;
933 919 stats->tcp_zwin_mem_drop.value.ui64 = 0;
934 920 stats->tcp_zwin_ack_syn.value.ui64 = 0;
935 921 stats->tcp_rst_unsent.value.ui64 = 0;
936 922 stats->tcp_reclaim_cnt.value.ui64 = 0;
937 923 stats->tcp_reass_timeout.value.ui64 = 0;
938 924
939 925 #ifdef TCP_DEBUG_COUNTER
940 926 stats->tcp_time_wait.value.ui64 = 0;
941 927 stats->tcp_rput_time_wait.value.ui64 = 0;
942 928 stats->tcp_detach_time_wait.value.ui64 = 0;
943 929 stats->tcp_timeout_calls.value.ui64 = 0;
944 930 stats->tcp_timeout_cached_alloc.value.ui64 = 0;
945 931 stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
946 932 stats->tcp_timeout_canceled.value.ui64 = 0;
947 933 stats->tcp_timermp_freed.value.ui64 = 0;
948 934 stats->tcp_push_timer_cnt.value.ui64 = 0;
949 935 stats->tcp_ack_timer_cnt.value.ui64 = 0;
950 936 #endif
951 937 }
952 938
953 939 /*
954 940 * To add counters from the per CPU tcp_stat_counter_t to the stack
955 941 * tcp_stat_t.
956 942 */
957 943 static void
958 944 tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
959 945 {
960 946 to->tcp_time_wait_syn_success.value.ui64 +=
961 947 from->tcp_time_wait_syn_success;
962 948 to->tcp_clean_death_nondetached.value.ui64 +=
963 949 from->tcp_clean_death_nondetached;
964 950 to->tcp_eager_blowoff_q.value.ui64 +=
965 951 from->tcp_eager_blowoff_q;
966 952 to->tcp_eager_blowoff_q0.value.ui64 +=
967 953 from->tcp_eager_blowoff_q0;
968 954 to->tcp_no_listener.value.ui64 +=
969 955 from->tcp_no_listener;
970 956 to->tcp_listendrop.value.ui64 +=
971 957 from->tcp_listendrop;
972 958 to->tcp_listendropq0.value.ui64 +=
973 959 from->tcp_listendropq0;
974 960 to->tcp_wsrv_called.value.ui64 +=
975 961 from->tcp_wsrv_called;
976 962 to->tcp_flwctl_on.value.ui64 +=
977 963 from->tcp_flwctl_on;
978 964 to->tcp_timer_fire_early.value.ui64 +=
979 965 from->tcp_timer_fire_early;
980 966 to->tcp_timer_fire_miss.value.ui64 +=
981 967 from->tcp_timer_fire_miss;
982 968 to->tcp_zcopy_on.value.ui64 +=
983 969 from->tcp_zcopy_on;
984 970 to->tcp_zcopy_off.value.ui64 +=
985 971 from->tcp_zcopy_off;
986 972 to->tcp_zcopy_backoff.value.ui64 +=
987 973 from->tcp_zcopy_backoff;
988 974 to->tcp_fusion_flowctl.value.ui64 +=
989 975 from->tcp_fusion_flowctl;
990 976 to->tcp_fusion_backenabled.value.ui64 +=
991 977 from->tcp_fusion_backenabled;
992 978 to->tcp_fusion_urg.value.ui64 +=
993 979 from->tcp_fusion_urg;
994 980 to->tcp_fusion_putnext.value.ui64 +=
995 981 from->tcp_fusion_putnext;
996 982 to->tcp_fusion_unfusable.value.ui64 +=
997 983 from->tcp_fusion_unfusable;
998 984 to->tcp_fusion_aborted.value.ui64 +=
999 985 from->tcp_fusion_aborted;
1000 986 to->tcp_fusion_unqualified.value.ui64 +=
1001 987 from->tcp_fusion_unqualified;
1002 988 to->tcp_fusion_rrw_busy.value.ui64 +=
1003 989 from->tcp_fusion_rrw_busy;
1004 990 to->tcp_fusion_rrw_msgcnt.value.ui64 +=
1005 991 from->tcp_fusion_rrw_msgcnt;
1006 992 to->tcp_fusion_rrw_plugged.value.ui64 +=
1007 993 from->tcp_fusion_rrw_plugged;
1008 994 to->tcp_in_ack_unsent_drop.value.ui64 +=
1009 995 from->tcp_in_ack_unsent_drop;
1010 996 to->tcp_sock_fallback.value.ui64 +=
1011 997 from->tcp_sock_fallback;
1012 998 to->tcp_lso_enabled.value.ui64 +=
1013 999 from->tcp_lso_enabled;
1014 1000 to->tcp_lso_disabled.value.ui64 +=
1015 1001 from->tcp_lso_disabled;
1016 1002 to->tcp_lso_times.value.ui64 +=
1017 1003 from->tcp_lso_times;
1018 1004 to->tcp_lso_pkt_out.value.ui64 +=
1019 1005 from->tcp_lso_pkt_out;
1020 1006 to->tcp_listen_cnt_drop.value.ui64 +=
1021 1007 from->tcp_listen_cnt_drop;
1022 1008 to->tcp_listen_mem_drop.value.ui64 +=
1023 1009 from->tcp_listen_mem_drop;
1024 1010 to->tcp_zwin_mem_drop.value.ui64 +=
1025 1011 from->tcp_zwin_mem_drop;
1026 1012 to->tcp_zwin_ack_syn.value.ui64 +=
1027 1013 from->tcp_zwin_ack_syn;
1028 1014 to->tcp_rst_unsent.value.ui64 +=
1029 1015 from->tcp_rst_unsent;
1030 1016 to->tcp_reclaim_cnt.value.ui64 +=
1031 1017 from->tcp_reclaim_cnt;
1032 1018 to->tcp_reass_timeout.value.ui64 +=
1033 1019 from->tcp_reass_timeout;
1034 1020
1035 1021 #ifdef TCP_DEBUG_COUNTER
1036 1022 to->tcp_time_wait.value.ui64 +=
1037 1023 from->tcp_time_wait;
1038 1024 to->tcp_rput_time_wait.value.ui64 +=
1039 1025 from->tcp_rput_time_wait;
1040 1026 to->tcp_detach_time_wait.value.ui64 +=
1041 1027 from->tcp_detach_time_wait;
1042 1028 to->tcp_timeout_calls.value.ui64 +=
1043 1029 from->tcp_timeout_calls;
1044 1030 to->tcp_timeout_cached_alloc.value.ui64 +=
1045 1031 from->tcp_timeout_cached_alloc;
1046 1032 to->tcp_timeout_cancel_reqs.value.ui64 +=
1047 1033 from->tcp_timeout_cancel_reqs;
1048 1034 to->tcp_timeout_canceled.value.ui64 +=
1049 1035 from->tcp_timeout_canceled;
1050 1036 to->tcp_timermp_freed.value.ui64 +=
1051 1037 from->tcp_timermp_freed;
1052 1038 to->tcp_push_timer_cnt.value.ui64 +=
1053 1039 from->tcp_push_timer_cnt;
1054 1040 to->tcp_ack_timer_cnt.value.ui64 +=
1055 1041 from->tcp_ack_timer_cnt;
1056 1042 #endif
1057 1043 }
[ 707 lines elided ]
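For context, the new tcp_set_conninfo() helper added above fills in the extended
tcpConnEntryInfo_s counters (in/out data bytes and segments, retransmits,
zero-window probes, cwnd, unsent data, and RTT estimates) that are reported
through the MIB2_TCP_CONN and MIB2_TCP6_CONN tables and consumed by
connstat(1M). The sketch below is not part of this change; it is a minimal,
hypothetical userland illustration of deriving a retransmission percentage from
one mib2_tcpConnEntry_t, assuming the caller has already walked the MIB2
connection table (as netstat and connstat do) to obtain the entry. The function
name tce_print_retrans is made up for the example, and the casts are defensive
since the exact integer widths of the ce_ fields are defined in inet/mib2.h and
not restated here.

    #include <stdio.h>
    #include <inet/mib2.h>  /* mib2_tcpConnEntry_t, struct tcpConnEntryInfo_s */

    /*
     * Hypothetical helper (illustration only): given one MIB2 TCP connection
     * entry obtained elsewhere, print the per-connection output counters that
     * tcp_set_conninfo() populates and a derived retransmission percentage.
     */
    static void
    tce_print_retrans(const mib2_tcpConnEntry_t *tce)
    {
            const struct tcpConnEntryInfo_s *cei = &tce->tcpConnEntryInfo;
            double pct = 0.0;

            if (cei->ce_out_data_bytes != 0) {
                    pct = 100.0 * (double)cei->ce_out_retrans_bytes /
                        (double)cei->ce_out_data_bytes;
            }

            (void) printf("out: %llu bytes in %llu segs, retransmitted "
                "%llu bytes in %llu segs (%.2f%%), rto %u, mss %u\n",
                (unsigned long long)cei->ce_out_data_bytes,
                (unsigned long long)cei->ce_out_data_segs,
                (unsigned long long)cei->ce_out_retrans_bytes,
                (unsigned long long)cei->ce_out_retrans_segs,
                pct, (unsigned int)cei->ce_rto, (unsigned int)cei->ce_mss);
    }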