Print this page
some functions in the tcp module can be static
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/inet/tcp/tcp_stats.c
+++ new/usr/src/uts/common/inet/tcp/tcp_stats.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/tihdr.h>
28 28 #include <sys/policy.h>
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
29 29 #include <sys/tsol/tnet.h>
30 30
31 31 #include <inet/common.h>
32 32 #include <inet/ip.h>
33 33 #include <inet/tcp.h>
34 34 #include <inet/tcp_impl.h>
35 35 #include <inet/tcp_stats.h>
36 36 #include <inet/kstatcom.h>
37 37 #include <inet/snmpcom.h>
38 38
39 -static int tcp_kstat_update(kstat_t *kp, int rw);
40 -static int tcp_kstat2_update(kstat_t *kp, int rw);
39 +static int tcp_snmp_state(tcp_t *);
40 +static int tcp_kstat_update(kstat_t *, int);
41 +static int tcp_kstat2_update(kstat_t *, int);
41 42 static void tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);
42 43
43 44 static void tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
44 45 static void tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
45 46 static void tcp_clr_stats(tcp_stat_t *);
46 47
47 48 tcp_g_stat_t tcp_g_statistics;
48 49 kstat_t *tcp_g_kstat;
49 50
50 51 /* Translate TCP state to MIB2 TCP state. */
51 52 static int
52 53 tcp_snmp_state(tcp_t *tcp)
53 54 {
54 55 if (tcp == NULL)
55 56 return (0);
56 57
57 58 switch (tcp->tcp_state) {
58 59 case TCPS_CLOSED:
59 60 case TCPS_IDLE: /* RFC1213 doesn't have analogue for IDLE & BOUND */
60 61 case TCPS_BOUND:
61 62 return (MIB2_TCP_closed);
62 63 case TCPS_LISTEN:
63 64 return (MIB2_TCP_listen);
64 65 case TCPS_SYN_SENT:
65 66 return (MIB2_TCP_synSent);
66 67 case TCPS_SYN_RCVD:
67 68 return (MIB2_TCP_synReceived);
68 69 case TCPS_ESTABLISHED:
69 70 return (MIB2_TCP_established);
70 71 case TCPS_CLOSE_WAIT:
71 72 return (MIB2_TCP_closeWait);
72 73 case TCPS_FIN_WAIT_1:
73 74 return (MIB2_TCP_finWait1);
74 75 case TCPS_CLOSING:
75 76 return (MIB2_TCP_closing);
76 77 case TCPS_LAST_ACK:
77 78 return (MIB2_TCP_lastAck);
78 79 case TCPS_FIN_WAIT_2:
79 80 return (MIB2_TCP_finWait2);
80 81 case TCPS_TIME_WAIT:
81 82 return (MIB2_TCP_timeWait);
82 83 default:
83 84 return (0);
84 85 }
85 86 }
86 87
87 88 /*
88 89 * Return SNMP stuff in buffer in mpdata.
89 90 */
90 91 mblk_t *
91 92 tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
92 93 {
93 94 mblk_t *mpdata;
94 95 mblk_t *mp_conn_ctl = NULL;
95 96 mblk_t *mp_conn_tail;
96 97 mblk_t *mp_attr_ctl = NULL;
97 98 mblk_t *mp_attr_tail;
98 99 mblk_t *mp6_conn_ctl = NULL;
99 100 mblk_t *mp6_conn_tail;
100 101 mblk_t *mp6_attr_ctl = NULL;
101 102 mblk_t *mp6_attr_tail;
102 103 struct opthdr *optp;
103 104 mib2_tcpConnEntry_t tce;
104 105 mib2_tcp6ConnEntry_t tce6;
105 106 mib2_transportMLPEntry_t mlp;
106 107 connf_t *connfp;
107 108 int i;
108 109 boolean_t ispriv;
109 110 zoneid_t zoneid;
110 111 int v4_conn_idx;
111 112 int v6_conn_idx;
112 113 conn_t *connp = Q_TO_CONN(q);
113 114 tcp_stack_t *tcps;
114 115 ip_stack_t *ipst;
115 116 mblk_t *mp2ctl;
116 117 mib2_tcp_t tcp_mib;
117 118 size_t tcp_mib_size, tce_size, tce6_size;
118 119
119 120 /*
120 121 * make a copy of the original message
121 122 */
122 123 mp2ctl = copymsg(mpctl);
123 124
124 125 if (mpctl == NULL ||
125 126 (mpdata = mpctl->b_cont) == NULL ||
126 127 (mp_conn_ctl = copymsg(mpctl)) == NULL ||
127 128 (mp_attr_ctl = copymsg(mpctl)) == NULL ||
128 129 (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
129 130 (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
130 131 freemsg(mp_conn_ctl);
131 132 freemsg(mp_attr_ctl);
132 133 freemsg(mp6_conn_ctl);
133 134 freemsg(mp6_attr_ctl);
134 135 freemsg(mpctl);
135 136 freemsg(mp2ctl);
136 137 return (NULL);
137 138 }
138 139
139 140 ipst = connp->conn_netstack->netstack_ip;
140 141 tcps = connp->conn_netstack->netstack_tcp;
141 142
142 143 if (legacy_req) {
143 144 tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
144 145 tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
145 146 tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
146 147 } else {
147 148 tcp_mib_size = sizeof (mib2_tcp_t);
148 149 tce_size = sizeof (mib2_tcpConnEntry_t);
149 150 tce6_size = sizeof (mib2_tcp6ConnEntry_t);
150 151 }
151 152
152 153 bzero(&tcp_mib, sizeof (tcp_mib));
153 154
154 155 /* build table of connections -- need count in fixed part */
155 156 SET_MIB(tcp_mib.tcpRtoAlgorithm, 4); /* vanj */
156 157 SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
157 158 SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
158 159 SET_MIB(tcp_mib.tcpMaxConn, -1);
159 160 SET_MIB(tcp_mib.tcpCurrEstab, 0);
160 161
161 162 ispriv =
162 163 secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
163 164 zoneid = Q_TO_CONN(q)->conn_zoneid;
164 165
165 166 v4_conn_idx = v6_conn_idx = 0;
166 167 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
167 168
168 169 for (i = 0; i < CONN_G_HASH_SIZE; i++) {
169 170 ipst = tcps->tcps_netstack->netstack_ip;
170 171
171 172 connfp = &ipst->ips_ipcl_globalhash_fanout[i];
172 173
173 174 connp = NULL;
174 175
175 176 while ((connp =
176 177 ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
177 178 tcp_t *tcp;
178 179 boolean_t needattr;
179 180
180 181 if (connp->conn_zoneid != zoneid)
181 182 continue; /* not in this zone */
182 183
183 184 tcp = connp->conn_tcp;
184 185 TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
185 186 tcp->tcp_ibsegs = 0;
186 187 TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
187 188 tcp->tcp_obsegs = 0;
188 189
189 190 tce6.tcp6ConnState = tce.tcpConnState =
190 191 tcp_snmp_state(tcp);
191 192 if (tce.tcpConnState == MIB2_TCP_established ||
192 193 tce.tcpConnState == MIB2_TCP_closeWait)
193 194 BUMP_MIB(&tcp_mib, tcpCurrEstab);
194 195
195 196 needattr = B_FALSE;
196 197 bzero(&mlp, sizeof (mlp));
197 198 if (connp->conn_mlp_type != mlptSingle) {
198 199 if (connp->conn_mlp_type == mlptShared ||
199 200 connp->conn_mlp_type == mlptBoth)
200 201 mlp.tme_flags |= MIB2_TMEF_SHARED;
201 202 if (connp->conn_mlp_type == mlptPrivate ||
202 203 connp->conn_mlp_type == mlptBoth)
203 204 mlp.tme_flags |= MIB2_TMEF_PRIVATE;
204 205 needattr = B_TRUE;
205 206 }
206 207 if (connp->conn_anon_mlp) {
207 208 mlp.tme_flags |= MIB2_TMEF_ANONMLP;
208 209 needattr = B_TRUE;
209 210 }
210 211 switch (connp->conn_mac_mode) {
211 212 case CONN_MAC_DEFAULT:
212 213 break;
213 214 case CONN_MAC_AWARE:
214 215 mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
215 216 needattr = B_TRUE;
216 217 break;
217 218 case CONN_MAC_IMPLICIT:
218 219 mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
219 220 needattr = B_TRUE;
220 221 break;
221 222 }
222 223 if (connp->conn_ixa->ixa_tsl != NULL) {
223 224 ts_label_t *tsl;
224 225
225 226 tsl = connp->conn_ixa->ixa_tsl;
226 227 mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
227 228 mlp.tme_doi = label2doi(tsl);
228 229 mlp.tme_label = *label2bslabel(tsl);
229 230 needattr = B_TRUE;
230 231 }
231 232
232 233 /* Create a message to report on IPv6 entries */
233 234 if (connp->conn_ipversion == IPV6_VERSION) {
234 235 tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
235 236 tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
236 237 tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
237 238 tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
238 239 if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
239 240 tce6.tcp6ConnIfIndex =
240 241 connp->conn_ixa->ixa_scopeid;
241 242 } else {
242 243 tce6.tcp6ConnIfIndex = connp->conn_bound_if;
243 244 }
244 245 /* Don't want just anybody seeing these... */
245 246 if (ispriv) {
246 247 tce6.tcp6ConnEntryInfo.ce_snxt =
247 248 tcp->tcp_snxt;
248 249 tce6.tcp6ConnEntryInfo.ce_suna =
249 250 tcp->tcp_suna;
250 251 tce6.tcp6ConnEntryInfo.ce_rnxt =
251 252 tcp->tcp_rnxt;
252 253 tce6.tcp6ConnEntryInfo.ce_rack =
253 254 tcp->tcp_rack;
254 255 } else {
255 256 /*
256 257 * Netstat, unfortunately, uses this to
257 258 * get send/receive queue sizes. How to fix?
258 259 * Why not compute the difference only?
259 260 */
260 261 tce6.tcp6ConnEntryInfo.ce_snxt =
261 262 tcp->tcp_snxt - tcp->tcp_suna;
262 263 tce6.tcp6ConnEntryInfo.ce_suna = 0;
263 264 tce6.tcp6ConnEntryInfo.ce_rnxt =
264 265 tcp->tcp_rnxt - tcp->tcp_rack;
265 266 tce6.tcp6ConnEntryInfo.ce_rack = 0;
266 267 }
267 268
268 269 tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
269 270 tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
270 271 tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto;
271 272 tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss;
272 273 tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;
273 274
274 275 tce6.tcp6ConnCreationProcess =
275 276 (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
276 277 connp->conn_cpid;
277 278 tce6.tcp6ConnCreationTime = connp->conn_open_time;
278 279
279 280 (void) snmp_append_data2(mp6_conn_ctl->b_cont,
280 281 &mp6_conn_tail, (char *)&tce6, tce6_size);
281 282
282 283 mlp.tme_connidx = v6_conn_idx++;
283 284 if (needattr)
284 285 (void) snmp_append_data2(mp6_attr_ctl->b_cont,
285 286 &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
286 287 }
287 288 /*
288 289 * Create an IPv4 table entry for IPv4 entries and also
289 290 * for IPv6 entries which are bound to in6addr_any
290 291 * but don't have IPV6_V6ONLY set.
291 292 * (i.e. anything an IPv4 peer could connect to)
292 293 */
293 294 if (connp->conn_ipversion == IPV4_VERSION ||
294 295 (tcp->tcp_state <= TCPS_LISTEN &&
295 296 !connp->conn_ipv6_v6only &&
296 297 IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
297 298 if (connp->conn_ipversion == IPV6_VERSION) {
298 299 tce.tcpConnRemAddress = INADDR_ANY;
299 300 tce.tcpConnLocalAddress = INADDR_ANY;
300 301 } else {
301 302 tce.tcpConnRemAddress =
302 303 connp->conn_faddr_v4;
303 304 tce.tcpConnLocalAddress =
304 305 connp->conn_laddr_v4;
305 306 }
306 307 tce.tcpConnLocalPort = ntohs(connp->conn_lport);
307 308 tce.tcpConnRemPort = ntohs(connp->conn_fport);
308 309 /* Don't want just anybody seeing these... */
309 310 if (ispriv) {
310 311 tce.tcpConnEntryInfo.ce_snxt =
311 312 tcp->tcp_snxt;
312 313 tce.tcpConnEntryInfo.ce_suna =
313 314 tcp->tcp_suna;
314 315 tce.tcpConnEntryInfo.ce_rnxt =
315 316 tcp->tcp_rnxt;
316 317 tce.tcpConnEntryInfo.ce_rack =
317 318 tcp->tcp_rack;
318 319 } else {
319 320 /*
320 321 * Netstat, unfortunately, uses this to
321 322 * get send/receive queue sizes. How
322 323 * to fix?
323 324 * Why not compute the difference only?
324 325 */
325 326 tce.tcpConnEntryInfo.ce_snxt =
326 327 tcp->tcp_snxt - tcp->tcp_suna;
327 328 tce.tcpConnEntryInfo.ce_suna = 0;
328 329 tce.tcpConnEntryInfo.ce_rnxt =
329 330 tcp->tcp_rnxt - tcp->tcp_rack;
330 331 tce.tcpConnEntryInfo.ce_rack = 0;
331 332 }
332 333
333 334 tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
334 335 tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
335 336 tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto;
336 337 tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss;
337 338 tce.tcpConnEntryInfo.ce_state =
338 339 tcp->tcp_state;
339 340
340 341 tce.tcpConnCreationProcess =
341 342 (connp->conn_cpid < 0) ?
342 343 MIB2_UNKNOWN_PROCESS :
343 344 connp->conn_cpid;
344 345 tce.tcpConnCreationTime = connp->conn_open_time;
345 346
346 347 (void) snmp_append_data2(mp_conn_ctl->b_cont,
347 348 &mp_conn_tail, (char *)&tce, tce_size);
348 349
349 350 mlp.tme_connidx = v4_conn_idx++;
350 351 if (needattr)
351 352 (void) snmp_append_data2(
352 353 mp_attr_ctl->b_cont,
353 354 &mp_attr_tail, (char *)&mlp,
354 355 sizeof (mlp));
355 356 }
356 357 }
357 358 }
358 359
359 360 tcp_sum_mib(tcps, &tcp_mib);
360 361
361 362 /* Fixed length structure for IPv4 and IPv6 counters */
362 363 SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
363 364 SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);
364 365
365 366 /*
366 367 * Synchronize 32- and 64-bit counters. Note that tcpInSegs and
367 368 * tcpOutSegs are not updated anywhere in TCP. The new 64 bits
368 369 * counters are used. Hence the old counters' values in tcp_sc_mib
369 370 * are always 0.
370 371 */
371 372 SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
372 373 SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);
373 374
374 375 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
375 376 optp->level = MIB2_TCP;
376 377 optp->name = 0;
377 378 (void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
378 379 optp->len = msgdsize(mpdata);
379 380 qreply(q, mpctl);
380 381
381 382 /* table of connections... */
382 383 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
383 384 sizeof (struct T_optmgmt_ack)];
384 385 optp->level = MIB2_TCP;
385 386 optp->name = MIB2_TCP_CONN;
386 387 optp->len = msgdsize(mp_conn_ctl->b_cont);
387 388 qreply(q, mp_conn_ctl);
388 389
389 390 /* table of MLP attributes... */
390 391 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
391 392 sizeof (struct T_optmgmt_ack)];
392 393 optp->level = MIB2_TCP;
393 394 optp->name = EXPER_XPORT_MLP;
394 395 optp->len = msgdsize(mp_attr_ctl->b_cont);
395 396 if (optp->len == 0)
396 397 freemsg(mp_attr_ctl);
397 398 else
398 399 qreply(q, mp_attr_ctl);
399 400
400 401 /* table of IPv6 connections... */
401 402 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
402 403 sizeof (struct T_optmgmt_ack)];
403 404 optp->level = MIB2_TCP6;
404 405 optp->name = MIB2_TCP6_CONN;
405 406 optp->len = msgdsize(mp6_conn_ctl->b_cont);
406 407 qreply(q, mp6_conn_ctl);
407 408
408 409 /* table of IPv6 MLP attributes... */
409 410 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
410 411 sizeof (struct T_optmgmt_ack)];
411 412 optp->level = MIB2_TCP6;
412 413 optp->name = EXPER_XPORT_MLP;
413 414 optp->len = msgdsize(mp6_attr_ctl->b_cont);
414 415 if (optp->len == 0)
415 416 freemsg(mp6_attr_ctl);
416 417 else
417 418 qreply(q, mp6_attr_ctl);
418 419 return (mp2ctl);
419 420 }
420 421
421 422 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
422 423 /* ARGSUSED */
423 424 int
424 425 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
425 426 {
426 427 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;
427 428
428 429 switch (level) {
429 430 case MIB2_TCP:
430 431 switch (name) {
431 432 case 13:
432 433 if (tce->tcpConnState != MIB2_TCP_deleteTCB)
433 434 return (0);
434 435 /* TODO: delete entry defined by tce */
435 436 return (1);
436 437 default:
437 438 return (0);
438 439 }
439 440 default:
440 441 return (1);
441 442 }
442 443 }
443 444
444 445 /*
445 446 * TCP Kstats implementation
446 447 */
447 448 void *
448 449 tcp_kstat_init(netstackid_t stackid)
449 450 {
450 451 kstat_t *ksp;
451 452
452 453 tcp_named_kstat_t template = {
453 454 { "rtoAlgorithm", KSTAT_DATA_INT32, 0 },
454 455 { "rtoMin", KSTAT_DATA_INT32, 0 },
455 456 { "rtoMax", KSTAT_DATA_INT32, 0 },
456 457 { "maxConn", KSTAT_DATA_INT32, 0 },
457 458 { "activeOpens", KSTAT_DATA_UINT32, 0 },
458 459 { "passiveOpens", KSTAT_DATA_UINT32, 0 },
459 460 { "attemptFails", KSTAT_DATA_UINT32, 0 },
460 461 { "estabResets", KSTAT_DATA_UINT32, 0 },
461 462 { "currEstab", KSTAT_DATA_UINT32, 0 },
462 463 { "inSegs", KSTAT_DATA_UINT64, 0 },
463 464 { "outSegs", KSTAT_DATA_UINT64, 0 },
464 465 { "retransSegs", KSTAT_DATA_UINT32, 0 },
465 466 { "connTableSize", KSTAT_DATA_INT32, 0 },
466 467 { "outRsts", KSTAT_DATA_UINT32, 0 },
467 468 { "outDataSegs", KSTAT_DATA_UINT32, 0 },
468 469 { "outDataBytes", KSTAT_DATA_UINT32, 0 },
469 470 { "retransBytes", KSTAT_DATA_UINT32, 0 },
470 471 { "outAck", KSTAT_DATA_UINT32, 0 },
471 472 { "outAckDelayed", KSTAT_DATA_UINT32, 0 },
472 473 { "outUrg", KSTAT_DATA_UINT32, 0 },
473 474 { "outWinUpdate", KSTAT_DATA_UINT32, 0 },
474 475 { "outWinProbe", KSTAT_DATA_UINT32, 0 },
475 476 { "outControl", KSTAT_DATA_UINT32, 0 },
476 477 { "outFastRetrans", KSTAT_DATA_UINT32, 0 },
477 478 { "inAckSegs", KSTAT_DATA_UINT32, 0 },
478 479 { "inAckBytes", KSTAT_DATA_UINT32, 0 },
479 480 { "inDupAck", KSTAT_DATA_UINT32, 0 },
480 481 { "inAckUnsent", KSTAT_DATA_UINT32, 0 },
481 482 { "inDataInorderSegs", KSTAT_DATA_UINT32, 0 },
482 483 { "inDataInorderBytes", KSTAT_DATA_UINT32, 0 },
483 484 { "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 },
484 485 { "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 },
485 486 { "inDataDupSegs", KSTAT_DATA_UINT32, 0 },
486 487 { "inDataDupBytes", KSTAT_DATA_UINT32, 0 },
487 488 { "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 },
488 489 { "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 },
489 490 { "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 },
490 491 { "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 },
491 492 { "inWinProbe", KSTAT_DATA_UINT32, 0 },
492 493 { "inWinUpdate", KSTAT_DATA_UINT32, 0 },
493 494 { "inClosed", KSTAT_DATA_UINT32, 0 },
494 495 { "rttUpdate", KSTAT_DATA_UINT32, 0 },
495 496 { "rttNoUpdate", KSTAT_DATA_UINT32, 0 },
496 497 { "timRetrans", KSTAT_DATA_UINT32, 0 },
497 498 { "timRetransDrop", KSTAT_DATA_UINT32, 0 },
498 499 { "timKeepalive", KSTAT_DATA_UINT32, 0 },
499 500 { "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 },
500 501 { "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 },
501 502 { "listenDrop", KSTAT_DATA_UINT32, 0 },
502 503 { "listenDropQ0", KSTAT_DATA_UINT32, 0 },
503 504 { "halfOpenDrop", KSTAT_DATA_UINT32, 0 },
504 505 { "outSackRetransSegs", KSTAT_DATA_UINT32, 0 },
505 506 { "connTableSize6", KSTAT_DATA_INT32, 0 }
506 507 };
507 508
508 509 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
509 510 KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);
510 511
511 512 if (ksp == NULL)
512 513 return (NULL);
513 514
514 515 template.rtoAlgorithm.value.ui32 = 4;
515 516 template.maxConn.value.i32 = -1;
516 517
517 518 bcopy(&template, ksp->ks_data, sizeof (template));
518 519 ksp->ks_update = tcp_kstat_update;
519 520 ksp->ks_private = (void *)(uintptr_t)stackid;
520 521
521 522 kstat_install(ksp);
522 523 return (ksp);
523 524 }
524 525
525 526 void
526 527 tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
527 528 {
528 529 if (ksp != NULL) {
529 530 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
530 531 kstat_delete_netstack(ksp, stackid);
531 532 }
532 533 }
533 534
534 535 static int
535 536 tcp_kstat_update(kstat_t *kp, int rw)
536 537 {
537 538 tcp_named_kstat_t *tcpkp;
538 539 tcp_t *tcp;
539 540 connf_t *connfp;
540 541 conn_t *connp;
541 542 int i;
542 543 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
543 544 netstack_t *ns;
544 545 tcp_stack_t *tcps;
545 546 ip_stack_t *ipst;
546 547 mib2_tcp_t tcp_mib;
547 548
548 549 if (rw == KSTAT_WRITE)
549 550 return (EACCES);
550 551
551 552 ns = netstack_find_by_stackid(stackid);
552 553 if (ns == NULL)
553 554 return (-1);
554 555 tcps = ns->netstack_tcp;
555 556 if (tcps == NULL) {
556 557 netstack_rele(ns);
557 558 return (-1);
558 559 }
559 560
560 561 tcpkp = (tcp_named_kstat_t *)kp->ks_data;
561 562
562 563 tcpkp->currEstab.value.ui32 = 0;
563 564 tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
564 565 tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;
565 566
566 567 ipst = ns->netstack_ip;
567 568
568 569 for (i = 0; i < CONN_G_HASH_SIZE; i++) {
569 570 connfp = &ipst->ips_ipcl_globalhash_fanout[i];
570 571 connp = NULL;
571 572 while ((connp =
572 573 ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
573 574 tcp = connp->conn_tcp;
574 575 switch (tcp_snmp_state(tcp)) {
575 576 case MIB2_TCP_established:
576 577 case MIB2_TCP_closeWait:
577 578 tcpkp->currEstab.value.ui32++;
578 579 break;
579 580 }
580 581 }
581 582 }
582 583 bzero(&tcp_mib, sizeof (tcp_mib));
583 584 tcp_sum_mib(tcps, &tcp_mib);
584 585
585 586 /* Fixed length structure for IPv4 and IPv6 counters */
586 587 SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
587 588 SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));
588 589
589 590 tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
590 591 tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
591 592 tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
592 593 tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
593 594 tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
594 595 tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
595 596 tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
596 597 tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
597 598 tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
598 599 tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
599 600 tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
600 601 tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
601 602 tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
602 603 tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
603 604 tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
604 605 tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
605 606 tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
606 607 tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
607 608 tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
608 609 tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
609 610 tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
610 611 tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
611 612 tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
612 613 tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
613 614 tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
614 615 tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
615 616 tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
616 617 tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
617 618 tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
618 619 tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
619 620 tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
620 621 tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
621 622 tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
622 623 tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
623 624 tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
624 625 tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
625 626 tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
626 627 tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
627 628 tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
628 629 tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
629 630 tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
630 631 tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
631 632 tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
632 633 tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
633 634 tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
634 635 tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
635 636 tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
636 637 tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;
637 638
638 639 netstack_rele(ns);
639 640 return (0);
640 641 }
641 642
642 643 /*
643 644 * kstats related to squeues i.e. not per IP instance
644 645 */
645 646 void *
646 647 tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
647 648 {
648 649 kstat_t *ksp;
649 650
650 651 tcp_g_stat_t template = {
651 652 { "tcp_timermp_alloced", KSTAT_DATA_UINT64 },
652 653 { "tcp_timermp_allocfail", KSTAT_DATA_UINT64 },
653 654 { "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 },
654 655 { "tcp_freelist_cleanup", KSTAT_DATA_UINT64 },
655 656 };
656 657
657 658 ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
658 659 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
659 660 KSTAT_FLAG_VIRTUAL);
660 661
661 662 if (ksp == NULL)
662 663 return (NULL);
663 664
664 665 bcopy(&template, tcp_g_statp, sizeof (template));
665 666 ksp->ks_data = (void *)tcp_g_statp;
666 667
667 668 kstat_install(ksp);
668 669 return (ksp);
669 670 }
670 671
671 672 void
672 673 tcp_g_kstat_fini(kstat_t *ksp)
673 674 {
674 675 if (ksp != NULL) {
675 676 kstat_delete(ksp);
676 677 }
677 678 }
678 679
679 680 void *
680 681 tcp_kstat2_init(netstackid_t stackid)
681 682 {
682 683 kstat_t *ksp;
683 684
684 685 tcp_stat_t template = {
685 686 { "tcp_time_wait_syn_success", KSTAT_DATA_UINT64, 0 },
686 687 { "tcp_clean_death_nondetached", KSTAT_DATA_UINT64, 0 },
687 688 { "tcp_eager_blowoff_q", KSTAT_DATA_UINT64, 0 },
688 689 { "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64, 0 },
689 690 { "tcp_no_listener", KSTAT_DATA_UINT64, 0 },
690 691 { "tcp_listendrop", KSTAT_DATA_UINT64, 0 },
691 692 { "tcp_listendropq0", KSTAT_DATA_UINT64, 0 },
692 693 { "tcp_wsrv_called", KSTAT_DATA_UINT64, 0 },
693 694 { "tcp_flwctl_on", KSTAT_DATA_UINT64, 0 },
694 695 { "tcp_timer_fire_early", KSTAT_DATA_UINT64, 0 },
695 696 { "tcp_timer_fire_miss", KSTAT_DATA_UINT64, 0 },
696 697 { "tcp_zcopy_on", KSTAT_DATA_UINT64, 0 },
697 698 { "tcp_zcopy_off", KSTAT_DATA_UINT64, 0 },
698 699 { "tcp_zcopy_backoff", KSTAT_DATA_UINT64, 0 },
699 700 { "tcp_fusion_flowctl", KSTAT_DATA_UINT64, 0 },
700 701 { "tcp_fusion_backenabled", KSTAT_DATA_UINT64, 0 },
701 702 { "tcp_fusion_urg", KSTAT_DATA_UINT64, 0 },
702 703 { "tcp_fusion_putnext", KSTAT_DATA_UINT64, 0 },
703 704 { "tcp_fusion_unfusable", KSTAT_DATA_UINT64, 0 },
704 705 { "tcp_fusion_aborted", KSTAT_DATA_UINT64, 0 },
705 706 { "tcp_fusion_unqualified", KSTAT_DATA_UINT64, 0 },
706 707 { "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64, 0 },
707 708 { "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64, 0 },
708 709 { "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64, 0 },
709 710 { "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64, 0 },
710 711 { "tcp_sock_fallback", KSTAT_DATA_UINT64, 0 },
711 712 { "tcp_lso_enabled", KSTAT_DATA_UINT64, 0 },
712 713 { "tcp_lso_disabled", KSTAT_DATA_UINT64, 0 },
713 714 { "tcp_lso_times", KSTAT_DATA_UINT64, 0 },
714 715 { "tcp_lso_pkt_out", KSTAT_DATA_UINT64, 0 },
715 716 { "tcp_listen_cnt_drop", KSTAT_DATA_UINT64, 0 },
716 717 { "tcp_listen_mem_drop", KSTAT_DATA_UINT64, 0 },
717 718 { "tcp_zwin_mem_drop", KSTAT_DATA_UINT64, 0 },
718 719 { "tcp_zwin_ack_syn", KSTAT_DATA_UINT64, 0 },
719 720 { "tcp_rst_unsent", KSTAT_DATA_UINT64, 0 },
720 721 { "tcp_reclaim_cnt", KSTAT_DATA_UINT64, 0 },
721 722 { "tcp_reass_timeout", KSTAT_DATA_UINT64, 0 },
722 723 #ifdef TCP_DEBUG_COUNTER
723 724 { "tcp_time_wait", KSTAT_DATA_UINT64, 0 },
724 725 { "tcp_rput_time_wait", KSTAT_DATA_UINT64, 0 },
725 726 { "tcp_detach_time_wait", KSTAT_DATA_UINT64, 0 },
726 727 { "tcp_timeout_calls", KSTAT_DATA_UINT64, 0 },
727 728 { "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64, 0 },
728 729 { "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64, 0 },
729 730 { "tcp_timeout_canceled", KSTAT_DATA_UINT64, 0 },
730 731 { "tcp_timermp_freed", KSTAT_DATA_UINT64, 0 },
731 732 { "tcp_push_timer_cnt", KSTAT_DATA_UINT64, 0 },
732 733 { "tcp_ack_timer_cnt", KSTAT_DATA_UINT64, 0 },
733 734 #endif
734 735 };
735 736
736 737 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
737 738 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
738 739 stackid);
739 740
740 741 if (ksp == NULL)
741 742 return (NULL);
742 743
743 744 bcopy(&template, ksp->ks_data, sizeof (template));
744 745 ksp->ks_private = (void *)(uintptr_t)stackid;
745 746 ksp->ks_update = tcp_kstat2_update;
746 747
747 748 kstat_install(ksp);
748 749 return (ksp);
749 750 }
750 751
751 752 void
752 753 tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
753 754 {
754 755 if (ksp != NULL) {
755 756 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
756 757 kstat_delete_netstack(ksp, stackid);
757 758 }
758 759 }
759 760
760 761 /*
761 762 * Sum up all per CPU tcp_stat_t kstat counters.
762 763 */
763 764 static int
764 765 tcp_kstat2_update(kstat_t *kp, int rw)
765 766 {
766 767 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
767 768 netstack_t *ns;
768 769 tcp_stack_t *tcps;
769 770 tcp_stat_t *stats;
770 771 int i;
771 772 int cnt;
772 773
773 774 if (rw == KSTAT_WRITE)
774 775 return (EACCES);
775 776
776 777 ns = netstack_find_by_stackid(stackid);
777 778 if (ns == NULL)
778 779 return (-1);
779 780 tcps = ns->netstack_tcp;
780 781 if (tcps == NULL) {
781 782 netstack_rele(ns);
782 783 return (-1);
783 784 }
784 785
785 786 stats = (tcp_stat_t *)kp->ks_data;
786 787 tcp_clr_stats(stats);
787 788
788 789 /*
789 790 * tcps_sc_cnt may change in the middle of the loop. It is better
790 791 * to get its value first.
791 792 */
792 793 cnt = tcps->tcps_sc_cnt;
793 794 for (i = 0; i < cnt; i++)
794 795 tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);
795 796
796 797 netstack_rele(ns);
797 798 return (0);
798 799 }
799 800
800 801 /*
801 802 * To add stats from one mib2_tcp_t to another. Static fields are not added.
802 803 * The caller should set them up properly.
803 804 */
804 805 void
805 806 tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
806 807 {
807 808 to->tcpActiveOpens += from->tcpActiveOpens;
808 809 to->tcpPassiveOpens += from->tcpPassiveOpens;
809 810 to->tcpAttemptFails += from->tcpAttemptFails;
810 811 to->tcpEstabResets += from->tcpEstabResets;
811 812 to->tcpInSegs += from->tcpInSegs;
812 813 to->tcpOutSegs += from->tcpOutSegs;
813 814 to->tcpRetransSegs += from->tcpRetransSegs;
814 815 to->tcpOutRsts += from->tcpOutRsts;
815 816
816 817 to->tcpOutDataSegs += from->tcpOutDataSegs;
817 818 to->tcpOutDataBytes += from->tcpOutDataBytes;
818 819 to->tcpRetransBytes += from->tcpRetransBytes;
819 820 to->tcpOutAck += from->tcpOutAck;
820 821 to->tcpOutAckDelayed += from->tcpOutAckDelayed;
821 822 to->tcpOutUrg += from->tcpOutUrg;
822 823 to->tcpOutWinUpdate += from->tcpOutWinUpdate;
823 824 to->tcpOutWinProbe += from->tcpOutWinProbe;
824 825 to->tcpOutControl += from->tcpOutControl;
825 826 to->tcpOutFastRetrans += from->tcpOutFastRetrans;
826 827
827 828 to->tcpInAckBytes += from->tcpInAckBytes;
828 829 to->tcpInDupAck += from->tcpInDupAck;
829 830 to->tcpInAckUnsent += from->tcpInAckUnsent;
830 831 to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
831 832 to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
832 833 to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
833 834 to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
834 835 to->tcpInDataDupSegs += from->tcpInDataDupSegs;
835 836 to->tcpInDataDupBytes += from->tcpInDataDupBytes;
836 837 to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
837 838 to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
838 839 to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
839 840 to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
840 841 to->tcpInWinProbe += from->tcpInWinProbe;
841 842 to->tcpInWinUpdate += from->tcpInWinUpdate;
842 843 to->tcpInClosed += from->tcpInClosed;
843 844
844 845 to->tcpRttNoUpdate += from->tcpRttNoUpdate;
845 846 to->tcpRttUpdate += from->tcpRttUpdate;
846 847 to->tcpTimRetrans += from->tcpTimRetrans;
847 848 to->tcpTimRetransDrop += from->tcpTimRetransDrop;
848 849 to->tcpTimKeepalive += from->tcpTimKeepalive;
849 850 to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
850 851 to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
851 852 to->tcpListenDrop += from->tcpListenDrop;
852 853 to->tcpListenDropQ0 += from->tcpListenDropQ0;
853 854 to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
854 855 to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
855 856 to->tcpHCInSegs += from->tcpHCInSegs;
856 857 to->tcpHCOutSegs += from->tcpHCOutSegs;
857 858 }
858 859
859 860 /*
860 861 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats. The
861 862 * caller should initialize the target mib2_tcp_t properly as this function
862 863 * just adds up all the per CPU stats.
863 864 */
864 865 static void
865 866 tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
866 867 {
867 868 int i;
868 869 int cnt;
869 870
870 871 /*
871 872 * tcps_sc_cnt may change in the middle of the loop. It is better
872 873 * to get its value first.
873 874 */
874 875 cnt = tcps->tcps_sc_cnt;
875 876 for (i = 0; i < cnt; i++)
876 877 tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
877 878 }
878 879
879 880 /*
880 881 * To set all tcp_stat_t counters to 0.
881 882 */
882 883 static void
883 884 tcp_clr_stats(tcp_stat_t *stats)
884 885 {
885 886 stats->tcp_time_wait_syn_success.value.ui64 = 0;
886 887 stats->tcp_clean_death_nondetached.value.ui64 = 0;
887 888 stats->tcp_eager_blowoff_q.value.ui64 = 0;
888 889 stats->tcp_eager_blowoff_q0.value.ui64 = 0;
889 890 stats->tcp_no_listener.value.ui64 = 0;
890 891 stats->tcp_listendrop.value.ui64 = 0;
891 892 stats->tcp_listendropq0.value.ui64 = 0;
892 893 stats->tcp_wsrv_called.value.ui64 = 0;
893 894 stats->tcp_flwctl_on.value.ui64 = 0;
894 895 stats->tcp_timer_fire_early.value.ui64 = 0;
895 896 stats->tcp_timer_fire_miss.value.ui64 = 0;
896 897 stats->tcp_zcopy_on.value.ui64 = 0;
897 898 stats->tcp_zcopy_off.value.ui64 = 0;
898 899 stats->tcp_zcopy_backoff.value.ui64 = 0;
899 900 stats->tcp_fusion_flowctl.value.ui64 = 0;
900 901 stats->tcp_fusion_backenabled.value.ui64 = 0;
901 902 stats->tcp_fusion_urg.value.ui64 = 0;
902 903 stats->tcp_fusion_putnext.value.ui64 = 0;
903 904 stats->tcp_fusion_unfusable.value.ui64 = 0;
904 905 stats->tcp_fusion_aborted.value.ui64 = 0;
905 906 stats->tcp_fusion_unqualified.value.ui64 = 0;
906 907 stats->tcp_fusion_rrw_busy.value.ui64 = 0;
907 908 stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
908 909 stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
909 910 stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
910 911 stats->tcp_sock_fallback.value.ui64 = 0;
911 912 stats->tcp_lso_enabled.value.ui64 = 0;
912 913 stats->tcp_lso_disabled.value.ui64 = 0;
913 914 stats->tcp_lso_times.value.ui64 = 0;
914 915 stats->tcp_lso_pkt_out.value.ui64 = 0;
915 916 stats->tcp_listen_cnt_drop.value.ui64 = 0;
916 917 stats->tcp_listen_mem_drop.value.ui64 = 0;
917 918 stats->tcp_zwin_mem_drop.value.ui64 = 0;
918 919 stats->tcp_zwin_ack_syn.value.ui64 = 0;
919 920 stats->tcp_rst_unsent.value.ui64 = 0;
920 921 stats->tcp_reclaim_cnt.value.ui64 = 0;
921 922 stats->tcp_reass_timeout.value.ui64 = 0;
922 923
923 924 #ifdef TCP_DEBUG_COUNTER
924 925 stats->tcp_time_wait.value.ui64 = 0;
925 926 stats->tcp_rput_time_wait.value.ui64 = 0;
926 927 stats->tcp_detach_time_wait.value.ui64 = 0;
927 928 stats->tcp_timeout_calls.value.ui64 = 0;
928 929 stats->tcp_timeout_cached_alloc.value.ui64 = 0;
929 930 stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
930 931 stats->tcp_timeout_canceled.value.ui64 = 0;
931 932 stats->tcp_timermp_freed.value.ui64 = 0;
932 933 stats->tcp_push_timer_cnt.value.ui64 = 0;
933 934 stats->tcp_ack_timer_cnt.value.ui64 = 0;
934 935 #endif
935 936 }
936 937
937 938 /*
938 939 * To add counters from the per CPU tcp_stat_counter_t to the stack
939 940 * tcp_stat_t.
940 941 */
941 942 static void
942 943 tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
943 944 {
944 945 to->tcp_time_wait_syn_success.value.ui64 +=
945 946 from->tcp_time_wait_syn_success;
946 947 to->tcp_clean_death_nondetached.value.ui64 +=
947 948 from->tcp_clean_death_nondetached;
948 949 to->tcp_eager_blowoff_q.value.ui64 +=
949 950 from->tcp_eager_blowoff_q;
950 951 to->tcp_eager_blowoff_q0.value.ui64 +=
951 952 from->tcp_eager_blowoff_q0;
952 953 to->tcp_no_listener.value.ui64 +=
953 954 from->tcp_no_listener;
954 955 to->tcp_listendrop.value.ui64 +=
955 956 from->tcp_listendrop;
956 957 to->tcp_listendropq0.value.ui64 +=
957 958 from->tcp_listendropq0;
958 959 to->tcp_wsrv_called.value.ui64 +=
959 960 from->tcp_wsrv_called;
960 961 to->tcp_flwctl_on.value.ui64 +=
961 962 from->tcp_flwctl_on;
962 963 to->tcp_timer_fire_early.value.ui64 +=
963 964 from->tcp_timer_fire_early;
964 965 to->tcp_timer_fire_miss.value.ui64 +=
965 966 from->tcp_timer_fire_miss;
966 967 to->tcp_zcopy_on.value.ui64 +=
967 968 from->tcp_zcopy_on;
968 969 to->tcp_zcopy_off.value.ui64 +=
969 970 from->tcp_zcopy_off;
970 971 to->tcp_zcopy_backoff.value.ui64 +=
971 972 from->tcp_zcopy_backoff;
972 973 to->tcp_fusion_flowctl.value.ui64 +=
973 974 from->tcp_fusion_flowctl;
974 975 to->tcp_fusion_backenabled.value.ui64 +=
975 976 from->tcp_fusion_backenabled;
976 977 to->tcp_fusion_urg.value.ui64 +=
977 978 from->tcp_fusion_urg;
978 979 to->tcp_fusion_putnext.value.ui64 +=
979 980 from->tcp_fusion_putnext;
980 981 to->tcp_fusion_unfusable.value.ui64 +=
981 982 from->tcp_fusion_unfusable;
982 983 to->tcp_fusion_aborted.value.ui64 +=
983 984 from->tcp_fusion_aborted;
984 985 to->tcp_fusion_unqualified.value.ui64 +=
985 986 from->tcp_fusion_unqualified;
986 987 to->tcp_fusion_rrw_busy.value.ui64 +=
987 988 from->tcp_fusion_rrw_busy;
988 989 to->tcp_fusion_rrw_msgcnt.value.ui64 +=
989 990 from->tcp_fusion_rrw_msgcnt;
990 991 to->tcp_fusion_rrw_plugged.value.ui64 +=
991 992 from->tcp_fusion_rrw_plugged;
992 993 to->tcp_in_ack_unsent_drop.value.ui64 +=
993 994 from->tcp_in_ack_unsent_drop;
994 995 to->tcp_sock_fallback.value.ui64 +=
995 996 from->tcp_sock_fallback;
996 997 to->tcp_lso_enabled.value.ui64 +=
997 998 from->tcp_lso_enabled;
998 999 to->tcp_lso_disabled.value.ui64 +=
999 1000 from->tcp_lso_disabled;
1000 1001 to->tcp_lso_times.value.ui64 +=
1001 1002 from->tcp_lso_times;
1002 1003 to->tcp_lso_pkt_out.value.ui64 +=
1003 1004 from->tcp_lso_pkt_out;
1004 1005 to->tcp_listen_cnt_drop.value.ui64 +=
1005 1006 from->tcp_listen_cnt_drop;
1006 1007 to->tcp_listen_mem_drop.value.ui64 +=
1007 1008 from->tcp_listen_mem_drop;
1008 1009 to->tcp_zwin_mem_drop.value.ui64 +=
1009 1010 from->tcp_zwin_mem_drop;
1010 1011 to->tcp_zwin_ack_syn.value.ui64 +=
1011 1012 from->tcp_zwin_ack_syn;
1012 1013 to->tcp_rst_unsent.value.ui64 +=
1013 1014 from->tcp_rst_unsent;
1014 1015 to->tcp_reclaim_cnt.value.ui64 +=
1015 1016 from->tcp_reclaim_cnt;
1016 1017 to->tcp_reass_timeout.value.ui64 +=
1017 1018 from->tcp_reass_timeout;
1018 1019
1019 1020 #ifdef TCP_DEBUG_COUNTER
1020 1021 to->tcp_time_wait.value.ui64 +=
1021 1022 from->tcp_time_wait;
1022 1023 to->tcp_rput_time_wait.value.ui64 +=
1023 1024 from->tcp_rput_time_wait;
1024 1025 to->tcp_detach_time_wait.value.ui64 +=
1025 1026 from->tcp_detach_time_wait;
1026 1027 to->tcp_timeout_calls.value.ui64 +=
1027 1028 from->tcp_timeout_calls;
1028 1029 to->tcp_timeout_cached_alloc.value.ui64 +=
1029 1030 from->tcp_timeout_cached_alloc;
1030 1031 to->tcp_timeout_cancel_reqs.value.ui64 +=
1031 1032 from->tcp_timeout_cancel_reqs;
1032 1033 to->tcp_timeout_canceled.value.ui64 +=
1033 1034 from->tcp_timeout_canceled;
1034 1035 to->tcp_timermp_freed.value.ui64 +=
1035 1036 from->tcp_timermp_freed;
1036 1037 to->tcp_push_timer_cnt.value.ui64 +=
1037 1038 from->tcp_push_timer_cnt;
1038 1039 to->tcp_ack_timer_cnt.value.ui64 +=
1039 1040 from->tcp_ack_timer_cnt;
1040 1041 #endif
1041 1042 }
↓ open down ↓ |
991 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX