/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/udp_impl.h>

static int	udp_kstat_update(kstat_t *, int);
static int	udp_kstat2_update(kstat_t *, int);
static void	udp_sum_mib(udp_stack_t *, mib2_udp_t *);
static void	udp_clr_stats(udp_stat_t *);
static void	udp_add_stats(udp_stat_counter_t *, udp_stat_t *);
static void	udp_add_mib(mib2_udp_t *, mib2_udp_t *);

/*
 * Return the SNMP information in the buffer in mpdata.  We do not hold any
 * locks, so the information we report may be changing beneath us.
 */
mblk_t *
udp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl;
	mblk_t			*mp_attr_ctl;
	mblk_t			*mp6_conn_ctl;
	mblk_t			*mp6_attr_ctl;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_udpEntry_t		ude;
	mib2_udp6Entry_t	ude6;
	mib2_transportMLPEntry_t mlp;
	int			state;
	zoneid_t		zoneid;
	int			i;
	connf_t			*connfp;
	conn_t			*connp = Q_TO_CONN(q);
	int			v4_conn_idx;
	int			v6_conn_idx;
	boolean_t		needattr;
	udp_t			*udp;
	ip_stack_t		*ipst = connp->conn_netstack->netstack_ip;
	udp_stack_t		*us = connp->conn_netstack->netstack_udp;
	mblk_t			*mp2ctl;
	mib2_udp_t		udp_mib;
	size_t			udp_mib_size, ude_size, ude6_size;

	/*
	 * Make a copy of the original message.
	 */
	mp2ctl = copymsg(mpctl);

	mp_conn_ctl = mp_attr_ctl = mp6_conn_ctl = NULL;
	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (0);
	}

	zoneid = connp->conn_zoneid;

	if (legacy_req) {
		udp_mib_size = LEGACY_MIB_SIZE(&udp_mib, mib2_udp_t);
		ude_size = LEGACY_MIB_SIZE(&ude, mib2_udpEntry_t);
		ude6_size = LEGACY_MIB_SIZE(&ude6, mib2_udp6Entry_t);
	} else {
		udp_mib_size = sizeof (mib2_udp_t);
		ude_size = sizeof (mib2_udpEntry_t);
		ude6_size = sizeof (mib2_udp6Entry_t);
	}

	bzero(&udp_mib, sizeof (udp_mib));
	/* fixed length structure for IPv4 and IPv6 counters */
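	/*
	 * The entry sizes advertised here tell the MIB consumer how large
	 * each udpEntry/udp6Entry record appended further below is (the
	 * smaller legacy sizes when legacy_req is set).
	 */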
	SET_MIB(udp_mib.udpEntrySize, ude_size);
	SET_MIB(udp_mib.udp6EntrySize, ude6_size);

	udp_sum_mib(us, &udp_mib);

	/*
	 * Synchronize 32- and 64-bit counters.  Note that udpInDatagrams and
	 * udpOutDatagrams are not updated anywhere in UDP; only the new
	 * 64-bit counters are used, so the old counters' values in us_sc_mib
	 * are always 0.
	 */
	SYNC32_MIB(&udp_mib, udpInDatagrams, udpHCInDatagrams);
	SYNC32_MIB(&udp_mib, udpOutDatagrams, udpHCOutDatagrams);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_UDP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&udp_mib, udp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
	v4_conn_idx = v6_conn_idx = 0;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;

		while ((connp = ipcl_get_next_conn(connfp, connp,
		    IPCL_UDPCONN))) {
			udp = connp->conn_udp;
			if (zoneid != connp->conn_zoneid)
				continue;

			/*
			 * Note that the port numbers are sent in
			 * host byte order.
			 */

			if (udp->udp_state == TS_UNBND)
				state = MIB2_UDP_unbound;
			else if (udp->udp_state == TS_IDLE)
				state = MIB2_UDP_idle;
			else if (udp->udp_state == TS_DATA_XFER)
				state = MIB2_UDP_connected;
			else
				state = MIB2_UDP_unknown;

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			mutex_enter(&connp->conn_lock);
			if (udp->udp_state == TS_DATA_XFER &&
			    connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}
			mutex_exit(&connp->conn_lock);

			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * any IPv6 entries which are bound to in6addr_any
			 * (i.e. anything an IPv4 peer could connect/send to).
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (udp->udp_state <= TS_IDLE &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				ude.udpEntryInfo.ue_state = state;
				/*
				 * If in6addr_any this will set it to
				 * INADDR_ANY.
				 */
				ude.udpLocalAddress = connp->conn_laddr_v4;
				ude.udpLocalPort = ntohs(connp->conn_lport);
				if (udp->udp_state == TS_DATA_XFER) {
					/*
					 * Can potentially get here for
					 * a v6 socket if another process
					 * (say, ping) has just done a
					 * sendto(), changing the state
					 * from the TS_IDLE above to
					 * TS_DATA_XFER by the time we hit
					 * this part of the code.
					 */
					ude.udpEntryInfo.ue_RemoteAddress =
					    connp->conn_faddr_v4;
					ude.udpEntryInfo.ue_RemotePort =
					    ntohs(connp->conn_fport);
				} else {
					ude.udpEntryInfo.ue_RemoteAddress = 0;
					ude.udpEntryInfo.ue_RemotePort = 0;
				}

				/*
				 * We make the assumption that all udp_t
				 * structs will be created within an address
				 * region no larger than 32-bits.
				 */
				ude.udpInstance = (uint32_t)(uintptr_t)udp;
				ude.udpCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				ude.udpCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&ude, ude_size);
				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont, &mp_attr_tail,
					    (char *)&mlp, sizeof (mlp));
			}
			if (connp->conn_ipversion == IPV6_VERSION) {
				ude6.udp6EntryInfo.ue_state = state;
				ude6.udp6LocalAddress = connp->conn_laddr_v6;
				ude6.udp6LocalPort = ntohs(connp->conn_lport);
				mutex_enter(&connp->conn_lock);
				if (connp->conn_ixa->ixa_flags &
				    IXAF_SCOPEID_SET) {
					ude6.udp6IfIndex =
					    connp->conn_ixa->ixa_scopeid;
				} else {
					ude6.udp6IfIndex = connp->conn_bound_if;
				}
				mutex_exit(&connp->conn_lock);
				if (udp->udp_state == TS_DATA_XFER) {
					ude6.udp6EntryInfo.ue_RemoteAddress =
					    connp->conn_faddr_v6;
					ude6.udp6EntryInfo.ue_RemotePort =
					    ntohs(connp->conn_fport);
				} else {
					ude6.udp6EntryInfo.ue_RemoteAddress =
					    sin6_null.sin6_addr;
					ude6.udp6EntryInfo.ue_RemotePort = 0;
				}
				/*
				 * We make the assumption that all udp_t
				 * structs will be created within an address
				 * region no larger than 32-bits.
				 */
				ude6.udp6Instance = (uint32_t)(uintptr_t)udp;
				ude6.udp6CreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				ude6.udp6CreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
				    &mp6_conn_tail, (char *)&ude6, ude6_size);
				mlp.tme_connidx = v6_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp6_attr_ctl->b_cont,
					    &mp6_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	/* IPv4 UDP endpoints */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_UDP;
	optp->name = MIB2_UDP_ENTRY;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_UDP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* IPv6 UDP endpoints */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_UDP6;
	optp->name = MIB2_UDP6_ENTRY;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_UDP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);

	return (mp2ctl);
}

/*
 * Return 0 if invalid set request, 1 otherwise, including non-udp requests.
 * NOTE: Per MIB-II, UDP has no writable data.
 * TODO: If this ever actually tries to set anything, it needs to be
 * updated to do the appropriate locking.
 */
/* ARGSUSED */
int
udp_snmp_set(queue_t *q, t_scalar_t level, t_scalar_t name,
    uchar_t *ptr, int len)
{
	switch (level) {
	case MIB2_UDP:
		return (0);
	default:
		return (1);
	}
}

void
udp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Add the stats from one mib2_udp_t to another.  Static fields are not
 * added; the caller should set them up properly.
 */
static void
udp_add_mib(mib2_udp_t *from, mib2_udp_t *to)
{
	to->udpHCInDatagrams += from->udpHCInDatagrams;
	to->udpInErrors += from->udpInErrors;
	to->udpHCOutDatagrams += from->udpHCOutDatagrams;
	to->udpOutErrors += from->udpOutErrors;
}

/*
 * Create the per-stack "udpstat" named kstat that exports the counters
 * kept in udp_stat_t.
 */
void *
udp_kstat2_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	udp_stat_t template = {
		{ "udp_sock_fallback",		KSTAT_DATA_UINT64 },
		{ "udp_out_opt",		KSTAT_DATA_UINT64 },
		{ "udp_out_err_notconn",	KSTAT_DATA_UINT64 },
		{ "udp_out_err_output",		KSTAT_DATA_UINT64 },
		{ "udp_out_err_tudr",		KSTAT_DATA_UINT64 },
#ifdef DEBUG
		{ "udp_data_conn",		KSTAT_DATA_UINT64 },
		{ "udp_data_notconn",		KSTAT_DATA_UINT64 },
		{ "udp_out_lastdst",		KSTAT_DATA_UINT64 },
		{ "udp_out_diffdst",		KSTAT_DATA_UINT64 },
		{ "udp_out_ipv6",		KSTAT_DATA_UINT64 },
		{ "udp_out_mapped",		KSTAT_DATA_UINT64 },
		{ "udp_out_ipv4",		KSTAT_DATA_UINT64 },
#endif
	};

	ksp = kstat_create_netstack(UDP_MOD_NAME, 0, "udpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    0, stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = udp_kstat2_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}

void
udp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Copy the counters from the per CPU udp_stat_counter_t into the stack
 * udp_stat_t.
 */
static void
udp_add_stats(udp_stat_counter_t *from, udp_stat_t *to)
{
	to->udp_sock_fallback.value.ui64 += from->udp_sock_fallback;
	to->udp_out_opt.value.ui64 += from->udp_out_opt;
	to->udp_out_err_notconn.value.ui64 += from->udp_out_err_notconn;
	to->udp_out_err_output.value.ui64 += from->udp_out_err_output;
	to->udp_out_err_tudr.value.ui64 += from->udp_out_err_tudr;
#ifdef DEBUG
	to->udp_data_conn.value.ui64 += from->udp_data_conn;
	to->udp_data_notconn.value.ui64 += from->udp_data_notconn;
	to->udp_out_lastdst.value.ui64 += from->udp_out_lastdst;
	to->udp_out_diffdst.value.ui64 += from->udp_out_diffdst;
	to->udp_out_ipv6.value.ui64 += from->udp_out_ipv6;
	to->udp_out_mapped.value.ui64 += from->udp_out_mapped;
	to->udp_out_ipv4.value.ui64 += from->udp_out_ipv4;
#endif
}

/*
 * Set all udp_stat_t counters to 0.
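 * Called from udp_kstat2_update() before the per CPU counters are re-summed.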
 */
static void
udp_clr_stats(udp_stat_t *stats)
{
	stats->udp_sock_fallback.value.ui64 = 0;
	stats->udp_out_opt.value.ui64 = 0;
	stats->udp_out_err_notconn.value.ui64 = 0;
	stats->udp_out_err_output.value.ui64 = 0;
	stats->udp_out_err_tudr.value.ui64 = 0;
#ifdef DEBUG
	stats->udp_data_conn.value.ui64 = 0;
	stats->udp_data_notconn.value.ui64 = 0;
	stats->udp_out_lastdst.value.ui64 = 0;
	stats->udp_out_diffdst.value.ui64 = 0;
	stats->udp_out_ipv6.value.ui64 = 0;
	stats->udp_out_mapped.value.ui64 = 0;
	stats->udp_out_ipv4.value.ui64 = 0;
#endif
}

/*
 * kstat update callback for the "udpstat" kstat: re-aggregate the per CPU
 * counters into the exported udp_stat_t.
 */
static int
udp_kstat2_update(kstat_t *kp, int rw)
{
	udp_stat_t	*stats;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	udp_stack_t	*us;
	int		i;
	int		cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	us = ns->netstack_udp;
	if (us == NULL) {
		netstack_rele(ns);
		return (-1);
	}
	stats = (udp_stat_t *)kp->ks_data;
	udp_clr_stats(stats);

	cnt = us->us_sc_cnt;
	for (i = 0; i < cnt; i++)
		udp_add_stats(&us->us_sc[i]->udp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * Create the per-stack MIB-II "udp" named kstat.
 */
void *
udp_kstat_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	udp_named_kstat_t template = {
		{ "inDatagrams",	KSTAT_DATA_UINT64, {{0}} },
		{ "inErrors",		KSTAT_DATA_UINT32, {{0}} },
		{ "outDatagrams",	KSTAT_DATA_UINT64, {{0}} },
		{ "entrySize",		KSTAT_DATA_INT32, {{0}} },
		{ "entry6Size",		KSTAT_DATA_INT32, {{0}} },
		{ "outErrors",		KSTAT_DATA_UINT32, {{0}} },
	};

	ksp = kstat_create_netstack(UDP_MOD_NAME, 0, UDP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(udp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.entrySize.value.ui32 = sizeof (mib2_udpEntry_t);
	template.entry6Size.value.ui32 = sizeof (mib2_udp6Entry_t);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = udp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}

/*
 * Sum up all MIB2 stats for a udp_stack_t from the per CPU stats.  The
 * caller should initialize the target mib2_udp_t properly, as this function
 * just adds up the per CPU stats.
 */
static void
udp_sum_mib(udp_stack_t *us, mib2_udp_t *udp_mib)
{
	int i;
	int cnt;

	cnt = us->us_sc_cnt;
	for (i = 0; i < cnt; i++)
		udp_add_mib(&us->us_sc[i]->udp_sc_mib, udp_mib);
}

/*
 * kstat update callback for the MIB-II "udp" kstat: refresh the named
 * counters from the summed per CPU MIB data.
 */
static int
udp_kstat_update(kstat_t *kp, int rw)
{
	udp_named_kstat_t *udpkp;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	udp_stack_t	*us;
	mib2_udp_t	udp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	us = ns->netstack_udp;
	if (us == NULL) {
		netstack_rele(ns);
		return (-1);
	}
	udpkp = (udp_named_kstat_t *)kp->ks_data;

	bzero(&udp_mib, sizeof (udp_mib));
	udp_sum_mib(us, &udp_mib);

	udpkp->inDatagrams.value.ui64 = udp_mib.udpHCInDatagrams;
	udpkp->inErrors.value.ui32 = udp_mib.udpInErrors;
	udpkp->outDatagrams.value.ui64 = udp_mib.udpHCOutDatagrams;
	udpkp->outErrors.value.ui32 = udp_mib.udpOutErrors;
	netstack_rele(ns);
	return (0);
}