dccp: starting module template
--- old/usr/src/uts/common/inet/ip/ip.c
+++ new/usr/src/uts/common/inet/ip/ip.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 1990 Mentat Inc.
25 25 * Copyright (c) 2011 Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/types.h>
29 29 #include <sys/stream.h>
30 30 #include <sys/dlpi.h>
31 31 #include <sys/stropts.h>
32 32 #include <sys/sysmacros.h>
33 33 #include <sys/strsubr.h>
34 34 #include <sys/strlog.h>
35 35 #include <sys/strsun.h>
36 36 #include <sys/zone.h>
37 37 #define _SUN_TPI_VERSION 2
38 38 #include <sys/tihdr.h>
39 39 #include <sys/xti_inet.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/suntpi.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/debug.h>
44 44 #include <sys/kobj.h>
45 45 #include <sys/modctl.h>
46 46 #include <sys/atomic.h>
47 47 #include <sys/policy.h>
48 48 #include <sys/priv.h>
49 49 #include <sys/taskq.h>
50 50
51 51 #include <sys/systm.h>
52 52 #include <sys/param.h>
53 53 #include <sys/kmem.h>
54 54 #include <sys/sdt.h>
55 55 #include <sys/socket.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/isa_defs.h>
58 58 #include <sys/mac.h>
59 59 #include <net/if.h>
60 60 #include <net/if_arp.h>
61 61 #include <net/route.h>
62 62 #include <sys/sockio.h>
63 63 #include <netinet/in.h>
64 64 #include <net/if_dl.h>
65 65
66 66 #include <inet/common.h>
67 67 #include <inet/mi.h>
68 68 #include <inet/mib2.h>
69 69 #include <inet/nd.h>
70 70 #include <inet/arp.h>
71 71 #include <inet/snmpcom.h>
72 72 #include <inet/optcom.h>
73 73 #include <inet/kstatcom.h>
74 74
75 75 #include <netinet/igmp_var.h>
76 76 #include <netinet/ip6.h>
77 77 #include <netinet/icmp6.h>
78 78 #include <netinet/sctp.h>
79 79
80 80 #include <inet/ip.h>
81 81 #include <inet/ip_impl.h>
82 82 #include <inet/ip6.h>
83 83 #include <inet/ip6_asp.h>
84 84 #include <inet/tcp.h>
85 85 #include <inet/tcp_impl.h>
86 86 #include <inet/ip_multi.h>
87 87 #include <inet/ip_if.h>
88 88 #include <inet/ip_ire.h>
89 89 #include <inet/ip_ftable.h>
90 90 #include <inet/ip_rts.h>
91 91 #include <inet/ip_ndp.h>
92 92 #include <inet/ip_listutils.h>
93 93 #include <netinet/igmp.h>
94 94 #include <netinet/ip_mroute.h>
95 95 #include <inet/ipp_common.h>
96 96
97 97 #include <net/pfkeyv2.h>
98 98 #include <inet/sadb.h>
99 99 #include <inet/ipsec_impl.h>
100 100 #include <inet/iptun/iptun_impl.h>
101 101 #include <inet/ipdrop.h>
102 102 #include <inet/ip_netinfo.h>
103 103 #include <inet/ilb_ip.h>
104 104
105 105 #include <sys/ethernet.h>
106 106 #include <net/if_types.h>
107 107 #include <sys/cpuvar.h>
108 108
109 109 #include <ipp/ipp.h>
110 110 #include <ipp/ipp_impl.h>
111 111 #include <ipp/ipgpc/ipgpc.h>
112 112
113 113 #include <sys/pattr.h>
114 +#include <inet/dccp.h>
115 +#include <inet/dccp_impl.h>
116 +#include <inet/dccp_ip.h>
114 117 #include <inet/ipclassifier.h>
115 118 #include <inet/sctp_ip.h>
116 119 #include <inet/sctp/sctp_impl.h>
117 120 #include <inet/udp_impl.h>
118 121 #include <inet/rawip_impl.h>
119 122 #include <inet/rts_impl.h>
120 123
121 124 #include <sys/tsol/label.h>
122 125 #include <sys/tsol/tnet.h>
123 126
124 127 #include <sys/squeue_impl.h>
125 128 #include <inet/ip_arp.h>
126 129
127 130 #include <sys/clock_impl.h> /* For LBOLT_FASTPATH{,64} */
128 131
129 132 /*
130 133 * Values for squeue switch:
131 134 * IP_SQUEUE_ENTER_NODRAIN: SQ_NODRAIN
132 135 * IP_SQUEUE_ENTER: SQ_PROCESS
133 136 * IP_SQUEUE_FILL: SQ_FILL
134 137 */
135 138 int ip_squeue_enter = IP_SQUEUE_ENTER;	/* Settable in /etc/system */
136 139
137 140 int ip_squeue_flag;
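
A minimal sketch of the setting-to-flag translation described in the comment
above, assuming ip_squeue_switch() (declared later in this file) does nothing
more than map the /etc/system value onto an SQ_* flag:

	static int
	ip_squeue_switch(int val)
	{
		switch (val) {
		case IP_SQUEUE_ENTER_NODRAIN:
			return (SQ_NODRAIN);
		case IP_SQUEUE_FILL:
			return (SQ_FILL);
		case IP_SQUEUE_ENTER:
		default:
			return (SQ_PROCESS);
		}
	}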
138 141
139 142 /*
140 143 * Settable in /etc/system
141 144 */
142 145 int ip_poll_normal_ms = 100;
143 146 int ip_poll_normal_ticks = 0;
144 147 int ip_modclose_ackwait_ms = 3000;
145 148
146 149 /*
147 150 * It would be nice to have these present only in DEBUG systems, but the
148 151 * current design of the global symbol checking logic requires them to be
149 152 * unconditionally present.
150 153 */
151 154 uint_t ip_thread_data; /* TSD key for debug support */
152 155 krwlock_t ip_thread_rwlock;
153 156 list_t ip_thread_list;
154 157
155 158 /*
156 159 * Structure to represent a linked list of msgblks. Used by ip_snmp_ functions.
157 160 */
158 161
159 162 struct listptr_s {
160 163 mblk_t *lp_head; /* pointer to the head of the list */
161 164 mblk_t *lp_tail; /* pointer to the tail of the list */
162 165 };
163 166
164 167 typedef struct listptr_s listptr_t;
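
A minimal sketch of how lp_head/lp_tail are maintained; lp_append is a
hypothetical helper, and the link field (b_cont here) is an assumption about
how the ip_snmp_ functions chain their reply mblks:

	static void
	lp_append(listptr_t *lpp, mblk_t *mp)
	{
		if (lpp->lp_head == NULL)
			lpp->lp_head = mp;		/* first element */
		else
			lpp->lp_tail->b_cont = mp;	/* chain onto tail */
		lpp->lp_tail = mp;
	}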
165 168
166 169 /*
167 170 * This is used by ip_snmp_get_mib2_ip_route_media and
168 171 * ip_snmp_get_mib2_ip6_route_media to carry the lists of return data.
169 172 */
170 173 typedef struct iproutedata_s {
171 174 uint_t ird_idx;
172 175 uint_t ird_flags; /* see below */
173 176 listptr_t ird_route; /* ipRouteEntryTable */
174 177 listptr_t ird_netmedia; /* ipNetToMediaEntryTable */
175 178 listptr_t ird_attrs; /* ipRouteAttributeTable */
176 179 } iproutedata_t;
177 180
178 181 /* Include ire_testhidden and IRE_IF_CLONE routes */
179 182 #define IRD_REPORT_ALL 0x01
180 183
181 184 /*
182 185 * Cluster specific hooks. These should be NULL when booted as a non-cluster
183 186 * system.
183 186 */
184 187
185 188 /*
186 189 * Hook functions to enable cluster networking
187 190 * On non-clustered systems these vectors must always be NULL.
188 191 *
189 192 * Hook function to check whether a specified IP address is a shared IP
190 193 * address in the cluster
191 194 *
192 195 */
193 196 int (*cl_inet_isclusterwide)(netstackid_t stack_id, uint8_t protocol,
194 197 sa_family_t addr_family, uint8_t *laddrp, void *args) = NULL;
195 198
196 199 /*
197 200 * Hook function to generate cluster wide ip fragment identifier
198 201 */
199 202 uint32_t (*cl_inet_ipident)(netstackid_t stack_id, uint8_t protocol,
200 203 sa_family_t addr_family, uint8_t *laddrp, uint8_t *faddrp,
201 204 void *args) = NULL;
202 205
203 206 /*
204 207 * Hook function to generate cluster wide SPI.
205 208 */
206 209 void (*cl_inet_getspi)(netstackid_t, uint8_t, uint8_t *, size_t,
207 210 void *) = NULL;
208 211
209 212 /*
210 213 * Hook function to verify if the SPI is already utilized.
211 214 */
212 215
213 216 int (*cl_inet_checkspi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;
214 217
215 218 /*
216 219 * Hook function to delete the SPI from the cluster wide repository.
217 220 */
218 221
219 222 void (*cl_inet_deletespi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;
220 223
221 224 /*
222 225 * Hook function to inform the cluster when a packet is received on an IDLE SA
223 226 */
224 227
225 228 void (*cl_inet_idlesa)(netstackid_t, uint8_t, uint32_t, sa_family_t,
226 229 in6_addr_t, in6_addr_t, void *) = NULL;
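
A minimal sketch of how such a vector would be used, given the rule above
that the hooks are NULL on non-clustered systems; the wrapper name and the
NULL last argument are illustrative only:

	static boolean_t
	ip_laddr_is_clusterwide(netstackid_t stackid, uint8_t proto,
	    sa_family_t af, uint8_t *laddrp)
	{
		if (cl_inet_isclusterwide == NULL)
			return (B_FALSE);	/* non-clustered boot */
		return ((*cl_inet_isclusterwide)(stackid, proto, af,
		    laddrp, NULL) != 0);
	}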
227 230
228 231 /*
229 232 * Synchronization notes:
230 233 *
231 234 * IP is a fully D_MP STREAMS module/driver. Thus it does not depend on any
232 235 * MT level protection given by STREAMS. IP uses a combination of its own
233 236 * internal serialization mechanism and standard Solaris locking techniques.
234 237 * The internal serialization is per phyint. This is used to serialize
235 238 * plumbing operations, IPMP operations, most set ioctls, etc.
236 239 *
237 240 * Plumbing is a long sequence of operations involving message
238 241 * exchanges between IP, ARP and device drivers. Many set ioctls are typically
239 242 * involved in plumbing operations. A natural model is to serialize these
240 243 * ioctls one per ill. For example plumbing of hme0 and qfe0 can go on in
241 244 * parallel without any interference. But various set ioctls on hme0 are best
242 245 * serialized, along with IPMP operations and processing of DLPI control
243 246 * messages received from drivers on a per phyint basis. This serialization is
244 247 * provided by the ipsq_t and primitives operating on this. Details can
245 248 * be found in ip_if.c above the core primitives operating on ipsq_t.
246 249 *
247 250 * Lookups of an ipif or ill by a thread return a refheld ipif / ill.
248 251 * Similarly, lookup of an ire by a thread also returns a refheld ire.
249 252 * In addition ipif's and ill's referenced by the ire are also indirectly
250 253 * refheld. Thus no ipif or ill can vanish as long as an ipif is refheld
251 254 * directly or indirectly. For example an SIOCSLIFADDR ioctl that changes the
252 255 * address of an ipif has to go through the ipsq_t. This ensures that only
253 256 * one such exclusive operation proceeds at any time on the ipif. It then
254 257 * waits for all refcnts associated with this ipif to come down to zero.
255 258 * The address is changed only after the ipif has been quiesced. Then the
256 259 * ipif is brought up again.
257 260 * More details are described above the comment in ip_sioctl_flags.
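 *
 * A minimal sketch of the hold/use/release discipline described above; the
 * lookup signature is an assumption (it varies across releases):
 *
 *	ill_t *ill = ill_lookup_on_ifindex(ifindex, B_FALSE, ipst);
 *	if (ill != NULL) {
 *		... use ill; it cannot vanish while the ref is held ...
 *		ill_refrele(ill);
 *	}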
258 261 *
259 262 * Packet processing is based mostly on IREs and is fully multi-threaded
260 263 * using standard Solaris MT techniques.
261 264 *
262 265 * There are explicit locks in IP to handle:
263 266 * - The ip_g_head list maintained by mi_open_link() and friends.
264 267 *
265 268 * - The reassembly data structures (one lock per hash bucket)
266 269 *
267 270 * - conn_lock is meant to protect conn_t fields. The fields actually
268 271 * protected by conn_lock are documented in the conn_t definition.
269 272 *
270 273 * - ire_lock to protect some of the fields of the ire, IRE tables
271 274 * (one lock per hash bucket). Refer to ip_ire.c for details.
272 275 *
273 276 * - ndp_g_lock and ncec_lock for protecting NCEs.
274 277 *
275 278 * - ill_lock protects fields of the ill and ipif. Details in ip.h
276 279 *
277 280 * - ill_g_lock: This is a global reader/writer lock. Protects the following
278 281 * * The AVL tree based global multi list of all ills.
279 282 * * The linked list of all ipifs of an ill
280 283 * * The <ipsq-xop> mapping
281 284 * * <ill-phyint> association
282 285 * Insertion/deletion of an ill in the system, insertion/deletion of an ipif
283 286 * into an ill, changing the <ipsq-xop> mapping of an ill, changing the
284 287 * <ill-phyint> assoc of an ill will all have to hold the ill_g_lock as
285 288 * writer for the actual duration of the insertion/deletion/change.
286 289 *
287 290 * - ill_lock: This is a per ill mutex.
288 291 * It protects some members of the ill_t struct; see ip.h for details.
289 292 * It also protects the <ill-phyint> assoc.
290 293 * It also protects the list of ipifs hanging off the ill.
291 294 *
292 295 * - ipsq_lock: This is a per ipsq_t mutex lock.
293 296 * This protects some members of the ipsq_t struct; see ip.h for details.
294 297 * It also protects the <ipsq-ipxop> mapping
295 298 *
296 299 * - ipx_lock: This is a per ipxop_t mutex lock.
297 300 * This protects some members of the ipxop_t struct; see ip.h for details.
298 301 *
299 302 * - phyint_lock: This is a per phyint mutex lock. Protects just the
300 303 * phyint_flags
301 304 *
302 305 * - ip_addr_avail_lock: This is used to ensure the uniqueness of IP addresses.
303 306 * This lock is held in ipif_up_done and the ipif is marked IPIF_UP and the
304 307 * uniqueness check is also done atomically.
305 308 *
306 309 * - ill_g_usesrc_lock: This readers/writer lock protects the usesrc
307 310 * group list linked by ill_usesrc_grp_next. It also protects the
308 311 * ill_usesrc_ifindex field. It is taken as a writer when a member of the
309 312 * group is being added or deleted. This lock is taken as a reader when
310 313 * walking the list/group (e.g. to get the number of members in a usesrc group).
311 314 * Note, it is only necessary to take this lock if the ill_usesrc_grp_next
312 315 * field is changing state, i.e. from NULL to non-NULL or vice versa. For
313 316 * example, it is not necessary to take this lock in the initial portion
314 317 * of ip_sioctl_slifusesrc or at all in ip_sioctl_flags since these
315 318 * operations are executed exclusively and that ensures that the "usesrc
316 319 * group state" cannot change. The "usesrc group state" change can happen
317 320 * only in the latter part of ip_sioctl_slifusesrc and in ill_delete.
318 321 *
319 322 * Changing <ill-phyint>, <ipsq-xop> associations:
320 323 *
321 324 * To change the <ill-phyint> association, the ill_g_lock must be held
322 325 * as writer, and the ill_locks of both the v4 and v6 instance of the ill
323 326 * must be held.
324 327 *
325 328 * To change the <ipsq-xop> association, the ill_g_lock must be held as
326 329 * writer, the ipsq_lock must be held, and one must be writer on the ipsq.
327 330 * This is only done when ills are added or removed from IPMP groups.
328 331 *
329 332 * To add or delete an ipif from the list of ipifs hanging off the ill,
330 333 * ill_g_lock (writer) and ill_lock must be held and the thread must be
331 334 * a writer on the associated ipsq.
332 335 *
333 336 * To add or delete an ill to the system, the ill_g_lock must be held as
334 337 * writer and the thread must be a writer on the associated ipsq.
335 338 *
336 339 * To add or delete an ilm to an ill, the ill_lock must be held and the thread
337 340 * must be a writer on the associated ipsq.
338 341 *
339 342 * Lock hierarchy
340 343 *
341 344 * Some lock hierarchy scenarios are listed below.
342 345 *
343 346 * ill_g_lock -> conn_lock -> ill_lock -> ipsq_lock -> ipx_lock
344 347 * ill_g_lock -> ill_lock(s) -> phyint_lock
345 348 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock
346 349 * ill_g_lock -> ip_addr_avail_lock
347 350 * conn_lock -> irb_lock -> ill_lock -> ire_lock
348 351 * ill_g_lock -> ip_g_nd_lock
349 352 * ill_g_lock -> ips_ipmp_lock -> ill_lock -> nce_lock
350 353 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock -> nce_lock
351 354 * arl_lock -> ill_lock
352 355 * ips_ire_dep_lock -> irb_lock
353 356 *
354 357 * When more than one ill lock needs to be held, all ill lock addresses
355 358 * are sorted on address and locked starting from highest addressed lock
356 359 * downward.
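 *
 * A minimal sketch of that ordering rule; ill_lock_pair is a hypothetical
 * helper, not a function in this file:
 *
 *	static void
 *	ill_lock_pair(ill_t *a, ill_t *b)
 *	{
 *		if (&a->ill_lock > &b->ill_lock) {
 *			mutex_enter(&a->ill_lock);
 *			mutex_enter(&b->ill_lock);
 *		} else {
 *			mutex_enter(&b->ill_lock);
 *			mutex_enter(&a->ill_lock);
 *		}
 *	}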
357 360 *
358 361 * Multicast scenarios
359 362 * ips_ill_g_lock -> ill_mcast_lock
360 363 * conn_ilg_lock -> ips_ill_g_lock -> ill_lock
361 364 * ill_mcast_serializer -> ill_mcast_lock -> ips_ipmp_lock -> ill_lock
362 365 * ill_mcast_serializer -> ill_mcast_lock -> connf_lock -> conn_lock
363 366 * ill_mcast_serializer -> ill_mcast_lock -> conn_ilg_lock
364 367 * ill_mcast_serializer -> ill_mcast_lock -> ips_igmp_timer_lock
365 368 *
366 369 * IPsec scenarios
367 370 *
368 371 * ipsa_lock -> ill_g_lock -> ill_lock
369 372 * ill_g_usesrc_lock -> ill_g_lock -> ill_lock
370 373 *
371 374 * Trusted Solaris scenarios
372 375 *
373 376 * igsa_lock -> gcgrp_rwlock -> gcgrp_lock
374 377 * igsa_lock -> gcdb_lock
375 378 * gcgrp_rwlock -> ire_lock
376 379 * gcgrp_rwlock -> gcdb_lock
377 380 *
378 381 * squeue(sq_lock), flow related (ft_lock, fe_lock) locking
379 382 *
380 383 * cpu_lock --> ill_lock --> sqset_lock --> sq_lock
381 384 * sq_lock -> conn_lock -> QLOCK(q)
382 385 * ill_lock -> ft_lock -> fe_lock
383 386 *
384 387 * Routing/forwarding table locking notes:
385 388 *
386 389 * Lock acquisition order: Radix tree lock, irb_lock.
387 390 * Requirements:
388 391 * i. Walker must not hold any locks during the walker callback.
389 392 * ii. Walker must not see a truncated tree during the walk because of any node
390 393 * deletion.
391 394 * iii. Existing code assumes ire_bucket is valid if it is non-null and is used
392 395 * in many places in the code to walk the irb list. Thus even if all the
393 396 * ires in a bucket have been deleted, we still can't free the radix node
394 397 * until the ires have actually been inactive'd (freed).
395 398 *
396 399 * Tree traversal - Need to hold the global tree lock in read mode.
397 400 * Before dropping the global tree lock, need to increment the ire_refcnt
398 401 * to ensure that the radix node can't be deleted.
399 402 *
400 403 * Tree add - Need to hold the global tree lock in write mode to add a
401 404 * radix node. To prevent the node from being deleted, increment the
402 405 * irb_refcnt, after the node is added to the tree. The ire itself is
403 406 * added later while holding the irb_lock, but not the tree lock.
404 407 *
405 408 * Tree delete - Need to hold the global tree lock and irb_lock in write mode.
406 409 * All associated ires must be inactive (i.e. freed), and irb_refcnt
407 410 * must be zero.
408 411 *
409 412 * Walker - Increment irb_refcnt before calling the walker callback. Hold the
410 413 * global tree lock (read mode) for traversal.
411 414 *
412 415 * IRE dependencies - In some cases we hold ips_ire_dep_lock across ire_refrele
413 416 * hence we will acquire irb_lock while holding ips_ire_dep_lock.
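 *
 * A minimal sketch of the walker protocol above; the tree-lock accessor
 * macros are assumptions, while the refcnt calls follow the text:
 *
 *	RADIX_NODE_HEAD_RLOCK(rnh);	(global tree lock, read mode)
 *	irb_refhold(irb);		(pin the bucket/radix node)
 *	RADIX_NODE_HEAD_UNLOCK(rnh);
 *	(*walk_fn)(ire, arg);		(callback runs with no locks held)
 *	irb_refrele(irb);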
414 417 *
415 418 * IPsec notes :
416 419 *
417 420 * IP interacts with the IPsec code (AH/ESP) by storing IPsec attributes
418 421 * in the ip_xmit_attr_t and ip_recv_attr_t. For outbound datagrams, the
419 422 * ip_xmit_attr_t has the information used by the IPsec code for applying
420 423 * the right level of protection. The information initialized by IP in
421 424 * the ip_xmit_attr_t is determined by the per-socket policy or global
422 425 * policy in the system.
423 426 * For inbound datagrams, the ip_recv_attr_t
424 427 * starts out with nothing in it. It gets filled
425 428 * with the right information if it goes through the AH/ESP code, which
426 429 * happens if the incoming packet is secure. The information initialized
427 430 * by AH/ESP, is later used by IP (during fanouts to ULP) to see whether
428 431 * the policy requirements needed by per-socket policy or global policy
429 432 * are met or not.
430 433 *
431 434 * For fully connected sockets, i.e. dst, src [addr, port] are known,
432 435 * conn_policy_cached is set indicating that policy has been cached.
433 436 * conn_in_enforce_policy may or may not be set depending on whether
434 437 * there is a global policy match or per-socket policy match.
435 438 * Policy inheritance happens in ip_policy_set once the destination is known.
436 439 * Once the right policy is set on the conn_t, policy cannot change for
437 440 * this socket. This makes life simpler for TCP (UDP ?) where
438 441 * re-transmissions go out with the same policy. For symmetry, policy
439 442 * is cached for fully connected UDP sockets also. Thus if policy is cached,
440 443 * it also implies that policy is latched, i.e. policy cannot change
441 444 * on these sockets. As we have the right policy on the conn, we don't
442 445 * have to look up global policy for every outbound and inbound datagram,
443 446 * which serves as an optimization. Note that a global policy change
444 447 * does not affect fully connected sockets if they have policy. If fully
445 448 * connected sockets did not have any policy associated with them, global
446 449 * policy change may affect them.
447 450 *
448 451 * IP Flow control notes:
449 452 * ---------------------
450 453 * Non-TCP streams are flow controlled by IP. The way this is accomplished
451 454 * differs when ILL_CAPAB_DLD_DIRECT is enabled for that IP instance. When
452 455 * ILL_DIRECT_CAPABLE(ill) is TRUE, IP can do direct function calls into
453 456 * GLDv3. Otherwise packets are sent down to lower layers using STREAMS
454 457 * functions.
455 458 *
456 459 * Per Tx ring udp flow control:
457 460 * This is applicable only when ILL_CAPAB_DLD_DIRECT capability is set in
458 461 * the ill (i.e. ILL_DIRECT_CAPABLE(ill) is true).
459 462 *
460 463 * The underlying link can expose multiple Tx rings to the GLDv3 mac layer.
461 464 * To achieve best performance, outgoing traffic needs to be fanned out among
462 465 * these Tx rings. mac_tx() is called (via str_mdata_fastpath_put()) to send
463 466 * traffic out of the NIC and it takes a fanout hint. UDP connections pass
464 467 * the address of connp as fanout hint to mac_tx(). Under flow controlled
465 468 * condition, mac_tx() returns a non-NULL cookie (ip_mac_tx_cookie_t). This
466 469 * cookie points to a specific Tx ring that is blocked. The cookie is used to
467 470 * hash into an entry in the idl_tx_list[] array. Each idl_tx_list_t
468 471 * points to drain_lists (idl_t's). These drain lists store the blocked UDP
469 472 * connp's. The drain list is not a single list but a configurable number of
470 473 * lists.
471 474 *
472 475 * The diagram below shows idl_tx_list_t's and their drain_lists. ip_stack_t
473 476 * has an array of idl_tx_list_t. The size of the array is TX_FANOUT_SIZE
474 477 * which is equal to 128. This array in turn contains a pointer to idl_t[],
475 478 * the ip drain list. The idl_t[] array size is MIN(max_ncpus, 8). The drain
476 479 * list will point to the list of connp's that are flow controlled.
477 480 *
478 481 * --------------- ------- ------- -------
479 482 * |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
480 483 * | --------------- ------- ------- -------
481 484 * | --------------- ------- ------- -------
482 485 * |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
483 486 * ---------------- | --------------- ------- ------- -------
484 487 * |idl_tx_list[0]|->| --------------- ------- ------- -------
485 488 * ---------------- |->|drain_list[2]|-->|connp|-->|connp|-->|connp|-->
486 489 * | --------------- ------- ------- -------
487 490 * . . . . .
488 491 * | --------------- ------- ------- -------
489 492 * |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
490 493 * --------------- ------- ------- -------
491 494 * --------------- ------- ------- -------
492 495 * |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
493 496 * | --------------- ------- ------- -------
494 497 * | --------------- ------- ------- -------
495 498 * ---------------- |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
496 499 * |idl_tx_list[1]|->| --------------- ------- ------- -------
497 500 * ---------------- | . . . .
498 501 * | --------------- ------- ------- -------
499 502 * |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
500 503 * --------------- ------- ------- -------
501 504 * .....
502 505 * ----------------
503 506 * |idl_tx_list[n]|-> ...
504 507 * ----------------
505 508 *
506 509 * When mac_tx() returns a cookie, the cookie is hashed into an index into
507 510 * ips_idl_tx_list[], and conn_drain_insert() is called with the idl_tx_list
508 511 * to insert the conn onto. conn_drain_insert() asserts flow control for the
509 512 * sockets via su_txq_full() (non-STREAMS) or QFULL on conn_wq (STREAMS).
510 513 * Further, conn_blocked is set to indicate that the conn is blocked.
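 *
 * A minimal sketch of that path; IDLHASHINDEX is a hypothetical name for
 * the cookie-to-index hash, and the mac_tx() arguments are elided:
 *
 *	cookie = mac_tx(..., (uintptr_t)connp, ...);
 *	if (cookie != NULL) {
 *		txl = &ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
 *		conn_drain_insert(connp, txl);	(assert flow control)
 *	}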
511 514 *
512 515 * GLDv3 calls ill_flow_enable() when flow control is relieved. The cookie
513 516 * passed in the call to ill_flow_enable() identifies the blocked Tx ring and
514 517 * is again hashed to locate the appropriate idl_tx_list, which is then
515 518 * drained via conn_walk_drain(). conn_walk_drain() goes through each conn in
516 519 * the drain list and calls conn_drain_remove() to clear flow control (via
517 520 * calling su_txq_full() or clearing QFULL), and remove the conn from the
518 521 * drain list.
519 522 *
520 523 * Note that the drain list is not a single list but a (configurable) array of
521 524 * lists (8 elements by default). Synchronization between drain insertion and
522 525 * flow control wakeup is handled by using idl_txl->txl_lock, and only
523 526 * conn_drain_insert() and conn_drain_remove() manipulate the drain list.
524 527 *
525 528 * Flow control via STREAMS is used when ILL_DIRECT_CAPABLE() returns FALSE.
526 529 * On the send side, if the packet cannot be sent down to the driver by IP
527 530 * (canput() fails), ip_xmit() drops the packet and returns EWOULDBLOCK to the
528 531 * caller, who may then invoke ixa_check_drain_insert() to insert the conn on
529 532 * the 0'th drain list. When ip_wsrv() runs on the ill_wq because flow
530 533 * control has been relieved, the blocked conns in the 0'th drain list are
531 534 * drained as in the non-STREAMS case.
532 535 *
533 536 * In both the STREAMS and non-STREAMS cases, the sockfs upcall to set QFULL
534 537 * is done when the conn is inserted into the drain list (conn_drain_insert())
535 538 * and cleared when the conn is removed from it (conn_drain_remove()).
536 539 *
537 540 * IPQOS notes:
538 541 *
539 542 * IPQoS Policies are applied to packets using IPPF (IP Policy framework)
540 543 * and IPQoS modules. IPPF includes hooks in IP at different control points
541 544 * (callout positions) which direct packets to IPQoS modules for policy
542 545 * processing. Policies, if present, are global.
543 546 *
544 547 * The callout positions are located in the following paths:
545 548 * o local_in (packets destined for this host)
546 549 * o local_out (packets originating from this host)
547 550 * o fwd_in (packets forwarded by this m/c - inbound)
548 551 * o fwd_out (packets forwarded by this m/c - outbound)
549 552 * Hooks at these callout points can be enabled/disabled using the ndd variable
550 553 * ip_policy_mask (a bit mask with the 4 LSB indicating the callout positions).
551 554 * By default all the callout positions are enabled.
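 *
 * A minimal sketch of the gating implied above; the mask bit assignments
 * are illustrative, not taken from the source:
 *
 *	#define	IPP_MASK_LOCAL_IN	0x1
 *	#define	IPP_MASK_LOCAL_OUT	0x2
 *	#define	IPP_MASK_FWD_IN		0x4
 *	#define	IPP_MASK_FWD_OUT	0x8
 *
 *	if (ip_policy_mask & IPP_MASK_LOCAL_IN)
 *		(hand the packet to ip_process() at the local_in point)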
552 555 *
553 556 * Outbound (local_out)
554 557 * Hooks are placed in ire_send_wire_v4 and ire_send_wire_v6.
555 558 *
556 559 * Inbound (local_in)
557 560 * Hooks are placed in ip_fanout_v4 and ip_fanout_v6.
558 561 *
559 562 * Forwarding (in and out)
560 563 * Hooks are placed in ire_recv_forward_v4/v6.
561 564 *
562 565 * IP Policy Framework processing (IPPF processing)
563 566 * Policy processing for a packet is initiated by ip_process, which ascertains
564 567 * that the classifier (ipgpc) is loaded and configured, failing which the
565 568 * packet resumes normal processing in IP. If the classifier is present, the
566 569 * packet is acted upon by one or more IPQoS modules (action instances), per
567 570 * filters configured in ipgpc and resumes normal IP processing thereafter.
568 571 * An action instance can drop a packet in the course of its processing.
569 572 *
570 573 * Zones notes:
571 574 *
572 575 * The partitioning rules for networking are as follows:
573 576 * 1) Packets coming from a zone must have a source address belonging to that
574 577 * zone.
575 578 * 2) Packets coming from a zone can only be sent on a physical interface on
576 579 * which the zone has an IP address.
577 580 * 3) Between two zones on the same machine, packet delivery is only allowed if
578 581 * there's a matching route for the destination and zone in the forwarding
579 582 * table.
580 583 * 4) The TCP and UDP port spaces are per-zone; that is, two processes in
581 584 * different zones can bind to the same port with the wildcard address
582 585 * (INADDR_ANY).
583 586 *
584 587 * The granularity of interface partitioning is at the logical interface level.
585 588 * Therefore, every zone has its own IP addresses, and incoming packets can be
586 589 * attributed to a zone unambiguously. A logical interface is placed into a zone
587 590 * using the SIOCSLIFZONE ioctl; this sets the ipif_zoneid field in the ipif_t
588 591 * structure. Rule (1) is implemented by modifying the source address selection
589 592 * algorithm so that the list of eligible addresses is filtered based on the
590 593 * sending process zone.
591 594 *
592 595 * The Internet Routing Entries (IREs) are either exclusive to a zone or shared
593 596 * across all zones, depending on their type. Here is the break-up:
594 597 *
595 598 * IRE type Shared/exclusive
596 599 * -------- ----------------
597 600 * IRE_BROADCAST Exclusive
598 601 * IRE_DEFAULT (default routes) Shared (*)
599 602 * IRE_LOCAL Exclusive (x)
600 603 * IRE_LOOPBACK Exclusive
601 604 * IRE_PREFIX (net routes) Shared (*)
602 605 * IRE_IF_NORESOLVER (interface routes) Exclusive
603 606 * IRE_IF_RESOLVER (interface routes) Exclusive
604 607 * IRE_IF_CLONE (interface routes) Exclusive
605 608 * IRE_HOST (host routes) Shared (*)
606 609 *
607 610 * (*) A zone can only use a default or off-subnet route if the gateway is
608 611 * directly reachable from the zone, that is, if the gateway's address matches
609 612 * one of the zone's logical interfaces.
610 613 *
611 614 * (x) IRE_LOCAL are handled a bit differently.
612 615 * When ip_restrict_interzone_loopback is set (the default),
613 616 * ire_route_recursive restricts loopback using an IRE_LOCAL
614 617 * between zones to the case when L2 would have conceptually looped the packet
615 618 * back, i.e. the loopback which is required since neither Ethernet drivers
616 619 * nor Ethernet hardware loops them back. This is the case when the normal
617 620 * routes (ignoring IREs with different zoneids) would send out the packet on
618 621 * the same ill as the ill with which the IRE_LOCAL is associated.
619 622 *
620 623 * Multiple zones can share a common broadcast address; typically all zones
621 624 * share the 255.255.255.255 address. Incoming as well as locally originated
622 625 * broadcast packets must be dispatched to all the zones on the broadcast
623 626 * network. For directed broadcasts (e.g. 10.16.72.255) this is not trivial
624 627 * since some zones may not be on the 10.16.72/24 network. To handle this, each
625 628 * zone has its own set of IRE_BROADCAST entries; then, broadcast packets are
626 629 * sent to every zone that has an IRE_BROADCAST entry for the destination
627 630 * address on the input ill, see ip_input_broadcast().
628 631 *
629 632 * Applications in different zones can join the same multicast group address.
630 633 * The same logic applies for multicast as for broadcast. ip_input_multicast
631 634 * dispatches packets to all zones that have members on the physical interface.
632 635 */
633 636
634 637 /*
635 638 * Squeue Fanout flags:
636 639 * 0: No fanout.
637 640 * 1: Fanout across all squeues
638 641 */
639 642 boolean_t ip_squeue_fanout = 0;
640 643
641 644 /*
642 645 * Maximum dups allowed per packet.
643 646 */
644 647 uint_t ip_max_frag_dups = 10;
645 648
646 649 static int ip_open(queue_t *q, dev_t *devp, int flag, int sflag,
647 650 cred_t *credp, boolean_t isv6);
648 651 static mblk_t *ip_xmit_attach_llhdr(mblk_t *, nce_t *);
649 652
650 653 static boolean_t icmp_inbound_verify_v4(mblk_t *, icmph_t *, ip_recv_attr_t *);
651 654 static void icmp_inbound_too_big_v4(icmph_t *, ip_recv_attr_t *);
652 655 static void icmp_inbound_error_fanout_v4(mblk_t *, icmph_t *,
653 656 ip_recv_attr_t *);
654 657 static void icmp_options_update(ipha_t *);
655 658 static void icmp_param_problem(mblk_t *, uint8_t, ip_recv_attr_t *);
656 659 static void icmp_pkt(mblk_t *, void *, size_t, ip_recv_attr_t *);
657 660 static mblk_t *icmp_pkt_err_ok(mblk_t *, ip_recv_attr_t *);
658 661 static void icmp_redirect_v4(mblk_t *mp, ipha_t *, icmph_t *,
659 662 ip_recv_attr_t *);
660 663 static void icmp_send_redirect(mblk_t *, ipaddr_t, ip_recv_attr_t *);
661 664 static void icmp_send_reply_v4(mblk_t *, ipha_t *, icmph_t *,
662 665 ip_recv_attr_t *);
663 666
664 667 mblk_t *ip_dlpi_alloc(size_t, t_uscalar_t);
665 668 char *ip_dot_addr(ipaddr_t, char *);
666 669 mblk_t *ip_carve_mp(mblk_t **, ssize_t);
667 670 int ip_close(queue_t *, int);
668 671 static char *ip_dot_saddr(uchar_t *, char *);
669 672 static void ip_lrput(queue_t *, mblk_t *);
670 673 ipaddr_t ip_net_mask(ipaddr_t);
671 674 char *ip_nv_lookup(nv_t *, int);
672 675 void ip_rput(queue_t *, mblk_t *);
673 676 static void ip_rput_dlpi_writer(ipsq_t *dummy_sq, queue_t *q, mblk_t *mp,
674 677 void *dummy_arg);
675 678 int ip_snmp_get(queue_t *, mblk_t *, int, boolean_t);
676 679 static mblk_t *ip_snmp_get_mib2_ip(queue_t *, mblk_t *,
677 680 mib2_ipIfStatsEntry_t *, ip_stack_t *, boolean_t);
678 681 static mblk_t *ip_snmp_get_mib2_ip_traffic_stats(queue_t *, mblk_t *,
679 682 ip_stack_t *, boolean_t);
680 683 static mblk_t *ip_snmp_get_mib2_ip6(queue_t *, mblk_t *, ip_stack_t *,
681 684 boolean_t);
682 685 static mblk_t *ip_snmp_get_mib2_icmp(queue_t *, mblk_t *, ip_stack_t *ipst);
683 686 static mblk_t *ip_snmp_get_mib2_icmp6(queue_t *, mblk_t *, ip_stack_t *ipst);
684 687 static mblk_t *ip_snmp_get_mib2_igmp(queue_t *, mblk_t *, ip_stack_t *ipst);
685 688 static mblk_t *ip_snmp_get_mib2_multi(queue_t *, mblk_t *, ip_stack_t *ipst);
686 689 static mblk_t *ip_snmp_get_mib2_ip_addr(queue_t *, mblk_t *,
687 690 ip_stack_t *ipst, boolean_t);
688 691 static mblk_t *ip_snmp_get_mib2_ip6_addr(queue_t *, mblk_t *,
689 692 ip_stack_t *ipst, boolean_t);
690 693 static mblk_t *ip_snmp_get_mib2_ip_group_src(queue_t *, mblk_t *,
691 694 ip_stack_t *ipst);
692 695 static mblk_t *ip_snmp_get_mib2_ip6_group_src(queue_t *, mblk_t *,
693 696 ip_stack_t *ipst);
694 697 static mblk_t *ip_snmp_get_mib2_ip_group_mem(queue_t *, mblk_t *,
695 698 ip_stack_t *ipst);
696 699 static mblk_t *ip_snmp_get_mib2_ip6_group_mem(queue_t *, mblk_t *,
697 700 ip_stack_t *ipst);
698 701 static mblk_t *ip_snmp_get_mib2_virt_multi(queue_t *, mblk_t *,
699 702 ip_stack_t *ipst);
700 703 static mblk_t *ip_snmp_get_mib2_multi_rtable(queue_t *, mblk_t *,
701 704 ip_stack_t *ipst);
702 705 static mblk_t *ip_snmp_get_mib2_ip_route_media(queue_t *, mblk_t *, int,
703 706 ip_stack_t *ipst);
704 707 static mblk_t *ip_snmp_get_mib2_ip6_route_media(queue_t *, mblk_t *, int,
705 708 ip_stack_t *ipst);
706 709 static void ip_snmp_get2_v4(ire_t *, iproutedata_t *);
707 710 static void ip_snmp_get2_v6_route(ire_t *, iproutedata_t *);
708 711 static int ip_snmp_get2_v4_media(ncec_t *, iproutedata_t *);
709 712 static int ip_snmp_get2_v6_media(ncec_t *, iproutedata_t *);
710 713 int ip_snmp_set(queue_t *, int, int, uchar_t *, int);
711 714
712 715 static mblk_t *ip_fragment_copyhdr(uchar_t *, int, int, ip_stack_t *,
713 716 mblk_t *);
714 717
715 718 static void conn_drain_init(ip_stack_t *);
716 719 static void conn_drain_fini(ip_stack_t *);
717 720 static void conn_drain(conn_t *connp, boolean_t closing);
718 721
719 722 static void conn_walk_drain(ip_stack_t *, idl_tx_list_t *);
720 723 static void conn_walk_sctp(pfv_t, void *, zoneid_t, netstack_t *);
721 724
722 725 static void *ip_stack_init(netstackid_t stackid, netstack_t *ns);
723 726 static void ip_stack_shutdown(netstackid_t stackid, void *arg);
724 727 static void ip_stack_fini(netstackid_t stackid, void *arg);
725 728
726 729 static int ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
727 730 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
728 731 ire_t *, conn_t *, boolean_t, const in6_addr_t *, mcast_record_t,
729 732 const in6_addr_t *);
730 733
731 734 static int ip_squeue_switch(int);
732 735
733 736 static void *ip_kstat_init(netstackid_t, ip_stack_t *);
734 737 static void ip_kstat_fini(netstackid_t, kstat_t *);
735 738 static int ip_kstat_update(kstat_t *kp, int rw);
736 739 static void *icmp_kstat_init(netstackid_t);
737 740 static void icmp_kstat_fini(netstackid_t, kstat_t *);
738 741 static int icmp_kstat_update(kstat_t *kp, int rw);
739 742 static void *ip_kstat2_init(netstackid_t, ip_stat_t *);
740 743 static void ip_kstat2_fini(netstackid_t, kstat_t *);
741 744
742 745 static void ipobs_init(ip_stack_t *);
743 746 static void ipobs_fini(ip_stack_t *);
744 747
745 748 static int ip_tp_cpu_update(cpu_setup_t, int, void *);
746 749
747 750 ipaddr_t ip_g_all_ones = IP_HOST_MASK;
748 751
749 752 static long ip_rput_pullups;
750 753 int dohwcksum = 1; /* use h/w cksum if supported by the hardware */
751 754
752 755 vmem_t *ip_minor_arena_sa; /* for minor nos. from INET_MIN_DEV+2 thru 2^^18-1 */
753 756 vmem_t *ip_minor_arena_la; /* for minor nos. from 2^^18 thru 2^^32-1 */
754 757
755 758 int ip_debug;
756 759
757 760 /*
758 761 * Multirouting/CGTP stuff
759 762 */
760 763 int ip_cgtp_filter_rev = CGTP_FILTER_REV; /* CGTP hooks version */
761 764
762 765 /*
763 766 * IP tunables related declarations. Definitions are in ip_tunables.c
764 767 */
765 768 extern mod_prop_info_t ip_propinfo_tbl[];
766 769 extern int ip_propinfo_count;
767 770
768 771 /*
769 772 * Table of IP ioctls encoding the various properties of the ioctl and
770 773 * indexed based on the last byte of the ioctl command. Occasionally there
771 774 * is a clash, and there is more than 1 ioctl with the same last byte.
772 775 * In such a case 1 ioctl is encoded in the ndx table and the remaining
773 776 * ioctls are encoded in the misc table. An entry in the ndx table is
774 777 * retrieved by indexing on the last byte of the ioctl command and comparing
775 778 * the ioctl command with the value in the ndx table. In the event of a
776 779 * mismatch the misc table is then searched sequentially for the desired
777 780 * ioctl command.
778 781 *
779 782 * Entry: <command> <copyin_size> <flags> <cmd_type> <function> <restart_func>
780 783 */
781 784 ip_ioctl_cmd_t ip_ndx_ioctl_table[] = {
782 785 /* 000 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
783 786 /* 001 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
784 787 /* 002 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
785 788 /* 003 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
786 789 /* 004 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
787 790 /* 005 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
788 791 /* 006 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
789 792 /* 007 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
790 793 /* 008 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
791 794 /* 009 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
792 795
793 796 /* 010 */ { SIOCADDRT, sizeof (struct rtentry), IPI_PRIV,
794 797 MISC_CMD, ip_siocaddrt, NULL },
795 798 /* 011 */ { SIOCDELRT, sizeof (struct rtentry), IPI_PRIV,
796 799 MISC_CMD, ip_siocdelrt, NULL },
797 800
798 801 /* 012 */ { SIOCSIFADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
799 802 IF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
800 803 /* 013 */ { SIOCGIFADDR, sizeof (struct ifreq), IPI_GET_CMD,
801 804 IF_CMD, ip_sioctl_get_addr, NULL },
802 805
803 806 /* 014 */ { SIOCSIFDSTADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
804 807 IF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
805 808 /* 015 */ { SIOCGIFDSTADDR, sizeof (struct ifreq),
806 809 IPI_GET_CMD, IF_CMD, ip_sioctl_get_dstaddr, NULL },
807 810
808 811 /* 016 */ { SIOCSIFFLAGS, sizeof (struct ifreq),
809 812 IPI_PRIV | IPI_WR,
810 813 IF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
811 814 /* 017 */ { SIOCGIFFLAGS, sizeof (struct ifreq),
812 815 IPI_MODOK | IPI_GET_CMD,
813 816 IF_CMD, ip_sioctl_get_flags, NULL },
814 817
815 818 /* 018 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
816 819 /* 019 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
817 820
818 821 /* copyin size cannot be coded for SIOCGIFCONF */
819 822 /* 020 */ { O_SIOCGIFCONF, 0, IPI_GET_CMD,
820 823 MISC_CMD, ip_sioctl_get_ifconf, NULL },
821 824
822 825 /* 021 */ { SIOCSIFMTU, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
823 826 IF_CMD, ip_sioctl_mtu, NULL },
824 827 /* 022 */ { SIOCGIFMTU, sizeof (struct ifreq), IPI_GET_CMD,
825 828 IF_CMD, ip_sioctl_get_mtu, NULL },
826 829 /* 023 */ { SIOCGIFBRDADDR, sizeof (struct ifreq),
827 830 IPI_GET_CMD, IF_CMD, ip_sioctl_get_brdaddr, NULL },
828 831 /* 024 */ { SIOCSIFBRDADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
829 832 IF_CMD, ip_sioctl_brdaddr, NULL },
830 833 /* 025 */ { SIOCGIFNETMASK, sizeof (struct ifreq),
831 834 IPI_GET_CMD, IF_CMD, ip_sioctl_get_netmask, NULL },
832 835 /* 026 */ { SIOCSIFNETMASK, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
833 836 IF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
834 837 /* 027 */ { SIOCGIFMETRIC, sizeof (struct ifreq),
835 838 IPI_GET_CMD, IF_CMD, ip_sioctl_get_metric, NULL },
836 839 /* 028 */ { SIOCSIFMETRIC, sizeof (struct ifreq), IPI_PRIV,
837 840 IF_CMD, ip_sioctl_metric, NULL },
838 841 /* 029 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
839 842
840 843 /* See 166-168 below for extended SIOC*XARP ioctls */
841 844 /* 030 */ { SIOCSARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
842 845 ARP_CMD, ip_sioctl_arp, NULL },
843 846 /* 031 */ { SIOCGARP, sizeof (struct arpreq), IPI_GET_CMD,
844 847 ARP_CMD, ip_sioctl_arp, NULL },
845 848 /* 032 */ { SIOCDARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
846 849 ARP_CMD, ip_sioctl_arp, NULL },
847 850
848 851 /* 033 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
849 852 /* 034 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
850 853 /* 035 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
851 854 /* 036 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
852 855 /* 037 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
853 856 /* 038 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
854 857 /* 039 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
855 858 /* 040 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
856 859 /* 041 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
857 860 /* 042 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
858 861 /* 043 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
859 862 /* 044 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
860 863 /* 045 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
861 864 /* 046 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
862 865 /* 047 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
863 866 /* 048 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
864 867 /* 049 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
865 868 /* 050 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
866 869 /* 051 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
867 870 /* 052 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
868 871 /* 053 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
869 872
870 873 /* 054 */ { IF_UNITSEL, sizeof (int), IPI_PRIV | IPI_WR | IPI_MODOK,
871 874 MISC_CMD, if_unitsel, if_unitsel_restart },
872 875
873 876 /* 055 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
874 877 /* 056 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
875 878 /* 057 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
876 879 /* 058 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
877 880 /* 059 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
878 881 /* 060 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
879 882 /* 061 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
880 883 /* 062 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
881 884 /* 063 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
882 885 /* 064 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
883 886 /* 065 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
884 887 /* 066 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
885 888 /* 067 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
886 889 /* 068 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
887 890 /* 069 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
888 891 /* 070 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
889 892 /* 071 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
890 893 /* 072 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
891 894
892 895 /* 073 */ { SIOCSIFNAME, sizeof (struct ifreq),
893 896 IPI_PRIV | IPI_WR | IPI_MODOK,
894 897 IF_CMD, ip_sioctl_sifname, NULL },
895 898
896 899 /* 074 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
897 900 /* 075 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
898 901 /* 076 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
899 902 /* 077 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
900 903 /* 078 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
901 904 /* 079 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
902 905 /* 080 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
903 906 /* 081 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
904 907 /* 082 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
905 908 /* 083 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
906 909 /* 084 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
907 910 /* 085 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
908 911 /* 086 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
909 912
910 913 /* 087 */ { SIOCGIFNUM, sizeof (int), IPI_GET_CMD,
911 914 MISC_CMD, ip_sioctl_get_ifnum, NULL },
912 915 /* 088 */ { SIOCGIFMUXID, sizeof (struct ifreq), IPI_GET_CMD,
913 916 IF_CMD, ip_sioctl_get_muxid, NULL },
914 917 /* 089 */ { SIOCSIFMUXID, sizeof (struct ifreq),
915 918 IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_muxid, NULL },
916 919
917 920 /* Both if and lif variants share same func */
918 921 /* 090 */ { SIOCGIFINDEX, sizeof (struct ifreq), IPI_GET_CMD,
919 922 IF_CMD, ip_sioctl_get_lifindex, NULL },
920 923 /* Both if and lif variants share same func */
921 924 /* 091 */ { SIOCSIFINDEX, sizeof (struct ifreq),
922 925 IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_slifindex, NULL },
923 926
924 927 /* copyin size cannot be coded for SIOCGIFCONF */
925 928 /* 092 */ { SIOCGIFCONF, 0, IPI_GET_CMD,
926 929 MISC_CMD, ip_sioctl_get_ifconf, NULL },
927 930 /* 093 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
928 931 /* 094 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
929 932 /* 095 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
930 933 /* 096 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
931 934 /* 097 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
932 935 /* 098 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
933 936 /* 099 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
934 937 /* 100 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
935 938 /* 101 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
936 939 /* 102 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
937 940 /* 103 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
938 941 /* 104 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
939 942 /* 105 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
940 943 /* 106 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
941 944 /* 107 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
942 945 /* 108 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
943 946 /* 109 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
944 947
945 948 /* 110 */ { SIOCLIFREMOVEIF, sizeof (struct lifreq),
946 949 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_removeif,
947 950 ip_sioctl_removeif_restart },
948 951 /* 111 */ { SIOCLIFADDIF, sizeof (struct lifreq),
949 952 IPI_GET_CMD | IPI_PRIV | IPI_WR,
950 953 LIF_CMD, ip_sioctl_addif, NULL },
951 954 #define SIOCLIFADDR_NDX 112
952 955 /* 112 */ { SIOCSLIFADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
953 956 LIF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
954 957 /* 113 */ { SIOCGLIFADDR, sizeof (struct lifreq),
955 958 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_addr, NULL },
956 959 /* 114 */ { SIOCSLIFDSTADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
957 960 LIF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
958 961 /* 115 */ { SIOCGLIFDSTADDR, sizeof (struct lifreq),
959 962 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dstaddr, NULL },
960 963 /* 116 */ { SIOCSLIFFLAGS, sizeof (struct lifreq),
961 964 IPI_PRIV | IPI_WR,
962 965 LIF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
963 966 /* 117 */ { SIOCGLIFFLAGS, sizeof (struct lifreq),
964 967 IPI_GET_CMD | IPI_MODOK,
965 968 LIF_CMD, ip_sioctl_get_flags, NULL },
966 969
967 970 /* 118 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
968 971 /* 119 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
969 972
970 973 /* 120 */ { O_SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
971 974 ip_sioctl_get_lifconf, NULL },
972 975 /* 121 */ { SIOCSLIFMTU, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
973 976 LIF_CMD, ip_sioctl_mtu, NULL },
974 977 /* 122 */ { SIOCGLIFMTU, sizeof (struct lifreq), IPI_GET_CMD,
975 978 LIF_CMD, ip_sioctl_get_mtu, NULL },
976 979 /* 123 */ { SIOCGLIFBRDADDR, sizeof (struct lifreq),
977 980 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_brdaddr, NULL },
978 981 /* 124 */ { SIOCSLIFBRDADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
979 982 LIF_CMD, ip_sioctl_brdaddr, NULL },
980 983 /* 125 */ { SIOCGLIFNETMASK, sizeof (struct lifreq),
981 984 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_netmask, NULL },
982 985 /* 126 */ { SIOCSLIFNETMASK, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
983 986 LIF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
984 987 /* 127 */ { SIOCGLIFMETRIC, sizeof (struct lifreq),
985 988 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_metric, NULL },
986 989 /* 128 */ { SIOCSLIFMETRIC, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
987 990 LIF_CMD, ip_sioctl_metric, NULL },
988 991 /* 129 */ { SIOCSLIFNAME, sizeof (struct lifreq),
989 992 IPI_PRIV | IPI_WR | IPI_MODOK,
990 993 LIF_CMD, ip_sioctl_slifname,
991 994 ip_sioctl_slifname_restart },
992 995
993 996 /* 130 */ { SIOCGLIFNUM, sizeof (struct lifnum), IPI_GET_CMD,
994 997 MISC_CMD, ip_sioctl_get_lifnum, NULL },
995 998 /* 131 */ { SIOCGLIFMUXID, sizeof (struct lifreq),
996 999 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_muxid, NULL },
997 1000 /* 132 */ { SIOCSLIFMUXID, sizeof (struct lifreq),
998 1001 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_muxid, NULL },
999 1002 /* 133 */ { SIOCGLIFINDEX, sizeof (struct lifreq),
1000 1003 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifindex, 0 },
1001 1004 /* 134 */ { SIOCSLIFINDEX, sizeof (struct lifreq),
1002 1005 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifindex, 0 },
1003 1006 /* 135 */ { SIOCSLIFTOKEN, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1004 1007 LIF_CMD, ip_sioctl_token, NULL },
1005 1008 /* 136 */ { SIOCGLIFTOKEN, sizeof (struct lifreq),
1006 1009 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_token, NULL },
1007 1010 /* 137 */ { SIOCSLIFSUBNET, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1008 1011 LIF_CMD, ip_sioctl_subnet, ip_sioctl_subnet_restart },
1009 1012 /* 138 */ { SIOCGLIFSUBNET, sizeof (struct lifreq),
1010 1013 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_subnet, NULL },
1011 1014 /* 139 */ { SIOCSLIFLNKINFO, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1012 1015 LIF_CMD, ip_sioctl_lnkinfo, NULL },
1013 1016
1014 1017 /* 140 */ { SIOCGLIFLNKINFO, sizeof (struct lifreq),
1015 1018 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lnkinfo, NULL },
1016 1019 /* 141 */ { SIOCLIFDELND, sizeof (struct lifreq), IPI_PRIV,
1017 1020 LIF_CMD, ip_siocdelndp_v6, NULL },
1018 1021 /* 142 */ { SIOCLIFGETND, sizeof (struct lifreq), IPI_GET_CMD,
1019 1022 LIF_CMD, ip_siocqueryndp_v6, NULL },
1020 1023 /* 143 */ { SIOCLIFSETND, sizeof (struct lifreq), IPI_PRIV,
1021 1024 LIF_CMD, ip_siocsetndp_v6, NULL },
1022 1025 /* 144 */ { SIOCTMYADDR, sizeof (struct sioc_addrreq), IPI_GET_CMD,
1023 1026 MISC_CMD, ip_sioctl_tmyaddr, NULL },
1024 1027 /* 145 */ { SIOCTONLINK, sizeof (struct sioc_addrreq), IPI_GET_CMD,
1025 1028 MISC_CMD, ip_sioctl_tonlink, NULL },
1026 1029 /* 146 */ { SIOCTMYSITE, sizeof (struct sioc_addrreq), 0,
1027 1030 MISC_CMD, ip_sioctl_tmysite, NULL },
1028 1031 /* 147 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1029 1032 /* 148 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1030 1033 /* IPSECioctls handled in ip_sioctl_copyin_setup itself */
1031 1034 /* 149 */ { SIOCFIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1032 1035 /* 150 */ { SIOCSIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1033 1036 /* 151 */ { SIOCDIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1034 1037 /* 152 */ { SIOCLIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1035 1038
1036 1039 /* 153 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1037 1040
1038 1041 /* 154 */ { SIOCGLIFBINDING, sizeof (struct lifreq), IPI_GET_CMD,
1039 1042 LIF_CMD, ip_sioctl_get_binding, NULL },
1040 1043 /* 155 */ { SIOCSLIFGROUPNAME, sizeof (struct lifreq),
1041 1044 IPI_PRIV | IPI_WR,
1042 1045 LIF_CMD, ip_sioctl_groupname, ip_sioctl_groupname },
1043 1046 /* 156 */ { SIOCGLIFGROUPNAME, sizeof (struct lifreq),
1044 1047 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_groupname, NULL },
1045 1048 /* 157 */ { SIOCGLIFGROUPINFO, sizeof (lifgroupinfo_t),
1046 1049 IPI_GET_CMD, MISC_CMD, ip_sioctl_groupinfo, NULL },
1047 1050
1048 1051 /* Leave 158-160 unused; used to be SIOC*IFARP ioctls */
1049 1052 /* 158 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1050 1053 /* 159 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1051 1054 /* 160 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1052 1055
1053 1056 /* 161 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1054 1057
1055 1058 /* These are handled in ip_sioctl_copyin_setup itself */
1056 1059 /* 162 */ { SIOCGIP6ADDRPOLICY, 0, IPI_NULL_BCONT,
1057 1060 MISC_CMD, NULL, NULL },
1058 1061 /* 163 */ { SIOCSIP6ADDRPOLICY, 0, IPI_PRIV | IPI_NULL_BCONT,
1059 1062 MISC_CMD, NULL, NULL },
1060 1063 /* 164 */ { SIOCGDSTINFO, 0, IPI_GET_CMD, MISC_CMD, NULL, NULL },
1061 1064
1062 1065 /* 165 */ { SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
1063 1066 ip_sioctl_get_lifconf, NULL },
1064 1067
1065 1068 /* 166 */ { SIOCSXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
1066 1069 XARP_CMD, ip_sioctl_arp, NULL },
1067 1070 /* 167 */ { SIOCGXARP, sizeof (struct xarpreq), IPI_GET_CMD,
1068 1071 XARP_CMD, ip_sioctl_arp, NULL },
1069 1072 /* 168 */ { SIOCDXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
1070 1073 XARP_CMD, ip_sioctl_arp, NULL },
1071 1074
1072 1075 /* SIOCPOPSOCKFS is not handled by IP */
1073 1076 /* 169 */ { IPI_DONTCARE /* SIOCPOPSOCKFS */, 0, 0, 0, NULL, NULL },
1074 1077
1075 1078 /* 170 */ { SIOCGLIFZONE, sizeof (struct lifreq),
1076 1079 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifzone, NULL },
1077 1080 /* 171 */ { SIOCSLIFZONE, sizeof (struct lifreq),
1078 1081 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifzone,
1079 1082 ip_sioctl_slifzone_restart },
1080 1083 /* 172-174 are SCTP ioctls and not handled by IP */
1081 1084 /* 172 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1082 1085 /* 173 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1083 1086 /* 174 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1084 1087 /* 175 */ { SIOCGLIFUSESRC, sizeof (struct lifreq),
1085 1088 IPI_GET_CMD, LIF_CMD,
1086 1089 ip_sioctl_get_lifusesrc, 0 },
1087 1090 /* 176 */ { SIOCSLIFUSESRC, sizeof (struct lifreq),
1088 1091 IPI_PRIV | IPI_WR,
1089 1092 LIF_CMD, ip_sioctl_slifusesrc,
1090 1093 NULL },
1091 1094 /* 177 */ { SIOCGLIFSRCOF, 0, IPI_GET_CMD, MISC_CMD,
1092 1095 ip_sioctl_get_lifsrcof, NULL },
1093 1096 /* 178 */ { SIOCGMSFILTER, sizeof (struct group_filter), IPI_GET_CMD,
1094 1097 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1095 1098 /* 179 */ { SIOCSMSFILTER, sizeof (struct group_filter), 0,
1096 1099 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1097 1100 /* 180 */ { SIOCGIPMSFILTER, sizeof (struct ip_msfilter), IPI_GET_CMD,
1098 1101 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1099 1102 /* 181 */ { SIOCSIPMSFILTER, sizeof (struct ip_msfilter), 0,
1100 1103 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1101 1104 /* 182 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1102 1105 /* SIOCSENABLESDP is handled by SDP */
1103 1106 /* 183 */ { IPI_DONTCARE /* SIOCSENABLESDP */, 0, 0, 0, NULL, NULL },
1104 1107 /* 184 */ { IPI_DONTCARE /* SIOCSQPTR */, 0, 0, 0, NULL, NULL },
1105 1108 /* 185 */ { SIOCGIFHWADDR, sizeof (struct ifreq), IPI_GET_CMD,
1106 1109 IF_CMD, ip_sioctl_get_ifhwaddr, NULL },
1107 1110 /* 186 */ { IPI_DONTCARE /* SIOCGSTAMP */, 0, 0, 0, NULL, NULL },
1108 1111 /* 187 */ { SIOCILB, 0, IPI_PRIV | IPI_GET_CMD, MISC_CMD,
1109 1112 ip_sioctl_ilb_cmd, NULL },
1110 1113 /* 188 */ { SIOCGETPROP, 0, IPI_GET_CMD, 0, NULL, NULL },
1111 1114 /* 189 */ { SIOCSETPROP, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL},
1112 1115 /* 190 */ { SIOCGLIFDADSTATE, sizeof (struct lifreq),
1113 1116 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dadstate, NULL },
1114 1117 /* 191 */ { SIOCSLIFPREFIX, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1115 1118 LIF_CMD, ip_sioctl_prefix, ip_sioctl_prefix_restart },
1116 1119 /* 192 */ { SIOCGLIFHWADDR, sizeof (struct lifreq), IPI_GET_CMD,
1117 1120 LIF_CMD, ip_sioctl_get_lifhwaddr, NULL }
1118 1121 };
1119 1122
1120 1123 int ip_ndx_ioctl_count = sizeof (ip_ndx_ioctl_table) / sizeof (ip_ioctl_cmd_t);
1121 1124
1122 1125 ip_ioctl_cmd_t ip_misc_ioctl_table[] = {
1123 1126 { I_LINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1124 1127 { I_UNLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1125 1128 { I_PLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1126 1129 { I_PUNLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1127 1130 { ND_GET, 0, 0, 0, NULL, NULL },
1128 1131 { ND_SET, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1129 1132 { IP_IOCTL, 0, 0, 0, NULL, NULL },
1130 1133 { SIOCGETVIFCNT, sizeof (struct sioc_vif_req), IPI_GET_CMD,
1131 1134 MISC_CMD, mrt_ioctl},
1132 1135 { SIOCGETSGCNT, sizeof (struct sioc_sg_req), IPI_GET_CMD,
1133 1136 MISC_CMD, mrt_ioctl},
1134 1137 { SIOCGETLSGCNT, sizeof (struct sioc_lsg_req), IPI_GET_CMD,
1135 1138 MISC_CMD, mrt_ioctl}
1136 1139 };
1137 1140
1138 1141 int ip_misc_ioctl_count =
1139 1142 sizeof (ip_misc_ioctl_table) / sizeof (ip_ioctl_cmd_t);
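
A minimal sketch of the two-stage lookup described in the comment above the
ndx table; the ipi_cmd member name is an assumption about ip_ioctl_cmd_t,
and the function itself is illustrative:

	static ip_ioctl_cmd_t *
	ip_ioctl_lookup_sketch(int ioc_cmd)
	{
		int i;
		ip_ioctl_cmd_t *ipip;

		/* First try the indexed table, keyed on the last byte. */
		if ((ioc_cmd & 0xff) < ip_ndx_ioctl_count) {
			ipip = &ip_ndx_ioctl_table[ioc_cmd & 0xff];
			if (ipip->ipi_cmd == ioc_cmd)
				return (ipip);
		}
		/* Clash or miss: search the misc table sequentially. */
		for (i = 0; i < ip_misc_ioctl_count; i++) {
			if (ip_misc_ioctl_table[i].ipi_cmd == ioc_cmd)
				return (&ip_misc_ioctl_table[i]);
		}
		return (NULL);
	}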
1140 1143
1141 1144 int conn_drain_nthreads; /* Number of drainers reqd. */
1142 1145 /* Settable in /etc/system */
1143 1146 /* Defined in ip_ire.c */
1144 1147 extern uint32_t ip_ire_max_bucket_cnt, ip6_ire_max_bucket_cnt;
1145 1148 extern uint32_t ip_ire_min_bucket_cnt, ip6_ire_min_bucket_cnt;
1146 1149 extern uint32_t ip_ire_mem_ratio, ip_ire_cpu_ratio;
1147 1150
1148 1151 static nv_t ire_nv_arr[] = {
1149 1152 { IRE_BROADCAST, "BROADCAST" },
1150 1153 { IRE_LOCAL, "LOCAL" },
1151 1154 { IRE_LOOPBACK, "LOOPBACK" },
1152 1155 { IRE_DEFAULT, "DEFAULT" },
1153 1156 { IRE_PREFIX, "PREFIX" },
1154 1157 { IRE_IF_NORESOLVER, "IF_NORESOL" },
1155 1158 { IRE_IF_RESOLVER, "IF_RESOLV" },
1156 1159 { IRE_IF_CLONE, "IF_CLONE" },
1157 1160 { IRE_HOST, "HOST" },
1158 1161 { IRE_MULTICAST, "MULTICAST" },
1159 1162 { IRE_NOROUTE, "NOROUTE" },
1160 1163 { 0 }
1161 1164 };
1162 1165
1163 1166 nv_t *ire_nv_tbl = ire_nv_arr;
1164 1167
1165 1168 /* Simple ICMP IP Header Template */
1166 1169 static ipha_t icmp_ipha = {
1167 1170 IP_SIMPLE_HDR_VERSION, 0, 0, 0, 0, 0, IPPROTO_ICMP
1168 1171 };
1169 1172
1170 1173 struct module_info ip_mod_info = {
1171 1174 IP_MOD_ID, IP_MOD_NAME, IP_MOD_MINPSZ, IP_MOD_MAXPSZ, IP_MOD_HIWAT,
1172 1175 IP_MOD_LOWAT
1173 1176 };
1174 1177
1175 1178 /*
1176 1179  * Duplicate static symbols within a module confuse mdb, so we avoid the
1177 1180  * problem by making the symbols here distinct from those in udp.c.
1178 1181 */
1179 1182
1180 1183 /*
1181 1184 * Entry points for IP as a device and as a module.
1182 1185 * We have separate open functions for the /dev/ip and /dev/ip6 devices.
1183 1186 */
1184 1187 static struct qinit iprinitv4 = {
1185 1188 (pfi_t)ip_rput, NULL, ip_openv4, ip_close, NULL,
1186 1189 &ip_mod_info
1187 1190 };
1188 1191
1189 1192 struct qinit iprinitv6 = {
1190 1193 (pfi_t)ip_rput_v6, NULL, ip_openv6, ip_close, NULL,
1191 1194 &ip_mod_info
1192 1195 };
1193 1196
1194 1197 static struct qinit ipwinit = {
1195 1198 (pfi_t)ip_wput_nondata, (pfi_t)ip_wsrv, NULL, NULL, NULL,
1196 1199 &ip_mod_info
1197 1200 };
1198 1201
1199 1202 static struct qinit iplrinit = {
1200 1203 (pfi_t)ip_lrput, NULL, ip_openv4, ip_close, NULL,
1201 1204 &ip_mod_info
1202 1205 };
1203 1206
1204 1207 static struct qinit iplwinit = {
1205 1208 (pfi_t)ip_lwput, NULL, NULL, NULL, NULL,
1206 1209 &ip_mod_info
1207 1210 };
1208 1211
1209 1212 /* For AF_INET aka /dev/ip */
1210 1213 struct streamtab ipinfov4 = {
1211 1214 &iprinitv4, &ipwinit, &iplrinit, &iplwinit
1212 1215 };
1213 1216
1214 1217 /* For AF_INET6 aka /dev/ip6 */
1215 1218 struct streamtab ipinfov6 = {
1216 1219 &iprinitv6, &ipwinit, &iplrinit, &iplwinit
1217 1220 };
1218 1221
1219 1222 #ifdef DEBUG
1220 1223 boolean_t skip_sctp_cksum = B_FALSE;
1221 1224 #endif
1222 1225
1223 1226 /*
1224 1227 * Generate an ICMP fragmentation needed message.
1225 1228 * When called from ip_output side a minimal ip_recv_attr_t needs to be
1226 1229 * constructed by the caller.
1227 1230 */
1228 1231 void
1229 1232 icmp_frag_needed(mblk_t *mp, int mtu, ip_recv_attr_t *ira)
1230 1233 {
1231 1234 icmph_t icmph;
1232 1235 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
1233 1236
1234 1237 mp = icmp_pkt_err_ok(mp, ira);
1235 1238 if (mp == NULL)
1236 1239 return;
1237 1240
1238 1241 bzero(&icmph, sizeof (icmph_t));
1239 1242 icmph.icmph_type = ICMP_DEST_UNREACHABLE;
1240 1243 icmph.icmph_code = ICMP_FRAGMENTATION_NEEDED;
1241 1244 icmph.icmph_du_mtu = htons((uint16_t)mtu);
1242 1245 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutFragNeeded);
1243 1246 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);
1244 1247
1245 1248 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
1246 1249 }
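
The comment above notes that callers on the ip_output side must construct a
minimal ip_recv_attr_t themselves. A minimal caller-side sketch, assuming the
caller already holds the outgoing ill and the IPv4 header; the exact field set
is an assumption (the authoritative list is whatever icmp_pkt_err_ok/icmp_pkt
actually read):

	ip_recv_attr_t	iras;

	bzero(&iras, sizeof (iras));
	iras.ira_flags = IRAF_IS_IPV4;		/* assumed for the v4 path */
	iras.ira_ill = iras.ira_rill = ill;	/* ira_ill is dereferenced above */
	iras.ira_zoneid = ALL_ZONES;		/* illustrative choice */
	iras.ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
	icmp_frag_needed(mp, mtu, &iras);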
1247 1250
1248 1251 /*
1249 1252 * icmp_inbound_v4 deals with ICMP messages that are handled by IP.
1250 1253 * If the ICMP message is consumed by IP, i.e., it should not be delivered
1251 1254 * to any IPPROTO_ICMP raw sockets, then it returns NULL.
1252 1255  * Likewise, if the ICMP error is malformed (too short, etc.), then it
1253 1256 * returns NULL. The caller uses this to determine whether or not to send
1254 1257 * to raw sockets.
1255 1258 *
1256 1259 * All error messages are passed to the matching transport stream.
1257 1260 *
1258 1261  * The following cases are handled by icmp_inbound_v4:
1259 1262  * 1) It needs to send a reply back and possibly deliver it
1260 1263 * to the "interested" upper clients.
1261 1264 * 2) Return the mblk so that the caller can pass it to the RAW socket clients.
1262 1265 * 3) It needs to change some values in IP only.
1263 1266  * 4) It needs to change some values in IP and upper layers e.g. TCP
1264 1267 * by delivering an error to the upper layers.
1265 1268 *
1266 1269  * We handle the above four cases in the context of IPsec in the
1267 1270  * following way:
1268 1271 *
1269 1272 * 1) Send the reply back in the same way as the request came in.
1270 1273 * If it came in encrypted, it goes out encrypted. If it came in
1271 1274  *    clear, it goes out in clear. Thus, this will prevent a chosen
1272 1275  *    plaintext attack.
1273 1276 * 2) The client may or may not expect things to come in secure.
1274 1277 * If it comes in secure, the policy constraints are checked
1275 1278 * before delivering it to the upper layers. If it comes in
1276 1279 * clear, ipsec_inbound_accept_clear will decide whether to
1277 1280 * accept this in clear or not. In both the cases, if the returned
1278 1281 * message (IP header + 8 bytes) that caused the icmp message has
1279 1282 * AH/ESP headers, it is sent up to AH/ESP for validation before
1280 1283 * sending up. If there are only 8 bytes of returned message, then
1281 1284 * upper client will not be notified.
1282 1285  * 3) Check with global policy to see whether it matches the constraints.
1283 1286 * But this will be done only if icmp_accept_messages_in_clear is
1284 1287 * zero.
1285 1288 * 4) If we need to change both in IP and ULP, then the decision taken
1286 1289 * while affecting the values in IP and while delivering up to TCP
1287 1290 * should be the same.
1288 1291 *
1289 1292 * There are two cases.
1290 1293 *
1291 1294 * a) If we reject data at the IP layer (ipsec_check_global_policy()
1292 1295 * failed), we will not deliver it to the ULP, even though they
1293 1296 * are *willing* to accept in *clear*. This is fine as our global
1294 1297  *    disposition to icmp messages asks us to reject the datagram.
1295 1298 *
1296 1299 * b) If we accept data at the IP layer (ipsec_check_global_policy()
1297 1300  *    succeeded or icmp_accept_messages_in_clear is 1), but are not able
1298 1301  *    to deliver it to the ULP (policy failed), it can lead to
1299 1302 * consistency problems. The cases known at this time are
1300 1303 * ICMP_DESTINATION_UNREACHABLE messages with following code
1301 1304 * values :
1302 1305 *
1303 1306 * - ICMP_FRAGMENTATION_NEEDED : IP adapts to the new value
1304 1307 * and Upper layer rejects. Then the communication will
1305 1308 * come to a stop. This is solved by making similar decisions
1306 1309 * at both levels. Currently, when we are unable to deliver
1307 1310 * to the Upper Layer (due to policy failures) while IP has
1308 1311 * adjusted dce_pmtu, the next outbound datagram would
1309 1312 * generate a local ICMP_FRAGMENTATION_NEEDED message - which
1310 1313  *      will carry the right level of protection. Thus the right
1311 1314  *      value will be communicated even if we could not communicate
1312 1315  *      it when the error first arrived from the wire. But this
1313 1316 * assumes there would be at least one outbound datagram after
1314 1317 * IP has adjusted its dce_pmtu value. To make things
1315 1318 * simpler, we accept in clear after the validation of
1316 1319 * AH/ESP headers.
1317 1320 *
1318 1321 * - Other ICMP ERRORS : We may not be able to deliver it to the
1319 1322 * upper layer depending on the level of protection the upper
1320 1323 * layer expects and the disposition in ipsec_inbound_accept_clear().
1321 1324 * ipsec_inbound_accept_clear() decides whether a given ICMP error
1322 1325 * should be accepted in clear when the Upper layer expects secure.
1323 1326 * Thus the communication may get aborted by some bad ICMP
1324 1327 * packets.
1325 1328 */
1326 1329 mblk_t *
1327 1330 icmp_inbound_v4(mblk_t *mp, ip_recv_attr_t *ira)
1328 1331 {
1329 1332 icmph_t *icmph;
1330 1333 ipha_t *ipha; /* Outer header */
1331 1334 int ip_hdr_length; /* Outer header length */
1332 1335 boolean_t interested;
1333 1336 ipif_t *ipif;
1334 1337 uint32_t ts;
1335 1338 uint32_t *tsp;
1336 1339 timestruc_t now;
1337 1340 ill_t *ill = ira->ira_ill;
1338 1341 ip_stack_t *ipst = ill->ill_ipst;
1339 1342 zoneid_t zoneid = ira->ira_zoneid;
1340 1343 int len_needed;
1341 1344 mblk_t *mp_ret = NULL;
1342 1345
1343 1346 ipha = (ipha_t *)mp->b_rptr;
1344 1347
1345 1348 BUMP_MIB(&ipst->ips_icmp_mib, icmpInMsgs);
1346 1349
1347 1350 ip_hdr_length = ira->ira_ip_hdr_length;
1348 1351 if ((mp->b_wptr - mp->b_rptr) < (ip_hdr_length + ICMPH_SIZE)) {
1349 1352 if (ira->ira_pktlen < (ip_hdr_length + ICMPH_SIZE)) {
1350 1353 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1351 1354 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1352 1355 freemsg(mp);
1353 1356 return (NULL);
1354 1357 }
1355 1358 /* Last chance to get real. */
1356 1359 ipha = ip_pullup(mp, ip_hdr_length + ICMPH_SIZE, ira);
1357 1360 if (ipha == NULL) {
1358 1361 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
1359 1362 freemsg(mp);
1360 1363 return (NULL);
1361 1364 }
1362 1365 }
1363 1366
1364 1367 /* The IP header will always be a multiple of four bytes */
1365 1368 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1366 1369 ip2dbg(("icmp_inbound_v4: type %d code %d\n", icmph->icmph_type,
1367 1370 icmph->icmph_code));
1368 1371
1369 1372 /*
1370 1373 * We will set "interested" to "true" if we should pass a copy to
1371 1374 * the transport or if we handle the packet locally.
1372 1375 */
1373 1376 interested = B_FALSE;
1374 1377 switch (icmph->icmph_type) {
1375 1378 case ICMP_ECHO_REPLY:
1376 1379 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchoReps);
1377 1380 break;
1378 1381 case ICMP_DEST_UNREACHABLE:
1379 1382 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED)
1380 1383 BUMP_MIB(&ipst->ips_icmp_mib, icmpInFragNeeded);
1381 1384 interested = B_TRUE; /* Pass up to transport */
1382 1385 BUMP_MIB(&ipst->ips_icmp_mib, icmpInDestUnreachs);
1383 1386 break;
1384 1387 case ICMP_SOURCE_QUENCH:
1385 1388 interested = B_TRUE; /* Pass up to transport */
1386 1389 BUMP_MIB(&ipst->ips_icmp_mib, icmpInSrcQuenchs);
1387 1390 break;
1388 1391 case ICMP_REDIRECT:
1389 1392 if (!ipst->ips_ip_ignore_redirect)
1390 1393 interested = B_TRUE;
1391 1394 BUMP_MIB(&ipst->ips_icmp_mib, icmpInRedirects);
1392 1395 break;
1393 1396 case ICMP_ECHO_REQUEST:
1394 1397 /*
1395 1398 * Whether to respond to echo requests that come in as IP
1396 1399 * broadcasts or as IP multicast is subject to debate
1397 1400 * (what isn't?). We aim to please, you pick it.
1398 1401 * Default is do it.
1399 1402 */
1400 1403 if (ira->ira_flags & IRAF_MULTICAST) {
1401 1404 /* multicast: respond based on tunable */
1402 1405 interested = ipst->ips_ip_g_resp_to_echo_mcast;
1403 1406 } else if (ira->ira_flags & IRAF_BROADCAST) {
1404 1407 /* broadcast: respond based on tunable */
1405 1408 interested = ipst->ips_ip_g_resp_to_echo_bcast;
1406 1409 } else {
1407 1410 /* unicast: always respond */
1408 1411 interested = B_TRUE;
1409 1412 }
1410 1413 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchos);
1411 1414 if (!interested) {
1412 1415 /* We never pass these to RAW sockets */
1413 1416 freemsg(mp);
1414 1417 return (NULL);
1415 1418 }
1416 1419
1417 1420 /* Check db_ref to make sure we can modify the packet. */
1418 1421 if (mp->b_datap->db_ref > 1) {
1419 1422 mblk_t *mp1;
1420 1423
1421 1424 mp1 = copymsg(mp);
1422 1425 freemsg(mp);
1423 1426 if (!mp1) {
1424 1427 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1425 1428 return (NULL);
1426 1429 }
1427 1430 mp = mp1;
1428 1431 ipha = (ipha_t *)mp->b_rptr;
1429 1432 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1430 1433 }
1431 1434 icmph->icmph_type = ICMP_ECHO_REPLY;
1432 1435 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutEchoReps);
1433 1436 icmp_send_reply_v4(mp, ipha, icmph, ira);
1434 1437 return (NULL);
1435 1438
1436 1439 case ICMP_ROUTER_ADVERTISEMENT:
1437 1440 case ICMP_ROUTER_SOLICITATION:
1438 1441 break;
1439 1442 case ICMP_TIME_EXCEEDED:
1440 1443 interested = B_TRUE; /* Pass up to transport */
1441 1444 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimeExcds);
1442 1445 break;
1443 1446 case ICMP_PARAM_PROBLEM:
1444 1447 interested = B_TRUE; /* Pass up to transport */
1445 1448 BUMP_MIB(&ipst->ips_icmp_mib, icmpInParmProbs);
1446 1449 break;
1447 1450 case ICMP_TIME_STAMP_REQUEST:
1448 1451 /* Response to Time Stamp Requests is local policy. */
1449 1452 if (ipst->ips_ip_g_resp_to_timestamp) {
1450 1453 if (ira->ira_flags & IRAF_MULTIBROADCAST)
1451 1454 interested =
1452 1455 ipst->ips_ip_g_resp_to_timestamp_bcast;
1453 1456 else
1454 1457 interested = B_TRUE;
1455 1458 }
1456 1459 if (!interested) {
1457 1460 /* We never pass these to RAW sockets */
1458 1461 freemsg(mp);
1459 1462 return (NULL);
1460 1463 }
1461 1464
1462 1465 /* Make sure we have enough of the packet */
1463 1466 len_needed = ip_hdr_length + ICMPH_SIZE +
1464 1467 3 * sizeof (uint32_t);
1465 1468
1466 1469 if (mp->b_wptr - mp->b_rptr < len_needed) {
1467 1470 ipha = ip_pullup(mp, len_needed, ira);
1468 1471 if (ipha == NULL) {
1469 1472 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1470 1473 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1471 1474 mp, ill);
1472 1475 freemsg(mp);
1473 1476 return (NULL);
1474 1477 }
1475 1478 /* Refresh following the pullup. */
1476 1479 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1477 1480 }
1478 1481 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestamps);
1479 1482 /* Check db_ref to make sure we can modify the packet. */
1480 1483 if (mp->b_datap->db_ref > 1) {
1481 1484 mblk_t *mp1;
1482 1485
1483 1486 mp1 = copymsg(mp);
1484 1487 freemsg(mp);
1485 1488 if (!mp1) {
1486 1489 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1487 1490 return (NULL);
1488 1491 }
1489 1492 mp = mp1;
1490 1493 ipha = (ipha_t *)mp->b_rptr;
1491 1494 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1492 1495 }
1493 1496 icmph->icmph_type = ICMP_TIME_STAMP_REPLY;
1494 1497 tsp = (uint32_t *)&icmph[1];
1495 1498 tsp++; /* Skip past 'originate time' */
1496 1499 /* Compute # of milliseconds since midnight */
1497 1500 gethrestime(&now);
1498 1501 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
1499 1502 now.tv_nsec / (NANOSEC / MILLISEC);
1500 1503 *tsp++ = htonl(ts); /* Lay in 'receive time' */
1501 1504 *tsp++ = htonl(ts); /* Lay in 'send time' */
1502 1505 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimestampReps);
1503 1506 icmp_send_reply_v4(mp, ipha, icmph, ira);
1504 1507 return (NULL);
1505 1508
1506 1509 case ICMP_TIME_STAMP_REPLY:
1507 1510 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestampReps);
1508 1511 break;
1509 1512 case ICMP_INFO_REQUEST:
1510 1513 /* Per RFC 1122 3.2.2.7, ignore this. */
1511 1514 case ICMP_INFO_REPLY:
1512 1515 break;
1513 1516 case ICMP_ADDRESS_MASK_REQUEST:
1514 1517 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1515 1518 interested =
1516 1519 ipst->ips_ip_respond_to_address_mask_broadcast;
1517 1520 } else {
1518 1521 interested = B_TRUE;
1519 1522 }
1520 1523 if (!interested) {
1521 1524 /* We never pass these to RAW sockets */
1522 1525 freemsg(mp);
1523 1526 return (NULL);
1524 1527 }
1525 1528 len_needed = ip_hdr_length + ICMPH_SIZE + IP_ADDR_LEN;
1526 1529 if (mp->b_wptr - mp->b_rptr < len_needed) {
1527 1530 ipha = ip_pullup(mp, len_needed, ira);
1528 1531 if (ipha == NULL) {
1529 1532 BUMP_MIB(ill->ill_ip_mib,
1530 1533 ipIfStatsInTruncatedPkts);
1531 1534 ip_drop_input("ipIfStatsInTruncatedPkts", mp,
1532 1535 ill);
1533 1536 freemsg(mp);
1534 1537 return (NULL);
1535 1538 }
1536 1539 /* Refresh following the pullup. */
1537 1540 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1538 1541 }
1539 1542 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMasks);
1540 1543 /* Check db_ref to make sure we can modify the packet. */
1541 1544 if (mp->b_datap->db_ref > 1) {
1542 1545 mblk_t *mp1;
1543 1546
1544 1547 mp1 = copymsg(mp);
1545 1548 freemsg(mp);
1546 1549 if (!mp1) {
1547 1550 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1548 1551 return (NULL);
1549 1552 }
1550 1553 mp = mp1;
1551 1554 ipha = (ipha_t *)mp->b_rptr;
1552 1555 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1553 1556 }
1554 1557 /*
1555 1558  * Need the ipif with the mask to be the same as the source
1556 1559 * address of the mask reply. For unicast we have a specific
1557 1560 * ipif. For multicast/broadcast we only handle onlink
1558 1561 * senders, and use the source address to pick an ipif.
1559 1562 */
1560 1563 ipif = ipif_lookup_addr(ipha->ipha_dst, ill, zoneid, ipst);
1561 1564 if (ipif == NULL) {
1562 1565 /* Broadcast or multicast */
1563 1566 ipif = ipif_lookup_remote(ill, ipha->ipha_src, zoneid);
1564 1567 if (ipif == NULL) {
1565 1568 freemsg(mp);
1566 1569 return (NULL);
1567 1570 }
1568 1571 }
1569 1572 icmph->icmph_type = ICMP_ADDRESS_MASK_REPLY;
1570 1573 bcopy(&ipif->ipif_net_mask, &icmph[1], IP_ADDR_LEN);
1571 1574 ipif_refrele(ipif);
1572 1575 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutAddrMaskReps);
1573 1576 icmp_send_reply_v4(mp, ipha, icmph, ira);
1574 1577 return (NULL);
1575 1578
1576 1579 case ICMP_ADDRESS_MASK_REPLY:
1577 1580 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMaskReps);
1578 1581 break;
1579 1582 default:
1580 1583 interested = B_TRUE; /* Pass up to transport */
1581 1584 BUMP_MIB(&ipst->ips_icmp_mib, icmpInUnknowns);
1582 1585 break;
1583 1586 }
1584 1587 /*
1585 1588  * See if there is an ICMP client; this lets us avoid an extra
1586 1589  * copymsg/freemsg when there isn't one.
1587 1590 */
1588 1591 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_ICMP].connf_head != NULL) {
1589 1592 /* If there is an ICMP client and we want one too, copy it. */
1590 1593
1591 1594 if (!interested) {
1592 1595 /* Caller will deliver to RAW sockets */
1593 1596 return (mp);
1594 1597 }
1595 1598 mp_ret = copymsg(mp);
1596 1599 if (mp_ret == NULL) {
1597 1600 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1598 1601 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1599 1602 }
1600 1603 } else if (!interested) {
1601 1604 /* Neither we nor raw sockets are interested. Drop packet now */
1602 1605 freemsg(mp);
1603 1606 return (NULL);
1604 1607 }
1605 1608
1606 1609 /*
1607 1610 * ICMP error or redirect packet. Make sure we have enough of
1608 1611 * the header and that db_ref == 1 since we might end up modifying
1609 1612 * the packet.
1610 1613 */
1611 1614 if (mp->b_cont != NULL) {
1612 1615 if (ip_pullup(mp, -1, ira) == NULL) {
1613 1616 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1614 1617 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1615 1618 mp, ill);
1616 1619 freemsg(mp);
1617 1620 return (mp_ret);
1618 1621 }
1619 1622 }
1620 1623
1621 1624 if (mp->b_datap->db_ref > 1) {
1622 1625 mblk_t *mp1;
1623 1626
1624 1627 mp1 = copymsg(mp);
1625 1628 if (mp1 == NULL) {
1626 1629 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1627 1630 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1628 1631 freemsg(mp);
1629 1632 return (mp_ret);
1630 1633 }
1631 1634 freemsg(mp);
1632 1635 mp = mp1;
1633 1636 }
1634 1637
1635 1638 /*
1636 1639 * In case mp has changed, verify the message before any further
1637 1640  * processing.
1638 1641 */
1639 1642 ipha = (ipha_t *)mp->b_rptr;
1640 1643 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1641 1644 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
1642 1645 freemsg(mp);
1643 1646 return (mp_ret);
1644 1647 }
1645 1648
1646 1649 switch (icmph->icmph_type) {
1647 1650 case ICMP_REDIRECT:
1648 1651 icmp_redirect_v4(mp, ipha, icmph, ira);
1649 1652 break;
1650 1653 case ICMP_DEST_UNREACHABLE:
1651 1654 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED) {
1652 1655 			/* Update the DCE and adjust the MTU in the ICMP header if needed */
1653 1656 icmp_inbound_too_big_v4(icmph, ira);
1654 1657 }
1655 1658 /* FALLTHRU */
1656 1659 default:
1657 1660 icmp_inbound_error_fanout_v4(mp, icmph, ira);
1658 1661 break;
1659 1662 }
1660 1663 return (mp_ret);
1661 1664 }
1662 1665
1663 1666 /*
1664 1667 * Send an ICMP echo, timestamp or address mask reply.
1665 1668 * The caller has already updated the payload part of the packet.
1666 1669 * We handle the ICMP checksum, IP source address selection and feed
1667 1670 * the packet into ip_output_simple.
1668 1671 */
1669 1672 static void
1670 1673 icmp_send_reply_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph,
1671 1674 ip_recv_attr_t *ira)
1672 1675 {
1673 1676 uint_t ip_hdr_length = ira->ira_ip_hdr_length;
1674 1677 ill_t *ill = ira->ira_ill;
1675 1678 ip_stack_t *ipst = ill->ill_ipst;
1676 1679 ip_xmit_attr_t ixas;
1677 1680
1678 1681 /* Send out an ICMP packet */
1679 1682 icmph->icmph_checksum = 0;
1680 1683 icmph->icmph_checksum = IP_CSUM(mp, ip_hdr_length, 0);
1681 1684 /* Reset time to live. */
1682 1685 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
1683 1686 {
1684 1687 /* Swap source and destination addresses */
1685 1688 ipaddr_t tmp;
1686 1689
1687 1690 tmp = ipha->ipha_src;
1688 1691 ipha->ipha_src = ipha->ipha_dst;
1689 1692 ipha->ipha_dst = tmp;
1690 1693 }
1691 1694 ipha->ipha_ident = 0;
1692 1695 if (!IS_SIMPLE_IPH(ipha))
1693 1696 icmp_options_update(ipha);
1694 1697
1695 1698 bzero(&ixas, sizeof (ixas));
1696 1699 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
1697 1700 ixas.ixa_zoneid = ira->ira_zoneid;
1698 1701 ixas.ixa_cred = kcred;
1699 1702 ixas.ixa_cpid = NOPID;
1700 1703 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
1701 1704 ixas.ixa_ifindex = 0;
1702 1705 ixas.ixa_ipst = ipst;
1703 1706 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
1704 1707
1705 1708 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
1706 1709 /*
1707 1710 * This packet should go out the same way as it
1708 1711 		 * came in, i.e. in clear, independent of the IPsec policy
1709 1712 * for transmitting packets.
1710 1713 */
1711 1714 ixas.ixa_flags |= IXAF_NO_IPSEC;
1712 1715 } else {
1713 1716 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
1714 1717 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1715 1718 /* Note: mp already consumed and ip_drop_packet done */
1716 1719 return;
1717 1720 }
1718 1721 }
1719 1722 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1720 1723 /*
1721 1724 		 * Not one of our addresses (IRE_LOCALs), thus we let
1722 1725 * ip_output_simple pick the source.
1723 1726 */
1724 1727 ipha->ipha_src = INADDR_ANY;
1725 1728 ixas.ixa_flags |= IXAF_SET_SOURCE;
1726 1729 }
1727 1730 /* Should we send with DF and use dce_pmtu? */
1728 1731 if (ipst->ips_ipv4_icmp_return_pmtu) {
1729 1732 ixas.ixa_flags |= IXAF_PMTU_DISCOVERY;
1730 1733 ipha->ipha_fragment_offset_and_flags |= IPH_DF_HTONS;
1731 1734 }
1732 1735
1733 1736 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
1734 1737
1735 1738 (void) ip_output_simple(mp, &ixas);
1736 1739 ixa_cleanup(&ixas);
1737 1740 }
1738 1741
1739 1742 /*
1740 1743  * Verify the ICMP message, for either an ICMP error or a redirect packet.
1741 1744  * The caller should have fully pulled up the message. If it's a redirect
1742 1745  * packet, only basic checks on the IP header will be done; otherwise, verify
1743 1746 * the packet by looking at the included ULP header.
1744 1747 *
1745 1748 * Called before icmp_inbound_error_fanout_v4 is called.
1746 1749 */
1747 1750 static boolean_t
1748 1751 icmp_inbound_verify_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
1749 1752 {
1750 1753 ill_t *ill = ira->ira_ill;
1751 1754 int hdr_length;
1752 1755 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
1753 1756 conn_t *connp;
1754 1757 ipha_t *ipha; /* Inner IP header */
1755 1758
1756 1759 ipha = (ipha_t *)&icmph[1];
1757 1760 if ((uchar_t *)ipha + IP_SIMPLE_HDR_LENGTH > mp->b_wptr)
1758 1761 goto truncated;
1759 1762
1760 1763 hdr_length = IPH_HDR_LENGTH(ipha);
1761 1764
1762 1765 if ((IPH_HDR_VERSION(ipha) != IPV4_VERSION))
1763 1766 goto discard_pkt;
1764 1767
1765 1768 if (hdr_length < sizeof (ipha_t))
1766 1769 goto truncated;
1767 1770
1768 1771 if ((uchar_t *)ipha + hdr_length > mp->b_wptr)
1769 1772 goto truncated;
1770 1773
1771 1774 /*
1772 1775 * Stop here for ICMP_REDIRECT.
1773 1776 */
1774 1777 if (icmph->icmph_type == ICMP_REDIRECT)
1775 1778 return (B_TRUE);
1776 1779
1777 1780 /*
1778 1781 * ICMP errors only.
1779 1782 */
1780 1783 switch (ipha->ipha_protocol) {
1781 1784 case IPPROTO_UDP:
1782 1785 /*
1783 1786 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1784 1787 * transport header.
1785 1788 */
1786 1789 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1787 1790 mp->b_wptr)
1788 1791 goto truncated;
1789 1792 break;
1790 1793 case IPPROTO_TCP: {
1791 1794 tcpha_t *tcpha;
1792 1795
1793 1796 /*
1794 1797 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1795 1798 * transport header.
1796 1799 */
1797 1800 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1798 1801 mp->b_wptr)
1799 1802 goto truncated;
1800 1803
1801 1804 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
1802 1805 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
1803 1806 ipst);
1804 1807 if (connp == NULL)
1805 1808 goto discard_pkt;
1806 1809
1807 1810 if ((connp->conn_verifyicmp != NULL) &&
1808 1811 !connp->conn_verifyicmp(connp, tcpha, icmph, NULL, ira)) {
1809 1812 CONN_DEC_REF(connp);
1810 1813 goto discard_pkt;
1811 1814 }
1812 1815 CONN_DEC_REF(connp);
1813 1816 break;
1814 1817 }
1815 1818 case IPPROTO_SCTP:
1816 1819 /*
1817 1820 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1818 1821 * transport header.
1819 1822 */
1820 1823 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1821 1824 mp->b_wptr)
1822 1825 goto truncated;
1823 1826 break;
1827 + case IPPROTO_DCCP: {
1828 + dccpha_t *dccpha;
1829 +
1830 + /*
1831 + * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1832 + * transport header.
1833 + */
1834 + if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1835 + mp->b_wptr)
1836 + goto truncated;
1837 +
1838 +		cmn_err(CE_NOTE, "icmp_inbound_verify_v4");	/* XXX:DCCP bring-up trace */
1839 +
1840 + dccpha = (dccpha_t *)((uchar_t *)ipha + hdr_length);
1841 +		/* XXX:DCCP enable this lookup once ipcl_dccp_lookup_reversed_ipv4() exists */
1842 +/*
1843 + connp = ipcl_dccp_lookup_reversed_ipv4(ipha, dccpha,
1844 + DCCPS_LISTEN, ipst);
1845 + if (connp == NULL) {
1846 + goto discard_pkt;
1847 + }
1848 +
1849 + if ((connp->conn_verifyicmp != NULL) &&
1850 + !connp->conn_verifyicmp(connp, dccpha, icmph, NULL, ira)) {
1851 + CONN_DEC_REF(connp);
1852 + goto discard_pkt;
1853 + }
1854 +
1855 + CONN_DEC_REF(connp);
1856 +*/
1857 + break;
1858 + }
1824 1859 case IPPROTO_ESP:
1825 1860 case IPPROTO_AH:
1826 1861 break;
1827 1862 case IPPROTO_ENCAP:
1828 1863 if ((uchar_t *)ipha + hdr_length + sizeof (ipha_t) >
1829 1864 mp->b_wptr)
1830 1865 goto truncated;
1831 1866 break;
1832 1867 default:
1833 1868 break;
1834 1869 }
1835 1870
1836 1871 return (B_TRUE);
1837 1872
1838 1873 discard_pkt:
1839 1874 /* Bogus ICMP error. */
1840 1875 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1841 1876 return (B_FALSE);
1842 1877
1843 1878 truncated:
1844 1879 	/* We pulled up everything already. Must be truncated */
1845 1880 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1846 1881 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1847 1882 return (B_FALSE);
1848 1883 }
1849 1884
1850 1885 /* Table from RFC 1191 */
1851 1886 static int icmp_frag_size_table[] =
1852 1887 { 32000, 17914, 8166, 4352, 2002, 1496, 1006, 508, 296, 68 };
1853 1888
1854 1889 /*
1855 1890 * Process received ICMP Packet too big.
1856 1891 * Just handles the DCE create/update, including using the above table of
1857 1892 * PMTU guesses. The caller is responsible for validating the packet before
1858 1893 * passing it in and also to fanout the ICMP error to any matching transport
1859 1894 * conns. Assumes the message has been fully pulled up and verified.
1860 1895 *
1861 1896 * Before getting here, the caller has called icmp_inbound_verify_v4()
1862 1897 * that should have verified with ULP to prevent undoing the changes we're
1863 1898 * going to make to DCE. For example, TCP might have verified that the packet
1864 1899 * which generated error is in the send window.
1865 1900 *
1866 1901  * In some cases this modifies the MTU in the ICMP header; the caller
1867 1902  * should pass the packet to the matching ULP after this returns.
1868 1903 */
1869 1904 static void
1870 1905 icmp_inbound_too_big_v4(icmph_t *icmph, ip_recv_attr_t *ira)
1871 1906 {
1872 1907 dce_t *dce;
1873 1908 int old_mtu;
1874 1909 int mtu, orig_mtu;
1875 1910 ipaddr_t dst;
1876 1911 boolean_t disable_pmtud;
1877 1912 ill_t *ill = ira->ira_ill;
1878 1913 ip_stack_t *ipst = ill->ill_ipst;
1879 1914 uint_t hdr_length;
1880 1915 ipha_t *ipha;
1881 1916
1882 1917 /* Caller already pulled up everything. */
1883 1918 ipha = (ipha_t *)&icmph[1];
1884 1919 ASSERT(icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
1885 1920 icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED);
1886 1921 ASSERT(ill != NULL);
1887 1922
1888 1923 hdr_length = IPH_HDR_LENGTH(ipha);
1889 1924
1890 1925 /*
1891 1926 * We handle path MTU for source routed packets since the DCE
1892 1927 * is looked up using the final destination.
1893 1928 */
1894 1929 dst = ip_get_dst(ipha);
1895 1930
1896 1931 dce = dce_lookup_and_add_v4(dst, ipst);
1897 1932 if (dce == NULL) {
1898 1933 /* Couldn't add a unique one - ENOMEM */
1899 1934 ip1dbg(("icmp_inbound_too_big_v4: no dce for 0x%x\n",
1900 1935 ntohl(dst)));
1901 1936 return;
1902 1937 }
1903 1938
1904 1939 /* Check for MTU discovery advice as described in RFC 1191 */
1905 1940 mtu = ntohs(icmph->icmph_du_mtu);
1906 1941 orig_mtu = mtu;
1907 1942 disable_pmtud = B_FALSE;
1908 1943
1909 1944 mutex_enter(&dce->dce_lock);
1910 1945 if (dce->dce_flags & DCEF_PMTU)
1911 1946 old_mtu = dce->dce_pmtu;
1912 1947 else
1913 1948 old_mtu = ill->ill_mtu;
1914 1949
1915 1950 if (icmph->icmph_du_zero != 0 || mtu < ipst->ips_ip_pmtu_min) {
1916 1951 uint32_t length;
1917 1952 int i;
1918 1953
1919 1954 /*
1920 1955 * Use the table from RFC 1191 to figure out
1921 1956 * the next "plateau" based on the length in
1922 1957 * the original IP packet.
1923 1958 */
1924 1959 length = ntohs(ipha->ipha_length);
1925 1960 DTRACE_PROBE2(ip4__pmtu__guess, dce_t *, dce,
1926 1961 uint32_t, length);
1927 1962 if (old_mtu <= length &&
1928 1963 old_mtu >= length - hdr_length) {
1929 1964 /*
1930 1965 * Handle broken BSD 4.2 systems that
1931 1966 * return the wrong ipha_length in ICMP
1932 1967 * errors.
1933 1968 */
1934 1969 ip1dbg(("Wrong mtu: sent %d, dce %d\n",
1935 1970 length, old_mtu));
1936 1971 length -= hdr_length;
1937 1972 }
1938 1973 for (i = 0; i < A_CNT(icmp_frag_size_table); i++) {
1939 1974 if (length > icmp_frag_size_table[i])
1940 1975 break;
1941 1976 }
1942 1977 if (i == A_CNT(icmp_frag_size_table)) {
1943 1978 /* Smaller than IP_MIN_MTU! */
1944 1979 ip1dbg(("Too big for packet size %d\n",
1945 1980 length));
1946 1981 disable_pmtud = B_TRUE;
1947 1982 mtu = ipst->ips_ip_pmtu_min;
1948 1983 } else {
1949 1984 mtu = icmp_frag_size_table[i];
1950 1985 ip1dbg(("Calculated mtu %d, packet size %d, "
1951 1986 "before %d\n", mtu, length, old_mtu));
1952 1987 if (mtu < ipst->ips_ip_pmtu_min) {
1953 1988 mtu = ipst->ips_ip_pmtu_min;
1954 1989 disable_pmtud = B_TRUE;
1955 1990 }
1956 1991 }
1957 1992 }
1958 1993 if (disable_pmtud)
1959 1994 dce->dce_flags |= DCEF_TOO_SMALL_PMTU;
1960 1995 else
1961 1996 dce->dce_flags &= ~DCEF_TOO_SMALL_PMTU;
1962 1997
1963 1998 dce->dce_pmtu = MIN(old_mtu, mtu);
1964 1999 /* Prepare to send the new max frag size for the ULP. */
1965 2000 icmph->icmph_du_zero = 0;
1966 2001 icmph->icmph_du_mtu = htons((uint16_t)dce->dce_pmtu);
1967 2002 DTRACE_PROBE4(ip4__pmtu__change, icmph_t *, icmph, dce_t *,
1968 2003 dce, int, orig_mtu, int, mtu);
1969 2004
1970 2005 /* We now have a PMTU for sure */
1971 2006 dce->dce_flags |= DCEF_PMTU;
1972 2007 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
1973 2008 mutex_exit(&dce->dce_lock);
1974 2009 /*
1975 2010 * After dropping the lock the new value is visible to everyone.
1976 2011 * Then we bump the generation number so any cached values reinspect
1977 2012 * the dce_t.
1978 2013 */
1979 2014 dce_increment_generation(dce);
1980 2015 dce_refrele(dce);
1981 2016 }
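
To make the plateau walk above concrete, here is the same logic rendered as a
standalone sketch (names mirror the code above; ip_pmtu_min stands in for
ipst->ips_ip_pmtu_min, and the DCEF_TOO_SMALL_PMTU bookkeeping is omitted):

	static int
	next_pmtu_plateau(uint32_t length, int ip_pmtu_min)
	{
		int i;

		/* Pick the largest RFC 1191 plateau strictly below length. */
		for (i = 0; i < A_CNT(icmp_frag_size_table); i++) {
			if (length > icmp_frag_size_table[i])
				break;
		}
		if (i == A_CNT(icmp_frag_size_table))
			return (ip_pmtu_min);	/* below the smallest plateau */
		return (MAX(icmp_frag_size_table[i], ip_pmtu_min));
	}

For example, an offending packet of length 1500 maps to the 1496 plateau, and
one of length 600 maps to 508.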
1982 2017
1983 2018 /*
1984 2019 * If the packet in error is Self-Encapsulated, icmp_inbound_error_fanout_v4
1985 2020 * calls this function.
1986 2021 */
1987 2022 static mblk_t *
1988 2023 icmp_inbound_self_encap_error_v4(mblk_t *mp, ipha_t *ipha, ipha_t *in_ipha)
1989 2024 {
1990 2025 int length;
1991 2026
1992 2027 ASSERT(mp->b_datap->db_type == M_DATA);
1993 2028
1994 2029 /* icmp_inbound_v4 has already pulled up the whole error packet */
1995 2030 ASSERT(mp->b_cont == NULL);
1996 2031
1997 2032 /*
1998 2033 * The length that we want to overlay is the inner header
1999 2034 * and what follows it.
2000 2035 */
2001 2036 length = msgdsize(mp) - ((uchar_t *)in_ipha - mp->b_rptr);
2002 2037
2003 2038 /*
2004 2039 * Overlay the inner header and whatever follows it over the
2005 2040 * outer header.
2006 2041 */
2007 2042 bcopy((uchar_t *)in_ipha, (uchar_t *)ipha, length);
2008 2043
2009 2044 /* Adjust for what we removed */
2010 2045 mp->b_wptr -= (uchar_t *)in_ipha - (uchar_t *)ipha;
2011 2046 return (mp);
2012 2047 }
2013 2048
2014 2049 /*
2015 2050 * Try to pass the ICMP message upstream in case the ULP cares.
2016 2051 *
2017 2052 * If the packet that caused the ICMP error is secure, we send
2018 2053 * it to AH/ESP to make sure that the attached packet has a
2019 2054 * valid association. ipha in the code below points to the
2020 2055 * IP header of the packet that caused the error.
2021 2056 *
2022 2057 * For IPsec cases, we let the next-layer-up (which has access to
2023 2058 * cached policy on the conn_t, or can query the SPD directly)
2024 2059 * subtract out any IPsec overhead if they must. We therefore make no
2025 2060 * adjustments here for IPsec overhead.
2026 2061 *
2027 2062 * IFN could have been generated locally or by some router.
2028 2063 *
2029 2064 * LOCAL : ire_send_wire (before calling ipsec_out_process) can call
2030 2065  *	    icmp_frag_needed/icmp_pkt2big_v6 to generate a local IFN.
2031 2066  *	    This happens because IP adjusted its value of MTU on an
2032 2067  *	    earlier IFN message and could not tell the upper layer
2033 2068  *	    the new adjusted value of MTU, e.g. the packet was encrypted
2034 2069 * or there was not enough information to fanout to upper
2035 2070 * layers. Thus on the next outbound datagram, ire_send_wire
2036 2071 * generates the IFN, where IPsec processing has *not* been
2037 2072 * done.
2038 2073 *
2039 2074  *	    Note that we retain ixa_fragsize across IPsec; thus once
2040 2075  *	    we have picked ixa_fragsize and entered ipsec_out_process we do
2041 2076  *	    not change the fragsize even if the path MTU changes before
2042 2077 * we reach ip_output_post_ipsec.
2043 2078 *
2044 2079 * In the local case, IRAF_LOOPBACK will be set indicating
2045 2080 * that IFN was generated locally.
2046 2081 *
2047 2082 * ROUTER : IFN could be secure or non-secure.
2048 2083 *
2049 2084 * * SECURE : We use the IPSEC_IN to fanout to AH/ESP if the
2050 2085 * packet in error has AH/ESP headers to validate the AH/ESP
2051 2086 * headers. AH/ESP will verify whether there is a valid SA or
2052 2087 * not and send it back. We will fanout again if we have more
2053 2088 * data in the packet.
2054 2089 *
2055 2090 * If the packet in error does not have AH/ESP, we handle it
2056 2091 * like any other case.
2057 2092 *
2058 2093 * * NON_SECURE : If the packet in error has AH/ESP headers, we send it
2059 2094 * up to AH/ESP for validation. AH/ESP will verify whether there is a
2060 2095 * valid SA or not and send it back. We will fanout again if
2061 2096 * we have more data in the packet.
2062 2097 *
2063 2098 * If the packet in error does not have AH/ESP, we handle it
2064 2099 * like any other case.
2065 2100 *
2066 2101 * The caller must have called icmp_inbound_verify_v4.
2067 2102 */
2068 2103 static void
2069 2104 icmp_inbound_error_fanout_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
2070 2105 {
2071 2106 uint16_t *up; /* Pointer to ports in ULP header */
2072 2107 uint32_t ports; /* reversed ports for fanout */
2073 2108 ipha_t ripha; /* With reversed addresses */
2074 2109 ipha_t *ipha; /* Inner IP header */
2075 2110 uint_t hdr_length; /* Inner IP header length */
2076 2111 tcpha_t *tcpha;
2077 2112 conn_t *connp;
2078 2113 ill_t *ill = ira->ira_ill;
2079 2114 ip_stack_t *ipst = ill->ill_ipst;
2080 2115 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
2081 2116 ill_t *rill = ira->ira_rill;
2082 2117
2083 2118 /* Caller already pulled up everything. */
2084 2119 ipha = (ipha_t *)&icmph[1];
2085 2120 ASSERT((uchar_t *)&ipha[1] <= mp->b_wptr);
2086 2121 ASSERT(mp->b_cont == NULL);
2087 2122
2088 2123 hdr_length = IPH_HDR_LENGTH(ipha);
2089 2124 ira->ira_protocol = ipha->ipha_protocol;
2090 2125
2091 2126 /*
2092 2127 * We need a separate IP header with the source and destination
2093 2128 * addresses reversed to do fanout/classification because the ipha in
2094 2129 * the ICMP error is in the form we sent it out.
2095 2130 */
2096 2131 ripha.ipha_src = ipha->ipha_dst;
2097 2132 ripha.ipha_dst = ipha->ipha_src;
2098 2133 ripha.ipha_protocol = ipha->ipha_protocol;
2099 2134 ripha.ipha_version_and_hdr_length = ipha->ipha_version_and_hdr_length;
2100 2135
2101 2136 ip2dbg(("icmp_inbound_error_v4: proto %d %x to %x: %d/%d\n",
2102 2137 ripha.ipha_protocol, ntohl(ipha->ipha_src),
2103 2138 ntohl(ipha->ipha_dst),
2104 2139 icmph->icmph_type, icmph->icmph_code));
2105 2140
2106 2141 switch (ipha->ipha_protocol) {
2107 2142 case IPPROTO_UDP:
2108 2143 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2109 2144
2110 2145 /* Attempt to find a client stream based on port. */
2111 2146 ip2dbg(("icmp_inbound_error_v4: UDP ports %d to %d\n",
2112 2147 ntohs(up[0]), ntohs(up[1])));
2113 2148
2114 2149 /* Note that we send error to all matches. */
2115 2150 ira->ira_flags |= IRAF_ICMP_ERROR;
2116 2151 ip_fanout_udp_multi_v4(mp, &ripha, up[0], up[1], ira);
2117 2152 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2118 2153 return;
2119 2154
2120 2155 case IPPROTO_TCP:
2121 2156 /*
2122 2157 * Find a TCP client stream for this packet.
2123 2158 * Note that we do a reverse lookup since the header is
2124 2159 * in the form we sent it out.
2125 2160 */
2126 2161 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
2127 2162 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
2128 2163 ipst);
2129 2164 if (connp == NULL)
2130 2165 goto discard_pkt;
2131 2166
2132 2167 if (CONN_INBOUND_POLICY_PRESENT(connp, ipss) ||
2133 2168 (ira->ira_flags & IRAF_IPSEC_SECURE)) {
2134 2169 mp = ipsec_check_inbound_policy(mp, connp,
2135 2170 ipha, NULL, ira);
2136 2171 if (mp == NULL) {
2137 2172 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2138 2173 /* Note that mp is NULL */
2139 2174 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2140 2175 CONN_DEC_REF(connp);
2141 2176 return;
2142 2177 }
2143 2178 }
2144 2179
2145 2180 ira->ira_flags |= IRAF_ICMP_ERROR;
2146 2181 ira->ira_ill = ira->ira_rill = NULL;
2147 2182 if (IPCL_IS_TCP(connp)) {
2148 2183 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
2149 2184 connp->conn_recvicmp, connp, ira, SQ_FILL,
2150 2185 SQTAG_TCP_INPUT_ICMP_ERR);
2151 2186 } else {
2152 2187 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
2153 2188 (connp->conn_recv)(connp, mp, NULL, ira);
2154 2189 CONN_DEC_REF(connp);
2155 2190 }
2156 2191 ira->ira_ill = ill;
2157 2192 ira->ira_rill = rill;
2158 2193 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2159 2194 return;
2160 2195
2161 2196 case IPPROTO_SCTP:
2162 2197 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2163 2198 /* Find a SCTP client stream for this packet. */
2164 2199 ((uint16_t *)&ports)[0] = up[1];
2165 2200 ((uint16_t *)&ports)[1] = up[0];
2166 2201
2167 2202 ira->ira_flags |= IRAF_ICMP_ERROR;
2168 2203 ip_fanout_sctp(mp, &ripha, NULL, ports, ira);
2169 2204 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2170 2205 return;
2171 2206
2207 + case IPPROTO_DCCP:
2208 +		cmn_err(CE_NOTE, "icmp_inbound_error_fanout_v4");	/* XXX:DCCP trace */
2209 +		return;		/* XXX:DCCP mp is not freed or fanned out yet */
2210 +
2172 2211 case IPPROTO_ESP:
2173 2212 case IPPROTO_AH:
2174 2213 if (!ipsec_loaded(ipss)) {
2175 2214 ip_proto_not_sup(mp, ira);
2176 2215 return;
2177 2216 }
2178 2217
2179 2218 if (ipha->ipha_protocol == IPPROTO_ESP)
2180 2219 mp = ipsecesp_icmp_error(mp, ira);
2181 2220 else
2182 2221 mp = ipsecah_icmp_error(mp, ira);
2183 2222 if (mp == NULL)
2184 2223 return;
2185 2224
2186 2225 /* Just in case ipsec didn't preserve the NULL b_cont */
2187 2226 if (mp->b_cont != NULL) {
2188 2227 if (!pullupmsg(mp, -1))
2189 2228 goto discard_pkt;
2190 2229 }
2191 2230
2192 2231 /*
2193 2232 * Note that ira_pktlen and ira_ip_hdr_length are no longer
2194 2233 * correct, but we don't use them any more here.
2195 2234 *
2196 2235 		 * If successful, the mp has been modified to not include
2197 2236 * the ESP/AH header so we can fanout to the ULP's icmp
2198 2237 * error handler.
2199 2238 */
2200 2239 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2201 2240 goto truncated;
2202 2241
2203 2242 		/* Verify the modified message before any further processing. */
2204 2243 ipha = (ipha_t *)mp->b_rptr;
2205 2244 hdr_length = IPH_HDR_LENGTH(ipha);
2206 2245 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2207 2246 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2208 2247 freemsg(mp);
2209 2248 return;
2210 2249 }
2211 2250
2212 2251 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2213 2252 return;
2214 2253
2215 2254 case IPPROTO_ENCAP: {
2216 2255 /* Look for self-encapsulated packets that caused an error */
2217 2256 ipha_t *in_ipha;
2218 2257
2219 2258 /*
2220 2259 		 * Caller has verified that the length is
2221 2260 		 * at least the size of the IP header.
2222 2261 */
2223 2262 ASSERT(hdr_length >= sizeof (ipha_t));
2224 2263 /*
2225 2264 * Check the sanity of the inner IP header like
2226 2265 * we did for the outer header.
2227 2266 */
2228 2267 in_ipha = (ipha_t *)((uchar_t *)ipha + hdr_length);
2229 2268 if ((IPH_HDR_VERSION(in_ipha) != IPV4_VERSION)) {
2230 2269 goto discard_pkt;
2231 2270 }
2232 2271 if (IPH_HDR_LENGTH(in_ipha) < sizeof (ipha_t)) {
2233 2272 goto discard_pkt;
2234 2273 }
2235 2274 /* Check for Self-encapsulated tunnels */
2236 2275 if (in_ipha->ipha_src == ipha->ipha_src &&
2237 2276 in_ipha->ipha_dst == ipha->ipha_dst) {
2238 2277
2239 2278 mp = icmp_inbound_self_encap_error_v4(mp, ipha,
2240 2279 in_ipha);
2241 2280 if (mp == NULL)
2242 2281 goto discard_pkt;
2243 2282
2244 2283 /*
2245 2284 * Just in case self_encap didn't preserve the NULL
2246 2285 * b_cont
2247 2286 */
2248 2287 if (mp->b_cont != NULL) {
2249 2288 if (!pullupmsg(mp, -1))
2250 2289 goto discard_pkt;
2251 2290 }
2252 2291 /*
2253 2292 * Note that ira_pktlen and ira_ip_hdr_length are no
2254 2293 * longer correct, but we don't use them any more here.
2255 2294 */
2256 2295 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2257 2296 goto truncated;
2258 2297
2259 2298 /*
2260 2299 * Verify the modified message before any further
2261 2300 * processes.
2262 2301 */
2263 2302 ipha = (ipha_t *)mp->b_rptr;
2264 2303 hdr_length = IPH_HDR_LENGTH(ipha);
2265 2304 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2266 2305 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2267 2306 freemsg(mp);
2268 2307 return;
2269 2308 }
2270 2309
2271 2310 /*
2272 2311 			 * The packet in error is self-encapsulated,
2273 2312 			 * yet we are finding it further encapsulated,
2274 2313 			 * which we could not possibly have generated.
2275 2314 */
2276 2315 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2277 2316 goto discard_pkt;
2278 2317 }
2279 2318 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2280 2319 return;
2281 2320 }
2282 2321 		/* Not self-encapsulated */
2283 2322 /* FALLTHRU */
2284 2323 }
2285 2324 case IPPROTO_IPV6:
2286 2325 if ((connp = ipcl_iptun_classify_v4(&ripha.ipha_src,
2287 2326 &ripha.ipha_dst, ipst)) != NULL) {
2288 2327 ira->ira_flags |= IRAF_ICMP_ERROR;
2289 2328 connp->conn_recvicmp(connp, mp, NULL, ira);
2290 2329 CONN_DEC_REF(connp);
2291 2330 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2292 2331 return;
2293 2332 }
2294 2333 /*
2295 2334 		 * No IP tunnel is interested; fall through and see
2296 2335 * if a raw socket will want it.
2297 2336 */
2298 2337 /* FALLTHRU */
2299 2338 default:
2300 2339 ira->ira_flags |= IRAF_ICMP_ERROR;
2301 2340 ip_fanout_proto_v4(mp, &ripha, ira);
2302 2341 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2303 2342 return;
2304 2343 }
2305 2344 /* NOTREACHED */
2306 2345 discard_pkt:
2307 2346 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2308 2347 ip1dbg(("icmp_inbound_error_fanout_v4: drop pkt\n"));
2309 2348 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2310 2349 freemsg(mp);
2311 2350 return;
2312 2351
2313 2352 truncated:
2314 2353 	/* We pulled up everything already. Must be truncated */
2315 2354 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
2316 2355 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
2317 2356 freemsg(mp);
2318 2357 }
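
Once a DCCP classifier exists, the IPPROTO_DCCP arm above would plausibly
mirror the SCTP port-pair fanout. The sketch below is hypothetical:
ip_fanout_dccp does not exist in this template, and the final code may instead
do a reversed conn lookup as the TCP arm does:

	case IPPROTO_DCCP:
		up = (uint16_t *)((uchar_t *)ipha + hdr_length);
		/* Reverse the ports; the echoed header is in the form we sent it. */
		((uint16_t *)&ports)[0] = up[1];
		((uint16_t *)&ports)[1] = up[0];

		ira->ira_flags |= IRAF_ICMP_ERROR;
		ip_fanout_dccp(mp, &ripha, ports, ira);	/* hypothetical */
		ira->ira_flags &= ~IRAF_ICMP_ERROR;
		return;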
2319 2358
2320 2359 /*
2321 2360 * Common IP options parser.
2322 2361 *
2323 2362 * Setup routine: fill in *optp with options-parsing state, then
2324 2363 * tail-call ipoptp_next to return the first option.
2325 2364 */
2326 2365 uint8_t
2327 2366 ipoptp_first(ipoptp_t *optp, ipha_t *ipha)
2328 2367 {
2329 2368 uint32_t totallen; /* total length of all options */
2330 2369
2331 2370 totallen = ipha->ipha_version_and_hdr_length -
2332 2371 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
2333 2372 totallen <<= 2;
2334 2373 optp->ipoptp_next = (uint8_t *)(&ipha[1]);
2335 2374 optp->ipoptp_end = optp->ipoptp_next + totallen;
2336 2375 optp->ipoptp_flags = 0;
2337 2376 return (ipoptp_next(optp));
2338 2377 }
2339 2378
2340 2379 /* Like above but without an ipha_t */
2341 2380 uint8_t
2342 2381 ipoptp_first2(ipoptp_t *optp, uint32_t totallen, uint8_t *opt)
2343 2382 {
2344 2383 optp->ipoptp_next = opt;
2345 2384 optp->ipoptp_end = optp->ipoptp_next + totallen;
2346 2385 optp->ipoptp_flags = 0;
2347 2386 return (ipoptp_next(optp));
2348 2387 }
2349 2388
2350 2389 /*
2351 2390 * Common IP options parser: extract next option.
2352 2391 */
2353 2392 uint8_t
2354 2393 ipoptp_next(ipoptp_t *optp)
2355 2394 {
2356 2395 uint8_t *end = optp->ipoptp_end;
2357 2396 uint8_t *cur = optp->ipoptp_next;
2358 2397 uint8_t opt, len, pointer;
2359 2398
2360 2399 /*
2361 2400 * If cur > end already, then the ipoptp_end or ipoptp_next pointer
2362 2401 * has been corrupted.
2363 2402 */
2364 2403 ASSERT(cur <= end);
2365 2404
2366 2405 if (cur == end)
2367 2406 return (IPOPT_EOL);
2368 2407
2369 2408 opt = cur[IPOPT_OPTVAL];
2370 2409
2371 2410 /*
2372 2411 * Skip any NOP options.
2373 2412 */
2374 2413 while (opt == IPOPT_NOP) {
2375 2414 cur++;
2376 2415 if (cur == end)
2377 2416 return (IPOPT_EOL);
2378 2417 opt = cur[IPOPT_OPTVAL];
2379 2418 }
2380 2419
2381 2420 if (opt == IPOPT_EOL)
2382 2421 return (IPOPT_EOL);
2383 2422
2384 2423 /*
2385 2424 * Option requiring a length.
2386 2425 */
2387 2426 if ((cur + 1) >= end) {
2388 2427 optp->ipoptp_flags |= IPOPTP_ERROR;
2389 2428 return (IPOPT_EOL);
2390 2429 }
2391 2430 len = cur[IPOPT_OLEN];
2392 2431 if (len < 2) {
2393 2432 optp->ipoptp_flags |= IPOPTP_ERROR;
2394 2433 return (IPOPT_EOL);
2395 2434 }
2396 2435 optp->ipoptp_cur = cur;
2397 2436 optp->ipoptp_len = len;
2398 2437 optp->ipoptp_next = cur + len;
2399 2438 if (cur + len > end) {
2400 2439 optp->ipoptp_flags |= IPOPTP_ERROR;
2401 2440 return (IPOPT_EOL);
2402 2441 }
2403 2442
2404 2443 /*
2405 2444 * For the options which require a pointer field, make sure
2406 2445  * it's there, and make sure it points to either something
2407 2446 * inside this option, or the end of the option.
2408 2447 */
2409 2448 switch (opt) {
2410 2449 case IPOPT_RR:
2411 2450 case IPOPT_TS:
2412 2451 case IPOPT_LSRR:
2413 2452 case IPOPT_SSRR:
2414 2453 if (len <= IPOPT_OFFSET) {
2415 2454 optp->ipoptp_flags |= IPOPTP_ERROR;
2416 2455 return (opt);
2417 2456 }
2418 2457 pointer = cur[IPOPT_OFFSET];
2419 2458 if (pointer - 1 > len) {
2420 2459 optp->ipoptp_flags |= IPOPTP_ERROR;
2421 2460 return (opt);
2422 2461 }
2423 2462 break;
2424 2463 }
2425 2464
2426 2465 /*
2427 2466 * Sanity check the pointer field based on the type of the
2428 2467 * option.
2429 2468 */
2430 2469 switch (opt) {
2431 2470 case IPOPT_RR:
2432 2471 case IPOPT_SSRR:
2433 2472 case IPOPT_LSRR:
2434 2473 if (pointer < IPOPT_MINOFF_SR)
2435 2474 optp->ipoptp_flags |= IPOPTP_ERROR;
2436 2475 break;
2437 2476 case IPOPT_TS:
2438 2477 if (pointer < IPOPT_MINOFF_IT)
2439 2478 optp->ipoptp_flags |= IPOPTP_ERROR;
2440 2479 /*
2441 2480 * Note that the Internet Timestamp option also
2442 2481 * contains two four bit fields (the Overflow field,
2443 2482 * and the Flag field), which follow the pointer
2444 2483 * field. We don't need to check that these fields
2445 2484 * fall within the length of the option because this
2446 2485 		 * was implicitly done above. We've checked that the
2447 2486 * pointer value is at least IPOPT_MINOFF_IT, and that
2448 2487 * it falls within the option. Since IPOPT_MINOFF_IT >
2449 2488 * IPOPT_POS_OV_FLG, we don't need the explicit check.
2450 2489 */
2451 2490 ASSERT(len > IPOPT_POS_OV_FLG);
2452 2491 break;
2453 2492 }
2454 2493
2455 2494 return (opt);
2456 2495 }
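
The intended calling pattern for ipoptp_first/ipoptp_next is a plain for loop,
as the consumers later in this file show; a minimal sketch of the idiom:

	ipoptp_t	opts;
	uint8_t		optval;

	for (optval = ipoptp_first(&opts, ipha);
	    optval != IPOPT_EOL;
	    optval = ipoptp_next(&opts)) {
		uchar_t	*opt = opts.ipoptp_cur;
		uint8_t	optlen = opts.ipoptp_len;

		if (opts.ipoptp_flags & IPOPTP_ERROR)
			break;	/* malformed option; stop parsing */
		/* ... handle optval/opt/optlen, typically via a switch ... */
	}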
2457 2496
2458 2497 /*
2459 2498 * Use the outgoing IP header to create an IP_OPTIONS option the way
2460 2499 * it was passed down from the application.
2461 2500 *
2462 2501 * This is compatible with BSD in that it returns
2463 2502 * the reverse source route with the final destination
2464 2503 * as the last entry. The first 4 bytes of the option
2465 2504 * will contain the final destination.
2466 2505 */
2467 2506 int
2468 2507 ip_opt_get_user(conn_t *connp, uchar_t *buf)
2469 2508 {
2470 2509 ipoptp_t opts;
2471 2510 uchar_t *opt;
2472 2511 uint8_t optval;
2473 2512 uint8_t optlen;
2474 2513 uint32_t len = 0;
2475 2514 uchar_t *buf1 = buf;
2476 2515 uint32_t totallen;
2477 2516 ipaddr_t dst;
2478 2517 ip_pkt_t *ipp = &connp->conn_xmit_ipp;
2479 2518
2480 2519 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
2481 2520 return (0);
2482 2521
2483 2522 totallen = ipp->ipp_ipv4_options_len;
2484 2523 if (totallen & 0x3)
2485 2524 return (0);
2486 2525
2487 2526 buf += IP_ADDR_LEN; /* Leave room for final destination */
2488 2527 len += IP_ADDR_LEN;
2489 2528 bzero(buf1, IP_ADDR_LEN);
2490 2529
2491 2530 dst = connp->conn_faddr_v4;
2492 2531
2493 2532 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
2494 2533 optval != IPOPT_EOL;
2495 2534 optval = ipoptp_next(&opts)) {
2496 2535 int off;
2497 2536
2498 2537 opt = opts.ipoptp_cur;
2499 2538 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
2500 2539 break;
2501 2540 }
2502 2541 optlen = opts.ipoptp_len;
2503 2542
2504 2543 switch (optval) {
2505 2544 case IPOPT_SSRR:
2506 2545 case IPOPT_LSRR:
2507 2546
2508 2547 /*
2509 2548 * Insert destination as the first entry in the source
2510 2549 			 * route and move down the entries one step.
2511 2550 * The last entry gets placed at buf1.
2512 2551 */
2513 2552 buf[IPOPT_OPTVAL] = optval;
2514 2553 buf[IPOPT_OLEN] = optlen;
2515 2554 buf[IPOPT_OFFSET] = optlen;
2516 2555
2517 2556 off = optlen - IP_ADDR_LEN;
2518 2557 if (off < 0) {
2519 2558 /* No entries in source route */
2520 2559 break;
2521 2560 }
2522 2561 /* Last entry in source route if not already set */
2523 2562 if (dst == INADDR_ANY)
2524 2563 bcopy(opt + off, buf1, IP_ADDR_LEN);
2525 2564 off -= IP_ADDR_LEN;
2526 2565
2527 2566 while (off > 0) {
2528 2567 bcopy(opt + off,
2529 2568 buf + off + IP_ADDR_LEN,
2530 2569 IP_ADDR_LEN);
2531 2570 off -= IP_ADDR_LEN;
2532 2571 }
2533 2572 /* ipha_dst into first slot */
2534 2573 bcopy(&dst, buf + off + IP_ADDR_LEN,
2535 2574 IP_ADDR_LEN);
2536 2575 buf += optlen;
2537 2576 len += optlen;
2538 2577 break;
2539 2578
2540 2579 default:
2541 2580 bcopy(opt, buf, optlen);
2542 2581 buf += optlen;
2543 2582 len += optlen;
2544 2583 break;
2545 2584 }
2546 2585 }
2547 2586 done:
2548 2587 /* Pad the resulting options */
2549 2588 while (len & 0x3) {
2550 2589 *buf++ = IPOPT_EOL;
2551 2590 len++;
2552 2591 }
2553 2592 return (len);
2554 2593 }
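
A hedged usage sketch, e.g. when servicing a getsockopt(IP_OPTIONS)-style
request; the buffer bound is an assumption and must cover the options plus the
4-byte final destination this routine prepends:

	uchar_t	obuf[IP_MAX_OPT_LENGTH + IP_ADDR_LEN];	/* assumed bound */
	int	olen;

	olen = ip_opt_get_user(connp, obuf);
	/* olen is 0 or a multiple of 4; obuf[0..3] holds the final dest. */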
2555 2594
2556 2595 /*
2557 2596 * Update any record route or timestamp options to include this host.
2558 2597 * Reverse any source route option.
2559 2598 * This routine assumes that the options are well formed i.e. that they
2560 2599 * have already been checked.
2561 2600 */
2562 2601 static void
2563 2602 icmp_options_update(ipha_t *ipha)
2564 2603 {
2565 2604 ipoptp_t opts;
2566 2605 uchar_t *opt;
2567 2606 uint8_t optval;
2568 2607 ipaddr_t src; /* Our local address */
2569 2608 ipaddr_t dst;
2570 2609
2571 2610 ip2dbg(("icmp_options_update\n"));
2572 2611 src = ipha->ipha_src;
2573 2612 dst = ipha->ipha_dst;
2574 2613
2575 2614 for (optval = ipoptp_first(&opts, ipha);
2576 2615 optval != IPOPT_EOL;
2577 2616 optval = ipoptp_next(&opts)) {
2578 2617 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
2579 2618 opt = opts.ipoptp_cur;
2580 2619 ip2dbg(("icmp_options_update: opt %d, len %d\n",
2581 2620 optval, opts.ipoptp_len));
2582 2621 switch (optval) {
2583 2622 int off1, off2;
2584 2623 case IPOPT_SSRR:
2585 2624 case IPOPT_LSRR:
2586 2625 /*
2587 2626 * Reverse the source route. The first entry
2588 2627 * should be the next to last one in the current
2589 2628 * source route (the last entry is our address).
2590 2629 * The last entry should be the final destination.
2591 2630 */
2592 2631 off1 = IPOPT_MINOFF_SR - 1;
2593 2632 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
2594 2633 if (off2 < 0) {
2595 2634 /* No entries in source route */
2596 2635 ip1dbg((
2597 2636 "icmp_options_update: bad src route\n"));
2598 2637 break;
2599 2638 }
2600 2639 bcopy((char *)opt + off2, &dst, IP_ADDR_LEN);
2601 2640 bcopy(&ipha->ipha_dst, (char *)opt + off2, IP_ADDR_LEN);
2602 2641 bcopy(&dst, &ipha->ipha_dst, IP_ADDR_LEN);
2603 2642 off2 -= IP_ADDR_LEN;
2604 2643
2605 2644 while (off1 < off2) {
2606 2645 bcopy((char *)opt + off1, &src, IP_ADDR_LEN);
2607 2646 bcopy((char *)opt + off2, (char *)opt + off1,
2608 2647 IP_ADDR_LEN);
2609 2648 bcopy(&src, (char *)opt + off2, IP_ADDR_LEN);
2610 2649 off1 += IP_ADDR_LEN;
2611 2650 off2 -= IP_ADDR_LEN;
2612 2651 }
2613 2652 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
2614 2653 break;
2615 2654 }
2616 2655 }
2617 2656 }
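
As a rough worked example of the source-route reversal: if the request arrived
from original source S via a source route whose recorded entries were
<G1, G2> (offset exhausted), then after the caller has swapped
ipha_src/ipha_dst this routine leaves the reply with ipha_dst = G2, entries
<G1, S>, and the offset reset to IPOPT_MINOFF_SR, so the reply retraces the
gateways in reverse and terminates at S.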
2618 2657
2619 2658 /*
2620 2659 * Process received ICMP Redirect messages.
2621 2660 * Assumes the caller has verified that the headers are in the pulled up mblk.
2622 2661 * Consumes mp.
2623 2662 */
2624 2663 static void
2625 2664 icmp_redirect_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph, ip_recv_attr_t *ira)
2626 2665 {
2627 2666 ire_t *ire, *nire;
2628 2667 ire_t *prev_ire;
2629 2668 ipaddr_t src, dst, gateway;
2630 2669 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2631 2670 ipha_t *inner_ipha; /* Inner IP header */
2632 2671
2633 2672 /* Caller already pulled up everything. */
2634 2673 inner_ipha = (ipha_t *)&icmph[1];
2635 2674 src = ipha->ipha_src;
2636 2675 dst = inner_ipha->ipha_dst;
2637 2676 gateway = icmph->icmph_rd_gateway;
2638 2677 /* Make sure the new gateway is reachable somehow. */
2639 2678 ire = ire_ftable_lookup_v4(gateway, 0, 0, IRE_ONLINK, NULL,
2640 2679 ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst, NULL);
2641 2680 /*
2642 2681 * Make sure we had a route for the dest in question and that
2643 2682 * that route was pointing to the old gateway (the source of the
2644 2683  * redirect packet).
2645 2684 * We do longest match and then compare ire_gateway_addr below.
2646 2685 */
2647 2686 prev_ire = ire_ftable_lookup_v4(dst, 0, 0, 0, NULL, ALL_ZONES,
2648 2687 NULL, MATCH_IRE_DSTONLY, 0, ipst, NULL);
2649 2688 /*
2650 2689 * Check that
2651 2690 * the redirect was not from ourselves
2652 2691 * the new gateway and the old gateway are directly reachable
2653 2692 */
2654 2693 if (prev_ire == NULL || ire == NULL ||
2655 2694 (prev_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) ||
2656 2695 (prev_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) ||
2657 2696 !(ire->ire_type & IRE_IF_ALL) ||
2658 2697 prev_ire->ire_gateway_addr != src) {
2659 2698 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2660 2699 ip_drop_input("icmpInBadRedirects - ire", mp, ira->ira_ill);
2661 2700 freemsg(mp);
2662 2701 if (ire != NULL)
2663 2702 ire_refrele(ire);
2664 2703 if (prev_ire != NULL)
2665 2704 ire_refrele(prev_ire);
2666 2705 return;
2667 2706 }
2668 2707
2669 2708 ire_refrele(prev_ire);
2670 2709 ire_refrele(ire);
2671 2710
2672 2711 /*
2673 2712 * TODO: more precise handling for cases 0, 2, 3, the latter two
2674 2713 * require TOS routing
2675 2714 */
2676 2715 switch (icmph->icmph_code) {
2677 2716 case 0:
2678 2717 case 1:
2679 2718 /* TODO: TOS specificity for cases 2 and 3 */
2680 2719 case 2:
2681 2720 case 3:
2682 2721 break;
2683 2722 default:
2684 2723 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2685 2724 ip_drop_input("icmpInBadRedirects - code", mp, ira->ira_ill);
2686 2725 freemsg(mp);
2687 2726 return;
2688 2727 }
2689 2728 /*
2690 2729 * Create a Route Association. This will allow us to remember that
2691 2730 * someone we believe told us to use the particular gateway.
2692 2731 */
2693 2732 ire = ire_create(
2694 2733 (uchar_t *)&dst, /* dest addr */
2695 2734 (uchar_t *)&ip_g_all_ones, /* mask */
2696 2735 (uchar_t *)&gateway, /* gateway addr */
2697 2736 IRE_HOST,
2698 2737 NULL, /* ill */
2699 2738 ALL_ZONES,
2700 2739 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST),
2701 2740 NULL, /* tsol_gc_t */
2702 2741 ipst);
2703 2742
2704 2743 if (ire == NULL) {
2705 2744 freemsg(mp);
2706 2745 return;
2707 2746 }
2708 2747 nire = ire_add(ire);
2709 2748 /* Check if it was a duplicate entry */
2710 2749 if (nire != NULL && nire != ire) {
2711 2750 ASSERT(nire->ire_identical_ref > 1);
2712 2751 ire_delete(nire);
2713 2752 ire_refrele(nire);
2714 2753 nire = NULL;
2715 2754 }
2716 2755 ire = nire;
2717 2756 if (ire != NULL) {
2718 2757 ire_refrele(ire); /* Held in ire_add */
2719 2758
2720 2759 /* tell routing sockets that we received a redirect */
2721 2760 ip_rts_change(RTM_REDIRECT, dst, gateway, IP_HOST_MASK, 0, src,
2722 2761 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST), 0,
2723 2762 (RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_AUTHOR), ipst);
2724 2763 }
2725 2764
2726 2765 /*
2727 2766 * Delete any existing IRE_HOST type redirect ires for this destination.
2728 2767 * This together with the added IRE has the effect of
2729 2768 * modifying an existing redirect.
2730 2769 */
2731 2770 prev_ire = ire_ftable_lookup_v4(dst, 0, src, IRE_HOST, NULL,
2732 2771 ALL_ZONES, NULL, (MATCH_IRE_GW | MATCH_IRE_TYPE), 0, ipst, NULL);
2733 2772 if (prev_ire != NULL) {
2734 2773 		if (prev_ire->ire_flags & RTF_DYNAMIC)
2735 2774 ire_delete(prev_ire);
2736 2775 ire_refrele(prev_ire);
2737 2776 }
2738 2777
2739 2778 freemsg(mp);
2740 2779 }
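
/*
 * A minimal sketch of the insert-or-find idiom used with ire_add()
 * above: the add may hand back an existing identical entry, and the
 * caller then deletes the duplicate it created and keeps the returned
 * one. The table and types below are hypothetical stand-ins, not the
 * IRE implementation (no locking, no hashing).
 */
typedef struct entry {
	int		e_key;
	unsigned int	e_refcnt;
	struct entry	*e_next;
} entry_t;

static entry_t *entry_table;

static entry_t *
entry_add(entry_t *e)
{
	entry_t *cur;

	for (cur = entry_table; cur != NULL; cur = cur->e_next) {
		if (cur->e_key == e->e_key) {
			cur->e_refcnt++;	/* return held reference */
			return (cur);		/* caller frees its copy */
		}
	}
	e->e_refcnt = 1;
	e->e_next = entry_table;
	entry_table = e;
	return (e);
}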
2741 2780
2742 2781 /*
2743 2782 * Generate an ICMP parameter problem message.
2744 2783 * When called from ip_output side a minimal ip_recv_attr_t needs to be
2745 2784 * constructed by the caller.
2746 2785 */
2747 2786 static void
2748 2787 icmp_param_problem(mblk_t *mp, uint8_t ptr, ip_recv_attr_t *ira)
2749 2788 {
2750 2789 icmph_t icmph;
2751 2790 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2752 2791
2753 2792 mp = icmp_pkt_err_ok(mp, ira);
2754 2793 if (mp == NULL)
2755 2794 return;
2756 2795
2757 2796 bzero(&icmph, sizeof (icmph_t));
2758 2797 icmph.icmph_type = ICMP_PARAM_PROBLEM;
2759 2798 icmph.icmph_pp_ptr = ptr;
2760 2799 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutParmProbs);
2761 2800 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
2762 2801 }
2763 2802
2764 2803 /*
2765 2804 * Build and ship an IPv4 ICMP message using the packet data in mp, and
2766 2805 * the ICMP header pointed to by "stuff". (May be called as writer.)
2767 2806 * Note: assumes that icmp_pkt_err_ok has been called to verify that
2768 2807 * an icmp error packet can be sent.
2769 2808 * Assigns an appropriate source address to the packet. If ipha_dst is
2770 2809 * one of our addresses use it for source. Otherwise let ip_output_simple
2771 2810 * pick the source address.
2772 2811 */
2773 2812 static void
2774 2813 icmp_pkt(mblk_t *mp, void *stuff, size_t len, ip_recv_attr_t *ira)
2775 2814 {
2776 2815 ipaddr_t dst;
2777 2816 icmph_t *icmph;
2778 2817 ipha_t *ipha;
2779 2818 uint_t len_needed;
2780 2819 size_t msg_len;
2781 2820 mblk_t *mp1;
2782 2821 ipaddr_t src;
2783 2822 ire_t *ire;
2784 2823 ip_xmit_attr_t ixas;
2785 2824 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2786 2825
2787 2826 ipha = (ipha_t *)mp->b_rptr;
2788 2827
2789 2828 bzero(&ixas, sizeof (ixas));
2790 2829 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
2791 2830 ixas.ixa_zoneid = ira->ira_zoneid;
2792 2831 ixas.ixa_ifindex = 0;
2793 2832 ixas.ixa_ipst = ipst;
2794 2833 ixas.ixa_cred = kcred;
2795 2834 ixas.ixa_cpid = NOPID;
2796 2835 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
2797 2836 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
2798 2837
2799 2838 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2800 2839 /*
2801 2840 * Apply IPsec based on how IPsec was applied to
2802 2841 * the packet that had the error.
2803 2842 *
2804 2843 * If it was an outbound packet that caused the ICMP
2805 2844 * error, then the caller will have setup the IRA
2806 2845 * appropriately.
2807 2846 */
2808 2847 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
2809 2848 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2810 2849 /* Note: mp already consumed and ip_drop_packet done */
2811 2850 return;
2812 2851 }
2813 2852 } else {
2814 2853 /*
2815 2854 		 * This is in the clear. The ICMP message we are building
2816 2855 		 * here should go out in the clear, independent of our policy.
2817 2856 */
2818 2857 ixas.ixa_flags |= IXAF_NO_IPSEC;
2819 2858 }
2820 2859
2821 2860 /* Remember our eventual destination */
2822 2861 dst = ipha->ipha_src;
2823 2862
2824 2863 /*
2825 2864 * If the packet was for one of our unicast addresses, make
2826 2865 * sure we respond with that as the source. Otherwise
2827 2866 * have ip_output_simple pick the source address.
2828 2867 */
2829 2868 ire = ire_ftable_lookup_v4(ipha->ipha_dst, 0, 0,
2830 2869 (IRE_LOCAL|IRE_LOOPBACK), NULL, ira->ira_zoneid, NULL,
2831 2870 MATCH_IRE_TYPE|MATCH_IRE_ZONEONLY, 0, ipst, NULL);
2832 2871 if (ire != NULL) {
2833 2872 ire_refrele(ire);
2834 2873 src = ipha->ipha_dst;
2835 2874 } else {
2836 2875 src = INADDR_ANY;
2837 2876 ixas.ixa_flags |= IXAF_SET_SOURCE;
2838 2877 }
2839 2878
2840 2879 /*
2841 2880 	 * Check if we can send back more than 8 bytes in addition to
2842 2881 	 * the IP header. We try to send 64 bytes of data and the internal
2843 2882 	 * header in the special cases of IPv4-encapsulated IPv4 or IPv6.
2844 2883 */
2845 2884 len_needed = IPH_HDR_LENGTH(ipha);
2846 2885 if (ipha->ipha_protocol == IPPROTO_ENCAP ||
2847 2886 ipha->ipha_protocol == IPPROTO_IPV6) {
2848 2887 if (!pullupmsg(mp, -1)) {
2849 2888 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2850 2889 ip_drop_output("ipIfStatsOutDiscards", mp, NULL);
2851 2890 freemsg(mp);
2852 2891 return;
2853 2892 }
2854 2893 ipha = (ipha_t *)mp->b_rptr;
2855 2894
2856 2895 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2857 2896 len_needed += IPH_HDR_LENGTH(((uchar_t *)ipha +
2858 2897 len_needed));
2859 2898 } else {
2860 2899 ip6_t *ip6h = (ip6_t *)((uchar_t *)ipha + len_needed);
2861 2900
2862 2901 ASSERT(ipha->ipha_protocol == IPPROTO_IPV6);
2863 2902 len_needed += ip_hdr_length_v6(mp, ip6h);
2864 2903 }
2865 2904 }
2866 2905 len_needed += ipst->ips_ip_icmp_return;
2867 2906 msg_len = msgdsize(mp);
2868 2907 if (msg_len > len_needed) {
2869 2908 (void) adjmsg(mp, len_needed - msg_len);
2870 2909 msg_len = len_needed;
2871 2910 }
2872 2911 mp1 = allocb(sizeof (icmp_ipha) + len, BPRI_MED);
2873 2912 if (mp1 == NULL) {
2874 2913 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutErrors);
2875 2914 freemsg(mp);
2876 2915 return;
2877 2916 }
2878 2917 mp1->b_cont = mp;
2879 2918 mp = mp1;
2880 2919
2881 2920 /*
2882 2921 * Set IXAF_TRUSTED_ICMP so we can let the ICMP messages this
2883 2922 * node generates be accepted in peace by all on-host destinations.
2884 2923 * If we do NOT assume that all on-host destinations trust
2885 2924 * self-generated ICMP messages, then rework here, ip6.c, and spd.c.
2886 2925 * (Look for IXAF_TRUSTED_ICMP).
2887 2926 */
2888 2927 ixas.ixa_flags |= IXAF_TRUSTED_ICMP;
2889 2928
2890 2929 ipha = (ipha_t *)mp->b_rptr;
2891 2930 mp1->b_wptr = (uchar_t *)ipha + (sizeof (icmp_ipha) + len);
2892 2931 *ipha = icmp_ipha;
2893 2932 ipha->ipha_src = src;
2894 2933 ipha->ipha_dst = dst;
2895 2934 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
2896 2935 msg_len += sizeof (icmp_ipha) + len;
2897 2936 if (msg_len > IP_MAXPACKET) {
2898 2937 (void) adjmsg(mp, IP_MAXPACKET - msg_len);
2899 2938 msg_len = IP_MAXPACKET;
2900 2939 }
2901 2940 ipha->ipha_length = htons((uint16_t)msg_len);
2902 2941 icmph = (icmph_t *)&ipha[1];
2903 2942 bcopy(stuff, icmph, len);
2904 2943 icmph->icmph_checksum = 0;
2905 2944 icmph->icmph_checksum = IP_CSUM(mp, (int32_t)sizeof (ipha_t), 0);
2906 2945 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
2907 2946
2908 2947 (void) ip_output_simple(mp, &ixas);
2909 2948 ixa_cleanup(&ixas);
2910 2949 }
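
/*
 * icmp_pkt() above computes icmph_checksum with IP_CSUM(), which walks
 * an mblk chain. A minimal sketch of the underlying RFC 1071
 * ones-complement checksum over a single contiguous buffer; it assumes
 * the buffer holds data in network byte order, as on the wire.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t
inet_cksum(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len == 1)				/* odd trailing byte */
		sum += *(const uint8_t *)p;
	sum = (sum >> 16) + (sum & 0xffff);	/* fold the carries ... */
	sum += (sum >> 16);			/* ... twice */
	return ((uint16_t)~sum);
}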
2911 2950
2912 2951 /*
2913 2952 * Determine if an ICMP error packet can be sent given the rate limit.
2914 2953 * The limit consists of an average frequency (icmp_pkt_err_interval measured
2915 2954 * in milliseconds) and a burst size. Burst size number of packets can
2916 2955  * be sent arbitrarily closely spaced.
2917 2956 * The state is tracked using two variables to implement an approximate
2918 2957 * token bucket filter:
2919 2958 * icmp_pkt_err_last - lbolt value when the last burst started
2920 2959 * icmp_pkt_err_sent - number of packets sent in current burst
2921 2960 */
2922 2961 boolean_t
2923 2962 icmp_err_rate_limit(ip_stack_t *ipst)
2924 2963 {
2925 2964 clock_t now = TICK_TO_MSEC(ddi_get_lbolt());
2926 2965 uint_t refilled; /* Number of packets refilled in tbf since last */
2927 2966 /* Guard against changes by loading into local variable */
2928 2967 uint_t err_interval = ipst->ips_ip_icmp_err_interval;
2929 2968
2930 2969 if (err_interval == 0)
2931 2970 return (B_FALSE);
2932 2971
2933 2972 if (ipst->ips_icmp_pkt_err_last > now) {
2934 2973 /* 100HZ lbolt in ms for 32bit arch wraps every 49.7 days */
2935 2974 ipst->ips_icmp_pkt_err_last = 0;
2936 2975 ipst->ips_icmp_pkt_err_sent = 0;
2937 2976 }
2938 2977 /*
2939 2978 * If we are in a burst update the token bucket filter.
2940 2979 * Update the "last" time to be close to "now" but make sure
2941 2980 	 * we don't lose precision.
2942 2981 */
2943 2982 if (ipst->ips_icmp_pkt_err_sent != 0) {
2944 2983 refilled = (now - ipst->ips_icmp_pkt_err_last)/err_interval;
2945 2984 if (refilled > ipst->ips_icmp_pkt_err_sent) {
2946 2985 ipst->ips_icmp_pkt_err_sent = 0;
2947 2986 } else {
2948 2987 ipst->ips_icmp_pkt_err_sent -= refilled;
2949 2988 ipst->ips_icmp_pkt_err_last += refilled * err_interval;
2950 2989 }
2951 2990 }
2952 2991 if (ipst->ips_icmp_pkt_err_sent == 0) {
2953 2992 /* Start of new burst */
2954 2993 ipst->ips_icmp_pkt_err_last = now;
2955 2994 }
2956 2995 if (ipst->ips_icmp_pkt_err_sent < ipst->ips_ip_icmp_err_burst) {
2957 2996 ipst->ips_icmp_pkt_err_sent++;
2958 2997 ip1dbg(("icmp_err_rate_limit: %d sent in burst\n",
2959 2998 ipst->ips_icmp_pkt_err_sent));
2960 2999 return (B_FALSE);
2961 3000 }
2962 3001 ip1dbg(("icmp_err_rate_limit: dropped\n"));
2963 3002 return (B_TRUE);
2964 3003 }
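
/*
 * A stand-alone sketch of the two-variable token bucket described
 * above: "last" marks when the current burst started and "sent" counts
 * packets consumed from it; one credit is refilled per interval. With
 * interval_ms = 100 and burst = 10, at most 10 back-to-back errors go
 * out before the rate settles to one per 100 ms. Names and the
 * caller-supplied millisecond clock are illustrative.
 */
#include <stdint.h>

typedef struct tbf {
	int64_t		tbf_last;	/* ms when current burst started */
	uint32_t	tbf_sent;	/* packets sent in current burst */
} tbf_t;

/* Returns 1 if the packet must be dropped, 0 if it may be sent. */
static int
tbf_rate_limit(tbf_t *t, int64_t now_ms, uint32_t interval_ms,
    uint32_t burst)
{
	if (interval_ms == 0)
		return (0);			/* limiting disabled */

	if (t->tbf_sent != 0) {
		uint64_t refilled = (uint64_t)(now_ms - t->tbf_last) /
		    interval_ms;

		if (refilled > t->tbf_sent) {
			t->tbf_sent = 0;
		} else {
			t->tbf_sent -= (uint32_t)refilled;
			t->tbf_last += refilled * interval_ms;
		}
	}
	if (t->tbf_sent == 0)
		t->tbf_last = now_ms;		/* start of a new burst */
	if (t->tbf_sent < burst) {
		t->tbf_sent++;
		return (0);
	}
	return (1);				/* bucket empty: drop */
}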
2965 3004
2966 3005 /*
2967 3006 * Check if it is ok to send an IPv4 ICMP error packet in
2968 3007 * response to the IPv4 packet in mp.
2969 3008 * Free the message and return null if no
2970 3009 * ICMP error packet should be sent.
2971 3010 */
2972 3011 static mblk_t *
2973 3012 icmp_pkt_err_ok(mblk_t *mp, ip_recv_attr_t *ira)
2974 3013 {
2975 3014 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2976 3015 icmph_t *icmph;
2977 3016 ipha_t *ipha;
2978 3017 uint_t len_needed;
2979 3018
2980 3019 if (!mp)
2981 3020 return (NULL);
2982 3021 ipha = (ipha_t *)mp->b_rptr;
2983 3022 if (ip_csum_hdr(ipha)) {
2984 3023 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInCksumErrs);
2985 3024 ip_drop_input("ipIfStatsInCksumErrs", mp, NULL);
2986 3025 freemsg(mp);
2987 3026 return (NULL);
2988 3027 }
2989 3028 if (ip_type_v4(ipha->ipha_dst, ipst) == IRE_BROADCAST ||
2990 3029 ip_type_v4(ipha->ipha_src, ipst) == IRE_BROADCAST ||
2991 3030 CLASSD(ipha->ipha_dst) ||
2992 3031 CLASSD(ipha->ipha_src) ||
2993 3032 (ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET)) {
2994 3033 /* Note: only errors to the fragment with offset 0 */
2995 3034 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
2996 3035 freemsg(mp);
2997 3036 return (NULL);
2998 3037 }
2999 3038 if (ipha->ipha_protocol == IPPROTO_ICMP) {
3000 3039 /*
3001 3040 * Check the ICMP type. RFC 1122 sez: don't send ICMP
3002 3041 * errors in response to any ICMP errors.
3003 3042 */
3004 3043 len_needed = IPH_HDR_LENGTH(ipha) + ICMPH_SIZE;
3005 3044 if (mp->b_wptr - mp->b_rptr < len_needed) {
3006 3045 if (!pullupmsg(mp, len_needed)) {
3007 3046 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
3008 3047 freemsg(mp);
3009 3048 return (NULL);
3010 3049 }
3011 3050 ipha = (ipha_t *)mp->b_rptr;
3012 3051 }
3013 3052 icmph = (icmph_t *)
3014 3053 (&((char *)ipha)[IPH_HDR_LENGTH(ipha)]);
3015 3054 switch (icmph->icmph_type) {
3016 3055 case ICMP_DEST_UNREACHABLE:
3017 3056 case ICMP_SOURCE_QUENCH:
3018 3057 case ICMP_TIME_EXCEEDED:
3019 3058 case ICMP_PARAM_PROBLEM:
3020 3059 case ICMP_REDIRECT:
3021 3060 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3022 3061 freemsg(mp);
3023 3062 return (NULL);
3024 3063 default:
3025 3064 break;
3026 3065 }
3027 3066 }
3028 3067 /*
3029 3068 * If this is a labeled system, then check to see if we're allowed to
3030 3069 * send a response to this particular sender. If not, then just drop.
3031 3070 */
3032 3071 if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
3033 3072 ip2dbg(("icmp_pkt_err_ok: can't respond to packet\n"));
3034 3073 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3035 3074 freemsg(mp);
3036 3075 return (NULL);
3037 3076 }
3038 3077 if (icmp_err_rate_limit(ipst)) {
3039 3078 /*
3040 3079 * Only send ICMP error packets every so often.
3041 3080 * This should be done on a per port/source basis,
3042 3081 * but for now this will suffice.
3043 3082 */
3044 3083 freemsg(mp);
3045 3084 return (NULL);
3046 3085 }
3047 3086 return (mp);
3048 3087 }
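
/*
 * The RFC 1122 restrictions enforced above, restated compactly over
 * already-parsed fields (addresses in host byte order). This is a
 * simplification: it omits the header-checksum, label, and rate-limit
 * checks, and its broadcast test only catches the limited broadcast.
 */
#include <stdint.h>

#define	IS_CLASSD(a)	(((a) & 0xf0000000U) == 0xe0000000U)

static int
may_send_icmp_error(uint32_t src, uint32_t dst, unsigned int frag_offset,
    int is_icmp_error)
{
	if (src == 0xffffffffU || dst == 0xffffffffU)
		return (0);	/* never error about broadcasts */
	if (IS_CLASSD(src) || IS_CLASSD(dst))
		return (0);	/* never error about multicasts */
	if (frag_offset != 0)
		return (0);	/* only the fragment with offset 0 */
	if (is_icmp_error)
		return (0);	/* never error about an ICMP error */
	return (1);
}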
3049 3088
3050 3089 /*
3051 3090 * Called when a packet was sent out the same link that it arrived on.
3052 3091 * Check if it is ok to send a redirect and then send it.
3053 3092 */
3054 3093 void
3055 3094 ip_send_potential_redirect_v4(mblk_t *mp, ipha_t *ipha, ire_t *ire,
3056 3095 ip_recv_attr_t *ira)
3057 3096 {
3058 3097 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3059 3098 ipaddr_t src, nhop;
3060 3099 mblk_t *mp1;
3061 3100 ire_t *nhop_ire;
3062 3101
3063 3102 /*
3064 3103 * Check the source address to see if it originated
3065 3104 * on the same logical subnet it is going back out on.
3066 3105 * If so, we should be able to send it a redirect.
3067 3106 * Avoid sending a redirect if the destination
3068 3107 * is directly connected (i.e., we matched an IRE_ONLINK),
3069 3108 * or if the packet was source routed out this interface.
3070 3109 *
3071 3110 * We avoid sending a redirect if the
3072 3111 * destination is directly connected
3073 3112 * because it is possible that multiple
3074 3113 * IP subnets may have been configured on
3075 3114 * the link, and the source may not
3076 3115 	 * be on the same subnet as the IP destination,
3077 3116 * even though they are on the same
3078 3117 * physical link.
3079 3118 */
3080 3119 if ((ire->ire_type & IRE_ONLINK) ||
3081 3120 ip_source_routed(ipha, ipst))
3082 3121 return;
3083 3122
3084 3123 nhop_ire = ire_nexthop(ire);
3085 3124 if (nhop_ire == NULL)
3086 3125 return;
3087 3126
3088 3127 nhop = nhop_ire->ire_addr;
3089 3128
3090 3129 if (nhop_ire->ire_type & IRE_IF_CLONE) {
3091 3130 ire_t *ire2;
3092 3131
3093 3132 /* Follow ire_dep_parent to find non-clone IRE_INTERFACE */
3094 3133 mutex_enter(&nhop_ire->ire_lock);
3095 3134 ire2 = nhop_ire->ire_dep_parent;
3096 3135 if (ire2 != NULL)
3097 3136 ire_refhold(ire2);
3098 3137 mutex_exit(&nhop_ire->ire_lock);
3099 3138 ire_refrele(nhop_ire);
3100 3139 nhop_ire = ire2;
3101 3140 }
3102 3141 if (nhop_ire == NULL)
3103 3142 return;
3104 3143
3105 3144 ASSERT(!(nhop_ire->ire_type & IRE_IF_CLONE));
3106 3145
3107 3146 src = ipha->ipha_src;
3108 3147
3109 3148 /*
3110 3149 * We look at the interface ire for the nexthop,
3111 3150 * to see if ipha_src is in the same subnet
3112 3151 * as the nexthop.
3113 3152 */
3114 3153 if ((src & nhop_ire->ire_mask) == (nhop & nhop_ire->ire_mask)) {
3115 3154 /*
3116 3155 * The source is directly connected.
3117 3156 */
3118 3157 mp1 = copymsg(mp);
3119 3158 if (mp1 != NULL) {
3120 3159 icmp_send_redirect(mp1, nhop, ira);
3121 3160 }
3122 3161 }
3123 3162 ire_refrele(nhop_ire);
3124 3163 }
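
/*
 * The redirect decision above reduces to one masked comparison: is the
 * packet's source on the same subnet as the chosen nexthop? A worked
 * example with a /24 mask; addresses are spelled in host byte order
 * for readability.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t mask = 0xffffff00U;	/* 255.255.255.0 */
	uint32_t src = 0xc0000205U;	/* 192.0.2.5 */
	uint32_t nhop = 0xc0000201U;	/* 192.0.2.1 */

	if ((src & mask) == (nhop & mask))
		printf("same subnet; a redirect can help the source\n");
	else
		printf("different subnet; do not send a redirect\n");
	return (0);
}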
3125 3164
3126 3165 /*
3127 3166 * Generate an ICMP redirect message.
3128 3167 */
3129 3168 static void
3130 3169 icmp_send_redirect(mblk_t *mp, ipaddr_t gateway, ip_recv_attr_t *ira)
3131 3170 {
3132 3171 icmph_t icmph;
3133 3172 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3134 3173
3135 3174 mp = icmp_pkt_err_ok(mp, ira);
3136 3175 if (mp == NULL)
3137 3176 return;
3138 3177
3139 3178 bzero(&icmph, sizeof (icmph_t));
3140 3179 icmph.icmph_type = ICMP_REDIRECT;
3141 3180 icmph.icmph_code = 1;
3142 3181 icmph.icmph_rd_gateway = gateway;
3143 3182 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutRedirects);
3144 3183 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3145 3184 }
3146 3185
3147 3186 /*
3148 3187 * Generate an ICMP time exceeded message.
3149 3188 */
3150 3189 void
3151 3190 icmp_time_exceeded(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3152 3191 {
3153 3192 icmph_t icmph;
3154 3193 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3155 3194
3156 3195 mp = icmp_pkt_err_ok(mp, ira);
3157 3196 if (mp == NULL)
3158 3197 return;
3159 3198
3160 3199 bzero(&icmph, sizeof (icmph_t));
3161 3200 icmph.icmph_type = ICMP_TIME_EXCEEDED;
3162 3201 icmph.icmph_code = code;
3163 3202 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimeExcds);
3164 3203 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3165 3204 }
3166 3205
3167 3206 /*
3168 3207 * Generate an ICMP unreachable message.
3169 3208 * When called from ip_output side a minimal ip_recv_attr_t needs to be
3170 3209 * constructed by the caller.
3171 3210 */
3172 3211 void
3173 3212 icmp_unreachable(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3174 3213 {
3175 3214 icmph_t icmph;
3176 3215 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3177 3216
3178 3217 mp = icmp_pkt_err_ok(mp, ira);
3179 3218 if (mp == NULL)
3180 3219 return;
3181 3220
3182 3221 bzero(&icmph, sizeof (icmph_t));
3183 3222 icmph.icmph_type = ICMP_DEST_UNREACHABLE;
3184 3223 icmph.icmph_code = code;
3185 3224 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);
3186 3225 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3187 3226 }
3188 3227
3189 3228 /*
3190 3229  * Latch in the IPsec state for a stream based on the policy in the listener
3191 3230 * and the actions in the ip_recv_attr_t.
3192 3231 * Called directly from TCP and SCTP.
3193 3232 */
3194 3233 boolean_t
3195 3234 ip_ipsec_policy_inherit(conn_t *connp, conn_t *lconnp, ip_recv_attr_t *ira)
3196 3235 {
3197 3236 ASSERT(lconnp->conn_policy != NULL);
3198 3237 ASSERT(connp->conn_policy == NULL);
3199 3238
3200 3239 IPPH_REFHOLD(lconnp->conn_policy);
3201 3240 connp->conn_policy = lconnp->conn_policy;
3202 3241
3203 3242 if (ira->ira_ipsec_action != NULL) {
3204 3243 if (connp->conn_latch == NULL) {
3205 3244 connp->conn_latch = iplatch_create();
3206 3245 if (connp->conn_latch == NULL)
3207 3246 return (B_FALSE);
3208 3247 }
3209 3248 ipsec_latch_inbound(connp, ira);
3210 3249 }
3211 3250 return (B_TRUE);
3212 3251 }
3213 3252
3214 3253 /*
3215 3254 * Verify whether or not the IP address is a valid local address.
3216 3255 * Could be a unicast, including one for a down interface.
3217 3256 * If allow_mcbc then a multicast or broadcast address is also
3218 3257 * acceptable.
3219 3258 *
3220 3259 * In the case of a broadcast/multicast address, however, the
3221 3260 * upper protocol is expected to reset the src address
3222 3261 * to zero when we return IPVL_MCAST/IPVL_BCAST so that
3223 3262 	 * no packets are emitted with a broadcast/multicast address as
3224 3263 	 * the source address (that violates Host Requirements, RFC 1122).
3225 3264 * The addresses valid for bind are:
3226 3265 * (1) - INADDR_ANY (0)
3227 3266 * (2) - IP address of an UP interface
3228 3267 * (3) - IP address of a DOWN interface
3229 3268 * (4) - valid local IP broadcast addresses. In this case
3230 3269 * the conn will only receive packets destined to
3231 3270 * the specified broadcast address.
3232 3271 * (5) - a multicast address. In this case
3233 3272 * the conn will only receive packets destined to
3234 3273 * the specified multicast address. Note: the
3235 3274 * application still has to issue an
3236 3275 * IP_ADD_MEMBERSHIP socket option.
3237 3276 *
3238 3277 * In all the above cases, the bound address must be valid in the current zone.
3239 3278 * When the address is loopback, multicast or broadcast, there might be many
3240 3279 * matching IREs so bind has to look up based on the zone.
3241 3280 */
3242 3281 ip_laddr_t
3243 3282 ip_laddr_verify_v4(ipaddr_t src_addr, zoneid_t zoneid,
3244 3283 ip_stack_t *ipst, boolean_t allow_mcbc)
3245 3284 {
3246 3285 ire_t *src_ire;
3247 3286
3248 3287 ASSERT(src_addr != INADDR_ANY);
3249 3288
3250 3289 src_ire = ire_ftable_lookup_v4(src_addr, 0, 0, 0,
3251 3290 NULL, zoneid, NULL, MATCH_IRE_ZONEONLY, 0, ipst, NULL);
3252 3291
3253 3292 /*
3254 3293 * If an address other than in6addr_any is requested,
3255 3294 	 * we verify that it is a valid address for bind.
3256 3295 	 * Note: the following code is in if-else-if form for
3257 3296 * readability compared to a condition check.
3258 3297 */
3259 3298 if (src_ire != NULL && (src_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK))) {
3260 3299 /*
3261 3300 * (2) Bind to address of local UP interface
3262 3301 */
3263 3302 ire_refrele(src_ire);
3264 3303 return (IPVL_UNICAST_UP);
3265 3304 } else if (src_ire != NULL && src_ire->ire_type & IRE_BROADCAST) {
3266 3305 /*
3267 3306 * (4) Bind to broadcast address
3268 3307 */
3269 3308 ire_refrele(src_ire);
3270 3309 if (allow_mcbc)
3271 3310 return (IPVL_BCAST);
3272 3311 else
3273 3312 return (IPVL_BAD);
3274 3313 } else if (CLASSD(src_addr)) {
3275 3314 /* (5) bind to multicast address. */
3276 3315 if (src_ire != NULL)
3277 3316 ire_refrele(src_ire);
3278 3317
3279 3318 if (allow_mcbc)
3280 3319 return (IPVL_MCAST);
3281 3320 else
3282 3321 return (IPVL_BAD);
3283 3322 } else {
3284 3323 ipif_t *ipif;
3285 3324
3286 3325 /*
3287 3326 * (3) Bind to address of local DOWN interface?
3288 3327 * (ipif_lookup_addr() looks up all interfaces
3289 3328 * but we do not get here for UP interfaces
3290 3329 * - case (2) above)
3291 3330 */
3292 3331 if (src_ire != NULL)
3293 3332 ire_refrele(src_ire);
3294 3333
3295 3334 ipif = ipif_lookup_addr(src_addr, NULL, zoneid, ipst);
3296 3335 if (ipif == NULL)
3297 3336 return (IPVL_BAD);
3298 3337
3299 3338 /* Not a useful source? */
3300 3339 if (ipif->ipif_flags & (IPIF_NOLOCAL | IPIF_ANYCAST)) {
3301 3340 ipif_refrele(ipif);
3302 3341 return (IPVL_BAD);
3303 3342 }
3304 3343 ipif_refrele(ipif);
3305 3344 return (IPVL_UNICAST_DOWN);
3306 3345 }
3307 3346 }
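
/*
 * A hedged sketch of how a transport bind path might consume the
 * classification above; the caller shown here is hypothetical and the
 * fragment is not compilable stand-alone. Passing allow_mcbc = B_TRUE
 * models a UDP-style bind that accepts multicast/broadcast addresses.
 */
static int
bind_check_laddr(ipaddr_t laddr, zoneid_t zoneid, ip_stack_t *ipst)
{
	ip_laddr_t ltype;

	if (laddr == INADDR_ANY)
		return (0);		/* wildcard bind; nothing to check */

	ltype = ip_laddr_verify_v4(laddr, zoneid, ipst, B_TRUE);
	switch (ltype) {
	case IPVL_UNICAST_UP:
	case IPVL_UNICAST_DOWN:
		return (0);
	case IPVL_MCAST:
	case IPVL_BCAST:
		/* ULP must zero the source address; see comment above */
		return (0);
	default:
		return (EADDRNOTAVAIL);	/* IPVL_BAD */
	}
}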
3308 3347
3309 3348 /*
3310 3349 * Insert in the bind fanout for IPv4 and IPv6.
3311 3350 * The caller should already have used ip_laddr_verify_v*() before calling
3312 3351 * this.
3313 3352 */
3314 3353 int
3315 3354 ip_laddr_fanout_insert(conn_t *connp)
3316 3355 {
3317 3356 int error;
3318 3357
3319 3358 /*
3320 3359 * Allow setting new policies. For example, disconnects result
3321 3360 * in us being called. As we would have set conn_policy_cached
3322 3361 * to B_TRUE before, we should set it to B_FALSE, so that policy
3323 3362 * can change after the disconnect.
3324 3363 */
3325 3364 connp->conn_policy_cached = B_FALSE;
3326 3365
3327 3366 error = ipcl_bind_insert(connp);
3328 3367 if (error != 0) {
3329 3368 if (connp->conn_anon_port) {
3330 3369 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
3331 3370 connp->conn_mlp_type, connp->conn_proto,
3332 3371 ntohs(connp->conn_lport), B_FALSE);
3333 3372 }
3334 3373 connp->conn_mlp_type = mlptSingle;
3335 3374 }
3336 3375 return (error);
3337 3376 }
3338 3377
3339 3378 /*
3340 3379 * Verify that both the source and destination addresses are valid. If
3341 3380 * IPDF_VERIFY_DST is not set, then the destination address may be unreachable,
3342 3381 * i.e. have no route to it. Protocols like TCP want to verify destination
3343 3382 * reachability, while tunnels do not.
3344 3383 *
3345 3384 * Determine the route, the interface, and (optionally) the source address
3346 3385 * to use to reach a given destination.
3347 3386 * Note that we allow connect to broadcast and multicast addresses when
3348 3387 * IPDF_ALLOW_MCBC is set.
3349 3388  * first_hop and dst_addr are normally the same, but with source routing
3350 3389  * they will differ; in that case first_hop is what we'll use for the
3351 3390  * routing lookup, but the dce and label checks will be done on dst_addr.
3352 3391 *
3353 3392 * If uinfo is set, then we fill in the best available information
3354 3393 * we have for the destination. This is based on (in priority order) any
3355 3394 * metrics and path MTU stored in a dce_t, route metrics, and finally the
3356 3395 * ill_mtu/ill_mc_mtu.
3357 3396 *
3358 3397 * Tsol note: If we have a source route then dst_addr != firsthop. But we
3359 3398 * always do the label check on dst_addr.
3360 3399 */
3361 3400 int
3362 3401 ip_set_destination_v4(ipaddr_t *src_addrp, ipaddr_t dst_addr, ipaddr_t firsthop,
3363 3402 ip_xmit_attr_t *ixa, iulp_t *uinfo, uint32_t flags, uint_t mac_mode)
3364 3403 {
3365 3404 ire_t *ire = NULL;
3366 3405 int error = 0;
3367 3406 ipaddr_t setsrc; /* RTF_SETSRC */
3368 3407 zoneid_t zoneid = ixa->ixa_zoneid; /* Honors SO_ALLZONES */
3369 3408 ip_stack_t *ipst = ixa->ixa_ipst;
3370 3409 dce_t *dce;
3371 3410 uint_t pmtu;
3372 3411 uint_t generation;
3373 3412 nce_t *nce;
3374 3413 ill_t *ill = NULL;
3375 3414 boolean_t multirt = B_FALSE;
3376 3415
3377 3416 ASSERT(ixa->ixa_flags & IXAF_IS_IPV4);
3378 3417
3379 3418 /*
3380 3419 * We never send to zero; the ULPs map it to the loopback address.
3381 3420 	 * We can't allow it since we use zero to mean uninitialized in some
3382 3421 * places.
3383 3422 */
3384 3423 ASSERT(dst_addr != INADDR_ANY);
3385 3424
3386 3425 if (is_system_labeled()) {
3387 3426 ts_label_t *tsl = NULL;
3388 3427
3389 3428 error = tsol_check_dest(ixa->ixa_tsl, &dst_addr, IPV4_VERSION,
3390 3429 mac_mode, (flags & IPDF_ZONE_IS_GLOBAL) != 0, &tsl);
3391 3430 if (error != 0)
3392 3431 return (error);
3393 3432 if (tsl != NULL) {
3394 3433 /* Update the label */
3395 3434 ip_xmit_attr_replace_tsl(ixa, tsl);
3396 3435 }
3397 3436 }
3398 3437
3399 3438 setsrc = INADDR_ANY;
3400 3439 /*
3401 3440 	 * Select a route; for IPMP interfaces, we would only select
3402 3441 * a "hidden" route (i.e., going through a specific under_ill)
3403 3442 * if ixa_ifindex has been specified.
3404 3443 */
3405 3444 ire = ip_select_route_v4(firsthop, *src_addrp, ixa,
3406 3445 &generation, &setsrc, &error, &multirt);
3407 3446 ASSERT(ire != NULL); /* IRE_NOROUTE if none found */
3408 3447 if (error != 0)
3409 3448 goto bad_addr;
3410 3449
3411 3450 /*
3412 3451 * ire can't be a broadcast or multicast unless IPDF_ALLOW_MCBC is set.
3413 3452 * If IPDF_VERIFY_DST is set, the destination must be reachable;
3414 3453 * Otherwise the destination needn't be reachable.
3415 3454 *
3416 3455 * If we match on a reject or black hole, then we've got a
3417 3456 * local failure. May as well fail out the connect() attempt,
3418 3457 * since it's never going to succeed.
3419 3458 */
3420 3459 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
3421 3460 /*
3422 3461 * If we're verifying destination reachability, we always want
3423 3462 * to complain here.
3424 3463 *
3425 3464 * If we're not verifying destination reachability but the
3426 3465 * destination has a route, we still want to fail on the
3427 3466 * temporary address and broadcast address tests.
3428 3467 *
3429 3468 	 * In both cases we let the code continue so some reasonable
3430 3469 	 * information is returned to the caller. That enables the
3431 3470 	 * caller to use (and even cache) the IRE. conn_ip_output will
3432 3471 * use the generation mismatch path to check for the unreachable
3433 3472 * case thereby avoiding any specific check in the main path.
3434 3473 */
3435 3474 ASSERT(generation == IRE_GENERATION_VERIFY);
3436 3475 if (flags & IPDF_VERIFY_DST) {
3437 3476 /*
3438 3477 * Set errno but continue to set up ixa_ire to be
3439 3478 * the RTF_REJECT|RTF_BLACKHOLE IRE.
3440 3479 * That allows callers to use ip_output to get an
3441 3480 * ICMP error back.
3442 3481 */
3443 3482 if (!(ire->ire_type & IRE_HOST))
3444 3483 error = ENETUNREACH;
3445 3484 else
3446 3485 error = EHOSTUNREACH;
3447 3486 }
3448 3487 }
3449 3488
3450 3489 if ((ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST)) &&
3451 3490 !(flags & IPDF_ALLOW_MCBC)) {
3452 3491 ire_refrele(ire);
3453 3492 ire = ire_reject(ipst, B_FALSE);
3454 3493 generation = IRE_GENERATION_VERIFY;
3455 3494 error = ENETUNREACH;
3456 3495 }
3457 3496
3458 3497 /* Cache things */
3459 3498 if (ixa->ixa_ire != NULL)
3460 3499 ire_refrele_notr(ixa->ixa_ire);
3461 3500 #ifdef DEBUG
3462 3501 ire_refhold_notr(ire);
3463 3502 ire_refrele(ire);
3464 3503 #endif
3465 3504 ixa->ixa_ire = ire;
3466 3505 ixa->ixa_ire_generation = generation;
3467 3506
3468 3507 /*
3469 3508 * Ensure that ixa_dce is always set any time that ixa_ire is set,
3470 3509 * since some callers will send a packet to conn_ip_output() even if
3471 3510 * there's an error.
3472 3511 */
3473 3512 if (flags & IPDF_UNIQUE_DCE) {
3474 3513 		/* Fall back to the default dce if allocation fails */
3475 3514 dce = dce_lookup_and_add_v4(dst_addr, ipst);
3476 3515 if (dce != NULL)
3477 3516 generation = dce->dce_generation;
3478 3517 else
3479 3518 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3480 3519 } else {
3481 3520 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3482 3521 }
3483 3522 ASSERT(dce != NULL);
3484 3523 if (ixa->ixa_dce != NULL)
3485 3524 dce_refrele_notr(ixa->ixa_dce);
3486 3525 #ifdef DEBUG
3487 3526 dce_refhold_notr(dce);
3488 3527 dce_refrele(dce);
3489 3528 #endif
3490 3529 ixa->ixa_dce = dce;
3491 3530 ixa->ixa_dce_generation = generation;
3492 3531
3493 3532 /*
3494 3533 * For multicast with multirt we have a flag passed back from
3495 3534 * ire_lookup_multi_ill_v4 since we don't have an IRE for each
3496 3535 * possible multicast address.
3497 3536 * We also need a flag for multicast since we can't check
3498 3537 * whether RTF_MULTIRT is set in ixa_ire for multicast.
3499 3538 */
3500 3539 if (multirt) {
3501 3540 ixa->ixa_postfragfn = ip_postfrag_multirt_v4;
3502 3541 ixa->ixa_flags |= IXAF_MULTIRT_MULTICAST;
3503 3542 } else {
3504 3543 ixa->ixa_postfragfn = ire->ire_postfragfn;
3505 3544 ixa->ixa_flags &= ~IXAF_MULTIRT_MULTICAST;
3506 3545 }
3507 3546 if (!(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3508 3547 /* Get an nce to cache. */
3509 3548 nce = ire_to_nce(ire, firsthop, NULL);
3510 3549 if (nce == NULL) {
3511 3550 /* Allocation failure? */
3512 3551 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3513 3552 } else {
3514 3553 if (ixa->ixa_nce != NULL)
3515 3554 nce_refrele(ixa->ixa_nce);
3516 3555 ixa->ixa_nce = nce;
3517 3556 }
3518 3557 }
3519 3558
3520 3559 /*
3521 3560 * If the source address is a loopback address, the
3522 3561 * destination had best be local or multicast.
3523 3562 * If we are sending to an IRE_LOCAL using a loopback source then
3524 3563 * it had better be the same zoneid.
3525 3564 */
3526 3565 if (*src_addrp == htonl(INADDR_LOOPBACK)) {
3527 3566 if ((ire->ire_type & IRE_LOCAL) && ire->ire_zoneid != zoneid) {
3528 3567 ire = NULL; /* Stored in ixa_ire */
3529 3568 error = EADDRNOTAVAIL;
3530 3569 goto bad_addr;
3531 3570 }
3532 3571 if (!(ire->ire_type & (IRE_LOOPBACK|IRE_LOCAL|IRE_MULTICAST))) {
3533 3572 ire = NULL; /* Stored in ixa_ire */
3534 3573 error = EADDRNOTAVAIL;
3535 3574 goto bad_addr;
3536 3575 }
3537 3576 }
3538 3577 if (ire->ire_type & IRE_BROADCAST) {
3539 3578 /*
3540 3579 * If the ULP didn't have a specified source, then we
3541 3580 * make sure we reselect the source when sending
3542 3581 * broadcasts out different interfaces.
3543 3582 */
3544 3583 if (flags & IPDF_SELECT_SRC)
3545 3584 ixa->ixa_flags |= IXAF_SET_SOURCE;
3546 3585 else
3547 3586 ixa->ixa_flags &= ~IXAF_SET_SOURCE;
3548 3587 }
3549 3588
3550 3589 /*
3551 3590 * Does the caller want us to pick a source address?
3552 3591 */
3553 3592 if (flags & IPDF_SELECT_SRC) {
3554 3593 ipaddr_t src_addr;
3555 3594
3556 3595 /*
3557 3596 		 * We use ire_nexthop_ill to avoid the under ipmp
3558 3597 		 * interface for source address selection. Note that for ipmp
3559 3598 		 * probe packets, ixa_ifindex would have been specified, and
3560 3599 		 * the ip_select_route() invocation would have picked an ire
3561 3600 		 * with ire_ill pointing at an under interface.
3562 3601 */
3563 3602 ill = ire_nexthop_ill(ire);
3564 3603
3565 3604 /* If unreachable we have no ill but need some source */
3566 3605 if (ill == NULL) {
3567 3606 src_addr = htonl(INADDR_LOOPBACK);
3568 3607 /* Make sure we look for a better source address */
3569 3608 generation = SRC_GENERATION_VERIFY;
3570 3609 } else {
3571 3610 error = ip_select_source_v4(ill, setsrc, dst_addr,
3572 3611 ixa->ixa_multicast_ifaddr, zoneid,
3573 3612 ipst, &src_addr, &generation, NULL);
3574 3613 if (error != 0) {
3575 3614 ire = NULL; /* Stored in ixa_ire */
3576 3615 goto bad_addr;
3577 3616 }
3578 3617 }
3579 3618
3580 3619 /*
3581 3620 		 * We allow the source address to be down.
3582 3621 * However, we check that we don't use the loopback address
3583 3622 * as a source when sending out on the wire.
3584 3623 */
3585 3624 if ((src_addr == htonl(INADDR_LOOPBACK)) &&
3586 3625 !(ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK|IRE_MULTICAST)) &&
3587 3626 !(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3588 3627 ire = NULL; /* Stored in ixa_ire */
3589 3628 error = EADDRNOTAVAIL;
3590 3629 goto bad_addr;
3591 3630 }
3592 3631
3593 3632 *src_addrp = src_addr;
3594 3633 ixa->ixa_src_generation = generation;
3595 3634 }
3596 3635
3597 3636 /*
3598 3637 * Make sure we don't leave an unreachable ixa_nce in place
3599 3638 	 * since ip_select_route is used when we unplumb, i.e., remove
3600 3639 * references on ixa_ire, ixa_nce, and ixa_dce.
3601 3640 */
3602 3641 nce = ixa->ixa_nce;
3603 3642 if (nce != NULL && nce->nce_is_condemned) {
3604 3643 nce_refrele(nce);
3605 3644 ixa->ixa_nce = NULL;
3606 3645 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3607 3646 }
3608 3647
3609 3648 /*
3610 3649 * The caller has set IXAF_PMTU_DISCOVERY if path MTU is desired.
3611 3650 * However, we can't do it for IPv4 multicast or broadcast.
3612 3651 */
3613 3652 if (ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST))
3614 3653 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3615 3654
3616 3655 /*
3617 3656 * Set initial value for fragmentation limit. Either conn_ip_output
3618 3657 	 * or the ULP might update it when there are routing changes.
3619 3658 * Handles a NULL ixa_ire->ire_ill or a NULL ixa_nce for RTF_REJECT.
3620 3659 */
3621 3660 pmtu = ip_get_pmtu(ixa);
3622 3661 ixa->ixa_fragsize = pmtu;
3623 3662 /* Make sure ixa_fragsize and ixa_pmtu remain identical */
3624 3663 if (ixa->ixa_flags & IXAF_VERIFY_PMTU)
3625 3664 ixa->ixa_pmtu = pmtu;
3626 3665
3627 3666 /*
3628 3667 * Extract information useful for some transports.
3629 3668 * First we look for DCE metrics. Then we take what we have in
3630 3669 	 * the metrics in the route, where the offlink route's metrics are
3631 3670 	 * used if we have one.
3632 3671 */
3633 3672 if (uinfo != NULL) {
3634 3673 bzero(uinfo, sizeof (*uinfo));
3635 3674
3636 3675 if (dce->dce_flags & DCEF_UINFO)
3637 3676 *uinfo = dce->dce_uinfo;
3638 3677
3639 3678 rts_merge_metrics(uinfo, &ire->ire_metrics);
3640 3679
3641 3680 /* Allow ire_metrics to decrease the path MTU from above */
3642 3681 if (uinfo->iulp_mtu == 0 || uinfo->iulp_mtu > pmtu)
3643 3682 uinfo->iulp_mtu = pmtu;
3644 3683
3645 3684 uinfo->iulp_localnet = (ire->ire_type & IRE_ONLINK) != 0;
3646 3685 uinfo->iulp_loopback = (ire->ire_type & IRE_LOOPBACK) != 0;
3647 3686 uinfo->iulp_local = (ire->ire_type & IRE_LOCAL) != 0;
3648 3687 }
3649 3688
3650 3689 if (ill != NULL)
3651 3690 ill_refrele(ill);
3652 3691
3653 3692 return (error);
3654 3693
3655 3694 bad_addr:
3656 3695 if (ire != NULL)
3657 3696 ire_refrele(ire);
3658 3697
3659 3698 if (ill != NULL)
3660 3699 ill_refrele(ill);
3661 3700
3662 3701 /*
3663 3702 * Make sure we don't leave an unreachable ixa_nce in place
3664 3703 	 * since ip_select_route is used when we unplumb, i.e., remove
3665 3704 * references on ixa_ire, ixa_nce, and ixa_dce.
3666 3705 */
3667 3706 nce = ixa->ixa_nce;
3668 3707 if (nce != NULL && nce->nce_is_condemned) {
3669 3708 nce_refrele(nce);
3670 3709 ixa->ixa_nce = NULL;
3671 3710 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3672 3711 }
3673 3712
3674 3713 return (error);
3675 3714 }
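
/*
 * A hedged fragment of a transport connect path using the routine
 * above; not compilable stand-alone. It assumes ixa is an initialized
 * ip_xmit_attr_t with IXAF_IS_IPV4 set, passes dst as firsthop (no
 * source route), and uses mac_mode 0 as a placeholder.
 */
static int
connect_pick_route(ip_xmit_attr_t *ixa, ipaddr_t *srcp, ipaddr_t dst,
    iulp_t *uinfo)
{
	int error;

	/* Verify reachability and have IP pick our source address. */
	error = ip_set_destination_v4(srcp, dst, dst, ixa, uinfo,
	    IPDF_VERIFY_DST | IPDF_SELECT_SRC, 0);
	if (error != 0)
		return (error);

	/* ixa_ire, ixa_nce, and ixa_dce are now cached for output. */
	return (0);
}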
3676 3715
3677 3716
3678 3717 /*
3679 3718 * Get the base MTU for the case when path MTU discovery is not used.
3680 3719 * Takes the MTU of the IRE into account.
3681 3720 */
3682 3721 uint_t
3683 3722 ip_get_base_mtu(ill_t *ill, ire_t *ire)
3684 3723 {
3685 3724 uint_t mtu;
3686 3725 uint_t iremtu = ire->ire_metrics.iulp_mtu;
3687 3726
3688 3727 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST))
3689 3728 mtu = ill->ill_mc_mtu;
3690 3729 else
3691 3730 mtu = ill->ill_mtu;
3692 3731
3693 3732 if (iremtu != 0 && iremtu < mtu)
3694 3733 mtu = iremtu;
3695 3734
3696 3735 return (mtu);
3697 3736 }
3698 3737
3699 3738 /*
3700 3739 * Get the PMTU for the attributes. Handles both IPv4 and IPv6.
3701 3740 * Assumes that ixa_ire, dce, and nce have already been set up.
3702 3741 *
3703 3742 * The caller has set IXAF_PMTU_DISCOVERY if path MTU discovery is desired.
3704 3743 * We avoid path MTU discovery if it is disabled with ndd.
3705 3744  * Furthermore, if the path MTU is too small, then we don't set DF for IPv4.
3706 3745 *
3707 3746 * NOTE: We also used to turn it off for source routed packets. That
3708 3747 * is no longer required since the dce is per final destination.
3709 3748 */
3710 3749 uint_t
3711 3750 ip_get_pmtu(ip_xmit_attr_t *ixa)
3712 3751 {
3713 3752 ip_stack_t *ipst = ixa->ixa_ipst;
3714 3753 dce_t *dce;
3715 3754 nce_t *nce;
3716 3755 ire_t *ire;
3717 3756 uint_t pmtu;
3718 3757
3719 3758 ire = ixa->ixa_ire;
3720 3759 dce = ixa->ixa_dce;
3721 3760 nce = ixa->ixa_nce;
3722 3761
3723 3762 /*
3724 3763 * If path MTU discovery has been turned off by ndd, then we ignore
3725 3764 * any dce_pmtu and for IPv4 we will not set DF.
3726 3765 */
3727 3766 if (!ipst->ips_ip_path_mtu_discovery)
3728 3767 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3729 3768
3730 3769 pmtu = IP_MAXPACKET;
3731 3770 /*
3732 3771 	 * Decide whether IPv4 sets DF.
3733 3772 	 * For IPv6, "no DF" means to use the 1280-byte minimum MTU.
3734 3773 */
3735 3774 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3736 3775 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3737 3776 } else {
3738 3777 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3739 3778 if (!(ixa->ixa_flags & IXAF_IS_IPV4))
3740 3779 pmtu = IPV6_MIN_MTU;
3741 3780 }
3742 3781
3743 3782 	/* Check if the PMTU is too old before we use it */
3744 3783 if ((dce->dce_flags & DCEF_PMTU) &&
3745 3784 TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time >
3746 3785 ipst->ips_ip_pathmtu_interval) {
3747 3786 /*
3748 3787 * Older than 20 minutes. Drop the path MTU information.
3749 3788 */
3750 3789 mutex_enter(&dce->dce_lock);
3751 3790 dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
3752 3791 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
3753 3792 mutex_exit(&dce->dce_lock);
3754 3793 dce_increment_generation(dce);
3755 3794 }
3756 3795
3757 3796 /* The metrics on the route can lower the path MTU */
3758 3797 if (ire->ire_metrics.iulp_mtu != 0 &&
3759 3798 ire->ire_metrics.iulp_mtu < pmtu)
3760 3799 pmtu = ire->ire_metrics.iulp_mtu;
3761 3800
3762 3801 /*
3763 3802 * If the path MTU is smaller than some minimum, we still use dce_pmtu
3764 3803 * above (would be 576 for IPv4 and 1280 for IPv6), but we clear
3765 3804 * IXAF_PMTU_IPV4_DF so that we avoid setting DF for IPv4.
3766 3805 */
3767 3806 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3768 3807 if (dce->dce_flags & DCEF_PMTU) {
3769 3808 if (dce->dce_pmtu < pmtu)
3770 3809 pmtu = dce->dce_pmtu;
3771 3810
3772 3811 if (dce->dce_flags & DCEF_TOO_SMALL_PMTU) {
3773 3812 ixa->ixa_flags |= IXAF_PMTU_TOO_SMALL;
3774 3813 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3775 3814 } else {
3776 3815 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3777 3816 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3778 3817 }
3779 3818 } else {
3780 3819 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3781 3820 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3782 3821 }
3783 3822 }
3784 3823
3785 3824 /*
3786 3825 * If we have an IRE_LOCAL we use the loopback mtu instead of
3787 3826 * the ill for going out the wire i.e., IRE_LOCAL gets the same
3788 3827 * mtu as IRE_LOOPBACK.
3789 3828 */
3790 3829 if (ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) {
3791 3830 uint_t loopback_mtu;
3792 3831
3793 3832 loopback_mtu = (ire->ire_ipversion == IPV6_VERSION) ?
3794 3833 ip_loopback_mtu_v6plus : ip_loopback_mtuplus;
3795 3834
3796 3835 if (loopback_mtu < pmtu)
3797 3836 pmtu = loopback_mtu;
3798 3837 } else if (nce != NULL) {
3799 3838 /*
3800 3839 * Make sure we don't exceed the interface MTU.
3801 3840 * In the case of RTF_REJECT or RTF_BLACKHOLE we might not have
3802 3841 * an ill. We'd use the above IP_MAXPACKET in that case just
3803 3842 * to tell the transport something larger than zero.
3804 3843 */
3805 3844 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST)) {
3806 3845 if (nce->nce_common->ncec_ill->ill_mc_mtu < pmtu)
3807 3846 pmtu = nce->nce_common->ncec_ill->ill_mc_mtu;
3808 3847 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3809 3848 nce->nce_ill->ill_mc_mtu < pmtu) {
3810 3849 /*
3811 3850 * for interfaces in an IPMP group, the mtu of
3812 3851 * the nce_ill (under_ill) could be different
3813 3852 * from the mtu of the ncec_ill, so we take the
3814 3853 * min of the two.
3815 3854 */
3816 3855 pmtu = nce->nce_ill->ill_mc_mtu;
3817 3856 }
3818 3857 } else {
3819 3858 if (nce->nce_common->ncec_ill->ill_mtu < pmtu)
3820 3859 pmtu = nce->nce_common->ncec_ill->ill_mtu;
3821 3860 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3822 3861 nce->nce_ill->ill_mtu < pmtu) {
3823 3862 /*
3824 3863 * for interfaces in an IPMP group, the mtu of
3825 3864 * the nce_ill (under_ill) could be different
3826 3865 * from the mtu of the ncec_ill, so we take the
3827 3866 * min of the two.
3828 3867 */
3829 3868 pmtu = nce->nce_ill->ill_mtu;
3830 3869 }
3831 3870 }
3832 3871 }
3833 3872
3834 3873 /*
3835 3874 * Handle the IPV6_USE_MIN_MTU socket option or ancillary data.
3836 3875 * Only applies to IPv6.
3837 3876 */
3838 3877 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3839 3878 if (ixa->ixa_flags & IXAF_USE_MIN_MTU) {
3840 3879 switch (ixa->ixa_use_min_mtu) {
3841 3880 case IPV6_USE_MIN_MTU_MULTICAST:
3842 3881 if (ire->ire_type & IRE_MULTICAST)
3843 3882 pmtu = IPV6_MIN_MTU;
3844 3883 break;
3845 3884 case IPV6_USE_MIN_MTU_ALWAYS:
3846 3885 pmtu = IPV6_MIN_MTU;
3847 3886 break;
3848 3887 case IPV6_USE_MIN_MTU_NEVER:
3849 3888 break;
3850 3889 }
3851 3890 } else {
3852 3891 /* Default is IPV6_USE_MIN_MTU_MULTICAST */
3853 3892 if (ire->ire_type & IRE_MULTICAST)
3854 3893 pmtu = IPV6_MIN_MTU;
3855 3894 }
3856 3895 }
3857 3896
3858 3897 /*
3859 3898 	 * After receiving an ICMPv6 "packet too big" message with an
3860 3899 	 * MTU < 1280, and for multirouted IPv6 packets, the IP layer
3861 3900 	 * will insert an 8-byte fragment header in every packet. We compensate
3862 3901 * for those cases by returning a smaller path MTU to the ULP.
3863 3902 *
3864 3903 * In the case of CGTP then ip_output will add a fragment header.
3865 3904 * Make sure there is room for it by telling a smaller number
3866 3905 * to the transport.
3867 3906 *
3868 3907 	 * When IXAF_IPV6_ADD_FRAGHDR is set we subtract the frag hdr here
3869 3908 	 * so the ULPs consistently see an iulp_pmtu and ip_get_pmtu()
3870 3909 	 * value which is the size of the packets they can send.
3871 3910 */
3872 3911 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3873 3912 if ((dce->dce_flags & DCEF_TOO_SMALL_PMTU) ||
3874 3913 (ire->ire_flags & RTF_MULTIRT) ||
3875 3914 (ixa->ixa_flags & IXAF_MULTIRT_MULTICAST)) {
3876 3915 pmtu -= sizeof (ip6_frag_t);
3877 3916 ixa->ixa_flags |= IXAF_IPV6_ADD_FRAGHDR;
3878 3917 }
3879 3918 }
3880 3919
3881 3920 return (pmtu);
3882 3921 }
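
/*
 * A stand-alone sketch of two rules from ip_get_pmtu() above: a cached
 * path MTU is ignored once it is older than the path-MTU interval
 * (20 minutes by default, per the comment), and the effective PMTU is
 * the minimum of every applicable limit. Names are illustrative; the
 * real code also ages the dce and adjusts DF and fragment-header state.
 */
#include <stdint.h>

#define	PATHMTU_INTERVAL_SEC	(20 * 60)
#define	IP_MAXPKT		65535

static uint32_t
effective_pmtu(int64_t now_sec, int64_t cached_since_sec,
    uint32_t cached_pmtu, uint32_t route_mtu, uint32_t ill_mtu)
{
	uint32_t pmtu = IP_MAXPKT;

	if (route_mtu != 0 && route_mtu < pmtu)
		pmtu = route_mtu;	/* route metrics can lower it */
	if (cached_pmtu != 0 &&
	    now_sec - cached_since_sec <= PATHMTU_INTERVAL_SEC &&
	    cached_pmtu < pmtu)
		pmtu = cached_pmtu;	/* unexpired learned PMTU */
	if (ill_mtu != 0 && ill_mtu < pmtu)
		pmtu = ill_mtu;		/* never exceed the interface */
	return (pmtu);
}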
3883 3922
3884 3923 /*
3885 3924 * Carve "len" bytes out of an mblk chain, consuming any we empty, and duping
3886 3925 * the final piece where we don't. Return a pointer to the first mblk in the
3887 3926 * result, and update the pointer to the next mblk to chew on. If anything
3888 3927 * goes wrong (i.e., dupb fails), we waste everything in sight and return a
3889 3928 * NULL pointer.
3890 3929 */
3891 3930 mblk_t *
3892 3931 ip_carve_mp(mblk_t **mpp, ssize_t len)
3893 3932 {
3894 3933 mblk_t *mp0;
3895 3934 mblk_t *mp1;
3896 3935 mblk_t *mp2;
3897 3936
3898 3937 if (!len || !mpp || !(mp0 = *mpp))
3899 3938 return (NULL);
3900 3939 /* If we aren't going to consume the first mblk, we need a dup. */
3901 3940 if (mp0->b_wptr - mp0->b_rptr > len) {
3902 3941 mp1 = dupb(mp0);
3903 3942 if (mp1) {
3904 3943 /* Partition the data between the two mblks. */
3905 3944 mp1->b_wptr = mp1->b_rptr + len;
3906 3945 mp0->b_rptr = mp1->b_wptr;
3907 3946 /*
3908 3947 			 * After the adjustments, if the mblk not consumed is
3909 3948 			 * now unaligned, try to align it. If this fails, free
3910 3949 			 * all messages and let the upper layer recover.
3911 3950 */
3912 3951 if (!OK_32PTR(mp0->b_rptr)) {
3913 3952 if (!pullupmsg(mp0, -1)) {
3914 3953 freemsg(mp0);
3915 3954 freemsg(mp1);
3916 3955 *mpp = NULL;
3917 3956 return (NULL);
3918 3957 }
3919 3958 }
3920 3959 }
3921 3960 return (mp1);
3922 3961 }
3923 3962 /* Eat through as many mblks as we need to get len bytes. */
3924 3963 len -= mp0->b_wptr - mp0->b_rptr;
3925 3964 for (mp2 = mp1 = mp0; (mp2 = mp2->b_cont) != 0 && len; mp1 = mp2) {
3926 3965 if (mp2->b_wptr - mp2->b_rptr > len) {
3927 3966 /*
3928 3967 * We won't consume the entire last mblk. Like
3929 3968 * above, dup and partition it.
3930 3969 */
3931 3970 mp1->b_cont = dupb(mp2);
3932 3971 mp1 = mp1->b_cont;
3933 3972 if (!mp1) {
3934 3973 /*
3935 3974 * Trouble. Rather than go to a lot of
3936 3975 * trouble to clean up, we free the messages.
3937 3976 * This won't be any worse than losing it on
3938 3977 * the wire.
3939 3978 */
3940 3979 freemsg(mp0);
3941 3980 freemsg(mp2);
3942 3981 *mpp = NULL;
3943 3982 return (NULL);
3944 3983 }
3945 3984 mp1->b_wptr = mp1->b_rptr + len;
3946 3985 mp2->b_rptr = mp1->b_wptr;
3947 3986 /*
3948 3987 			 * After the adjustments, if the mblk not consumed is
3949 3988 			 * now unaligned, try to align it. If this fails, free
3950 3989 			 * all messages and let the upper layer recover.
3951 3990 */
3952 3991 if (!OK_32PTR(mp2->b_rptr)) {
3953 3992 if (!pullupmsg(mp2, -1)) {
3954 3993 freemsg(mp0);
3955 3994 freemsg(mp2);
3956 3995 *mpp = NULL;
3957 3996 return (NULL);
3958 3997 }
3959 3998 }
3960 3999 *mpp = mp2;
3961 4000 return (mp0);
3962 4001 }
3963 4002 /* Decrement len by the amount we just got. */
3964 4003 len -= mp2->b_wptr - mp2->b_rptr;
3965 4004 }
3966 4005 /*
3967 4006 	 * len should be reduced to zero now. If not, our caller has
3968 4007 * screwed up.
3969 4008 */
3970 4009 if (len) {
3971 4010 /* Shouldn't happen! */
3972 4011 freemsg(mp0);
3973 4012 *mpp = NULL;
3974 4013 return (NULL);
3975 4014 }
3976 4015 /*
3977 4016 * We consumed up to exactly the end of an mblk. Detach the part
3978 4017 * we are returning from the rest of the chain.
3979 4018 */
3980 4019 mp1->b_cont = NULL;
3981 4020 *mpp = mp2;
3982 4021 return (mp0);
3983 4022 }
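
/*
 * A hedged fragment showing the intended ip_carve_mp() call pattern:
 * carve fixed-size pieces off a message chain until it is exhausted.
 * Not compilable stand-alone; the consumer of each piece is elided,
 * and freemsg() here merely stands in for real consumption.
 */
static void
carve_all(mblk_t *chain, ssize_t chunk)
{
	mblk_t *piece;

	while (chain != NULL) {
		piece = ip_carve_mp(&chain, chunk);
		if (piece == NULL)
			break;		/* failure; chain already freed */
		freemsg(piece);
	}
}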
3984 4023
3985 4024 /* The ill stream is being unplumbed. Called from ip_close */
3986 4025 int
3987 4026 ip_modclose(ill_t *ill)
3988 4027 {
3989 4028 boolean_t success;
3990 4029 ipsq_t *ipsq;
3991 4030 ipif_t *ipif;
3992 4031 queue_t *q = ill->ill_rq;
3993 4032 ip_stack_t *ipst = ill->ill_ipst;
3994 4033 int i;
3995 4034 arl_ill_common_t *ai = ill->ill_common;
3996 4035
3997 4036 /*
3998 4037 * The punlink prior to this may have initiated a capability
3999 4038 * negotiation. But ipsq_enter will block until that finishes or
4000 4039 * times out.
4001 4040 */
4002 4041 success = ipsq_enter(ill, B_FALSE, NEW_OP);
4003 4042
4004 4043 /*
4005 4044 * Open/close/push/pop is guaranteed to be single threaded
4006 4045 * per stream by STREAMS. FS guarantees that all references
4007 4046 * from top are gone before close is called. So there can't
4008 4047 	 * be another close thread that has set CONDEMNED on this ill
4009 4048 	 * and caused ipsq_enter to return failure.
4010 4049 */
4011 4050 ASSERT(success);
4012 4051 ipsq = ill->ill_phyint->phyint_ipsq;
4013 4052
4014 4053 /*
4015 4054 * Mark it condemned. No new reference will be made to this ill.
4016 4055 * Lookup functions will return an error. Threads that try to
4017 4056 * increment the refcnt must check for ILL_CAN_LOOKUP. This ensures
4018 4057 * that the refcnt will drop down to zero.
4019 4058 */
4020 4059 mutex_enter(&ill->ill_lock);
4021 4060 ill->ill_state_flags |= ILL_CONDEMNED;
4022 4061 for (ipif = ill->ill_ipif; ipif != NULL;
4023 4062 ipif = ipif->ipif_next) {
4024 4063 ipif->ipif_state_flags |= IPIF_CONDEMNED;
4025 4064 }
4026 4065 /*
4027 4066 * Wake up anybody waiting to enter the ipsq. ipsq_enter
4028 4067 	 * returns an error if ILL_CONDEMNED is set.
4029 4068 */
4030 4069 cv_broadcast(&ill->ill_cv);
4031 4070 mutex_exit(&ill->ill_lock);
4032 4071
4033 4072 /*
4034 4073 * Send all the deferred DLPI messages downstream which came in
4035 4074 * during the small window right before ipsq_enter(). We do this
4036 4075 * without waiting for the ACKs because all the ACKs for M_PROTO
4037 4076 * messages are ignored in ip_rput() when ILL_CONDEMNED is set.
4038 4077 */
4039 4078 ill_dlpi_send_deferred(ill);
4040 4079
4041 4080 /*
4042 4081 * Shut down fragmentation reassembly.
4043 4082 * ill_frag_timer won't start a timer again.
4044 4083 * Now cancel any existing timer
4045 4084 */
4046 4085 (void) untimeout(ill->ill_frag_timer_id);
4047 4086 (void) ill_frag_timeout(ill, 0);
4048 4087
4049 4088 /*
4050 4089 * Call ill_delete to bring down the ipifs, ilms and ill on
4051 4090 * this ill. Then wait for the refcnts to drop to zero.
4052 4091 * ill_is_freeable checks whether the ill is really quiescent.
4053 4092 * Then make sure that threads that are waiting to enter the
4054 4093 * ipsq have seen the error returned by ipsq_enter and have
4055 4094 * gone away. Then we call ill_delete_tail which does the
4056 4095 * DL_UNBIND_REQ with the driver and then qprocsoff.
4057 4096 */
4058 4097 ill_delete(ill);
4059 4098 mutex_enter(&ill->ill_lock);
4060 4099 while (!ill_is_freeable(ill))
4061 4100 cv_wait(&ill->ill_cv, &ill->ill_lock);
4062 4101
4063 4102 while (ill->ill_waiters)
4064 4103 cv_wait(&ill->ill_cv, &ill->ill_lock);
4065 4104
4066 4105 mutex_exit(&ill->ill_lock);
4067 4106
4068 4107 /*
4069 4108 * ill_delete_tail drops reference on ill_ipst, but we need to keep
4070 4109 * it held until the end of the function since the cleanup
4071 4110 * below needs to be able to use the ip_stack_t.
4072 4111 */
4073 4112 netstack_hold(ipst->ips_netstack);
4074 4113
4075 4114 /* qprocsoff is done via ill_delete_tail */
4076 4115 ill_delete_tail(ill);
4077 4116 /*
4078 4117 * synchronously wait for arp stream to unbind. After this, we
4079 4118 * cannot get any data packets up from the driver.
4080 4119 */
4081 4120 arp_unbind_complete(ill);
4082 4121 ASSERT(ill->ill_ipst == NULL);
4083 4122
4084 4123 /*
4085 4124 * Walk through all conns and qenable those that have queued data.
4086 4125 * Close synchronization needs this to
4087 4126 * be done to ensure that all upper layers blocked
4088 4127 * due to flow control to the closing device
4089 4128 * get unblocked.
4090 4129 */
4091 4130 ip1dbg(("ip_wsrv: walking\n"));
4092 4131 for (i = 0; i < TX_FANOUT_SIZE; i++) {
4093 4132 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[i]);
4094 4133 }
4095 4134
4096 4135 /*
4097 4136 * ai can be null if this is an IPv6 ill, or if the IPv4
4098 4137 * stream is being torn down before ARP was plumbed (e.g.,
4099 4138 * /sbin/ifconfig plumbing a stream twice, and encountering
4100 4139 	 * an error).
4101 4140 */
4102 4141 if (ai != NULL) {
4103 4142 ASSERT(!ill->ill_isv6);
4104 4143 mutex_enter(&ai->ai_lock);
4105 4144 ai->ai_ill = NULL;
4106 4145 if (ai->ai_arl == NULL) {
4107 4146 mutex_destroy(&ai->ai_lock);
4108 4147 kmem_free(ai, sizeof (*ai));
4109 4148 } else {
4110 4149 cv_signal(&ai->ai_ill_unplumb_done);
4111 4150 mutex_exit(&ai->ai_lock);
4112 4151 }
4113 4152 }
4114 4153
4115 4154 mutex_enter(&ipst->ips_ip_mi_lock);
4116 4155 mi_close_unlink(&ipst->ips_ip_g_head, (IDP)ill);
4117 4156 mutex_exit(&ipst->ips_ip_mi_lock);
4118 4157
4119 4158 /*
4120 4159 * credp could be null if the open didn't succeed and ip_modopen
4121 4160 * itself calls ip_close.
4122 4161 */
4123 4162 if (ill->ill_credp != NULL)
4124 4163 crfree(ill->ill_credp);
4125 4164
4126 4165 mutex_destroy(&ill->ill_saved_ire_lock);
4127 4166 mutex_destroy(&ill->ill_lock);
4128 4167 rw_destroy(&ill->ill_mcast_lock);
4129 4168 mutex_destroy(&ill->ill_mcast_serializer);
4130 4169 list_destroy(&ill->ill_nce);
4131 4170
4132 4171 /*
4133 4172 * Now we are done with the module close pieces that
4134 4173 * need the netstack_t.
4135 4174 */
4136 4175 netstack_rele(ipst->ips_netstack);
4137 4176
4138 4177 mi_close_free((IDP)ill);
4139 4178 q->q_ptr = WR(q)->q_ptr = NULL;
4140 4179
4141 4180 ipsq_exit(ipsq);
4142 4181
4143 4182 return (0);
4144 4183 }
4145 4184
4146 4185 /*
4147 4186 * This is called as part of close() for IP, UDP, ICMP, and RTS
4148 4187 * in order to quiesce the conn.
4149 4188 */
4150 4189 void
4151 4190 ip_quiesce_conn(conn_t *connp)
4152 4191 {
4153 4192 boolean_t drain_cleanup_reqd = B_FALSE;
4154 4193 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
4155 4194 boolean_t ilg_cleanup_reqd = B_FALSE;
4156 4195 ip_stack_t *ipst;
4157 4196
4158 4197 ASSERT(!IPCL_IS_TCP(connp));
4159 4198 ipst = connp->conn_netstack->netstack_ip;
4160 4199
4161 4200 /*
4162 4201 * Mark the conn as closing, and this conn must not be
4163 4202 	 * inserted in future into any list; e.g., conn_drain_insert()
4164 4203 	 * won't insert this conn into the conn_drain_list.
4165 4204 *
4166 4205 	 * conn_idl and conn_ilg cannot get set henceforth.
4167 4206 */
4168 4207 mutex_enter(&connp->conn_lock);
4169 4208 ASSERT(!(connp->conn_state_flags & CONN_QUIESCED));
4170 4209 connp->conn_state_flags |= CONN_CLOSING;
4171 4210 if (connp->conn_idl != NULL)
4172 4211 drain_cleanup_reqd = B_TRUE;
4173 4212 if (connp->conn_oper_pending_ill != NULL)
4174 4213 conn_ioctl_cleanup_reqd = B_TRUE;
4175 4214 if (connp->conn_dhcpinit_ill != NULL) {
4176 4215 ASSERT(connp->conn_dhcpinit_ill->ill_dhcpinit != 0);
4177 4216 atomic_dec_32(&connp->conn_dhcpinit_ill->ill_dhcpinit);
4178 4217 ill_set_inputfn(connp->conn_dhcpinit_ill);
4179 4218 connp->conn_dhcpinit_ill = NULL;
4180 4219 }
4181 4220 if (connp->conn_ilg != NULL)
4182 4221 ilg_cleanup_reqd = B_TRUE;
4183 4222 mutex_exit(&connp->conn_lock);
4184 4223
4185 4224 if (conn_ioctl_cleanup_reqd)
4186 4225 conn_ioctl_cleanup(connp);
4187 4226
4188 4227 if (is_system_labeled() && connp->conn_anon_port) {
4189 4228 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
4190 4229 connp->conn_mlp_type, connp->conn_proto,
4191 4230 ntohs(connp->conn_lport), B_FALSE);
4192 4231 connp->conn_anon_port = 0;
4193 4232 }
4194 4233 connp->conn_mlp_type = mlptSingle;
4195 4234
4196 4235 /*
4197 4236 	 * Remove this conn from any fanout list it is on,
4198 4237 * and then wait for any threads currently operating
4199 4238 * on this endpoint to finish
4200 4239 */
4201 4240 ipcl_hash_remove(connp);
4202 4241
4203 4242 /*
4204 4243 * Remove this conn from the drain list, and do any other cleanup that
4205 4244 * may be required. (TCP conns are never flow controlled, and
4206 4245 * conn_idl will be NULL.)
4207 4246 */
4208 4247 if (drain_cleanup_reqd && connp->conn_idl != NULL) {
4209 4248 idl_t *idl = connp->conn_idl;
4210 4249
4211 4250 mutex_enter(&idl->idl_lock);
4212 4251 conn_drain(connp, B_TRUE);
4213 4252 mutex_exit(&idl->idl_lock);
4214 4253 }
4215 4254
4216 4255 if (connp == ipst->ips_ip_g_mrouter)
4217 4256 (void) ip_mrouter_done(ipst);
4218 4257
4219 4258 if (ilg_cleanup_reqd)
4220 4259 ilg_delete_all(connp);
4221 4260
4222 4261 /*
4223 4262 * Now conn refcnt can increase only thru CONN_INC_REF_LOCKED.
4224 4263 	 * Callers from the write side can't be there now because close
4225 4264 * is in progress. The only other caller is ipcl_walk
4226 4265 * which checks for the condemned flag.
4227 4266 */
4228 4267 mutex_enter(&connp->conn_lock);
4229 4268 connp->conn_state_flags |= CONN_CONDEMNED;
4230 4269 while (connp->conn_ref != 1)
4231 4270 cv_wait(&connp->conn_cv, &connp->conn_lock);
4232 4271 connp->conn_state_flags |= CONN_QUIESCED;
4233 4272 mutex_exit(&connp->conn_lock);
4234 4273 }
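
The tail of ip_quiesce_conn() is the standard illumos condemn-and-drain idiom: mark the object so no new references can be taken, then block until the remaining holders drop theirs. A minimal sketch of the same pattern for a hypothetical refcounted object (all names below are illustrative, not from this file):

    typedef struct obj {
    	kmutex_t	o_lock;
    	kcondvar_t	o_cv;
    	uint_t		o_ref;		/* protected by o_lock */
    	uint_t		o_flags;
    } obj_t;

    #define	OBJ_CONDEMNED	0x1

    /* Closer: condemn the object, then wait out all other holders. */
    static void
    obj_quiesce(obj_t *op)
    {
    	mutex_enter(&op->o_lock);
    	op->o_flags |= OBJ_CONDEMNED;	/* lookups must now fail */
    	while (op->o_ref != 1)		/* 1 == our own reference */
    		cv_wait(&op->o_cv, &op->o_lock);
    	mutex_exit(&op->o_lock);
    }

    /* Every release wakes potential waiters in obj_quiesce(). */
    static void
    obj_rele(obj_t *op)
    {
    	mutex_enter(&op->o_lock);
    	op->o_ref--;
    	cv_broadcast(&op->o_cv);
    	mutex_exit(&op->o_lock);
    }
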
4235 4274
4236 4275 /* ARGSUSED */
4237 4276 int
4238 4277 ip_close(queue_t *q, int flags)
4239 4278 {
4240 4279 conn_t *connp;
4241 4280
4242 4281 /*
4243 4282 * Call the appropriate delete routine depending on whether this is
4244 4283 * a module or device.
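 * (In STREAMS, q_next is non-NULL only when another queue sits below
 * this one, i.e. when IP was pushed as a module; a device instance is
 * at the end of the stream and has q_next == NULL.)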
4245 4284 */
4246 4285 if (WR(q)->q_next != NULL) {
4247 4286 /* This is a module close */
4248 4287 return (ip_modclose((ill_t *)q->q_ptr));
4249 4288 }
4250 4289
4251 4290 connp = q->q_ptr;
4252 4291 ip_quiesce_conn(connp);
4253 4292
4254 4293 qprocsoff(q);
4255 4294
4256 4295 /*
4257 4296 * Now we are truly single threaded on this stream, and can
4258 4297 * delete the things hanging off the connp, and finally the connp.
4259 4298 * We removed this connp from the fanout list, it cannot be
4260 4299 * accessed thru the fanouts, and we already waited for the
4261 4300 	 * conn_ref to drop to 1 (our own reference). We are already in
4262 4301 * there cannot be any other thread from the top. qprocsoff
4263 4302 * has completed, and service has completed or won't run in
4264 4303 * future.
4265 4304 */
4266 4305 ASSERT(connp->conn_ref == 1);
4267 4306
4268 4307 inet_minor_free(connp->conn_minor_arena, connp->conn_dev);
4269 4308
4270 4309 connp->conn_ref--;
4271 4310 ipcl_conn_destroy(connp);
4272 4311
4273 4312 q->q_ptr = WR(q)->q_ptr = NULL;
4274 4313 return (0);
4275 4314 }
4276 4315
4277 4316 /*
4278 4317  * Wrapper around putnext() so that ip_rts_request can merely use
4279 4318 * conn_recv.
4280 4319 */
4281 4320 /*ARGSUSED2*/
4282 4321 static void
4283 4322 ip_conn_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4284 4323 {
4285 4324 conn_t *connp = (conn_t *)arg1;
4286 4325
4287 4326 putnext(connp->conn_rq, mp);
4288 4327 }
4289 4328
4290 4329 /* Dummy in case ICMP error delivery is attempted to a /dev/ip instance */
4291 4330 /* ARGSUSED */
4292 4331 static void
4293 4332 ip_conn_input_icmp(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4294 4333 {
4295 4334 freemsg(mp);
4296 4335 }
4297 4336
4298 4337 /*
4299 4338 * Called when the module is about to be unloaded
4300 4339 */
4301 4340 void
4302 4341 ip_ddi_destroy(void)
4303 4342 {
[ 2122 lines elided ]
4304 4343 /* This needs to be called before destroying any transports. */
4305 4344 mutex_enter(&cpu_lock);
4306 4345 unregister_cpu_setup_func(ip_tp_cpu_update, NULL);
4307 4346 mutex_exit(&cpu_lock);
4308 4347
4309 4348 tnet_fini();
4310 4349
4311 4350 icmp_ddi_g_destroy();
4312 4351 rts_ddi_g_destroy();
4313 4352 udp_ddi_g_destroy();
4353 + dccp_ddi_g_destroy();
4314 4354 sctp_ddi_g_destroy();
4315 4355 tcp_ddi_g_destroy();
4316 4356 ilb_ddi_g_destroy();
4317 4357 dce_g_destroy();
4318 4358 ipsec_policy_g_destroy();
4319 4359 ipcl_g_destroy();
4320 4360 ip_net_g_destroy();
4321 4361 ip_ire_g_fini();
4322 4362 inet_minor_destroy(ip_minor_arena_sa);
4323 4363 #if defined(_LP64)
4324 4364 inet_minor_destroy(ip_minor_arena_la);
4325 4365 #endif
4326 4366
4327 4367 #ifdef DEBUG
4328 4368 list_destroy(&ip_thread_list);
4329 4369 rw_destroy(&ip_thread_rwlock);
4330 4370 tsd_destroy(&ip_thread_data);
4331 4371 #endif
4332 4372
4333 4373 netstack_unregister(NS_IP);
4334 4374 }
4335 4375
4336 4376 /*
4337 4377 * First step in cleanup.
4338 4378 */
4339 4379 /* ARGSUSED */
4340 4380 static void
4341 4381 ip_stack_shutdown(netstackid_t stackid, void *arg)
4342 4382 {
4343 4383 ip_stack_t *ipst = (ip_stack_t *)arg;
4344 4384
4345 4385 #ifdef NS_DEBUG
4346 4386 printf("ip_stack_shutdown(%p, stack %d)\n", (void *)ipst, stackid);
4347 4387 #endif
4348 4388
4349 4389 /*
4350 4390 * Perform cleanup for special interfaces (loopback and IPMP).
4351 4391 */
4352 4392 ip_interface_cleanup(ipst);
4353 4393
4354 4394 /*
4355 4395 * The *_hook_shutdown()s start the process of notifying any
4356 4396 	 * consumers that things are going away ... nothing is destroyed.
4357 4397 */
4358 4398 ipv4_hook_shutdown(ipst);
4359 4399 ipv6_hook_shutdown(ipst);
4360 4400 arp_hook_shutdown(ipst);
4361 4401
4362 4402 mutex_enter(&ipst->ips_capab_taskq_lock);
4363 4403 ipst->ips_capab_taskq_quit = B_TRUE;
4364 4404 cv_signal(&ipst->ips_capab_taskq_cv);
4365 4405 mutex_exit(&ipst->ips_capab_taskq_lock);
4366 4406 }
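
The quit flag and cv above pair with the dispatcher thread that ip_stack_init() creates (ill_taskq_dispatch). A sketch of the consumer side of that handshake, assuming the dispatcher simply drains work until asked to quit; capab_taskq_loop is a stand-in name and the work-dequeue step is elided:

    static void
    capab_taskq_loop(ip_stack_t *ipst)
    {
    	mutex_enter(&ipst->ips_capab_taskq_lock);
    	for (;;) {
    		/* ... dequeue and dispatch any pending work here ... */
    		if (ipst->ips_capab_taskq_quit)
    			break;
    		/* Sleep until cv_signal() for new work or shutdown. */
    		cv_wait(&ipst->ips_capab_taskq_cv,
    		    &ipst->ips_capab_taskq_lock);
    	}
    	mutex_exit(&ipst->ips_capab_taskq_lock);
    	thread_exit();
    }

Because the quit flag is both set and tested under ips_capab_taskq_lock, the cv_signal() in ip_stack_shutdown() cannot be lost.
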
4367 4407
4368 4408 /*
4369 4409 * Free the IP stack instance.
4370 4410 */
4371 4411 static void
4372 4412 ip_stack_fini(netstackid_t stackid, void *arg)
4373 4413 {
4374 4414 ip_stack_t *ipst = (ip_stack_t *)arg;
4375 4415 int ret;
4376 4416
4377 4417 #ifdef NS_DEBUG
4378 4418 printf("ip_stack_fini(%p, stack %d)\n", (void *)ipst, stackid);
4379 4419 #endif
4380 4420 /*
4381 4421 * At this point, all of the notifications that the events and
4382 4422 * protocols are going away have been run, meaning that we can
4383 4423 * now set about starting to clean things up.
4384 4424 */
4385 4425 ipobs_fini(ipst);
4386 4426 ipv4_hook_destroy(ipst);
4387 4427 ipv6_hook_destroy(ipst);
4388 4428 arp_hook_destroy(ipst);
4389 4429 ip_net_destroy(ipst);
4390 4430
4391 4431 ipmp_destroy(ipst);
4392 4432
4393 4433 ip_kstat_fini(stackid, ipst->ips_ip_mibkp);
4394 4434 ipst->ips_ip_mibkp = NULL;
4395 4435 icmp_kstat_fini(stackid, ipst->ips_icmp_mibkp);
4396 4436 ipst->ips_icmp_mibkp = NULL;
4397 4437 ip_kstat2_fini(stackid, ipst->ips_ip_kstat);
4398 4438 ipst->ips_ip_kstat = NULL;
4399 4439 bzero(&ipst->ips_ip_statistics, sizeof (ipst->ips_ip_statistics));
4400 4440 ip6_kstat_fini(stackid, ipst->ips_ip6_kstat);
4401 4441 ipst->ips_ip6_kstat = NULL;
4402 4442 bzero(&ipst->ips_ip6_statistics, sizeof (ipst->ips_ip6_statistics));
4403 4443
4404 4444 kmem_free(ipst->ips_propinfo_tbl,
4405 4445 ip_propinfo_count * sizeof (mod_prop_info_t));
4406 4446 ipst->ips_propinfo_tbl = NULL;
4407 4447
4408 4448 dce_stack_destroy(ipst);
4409 4449 ip_mrouter_stack_destroy(ipst);
4410 4450
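	/*
	 * untimeout() returns -1 when the id does not name a pending
	 * timeout (including the never-armed id 0); otherwise it returns
	 * the time that remained. The ASSERTs below verify that -1 is
	 * seen exactly when the timer was never armed.
	 */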
4411 4451 ret = untimeout(ipst->ips_igmp_timeout_id);
4412 4452 if (ret == -1) {
4413 4453 ASSERT(ipst->ips_igmp_timeout_id == 0);
4414 4454 } else {
4415 4455 ASSERT(ipst->ips_igmp_timeout_id != 0);
4416 4456 ipst->ips_igmp_timeout_id = 0;
4417 4457 }
4418 4458 ret = untimeout(ipst->ips_igmp_slowtimeout_id);
4419 4459 if (ret == -1) {
4420 4460 ASSERT(ipst->ips_igmp_slowtimeout_id == 0);
4421 4461 } else {
4422 4462 ASSERT(ipst->ips_igmp_slowtimeout_id != 0);
4423 4463 ipst->ips_igmp_slowtimeout_id = 0;
4424 4464 }
4425 4465 ret = untimeout(ipst->ips_mld_timeout_id);
4426 4466 if (ret == -1) {
4427 4467 ASSERT(ipst->ips_mld_timeout_id == 0);
4428 4468 } else {
4429 4469 ASSERT(ipst->ips_mld_timeout_id != 0);
4430 4470 ipst->ips_mld_timeout_id = 0;
4431 4471 }
4432 4472 ret = untimeout(ipst->ips_mld_slowtimeout_id);
4433 4473 if (ret == -1) {
4434 4474 ASSERT(ipst->ips_mld_slowtimeout_id == 0);
4435 4475 } else {
4436 4476 ASSERT(ipst->ips_mld_slowtimeout_id != 0);
4437 4477 ipst->ips_mld_slowtimeout_id = 0;
4438 4478 }
4439 4479
4440 4480 ip_ire_fini(ipst);
4441 4481 ip6_asp_free(ipst);
4442 4482 conn_drain_fini(ipst);
4443 4483 ipcl_destroy(ipst);
4444 4484
4445 4485 mutex_destroy(&ipst->ips_ndp4->ndp_g_lock);
4446 4486 mutex_destroy(&ipst->ips_ndp6->ndp_g_lock);
4447 4487 kmem_free(ipst->ips_ndp4, sizeof (ndp_g_t));
4448 4488 ipst->ips_ndp4 = NULL;
4449 4489 kmem_free(ipst->ips_ndp6, sizeof (ndp_g_t));
4450 4490 ipst->ips_ndp6 = NULL;
4451 4491
4452 4492 if (ipst->ips_loopback_ksp != NULL) {
4453 4493 kstat_delete_netstack(ipst->ips_loopback_ksp, stackid);
4454 4494 ipst->ips_loopback_ksp = NULL;
4455 4495 }
4456 4496
4457 4497 mutex_destroy(&ipst->ips_capab_taskq_lock);
4458 4498 cv_destroy(&ipst->ips_capab_taskq_cv);
4459 4499
4460 4500 rw_destroy(&ipst->ips_srcid_lock);
4461 4501
4462 4502 mutex_destroy(&ipst->ips_ip_mi_lock);
4463 4503 rw_destroy(&ipst->ips_ill_g_usesrc_lock);
4464 4504
4465 4505 mutex_destroy(&ipst->ips_igmp_timer_lock);
4466 4506 mutex_destroy(&ipst->ips_mld_timer_lock);
4467 4507 mutex_destroy(&ipst->ips_igmp_slowtimeout_lock);
4468 4508 mutex_destroy(&ipst->ips_mld_slowtimeout_lock);
4469 4509 mutex_destroy(&ipst->ips_ip_addr_avail_lock);
4470 4510 rw_destroy(&ipst->ips_ill_g_lock);
4471 4511
4472 4512 kmem_free(ipst->ips_phyint_g_list, sizeof (phyint_list_t));
4473 4513 ipst->ips_phyint_g_list = NULL;
4474 4514 kmem_free(ipst->ips_ill_g_heads, sizeof (ill_g_head_t) * MAX_G_HEADS);
4475 4515 ipst->ips_ill_g_heads = NULL;
4476 4516
4477 4517 ldi_ident_release(ipst->ips_ldi_ident);
4478 4518 kmem_free(ipst, sizeof (*ipst));
4479 4519 }
4480 4520
4481 4521 /*
4482 4522 * This function is called from the TSD destructor, and is used to debug
4483 4523 * reference count issues in IP. See block comment in <inet/ip_if.h> for
4484 4524 * details.
4485 4525 */
4486 4526 static void
4487 4527 ip_thread_exit(void *phash)
4488 4528 {
4489 4529 th_hash_t *thh = phash;
4490 4530
4491 4531 rw_enter(&ip_thread_rwlock, RW_WRITER);
4492 4532 list_remove(&ip_thread_list, thh);
4493 4533 rw_exit(&ip_thread_rwlock);
4494 4534 mod_hash_destroy_hash(thh->thh_hash);
4495 4535 kmem_free(thh, sizeof (*thh));
4496 4536 }
4497 4537
4498 4538 /*
4499 4539 * Called when the IP kernel module is loaded into the kernel
4500 4540 */
4501 4541 void
4502 4542 ip_ddi_init(void)
4503 4543 {
4504 4544 ip_squeue_flag = ip_squeue_switch(ip_squeue_enter);
4505 4545
4506 4546 /*
4507 4547 * For IP and TCP the minor numbers should start from 2 since we have 4
4508 4548 * initial devices: ip, ip6, tcp, tcp6.
4509 4549 */
4510 4550 /*
4511 4551 * If this is a 64-bit kernel, then create two separate arenas -
4512 4552 * one for TLIs in the range of INET_MIN_DEV+2 through 2^^18-1, and the
4513 4553 * other for socket apps in the range 2^^18 through 2^^32-1.
4514 4554 */
4515 4555 ip_minor_arena_la = NULL;
4516 4556 ip_minor_arena_sa = NULL;
4517 4557 #if defined(_LP64)
4518 4558 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4519 4559 INET_MIN_DEV + 2, MAXMIN32, KM_SLEEP)) == NULL) {
4520 4560 cmn_err(CE_PANIC,
4521 4561 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4522 4562 }
4523 4563 if ((ip_minor_arena_la = inet_minor_create("ip_minor_arena_la",
4524 4564 MAXMIN32 + 1, MAXMIN64, KM_SLEEP)) == NULL) {
4525 4565 cmn_err(CE_PANIC,
4526 4566 "ip_ddi_init: ip_minor_arena_la creation failed\n");
4527 4567 }
4528 4568 #else
4529 4569 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4530 4570 INET_MIN_DEV + 2, MAXMIN, KM_SLEEP)) == NULL) {
4531 4571 cmn_err(CE_PANIC,
4532 4572 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4533 4573 }
4534 4574 #endif
4535 4575 ip_poll_normal_ticks = MSEC_TO_TICK_ROUNDUP(ip_poll_normal_ms);
4536 4576
4537 4577 ipcl_g_init();
4538 4578 ip_ire_g_init();
4539 4579 ip_net_g_init();
[ 216 lines elided ]
4540 4580
4541 4581 #ifdef DEBUG
4542 4582 tsd_create(&ip_thread_data, ip_thread_exit);
4543 4583 rw_init(&ip_thread_rwlock, NULL, RW_DEFAULT, NULL);
4544 4584 list_create(&ip_thread_list, sizeof (th_hash_t),
4545 4585 offsetof(th_hash_t, thh_link));
4546 4586 #endif
4547 4587 ipsec_policy_g_init();
4548 4588 tcp_ddi_g_init();
4549 4589 sctp_ddi_g_init();
4590 + dccp_ddi_g_init();
4550 4591 dce_g_init();
4551 4592
4552 4593 /*
4553 4594 * We want to be informed each time a stack is created or
4554 4595 * destroyed in the kernel, so we can maintain the
4555 4596 	 * set of ip_stack_t's.
4556 4597 */
4557 4598 netstack_register(NS_IP, ip_stack_init, ip_stack_shutdown,
4558 4599 ip_stack_fini);
4559 4600
4560 4601 tnet_init();
4561 4602
4562 4603 udp_ddi_g_init();
4563 4604 rts_ddi_g_init();
4564 4605 icmp_ddi_g_init();
4565 4606 ilb_ddi_g_init();
4566 4607
4567 4608 /* This needs to be called after all transports are initialized. */
4568 4609 mutex_enter(&cpu_lock);
4569 4610 register_cpu_setup_func(ip_tp_cpu_update, NULL);
4570 4611 mutex_exit(&cpu_lock);
4571 4612 }
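
The only functional change in this hunk is wiring dccp_ddi_g_init() (and its _destroy counterpart earlier) in between the other ULP hooks. Since the DCCP module is only a starting template, the hook presumably mirrors its peers; a hedged sketch, in which NS_DCCP, dccp_stack_init and dccp_stack_fini are assumptions about the new module rather than established API:

    /* Hypothetical shape of the new DCCP global init/fini hooks. */
    void
    dccp_ddi_g_init(void)
    {
    	/* Register per-netstack create/destroy callbacks, as for NS_IP. */
    	netstack_register(NS_DCCP, dccp_stack_init, NULL, dccp_stack_fini);
    }

    void
    dccp_ddi_g_destroy(void)
    {
    	netstack_unregister(NS_DCCP);
    }

A fleshed-out transport would also create its kmem caches, kstats, and fanout tables here, as udp_ddi_g_init() and sctp_ddi_g_init() do.
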
4572 4613
4573 4614 /*
4574 4615 * Initialize the IP stack instance.
4575 4616 */
4576 4617 static void *
4577 4618 ip_stack_init(netstackid_t stackid, netstack_t *ns)
4578 4619 {
4579 4620 ip_stack_t *ipst;
4580 4621 size_t arrsz;
4581 4622 major_t major;
4582 4623
4583 4624 #ifdef NS_DEBUG
4584 4625 printf("ip_stack_init(stack %d)\n", stackid);
4585 4626 #endif
4586 4627
4587 4628 ipst = (ip_stack_t *)kmem_zalloc(sizeof (*ipst), KM_SLEEP);
4588 4629 ipst->ips_netstack = ns;
4589 4630
4590 4631 ipst->ips_ill_g_heads = kmem_zalloc(sizeof (ill_g_head_t) * MAX_G_HEADS,
4591 4632 KM_SLEEP);
4592 4633 ipst->ips_phyint_g_list = kmem_zalloc(sizeof (phyint_list_t),
4593 4634 KM_SLEEP);
4594 4635 ipst->ips_ndp4 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4595 4636 ipst->ips_ndp6 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4596 4637 mutex_init(&ipst->ips_ndp4->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4597 4638 mutex_init(&ipst->ips_ndp6->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4598 4639
4599 4640 mutex_init(&ipst->ips_igmp_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4600 4641 ipst->ips_igmp_deferred_next = INFINITY;
4601 4642 mutex_init(&ipst->ips_mld_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4602 4643 ipst->ips_mld_deferred_next = INFINITY;
4603 4644 mutex_init(&ipst->ips_igmp_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4604 4645 mutex_init(&ipst->ips_mld_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4605 4646 mutex_init(&ipst->ips_ip_mi_lock, NULL, MUTEX_DEFAULT, NULL);
4606 4647 mutex_init(&ipst->ips_ip_addr_avail_lock, NULL, MUTEX_DEFAULT, NULL);
4607 4648 rw_init(&ipst->ips_ill_g_lock, NULL, RW_DEFAULT, NULL);
4608 4649 rw_init(&ipst->ips_ill_g_usesrc_lock, NULL, RW_DEFAULT, NULL);
4609 4650
4610 4651 ipcl_init(ipst);
4611 4652 ip_ire_init(ipst);
4612 4653 ip6_asp_init(ipst);
4613 4654 ipif_init(ipst);
4614 4655 conn_drain_init(ipst);
4615 4656 ip_mrouter_stack_init(ipst);
4616 4657 dce_stack_init(ipst);
4617 4658
4618 4659 ipst->ips_ip_multirt_log_interval = 1000;
4619 4660
4620 4661 ipst->ips_ill_index = 1;
4621 4662
4622 4663 ipst->ips_saved_ip_forwarding = -1;
4623 4664 ipst->ips_reg_vif_num = ALL_VIFS; /* Index to Register vif */
4624 4665
4625 4666 arrsz = ip_propinfo_count * sizeof (mod_prop_info_t);
4626 4667 ipst->ips_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz, KM_SLEEP);
4627 4668 bcopy(ip_propinfo_tbl, ipst->ips_propinfo_tbl, arrsz);
4628 4669
4629 4670 ipst->ips_ip_mibkp = ip_kstat_init(stackid, ipst);
4630 4671 ipst->ips_icmp_mibkp = icmp_kstat_init(stackid);
4631 4672 ipst->ips_ip_kstat = ip_kstat2_init(stackid, &ipst->ips_ip_statistics);
4632 4673 ipst->ips_ip6_kstat =
4633 4674 ip6_kstat_init(stackid, &ipst->ips_ip6_statistics);
4634 4675
4635 4676 ipst->ips_ip_src_id = 1;
4636 4677 rw_init(&ipst->ips_srcid_lock, NULL, RW_DEFAULT, NULL);
4637 4678
4638 4679 ipst->ips_src_generation = SRC_GENERATION_INITIAL;
4639 4680
4640 4681 ip_net_init(ipst, ns);
4641 4682 ipv4_hook_init(ipst);
4642 4683 ipv6_hook_init(ipst);
4643 4684 arp_hook_init(ipst);
4644 4685 ipmp_init(ipst);
4645 4686 ipobs_init(ipst);
4646 4687
4647 4688 /*
4648 4689 * Create the taskq dispatcher thread and initialize related stuff.
4649 4690 */
4650 4691 mutex_init(&ipst->ips_capab_taskq_lock, NULL, MUTEX_DEFAULT, NULL);
4651 4692 cv_init(&ipst->ips_capab_taskq_cv, NULL, CV_DEFAULT, NULL);
4652 4693 ipst->ips_capab_taskq_thread = thread_create(NULL, 0,
4653 4694 ill_taskq_dispatch, ipst, 0, &p0, TS_RUN, minclsyspri);
4654 4695
4655 4696 major = mod_name_to_major(INET_NAME);
4656 4697 (void) ldi_ident_from_major(major, &ipst->ips_ldi_ident);
4657 4698 return (ipst);
4658 4699 }
4659 4700
4660 4701 /*
4661 4702 * Allocate and initialize a DLPI template of the specified length. (May be
4662 4703 * called as writer.)
4663 4704 */
4664 4705 mblk_t *
4665 4706 ip_dlpi_alloc(size_t len, t_uscalar_t prim)
4666 4707 {
4667 4708 mblk_t *mp;
4668 4709
4669 4710 mp = allocb(len, BPRI_MED);
4670 4711 if (!mp)
4671 4712 return (NULL);
4672 4713
4673 4714 /*
4674 4715 * DLPIv2 says that DL_INFO_REQ and DL_TOKEN_REQ (the latter
4675 4716 * of which we don't seem to use) are sent with M_PCPROTO, and
4676 4717 * that other DLPI are M_PROTO.
4677 4718 */
4678 4719 if (prim == DL_INFO_REQ) {
4679 4720 mp->b_datap->db_type = M_PCPROTO;
4680 4721 } else {
4681 4722 mp->b_datap->db_type = M_PROTO;
4682 4723 }
4683 4724
4684 4725 mp->b_wptr = mp->b_rptr + len;
4685 4726 bzero(mp->b_rptr, len);
4686 4727 ((dl_unitdata_req_t *)mp->b_rptr)->dl_primitive = prim;
4687 4728 return (mp);
4688 4729 }
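
A typical caller fills in the primitive-specific fields and sends the template down the control stream. An illustrative fragment (ppa is the caller's unit number; real callers go through ill_dlpi_send() so outstanding DLPI requests are serialized):

    mblk_t	*mp;

    if ((mp = ip_dlpi_alloc(sizeof (dl_attach_req_t),
        DL_ATTACH_REQ)) == NULL)
    	return (ENOMEM);
    ((dl_attach_req_t *)mp->b_rptr)->dl_ppa = ppa;
    ill_dlpi_send(ill, mp);
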
4689 4730
4690 4731 /*
4691 4732 * Allocate and initialize a DLPI notification. (May be called as writer.)
4692 4733 */
4693 4734 mblk_t *
4694 4735 ip_dlnotify_alloc(uint_t notification, uint_t data)
4695 4736 {
4696 4737 dl_notify_ind_t *notifyp;
4697 4738 mblk_t *mp;
4698 4739
4699 4740 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4700 4741 return (NULL);
4701 4742
4702 4743 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4703 4744 notifyp->dl_notification = notification;
4704 4745 notifyp->dl_data = data;
4705 4746 return (mp);
4706 4747 }
4707 4748
4708 4749 mblk_t *
4709 4750 ip_dlnotify_alloc2(uint_t notification, uint_t data1, uint_t data2)
4710 4751 {
4711 4752 dl_notify_ind_t *notifyp;
4712 4753 mblk_t *mp;
4713 4754
4714 4755 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4715 4756 return (NULL);
4716 4757
4717 4758 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4718 4759 notifyp->dl_notification = notification;
4719 4760 notifyp->dl_data1 = data1;
4720 4761 notifyp->dl_data2 = data2;
4721 4762 return (mp);
4722 4763 }
4723 4764
4724 4765 /*
4725 4766 * Debug formatting routine. Returns a character string representation of the
4726 4767  * addr in buf, of the form xxx.xxx.xxx.xxx. This routine takes the
4727 4768  * address in the form of an ipaddr_t and formats it into buf directly.
4728 4769 *
4729 4770 * Once the ndd table-printing interfaces are removed, this can be changed to
4730 4771 * standard dotted-decimal form.
4731 4772 */
4732 4773 char *
4733 4774 ip_dot_addr(ipaddr_t addr, char *buf)
4734 4775 {
4735 4776 uint8_t *ap = (uint8_t *)&addr;
4736 4777
4737 4778 (void) mi_sprintf(buf, "%03d.%03d.%03d.%03d",
4738 4779 ap[0] & 0xFF, ap[1] & 0xFF, ap[2] & 0xFF, ap[3] & 0xFF);
4739 4780 return (buf);
4740 4781 }
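
The caller owns the buffer; the zero-padded format needs at most 16 bytes (four 3-digit groups, three dots, and the NUL). A hedged usage fragment:

    char	abuf[16];

    ip1dbg(("bad source %s\n", ip_dot_addr(ipha->ipha_src, abuf)));
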
4741 4782
4742 4783 /*
4743 4784 * Write the given MAC address as a printable string in the usual colon-
4744 4785 * separated format.
4745 4786 */
4746 4787 const char *
4747 4788 mac_colon_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
4748 4789 {
4749 4790 char *bp;
4750 4791
4751 4792 if (alen == 0 || buflen < 4)
4752 4793 return ("?");
4753 4794 bp = buf;
4754 4795 for (;;) {
4755 4796 /*
4756 4797 * If there are more MAC address bytes available, but we won't
4757 4798 * have any room to print them, then add "..." to the string
4758 4799 * instead. See below for the 'magic number' explanation.
4759 4800 */
4760 4801 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
4761 4802 (void) strcpy(bp, "...");
4762 4803 break;
4763 4804 }
4764 4805 (void) sprintf(bp, "%02x", *addr++);
4765 4806 bp += 2;
4766 4807 if (--alen == 0)
4767 4808 break;
4768 4809 *bp++ = ':';
4769 4810 buflen -= 3;
4770 4811 /*
4771 4812 * At this point, based on the first 'if' statement above,
4772 4813 * either alen == 1 and buflen >= 3, or alen > 1 and
4773 4814 * buflen >= 4. The first case leaves room for the final "xx"
4774 4815 * number and trailing NUL byte. The second leaves room for at
4775 4816 * least "...". Thus the apparently 'magic' numbers chosen for
4776 4817 * that statement.
4777 4818 */
4778 4819 }
4779 4820 return (buf);
4780 4821 }
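
For a 6-byte Ethernet address the result is 17 characters plus the NUL, so an 18-byte buffer is enough; budget roughly 3 bytes per octet for longer addresses. An illustrative call (fields as in ill_t; not taken from this file):

    char	macbuf[18];

    (void) mac_colon_addr(ill->ill_phys_addr, ill->ill_phys_addr_length,
        macbuf, sizeof (macbuf));
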
4781 4822
4782 4823 /*
4783 4824  * Called when it is conceptually a ULP that would have sent the packet,
4784 4825 * e.g., port unreachable and protocol unreachable. Check that the packet
4785 4826 * would have passed the IPsec global policy before sending the error.
4786 4827 *
4787 4828 * Send an ICMP error after patching up the packet appropriately.
4788 4829 * Uses ip_drop_input and bumps the appropriate MIB.
4789 4830 */
4790 4831 void
4791 4832 ip_fanout_send_icmp_v4(mblk_t *mp, uint_t icmp_type, uint_t icmp_code,
4792 4833 ip_recv_attr_t *ira)
4793 4834 {
4794 4835 ipha_t *ipha;
4795 4836 boolean_t secure;
4796 4837 ill_t *ill = ira->ira_ill;
4797 4838 ip_stack_t *ipst = ill->ill_ipst;
4798 4839 netstack_t *ns = ipst->ips_netstack;
4799 4840 ipsec_stack_t *ipss = ns->netstack_ipsec;
4800 4841
4801 4842 secure = ira->ira_flags & IRAF_IPSEC_SECURE;
4802 4843
4803 4844 /*
4804 4845 * We are generating an icmp error for some inbound packet.
4805 4846 * Called from all ip_fanout_(udp, tcp, proto) functions.
4806 4847 * Before we generate an error, check with global policy
4807 4848 * to see whether this is allowed to enter the system. As
4808 4849 * there is no "conn", we are checking with global policy.
4809 4850 */
4810 4851 ipha = (ipha_t *)mp->b_rptr;
4811 4852 if (secure || ipss->ipsec_inbound_v4_policy_present) {
4812 4853 mp = ipsec_check_global_policy(mp, NULL, ipha, NULL, ira, ns);
4813 4854 if (mp == NULL)
4814 4855 return;
4815 4856 }
4816 4857
4817 4858 /* We never send errors for protocols that we do implement */
4818 4859 if (ira->ira_protocol == IPPROTO_ICMP ||
4819 4860 ira->ira_protocol == IPPROTO_IGMP) {
4820 4861 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4821 4862 ip_drop_input("ip_fanout_send_icmp_v4", mp, ill);
4822 4863 freemsg(mp);
4823 4864 return;
4824 4865 }
4825 4866 /*
4826 4867 * Have to correct checksum since
4827 4868 * the packet might have been
4828 4869 * fragmented and the reassembly code in ip_rput
4829 4870 * does not restore the IP checksum.
4830 4871 */
4831 4872 ipha->ipha_hdr_checksum = 0;
4832 4873 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
4833 4874
4834 4875 switch (icmp_type) {
4835 4876 case ICMP_DEST_UNREACHABLE:
4836 4877 switch (icmp_code) {
4837 4878 case ICMP_PROTOCOL_UNREACHABLE:
4838 4879 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInUnknownProtos);
4839 4880 ip_drop_input("ipIfStatsInUnknownProtos", mp, ill);
4840 4881 break;
4841 4882 case ICMP_PORT_UNREACHABLE:
4842 4883 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
4843 4884 ip_drop_input("ipIfStatsNoPorts", mp, ill);
4844 4885 break;
4845 4886 }
4846 4887
4847 4888 icmp_unreachable(mp, icmp_code, ira);
4848 4889 break;
4849 4890 default:
4850 4891 #ifdef DEBUG
4851 4892 panic("ip_fanout_send_icmp_v4: wrong type");
4852 4893 /*NOTREACHED*/
4853 4894 #else
4854 4895 freemsg(mp);
4855 4896 break;
4856 4897 #endif
4857 4898 }
4858 4899 }
4859 4900
4860 4901 /*
4861 4902 * Used to send an ICMP error message when a packet is received for
4862 4903 * a protocol that is not supported. The mblk passed as argument
4863 4904 * is consumed by this function.
4864 4905 */
4865 4906 void
4866 4907 ip_proto_not_sup(mblk_t *mp, ip_recv_attr_t *ira)
4867 4908 {
4868 4909 ipha_t *ipha;
4869 4910
4870 4911 ipha = (ipha_t *)mp->b_rptr;
4871 4912 if (ira->ira_flags & IRAF_IS_IPV4) {
4872 4913 ASSERT(IPH_HDR_VERSION(ipha) == IP_VERSION);
4873 4914 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
4874 4915 ICMP_PROTOCOL_UNREACHABLE, ira);
4875 4916 } else {
4876 4917 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION);
4877 4918 ip_fanout_send_icmp_v6(mp, ICMP6_PARAM_PROB,
4878 4919 ICMP6_PARAMPROB_NEXTHEADER, ira);
4879 4920 }
4880 4921 }
4881 4922
4882 4923 /*
4883 4924 * Deliver a rawip packet to the given conn, possibly applying ipsec policy.
4884 4925 * Handles IPv4 and IPv6.
4885 4926 * We are responsible for disposing of mp, such as by freemsg() or putnext()
4886 4927 * Caller is responsible for dropping references to the conn.
4887 4928 */
4888 4929 void
4889 4930 ip_fanout_proto_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4890 4931 ip_recv_attr_t *ira)
4891 4932 {
4892 4933 ill_t *ill = ira->ira_ill;
4893 4934 ip_stack_t *ipst = ill->ill_ipst;
4894 4935 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
4895 4936 boolean_t secure;
4896 4937 uint_t protocol = ira->ira_protocol;
4897 4938 iaflags_t iraflags = ira->ira_flags;
4898 4939 queue_t *rq;
4899 4940
4900 4941 secure = iraflags & IRAF_IPSEC_SECURE;
4901 4942
4902 4943 rq = connp->conn_rq;
4903 4944 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
4904 4945 switch (protocol) {
4905 4946 case IPPROTO_ICMPV6:
4906 4947 BUMP_MIB(ill->ill_icmp6_mib, ipv6IfIcmpInOverflows);
4907 4948 break;
4908 4949 case IPPROTO_ICMP:
4909 4950 BUMP_MIB(&ipst->ips_icmp_mib, icmpInOverflows);
4910 4951 break;
4911 4952 default:
4912 4953 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
4913 4954 break;
4914 4955 }
4915 4956 freemsg(mp);
4916 4957 return;
4917 4958 }
4918 4959
4919 4960 ASSERT(!(IPCL_IS_IPTUN(connp)));
4920 4961
4921 4962 if (((iraflags & IRAF_IS_IPV4) ?
4922 4963 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
4923 4964 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
4924 4965 secure) {
4925 4966 mp = ipsec_check_inbound_policy(mp, connp, ipha,
4926 4967 ip6h, ira);
4927 4968 if (mp == NULL) {
4928 4969 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4929 4970 /* Note that mp is NULL */
4930 4971 ip_drop_input("ipIfStatsInDiscards", mp, ill);
4931 4972 return;
4932 4973 }
4933 4974 }
4934 4975
4935 4976 if (iraflags & IRAF_ICMP_ERROR) {
4936 4977 (connp->conn_recvicmp)(connp, mp, NULL, ira);
4937 4978 } else {
4938 4979 ill_t *rill = ira->ira_rill;
4939 4980
4940 4981 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
4941 4982 ira->ira_ill = ira->ira_rill = NULL;
4942 4983 /* Send it upstream */
4943 4984 (connp->conn_recv)(connp, mp, NULL, ira);
4944 4985 ira->ira_ill = ill;
4945 4986 ira->ira_rill = rill;
4946 4987 }
4947 4988 }
4948 4989
4949 4990 /*
4950 4991 * Handle protocols with which IP is less intimate. There
4951 4992 * can be more than one stream bound to a particular
4952 4993 * protocol. When this is the case, normally each one gets a copy
4953 4994 * of any incoming packets.
4954 4995 *
4955 4996  * IPsec NOTE:
4956 4997 *
4957 4998 * Don't allow a secure packet going up a non-secure connection.
4958 4999 * We don't allow this because
4959 5000 *
4960 5001 * 1) Reply might go out in clear which will be dropped at
4961 5002 * the sending side.
4962 5003 * 2) If the reply goes out in clear it will give the
4963 5004 * adversary enough information for getting the key in
4964 5005 * most of the cases.
4965 5006 *
4966 5007 * Moreover getting a secure packet when we expect clear
4967 5008 * implies that SA's were added without checking for
4968 5009 * policy on both ends. This should not happen once ISAKMP
4969 5010 * is used to negotiate SAs as SAs will be added only after
4970 5011 * verifying the policy.
4971 5012 *
4972 5013 * Zones notes:
4973 5014 * Earlier in ip_input on a system with multiple shared-IP zones we
4974 5015 * duplicate the multicast and broadcast packets and send them up
4975 5016 * with each explicit zoneid that exists on that ill.
4976 5017 * This means that here we can match the zoneid with SO_ALLZONES being special.
4977 5018 */
4978 5019 void
4979 5020 ip_fanout_proto_v4(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
4980 5021 {
4981 5022 mblk_t *mp1;
4982 5023 ipaddr_t laddr;
4983 5024 conn_t *connp, *first_connp, *next_connp;
4984 5025 connf_t *connfp;
4985 5026 ill_t *ill = ira->ira_ill;
4986 5027 ip_stack_t *ipst = ill->ill_ipst;
4987 5028
4988 5029 laddr = ipha->ipha_dst;
4989 5030
4990 5031 connfp = &ipst->ips_ipcl_proto_fanout_v4[ira->ira_protocol];
4991 5032 mutex_enter(&connfp->connf_lock);
4993 5034 for (connp = connfp->connf_head; connp != NULL;
4994 5035 connp = connp->conn_next) {
4995 5036 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
4996 5037 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
4997 5038 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
4998 5039 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) {
4999 5040 break;
5000 5041 }
5001 5042 }
5002 5043
5003 5044 if (connp == NULL) {
5004 5045 /*
5005 5046 * No one bound to these addresses. Is
5006 5047 * there a client that wants all
5007 5048 * unclaimed datagrams?
5008 5049 */
5009 5050 mutex_exit(&connfp->connf_lock);
5010 5051 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
5011 5052 ICMP_PROTOCOL_UNREACHABLE, ira);
5012 5053 return;
5013 5054 }
5014 5055
5015 5056 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5016 5057
5017 5058 CONN_INC_REF(connp);
5018 5059 first_connp = connp;
5019 5060 connp = connp->conn_next;
5020 5061
5021 5062 for (;;) {
5022 5063 while (connp != NULL) {
5023 5064 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
5024 5065 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
5025 5066 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5026 5067 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5027 5068 ira, connp)))
5028 5069 break;
5029 5070 connp = connp->conn_next;
5030 5071 }
5031 5072
5032 5073 if (connp == NULL) {
5033 5074 /* No more interested clients */
5034 5075 connp = first_connp;
5035 5076 break;
5036 5077 }
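		/*
		 * dupmsg() only takes extra references on the data blocks,
		 * so it is cheap but can fail (header allocation failure or
		 * db_ref saturation); copymsg() is the expensive fallback
		 * that copies the data.
		 */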
5037 5078 if (((mp1 = dupmsg(mp)) == NULL) &&
5038 5079 ((mp1 = copymsg(mp)) == NULL)) {
5039 5080 /* Memory allocation failed */
5040 5081 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5041 5082 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5042 5083 connp = first_connp;
5043 5084 break;
5044 5085 }
5045 5086
5046 5087 CONN_INC_REF(connp);
5047 5088 mutex_exit(&connfp->connf_lock);
5048 5089
5049 5090 ip_fanout_proto_conn(connp, mp1, (ipha_t *)mp1->b_rptr, NULL,
5050 5091 ira);
5051 5092
5052 5093 mutex_enter(&connfp->connf_lock);
5053 5094 /* Follow the next pointer before releasing the conn. */
5054 5095 next_connp = connp->conn_next;
5055 5096 CONN_DEC_REF(connp);
5056 5097 connp = next_connp;
5057 5098 }
5058 5099
5059 5100 /* Last one. Send it upstream. */
5060 5101 mutex_exit(&connfp->connf_lock);
5061 5102
5062 5103 ip_fanout_proto_conn(connp, mp, ipha, NULL, ira);
5063 5104
5064 5105 CONN_DEC_REF(connp);
5065 5106 }
5066 5107
5067 5108 /*
5068 5109  * If we have an IPsec NAT-Traversal packet, strip the zero-SPI or
5069 5110 * pass it along to ESP if the SPI is non-zero. Returns the mblk if the mblk
5070 5111 * is not consumed.
5071 5112 *
5072 5113 * One of three things can happen, all of which affect the passed-in mblk:
5073 5114 *
5074 5115  * 1.) The packet is stock UDP and gets its zero-SPI stripped. Return mblk.
5075 5116 *
5076 5117 * 2.) The packet is ESP-in-UDP, gets transformed into an equivalent
5077 5118 * ESP packet, and is passed along to ESP for consumption. Return NULL.
5078 5119 *
5079 5120 * 3.) The packet is an ESP-in-UDP Keepalive. Drop it and return NULL.
5080 5121 */
5081 5122 mblk_t *
5082 5123 zero_spi_check(mblk_t *mp, ip_recv_attr_t *ira)
5083 5124 {
5084 5125 int shift, plen, iph_len;
5085 5126 ipha_t *ipha;
5086 5127 udpha_t *udpha;
5087 5128 uint32_t *spi;
5088 5129 uint32_t esp_ports;
5089 5130 uint8_t *orptr;
5090 5131 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
5091 5132 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5092 5133
5093 5134 ipha = (ipha_t *)mp->b_rptr;
5094 5135 iph_len = ira->ira_ip_hdr_length;
5095 5136 plen = ira->ira_pktlen;
5096 5137
5097 5138 if (plen - iph_len - sizeof (udpha_t) < sizeof (uint32_t)) {
5098 5139 /*
5099 5140 * Most likely a keepalive for the benefit of an intervening
5100 5141 * NAT. These aren't for us, per se, so drop it.
5101 5142 *
5102 5143 * RFC 3947/8 doesn't say for sure what to do for 2-3
5103 5144 * byte packets (keepalives are 1-byte), but we'll drop them
5104 5145 * also.
5105 5146 */
5106 5147 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5107 5148 DROPPER(ipss, ipds_esp_nat_t_ka), &ipss->ipsec_dropper);
5108 5149 return (NULL);
5109 5150 }
5110 5151
5111 5152 if (MBLKL(mp) < iph_len + sizeof (udpha_t) + sizeof (*spi)) {
5112 5153 /* might as well pull it all up - it might be ESP. */
5113 5154 if (!pullupmsg(mp, -1)) {
5114 5155 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5115 5156 DROPPER(ipss, ipds_esp_nomem),
5116 5157 &ipss->ipsec_dropper);
5117 5158 return (NULL);
5118 5159 }
5119 5160
5120 5161 ipha = (ipha_t *)mp->b_rptr;
5121 5162 }
5122 5163 spi = (uint32_t *)(mp->b_rptr + iph_len + sizeof (udpha_t));
5123 5164 if (*spi == 0) {
5124 5165 /* UDP packet - remove 0-spi. */
5125 5166 shift = sizeof (uint32_t);
5126 5167 } else {
5127 5168 /* ESP-in-UDP packet - reduce to ESP. */
5128 5169 ipha->ipha_protocol = IPPROTO_ESP;
5129 5170 shift = sizeof (udpha_t);
5130 5171 }
5131 5172
5132 5173 /* Fix IP header */
5133 5174 ira->ira_pktlen = (plen - shift);
5134 5175 ipha->ipha_length = htons(ira->ira_pktlen);
5135 5176 ipha->ipha_hdr_checksum = 0;
5136 5177
5137 5178 orptr = mp->b_rptr;
5138 5179 mp->b_rptr += shift;
5139 5180
5140 5181 udpha = (udpha_t *)(orptr + iph_len);
5141 5182 if (*spi == 0) {
5142 5183 ASSERT((uint8_t *)ipha == orptr);
5143 5184 udpha->uha_length = htons(plen - shift - iph_len);
5144 5185 iph_len += sizeof (udpha_t); /* For the call to ovbcopy(). */
5145 5186 esp_ports = 0;
5146 5187 } else {
5147 5188 esp_ports = *((uint32_t *)udpha);
5148 5189 ASSERT(esp_ports != 0);
5149 5190 }
5150 5191 ovbcopy(orptr, orptr + shift, iph_len);
5151 5192 if (esp_ports != 0) /* Punt up for ESP processing. */ {
5152 5193 ipha = (ipha_t *)(orptr + shift);
5153 5194
5154 5195 ira->ira_flags |= IRAF_ESP_UDP_PORTS;
5155 5196 ira->ira_esp_udp_ports = esp_ports;
5156 5197 ip_fanout_v4(mp, ipha, ira);
5157 5198 return (NULL);
5158 5199 }
5159 5200 return (mp);
5160 5201 }
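
The shift/ovbcopy() arithmetic above is easier to see as a layout: the headers are slid forward over the bytes being removed, so the packet stays contiguous. Roughly, for the two non-keepalive cases:

    stock UDP (zero SPI), shift = 4:
        before:  [ IP | UDP | 00 00 00 00 | payload ]
        after:       [ IP | UDP | payload ]          (both headers moved up)

    ESP-in-UDP, shift = 8 (the UDP header itself):
        before:  [ IP | UDP | ESP ... ]
        after:       [ IP, proto = ESP | ESP ... ]   (only IP header moved)
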
5161 5202
5162 5203 /*
5163 5204 * Deliver a udp packet to the given conn, possibly applying ipsec policy.
5164 5205 * Handles IPv4 and IPv6.
5165 5206 * We are responsible for disposing of mp, such as by freemsg() or putnext()
5166 5207 * Caller is responsible for dropping references to the conn.
5167 5208 */
5168 5209 void
5169 5210 ip_fanout_udp_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
5170 5211 ip_recv_attr_t *ira)
5171 5212 {
5172 5213 ill_t *ill = ira->ira_ill;
5173 5214 ip_stack_t *ipst = ill->ill_ipst;
5174 5215 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5175 5216 boolean_t secure;
5176 5217 iaflags_t iraflags = ira->ira_flags;
5177 5218
5178 5219 secure = iraflags & IRAF_IPSEC_SECURE;
5179 5220
5180 5221 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld :
5181 5222 !canputnext(connp->conn_rq)) {
5182 5223 BUMP_MIB(ill->ill_ip_mib, udpIfStatsInOverflows);
5183 5224 freemsg(mp);
5184 5225 return;
5185 5226 }
5186 5227
5187 5228 if (((iraflags & IRAF_IS_IPV4) ?
5188 5229 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
5189 5230 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
5190 5231 secure) {
5191 5232 mp = ipsec_check_inbound_policy(mp, connp, ipha,
5192 5233 ip6h, ira);
5193 5234 if (mp == NULL) {
5194 5235 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5195 5236 /* Note that mp is NULL */
5196 5237 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5197 5238 return;
5198 5239 }
5199 5240 }
5200 5241
5201 5242 /*
5202 5243 * Since this code is not used for UDP unicast we don't need a NAT_T
5203 5244 * check. Only ip_fanout_v4 has that check.
5204 5245 */
5205 5246 if (ira->ira_flags & IRAF_ICMP_ERROR) {
5206 5247 (connp->conn_recvicmp)(connp, mp, NULL, ira);
5207 5248 } else {
5208 5249 ill_t *rill = ira->ira_rill;
5209 5250
5210 5251 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
5211 5252 ira->ira_ill = ira->ira_rill = NULL;
5212 5253 /* Send it upstream */
5213 5254 (connp->conn_recv)(connp, mp, NULL, ira);
5214 5255 ira->ira_ill = ill;
5215 5256 ira->ira_rill = rill;
5216 5257 }
5217 5258 }
5218 5259
5219 5260 /*
5220 5261 * Fanout for UDP packets that are multicast or broadcast, and ICMP errors.
5221 5262 * (Unicast fanout is handled in ip_input_v4.)
5222 5263 *
5223 5264 * If SO_REUSEADDR is set all multicast and broadcast packets
5224 5265 * will be delivered to all conns bound to the same port.
5225 5266 *
5226 5267 * If there is at least one matching AF_INET receiver, then we will
5227 5268 * ignore any AF_INET6 receivers.
5228 5269 * In the special case where an AF_INET socket binds to 0.0.0.0/<port> and an
5229 5270 * AF_INET6 socket binds to ::/<port>, only the AF_INET socket receives the IPv4
5230 5271 * packets.
5231 5272 *
5232 5273 * Zones notes:
5233 5274 * Earlier in ip_input on a system with multiple shared-IP zones we
5234 5275 * duplicate the multicast and broadcast packets and send them up
5235 5276 * with each explicit zoneid that exists on that ill.
5236 5277 * This means that here we can match the zoneid with SO_ALLZONES being special.
5237 5278 */
5238 5279 void
5239 5280 ip_fanout_udp_multi_v4(mblk_t *mp, ipha_t *ipha, uint16_t lport, uint16_t fport,
5240 5281 ip_recv_attr_t *ira)
5241 5282 {
5242 5283 ipaddr_t laddr;
5243 5284 in6_addr_t v6faddr;
5244 5285 conn_t *connp;
5245 5286 connf_t *connfp;
5246 5287 ipaddr_t faddr;
5247 5288 ill_t *ill = ira->ira_ill;
5248 5289 ip_stack_t *ipst = ill->ill_ipst;
5249 5290
5250 5291 ASSERT(ira->ira_flags & (IRAF_MULTIBROADCAST|IRAF_ICMP_ERROR));
5251 5292
5252 5293 laddr = ipha->ipha_dst;
5253 5294 faddr = ipha->ipha_src;
5254 5295
5255 5296 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5256 5297 mutex_enter(&connfp->connf_lock);
5257 5298 connp = connfp->connf_head;
5258 5299
5259 5300 /*
5260 5301 	 * If SO_REUSEADDR has been set on the first matching conn, we
5261 5302 	 * send the packet to all clients that have joined the group and
5262 5303 	 * match the port.
5263 5304 */
5264 5305 while (connp != NULL) {
5265 5306 if ((IPCL_UDP_MATCH(connp, lport, laddr, fport, faddr)) &&
5266 5307 conn_wantpacket(connp, ira, ipha) &&
5267 5308 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5268 5309 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5269 5310 break;
5270 5311 connp = connp->conn_next;
5271 5312 }
5272 5313
5273 5314 if (connp == NULL)
5274 5315 goto notfound;
5275 5316
5276 5317 CONN_INC_REF(connp);
5277 5318
5278 5319 if (connp->conn_reuseaddr) {
5279 5320 conn_t *first_connp = connp;
5280 5321 conn_t *next_connp;
5281 5322 mblk_t *mp1;
5282 5323
5283 5324 connp = connp->conn_next;
5284 5325 for (;;) {
5285 5326 while (connp != NULL) {
5286 5327 if (IPCL_UDP_MATCH(connp, lport, laddr,
5287 5328 fport, faddr) &&
5288 5329 conn_wantpacket(connp, ira, ipha) &&
5289 5330 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5290 5331 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5291 5332 ira, connp)))
5292 5333 break;
5293 5334 connp = connp->conn_next;
5294 5335 }
5295 5336 if (connp == NULL) {
5296 5337 /* No more interested clients */
5297 5338 connp = first_connp;
5298 5339 break;
5299 5340 }
5300 5341 if (((mp1 = dupmsg(mp)) == NULL) &&
5301 5342 ((mp1 = copymsg(mp)) == NULL)) {
5302 5343 /* Memory allocation failed */
5303 5344 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5304 5345 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5305 5346 connp = first_connp;
5306 5347 break;
5307 5348 }
5308 5349 CONN_INC_REF(connp);
5309 5350 mutex_exit(&connfp->connf_lock);
5310 5351
5311 5352 IP_STAT(ipst, ip_udp_fanmb);
5312 5353 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5313 5354 NULL, ira);
5314 5355 mutex_enter(&connfp->connf_lock);
5315 5356 /* Follow the next pointer before releasing the conn */
5316 5357 next_connp = connp->conn_next;
5317 5358 CONN_DEC_REF(connp);
5318 5359 connp = next_connp;
5319 5360 }
5320 5361 }
5321 5362
5322 5363 /* Last one. Send it upstream. */
5323 5364 mutex_exit(&connfp->connf_lock);
5324 5365 IP_STAT(ipst, ip_udp_fanmb);
5325 5366 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5326 5367 CONN_DEC_REF(connp);
5327 5368 return;
5328 5369
5329 5370 notfound:
5330 5371 mutex_exit(&connfp->connf_lock);
5331 5372 /*
5332 5373 * IPv6 endpoints bound to multicast IPv4-mapped addresses
5333 5374 * have already been matched above, since they live in the IPv4
5334 5375 * fanout tables. This implies we only need to
5335 5376 * check for IPv6 in6addr_any endpoints here.
5336 5377 * Thus we compare using ipv6_all_zeros instead of the destination
5337 5378 * address, except for the multicast group membership lookup which
5338 5379 * uses the IPv4 destination.
5339 5380 */
5340 5381 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &v6faddr);
5341 5382 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5342 5383 mutex_enter(&connfp->connf_lock);
5343 5384 connp = connfp->connf_head;
5344 5385 /*
5345 5386 * IPv4 multicast packet being delivered to an AF_INET6
5346 5387 * in6addr_any endpoint.
5347 5388 * Need to check conn_wantpacket(). Note that we use conn_wantpacket()
5348 5389 * and not conn_wantpacket_v6() since any multicast membership is
5349 5390 * for an IPv4-mapped multicast address.
5350 5391 */
5351 5392 while (connp != NULL) {
5352 5393 if (IPCL_UDP_MATCH_V6(connp, lport, ipv6_all_zeros,
5353 5394 fport, v6faddr) &&
5354 5395 conn_wantpacket(connp, ira, ipha) &&
5355 5396 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5356 5397 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5357 5398 break;
5358 5399 connp = connp->conn_next;
5359 5400 }
5360 5401
5361 5402 if (connp == NULL) {
5362 5403 /*
5363 5404 * No one bound to this port. Is
5364 5405 * there a client that wants all
5365 5406 * unclaimed datagrams?
5366 5407 */
5367 5408 mutex_exit(&connfp->connf_lock);
5368 5409
5369 5410 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_UDP].connf_head !=
5370 5411 NULL) {
5371 5412 ASSERT(ira->ira_protocol == IPPROTO_UDP);
5372 5413 ip_fanout_proto_v4(mp, ipha, ira);
5373 5414 } else {
5374 5415 /*
5375 5416 * We used to attempt to send an icmp error here, but
5376 5417 * since this is known to be a multicast packet
5377 5418 * and we don't send icmp errors in response to
5378 5419 * multicast, just drop the packet and give up sooner.
5379 5420 */
5380 5421 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
5381 5422 freemsg(mp);
5382 5423 }
5383 5424 return;
5384 5425 }
5385 5426 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5386 5427
5387 5428 /*
5388 5429 	 * If SO_REUSEADDR has been set on the first matching conn, we
5389 5430 	 * send the packet to all clients that have joined the group and
5390 5431 	 * match the port.
5391 5432 */
5392 5433 if (connp->conn_reuseaddr) {
5393 5434 conn_t *first_connp = connp;
5394 5435 conn_t *next_connp;
5395 5436 mblk_t *mp1;
5396 5437
5397 5438 CONN_INC_REF(connp);
5398 5439 connp = connp->conn_next;
5399 5440 for (;;) {
5400 5441 while (connp != NULL) {
5401 5442 if (IPCL_UDP_MATCH_V6(connp, lport,
5402 5443 ipv6_all_zeros, fport, v6faddr) &&
5403 5444 conn_wantpacket(connp, ira, ipha) &&
5404 5445 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5405 5446 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5406 5447 ira, connp)))
5407 5448 break;
5408 5449 connp = connp->conn_next;
5409 5450 }
5410 5451 if (connp == NULL) {
5411 5452 /* No more interested clients */
5412 5453 connp = first_connp;
5413 5454 break;
5414 5455 }
5415 5456 if (((mp1 = dupmsg(mp)) == NULL) &&
5416 5457 ((mp1 = copymsg(mp)) == NULL)) {
5417 5458 /* Memory allocation failed */
5418 5459 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5419 5460 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5420 5461 connp = first_connp;
5421 5462 break;
5422 5463 }
5423 5464 CONN_INC_REF(connp);
5424 5465 mutex_exit(&connfp->connf_lock);
5425 5466
5426 5467 IP_STAT(ipst, ip_udp_fanmb);
5427 5468 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5428 5469 NULL, ira);
5429 5470 mutex_enter(&connfp->connf_lock);
5430 5471 /* Follow the next pointer before releasing the conn */
5431 5472 next_connp = connp->conn_next;
5432 5473 CONN_DEC_REF(connp);
5433 5474 connp = next_connp;
5434 5475 }
5435 5476 }
5436 5477
5437 5478 /* Last one. Send it upstream. */
5438 5479 mutex_exit(&connfp->connf_lock);
5439 5480 IP_STAT(ipst, ip_udp_fanmb);
5440 5481 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5441 5482 CONN_DEC_REF(connp);
5442 5483 }
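
Seen from userland, the REUSEADDR fanout above is what lets several processes share one multicast UDP port. A minimal user-level sketch using the standard sockets API (the group and port are arbitrary examples):

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    int
    join_group(void)
    {
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);
    	int on = 1;
    	struct sockaddr_in sin;
    	struct ip_mreq mreq;

    	(void) setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof (on));
    	memset(&sin, 0, sizeof (sin));
    	sin.sin_family = AF_INET;
    	sin.sin_port = htons(5353);
    	sin.sin_addr.s_addr = htonl(INADDR_ANY);
    	(void) bind(fd, (struct sockaddr *)&sin, sizeof (sin));
    	mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.251");
    	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
    	(void) setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
    	    &mreq, sizeof (mreq));
    	return (fd);	/* each such socket sees its own copy */
    }
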
5443 5484
5444 5485 /*
5445 5486 * Split an incoming packet's IPv4 options into the label and the other options.
5446 5487 * If 'allocate' is set it does memory allocation for the ip_pkt_t, including
5447 5488 * clearing out any leftover label or options.
5448 5489 * Otherwise it just makes ipp point into the packet.
5449 5490 *
5450 5491 * Returns zero if ok; ENOMEM if the buffer couldn't be allocated.
5451 5492 */
5452 5493 int
5453 5494 ip_find_hdr_v4(ipha_t *ipha, ip_pkt_t *ipp, boolean_t allocate)
5454 5495 {
5455 5496 uchar_t *opt;
5456 5497 uint32_t totallen;
5457 5498 uint32_t optval;
5458 5499 uint32_t optlen;
5459 5500
5460 5501 ipp->ipp_fields |= IPPF_HOPLIMIT | IPPF_TCLASS | IPPF_ADDR;
5461 5502 ipp->ipp_hoplimit = ipha->ipha_ttl;
5462 5503 ipp->ipp_type_of_service = ipha->ipha_type_of_service;
5463 5504 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &ipp->ipp_addr);
5464 5505
5465 5506 /*
5466 5507 * Get length (in 4 byte octets) of IP header options.
5467 5508 */
5468 5509 totallen = ipha->ipha_version_and_hdr_length -
5469 5510 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5470 5511
5471 5512 if (totallen == 0) {
5472 5513 if (!allocate)
5473 5514 return (0);
5474 5515
5475 5516 /* Clear out anything from a previous packet */
5476 5517 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5477 5518 kmem_free(ipp->ipp_ipv4_options,
5478 5519 ipp->ipp_ipv4_options_len);
5479 5520 ipp->ipp_ipv4_options = NULL;
5480 5521 ipp->ipp_ipv4_options_len = 0;
5481 5522 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5482 5523 }
5483 5524 if (ipp->ipp_fields & IPPF_LABEL_V4) {
5484 5525 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5485 5526 ipp->ipp_label_v4 = NULL;
5486 5527 ipp->ipp_label_len_v4 = 0;
5487 5528 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5488 5529 }
5489 5530 return (0);
5490 5531 }
5491 5532
5492 5533 totallen <<= 2;
5493 5534 opt = (uchar_t *)&ipha[1];
5494 5535 if (!is_system_labeled()) {
5495 5536
5496 5537 copyall:
5497 5538 if (!allocate) {
5498 5539 if (totallen != 0) {
5499 5540 ipp->ipp_ipv4_options = opt;
5500 5541 ipp->ipp_ipv4_options_len = totallen;
5501 5542 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5502 5543 }
5503 5544 return (0);
5504 5545 }
5505 5546 /* Just copy all of options */
5506 5547 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5507 5548 if (totallen == ipp->ipp_ipv4_options_len) {
5508 5549 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5509 5550 return (0);
5510 5551 }
5511 5552 kmem_free(ipp->ipp_ipv4_options,
5512 5553 ipp->ipp_ipv4_options_len);
5513 5554 ipp->ipp_ipv4_options = NULL;
5514 5555 ipp->ipp_ipv4_options_len = 0;
5515 5556 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5516 5557 }
5517 5558 if (totallen == 0)
5518 5559 return (0);
5519 5560
5520 5561 ipp->ipp_ipv4_options = kmem_alloc(totallen, KM_NOSLEEP);
5521 5562 if (ipp->ipp_ipv4_options == NULL)
5522 5563 return (ENOMEM);
5523 5564 ipp->ipp_ipv4_options_len = totallen;
5524 5565 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5525 5566 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5526 5567 return (0);
5527 5568 }
5528 5569
5529 5570 if (allocate && (ipp->ipp_fields & IPPF_LABEL_V4)) {
5530 5571 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5531 5572 ipp->ipp_label_v4 = NULL;
5532 5573 ipp->ipp_label_len_v4 = 0;
5533 5574 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5534 5575 }
5535 5576
5536 5577 /*
5537 5578 * Search for CIPSO option.
5538 5579 * We assume CIPSO is first in options if it is present.
5539 5580  * If it isn't, then ipp_ipv4_options will not include the options
5540 5581 * prior to the CIPSO option.
5541 5582 */
5542 5583 while (totallen != 0) {
5543 5584 switch (optval = opt[IPOPT_OPTVAL]) {
5544 5585 case IPOPT_EOL:
5545 5586 return (0);
5546 5587 case IPOPT_NOP:
5547 5588 optlen = 1;
5548 5589 break;
5549 5590 default:
5550 5591 if (totallen <= IPOPT_OLEN)
5551 5592 return (EINVAL);
5552 5593 optlen = opt[IPOPT_OLEN];
5553 5594 if (optlen < 2)
5554 5595 return (EINVAL);
5555 5596 }
5556 5597 if (optlen > totallen)
5557 5598 return (EINVAL);
5558 5599
5559 5600 switch (optval) {
5560 5601 case IPOPT_COMSEC:
5561 5602 if (!allocate) {
5562 5603 ipp->ipp_label_v4 = opt;
5563 5604 ipp->ipp_label_len_v4 = optlen;
5564 5605 ipp->ipp_fields |= IPPF_LABEL_V4;
5565 5606 } else {
5566 5607 ipp->ipp_label_v4 = kmem_alloc(optlen,
5567 5608 KM_NOSLEEP);
5568 5609 if (ipp->ipp_label_v4 == NULL)
5569 5610 return (ENOMEM);
5570 5611 ipp->ipp_label_len_v4 = optlen;
5571 5612 ipp->ipp_fields |= IPPF_LABEL_V4;
5572 5613 bcopy(opt, ipp->ipp_label_v4, optlen);
5573 5614 }
5574 5615 totallen -= optlen;
5575 5616 opt += optlen;
5576 5617
5577 5618 /* Skip padding bytes until we get to a multiple of 4 */
5578 5619 while ((totallen & 3) != 0 && opt[0] == IPOPT_NOP) {
5579 5620 totallen--;
5580 5621 opt++;
5581 5622 }
5582 5623 /* Remaining as ipp_ipv4_options */
5583 5624 goto copyall;
5584 5625 }
5585 5626 totallen -= optlen;
5586 5627 opt += optlen;
5587 5628 }
5588 5629 /* No CIPSO found; return everything as ipp_ipv4_options */
5589 5630 totallen = ipha->ipha_version_and_hdr_length -
5590 5631 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5591 5632 totallen <<= 2;
5592 5633 opt = (uchar_t *)&ipha[1];
5593 5634 goto copyall;
5594 5635 }
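
A worked example of the length computation: for a 24-byte IPv4 header, ipha_version_and_hdr_length is 0x46 (version 4, IHL 6 words); subtracting (IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS = 0x45 leaves 1 word, and the later << 2 turns that into 4 bytes of options. A plain 20-byte header yields 0x45 - 0x45 = 0, the early-return case.
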
5595 5636
5596 5637 /*
5597 5638 * Efficient versions of lookup for an IRE when we only
5598 5639 * match the address.
5599 5640 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5600 5641 * Does not handle multicast addresses.
5601 5642 */
5602 5643 uint_t
5603 5644 ip_type_v4(ipaddr_t addr, ip_stack_t *ipst)
5604 5645 {
5605 5646 ire_t *ire;
5606 5647 uint_t result;
5607 5648
5608 5649 ire = ire_ftable_lookup_simple_v4(addr, 0, ipst, NULL);
5609 5650 ASSERT(ire != NULL);
5610 5651 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5611 5652 result = IRE_NOROUTE;
5612 5653 else
5613 5654 result = ire->ire_type;
5614 5655 ire_refrele(ire);
5615 5656 return (result);
5616 5657 }
5617 5658
5618 5659 /*
5619 5660 * Efficient versions of lookup for an IRE when we only
5620 5661 * match the address.
5621 5662 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5622 5663 * Does not handle multicast addresses.
5623 5664 */
5624 5665 uint_t
5625 5666 ip_type_v6(const in6_addr_t *addr, ip_stack_t *ipst)
5626 5667 {
5627 5668 ire_t *ire;
5628 5669 uint_t result;
5629 5670
5630 5671 ire = ire_ftable_lookup_simple_v6(addr, 0, ipst, NULL);
5631 5672 ASSERT(ire != NULL);
5632 5673 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5633 5674 result = IRE_NOROUTE;
5634 5675 else
5635 5676 result = ire->ire_type;
5636 5677 ire_refrele(ire);
5637 5678 return (result);
5638 5679 }
5639 5680
5640 5681 /* Nobody should be sending packets up this stream */
5644 5685 static void
5645 5686 ip_lrput(queue_t *q, mblk_t *mp)
5646 5687 {
5647 5688 switch (mp->b_datap->db_type) {
5648 5689 case M_FLUSH:
5649 5690 /* Turn around */
5650 5691 if (*mp->b_rptr & FLUSHW) {
5651 5692 *mp->b_rptr &= ~FLUSHR;
5652 5693 qreply(q, mp);
5653 5694 return;
5654 5695 }
5655 5696 break;
5656 5697 }
5657 5698 freemsg(mp);
5658 5699 }
5659 5700
5660 5701 /* Nobody should be sending packets down this stream */
5661 5702 /* ARGSUSED */
5662 5703 void
5663 5704 ip_lwput(queue_t *q, mblk_t *mp)
5664 5705 {
5665 5706 freemsg(mp);
5666 5707 }
5667 5708
5668 5709 /*
5669 5710 * Move the first hop in any source route to ipha_dst and remove that part of
5670 5711 * the source route. Called by other protocols. Errors in option formatting
5671 5712 * are ignored - will be handled by ip_output_options. Return the final
5672 5713 * destination (either ipha_dst or the last entry in a source route.)
5673 5714 */
5674 5715 ipaddr_t
5675 5716 ip_massage_options(ipha_t *ipha, netstack_t *ns)
5676 5717 {
5677 5718 ipoptp_t opts;
5678 5719 uchar_t *opt;
5679 5720 uint8_t optval;
5680 5721 uint8_t optlen;
5681 5722 ipaddr_t dst;
5682 5723 int i;
5683 5724 ip_stack_t *ipst = ns->netstack_ip;
5684 5725
5685 5726 ip2dbg(("ip_massage_options\n"));
5686 5727 dst = ipha->ipha_dst;
5687 5728 for (optval = ipoptp_first(&opts, ipha);
5688 5729 optval != IPOPT_EOL;
5689 5730 optval = ipoptp_next(&opts)) {
5690 5731 opt = opts.ipoptp_cur;
5691 5732 switch (optval) {
5692 5733 uint8_t off;
5693 5734 case IPOPT_SSRR:
5694 5735 case IPOPT_LSRR:
5695 5736 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
5696 5737 ip1dbg(("ip_massage_options: bad src route\n"));
5697 5738 break;
5698 5739 }
5699 5740 optlen = opts.ipoptp_len;
5700 5741 off = opt[IPOPT_OFFSET];
5701 5742 off--;
5702 5743 redo_srr:
5703 5744 if (optlen < IP_ADDR_LEN ||
5704 5745 off > optlen - IP_ADDR_LEN) {
5705 5746 /* End of source route */
5706 5747 ip1dbg(("ip_massage_options: end of SR\n"));
5707 5748 break;
5708 5749 }
5709 5750 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
5710 5751 ip1dbg(("ip_massage_options: next hop 0x%x\n",
5711 5752 ntohl(dst)));
5712 5753 /*
5713 5754 * Check if our address is present more than
5714 5755 * once as consecutive hops in source route.
5715 5756 * XXX verify per-interface ip_forwarding
5716 5757 * for source route?
5717 5758 */
5718 5759 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
5719 5760 off += IP_ADDR_LEN;
5720 5761 goto redo_srr;
5721 5762 }
5722 5763 if (dst == htonl(INADDR_LOOPBACK)) {
5723 5764 ip1dbg(("ip_massage_options: loopback addr in "
5724 5765 "source route!\n"));
5725 5766 break;
5726 5767 }
5727 5768 /*
5728 5769 * Update ipha_dst to be the first hop and remove the
5729 5770 * first hop from the source route (by overwriting
5730 5771 * part of the option with NOP options).
5731 5772 */
5732 5773 ipha->ipha_dst = dst;
5733 5774 /* Put the last entry in dst */
5734 5775 off = ((optlen - IP_ADDR_LEN - 3) & ~(IP_ADDR_LEN-1)) +
5735 5776 3;
5736 5777 bcopy(&opt[off], &dst, IP_ADDR_LEN);
5737 5778
5738 5779 ip1dbg(("ip_massage_options: last hop 0x%x\n",
5739 5780 ntohl(dst)));
5740 5781 /* Move down and overwrite */
5741 5782 opt[IP_ADDR_LEN] = opt[0];
5742 5783 opt[IP_ADDR_LEN+1] = opt[IPOPT_OLEN] - IP_ADDR_LEN;
5743 5784 opt[IP_ADDR_LEN+2] = opt[IPOPT_OFFSET];
5744 5785 for (i = 0; i < IP_ADDR_LEN; i++)
5745 5786 opt[i] = IPOPT_NOP;
5746 5787 break;
5747 5788 }
5748 5789 }
5749 5790 return (dst);
5750 5791 }
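
The in-place rewrite at the end of the SSRR/LSRR case is compact; as a layout, removing the first hop A from a three-address source route looks like this (type/len/ptr are the three option header bytes):

    before:  [ type | len | ptr | A | B | C ]
    after:   [ NOP | NOP | NOP | NOP | type | len-4 | ptr | B | C ]

ipha_dst becomes A (the first hop), and the function returns C, the final destination.
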
5751 5792
5752 5793 /*
5753 5794 * Return the network mask
5754 5795 * associated with the specified address.
5755 5796 */
5756 5797 ipaddr_t
5757 5798 ip_net_mask(ipaddr_t addr)
5758 5799 {
5759 5800 uchar_t *up = (uchar_t *)&addr;
5760 5801 ipaddr_t mask = 0;
5761 5802 uchar_t *maskp = (uchar_t *)&mask;
5762 5803
5763 5804 #if defined(__i386) || defined(__amd64)
5764 5805 #define TOTALLY_BRAIN_DAMAGED_C_COMPILER
5765 5806 #endif
5766 5807 #ifdef TOTALLY_BRAIN_DAMAGED_C_COMPILER
5767 5808 maskp[0] = maskp[1] = maskp[2] = maskp[3] = 0;
5768 5809 #endif
5769 5810 if (CLASSD(addr)) {
5770 5811 maskp[0] = 0xF0;
5771 5812 return (mask);
5772 5813 }
5773 5814
5774 5815 /* We assume Class E default netmask to be 32 */
5775 5816 if (CLASSE(addr))
5776 5817 return (0xffffffffU);
5777 5818
5778 5819 if (addr == 0)
5779 5820 return (0);
5780 5821 maskp[0] = 0xFF;
5781 5822 if ((up[0] & 0x80) == 0)
5782 5823 return (mask);
5783 5824
5784 5825 maskp[1] = 0xFF;
5785 5826 if ((up[0] & 0xC0) == 0x80)
5786 5827 return (mask);
5787 5828
5788 5829 maskp[2] = 0xFF;
5789 5830 if ((up[0] & 0xE0) == 0xC0)
5790 5831 return (mask);
5791 5832
5792 5833 /* Otherwise return no mask */
5793 5834 return ((ipaddr_t)0);
5794 5835 }
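
ip_net_mask() therefore reproduces the old classful defaults. Worked examples, following the leading-bits tests above:

    10.1.2.3     -> 255.0.0.0        (leading bit  0,   class A)
    172.16.0.1   -> 255.255.0.0      (leading bits 10,  class B)
    192.0.2.1    -> 255.255.255.0    (leading bits 110, class C)
    224.0.0.1    -> 240.0.0.0        (class D, multicast)
    240.0.0.1    -> 255.255.255.255  (class E)
    0.0.0.0      -> 0.0.0.0
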
5795 5836
5796 5837 /* Name/Value Table Lookup Routine */
5797 5838 char *
5798 5839 ip_nv_lookup(nv_t *nv, int value)
5799 5840 {
5800 5841 if (!nv)
5801 5842 return (NULL);
5802 5843 for (; nv->nv_name; nv++) {
5803 5844 if (nv->nv_value == value)
5804 5845 return (nv->nv_name);
5805 5846 }
5806 5847 return ("unknown");
5807 5848 }
5808 5849
5809 5850 static int
5810 5851 ip_wait_for_info_ack(ill_t *ill)
5811 5852 {
5812 5853 int err;
5813 5854
5814 5855 mutex_enter(&ill->ill_lock);
5815 5856 while (ill->ill_state_flags & ILL_LL_SUBNET_PENDING) {
5816 5857 /*
5817 5858 * Return value of 0 indicates a pending signal.
5818 5859 */
5819 5860 err = cv_wait_sig(&ill->ill_cv, &ill->ill_lock);
5820 5861 if (err == 0) {
5821 5862 mutex_exit(&ill->ill_lock);
5822 5863 return (EINTR);
5823 5864 }
5824 5865 }
5825 5866 mutex_exit(&ill->ill_lock);
5826 5867 /*
5827 5868 * ip_rput_other could have set an error in ill_error on
5828 5869 * receipt of M_ERROR.
5829 5870 */
5830 5871 return (ill->ill_error);
5831 5872 }
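
The wakeup half of this wait lives in the DLPI ack path: whichever thread consumes the DL_INFO_ACK clears ILL_LL_SUBNET_PENDING under ill_lock and broadcasts ill_cv. A sketch of the pairing, not the exact ack-path code:

    mutex_enter(&ill->ill_lock);
    ill->ill_state_flags &= ~ILL_LL_SUBNET_PENDING;
    cv_broadcast(&ill->ill_cv);
    mutex_exit(&ill->ill_lock);
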
5832 5873
5833 5874 /*
5834 5875 * This is a module open, i.e. this is a control stream for access
5835 5876 * to a DLPI device. We allocate an ill_t as the instance data in
5836 5877 * this case.
5837 5878 */
5838 5879 static int
5839 5880 ip_modopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5840 5881 {
5841 5882 ill_t *ill;
5842 5883 int err;
5843 5884 zoneid_t zoneid;
5844 5885 netstack_t *ns;
5845 5886 ip_stack_t *ipst;
5846 5887
5847 5888 /*
5848 5889 * Prevent unprivileged processes from pushing IP so that
5849 5890 * they can't send raw IP.
5850 5891 */
5851 5892 if (secpolicy_net_rawaccess(credp) != 0)
5852 5893 return (EPERM);
5853 5894
5854 5895 ns = netstack_find_by_cred(credp);
5855 5896 ASSERT(ns != NULL);
5856 5897 ipst = ns->netstack_ip;
5857 5898 ASSERT(ipst != NULL);
5858 5899
5859 5900 /*
5860 5901 * For exclusive stacks we set the zoneid to zero
5861 5902 * to make IP operate as if in the global zone.
5862 5903 */
5863 5904 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
5864 5905 zoneid = GLOBAL_ZONEID;
5865 5906 else
5866 5907 zoneid = crgetzoneid(credp);
5867 5908
5868 5909 ill = (ill_t *)mi_open_alloc_sleep(sizeof (ill_t));
5869 5910 q->q_ptr = WR(q)->q_ptr = ill;
5870 5911 ill->ill_ipst = ipst;
5871 5912 ill->ill_zoneid = zoneid;
5872 5913
5873 5914 /*
5874 5915  * ill_init initializes the ill fields and then sends
5875 5916  * down a DL_INFO_REQ after calling qprocson.
5876 5917 */
5877 5918 err = ill_init(q, ill);
5878 5919
5879 5920 if (err != 0) {
5880 5921 mi_free(ill);
5881 5922 netstack_rele(ipst->ips_netstack);
5882 5923 q->q_ptr = NULL;
5883 5924 WR(q)->q_ptr = NULL;
5884 5925 return (err);
5885 5926 }
5886 5927
5887 5928 /*
5888 5929 * Wait for the DL_INFO_ACK if a DL_INFO_REQ was sent.
5889 5930 *
5890 5931 * ill_init initializes the ipsq marking this thread as
5891 5932 * writer
5892 5933 */
5893 5934 ipsq_exit(ill->ill_phyint->phyint_ipsq);
5894 5935 err = ip_wait_for_info_ack(ill);
5895 5936 if (err == 0)
5896 5937 ill->ill_credp = credp;
5897 5938 else
5898 5939 goto fail;
5899 5940
5900 5941 crhold(credp);
5901 5942
5902 5943 mutex_enter(&ipst->ips_ip_mi_lock);
5903 5944 err = mi_open_link(&ipst->ips_ip_g_head, (IDP)q->q_ptr, devp, flag,
5904 5945 sflag, credp);
5905 5946 mutex_exit(&ipst->ips_ip_mi_lock);
5906 5947 fail:
5907 5948 if (err) {
5908 5949 (void) ip_close(q, 0);
5909 5950 return (err);
5910 5951 }
5911 5952 return (0);
5912 5953 }
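
For context, ip_modopen() runs when "ip" is pushed as a STREAMS module onto a DLPI device stream, which is roughly what interface plumbing does. A minimal sketch under that assumption; the device path is a placeholder, and real plumbing performs several further steps (SIOCSLIFNAME and friends):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Hypothetical DLPI device node; the path depends on the NIC. */
		int fd = open("/dev/bge0", O_RDWR);

		if (fd == -1) {
			perror("open");
			return (1);
		}
		/* The module push arrives in ip_open() with sflag = MODOPEN. */
		if (ioctl(fd, I_PUSH, "ip") == -1)
			perror("I_PUSH ip");
		(void) close(fd);
		return (0);
	}
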
5913 5954
5914 5955 /* For /dev/ip aka AF_INET open */
5915 5956 int
5916 5957 ip_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5917 5958 {
5918 5959 return (ip_open(q, devp, flag, sflag, credp, B_FALSE));
5919 5960 }
5920 5961
5921 5962 /* For /dev/ip6 aka AF_INET6 open */
5922 5963 int
5923 5964 ip_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5924 5965 {
5925 5966 return (ip_open(q, devp, flag, sflag, credp, B_TRUE));
5926 5967 }
5927 5968
5928 5969 /* IP open routine. */
5929 5970 int
5930 5971 ip_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
5931 5972 boolean_t isv6)
5932 5973 {
5933 5974 conn_t *connp;
5934 5975 major_t maj;
5935 5976 zoneid_t zoneid;
5936 5977 netstack_t *ns;
5937 5978 ip_stack_t *ipst;
5938 5979
5939 5980 /* Allow reopen. */
5940 5981 if (q->q_ptr != NULL)
5941 5982 return (0);
5942 5983
5943 5984 if (sflag & MODOPEN) {
5944 5985 /* This is a module open */
5945 5986 return (ip_modopen(q, devp, flag, sflag, credp));
5946 5987 }
5947 5988
5948 5989 if ((flag & ~(FKLYR)) == IP_HELPER_STR) {
5949 5990 /*
5950 5991  * Non-STREAMS socket looking for a stream
5951 5992 * to access IP
5952 5993 */
5953 5994 return (ip_helper_stream_setup(q, devp, flag, sflag,
5954 5995 credp, isv6));
5955 5996 }
5956 5997
5957 5998 ns = netstack_find_by_cred(credp);
5958 5999 ASSERT(ns != NULL);
5959 6000 ipst = ns->netstack_ip;
5960 6001 ASSERT(ipst != NULL);
5961 6002
5962 6003 /*
5963 6004 * For exclusive stacks we set the zoneid to zero
5964 6005 * to make IP operate as if in the global zone.
5965 6006 */
5966 6007 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
5967 6008 zoneid = GLOBAL_ZONEID;
5968 6009 else
5969 6010 zoneid = crgetzoneid(credp);
5970 6011
5971 6012 /*
5972 6013 * We are opening as a device. This is an IP client stream, and we
5973 6014  * allocate a conn_t as the instance data.
5974 6015 */
5975 6016 connp = ipcl_conn_create(IPCL_IPCCONN, KM_SLEEP, ipst->ips_netstack);
5976 6017
5977 6018 /*
5978 6019 * ipcl_conn_create did a netstack_hold. Undo the hold that was
5979 6020 * done by netstack_find_by_cred()
5980 6021 */
5981 6022 netstack_rele(ipst->ips_netstack);
5982 6023
5983 6024 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM;
5984 6025 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */
5985 6026 connp->conn_ixa->ixa_zoneid = zoneid;
5986 6027 connp->conn_zoneid = zoneid;
5987 6028
5988 6029 connp->conn_rq = q;
5989 6030 q->q_ptr = WR(q)->q_ptr = connp;
5990 6031
5991 6032 /* Minor tells us which /dev entry was opened */
5992 6033 if (isv6) {
5993 6034 connp->conn_family = AF_INET6;
5994 6035 connp->conn_ipversion = IPV6_VERSION;
5995 6036 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4;
5996 6037 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
5997 6038 } else {
5998 6039 connp->conn_family = AF_INET;
5999 6040 connp->conn_ipversion = IPV4_VERSION;
6000 6041 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4;
6001 6042 }
6002 6043
6003 6044 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
6004 6045 ((connp->conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
6005 6046 connp->conn_minor_arena = ip_minor_arena_la;
6006 6047 } else {
6007 6048 /*
6008 6049 * Either minor numbers in the large arena were exhausted
6009 6050 * or a non socket application is doing the open.
6010 6051 * Try to allocate from the small arena.
6011 6052 */
6012 6053 if ((connp->conn_dev =
6013 6054 inet_minor_alloc(ip_minor_arena_sa)) == 0) {
6014 6055 /* CONN_DEC_REF takes care of netstack_rele() */
6015 6056 q->q_ptr = WR(q)->q_ptr = NULL;
6016 6057 CONN_DEC_REF(connp);
6017 6058 return (EBUSY);
6018 6059 }
6019 6060 connp->conn_minor_arena = ip_minor_arena_sa;
6020 6061 }
6021 6062
6022 6063 maj = getemajor(*devp);
6023 6064 *devp = makedevice(maj, (minor_t)connp->conn_dev);
6024 6065
6025 6066 /*
6026 6067 * connp->conn_cred is crfree()ed in ipcl_conn_destroy()
6027 6068 */
6028 6069 connp->conn_cred = credp;
6029 6070 connp->conn_cpid = curproc->p_pid;
6030 6071 /* Cache things in ixa without an extra refhold */
6031 6072 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
6032 6073 connp->conn_ixa->ixa_cred = connp->conn_cred;
6033 6074 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
6034 6075 if (is_system_labeled())
6035 6076 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred);
6036 6077
6037 6078 /*
6038 6079 * Handle IP_IOC_RTS_REQUEST and other ioctls which use conn_recv
6039 6080 */
6040 6081 connp->conn_recv = ip_conn_input;
6041 6082 connp->conn_recvicmp = ip_conn_input_icmp;
6042 6083
6043 6084 crhold(connp->conn_cred);
6044 6085
6045 6086 /*
6046 6087 * If the caller has the process-wide flag set, then default to MAC
6047 6088 * exempt mode. This allows read-down to unlabeled hosts.
6048 6089 */
6049 6090 if (getpflags(NET_MAC_AWARE, credp) != 0)
6050 6091 connp->conn_mac_mode = CONN_MAC_AWARE;
6051 6092
6052 6093 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
6053 6094
6054 6095 connp->conn_rq = q;
6055 6096 connp->conn_wq = WR(q);
6056 6097
6057 6098 /* Non-zero default values */
6058 6099 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP;
6059 6100
6060 6101 /*
6061 6102 * Make the conn globally visible to walkers
6062 6103 */
6063 6104 ASSERT(connp->conn_ref == 1);
6064 6105 mutex_enter(&connp->conn_lock);
6065 6106 connp->conn_state_flags &= ~CONN_INCIPIENT;
6066 6107 mutex_exit(&connp->conn_lock);
6067 6108
6068 6109 qprocson(q);
6069 6110
6070 6111 return (0);
6071 6112 }
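
An ordinary open of /dev/ip, by contrast with the module open earlier, takes the device path above and allocates a conn_t; such client streams carry IP-level ioctls. A trivial sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd = open("/dev/ip", O_RDWR);	/* ip_openv4() path */

		if (fd == -1) {
			perror("open /dev/ip");
			return (1);
		}
		(void) close(fd);
		return (0);
	}
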
6072 6113
6073 6114 /*
6074 6115 * Set IPsec policy from an ipsec_req_t. If the req is not "zero" and valid,
6075 6116 * all of them are copied to the conn_t. If the req is "zero", the policy is
6076 6117  * zeroed out. A "zero" policy has zero ipsr_{ah,esp,self_encap}_req
6077 6118 * fields.
6078 6119 * We keep only the latest setting of the policy and thus policy setting
6079 6120 * is not incremental/cumulative.
6080 6121 *
6081 6122 * Requests to set policies with multiple alternative actions will
6082 6123 * go through a different API.
6083 6124 */
6084 6125 int
6085 6126 ipsec_set_req(cred_t *cr, conn_t *connp, ipsec_req_t *req)
6086 6127 {
6087 6128 uint_t ah_req = 0;
6088 6129 uint_t esp_req = 0;
6089 6130 uint_t se_req = 0;
6090 6131 ipsec_act_t *actp = NULL;
6091 6132 uint_t nact;
6092 6133 ipsec_policy_head_t *ph;
6093 6134 boolean_t is_pol_reset, is_pol_inserted = B_FALSE;
6094 6135 int error = 0;
6095 6136 netstack_t *ns = connp->conn_netstack;
6096 6137 ip_stack_t *ipst = ns->netstack_ip;
6097 6138 ipsec_stack_t *ipss = ns->netstack_ipsec;
6098 6139
6099 6140 #define REQ_MASK (IPSEC_PREF_REQUIRED|IPSEC_PREF_NEVER)
6100 6141
6101 6142 /*
6102 6143 * The IP_SEC_OPT option does not allow variable length parameters,
6103 6144 * hence a request cannot be NULL.
6104 6145 */
6105 6146 if (req == NULL)
6106 6147 return (EINVAL);
6107 6148
6108 6149 ah_req = req->ipsr_ah_req;
6109 6150 esp_req = req->ipsr_esp_req;
6110 6151 se_req = req->ipsr_self_encap_req;
6111 6152
6112 6153 /* Don't allow setting self-encap without one or more of AH/ESP. */
6113 6154 if (se_req != 0 && esp_req == 0 && ah_req == 0)
6114 6155 return (EINVAL);
6115 6156
6116 6157 /*
6117 6158 * Are we dealing with a request to reset the policy (i.e.
6118 6159 * zero requests).
6119 6160 */
6120 6161 is_pol_reset = ((ah_req & REQ_MASK) == 0 &&
6121 6162 (esp_req & REQ_MASK) == 0 &&
6122 6163 (se_req & REQ_MASK) == 0);
6123 6164
6124 6165 if (!is_pol_reset) {
6125 6166 /*
6126 6167 * If we couldn't load IPsec, fail with "protocol
6127 6168 * not supported".
6128 6169 * IPsec may not have been loaded for a request with zero
6129 6170 * policies, so we don't fail in this case.
6130 6171 */
6131 6172 mutex_enter(&ipss->ipsec_loader_lock);
6132 6173 if (ipss->ipsec_loader_state != IPSEC_LOADER_SUCCEEDED) {
6133 6174 mutex_exit(&ipss->ipsec_loader_lock);
6134 6175 return (EPROTONOSUPPORT);
6135 6176 }
6136 6177 mutex_exit(&ipss->ipsec_loader_lock);
6137 6178
6138 6179 /*
6139 6180 * Test for valid requests. Invalid algorithms
6140 6181 * need to be tested by IPsec code because new
6141 6182 * algorithms can be added dynamically.
6142 6183 */
6143 6184 if ((ah_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6144 6185 (esp_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6145 6186 (se_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0) {
6146 6187 return (EINVAL);
6147 6188 }
6148 6189
6149 6190 /*
6150 6191 * Only privileged users can issue these
6151 6192 * requests.
6152 6193 */
6153 6194 if (((ah_req & IPSEC_PREF_NEVER) ||
6154 6195 (esp_req & IPSEC_PREF_NEVER) ||
6155 6196 (se_req & IPSEC_PREF_NEVER)) &&
6156 6197 secpolicy_ip_config(cr, B_FALSE) != 0) {
6157 6198 return (EPERM);
6158 6199 }
6159 6200
6160 6201 /*
6161 6202 * The IPSEC_PREF_REQUIRED and IPSEC_PREF_NEVER
6162 6203 * are mutually exclusive.
6163 6204 */
6164 6205 if (((ah_req & REQ_MASK) == REQ_MASK) ||
6165 6206 ((esp_req & REQ_MASK) == REQ_MASK) ||
6166 6207 ((se_req & REQ_MASK) == REQ_MASK)) {
6167 6208 /* Both of them are set */
6168 6209 return (EINVAL);
6169 6210 }
6170 6211 }
6171 6212
6172 6213 ASSERT(MUTEX_HELD(&connp->conn_lock));
6173 6214
6174 6215 /*
6175 6216 * If we have already cached policies in conn_connect(), don't
6176 6217 * let them change now. We cache policies for connections
6177 6218 * whose src,dst [addr, port] is known.
6178 6219 */
6179 6220 if (connp->conn_policy_cached) {
6180 6221 return (EINVAL);
6181 6222 }
6182 6223
6183 6224 /*
6184 6225  * We have zero policies; reset the connection policy if already
6185 6226 * set. This will cause the connection to inherit the
6186 6227 * global policy, if any.
6187 6228 */
6188 6229 if (is_pol_reset) {
6189 6230 if (connp->conn_policy != NULL) {
6190 6231 IPPH_REFRELE(connp->conn_policy, ipst->ips_netstack);
6191 6232 connp->conn_policy = NULL;
6192 6233 }
6193 6234 connp->conn_in_enforce_policy = B_FALSE;
6194 6235 connp->conn_out_enforce_policy = B_FALSE;
6195 6236 return (0);
6196 6237 }
6197 6238
6198 6239 ph = connp->conn_policy = ipsec_polhead_split(connp->conn_policy,
6199 6240 ipst->ips_netstack);
6200 6241 if (ph == NULL)
6201 6242 goto enomem;
6202 6243
6203 6244 ipsec_actvec_from_req(req, &actp, &nact, ipst->ips_netstack);
6204 6245 if (actp == NULL)
6205 6246 goto enomem;
6206 6247
6207 6248 /*
6208 6249 * Always insert IPv4 policy entries, since they can also apply to
6209 6250 * ipv6 sockets being used in ipv4-compat mode.
6210 6251 */
6211 6252 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6212 6253 IPSEC_TYPE_INBOUND, ns))
6213 6254 goto enomem;
6214 6255 is_pol_inserted = B_TRUE;
6215 6256 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6216 6257 IPSEC_TYPE_OUTBOUND, ns))
6217 6258 goto enomem;
6218 6259
6219 6260 /*
6220 6261 * We're looking at a v6 socket, also insert the v6-specific
6221 6262 * entries.
6222 6263 */
6223 6264 if (connp->conn_family == AF_INET6) {
6224 6265 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6225 6266 IPSEC_TYPE_INBOUND, ns))
6226 6267 goto enomem;
6227 6268 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6228 6269 IPSEC_TYPE_OUTBOUND, ns))
6229 6270 goto enomem;
6230 6271 }
6231 6272
6232 6273 ipsec_actvec_free(actp, nact);
6233 6274
6234 6275 /*
6235 6276 * If the requests need security, set enforce_policy.
6236 6277 * If the requests are IPSEC_PREF_NEVER, one should
6237 6278 * still set conn_out_enforce_policy so that ip_set_destination
6238 6279  * marks the ip_xmit_attr_t appropriately. This is needed so that
6239 6280 * for connections that we don't cache policy in at connect time,
6240 6281 * if global policy matches in ip_output_attach_policy, we
6241 6282 * don't wrongly inherit global policy. Similarly, we need
6242 6283 * to set conn_in_enforce_policy also so that we don't verify
6243 6284 * policy wrongly.
6244 6285 */
6245 6286 if ((ah_req & REQ_MASK) != 0 ||
6246 6287 (esp_req & REQ_MASK) != 0 ||
6247 6288 (se_req & REQ_MASK) != 0) {
6248 6289 connp->conn_in_enforce_policy = B_TRUE;
6249 6290 connp->conn_out_enforce_policy = B_TRUE;
6250 6291 }
6251 6292
6252 6293 return (error);
6253 6294 #undef REQ_MASK
6254 6295
6255 6296 /*
6256 6297 * Common memory-allocation-failure exit path.
6257 6298 */
6258 6299 enomem:
6259 6300 if (actp != NULL)
6260 6301 ipsec_actvec_free(actp, nact);
6261 6302 if (is_pol_inserted)
6262 6303 ipsec_polhead_flush(ph, ns);
6263 6304 return (ENOMEM);
6264 6305 }
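
From user space, policy of this shape is set with the IP_SEC_OPT socket option carrying an ipsec_req_t (see ipsec(7P)). A hedged sketch: the algorithm constants are illustrative, and the exact headers needed can vary by release.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <net/pfkeyv2.h>	/* SADB_AALG_* / SADB_EALG_* */
	#include <string.h>

	/*
	 * Require ESP on a socket. An all-zero ipsec_req_t would instead
	 * reset the socket to global policy, matching the is_pol_reset
	 * path above.
	 */
	int
	require_esp(int fd)
	{
		ipsec_req_t req;

		memset(&req, 0, sizeof (req));
		req.ipsr_esp_req = IPSEC_PREF_REQUIRED;
		req.ipsr_esp_alg = SADB_EALG_3DESCBC;	/* illustrative */
		req.ipsr_esp_auth_alg = SADB_AALG_SHA1HMAC;
		return (setsockopt(fd, IPPROTO_IP, IP_SEC_OPT,
		    &req, sizeof (req)));
	}
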
6265 6306
6266 6307 /*
6267 6308 * Set socket options for joining and leaving multicast groups.
6268 6309 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6269 6310  * The caller has already checked that the option name is consistent with
6270 6311 * the address family of the socket.
6271 6312 */
6272 6313 int
6273 6314 ip_opt_set_multicast_group(conn_t *connp, t_scalar_t name,
6274 6315 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6275 6316 {
6276 6317 int *i1 = (int *)invalp;
6277 6318 int error = 0;
6278 6319 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6279 6320 struct ip_mreq *v4_mreqp;
6280 6321 struct ipv6_mreq *v6_mreqp;
6281 6322 struct group_req *greqp;
6282 6323 ire_t *ire;
6283 6324 boolean_t done = B_FALSE;
6284 6325 ipaddr_t ifaddr;
6285 6326 in6_addr_t v6group;
6286 6327 uint_t ifindex;
6287 6328 boolean_t mcast_opt = B_TRUE;
6288 6329 mcast_record_t fmode;
6289 6330 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6290 6331 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6291 6332
6292 6333 switch (name) {
6293 6334 case IP_ADD_MEMBERSHIP:
6294 6335 case IPV6_JOIN_GROUP:
6295 6336 mcast_opt = B_FALSE;
6296 6337 /* FALLTHRU */
6297 6338 case MCAST_JOIN_GROUP:
6298 6339 fmode = MODE_IS_EXCLUDE;
6299 6340 optfn = ip_opt_add_group;
6300 6341 break;
6301 6342
6302 6343 case IP_DROP_MEMBERSHIP:
6303 6344 case IPV6_LEAVE_GROUP:
6304 6345 mcast_opt = B_FALSE;
6305 6346 /* FALLTHRU */
6306 6347 case MCAST_LEAVE_GROUP:
6307 6348 fmode = MODE_IS_INCLUDE;
6308 6349 optfn = ip_opt_delete_group;
6309 6350 break;
6310 6351 default:
6311 6352 ASSERT(0);
6312 6353 }
6313 6354
6314 6355 if (mcast_opt) {
6315 6356 struct sockaddr_in *sin;
6316 6357 struct sockaddr_in6 *sin6;
6317 6358
6318 6359 greqp = (struct group_req *)i1;
6319 6360 if (greqp->gr_group.ss_family == AF_INET) {
6320 6361 sin = (struct sockaddr_in *)&(greqp->gr_group);
6321 6362 IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &v6group);
6322 6363 } else {
6323 6364 if (!inet6)
6324 6365 return (EINVAL); /* Not on INET socket */
6325 6366
6326 6367 sin6 = (struct sockaddr_in6 *)&(greqp->gr_group);
6327 6368 v6group = sin6->sin6_addr;
6328 6369 }
6329 6370 ifaddr = INADDR_ANY;
6330 6371 ifindex = greqp->gr_interface;
6331 6372 } else if (inet6) {
6332 6373 v6_mreqp = (struct ipv6_mreq *)i1;
6333 6374 v6group = v6_mreqp->ipv6mr_multiaddr;
6334 6375 ifaddr = INADDR_ANY;
6335 6376 ifindex = v6_mreqp->ipv6mr_interface;
6336 6377 } else {
6337 6378 v4_mreqp = (struct ip_mreq *)i1;
6338 6379 IN6_INADDR_TO_V4MAPPED(&v4_mreqp->imr_multiaddr, &v6group);
6339 6380 ifaddr = (ipaddr_t)v4_mreqp->imr_interface.s_addr;
6340 6381 ifindex = 0;
6341 6382 }
6342 6383
6343 6384 /*
6344 6385 * In the multirouting case, we need to replicate
6345 6386 * the request on all interfaces that will take part
6346 6387 * in replication. We do so because multirouting is
6347 6388 * reflective, thus we will probably receive multi-
6348 6389 * casts on those interfaces.
6349 6390 * The ip_multirt_apply_membership() succeeds if
6350 6391 * the operation succeeds on at least one interface.
6351 6392 */
6352 6393 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6353 6394 ipaddr_t group;
6354 6395
6355 6396 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6356 6397
6357 6398 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6358 6399 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6359 6400 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6360 6401 } else {
6361 6402 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6362 6403 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6363 6404 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6364 6405 }
6365 6406 if (ire != NULL) {
6366 6407 if (ire->ire_flags & RTF_MULTIRT) {
6367 6408 error = ip_multirt_apply_membership(optfn, ire, connp,
6368 6409 checkonly, &v6group, fmode, &ipv6_all_zeros);
6369 6410 done = B_TRUE;
6370 6411 }
6371 6412 ire_refrele(ire);
6372 6413 }
6373 6414
6374 6415 if (!done) {
6375 6416 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6376 6417 fmode, &ipv6_all_zeros);
6377 6418 }
6378 6419 return (error);
6379 6420 }
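
The protocol-independent options decoded above are reached through setsockopt(). A sketch for the IPv4 MCAST_JOIN_GROUP case; the group address and interface index are caller-supplied placeholders:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <string.h>

	int
	join_group(int fd, const char *group, unsigned int ifindex)
	{
		struct group_req greq;
		struct sockaddr_in *sin =
		    (struct sockaddr_in *)&greq.gr_group;

		memset(&greq, 0, sizeof (greq));
		greq.gr_interface = ifindex;	/* 0 lets the kernel choose */
		sin->sin_family = AF_INET;
		if (inet_pton(AF_INET, group, &sin->sin_addr) != 1)
			return (-1);
		return (setsockopt(fd, IPPROTO_IP, MCAST_JOIN_GROUP,
		    &greq, sizeof (greq)));
	}
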
6380 6421
6381 6422 /*
6382 6423 * Set socket options for joining and leaving multicast groups
6383 6424 * for specific sources.
6384 6425 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6385 6426  * The caller has already checked that the option name is consistent with
6386 6427 * the address family of the socket.
6387 6428 */
6388 6429 int
6389 6430 ip_opt_set_multicast_sources(conn_t *connp, t_scalar_t name,
6390 6431 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6391 6432 {
6392 6433 int *i1 = (int *)invalp;
6393 6434 int error = 0;
6394 6435 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6395 6436 struct ip_mreq_source *imreqp;
6396 6437 struct group_source_req *gsreqp;
6397 6438 in6_addr_t v6group, v6src;
6398 6439 uint32_t ifindex;
6399 6440 ipaddr_t ifaddr;
6400 6441 boolean_t mcast_opt = B_TRUE;
6401 6442 mcast_record_t fmode;
6402 6443 ire_t *ire;
6403 6444 boolean_t done = B_FALSE;
6404 6445 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6405 6446 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6406 6447
6407 6448 switch (name) {
6408 6449 case IP_BLOCK_SOURCE:
6409 6450 mcast_opt = B_FALSE;
6410 6451 /* FALLTHRU */
6411 6452 case MCAST_BLOCK_SOURCE:
6412 6453 fmode = MODE_IS_EXCLUDE;
6413 6454 optfn = ip_opt_add_group;
6414 6455 break;
6415 6456
6416 6457 case IP_UNBLOCK_SOURCE:
6417 6458 mcast_opt = B_FALSE;
6418 6459 /* FALLTHRU */
6419 6460 case MCAST_UNBLOCK_SOURCE:
6420 6461 fmode = MODE_IS_EXCLUDE;
6421 6462 optfn = ip_opt_delete_group;
6422 6463 break;
6423 6464
6424 6465 case IP_ADD_SOURCE_MEMBERSHIP:
6425 6466 mcast_opt = B_FALSE;
6426 6467 /* FALLTHRU */
6427 6468 case MCAST_JOIN_SOURCE_GROUP:
6428 6469 fmode = MODE_IS_INCLUDE;
6429 6470 optfn = ip_opt_add_group;
6430 6471 break;
6431 6472
6432 6473 case IP_DROP_SOURCE_MEMBERSHIP:
6433 6474 mcast_opt = B_FALSE;
6434 6475 /* FALLTHRU */
6435 6476 case MCAST_LEAVE_SOURCE_GROUP:
6436 6477 fmode = MODE_IS_INCLUDE;
6437 6478 optfn = ip_opt_delete_group;
6438 6479 break;
6439 6480 default:
6440 6481 ASSERT(0);
6441 6482 }
6442 6483
6443 6484 if (mcast_opt) {
6444 6485 gsreqp = (struct group_source_req *)i1;
6445 6486 ifindex = gsreqp->gsr_interface;
6446 6487 if (gsreqp->gsr_group.ss_family == AF_INET) {
6447 6488 struct sockaddr_in *s;
6448 6489 s = (struct sockaddr_in *)&gsreqp->gsr_group;
6449 6490 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6group);
6450 6491 s = (struct sockaddr_in *)&gsreqp->gsr_source;
6451 6492 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6src);
6452 6493 } else {
6453 6494 struct sockaddr_in6 *s6;
6454 6495
6455 6496 if (!inet6)
6456 6497 return (EINVAL); /* Not on INET socket */
6457 6498
6458 6499 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_group;
6459 6500 v6group = s6->sin6_addr;
6460 6501 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_source;
6461 6502 v6src = s6->sin6_addr;
6462 6503 }
6463 6504 ifaddr = INADDR_ANY;
6464 6505 } else {
6465 6506 imreqp = (struct ip_mreq_source *)i1;
6466 6507 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_multiaddr, &v6group);
6467 6508 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_sourceaddr, &v6src);
6468 6509 ifaddr = (ipaddr_t)imreqp->imr_interface.s_addr;
6469 6510 ifindex = 0;
6470 6511 }
6471 6512
6472 6513 /*
6473 6514 * Handle src being mapped INADDR_ANY by changing it to unspecified.
6474 6515 */
6475 6516 if (IN6_IS_ADDR_V4MAPPED_ANY(&v6src))
6476 6517 v6src = ipv6_all_zeros;
6477 6518
6478 6519 /*
6479 6520 * In the multirouting case, we need to replicate
6480 6521 * the request as noted in the mcast cases above.
6481 6522 */
6482 6523 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6483 6524 ipaddr_t group;
6484 6525
6485 6526 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6486 6527
6487 6528 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6488 6529 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6489 6530 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6490 6531 } else {
6491 6532 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6492 6533 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6493 6534 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6494 6535 }
6495 6536 if (ire != NULL) {
6496 6537 if (ire->ire_flags & RTF_MULTIRT) {
6497 6538 error = ip_multirt_apply_membership(optfn, ire, connp,
6498 6539 checkonly, &v6group, fmode, &v6src);
6499 6540 done = B_TRUE;
6500 6541 }
6501 6542 ire_refrele(ire);
6502 6543 }
6503 6544 if (!done) {
6504 6545 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6505 6546 fmode, &v6src);
6506 6547 }
6507 6548 return (error);
6508 6549 }
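
The corresponding user-space call for the source-specific cases is MCAST_JOIN_SOURCE_GROUP with a group_source_req, mirroring the decoding above. A sketch with placeholder addresses:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <string.h>

	int
	join_source_group(int fd, const char *group, const char *source,
	    unsigned int ifindex)
	{
		struct group_source_req gsreq;
		struct sockaddr_in *g =
		    (struct sockaddr_in *)&gsreq.gsr_group;
		struct sockaddr_in *s =
		    (struct sockaddr_in *)&gsreq.gsr_source;

		memset(&gsreq, 0, sizeof (gsreq));
		gsreq.gsr_interface = ifindex;
		g->sin_family = AF_INET;
		s->sin_family = AF_INET;
		if (inet_pton(AF_INET, group, &g->sin_addr) != 1 ||
		    inet_pton(AF_INET, source, &s->sin_addr) != 1)
			return (-1);
		return (setsockopt(fd, IPPROTO_IP, MCAST_JOIN_SOURCE_GROUP,
		    &gsreq, sizeof (gsreq)));
	}
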
6509 6550
6510 6551 /*
6511 6552  * Given a destination address and a pointer to where to put the information,
6512 6553 * this routine fills in the mtuinfo.
6513 6554 * The socket must be connected.
6514 6555 * For sctp conn_faddr is the primary address.
6515 6556 */
6516 6557 int
6517 6558 ip_fill_mtuinfo(conn_t *connp, ip_xmit_attr_t *ixa, struct ip6_mtuinfo *mtuinfo)
6518 6559 {
6519 6560 uint32_t pmtu = IP_MAXPACKET;
6520 6561 uint_t scopeid;
6521 6562
6522 6563 if (IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6))
6523 6564 return (-1);
6524 6565
6525 6566 /* In case we never sent or called ip_set_destination_v4/v6 */
6526 6567 if (ixa->ixa_ire != NULL)
6527 6568 pmtu = ip_get_pmtu(ixa);
6528 6569
6529 6570 if (ixa->ixa_flags & IXAF_SCOPEID_SET)
6530 6571 scopeid = ixa->ixa_scopeid;
6531 6572 else
6532 6573 scopeid = 0;
6533 6574
6534 6575 bzero(mtuinfo, sizeof (*mtuinfo));
6535 6576 mtuinfo->ip6m_addr.sin6_family = AF_INET6;
6536 6577 mtuinfo->ip6m_addr.sin6_port = connp->conn_fport;
6537 6578 mtuinfo->ip6m_addr.sin6_addr = connp->conn_faddr_v6;
6538 6579 mtuinfo->ip6m_addr.sin6_scope_id = scopeid;
6539 6580 mtuinfo->ip6m_mtu = pmtu;
6540 6581
6541 6582 return (sizeof (struct ip6_mtuinfo));
6542 6583 }
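
The ip6_mtuinfo built above is what a connected IPv6 socket receives via the RFC 3542 IPV6_PATHMTU option. A sketch:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stdio.h>

	/* Query the path MTU on a connected AF_INET6 socket. */
	int
	print_pmtu(int fd)
	{
		struct ip6_mtuinfo mtuinfo;
		socklen_t len = sizeof (mtuinfo);

		if (getsockopt(fd, IPPROTO_IPV6, IPV6_PATHMTU,
		    &mtuinfo, &len) != 0)
			return (-1);
		printf("path MTU to peer: %u bytes\n",
		    (unsigned int)mtuinfo.ip6m_mtu);
		return (0);
	}
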
6543 6584
6544 6585 /*
6545 6586 * When the src multihoming is changed from weak to [strong, preferred]
6546 6587 * ip_ire_rebind_walker is called to walk the list of all ire_t entries
6547 6588 * and identify routes that were created by user-applications in the
6548 6589 * unbound state (i.e., without RTA_IFP), and for which an ire_ill is not
6549 6590 * currently defined. These routes are then 'rebound', i.e., their ire_ill
6550 6591 * is selected by finding an interface route for the gateway.
6551 6592 */
6552 6593 /* ARGSUSED */
6553 6594 void
6554 6595 ip_ire_rebind_walker(ire_t *ire, void *notused)
6555 6596 {
6556 6597 if (!ire->ire_unbound || ire->ire_ill != NULL)
6557 6598 return;
6558 6599 ire_rebind(ire);
6559 6600 ire_delete(ire);
6560 6601 }
6561 6602
6562 6603 /*
6563 6604 * When the src multihoming is changed from [strong, preferred] to weak,
6564 6605 * ip_ire_unbind_walker is called to walk the list of all ire_t entries, and
6565 6606 * set any entries that were created by user-applications in the unbound state
6566 6607 * (i.e., without RTA_IFP) back to having a NULL ire_ill.
6567 6608 */
6568 6609 /* ARGSUSED */
6569 6610 void
6570 6611 ip_ire_unbind_walker(ire_t *ire, void *notused)
6571 6612 {
6572 6613 ire_t *new_ire;
6573 6614
6574 6615 if (!ire->ire_unbound || ire->ire_ill == NULL)
6575 6616 return;
6576 6617 if (ire->ire_ipversion == IPV6_VERSION) {
6577 6618 new_ire = ire_create_v6(&ire->ire_addr_v6, &ire->ire_mask_v6,
6578 6619 &ire->ire_gateway_addr_v6, ire->ire_type, NULL,
6579 6620 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6580 6621 } else {
6581 6622 new_ire = ire_create((uchar_t *)&ire->ire_addr,
6582 6623 (uchar_t *)&ire->ire_mask,
6583 6624 (uchar_t *)&ire->ire_gateway_addr, ire->ire_type, NULL,
6584 6625 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6585 6626 }
6586 6627 if (new_ire == NULL)
6587 6628 return;
6588 6629 new_ire->ire_unbound = B_TRUE;
6589 6630 /*
6590 6631 * The bound ire must first be deleted so that we don't return
6591 6632 * the existing one on the attempt to add the unbound new_ire.
6592 6633 */
6593 6634 ire_delete(ire);
6594 6635 new_ire = ire_add(new_ire);
6595 6636 if (new_ire != NULL)
6596 6637 ire_refrele(new_ire);
6597 6638 }
6598 6639
6599 6640 /*
6600 6641 * When the settings of ip*_strict_src_multihoming tunables are changed,
6601 6642 * all cached routes need to be recomputed. This recomputation needs to be
6602 6643 * done when going from weaker to stronger modes so that the cached ire
6603 6644 * for the connection does not violate the current ip*_strict_src_multihoming
6604 6645 * setting. It also needs to be done when going from stronger to weaker modes,
6605 6646 * so that we fall back to matching on the longest-matching-route (as opposed
6606 6647 * to a shorter match that may have been selected in the strong mode
6607 6648 * to satisfy src_multihoming settings).
6608 6649 *
6609 6650  * The cached ixa_ire entries for all conn_t entries are marked as
6610 6651 * "verify" so that they will be recomputed for the next packet.
6611 6652 */
6612 6653 void
6613 6654 conn_ire_revalidate(conn_t *connp, void *arg)
6614 6655 {
6615 6656 boolean_t isv6 = (boolean_t)arg;
6616 6657
6617 6658 if ((isv6 && connp->conn_ipversion != IPV6_VERSION) ||
6618 6659 (!isv6 && connp->conn_ipversion != IPV4_VERSION))
6619 6660 return;
6620 6661 connp->conn_ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
6621 6662 }
6622 6663
6623 6664 /*
6624 6665  * Handles both IPv4 and IPv6 reassembly - doing the out-of-order cases.
6625 6666 * When an ipf is passed here for the first time, if
6626 6667 * we already have in-order fragments on the queue, we convert from the fast-
6627 6668 * path reassembly scheme to the hard-case scheme. From then on, additional
6628 6669 * fragments are reassembled here. We keep track of the start and end offsets
6629 6670 * of each piece, and the number of holes in the chain. When the hole count
6630 6671 * goes to zero, we are done!
6631 6672 *
6632 6673 * The ipf_count will be updated to account for any mblk(s) added (pointed to
6633 6674  * by mp) or subtracted (freeb()ed dups); upon return the caller must update
6634 6675 * ipfb_count and ill_frag_count by the difference of ipf_count before and
6635 6676 * after the call to ip_reassemble().
6636 6677 */
6637 6678 int
6638 6679 ip_reassemble(mblk_t *mp, ipf_t *ipf, uint_t start, boolean_t more, ill_t *ill,
6639 6680 size_t msg_len)
6640 6681 {
6641 6682 uint_t end;
6642 6683 mblk_t *next_mp;
6643 6684 mblk_t *mp1;
6644 6685 uint_t offset;
6645 6686 boolean_t incr_dups = B_TRUE;
6646 6687 boolean_t offset_zero_seen = B_FALSE;
6647 6688 boolean_t pkt_boundary_checked = B_FALSE;
6648 6689
6649 6690 /* If start == 0 then ipf_nf_hdr_len has to be set. */
6650 6691 ASSERT(start != 0 || ipf->ipf_nf_hdr_len != 0);
6651 6692
6652 6693 /* Add in byte count */
6653 6694 ipf->ipf_count += msg_len;
6654 6695 if (ipf->ipf_end) {
6655 6696 /*
6656 6697 * We were part way through in-order reassembly, but now there
6657 6698 * is a hole. We walk through messages already queued, and
6658 6699 * mark them for hard case reassembly. We know that up till
6659 6700 * now they were in order starting from offset zero.
6660 6701 */
6661 6702 offset = 0;
6662 6703 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
6663 6704 IP_REASS_SET_START(mp1, offset);
6664 6705 if (offset == 0) {
6665 6706 ASSERT(ipf->ipf_nf_hdr_len != 0);
6666 6707 offset = -ipf->ipf_nf_hdr_len;
6667 6708 }
6668 6709 offset += mp1->b_wptr - mp1->b_rptr;
6669 6710 IP_REASS_SET_END(mp1, offset);
6670 6711 }
6671 6712 /* One hole at the end. */
6672 6713 ipf->ipf_hole_cnt = 1;
6673 6714 /* Brand it as a hard case, forever. */
6674 6715 ipf->ipf_end = 0;
6675 6716 }
6676 6717 /* Walk through all the new pieces. */
6677 6718 do {
6678 6719 end = start + (mp->b_wptr - mp->b_rptr);
6679 6720 /*
6680 6721 * If start is 0, decrease 'end' only for the first mblk of
6681 6722  * the fragment. Otherwise 'end' can get a wrong value in the
6682 6723 * second pass of the loop if first mblk is exactly the
6683 6724 * size of ipf_nf_hdr_len.
6684 6725 */
6685 6726 if (start == 0 && !offset_zero_seen) {
6686 6727 /* First segment */
6687 6728 ASSERT(ipf->ipf_nf_hdr_len != 0);
6688 6729 end -= ipf->ipf_nf_hdr_len;
6689 6730 offset_zero_seen = B_TRUE;
6690 6731 }
6691 6732 next_mp = mp->b_cont;
6692 6733 /*
6693 6734  * We are checking to see if there is any interesting data
6694 6735 * to process. If there isn't and the mblk isn't the
6695 6736 * one which carries the unfragmentable header then we
6696 6737 * drop it. It's possible to have just the unfragmentable
6697 6738 * header come through without any data. That needs to be
6698 6739 * saved.
6699 6740 *
6700 6741 * If the assert at the top of this function holds then the
6701 6742  * term "ipf->ipf_nf_hdr_len != 0" isn't needed. This code path
6702 6743  * is traveled infrequently enough that the test is left in
6703 6744 * to protect against future code changes which break that
6704 6745 * invariant.
6705 6746 */
6706 6747 if (start == end && start != 0 && ipf->ipf_nf_hdr_len != 0) {
6707 6748 /* Empty. Blast it. */
6708 6749 IP_REASS_SET_START(mp, 0);
6709 6750 IP_REASS_SET_END(mp, 0);
6710 6751 /*
6711 6752 * If the ipf points to the mblk we are about to free,
6712 6753 * update ipf to point to the next mblk (or NULL
6713 6754 * if none).
6714 6755 */
6715 6756 if (ipf->ipf_mp->b_cont == mp)
6716 6757 ipf->ipf_mp->b_cont = next_mp;
6717 6758 freeb(mp);
6718 6759 continue;
6719 6760 }
6720 6761 mp->b_cont = NULL;
6721 6762 IP_REASS_SET_START(mp, start);
6722 6763 IP_REASS_SET_END(mp, end);
6723 6764 if (!ipf->ipf_tail_mp) {
6724 6765 ipf->ipf_tail_mp = mp;
6725 6766 ipf->ipf_mp->b_cont = mp;
6726 6767 if (start == 0 || !more) {
6727 6768 ipf->ipf_hole_cnt = 1;
6728 6769 /*
6729 6770 * if the first fragment comes in more than one
6730 6771 * mblk, this loop will be executed for each
6731 6772 * mblk. Need to adjust hole count so exiting
6732 6773 * this routine will leave hole count at 1.
6733 6774 */
6734 6775 if (next_mp)
6735 6776 ipf->ipf_hole_cnt++;
6736 6777 } else
6737 6778 ipf->ipf_hole_cnt = 2;
6738 6779 continue;
6739 6780 } else if (ipf->ipf_last_frag_seen && !more &&
6740 6781 !pkt_boundary_checked) {
6741 6782 /*
6742 6783 * We check datagram boundary only if this fragment
6743 6784 * claims to be the last fragment and we have seen a
6744 6785 * last fragment in the past too. We do this only
6745 6786 * once for a given fragment.
6746 6787 *
6747 6788 * start cannot be 0 here as fragments with start=0
6748 6789 * and MF=0 gets handled as a complete packet. These
6749 6790 * fragments should not reach here.
6750 6791 */
6751 6792
6752 6793 if (start + msgdsize(mp) !=
6753 6794 IP_REASS_END(ipf->ipf_tail_mp)) {
6754 6795 /*
6755 6796 * We have two fragments both of which claim
6756 6797  * to be the last fragment but give conflicting
6757 6798 * information about the whole datagram size.
6758 6799 * Something fishy is going on. Drop the
6759 6800 * fragment and free up the reassembly list.
6760 6801 */
6761 6802 return (IP_REASS_FAILED);
6762 6803 }
6763 6804
6764 6805 /*
6765 6806 * We shouldn't come to this code block again for this
6766 6807 * particular fragment.
6767 6808 */
6768 6809 pkt_boundary_checked = B_TRUE;
6769 6810 }
6770 6811
6771 6812 /* New stuff at or beyond tail? */
6772 6813 offset = IP_REASS_END(ipf->ipf_tail_mp);
6773 6814 if (start >= offset) {
6774 6815 if (ipf->ipf_last_frag_seen) {
6775 6816 /* current fragment is beyond last fragment */
6776 6817 return (IP_REASS_FAILED);
6777 6818 }
6778 6819 /* Link it on end. */
6779 6820 ipf->ipf_tail_mp->b_cont = mp;
6780 6821 ipf->ipf_tail_mp = mp;
6781 6822 if (more) {
6782 6823 if (start != offset)
6783 6824 ipf->ipf_hole_cnt++;
6784 6825 } else if (start == offset && next_mp == NULL)
6785 6826 ipf->ipf_hole_cnt--;
6786 6827 continue;
6787 6828 }
6788 6829 mp1 = ipf->ipf_mp->b_cont;
6789 6830 offset = IP_REASS_START(mp1);
6790 6831 /* New stuff at the front? */
6791 6832 if (start < offset) {
6792 6833 if (start == 0) {
6793 6834 if (end >= offset) {
6794 6835 /* Nailed the hole at the beginning. */
6795 6836 ipf->ipf_hole_cnt--;
6796 6837 }
6797 6838 } else if (end < offset) {
6798 6839 /*
6799 6840 * A hole, stuff, and a hole where there used
6800 6841 * to be just a hole.
6801 6842 */
6802 6843 ipf->ipf_hole_cnt++;
6803 6844 }
6804 6845 mp->b_cont = mp1;
6805 6846 /* Check for overlap. */
6806 6847 while (end > offset) {
6807 6848 if (end < IP_REASS_END(mp1)) {
6808 6849 mp->b_wptr -= end - offset;
6809 6850 IP_REASS_SET_END(mp, offset);
6810 6851 BUMP_MIB(ill->ill_ip_mib,
6811 6852 ipIfStatsReasmPartDups);
6812 6853 break;
6813 6854 }
6814 6855 /* Did we cover another hole? */
6815 6856 if ((mp1->b_cont &&
6816 6857 IP_REASS_END(mp1) !=
6817 6858 IP_REASS_START(mp1->b_cont) &&
6818 6859 end >= IP_REASS_START(mp1->b_cont)) ||
6819 6860 (!ipf->ipf_last_frag_seen && !more)) {
6820 6861 ipf->ipf_hole_cnt--;
6821 6862 }
6822 6863 /* Clip out mp1. */
6823 6864 if ((mp->b_cont = mp1->b_cont) == NULL) {
6824 6865 /*
6825 6866 * After clipping out mp1, this guy
6826 6867 * is now hanging off the end.
6827 6868 */
6828 6869 ipf->ipf_tail_mp = mp;
6829 6870 }
6830 6871 IP_REASS_SET_START(mp1, 0);
6831 6872 IP_REASS_SET_END(mp1, 0);
6832 6873 /* Subtract byte count */
6833 6874 ipf->ipf_count -= mp1->b_datap->db_lim -
6834 6875 mp1->b_datap->db_base;
6835 6876 freeb(mp1);
6836 6877 BUMP_MIB(ill->ill_ip_mib,
6837 6878 ipIfStatsReasmPartDups);
6838 6879 mp1 = mp->b_cont;
6839 6880 if (!mp1)
6840 6881 break;
6841 6882 offset = IP_REASS_START(mp1);
6842 6883 }
6843 6884 ipf->ipf_mp->b_cont = mp;
6844 6885 continue;
6845 6886 }
6846 6887 /*
6847 6888 * The new piece starts somewhere between the start of the head
6848 6889 * and before the end of the tail.
6849 6890 */
6850 6891 for (; mp1; mp1 = mp1->b_cont) {
6851 6892 offset = IP_REASS_END(mp1);
6852 6893 if (start < offset) {
6853 6894 if (end <= offset) {
6854 6895 /* Nothing new. */
6855 6896 IP_REASS_SET_START(mp, 0);
6856 6897 IP_REASS_SET_END(mp, 0);
6857 6898 /* Subtract byte count */
6858 6899 ipf->ipf_count -= mp->b_datap->db_lim -
6859 6900 mp->b_datap->db_base;
6860 6901 if (incr_dups) {
6861 6902 ipf->ipf_num_dups++;
6862 6903 incr_dups = B_FALSE;
6863 6904 }
6864 6905 freeb(mp);
6865 6906 BUMP_MIB(ill->ill_ip_mib,
6866 6907 ipIfStatsReasmDuplicates);
6867 6908 break;
6868 6909 }
6869 6910 /*
6870 6911 * Trim redundant stuff off beginning of new
6871 6912 * piece.
6872 6913 */
6873 6914 IP_REASS_SET_START(mp, offset);
6874 6915 mp->b_rptr += offset - start;
6875 6916 BUMP_MIB(ill->ill_ip_mib,
6876 6917 ipIfStatsReasmPartDups);
6877 6918 start = offset;
6878 6919 if (!mp1->b_cont) {
6879 6920 /*
6880 6921 * After trimming, this guy is now
6881 6922 * hanging off the end.
6882 6923 */
6883 6924 mp1->b_cont = mp;
6884 6925 ipf->ipf_tail_mp = mp;
6885 6926 if (!more) {
6886 6927 ipf->ipf_hole_cnt--;
6887 6928 }
6888 6929 break;
6889 6930 }
6890 6931 }
6891 6932 if (start >= IP_REASS_START(mp1->b_cont))
6892 6933 continue;
6893 6934 /* Fill a hole */
6894 6935 if (start > offset)
6895 6936 ipf->ipf_hole_cnt++;
6896 6937 mp->b_cont = mp1->b_cont;
6897 6938 mp1->b_cont = mp;
6898 6939 mp1 = mp->b_cont;
6899 6940 offset = IP_REASS_START(mp1);
6900 6941 if (end >= offset) {
6901 6942 ipf->ipf_hole_cnt--;
6902 6943 /* Check for overlap. */
6903 6944 while (end > offset) {
6904 6945 if (end < IP_REASS_END(mp1)) {
6905 6946 mp->b_wptr -= end - offset;
6906 6947 IP_REASS_SET_END(mp, offset);
6907 6948 /*
6908 6949 * TODO we might bump
6909 6950 * this up twice if there is
6910 6951 * overlap at both ends.
6911 6952 */
6912 6953 BUMP_MIB(ill->ill_ip_mib,
6913 6954 ipIfStatsReasmPartDups);
6914 6955 break;
6915 6956 }
6916 6957 /* Did we cover another hole? */
6917 6958 if ((mp1->b_cont &&
6918 6959 IP_REASS_END(mp1)
6919 6960 != IP_REASS_START(mp1->b_cont) &&
6920 6961 end >=
6921 6962 IP_REASS_START(mp1->b_cont)) ||
6922 6963 (!ipf->ipf_last_frag_seen &&
6923 6964 !more)) {
6924 6965 ipf->ipf_hole_cnt--;
6925 6966 }
6926 6967 /* Clip out mp1. */
6927 6968 if ((mp->b_cont = mp1->b_cont) ==
6928 6969 NULL) {
6929 6970 /*
6930 6971 * After clipping out mp1,
6931 6972 * this guy is now hanging
6932 6973 * off the end.
6933 6974 */
6934 6975 ipf->ipf_tail_mp = mp;
6935 6976 }
6936 6977 IP_REASS_SET_START(mp1, 0);
6937 6978 IP_REASS_SET_END(mp1, 0);
6938 6979 /* Subtract byte count */
6939 6980 ipf->ipf_count -=
6940 6981 mp1->b_datap->db_lim -
6941 6982 mp1->b_datap->db_base;
6942 6983 freeb(mp1);
6943 6984 BUMP_MIB(ill->ill_ip_mib,
6944 6985 ipIfStatsReasmPartDups);
6945 6986 mp1 = mp->b_cont;
6946 6987 if (!mp1)
6947 6988 break;
6948 6989 offset = IP_REASS_START(mp1);
6949 6990 }
6950 6991 }
6951 6992 break;
6952 6993 }
6953 6994 } while (start = end, mp = next_mp);
6954 6995
6955 6996 /* Fragment just processed could be the last one. Remember this fact */
6956 6997 if (!more)
6957 6998 ipf->ipf_last_frag_seen = B_TRUE;
6958 6999
6959 7000 /* Still got holes? */
6960 7001 if (ipf->ipf_hole_cnt)
6961 7002 return (IP_REASS_PARTIAL);
6962 7003 /* Clean up overloaded fields to avoid upstream disasters. */
6963 7004 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
6964 7005 IP_REASS_SET_START(mp1, 0);
6965 7006 IP_REASS_SET_END(mp1, 0);
6966 7007 }
6967 7008 return (IP_REASS_COMPLETE);
6968 7009 }
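
The invariant ip_reassemble() maintains is that ipf_hole_cnt equals the number of gaps in the fragment chain; reassembly completes when it reaches zero. The toy model below recomputes the count by scanning a sorted chain, whereas the kernel updates it incrementally; it also assumes non-overlapping fragments and ignores the trailing hole (open until the MF=0 fragment arrives) for brevity.

	#include <stdio.h>
	#include <stdlib.h>

	typedef struct frag {
		unsigned int start, end;	/* [start, end) byte range */
		struct frag *next;
	} frag_t;

	/* Insert a fragment in start order; assumes no overlap. */
	static void
	frag_insert(frag_t **headp, unsigned int start, unsigned int end)
	{
		frag_t *f = malloc(sizeof (*f));
		frag_t **pp = headp;

		if (f == NULL)
			abort();
		f->start = start;
		f->end = end;
		while (*pp != NULL && (*pp)->start < start)
			pp = &(*pp)->next;
		f->next = *pp;
		*pp = f;
	}

	/*
	 * Count the gaps: one if the chain does not start at offset
	 * zero, plus one per discontinuity between neighbours.
	 */
	static int
	hole_count(const frag_t *f)
	{
		int holes = 0;
		unsigned int expect = 0;

		for (; f != NULL; f = f->next) {
			if (f->start > expect)
				holes++;
			expect = f->end;
		}
		return (holes);
	}

	int
	main(void)
	{
		frag_t *head = NULL;

		frag_insert(&head, 1480, 2960);	/* middle piece first */
		printf("holes: %d\n", hole_count(head));	/* 1 */
		frag_insert(&head, 0, 1480);
		printf("holes: %d\n", hole_count(head));	/* 0 */
		return (0);
	}
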
6969 7010
6970 7011 /*
6971 7012 * Fragmentation reassembly. Each ILL has a hash table for
6972 7013 * queuing packets undergoing reassembly for all IPIFs
6973 7014 * associated with the ILL. The hash is based on the packet
6974 7015 * IP ident field. The ILL frag hash table was allocated
6975 7016 * as a timer block at the time the ILL was created. Whenever
6976 7017 * there is anything on the reassembly queue, the timer will
6977 7018 * be running. Returns the reassembled packet if reassembly completes.
6978 7019 */
6979 7020 mblk_t *
6980 7021 ip_input_fragment(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
6981 7022 {
6982 7023 uint32_t frag_offset_flags;
6983 7024 mblk_t *t_mp;
6984 7025 ipaddr_t dst;
6985 7026 uint8_t proto = ipha->ipha_protocol;
6986 7027 uint32_t sum_val;
6987 7028 uint16_t sum_flags;
6988 7029 ipf_t *ipf;
6989 7030 ipf_t **ipfp;
6990 7031 ipfb_t *ipfb;
6991 7032 uint16_t ident;
6992 7033 uint32_t offset;
6993 7034 ipaddr_t src;
6994 7035 uint_t hdr_length;
6995 7036 uint32_t end;
6996 7037 mblk_t *mp1;
6997 7038 mblk_t *tail_mp;
6998 7039 size_t count;
6999 7040 size_t msg_len;
7000 7041 uint8_t ecn_info = 0;
7001 7042 uint32_t packet_size;
7002 7043 boolean_t pruned = B_FALSE;
7003 7044 ill_t *ill = ira->ira_ill;
7004 7045 ip_stack_t *ipst = ill->ill_ipst;
7005 7046
7006 7047 /*
7007 7048  * Drop the fragment as early as possible if
7008 7049  * we don't have the resources to reassemble.
7009 7050 */
7010 7051 if (ipst->ips_ip_reass_queue_bytes == 0) {
7011 7052 freemsg(mp);
7012 7053 return (NULL);
7013 7054 }
7014 7055
7015 7056 /* Check for fragmentation offset; return if there's none */
7016 7057 if ((frag_offset_flags = ntohs(ipha->ipha_fragment_offset_and_flags) &
7017 7058 (IPH_MF | IPH_OFFSET)) == 0)
7018 7059 return (mp);
7019 7060
7020 7061 /*
7021 7062 * We utilize hardware computed checksum info only for UDP since
7022 7063 * IP fragmentation is a normal occurrence for the protocol. In
7023 7064 * addition, checksum offload support for IP fragments carrying
7024 7065 * UDP payload is commonly implemented across network adapters.
7025 7066 */
7026 7067 ASSERT(ira->ira_rill != NULL);
7027 7068 if (proto == IPPROTO_UDP && dohwcksum &&
7028 7069 ILL_HCKSUM_CAPABLE(ira->ira_rill) &&
7029 7070 (DB_CKSUMFLAGS(mp) & (HCK_FULLCKSUM | HCK_PARTIALCKSUM))) {
7030 7071 mblk_t *mp1 = mp->b_cont;
7031 7072 int32_t len;
7032 7073
7033 7074 /* Record checksum information from the packet */
7034 7075 sum_val = (uint32_t)DB_CKSUM16(mp);
7035 7076 sum_flags = DB_CKSUMFLAGS(mp);
7036 7077
7037 7078 /* IP payload offset from beginning of mblk */
7038 7079 offset = ((uchar_t *)ipha + IPH_HDR_LENGTH(ipha)) - mp->b_rptr;
7039 7080
7040 7081 if ((sum_flags & HCK_PARTIALCKSUM) &&
7041 7082 (mp1 == NULL || mp1->b_cont == NULL) &&
7042 7083 offset >= DB_CKSUMSTART(mp) &&
7043 7084 ((len = offset - DB_CKSUMSTART(mp)) & 1) == 0) {
7044 7085 uint32_t adj;
7045 7086 /*
7046 7087 * Partial checksum has been calculated by hardware
7047 7088 * and attached to the packet; in addition, any
7048 7089 * prepended extraneous data is even byte aligned.
7049 7090 * If any such data exists, we adjust the checksum;
7050 7091 * this would also handle any postpended data.
7051 7092 */
7052 7093 IP_ADJCKSUM_PARTIAL(mp->b_rptr + DB_CKSUMSTART(mp),
7053 7094 mp, mp1, len, adj);
7054 7095
7055 7096 /* One's complement subtract extraneous checksum */
7056 7097 if (adj >= sum_val)
7057 7098 sum_val = ~(adj - sum_val) & 0xFFFF;
7058 7099 else
7059 7100 sum_val -= adj;
7060 7101 }
7061 7102 } else {
7062 7103 sum_val = 0;
7063 7104 sum_flags = 0;
7064 7105 }
7065 7106
7066 7107 /* Clear hardware checksumming flag */
7067 7108 DB_CKSUMFLAGS(mp) = 0;
7068 7109
7069 7110 ident = ipha->ipha_ident;
7070 7111 offset = (frag_offset_flags << 3) & 0xFFFF;
7071 7112 src = ipha->ipha_src;
7072 7113 dst = ipha->ipha_dst;
7073 7114 hdr_length = IPH_HDR_LENGTH(ipha);
7074 7115 end = ntohs(ipha->ipha_length) - hdr_length;
7075 7116
7076 7117 /* If end == 0 then we have a packet with no data, so just free it */
7077 7118 if (end == 0) {
7078 7119 freemsg(mp);
7079 7120 return (NULL);
7080 7121 }
7081 7122
7082 7123 /* Record the ECN field info. */
7083 7124 ecn_info = (ipha->ipha_type_of_service & 0x3);
7084 7125 if (offset != 0) {
7085 7126 /*
7086 7127 * If this isn't the first piece, strip the header, and
7087 7128 * add the offset to the end value.
7088 7129 */
7089 7130 mp->b_rptr += hdr_length;
7090 7131 end += offset;
7091 7132 }
7092 7133
7093 7134 /* Handle vnic loopback of fragments */
7094 7135 if (mp->b_datap->db_ref > 2)
7095 7136 msg_len = 0;
7096 7137 else
7097 7138 msg_len = MBLKSIZE(mp);
7098 7139
7099 7140 tail_mp = mp;
7100 7141 while (tail_mp->b_cont != NULL) {
7101 7142 tail_mp = tail_mp->b_cont;
7102 7143 if (tail_mp->b_datap->db_ref <= 2)
7103 7144 msg_len += MBLKSIZE(tail_mp);
7104 7145 }
7105 7146
7106 7147 /* If the reassembly list for this ILL will get too big, prune it */
7107 7148 if ((msg_len + sizeof (*ipf) + ill->ill_frag_count) >=
7108 7149 ipst->ips_ip_reass_queue_bytes) {
7109 7150 DTRACE_PROBE3(ip_reass_queue_bytes, uint_t, msg_len,
7110 7151 uint_t, ill->ill_frag_count,
7111 7152 uint_t, ipst->ips_ip_reass_queue_bytes);
7112 7153 ill_frag_prune(ill,
7113 7154 (ipst->ips_ip_reass_queue_bytes < msg_len) ? 0 :
7114 7155 (ipst->ips_ip_reass_queue_bytes - msg_len));
7115 7156 pruned = B_TRUE;
7116 7157 }
7117 7158
7118 7159 ipfb = &ill->ill_frag_hash_tbl[ILL_FRAG_HASH(src, ident)];
7119 7160 mutex_enter(&ipfb->ipfb_lock);
7120 7161
7121 7162 ipfp = &ipfb->ipfb_ipf;
7122 7163 /* Try to find an existing fragment queue for this packet. */
7123 7164 for (;;) {
7124 7165 ipf = ipfp[0];
7125 7166 if (ipf != NULL) {
7126 7167 /*
7127 7168 * It has to match on ident and src/dst address.
7128 7169 */
7129 7170 if (ipf->ipf_ident == ident &&
7130 7171 ipf->ipf_src == src &&
7131 7172 ipf->ipf_dst == dst &&
7132 7173 ipf->ipf_protocol == proto) {
7133 7174 /*
7134 7175 * If we have received too many
7135 7176 * duplicate fragments for this packet
7136 7177 * free it.
7137 7178 */
7138 7179 if (ipf->ipf_num_dups > ip_max_frag_dups) {
7139 7180 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7140 7181 freemsg(mp);
7141 7182 mutex_exit(&ipfb->ipfb_lock);
7142 7183 return (NULL);
7143 7184 }
7144 7185 /* Found it. */
7145 7186 break;
7146 7187 }
7147 7188 ipfp = &ipf->ipf_hash_next;
7148 7189 continue;
7149 7190 }
7150 7191
7151 7192 /*
7152 7193 * If we pruned the list, do we want to store this new
7153 7194  * fragment? We apply an optimization here based on the
7154 7195 * fact that most fragments will be received in order.
7155 7196 * So if the offset of this incoming fragment is zero,
7156 7197 * it is the first fragment of a new packet. We will
7157 7198 * keep it. Otherwise drop the fragment, as we have
7158 7199 * probably pruned the packet already (since the
7159 7200 * packet cannot be found).
7160 7201 */
7161 7202 if (pruned && offset != 0) {
7162 7203 mutex_exit(&ipfb->ipfb_lock);
7163 7204 freemsg(mp);
7164 7205 return (NULL);
7165 7206 }
7166 7207
7167 7208 if (ipfb->ipfb_frag_pkts >= MAX_FRAG_PKTS(ipst)) {
7168 7209 /*
7169 7210 * Too many fragmented packets in this hash
7170 7211 * bucket. Free the oldest.
7171 7212 */
7172 7213 ill_frag_free_pkts(ill, ipfb, ipfb->ipfb_ipf, 1);
7173 7214 }
7174 7215
7175 7216 /* New guy. Allocate a frag message. */
7176 7217 mp1 = allocb(sizeof (*ipf), BPRI_MED);
7177 7218 if (mp1 == NULL) {
7178 7219 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7179 7220 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7180 7221 freemsg(mp);
7181 7222 reass_done:
7182 7223 mutex_exit(&ipfb->ipfb_lock);
7183 7224 return (NULL);
7184 7225 }
7185 7226
7186 7227 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmReqds);
7187 7228 mp1->b_cont = mp;
7188 7229
7189 7230 /* Initialize the fragment header. */
7190 7231 ipf = (ipf_t *)mp1->b_rptr;
7191 7232 ipf->ipf_mp = mp1;
7192 7233 ipf->ipf_ptphn = ipfp;
7193 7234 ipfp[0] = ipf;
7194 7235 ipf->ipf_hash_next = NULL;
7195 7236 ipf->ipf_ident = ident;
7196 7237 ipf->ipf_protocol = proto;
7197 7238 ipf->ipf_src = src;
7198 7239 ipf->ipf_dst = dst;
7199 7240 ipf->ipf_nf_hdr_len = 0;
7200 7241 /* Record reassembly start time. */
7201 7242 ipf->ipf_timestamp = gethrestime_sec();
7202 7243 /* Record ipf generation and account for frag header */
7203 7244 ipf->ipf_gen = ill->ill_ipf_gen++;
7204 7245 ipf->ipf_count = MBLKSIZE(mp1);
7205 7246 ipf->ipf_last_frag_seen = B_FALSE;
7206 7247 ipf->ipf_ecn = ecn_info;
7207 7248 ipf->ipf_num_dups = 0;
7208 7249 ipfb->ipfb_frag_pkts++;
7209 7250 ipf->ipf_checksum = 0;
7210 7251 ipf->ipf_checksum_flags = 0;
7211 7252
7212 7253 /* Store checksum value in fragment header */
7213 7254 if (sum_flags != 0) {
7214 7255 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7215 7256 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7216 7257 ipf->ipf_checksum = sum_val;
7217 7258 ipf->ipf_checksum_flags = sum_flags;
7218 7259 }
7219 7260
7220 7261 /*
7221 7262 * We handle reassembly two ways. In the easy case,
7222 7263 * where all the fragments show up in order, we do
7223 7264 * minimal bookkeeping, and just clip new pieces on
7224 7265 * the end. If we ever see a hole, then we go off
7225 7266 * to ip_reassemble which has to mark the pieces and
7226 7267 * keep track of the number of holes, etc. Obviously,
7227 7268 * the point of having both mechanisms is so we can
7228 7269 * handle the easy case as efficiently as possible.
7229 7270 */
7230 7271 if (offset == 0) {
7231 7272 /* Easy case, in-order reassembly so far. */
7232 7273 ipf->ipf_count += msg_len;
7233 7274 ipf->ipf_tail_mp = tail_mp;
7234 7275 /*
7235 7276 * Keep track of next expected offset in
7236 7277 * ipf_end.
7237 7278 */
7238 7279 ipf->ipf_end = end;
7239 7280 ipf->ipf_nf_hdr_len = hdr_length;
7240 7281 } else {
7241 7282 /* Hard case, hole at the beginning. */
7242 7283 ipf->ipf_tail_mp = NULL;
7243 7284 /*
7244 7285 * ipf_end == 0 means that we have given up
7245 7286 * on easy reassembly.
7246 7287 */
7247 7288 ipf->ipf_end = 0;
7248 7289
7249 7290 /* Forget checksum offload from now on */
7250 7291 ipf->ipf_checksum_flags = 0;
7251 7292
7252 7293 /*
7253 7294 * ipf_hole_cnt is set by ip_reassemble.
7254 7295 * ipf_count is updated by ip_reassemble.
7255 7296 * No need to check for return value here
7256 7297 * as we don't expect reassembly to complete
7257 7298 * or fail for the first fragment itself.
7258 7299 */
7259 7300 (void) ip_reassemble(mp, ipf,
7260 7301 (frag_offset_flags & IPH_OFFSET) << 3,
7261 7302 (frag_offset_flags & IPH_MF), ill, msg_len);
7262 7303 }
7263 7304 /* Update per ipfb and ill byte counts */
7264 7305 ipfb->ipfb_count += ipf->ipf_count;
7265 7306 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7266 7307 atomic_add_32(&ill->ill_frag_count, ipf->ipf_count);
7267 7308 /* If the frag timer wasn't already going, start it. */
7268 7309 mutex_enter(&ill->ill_lock);
7269 7310 ill_frag_timer_start(ill);
7270 7311 mutex_exit(&ill->ill_lock);
7271 7312 goto reass_done;
7272 7313 }
7273 7314
7274 7315 /*
7275 7316  * If the packet's checksum flags have changed (it could be coming up
7276 7317 * from an interface different than the previous, therefore
7277 7318 * possibly different checksum capability), then forget about
7278 7319 * any stored checksum states. Otherwise add the value to
7279 7320 * the existing one stored in the fragment header.
7280 7321 */
7281 7322 if (sum_flags != 0 && sum_flags == ipf->ipf_checksum_flags) {
7282 7323 sum_val += ipf->ipf_checksum;
7283 7324 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7284 7325 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7285 7326 ipf->ipf_checksum = sum_val;
7286 7327 } else if (ipf->ipf_checksum_flags != 0) {
7287 7328 /* Forget checksum offload from now on */
7288 7329 ipf->ipf_checksum_flags = 0;
7289 7330 }
7290 7331
7291 7332 /*
7292 7333 * We have a new piece of a datagram which is already being
7293 7334 * reassembled. Update the ECN info if all IP fragments
7294 7335 * are ECN capable. If there is one which is not, clear
7295 7336 * all the info. If there is at least one which has CE
7296 7337 * code point, IP needs to report that up to transport.
7297 7338 */
7298 7339 if (ecn_info != IPH_ECN_NECT && ipf->ipf_ecn != IPH_ECN_NECT) {
7299 7340 if (ecn_info == IPH_ECN_CE)
7300 7341 ipf->ipf_ecn = IPH_ECN_CE;
7301 7342 } else {
7302 7343 ipf->ipf_ecn = IPH_ECN_NECT;
7303 7344 }
7304 7345 if (offset && ipf->ipf_end == offset) {
7305 7346 /* The new fragment fits at the end */
7306 7347 ipf->ipf_tail_mp->b_cont = mp;
7307 7348 /* Update the byte count */
7308 7349 ipf->ipf_count += msg_len;
7309 7350 /* Update per ipfb and ill byte counts */
7310 7351 ipfb->ipfb_count += msg_len;
7311 7352 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7312 7353 atomic_add_32(&ill->ill_frag_count, msg_len);
7313 7354 if (frag_offset_flags & IPH_MF) {
7314 7355 /* More to come. */
7315 7356 ipf->ipf_end = end;
7316 7357 ipf->ipf_tail_mp = tail_mp;
7317 7358 goto reass_done;
7318 7359 }
7319 7360 } else {
7320 7361 /* Go do the hard cases. */
7321 7362 int ret;
7322 7363
7323 7364 if (offset == 0)
7324 7365 ipf->ipf_nf_hdr_len = hdr_length;
7325 7366
7326 7367 /* Save current byte count */
7327 7368 count = ipf->ipf_count;
7328 7369 ret = ip_reassemble(mp, ipf,
7329 7370 (frag_offset_flags & IPH_OFFSET) << 3,
7330 7371 (frag_offset_flags & IPH_MF), ill, msg_len);
7331 7372 /* Count of bytes added and subtracted (freeb()ed) */
7332 7373 count = ipf->ipf_count - count;
7333 7374 if (count) {
7334 7375 /* Update per ipfb and ill byte counts */
7335 7376 ipfb->ipfb_count += count;
7336 7377 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7337 7378 atomic_add_32(&ill->ill_frag_count, count);
7338 7379 }
7339 7380 if (ret == IP_REASS_PARTIAL) {
7340 7381 goto reass_done;
7341 7382 } else if (ret == IP_REASS_FAILED) {
7342 7383 /* Reassembly failed. Free up all resources */
7343 7384 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7344 7385 for (t_mp = mp; t_mp != NULL; t_mp = t_mp->b_cont) {
7345 7386 IP_REASS_SET_START(t_mp, 0);
7346 7387 IP_REASS_SET_END(t_mp, 0);
7347 7388 }
7348 7389 freemsg(mp);
7349 7390 goto reass_done;
7350 7391 }
7351 7392 /* We will reach here iff 'ret' is IP_REASS_COMPLETE */
7352 7393 }
7353 7394 /*
7354 7395 * We have completed reassembly. Unhook the frag header from
7355 7396 * the reassembly list.
7356 7397 *
7357 7398 * Before we free the frag header, record the ECN info
7358 7399 * to report back to the transport.
7359 7400 */
7360 7401 ecn_info = ipf->ipf_ecn;
7361 7402 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmOKs);
7362 7403 ipfp = ipf->ipf_ptphn;
7363 7404
7364 7405 /* We need to supply these to caller */
7365 7406 if ((sum_flags = ipf->ipf_checksum_flags) != 0)
7366 7407 sum_val = ipf->ipf_checksum;
7367 7408 else
7368 7409 sum_val = 0;
7369 7410
7370 7411 mp1 = ipf->ipf_mp;
7371 7412 count = ipf->ipf_count;
7372 7413 ipf = ipf->ipf_hash_next;
7373 7414 if (ipf != NULL)
7374 7415 ipf->ipf_ptphn = ipfp;
7375 7416 ipfp[0] = ipf;
7376 7417 atomic_add_32(&ill->ill_frag_count, -count);
7377 7418 ASSERT(ipfb->ipfb_count >= count);
7378 7419 ipfb->ipfb_count -= count;
7379 7420 ipfb->ipfb_frag_pkts--;
7380 7421 mutex_exit(&ipfb->ipfb_lock);
7381 7422 /* Ditch the frag header. */
7382 7423 mp = mp1->b_cont;
7383 7424
7384 7425 freeb(mp1);
7385 7426
7386 7427 /* Restore original IP length in header. */
7387 7428 packet_size = (uint32_t)msgdsize(mp);
7388 7429 if (packet_size > IP_MAXPACKET) {
7389 7430 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7390 7431 ip_drop_input("Reassembled packet too large", mp, ill);
7391 7432 freemsg(mp);
7392 7433 return (NULL);
7393 7434 }
7394 7435
7395 7436 if (DB_REF(mp) > 1) {
7396 7437 mblk_t *mp2 = copymsg(mp);
7397 7438
7398 7439 if (mp2 == NULL) {
7399 7440 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7400 7441 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7401 7442 freemsg(mp);
7402 7443 return (NULL);
7403 7444 }
7404 7445 freemsg(mp);
7405 7446 mp = mp2;
7406 7447 }
7407 7448 ipha = (ipha_t *)mp->b_rptr;
7408 7449
7409 7450 ipha->ipha_length = htons((uint16_t)packet_size);
7410 7451 /* We're now complete, zip the frag state */
7411 7452 ipha->ipha_fragment_offset_and_flags = 0;
7412 7453 /* Record the ECN info. */
7413 7454 ipha->ipha_type_of_service &= 0xFC;
7414 7455 ipha->ipha_type_of_service |= ecn_info;
7415 7456
7416 7457 /* Update the receive attributes */
7417 7458 ira->ira_pktlen = packet_size;
7418 7459 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
7419 7460
7420 7461 /* Reassembly is successful; set checksum information in packet */
7421 7462 DB_CKSUM16(mp) = (uint16_t)sum_val;
7422 7463 DB_CKSUMFLAGS(mp) = sum_flags;
7423 7464 DB_CKSUMSTART(mp) = ira->ira_ip_hdr_length;
7424 7465
7425 7466 return (mp);
7426 7467 }
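
The repeated `(sum & 0xFFFF) + (sum >> 16)` steps above fold a 32-bit one's-complement accumulator back into 16 bits; two folds are always enough, because only the first can produce a carry. A stand-alone sketch with made-up partial sums:

	#include <stdio.h>
	#include <stdint.h>

	/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
	static uint16_t
	fold16(uint32_t sum)
	{
		sum = (sum & 0xFFFF) + (sum >> 16);	/* first fold */
		sum = (sum & 0xFFFF) + (sum >> 16);	/* absorb any carry */
		return ((uint16_t)sum);
	}

	int
	main(void)
	{
		uint32_t frag1 = 0x1FFFEU;	/* partial sum, fragment 1 */
		uint32_t frag2 = 0x2FFFDU;	/* partial sum, fragment 2 */

		/*
		 * Adding partial sums is associative, so fragments can
		 * be accumulated in any arrival order before folding.
		 */
		printf("combined checksum: 0x%04x\n", fold16(frag1 + frag2));
		return (0);
	}
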
7427 7468
7428 7469 /*
7429 7470 * Pullup function that should be used for IP input in order to
7430 7471  * ensure we do not lose the L2 source address; we need the L2 source
7431 7472 * address for IP_RECVSLLA and for ndp_input.
7432 7473 *
7433 7474 * We return either NULL or b_rptr.
7434 7475 */
7435 7476 void *
7436 7477 ip_pullup(mblk_t *mp, ssize_t len, ip_recv_attr_t *ira)
7437 7478 {
7438 7479 ill_t *ill = ira->ira_ill;
7439 7480
7440 7481 if (ip_rput_pullups++ == 0) {
7441 7482 (void) mi_strlog(ill->ill_rq, 1, SL_ERROR|SL_TRACE,
7442 7483 "ip_pullup: %s forced us to "
7443 7484 "pullup pkt, hdr len %ld, hdr addr %p",
7444 7485 ill->ill_name, len, (void *)mp->b_rptr);
7445 7486 }
7446 7487 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7447 7488 ip_setl2src(mp, ira, ira->ira_rill);
7448 7489 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7449 7490 if (!pullupmsg(mp, len))
7450 7491 return (NULL);
7451 7492 else
7452 7493 return (mp->b_rptr);
7453 7494 }
7454 7495
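ip_pullup() ultimately relies on pullupmsg() to make at least len bytes
contiguous at b_rptr, failing when the chain is too short. A rough userland
model of that contract over an invented buffer-chain type (not the STREAMS
mblk API):

#include <string.h>

struct buf {
	unsigned char	*data;
	size_t		len;
	struct buf	*next;
};

/*
 * Gather bytes from the head of the chain until 'need' contiguous bytes
 * are available in 'dst'.  Returns 0 on success, -1 if the chain is too
 * short -- mirroring pullupmsg()'s failure return.
 */
static int
pullup_model(const struct buf *chain, unsigned char *dst, size_t need)
{
	size_t copied = 0;

	for (const struct buf *b = chain; b != NULL && copied < need;
	    b = b->next) {
		size_t n = b->len < need - copied ? b->len : need - copied;

		memcpy(dst + copied, b->data, n);
		copied += n;
	}
	return (copied == need ? 0 : -1);
}

int
main(void)
{
	unsigned char a[2] = { 0x45, 0x00 }, b[2] = { 0x00, 0x14 };
	struct buf b2 = { b, sizeof (b), NULL };
	struct buf b1 = { a, sizeof (a), &b2 };
	unsigned char hdr[4];

	return (pullup_model(&b1, hdr, sizeof (hdr)));
}
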
7455 7496 /*
7456 7497  * Make sure ira_l2src has an address. If we don't have one, fill it with
7457 7498  * zeros. When called from the ULP, ira_rill will be NULL, hence the caller
7458 7499  * has to pass in the ill.
7459 7500 */
7460 7501 /* ARGSUSED */
7461 7502 void
7462 7503 ip_setl2src(mblk_t *mp, ip_recv_attr_t *ira, ill_t *ill)
7463 7504 {
7464 7505 const uchar_t *addr;
7465 7506 int alen;
7466 7507
7467 7508 if (ira->ira_flags & IRAF_L2SRC_SET)
7468 7509 return;
7469 7510
7470 7511 ASSERT(ill != NULL);
7471 7512 alen = ill->ill_phys_addr_length;
7472 7513 ASSERT(alen <= sizeof (ira->ira_l2src));
7473 7514 if (ira->ira_mhip != NULL &&
7474 7515 (addr = ira->ira_mhip->mhi_saddr) != NULL) {
7475 7516 bcopy(addr, ira->ira_l2src, alen);
7476 7517 } else if ((ira->ira_flags & IRAF_L2SRC_LOOPBACK) &&
7477 7518 (addr = ill->ill_phys_addr) != NULL) {
7478 7519 bcopy(addr, ira->ira_l2src, alen);
7479 7520 } else {
7480 7521 bzero(ira->ira_l2src, alen);
7481 7522 }
7482 7523 ira->ira_flags |= IRAF_L2SRC_SET;
7483 7524 }
7484 7525
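The selection order here is: per-packet source from the mac header info, the
interface's own address for looped-back packets, and zero fill as the last
resort. A compact sketch of that fallback chain with simplified types (the
function and parameter names are invented for illustration):

#include <string.h>

static void
set_l2src(unsigned char *l2src, size_t alen,
    const unsigned char *pkt_saddr,		/* from mhi, may be NULL */
    const unsigned char *ill_addr, int looped)	/* interface addr, loopback? */
{
	if (pkt_saddr != NULL)
		memcpy(l2src, pkt_saddr, alen);
	else if (looped && ill_addr != NULL)
		memcpy(l2src, ill_addr, alen);
	else
		memset(l2src, 0, alen);
}

int
main(void)
{
	unsigned char dst[6];
	unsigned char mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	set_l2src(dst, sizeof (dst), mac, NULL, 0);
	return (dst[0] == 0xde ? 0 : 1);
}
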
7485 7526 /*
7486 7527  * Check the IP header length and align it.
7487 7528 */
7488 7529 mblk_t *
7489 7530 ip_check_and_align_header(mblk_t *mp, uint_t min_size, ip_recv_attr_t *ira)
7490 7531 {
7491 7532 ill_t *ill = ira->ira_ill;
7492 7533 ssize_t len;
7493 7534
7494 7535 len = MBLKL(mp);
7495 7536
7496 7537 if (!OK_32PTR(mp->b_rptr))
7497 7538 IP_STAT(ill->ill_ipst, ip_notaligned);
7498 7539 else
7499 7540 IP_STAT(ill->ill_ipst, ip_recv_pullup);
7500 7541
7501 7542 /* Guard against bogus device drivers */
7502 7543 if (len < 0) {
7503 7544 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7504 7545 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7505 7546 freemsg(mp);
7506 7547 return (NULL);
7507 7548 }
7508 7549
7509 7550 if (len == 0) {
7510 7551 /* GLD sometimes sends up mblk with b_rptr == b_wptr! */
7511 7552 mblk_t *mp1 = mp->b_cont;
7512 7553
7513 7554 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7514 7555 ip_setl2src(mp, ira, ira->ira_rill);
7515 7556 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7516 7557
7517 7558 freeb(mp);
7518 7559 mp = mp1;
7519 7560 if (mp == NULL)
7520 7561 return (NULL);
7521 7562
7522 7563 if (OK_32PTR(mp->b_rptr) && MBLKL(mp) >= min_size)
7523 7564 return (mp);
7524 7565 }
7525 7566 if (ip_pullup(mp, min_size, ira) == NULL) {
7526 7567 if (msgdsize(mp) < min_size) {
7527 7568 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7528 7569 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7529 7570 } else {
7530 7571 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7531 7572 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7532 7573 }
7533 7574 freemsg(mp);
7534 7575 return (NULL);
7535 7576 }
7536 7577 return (mp);
7537 7578 }
7538 7579
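OK_32PTR() is a word-alignment test on the read pointer; in illumos it simply
checks that the low two address bits are clear. A trivial demonstration,
assuming that definition:

#include <stdint.h>
#include <stdio.h>

#define	OK_32PTR(p)	((((uintptr_t)(p)) & 0x3) == 0)

int
main(void)
{
	uint32_t words[2];		/* guaranteed 4-byte aligned */
	char *p = (char *)words;

	/* p is aligned; p + 1 is not, and would force a pullup/copy. */
	printf("%d %d\n", OK_32PTR(p), OK_32PTR(p + 1));
	return (0);
}
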
7539 7580 /*
7540 7581 * Common code for IPv4 and IPv6 to check and pullup multi-mblks
7541 7582 */
7542 7583 mblk_t *
7543 7584 ip_check_length(mblk_t *mp, uchar_t *rptr, ssize_t len, uint_t pkt_len,
7544 7585 uint_t min_size, ip_recv_attr_t *ira)
7545 7586 {
7546 7587 ill_t *ill = ira->ira_ill;
7547 7588
7548 7589 /*
7549 7590 * Make sure we have data length consistent
7550 7591 * with the IP header.
7551 7592 */
7552 7593 if (mp->b_cont == NULL) {
7553 7594 /* pkt_len is based on ipha_len, not the mblk length */
7554 7595 if (pkt_len < min_size) {
7555 7596 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7556 7597 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7557 7598 freemsg(mp);
7558 7599 return (NULL);
7559 7600 }
7560 7601 if (len < 0) {
7561 7602 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7562 7603 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7563 7604 freemsg(mp);
7564 7605 return (NULL);
7565 7606 }
7566 7607 /* Drop any pad */
7567 7608 mp->b_wptr = rptr + pkt_len;
7568 7609 } else if ((len += msgdsize(mp->b_cont)) != 0) {
7569 7610 ASSERT(pkt_len >= min_size);
7570 7611 if (pkt_len < min_size) {
7571 7612 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7572 7613 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7573 7614 freemsg(mp);
7574 7615 return (NULL);
7575 7616 }
7576 7617 if (len < 0) {
7577 7618 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7578 7619 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7579 7620 freemsg(mp);
7580 7621 return (NULL);
7581 7622 }
7582 7623 /* Drop any pad */
7583 7624 (void) adjmsg(mp, -len);
7584 7625 /*
7585 7626 * adjmsg may have freed an mblk from the chain, hence
7586 7627 * invalidate any hw checksum here. This will force IP to
7587 7628 * calculate the checksum in sw, but only for this packet.
7588 7629 */
7589 7630 DB_CKSUMFLAGS(mp) = 0;
7590 7631 IP_STAT(ill->ill_ipst, ip_multimblk);
7591 7632 }
7592 7633 return (mp);
7593 7634 }
7594 7635
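The len bookkeeping above reduces to (bytes actually in the chain) minus
ipha_length: negative means the packet is truncated, positive means trailing
link-layer pad that adjmsg(mp, -len) trims off. A worked example of just that
arithmetic:

#include <stdio.h>

int
main(void)
{
	long total_bytes = 1514;	/* bytes received in the mblk chain */
	long pkt_len = 1500;		/* ipha_length from the IP header */
	long len = total_bytes - pkt_len;

	if (len < 0)
		printf("truncated by %ld bytes -> drop\n", -len);
	else if (len > 0)
		printf("trim %ld pad bytes, as adjmsg(mp, -len) does\n", len);
	else
		printf("exact fit, nothing to trim\n");
	return (0);
}
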
7595 7636 /*
7596 7637 * Check that the IPv4 opt_len is consistent with the packet and pullup
7597 7638 * the options.
7598 7639 */
7599 7640 mblk_t *
7600 7641 ip_check_optlen(mblk_t *mp, ipha_t *ipha, uint_t opt_len, uint_t pkt_len,
7601 7642 ip_recv_attr_t *ira)
7602 7643 {
7603 7644 ill_t *ill = ira->ira_ill;
7604 7645 ssize_t len;
7605 7646
7606 7647 /* Assume no IPv6 packets arrive over the IPv4 queue */
7607 7648 if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) {
7608 7649 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7609 7650 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInWrongIPVersion);
7610 7651 ip_drop_input("IPvN packet on IPv4 ill", mp, ill);
7611 7652 freemsg(mp);
7612 7653 return (NULL);
7613 7654 }
7614 7655
7615 7656 if (opt_len > (15 - IP_SIMPLE_HDR_LENGTH_IN_WORDS)) {
7616 7657 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7617 7658 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7618 7659 freemsg(mp);
7619 7660 return (NULL);
7620 7661 }
7621 7662 /*
7622 7663 * Recompute complete header length and make sure we
7623 7664 * have access to all of it.
7624 7665 */
7625 7666 len = ((size_t)opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2;
7626 7667 if (len > (mp->b_wptr - mp->b_rptr)) {
7627 7668 if (len > pkt_len) {
7628 7669 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7629 7670 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7630 7671 freemsg(mp);
7631 7672 return (NULL);
7632 7673 }
7633 7674 if (ip_pullup(mp, len, ira) == NULL) {
7634 7675 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7635 7676 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7636 7677 freemsg(mp);
7637 7678 return (NULL);
7638 7679 }
7639 7680 }
7640 7681 return (mp);
7641 7682 }
7642 7683
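The opt_len bound follows from the 4-bit IHL field: at most 15 words of
header, of which IP_SIMPLE_HDR_LENGTH_IN_WORDS (5) are the fixed part,
leaving 10 words (40 bytes) of options; the complete header length is then
(opt_len + 5) << 2 bytes. A tiny check of that arithmetic:

#include <assert.h>
#include <stdio.h>

#define	IP_SIMPLE_HDR_LENGTH_IN_WORDS	5	/* 20-byte fixed header */

int
main(void)
{
	unsigned opt_len = 10;		/* maximum option words: 15 - 5 */
	size_t len = ((size_t)opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2;

	assert(opt_len <= 15 - IP_SIMPLE_HDR_LENGTH_IN_WORDS);
	printf("full header length: %zu bytes (60 max)\n", len);
	return (0);
}
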
7643 7684 /*
7644 7685 * Returns a new ire, or the same ire, or NULL.
7645 7686 * If a different IRE is returned, then it is held; the caller
7646 7687 * needs to release it.
7647 7688 * In no case is there any hold/release on the ire argument.
7648 7689 */
7649 7690 ire_t *
7650 7691 ip_check_multihome(void *addr, ire_t *ire, ill_t *ill)
7651 7692 {
7652 7693 ire_t *new_ire;
7653 7694 ill_t *ire_ill;
7654 7695 uint_t ifindex;
7655 7696 ip_stack_t *ipst = ill->ill_ipst;
7656 7697 boolean_t strict_check = B_FALSE;
7657 7698
7658 7699 /*
7659 7700 * IPMP common case: if IRE and ILL are in the same group, there's no
7660 7701 * issue (e.g. packet received on an underlying interface matched an
7661 7702 * IRE_LOCAL on its associated group interface).
7662 7703 */
7663 7704 ASSERT(ire->ire_ill != NULL);
7664 7705 if (IS_IN_SAME_ILLGRP(ill, ire->ire_ill))
7665 7706 return (ire);
7666 7707
7667 7708 /*
7668 7709 * Do another ire lookup here, using the ingress ill, to see if the
7669 7710 * interface is in a usesrc group.
7670 7711 * As long as the ills belong to the same group, we don't consider
7671 7712 * them to be arriving on the wrong interface. Thus, if the switch
7672 7713 * is doing inbound load spreading, we won't drop packets when the
7673 7714 * ip*_strict_dst_multihoming switch is on.
7674 7715 * We also need to check for IPIF_UNNUMBERED point2point interfaces
7675 7716 * where the local address may not be unique. In this case we were
7676 7717 * at the mercy of the initial ire lookup and the IRE_LOCAL it
7677 7718 * actually returned. The new lookup, which is more specific, should
7678 7719 * only find the IRE_LOCAL associated with the ingress ill if one
7679 7720 * exists.
7680 7721 */
7681 7722 if (ire->ire_ipversion == IPV4_VERSION) {
7682 7723 if (ipst->ips_ip_strict_dst_multihoming)
7683 7724 strict_check = B_TRUE;
7684 7725 new_ire = ire_ftable_lookup_v4(*((ipaddr_t *)addr), 0, 0,
7685 7726 IRE_LOCAL, ill, ALL_ZONES, NULL,
7686 7727 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7687 7728 } else {
7688 7729 ASSERT(!IN6_IS_ADDR_MULTICAST((in6_addr_t *)addr));
7689 7730 if (ipst->ips_ipv6_strict_dst_multihoming)
7690 7731 strict_check = B_TRUE;
7691 7732 new_ire = ire_ftable_lookup_v6((in6_addr_t *)addr, NULL, NULL,
7692 7733 IRE_LOCAL, ill, ALL_ZONES, NULL,
7693 7734 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7694 7735 }
7695 7736 /*
7696 7737 * If the same ire that was returned in ip_input() is found then this
7697 7738 * is an indication that usesrc groups are in use. The packet
7698 7739 * arrived on a different ill in the group than the one associated with
7699 7740 * the destination address. If a different ire was found then the same
7700 7741 * IP address must be hosted on multiple ills. This is possible with
7701 7742 * unnumbered point2point interfaces. We switch to use this new ire in
7702 7743 * order to have accurate interface statistics.
7703 7744 */
7704 7745 if (new_ire != NULL) {
7705 7746 		/* A different IRE is returned held; the caller must release it */
7706 7747 if (new_ire != ire)
7707 7748 return (new_ire);
7708 7749 /* Unchanged */
7709 7750 ire_refrele(new_ire);
7710 7751 return (ire);
7711 7752 }
7712 7753
7713 7754 /*
7714 7755 * Chase pointers once and store locally.
7715 7756 */
7716 7757 ASSERT(ire->ire_ill != NULL);
7717 7758 ire_ill = ire->ire_ill;
7718 7759 ifindex = ill->ill_usesrc_ifindex;
7719 7760
7720 7761 /*
7721 7762 * Check if it's a legal address on the 'usesrc' interface.
7722 7763 * For IPMP data addresses the IRE_LOCAL is the upper, hence we
7723 7764 * can just check phyint_ifindex.
7724 7765 */
7725 7766 if (ifindex != 0 && ifindex == ire_ill->ill_phyint->phyint_ifindex) {
7726 7767 return (ire);
7727 7768 }
7728 7769
7729 7770 /*
7730 7771 * If the ip*_strict_dst_multihoming switch is on then we can
7731 7772 * only accept this packet if the interface is marked as routing.
7732 7773 */
7733 7774 if (!(strict_check))
7734 7775 return (ire);
7735 7776
7736 7777 if ((ill->ill_flags & ire->ire_ill->ill_flags & ILLF_ROUTER) != 0) {
7737 7778 return (ire);
7738 7779 }
7739 7780 return (NULL);
7740 7781 }
7741 7782
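Stripped of the lookups, the accept/drop decision is a short chain of
predicates applied in the order the code checks them. A condensed sketch,
where the boolean inputs stand in for the ill/ire state and the names are
invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Accept a packet that arrived on an interface other than the IRE's ill? */
static bool
multihome_accept(bool same_illgrp, bool local_on_ingress, bool usesrc_match,
    bool strict, bool both_routers)
{
	if (same_illgrp)		/* IPMP common case */
		return (true);
	if (local_on_ingress)		/* more-specific lookup found it */
		return (true);
	if (usesrc_match)		/* legal address on usesrc interface */
		return (true);
	if (!strict)			/* strict multihoming disabled */
		return (true);
	return (both_routers);		/* strict: only between routers */
}

int
main(void)
{
	/* Strict mode, no mitigating condition: drop. */
	printf("%d\n", multihome_accept(false, false, false, true, false));
	return (0);
}
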
7742 7783 /*
7743 7784 * This function is used to construct a mac_header_info_s from a
7744 7785 * DL_UNITDATA_IND message.
7745 7786  * The address fields in the mhi structure point into the message,
7746 7787 * thus the caller can't use those fields after freeing the message.
7747 7788 *
7748 7789 * We determine whether the packet received is a non-unicast packet
7749 7790 * and in doing so, determine whether or not it is broadcast vs multicast.
7750 7791 * For it to be a broadcast packet, we must have the appropriate mblk_t
7751 7792 * hanging off the ill_t. If this is either not present or doesn't match
7752 7793 * the destination mac address in the DL_UNITDATA_IND, the packet is deemed
7753 7794  * to be multicast. Thus, on NICs that have no broadcast address (or no
7754 7795  * capability for one, such as point-to-point links), packets can never
7755 7796  * be classified as broadcast.
7756 7797 */
7757 7798 void
7758 7799 ip_dlur_to_mhi(ill_t *ill, mblk_t *mb, struct mac_header_info_s *mhip)
7759 7800 {
7760 7801 dl_unitdata_ind_t *ind = (dl_unitdata_ind_t *)mb->b_rptr;
7761 7802 mblk_t *bmp;
7762 7803 uint_t extra_offset;
7763 7804
7764 7805 bzero(mhip, sizeof (struct mac_header_info_s));
7765 7806
7766 7807 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7767 7808
7768 7809 if (ill->ill_sap_length < 0)
7769 7810 extra_offset = 0;
7770 7811 else
7771 7812 extra_offset = ill->ill_sap_length;
7772 7813
7773 7814 mhip->mhi_daddr = (uchar_t *)ind + ind->dl_dest_addr_offset +
7774 7815 extra_offset;
7775 7816 mhip->mhi_saddr = (uchar_t *)ind + ind->dl_src_addr_offset +
7776 7817 extra_offset;
7777 7818
7778 7819 if (!ind->dl_group_address)
7779 7820 return;
7780 7821
7781 7822 /* Multicast or broadcast */
7782 7823 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7783 7824
7784 7825 if (ind->dl_dest_addr_offset > sizeof (*ind) &&
7785 7826 ind->dl_dest_addr_offset + ind->dl_dest_addr_length < MBLKL(mb) &&
7786 7827 (bmp = ill->ill_bcast_mp) != NULL) {
7787 7828 dl_unitdata_req_t *dlur;
7788 7829 uint8_t *bphys_addr;
7789 7830
7790 7831 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7791 7832 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset +
7792 7833 extra_offset;
7793 7834
7794 7835 if (bcmp(mhip->mhi_daddr, bphys_addr,
7795 7836 ind->dl_dest_addr_length) == 0)
7796 7837 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7797 7838 }
7798 7839 }
7799 7840
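The extra_offset computation reflects DLPI address formatting: with a
positive ill_sap_length the SAP precedes the physical address inside the
dl_dest_addr region, so the pointer must skip past it; with a negative one
the SAP trails the address and no skip is needed. A userland sketch of the
pointer arithmetic over a fabricated buffer layout:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Fabricated DLPI-style buffer: 2-byte SAP, then a 6-byte address. */
	unsigned char msg[32] = { 0 };
	unsigned dest_addr_offset = 8;	/* as dl_dest_addr_offset would say */
	int sap_length = 2;		/* positive: SAP comes first */
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned extra_offset;
	unsigned char *daddr;

	memcpy(msg + dest_addr_offset + sap_length, mac, sizeof (mac));

	extra_offset = sap_length < 0 ? 0 : sap_length;
	daddr = msg + dest_addr_offset + extra_offset;

	printf("first addr byte 0x%02x, group bit %d\n",
	    daddr[0], daddr[0] & 0x01);
	return (0);
}
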
7800 7841 /*
7801 7842 * This function is used to construct a mac_header_info_s from a
7802 7843 * M_DATA fastpath message from a DLPI driver.
7803 7844  * The address fields in the mhi structure point into the message,
7804 7845 * thus the caller can't use those fields after freeing the message.
7805 7846 *
7806 7847 * We determine whether the packet received is a non-unicast packet
7807 7848 * and in doing so, determine whether or not it is broadcast vs multicast.
7808 7849 * For it to be a broadcast packet, we must have the appropriate mblk_t
7809 7850 * hanging off the ill_t. If this is either not present or doesn't match
7810 7851  * the destination mac address in the MAC header, the packet is deemed
7811 7852  * to be multicast. Thus, on NICs that have no broadcast address (or no
7812 7853  * capability for one, such as point-to-point links), packets can never
7813 7854  * be classified as broadcast.
7814 7855 */
7815 7856 void
7816 7857 ip_mdata_to_mhi(ill_t *ill, mblk_t *mp, struct mac_header_info_s *mhip)
7817 7858 {
7818 7859 mblk_t *bmp;
7819 7860 struct ether_header *pether;
7820 7861
7821 7862 bzero(mhip, sizeof (struct mac_header_info_s));
7822 7863
7823 7864 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7824 7865
7825 7866 pether = (struct ether_header *)((char *)mp->b_rptr
7826 7867 - sizeof (struct ether_header));
7827 7868
7828 7869 /*
7829 7870 * Make sure the interface is an ethernet type, since we don't
7830 7871 * know the header format for anything but Ethernet. Also make
7831 7872 * sure we are pointing correctly above db_base.
7832 7873 */
7833 7874 if (ill->ill_type != IFT_ETHER)
7834 7875 return;
7835 7876
7836 7877 retry:
7837 7878 if ((uchar_t *)pether < mp->b_datap->db_base)
7838 7879 return;
7839 7880
7840 7881 /* Is there a VLAN tag? */
7841 7882 if (ill->ill_isv6) {
7842 7883 if (pether->ether_type != htons(ETHERTYPE_IPV6)) {
7843 7884 pether = (struct ether_header *)((char *)pether - 4);
7844 7885 goto retry;
7845 7886 }
7846 7887 } else {
7847 7888 if (pether->ether_type != htons(ETHERTYPE_IP)) {
7848 7889 pether = (struct ether_header *)((char *)pether - 4);
7849 7890 goto retry;
7850 7891 }
7851 7892 }
7852 7893 mhip->mhi_daddr = (uchar_t *)&pether->ether_dhost;
7853 7894 mhip->mhi_saddr = (uchar_t *)&pether->ether_shost;
7854 7895
7855 7896 if (!(mhip->mhi_daddr[0] & 0x01))
7856 7897 return;
7857 7898
7858 7899 /* Multicast or broadcast */
7859 7900 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7860 7901
7861 7902 if ((bmp = ill->ill_bcast_mp) != NULL) {
7862 7903 dl_unitdata_req_t *dlur;
7863 7904 uint8_t *bphys_addr;
7864 7905 uint_t addrlen;
7865 7906
7866 7907 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7867 7908 addrlen = dlur->dl_dest_addr_length;
7868 7909 if (ill->ill_sap_length < 0) {
7869 7910 bphys_addr = (uchar_t *)dlur +
7870 7911 dlur->dl_dest_addr_offset;
7871 7912 addrlen += ill->ill_sap_length;
7872 7913 } else {
7873 7914 bphys_addr = (uchar_t *)dlur +
7874 7915 dlur->dl_dest_addr_offset +
7875 7916 ill->ill_sap_length;
7876 7917 addrlen -= ill->ill_sap_length;
7877 7918 }
7878 7919 if (bcmp(mhip->mhi_daddr, bphys_addr, addrlen) == 0)
7879 7920 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7880 7921 }
7881 7922 }
7882 7923
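The mhi_daddr[0] & 0x01 test is the IEEE 802 group-address bit: if the
low-order bit of the first destination byte is clear the frame is unicast;
if set it is multicast, promoted to broadcast only on a byte-for-byte match
with the link broadcast address. A small sketch of that classification:

#include <stdio.h>
#include <string.h>

enum addrtype { UNICAST, MULTICAST, BROADCAST };

static enum addrtype
classify(const unsigned char daddr[6], const unsigned char bcast[6])
{
	if (!(daddr[0] & 0x01))			/* group bit clear */
		return (UNICAST);
	if (memcmp(daddr, bcast, 6) == 0)	/* matches ill_bcast_mp addr */
		return (BROADCAST);
	return (MULTICAST);
}

int
main(void)
{
	unsigned char bcast[6];
	unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	memset(bcast, 0xff, sizeof (bcast));
	/* Expect 1 (MULTICAST) and 2 (BROADCAST). */
	printf("%d %d\n", classify(mcast, bcast), classify(bcast, bcast));
	return (0);
}
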
7883 7924 /*
7884 7925  * Handle anything but M_DATA messages.
7885 7926  * We see the DL_UNITDATA_IND messages that are part
7886 7927  * of the data path, and also the other messages from the driver.
7887 7928 */
7888 7929 void
7889 7930 ip_rput_notdata(ill_t *ill, mblk_t *mp)
7890 7931 {
7891 7932 mblk_t *first_mp;
7892 7933 struct iocblk *iocp;
7893 7934 struct mac_header_info_s mhi;
7894 7935
7895 7936 switch (DB_TYPE(mp)) {
7896 7937 case M_PROTO:
7897 7938 case M_PCPROTO: {
7898 7939 if (((dl_unitdata_ind_t *)mp->b_rptr)->dl_primitive !=
7899 7940 DL_UNITDATA_IND) {
7900 7941 /* Go handle anything other than data elsewhere. */
7901 7942 ip_rput_dlpi(ill, mp);
7902 7943 return;
7903 7944 }
7904 7945
7905 7946 first_mp = mp;
7906 7947 mp = first_mp->b_cont;
7907 7948 first_mp->b_cont = NULL;
7908 7949
7909 7950 if (mp == NULL) {
7910 7951 freeb(first_mp);
7911 7952 return;
7912 7953 }
7913 7954 ip_dlur_to_mhi(ill, first_mp, &mhi);
7914 7955 if (ill->ill_isv6)
7915 7956 ip_input_v6(ill, NULL, mp, &mhi);
7916 7957 else
7917 7958 ip_input(ill, NULL, mp, &mhi);
7918 7959
7919 7960 /* Ditch the DLPI header. */
7920 7961 freeb(first_mp);
7921 7962 return;
7922 7963 }
7923 7964 case M_IOCACK:
7924 7965 iocp = (struct iocblk *)mp->b_rptr;
7925 7966 switch (iocp->ioc_cmd) {
7926 7967 case DL_IOC_HDR_INFO:
7927 7968 ill_fastpath_ack(ill, mp);
7928 7969 return;
7929 7970 default:
7930 7971 putnext(ill->ill_rq, mp);
7931 7972 return;
7932 7973 }
7933 7974 /* FALLTHRU */
7934 7975 case M_ERROR:
7935 7976 case M_HANGUP:
7936 7977 mutex_enter(&ill->ill_lock);
7937 7978 if (ill->ill_state_flags & ILL_CONDEMNED) {
7938 7979 mutex_exit(&ill->ill_lock);
7939 7980 freemsg(mp);
7940 7981 return;
7941 7982 }
7942 7983 ill_refhold_locked(ill);
7943 7984 mutex_exit(&ill->ill_lock);
7944 7985 qwriter_ip(ill, ill->ill_rq, mp, ip_rput_other, CUR_OP,
7945 7986 B_FALSE);
7946 7987 return;
7947 7988 case M_CTL:
7948 7989 putnext(ill->ill_rq, mp);
7949 7990 return;
7950 7991 case M_IOCNAK:
7951 7992 ip1dbg(("got iocnak "));
7952 7993 iocp = (struct iocblk *)mp->b_rptr;
7953 7994 switch (iocp->ioc_cmd) {
7954 7995 case DL_IOC_HDR_INFO:
7955 7996 ip_rput_other(NULL, ill->ill_rq, mp, NULL);
7956 7997 return;
7957 7998 default:
7958 7999 break;
7959 8000 }
7960 8001 /* FALLTHRU */
7961 8002 default:
7962 8003 putnext(ill->ill_rq, mp);
7963 8004 return;
7964 8005 }
7965 8006 }
7966 8007
7967 8008 /* Read side put procedure. Packets coming from the wire arrive here. */
7968 8009 void
7969 8010 ip_rput(queue_t *q, mblk_t *mp)
7970 8011 {
7971 8012 ill_t *ill;
7972 8013 union DL_primitives *dl;
7973 8014
7974 8015 ill = (ill_t *)q->q_ptr;
7975 8016
7976 8017 if (ill->ill_state_flags & (ILL_CONDEMNED | ILL_LL_SUBNET_PENDING)) {
7977 8018 /*
7978 8019 * If things are opening or closing, only accept high-priority
7979 8020 * DLPI messages. (On open ill->ill_ipif has not yet been
7980 8021 * created; on close, things hanging off the ill may have been
7981 8022 * freed already.)
7982 8023 */
7983 8024 dl = (union DL_primitives *)mp->b_rptr;
7984 8025 if (DB_TYPE(mp) != M_PCPROTO ||
7985 8026 dl->dl_primitive == DL_UNITDATA_IND) {
7986 8027 inet_freemsg(mp);
7987 8028 return;
7988 8029 }
7989 8030 }
7990 8031 if (DB_TYPE(mp) == M_DATA) {
7991 8032 struct mac_header_info_s mhi;
7992 8033
7993 8034 ip_mdata_to_mhi(ill, mp, &mhi);
7994 8035 ip_input(ill, NULL, mp, &mhi);
7995 8036 } else {
7996 8037 ip_rput_notdata(ill, mp);
7997 8038 }
7998 8039 }
7999 8040
8000 8041 /*
8001 8042  * Move the information (including any checksum metadata) to a copy.
8002 8043 */
8003 8044 mblk_t *
8004 8045 ip_fix_dbref(mblk_t *mp, ip_recv_attr_t *ira)
8005 8046 {
8006 8047 mblk_t *mp1;
8007 8048 ill_t *ill = ira->ira_ill;
8008 8049 ip_stack_t *ipst = ill->ill_ipst;
8009 8050
8010 8051 IP_STAT(ipst, ip_db_ref);
8011 8052
8012 8053 	/* Make sure we have ira_l2src before we lose the original mblk */
8013 8054 if (!(ira->ira_flags & IRAF_L2SRC_SET))
8014 8055 ip_setl2src(mp, ira, ira->ira_rill);
8015 8056
8016 8057 mp1 = copymsg(mp);
8017 8058 if (mp1 == NULL) {
8018 8059 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
8019 8060 ip_drop_input("ipIfStatsInDiscards", mp, ill);
8020 8061 freemsg(mp);
8021 8062 return (NULL);
8022 8063 }
8023 8064 /* preserve the hardware checksum flags and data, if present */
8024 8065 if (DB_CKSUMFLAGS(mp) != 0) {
8025 8066 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
8026 8067 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
8027 8068 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
8028 8069 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
8029 8070 DB_CKSUM16(mp1) = DB_CKSUM16(mp);
8030 8071 }
8031 8072 freemsg(mp);
8032 8073 return (mp1);
8033 8074 }
8034 8075
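This is the usual copy-on-write discipline for shared dblks: a buffer with
DB_REF > 1 must not be modified in place, so a private copy is taken and the
out-of-band checksum state carried across. A generic userland sketch of the
pattern, using an invented refcounted-buffer type rather than real mblks:

#include <stdlib.h>
#include <string.h>

struct rbuf {
	int		refcnt;
	size_t		len;
	unsigned char	*data;
	unsigned	cksum_flags;	/* out-of-band metadata to preserve */
};

/* Return a privately owned buffer the caller may safely modify. */
static struct rbuf *
make_private(struct rbuf *b)
{
	struct rbuf *copy;

	if (b->refcnt == 1)
		return (b);		/* already exclusively owned */

	if ((copy = malloc(sizeof (*copy))) == NULL)
		return (NULL);		/* caller drops the packet */
	if ((copy->data = malloc(b->len)) == NULL) {
		free(copy);
		return (NULL);
	}
	copy->refcnt = 1;
	copy->len = b->len;
	memcpy(copy->data, b->data, b->len);
	copy->cksum_flags = b->cksum_flags;	/* keep hw checksum state */
	b->refcnt--;
	return (copy);
}

int
main(void)
{
	unsigned char d[4] = { 1, 2, 3, 4 };
	struct rbuf shared = { 2, sizeof (d), d, 0x10 };
	struct rbuf *mine = make_private(&shared);

	return (mine != NULL && mine->cksum_flags == 0x10 ? 0 : 1);
}
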
8035 8076 static void
8036 8077 ip_dlpi_error(ill_t *ill, t_uscalar_t prim, t_uscalar_t dl_err,
8037 8078 t_uscalar_t err)
8038 8079 {
8039 8080 if (dl_err == DL_SYSERR) {
8040 8081 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8041 8082 "%s: %s failed: DL_SYSERR (errno %u)\n",
8042 8083 ill->ill_name, dl_primstr(prim), err);
8043 8084 return;
8044 8085 }
8045 8086
8046 8087 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8047 8088 "%s: %s failed: %s\n", ill->ill_name, dl_primstr(prim),
8048 8089 dl_errstr(dl_err));
8049 8090 }
8050 8091
8051 8092 /*
8052 8093 * ip_rput_dlpi is called by ip_rput to handle all DLPI messages other
8053 8094 * than DL_UNITDATA_IND messages. If we need to process this message
8054 8095 * exclusively, we call qwriter_ip, in which case we also need to call
8055 8096 * ill_refhold before that, since qwriter_ip does an ill_refrele.
8056 8097 */
8057 8098 void
8058 8099 ip_rput_dlpi(ill_t *ill, mblk_t *mp)
8059 8100 {
8060 8101 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8061 8102 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8062 8103 queue_t *q = ill->ill_rq;
8063 8104 t_uscalar_t prim = dloa->dl_primitive;
8064 8105 t_uscalar_t reqprim = DL_PRIM_INVAL;
8065 8106
8066 8107 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi",
8067 8108 char *, dl_primstr(prim), ill_t *, ill);
8068 8109 ip1dbg(("ip_rput_dlpi"));
8069 8110
8070 8111 /*
8071 8112 * If we received an ACK but didn't send a request for it, then it
8072 8113 * can't be part of any pending operation; discard up-front.
8073 8114 */
8074 8115 switch (prim) {
8075 8116 case DL_ERROR_ACK:
8076 8117 reqprim = dlea->dl_error_primitive;
8077 8118 ip2dbg(("ip_rput_dlpi(%s): DL_ERROR_ACK for %s (0x%x): %s "
8078 8119 "(0x%x), unix %u\n", ill->ill_name, dl_primstr(reqprim),
8079 8120 reqprim, dl_errstr(dlea->dl_errno), dlea->dl_errno,
8080 8121 dlea->dl_unix_errno));
8081 8122 break;
8082 8123 case DL_OK_ACK:
8083 8124 reqprim = dloa->dl_correct_primitive;
8084 8125 break;
8085 8126 case DL_INFO_ACK:
8086 8127 reqprim = DL_INFO_REQ;
8087 8128 break;
8088 8129 case DL_BIND_ACK:
8089 8130 reqprim = DL_BIND_REQ;
8090 8131 break;
8091 8132 case DL_PHYS_ADDR_ACK:
8092 8133 reqprim = DL_PHYS_ADDR_REQ;
8093 8134 break;
8094 8135 case DL_NOTIFY_ACK:
8095 8136 reqprim = DL_NOTIFY_REQ;
8096 8137 break;
8097 8138 case DL_CAPABILITY_ACK:
8098 8139 reqprim = DL_CAPABILITY_REQ;
8099 8140 break;
8100 8141 }
8101 8142
8102 8143 if (prim != DL_NOTIFY_IND) {
8103 8144 if (reqprim == DL_PRIM_INVAL ||
8104 8145 !ill_dlpi_pending(ill, reqprim)) {
8105 8146 /* Not a DLPI message we support or expected */
8106 8147 freemsg(mp);
8107 8148 return;
8108 8149 }
8109 8150 ip1dbg(("ip_rput: received %s for %s\n", dl_primstr(prim),
8110 8151 dl_primstr(reqprim)));
8111 8152 }
8112 8153
8113 8154 switch (reqprim) {
8114 8155 case DL_UNBIND_REQ:
8115 8156 /*
8116 8157 * NOTE: we mark the unbind as complete even if we got a
8117 8158 * DL_ERROR_ACK, since there's not much else we can do.
8118 8159 */
8119 8160 mutex_enter(&ill->ill_lock);
8120 8161 ill->ill_state_flags &= ~ILL_DL_UNBIND_IN_PROGRESS;
8121 8162 cv_signal(&ill->ill_cv);
8122 8163 mutex_exit(&ill->ill_lock);
8123 8164 break;
8124 8165
8125 8166 case DL_ENABMULTI_REQ:
8126 8167 if (prim == DL_OK_ACK) {
8127 8168 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8128 8169 ill->ill_dlpi_multicast_state = IDS_OK;
8129 8170 }
8130 8171 break;
8131 8172 }
8132 8173
8133 8174 /*
8134 8175 * The message is one we're waiting for (or DL_NOTIFY_IND), but we
8135 8176 * need to become writer to continue to process it. Because an
8136 8177 * exclusive operation doesn't complete until replies to all queued
8137 8178 * DLPI messages have been received, we know we're in the middle of an
8138 8179 * exclusive operation and pass CUR_OP (except for DL_NOTIFY_IND).
8139 8180 *
8140 8181 * As required by qwriter_ip(), we refhold the ill; it will refrele.
8141 8182 * Since this is on the ill stream we unconditionally bump up the
8142 8183 * refcount without doing ILL_CAN_LOOKUP().
8143 8184 */
8144 8185 ill_refhold(ill);
8145 8186 if (prim == DL_NOTIFY_IND)
8146 8187 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, NEW_OP, B_FALSE);
8147 8188 else
8148 8189 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, CUR_OP, B_FALSE);
8149 8190 }
8150 8191
8151 8192 /*
8152 8193 * Handling of DLPI messages that require exclusive access to the ipsq.
8153 8194 *
8154 8195 * Need to do ipsq_pending_mp_get on ioctl completion, which could
8155 8196 * happen here. (along with mi_copy_done)
8156 8197 */
8157 8198 /* ARGSUSED */
8158 8199 static void
8159 8200 ip_rput_dlpi_writer(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8160 8201 {
8161 8202 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8162 8203 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8163 8204 int err = 0;
8164 8205 ill_t *ill = (ill_t *)q->q_ptr;
8165 8206 ipif_t *ipif = NULL;
8166 8207 mblk_t *mp1 = NULL;
8167 8208 conn_t *connp = NULL;
8168 8209 t_uscalar_t paddrreq;
8169 8210 mblk_t *mp_hw;
8170 8211 boolean_t success;
8171 8212 boolean_t ioctl_aborted = B_FALSE;
8172 8213 boolean_t log = B_TRUE;
8173 8214
8174 8215 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer",
8175 8216 char *, dl_primstr(dloa->dl_primitive), ill_t *, ill);
8176 8217
8177 8218 ip1dbg(("ip_rput_dlpi_writer .."));
8178 8219 ASSERT(ipsq->ipsq_xop == ill->ill_phyint->phyint_ipsq->ipsq_xop);
8179 8220 ASSERT(IAM_WRITER_ILL(ill));
8180 8221
8181 8222 ipif = ipsq->ipsq_xop->ipx_pending_ipif;
8182 8223 /*
8183 8224 * The current ioctl could have been aborted by the user and a new
8184 8225 * ioctl to bring up another ill could have started. We could still
8185 8226 * get a response from the driver later.
8186 8227 */
8187 8228 if (ipif != NULL && ipif->ipif_ill != ill)
8188 8229 ioctl_aborted = B_TRUE;
8189 8230
8190 8231 switch (dloa->dl_primitive) {
8191 8232 case DL_ERROR_ACK:
8192 8233 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for %s\n",
8193 8234 dl_primstr(dlea->dl_error_primitive)));
8194 8235
8195 8236 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer error",
8196 8237 char *, dl_primstr(dlea->dl_error_primitive),
8197 8238 ill_t *, ill);
8198 8239
8199 8240 switch (dlea->dl_error_primitive) {
8200 8241 case DL_DISABMULTI_REQ:
8201 8242 ill_dlpi_done(ill, dlea->dl_error_primitive);
8202 8243 break;
8203 8244 case DL_PROMISCON_REQ:
8204 8245 case DL_PROMISCOFF_REQ:
8205 8246 case DL_UNBIND_REQ:
8206 8247 case DL_ATTACH_REQ:
8207 8248 case DL_INFO_REQ:
8208 8249 ill_dlpi_done(ill, dlea->dl_error_primitive);
8209 8250 break;
8210 8251 case DL_NOTIFY_REQ:
8211 8252 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8212 8253 log = B_FALSE;
8213 8254 break;
8214 8255 case DL_PHYS_ADDR_REQ:
8215 8256 /*
8216 8257 * For IPv6 only, there are two additional
8217 8258 * phys_addr_req's sent to the driver to get the
8218 8259 * IPv6 token and lla. This allows IP to acquire
8219 8260 * the hardware address format for a given interface
8220 8261 * without having built in knowledge of the hardware
8221 8262 * address. ill_phys_addr_pend keeps track of the last
8222 8263 * DL_PAR sent so we know which response we are
8223 8264 * dealing with. ill_dlpi_done will update
8224 8265 * ill_phys_addr_pend when it sends the next req.
8225 8266 * We don't complete the IOCTL until all three DL_PARs
8226 8267 * have been attempted, so set *_len to 0 and break.
8227 8268 */
8228 8269 paddrreq = ill->ill_phys_addr_pend;
8229 8270 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8230 8271 if (paddrreq == DL_IPV6_TOKEN) {
8231 8272 ill->ill_token_length = 0;
8232 8273 log = B_FALSE;
8233 8274 break;
8234 8275 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8235 8276 ill->ill_nd_lla_len = 0;
8236 8277 log = B_FALSE;
8237 8278 break;
8238 8279 }
8239 8280 /*
8240 8281 * Something went wrong with the DL_PHYS_ADDR_REQ.
8241 8282 * We presumably have an IOCTL hanging out waiting
8242 8283 * for completion. Find it and complete the IOCTL
8243 8284 * with the error noted.
8244 8285 * However, ill_dl_phys was called on an ill queue
8245 8286 * (from SIOCSLIFNAME), thus conn_pending_ill is not
8246 8287 * set. But the ioctl is known to be pending on ill_wq.
8247 8288 */
8248 8289 if (!ill->ill_ifname_pending)
8249 8290 break;
8250 8291 ill->ill_ifname_pending = 0;
8251 8292 if (!ioctl_aborted)
8252 8293 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8253 8294 if (mp1 != NULL) {
8254 8295 /*
8255 8296 * This operation (SIOCSLIFNAME) must have
8256 8297 * happened on the ill. Assert there is no conn
8257 8298 */
8258 8299 ASSERT(connp == NULL);
8259 8300 q = ill->ill_wq;
8260 8301 }
8261 8302 break;
8262 8303 case DL_BIND_REQ:
8263 8304 ill_dlpi_done(ill, DL_BIND_REQ);
8264 8305 if (ill->ill_ifname_pending)
8265 8306 break;
8266 8307 mutex_enter(&ill->ill_lock);
8267 8308 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8268 8309 mutex_exit(&ill->ill_lock);
8269 8310 /*
8270 8311 * Something went wrong with the bind. We presumably
8271 8312 * have an IOCTL hanging out waiting for completion.
8272 8313 * Find it, take down the interface that was coming
8273 8314 * up, and complete the IOCTL with the error noted.
8274 8315 */
8275 8316 if (!ioctl_aborted)
8276 8317 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8277 8318 if (mp1 != NULL) {
8278 8319 /*
8279 8320 * This might be a result of a DL_NOTE_REPLUMB
8280 8321 * notification. In that case, connp is NULL.
8281 8322 */
8282 8323 if (connp != NULL)
8283 8324 q = CONNP_TO_WQ(connp);
8284 8325
8285 8326 (void) ipif_down(ipif, NULL, NULL);
8286 8327 /* error is set below the switch */
8287 8328 }
8288 8329 break;
8289 8330 case DL_ENABMULTI_REQ:
8290 8331 ill_dlpi_done(ill, DL_ENABMULTI_REQ);
8291 8332
8292 8333 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8293 8334 ill->ill_dlpi_multicast_state = IDS_FAILED;
8294 8335 if (ill->ill_dlpi_multicast_state == IDS_FAILED) {
8295 8336
8296 8337 printf("ip: joining multicasts failed (%d)"
8297 8338 " on %s - will use link layer "
8298 8339 "broadcasts for multicast\n",
8299 8340 dlea->dl_errno, ill->ill_name);
8300 8341
8301 8342 /*
8302 8343 * Set up for multi_bcast; We are the
8303 8344 * writer, so ok to access ill->ill_ipif
8304 8345 * without any lock.
8305 8346 */
8306 8347 mutex_enter(&ill->ill_phyint->phyint_lock);
8307 8348 ill->ill_phyint->phyint_flags |=
8308 8349 PHYI_MULTI_BCAST;
8309 8350 mutex_exit(&ill->ill_phyint->phyint_lock);
8310 8351
8311 8352 }
8312 8353 freemsg(mp); /* Don't want to pass this up */
8313 8354 return;
8314 8355 case DL_CAPABILITY_REQ:
8315 8356 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for "
8316 8357 "DL_CAPABILITY REQ\n"));
8317 8358 if (ill->ill_dlpi_capab_state == IDCS_PROBE_SENT)
8318 8359 ill->ill_dlpi_capab_state = IDCS_FAILED;
8319 8360 ill_capability_done(ill);
8320 8361 freemsg(mp);
8321 8362 return;
8322 8363 }
8323 8364 /*
8324 8365 * Note the error for IOCTL completion (mp1 is set when
8325 8366 * ready to complete ioctl). If ill_ifname_pending_err is
8326 8367 * set, an error occured during plumbing (ill_ifname_pending),
8327 8368 		 * set, an error occurred during plumbing (ill_ifname_pending),
8328 8369 *
8329 8370 		 * NOTE: there are two additional DL_PHYS_ADDR_REQs
8330 8371 * (DL_IPV6_TOKEN and DL_IPV6_LINK_LAYER_ADDR) that are
8331 8372 * expected to get errack'd if the driver doesn't support
8332 8373 * these flags (e.g. ethernet). log will be set to B_FALSE
8333 8374 * if these error conditions are encountered.
8334 8375 */
8335 8376 if (mp1 != NULL) {
8336 8377 if (ill->ill_ifname_pending_err != 0) {
8337 8378 err = ill->ill_ifname_pending_err;
8338 8379 ill->ill_ifname_pending_err = 0;
8339 8380 } else {
8340 8381 err = dlea->dl_unix_errno ?
8341 8382 dlea->dl_unix_errno : ENXIO;
8342 8383 }
8343 8384 /*
8344 8385 * If we're plumbing an interface and an error hasn't already
8345 8386 * been saved, set ill_ifname_pending_err to the error passed
8346 8387 * up. Ignore the error if log is B_FALSE (see comment above).
8347 8388 */
8348 8389 } else if (log && ill->ill_ifname_pending &&
8349 8390 ill->ill_ifname_pending_err == 0) {
8350 8391 ill->ill_ifname_pending_err = dlea->dl_unix_errno ?
8351 8392 dlea->dl_unix_errno : ENXIO;
8352 8393 }
8353 8394
8354 8395 if (log)
8355 8396 ip_dlpi_error(ill, dlea->dl_error_primitive,
8356 8397 dlea->dl_errno, dlea->dl_unix_errno);
8357 8398 break;
8358 8399 case DL_CAPABILITY_ACK:
8359 8400 ill_capability_ack(ill, mp);
8360 8401 /*
8361 8402 * The message has been handed off to ill_capability_ack
8362 8403 * and must not be freed below
8363 8404 */
8364 8405 mp = NULL;
8365 8406 break;
8366 8407
8367 8408 case DL_INFO_ACK:
8368 8409 /* Call a routine to handle this one. */
8369 8410 ill_dlpi_done(ill, DL_INFO_REQ);
8370 8411 ip_ll_subnet_defaults(ill, mp);
8371 8412 ASSERT(!MUTEX_HELD(&ill->ill_phyint->phyint_ipsq->ipsq_lock));
8372 8413 return;
8373 8414 case DL_BIND_ACK:
8374 8415 /*
8375 8416 * We should have an IOCTL waiting on this unless
8376 8417 * sent by ill_dl_phys, in which case just return
8377 8418 */
8378 8419 ill_dlpi_done(ill, DL_BIND_REQ);
8379 8420
8380 8421 if (ill->ill_ifname_pending) {
8381 8422 DTRACE_PROBE2(ip__rput__dlpi__ifname__pending,
8382 8423 ill_t *, ill, mblk_t *, mp);
8383 8424 break;
8384 8425 }
8385 8426 mutex_enter(&ill->ill_lock);
8386 8427 ill->ill_dl_up = 1;
8387 8428 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8388 8429 mutex_exit(&ill->ill_lock);
8389 8430
8390 8431 if (!ioctl_aborted)
8391 8432 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8392 8433 if (mp1 == NULL) {
8393 8434 DTRACE_PROBE1(ip__rput__dlpi__no__mblk, ill_t *, ill);
8394 8435 break;
8395 8436 }
8396 8437 /*
8397 8438 * mp1 was added by ill_dl_up(). if that is a result of
8398 8439 * a DL_NOTE_REPLUMB notification, connp could be NULL.
8399 8440 */
8400 8441 if (connp != NULL)
8401 8442 q = CONNP_TO_WQ(connp);
8402 8443 /*
8403 8444 * We are exclusive. So nothing can change even after
8404 8445 * we get the pending mp.
8405 8446 */
8406 8447 ip1dbg(("ip_rput_dlpi: bind_ack %s\n", ill->ill_name));
8407 8448 DTRACE_PROBE1(ip__rput__dlpi__bind__ack, ill_t *, ill);
8408 8449 ill_nic_event_dispatch(ill, 0, NE_UP, NULL, 0);
8409 8450
8410 8451 /*
8411 8452 * Now bring up the resolver; when that is complete, we'll
8412 8453 * create IREs. Note that we intentionally mirror what
8413 8454 * ipif_up() would have done, because we got here by way of
8414 8455 * ill_dl_up(), which stopped ipif_up()'s processing.
8415 8456 */
8416 8457 if (ill->ill_isv6) {
8417 8458 /*
8418 8459 * v6 interfaces.
8419 8460 * Unlike ARP which has to do another bind
8420 8461 * and attach, once we get here we are
8421 8462 * done with NDP
8422 8463 */
8423 8464 (void) ipif_resolver_up(ipif, Res_act_initial);
8424 8465 if ((err = ipif_ndp_up(ipif, B_TRUE)) == 0)
8425 8466 err = ipif_up_done_v6(ipif);
8426 8467 } else if (ill->ill_net_type == IRE_IF_RESOLVER) {
8427 8468 /*
8428 8469 * ARP and other v4 external resolvers.
8429 8470 * Leave the pending mblk intact so that
8430 8471 * the ioctl completes in ip_rput().
8431 8472 */
8432 8473 if (connp != NULL)
8433 8474 mutex_enter(&connp->conn_lock);
8434 8475 mutex_enter(&ill->ill_lock);
8435 8476 success = ipsq_pending_mp_add(connp, ipif, q, mp1, 0);
8436 8477 mutex_exit(&ill->ill_lock);
8437 8478 if (connp != NULL)
8438 8479 mutex_exit(&connp->conn_lock);
8439 8480 if (success) {
8440 8481 err = ipif_resolver_up(ipif, Res_act_initial);
8441 8482 if (err == EINPROGRESS) {
8442 8483 freemsg(mp);
8443 8484 return;
8444 8485 }
8445 8486 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8446 8487 } else {
8447 8488 /* The conn has started closing */
8448 8489 err = EINTR;
8449 8490 }
8450 8491 } else {
8451 8492 /*
8452 8493 * This one is complete. Reply to pending ioctl.
8453 8494 */
8454 8495 (void) ipif_resolver_up(ipif, Res_act_initial);
8455 8496 err = ipif_up_done(ipif);
8456 8497 }
8457 8498
8458 8499 if ((err == 0) && (ill->ill_up_ipifs)) {
8459 8500 err = ill_up_ipifs(ill, q, mp1);
8460 8501 if (err == EINPROGRESS) {
8461 8502 freemsg(mp);
8462 8503 return;
8463 8504 }
8464 8505 }
8465 8506
8466 8507 /*
8467 8508 * If we have a moved ipif to bring up, and everything has
8468 8509 * succeeded to this point, bring it up on the IPMP ill.
8469 8510 * Otherwise, leave it down -- the admin can try to bring it
8470 8511 * up by hand if need be.
8471 8512 */
8472 8513 if (ill->ill_move_ipif != NULL) {
8473 8514 if (err != 0) {
8474 8515 ill->ill_move_ipif = NULL;
8475 8516 } else {
8476 8517 ipif = ill->ill_move_ipif;
8477 8518 ill->ill_move_ipif = NULL;
8478 8519 err = ipif_up(ipif, q, mp1);
8479 8520 if (err == EINPROGRESS) {
8480 8521 freemsg(mp);
8481 8522 return;
8482 8523 }
8483 8524 }
8484 8525 }
8485 8526 break;
8486 8527
8487 8528 case DL_NOTIFY_IND: {
8488 8529 dl_notify_ind_t *notify = (dl_notify_ind_t *)mp->b_rptr;
8489 8530 uint_t orig_mtu, orig_mc_mtu;
8490 8531
8491 8532 switch (notify->dl_notification) {
8492 8533 case DL_NOTE_PHYS_ADDR:
8493 8534 err = ill_set_phys_addr(ill, mp);
8494 8535 break;
8495 8536
8496 8537 case DL_NOTE_REPLUMB:
8497 8538 /*
8498 8539 * Directly return after calling ill_replumb().
8499 8540 * Note that we should not free mp as it is reused
8500 8541 * in the ill_replumb() function.
8501 8542 */
8502 8543 err = ill_replumb(ill, mp);
8503 8544 return;
8504 8545
8505 8546 case DL_NOTE_FASTPATH_FLUSH:
8506 8547 nce_flush(ill, B_FALSE);
8507 8548 break;
8508 8549
8509 8550 case DL_NOTE_SDU_SIZE:
8510 8551 case DL_NOTE_SDU_SIZE2:
8511 8552 /*
8512 8553 * The dce and fragmentation code can cope with
8513 8554 * this changing while packets are being sent.
8514 8555 * When packets are sent ip_output will discover
8515 8556 * a change.
8516 8557 *
8517 8558 * Change the MTU size of the interface.
8518 8559 */
8519 8560 mutex_enter(&ill->ill_lock);
8520 8561 orig_mtu = ill->ill_mtu;
8521 8562 orig_mc_mtu = ill->ill_mc_mtu;
8522 8563 switch (notify->dl_notification) {
8523 8564 case DL_NOTE_SDU_SIZE:
8524 8565 ill->ill_current_frag =
8525 8566 (uint_t)notify->dl_data;
8526 8567 ill->ill_mc_mtu = (uint_t)notify->dl_data;
8527 8568 break;
8528 8569 case DL_NOTE_SDU_SIZE2:
8529 8570 ill->ill_current_frag =
8530 8571 (uint_t)notify->dl_data1;
8531 8572 ill->ill_mc_mtu = (uint_t)notify->dl_data2;
8532 8573 break;
8533 8574 }
8534 8575 if (ill->ill_current_frag > ill->ill_max_frag)
8535 8576 ill->ill_max_frag = ill->ill_current_frag;
8536 8577
8537 8578 if (!(ill->ill_flags & ILLF_FIXEDMTU)) {
8538 8579 ill->ill_mtu = ill->ill_current_frag;
8539 8580
8540 8581 /*
8541 8582 * If ill_user_mtu was set (via
8542 8583 * SIOCSLIFLNKINFO), clamp ill_mtu at it.
8543 8584 */
8544 8585 if (ill->ill_user_mtu != 0 &&
8545 8586 ill->ill_user_mtu < ill->ill_mtu)
8546 8587 ill->ill_mtu = ill->ill_user_mtu;
8547 8588
8548 8589 if (ill->ill_user_mtu != 0 &&
8549 8590 ill->ill_user_mtu < ill->ill_mc_mtu)
8550 8591 ill->ill_mc_mtu = ill->ill_user_mtu;
8551 8592
8552 8593 if (ill->ill_isv6) {
8553 8594 if (ill->ill_mtu < IPV6_MIN_MTU)
8554 8595 ill->ill_mtu = IPV6_MIN_MTU;
8555 8596 if (ill->ill_mc_mtu < IPV6_MIN_MTU)
8556 8597 ill->ill_mc_mtu = IPV6_MIN_MTU;
8557 8598 } else {
8558 8599 if (ill->ill_mtu < IP_MIN_MTU)
8559 8600 ill->ill_mtu = IP_MIN_MTU;
8560 8601 if (ill->ill_mc_mtu < IP_MIN_MTU)
8561 8602 ill->ill_mc_mtu = IP_MIN_MTU;
8562 8603 }
8563 8604 } else if (ill->ill_mc_mtu > ill->ill_mtu) {
8564 8605 ill->ill_mc_mtu = ill->ill_mtu;
8565 8606 }
8566 8607
8567 8608 mutex_exit(&ill->ill_lock);
8568 8609 /*
8569 8610 * Make sure all dce_generation checks find out
8570 8611 * that ill_mtu/ill_mc_mtu has changed.
8571 8612 */
8572 8613 if (orig_mtu != ill->ill_mtu ||
8573 8614 orig_mc_mtu != ill->ill_mc_mtu) {
8574 8615 dce_increment_all_generations(ill->ill_isv6,
8575 8616 ill->ill_ipst);
8576 8617 }
8577 8618
8578 8619 /*
8579 8620 * Refresh IPMP meta-interface MTU if necessary.
8580 8621 */
8581 8622 if (IS_UNDER_IPMP(ill))
8582 8623 ipmp_illgrp_refresh_mtu(ill->ill_grp);
8583 8624 break;
8584 8625
8585 8626 case DL_NOTE_LINK_UP:
8586 8627 case DL_NOTE_LINK_DOWN: {
8587 8628 /*
8588 8629 * We are writer. ill / phyint / ipsq assocs stable.
8589 8630 * The RUNNING flag reflects the state of the link.
8590 8631 */
8591 8632 phyint_t *phyint = ill->ill_phyint;
8592 8633 uint64_t new_phyint_flags;
8593 8634 boolean_t changed = B_FALSE;
8594 8635 boolean_t went_up;
8595 8636
8596 8637 went_up = notify->dl_notification == DL_NOTE_LINK_UP;
8597 8638 mutex_enter(&phyint->phyint_lock);
8598 8639
8599 8640 new_phyint_flags = went_up ?
8600 8641 phyint->phyint_flags | PHYI_RUNNING :
8601 8642 phyint->phyint_flags & ~PHYI_RUNNING;
8602 8643
8603 8644 if (IS_IPMP(ill)) {
8604 8645 new_phyint_flags = went_up ?
8605 8646 new_phyint_flags & ~PHYI_FAILED :
8606 8647 new_phyint_flags | PHYI_FAILED;
8607 8648 }
8608 8649
8609 8650 if (new_phyint_flags != phyint->phyint_flags) {
8610 8651 phyint->phyint_flags = new_phyint_flags;
8611 8652 changed = B_TRUE;
8612 8653 }
8613 8654 mutex_exit(&phyint->phyint_lock);
8614 8655 /*
8615 8656 * ill_restart_dad handles the DAD restart and routing
8616 8657 * socket notification logic.
8617 8658 */
8618 8659 if (changed) {
8619 8660 ill_restart_dad(phyint->phyint_illv4, went_up);
8620 8661 ill_restart_dad(phyint->phyint_illv6, went_up);
8621 8662 }
8622 8663 break;
8623 8664 }
8624 8665 case DL_NOTE_PROMISC_ON_PHYS: {
8625 8666 phyint_t *phyint = ill->ill_phyint;
8626 8667
8627 8668 mutex_enter(&phyint->phyint_lock);
8628 8669 phyint->phyint_flags |= PHYI_PROMISC;
8629 8670 mutex_exit(&phyint->phyint_lock);
8630 8671 break;
8631 8672 }
8632 8673 case DL_NOTE_PROMISC_OFF_PHYS: {
8633 8674 phyint_t *phyint = ill->ill_phyint;
8634 8675
8635 8676 mutex_enter(&phyint->phyint_lock);
8636 8677 phyint->phyint_flags &= ~PHYI_PROMISC;
8637 8678 mutex_exit(&phyint->phyint_lock);
8638 8679 break;
8639 8680 }
8640 8681 case DL_NOTE_CAPAB_RENEG:
8641 8682 /*
8642 8683 * Something changed on the driver side.
8643 8684 * It wants us to renegotiate the capabilities
8644 8685 * on this ill. One possible cause is the aggregation
8645 8686 * interface under us where a port got added or
8646 8687 * went away.
8647 8688 *
8648 8689 * If the capability negotiation is already done
8649 8690 * or is in progress, reset the capabilities and
8650 8691 * mark the ill's ill_capab_reneg to be B_TRUE,
8651 8692 * so that when the ack comes back, we can start
8652 8693 * the renegotiation process.
8653 8694 *
8654 8695 * Note that if ill_capab_reneg is already B_TRUE
8655 8696 * (ill_dlpi_capab_state is IDS_UNKNOWN in this case),
8656 8697 * the capability resetting request has been sent
8657 8698 * and the renegotiation has not been started yet;
8658 8699 * nothing needs to be done in this case.
8659 8700 */
8660 8701 ipsq_current_start(ipsq, ill->ill_ipif, 0);
8661 8702 ill_capability_reset(ill, B_TRUE);
8662 8703 ipsq_current_finish(ipsq);
8663 8704 break;
8664 8705
8665 8706 case DL_NOTE_ALLOWED_IPS:
8666 8707 ill_set_allowed_ips(ill, mp);
8667 8708 break;
8668 8709 default:
8669 8710 ip0dbg(("ip_rput_dlpi_writer: unknown notification "
8670 8711 "type 0x%x for DL_NOTIFY_IND\n",
8671 8712 notify->dl_notification));
8672 8713 break;
8673 8714 }
8674 8715
8675 8716 /*
8676 8717 * As this is an asynchronous operation, we
8677 8718 * should not call ill_dlpi_done
8678 8719 */
8679 8720 break;
8680 8721 }
8681 8722 case DL_NOTIFY_ACK: {
8682 8723 dl_notify_ack_t *noteack = (dl_notify_ack_t *)mp->b_rptr;
8683 8724
8684 8725 if (noteack->dl_notifications & DL_NOTE_LINK_UP)
8685 8726 ill->ill_note_link = 1;
8686 8727 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8687 8728 break;
8688 8729 }
8689 8730 case DL_PHYS_ADDR_ACK: {
8690 8731 /*
8691 8732 * As part of plumbing the interface via SIOCSLIFNAME,
8692 8733 * ill_dl_phys() will queue a series of DL_PHYS_ADDR_REQs,
8693 8734 * whose answers we receive here. As each answer is received,
8694 8735 * we call ill_dlpi_done() to dispatch the next request as
8695 8736 * we're processing the current one. Once all answers have
8696 8737 * been received, we use ipsq_pending_mp_get() to dequeue the
8697 8738 * outstanding IOCTL and reply to it. (Because ill_dl_phys()
8698 8739 * is invoked from an ill queue, conn_oper_pending_ill is not
8699 8740 * available, but we know the ioctl is pending on ill_wq.)
8700 8741 */
8701 8742 uint_t paddrlen, paddroff;
8702 8743 uint8_t *addr;
8703 8744
8704 8745 paddrreq = ill->ill_phys_addr_pend;
8705 8746 paddrlen = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_length;
8706 8747 paddroff = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_offset;
8707 8748 addr = mp->b_rptr + paddroff;
8708 8749
8709 8750 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8710 8751 if (paddrreq == DL_IPV6_TOKEN) {
8711 8752 /*
8712 8753 * bcopy to low-order bits of ill_token
8713 8754 *
8714 8755 * XXX Temporary hack - currently, all known tokens
8715 8756 * are 64 bits, so I'll cheat for the moment.
8716 8757 */
8717 8758 bcopy(addr, &ill->ill_token.s6_addr32[2], paddrlen);
8718 8759 ill->ill_token_length = paddrlen;
8719 8760 break;
8720 8761 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8721 8762 ASSERT(ill->ill_nd_lla_mp == NULL);
8722 8763 ill_set_ndmp(ill, mp, paddroff, paddrlen);
8723 8764 mp = NULL;
8724 8765 break;
8725 8766 } else if (paddrreq == DL_CURR_DEST_ADDR) {
8726 8767 ASSERT(ill->ill_dest_addr_mp == NULL);
8727 8768 ill->ill_dest_addr_mp = mp;
8728 8769 ill->ill_dest_addr = addr;
8729 8770 mp = NULL;
8730 8771 if (ill->ill_isv6) {
8731 8772 ill_setdesttoken(ill);
8732 8773 ipif_setdestlinklocal(ill->ill_ipif);
8733 8774 }
8734 8775 break;
8735 8776 }
8736 8777
8737 8778 ASSERT(paddrreq == DL_CURR_PHYS_ADDR);
8738 8779 ASSERT(ill->ill_phys_addr_mp == NULL);
8739 8780 if (!ill->ill_ifname_pending)
8740 8781 break;
8741 8782 ill->ill_ifname_pending = 0;
8742 8783 if (!ioctl_aborted)
8743 8784 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8744 8785 if (mp1 != NULL) {
8745 8786 ASSERT(connp == NULL);
8746 8787 q = ill->ill_wq;
8747 8788 }
8748 8789 /*
8749 8790 * If any error acks received during the plumbing sequence,
8750 8791 * ill_ifname_pending_err will be set. Break out and send up
8751 8792 * the error to the pending ioctl.
8752 8793 */
8753 8794 if (ill->ill_ifname_pending_err != 0) {
8754 8795 err = ill->ill_ifname_pending_err;
8755 8796 ill->ill_ifname_pending_err = 0;
8756 8797 break;
8757 8798 }
8758 8799
8759 8800 ill->ill_phys_addr_mp = mp;
8760 8801 ill->ill_phys_addr = (paddrlen == 0 ? NULL : addr);
8761 8802 mp = NULL;
8762 8803
8763 8804 /*
8764 8805 * If paddrlen or ill_phys_addr_length is zero, the DLPI
8765 8806 * provider doesn't support physical addresses. We check both
8766 8807 * paddrlen and ill_phys_addr_length because sppp (PPP) does
8767 8808 		 * not have physical addresses, but historically advertises a
8768 8809 		 * physical address length of 0 in its DL_INFO_ACK, yet 6 in
8769 8810 * its DL_PHYS_ADDR_ACK.
8770 8811 */
8771 8812 if (paddrlen == 0 || ill->ill_phys_addr_length == 0) {
8772 8813 ill->ill_phys_addr = NULL;
8773 8814 } else if (paddrlen != ill->ill_phys_addr_length) {
8774 8815 ip0dbg(("DL_PHYS_ADDR_ACK: got addrlen %d, expected %d",
8775 8816 paddrlen, ill->ill_phys_addr_length));
8776 8817 err = EINVAL;
8777 8818 break;
8778 8819 }
8779 8820
8780 8821 if (ill->ill_nd_lla_mp == NULL) {
8781 8822 if ((mp_hw = copyb(ill->ill_phys_addr_mp)) == NULL) {
8782 8823 err = ENOMEM;
8783 8824 break;
8784 8825 }
8785 8826 ill_set_ndmp(ill, mp_hw, paddroff, paddrlen);
8786 8827 }
8787 8828
8788 8829 if (ill->ill_isv6) {
8789 8830 ill_setdefaulttoken(ill);
8790 8831 ipif_setlinklocal(ill->ill_ipif);
8791 8832 }
8792 8833 break;
8793 8834 }
8794 8835 case DL_OK_ACK:
8795 8836 ip2dbg(("DL_OK_ACK %s (0x%x)\n",
8796 8837 dl_primstr((int)dloa->dl_correct_primitive),
8797 8838 dloa->dl_correct_primitive));
8798 8839 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer ok",
8799 8840 char *, dl_primstr(dloa->dl_correct_primitive),
8800 8841 ill_t *, ill);
8801 8842
8802 8843 switch (dloa->dl_correct_primitive) {
8803 8844 case DL_ENABMULTI_REQ:
8804 8845 case DL_DISABMULTI_REQ:
8805 8846 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8806 8847 break;
8807 8848 case DL_PROMISCON_REQ:
8808 8849 case DL_PROMISCOFF_REQ:
8809 8850 case DL_UNBIND_REQ:
8810 8851 case DL_ATTACH_REQ:
8811 8852 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8812 8853 break;
8813 8854 }
8814 8855 break;
8815 8856 default:
8816 8857 break;
8817 8858 }
8818 8859
8819 8860 freemsg(mp);
8820 8861 if (mp1 == NULL)
8821 8862 return;
8822 8863
8823 8864 /*
8824 8865 * The operation must complete without EINPROGRESS since
8825 8866 * ipsq_pending_mp_get() has removed the mblk (mp1). Otherwise,
8826 8867 * the operation will be stuck forever inside the IPSQ.
8827 8868 */
8828 8869 ASSERT(err != EINPROGRESS);
8829 8870
8830 8871 DTRACE_PROBE4(ipif__ioctl, char *, "ip_rput_dlpi_writer finish",
8831 8872 int, ipsq->ipsq_xop->ipx_current_ioctl, ill_t *, ill,
8832 8873 ipif_t *, NULL);
8833 8874
8834 8875 switch (ipsq->ipsq_xop->ipx_current_ioctl) {
8835 8876 case 0:
8836 8877 ipsq_current_finish(ipsq);
8837 8878 break;
8838 8879
8839 8880 case SIOCSLIFNAME:
8840 8881 case IF_UNITSEL: {
8841 8882 ill_t *ill_other = ILL_OTHER(ill);
8842 8883
8843 8884 /*
8844 8885 * If SIOCSLIFNAME or IF_UNITSEL is about to succeed, and the
8845 8886 * ill has a peer which is in an IPMP group, then place ill
8846 8887 * into the same group. One catch: although ifconfig plumbs
8847 8888 * the appropriate IPMP meta-interface prior to plumbing this
8848 8889 * ill, it is possible for multiple ifconfig applications to
8849 8890 * race (or for another application to adjust plumbing), in
8850 8891 * which case the IPMP meta-interface we need will be missing.
8851 8892 * If so, kick the phyint out of the group.
8852 8893 */
8853 8894 if (err == 0 && ill_other != NULL && IS_UNDER_IPMP(ill_other)) {
8854 8895 ipmp_grp_t *grp = ill->ill_phyint->phyint_grp;
8855 8896 ipmp_illgrp_t *illg;
8856 8897
8857 8898 illg = ill->ill_isv6 ? grp->gr_v6 : grp->gr_v4;
8858 8899 if (illg == NULL)
8859 8900 ipmp_phyint_leave_grp(ill->ill_phyint);
8860 8901 else
8861 8902 ipmp_ill_join_illgrp(ill, illg);
8862 8903 }
8863 8904
8864 8905 if (ipsq->ipsq_xop->ipx_current_ioctl == IF_UNITSEL)
8865 8906 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8866 8907 else
8867 8908 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8868 8909 break;
8869 8910 }
8870 8911 case SIOCLIFADDIF:
8871 8912 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8872 8913 break;
8873 8914
8874 8915 default:
8875 8916 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8876 8917 break;
8877 8918 }
8878 8919 }
8879 8920
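Within the DL_NOTE_SDU_SIZE handling above, the driver-reported MTU is
clamped in a fixed order: cap it at any user-configured MTU (set via
SIOCSLIFLNKINFO), then enforce the protocol floor. A minimal sketch of the
unicast-MTU path, assuming the usual 1280-byte IPv6 and 68-byte IPv4
minimums; the multicast MTU gets the same treatment and is additionally
capped at the unicast MTU:

#include <stdio.h>

#define	IPV6_MIN_MTU	1280
#define	IP_MIN_MTU	68

static unsigned
clamp_mtu(unsigned driver_mtu, unsigned user_mtu, int isv6)
{
	unsigned mtu = driver_mtu;

	if (user_mtu != 0 && user_mtu < mtu)	/* SIOCSLIFLNKINFO cap */
		mtu = user_mtu;
	if (isv6 && mtu < IPV6_MIN_MTU)		/* protocol floor */
		mtu = IPV6_MIN_MTU;
	else if (!isv6 && mtu < IP_MIN_MTU)
		mtu = IP_MIN_MTU;
	return (mtu);
}

int
main(void)
{
	printf("%u\n", clamp_mtu(9000, 1500, 1));	/* user cap wins */
	printf("%u\n", clamp_mtu(576, 0, 1));		/* floored to 1280 */
	return (0);
}
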
8880 8921 /*
8881 8922 * ip_rput_other is called by ip_rput to handle messages modifying the global
8882 8923 * state in IP. If 'ipsq' is non-NULL, caller is writer on it.
8883 8924 */
8884 8925 /* ARGSUSED */
8885 8926 void
8886 8927 ip_rput_other(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8887 8928 {
8888 8929 ill_t *ill = q->q_ptr;
8889 8930 struct iocblk *iocp;
8890 8931
8891 8932 ip1dbg(("ip_rput_other "));
8892 8933 if (ipsq != NULL) {
8893 8934 ASSERT(IAM_WRITER_IPSQ(ipsq));
8894 8935 ASSERT(ipsq->ipsq_xop ==
8895 8936 ill->ill_phyint->phyint_ipsq->ipsq_xop);
8896 8937 }
8897 8938
8898 8939 switch (mp->b_datap->db_type) {
8899 8940 case M_ERROR:
8900 8941 case M_HANGUP:
8901 8942 /*
8902 8943 * The device has a problem. We force the ILL down. It can
8903 8944 * be brought up again manually using SIOCSIFFLAGS (via
8904 8945 * ifconfig or equivalent).
8905 8946 */
8906 8947 ASSERT(ipsq != NULL);
8907 8948 if (mp->b_rptr < mp->b_wptr)
8908 8949 ill->ill_error = (int)(*mp->b_rptr & 0xFF);
8909 8950 if (ill->ill_error == 0)
8910 8951 ill->ill_error = ENXIO;
8911 8952 if (!ill_down_start(q, mp))
8912 8953 return;
8913 8954 ipif_all_down_tail(ipsq, q, mp, NULL);
8914 8955 break;
8915 8956 case M_IOCNAK: {
8916 8957 iocp = (struct iocblk *)mp->b_rptr;
8917 8958
8918 8959 ASSERT(iocp->ioc_cmd == DL_IOC_HDR_INFO);
8919 8960 /*
8920 8961 * If this was the first attempt, turn off the fastpath
8921 8962 * probing.
8922 8963 */
8923 8964 mutex_enter(&ill->ill_lock);
8924 8965 if (ill->ill_dlpi_fastpath_state == IDS_INPROGRESS) {
8925 8966 ill->ill_dlpi_fastpath_state = IDS_FAILED;
8926 8967 mutex_exit(&ill->ill_lock);
8927 8968 /*
8928 8969 * don't flush the nce_t entries: we use them
8929 8970 * as an index to the ncec itself.
8930 8971 */
8931 8972 ip1dbg(("ip_rput: DLPI fastpath off on interface %s\n",
8932 8973 ill->ill_name));
8933 8974 } else {
8934 8975 mutex_exit(&ill->ill_lock);
8935 8976 }
8936 8977 freemsg(mp);
8937 8978 break;
8938 8979 }
8939 8980 default:
8940 8981 ASSERT(0);
8941 8982 break;
8942 8983 }
8943 8984 }
8944 8985
8945 8986 /*
8946 8987  * Update any source route, record route or timestamp options.
8947 8988  * When it fails, it has consumed the message and BUMPed the MIB.
8948 8989 */
8949 8990 boolean_t
8950 8991 ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
8951 8992 ip_recv_attr_t *ira)
8952 8993 {
8953 8994 ipoptp_t opts;
8954 8995 uchar_t *opt;
8955 8996 uint8_t optval;
8956 8997 uint8_t optlen;
8957 8998 ipaddr_t dst;
8958 8999 ipaddr_t ifaddr;
8959 9000 uint32_t ts;
8960 9001 timestruc_t now;
8961 9002 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
8962 9003
8963 9004 ip2dbg(("ip_forward_options\n"));
8964 9005 dst = ipha->ipha_dst;
8965 9006 for (optval = ipoptp_first(&opts, ipha);
8966 9007 optval != IPOPT_EOL;
8967 9008 optval = ipoptp_next(&opts)) {
8968 9009 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
8969 9010 opt = opts.ipoptp_cur;
8970 9011 optlen = opts.ipoptp_len;
8971 9012 ip2dbg(("ip_forward_options: opt %d, len %d\n",
8972 9013 optval, opts.ipoptp_len));
8973 9014 switch (optval) {
8974 9015 uint32_t off;
8975 9016 case IPOPT_SSRR:
8976 9017 case IPOPT_LSRR:
8977 9018 			/* Check if administratively disabled */
8978 9019 if (!ipst->ips_ip_forward_src_routed) {
8979 9020 BUMP_MIB(dst_ill->ill_ip_mib,
8980 9021 ipIfStatsForwProhibits);
8981 9022 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED",
8982 9023 mp, dst_ill);
8983 9024 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED,
8984 9025 ira);
8985 9026 return (B_FALSE);
8986 9027 }
8987 9028 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
8988 9029 /*
8989 9030 * Must be partial since ip_input_options
8990 9031 * checked for strict.
8991 9032 */
8992 9033 break;
8993 9034 }
8994 9035 off = opt[IPOPT_OFFSET];
8995 9036 off--;
8996 9037 redo_srr:
8997 9038 if (optlen < IP_ADDR_LEN ||
8998 9039 off > optlen - IP_ADDR_LEN) {
8999 9040 /* End of source route */
9000 9041 ip1dbg((
9001 9042 "ip_forward_options: end of SR\n"));
9002 9043 break;
9003 9044 }
9004 9045 /* Pick a reasonable address on the outbound if */
9005 9046 ASSERT(dst_ill != NULL);
9006 9047 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9007 9048 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9008 9049 NULL) != 0) {
9009 9050 /* No source! Shouldn't happen */
9010 9051 ifaddr = INADDR_ANY;
9011 9052 }
9012 9053 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9013 9054 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9014 9055 ip1dbg(("ip_forward_options: next hop 0x%x\n",
9015 9056 ntohl(dst)));
9016 9057
9017 9058 /*
9018 9059 * Check if our address is present more than
9019 9060 * once as consecutive hops in source route.
9020 9061 */
9021 9062 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9022 9063 off += IP_ADDR_LEN;
9023 9064 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9024 9065 goto redo_srr;
9025 9066 }
9026 9067 ipha->ipha_dst = dst;
9027 9068 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9028 9069 break;
9029 9070 case IPOPT_RR:
9030 9071 off = opt[IPOPT_OFFSET];
9031 9072 off--;
9032 9073 if (optlen < IP_ADDR_LEN ||
9033 9074 off > optlen - IP_ADDR_LEN) {
9034 9075 /* No more room - ignore */
9035 9076 ip1dbg((
9036 9077 "ip_forward_options: end of RR\n"));
9037 9078 break;
9038 9079 }
9039 9080 /* Pick a reasonable address on the outbound if */
9040 9081 ASSERT(dst_ill != NULL);
9041 9082 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9042 9083 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9043 9084 NULL) != 0) {
9044 9085 /* No source! Shouldn't happen */
9045 9086 ifaddr = INADDR_ANY;
9046 9087 }
9047 9088 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9048 9089 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9049 9090 break;
9050 9091 case IPOPT_TS:
9051 9092 /* Insert timestamp if there is room */
9052 9093 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9053 9094 case IPOPT_TS_TSONLY:
9054 9095 off = IPOPT_TS_TIMELEN;
9055 9096 break;
9056 9097 case IPOPT_TS_PRESPEC:
9057 9098 case IPOPT_TS_PRESPEC_RFC791:
9058 9099 /* Verify that the address matched */
9059 9100 off = opt[IPOPT_OFFSET] - 1;
9060 9101 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9061 9102 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9062 9103 /* Not for us */
9063 9104 break;
9064 9105 }
9065 9106 /* FALLTHRU */
9066 9107 case IPOPT_TS_TSANDADDR:
9067 9108 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9068 9109 break;
9069 9110 default:
9070 9111 /*
9071 9112 * ip_*put_options should have already
9072 9113 * dropped this packet.
9073 9114 */
9074 9115 cmn_err(CE_PANIC, "ip_forward_options: "
9075 9116 "unknown IT - bug in ip_input_options?\n");
9076 9117 return (B_TRUE); /* Keep "lint" happy */
9077 9118 }
9078 9119 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9079 9120 /* Increase overflow counter */
9080 9121 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9081 9122 opt[IPOPT_POS_OV_FLG] =
9082 9123 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9083 9124 (off << 4));
9084 9125 break;
9085 9126 }
9086 9127 off = opt[IPOPT_OFFSET] - 1;
9087 9128 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9088 9129 case IPOPT_TS_PRESPEC:
9089 9130 case IPOPT_TS_PRESPEC_RFC791:
9090 9131 case IPOPT_TS_TSANDADDR:
9091 9132 /* Pick a reasonable addr on the outbound if */
9092 9133 ASSERT(dst_ill != NULL);
9093 9134 if (ip_select_source_v4(dst_ill, INADDR_ANY,
9094 9135 dst, INADDR_ANY, ALL_ZONES, ipst, &ifaddr,
9095 9136 NULL, NULL) != 0) {
9096 9137 /* No source! Shouldn't happen */
9097 9138 ifaddr = INADDR_ANY;
9098 9139 }
9099 9140 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9100 9141 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9101 9142 /* FALLTHRU */
9102 9143 case IPOPT_TS_TSONLY:
9103 9144 off = opt[IPOPT_OFFSET] - 1;
9104 9145 /* Compute # of milliseconds since midnight */
9105 9146 gethrestime(&now);
9106 9147 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9107 9148 now.tv_nsec / (NANOSEC / MILLISEC);
9108 9149 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9109 9150 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9110 9151 break;
9111 9152 }
9112 9153 break;
9113 9154 }
9114 9155 }
9115 9156 return (B_TRUE);
9116 9157 }
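
The IPOPT_TS arms above store the RFC 791 timestamp value, milliseconds since midnight UT. A minimal userland sketch of the same arithmetic, assuming POSIX clock_gettime() in place of the kernel-only gethrestime(); all names below are illustrative:

    /* Compute the RFC 791 IP timestamp option value: milliseconds
     * since midnight UT, as the IPOPT_TS handling above does. */
    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    int
    main(void)
    {
    	struct timespec now;
    	uint32_t ts;

    	(void) clock_gettime(CLOCK_REALTIME, &now);
    	/* Seconds into the current UT day, scaled to ms, plus ns->ms. */
    	ts = (uint32_t)((now.tv_sec % (24 * 60 * 60)) * 1000 +
    	    now.tv_nsec / 1000000);
    	(void) printf("ip timestamp option value: %u ms\n", ts);
    	return (0);
    }
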
9117 9158
9118 9159 /*
9119 9160 * Call ill_frag_timeout to do garbage collection. ill_frag_timeout
9120 9161 * returns 'true' if there are still fragments left on the queue, in
9121 9162 * which case we restart the timer.
9122 9163 */
9123 9164 void
9124 9165 ill_frag_timer(void *arg)
9125 9166 {
9126 9167 ill_t *ill = (ill_t *)arg;
9127 9168 boolean_t frag_pending;
9128 9169 ip_stack_t *ipst = ill->ill_ipst;
9129 9170 time_t timeout;
9130 9171
9131 9172 mutex_enter(&ill->ill_lock);
9132 9173 ASSERT(!ill->ill_fragtimer_executing);
9133 9174 if (ill->ill_state_flags & ILL_CONDEMNED) {
9134 9175 ill->ill_frag_timer_id = 0;
9135 9176 mutex_exit(&ill->ill_lock);
9136 9177 return;
9137 9178 }
9138 9179 ill->ill_fragtimer_executing = 1;
9139 9180 mutex_exit(&ill->ill_lock);
9140 9181
9141 9182 timeout = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9142 9183 ipst->ips_ip_reassembly_timeout);
9143 9184
9144 9185 frag_pending = ill_frag_timeout(ill, timeout);
9145 9186
9146 9187 /*
9147 9188 * Restart the timer if we have fragments pending or if someone
9148 9189 * wanted us to be scheduled again.
9149 9190 */
9150 9191 mutex_enter(&ill->ill_lock);
9151 9192 ill->ill_fragtimer_executing = 0;
9152 9193 ill->ill_frag_timer_id = 0;
9153 9194 if (frag_pending || ill->ill_fragtimer_needrestart)
9154 9195 ill_frag_timer_start(ill);
9155 9196 mutex_exit(&ill->ill_lock);
9156 9197 }
9157 9198
9158 9199 void
9159 9200 ill_frag_timer_start(ill_t *ill)
9160 9201 {
9161 9202 ip_stack_t *ipst = ill->ill_ipst;
9162 9203 clock_t timeo_ms;
9163 9204
9164 9205 ASSERT(MUTEX_HELD(&ill->ill_lock));
9165 9206
9166 9207 /* If the ill is closing or opening don't proceed */
9167 9208 if (ill->ill_state_flags & ILL_CONDEMNED)
9168 9209 return;
9169 9210
9170 9211 if (ill->ill_fragtimer_executing) {
9171 9212 /*
9172 9213 * ill_frag_timer is currently executing. Just record
9173 9214 * the fact that we want the timer to be restarted.
9174 9215 * ill_frag_timer will post a timeout before it returns,
9175 9216 * ensuring it will be called again.
9176 9217 */
9177 9218 ill->ill_fragtimer_needrestart = 1;
9178 9219 return;
9179 9220 }
9180 9221
9181 9222 if (ill->ill_frag_timer_id == 0) {
9182 9223 timeo_ms = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9183 9224 ipst->ips_ip_reassembly_timeout) * SECONDS;
9184 9225
9185 9226 /*
9186 9227 * The timer is neither running nor is the timeout handler
9187 9228 * executing. Post a timeout so that ill_frag_timer will be
9188 9229 * called.
9189 9230 */
9190 9231 ill->ill_frag_timer_id = timeout(ill_frag_timer, ill,
9191 9232 MSEC_TO_TICK(timeo_ms >> 1));
9192 9233 ill->ill_fragtimer_needrestart = 0;
9193 9234 }
9194 9235 }
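
The two functions above cooperate through ill_fragtimer_executing and ill_fragtimer_needrestart so that a restart requested while the handler runs without the lock is never lost. A compressed userland sketch of that handshake, with hypothetical names standing in for the ill fields and a direct call standing in for timeout(9F):

    #include <stdio.h>
    #include <pthread.h>

    struct frag_timer {
    	pthread_mutex_t ft_lock;
    	int ft_executing;	/* handler running without the lock */
    	int ft_needrestart;	/* start requested while executing */
    	int ft_armed;		/* stands in for ill_frag_timer_id */
    };

    static void
    timer_start_locked(struct frag_timer *ft)	/* lock must be held */
    {
    	if (ft->ft_executing) {
    		ft->ft_needrestart = 1;	/* handler will re-arm */
    		return;
    	}
    	if (!ft->ft_armed) {
    		ft->ft_armed = 1;	/* a real timeout() goes here */
    		ft->ft_needrestart = 0;
    	}
    }

    static void
    timer_fire(struct frag_timer *ft, int frag_pending)
    {
    	pthread_mutex_lock(&ft->ft_lock);
    	ft->ft_executing = 1;
    	pthread_mutex_unlock(&ft->ft_lock);

    	/* ... garbage-collect fragments here, lock dropped ... */

    	pthread_mutex_lock(&ft->ft_lock);
    	ft->ft_executing = 0;
    	ft->ft_armed = 0;
    	if (frag_pending || ft->ft_needrestart)
    		timer_start_locked(ft);
    	pthread_mutex_unlock(&ft->ft_lock);
    }

    int
    main(void)
    {
    	struct frag_timer ft = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

    	timer_fire(&ft, 1);	/* fragments left: timer re-arms */
    	(void) printf("armed after fire: %d\n", ft.ft_armed);
    	return (0);
    }
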
9195 9236
9196 9237 /*
9197 9238 * Update any source route, record route or timestamp options.
9198 9239 * Check that we are at the end of a strict source route.
9199 9240 * The options have already been checked for sanity in ip_input_options().
9200 9241 */
9201 9242 boolean_t
9202 9243 ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
9203 9244 {
9204 9245 ipoptp_t opts;
9205 9246 uchar_t *opt;
9206 9247 uint8_t optval;
9207 9248 uint8_t optlen;
9208 9249 ipaddr_t dst;
9209 9250 ipaddr_t ifaddr;
9210 9251 uint32_t ts;
9211 9252 timestruc_t now;
9212 9253 ill_t *ill = ira->ira_ill;
9213 9254 ip_stack_t *ipst = ill->ill_ipst;
9214 9255
9215 9256 ip2dbg(("ip_input_local_options\n"));
9216 9257
9217 9258 for (optval = ipoptp_first(&opts, ipha);
9218 9259 optval != IPOPT_EOL;
9219 9260 optval = ipoptp_next(&opts)) {
9220 9261 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9221 9262 opt = opts.ipoptp_cur;
9222 9263 optlen = opts.ipoptp_len;
9223 9264 ip2dbg(("ip_input_local_options: opt %d, len %d\n",
9224 9265 optval, optlen));
9225 9266 switch (optval) {
9226 9267 uint32_t off;
9227 9268 case IPOPT_SSRR:
9228 9269 case IPOPT_LSRR:
9229 9270 off = opt[IPOPT_OFFSET];
9230 9271 off--;
9231 9272 if (optlen < IP_ADDR_LEN ||
9232 9273 off > optlen - IP_ADDR_LEN) {
9233 9274 /* End of source route */
9234 9275 ip1dbg(("ip_input_local_options: end of SR\n"));
9235 9276 break;
9236 9277 }
9237 9278 /*
9238 9279 * This will only happen if two consecutive entries
9239 9280 * in the source route contain our address, or if
9240 9281 * it is a packet with a loose source route which
9241 9282 * reaches us before consuming the whole source route
9242 9283 */
9243 9284 ip1dbg(("ip_input_local_options: not end of SR\n"));
9244 9285 if (optval == IPOPT_SSRR) {
9245 9286 goto bad_src_route;
9246 9287 }
9247 9288 /*
9248 9289 * Hack: instead of dropping the packet truncate the
9249 9290 * source route to what has been used by filling the
9250 9291 * rest with IPOPT_NOP.
9251 9292 */
9252 9293 opt[IPOPT_OLEN] = (uint8_t)off;
9253 9294 while (off < optlen) {
9254 9295 opt[off++] = IPOPT_NOP;
9255 9296 }
9256 9297 break;
9257 9298 case IPOPT_RR:
9258 9299 off = opt[IPOPT_OFFSET];
9259 9300 off--;
9260 9301 if (optlen < IP_ADDR_LEN ||
9261 9302 off > optlen - IP_ADDR_LEN) {
9262 9303 /* No more room - ignore */
9263 9304 ip1dbg((
9264 9305 "ip_input_local_options: end of RR\n"));
9265 9306 break;
9266 9307 }
9267 9308 /* Pick a reasonable address on the outbound if */
9268 9309 if (ip_select_source_v4(ill, INADDR_ANY, ipha->ipha_dst,
9269 9310 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9270 9311 NULL) != 0) {
9271 9312 /* No source! Shouldn't happen */
9272 9313 ifaddr = INADDR_ANY;
9273 9314 }
9274 9315 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9275 9316 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9276 9317 break;
9277 9318 case IPOPT_TS:
9278 9319 /* Insert timestamp if there is room */
9279 9320 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9280 9321 case IPOPT_TS_TSONLY:
9281 9322 off = IPOPT_TS_TIMELEN;
9282 9323 break;
9283 9324 case IPOPT_TS_PRESPEC:
9284 9325 case IPOPT_TS_PRESPEC_RFC791:
9285 9326 /* Verify that the address matched */
9286 9327 off = opt[IPOPT_OFFSET] - 1;
9287 9328 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9288 9329 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9289 9330 /* Not for us */
9290 9331 break;
9291 9332 }
9292 9333 /* FALLTHRU */
9293 9334 case IPOPT_TS_TSANDADDR:
9294 9335 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9295 9336 break;
9296 9337 default:
9297 9338 /*
9298 9339 * ip_*put_options should have already
9299 9340 * dropped this packet.
9300 9341 */
9301 9342 cmn_err(CE_PANIC, "ip_input_local_options: "
9302 9343 "unknown IT - bug in ip_input_options?\n");
9303 9344 return (B_TRUE); /* Keep "lint" happy */
9304 9345 }
9305 9346 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9306 9347 /* Increase overflow counter */
9307 9348 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9308 9349 opt[IPOPT_POS_OV_FLG] =
9309 9350 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9310 9351 (off << 4));
9311 9352 break;
9312 9353 }
9313 9354 off = opt[IPOPT_OFFSET] - 1;
9314 9355 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9315 9356 case IPOPT_TS_PRESPEC:
9316 9357 case IPOPT_TS_PRESPEC_RFC791:
9317 9358 case IPOPT_TS_TSANDADDR:
9318 9359 /* Pick a reasonable addr on the outbound if */
9319 9360 if (ip_select_source_v4(ill, INADDR_ANY,
9320 9361 ipha->ipha_dst, INADDR_ANY, ALL_ZONES, ipst,
9321 9362 &ifaddr, NULL, NULL) != 0) {
9322 9363 /* No source! Shouldn't happen */
9323 9364 ifaddr = INADDR_ANY;
9324 9365 }
9325 9366 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9326 9367 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9327 9368 /* FALLTHRU */
9328 9369 case IPOPT_TS_TSONLY:
9329 9370 off = opt[IPOPT_OFFSET] - 1;
9330 9371 /* Compute # of milliseconds since midnight */
9331 9372 gethrestime(&now);
9332 9373 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9333 9374 now.tv_nsec / (NANOSEC / MILLISEC);
9334 9375 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9335 9376 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9336 9377 break;
9337 9378 }
9338 9379 break;
9339 9380 }
9340 9381 }
9341 9382 return (B_TRUE);
9342 9383
9343 9384 bad_src_route:
9344 9385 /* make sure we clear any indication of a hardware checksum */
9345 9386 DB_CKSUMFLAGS(mp) = 0;
9346 9387 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
9347 9388 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9348 9389 return (B_FALSE);
9349 9390
9350 9391 }
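
The SSRR/LSRR arm above, rather than dropping a loose-source-routed packet that reaches us before its route is consumed, truncates the option to the used portion and NOP-pads the remainder so later option walkers skip it. A standalone sketch of that byte manipulation, with an illustrative LSRR buffer (not taken from a real packet):

    #include <stdio.h>
    #include <stdint.h>

    #define	IPOPT_NOP	0x01
    #define	IPOPT_OLEN	1	/* offset of the length byte */
    #define	IPOPT_OFFSET	2	/* offset of the pointer byte */

    int
    main(void)
    {
    	/* LSRR (0x83), len 11, pointer 8: one of two hops consumed. */
    	uint8_t opt[11] = { 0x83, 11, 8 };
    	uint8_t off = opt[IPOPT_OFFSET] - 1;	/* consumed length */
    	uint8_t optlen = opt[IPOPT_OLEN];

    	opt[IPOPT_OLEN] = off;		/* truncate to what was used */
    	while (off < optlen)
    		opt[off++] = IPOPT_NOP;	/* pad the rest */

    	(void) printf("new option length: %u\n", opt[IPOPT_OLEN]);
    	return (0);
    }
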
9351 9392
9352 9393 /*
9353 9394 * Process IP options in an inbound packet. Always returns the nexthop.
9354 9395 * Normally this is the passed-in nexthop, but if there is an option
9355 9396 * that affects the nexthop (such as a source route), that one is returned.
9356 9397 * Sets *errorp if there is an error, in which case an ICMP error has been sent
9357 9398 * and mp freed.
9358 9399 */
9359 9400 ipaddr_t
9360 9401 ip_input_options(ipha_t *ipha, ipaddr_t dst, mblk_t *mp,
9361 9402 ip_recv_attr_t *ira, int *errorp)
9362 9403 {
9363 9404 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
9364 9405 ipoptp_t opts;
9365 9406 uchar_t *opt;
9366 9407 uint8_t optval;
9367 9408 uint8_t optlen;
9368 9409 intptr_t code = 0;
9369 9410 ire_t *ire;
9370 9411
9371 9412 ip2dbg(("ip_input_options\n"));
9372 9413 *errorp = 0;
9373 9414 for (optval = ipoptp_first(&opts, ipha);
9374 9415 optval != IPOPT_EOL;
9375 9416 optval = ipoptp_next(&opts)) {
9376 9417 opt = opts.ipoptp_cur;
9377 9418 optlen = opts.ipoptp_len;
9378 9419 ip2dbg(("ip_input_options: opt %d, len %d\n",
9379 9420 optval, optlen));
9380 9421 /*
9381 9422 * Note: we need to verify the checksum before we
9382 9423 * modify anything; thus this routine only extracts the next
9383 9424 * hop dst from any source route.
9384 9425 */
9385 9426 switch (optval) {
9386 9427 uint32_t off;
9387 9428 case IPOPT_SSRR:
9388 9429 case IPOPT_LSRR:
9389 9430 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9390 9431 if (optval == IPOPT_SSRR) {
9391 9432 ip1dbg(("ip_input_options: not next"
9392 9433 " strict source route 0x%x\n",
9393 9434 ntohl(dst)));
9394 9435 code = (char *)&ipha->ipha_dst -
9395 9436 (char *)ipha;
9396 9437 goto param_prob; /* RouterReq's */
9397 9438 }
9398 9439 ip2dbg(("ip_input_options: "
9399 9440 "not next source route 0x%x\n",
9400 9441 ntohl(dst)));
9401 9442 break;
9402 9443 }
9403 9444
9404 9445 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9405 9446 ip1dbg((
9406 9447 "ip_input_options: bad option offset\n"));
9407 9448 code = (char *)&opt[IPOPT_OLEN] -
9408 9449 (char *)ipha;
9409 9450 goto param_prob;
9410 9451 }
9411 9452 off = opt[IPOPT_OFFSET];
9412 9453 off--;
9413 9454 redo_srr:
9414 9455 if (optlen < IP_ADDR_LEN ||
9415 9456 off > optlen - IP_ADDR_LEN) {
9416 9457 /* End of source route */
9417 9458 ip1dbg(("ip_input_options: end of SR\n"));
9418 9459 break;
9419 9460 }
9420 9461 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9421 9462 ip1dbg(("ip_input_options: next hop 0x%x\n",
9422 9463 ntohl(dst)));
9423 9464
9424 9465 /*
9425 9466 * Check if our address is present more than
9426 9467 * once as consecutive hops in source route.
9427 9468 * XXX verify per-interface ip_forwarding
9428 9469 * for source route?
9429 9470 */
9430 9471 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9431 9472 off += IP_ADDR_LEN;
9432 9473 goto redo_srr;
9433 9474 }
9434 9475
9435 9476 if (dst == htonl(INADDR_LOOPBACK)) {
9436 9477 ip1dbg(("ip_input_options: loopback addr in "
9437 9478 "source route!\n"));
9438 9479 goto bad_src_route;
9439 9480 }
9440 9481 /*
9441 9482 * For strict: verify that dst is directly
9442 9483 * reachable.
9443 9484 */
9444 9485 if (optval == IPOPT_SSRR) {
9445 9486 ire = ire_ftable_lookup_v4(dst, 0, 0,
9446 9487 IRE_INTERFACE, NULL, ALL_ZONES,
9447 9488 ira->ira_tsl,
9448 9489 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
9449 9490 NULL);
9450 9491 if (ire == NULL) {
9451 9492 ip1dbg(("ip_input_options: SSRR not "
9452 9493 "directly reachable: 0x%x\n",
9453 9494 ntohl(dst)));
9454 9495 goto bad_src_route;
9455 9496 }
9456 9497 ire_refrele(ire);
9457 9498 }
9458 9499 /*
9459 9500 * Defer update of the offset and the record route
9460 9501 * until the packet is forwarded.
9461 9502 */
9462 9503 break;
9463 9504 case IPOPT_RR:
9464 9505 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9465 9506 ip1dbg((
9466 9507 "ip_input_options: bad option offset\n"));
9467 9508 code = (char *)&opt[IPOPT_OLEN] -
9468 9509 (char *)ipha;
9469 9510 goto param_prob;
9470 9511 }
9471 9512 break;
9472 9513 case IPOPT_TS:
9473 9514 /*
9474 9515 * Verify that length >= 5 and that there is either
9475 9516 * room for another timestamp or that the overflow
9476 9517 * counter is not maxed out.
9477 9518 */
9478 9519 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
9479 9520 if (optlen < IPOPT_MINLEN_IT) {
9480 9521 goto param_prob;
9481 9522 }
9482 9523 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9483 9524 ip1dbg((
9484 9525 "ip_input_options: bad option offset\n"));
9485 9526 code = (char *)&opt[IPOPT_OFFSET] -
9486 9527 (char *)ipha;
9487 9528 goto param_prob;
9488 9529 }
9489 9530 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9490 9531 case IPOPT_TS_TSONLY:
9491 9532 off = IPOPT_TS_TIMELEN;
9492 9533 break;
9493 9534 case IPOPT_TS_TSANDADDR:
9494 9535 case IPOPT_TS_PRESPEC:
9495 9536 case IPOPT_TS_PRESPEC_RFC791:
9496 9537 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9497 9538 break;
9498 9539 default:
9499 9540 code = (char *)&opt[IPOPT_POS_OV_FLG] -
9500 9541 (char *)ipha;
9501 9542 goto param_prob;
9502 9543 }
9503 9544 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
9504 9545 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
9505 9546 /*
9506 9547 * No room and the overflow counter is 15
9507 9548 * already.
9508 9549 */
9509 9550 goto param_prob;
9510 9551 }
9511 9552 break;
9512 9553 }
9513 9554 }
9514 9555
9515 9556 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0) {
9516 9557 return (dst);
9517 9558 }
9518 9559
9519 9560 ip1dbg(("ip_input_options: error processing IP options."));
9520 9561 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
9521 9562
9522 9563 param_prob:
9523 9564 /* make sure we clear any indication of a hardware checksum */
9524 9565 DB_CKSUMFLAGS(mp) = 0;
9525 9566 ip_drop_input("ICMP_PARAM_PROBLEM", mp, ira->ira_ill);
9526 9567 icmp_param_problem(mp, (uint8_t)code, ira);
9527 9568 *errorp = -1;
9528 9569 return (dst);
9529 9570
9530 9571 bad_src_route:
9531 9572 /* make sure we clear any indication of a hardware checksum */
9532 9573 DB_CKSUMFLAGS(mp) = 0;
9533 9574 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ira->ira_ill);
9534 9575 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9535 9576 *errorp = -1;
9536 9577 return (dst);
9537 9578 }
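
Both option walkers above juggle the timestamp option's pointer/overflow byte (IPOPT_POS_OV_FLG): the flag lives in the low nibble and a 4-bit overflow counter in the high nibble; once the counter reaches 15 with no room left, the packet earns an ICMP parameter problem. A tiny sketch of that nibble arithmetic, with an illustrative starting value:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint8_t pos_ov_flg = 0xE0;	/* counter 14, flag 0 (TSONLY) */
    	uint8_t ov;

    	/* Increase the overflow counter, as ip_forward_options does. */
    	ov = (pos_ov_flg >> 4) + 1;
    	pos_ov_flg = (uint8_t)((pos_ov_flg & 0x0F) | (ov << 4));
    	(void) printf("counter now %u\n", pos_ov_flg >> 4);

    	/* ip_input_options' sanity check: counter already maxed out? */
    	if ((pos_ov_flg & 0xF0) == 0xF0)
    		(void) printf("no room and counter is 15: param_prob\n");
    	return (0);
    }
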
9538 9579
9539 9580 /*
9540 9581 * IP & ICMP info in >=14 msg's ...
9541 9582 * - ip fixed part (mib2_ip_t)
9542 9583 * - icmp fixed part (mib2_icmp_t)
9543 9584 * - ipAddrEntryTable (ip 20) all IPv4 ipifs
9544 9585 * - ipRouteEntryTable (ip 21) all IPv4 IREs
9545 9586 * - ipNetToMediaEntryTable (ip 22) all IPv4 Neighbor Cache entries
9546 9587 * - ipRouteAttributeTable (ip 102) labeled routes
9547 9588 * - ip multicast membership (ip_member_t)
9548 9589 * - ip multicast source filtering (ip_grpsrc_t)
9549 9590 * - igmp fixed part (struct igmpstat)
9550 9591 * - multicast routing stats (struct mrtstat)
9551 9592 * - multicast routing vifs (array of struct vifctl)
9552 9593 * - multicast routing routes (array of struct mfcctl)
9553 9594 * - ip6 fixed part (mib2_ipv6IfStatsEntry_t)
9554 9595 * One per ill plus one generic
9555 9596 * - icmp6 fixed part (mib2_ipv6IfIcmpEntry_t)
9556 9597 * One per ill plus one generic
9557 9598 * - ipv6RouteEntry all IPv6 IREs
9558 9599 * - ipv6RouteAttributeTable (ip6 102) labeled routes
9559 9600 * - ipv6NetToMediaEntry all IPv6 Neighbor Cache entries
9560 9601 * - ipv6AddrEntry all IPv6 ipifs
9561 9602 * - ipv6 multicast membership (ipv6_member_t)
9562 9603 * - ipv6 multicast source filtering (ipv6_grpsrc_t)
9563 9604 *
9564 9605 * NOTE: original mpctl is copied for msg's 2..N, since its ctl part is
9565 9606 * already filled in by the caller.
9566 9607 * If legacy_req is true then MIB structures need to be truncated to their
9567 9608 * legacy sizes before being returned.
9568 9609 * Return value of 0 indicates that no messages were sent and caller
9569 9610 * should free mpctl.
9570 9611 */
9571 9612 int
9572 9613 ip_snmp_get(queue_t *q, mblk_t *mpctl, int level, boolean_t legacy_req)
9573 9614 {
9574 9615 ip_stack_t *ipst;
9575 9616 sctp_stack_t *sctps;
9576 9617
9577 9618 if (q->q_next != NULL) {
9578 9619 ipst = ILLQ_TO_IPST(q);
9579 9620 } else {
9580 9621 ipst = CONNQ_TO_IPST(q);
9581 9622 }
9582 9623 ASSERT(ipst != NULL);
9583 9624 sctps = ipst->ips_netstack->netstack_sctp;
9584 9625
9585 9626 if (mpctl == NULL || mpctl->b_cont == NULL) {
9586 9627 return (0);
9587 9628 }
9588 9629
9589 9630 /*
9590 9631 * For the benefit of the (broken) packet shell's use
9591 9632 * of the level, we make sure MIB2_TCP/MIB2_UDP can be used
9592 9633 * to make TCP and UDP appear first in the list of mib items.
9593 9634 * TBD: We could expand this and use it in netstat so that
9594 9635 * the kernel doesn't have to produce large tables (connections,
9595 9636 * routes, etc) when netstat only wants the statistics or a particular
9596 9637 * table.
9597 9638 */
9598 9639 if (!(level == MIB2_TCP || level == MIB2_UDP)) {
9599 9640 if ((mpctl = icmp_snmp_get(q, mpctl)) == NULL) {
9600 9641 return (1);
9601 9642 }
9602 9643 }
9603 9644
9604 9645 if (level != MIB2_TCP) {
9605 9646 if ((mpctl = udp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9606 9647 return (1);
9607 9648 }
9608 9649 }
9609 9650
9610 9651 if (level != MIB2_UDP) {
9611 9652 if ((mpctl = tcp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9612 9653 return (1);
9613 9654 }
9614 9655 }
9615 9656
9616 9657 if ((mpctl = ip_snmp_get_mib2_ip_traffic_stats(q, mpctl,
9617 9658 ipst, legacy_req)) == NULL) {
9618 9659 return (1);
9619 9660 }
9620 9661
9621 9662 if ((mpctl = ip_snmp_get_mib2_ip6(q, mpctl, ipst,
9622 9663 legacy_req)) == NULL) {
9623 9664 return (1);
9624 9665 }
9625 9666
9626 9667 if ((mpctl = ip_snmp_get_mib2_icmp(q, mpctl, ipst)) == NULL) {
9627 9668 return (1);
9628 9669 }
9629 9670
9630 9671 if ((mpctl = ip_snmp_get_mib2_icmp6(q, mpctl, ipst)) == NULL) {
9631 9672 return (1);
9632 9673 }
9633 9674
9634 9675 if ((mpctl = ip_snmp_get_mib2_igmp(q, mpctl, ipst)) == NULL) {
9635 9676 return (1);
9636 9677 }
9637 9678
9638 9679 if ((mpctl = ip_snmp_get_mib2_multi(q, mpctl, ipst)) == NULL) {
9639 9680 return (1);
9640 9681 }
9641 9682
9642 9683 if ((mpctl = ip_snmp_get_mib2_ip_addr(q, mpctl, ipst,
9643 9684 legacy_req)) == NULL) {
9644 9685 return (1);
9645 9686 }
9646 9687
9647 9688 if ((mpctl = ip_snmp_get_mib2_ip6_addr(q, mpctl, ipst,
9648 9689 legacy_req)) == NULL) {
9649 9690 return (1);
9650 9691 }
9651 9692
9652 9693 if ((mpctl = ip_snmp_get_mib2_ip_group_mem(q, mpctl, ipst)) == NULL) {
9653 9694 return (1);
9654 9695 }
9655 9696
9656 9697 if ((mpctl = ip_snmp_get_mib2_ip6_group_mem(q, mpctl, ipst)) == NULL) {
9657 9698 return (1);
9658 9699 }
9659 9700
9660 9701 if ((mpctl = ip_snmp_get_mib2_ip_group_src(q, mpctl, ipst)) == NULL) {
9661 9702 return (1);
9662 9703 }
9663 9704
9664 9705 if ((mpctl = ip_snmp_get_mib2_ip6_group_src(q, mpctl, ipst)) == NULL) {
9665 9706 return (1);
9666 9707 }
9667 9708
9668 9709 if ((mpctl = ip_snmp_get_mib2_virt_multi(q, mpctl, ipst)) == NULL) {
9669 9710 return (1);
9670 9711 }
9671 9712
9672 9713 if ((mpctl = ip_snmp_get_mib2_multi_rtable(q, mpctl, ipst)) == NULL) {
9673 9714 return (1);
9674 9715 }
9675 9716
9676 9717 mpctl = ip_snmp_get_mib2_ip_route_media(q, mpctl, level, ipst);
9677 9718 if (mpctl == NULL)
9678 9719 return (1);
9679 9720
9680 9721 mpctl = ip_snmp_get_mib2_ip6_route_media(q, mpctl, level, ipst);
9681 9722 if (mpctl == NULL)
9682 9723 return (1);
9683 9724
9684 9725 if ((mpctl = sctp_snmp_get_mib2(q, mpctl, sctps)) == NULL) {
9685 9726 return (1);
9686 9727 }
9728 +
9687 9729 if ((mpctl = ip_snmp_get_mib2_ip_dce(q, mpctl, ipst)) == NULL) {
9688 9730 return (1);
9689 9731 }
9732 +
9733 + if ((mpctl = dccp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9734 + return (1);
9735 + }
9736 +
9690 9737 freemsg(mpctl);
9691 9738 return (1);
9692 9739 }
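
Each helper called from ip_snmp_get() follows the same convention: send the control message it was handed upstream with qreply() and hand back a copymsg()'d duplicate for the next table, so a NULL return means allocation failed and the chain must stop. A sketch of that convention with hypothetical stand-in types (struct ctlmsg plays the role of an mblk_t):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctlmsg { char table[32]; };	/* stand-in for mblk_t */

    static struct ctlmsg *
    helper(struct ctlmsg *mp, const char *table)
    {
    	struct ctlmsg *copy = malloc(sizeof (*copy));	/* copymsg() */

    	if (copy != NULL)
    		*copy = *mp;	/* duplicate the ctl part first */
    	(void) snprintf(mp->table, sizeof (mp->table), "%s", table);
    	(void) printf("sent %s upstream\n", mp->table);	/* qreply() */
    	free(mp);
    	return (copy);		/* next link in the chain, or NULL */
    }

    int
    main(void)
    {
    	struct ctlmsg *mp = calloc(1, sizeof (*mp));

    	if (mp == NULL)
    		return (1);
    	if ((mp = helper(mp, "ip")) == NULL)
    		return (1);
    	if ((mp = helper(mp, "icmp")) == NULL)
    		return (1);
    	free(mp);		/* the final freemsg(mpctl) */
    	return (0);
    }
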
9693 9740
9694 9741 /* Get global (legacy) IPv4 statistics */
9695 9742 static mblk_t *
9696 9743 ip_snmp_get_mib2_ip(queue_t *q, mblk_t *mpctl, mib2_ipIfStatsEntry_t *ipmib,
9697 9744 ip_stack_t *ipst, boolean_t legacy_req)
9698 9745 {
9699 9746 mib2_ip_t old_ip_mib;
9700 9747 struct opthdr *optp;
9701 9748 mblk_t *mp2ctl;
9702 9749 mib2_ipAddrEntry_t mae;
9703 9750
9704 9751 /*
9705 9752 * make a copy of the original message
9706 9753 */
9707 9754 mp2ctl = copymsg(mpctl);
9708 9755
9709 9756 /* fixed length IP structure... */
9710 9757 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9711 9758 optp->level = MIB2_IP;
9712 9759 optp->name = 0;
9713 9760 SET_MIB(old_ip_mib.ipForwarding,
9714 9761 (WE_ARE_FORWARDING(ipst) ? 1 : 2));
9715 9762 SET_MIB(old_ip_mib.ipDefaultTTL,
9716 9763 (uint32_t)ipst->ips_ip_def_ttl);
9717 9764 SET_MIB(old_ip_mib.ipReasmTimeout,
9718 9765 ipst->ips_ip_reassembly_timeout);
9719 9766 SET_MIB(old_ip_mib.ipAddrEntrySize,
9720 9767 (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
9721 9768 sizeof (mib2_ipAddrEntry_t));
9722 9769 SET_MIB(old_ip_mib.ipRouteEntrySize,
9723 9770 sizeof (mib2_ipRouteEntry_t));
9724 9771 SET_MIB(old_ip_mib.ipNetToMediaEntrySize,
9725 9772 sizeof (mib2_ipNetToMediaEntry_t));
9726 9773 SET_MIB(old_ip_mib.ipMemberEntrySize, sizeof (ip_member_t));
9727 9774 SET_MIB(old_ip_mib.ipGroupSourceEntrySize, sizeof (ip_grpsrc_t));
9728 9775 SET_MIB(old_ip_mib.ipRouteAttributeSize,
9729 9776 sizeof (mib2_ipAttributeEntry_t));
9730 9777 SET_MIB(old_ip_mib.transportMLPSize, sizeof (mib2_transportMLPEntry_t));
9731 9778 SET_MIB(old_ip_mib.ipDestEntrySize, sizeof (dest_cache_entry_t));
9732 9779
9733 9780 /*
9734 9781 * Grab the statistics from the new IP MIB
9735 9782 */
9736 9783 SET_MIB(old_ip_mib.ipInReceives,
9737 9784 (uint32_t)ipmib->ipIfStatsHCInReceives);
9738 9785 SET_MIB(old_ip_mib.ipInHdrErrors, ipmib->ipIfStatsInHdrErrors);
9739 9786 SET_MIB(old_ip_mib.ipInAddrErrors, ipmib->ipIfStatsInAddrErrors);
9740 9787 SET_MIB(old_ip_mib.ipForwDatagrams,
9741 9788 (uint32_t)ipmib->ipIfStatsHCOutForwDatagrams);
9742 9789 SET_MIB(old_ip_mib.ipInUnknownProtos,
9743 9790 ipmib->ipIfStatsInUnknownProtos);
9744 9791 SET_MIB(old_ip_mib.ipInDiscards, ipmib->ipIfStatsInDiscards);
9745 9792 SET_MIB(old_ip_mib.ipInDelivers,
9746 9793 (uint32_t)ipmib->ipIfStatsHCInDelivers);
9747 9794 SET_MIB(old_ip_mib.ipOutRequests,
9748 9795 (uint32_t)ipmib->ipIfStatsHCOutRequests);
9749 9796 SET_MIB(old_ip_mib.ipOutDiscards, ipmib->ipIfStatsOutDiscards);
9750 9797 SET_MIB(old_ip_mib.ipOutNoRoutes, ipmib->ipIfStatsOutNoRoutes);
9751 9798 SET_MIB(old_ip_mib.ipReasmReqds, ipmib->ipIfStatsReasmReqds);
9752 9799 SET_MIB(old_ip_mib.ipReasmOKs, ipmib->ipIfStatsReasmOKs);
9753 9800 SET_MIB(old_ip_mib.ipReasmFails, ipmib->ipIfStatsReasmFails);
9754 9801 SET_MIB(old_ip_mib.ipFragOKs, ipmib->ipIfStatsOutFragOKs);
9755 9802 SET_MIB(old_ip_mib.ipFragFails, ipmib->ipIfStatsOutFragFails);
9756 9803 SET_MIB(old_ip_mib.ipFragCreates, ipmib->ipIfStatsOutFragCreates);
9757 9804
9758 9805 /* ipRoutingDiscards is not being used */
9759 9806 SET_MIB(old_ip_mib.ipRoutingDiscards, 0);
9760 9807 SET_MIB(old_ip_mib.tcpInErrs, ipmib->tcpIfStatsInErrs);
9761 9808 SET_MIB(old_ip_mib.udpNoPorts, ipmib->udpIfStatsNoPorts);
9762 9809 SET_MIB(old_ip_mib.ipInCksumErrs, ipmib->ipIfStatsInCksumErrs);
9763 9810 SET_MIB(old_ip_mib.ipReasmDuplicates,
9764 9811 ipmib->ipIfStatsReasmDuplicates);
9765 9812 SET_MIB(old_ip_mib.ipReasmPartDups, ipmib->ipIfStatsReasmPartDups);
9766 9813 SET_MIB(old_ip_mib.ipForwProhibits, ipmib->ipIfStatsForwProhibits);
9767 9814 SET_MIB(old_ip_mib.udpInCksumErrs, ipmib->udpIfStatsInCksumErrs);
9768 9815 SET_MIB(old_ip_mib.udpInOverflows, ipmib->udpIfStatsInOverflows);
9769 9816 SET_MIB(old_ip_mib.rawipInOverflows,
9770 9817 ipmib->rawipIfStatsInOverflows);
9771 9818
9772 9819 SET_MIB(old_ip_mib.ipsecInSucceeded, ipmib->ipsecIfStatsInSucceeded);
9773 9820 SET_MIB(old_ip_mib.ipsecInFailed, ipmib->ipsecIfStatsInFailed);
9774 9821 SET_MIB(old_ip_mib.ipInIPv6, ipmib->ipIfStatsInWrongIPVersion);
9775 9822 SET_MIB(old_ip_mib.ipOutIPv6, ipmib->ipIfStatsOutWrongIPVersion);
9776 9823 SET_MIB(old_ip_mib.ipOutSwitchIPv6,
9777 9824 ipmib->ipIfStatsOutSwitchIPVersion);
9778 9825
9779 9826 if (!snmp_append_data(mpctl->b_cont, (char *)&old_ip_mib,
9780 9827 (int)sizeof (old_ip_mib))) {
9781 9828 ip1dbg(("ip_snmp_get_mib2_ip: failed to allocate %u bytes\n",
9782 9829 (uint_t)sizeof (old_ip_mib)));
9783 9830 }
9784 9831
9785 9832 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9786 9833 ip3dbg(("ip_snmp_get_mib2_ip: level %d, name %d, len %d\n",
9787 9834 (int)optp->level, (int)optp->name, (int)optp->len));
9788 9835 qreply(q, mpctl);
9789 9836 return (mp2ctl);
9790 9837 }
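
When legacy_req is set, the entry sizes advertised above are trimmed with LEGACY_MIB_SIZE() so old consumers see the layout they expect. A sketch of the general technique, using an illustrative struct rather than the real mib2 definitions: the legacy size is the offset of the last old field plus that field's size.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct addr_entry {
    	uint32_t ae_addr;	/* legacy fields ... */
    	uint32_t ae_mask;	/* ... last legacy field */
    	uint64_t ae_newstat;	/* added later; trimmed for old clients */
    } addr_entry_t;

    int
    main(void)
    {
    	size_t legacy = offsetof(addr_entry_t, ae_mask) +
    	    sizeof (((addr_entry_t *)0)->ae_mask);

    	(void) printf("full %zu, legacy %zu\n",
    	    sizeof (addr_entry_t), legacy);
    	return (0);
    }
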
9791 9838
9792 9839 /* Per interface IPv4 statistics */
9793 9840 static mblk_t *
9794 9841 ip_snmp_get_mib2_ip_traffic_stats(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
9795 9842 boolean_t legacy_req)
9796 9843 {
9797 9844 struct opthdr *optp;
9798 9845 mblk_t *mp2ctl;
9799 9846 ill_t *ill;
9800 9847 ill_walk_context_t ctx;
9801 9848 mblk_t *mp_tail = NULL;
9802 9849 mib2_ipIfStatsEntry_t global_ip_mib;
9803 9850 mib2_ipAddrEntry_t mae;
9804 9851
9805 9852 /*
9806 9853 * Make a copy of the original message
9807 9854 */
9808 9855 mp2ctl = copymsg(mpctl);
9809 9856
9810 9857 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9811 9858 optp->level = MIB2_IP;
9812 9859 optp->name = MIB2_IP_TRAFFIC_STATS;
9813 9860 /* Include "unknown interface" ip_mib */
9814 9861 ipst->ips_ip_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv4;
9815 9862 ipst->ips_ip_mib.ipIfStatsIfIndex =
9816 9863 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
9817 9864 SET_MIB(ipst->ips_ip_mib.ipIfStatsForwarding,
9818 9865 (ipst->ips_ip_forwarding ? 1 : 2));
9819 9866 SET_MIB(ipst->ips_ip_mib.ipIfStatsDefaultTTL,
9820 9867 (uint32_t)ipst->ips_ip_def_ttl);
9821 9868 SET_MIB(ipst->ips_ip_mib.ipIfStatsEntrySize,
9822 9869 sizeof (mib2_ipIfStatsEntry_t));
9823 9870 SET_MIB(ipst->ips_ip_mib.ipIfStatsAddrEntrySize,
9824 9871 sizeof (mib2_ipAddrEntry_t));
9825 9872 SET_MIB(ipst->ips_ip_mib.ipIfStatsRouteEntrySize,
9826 9873 sizeof (mib2_ipRouteEntry_t));
9827 9874 SET_MIB(ipst->ips_ip_mib.ipIfStatsNetToMediaEntrySize,
9828 9875 sizeof (mib2_ipNetToMediaEntry_t));
9829 9876 SET_MIB(ipst->ips_ip_mib.ipIfStatsMemberEntrySize,
9830 9877 sizeof (ip_member_t));
9831 9878 SET_MIB(ipst->ips_ip_mib.ipIfStatsGroupSourceEntrySize,
9832 9879 sizeof (ip_grpsrc_t));
9833 9880
9834 9881 bcopy(&ipst->ips_ip_mib, &global_ip_mib, sizeof (global_ip_mib));
9835 9882
9836 9883 if (legacy_req) {
9837 9884 SET_MIB(global_ip_mib.ipIfStatsAddrEntrySize,
9838 9885 LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t));
9839 9886 }
9840 9887
9841 9888 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9842 9889 (char *)&global_ip_mib, (int)sizeof (global_ip_mib))) {
9843 9890 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9844 9891 "failed to allocate %u bytes\n",
9845 9892 (uint_t)sizeof (global_ip_mib)));
9846 9893 }
9847 9894
9848 9895 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
9849 9896 ill = ILL_START_WALK_V4(&ctx, ipst);
9850 9897 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
9851 9898 ill->ill_ip_mib->ipIfStatsIfIndex =
9852 9899 ill->ill_phyint->phyint_ifindex;
9853 9900 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
9854 9901 (ipst->ips_ip_forwarding ? 1 : 2));
9855 9902 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultTTL,
9856 9903 (uint32_t)ipst->ips_ip_def_ttl);
9857 9904
9858 9905 ip_mib2_add_ip_stats(&global_ip_mib, ill->ill_ip_mib);
9859 9906 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9860 9907 (char *)ill->ill_ip_mib,
9861 9908 (int)sizeof (*ill->ill_ip_mib))) {
9862 9909 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9863 9910 "failed to allocate %u bytes\n",
9864 9911 (uint_t)sizeof (*ill->ill_ip_mib)));
9865 9912 }
9866 9913 }
9867 9914 rw_exit(&ipst->ips_ill_g_lock);
9868 9915
9869 9916 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9870 9917 ip3dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9871 9918 "level %d, name %d, len %d\n",
9872 9919 (int)optp->level, (int)optp->name, (int)optp->len));
9873 9920 qreply(q, mpctl);
9874 9921
9875 9922 if (mp2ctl == NULL)
9876 9923 return (NULL);
9877 9924
9878 9925 return (ip_snmp_get_mib2_ip(q, mp2ctl, &global_ip_mib, ipst,
9879 9926 legacy_req));
9880 9927 }
9881 9928
9882 9929 /* Global IPv4 ICMP statistics */
9883 9930 static mblk_t *
9884 9931 ip_snmp_get_mib2_icmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9885 9932 {
9886 9933 struct opthdr *optp;
9887 9934 mblk_t *mp2ctl;
9888 9935
9889 9936 /*
9890 9937 * Make a copy of the original message
9891 9938 */
9892 9939 mp2ctl = copymsg(mpctl);
9893 9940
9894 9941 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9895 9942 optp->level = MIB2_ICMP;
9896 9943 optp->name = 0;
9897 9944 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_icmp_mib,
9898 9945 (int)sizeof (ipst->ips_icmp_mib))) {
9899 9946 ip1dbg(("ip_snmp_get_mib2_icmp: failed to allocate %u bytes\n",
9900 9947 (uint_t)sizeof (ipst->ips_icmp_mib)));
9901 9948 }
9902 9949 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9903 9950 ip3dbg(("ip_snmp_get_mib2_icmp: level %d, name %d, len %d\n",
9904 9951 (int)optp->level, (int)optp->name, (int)optp->len));
9905 9952 qreply(q, mpctl);
9906 9953 return (mp2ctl);
9907 9954 }
9908 9955
9909 9956 /* Global IPv4 IGMP statistics */
9910 9957 static mblk_t *
9911 9958 ip_snmp_get_mib2_igmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9912 9959 {
9913 9960 struct opthdr *optp;
9914 9961 mblk_t *mp2ctl;
9915 9962
9916 9963 /*
9917 9964 * make a copy of the original message
9918 9965 */
9919 9966 mp2ctl = copymsg(mpctl);
9920 9967
9921 9968 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9922 9969 optp->level = EXPER_IGMP;
9923 9970 optp->name = 0;
9924 9971 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_igmpstat,
9925 9972 (int)sizeof (ipst->ips_igmpstat))) {
9926 9973 ip1dbg(("ip_snmp_get_mib2_igmp: failed to allocate %u bytes\n",
9927 9974 (uint_t)sizeof (ipst->ips_igmpstat)));
9928 9975 }
9929 9976 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9930 9977 ip3dbg(("ip_snmp_get_mib2_igmp: level %d, name %d, len %d\n",
9931 9978 (int)optp->level, (int)optp->name, (int)optp->len));
9932 9979 qreply(q, mpctl);
9933 9980 return (mp2ctl);
9934 9981 }
9935 9982
9936 9983 /* Global IPv4 Multicast Routing statistics */
9937 9984 static mblk_t *
9938 9985 ip_snmp_get_mib2_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9939 9986 {
9940 9987 struct opthdr *optp;
9941 9988 mblk_t *mp2ctl;
9942 9989
9943 9990 /*
9944 9991 * make a copy of the original message
9945 9992 */
9946 9993 mp2ctl = copymsg(mpctl);
9947 9994
9948 9995 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9949 9996 optp->level = EXPER_DVMRP;
9950 9997 optp->name = 0;
9951 9998 if (!ip_mroute_stats(mpctl->b_cont, ipst)) {
9952 9999 ip0dbg(("ip_mroute_stats: failed\n"));
9953 10000 }
9954 10001 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9955 10002 ip3dbg(("ip_snmp_get_mib2_multi: level %d, name %d, len %d\n",
9956 10003 (int)optp->level, (int)optp->name, (int)optp->len));
9957 10004 qreply(q, mpctl);
9958 10005 return (mp2ctl);
9959 10006 }
9960 10007
9961 10008 /* IPv4 address information */
9962 10009 static mblk_t *
9963 10010 ip_snmp_get_mib2_ip_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
9964 10011 boolean_t legacy_req)
9965 10012 {
9966 10013 struct opthdr *optp;
9967 10014 mblk_t *mp2ctl;
9968 10015 mblk_t *mp_tail = NULL;
9969 10016 ill_t *ill;
9970 10017 ipif_t *ipif;
9971 10018 uint_t bitval;
9972 10019 mib2_ipAddrEntry_t mae;
9973 10020 size_t mae_size;
9974 10021 zoneid_t zoneid;
9975 10022 ill_walk_context_t ctx;
9976 10023
9977 10024 /*
9978 10025 * make a copy of the original message
9979 10026 */
9980 10027 mp2ctl = copymsg(mpctl);
9981 10028
9982 10029 mae_size = (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
9983 10030 sizeof (mib2_ipAddrEntry_t);
9984 10031
9985 10032 /* ipAddrEntryTable */
9986 10033
9987 10034 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9988 10035 optp->level = MIB2_IP;
9989 10036 optp->name = MIB2_IP_ADDR;
9990 10037 zoneid = Q_TO_CONN(q)->conn_zoneid;
9991 10038
9992 10039 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
9993 10040 ill = ILL_START_WALK_V4(&ctx, ipst);
9994 10041 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
9995 10042 for (ipif = ill->ill_ipif; ipif != NULL;
9996 10043 ipif = ipif->ipif_next) {
9997 10044 if (ipif->ipif_zoneid != zoneid &&
9998 10045 ipif->ipif_zoneid != ALL_ZONES)
9999 10046 continue;
10000 10047 /* Sum of count from dead IRE_LO* and our current */
10001 10048 mae.ipAdEntInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10002 10049 if (ipif->ipif_ire_local != NULL) {
10003 10050 mae.ipAdEntInfo.ae_ibcnt +=
10004 10051 ipif->ipif_ire_local->ire_ib_pkt_count;
10005 10052 }
10006 10053 mae.ipAdEntInfo.ae_obcnt = 0;
10007 10054 mae.ipAdEntInfo.ae_focnt = 0;
10008 10055
10009 10056 ipif_get_name(ipif, mae.ipAdEntIfIndex.o_bytes,
10010 10057 OCTET_LENGTH);
10011 10058 mae.ipAdEntIfIndex.o_length =
10012 10059 mi_strlen(mae.ipAdEntIfIndex.o_bytes);
10013 10060 mae.ipAdEntAddr = ipif->ipif_lcl_addr;
10014 10061 mae.ipAdEntNetMask = ipif->ipif_net_mask;
10015 10062 mae.ipAdEntInfo.ae_subnet = ipif->ipif_subnet;
10016 10063 mae.ipAdEntInfo.ae_subnet_len =
10017 10064 ip_mask_to_plen(ipif->ipif_net_mask);
10018 10065 mae.ipAdEntInfo.ae_src_addr = ipif->ipif_lcl_addr;
10019 10066 for (bitval = 1;
10020 10067 bitval &&
10021 10068 !(bitval & ipif->ipif_brd_addr);
10022 10069 bitval <<= 1)
10023 10070 noop;
10024 10071 mae.ipAdEntBcastAddr = bitval;
10025 10072 mae.ipAdEntReasmMaxSize = IP_MAXPACKET;
10026 10073 mae.ipAdEntInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10027 10074 mae.ipAdEntInfo.ae_metric = ipif->ipif_ill->ill_metric;
10028 10075 mae.ipAdEntInfo.ae_broadcast_addr =
10029 10076 ipif->ipif_brd_addr;
10030 10077 mae.ipAdEntInfo.ae_pp_dst_addr =
10031 10078 ipif->ipif_pp_dst_addr;
10032 10079 mae.ipAdEntInfo.ae_flags = ipif->ipif_flags |
10033 10080 ill->ill_flags | ill->ill_phyint->phyint_flags;
10034 10081 mae.ipAdEntRetransmitTime =
10035 10082 ill->ill_reachable_retrans_time;
10036 10083
10037 10084 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10038 10085 (char *)&mae, (int)mae_size)) {
10039 10086 ip1dbg(("ip_snmp_get_mib2_ip_addr: failed to "
10040 10087 "allocate %u bytes\n", (uint_t)mae_size));
10041 10088 }
10042 10089 }
10043 10090 }
10044 10091 rw_exit(&ipst->ips_ill_g_lock);
10045 10092
10046 10093 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10047 10094 ip3dbg(("ip_snmp_get_mib2_ip_addr: level %d, name %d, len %d\n",
10048 10095 (int)optp->level, (int)optp->name, (int)optp->len));
10049 10096 qreply(q, mpctl);
10050 10097 return (mp2ctl);
10051 10098 }
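
The odd-looking bitval loop above scans for the least-significant set bit of the interface broadcast address, which is what the MIB's ipAdEntBcastAddr field reports (1 for an all-ones host part, 0 if the address is zero). A standalone restatement with an illustrative address:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint32_t brd = 0x0a0103ff;	/* 10.1.3.255, host part ones */
    	uint32_t bitval;

    	/* Shift until we hit a set bit, or wrap to 0 for brd == 0. */
    	for (bitval = 1; bitval && !(bitval & brd); bitval <<= 1)
    		;
    	(void) printf("ipAdEntBcastAddr = %u\n", bitval);
    	return (0);
    }
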
10052 10099
10053 10100 /* IPv6 address information */
10054 10101 static mblk_t *
10055 10102 ip_snmp_get_mib2_ip6_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10056 10103 boolean_t legacy_req)
10057 10104 {
10058 10105 struct opthdr *optp;
10059 10106 mblk_t *mp2ctl;
10060 10107 mblk_t *mp_tail = NULL;
10061 10108 ill_t *ill;
10062 10109 ipif_t *ipif;
10063 10110 mib2_ipv6AddrEntry_t mae6;
10064 10111 size_t mae6_size;
10065 10112 zoneid_t zoneid;
10066 10113 ill_walk_context_t ctx;
10067 10114
10068 10115 /*
10069 10116 * make a copy of the original message
10070 10117 */
10071 10118 mp2ctl = copymsg(mpctl);
10072 10119
10073 10120 mae6_size = (legacy_req) ?
10074 10121 LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t) :
10075 10122 sizeof (mib2_ipv6AddrEntry_t);
10076 10123
10077 10124 /* ipv6AddrEntryTable */
10078 10125
10079 10126 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10080 10127 optp->level = MIB2_IP6;
10081 10128 optp->name = MIB2_IP6_ADDR;
10082 10129 zoneid = Q_TO_CONN(q)->conn_zoneid;
10083 10130
10084 10131 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10085 10132 ill = ILL_START_WALK_V6(&ctx, ipst);
10086 10133 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10087 10134 for (ipif = ill->ill_ipif; ipif != NULL;
10088 10135 ipif = ipif->ipif_next) {
10089 10136 if (ipif->ipif_zoneid != zoneid &&
10090 10137 ipif->ipif_zoneid != ALL_ZONES)
10091 10138 continue;
10092 10139 /* Sum of count from dead IRE_LO* and our current */
10093 10140 mae6.ipv6AddrInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10094 10141 if (ipif->ipif_ire_local != NULL) {
10095 10142 mae6.ipv6AddrInfo.ae_ibcnt +=
10096 10143 ipif->ipif_ire_local->ire_ib_pkt_count;
10097 10144 }
10098 10145 mae6.ipv6AddrInfo.ae_obcnt = 0;
10099 10146 mae6.ipv6AddrInfo.ae_focnt = 0;
10100 10147
10101 10148 ipif_get_name(ipif, mae6.ipv6AddrIfIndex.o_bytes,
10102 10149 OCTET_LENGTH);
10103 10150 mae6.ipv6AddrIfIndex.o_length =
10104 10151 mi_strlen(mae6.ipv6AddrIfIndex.o_bytes);
10105 10152 mae6.ipv6AddrAddress = ipif->ipif_v6lcl_addr;
10106 10153 mae6.ipv6AddrPfxLength =
10107 10154 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
10108 10155 mae6.ipv6AddrInfo.ae_subnet = ipif->ipif_v6subnet;
10109 10156 mae6.ipv6AddrInfo.ae_subnet_len =
10110 10157 mae6.ipv6AddrPfxLength;
10111 10158 mae6.ipv6AddrInfo.ae_src_addr = ipif->ipif_v6lcl_addr;
10112 10159
10113 10160 /* Type: stateless(1), stateful(2), unknown(3) */
10114 10161 if (ipif->ipif_flags & IPIF_ADDRCONF)
10115 10162 mae6.ipv6AddrType = 1;
10116 10163 else
10117 10164 mae6.ipv6AddrType = 2;
10118 10165 /* Anycast: true(1), false(2) */
10119 10166 if (ipif->ipif_flags & IPIF_ANYCAST)
10120 10167 mae6.ipv6AddrAnycastFlag = 1;
10121 10168 else
10122 10169 mae6.ipv6AddrAnycastFlag = 2;
10123 10170
10124 10171 /*
10125 10172 * Address status: preferred(1), deprecated(2),
10126 10173 * invalid(3), inaccessible(4), unknown(5)
10127 10174 */
10128 10175 if (ipif->ipif_flags & IPIF_NOLOCAL)
10129 10176 mae6.ipv6AddrStatus = 3;
10130 10177 else if (ipif->ipif_flags & IPIF_DEPRECATED)
10131 10178 mae6.ipv6AddrStatus = 2;
10132 10179 else
10133 10180 mae6.ipv6AddrStatus = 1;
10134 10181 mae6.ipv6AddrInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10135 10182 mae6.ipv6AddrInfo.ae_metric =
10136 10183 ipif->ipif_ill->ill_metric;
10137 10184 mae6.ipv6AddrInfo.ae_pp_dst_addr =
10138 10185 ipif->ipif_v6pp_dst_addr;
10139 10186 mae6.ipv6AddrInfo.ae_flags = ipif->ipif_flags |
10140 10187 ill->ill_flags | ill->ill_phyint->phyint_flags;
10141 10188 mae6.ipv6AddrReasmMaxSize = IP_MAXPACKET;
10142 10189 mae6.ipv6AddrIdentifier = ill->ill_token;
10143 10190 mae6.ipv6AddrIdentifierLen = ill->ill_token_length;
10144 10191 mae6.ipv6AddrReachableTime = ill->ill_reachable_time;
10145 10192 mae6.ipv6AddrRetransmitTime =
10146 10193 ill->ill_reachable_retrans_time;
10147 10194 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10148 10195 (char *)&mae6, (int)mae6_size)) {
10149 10196 ip1dbg(("ip_snmp_get_mib2_ip6_addr: failed to "
10150 10197 "allocate %u bytes\n",
10151 10198 (uint_t)mae6_size));
10152 10199 }
10153 10200 }
10154 10201 }
10155 10202 rw_exit(&ipst->ips_ill_g_lock);
10156 10203
10157 10204 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10158 10205 ip3dbg(("ip_snmp_get_mib2_ip6_addr: level %d, name %d, len %d\n",
10159 10206 (int)optp->level, (int)optp->name, (int)optp->len));
10160 10207 qreply(q, mpctl);
10161 10208 return (mp2ctl);
10162 10209 }
10163 10210
10164 10211 /* IPv4 multicast group membership. */
10165 10212 static mblk_t *
10166 10213 ip_snmp_get_mib2_ip_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10167 10214 {
10168 10215 struct opthdr *optp;
10169 10216 mblk_t *mp2ctl;
10170 10217 ill_t *ill;
10171 10218 ipif_t *ipif;
10172 10219 ilm_t *ilm;
10173 10220 ip_member_t ipm;
10174 10221 mblk_t *mp_tail = NULL;
10175 10222 ill_walk_context_t ctx;
10176 10223 zoneid_t zoneid;
10177 10224
10178 10225 /*
10179 10226 * make a copy of the original message
10180 10227 */
10181 10228 mp2ctl = copymsg(mpctl);
10182 10229 zoneid = Q_TO_CONN(q)->conn_zoneid;
10183 10230
10184 10231 /* ipGroupMember table */
10185 10232 optp = (struct opthdr *)&mpctl->b_rptr[
10186 10233 sizeof (struct T_optmgmt_ack)];
10187 10234 optp->level = MIB2_IP;
10188 10235 optp->name = EXPER_IP_GROUP_MEMBERSHIP;
10189 10236
10190 10237 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10191 10238 ill = ILL_START_WALK_V4(&ctx, ipst);
10192 10239 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10193 10240 /* Make sure the ill isn't going away. */
10194 10241 if (!ill_check_and_refhold(ill))
10195 10242 continue;
10196 10243 rw_exit(&ipst->ips_ill_g_lock);
10197 10244 rw_enter(&ill->ill_mcast_lock, RW_READER);
10198 10245 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10199 10246 if (ilm->ilm_zoneid != zoneid &&
10200 10247 ilm->ilm_zoneid != ALL_ZONES)
10201 10248 continue;
10202 10249
10203 10250 /* Is there an ipif for ilm_ifaddr? */
10204 10251 for (ipif = ill->ill_ipif; ipif != NULL;
10205 10252 ipif = ipif->ipif_next) {
10206 10253 if (!IPIF_IS_CONDEMNED(ipif) &&
10207 10254 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10208 10255 ilm->ilm_ifaddr != INADDR_ANY)
10209 10256 break;
10210 10257 }
10211 10258 if (ipif != NULL) {
10212 10259 ipif_get_name(ipif,
10213 10260 ipm.ipGroupMemberIfIndex.o_bytes,
10214 10261 OCTET_LENGTH);
10215 10262 } else {
10216 10263 ill_get_name(ill,
10217 10264 ipm.ipGroupMemberIfIndex.o_bytes,
10218 10265 OCTET_LENGTH);
10219 10266 }
10220 10267 ipm.ipGroupMemberIfIndex.o_length =
10221 10268 mi_strlen(ipm.ipGroupMemberIfIndex.o_bytes);
10222 10269
10223 10270 ipm.ipGroupMemberAddress = ilm->ilm_addr;
10224 10271 ipm.ipGroupMemberRefCnt = ilm->ilm_refcnt;
10225 10272 ipm.ipGroupMemberFilterMode = ilm->ilm_fmode;
10226 10273 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10227 10274 (char *)&ipm, (int)sizeof (ipm))) {
10228 10275 ip1dbg(("ip_snmp_get_mib2_ip_group: "
10229 10276 "failed to allocate %u bytes\n",
10230 10277 (uint_t)sizeof (ipm)));
10231 10278 }
10232 10279 }
10233 10280 rw_exit(&ill->ill_mcast_lock);
10234 10281 ill_refrele(ill);
10235 10282 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10236 10283 }
10237 10284 rw_exit(&ipst->ips_ill_g_lock);
10238 10285 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10239 10286 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10240 10287 (int)optp->level, (int)optp->name, (int)optp->len));
10241 10288 qreply(q, mpctl);
10242 10289 return (mp2ctl);
10243 10290 }
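
The group-membership walkers above all follow the same locking dance: pin the current ill with ill_check_and_refhold(), drop the global ill list lock while working under the per-ill mcast lock, then retake the global lock before advancing. A compressed userland sketch of the pattern, with hypothetical names and a plain counter standing in for a real (atomic) refcount:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct node {
    	struct node *n_next;
    	int n_ref;		/* a real refcount would be atomic */
    	const char *n_name;
    };

    static int
    refhold(struct node *n)
    {
    	/* The kernel version fails during teardown; here it always pins. */
    	return (++n->n_ref > 0);
    }

    static void
    walk(struct node *head)
    {
    	struct node *n;

    	pthread_rwlock_rdlock(&g_lock);
    	for (n = head; n != NULL; n = n->n_next) {
    		if (!refhold(n))
    			continue;
    		pthread_rwlock_unlock(&g_lock);
    		/* ... gather per-node data without the global lock ... */
    		(void) printf("visited %s\n", n->n_name);
    		n->n_ref--;			/* refrele */
    		pthread_rwlock_rdlock(&g_lock);	/* safe: n is pinned */
    	}
    	pthread_rwlock_unlock(&g_lock);
    }

    int
    main(void)
    {
    	struct node b = { NULL, 0, "ill1" };
    	struct node a = { &b, 0, "ill0" };

    	walk(&a);
    	return (0);
    }
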
10244 10291
10245 10292 /* IPv6 multicast group membership. */
10246 10293 static mblk_t *
10247 10294 ip_snmp_get_mib2_ip6_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10248 10295 {
10249 10296 struct opthdr *optp;
10250 10297 mblk_t *mp2ctl;
10251 10298 ill_t *ill;
10252 10299 ilm_t *ilm;
10253 10300 ipv6_member_t ipm6;
10254 10301 mblk_t *mp_tail = NULL;
10255 10302 ill_walk_context_t ctx;
10256 10303 zoneid_t zoneid;
10257 10304
10258 10305 /*
10259 10306 * make a copy of the original message
10260 10307 */
10261 10308 mp2ctl = copymsg(mpctl);
10262 10309 zoneid = Q_TO_CONN(q)->conn_zoneid;
10263 10310
10264 10311 /* ip6GroupMember table */
10265 10312 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10266 10313 optp->level = MIB2_IP6;
10267 10314 optp->name = EXPER_IP6_GROUP_MEMBERSHIP;
10268 10315
10269 10316 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10270 10317 ill = ILL_START_WALK_V6(&ctx, ipst);
10271 10318 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10272 10319 /* Make sure the ill isn't going away. */
10273 10320 if (!ill_check_and_refhold(ill))
10274 10321 continue;
10275 10322 rw_exit(&ipst->ips_ill_g_lock);
10276 10323 /*
10277 10324 * Normally we don't have any members on interfaces under IPMP.
10278 10325 * We report them as a debugging aid.
10279 10326 */
10280 10327 rw_enter(&ill->ill_mcast_lock, RW_READER);
10281 10328 ipm6.ipv6GroupMemberIfIndex = ill->ill_phyint->phyint_ifindex;
10282 10329 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10283 10330 if (ilm->ilm_zoneid != zoneid &&
10284 10331 ilm->ilm_zoneid != ALL_ZONES)
10285 10332 continue; /* not this zone */
10286 10333 ipm6.ipv6GroupMemberAddress = ilm->ilm_v6addr;
10287 10334 ipm6.ipv6GroupMemberRefCnt = ilm->ilm_refcnt;
10288 10335 ipm6.ipv6GroupMemberFilterMode = ilm->ilm_fmode;
10289 10336 if (!snmp_append_data2(mpctl->b_cont,
10290 10337 &mp_tail,
10291 10338 (char *)&ipm6, (int)sizeof (ipm6))) {
10292 10339 ip1dbg(("ip_snmp_get_mib2_ip6_group: "
10293 10340 "failed to allocate %u bytes\n",
10294 10341 (uint_t)sizeof (ipm6)));
10295 10342 }
10296 10343 }
10297 10344 rw_exit(&ill->ill_mcast_lock);
10298 10345 ill_refrele(ill);
10299 10346 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10300 10347 }
10301 10348 rw_exit(&ipst->ips_ill_g_lock);
10302 10349
10303 10350 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10304 10351 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10305 10352 (int)optp->level, (int)optp->name, (int)optp->len));
10306 10353 qreply(q, mpctl);
10307 10354 return (mp2ctl);
10308 10355 }
10309 10356
10310 10357 /* IP multicast filtered sources */
10311 10358 static mblk_t *
10312 10359 ip_snmp_get_mib2_ip_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10313 10360 {
10314 10361 struct opthdr *optp;
10315 10362 mblk_t *mp2ctl;
10316 10363 ill_t *ill;
10317 10364 ipif_t *ipif;
10318 10365 ilm_t *ilm;
10319 10366 ip_grpsrc_t ips;
10320 10367 mblk_t *mp_tail = NULL;
10321 10368 ill_walk_context_t ctx;
10322 10369 zoneid_t zoneid;
10323 10370 int i;
10324 10371 slist_t *sl;
10325 10372
10326 10373 /*
10327 10374 * make a copy of the original message
10328 10375 */
10329 10376 mp2ctl = copymsg(mpctl);
10330 10377 zoneid = Q_TO_CONN(q)->conn_zoneid;
10331 10378
10332 10379 /* ipGroupSource table */
10333 10380 optp = (struct opthdr *)&mpctl->b_rptr[
10334 10381 sizeof (struct T_optmgmt_ack)];
10335 10382 optp->level = MIB2_IP;
10336 10383 optp->name = EXPER_IP_GROUP_SOURCES;
10337 10384
10338 10385 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10339 10386 ill = ILL_START_WALK_V4(&ctx, ipst);
10340 10387 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10341 10388 /* Make sure the ill isn't going away. */
10342 10389 if (!ill_check_and_refhold(ill))
10343 10390 continue;
10344 10391 rw_exit(&ipst->ips_ill_g_lock);
10345 10392 rw_enter(&ill->ill_mcast_lock, RW_READER);
10346 10393 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10347 10394 sl = ilm->ilm_filter;
10348 10395 if (ilm->ilm_zoneid != zoneid &&
10349 10396 ilm->ilm_zoneid != ALL_ZONES)
10350 10397 continue;
10351 10398 if (SLIST_IS_EMPTY(sl))
10352 10399 continue;
10353 10400
10354 10401 /* Is there an ipif for ilm_ifaddr? */
10355 10402 for (ipif = ill->ill_ipif; ipif != NULL;
10356 10403 ipif = ipif->ipif_next) {
10357 10404 if (!IPIF_IS_CONDEMNED(ipif) &&
10358 10405 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10359 10406 ilm->ilm_ifaddr != INADDR_ANY)
10360 10407 break;
10361 10408 }
10362 10409 if (ipif != NULL) {
10363 10410 ipif_get_name(ipif,
10364 10411 ips.ipGroupSourceIfIndex.o_bytes,
10365 10412 OCTET_LENGTH);
10366 10413 } else {
10367 10414 ill_get_name(ill,
10368 10415 ips.ipGroupSourceIfIndex.o_bytes,
10369 10416 OCTET_LENGTH);
10370 10417 }
10371 10418 ips.ipGroupSourceIfIndex.o_length =
10372 10419 mi_strlen(ips.ipGroupSourceIfIndex.o_bytes);
10373 10420
10374 10421 ips.ipGroupSourceGroup = ilm->ilm_addr;
10375 10422 for (i = 0; i < sl->sl_numsrc; i++) {
10376 10423 if (!IN6_IS_ADDR_V4MAPPED(&sl->sl_addr[i]))
10377 10424 continue;
10378 10425 IN6_V4MAPPED_TO_IPADDR(&sl->sl_addr[i],
10379 10426 ips.ipGroupSourceAddress);
10380 10427 if (snmp_append_data2(mpctl->b_cont, &mp_tail,
10381 10428 (char *)&ips, (int)sizeof (ips)) == 0) {
10382 10429 ip1dbg(("ip_snmp_get_mib2_ip_group_src:"
10383 10430 " failed to allocate %u bytes\n",
10384 10431 (uint_t)sizeof (ips)));
10385 10432 }
10386 10433 }
10387 10434 }
10388 10435 rw_exit(&ill->ill_mcast_lock);
10389 10436 ill_refrele(ill);
10390 10437 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10391 10438 }
10392 10439 rw_exit(&ipst->ips_ill_g_lock);
10393 10440 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10394 10441 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10395 10442 (int)optp->level, (int)optp->name, (int)optp->len));
10396 10443 qreply(q, mpctl);
10397 10444 return (mp2ctl);
10398 10445 }
10399 10446
10400 10447 /* IPv6 multicast filtered sources. */
10401 10448 static mblk_t *
10402 10449 ip_snmp_get_mib2_ip6_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10403 10450 {
10404 10451 struct opthdr *optp;
10405 10452 mblk_t *mp2ctl;
10406 10453 ill_t *ill;
10407 10454 ilm_t *ilm;
10408 10455 ipv6_grpsrc_t ips6;
10409 10456 mblk_t *mp_tail = NULL;
10410 10457 ill_walk_context_t ctx;
10411 10458 zoneid_t zoneid;
10412 10459 int i;
10413 10460 slist_t *sl;
10414 10461
10415 10462 /*
10416 10463 * make a copy of the original message
10417 10464 */
10418 10465 mp2ctl = copymsg(mpctl);
10419 10466 zoneid = Q_TO_CONN(q)->conn_zoneid;
10420 10467
10421 10468 /* ip6GroupMember table */
10422 10469 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10423 10470 optp->level = MIB2_IP6;
10424 10471 optp->name = EXPER_IP6_GROUP_SOURCES;
10425 10472
10426 10473 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10427 10474 ill = ILL_START_WALK_V6(&ctx, ipst);
10428 10475 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10429 10476 /* Make sure the ill isn't going away. */
10430 10477 if (!ill_check_and_refhold(ill))
10431 10478 continue;
10432 10479 rw_exit(&ipst->ips_ill_g_lock);
10433 10480 /*
10434 10481 * Normally we don't have any members on interfaces under IPMP.
10435 10482 * We report them as a debugging aid.
10436 10483 */
10437 10484 rw_enter(&ill->ill_mcast_lock, RW_READER);
10438 10485 ips6.ipv6GroupSourceIfIndex = ill->ill_phyint->phyint_ifindex;
10439 10486 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10440 10487 sl = ilm->ilm_filter;
10441 10488 if (ilm->ilm_zoneid != zoneid &&
10442 10489 ilm->ilm_zoneid != ALL_ZONES)
10443 10490 continue;
10444 10491 if (SLIST_IS_EMPTY(sl))
10445 10492 continue;
10446 10493 ips6.ipv6GroupSourceGroup = ilm->ilm_v6addr;
10447 10494 for (i = 0; i < sl->sl_numsrc; i++) {
10448 10495 ips6.ipv6GroupSourceAddress = sl->sl_addr[i];
10449 10496 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10450 10497 (char *)&ips6, (int)sizeof (ips6))) {
10451 10498 ip1dbg(("ip_snmp_get_mib2_ip6_"
10452 10499 "group_src: failed to allocate "
10453 10500 "%u bytes\n",
10454 10501 (uint_t)sizeof (ips6)));
10455 10502 }
10456 10503 }
10457 10504 }
10458 10505 rw_exit(&ill->ill_mcast_lock);
10459 10506 ill_refrele(ill);
10460 10507 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10461 10508 }
10462 10509 rw_exit(&ipst->ips_ill_g_lock);
10463 10510
10464 10511 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10465 10512 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10466 10513 (int)optp->level, (int)optp->name, (int)optp->len));
10467 10514 qreply(q, mpctl);
10468 10515 return (mp2ctl);
10469 10516 }
10470 10517
10471 10518 /* Multicast routing virtual interface table. */
10472 10519 static mblk_t *
10473 10520 ip_snmp_get_mib2_virt_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10474 10521 {
10475 10522 struct opthdr *optp;
10476 10523 mblk_t *mp2ctl;
10477 10524
10478 10525 /*
10479 10526 * make a copy of the original message
10480 10527 */
10481 10528 mp2ctl = copymsg(mpctl);
10482 10529
10483 10530 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10484 10531 optp->level = EXPER_DVMRP;
10485 10532 optp->name = EXPER_DVMRP_VIF;
10486 10533 if (!ip_mroute_vif(mpctl->b_cont, ipst)) {
10487 10534 ip0dbg(("ip_mroute_vif: failed\n"));
10488 10535 }
10489 10536 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10490 10537 ip3dbg(("ip_snmp_get_mib2_virt_multi: level %d, name %d, len %d\n",
10491 10538 (int)optp->level, (int)optp->name, (int)optp->len));
10492 10539 qreply(q, mpctl);
10493 10540 return (mp2ctl);
10494 10541 }
10495 10542
10496 10543 /* Multicast routing table. */
10497 10544 static mblk_t *
10498 10545 ip_snmp_get_mib2_multi_rtable(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10499 10546 {
10500 10547 struct opthdr *optp;
10501 10548 mblk_t *mp2ctl;
10502 10549
10503 10550 /*
10504 10551 * make a copy of the original message
10505 10552 */
10506 10553 mp2ctl = copymsg(mpctl);
10507 10554
10508 10555 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10509 10556 optp->level = EXPER_DVMRP;
10510 10557 optp->name = EXPER_DVMRP_MRT;
10511 10558 if (!ip_mroute_mrt(mpctl->b_cont, ipst)) {
10512 10559 ip0dbg(("ip_mroute_mrt: failed\n"));
10513 10560 }
10514 10561 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10515 10562 ip3dbg(("ip_snmp_get_mib2_multi_rtable: level %d, name %d, len %d\n",
10516 10563 (int)optp->level, (int)optp->name, (int)optp->len));
10517 10564 qreply(q, mpctl);
10518 10565 return (mp2ctl);
10519 10566 }
10520 10567
10521 10568 /*
10522 10569 * Return ipRouteEntryTable, ipNetToMediaEntryTable, and ipRouteAttributeTable
10523 10570 * in one IRE walk.
10524 10571 */
10525 10572 static mblk_t *
10526 10573 ip_snmp_get_mib2_ip_route_media(queue_t *q, mblk_t *mpctl, int level,
10527 10574 ip_stack_t *ipst)
10528 10575 {
10529 10576 struct opthdr *optp;
10530 10577 mblk_t *mp2ctl; /* Returned */
10531 10578 mblk_t *mp3ctl; /* nettomedia */
10532 10579 mblk_t *mp4ctl; /* routeattrs */
10533 10580 iproutedata_t ird;
10534 10581 zoneid_t zoneid;
10535 10582
10536 10583 /*
10537 10584 * make copies of the original message
10538 10585 * - mp2ctl is returned unchanged to the caller for his use
10539 10586 * - mpctl is sent upstream as ipRouteEntryTable
10540 10587 * - mp3ctl is sent upstream as ipNetToMediaEntryTable
10541 10588 * - mp4ctl is sent upstream as ipRouteAttributeTable
10542 10589 */
10543 10590 mp2ctl = copymsg(mpctl);
10544 10591 mp3ctl = copymsg(mpctl);
10545 10592 mp4ctl = copymsg(mpctl);
10546 10593 if (mp3ctl == NULL || mp4ctl == NULL) {
10547 10594 freemsg(mp4ctl);
10548 10595 freemsg(mp3ctl);
10549 10596 freemsg(mp2ctl);
10550 10597 freemsg(mpctl);
10551 10598 return (NULL);
10552 10599 }
10553 10600
10554 10601 bzero(&ird, sizeof (ird));
10555 10602
10556 10603 ird.ird_route.lp_head = mpctl->b_cont;
10557 10604 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10558 10605 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10559 10606 /*
10560 10607 	 * If the level has been set to the special EXPER_IP_AND_ALL_IRES value,
10561 10608 	 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is
10562 10609 	 * intended as a temporary solution until a proper MIB API is available
10563 10610 	 * that provides complete filtering/caller-opt-in.
10564 10611 */
10565 10612 if (level == EXPER_IP_AND_ALL_IRES)
10566 10613 ird.ird_flags |= IRD_REPORT_ALL;
10567 10614
10568 10615 zoneid = Q_TO_CONN(q)->conn_zoneid;
10569 10616 ire_walk_v4(ip_snmp_get2_v4, &ird, zoneid, ipst);
10570 10617
10571 10618 /* ipRouteEntryTable in mpctl */
10572 10619 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10573 10620 optp->level = MIB2_IP;
10574 10621 optp->name = MIB2_IP_ROUTE;
10575 10622 optp->len = msgdsize(ird.ird_route.lp_head);
10576 10623 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10577 10624 (int)optp->level, (int)optp->name, (int)optp->len));
10578 10625 qreply(q, mpctl);
10579 10626
10580 10627 /* ipNetToMediaEntryTable in mp3ctl */
10581 10628 ncec_walk(NULL, ip_snmp_get2_v4_media, &ird, ipst);
10582 10629
10583 10630 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10584 10631 optp->level = MIB2_IP;
10585 10632 optp->name = MIB2_IP_MEDIA;
10586 10633 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10587 10634 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10588 10635 (int)optp->level, (int)optp->name, (int)optp->len));
10589 10636 qreply(q, mp3ctl);
10590 10637
10591 10638 /* ipRouteAttributeTable in mp4ctl */
10592 10639 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10593 10640 optp->level = MIB2_IP;
10594 10641 optp->name = EXPER_IP_RTATTR;
10595 10642 optp->len = msgdsize(ird.ird_attrs.lp_head);
10596 10643 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10597 10644 (int)optp->level, (int)optp->name, (int)optp->len));
10598 10645 if (optp->len == 0)
10599 10646 freemsg(mp4ctl);
10600 10647 else
10601 10648 qreply(q, mp4ctl);
10602 10649
10603 10650 return (mp2ctl);
10604 10651 }
10605 10652
10606 10653 /*
10607 10654 * Return ipv6RouteEntryTable and ipv6RouteAttributeTable in one IRE walk, and
10608 10655 * ipv6NetToMediaEntryTable in an NDP walk.
10609 10656 */
10610 10657 static mblk_t *
10611 10658 ip_snmp_get_mib2_ip6_route_media(queue_t *q, mblk_t *mpctl, int level,
10612 10659 ip_stack_t *ipst)
10613 10660 {
10614 10661 struct opthdr *optp;
10615 10662 mblk_t *mp2ctl; /* Returned */
10616 10663 mblk_t *mp3ctl; /* nettomedia */
10617 10664 mblk_t *mp4ctl; /* routeattrs */
10618 10665 iproutedata_t ird;
10619 10666 zoneid_t zoneid;
10620 10667
10621 10668 /*
10622 10669 * make copies of the original message
10623 10670 * - mp2ctl is returned unchanged to the caller for his use
10624 10671 * - mpctl is sent upstream as ipv6RouteEntryTable
10625 10672 * - mp3ctl is sent upstream as ipv6NetToMediaEntryTable
10626 10673 * - mp4ctl is sent upstream as ipv6RouteAttributeTable
10627 10674 */
10628 10675 mp2ctl = copymsg(mpctl);
10629 10676 mp3ctl = copymsg(mpctl);
10630 10677 mp4ctl = copymsg(mpctl);
10631 10678 if (mp3ctl == NULL || mp4ctl == NULL) {
10632 10679 freemsg(mp4ctl);
10633 10680 freemsg(mp3ctl);
10634 10681 freemsg(mp2ctl);
10635 10682 freemsg(mpctl);
10636 10683 return (NULL);
10637 10684 }
10638 10685
10639 10686 bzero(&ird, sizeof (ird));
10640 10687
10641 10688 ird.ird_route.lp_head = mpctl->b_cont;
10642 10689 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10643 10690 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10644 10691 /*
10645 10692 	 * If the level has been set to the special EXPER_IP_AND_ALL_IRES value,
10646 10693 	 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is
10647 10694 	 * intended as a temporary solution until a proper MIB API is available
10648 10695 	 * that provides complete filtering/caller-opt-in.
10649 10696 */
10650 10697 if (level == EXPER_IP_AND_ALL_IRES)
10651 10698 ird.ird_flags |= IRD_REPORT_ALL;
10652 10699
10653 10700 zoneid = Q_TO_CONN(q)->conn_zoneid;
10654 10701 ire_walk_v6(ip_snmp_get2_v6_route, &ird, zoneid, ipst);
10655 10702
10656 10703 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10657 10704 optp->level = MIB2_IP6;
10658 10705 optp->name = MIB2_IP6_ROUTE;
10659 10706 optp->len = msgdsize(ird.ird_route.lp_head);
10660 10707 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10661 10708 (int)optp->level, (int)optp->name, (int)optp->len));
10662 10709 qreply(q, mpctl);
10663 10710
10664 10711 /* ipv6NetToMediaEntryTable in mp3ctl */
10665 10712 ncec_walk(NULL, ip_snmp_get2_v6_media, &ird, ipst);
10666 10713
10667 10714 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10668 10715 optp->level = MIB2_IP6;
10669 10716 optp->name = MIB2_IP6_MEDIA;
10670 10717 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10671 10718 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10672 10719 (int)optp->level, (int)optp->name, (int)optp->len));
10673 10720 qreply(q, mp3ctl);
10674 10721
10675 10722 /* ipv6RouteAttributeTable in mp4ctl */
10676 10723 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10677 10724 optp->level = MIB2_IP6;
10678 10725 optp->name = EXPER_IP_RTATTR;
10679 10726 optp->len = msgdsize(ird.ird_attrs.lp_head);
10680 10727 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10681 10728 (int)optp->level, (int)optp->name, (int)optp->len));
10682 10729 if (optp->len == 0)
10683 10730 freemsg(mp4ctl);
10684 10731 else
10685 10732 qreply(q, mp4ctl);
10686 10733
10687 10734 return (mp2ctl);
10688 10735 }
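
/*
 * [Editorial sketch, not part of this change.] Every ip_snmp_get_mib2_*
 * routine above patches the same reply layout before calling qreply():
 * the first mblk carries a T_optmgmt_ack immediately followed by a
 * struct opthdr whose level/name identify the table, and whose len is
 * the size of the payload chained on b_cont. Assuming only standard
 * TPI/STREAMS interfaces, the repeated pattern reduces to a helper of
 * this shape (mib2_reply_sketch is a hypothetical name):
 */
static void
mib2_reply_sketch(queue_t *q, mblk_t *ctl, t_uscalar_t level,
    t_uscalar_t name)
{
	struct opthdr *optp;

	/* the opthdr sits directly behind the T_optmgmt_ack */
	optp = (struct opthdr *)&ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = level;
	optp->name = name;
	optp->len = (t_uscalar_t)msgdsize(ctl->b_cont);
	qreply(q, ctl);
}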
10689 10736
10690 10737 /*
10691 10738 * IPv6 mib: One per ill
10692 10739 */
10693 10740 static mblk_t *
10694 10741 ip_snmp_get_mib2_ip6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10695 10742 boolean_t legacy_req)
10696 10743 {
10697 10744 struct opthdr *optp;
10698 10745 mblk_t *mp2ctl;
10699 10746 ill_t *ill;
10700 10747 ill_walk_context_t ctx;
10701 10748 mblk_t *mp_tail = NULL;
10702 10749 mib2_ipv6AddrEntry_t mae6;
10703 10750 mib2_ipIfStatsEntry_t *ise;
10704 10751 size_t ise_size, iae_size;
10705 10752
10706 10753 /*
10707 10754 * Make a copy of the original message
10708 10755 */
10709 10756 mp2ctl = copymsg(mpctl);
10710 10757
10711 10758 /* fixed length IPv6 structure ... */
10712 10759
10713 10760 if (legacy_req) {
10714 10761 ise_size = LEGACY_MIB_SIZE(&ipst->ips_ip6_mib,
10715 10762 mib2_ipIfStatsEntry_t);
10716 10763 iae_size = LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t);
10717 10764 } else {
10718 10765 ise_size = sizeof (mib2_ipIfStatsEntry_t);
10719 10766 iae_size = sizeof (mib2_ipv6AddrEntry_t);
10720 10767 }
10721 10768
10722 10769 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10723 10770 optp->level = MIB2_IP6;
10724 10771 optp->name = 0;
10725 10772 /* Include "unknown interface" ip6_mib */
10726 10773 ipst->ips_ip6_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv6;
10727 10774 ipst->ips_ip6_mib.ipIfStatsIfIndex =
10728 10775 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
10729 10776 SET_MIB(ipst->ips_ip6_mib.ipIfStatsForwarding,
10730 10777 ipst->ips_ipv6_forwarding ? 1 : 2);
10731 10778 SET_MIB(ipst->ips_ip6_mib.ipIfStatsDefaultHopLimit,
10732 10779 ipst->ips_ipv6_def_hops);
10733 10780 SET_MIB(ipst->ips_ip6_mib.ipIfStatsEntrySize,
10734 10781 sizeof (mib2_ipIfStatsEntry_t));
10735 10782 SET_MIB(ipst->ips_ip6_mib.ipIfStatsAddrEntrySize,
10736 10783 sizeof (mib2_ipv6AddrEntry_t));
10737 10784 SET_MIB(ipst->ips_ip6_mib.ipIfStatsRouteEntrySize,
10738 10785 sizeof (mib2_ipv6RouteEntry_t));
10739 10786 SET_MIB(ipst->ips_ip6_mib.ipIfStatsNetToMediaEntrySize,
10740 10787 sizeof (mib2_ipv6NetToMediaEntry_t));
10741 10788 SET_MIB(ipst->ips_ip6_mib.ipIfStatsMemberEntrySize,
10742 10789 sizeof (ipv6_member_t));
10743 10790 SET_MIB(ipst->ips_ip6_mib.ipIfStatsGroupSourceEntrySize,
10744 10791 sizeof (ipv6_grpsrc_t));
10745 10792
10746 10793 /*
10747 10794 * Synchronize 64- and 32-bit counters
10748 10795 */
10749 10796 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInReceives,
10750 10797 ipIfStatsHCInReceives);
10751 10798 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInDelivers,
10752 10799 ipIfStatsHCInDelivers);
10753 10800 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutRequests,
10754 10801 ipIfStatsHCOutRequests);
10755 10802 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutForwDatagrams,
10756 10803 ipIfStatsHCOutForwDatagrams);
10757 10804 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutMcastPkts,
10758 10805 ipIfStatsHCOutMcastPkts);
10759 10806 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInMcastPkts,
10760 10807 ipIfStatsHCInMcastPkts);
10761 10808
10762 10809 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10763 10810 (char *)&ipst->ips_ip6_mib, (int)ise_size)) {
10764 10811 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate %u bytes\n",
10765 10812 (uint_t)ise_size));
10766 10813 } else if (legacy_req) {
10767 10814 /* Adjust the EntrySize fields for legacy requests. */
10768 10815 ise =
10769 10816 (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr - (int)ise_size);
10770 10817 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10771 10818 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10772 10819 }
10773 10820
10774 10821 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10775 10822 ill = ILL_START_WALK_V6(&ctx, ipst);
10776 10823 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10777 10824 ill->ill_ip_mib->ipIfStatsIfIndex =
10778 10825 ill->ill_phyint->phyint_ifindex;
10779 10826 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
10780 10827 ipst->ips_ipv6_forwarding ? 1 : 2);
10781 10828 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultHopLimit,
10782 10829 ill->ill_max_hops);
10783 10830
10784 10831 /*
10785 10832 * Synchronize 64- and 32-bit counters
10786 10833 */
10787 10834 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInReceives,
10788 10835 ipIfStatsHCInReceives);
10789 10836 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInDelivers,
10790 10837 ipIfStatsHCInDelivers);
10791 10838 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutRequests,
10792 10839 ipIfStatsHCOutRequests);
10793 10840 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutForwDatagrams,
10794 10841 ipIfStatsHCOutForwDatagrams);
10795 10842 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutMcastPkts,
10796 10843 ipIfStatsHCOutMcastPkts);
10797 10844 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInMcastPkts,
10798 10845 ipIfStatsHCInMcastPkts);
10799 10846
10800 10847 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10801 10848 (char *)ill->ill_ip_mib, (int)ise_size)) {
10802 10849 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate "
10803 10850 "%u bytes\n", (uint_t)ise_size));
10804 10851 } else if (legacy_req) {
10805 10852 /* Adjust the EntrySize fields for legacy requests. */
10806 10853 ise = (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr -
10807 10854 (int)ise_size);
10808 10855 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10809 10856 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10810 10857 }
10811 10858 }
10812 10859 rw_exit(&ipst->ips_ill_g_lock);
10813 10860
10814 10861 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10815 10862 ip3dbg(("ip_snmp_get_mib2_ip6: level %d, name %d, len %d\n",
10816 10863 (int)optp->level, (int)optp->name, (int)optp->len));
10817 10864 qreply(q, mpctl);
10818 10865 return (mp2ctl);
10819 10866 }
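
/*
 * [Editorial sketch, not part of this change.] The SYNC32_MIB() calls
 * above refresh the legacy 32-bit counters from their 64-bit "HC"
 * (high capacity) twins just before the row is exported, so older
 * consumers keep a wrapping 32-bit view. Assuming the macro simply
 * truncates to the low 32 bits, a standalone user-level model:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t hc_in_receives = 0x100000007ULL;	/* 64-bit HC counter */
	uint32_t in_receives;				/* legacy 32-bit twin */

	/* "sync": the legacy counter is the HC value modulo 2^32 */
	in_receives = (uint32_t)(hc_in_receives & 0xffffffffULL);
	(void) printf("legacy view: %u\n", (unsigned int)in_receives); /* 7 */
	return (0);
}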
10820 10867
10821 10868 /*
10822 10869 * ICMPv6 mib: One per ill
10823 10870 */
10824 10871 static mblk_t *
10825 10872 ip_snmp_get_mib2_icmp6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10826 10873 {
10827 10874 struct opthdr *optp;
10828 10875 mblk_t *mp2ctl;
10829 10876 ill_t *ill;
10830 10877 ill_walk_context_t ctx;
10831 10878 mblk_t *mp_tail = NULL;
10832 10879 /*
10833 10880 * Make a copy of the original message
10834 10881 */
10835 10882 mp2ctl = copymsg(mpctl);
10836 10883
10837 10884 /* fixed length ICMPv6 structure ... */
10838 10885
10839 10886 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10840 10887 optp->level = MIB2_ICMP6;
10841 10888 optp->name = 0;
10842 10889 /* Include "unknown interface" icmp6_mib */
10843 10890 ipst->ips_icmp6_mib.ipv6IfIcmpIfIndex =
10844 10891 MIB2_UNKNOWN_INTERFACE; /* netstat flag */
10845 10892 ipst->ips_icmp6_mib.ipv6IfIcmpEntrySize =
10846 10893 sizeof (mib2_ipv6IfIcmpEntry_t);
10847 10894 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10848 10895 (char *)&ipst->ips_icmp6_mib,
10849 10896 (int)sizeof (ipst->ips_icmp6_mib))) {
10850 10897 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate %u bytes\n",
10851 10898 (uint_t)sizeof (ipst->ips_icmp6_mib)));
10852 10899 }
10853 10900
10854 10901 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10855 10902 ill = ILL_START_WALK_V6(&ctx, ipst);
10856 10903 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10857 10904 ill->ill_icmp6_mib->ipv6IfIcmpIfIndex =
10858 10905 ill->ill_phyint->phyint_ifindex;
10859 10906 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10860 10907 (char *)ill->ill_icmp6_mib,
10861 10908 (int)sizeof (*ill->ill_icmp6_mib))) {
10862 10909 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate "
10863 10910 "%u bytes\n",
10864 10911 (uint_t)sizeof (*ill->ill_icmp6_mib)));
10865 10912 }
10866 10913 }
10867 10914 rw_exit(&ipst->ips_ill_g_lock);
10868 10915
10869 10916 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10870 10917 ip3dbg(("ip_snmp_get_mib2_icmp6: level %d, name %d, len %d\n",
10871 10918 (int)optp->level, (int)optp->name, (int)optp->len));
10872 10919 qreply(q, mpctl);
10873 10920 return (mp2ctl);
10874 10921 }
10875 10922
10876 10923 /*
10877 10924 * ire_walk routine to create both ipRouteEntryTable and
10878 10925 * ipRouteAttributeTable in one IRE walk
10879 10926 */
10880 10927 static void
10881 10928 ip_snmp_get2_v4(ire_t *ire, iproutedata_t *ird)
10882 10929 {
10883 10930 ill_t *ill;
10884 10931 mib2_ipRouteEntry_t *re;
10885 10932 mib2_ipAttributeEntry_t iaes;
10886 10933 tsol_ire_gw_secattr_t *attrp;
10887 10934 tsol_gc_t *gc = NULL;
10888 10935 tsol_gcgrp_t *gcgrp = NULL;
10889 10936 ip_stack_t *ipst = ire->ire_ipst;
10890 10937
10891 10938 ASSERT(ire->ire_ipversion == IPV4_VERSION);
10892 10939
10893 10940 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
10894 10941 if (ire->ire_testhidden)
10895 10942 return;
10896 10943 if (ire->ire_type & IRE_IF_CLONE)
10897 10944 return;
10898 10945 }
10899 10946
10900 10947 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
10901 10948 return;
10902 10949
10903 10950 if ((attrp = ire->ire_gw_secattr) != NULL) {
10904 10951 mutex_enter(&attrp->igsa_lock);
10905 10952 if ((gc = attrp->igsa_gc) != NULL) {
10906 10953 gcgrp = gc->gc_grp;
10907 10954 ASSERT(gcgrp != NULL);
10908 10955 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
10909 10956 }
10910 10957 mutex_exit(&attrp->igsa_lock);
10911 10958 }
10912 10959 /*
10913 10960 * Return all IRE types for route table... let caller pick and choose
10914 10961 */
10915 10962 re->ipRouteDest = ire->ire_addr;
10916 10963 ill = ire->ire_ill;
10917 10964 re->ipRouteIfIndex.o_length = 0;
10918 10965 if (ill != NULL) {
10919 10966 ill_get_name(ill, re->ipRouteIfIndex.o_bytes, OCTET_LENGTH);
10920 10967 re->ipRouteIfIndex.o_length =
10921 10968 mi_strlen(re->ipRouteIfIndex.o_bytes);
10922 10969 }
10923 10970 re->ipRouteMetric1 = -1;
10924 10971 re->ipRouteMetric2 = -1;
10925 10972 re->ipRouteMetric3 = -1;
10926 10973 re->ipRouteMetric4 = -1;
10927 10974
10928 10975 re->ipRouteNextHop = ire->ire_gateway_addr;
10929 10976 /* indirect(4), direct(3), or invalid(2) */
10930 10977 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
10931 10978 re->ipRouteType = 2;
10932 10979 else if (ire->ire_type & IRE_ONLINK)
10933 10980 re->ipRouteType = 3;
10934 10981 else
10935 10982 re->ipRouteType = 4;
10936 10983
10937 10984 re->ipRouteProto = -1;
10938 10985 re->ipRouteAge = gethrestime_sec() - ire->ire_create_time;
10939 10986 re->ipRouteMask = ire->ire_mask;
10940 10987 re->ipRouteMetric5 = -1;
10941 10988 re->ipRouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
10942 10989 if (ire->ire_ill != NULL && re->ipRouteInfo.re_max_frag == 0)
10943 10990 re->ipRouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
10944 10991
10945 10992 re->ipRouteInfo.re_frag_flag = 0;
10946 10993 re->ipRouteInfo.re_rtt = 0;
10947 10994 re->ipRouteInfo.re_src_addr = 0;
10948 10995 re->ipRouteInfo.re_ref = ire->ire_refcnt;
10949 10996 re->ipRouteInfo.re_obpkt = ire->ire_ob_pkt_count;
10950 10997 re->ipRouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
10951 10998 re->ipRouteInfo.re_flags = ire->ire_flags;
10952 10999
10953 11000 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
10954 11001 if (ire->ire_type & IRE_INTERFACE) {
10955 11002 ire_t *child;
10956 11003
10957 11004 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
10958 11005 child = ire->ire_dep_children;
10959 11006 while (child != NULL) {
10960 11007 re->ipRouteInfo.re_obpkt += child->ire_ob_pkt_count;
10961 11008 re->ipRouteInfo.re_ibpkt += child->ire_ib_pkt_count;
10962 11009 child = child->ire_dep_sib_next;
10963 11010 }
10964 11011 rw_exit(&ipst->ips_ire_dep_lock);
10965 11012 }
10966 11013
10967 11014 if (ire->ire_flags & RTF_DYNAMIC) {
10968 11015 re->ipRouteInfo.re_ire_type = IRE_HOST_REDIRECT;
10969 11016 } else {
10970 11017 re->ipRouteInfo.re_ire_type = ire->ire_type;
10971 11018 }
10972 11019
10973 11020 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
10974 11021 (char *)re, (int)sizeof (*re))) {
10975 11022 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u bytes\n",
10976 11023 (uint_t)sizeof (*re)));
10977 11024 }
10978 11025
10979 11026 if (gc != NULL) {
10980 11027 iaes.iae_routeidx = ird->ird_idx;
10981 11028 iaes.iae_doi = gc->gc_db->gcdb_doi;
10982 11029 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
10983 11030
10984 11031 if (!snmp_append_data2(ird->ird_attrs.lp_head,
10985 11032 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
10986 11033 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u "
10987 11034 "bytes\n", (uint_t)sizeof (iaes)));
10988 11035 }
10989 11036 }
10990 11037
10991 11038 /* bump route index for next pass */
10992 11039 ird->ird_idx++;
10993 11040
10994 11041 kmem_free(re, sizeof (*re));
10995 11042 if (gcgrp != NULL)
10996 11043 rw_exit(&gcgrp->gcgrp_rwlock);
10997 11044 }
10998 11045
10999 11046 /*
11000 11047 * ire_walk routine to create ipv6RouteEntryTable and ipRouteEntryTable.
11001 11048 */
11002 11049 static void
11003 11050 ip_snmp_get2_v6_route(ire_t *ire, iproutedata_t *ird)
11004 11051 {
11005 11052 ill_t *ill;
11006 11053 mib2_ipv6RouteEntry_t *re;
11007 11054 mib2_ipAttributeEntry_t iaes;
11008 11055 tsol_ire_gw_secattr_t *attrp;
11009 11056 tsol_gc_t *gc = NULL;
11010 11057 tsol_gcgrp_t *gcgrp = NULL;
11011 11058 ip_stack_t *ipst = ire->ire_ipst;
11012 11059
11013 11060 ASSERT(ire->ire_ipversion == IPV6_VERSION);
11014 11061
11015 11062 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
11016 11063 if (ire->ire_testhidden)
11017 11064 return;
11018 11065 if (ire->ire_type & IRE_IF_CLONE)
11019 11066 return;
11020 11067 }
11021 11068
11022 11069 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
11023 11070 return;
11024 11071
11025 11072 if ((attrp = ire->ire_gw_secattr) != NULL) {
11026 11073 mutex_enter(&attrp->igsa_lock);
11027 11074 if ((gc = attrp->igsa_gc) != NULL) {
11028 11075 gcgrp = gc->gc_grp;
11029 11076 ASSERT(gcgrp != NULL);
11030 11077 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
11031 11078 }
11032 11079 mutex_exit(&attrp->igsa_lock);
11033 11080 }
11034 11081 /*
11035 11082 * Return all IRE types for route table... let caller pick and choose
11036 11083 */
11037 11084 re->ipv6RouteDest = ire->ire_addr_v6;
11038 11085 re->ipv6RoutePfxLength = ip_mask_to_plen_v6(&ire->ire_mask_v6);
11039 11086 re->ipv6RouteIndex = 0; /* Unique when multiple with same dest/plen */
11040 11087 re->ipv6RouteIfIndex.o_length = 0;
11041 11088 ill = ire->ire_ill;
11042 11089 if (ill != NULL) {
11043 11090 ill_get_name(ill, re->ipv6RouteIfIndex.o_bytes, OCTET_LENGTH);
11044 11091 re->ipv6RouteIfIndex.o_length =
11045 11092 mi_strlen(re->ipv6RouteIfIndex.o_bytes);
11046 11093 }
11047 11094
11048 11095 ASSERT(!(ire->ire_type & IRE_BROADCAST));
11049 11096
11050 11097 mutex_enter(&ire->ire_lock);
11051 11098 re->ipv6RouteNextHop = ire->ire_gateway_addr_v6;
11052 11099 mutex_exit(&ire->ire_lock);
11053 11100
11054 11101 /* remote(4), local(3), or discard(2) */
11055 11102 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
11056 11103 re->ipv6RouteType = 2;
11057 11104 else if (ire->ire_type & IRE_ONLINK)
11058 11105 re->ipv6RouteType = 3;
11059 11106 else
11060 11107 re->ipv6RouteType = 4;
11061 11108
11062 11109 re->ipv6RouteProtocol = -1;
11063 11110 re->ipv6RoutePolicy = 0;
11064 11111 re->ipv6RouteAge = gethrestime_sec() - ire->ire_create_time;
11065 11112 re->ipv6RouteNextHopRDI = 0;
11066 11113 re->ipv6RouteWeight = 0;
11067 11114 re->ipv6RouteMetric = 0;
11068 11115 re->ipv6RouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
11069 11116 if (ire->ire_ill != NULL && re->ipv6RouteInfo.re_max_frag == 0)
11070 11117 re->ipv6RouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
11071 11118
11072 11119 re->ipv6RouteInfo.re_frag_flag = 0;
11073 11120 re->ipv6RouteInfo.re_rtt = 0;
11074 11121 re->ipv6RouteInfo.re_src_addr = ipv6_all_zeros;
11075 11122 re->ipv6RouteInfo.re_obpkt = ire->ire_ob_pkt_count;
11076 11123 re->ipv6RouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
11077 11124 re->ipv6RouteInfo.re_ref = ire->ire_refcnt;
11078 11125 re->ipv6RouteInfo.re_flags = ire->ire_flags;
11079 11126
11080 11127 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
11081 11128 if (ire->ire_type & IRE_INTERFACE) {
11082 11129 ire_t *child;
11083 11130
11084 11131 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
11085 11132 child = ire->ire_dep_children;
11086 11133 while (child != NULL) {
11087 11134 re->ipv6RouteInfo.re_obpkt += child->ire_ob_pkt_count;
11088 11135 re->ipv6RouteInfo.re_ibpkt += child->ire_ib_pkt_count;
11089 11136 child = child->ire_dep_sib_next;
11090 11137 }
11091 11138 rw_exit(&ipst->ips_ire_dep_lock);
11092 11139 }
11093 11140 if (ire->ire_flags & RTF_DYNAMIC) {
11094 11141 re->ipv6RouteInfo.re_ire_type = IRE_HOST_REDIRECT;
11095 11142 } else {
11096 11143 re->ipv6RouteInfo.re_ire_type = ire->ire_type;
11097 11144 }
11098 11145
11099 11146 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
11100 11147 (char *)re, (int)sizeof (*re))) {
11101 11148 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u bytes\n",
11102 11149 (uint_t)sizeof (*re)));
11103 11150 }
11104 11151
11105 11152 if (gc != NULL) {
11106 11153 iaes.iae_routeidx = ird->ird_idx;
11107 11154 iaes.iae_doi = gc->gc_db->gcdb_doi;
11108 11155 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
11109 11156
11110 11157 if (!snmp_append_data2(ird->ird_attrs.lp_head,
11111 11158 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
11112 11159 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u "
11113 11160 "bytes\n", (uint_t)sizeof (iaes)));
11114 11161 }
11115 11162 }
11116 11163
11117 11164 /* bump route index for next pass */
11118 11165 ird->ird_idx++;
11119 11166
11120 11167 kmem_free(re, sizeof (*re));
11121 11168 if (gcgrp != NULL)
11122 11169 rw_exit(&gcgrp->gcgrp_rwlock);
11123 11170 }
11124 11171
11125 11172 /*
11126 11173 * ncec_walk routine to create ipv6NetToMediaEntryTable
11127 11174 */
11128 11175 static int
11129 11176 ip_snmp_get2_v6_media(ncec_t *ncec, iproutedata_t *ird)
11130 11177 {
11131 11178 ill_t *ill;
11132 11179 mib2_ipv6NetToMediaEntry_t ntme;
11133 11180
11134 11181 ill = ncec->ncec_ill;
11135 11182 /* skip arpce entries, and loopback ncec entries */
11136 11183 if (ill->ill_isv6 == B_FALSE || ill->ill_net_type == IRE_LOOPBACK)
11137 11184 return (0);
11138 11185 /*
11139 11186 * Neighbor cache entry attached to IRE with on-link
11140 11187 * destination.
11141 11188 * We report all IPMP groups on ncec_ill which is normally the upper.
11142 11189 */
11143 11190 ntme.ipv6NetToMediaIfIndex = ill->ill_phyint->phyint_ifindex;
11144 11191 ntme.ipv6NetToMediaNetAddress = ncec->ncec_addr;
11145 11192 ntme.ipv6NetToMediaPhysAddress.o_length = ill->ill_phys_addr_length;
11146 11193 if (ncec->ncec_lladdr != NULL) {
11147 11194 bcopy(ncec->ncec_lladdr, ntme.ipv6NetToMediaPhysAddress.o_bytes,
11148 11195 ntme.ipv6NetToMediaPhysAddress.o_length);
11149 11196 }
11150 11197 /*
11151 11198 * Note: Returns ND_* states. Should be:
11152 11199 * reachable(1), stale(2), delay(3), probe(4),
11153 11200 * invalid(5), unknown(6)
11154 11201 */
11155 11202 ntme.ipv6NetToMediaState = ncec->ncec_state;
11156 11203 ntme.ipv6NetToMediaLastUpdated = 0;
11157 11204
11158 11205 /* other(1), dynamic(2), static(3), local(4) */
11159 11206 if (NCE_MYADDR(ncec)) {
11160 11207 ntme.ipv6NetToMediaType = 4;
11161 11208 } else if (ncec->ncec_flags & NCE_F_PUBLISH) {
11162 11209 ntme.ipv6NetToMediaType = 1; /* proxy */
11163 11210 } else if (ncec->ncec_flags & NCE_F_STATIC) {
11164 11211 ntme.ipv6NetToMediaType = 3;
11165 11212 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST)) {
11166 11213 ntme.ipv6NetToMediaType = 1;
11167 11214 } else {
11168 11215 ntme.ipv6NetToMediaType = 2;
11169 11216 }
11170 11217
11171 11218 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11172 11219 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11173 11220 ip1dbg(("ip_snmp_get2_v6_media: failed to allocate %u bytes\n",
11174 11221 (uint_t)sizeof (ntme)));
11175 11222 }
11176 11223 return (0);
11177 11224 }
11178 11225
11179 11226 int
11180 11227 nce2ace(ncec_t *ncec)
11181 11228 {
11182 11229 int flags = 0;
11183 11230
11184 11231 if (NCE_ISREACHABLE(ncec))
11185 11232 flags |= ACE_F_RESOLVED;
11186 11233 if (ncec->ncec_flags & NCE_F_AUTHORITY)
11187 11234 flags |= ACE_F_AUTHORITY;
11188 11235 if (ncec->ncec_flags & NCE_F_PUBLISH)
11189 11236 flags |= ACE_F_PUBLISH;
11190 11237 if ((ncec->ncec_flags & NCE_F_NONUD) != 0)
11191 11238 flags |= ACE_F_PERMANENT;
11192 11239 if (NCE_MYADDR(ncec))
11193 11240 flags |= (ACE_F_MYADDR | ACE_F_AUTHORITY);
11194 11241 if (ncec->ncec_flags & NCE_F_UNVERIFIED)
11195 11242 flags |= ACE_F_UNVERIFIED;
11198 11245 if (ncec->ncec_flags & NCE_F_DELAYED)
11199 11246 flags |= ACE_F_DELAYED;
11200 11247 return (flags);
11201 11248 }
11202 11249
11203 11250 /*
11204 11251 * ncec_walk routine to create ipNetToMediaEntryTable
11205 11252 */
11206 11253 static int
11207 11254 ip_snmp_get2_v4_media(ncec_t *ncec, iproutedata_t *ird)
11208 11255 {
11209 11256 ill_t *ill;
11210 11257 mib2_ipNetToMediaEntry_t ntme;
11211 11258 const char *name = "unknown";
11212 11259 ipaddr_t ncec_addr;
11213 11260
11214 11261 ill = ncec->ncec_ill;
11215 11262 if (ill->ill_isv6 || (ncec->ncec_flags & NCE_F_BCAST) ||
11216 11263 ill->ill_net_type == IRE_LOOPBACK)
11217 11264 return (0);
11218 11265
11219 11266 /* We report all IPMP groups on ncec_ill which is normally the upper. */
11220 11267 name = ill->ill_name;
11221 11268 /* Based on RFC 4293: other(1), inval(2), dyn(3), stat(4) */
11222 11269 if (NCE_MYADDR(ncec)) {
11223 11270 ntme.ipNetToMediaType = 4;
11224 11271 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST|NCE_F_PUBLISH)) {
11225 11272 ntme.ipNetToMediaType = 1;
11226 11273 } else {
11227 11274 ntme.ipNetToMediaType = 3;
11228 11275 }
11229 11276 ntme.ipNetToMediaIfIndex.o_length = MIN(OCTET_LENGTH, strlen(name));
11230 11277 bcopy(name, ntme.ipNetToMediaIfIndex.o_bytes,
11231 11278 ntme.ipNetToMediaIfIndex.o_length);
11232 11279
11233 11280 IN6_V4MAPPED_TO_IPADDR(&ncec->ncec_addr, ncec_addr);
11234 11281 bcopy(&ncec_addr, &ntme.ipNetToMediaNetAddress, sizeof (ncec_addr));
11235 11282
11236 11283 ntme.ipNetToMediaInfo.ntm_mask.o_length = sizeof (ipaddr_t);
11237 11284 ncec_addr = INADDR_BROADCAST;
11238 11285 bcopy(&ncec_addr, ntme.ipNetToMediaInfo.ntm_mask.o_bytes,
11239 11286 sizeof (ncec_addr));
11240 11287 /*
11241 11288 * map all the flags to the ACE counterpart.
11242 11289 */
11243 11290 ntme.ipNetToMediaInfo.ntm_flags = nce2ace(ncec);
11244 11291
11245 11292 ntme.ipNetToMediaPhysAddress.o_length =
11246 11293 MIN(OCTET_LENGTH, ill->ill_phys_addr_length);
11247 11294
11248 11295 if (!NCE_ISREACHABLE(ncec))
11249 11296 ntme.ipNetToMediaPhysAddress.o_length = 0;
11250 11297 else {
11251 11298 if (ncec->ncec_lladdr != NULL) {
11252 11299 bcopy(ncec->ncec_lladdr,
11253 11300 ntme.ipNetToMediaPhysAddress.o_bytes,
11254 11301 ntme.ipNetToMediaPhysAddress.o_length);
11255 11302 }
11256 11303 }
11257 11304
11258 11305 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11259 11306 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11260 11307 ip1dbg(("ip_snmp_get2_v4_media: failed to allocate %u bytes\n",
11261 11308 (uint_t)sizeof (ntme)));
11262 11309 }
11263 11310 return (0);
11264 11311 }
11265 11312
11266 11313 /*
11267 11314 	 * Return 0 if the set request is invalid, 1 otherwise (including non-tcp requests)
11268 11315 */
11269 11316 /* ARGSUSED */
11270 11317 int
11271 11318 ip_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
11272 11319 {
11273 11320 switch (level) {
11274 11321 case MIB2_IP:
11275 11322 case MIB2_ICMP:
11276 11323 switch (name) {
11277 11324 default:
11278 11325 break;
11279 11326 }
11280 11327 return (1);
11281 11328 default:
11282 11329 return (1);
11283 11330 }
11284 11331 }
11285 11332
11286 11333 /*
11287 11334 	 * When both a 64- and a 32-bit counter of a particular type exist
11288 11335 	 * (e.g., InReceives), only the 64-bit counter is added.
11289 11336 */
11290 11337 void
11291 11338 ip_mib2_add_ip_stats(mib2_ipIfStatsEntry_t *o1, mib2_ipIfStatsEntry_t *o2)
11292 11339 {
11293 11340 UPDATE_MIB(o1, ipIfStatsInHdrErrors, o2->ipIfStatsInHdrErrors);
11294 11341 UPDATE_MIB(o1, ipIfStatsInTooBigErrors, o2->ipIfStatsInTooBigErrors);
11295 11342 UPDATE_MIB(o1, ipIfStatsInNoRoutes, o2->ipIfStatsInNoRoutes);
11296 11343 UPDATE_MIB(o1, ipIfStatsInAddrErrors, o2->ipIfStatsInAddrErrors);
11297 11344 UPDATE_MIB(o1, ipIfStatsInUnknownProtos, o2->ipIfStatsInUnknownProtos);
11298 11345 UPDATE_MIB(o1, ipIfStatsInTruncatedPkts, o2->ipIfStatsInTruncatedPkts);
11299 11346 UPDATE_MIB(o1, ipIfStatsInDiscards, o2->ipIfStatsInDiscards);
11300 11347 UPDATE_MIB(o1, ipIfStatsOutDiscards, o2->ipIfStatsOutDiscards);
11301 11348 UPDATE_MIB(o1, ipIfStatsOutFragOKs, o2->ipIfStatsOutFragOKs);
11302 11349 UPDATE_MIB(o1, ipIfStatsOutFragFails, o2->ipIfStatsOutFragFails);
11303 11350 UPDATE_MIB(o1, ipIfStatsOutFragCreates, o2->ipIfStatsOutFragCreates);
11304 11351 UPDATE_MIB(o1, ipIfStatsReasmReqds, o2->ipIfStatsReasmReqds);
11305 11352 UPDATE_MIB(o1, ipIfStatsReasmOKs, o2->ipIfStatsReasmOKs);
11306 11353 UPDATE_MIB(o1, ipIfStatsReasmFails, o2->ipIfStatsReasmFails);
11307 11354 UPDATE_MIB(o1, ipIfStatsOutNoRoutes, o2->ipIfStatsOutNoRoutes);
11308 11355 UPDATE_MIB(o1, ipIfStatsReasmDuplicates, o2->ipIfStatsReasmDuplicates);
11309 11356 UPDATE_MIB(o1, ipIfStatsReasmPartDups, o2->ipIfStatsReasmPartDups);
11310 11357 UPDATE_MIB(o1, ipIfStatsForwProhibits, o2->ipIfStatsForwProhibits);
11311 11358 UPDATE_MIB(o1, udpInCksumErrs, o2->udpInCksumErrs);
11312 11359 UPDATE_MIB(o1, udpInOverflows, o2->udpInOverflows);
11313 11360 UPDATE_MIB(o1, rawipInOverflows, o2->rawipInOverflows);
11314 11361 UPDATE_MIB(o1, ipIfStatsInWrongIPVersion,
11315 11362 o2->ipIfStatsInWrongIPVersion);
11316 11363 UPDATE_MIB(o1, ipIfStatsOutWrongIPVersion,
11317 11364 o2->ipIfStatsInWrongIPVersion);
11318 11365 UPDATE_MIB(o1, ipIfStatsOutSwitchIPVersion,
11319 11366 o2->ipIfStatsOutSwitchIPVersion);
11320 11367 UPDATE_MIB(o1, ipIfStatsHCInReceives, o2->ipIfStatsHCInReceives);
11321 11368 UPDATE_MIB(o1, ipIfStatsHCInOctets, o2->ipIfStatsHCInOctets);
11322 11369 UPDATE_MIB(o1, ipIfStatsHCInForwDatagrams,
11323 11370 o2->ipIfStatsHCInForwDatagrams);
11324 11371 UPDATE_MIB(o1, ipIfStatsHCInDelivers, o2->ipIfStatsHCInDelivers);
11325 11372 UPDATE_MIB(o1, ipIfStatsHCOutRequests, o2->ipIfStatsHCOutRequests);
11326 11373 UPDATE_MIB(o1, ipIfStatsHCOutForwDatagrams,
11327 11374 o2->ipIfStatsHCOutForwDatagrams);
11328 11375 UPDATE_MIB(o1, ipIfStatsOutFragReqds, o2->ipIfStatsOutFragReqds);
11329 11376 UPDATE_MIB(o1, ipIfStatsHCOutTransmits, o2->ipIfStatsHCOutTransmits);
11330 11377 UPDATE_MIB(o1, ipIfStatsHCOutOctets, o2->ipIfStatsHCOutOctets);
11331 11378 UPDATE_MIB(o1, ipIfStatsHCInMcastPkts, o2->ipIfStatsHCInMcastPkts);
11332 11379 UPDATE_MIB(o1, ipIfStatsHCInMcastOctets, o2->ipIfStatsHCInMcastOctets);
11333 11380 UPDATE_MIB(o1, ipIfStatsHCOutMcastPkts, o2->ipIfStatsHCOutMcastPkts);
11334 11381 UPDATE_MIB(o1, ipIfStatsHCOutMcastOctets,
11335 11382 o2->ipIfStatsHCOutMcastOctets);
11336 11383 UPDATE_MIB(o1, ipIfStatsHCInBcastPkts, o2->ipIfStatsHCInBcastPkts);
11337 11384 UPDATE_MIB(o1, ipIfStatsHCOutBcastPkts, o2->ipIfStatsHCOutBcastPkts);
11338 11385 UPDATE_MIB(o1, ipsecInSucceeded, o2->ipsecInSucceeded);
11339 11386 UPDATE_MIB(o1, ipsecInFailed, o2->ipsecInFailed);
11340 11387 UPDATE_MIB(o1, ipInCksumErrs, o2->ipInCksumErrs);
11341 11388 UPDATE_MIB(o1, tcpInErrs, o2->tcpInErrs);
11342 11389 UPDATE_MIB(o1, udpNoPorts, o2->udpNoPorts);
11343 11390 }
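
/*
 * [Editorial sketch, not part of this change.] Why the routine above
 * folds in only the 64-bit variant when a counter exists in both
 * widths: the 32-bit twin is regenerated from the 64-bit value before
 * export (see the SYNC32_MIB() calls earlier), so summing both widths
 * would count the same packets twice. A standalone model of the
 * aggregation, with plain += standing in for the UPDATE_MIB() macro:
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t ill_a_hc = 10, ill_b_hc = 32;	/* per-ill 64-bit counts */
	uint64_t total_hc = 0;
	uint32_t total32;

	total_hc += ill_a_hc;	/* aggregate only the 64-bit counters... */
	total_hc += ill_b_hc;
	total32 = (uint32_t)total_hc;	/* ...then derive the 32-bit view */
	assert(total_hc == 42 && total32 == 42);
	return (0);
}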
11344 11391
11345 11392 void
11346 11393 ip_mib2_add_icmp6_stats(mib2_ipv6IfIcmpEntry_t *o1, mib2_ipv6IfIcmpEntry_t *o2)
11347 11394 {
11348 11395 UPDATE_MIB(o1, ipv6IfIcmpInMsgs, o2->ipv6IfIcmpInMsgs);
11349 11396 UPDATE_MIB(o1, ipv6IfIcmpInErrors, o2->ipv6IfIcmpInErrors);
11350 11397 UPDATE_MIB(o1, ipv6IfIcmpInDestUnreachs, o2->ipv6IfIcmpInDestUnreachs);
11351 11398 UPDATE_MIB(o1, ipv6IfIcmpInAdminProhibs, o2->ipv6IfIcmpInAdminProhibs);
11352 11399 UPDATE_MIB(o1, ipv6IfIcmpInTimeExcds, o2->ipv6IfIcmpInTimeExcds);
11353 11400 UPDATE_MIB(o1, ipv6IfIcmpInParmProblems, o2->ipv6IfIcmpInParmProblems);
11354 11401 UPDATE_MIB(o1, ipv6IfIcmpInPktTooBigs, o2->ipv6IfIcmpInPktTooBigs);
11355 11402 UPDATE_MIB(o1, ipv6IfIcmpInEchos, o2->ipv6IfIcmpInEchos);
11356 11403 UPDATE_MIB(o1, ipv6IfIcmpInEchoReplies, o2->ipv6IfIcmpInEchoReplies);
11357 11404 UPDATE_MIB(o1, ipv6IfIcmpInRouterSolicits,
11358 11405 o2->ipv6IfIcmpInRouterSolicits);
11359 11406 UPDATE_MIB(o1, ipv6IfIcmpInRouterAdvertisements,
11360 11407 o2->ipv6IfIcmpInRouterAdvertisements);
11361 11408 UPDATE_MIB(o1, ipv6IfIcmpInNeighborSolicits,
11362 11409 o2->ipv6IfIcmpInNeighborSolicits);
11363 11410 UPDATE_MIB(o1, ipv6IfIcmpInNeighborAdvertisements,
11364 11411 o2->ipv6IfIcmpInNeighborAdvertisements);
11365 11412 UPDATE_MIB(o1, ipv6IfIcmpInRedirects, o2->ipv6IfIcmpInRedirects);
11366 11413 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembQueries,
11367 11414 o2->ipv6IfIcmpInGroupMembQueries);
11368 11415 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembResponses,
11369 11416 o2->ipv6IfIcmpInGroupMembResponses);
11370 11417 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembReductions,
11371 11418 o2->ipv6IfIcmpInGroupMembReductions);
11372 11419 UPDATE_MIB(o1, ipv6IfIcmpOutMsgs, o2->ipv6IfIcmpOutMsgs);
11373 11420 UPDATE_MIB(o1, ipv6IfIcmpOutErrors, o2->ipv6IfIcmpOutErrors);
11374 11421 UPDATE_MIB(o1, ipv6IfIcmpOutDestUnreachs,
11375 11422 o2->ipv6IfIcmpOutDestUnreachs);
11376 11423 UPDATE_MIB(o1, ipv6IfIcmpOutAdminProhibs,
11377 11424 o2->ipv6IfIcmpOutAdminProhibs);
11378 11425 UPDATE_MIB(o1, ipv6IfIcmpOutTimeExcds, o2->ipv6IfIcmpOutTimeExcds);
11379 11426 UPDATE_MIB(o1, ipv6IfIcmpOutParmProblems,
11380 11427 o2->ipv6IfIcmpOutParmProblems);
11381 11428 UPDATE_MIB(o1, ipv6IfIcmpOutPktTooBigs, o2->ipv6IfIcmpOutPktTooBigs);
11382 11429 UPDATE_MIB(o1, ipv6IfIcmpOutEchos, o2->ipv6IfIcmpOutEchos);
11383 11430 UPDATE_MIB(o1, ipv6IfIcmpOutEchoReplies, o2->ipv6IfIcmpOutEchoReplies);
11384 11431 UPDATE_MIB(o1, ipv6IfIcmpOutRouterSolicits,
11385 11432 o2->ipv6IfIcmpOutRouterSolicits);
11386 11433 UPDATE_MIB(o1, ipv6IfIcmpOutRouterAdvertisements,
11387 11434 o2->ipv6IfIcmpOutRouterAdvertisements);
11388 11435 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborSolicits,
11389 11436 o2->ipv6IfIcmpOutNeighborSolicits);
11390 11437 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborAdvertisements,
11391 11438 o2->ipv6IfIcmpOutNeighborAdvertisements);
11392 11439 UPDATE_MIB(o1, ipv6IfIcmpOutRedirects, o2->ipv6IfIcmpOutRedirects);
11393 11440 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembQueries,
11394 11441 o2->ipv6IfIcmpOutGroupMembQueries);
11395 11442 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembResponses,
11396 11443 o2->ipv6IfIcmpOutGroupMembResponses);
11397 11444 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembReductions,
11398 11445 o2->ipv6IfIcmpOutGroupMembReductions);
11399 11446 UPDATE_MIB(o1, ipv6IfIcmpInOverflows, o2->ipv6IfIcmpInOverflows);
11400 11447 UPDATE_MIB(o1, ipv6IfIcmpBadHoplimit, o2->ipv6IfIcmpBadHoplimit);
11401 11448 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborAdvertisements,
11402 11449 o2->ipv6IfIcmpInBadNeighborAdvertisements);
11403 11450 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborSolicitations,
11404 11451 o2->ipv6IfIcmpInBadNeighborSolicitations);
11405 11452 UPDATE_MIB(o1, ipv6IfIcmpInBadRedirects, o2->ipv6IfIcmpInBadRedirects);
11406 11453 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembTotal,
11407 11454 o2->ipv6IfIcmpInGroupMembTotal);
11408 11455 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadQueries,
11409 11456 o2->ipv6IfIcmpInGroupMembBadQueries);
11410 11457 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadReports,
11411 11458 o2->ipv6IfIcmpInGroupMembBadReports);
11412 11459 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembOurReports,
11413 11460 o2->ipv6IfIcmpInGroupMembOurReports);
11414 11461 }
11415 11462
11416 11463 /*
11417 11464 * Called before the options are updated to check if this packet will
11418 11465 * be source routed from here.
11419 11466 * This routine assumes that the options are well formed i.e. that they
11420 11467 * have already been checked.
11421 11468 */
11422 11469 boolean_t
11423 11470 ip_source_routed(ipha_t *ipha, ip_stack_t *ipst)
11424 11471 {
11425 11472 ipoptp_t opts;
11426 11473 uchar_t *opt;
11427 11474 uint8_t optval;
11428 11475 uint8_t optlen;
11429 11476 ipaddr_t dst;
11430 11477
11431 11478 if (IS_SIMPLE_IPH(ipha)) {
11432 11479 ip2dbg(("not source routed\n"));
11433 11480 return (B_FALSE);
11434 11481 }
11435 11482 dst = ipha->ipha_dst;
11436 11483 for (optval = ipoptp_first(&opts, ipha);
11437 11484 optval != IPOPT_EOL;
11438 11485 optval = ipoptp_next(&opts)) {
11439 11486 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11440 11487 opt = opts.ipoptp_cur;
11441 11488 optlen = opts.ipoptp_len;
11442 11489 ip2dbg(("ip_source_routed: opt %d, len %d\n",
11443 11490 optval, optlen));
11444 11491 switch (optval) {
11445 11492 uint32_t off;
11446 11493 case IPOPT_SSRR:
11447 11494 case IPOPT_LSRR:
11448 11495 /*
11449 11496 * If dst is one of our addresses and there are some
11450 11497 * entries left in the source route return (true).
11451 11498 */
11452 11499 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
11453 11500 ip2dbg(("ip_source_routed: not next"
11454 11501 " source route 0x%x\n",
11455 11502 ntohl(dst)));
11456 11503 return (B_FALSE);
11457 11504 }
11458 11505 off = opt[IPOPT_OFFSET];
11459 11506 off--;
11460 11507 if (optlen < IP_ADDR_LEN ||
11461 11508 off > optlen - IP_ADDR_LEN) {
11462 11509 /* End of source route */
11463 11510 ip1dbg(("ip_source_routed: end of SR\n"));
11464 11511 return (B_FALSE);
11465 11512 }
11466 11513 return (B_TRUE);
11467 11514 }
11468 11515 }
11469 11516 ip2dbg(("not source routed\n"));
11470 11517 return (B_FALSE);
11471 11518 }
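
/*
 * [Editorial sketch, not part of this change.] The "entries left" test
 * in ip_source_routed() above relies on the RFC 791 option layout:
 * type octet, length octet, a 1-based pointer octet, then 4-byte
 * addresses. A standalone walk over a hand-built LSRR option, showing
 * where the off/optlen arithmetic declares the route exhausted:
 */
#include <stdio.h>

#define	IP_ADDR_LEN	4
#define	IPOPT_OFFSET	2	/* index of the 1-based pointer octet */

int
main(void)
{
	/* LSRR (type 0x83), length 11: two hops, pointer starts at 4 */
	unsigned char opt[] = { 0x83, 11, 4,
	    192, 168, 1, 1,		/* first remaining hop */
	    10, 0, 0, 1 };		/* second remaining hop */
	int optlen = opt[1];
	int ptr;

	for (ptr = opt[IPOPT_OFFSET]; ; ptr += IP_ADDR_LEN) {
		int off = ptr - 1;	/* 1-based pointer -> array index */

		if (optlen < IP_ADDR_LEN || off > optlen - IP_ADDR_LEN) {
			(void) printf("pointer %d: end of source route\n",
			    ptr);
			break;
		}
		(void) printf("pointer %d: next hop %d.%d.%d.%d\n", ptr,
		    opt[off], opt[off + 1], opt[off + 2], opt[off + 3]);
	}
	return (0);
}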
11472 11519
11473 11520 /*
11474 11521 * ip_unbind is called by the transports to remove a conn from
11475 11522 * the fanout table.
11476 11523 */
11477 11524 void
11478 11525 ip_unbind(conn_t *connp)
11479 11526 {
11480 11527
11481 11528 ASSERT(!MUTEX_HELD(&connp->conn_lock));
11482 11529
11483 11530 if (is_system_labeled() && connp->conn_anon_port) {
11484 11531 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
11485 11532 connp->conn_mlp_type, connp->conn_proto,
11486 11533 ntohs(connp->conn_lport), B_FALSE);
11487 11534 connp->conn_anon_port = 0;
11488 11535 }
11489 11536 connp->conn_mlp_type = mlptSingle;
11490 11537
11491 11538 ipcl_hash_remove(connp);
11492 11539 }
11493 11540
11494 11541 /*
11495 11542 	 * Used for deciding the MSS for the upper layer. Thus
11496 11543 * we need to check the outbound policy values in the conn.
11497 11544 */
11498 11545 int
11499 11546 conn_ipsec_length(conn_t *connp)
11500 11547 {
11501 11548 ipsec_latch_t *ipl;
11502 11549
11503 11550 ipl = connp->conn_latch;
11504 11551 if (ipl == NULL)
11505 11552 return (0);
11506 11553
11507 11554 if (connp->conn_ixa->ixa_ipsec_policy == NULL)
11508 11555 return (0);
11509 11556
11510 11557 return (connp->conn_ixa->ixa_ipsec_policy->ipsp_act->ipa_ovhd);
11511 11558 }
11512 11559
11513 11560 /*
11514 11561 * Returns an estimate of the IPsec headers size. This is used if
11515 11562 * we don't want to call into IPsec to get the exact size.
11516 11563 */
11517 11564 int
11518 11565 ipsec_out_extra_length(ip_xmit_attr_t *ixa)
11519 11566 {
11520 11567 ipsec_action_t *a;
11521 11568
11522 11569 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
11523 11570 return (0);
11524 11571
11525 11572 a = ixa->ixa_ipsec_action;
11526 11573 if (a == NULL) {
11527 11574 ASSERT(ixa->ixa_ipsec_policy != NULL);
11528 11575 a = ixa->ixa_ipsec_policy->ipsp_act;
11529 11576 }
11530 11577 ASSERT(a != NULL);
11531 11578
11532 11579 return (a->ipa_ovhd);
11533 11580 }
11534 11581
11535 11582 /*
11536 11583 * If there are any source route options, return the true final
11537 11584 * destination. Otherwise, return the destination.
11538 11585 */
11539 11586 ipaddr_t
11540 11587 ip_get_dst(ipha_t *ipha)
11541 11588 {
11542 11589 ipoptp_t opts;
11543 11590 uchar_t *opt;
11544 11591 uint8_t optval;
11545 11592 uint8_t optlen;
11546 11593 ipaddr_t dst;
11547 11594 uint32_t off;
11548 11595
11549 11596 dst = ipha->ipha_dst;
11550 11597
11551 11598 if (IS_SIMPLE_IPH(ipha))
11552 11599 return (dst);
11553 11600
11554 11601 for (optval = ipoptp_first(&opts, ipha);
11555 11602 optval != IPOPT_EOL;
11556 11603 optval = ipoptp_next(&opts)) {
11557 11604 opt = opts.ipoptp_cur;
11558 11605 optlen = opts.ipoptp_len;
11559 11606 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11560 11607 switch (optval) {
11561 11608 case IPOPT_SSRR:
11562 11609 case IPOPT_LSRR:
11563 11610 off = opt[IPOPT_OFFSET];
11564 11611 /*
11565 11612 * If one of the conditions is true, it means
11566 11613 * end of options and dst already has the right
11567 11614 * value.
11568 11615 */
11569 11616 if (!(optlen < IP_ADDR_LEN || off > optlen - 3)) {
11570 11617 off = optlen - IP_ADDR_LEN;
11571 11618 bcopy(&opt[off], &dst, IP_ADDR_LEN);
11572 11619 }
11573 11620 return (dst);
11574 11621 default:
11575 11622 break;
11576 11623 }
11577 11624 }
11578 11625
11579 11626 return (dst);
11580 11627 }
11581 11628
11582 11629 /*
11583 11630 * Outbound IP fragmentation routine.
11584 11631 * Assumes the caller has checked whether or not fragmentation should
11585 11632 * be allowed. Here we copy the DF bit from the header to all the generated
11586 11633 * fragments.
11587 11634 */
11588 11635 int
11589 11636 ip_fragment_v4(mblk_t *mp_orig, nce_t *nce, iaflags_t ixaflags,
11590 11637 uint_t pkt_len, uint32_t max_frag, uint32_t xmit_hint, zoneid_t szone,
11591 11638 zoneid_t nolzid, pfirepostfrag_t postfragfn, uintptr_t *ixa_cookie)
11592 11639 {
11593 11640 int i1;
11594 11641 int hdr_len;
11595 11642 mblk_t *hdr_mp;
11596 11643 ipha_t *ipha;
11597 11644 int ip_data_end;
11598 11645 int len;
11599 11646 mblk_t *mp = mp_orig;
11600 11647 int offset;
11601 11648 ill_t *ill = nce->nce_ill;
11602 11649 ip_stack_t *ipst = ill->ill_ipst;
11603 11650 mblk_t *carve_mp;
11604 11651 uint32_t frag_flag;
11605 11652 uint_t priority = mp->b_band;
11606 11653 int error = 0;
11607 11654
11608 11655 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragReqds);
11609 11656
11610 11657 if (pkt_len != msgdsize(mp)) {
11611 11658 ip0dbg(("Packet length mismatch: %d, %ld\n",
11612 11659 pkt_len, msgdsize(mp)));
11613 11660 freemsg(mp);
11614 11661 return (EINVAL);
11615 11662 }
11616 11663
11617 11664 if (max_frag == 0) {
11618 11665 ip1dbg(("ip_fragment_v4: max_frag is zero. Dropping packet\n"));
11619 11666 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11620 11667 ip_drop_output("FragFails: zero max_frag", mp, ill);
11621 11668 freemsg(mp);
11622 11669 return (EINVAL);
11623 11670 }
11624 11671
11625 11672 ASSERT(MBLKL(mp) >= sizeof (ipha_t));
11626 11673 ipha = (ipha_t *)mp->b_rptr;
11627 11674 ASSERT(ntohs(ipha->ipha_length) == pkt_len);
11628 11675 frag_flag = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_DF;
11629 11676
11630 11677 /*
11631 11678 * Establish the starting offset. May not be zero if we are fragging
11632 11679 * a fragment that is being forwarded.
11633 11680 */
11634 11681 offset = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET;
11635 11682
11636 11683 /* TODO why is this test needed? */
11637 11684 if (((max_frag - ntohs(ipha->ipha_length)) & ~7) < 8) {
11638 11685 /* TODO: notify ulp somehow */
11639 11686 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11640 11687 ip_drop_output("FragFails: bad starting offset", mp, ill);
11641 11688 freemsg(mp);
11642 11689 return (EINVAL);
11643 11690 }
11644 11691
11645 11692 hdr_len = IPH_HDR_LENGTH(ipha);
11646 11693 ipha->ipha_hdr_checksum = 0;
11647 11694
11648 11695 /*
11649 11696 * Establish the number of bytes maximum per frag, after putting
11650 11697 * in the header.
11651 11698 */
11652 11699 len = (max_frag - hdr_len) & ~7;
11653 11700
11654 11701 /* Get a copy of the header for the trailing frags */
11655 11702 hdr_mp = ip_fragment_copyhdr((uchar_t *)ipha, hdr_len, offset, ipst,
11656 11703 mp);
11657 11704 if (hdr_mp == NULL) {
11658 11705 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11659 11706 ip_drop_output("FragFails: no hdr_mp", mp, ill);
11660 11707 freemsg(mp);
11661 11708 return (ENOBUFS);
11662 11709 }
11663 11710
11664 11711 /* Store the starting offset, with the MoreFrags flag. */
11665 11712 i1 = offset | IPH_MF | frag_flag;
11666 11713 ipha->ipha_fragment_offset_and_flags = htons((uint16_t)i1);
11667 11714
11668 11715 /* Establish the ending byte offset, based on the starting offset. */
11669 11716 offset <<= 3;
11670 11717 ip_data_end = offset + ntohs(ipha->ipha_length) - hdr_len;
11671 11718
11672 11719 /* Store the length of the first fragment in the IP header. */
11673 11720 i1 = len + hdr_len;
11674 11721 ASSERT(i1 <= IP_MAXPACKET);
11675 11722 ipha->ipha_length = htons((uint16_t)i1);
11676 11723
11677 11724 /*
11678 11725 * Compute the IP header checksum for the first frag. We have to
11679 11726 * watch out that we stop at the end of the header.
11680 11727 */
11681 11728 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11682 11729
11683 11730 /*
11684 11731 * Now carve off the first frag. Note that this will include the
11685 11732 * original IP header.
11686 11733 */
11687 11734 if (!(mp = ip_carve_mp(&mp_orig, i1))) {
11688 11735 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11689 11736 ip_drop_output("FragFails: could not carve mp", mp_orig, ill);
11690 11737 freeb(hdr_mp);
11691 11738 freemsg(mp_orig);
11692 11739 return (ENOBUFS);
11693 11740 }
11694 11741
11695 11742 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11696 11743
11697 11744 error = postfragfn(mp, nce, ixaflags, i1, xmit_hint, szone, nolzid,
11698 11745 ixa_cookie);
11699 11746 if (error != 0 && error != EWOULDBLOCK) {
11700 11747 /* No point in sending the other fragments */
11701 11748 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11702 11749 ip_drop_output("FragFails: postfragfn failed", mp_orig, ill);
11703 11750 freeb(hdr_mp);
11704 11751 freemsg(mp_orig);
11705 11752 return (error);
11706 11753 }
11707 11754
11708 11755 /* No need to redo state machine in loop */
11709 11756 ixaflags &= ~IXAF_REACH_CONF;
11710 11757
11711 11758 /* Advance the offset to the second frag starting point. */
11712 11759 offset += len;
11713 11760 /*
11714 11761 	 * Update hdr_len from the copied header - there might be fewer
11715 11762 	 * options in the later fragments.
11716 11763 */
11717 11764 hdr_len = IPH_HDR_LENGTH(hdr_mp->b_rptr);
11718 11765 /* Loop until done. */
11719 11766 for (;;) {
11720 11767 uint16_t offset_and_flags;
11721 11768 uint16_t ip_len;
11722 11769
11723 11770 if (ip_data_end - offset > len) {
11724 11771 /*
11725 11772 * Carve off the appropriate amount from the original
11726 11773 * datagram.
11727 11774 */
11728 11775 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11729 11776 mp = NULL;
11730 11777 break;
11731 11778 }
11732 11779 /*
11733 11780 * More frags after this one. Get another copy
11734 11781 * of the header.
11735 11782 */
11736 11783 if (carve_mp->b_datap->db_ref == 1 &&
11737 11784 hdr_mp->b_wptr - hdr_mp->b_rptr <
11738 11785 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11739 11786 /* Inline IP header */
11740 11787 carve_mp->b_rptr -= hdr_mp->b_wptr -
11741 11788 hdr_mp->b_rptr;
11742 11789 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11743 11790 hdr_mp->b_wptr - hdr_mp->b_rptr);
11744 11791 mp = carve_mp;
11745 11792 } else {
11746 11793 if (!(mp = copyb(hdr_mp))) {
11747 11794 freemsg(carve_mp);
11748 11795 break;
11749 11796 }
11750 11797 /* Get priority marking, if any. */
11751 11798 mp->b_band = priority;
11752 11799 mp->b_cont = carve_mp;
11753 11800 }
11754 11801 ipha = (ipha_t *)mp->b_rptr;
11755 11802 offset_and_flags = IPH_MF;
11756 11803 } else {
11757 11804 /*
11758 11805 * Last frag. Consume the header. Set len to
11759 11806 * the length of this last piece.
11760 11807 */
11761 11808 len = ip_data_end - offset;
11762 11809
11763 11810 /*
11764 11811 * Carve off the appropriate amount from the original
11765 11812 * datagram.
11766 11813 */
11767 11814 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11768 11815 mp = NULL;
11769 11816 break;
11770 11817 }
11771 11818 if (carve_mp->b_datap->db_ref == 1 &&
11772 11819 hdr_mp->b_wptr - hdr_mp->b_rptr <
11773 11820 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11774 11821 /* Inline IP header */
11775 11822 carve_mp->b_rptr -= hdr_mp->b_wptr -
11776 11823 hdr_mp->b_rptr;
11777 11824 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11778 11825 hdr_mp->b_wptr - hdr_mp->b_rptr);
11779 11826 mp = carve_mp;
11780 11827 freeb(hdr_mp);
11781 11828 hdr_mp = mp;
11782 11829 } else {
11783 11830 mp = hdr_mp;
11784 11831 /* Get priority marking, if any. */
11785 11832 mp->b_band = priority;
11786 11833 mp->b_cont = carve_mp;
11787 11834 }
11788 11835 ipha = (ipha_t *)mp->b_rptr;
11789 11836 /* A frag of a frag might have IPH_MF non-zero */
11790 11837 offset_and_flags =
11791 11838 ntohs(ipha->ipha_fragment_offset_and_flags) &
11792 11839 IPH_MF;
11793 11840 }
11794 11841 offset_and_flags |= (uint16_t)(offset >> 3);
11795 11842 offset_and_flags |= (uint16_t)frag_flag;
11796 11843 /* Store the offset and flags in the IP header. */
11797 11844 ipha->ipha_fragment_offset_and_flags = htons(offset_and_flags);
11798 11845
11799 11846 /* Store the length in the IP header. */
11800 11847 ip_len = (uint16_t)(len + hdr_len);
11801 11848 ipha->ipha_length = htons(ip_len);
11802 11849
11803 11850 /*
11804 11851 * Set the IP header checksum. Note that mp is just
11805 11852 * the header, so this is easy to pass to ip_csum.
11806 11853 */
11807 11854 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11808 11855
11809 11856 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11810 11857
11811 11858 error = postfragfn(mp, nce, ixaflags, ip_len, xmit_hint, szone,
11812 11859 nolzid, ixa_cookie);
11813 11860 /* All done if we just consumed the hdr_mp. */
11814 11861 if (mp == hdr_mp) {
11815 11862 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragOKs);
11816 11863 return (error);
11817 11864 }
11818 11865 if (error != 0 && error != EWOULDBLOCK) {
11819 11866 DTRACE_PROBE2(ip__xmit__frag__fail, ill_t *, ill,
11820 11867 mblk_t *, hdr_mp);
11821 11868 /* No point in sending the other fragments */
11822 11869 break;
11823 11870 }
11824 11871
11825 11872 /* Otherwise, advance and loop. */
11826 11873 offset += len;
11827 11874 }
11828 11875 /* Clean up following allocation failure. */
11829 11876 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11830 11877 ip_drop_output("FragFails: loop ended", NULL, ill);
11831 11878 if (mp != hdr_mp)
11832 11879 freeb(hdr_mp);
11833 11880 if (mp != mp_orig)
11834 11881 freemsg(mp_orig);
11835 11882 return (error);
11836 11883 }
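
/*
 * [Editorial sketch, not part of this change.] The size/offset
 * arithmetic in ip_fragment_v4() above: each non-final fragment
 * carries (max_frag - hdr_len) & ~7 payload bytes, because the IPv4
 * fragment-offset field counts 8-byte units, and the offset is stored
 * shifted down by 3. A standalone walk for a 2000-byte datagram
 * leaving a 1500-byte MTU interface:
 */
#include <stdio.h>

int
main(void)
{
	int pkt_len = 2000;	/* whole IP datagram, header included */
	int hdr_len = 20;	/* simple header, no options */
	int max_frag = 1500;	/* outgoing MTU */
	int len = (max_frag - hdr_len) & ~7;	/* per-fragment payload */
	int data_end = pkt_len - hdr_len;	/* payload bytes to move */
	int offset;

	for (offset = 0; offset < data_end; offset += len) {
		int this_len = (data_end - offset > len) ?
		    len : data_end - offset;

		/* offset field is in 8-byte units; MF set on all but last */
		(void) printf("fragment: offset field %d, payload %d, MF %d\n",
		    offset >> 3, this_len, offset + this_len < data_end);
	}
	return (0);
}

/*
 * Prints two fragments: offset field 0 carrying 1480 payload bytes with
 * MF set, then offset field 185 (185 * 8 = 1480) carrying the final 500
 * bytes with MF clear.
 */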
11837 11884
11838 11885 /*
11839 11886 * Copy the header plus those options which have the copy bit set
11840 11887 */
11841 11888 static mblk_t *
11842 11889 ip_fragment_copyhdr(uchar_t *rptr, int hdr_len, int offset, ip_stack_t *ipst,
11843 11890 mblk_t *src)
11844 11891 {
11845 11892 mblk_t *mp;
11846 11893 uchar_t *up;
11847 11894
11848 11895 /*
11849 11896 * Quick check if we need to look for options without the copy bit
11850 11897 * set
11851 11898 */
11852 11899 mp = allocb_tmpl(ipst->ips_ip_wroff_extra + hdr_len, src);
11853 11900 if (!mp)
11854 11901 return (mp);
11855 11902 mp->b_rptr += ipst->ips_ip_wroff_extra;
11856 11903 if (hdr_len == IP_SIMPLE_HDR_LENGTH || offset != 0) {
11857 11904 bcopy(rptr, mp->b_rptr, hdr_len);
11858 11905 mp->b_wptr += hdr_len + ipst->ips_ip_wroff_extra;
11859 11906 return (mp);
11860 11907 }
11861 11908 up = mp->b_rptr;
11862 11909 bcopy(rptr, up, IP_SIMPLE_HDR_LENGTH);
11863 11910 up += IP_SIMPLE_HDR_LENGTH;
11864 11911 rptr += IP_SIMPLE_HDR_LENGTH;
11865 11912 hdr_len -= IP_SIMPLE_HDR_LENGTH;
11866 11913 while (hdr_len > 0) {
11867 11914 uint32_t optval;
11868 11915 uint32_t optlen;
11869 11916
11870 11917 optval = *rptr;
11871 11918 if (optval == IPOPT_EOL)
11872 11919 break;
11873 11920 if (optval == IPOPT_NOP)
11874 11921 optlen = 1;
11875 11922 else
11876 11923 optlen = rptr[1];
11877 11924 if (optval & IPOPT_COPY) {
11878 11925 bcopy(rptr, up, optlen);
11879 11926 up += optlen;
11880 11927 }
11881 11928 rptr += optlen;
11882 11929 hdr_len -= optlen;
11883 11930 }
11884 11931 /*
11885 11932 * Make sure that we drop an even number of words by filling
11886 11933 * with EOL to the next word boundary.
11887 11934 */
11888 11935 for (hdr_len = up - (mp->b_rptr + IP_SIMPLE_HDR_LENGTH);
11889 11936 hdr_len & 0x3; hdr_len++)
11890 11937 *up++ = IPOPT_EOL;
11891 11938 mp->b_wptr = up;
11892 11939 /* Update header length */
11893 11940 mp->b_rptr[0] = (uint8_t)((IP_VERSION << 4) | ((up - mp->b_rptr) >> 2));
11894 11941 return (mp);
11895 11942 }
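
/*
 * [Editorial sketch, not part of this change.] ip_fragment_copyhdr()
 * above propagates an option into trailing fragments only when its
 * copied flag - the high bit of the type octet, per RFC 791 - is set,
 * which is why later fragments can end up with fewer options than the
 * first. A standalone check of that flag for two familiar options:
 */
#include <stdio.h>

#define	IPOPT_COPY	0x80	/* "copied" flag in the option type octet */

int
main(void)
{
	/* LSRR (0x83) has the copy bit set; Record Route (0x07) does not */
	unsigned char opts[] = { 0x83, 0x07 };
	int i;

	for (i = 0; i < 2; i++) {
		(void) printf("option 0x%02x %s copied into fragments\n",
		    (unsigned int)opts[i],
		    (opts[i] & IPOPT_COPY) ? "is" : "is not");
	}
	return (0);
}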
11896 11943
11897 11944 /*
11898 11945 * Update any source route, record route, or timestamp options when
11899 11946 * sending a packet back to ourselves.
11900 11947 * Check that we are at end of strict source route.
11901 11948 * The options have been sanity checked by ip_output_options().
11902 11949 */
11903 11950 void
11904 11951 ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst)
11905 11952 {
11906 11953 ipoptp_t opts;
11907 11954 uchar_t *opt;
11908 11955 uint8_t optval;
11909 11956 uint8_t optlen;
11910 11957 ipaddr_t dst;
11911 11958 uint32_t ts;
11912 11959 timestruc_t now;
11913 11960
11914 11961 for (optval = ipoptp_first(&opts, ipha);
11915 11962 optval != IPOPT_EOL;
11916 11963 optval = ipoptp_next(&opts)) {
11917 11964 opt = opts.ipoptp_cur;
11918 11965 optlen = opts.ipoptp_len;
11919 11966 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11920 11967 switch (optval) {
11921 11968 uint32_t off;
11922 11969 case IPOPT_SSRR:
11923 11970 case IPOPT_LSRR:
11924 11971 off = opt[IPOPT_OFFSET];
11925 11972 off--;
11926 11973 if (optlen < IP_ADDR_LEN ||
11927 11974 off > optlen - IP_ADDR_LEN) {
11928 11975 /* End of source route */
11929 11976 break;
11930 11977 }
11931 11978 /*
11932 11979 			 * This will only happen if two consecutive entries
11933 11980 			 * in the source route contain our address, or if
11934 11981 			 * the packet carries a loose source route that
11935 11982 			 * reaches us before the whole route is consumed.
11936 11983 */
11937 11984
11938 11985 if (optval == IPOPT_SSRR) {
11939 11986 return;
11940 11987 }
11941 11988 /*
11942 11989 			 * Hack: instead of dropping the packet, truncate the
11943 11990 * source route to what has been used by filling the
11944 11991 * rest with IPOPT_NOP.
11945 11992 */
11946 11993 opt[IPOPT_OLEN] = (uint8_t)off;
11947 11994 while (off < optlen) {
11948 11995 opt[off++] = IPOPT_NOP;
11949 11996 }
11950 11997 break;
11951 11998 case IPOPT_RR:
11952 11999 off = opt[IPOPT_OFFSET];
11953 12000 off--;
11954 12001 if (optlen < IP_ADDR_LEN ||
11955 12002 off > optlen - IP_ADDR_LEN) {
11956 12003 /* No more room - ignore */
11957 12004 ip1dbg((
11958 12005 "ip_output_local_options: end of RR\n"));
11959 12006 break;
11960 12007 }
11961 12008 dst = htonl(INADDR_LOOPBACK);
11962 12009 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
11963 12010 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
11964 12011 break;
11965 12012 case IPOPT_TS:
11966 12013 		/* Insert timestamp if there is room */
11967 12014 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
11968 12015 case IPOPT_TS_TSONLY:
11969 12016 off = IPOPT_TS_TIMELEN;
11970 12017 break;
11971 12018 case IPOPT_TS_PRESPEC:
11972 12019 case IPOPT_TS_PRESPEC_RFC791:
11973 12020 			/* Verify that the address matches */
11974 12021 off = opt[IPOPT_OFFSET] - 1;
11975 12022 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
11976 12023 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
11977 12024 /* Not for us */
11978 12025 break;
11979 12026 }
11980 12027 /* FALLTHRU */
11981 12028 case IPOPT_TS_TSANDADDR:
11982 12029 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
11983 12030 break;
11984 12031 default:
11985 12032 /*
11986 12033 * ip_*put_options should have already
11987 12034 * dropped this packet.
11988 12035 */
11989 12036 cmn_err(CE_PANIC, "ip_output_local_options: "
11990 12037 "unknown IT - bug in ip_output_options?\n");
11991 12038 return; /* Keep "lint" happy */
11992 12039 }
11993 12040 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
11994 12041 /* Increase overflow counter */
11995 12042 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
11996 12043 opt[IPOPT_POS_OV_FLG] = (uint8_t)
11997 12044 (opt[IPOPT_POS_OV_FLG] & 0x0F) |
11998 12045 (off << 4);
11999 12046 break;
12000 12047 }
12001 12048 off = opt[IPOPT_OFFSET] - 1;
12002 12049 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
12003 12050 case IPOPT_TS_PRESPEC:
12004 12051 case IPOPT_TS_PRESPEC_RFC791:
12005 12052 case IPOPT_TS_TSANDADDR:
12006 12053 dst = htonl(INADDR_LOOPBACK);
12007 12054 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
12008 12055 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
12009 12056 /* FALLTHRU */
12010 12057 case IPOPT_TS_TSONLY:
12011 12058 off = opt[IPOPT_OFFSET] - 1;
12012 12059 /* Compute # of milliseconds since midnight */
12013 12060 gethrestime(&now);
12014 12061 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
12015 12062 now.tv_nsec / (NANOSEC / MILLISEC);
12016 12063 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
12017 12064 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
12018 12065 break;
12019 12066 }
12020 12067 break;
12021 12068 }
12022 12069 }
12023 12070 }
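Editorial aside: the IPOPT_TS_TSONLY arm above stores the RFC 791 timestamp,
i.e. milliseconds since midnight UT, computed from gethrestime(). A minimal
user-level sketch of the same computation (standard C, no kernel
dependencies; the function name is ours):

	#include <stdio.h>
	#include <time.h>

	/* Milliseconds since midnight UT, as used by the IP timestamp option. */
	static unsigned int
	ms_since_midnight(void)
	{
		struct timespec now;

		(void) clock_gettime(CLOCK_REALTIME, &now);
		return ((now.tv_sec % (24 * 60 * 60)) * 1000 +
		    now.tv_nsec / 1000000);
	}

	int
	main(void)
	{
		(void) printf("%u ms since midnight UT\n", ms_since_midnight());
		return (0);
	}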
12024 12071
12025 12072 /*
12026 12073 * Prepend an M_DATA fastpath header, and if none present prepend a
12027 12074 * DL_UNITDATA_REQ. Frees the mblk on failure.
12028 12075 *
12029 12076 * nce_dlur_mp and nce_fp_mp can not disappear once they have been set.
12030 12077 * If there is a change to them, the nce will be deleted (condemned) and
12031 12078 * a new nce_t will be created when packets are sent. Thus we need no locks
12032 12079 * to access those fields.
12033 12080 *
12034 12081 * We preserve b_band to support IPQoS. If a DL_UNITDATA_REQ is prepended
12035 12082 * we place b_band in dl_priority.dl_max.
12036 12083 */
12037 12084 static mblk_t *
12038 12085 ip_xmit_attach_llhdr(mblk_t *mp, nce_t *nce)
12039 12086 {
12040 12087 uint_t hlen;
12041 12088 mblk_t *mp1;
12042 12089 uint_t priority;
12043 12090 uchar_t *rptr;
12044 12091
12045 12092 rptr = mp->b_rptr;
12046 12093
12047 12094 ASSERT(DB_TYPE(mp) == M_DATA);
12048 12095 priority = mp->b_band;
12049 12096
12050 12097 ASSERT(nce != NULL);
12051 12098 if ((mp1 = nce->nce_fp_mp) != NULL) {
12052 12099 hlen = MBLKL(mp1);
12053 12100 /*
12054 12101 * Check if we have enough room to prepend fastpath
12055 12102 * header
12056 12103 */
12057 12104 if (hlen != 0 && (rptr - mp->b_datap->db_base) >= hlen) {
12058 12105 rptr -= hlen;
12059 12106 bcopy(mp1->b_rptr, rptr, hlen);
12060 12107 /*
12061 12108 * Set the b_rptr to the start of the link layer
12062 12109 * header
12063 12110 */
12064 12111 mp->b_rptr = rptr;
12065 12112 return (mp);
12066 12113 }
12067 12114 mp1 = copyb(mp1);
12068 12115 if (mp1 == NULL) {
12069 12116 ill_t *ill = nce->nce_ill;
12070 12117
12071 12118 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12072 12119 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12073 12120 freemsg(mp);
12074 12121 return (NULL);
12075 12122 }
12076 12123 mp1->b_band = priority;
12077 12124 mp1->b_cont = mp;
12078 12125 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
12079 12126 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
12080 12127 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
12081 12128 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
12082 12129 DB_LSOMSS(mp1) = DB_LSOMSS(mp);
12083 12130 DTRACE_PROBE1(ip__xmit__copyb, (mblk_t *), mp1);
12084 12131 /*
12085 12132 * XXX disable ICK_VALID and compute checksum
12086 12133 * here; can happen if nce_fp_mp changes and
12087 12134 * it can't be copied now due to insufficient
12088 12135 * space. (unlikely, fp mp can change, but it
12089 12136 * does not increase in length)
12090 12137 */
12091 12138 return (mp1);
12092 12139 }
12093 12140 mp1 = copyb(nce->nce_dlur_mp);
12094 12141
12095 12142 if (mp1 == NULL) {
12096 12143 ill_t *ill = nce->nce_ill;
12097 12144
12098 12145 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12099 12146 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12100 12147 freemsg(mp);
12101 12148 return (NULL);
12102 12149 }
12103 12150 mp1->b_cont = mp;
12104 12151 if (priority != 0) {
12105 12152 mp1->b_band = priority;
12106 12153 ((dl_unitdata_req_t *)(mp1->b_rptr))->dl_priority.dl_max =
12107 12154 priority;
12108 12155 }
12109 12156 return (mp1);
12110 12157 #undef rptr
12111 12158 }
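Editorial aside: the headroom test above ((rptr - db_base) >= hlen) is the
general mblk idiom: prepend in place when the data block has room, otherwise
allocate a fresh block and chain the original behind it. A hedged stand-alone
sketch of the same pattern (buf_t and its fields are invented for the
example; this is not the kernel mblk API):

	#include <stdlib.h>
	#include <string.h>

	/* Simplified stand-in for an mblk: valid data sits in [rptr, wptr). */
	typedef struct buf {
		unsigned char *base;	/* start of backing storage */
		unsigned char *rptr;	/* first valid byte */
		unsigned char *wptr;	/* one past the last valid byte */
		struct buf *next;	/* continuation, like b_cont */
	} buf_t;

	/*
	 * Prepend hdr[0..hlen) to bp. If there is headroom, copy in place;
	 * otherwise allocate a new buffer and chain the old one behind it,
	 * mirroring the fastpath-versus-copyb split above. Returns NULL on
	 * allocation failure (the caller frees bp, as IP does).
	 */
	static buf_t *
	prepend_hdr(buf_t *bp, const unsigned char *hdr, size_t hlen)
	{
		buf_t *nbp;

		if ((size_t)(bp->rptr - bp->base) >= hlen) {
			bp->rptr -= hlen;
			(void) memcpy(bp->rptr, hdr, hlen);
			return (bp);
		}
		if ((nbp = malloc(sizeof (*nbp))) == NULL)
			return (NULL);
		if ((nbp->base = malloc(hlen)) == NULL) {
			free(nbp);
			return (NULL);
		}
		(void) memcpy(nbp->base, hdr, hlen);
		nbp->rptr = nbp->base;
		nbp->wptr = nbp->base + hlen;
		nbp->next = bp;
		return (nbp);
	}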
12112 12159
12113 12160 /*
12114 12161 * Finish the outbound IPsec processing. This function is called from
12115 12162 * ipsec_out_process() if the IPsec packet was processed
12116 12163 * synchronously, or from {ah,esp}_kcf_callback_outbound() if it was processed
12117 12164 * asynchronously.
12118 12165 *
12119 12166 * This is common to IPv4 and IPv6.
12120 12167 */
12121 12168 int
12122 12169 ip_output_post_ipsec(mblk_t *mp, ip_xmit_attr_t *ixa)
12123 12170 {
12124 12171 iaflags_t ixaflags = ixa->ixa_flags;
12125 12172 uint_t pktlen;
12126 12173
12127 12174
12128 12175 /* AH/ESP don't update ixa_pktlen when they modify the packet */
12129 12176 if (ixaflags & IXAF_IS_IPV4) {
12130 12177 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12131 12178
12132 12179 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12133 12180 pktlen = ntohs(ipha->ipha_length);
12134 12181 } else {
12135 12182 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12136 12183
12137 12184 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12138 12185 pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12139 12186 }
12140 12187
12141 12188 /*
12142 12189 * We release any hard reference on the SAs here to make
12143 12190 * sure the SAs can be garbage collected. ipsr_sa has a soft reference
12144 12191 * on the SAs.
12145 12192 * If in the future we want the hard latching of the SAs in the
12146 12193 * ip_xmit_attr_t then we should remove this.
12147 12194 */
12148 12195 if (ixa->ixa_ipsec_esp_sa != NULL) {
12149 12196 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12150 12197 ixa->ixa_ipsec_esp_sa = NULL;
12151 12198 }
12152 12199 if (ixa->ixa_ipsec_ah_sa != NULL) {
12153 12200 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12154 12201 ixa->ixa_ipsec_ah_sa = NULL;
12155 12202 }
12156 12203
12157 12204 /* Do we need to fragment? */
12158 12205 if ((ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR) ||
12159 12206 pktlen > ixa->ixa_fragsize) {
12160 12207 if (ixaflags & IXAF_IS_IPV4) {
12161 12208 ASSERT(!(ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR));
12162 12209 /*
12163 12210 			 * We check for the DF case in ipsec_out_process(),
12164 12211 			 * hence this only handles the non-DF case.
12165 12212 */
12166 12213 return (ip_fragment_v4(mp, ixa->ixa_nce, ixa->ixa_flags,
12167 12214 pktlen, ixa->ixa_fragsize,
12168 12215 ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12169 12216 ixa->ixa_no_loop_zoneid, ixa->ixa_postfragfn,
12170 12217 &ixa->ixa_cookie));
12171 12218 } else {
12172 12219 mp = ip_fraghdr_add_v6(mp, ixa->ixa_ident, ixa);
12173 12220 if (mp == NULL) {
12174 12221 /* MIB and ip_drop_output already done */
12175 12222 return (ENOMEM);
12176 12223 }
12177 12224 pktlen += sizeof (ip6_frag_t);
12178 12225 if (pktlen > ixa->ixa_fragsize) {
12179 12226 return (ip_fragment_v6(mp, ixa->ixa_nce,
12180 12227 ixa->ixa_flags, pktlen,
12181 12228 ixa->ixa_fragsize, ixa->ixa_xmit_hint,
12182 12229 ixa->ixa_zoneid, ixa->ixa_no_loop_zoneid,
12183 12230 ixa->ixa_postfragfn, &ixa->ixa_cookie));
12184 12231 }
12185 12232 }
12186 12233 }
12187 12234 return ((ixa->ixa_postfragfn)(mp, ixa->ixa_nce, ixa->ixa_flags,
12188 12235 pktlen, ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12189 12236 ixa->ixa_no_loop_zoneid, NULL));
12190 12237 }
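Editorial aside: since AH/ESP leave ixa_pktlen stale, the length above is
re-derived from the on-the-wire header. A user-level sketch of the same
derivation using the standard netinet definitions (assumes a well-formed,
suitably aligned header at pkt; the function name is ours):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <netinet/ip.h>
	#include <netinet/ip6.h>

	/* Total packet length as carried in the IPv4 or IPv6 header. */
	static unsigned int
	pkt_total_len(const unsigned char *pkt)
	{
		if ((pkt[0] >> 4) == 4) {
			const struct ip *v4 = (const struct ip *)pkt;

			return (ntohs(v4->ip_len));
		} else {
			const struct ip6_hdr *v6 = (const struct ip6_hdr *)pkt;

			/* ip6_plen excludes the fixed 40-byte header. */
			return (ntohs(v6->ip6_plen) + sizeof (struct ip6_hdr));
		}
	}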
12191 12238
12192 12239 /*
12193 12240  * Finish the inbound IPsec processing. This function is called after
12194 12241  * AH/ESP inbound processing if the IPsec packet was processed
12195 12242  * synchronously, or from {ah,esp}_kcf_callback_inbound() if it was processed
12196 12243  * asynchronously.
12197 12244 *
12198 12245 * This is common to IPv4 and IPv6.
12199 12246 */
12200 12247 void
12201 12248 ip_input_post_ipsec(mblk_t *mp, ip_recv_attr_t *ira)
12202 12249 {
12203 12250 iaflags_t iraflags = ira->ira_flags;
12204 12251
12205 12252 /* Length might have changed */
12206 12253 if (iraflags & IRAF_IS_IPV4) {
12207 12254 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12208 12255
12209 12256 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12210 12257 ira->ira_pktlen = ntohs(ipha->ipha_length);
12211 12258 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
12212 12259 ira->ira_protocol = ipha->ipha_protocol;
12213 12260
12214 12261 ip_fanout_v4(mp, ipha, ira);
12215 12262 } else {
12216 12263 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12217 12264 uint8_t *nexthdrp;
12218 12265
12219 12266 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12220 12267 ira->ira_pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12221 12268 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ira->ira_ip_hdr_length,
12222 12269 &nexthdrp)) {
12223 12270 /* Malformed packet */
12224 12271 BUMP_MIB(ira->ira_ill->ill_ip_mib, ipIfStatsInDiscards);
12225 12272 ip_drop_input("ipIfStatsInDiscards", mp, ira->ira_ill);
12226 12273 freemsg(mp);
12227 12274 return;
12228 12275 }
12229 12276 ira->ira_protocol = *nexthdrp;
12230 12277 ip_fanout_v6(mp, ip6h, ira);
12231 12278 }
12232 12279 }
12233 12280
12234 12281 /*
12235 12282 * Select which AH & ESP SA's to use (if any) for the outbound packet.
12236 12283 *
12237 12284 * If this function returns B_TRUE, the requested SA's have been filled
12238 12285 * into the ixa_ipsec_*_sa pointers.
12239 12286 *
12240 12287 * If the function returns B_FALSE, the packet has been "consumed", most
12241 12288 * likely by an ACQUIRE sent up via PF_KEY to a key management daemon.
12242 12289 *
12243 12290 * The SA references created by the protocol-specific "select"
12244 12291 * function will be released in ip_output_post_ipsec.
12245 12292 */
12246 12293 static boolean_t
12247 12294 ipsec_out_select_sa(mblk_t *mp, ip_xmit_attr_t *ixa)
12248 12295 {
12249 12296 boolean_t need_ah_acquire = B_FALSE, need_esp_acquire = B_FALSE;
12250 12297 ipsec_policy_t *pp;
12251 12298 ipsec_action_t *ap;
12252 12299
12253 12300 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12254 12301 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12255 12302 (ixa->ixa_ipsec_action != NULL));
12256 12303
12257 12304 ap = ixa->ixa_ipsec_action;
12258 12305 if (ap == NULL) {
12259 12306 pp = ixa->ixa_ipsec_policy;
12260 12307 ASSERT(pp != NULL);
12261 12308 ap = pp->ipsp_act;
12262 12309 ASSERT(ap != NULL);
12263 12310 }
12264 12311
12265 12312 /*
12266 12313 	 * We have an action. Now, let's select SAs.
12267 12314 * A side effect of setting ixa_ipsec_*_sa is that it will
12268 12315 * be cached in the conn_t.
12269 12316 */
12270 12317 if (ap->ipa_want_esp) {
12271 12318 if (ixa->ixa_ipsec_esp_sa == NULL) {
12272 12319 need_esp_acquire = !ipsec_outbound_sa(mp, ixa,
12273 12320 IPPROTO_ESP);
12274 12321 }
12275 12322 ASSERT(need_esp_acquire || ixa->ixa_ipsec_esp_sa != NULL);
12276 12323 }
12277 12324
12278 12325 if (ap->ipa_want_ah) {
12279 12326 if (ixa->ixa_ipsec_ah_sa == NULL) {
12280 12327 need_ah_acquire = !ipsec_outbound_sa(mp, ixa,
12281 12328 IPPROTO_AH);
12282 12329 }
12283 12330 ASSERT(need_ah_acquire || ixa->ixa_ipsec_ah_sa != NULL);
12284 12331 /*
12285 12332 * The ESP and AH processing order needs to be preserved
12286 12333 * when both protocols are required (ESP should be applied
12287 12334 * before AH for an outbound packet). Force an ESP ACQUIRE
12288 12335 * when both ESP and AH are required, and an AH ACQUIRE
12289 12336 * is needed.
12290 12337 */
12291 12338 if (ap->ipa_want_esp && need_ah_acquire)
12292 12339 need_esp_acquire = B_TRUE;
12293 12340 }
12294 12341
12295 12342 /*
12296 12343 * Send an ACQUIRE (extended, regular, or both) if we need one.
12297 12344 * Release SAs that got referenced, but will not be used until we
12298 12345 * acquire _all_ of the SAs we need.
12299 12346 */
12300 12347 if (need_ah_acquire || need_esp_acquire) {
12301 12348 if (ixa->ixa_ipsec_ah_sa != NULL) {
12302 12349 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12303 12350 ixa->ixa_ipsec_ah_sa = NULL;
12304 12351 }
12305 12352 if (ixa->ixa_ipsec_esp_sa != NULL) {
12306 12353 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12307 12354 ixa->ixa_ipsec_esp_sa = NULL;
12308 12355 }
12309 12356
12310 12357 sadb_acquire(mp, ixa, need_ah_acquire, need_esp_acquire);
12311 12358 return (B_FALSE);
12312 12359 }
12313 12360
12314 12361 return (B_TRUE);
12315 12362 }
12316 12363
12317 12364 /*
12318 12365 * Handle IPsec output processing.
12319 12366 * This function is only entered once for a given packet.
12320 12367  * We try to do things synchronously, but if we need user-level to
12321 12368  * set up SAs, or if ESP or AH uses asynchronous kEF, then the operation
12322 12369 * will be completed
12323 12370 * - when the SAs are added in esp_add_sa_finish/ah_add_sa_finish
12324 12371 * - when asynchronous ESP is done it will do AH
12325 12372 *
12326 12373 * In all cases we come back in ip_output_post_ipsec() to fragment and
12327 12374 * send out the packet.
12328 12375 */
12329 12376 int
12330 12377 ipsec_out_process(mblk_t *mp, ip_xmit_attr_t *ixa)
12331 12378 {
12332 12379 ill_t *ill = ixa->ixa_nce->nce_ill;
12333 12380 ip_stack_t *ipst = ixa->ixa_ipst;
12334 12381 ipsec_stack_t *ipss;
12335 12382 ipsec_policy_t *pp;
12336 12383 ipsec_action_t *ap;
12337 12384
12338 12385 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12339 12386
12340 12387 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12341 12388 (ixa->ixa_ipsec_action != NULL));
12342 12389
12343 12390 ipss = ipst->ips_netstack->netstack_ipsec;
12344 12391 if (!ipsec_loaded(ipss)) {
12345 12392 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12346 12393 ip_drop_packet(mp, B_TRUE, ill,
12347 12394 DROPPER(ipss, ipds_ip_ipsec_not_loaded),
12348 12395 &ipss->ipsec_dropper);
12349 12396 return (ENOTSUP);
12350 12397 }
12351 12398
12352 12399 ap = ixa->ixa_ipsec_action;
12353 12400 if (ap == NULL) {
12354 12401 pp = ixa->ixa_ipsec_policy;
12355 12402 ASSERT(pp != NULL);
12356 12403 ap = pp->ipsp_act;
12357 12404 ASSERT(ap != NULL);
12358 12405 }
12359 12406
12360 12407 /* Handle explicit drop action and bypass. */
12361 12408 switch (ap->ipa_act.ipa_type) {
12362 12409 case IPSEC_ACT_DISCARD:
12363 12410 case IPSEC_ACT_REJECT:
12364 12411 ip_drop_packet(mp, B_FALSE, ill,
12365 12412 DROPPER(ipss, ipds_spd_explicit), &ipss->ipsec_spd_dropper);
12366 12413 return (EHOSTUNREACH); /* IPsec policy failure */
12367 12414 case IPSEC_ACT_BYPASS:
12368 12415 return (ip_output_post_ipsec(mp, ixa));
12369 12416 }
12370 12417
12371 12418 /*
12372 12419 	 * The order of processing is to first insert an IP header if needed,
12373 12420 	 * then the ESP header, and then the AH header.
12374 12421 */
12375 12422 if ((ixa->ixa_flags & IXAF_IS_IPV4) && ap->ipa_want_se) {
12376 12423 /*
12377 12424 * First get the outer IP header before sending
12378 12425 * it to ESP.
12379 12426 */
12380 12427 ipha_t *oipha, *iipha;
12381 12428 mblk_t *outer_mp, *inner_mp;
12382 12429
12383 12430 if ((outer_mp = allocb(sizeof (ipha_t), BPRI_HI)) == NULL) {
12384 12431 (void) mi_strlog(ill->ill_rq, 0,
12385 12432 SL_ERROR|SL_TRACE|SL_CONSOLE,
12386 12433 "ipsec_out_process: "
12387 12434 "Self-Encapsulation failed: Out of memory\n");
12388 12435 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12389 12436 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12390 12437 freemsg(mp);
12391 12438 return (ENOBUFS);
12392 12439 }
12393 12440 inner_mp = mp;
12394 12441 ASSERT(inner_mp->b_datap->db_type == M_DATA);
12395 12442 oipha = (ipha_t *)outer_mp->b_rptr;
12396 12443 iipha = (ipha_t *)inner_mp->b_rptr;
12397 12444 *oipha = *iipha;
12398 12445 outer_mp->b_wptr += sizeof (ipha_t);
12399 12446 oipha->ipha_length = htons(ntohs(iipha->ipha_length) +
12400 12447 sizeof (ipha_t));
12401 12448 oipha->ipha_protocol = IPPROTO_ENCAP;
12402 12449 oipha->ipha_version_and_hdr_length =
12403 12450 IP_SIMPLE_HDR_VERSION;
12404 12451 oipha->ipha_hdr_checksum = 0;
12405 12452 oipha->ipha_hdr_checksum = ip_csum_hdr(oipha);
12406 12453 outer_mp->b_cont = inner_mp;
12407 12454 mp = outer_mp;
12408 12455
12409 12456 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
12410 12457 }
12411 12458
12412 12459 	/* If we need to wait for an SA then we can't return any errno */
12413 12460 if (((ap->ipa_want_ah && (ixa->ixa_ipsec_ah_sa == NULL)) ||
12414 12461 (ap->ipa_want_esp && (ixa->ixa_ipsec_esp_sa == NULL))) &&
12415 12462 !ipsec_out_select_sa(mp, ixa))
12416 12463 return (0);
12417 12464
12418 12465 /*
12419 12466 * By now, we know what SA's to use. Toss over to ESP & AH
12420 12467 * to do the heavy lifting.
12421 12468 */
12422 12469 if (ap->ipa_want_esp) {
12423 12470 ASSERT(ixa->ixa_ipsec_esp_sa != NULL);
12424 12471
12425 12472 mp = ixa->ixa_ipsec_esp_sa->ipsa_output_func(mp, ixa);
12426 12473 if (mp == NULL) {
12427 12474 /*
12428 12475 * Either it failed or is pending. In the former case
12429 12476 			 * ipIfStatsOutDiscards was increased.
12430 12477 */
12431 12478 return (0);
12432 12479 }
12433 12480 }
12434 12481
12435 12482 if (ap->ipa_want_ah) {
12436 12483 ASSERT(ixa->ixa_ipsec_ah_sa != NULL);
12437 12484
12438 12485 mp = ixa->ixa_ipsec_ah_sa->ipsa_output_func(mp, ixa);
12439 12486 if (mp == NULL) {
12440 12487 /*
12441 12488 * Either it failed or is pending. In the former case
12442 12489 			 * ipIfStatsOutDiscards was increased.
12443 12490 */
12444 12491 return (0);
12445 12492 }
12446 12493 }
12447 12494 /*
12448 12495 * We are done with IPsec processing. Send it over
12449 12496 * the wire.
12450 12497 */
12451 12498 return (ip_output_post_ipsec(mp, ixa));
12452 12499 }
12453 12500
12454 12501 /*
12455 12502 * ioctls that go through a down/up sequence may need to wait for the down
12456 12503 * to complete. This involves waiting for the ire and ipif refcnts to go down
12457 12504 * to zero. Subsequently the ioctl is restarted from ipif_ill_refrele_tail.
12458 12505 */
12459 12506 /* ARGSUSED */
12460 12507 void
12461 12508 ip_reprocess_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
12462 12509 {
12463 12510 struct iocblk *iocp;
12464 12511 mblk_t *mp1;
12465 12512 ip_ioctl_cmd_t *ipip;
12466 12513 int err;
12467 12514 sin_t *sin;
12468 12515 struct lifreq *lifr;
12469 12516 struct ifreq *ifr;
12470 12517
12471 12518 iocp = (struct iocblk *)mp->b_rptr;
12472 12519 ASSERT(ipsq != NULL);
12473 12520 /* Existence of mp1 verified in ip_wput_nondata */
12474 12521 mp1 = mp->b_cont->b_cont;
12475 12522 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12476 12523 if (ipip->ipi_cmd == SIOCSLIFNAME || ipip->ipi_cmd == IF_UNITSEL) {
12477 12524 /*
12478 12525 * Special case where ipx_current_ipif is not set:
12479 12526 * ill_phyint_reinit merged the v4 and v6 into a single ipsq.
12480 12527 		 * We are here as we were not able to complete the operation in
12481 12528 * ipif_set_values because we could not become exclusive on
12482 12529 * the new ipsq.
12483 12530 */
12484 12531 ill_t *ill = q->q_ptr;
12485 12532 ipsq_current_start(ipsq, ill->ill_ipif, ipip->ipi_cmd);
12486 12533 }
12487 12534 ASSERT(ipsq->ipsq_xop->ipx_current_ipif != NULL);
12488 12535
12489 12536 if (ipip->ipi_cmd_type == IF_CMD) {
12490 12537 		/* This is an old style SIOC[GS]IF* command */
12491 12538 ifr = (struct ifreq *)mp1->b_rptr;
12492 12539 sin = (sin_t *)&ifr->ifr_addr;
12493 12540 } else if (ipip->ipi_cmd_type == LIF_CMD) {
12494 12541 		/* This is a new style SIOC[GS]LIF* command */
12495 12542 lifr = (struct lifreq *)mp1->b_rptr;
12496 12543 sin = (sin_t *)&lifr->lifr_addr;
12497 12544 } else {
12498 12545 sin = NULL;
12499 12546 }
12500 12547
12501 12548 err = (*ipip->ipi_func_restart)(ipsq->ipsq_xop->ipx_current_ipif, sin,
12502 12549 q, mp, ipip, mp1->b_rptr);
12503 12550
12504 12551 DTRACE_PROBE4(ipif__ioctl, char *, "ip_reprocess_ioctl finish",
12505 12552 int, ipip->ipi_cmd,
12506 12553 ill_t *, ipsq->ipsq_xop->ipx_current_ipif->ipif_ill,
12507 12554 ipif_t *, ipsq->ipsq_xop->ipx_current_ipif);
12508 12555
12509 12556 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12510 12557 }
12511 12558
12512 12559 /*
12513 12560 * ioctl processing
12514 12561 *
12515 12562 * ioctl processing starts with ip_sioctl_copyin_setup(), which looks up
12516 12563 * the ioctl command in the ioctl tables, determines the copyin data size
12517 12564 * from the ipi_copyin_size field, and does an mi_copyin() of that size.
12518 12565 *
12519 12566 * ioctl processing then continues when the M_IOCDATA makes its way down to
12520 12567 * ip_wput_nondata(). The ioctl is looked up again in the ioctl table, its
12521 12568 * associated 'conn' is refheld till the end of the ioctl and the general
12522 12569 * ioctl processing function ip_process_ioctl() is called to extract the
12523 12570 * arguments and process the ioctl. To simplify extraction, ioctl commands
12524 12571 * are "typed" based on the arguments they take (e.g., LIF_CMD which takes a
12525 12572 * `struct lifreq'), and a common extract function (e.g., ip_extract_lifreq())
12526 12573 * is used to extract the ioctl's arguments.
12527 12574 *
12528 12575 * ip_process_ioctl determines if the ioctl needs to be serialized, and if
12529 12576 * so goes thru the serialization primitive ipsq_try_enter. Then the
12530 12577 * appropriate function to handle the ioctl is called based on the entry in
12531 12578 * the ioctl table. ioctl completion is encapsulated in ip_ioctl_finish
12532 12579 * which also refreleases the 'conn' that was refheld at the start of the
12533 12580 * ioctl. Finally ipsq_exit is called if needed to exit the ipsq.
12534 12581 *
12535 12582  * Many exclusive ioctls go thru an internal down/up sequence as part of
12536 12583 * the operation. For example an attempt to change the IP address of an
12537 12584 * ipif entails ipif_down, set address, ipif_up. Bringing down the interface
12538 12585 * does all the cleanup such as deleting all ires that use this address.
12539 12586 * Then we need to wait till all references to the interface go away.
12540 12587 */
12541 12588 void
12542 12589 ip_process_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg)
12543 12590 {
12544 12591 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
12545 12592 ip_ioctl_cmd_t *ipip = arg;
12546 12593 ip_extract_func_t *extract_funcp;
12547 12594 cmd_info_t ci;
12548 12595 int err;
12549 12596 boolean_t entered_ipsq = B_FALSE;
12550 12597
12551 12598 ip3dbg(("ip_process_ioctl: ioctl %X\n", iocp->ioc_cmd));
12552 12599
12553 12600 if (ipip == NULL)
12554 12601 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12555 12602
12556 12603 /*
12557 12604 * SIOCLIFADDIF needs to go thru a special path since the
12558 12605 * ill may not exist yet. This happens in the case of lo0
12559 12606 * which is created using this ioctl.
12560 12607 */
12561 12608 if (ipip->ipi_cmd == SIOCLIFADDIF) {
12562 12609 err = ip_sioctl_addif(NULL, NULL, q, mp, NULL, NULL);
12563 12610 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish",
12564 12611 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12565 12612 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12566 12613 return;
12567 12614 }
12568 12615
12569 12616 ci.ci_ipif = NULL;
12570 12617 switch (ipip->ipi_cmd_type) {
12571 12618 case MISC_CMD:
12572 12619 case MSFILT_CMD:
12573 12620 /*
12574 12621 * All MISC_CMD ioctls come in here -- e.g. SIOCGLIFCONF.
12575 12622 */
12576 12623 if (ipip->ipi_cmd == IF_UNITSEL) {
12577 12624 /* ioctl comes down the ill */
12578 12625 ci.ci_ipif = ((ill_t *)q->q_ptr)->ill_ipif;
12579 12626 ipif_refhold(ci.ci_ipif);
12580 12627 }
12581 12628 err = 0;
12582 12629 ci.ci_sin = NULL;
12583 12630 ci.ci_sin6 = NULL;
12584 12631 ci.ci_lifr = NULL;
12585 12632 extract_funcp = NULL;
12586 12633 break;
12587 12634
12588 12635 case IF_CMD:
12589 12636 case LIF_CMD:
12590 12637 extract_funcp = ip_extract_lifreq;
12591 12638 break;
12592 12639
12593 12640 case ARP_CMD:
12594 12641 case XARP_CMD:
12595 12642 extract_funcp = ip_extract_arpreq;
12596 12643 break;
12597 12644
12598 12645 default:
12599 12646 ASSERT(0);
12600 12647 }
12601 12648
12602 12649 if (extract_funcp != NULL) {
12603 12650 err = (*extract_funcp)(q, mp, ipip, &ci);
12604 12651 if (err != 0) {
12605 12652 DTRACE_PROBE4(ipif__ioctl,
12606 12653 char *, "ip_process_ioctl finish err",
12607 12654 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12608 12655 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12609 12656 return;
12610 12657 }
12611 12658
12612 12659 /*
12613 12660 * All of the extraction functions return a refheld ipif.
12614 12661 */
12615 12662 ASSERT(ci.ci_ipif != NULL);
12616 12663 }
12617 12664
12618 12665 if (!(ipip->ipi_flags & IPI_WR)) {
12619 12666 /*
12620 12667 * A return value of EINPROGRESS means the ioctl is
12621 12668 * either queued and waiting for some reason or has
12622 12669 * already completed.
12623 12670 */
12624 12671 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip,
12625 12672 ci.ci_lifr);
12626 12673 if (ci.ci_ipif != NULL) {
12627 12674 DTRACE_PROBE4(ipif__ioctl,
12628 12675 char *, "ip_process_ioctl finish RD",
12629 12676 int, ipip->ipi_cmd, ill_t *, ci.ci_ipif->ipif_ill,
12630 12677 ipif_t *, ci.ci_ipif);
12631 12678 ipif_refrele(ci.ci_ipif);
12632 12679 } else {
12633 12680 DTRACE_PROBE4(ipif__ioctl,
12634 12681 char *, "ip_process_ioctl finish RD",
12635 12682 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12636 12683 }
12637 12684 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12638 12685 return;
12639 12686 }
12640 12687
12641 12688 ASSERT(ci.ci_ipif != NULL);
12642 12689
12643 12690 /*
12644 12691 * If ipsq is non-NULL, we are already being called exclusively
12645 12692 */
12646 12693 ASSERT(ipsq == NULL || IAM_WRITER_IPSQ(ipsq));
12647 12694 if (ipsq == NULL) {
12648 12695 ipsq = ipsq_try_enter(ci.ci_ipif, NULL, q, mp, ip_process_ioctl,
12649 12696 NEW_OP, B_TRUE);
12650 12697 if (ipsq == NULL) {
12651 12698 ipif_refrele(ci.ci_ipif);
12652 12699 return;
12653 12700 }
12654 12701 entered_ipsq = B_TRUE;
12655 12702 }
12656 12703 /*
12657 12704 * Release the ipif so that ipif_down and friends that wait for
12658 12705 * references to go away are not misled about the current ipif_refcnt
12659 12706 * values. We are writer so we can access the ipif even after releasing
12660 12707 * the ipif.
12661 12708 */
12662 12709 ipif_refrele(ci.ci_ipif);
12663 12710
12664 12711 ipsq_current_start(ipsq, ci.ci_ipif, ipip->ipi_cmd);
12665 12712
12666 12713 /*
12667 12714 * A return value of EINPROGRESS means the ioctl is
12668 12715 * either queued and waiting for some reason or has
12669 12716 * already completed.
12670 12717 */
12671 12718 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip, ci.ci_lifr);
12672 12719
12673 12720 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish WR",
12674 12721 int, ipip->ipi_cmd,
12675 12722 ill_t *, ci.ci_ipif == NULL ? NULL : ci.ci_ipif->ipif_ill,
12676 12723 ipif_t *, ci.ci_ipif);
12677 12724 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12678 12725
12679 12726 if (entered_ipsq)
12680 12727 ipsq_exit(ipsq);
12681 12728 }
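Editorial aside: the exclusive LIF_CMD path above is what a user-level
SIOC[GS]LIF* ioctl lands in. A minimal, hedged sketch of issuing one from
user space (the standard illumos lifreq interface; "lo0" is only an example
interface name):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <sys/sockio.h>
	#include <net/if.h>
	#include <netinet/in.h>

	int
	main(void)
	{
		struct lifreq lifr;
		int s = socket(AF_INET, SOCK_DGRAM, 0);

		if (s == -1) {
			perror("socket");
			return (1);
		}
		(void) memset(&lifr, 0, sizeof (lifr));
		/* "lo0" is only an example interface name. */
		(void) strlcpy(lifr.lifr_name, "lo0", sizeof (lifr.lifr_name));
		if (ioctl(s, SIOCGLIFADDR, &lifr) == -1) {
			perror("SIOCGLIFADDR");
			(void) close(s);
			return (1);
		}
		(void) printf("address family: %d\n",
		    ((struct sockaddr_in *)&lifr.lifr_addr)->sin_family);
		(void) close(s);
		return (0);
	}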
12682 12729
12683 12730 /*
12684 12731 * Complete the ioctl. Typically ioctls use the mi package and need to
12685 12732 * do mi_copyout/mi_copy_done.
12686 12733 */
12687 12734 void
12688 12735 ip_ioctl_finish(queue_t *q, mblk_t *mp, int err, int mode, ipsq_t *ipsq)
12689 12736 {
12690 12737 conn_t *connp = NULL;
12691 12738
12692 12739 if (err == EINPROGRESS)
12693 12740 return;
12694 12741
12695 12742 if (CONN_Q(q)) {
12696 12743 connp = Q_TO_CONN(q);
12697 12744 ASSERT(connp->conn_ref >= 2);
12698 12745 }
12699 12746
12700 12747 switch (mode) {
12701 12748 case COPYOUT:
12702 12749 if (err == 0)
12703 12750 mi_copyout(q, mp);
12704 12751 else
12705 12752 mi_copy_done(q, mp, err);
12706 12753 break;
12707 12754
12708 12755 case NO_COPYOUT:
12709 12756 mi_copy_done(q, mp, err);
12710 12757 break;
12711 12758
12712 12759 default:
12713 12760 ASSERT(mode == CONN_CLOSE); /* aborted through CONN_CLOSE */
12714 12761 break;
12715 12762 }
12716 12763
12717 12764 /*
12718 12765 * The conn refhold and ioctlref placed on the conn at the start of the
12719 12766 * ioctl are released here.
12720 12767 */
12721 12768 if (connp != NULL) {
12722 12769 CONN_DEC_IOCTLREF(connp);
12723 12770 CONN_OPER_PENDING_DONE(connp);
12724 12771 }
12725 12772
12726 12773 if (ipsq != NULL)
12727 12774 ipsq_current_finish(ipsq);
12728 12775 }
12729 12776
12730 12777 /* Handles all non data messages */
12731 12778 void
12732 12779 ip_wput_nondata(queue_t *q, mblk_t *mp)
12733 12780 {
12734 12781 mblk_t *mp1;
12735 12782 struct iocblk *iocp;
12736 12783 ip_ioctl_cmd_t *ipip;
12737 12784 conn_t *connp;
12738 12785 cred_t *cr;
12739 12786 char *proto_str;
12740 12787
12741 12788 if (CONN_Q(q))
12742 12789 connp = Q_TO_CONN(q);
12743 12790 else
12744 12791 connp = NULL;
12745 12792
12746 12793 switch (DB_TYPE(mp)) {
12747 12794 case M_IOCTL:
12748 12795 /*
12749 12796 * IOCTL processing begins in ip_sioctl_copyin_setup which
12750 12797 * will arrange to copy in associated control structures.
12751 12798 */
12752 12799 ip_sioctl_copyin_setup(q, mp);
12753 12800 return;
12754 12801 case M_IOCDATA:
12755 12802 /*
12756 12803 * Ensure that this is associated with one of our trans-
12757 12804 * parent ioctls. If it's not ours, discard it if we're
12758 12805 * running as a driver, or pass it on if we're a module.
12759 12806 */
12760 12807 iocp = (struct iocblk *)mp->b_rptr;
12761 12808 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12762 12809 if (ipip == NULL) {
12763 12810 if (q->q_next == NULL) {
12764 12811 goto nak;
12765 12812 } else {
12766 12813 putnext(q, mp);
12767 12814 }
12768 12815 return;
12769 12816 }
12770 12817 if ((q->q_next != NULL) && !(ipip->ipi_flags & IPI_MODOK)) {
12771 12818 /*
12772 12819 * The ioctl is one we recognise, but is not consumed
12773 12820 			 * by IP as a module and we are a module, so we drop it.
12774 12821 */
12775 12822 goto nak;
12776 12823 }
12777 12824
12778 12825 /* IOCTL continuation following copyin or copyout. */
12779 12826 if (mi_copy_state(q, mp, NULL) == -1) {
12780 12827 /*
12781 12828 * The copy operation failed. mi_copy_state already
12782 12829 * cleaned up, so we're out of here.
12783 12830 */
12784 12831 return;
12785 12832 }
12786 12833 /*
12787 12834 * If we just completed a copy in, we become writer and
12788 12835 * continue processing in ip_sioctl_copyin_done. If it
12789 12836 * was a copy out, we call mi_copyout again. If there is
12790 12837 * nothing more to copy out, it will complete the IOCTL.
12791 12838 */
12792 12839 if (MI_COPY_DIRECTION(mp) == MI_COPY_IN) {
12793 12840 if (!(mp1 = mp->b_cont) || !(mp1 = mp1->b_cont)) {
12794 12841 mi_copy_done(q, mp, EPROTO);
12795 12842 return;
12796 12843 }
12797 12844 /*
12798 12845 * Check for cases that need more copying. A return
12799 12846 * value of 0 means a second copyin has been started,
12800 12847 * so we return; a return value of 1 means no more
12801 12848 * copying is needed, so we continue.
12802 12849 */
12803 12850 if (ipip->ipi_cmd_type == MSFILT_CMD &&
12804 12851 MI_COPY_COUNT(mp) == 1) {
12805 12852 if (ip_copyin_msfilter(q, mp) == 0)
12806 12853 return;
12807 12854 }
12808 12855 /*
12809 12856 * Refhold the conn, till the ioctl completes. This is
12810 12857 * needed in case the ioctl ends up in the pending mp
12811 12858 * list. Every mp in the ipx_pending_mp list must have
12812 12859 * a refhold on the conn to resume processing. The
12813 12860 * refhold is released when the ioctl completes
12814 12861 * (whether normally or abnormally). An ioctlref is also
12815 12862 * placed on the conn to prevent TCP from removing the
12816 12863 * queue needed to send the ioctl reply back.
12817 12864 * In all cases ip_ioctl_finish is called to finish
12818 12865 * the ioctl and release the refholds.
12819 12866 */
12820 12867 if (connp != NULL) {
12821 12868 /* This is not a reentry */
12822 12869 CONN_INC_REF(connp);
12823 12870 CONN_INC_IOCTLREF(connp);
12824 12871 } else {
12825 12872 if (!(ipip->ipi_flags & IPI_MODOK)) {
12826 12873 mi_copy_done(q, mp, EINVAL);
12827 12874 return;
12828 12875 }
12829 12876 }
12830 12877
12831 12878 ip_process_ioctl(NULL, q, mp, ipip);
12832 12879
12833 12880 } else {
12834 12881 mi_copyout(q, mp);
12835 12882 }
12836 12883 return;
12837 12884
12838 12885 case M_IOCNAK:
12839 12886 /*
12840 12887 * The only way we could get here is if a resolver didn't like
12841 12888 * an IOCTL we sent it. This shouldn't happen.
12842 12889 */
12843 12890 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
12844 12891 "ip_wput_nondata: unexpected M_IOCNAK, ioc_cmd 0x%x",
12845 12892 ((struct iocblk *)mp->b_rptr)->ioc_cmd);
12846 12893 freemsg(mp);
12847 12894 return;
12848 12895 case M_IOCACK:
12849 12896 /* /dev/ip shouldn't see this */
12850 12897 goto nak;
12851 12898 case M_FLUSH:
12852 12899 if (*mp->b_rptr & FLUSHW)
12853 12900 flushq(q, FLUSHALL);
12854 12901 if (q->q_next) {
12855 12902 putnext(q, mp);
12856 12903 return;
12857 12904 }
12858 12905 if (*mp->b_rptr & FLUSHR) {
12859 12906 *mp->b_rptr &= ~FLUSHW;
12860 12907 qreply(q, mp);
12861 12908 return;
12862 12909 }
12863 12910 freemsg(mp);
12864 12911 return;
12865 12912 case M_CTL:
12866 12913 break;
12867 12914 case M_PROTO:
12868 12915 case M_PCPROTO:
12869 12916 /*
12870 12917 * The only PROTO messages we expect are SNMP-related.
12871 12918 */
12872 12919 switch (((union T_primitives *)mp->b_rptr)->type) {
12873 12920 case T_SVR4_OPTMGMT_REQ:
12874 12921 ip2dbg(("ip_wput_nondata: T_SVR4_OPTMGMT_REQ "
12875 12922 "flags %x\n",
12876 12923 ((struct T_optmgmt_req *)mp->b_rptr)->MGMT_flags));
12877 12924
12878 12925 if (connp == NULL) {
12879 12926 proto_str = "T_SVR4_OPTMGMT_REQ";
12880 12927 goto protonak;
12881 12928 }
12882 12929
12883 12930 /*
12884 12931 * All Solaris components should pass a db_credp
12885 12932 * for this TPI message, hence we ASSERT.
12886 12933 * But in case there is some other M_PROTO that looks
12887 12934 * like a TPI message sent by some other kernel
12888 12935 * component, we check and return an error.
12889 12936 */
12890 12937 cr = msg_getcred(mp, NULL);
12891 12938 ASSERT(cr != NULL);
12892 12939 if (cr == NULL) {
12893 12940 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL);
12894 12941 if (mp != NULL)
12895 12942 qreply(q, mp);
12896 12943 return;
12897 12944 }
12898 12945
12899 12946 if (!snmpcom_req(q, mp, ip_snmp_set, ip_snmp_get, cr)) {
12900 12947 proto_str = "Bad SNMPCOM request?";
12901 12948 goto protonak;
12902 12949 }
12903 12950 return;
12904 12951 default:
12905 12952 ip1dbg(("ip_wput_nondata: dropping M_PROTO prim %u\n",
12906 12953 (int)*(uint_t *)mp->b_rptr));
12907 12954 freemsg(mp);
12908 12955 return;
12909 12956 }
12910 12957 default:
12911 12958 break;
12912 12959 }
12913 12960 if (q->q_next) {
12914 12961 putnext(q, mp);
12915 12962 } else
12916 12963 freemsg(mp);
12917 12964 return;
12918 12965
12919 12966 nak:
12920 12967 iocp->ioc_error = EINVAL;
12921 12968 mp->b_datap->db_type = M_IOCNAK;
12922 12969 iocp->ioc_count = 0;
12923 12970 qreply(q, mp);
12924 12971 return;
12925 12972
12926 12973 protonak:
12927 12974 cmn_err(CE_NOTE, "IP doesn't process %s as a module", proto_str);
12928 12975 if ((mp = mi_tpi_err_ack_alloc(mp, TPROTO, EINVAL)) != NULL)
12929 12976 qreply(q, mp);
12930 12977 }
12931 12978
12932 12979 /*
12933 12980 * Process IP options in an outbound packet. Verify that the nexthop in a
12934 12981 * strict source route is onlink.
12935 12982  * Returns non-zero if something fails, in which case an ICMP error has been
12936 12983 * sent and mp freed.
12937 12984 *
12938 12985 * Assumes the ULP has called ip_massage_options to move nexthop into ipha_dst.
12939 12986 */
12940 12987 int
12941 12988 ip_output_options(mblk_t *mp, ipha_t *ipha, ip_xmit_attr_t *ixa, ill_t *ill)
12942 12989 {
12943 12990 ipoptp_t opts;
12944 12991 uchar_t *opt;
12945 12992 uint8_t optval;
12946 12993 uint8_t optlen;
12947 12994 ipaddr_t dst;
12948 12995 intptr_t code = 0;
12949 12996 ire_t *ire;
12950 12997 ip_stack_t *ipst = ixa->ixa_ipst;
12951 12998 ip_recv_attr_t iras;
12952 12999
12953 13000 ip2dbg(("ip_output_options\n"));
12954 13001
12955 13002 dst = ipha->ipha_dst;
12956 13003 for (optval = ipoptp_first(&opts, ipha);
12957 13004 optval != IPOPT_EOL;
12958 13005 optval = ipoptp_next(&opts)) {
12959 13006 opt = opts.ipoptp_cur;
12960 13007 optlen = opts.ipoptp_len;
12961 13008 ip2dbg(("ip_output_options: opt %d, len %d\n",
12962 13009 optval, optlen));
12963 13010 switch (optval) {
12964 13011 uint32_t off;
12965 13012 case IPOPT_SSRR:
12966 13013 case IPOPT_LSRR:
12967 13014 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
12968 13015 ip1dbg((
12969 13016 "ip_output_options: bad option offset\n"));
12970 13017 code = (char *)&opt[IPOPT_OLEN] -
12971 13018 (char *)ipha;
12972 13019 goto param_prob;
12973 13020 }
12974 13021 off = opt[IPOPT_OFFSET];
12975 13022 ip1dbg(("ip_output_options: next hop 0x%x\n",
12976 13023 ntohl(dst)));
12977 13024 /*
12978 13025 * For strict: verify that dst is directly
12979 13026 * reachable.
12980 13027 */
12981 13028 if (optval == IPOPT_SSRR) {
12982 13029 ire = ire_ftable_lookup_v4(dst, 0, 0,
12983 13030 IRE_INTERFACE, NULL, ALL_ZONES,
12984 13031 ixa->ixa_tsl,
12985 13032 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
12986 13033 NULL);
12987 13034 if (ire == NULL) {
12988 13035 ip1dbg(("ip_output_options: SSRR not"
12989 13036 " directly reachable: 0x%x\n",
12990 13037 ntohl(dst)));
12991 13038 goto bad_src_route;
12992 13039 }
12993 13040 ire_refrele(ire);
12994 13041 }
12995 13042 break;
12996 13043 case IPOPT_RR:
12997 13044 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
12998 13045 ip1dbg((
12999 13046 "ip_output_options: bad option offset\n"));
13000 13047 code = (char *)&opt[IPOPT_OLEN] -
13001 13048 (char *)ipha;
13002 13049 goto param_prob;
13003 13050 }
13004 13051 break;
13005 13052 case IPOPT_TS:
13006 13053 /*
13007 13054 * Verify that length >=5 and that there is either
13008 13055 * room for another timestamp or that the overflow
13009 13056 * counter is not maxed out.
13010 13057 */
13011 13058 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
13012 13059 if (optlen < IPOPT_MINLEN_IT) {
13013 13060 goto param_prob;
13014 13061 }
13015 13062 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
13016 13063 ip1dbg((
13017 13064 "ip_output_options: bad option offset\n"));
13018 13065 code = (char *)&opt[IPOPT_OFFSET] -
13019 13066 (char *)ipha;
13020 13067 goto param_prob;
13021 13068 }
13022 13069 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
13023 13070 case IPOPT_TS_TSONLY:
13024 13071 off = IPOPT_TS_TIMELEN;
13025 13072 break;
13026 13073 case IPOPT_TS_TSANDADDR:
13027 13074 case IPOPT_TS_PRESPEC:
13028 13075 case IPOPT_TS_PRESPEC_RFC791:
13029 13076 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
13030 13077 break;
13031 13078 default:
13032 13079 code = (char *)&opt[IPOPT_POS_OV_FLG] -
13033 13080 (char *)ipha;
13034 13081 goto param_prob;
13035 13082 }
13036 13083 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
13037 13084 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
13038 13085 /*
13039 13086 * No room and the overflow counter is 15
13040 13087 * already.
13041 13088 */
13042 13089 goto param_prob;
13043 13090 }
13044 13091 break;
13045 13092 }
13046 13093 }
13047 13094
13048 13095 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0)
13049 13096 return (0);
13050 13097
13051 13098 ip1dbg(("ip_output_options: error processing IP options."));
13052 13099 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
13053 13100
13054 13101 param_prob:
13055 13102 bzero(&iras, sizeof (iras));
13056 13103 iras.ira_ill = iras.ira_rill = ill;
13057 13104 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13058 13105 iras.ira_rifindex = iras.ira_ruifindex;
13059 13106 iras.ira_flags = IRAF_IS_IPV4;
13060 13107
13061 13108 ip_drop_output("ip_output_options", mp, ill);
13062 13109 icmp_param_problem(mp, (uint8_t)code, &iras);
13063 13110 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13064 13111 return (-1);
13065 13112
13066 13113 bad_src_route:
13067 13114 bzero(&iras, sizeof (iras));
13068 13115 iras.ira_ill = iras.ira_rill = ill;
13069 13116 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13070 13117 iras.ira_rifindex = iras.ira_ruifindex;
13071 13118 iras.ira_flags = IRAF_IS_IPV4;
13072 13119
13073 13120 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
13074 13121 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, &iras);
13075 13122 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13076 13123 return (-1);
13077 13124 }
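Editorial aside: the loop above leans on ipoptp_first()/ipoptp_next() to step
through the option list. A hedged user-level sketch of the same walk over a
raw IPv4 options area (the bytes after the 20-byte base header; only minimal
length sanity checking, and the names are ours):

	#include <stdio.h>

	#define	OPT_EOL	0	/* end of option list */
	#define	OPT_NOP	1	/* no-operation padding */

	/* Walk a raw IPv4 options area, printing each option's type/length. */
	static void
	walk_ipv4_options(const unsigned char *opts, unsigned int optlen)
	{
		unsigned int off = 0;

		while (off < optlen) {
			unsigned char type = opts[off];
			unsigned char len;

			if (type == OPT_EOL)
				break;
			if (type == OPT_NOP) {
				off++;
				continue;
			}
			if (off + 1 >= optlen)
				break;		/* truncated option */
			len = opts[off + 1];
			if (len < 2 || off + len > optlen)
				break;		/* malformed length */
			(void) printf("option %u, len %u\n", type, len);
			off += len;
		}
	}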
13078 13125
13079 13126 /*
13080 13127 * The maximum value of conn_drain_list_cnt is CONN_MAXDRAINCNT.
13081 13128 * conn_drain_list_cnt can be changed by setting conn_drain_nthreads
13082 13129 * thru /etc/system.
13083 13130 */
13084 13131 #define CONN_MAXDRAINCNT 64
13085 13132
13086 13133 static void
13087 13134 conn_drain_init(ip_stack_t *ipst)
13088 13135 {
13089 13136 int i, j;
13090 13137 idl_tx_list_t *itl_tx;
13091 13138
13092 13139 ipst->ips_conn_drain_list_cnt = conn_drain_nthreads;
13093 13140
13094 13141 if ((ipst->ips_conn_drain_list_cnt == 0) ||
13095 13142 (ipst->ips_conn_drain_list_cnt > CONN_MAXDRAINCNT)) {
13096 13143 /*
13097 13144 * Default value of the number of drainers is the
13098 13145 		 * number of cpus, subject to a maximum of 8 drainers.
13099 13146 */
13100 13147 if (boot_max_ncpus != -1)
13101 13148 ipst->ips_conn_drain_list_cnt = MIN(boot_max_ncpus, 8);
13102 13149 else
13103 13150 ipst->ips_conn_drain_list_cnt = MIN(max_ncpus, 8);
13104 13151 }
13105 13152
13106 13153 ipst->ips_idl_tx_list =
13107 13154 kmem_zalloc(TX_FANOUT_SIZE * sizeof (idl_tx_list_t), KM_SLEEP);
13108 13155 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13109 13156 itl_tx = &ipst->ips_idl_tx_list[i];
13110 13157 itl_tx->txl_drain_list =
13111 13158 kmem_zalloc(ipst->ips_conn_drain_list_cnt *
13112 13159 sizeof (idl_t), KM_SLEEP);
13113 13160 mutex_init(&itl_tx->txl_lock, NULL, MUTEX_DEFAULT, NULL);
13114 13161 for (j = 0; j < ipst->ips_conn_drain_list_cnt; j++) {
13115 13162 mutex_init(&itl_tx->txl_drain_list[j].idl_lock, NULL,
13116 13163 MUTEX_DEFAULT, NULL);
13117 13164 itl_tx->txl_drain_list[j].idl_itl = itl_tx;
13118 13165 }
13119 13166 }
13120 13167 }
13121 13168
13122 13169 static void
13123 13170 conn_drain_fini(ip_stack_t *ipst)
13124 13171 {
13125 13172 int i;
13126 13173 idl_tx_list_t *itl_tx;
13127 13174
13128 13175 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13129 13176 itl_tx = &ipst->ips_idl_tx_list[i];
13130 13177 kmem_free(itl_tx->txl_drain_list,
13131 13178 ipst->ips_conn_drain_list_cnt * sizeof (idl_t));
13132 13179 }
13133 13180 kmem_free(ipst->ips_idl_tx_list,
13134 13181 TX_FANOUT_SIZE * sizeof (idl_tx_list_t));
13135 13182 ipst->ips_idl_tx_list = NULL;
13136 13183 }
13137 13184
13138 13185 /*
13139 13186 * Flow control has blocked us from proceeding. Insert the given conn in one
13140 13187 * of the conn drain lists. When flow control is unblocked, either ip_wsrv()
13141 13188 * (STREAMS) or ill_flow_enable() (direct) will be called back, which in turn
13142 13189 * will call conn_walk_drain(). See the flow control notes at the top of this
13143 13190 * file for more details.
13144 13191 */
13145 13192 void
13146 13193 conn_drain_insert(conn_t *connp, idl_tx_list_t *tx_list)
13147 13194 {
13148 13195 idl_t *idl = tx_list->txl_drain_list;
13149 13196 uint_t index;
13150 13197 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
13151 13198
13152 13199 mutex_enter(&connp->conn_lock);
13153 13200 if (connp->conn_state_flags & CONN_CLOSING) {
13154 13201 /*
13155 13202 * The conn is closing as a result of which CONN_CLOSING
13156 13203 * is set. Return.
13157 13204 */
13158 13205 mutex_exit(&connp->conn_lock);
13159 13206 return;
13160 13207 } else if (connp->conn_idl == NULL) {
13161 13208 /*
13162 13209 		 * Assign the next drain list round robin. We don't use
13163 13210 * a lock, and thus it may not be strictly round robin.
13164 13211 * Atomicity of load/stores is enough to make sure that
13165 13212 * conn_drain_list_index is always within bounds.
13166 13213 */
13167 13214 index = tx_list->txl_drain_index;
13168 13215 ASSERT(index < ipst->ips_conn_drain_list_cnt);
13169 13216 connp->conn_idl = &tx_list->txl_drain_list[index];
13170 13217 index++;
13171 13218 if (index == ipst->ips_conn_drain_list_cnt)
13172 13219 index = 0;
13173 13220 tx_list->txl_drain_index = index;
13174 13221 } else {
13175 13222 ASSERT(connp->conn_idl->idl_itl == tx_list);
13176 13223 }
13177 13224 mutex_exit(&connp->conn_lock);
13178 13225
13179 13226 idl = connp->conn_idl;
13180 13227 mutex_enter(&idl->idl_lock);
13181 13228 if ((connp->conn_drain_prev != NULL) ||
13182 13229 (connp->conn_state_flags & CONN_CLOSING)) {
13183 13230 /*
13184 13231 * The conn is either already in the drain list or closing.
13185 13232 * (We needed to check for CONN_CLOSING again since close can
13186 13233 * sneak in between dropping conn_lock and acquiring idl_lock.)
13187 13234 */
13188 13235 mutex_exit(&idl->idl_lock);
13189 13236 return;
13190 13237 }
13191 13238
13192 13239 /*
13193 13240 * The conn is not in the drain list. Insert it at the
13194 13241 * tail of the drain list. The drain list is circular
13195 13242 * and doubly linked. idl_conn points to the 1st element
13196 13243 * in the list.
13197 13244 */
13198 13245 if (idl->idl_conn == NULL) {
13199 13246 idl->idl_conn = connp;
13200 13247 connp->conn_drain_next = connp;
13201 13248 connp->conn_drain_prev = connp;
13202 13249 } else {
13203 13250 conn_t *head = idl->idl_conn;
13204 13251
13205 13252 connp->conn_drain_next = head;
13206 13253 connp->conn_drain_prev = head->conn_drain_prev;
13207 13254 head->conn_drain_prev->conn_drain_next = connp;
13208 13255 head->conn_drain_prev = connp;
13209 13256 }
13210 13257 /*
13211 13258 	 * For non-streams based sockets, assert flow control.
13212 13259 */
13213 13260 conn_setqfull(connp, NULL);
13214 13261 mutex_exit(&idl->idl_lock);
13215 13262 }
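Editorial aside: because the drain list is circular and doubly linked,
idl_conn is the head and head->conn_drain_prev is therefore the tail, so the
tail insertion above never needs a traversal. A generic sketch of the idiom
(node_t and the names are invented for the example):

	#include <stddef.h>

	typedef struct node {
		struct node *next;
		struct node *prev;
	} node_t;

	/* Insert n at the tail of the circular list whose head is *headp. */
	static void
	tail_insert(node_t **headp, node_t *n)
	{
		node_t *head = *headp;

		if (head == NULL) {
			n->next = n->prev = n;	/* singleton points at itself */
			*headp = n;
			return;
		}
		n->next = head;
		n->prev = head->prev;		/* old tail */
		head->prev->next = n;
		head->prev = n;
	}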
13216 13263
13217 13264 static void
13218 13265 conn_drain_remove(conn_t *connp)
13219 13266 {
13220 13267 idl_t *idl = connp->conn_idl;
13221 13268
13222 13269 if (idl != NULL) {
13223 13270 /*
13224 13271 * Remove ourself from the drain list.
13225 13272 */
13226 13273 if (connp->conn_drain_next == connp) {
13227 13274 /* Singleton in the list */
13228 13275 ASSERT(connp->conn_drain_prev == connp);
13229 13276 idl->idl_conn = NULL;
13230 13277 } else {
13231 13278 connp->conn_drain_prev->conn_drain_next =
13232 13279 connp->conn_drain_next;
13233 13280 connp->conn_drain_next->conn_drain_prev =
13234 13281 connp->conn_drain_prev;
13235 13282 if (idl->idl_conn == connp)
13236 13283 idl->idl_conn = connp->conn_drain_next;
13237 13284 }
13238 13285
13239 13286 /*
13240 13287 * NOTE: because conn_idl is associated with a specific drain
13241 13288 * list which in turn is tied to the index the TX ring
13242 13289 * (txl_cookie) hashes to, and because the TX ring can change
13243 13290 * over the lifetime of the conn_t, we must clear conn_idl so
13244 13291 * a subsequent conn_drain_insert() will set conn_idl again
13245 13292 * based on the latest txl_cookie.
13246 13293 */
13247 13294 connp->conn_idl = NULL;
13248 13295 }
13249 13296 connp->conn_drain_next = NULL;
13250 13297 connp->conn_drain_prev = NULL;
13251 13298
13252 13299 conn_clrqfull(connp, NULL);
13253 13300 /*
13254 13301 	 * For streams based sockets, open up flow control.
13255 13302 */
13256 13303 if (!IPCL_IS_NONSTR(connp))
13257 13304 enableok(connp->conn_wq);
13258 13305 }
13259 13306
13260 13307 /*
13261 13308 * This conn is closing, and we are called from ip_close. OR
13262 13309 * this conn is draining because flow-control on the ill has been relieved.
13263 13310 *
13264 13311 * We must also need to remove conn's on this idl from the list, and also
13265 13312  * We must remove the conns on this idl from the list, and also
13266 13313 */
13267 13314 static void
13268 13315 conn_drain(conn_t *connp, boolean_t closing)
13269 13316 {
13270 13317 idl_t *idl;
13271 13318 conn_t *next_connp;
13272 13319
13273 13320 /*
13274 13321 * connp->conn_idl is stable at this point, and no lock is needed
13275 13322 * to check it. If we are called from ip_close, close has already
13276 13323 * set CONN_CLOSING, thus freezing the value of conn_idl, and
13277 13324 * called us only because conn_idl is non-null. If we are called thru
13278 13325 * service, conn_idl could be null, but it cannot change because
13279 13326 * service is single-threaded per queue, and there cannot be another
13280 13327 * instance of service trying to call conn_drain_insert on this conn
13281 13328 * now.
13282 13329 */
13283 13330 ASSERT(!closing || connp == NULL || connp->conn_idl != NULL);
13284 13331
13285 13332 /*
13286 13333 * If the conn doesn't exist or is not on a drain list, bail.
13287 13334 */
13288 13335 if (connp == NULL || connp->conn_idl == NULL ||
13289 13336 connp->conn_drain_prev == NULL) {
13290 13337 return;
13291 13338 }
13292 13339
13293 13340 idl = connp->conn_idl;
13294 13341 ASSERT(MUTEX_HELD(&idl->idl_lock));
13295 13342
13296 13343 if (!closing) {
13297 13344 next_connp = connp->conn_drain_next;
13298 13345 while (next_connp != connp) {
13299 13346 conn_t *delconnp = next_connp;
13300 13347
13301 13348 next_connp = next_connp->conn_drain_next;
13302 13349 conn_drain_remove(delconnp);
13303 13350 }
13304 13351 ASSERT(connp->conn_drain_next == idl->idl_conn);
13305 13352 }
13306 13353 conn_drain_remove(connp);
13307 13354 }
13308 13355
13309 13356 /*
13310 13357 * Write service routine. Shared perimeter entry point.
13311 13358  * The device queue's message count has fallen below the low water mark and
13312 13359  * STREAMS has backenabled the ill_wq. Send sockfs notification about
13313 13360  * flow-control on each waiting conn.
13314 13361 */
13315 13362 void
13316 13363 ip_wsrv(queue_t *q)
13317 13364 {
13318 13365 ill_t *ill;
13319 13366
13320 13367 ill = (ill_t *)q->q_ptr;
13321 13368 if (ill->ill_state_flags == 0) {
13322 13369 ip_stack_t *ipst = ill->ill_ipst;
13323 13370
13324 13371 /*
13325 13372 * The device flow control has opened up.
13326 13373 * Walk through conn drain lists and qenable the
13327 13374 * first conn in each list. This makes sense only
13328 13375 * if the stream is fully plumbed and setup.
13329 13376 		 * if the stream is fully plumbed and set up.
13330 13377 */
13331 13378 ip1dbg(("ip_wsrv: walking\n"));
13332 13379 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[0]);
13333 13380 enableok(ill->ill_wq);
13334 13381 }
13335 13382 }
13336 13383
13337 13384 /*
13338 13385 * Callback to disable flow control in IP.
13339 13386 *
13340 13387 * This is a mac client callback added when the DLD_CAPAB_DIRECT capability
13341 13388 * is enabled.
13342 13389 *
13343 13390 * When MAC_TX() is not able to send any more packets, dld sets its queue
13344 13391  * to QFULL and enables the STREAMS flow control. Later, when the underlying
13345 13392  * driver is able to continue to send packets, it calls the
13346 13393  * mac_tx_(ring_)update() function and wakes up the corresponding mac worker
13347 13394  * threads, which in turn call this callback function and disable flow control.
13348 13395 */
13349 13396 void
13350 13397 ill_flow_enable(void *arg, ip_mac_tx_cookie_t cookie)
13351 13398 {
13352 13399 ill_t *ill = (ill_t *)arg;
13353 13400 ip_stack_t *ipst = ill->ill_ipst;
13354 13401 idl_tx_list_t *idl_txl;
13355 13402
13356 13403 idl_txl = &ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
13357 13404 mutex_enter(&idl_txl->txl_lock);
13358 13405 	/* add code to set a flag to indicate idl_txl is enabled */
13359 13406 conn_walk_drain(ipst, idl_txl);
13360 13407 mutex_exit(&idl_txl->txl_lock);
13361 13408 }
13362 13409
13363 13410 /*
13364 13411 * Flow control has been relieved and STREAMS has backenabled us; drain
13365 13412 * all the conn lists on `tx_list'.
13366 13413 */
13367 13414 static void
13368 13415 conn_walk_drain(ip_stack_t *ipst, idl_tx_list_t *tx_list)
13369 13416 {
13370 13417 int i;
13371 13418 idl_t *idl;
13372 13419
13373 13420 IP_STAT(ipst, ip_conn_walk_drain);
13374 13421
13375 13422 for (i = 0; i < ipst->ips_conn_drain_list_cnt; i++) {
13376 13423 idl = &tx_list->txl_drain_list[i];
13377 13424 mutex_enter(&idl->idl_lock);
13378 13425 conn_drain(idl->idl_conn, B_FALSE);
13379 13426 mutex_exit(&idl->idl_lock);
13380 13427 }
13381 13428 }
13382 13429
13383 13430 /*
13384 13431  * Determine if the ill and multicast aspects of the packet
13385 13432  * "match" the conn.
13386 13433 */
13387 13434 boolean_t
13388 13435 conn_wantpacket(conn_t *connp, ip_recv_attr_t *ira, ipha_t *ipha)
13389 13436 {
13390 13437 ill_t *ill = ira->ira_rill;
13391 13438 zoneid_t zoneid = ira->ira_zoneid;
13392 13439 uint_t in_ifindex;
13393 13440 ipaddr_t dst, src;
13394 13441
13395 13442 dst = ipha->ipha_dst;
13396 13443 src = ipha->ipha_src;
13397 13444
13398 13445 /*
13399 13446 * conn_incoming_ifindex is set by IP_BOUND_IF which limits
13400 13447 * unicast, broadcast and multicast reception to
13401 13448 * conn_incoming_ifindex.
13402 13449 * conn_wantpacket is called for unicast, broadcast and
13403 13450 * multicast packets.
13404 13451 */
13405 13452 in_ifindex = connp->conn_incoming_ifindex;
13406 13453
13407 13454 /* mpathd can bind to the under IPMP interface, which we allow */
13408 13455 if (in_ifindex != 0 && in_ifindex != ill->ill_phyint->phyint_ifindex) {
13409 13456 if (!IS_UNDER_IPMP(ill))
13410 13457 return (B_FALSE);
13411 13458
13412 13459 if (in_ifindex != ipmp_ill_get_ipmp_ifindex(ill))
13413 13460 return (B_FALSE);
13414 13461 }
13415 13462
13416 13463 if (!IPCL_ZONE_MATCH(connp, zoneid))
13417 13464 return (B_FALSE);
13418 13465
13419 13466 if (!(ira->ira_flags & IRAF_MULTICAST))
13420 13467 return (B_TRUE);
13421 13468
13422 13469 if (connp->conn_multi_router) {
13423 13470 /* multicast packet and multicast router socket: send up */
13424 13471 return (B_TRUE);
13425 13472 }
13426 13473
13427 13474 if (ipha->ipha_protocol == IPPROTO_PIM ||
13428 13475 ipha->ipha_protocol == IPPROTO_RSVP)
13429 13476 return (B_TRUE);
13430 13477
13431 13478 return (conn_hasmembers_ill_withsrc_v4(connp, dst, src, ira->ira_ill));
13432 13479 }
13433 13480
13434 13481 void
13435 13482 conn_setqfull(conn_t *connp, boolean_t *flow_stopped)
13436 13483 {
13437 13484 if (IPCL_IS_NONSTR(connp)) {
13438 13485 (*connp->conn_upcalls->su_txq_full)
13439 13486 (connp->conn_upper_handle, B_TRUE);
13440 13487 if (flow_stopped != NULL)
13441 13488 *flow_stopped = B_TRUE;
13442 13489 } else {
13443 13490 queue_t *q = connp->conn_wq;
13444 13491
13445 13492 ASSERT(q != NULL);
13446 13493 if (!(q->q_flag & QFULL)) {
13447 13494 mutex_enter(QLOCK(q));
13448 13495 if (!(q->q_flag & QFULL)) {
13449 13496 /* still need to set QFULL */
13450 13497 q->q_flag |= QFULL;
13451 13498 /* set flow_stopped to true under QLOCK */
13452 13499 if (flow_stopped != NULL)
13453 13500 *flow_stopped = B_TRUE;
13454 13501 mutex_exit(QLOCK(q));
13455 13502 } else {
13456 13503 /* flow_stopped is left unchanged */
13457 13504 mutex_exit(QLOCK(q));
13458 13505 }
13459 13506 }
13460 13507 }
13461 13508 }
13462 13509
13463 13510 void
13464 13511 conn_clrqfull(conn_t *connp, boolean_t *flow_stopped)
13465 13512 {
13466 13513 if (IPCL_IS_NONSTR(connp)) {
13467 13514 (*connp->conn_upcalls->su_txq_full)
13468 13515 (connp->conn_upper_handle, B_FALSE);
13469 13516 if (flow_stopped != NULL)
13470 13517 *flow_stopped = B_FALSE;
13471 13518 } else {
13472 13519 queue_t *q = connp->conn_wq;
13473 13520
13474 13521 ASSERT(q != NULL);
13475 13522 if (q->q_flag & QFULL) {
13476 13523 mutex_enter(QLOCK(q));
13477 13524 if (q->q_flag & QFULL) {
13478 13525 q->q_flag &= ~QFULL;
13479 13526 /* set flow_stopped to false under QLOCK */
13480 13527 if (flow_stopped != NULL)
13481 13528 *flow_stopped = B_FALSE;
13482 13529 mutex_exit(QLOCK(q));
13483 13530 if (q->q_flag & QWANTW)
13484 13531 qbackenable(q, 0);
13485 13532 } else {
13486 13533 /* flow_stopped is left unchanged */
13487 13534 mutex_exit(QLOCK(q));
13488 13535 }
13489 13536 }
13490 13537 }
13491 13538
13492 13539 mutex_enter(&connp->conn_lock);
13493 13540 connp->conn_blocked = B_FALSE;
13494 13541 mutex_exit(&connp->conn_lock);
13495 13542 }
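/*
 * Editorial sketch (not part of ip.c): a minimal illustration of how a
 * transport might pair conn_setqfull() and conn_clrqfull() above. The
 * "would_block" flag stands in for whatever flow-control signal the
 * transport observes; the function name is hypothetical.
 */
static void
example_flow_control(conn_t *connp, boolean_t would_block)
{
	boolean_t flow_stopped = B_FALSE;

	if (would_block) {
		/* Mark the conn flow-controlled (QFULL or su_txq_full) */
		conn_setqfull(connp, &flow_stopped);
	}

	/* ... later, once the lower layer has drained ... */

	if (flow_stopped) {
		/* Clears QFULL and backenables any waiting writer */
		conn_clrqfull(connp, &flow_stopped);
	}
}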
13496 13543
13497 13544 /*
13498 13545 * Return the length in bytes of the IPv4 headers (base header, label, and
13499 13546 * other IP options) that will be needed based on the
13500 13547 * ip_pkt_t structure passed by the caller.
13501 13548 *
13502 13549 * The returned length does not include the length of the upper level
13503 13550 * protocol (ULP) header.
13504 13551 * The caller needs to check that the length doesn't exceed the max for IPv4.
13505 13552 */
13506 13553 int
13507 13554 ip_total_hdrs_len_v4(const ip_pkt_t *ipp)
13508 13555 {
13509 13556 int len;
13510 13557
13511 13558 len = IP_SIMPLE_HDR_LENGTH;
13512 13559 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13513 13560 ASSERT(ipp->ipp_label_len_v4 != 0);
13514 13561 /* We need to round up here */
13515 13562 len += (ipp->ipp_label_len_v4 + 3) & ~3;
13516 13563 }
13517 13564
13518 13565 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13519 13566 ASSERT(ipp->ipp_ipv4_options_len != 0);
13520 13567 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13521 13568 len += ipp->ipp_ipv4_options_len;
13522 13569 }
13523 13570 return (len);
13524 13571 }
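/*
 * Worked example of the label rounding above (editorial note): with a
 * 9-byte IPv4 label, (9 + 3) & ~3 == 12, so the header length becomes
 * IP_SIMPLE_HDR_LENGTH (20) + 12 = 32 bytes before any other options.
 */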
13525 13572
13526 13573 /*
13527 13574 * All-purpose routine to build an IPv4 header with options based
13528 13575 * on the abstract ip_pkt_t.
13529 13576 *
13530 13577 * The caller has to set the source and destination address as well as
13531 13578 * ipha_length. The caller has to massage any source route and compensate
13532 13579 * for the ULP pseudo-header checksum due to the source route.
13533 13580 */
13534 13581 void
13535 13582 ip_build_hdrs_v4(uchar_t *buf, uint_t buf_len, const ip_pkt_t *ipp,
13536 13583 uint8_t protocol)
13537 13584 {
13538 13585 ipha_t *ipha = (ipha_t *)buf;
13539 13586 uint8_t *cp;
13540 13587
13541 13588 /* Initialize IPv4 header */
13542 13589 ipha->ipha_type_of_service = ipp->ipp_type_of_service;
13543 13590 ipha->ipha_length = 0; /* Caller will set later */
13544 13591 ipha->ipha_ident = 0;
13545 13592 ipha->ipha_fragment_offset_and_flags = 0;
13546 13593 ipha->ipha_ttl = ipp->ipp_unicast_hops;
13547 13594 ipha->ipha_protocol = protocol;
13548 13595 ipha->ipha_hdr_checksum = 0;
13549 13596
13550 13597 if ((ipp->ipp_fields & IPPF_ADDR) &&
13551 13598 IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr))
13552 13599 ipha->ipha_src = ipp->ipp_addr_v4;
13553 13600
13554 13601 cp = (uint8_t *)&ipha[1];
13555 13602 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13556 13603 ASSERT(ipp->ipp_label_len_v4 != 0);
13557 13604 bcopy(ipp->ipp_label_v4, cp, ipp->ipp_label_len_v4);
13558 13605 cp += ipp->ipp_label_len_v4;
13559 13606 /* We need to round up here */
13560 13607 while ((uintptr_t)cp & 0x3) {
13561 13608 *cp++ = IPOPT_NOP;
13562 13609 }
13563 13610 }
13564 13611
13565 13612 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13566 13613 ASSERT(ipp->ipp_ipv4_options_len != 0);
13567 13614 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13568 13615 bcopy(ipp->ipp_ipv4_options, cp, ipp->ipp_ipv4_options_len);
13569 13616 cp += ipp->ipp_ipv4_options_len;
13570 13617 }
13571 13618 ipha->ipha_version_and_hdr_length =
13572 13619 (uint8_t)((IP_VERSION << 4) + buf_len / 4);
13573 13620
13574 13621 ASSERT((int)(cp - buf) == buf_len);
13575 13622 }
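/*
 * Editorial sketch (not part of ip.c): the calling pattern the two
 * routines above are designed for. The caller sizes the header with
 * ip_total_hdrs_len_v4(), lets ip_build_hdrs_v4() fill in the options,
 * and then sets the fields the builder leaves alone (addresses and
 * ipha_length). The function name and the use of IPPROTO_UDP here are
 * hypothetical.
 */
static mblk_t *
example_build_v4_header(const ip_pkt_t *ipp, ipaddr_t src, ipaddr_t dst)
{
	uint_t	hdrlen = ip_total_hdrs_len_v4(ipp);
	mblk_t	*mp = allocb(hdrlen, BPRI_MED);
	ipha_t	*ipha;

	if (mp == NULL)
		return (NULL);
	ip_build_hdrs_v4(mp->b_rptr, hdrlen, ipp, IPPROTO_UDP);
	ipha = (ipha_t *)mp->b_rptr;
	ipha->ipha_src = src;
	ipha->ipha_dst = dst;
	/* The caller still owes the final ipha_length once payload is known */
	ipha->ipha_length = htons((uint16_t)hdrlen);
	mp->b_wptr = mp->b_rptr + hdrlen;
	return (mp);
}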
13576 13623
13577 13624 /* Allocate the private structure */
13578 13625 static int
13579 13626 ip_priv_alloc(void **bufp)
13580 13627 {
13581 13628 void *buf;
13582 13629
13583 13630 if ((buf = kmem_alloc(sizeof (ip_priv_t), KM_NOSLEEP)) == NULL)
13584 13631 return (ENOMEM);
13585 13632
13586 13633 *bufp = buf;
13587 13634 return (0);
13588 13635 }
13589 13636
13590 13637 /* Function to delete the private structure */
13591 13638 void
13592 13639 ip_priv_free(void *buf)
13593 13640 {
13594 13641 ASSERT(buf != NULL);
13595 13642 kmem_free(buf, sizeof (ip_priv_t));
13596 13643 }
13597 13644
13598 13645 /*
13599 13646 * The entry point for IPPF processing.
13600 13647 * If the classifier (IPGPC_CLASSIFY) is not loaded and configured, the
13601 13648 * routine just returns.
13602 13649 *
13603 13650 * When called, ip_process generates an ipp_packet_t structure
13604 13651  * which holds the state information for this packet and invokes
13605 13652  * the classifier (via ipp_packet_process). The classification, depending on
13606 13653 * configured filters, results in a list of actions for this packet. Invoking
13607 13654 * an action may cause the packet to be dropped, in which case we return NULL.
13608 13655 * proc indicates the callout position for
13609 13656 * this packet and ill is the interface this packet arrived on or will leave
13610 13657 * on (inbound and outbound resp.).
13611 13658 *
13612 13659  * We do the processing on the rill (mapped to the upper ill if ipmp), but
13613 13660  * bump the MIB counters on the ill corresponding to the destination IP address.
13614 13661 */
13615 13662 mblk_t *
13616 13663 ip_process(ip_proc_t proc, mblk_t *mp, ill_t *rill, ill_t *ill)
13617 13664 {
13618 13665 ip_priv_t *priv;
13619 13666 ipp_action_id_t aid;
13620 13667 int rc = 0;
13621 13668 ipp_packet_t *pp;
13622 13669
13623 13670 /* If the classifier is not loaded, return */
13624 13671 if ((aid = ipp_action_lookup(IPGPC_CLASSIFY)) == IPP_ACTION_INVAL) {
13625 13672 return (mp);
13626 13673 }
13627 13674
13628 13675 ASSERT(mp != NULL);
13629 13676
13630 13677 /* Allocate the packet structure */
13631 13678 rc = ipp_packet_alloc(&pp, "ip", aid);
13632 13679 if (rc != 0)
13633 13680 goto drop;
13634 13681
13635 13682 /* Allocate the private structure */
13636 13683 rc = ip_priv_alloc((void **)&priv);
13637 13684 if (rc != 0) {
13638 13685 ipp_packet_free(pp);
13639 13686 goto drop;
13640 13687 }
13641 13688 priv->proc = proc;
13642 13689 priv->ill_index = ill_get_upper_ifindex(rill);
13643 13690
13644 13691 ipp_packet_set_private(pp, priv, ip_priv_free);
13645 13692 ipp_packet_set_data(pp, mp);
13646 13693
13647 13694 /* Invoke the classifier */
13648 13695 rc = ipp_packet_process(&pp);
13649 13696 if (pp != NULL) {
13650 13697 mp = ipp_packet_get_data(pp);
13651 13698 ipp_packet_free(pp);
13652 13699 if (rc != 0)
13653 13700 goto drop;
13654 13701 return (mp);
13655 13702 } else {
13656 13703 /* No mp to trace in ip_drop_input/ip_drop_output */
13657 13704 mp = NULL;
13658 13705 }
13659 13706 drop:
13660 13707 if (proc == IPP_LOCAL_IN || proc == IPP_FWD_IN) {
13661 13708 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
13662 13709 ip_drop_input("ip_process", mp, ill);
13663 13710 } else {
13664 13711 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
13665 13712 ip_drop_output("ip_process", mp, ill);
13666 13713 }
13667 13714 freemsg(mp);
13668 13715 return (NULL);
13669 13716 }
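/*
 * Editorial note: an illustrative calling pattern for ip_process(),
 * since the routine consumes the packet on drop:
 *
 *	mp = ip_process(IPP_LOCAL_IN, mp, ira->ira_rill, ira->ira_ill);
 *	if (mp == NULL)
 *		return;		(already counted and freed above)
 *
 * Callers must not touch mp again once NULL comes back.
 */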
13670 13717
13671 13718 /*
13672 13719 * Propagate a multicast group membership operation (add/drop) on
13673 13720 * all the interfaces crossed by the related multirt routes.
13674 13721 * The call is considered successful if the operation succeeds
13675 13722 * on at least one interface.
13676 13723 *
13677 13724 * This assumes that a set of IRE_HOST/RTF_MULTIRT has been created for the
13678 13725 * multicast addresses with the ire argument being the first one.
13679 13726  * We walk the bucket to find all of those.
13680 13727 *
13681 13728 * Common to IPv4 and IPv6.
13682 13729 */
13683 13730 static int
13684 13731 ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
13685 13732 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
13686 13733 ire_t *ire, conn_t *connp, boolean_t checkonly, const in6_addr_t *v6group,
13687 13734 mcast_record_t fmode, const in6_addr_t *v6src)
13688 13735 {
13689 13736 ire_t *ire_gw;
13690 13737 irb_t *irb;
13691 13738 int ifindex;
13692 13739 int error = 0;
13693 13740 int result;
13694 13741 ip_stack_t *ipst = ire->ire_ipst;
13695 13742 ipaddr_t group;
13696 13743 boolean_t isv6;
13697 13744 int match_flags;
13698 13745
13699 13746 if (IN6_IS_ADDR_V4MAPPED(v6group)) {
13700 13747 IN6_V4MAPPED_TO_IPADDR(v6group, group);
13701 13748 isv6 = B_FALSE;
13702 13749 } else {
13703 13750 isv6 = B_TRUE;
13704 13751 }
13705 13752
13706 13753 irb = ire->ire_bucket;
13707 13754 ASSERT(irb != NULL);
13708 13755
13709 13756 result = 0;
13710 13757 irb_refhold(irb);
13711 13758 for (; ire != NULL; ire = ire->ire_next) {
13712 13759 if ((ire->ire_flags & RTF_MULTIRT) == 0)
13713 13760 continue;
13714 13761
13715 13762 /* We handle -ifp routes by matching on the ill if set */
13716 13763 match_flags = MATCH_IRE_TYPE;
13717 13764 if (ire->ire_ill != NULL)
13718 13765 match_flags |= MATCH_IRE_ILL;
13719 13766
13720 13767 if (isv6) {
13721 13768 if (!IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, v6group))
13722 13769 continue;
13723 13770
13724 13771 ire_gw = ire_ftable_lookup_v6(&ire->ire_gateway_addr_v6,
13725 13772 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13726 13773 match_flags, 0, ipst, NULL);
13727 13774 } else {
13728 13775 if (ire->ire_addr != group)
13729 13776 continue;
13730 13777
13731 13778 ire_gw = ire_ftable_lookup_v4(ire->ire_gateway_addr,
13732 13779 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13733 13780 match_flags, 0, ipst, NULL);
13734 13781 }
13735 13782 /* No interface route exists for the gateway; skip this ire. */
13736 13783 if (ire_gw == NULL)
13737 13784 continue;
13738 13785 if (ire_gw->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
13739 13786 ire_refrele(ire_gw);
13740 13787 continue;
13741 13788 }
13742 13789 ASSERT(ire_gw->ire_ill != NULL); /* IRE_INTERFACE */
13743 13790 ifindex = ire_gw->ire_ill->ill_phyint->phyint_ifindex;
13744 13791
13745 13792 /*
13746 13793 * The operation is considered a success if
13747 13794 * it succeeds at least once on any one interface.
13748 13795 */
13749 13796 error = fn(connp, checkonly, v6group, INADDR_ANY, ifindex,
13750 13797 fmode, v6src);
13751 13798 if (error == 0)
13752 13799 result = CGTP_MCAST_SUCCESS;
13753 13800
13754 13801 ire_refrele(ire_gw);
13755 13802 }
13756 13803 irb_refrele(irb);
13757 13804 /*
13758 13805 * Consider the call as successful if we succeeded on at least
13759 13806 * one interface. Otherwise, return the last encountered error.
13760 13807 */
13761 13808 return (result == CGTP_MCAST_SUCCESS ? 0 : error);
13762 13809 }
13763 13810
13764 13811 /*
13765 13812 * Return the expected CGTP hooks version number.
13766 13813 */
13767 13814 int
13768 13815 ip_cgtp_filter_supported(void)
13769 13816 {
13770 13817 return (ip_cgtp_filter_rev);
13771 13818 }
13772 13819
13773 13820 /*
13774 13821 * CGTP hooks can be registered by invoking this function.
13775 13822 * Checks that the version number matches.
13776 13823 */
13777 13824 int
13778 13825 ip_cgtp_filter_register(netstackid_t stackid, cgtp_filter_ops_t *ops)
13779 13826 {
13780 13827 netstack_t *ns;
13781 13828 ip_stack_t *ipst;
13782 13829
13783 13830 if (ops->cfo_filter_rev != CGTP_FILTER_REV)
13784 13831 return (ENOTSUP);
13785 13832
13786 13833 ns = netstack_find_by_stackid(stackid);
13787 13834 if (ns == NULL)
13788 13835 return (EINVAL);
13789 13836 ipst = ns->netstack_ip;
13790 13837 ASSERT(ipst != NULL);
13791 13838
13792 13839 if (ipst->ips_ip_cgtp_filter_ops != NULL) {
13793 13840 netstack_rele(ns);
13794 13841 return (EALREADY);
13795 13842 }
13796 13843
13797 13844 ipst->ips_ip_cgtp_filter_ops = ops;
13798 13845
13799 13846 ill_set_inputfn_all(ipst);
13800 13847
13801 13848 netstack_rele(ns);
13802 13849 return (0);
13803 13850 }
13804 13851
13805 13852 /*
13806 13853 * CGTP hooks can be unregistered by invoking this function.
13807 13854 * Returns ENXIO if there was no registration.
13808 13855 * Returns EBUSY if the ndd variable has not been turned off.
13809 13856 */
13810 13857 int
13811 13858 ip_cgtp_filter_unregister(netstackid_t stackid)
13812 13859 {
13813 13860 netstack_t *ns;
13814 13861 ip_stack_t *ipst;
13815 13862
13816 13863 ns = netstack_find_by_stackid(stackid);
13817 13864 if (ns == NULL)
13818 13865 return (EINVAL);
13819 13866 ipst = ns->netstack_ip;
13820 13867 ASSERT(ipst != NULL);
13821 13868
13822 13869 if (ipst->ips_ip_cgtp_filter) {
13823 13870 netstack_rele(ns);
13824 13871 return (EBUSY);
13825 13872 }
13826 13873
13827 13874 if (ipst->ips_ip_cgtp_filter_ops == NULL) {
13828 13875 netstack_rele(ns);
13829 13876 return (ENXIO);
13830 13877 }
13831 13878 ipst->ips_ip_cgtp_filter_ops = NULL;
13832 13879
13833 13880 ill_set_inputfn_all(ipst);
13834 13881
13835 13882 netstack_rele(ns);
13836 13883 return (0);
13837 13884 }
13838 13885
13839 13886 /*
13840 13887 * Check whether there is a CGTP filter registration.
13841 13888 * Returns non-zero if there is a registration, otherwise returns zero.
13842 13889 * Note: returns zero if bad stackid.
13843 13890 */
13844 13891 int
13845 13892 ip_cgtp_filter_is_registered(netstackid_t stackid)
13846 13893 {
13847 13894 netstack_t *ns;
13848 13895 ip_stack_t *ipst;
13849 13896 int ret;
13850 13897
13851 13898 ns = netstack_find_by_stackid(stackid);
13852 13899 if (ns == NULL)
13853 13900 return (0);
13854 13901 ipst = ns->netstack_ip;
13855 13902 ASSERT(ipst != NULL);
13856 13903
13857 13904 if (ipst->ips_ip_cgtp_filter_ops != NULL)
13858 13905 ret = 1;
13859 13906 else
13860 13907 ret = 0;
13861 13908
13862 13909 netstack_rele(ns);
13863 13910 return (ret);
13864 13911 }
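/*
 * Editorial sketch (not part of ip.c): how a CGTP filter module might
 * drive the registration routines above. The function name and the
 * caller-supplied ops are hypothetical; the revision handshake is the
 * part the routines actually enforce.
 */
static int
example_cgtp_attach(netstackid_t stackid, cgtp_filter_ops_t *ops)
{
	/* Carry the revision this module was built against */
	ops->cfo_filter_rev = CGTP_FILTER_REV;

	/* EALREADY means another filter beat us to this stack */
	return (ip_cgtp_filter_register(stackid, ops));
}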
13865 13912
13866 13913 static int
13867 13914 ip_squeue_switch(int val)
13868 13915 {
13869 13916 int rval;
13870 13917
13871 13918 switch (val) {
13872 13919 case IP_SQUEUE_ENTER_NODRAIN:
13873 13920 rval = SQ_NODRAIN;
13874 13921 break;
13875 13922 case IP_SQUEUE_ENTER:
13876 13923 rval = SQ_PROCESS;
13877 13924 break;
13878 13925 case IP_SQUEUE_FILL:
13879 13926 default:
13880 13927 rval = SQ_FILL;
13881 13928 break;
13882 13929 }
13883 13930 return (rval);
13884 13931 }
13885 13932
13886 13933 static void *
13887 13934 ip_kstat2_init(netstackid_t stackid, ip_stat_t *ip_statisticsp)
13888 13935 {
13889 13936 kstat_t *ksp;
13890 13937
13891 13938 ip_stat_t template = {
13892 13939 { "ip_udp_fannorm", KSTAT_DATA_UINT64 },
13893 13940 { "ip_udp_fanmb", KSTAT_DATA_UINT64 },
13894 13941 { "ip_recv_pullup", KSTAT_DATA_UINT64 },
13895 13942 { "ip_db_ref", KSTAT_DATA_UINT64 },
13896 13943 { "ip_notaligned", KSTAT_DATA_UINT64 },
13897 13944 { "ip_multimblk", KSTAT_DATA_UINT64 },
13898 13945 { "ip_opt", KSTAT_DATA_UINT64 },
13899 13946 { "ipsec_proto_ahesp", KSTAT_DATA_UINT64 },
13900 13947 { "ip_conn_flputbq", KSTAT_DATA_UINT64 },
13901 13948 { "ip_conn_walk_drain", KSTAT_DATA_UINT64 },
13902 13949 { "ip_out_sw_cksum", KSTAT_DATA_UINT64 },
13903 13950 { "ip_out_sw_cksum_bytes", KSTAT_DATA_UINT64 },
13904 13951 { "ip_in_sw_cksum", KSTAT_DATA_UINT64 },
13905 13952 { "ip_ire_reclaim_calls", KSTAT_DATA_UINT64 },
13906 13953 { "ip_ire_reclaim_deleted", KSTAT_DATA_UINT64 },
13907 13954 { "ip_nce_reclaim_calls", KSTAT_DATA_UINT64 },
13908 13955 { "ip_nce_reclaim_deleted", KSTAT_DATA_UINT64 },
13909 13956 { "ip_dce_reclaim_calls", KSTAT_DATA_UINT64 },
13910 13957 { "ip_dce_reclaim_deleted", KSTAT_DATA_UINT64 },
13911 13958 { "ip_tcp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13912 13959 { "ip_tcp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13913 13960 { "ip_tcp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13914 13961 { "ip_udp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13915 13962 { "ip_udp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13916 13963 { "ip_udp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13917 13964 { "conn_in_recvdstaddr", KSTAT_DATA_UINT64 },
13918 13965 { "conn_in_recvopts", KSTAT_DATA_UINT64 },
13919 13966 { "conn_in_recvif", KSTAT_DATA_UINT64 },
13920 13967 { "conn_in_recvslla", KSTAT_DATA_UINT64 },
13921 13968 { "conn_in_recvucred", KSTAT_DATA_UINT64 },
13922 13969 { "conn_in_recvttl", KSTAT_DATA_UINT64 },
13923 13970 { "conn_in_recvhopopts", KSTAT_DATA_UINT64 },
13924 13971 { "conn_in_recvhoplimit", KSTAT_DATA_UINT64 },
13925 13972 { "conn_in_recvdstopts", KSTAT_DATA_UINT64 },
13926 13973 { "conn_in_recvrthdrdstopts", KSTAT_DATA_UINT64 },
13927 13974 { "conn_in_recvrthdr", KSTAT_DATA_UINT64 },
13928 13975 { "conn_in_recvpktinfo", KSTAT_DATA_UINT64 },
13929 13976 { "conn_in_recvtclass", KSTAT_DATA_UINT64 },
13930 13977 { "conn_in_timestamp", KSTAT_DATA_UINT64 },
13931 13978 };
13932 13979
13933 13980 ksp = kstat_create_netstack("ip", 0, "ipstat", "net",
13934 13981 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
13935 13982 KSTAT_FLAG_VIRTUAL, stackid);
13936 13983
13937 13984 if (ksp == NULL)
13938 13985 return (NULL);
13939 13986
13940 13987 bcopy(&template, ip_statisticsp, sizeof (template));
13941 13988 ksp->ks_data = (void *)ip_statisticsp;
13942 13989 ksp->ks_private = (void *)(uintptr_t)stackid;
13943 13990
13944 13991 kstat_install(ksp);
13945 13992 return (ksp);
13946 13993 }
13947 13994
13948 13995 static void
13949 13996 ip_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
13950 13997 {
13951 13998 if (ksp != NULL) {
13952 13999 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
13953 14000 kstat_delete_netstack(ksp, stackid);
13954 14001 }
13955 14002 }
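/*
 * Editorial note: the intended per-netstack lifecycle of the kstat
 * above, assuming ips_ip_statistics is the stack's ip_stat_t:
 *
 *	void *ksp = ip_kstat2_init(stackid, &ipst->ips_ip_statistics);
 *	...
 *	ip_kstat2_fini(stackid, (kstat_t *)ksp);
 *
 * i.e. each stack creates its own virtual kstat at init and deletes it
 * (matched by stackid) at fini.
 */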
13956 14003
13957 14004 static void *
13958 14005 ip_kstat_init(netstackid_t stackid, ip_stack_t *ipst)
13959 14006 {
13960 14007 kstat_t *ksp;
13961 14008
13962 14009 ip_named_kstat_t template = {
13963 14010 { "forwarding", KSTAT_DATA_UINT32, 0 },
13964 14011 { "defaultTTL", KSTAT_DATA_UINT32, 0 },
13965 14012 { "inReceives", KSTAT_DATA_UINT64, 0 },
13966 14013 { "inHdrErrors", KSTAT_DATA_UINT32, 0 },
13967 14014 { "inAddrErrors", KSTAT_DATA_UINT32, 0 },
13968 14015 { "forwDatagrams", KSTAT_DATA_UINT64, 0 },
13969 14016 { "inUnknownProtos", KSTAT_DATA_UINT32, 0 },
13970 14017 { "inDiscards", KSTAT_DATA_UINT32, 0 },
13971 14018 { "inDelivers", KSTAT_DATA_UINT64, 0 },
13972 14019 { "outRequests", KSTAT_DATA_UINT64, 0 },
13973 14020 { "outDiscards", KSTAT_DATA_UINT32, 0 },
13974 14021 { "outNoRoutes", KSTAT_DATA_UINT32, 0 },
13975 14022 { "reasmTimeout", KSTAT_DATA_UINT32, 0 },
13976 14023 { "reasmReqds", KSTAT_DATA_UINT32, 0 },
13977 14024 { "reasmOKs", KSTAT_DATA_UINT32, 0 },
13978 14025 { "reasmFails", KSTAT_DATA_UINT32, 0 },
13979 14026 { "fragOKs", KSTAT_DATA_UINT32, 0 },
13980 14027 { "fragFails", KSTAT_DATA_UINT32, 0 },
13981 14028 { "fragCreates", KSTAT_DATA_UINT32, 0 },
13982 14029 { "addrEntrySize", KSTAT_DATA_INT32, 0 },
13983 14030 { "routeEntrySize", KSTAT_DATA_INT32, 0 },
13984 14031 { "netToMediaEntrySize", KSTAT_DATA_INT32, 0 },
13985 14032 { "routingDiscards", KSTAT_DATA_UINT32, 0 },
13986 14033 { "inErrs", KSTAT_DATA_UINT32, 0 },
13987 14034 { "noPorts", KSTAT_DATA_UINT32, 0 },
13988 14035 { "inCksumErrs", KSTAT_DATA_UINT32, 0 },
13989 14036 { "reasmDuplicates", KSTAT_DATA_UINT32, 0 },
13990 14037 { "reasmPartDups", KSTAT_DATA_UINT32, 0 },
13991 14038 { "forwProhibits", KSTAT_DATA_UINT32, 0 },
13992 14039 { "udpInCksumErrs", KSTAT_DATA_UINT32, 0 },
13993 14040 { "udpInOverflows", KSTAT_DATA_UINT32, 0 },
13994 14041 { "rawipInOverflows", KSTAT_DATA_UINT32, 0 },
13995 14042 { "ipsecInSucceeded", KSTAT_DATA_UINT32, 0 },
13996 14043 { "ipsecInFailed", KSTAT_DATA_INT32, 0 },
13997 14044 { "memberEntrySize", KSTAT_DATA_INT32, 0 },
13998 14045 { "inIPv6", KSTAT_DATA_UINT32, 0 },
13999 14046 { "outIPv6", KSTAT_DATA_UINT32, 0 },
14000 14047 { "outSwitchIPv6", KSTAT_DATA_UINT32, 0 },
14001 14048 };
14002 14049
14003 14050 ksp = kstat_create_netstack("ip", 0, "ip", "mib2", KSTAT_TYPE_NAMED,
14004 14051 NUM_OF_FIELDS(ip_named_kstat_t), 0, stackid);
14005 14052 if (ksp == NULL || ksp->ks_data == NULL)
14006 14053 return (NULL);
14007 14054
14008 14055 template.forwarding.value.ui32 = WE_ARE_FORWARDING(ipst) ? 1:2;
14009 14056 template.defaultTTL.value.ui32 = (uint32_t)ipst->ips_ip_def_ttl;
14010 14057 template.reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14011 14058 template.addrEntrySize.value.i32 = sizeof (mib2_ipAddrEntry_t);
14012 14059 template.routeEntrySize.value.i32 = sizeof (mib2_ipRouteEntry_t);
14013 14060
14014 14061 template.netToMediaEntrySize.value.i32 =
14015 14062 sizeof (mib2_ipNetToMediaEntry_t);
14016 14063
14017 14064 template.memberEntrySize.value.i32 = sizeof (ipv6_member_t);
14018 14065
14019 14066 bcopy(&template, ksp->ks_data, sizeof (template));
14020 14067 ksp->ks_update = ip_kstat_update;
14021 14068 ksp->ks_private = (void *)(uintptr_t)stackid;
14022 14069
14023 14070 kstat_install(ksp);
14024 14071 return (ksp);
14025 14072 }
14026 14073
14027 14074 static void
14028 14075 ip_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14029 14076 {
14030 14077 if (ksp != NULL) {
14031 14078 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14032 14079 kstat_delete_netstack(ksp, stackid);
14033 14080 }
14034 14081 }
14035 14082
14036 14083 static int
14037 14084 ip_kstat_update(kstat_t *kp, int rw)
14038 14085 {
14039 14086 ip_named_kstat_t *ipkp;
14040 14087 mib2_ipIfStatsEntry_t ipmib;
14041 14088 ill_walk_context_t ctx;
14042 14089 ill_t *ill;
14043 14090 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14044 14091 netstack_t *ns;
14045 14092 ip_stack_t *ipst;
14046 14093
14047 14094 if (kp == NULL || kp->ks_data == NULL)
14048 14095 return (EIO);
14049 14096
14050 14097 if (rw == KSTAT_WRITE)
14051 14098 return (EACCES);
14052 14099
14053 14100 ns = netstack_find_by_stackid(stackid);
14054 14101 if (ns == NULL)
14055 14102 return (-1);
14056 14103 ipst = ns->netstack_ip;
14057 14104 if (ipst == NULL) {
14058 14105 netstack_rele(ns);
14059 14106 return (-1);
14060 14107 }
14061 14108 ipkp = (ip_named_kstat_t *)kp->ks_data;
14062 14109
14063 14110 bcopy(&ipst->ips_ip_mib, &ipmib, sizeof (ipmib));
14064 14111 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
14065 14112 ill = ILL_START_WALK_V4(&ctx, ipst);
14066 14113 for (; ill != NULL; ill = ill_next(&ctx, ill))
14067 14114 ip_mib2_add_ip_stats(&ipmib, ill->ill_ip_mib);
14068 14115 rw_exit(&ipst->ips_ill_g_lock);
14069 14116
14070 14117 ipkp->forwarding.value.ui32 = ipmib.ipIfStatsForwarding;
14071 14118 ipkp->defaultTTL.value.ui32 = ipmib.ipIfStatsDefaultTTL;
14072 14119 ipkp->inReceives.value.ui64 = ipmib.ipIfStatsHCInReceives;
14073 14120 ipkp->inHdrErrors.value.ui32 = ipmib.ipIfStatsInHdrErrors;
14074 14121 ipkp->inAddrErrors.value.ui32 = ipmib.ipIfStatsInAddrErrors;
14075 14122 ipkp->forwDatagrams.value.ui64 = ipmib.ipIfStatsHCOutForwDatagrams;
14076 14123 ipkp->inUnknownProtos.value.ui32 = ipmib.ipIfStatsInUnknownProtos;
14077 14124 ipkp->inDiscards.value.ui32 = ipmib.ipIfStatsInDiscards;
14078 14125 ipkp->inDelivers.value.ui64 = ipmib.ipIfStatsHCInDelivers;
14079 14126 ipkp->outRequests.value.ui64 = ipmib.ipIfStatsHCOutRequests;
14080 14127 ipkp->outDiscards.value.ui32 = ipmib.ipIfStatsOutDiscards;
14081 14128 ipkp->outNoRoutes.value.ui32 = ipmib.ipIfStatsOutNoRoutes;
14082 14129 ipkp->reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14083 14130 ipkp->reasmReqds.value.ui32 = ipmib.ipIfStatsReasmReqds;
14084 14131 ipkp->reasmOKs.value.ui32 = ipmib.ipIfStatsReasmOKs;
14085 14132 ipkp->reasmFails.value.ui32 = ipmib.ipIfStatsReasmFails;
14086 14133 ipkp->fragOKs.value.ui32 = ipmib.ipIfStatsOutFragOKs;
14087 14134 ipkp->fragFails.value.ui32 = ipmib.ipIfStatsOutFragFails;
14088 14135 ipkp->fragCreates.value.ui32 = ipmib.ipIfStatsOutFragCreates;
14089 14136
14090 14137 ipkp->routingDiscards.value.ui32 = 0;
14091 14138 ipkp->inErrs.value.ui32 = ipmib.tcpIfStatsInErrs;
14092 14139 ipkp->noPorts.value.ui32 = ipmib.udpIfStatsNoPorts;
14093 14140 ipkp->inCksumErrs.value.ui32 = ipmib.ipIfStatsInCksumErrs;
14094 14141 ipkp->reasmDuplicates.value.ui32 = ipmib.ipIfStatsReasmDuplicates;
14095 14142 ipkp->reasmPartDups.value.ui32 = ipmib.ipIfStatsReasmPartDups;
14096 14143 ipkp->forwProhibits.value.ui32 = ipmib.ipIfStatsForwProhibits;
14097 14144 ipkp->udpInCksumErrs.value.ui32 = ipmib.udpIfStatsInCksumErrs;
14098 14145 ipkp->udpInOverflows.value.ui32 = ipmib.udpIfStatsInOverflows;
14099 14146 ipkp->rawipInOverflows.value.ui32 = ipmib.rawipIfStatsInOverflows;
14100 14147 ipkp->ipsecInSucceeded.value.ui32 = ipmib.ipsecIfStatsInSucceeded;
14101 14148 ipkp->ipsecInFailed.value.i32 = ipmib.ipsecIfStatsInFailed;
14102 14149
14103 14150 ipkp->inIPv6.value.ui32 = ipmib.ipIfStatsInWrongIPVersion;
14104 14151 ipkp->outIPv6.value.ui32 = ipmib.ipIfStatsOutWrongIPVersion;
14105 14152 ipkp->outSwitchIPv6.value.ui32 = ipmib.ipIfStatsOutSwitchIPVersion;
14106 14153
14107 14154 netstack_rele(ns);
14108 14155
14109 14156 return (0);
14110 14157 }
14111 14158
14112 14159 static void *
14113 14160 icmp_kstat_init(netstackid_t stackid)
14114 14161 {
14115 14162 kstat_t *ksp;
14116 14163
14117 14164 icmp_named_kstat_t template = {
14118 14165 { "inMsgs", KSTAT_DATA_UINT32 },
14119 14166 { "inErrors", KSTAT_DATA_UINT32 },
14120 14167 { "inDestUnreachs", KSTAT_DATA_UINT32 },
14121 14168 { "inTimeExcds", KSTAT_DATA_UINT32 },
14122 14169 { "inParmProbs", KSTAT_DATA_UINT32 },
14123 14170 { "inSrcQuenchs", KSTAT_DATA_UINT32 },
14124 14171 { "inRedirects", KSTAT_DATA_UINT32 },
14125 14172 { "inEchos", KSTAT_DATA_UINT32 },
14126 14173 { "inEchoReps", KSTAT_DATA_UINT32 },
14127 14174 { "inTimestamps", KSTAT_DATA_UINT32 },
14128 14175 { "inTimestampReps", KSTAT_DATA_UINT32 },
14129 14176 { "inAddrMasks", KSTAT_DATA_UINT32 },
14130 14177 { "inAddrMaskReps", KSTAT_DATA_UINT32 },
14131 14178 { "outMsgs", KSTAT_DATA_UINT32 },
14132 14179 { "outErrors", KSTAT_DATA_UINT32 },
14133 14180 { "outDestUnreachs", KSTAT_DATA_UINT32 },
14134 14181 { "outTimeExcds", KSTAT_DATA_UINT32 },
14135 14182 { "outParmProbs", KSTAT_DATA_UINT32 },
14136 14183 { "outSrcQuenchs", KSTAT_DATA_UINT32 },
14137 14184 { "outRedirects", KSTAT_DATA_UINT32 },
14138 14185 { "outEchos", KSTAT_DATA_UINT32 },
14139 14186 { "outEchoReps", KSTAT_DATA_UINT32 },
14140 14187 { "outTimestamps", KSTAT_DATA_UINT32 },
14141 14188 { "outTimestampReps", KSTAT_DATA_UINT32 },
14142 14189 { "outAddrMasks", KSTAT_DATA_UINT32 },
14143 14190 { "outAddrMaskReps", KSTAT_DATA_UINT32 },
14144 14191 { "inChksumErrs", KSTAT_DATA_UINT32 },
14145 14192 { "inUnknowns", KSTAT_DATA_UINT32 },
14146 14193 { "inFragNeeded", KSTAT_DATA_UINT32 },
14147 14194 { "outFragNeeded", KSTAT_DATA_UINT32 },
14148 14195 { "outDrops", KSTAT_DATA_UINT32 },
14149 14196 	    { "inOverflows",	KSTAT_DATA_UINT32 },
14150 14197 { "inBadRedirects", KSTAT_DATA_UINT32 },
14151 14198 };
14152 14199
14153 14200 ksp = kstat_create_netstack("ip", 0, "icmp", "mib2", KSTAT_TYPE_NAMED,
14154 14201 NUM_OF_FIELDS(icmp_named_kstat_t), 0, stackid);
14155 14202 if (ksp == NULL || ksp->ks_data == NULL)
14156 14203 return (NULL);
14157 14204
14158 14205 bcopy(&template, ksp->ks_data, sizeof (template));
14159 14206
14160 14207 ksp->ks_update = icmp_kstat_update;
14161 14208 ksp->ks_private = (void *)(uintptr_t)stackid;
14162 14209
14163 14210 kstat_install(ksp);
14164 14211 return (ksp);
14165 14212 }
14166 14213
14167 14214 static void
14168 14215 icmp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14169 14216 {
14170 14217 if (ksp != NULL) {
14171 14218 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14172 14219 kstat_delete_netstack(ksp, stackid);
14173 14220 }
14174 14221 }
14175 14222
14176 14223 static int
14177 14224 icmp_kstat_update(kstat_t *kp, int rw)
14178 14225 {
14179 14226 icmp_named_kstat_t *icmpkp;
14180 14227 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14181 14228 netstack_t *ns;
14182 14229 ip_stack_t *ipst;
14183 14230
14184 14231 if ((kp == NULL) || (kp->ks_data == NULL))
14185 14232 return (EIO);
14186 14233
14187 14234 if (rw == KSTAT_WRITE)
14188 14235 return (EACCES);
14189 14236
14190 14237 ns = netstack_find_by_stackid(stackid);
14191 14238 if (ns == NULL)
14192 14239 return (-1);
14193 14240 ipst = ns->netstack_ip;
14194 14241 if (ipst == NULL) {
14195 14242 netstack_rele(ns);
14196 14243 return (-1);
14197 14244 }
14198 14245 icmpkp = (icmp_named_kstat_t *)kp->ks_data;
14199 14246
14200 14247 icmpkp->inMsgs.value.ui32 = ipst->ips_icmp_mib.icmpInMsgs;
14201 14248 icmpkp->inErrors.value.ui32 = ipst->ips_icmp_mib.icmpInErrors;
14202 14249 icmpkp->inDestUnreachs.value.ui32 =
14203 14250 ipst->ips_icmp_mib.icmpInDestUnreachs;
14204 14251 icmpkp->inTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpInTimeExcds;
14205 14252 icmpkp->inParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpInParmProbs;
14206 14253 icmpkp->inSrcQuenchs.value.ui32 = ipst->ips_icmp_mib.icmpInSrcQuenchs;
14207 14254 icmpkp->inRedirects.value.ui32 = ipst->ips_icmp_mib.icmpInRedirects;
14208 14255 icmpkp->inEchos.value.ui32 = ipst->ips_icmp_mib.icmpInEchos;
14209 14256 icmpkp->inEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpInEchoReps;
14210 14257 icmpkp->inTimestamps.value.ui32 = ipst->ips_icmp_mib.icmpInTimestamps;
14211 14258 icmpkp->inTimestampReps.value.ui32 =
14212 14259 ipst->ips_icmp_mib.icmpInTimestampReps;
14213 14260 icmpkp->inAddrMasks.value.ui32 = ipst->ips_icmp_mib.icmpInAddrMasks;
14214 14261 icmpkp->inAddrMaskReps.value.ui32 =
14215 14262 ipst->ips_icmp_mib.icmpInAddrMaskReps;
14216 14263 icmpkp->outMsgs.value.ui32 = ipst->ips_icmp_mib.icmpOutMsgs;
14217 14264 icmpkp->outErrors.value.ui32 = ipst->ips_icmp_mib.icmpOutErrors;
14218 14265 icmpkp->outDestUnreachs.value.ui32 =
14219 14266 ipst->ips_icmp_mib.icmpOutDestUnreachs;
14220 14267 icmpkp->outTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpOutTimeExcds;
14221 14268 icmpkp->outParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpOutParmProbs;
14222 14269 icmpkp->outSrcQuenchs.value.ui32 =
14223 14270 ipst->ips_icmp_mib.icmpOutSrcQuenchs;
14224 14271 icmpkp->outRedirects.value.ui32 = ipst->ips_icmp_mib.icmpOutRedirects;
14225 14272 icmpkp->outEchos.value.ui32 = ipst->ips_icmp_mib.icmpOutEchos;
14226 14273 icmpkp->outEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpOutEchoReps;
14227 14274 icmpkp->outTimestamps.value.ui32 =
14228 14275 ipst->ips_icmp_mib.icmpOutTimestamps;
14229 14276 icmpkp->outTimestampReps.value.ui32 =
14230 14277 ipst->ips_icmp_mib.icmpOutTimestampReps;
14231 14278 icmpkp->outAddrMasks.value.ui32 =
14232 14279 ipst->ips_icmp_mib.icmpOutAddrMasks;
14233 14280 icmpkp->outAddrMaskReps.value.ui32 =
14234 14281 ipst->ips_icmp_mib.icmpOutAddrMaskReps;
14235 14282 icmpkp->inCksumErrs.value.ui32 = ipst->ips_icmp_mib.icmpInCksumErrs;
14236 14283 icmpkp->inUnknowns.value.ui32 = ipst->ips_icmp_mib.icmpInUnknowns;
14237 14284 icmpkp->inFragNeeded.value.ui32 = ipst->ips_icmp_mib.icmpInFragNeeded;
14238 14285 icmpkp->outFragNeeded.value.ui32 =
14239 14286 ipst->ips_icmp_mib.icmpOutFragNeeded;
14240 14287 icmpkp->outDrops.value.ui32 = ipst->ips_icmp_mib.icmpOutDrops;
14241 14288 icmpkp->inOverflows.value.ui32 = ipst->ips_icmp_mib.icmpInOverflows;
14242 14289 icmpkp->inBadRedirects.value.ui32 =
14243 14290 ipst->ips_icmp_mib.icmpInBadRedirects;
14244 14291
14245 14292 netstack_rele(ns);
14246 14293 return (0);
14247 14294 }
14248 14295
14249 14296 /*
14250 14297  * This is the fanout function for a raw socket opened for SCTP. Note
14251 14298  * that it is called after SCTP checks that there is no socket which
14252 14299  * wants the packet. Then before SCTP handles this out-of-the-blue packet,
14253 14300  * this function is called to see if there is any raw socket for SCTP.
14254 14301  * If there is and it is bound to the correct address, the packet will
14255 14302  * be sent to that socket. Note that only one raw socket can be bound to
14256 14303  * a port. This is assured in ipcl_sctp_hash_insert().
14257 14304 */
14258 14305 void
14259 14306 ip_fanout_sctp_raw(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, uint32_t ports,
14260 14307 ip_recv_attr_t *ira)
14261 14308 {
14262 14309 conn_t *connp;
14263 14310 queue_t *rq;
14264 14311 boolean_t secure;
14265 14312 ill_t *ill = ira->ira_ill;
14266 14313 ip_stack_t *ipst = ill->ill_ipst;
14267 14314 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
14268 14315 sctp_stack_t *sctps = ipst->ips_netstack->netstack_sctp;
14269 14316 iaflags_t iraflags = ira->ira_flags;
14270 14317 ill_t *rill = ira->ira_rill;
14271 14318
14272 14319 secure = iraflags & IRAF_IPSEC_SECURE;
14273 14320
14274 14321 connp = ipcl_classify_raw(mp, IPPROTO_SCTP, ports, ipha, ip6h,
14275 14322 ira, ipst);
14276 14323 if (connp == NULL) {
14277 14324 /*
14278 14325 * Although raw sctp is not summed, OOB chunks must be.
14279 14326 * Drop the packet here if the sctp checksum failed.
14280 14327 */
14281 14328 if (iraflags & IRAF_SCTP_CSUM_ERR) {
14282 14329 SCTPS_BUMP_MIB(sctps, sctpChecksumError);
14283 14330 freemsg(mp);
14284 14331 return;
14285 14332 }
14286 14333 ira->ira_ill = ira->ira_rill = NULL;
14287 14334 sctp_ootb_input(mp, ira, ipst);
14288 14335 ira->ira_ill = ill;
14289 14336 ira->ira_rill = rill;
14290 14337 return;
14291 14338 }
14292 14339 rq = connp->conn_rq;
14293 14340 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
14294 14341 CONN_DEC_REF(connp);
14295 14342 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
14296 14343 freemsg(mp);
14297 14344 return;
14298 14345 }
14299 14346 if (((iraflags & IRAF_IS_IPV4) ?
14300 14347 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
14301 14348 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
14302 14349 secure) {
14303 14350 mp = ipsec_check_inbound_policy(mp, connp, ipha,
14304 14351 ip6h, ira);
14305 14352 if (mp == NULL) {
14306 14353 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
14307 14354 /* Note that mp is NULL */
14308 14355 ip_drop_input("ipIfStatsInDiscards", mp, ill);
14309 14356 CONN_DEC_REF(connp);
14310 14357 return;
14311 14358 }
14312 14359 }
14313 14360
14314 14361 if (iraflags & IRAF_ICMP_ERROR) {
14315 14362 (connp->conn_recvicmp)(connp, mp, NULL, ira);
14316 14363 } else {
14317 14364 ill_t *rill = ira->ira_rill;
14318 14365
14319 14366 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
14320 14367 /* This is the SOCK_RAW, IPPROTO_SCTP case. */
14321 14368 ira->ira_ill = ira->ira_rill = NULL;
14322 14369 (connp->conn_recv)(connp, mp, NULL, ira);
14323 14370 ira->ira_ill = ill;
14324 14371 ira->ira_rill = rill;
14325 14372 }
14326 14373 CONN_DEC_REF(connp);
14327 14374 }
14328 14375
14329 14376 /*
14330 14377 * Free a packet that has the link-layer dl_unitdata_req_t or fast-path
14331 14378 * header before the ip payload.
14332 14379 */
14333 14380 static void
14334 14381 ip_xmit_flowctl_drop(ill_t *ill, mblk_t *mp, boolean_t is_fp_mp, int fp_mp_len)
14335 14382 {
14336 14383 int len = (mp->b_wptr - mp->b_rptr);
14337 14384 mblk_t *ip_mp;
14338 14385
14339 14386 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14340 14387 if (is_fp_mp || len != fp_mp_len) {
14341 14388 if (len > fp_mp_len) {
14342 14389 /*
14343 14390 * fastpath header and ip header in the first mblk
14344 14391 */
14345 14392 mp->b_rptr += fp_mp_len;
14346 14393 } else {
14347 14394 /*
14348 14395 * ip_xmit_attach_llhdr had to prepend an mblk to
14349 14396 * attach the fastpath header before ip header.
14350 14397 */
14351 14398 ip_mp = mp->b_cont;
14352 14399 freeb(mp);
14353 14400 mp = ip_mp;
14354 14401 mp->b_rptr += (fp_mp_len - len);
14355 14402 }
14356 14403 } else {
14357 14404 ip_mp = mp->b_cont;
14358 14405 freeb(mp);
14359 14406 mp = ip_mp;
14360 14407 }
14361 14408 ip_drop_output("ipIfStatsOutDiscards - flow ctl", mp, ill);
14362 14409 freemsg(mp);
14363 14410 }
14364 14411
14365 14412 /*
14366 14413 * Normal post fragmentation function.
14367 14414 *
14368 14415 * Send a packet using the passed in nce. This handles both IPv4 and IPv6
14369 14416 * using the same state machine.
14370 14417 *
14371 14418 * We return an error on failure. In particular we return EWOULDBLOCK
14372 14419 * when the driver flow controls. In that case this ensures that ip_wsrv runs
14373 14420  * (currently by canputnext failure resulting in backenabling from GLD).
14374 14421  * This allows the callers of conn_ip_output() to use EWOULDBLOCK as an
14375 14422  * indication that they can flow control until ip_wsrv() tells them to restart.
14376 14423 *
14377 14424  * If the nce passed by the caller is incomplete, this function
14378 14425  * queues the packet and, if necessary, sends an ARP request and bails.
14379 14426 * If the Neighbor Cache passed is fully resolved, we simply prepend
14380 14427 * the link-layer header to the packet, do ipsec hw acceleration
14381 14428 * work if necessary, and send the packet out on the wire.
14382 14429 */
14383 14430 /* ARGSUSED6 */
14384 14431 int
14385 14432 ip_xmit(mblk_t *mp, nce_t *nce, iaflags_t ixaflags, uint_t pkt_len,
14386 14433 uint32_t xmit_hint, zoneid_t szone, zoneid_t nolzid, uintptr_t *ixacookie)
14387 14434 {
14388 14435 queue_t *wq;
14389 14436 ill_t *ill = nce->nce_ill;
14390 14437 ip_stack_t *ipst = ill->ill_ipst;
14391 14438 uint64_t delta;
14392 14439 boolean_t isv6 = ill->ill_isv6;
14393 14440 boolean_t fp_mp;
14394 14441 ncec_t *ncec = nce->nce_common;
14395 14442 int64_t now = LBOLT_FASTPATH64;
14396 14443 boolean_t is_probe;
14397 14444
14398 14445 DTRACE_PROBE1(ip__xmit, nce_t *, nce);
14399 14446
14400 14447 ASSERT(mp != NULL);
14401 14448 ASSERT(mp->b_datap->db_type == M_DATA);
14402 14449 ASSERT(pkt_len == msgdsize(mp));
14403 14450
14404 14451 /*
14405 14452 	 * If we have already been here and are coming back after ARP/ND,
14406 14453 * the IXAF_NO_TRACE flag is set. We skip FW_HOOKS, DTRACE and ipobs
14407 14454 * in that case since they have seen the packet when it came here
14408 14455 * the first time.
14409 14456 */
14410 14457 if (ixaflags & IXAF_NO_TRACE)
14411 14458 goto sendit;
14412 14459
14413 14460 if (ixaflags & IXAF_IS_IPV4) {
14414 14461 ipha_t *ipha = (ipha_t *)mp->b_rptr;
14415 14462
14416 14463 ASSERT(!isv6);
14417 14464 ASSERT(pkt_len == ntohs(((ipha_t *)mp->b_rptr)->ipha_length));
14418 14465 if (HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) &&
14419 14466 !(ixaflags & IXAF_NO_PFHOOK)) {
14420 14467 int error;
14421 14468
14422 14469 FW_HOOKS(ipst->ips_ip4_physical_out_event,
14423 14470 ipst->ips_ipv4firewall_physical_out,
14424 14471 NULL, ill, ipha, mp, mp, 0, ipst, error);
14425 14472 DTRACE_PROBE1(ip4__physical__out__end,
14426 14473 mblk_t *, mp);
14427 14474 if (mp == NULL)
14428 14475 return (error);
14429 14476
14430 14477 /* The length could have changed */
14431 14478 pkt_len = msgdsize(mp);
14432 14479 }
14433 14480 if (ipst->ips_ip4_observe.he_interested) {
14434 14481 /*
14435 14482 * Note that for TX the zoneid is the sending
14436 14483 * zone, whether or not MLP is in play.
14437 14484 * Since the szone argument is the IP zoneid (i.e.,
14438 14485 * zero for exclusive-IP zones) and ipobs wants
14439 14486 * the system zoneid, we map it here.
14440 14487 */
14441 14488 szone = IP_REAL_ZONEID(szone, ipst);
14442 14489
14443 14490 /*
14444 14491 * On the outbound path the destination zone will be
14445 14492 * unknown as we're sending this packet out on the
14446 14493 * wire.
14447 14494 */
14448 14495 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14449 14496 ill, ipst);
14450 14497 }
14451 14498 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14452 14499 void_ip_t *, ipha, __dtrace_ipsr_ill_t *, ill,
14453 14500 ipha_t *, ipha, ip6_t *, NULL, int, 0);
14454 14501 } else {
14455 14502 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
14456 14503
14457 14504 ASSERT(isv6);
14458 14505 ASSERT(pkt_len ==
14459 14506 ntohs(((ip6_t *)mp->b_rptr)->ip6_plen) + IPV6_HDR_LEN);
14460 14507 if (HOOKS6_INTERESTED_PHYSICAL_OUT(ipst) &&
14461 14508 !(ixaflags & IXAF_NO_PFHOOK)) {
14462 14509 int error;
14463 14510
14464 14511 FW_HOOKS6(ipst->ips_ip6_physical_out_event,
14465 14512 ipst->ips_ipv6firewall_physical_out,
14466 14513 NULL, ill, ip6h, mp, mp, 0, ipst, error);
14467 14514 DTRACE_PROBE1(ip6__physical__out__end,
14468 14515 mblk_t *, mp);
14469 14516 if (mp == NULL)
14470 14517 return (error);
14471 14518
14472 14519 /* The length could have changed */
14473 14520 pkt_len = msgdsize(mp);
14474 14521 }
14475 14522 if (ipst->ips_ip6_observe.he_interested) {
14476 14523 /* See above */
14477 14524 szone = IP_REAL_ZONEID(szone, ipst);
14478 14525
14479 14526 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14480 14527 ill, ipst);
14481 14528 }
14482 14529 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14483 14530 void_ip_t *, ip6h, __dtrace_ipsr_ill_t *, ill,
14484 14531 ipha_t *, NULL, ip6_t *, ip6h, int, 0);
14485 14532 }
14486 14533
14487 14534 sendit:
14488 14535 /*
14489 14536 * We check the state without a lock because the state can never
14490 14537 * move "backwards" to initial or incomplete.
14491 14538 */
14492 14539 switch (ncec->ncec_state) {
14493 14540 case ND_REACHABLE:
14494 14541 case ND_STALE:
14495 14542 case ND_DELAY:
14496 14543 case ND_PROBE:
14497 14544 mp = ip_xmit_attach_llhdr(mp, nce);
14498 14545 if (mp == NULL) {
14499 14546 /*
14500 14547 * ip_xmit_attach_llhdr has increased
14501 14548 * ipIfStatsOutDiscards and called ip_drop_output()
14502 14549 */
14503 14550 return (ENOBUFS);
14504 14551 }
14505 14552 /*
14506 14553 * check if nce_fastpath completed and we tagged on a
14507 14554 * copy of nce_fp_mp in ip_xmit_attach_llhdr().
14508 14555 */
14509 14556 fp_mp = (mp->b_datap->db_type == M_DATA);
14510 14557
14511 14558 if (fp_mp &&
14512 14559 (ill->ill_capabilities & ILL_CAPAB_DLD_DIRECT)) {
14513 14560 ill_dld_direct_t *idd;
14514 14561
14515 14562 idd = &ill->ill_dld_capab->idc_direct;
14516 14563 /*
14517 14564 * Send the packet directly to DLD, where it
14518 14565 * may be queued depending on the availability
14519 14566 * of transmit resources at the media layer.
14520 14567 			 * The return value should be taken into
14521 14568 			 * account to flow control the TCP.
14522 14569 */
14523 14570 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14524 14571 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14525 14572 pkt_len);
14526 14573
14527 14574 if (ixaflags & IXAF_NO_DEV_FLOW_CTL) {
14528 14575 (void) idd->idd_tx_df(idd->idd_tx_dh, mp,
14529 14576 (uintptr_t)xmit_hint, IP_DROP_ON_NO_DESC);
14530 14577 } else {
14531 14578 uintptr_t cookie;
14532 14579
14533 14580 if ((cookie = idd->idd_tx_df(idd->idd_tx_dh,
14534 14581 mp, (uintptr_t)xmit_hint, 0)) != 0) {
14535 14582 if (ixacookie != NULL)
14536 14583 *ixacookie = cookie;
14537 14584 return (EWOULDBLOCK);
14538 14585 }
14539 14586 }
14540 14587 } else {
14541 14588 wq = ill->ill_wq;
14542 14589
14543 14590 if (!(ixaflags & IXAF_NO_DEV_FLOW_CTL) &&
14544 14591 !canputnext(wq)) {
14545 14592 if (ixacookie != NULL)
14546 14593 *ixacookie = 0;
14547 14594 ip_xmit_flowctl_drop(ill, mp, fp_mp,
14548 14595 nce->nce_fp_mp != NULL ?
14549 14596 MBLKL(nce->nce_fp_mp) : 0);
14550 14597 return (EWOULDBLOCK);
14551 14598 }
14552 14599 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14553 14600 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14554 14601 pkt_len);
14555 14602 putnext(wq, mp);
14556 14603 }
14557 14604
14558 14605 /*
14559 14606 * The rest of this function implements Neighbor Unreachability
14560 14607 	 * Detection. Determine if the ncec is eligible for NUD.
14561 14608 */
14562 14609 if (ncec->ncec_flags & NCE_F_NONUD)
14563 14610 return (0);
14564 14611
14565 14612 ASSERT(ncec->ncec_state != ND_INCOMPLETE);
14566 14613
14567 14614 /*
14568 14615 * Check for upper layer advice
14569 14616 */
14570 14617 if (ixaflags & IXAF_REACH_CONF) {
14571 14618 timeout_id_t tid;
14572 14619
14573 14620 /*
14574 14621 			 * It should be OK to check the state without
14575 14622 			 * a lock here; at worst we lose one reachability confirmation.
14576 14623 */
14577 14624 ncec->ncec_last = TICK_TO_MSEC(now);
14578 14625 if (ncec->ncec_state != ND_REACHABLE) {
14579 14626 mutex_enter(&ncec->ncec_lock);
14580 14627 ncec->ncec_state = ND_REACHABLE;
14581 14628 tid = ncec->ncec_timeout_id;
14582 14629 ncec->ncec_timeout_id = 0;
14583 14630 mutex_exit(&ncec->ncec_lock);
14584 14631 (void) untimeout(tid);
14585 14632 if (ip_debug > 2) {
14586 14633 /* ip1dbg */
14587 14634 pr_addr_dbg("ip_xmit: state"
14588 14635 " for %s changed to"
14589 14636 " REACHABLE\n", AF_INET6,
14590 14637 &ncec->ncec_addr);
14591 14638 }
14592 14639 }
14593 14640 return (0);
14594 14641 }
14595 14642
14596 14643 delta = TICK_TO_MSEC(now) - ncec->ncec_last;
14597 14644 ip1dbg(("ip_xmit: delta = %" PRId64
14598 14645 " ill_reachable_time = %d \n", delta,
14599 14646 ill->ill_reachable_time));
14600 14647 if (delta > (uint64_t)ill->ill_reachable_time) {
14601 14648 mutex_enter(&ncec->ncec_lock);
14602 14649 switch (ncec->ncec_state) {
14603 14650 case ND_REACHABLE:
14604 14651 ASSERT((ncec->ncec_flags & NCE_F_NONUD) == 0);
14605 14652 /* FALLTHROUGH */
14606 14653 case ND_STALE:
14607 14654 /*
14608 14655 * ND_REACHABLE is identical to
14609 14656 * ND_STALE in this specific case. If
14610 14657 * reachable time has expired for this
14611 14658 * neighbor (delta is greater than
14612 14659 * reachable time), conceptually, the
14613 14660 * neighbor cache is no longer in
14614 14661 * REACHABLE state, but already in
14615 14662 * STALE state. So the correct
14616 14663 * transition here is to ND_DELAY.
14617 14664 */
14618 14665 ncec->ncec_state = ND_DELAY;
14619 14666 mutex_exit(&ncec->ncec_lock);
14620 14667 nce_restart_timer(ncec,
14621 14668 ipst->ips_delay_first_probe_time);
14622 14669 if (ip_debug > 3) {
14623 14670 /* ip2dbg */
14624 14671 pr_addr_dbg("ip_xmit: state"
14625 14672 " for %s changed to"
14626 14673 " DELAY\n", AF_INET6,
14627 14674 &ncec->ncec_addr);
14628 14675 }
14629 14676 break;
14630 14677 case ND_DELAY:
14631 14678 case ND_PROBE:
14632 14679 mutex_exit(&ncec->ncec_lock);
14633 14680 /* Timers have already started */
14634 14681 break;
14635 14682 case ND_UNREACHABLE:
14636 14683 /*
14637 14684 * nce_timer has detected that this ncec
14638 14685 			 * is unreachable and initiated the deletion of
14639 14686 			 * this ncec.
14640 14687 * This is a harmless race where we found the
14641 14688 * ncec before it was deleted and have
14642 14689 * just sent out a packet using this
14643 14690 * unreachable ncec.
14644 14691 */
14645 14692 mutex_exit(&ncec->ncec_lock);
14646 14693 break;
14647 14694 default:
14648 14695 ASSERT(0);
14649 14696 mutex_exit(&ncec->ncec_lock);
14650 14697 }
14651 14698 }
14652 14699 return (0);
14653 14700
14654 14701 case ND_INCOMPLETE:
14655 14702 /*
14656 14703 		 * The state could have changed since we didn't hold the lock.
14657 14704 * Re-verify state under lock.
14658 14705 */
14659 14706 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14660 14707 mutex_enter(&ncec->ncec_lock);
14661 14708 if (NCE_ISREACHABLE(ncec)) {
14662 14709 mutex_exit(&ncec->ncec_lock);
14663 14710 goto sendit;
14664 14711 }
14665 14712 /* queue the packet */
14666 14713 nce_queue_mp(ncec, mp, is_probe);
14667 14714 mutex_exit(&ncec->ncec_lock);
14668 14715 DTRACE_PROBE2(ip__xmit__incomplete,
14669 14716 (ncec_t *), ncec, (mblk_t *), mp);
14670 14717 return (0);
14671 14718
14672 14719 case ND_INITIAL:
14673 14720 /*
14674 14721 * State could have changed since we didn't hold the lock, so
14675 14722 * re-verify state.
14676 14723 */
14677 14724 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14678 14725 mutex_enter(&ncec->ncec_lock);
14679 14726 if (NCE_ISREACHABLE(ncec)) {
14680 14727 mutex_exit(&ncec->ncec_lock);
14681 14728 goto sendit;
14682 14729 }
14683 14730 nce_queue_mp(ncec, mp, is_probe);
14684 14731 if (ncec->ncec_state == ND_INITIAL) {
14685 14732 ncec->ncec_state = ND_INCOMPLETE;
14686 14733 mutex_exit(&ncec->ncec_lock);
14687 14734 /*
14688 14735 			 * Figure out the source we want to use
14689 14736 * and resolve it.
14690 14737 */
14691 14738 ip_ndp_resolve(ncec);
14692 14739 } else {
14693 14740 mutex_exit(&ncec->ncec_lock);
14694 14741 }
14695 14742 return (0);
14696 14743
14697 14744 case ND_UNREACHABLE:
14698 14745 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14699 14746 ip_drop_output("ipIfStatsOutDiscards - ND_UNREACHABLE",
14700 14747 mp, ill);
14701 14748 freemsg(mp);
14702 14749 return (0);
14703 14750
14704 14751 default:
14705 14752 ASSERT(0);
14706 14753 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14707 14754 ip_drop_output("ipIfStatsOutDiscards - ND_other",
14708 14755 mp, ill);
14709 14756 freemsg(mp);
14710 14757 return (ENETUNREACH);
14711 14758 }
14712 14759 }
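/*
 * Editorial note: an illustrative caller of ip_xmit() honoring the
 * flow-control contract documented above the function:
 *
 *	uintptr_t cookie = 0;
 *	error = ip_xmit(mp, nce, ixaflags, pkt_len, xmit_hint, szone,
 *	    nolzid, &cookie);
 *	if (error == EWOULDBLOCK) {
 *		(stop transmitting; the driver backenable or ip_wsrv()
 *		 will restart this sender later)
 *	}
 *
 * In every return path ip_xmit() has consumed mp, so the caller must
 * not free or reuse it.
 */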
14713 14760
14714 14761 /*
14715 14762 * Return B_TRUE if the buffers differ in length or content.
14716 14763 * This is used for comparing extension header buffers.
14717 14764 * Note that an extension header would be declared different
14718 14765 * even if all that changed was the next header value in that header i.e.
14719 14766  * even if all that changed was the next header value in that header, i.e.,
14720 14767 */
14721 14768 boolean_t
14722 14769 ip_cmpbuf(const void *abuf, uint_t alen, boolean_t b_valid, const void *bbuf,
14723 14770 uint_t blen)
14724 14771 {
14725 14772 if (!b_valid)
14726 14773 blen = 0;
14727 14774
14728 14775 if (alen != blen)
14729 14776 return (B_TRUE);
14730 14777 if (alen == 0)
14731 14778 return (B_FALSE); /* Both zero length */
14732 14779 return (bcmp(abuf, bbuf, alen));
14733 14780 }
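/*
 * Editorial note: an illustrative use of ip_cmpbuf(), e.g. deciding
 * whether a newly received hop-by-hop options buffer differs from the
 * previously saved one (the variable names are hypothetical):
 *
 *	if (ip_cmpbuf(saved_hbh, saved_len, (new_hbh != NULL),
 *	    new_hbh, new_len)) {
 *		(content or length changed; deliver and save the new buffer)
 *	}
 */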
14734 14781
14735 14782 /*
14736 14783 * Preallocate memory for ip_savebuf(). Returns B_TRUE if ok.
14737 14784 * Return B_FALSE if memory allocation fails - don't change any state!
14738 14785 */
14739 14786 boolean_t
14740 14787 ip_allocbuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14741 14788 const void *src, uint_t srclen)
14742 14789 {
14743 14790 void *dst;
14744 14791
14745 14792 if (!src_valid)
14746 14793 srclen = 0;
14747 14794
14748 14795 ASSERT(*dstlenp == 0);
14749 14796 if (src != NULL && srclen != 0) {
14750 14797 dst = mi_alloc(srclen, BPRI_MED);
14751 14798 if (dst == NULL)
14752 14799 return (B_FALSE);
14753 14800 } else {
14754 14801 dst = NULL;
14755 14802 }
14756 14803 if (*dstp != NULL)
14757 14804 mi_free(*dstp);
14758 14805 *dstp = dst;
14759 14806 *dstlenp = dst == NULL ? 0 : srclen;
14760 14807 return (B_TRUE);
14761 14808 }
14762 14809
14763 14810 /*
14764 14811 * Replace what is in *dst, *dstlen with the source.
14765 14812 * Assumes ip_allocbuf has already been called.
14766 14813 */
14767 14814 void
14768 14815 ip_savebuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14769 14816 const void *src, uint_t srclen)
14770 14817 {
14771 14818 if (!src_valid)
14772 14819 srclen = 0;
14773 14820
14774 14821 ASSERT(*dstlenp == srclen);
14775 14822 if (src != NULL && srclen != 0)
14776 14823 bcopy(src, *dstp, srclen);
14777 14824 }
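/*
 * Editorial sketch (not part of ip.c): the two-phase pattern the
 * comments above describe, with all allocation done up front so the
 * commit step cannot fail. The wrapper name is hypothetical, and per
 * the ASSERT in ip_allocbuf() the caller enters with *dstlenp == 0.
 */
static boolean_t
example_replace_buf(void **dstp, uint_t *dstlenp, const void *src,
    uint_t srclen)
{
	/* Phase 1: allocate; on failure no state has changed */
	if (!ip_allocbuf(dstp, dstlenp, (src != NULL), src, srclen))
		return (B_FALSE);

	/* Phase 2: commit; cannot fail */
	ip_savebuf(dstp, dstlenp, (src != NULL), src, srclen);
	return (B_TRUE);
}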
14778 14825
14779 14826 /*
14780 14827 * Free the storage pointed to by the members of an ip_pkt_t.
14781 14828 */
14782 14829 void
14783 14830 ip_pkt_free(ip_pkt_t *ipp)
14784 14831 {
14785 14832 uint_t fields = ipp->ipp_fields;
14786 14833
14787 14834 if (fields & IPPF_HOPOPTS) {
14788 14835 kmem_free(ipp->ipp_hopopts, ipp->ipp_hopoptslen);
14789 14836 ipp->ipp_hopopts = NULL;
14790 14837 ipp->ipp_hopoptslen = 0;
14791 14838 }
14792 14839 if (fields & IPPF_RTHDRDSTOPTS) {
14793 14840 kmem_free(ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
14794 14841 ipp->ipp_rthdrdstopts = NULL;
14795 14842 ipp->ipp_rthdrdstoptslen = 0;
14796 14843 }
14797 14844 if (fields & IPPF_DSTOPTS) {
14798 14845 kmem_free(ipp->ipp_dstopts, ipp->ipp_dstoptslen);
14799 14846 ipp->ipp_dstopts = NULL;
14800 14847 ipp->ipp_dstoptslen = 0;
14801 14848 }
14802 14849 if (fields & IPPF_RTHDR) {
14803 14850 kmem_free(ipp->ipp_rthdr, ipp->ipp_rthdrlen);
14804 14851 ipp->ipp_rthdr = NULL;
14805 14852 ipp->ipp_rthdrlen = 0;
14806 14853 }
14807 14854 if (fields & IPPF_IPV4_OPTIONS) {
14808 14855 kmem_free(ipp->ipp_ipv4_options, ipp->ipp_ipv4_options_len);
14809 14856 ipp->ipp_ipv4_options = NULL;
14810 14857 ipp->ipp_ipv4_options_len = 0;
14811 14858 }
14812 14859 if (fields & IPPF_LABEL_V4) {
14813 14860 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
14814 14861 ipp->ipp_label_v4 = NULL;
14815 14862 ipp->ipp_label_len_v4 = 0;
14816 14863 }
14817 14864 if (fields & IPPF_LABEL_V6) {
14818 14865 kmem_free(ipp->ipp_label_v6, ipp->ipp_label_len_v6);
14819 14866 ipp->ipp_label_v6 = NULL;
14820 14867 ipp->ipp_label_len_v6 = 0;
14821 14868 }
14822 14869 ipp->ipp_fields &= ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14823 14870 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14824 14871 }
14825 14872
14826 14873 /*
14827 14874 * Copy from src to dst and allocate as needed.
14828 14875 * Returns zero or ENOMEM.
14829 14876 *
14830 14877 * The caller must initialize dst to zero.
14831 14878 */
14832 14879 int
14833 14880 ip_pkt_copy(ip_pkt_t *src, ip_pkt_t *dst, int kmflag)
14834 14881 {
14835 14882 uint_t fields = src->ipp_fields;
14836 14883
14837 14884 /* Start with fields that don't require memory allocation */
14838 14885 dst->ipp_fields = fields &
14839 14886 ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14840 14887 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14841 14888
14842 14889 dst->ipp_addr = src->ipp_addr;
14843 14890 dst->ipp_unicast_hops = src->ipp_unicast_hops;
14844 14891 dst->ipp_hoplimit = src->ipp_hoplimit;
14845 14892 dst->ipp_tclass = src->ipp_tclass;
14846 14893 dst->ipp_type_of_service = src->ipp_type_of_service;
14847 14894
14848 14895 if (!(fields & (IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14849 14896 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6)))
14850 14897 return (0);
14851 14898
14852 14899 if (fields & IPPF_HOPOPTS) {
14853 14900 dst->ipp_hopopts = kmem_alloc(src->ipp_hopoptslen, kmflag);
14854 14901 if (dst->ipp_hopopts == NULL) {
14855 14902 ip_pkt_free(dst);
14856 14903 return (ENOMEM);
14857 14904 }
14858 14905 dst->ipp_fields |= IPPF_HOPOPTS;
14859 14906 bcopy(src->ipp_hopopts, dst->ipp_hopopts,
14860 14907 src->ipp_hopoptslen);
14861 14908 dst->ipp_hopoptslen = src->ipp_hopoptslen;
14862 14909 }
14863 14910 if (fields & IPPF_RTHDRDSTOPTS) {
14864 14911 dst->ipp_rthdrdstopts = kmem_alloc(src->ipp_rthdrdstoptslen,
14865 14912 kmflag);
14866 14913 if (dst->ipp_rthdrdstopts == NULL) {
14867 14914 ip_pkt_free(dst);
14868 14915 return (ENOMEM);
14869 14916 }
14870 14917 dst->ipp_fields |= IPPF_RTHDRDSTOPTS;
14871 14918 bcopy(src->ipp_rthdrdstopts, dst->ipp_rthdrdstopts,
14872 14919 src->ipp_rthdrdstoptslen);
14873 14920 dst->ipp_rthdrdstoptslen = src->ipp_rthdrdstoptslen;
14874 14921 }
14875 14922 if (fields & IPPF_DSTOPTS) {
14876 14923 dst->ipp_dstopts = kmem_alloc(src->ipp_dstoptslen, kmflag);
14877 14924 if (dst->ipp_dstopts == NULL) {
14878 14925 ip_pkt_free(dst);
14879 14926 return (ENOMEM);
14880 14927 }
14881 14928 dst->ipp_fields |= IPPF_DSTOPTS;
14882 14929 bcopy(src->ipp_dstopts, dst->ipp_dstopts,
14883 14930 src->ipp_dstoptslen);
14884 14931 dst->ipp_dstoptslen = src->ipp_dstoptslen;
14885 14932 }
14886 14933 if (fields & IPPF_RTHDR) {
14887 14934 dst->ipp_rthdr = kmem_alloc(src->ipp_rthdrlen, kmflag);
14888 14935 if (dst->ipp_rthdr == NULL) {
14889 14936 ip_pkt_free(dst);
14890 14937 return (ENOMEM);
14891 14938 }
14892 14939 dst->ipp_fields |= IPPF_RTHDR;
14893 14940 bcopy(src->ipp_rthdr, dst->ipp_rthdr,
14894 14941 src->ipp_rthdrlen);
14895 14942 dst->ipp_rthdrlen = src->ipp_rthdrlen;
14896 14943 }
14897 14944 if (fields & IPPF_IPV4_OPTIONS) {
14898 14945 dst->ipp_ipv4_options = kmem_alloc(src->ipp_ipv4_options_len,
14899 14946 kmflag);
14900 14947 if (dst->ipp_ipv4_options == NULL) {
14901 14948 ip_pkt_free(dst);
14902 14949 return (ENOMEM);
14903 14950 }
14904 14951 dst->ipp_fields |= IPPF_IPV4_OPTIONS;
14905 14952 bcopy(src->ipp_ipv4_options, dst->ipp_ipv4_options,
14906 14953 src->ipp_ipv4_options_len);
14907 14954 dst->ipp_ipv4_options_len = src->ipp_ipv4_options_len;
14908 14955 }
14909 14956 if (fields & IPPF_LABEL_V4) {
14910 14957 dst->ipp_label_v4 = kmem_alloc(src->ipp_label_len_v4, kmflag);
14911 14958 if (dst->ipp_label_v4 == NULL) {
14912 14959 ip_pkt_free(dst);
14913 14960 return (ENOMEM);
14914 14961 }
14915 14962 dst->ipp_fields |= IPPF_LABEL_V4;
14916 14963 bcopy(src->ipp_label_v4, dst->ipp_label_v4,
14917 14964 src->ipp_label_len_v4);
14918 14965 dst->ipp_label_len_v4 = src->ipp_label_len_v4;
14919 14966 }
14920 14967 if (fields & IPPF_LABEL_V6) {
14921 14968 dst->ipp_label_v6 = kmem_alloc(src->ipp_label_len_v6, kmflag);
14922 14969 if (dst->ipp_label_v6 == NULL) {
14923 14970 ip_pkt_free(dst);
14924 14971 return (ENOMEM);
14925 14972 }
14926 14973 dst->ipp_fields |= IPPF_LABEL_V6;
14927 14974 bcopy(src->ipp_label_v6, dst->ipp_label_v6,
14928 14975 src->ipp_label_len_v6);
14929 14976 dst->ipp_label_len_v6 = src->ipp_label_len_v6;
14930 14977 }
14931 14978 if (fields & IPPF_FRAGHDR) {
14932 14979 dst->ipp_fraghdr = kmem_alloc(src->ipp_fraghdrlen, kmflag);
14933 14980 if (dst->ipp_fraghdr == NULL) {
14934 14981 ip_pkt_free(dst);
14935 14982 return (ENOMEM);
14936 14983 }
14937 14984 dst->ipp_fields |= IPPF_FRAGHDR;
14938 14985 bcopy(src->ipp_fraghdr, dst->ipp_fraghdr,
14939 14986 src->ipp_fraghdrlen);
14940 14987 dst->ipp_fraghdrlen = src->ipp_fraghdrlen;
14941 14988 }
14942 14989 return (0);
14943 14990 }
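
A note on the pattern above: every optional field is handled identically: allocate, bail out through ip_pkt_free() on failure, set the field's bit in ipp_fields, bcopy the data, record the length. Because each bit is set before the next allocation is attempted, the error path frees exactly the fields copied so far. A minimal standalone sketch of that pattern, with hypothetical struct and field names (not the kernel's; the caller is assumed to have zeroed *dst):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	#define	F_OPTA	0x1
	#define	F_OPTB	0x2

	typedef struct {
		unsigned int fields;	/* which fields are allocated */
		void *opta; size_t opta_len;
		void *optb; size_t optb_len;
	} pkt_t;

	static void
	pkt_free(pkt_t *p)
	{
		if (p->fields & F_OPTA)
			free(p->opta);
		if (p->fields & F_OPTB)
			free(p->optb);
		p->fields = 0;
	}

	static int
	pkt_copy(const pkt_t *src, pkt_t *dst)
	{
		if (src->fields & F_OPTA) {
			if ((dst->opta = malloc(src->opta_len)) == NULL) {
				pkt_free(dst);	/* nothing marked yet */
				return (ENOMEM);
			}
			dst->fields |= F_OPTA;	/* mark before next alloc */
			memcpy(dst->opta, src->opta, src->opta_len);
			dst->opta_len = src->opta_len;
		}
		if (src->fields & F_OPTB) {
			if ((dst->optb = malloc(src->optb_len)) == NULL) {
				pkt_free(dst);	/* frees opta via the mask */
				return (ENOMEM);
			}
			dst->fields |= F_OPTB;
			memcpy(dst->optb, src->optb, src->optb_len);
			dst->optb_len = src->optb_len;
		}
		return (0);
	}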
14944 14991
14945 14992 /*
14946 14993 * Returns INADDR_ANY if no source route
14947 14994 */
14948 14995 ipaddr_t
14949 14996 ip_pkt_source_route_v4(const ip_pkt_t *ipp)
14950 14997 {
14951 14998 ipaddr_t nexthop = INADDR_ANY;
14952 14999 ipoptp_t opts;
14953 15000 uchar_t *opt;
14954 15001 uint8_t optval;
14955 15002 uint8_t optlen;
14956 15003 uint32_t totallen;
14957 15004
14958 15005 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
14959 15006 return (INADDR_ANY);
14960 15007
14961 15008 totallen = ipp->ipp_ipv4_options_len;
14962 15009 if (totallen & 0x3)
14963 15010 return (INADDR_ANY);
14964 15011
14965 15012 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
14966 15013 optval != IPOPT_EOL;
14967 15014 optval = ipoptp_next(&opts)) {
14968 15015 opt = opts.ipoptp_cur;
14969 15016 switch (optval) {
14970 15017 uint8_t off;
14971 15018 case IPOPT_SSRR:
14972 15019 case IPOPT_LSRR:
14973 15020 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
14974 15021 break;
14975 15022 }
14976 15023 optlen = opts.ipoptp_len;
14977 15024 off = opt[IPOPT_OFFSET];
14978 15025 off--;
14979 15026 if (optlen < IP_ADDR_LEN ||
14980 15027 off > optlen - IP_ADDR_LEN) {
14981 15028 /* End of source route */
14982 15029 break;
14983 15030 }
14984 15031 bcopy((char *)opt + off, &nexthop, IP_ADDR_LEN);
14985 15032 if (nexthop == htonl(INADDR_LOOPBACK)) {
14986 15033 /* Ignore */
14987 15034 nexthop = INADDR_ANY;
14988 15035 break;
14989 15036 }
14990 15037 break;
14991 15038 }
14992 15039 }
14993 15040 return (nexthop);
14994 15041 }
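
The off-- above exists because IPOPT_OFFSET is 1-based: per RFC 791 the source-route pointer starts at 4, the octet of the first recorded address within the option. The totallen & 0x3 check likewise assumes the option block is padded to a 4-byte multiple. A hedged userland sketch of laying out an LSRR option with exactly that shape (illustrative only; not code from this file):

	#include <netinet/in.h>
	#include <string.h>

	/*
	 * Illustrative only: build an IPv4 LSRR option so the 1-based
	 * pointer convention is visible.  Caller supplies a buffer of
	 * at least 3 + nhops * 4 + 1 bytes.
	 */
	static unsigned int
	build_lsrr(unsigned char *buf, const struct in_addr *hops, int nhops)
	{
		unsigned int optlen = 3 + nhops * 4;

		buf[0] = 0x83;		/* IPOPT_LSRR: copy | class 0 | nbr 3 */
		buf[1] = optlen;	/* length of this option */
		buf[2] = 4;		/* pointer: 1-based, first addr at 4 */
		memcpy(&buf[3], hops, (size_t)nhops * 4);
		buf[optlen] = 0;	/* IPOPT_EOL pad to 4-byte multiple */
		return (optlen + 1);	/* 3 + 4n + 1 == 4 * (n + 1) */
	}

With nhops == 2 this yields 12 bytes: 0x83, 0x0b, 0x04, eight address octets, then the end-of-list pad, which satisfies the alignment check above.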
14995 15042
14996 15043 /*
14997 15044 * Reverse a source route.
14998 15045 */
14999 15046 void
15000 15047 ip_pkt_source_route_reverse_v4(ip_pkt_t *ipp)
15001 15048 {
15002 15049 ipaddr_t tmp;
15003 15050 ipoptp_t opts;
15004 15051 uchar_t *opt;
15005 15052 uint8_t optval;
15006 15053 uint32_t totallen;
15007 15054
15008 15055 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
15009 15056 return;
15010 15057
15011 15058 totallen = ipp->ipp_ipv4_options_len;
15012 15059 if (totallen & 0x3)
15013 15060 return;
15014 15061
15015 15062 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
15016 15063 optval != IPOPT_EOL;
15017 15064 optval = ipoptp_next(&opts)) {
15018 15065 uint8_t off1, off2;
15019 15066
15020 15067 opt = opts.ipoptp_cur;
15021 15068 switch (optval) {
15022 15069 case IPOPT_SSRR:
15023 15070 case IPOPT_LSRR:
15024 15071 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
15025 15072 break;
15026 15073 }
15027 15074 off1 = IPOPT_MINOFF_SR - 1;
15028 15075 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
15029 15076 while (off2 > off1) {
15030 15077 bcopy(opt + off2, &tmp, IP_ADDR_LEN);
15031 15078 bcopy(opt + off1, opt + off2, IP_ADDR_LEN);
15032 15079 				bcopy(&tmp, opt + off1, IP_ADDR_LEN);
15033 15080 off2 -= IP_ADDR_LEN;
15034 15081 off1 += IP_ADDR_LEN;
15035 15082 }
15036 15083 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
15037 15084 break;
15038 15085 }
15039 15086 }
15040 15087 }
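
The loop above swaps addresses in place with two offsets converging from the ends; the temporary saved from the high offset must be written back to the low offset, and the pointer is then rewound to IPOPT_MINOFF_SR so a reply can walk the reversed route from its first hop. The same exchange over a plain array, as a self-contained sketch:

	#include <stdio.h>

	/* In-place reversal with two converging indices, as above. */
	static void
	reverse_hops(unsigned int *hops, int n)
	{
		int lo = 0, hi = n - 1;

		while (lo < hi) {
			unsigned int tmp = hops[hi];

			hops[hi] = hops[lo];
			hops[lo] = tmp;		/* tmp lands at the low end */
			lo++;
			hi--;
		}
	}

	int
	main(void)
	{
		unsigned int hops[] = { 1, 2, 3, 4 };

		reverse_hops(hops, 4);
		(void) printf("%u %u %u %u\n",
		    hops[0], hops[1], hops[2], hops[3]);	/* 4 3 2 1 */
		return (0);
	}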
15041 15088
15042 15089 /*
15043 15090 * Returns NULL if no routing header
15044 15091 */
15045 15092 in6_addr_t *
15046 15093 ip_pkt_source_route_v6(const ip_pkt_t *ipp)
15047 15094 {
15048 15095 in6_addr_t *nexthop = NULL;
15049 15096 ip6_rthdr0_t *rthdr;
15050 15097
15051 15098 if (!(ipp->ipp_fields & IPPF_RTHDR))
15052 15099 return (NULL);
15053 15100
15054 15101 rthdr = (ip6_rthdr0_t *)ipp->ipp_rthdr;
15055 15102 if (rthdr->ip6r0_segleft == 0)
15056 15103 return (NULL);
15057 15104
15058 15105 nexthop = (in6_addr_t *)((char *)rthdr + sizeof (*rthdr));
15059 15106 return (nexthop);
15060 15107 }
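
This depends on the type 0 routing header layout: an 8-byte fixed header followed immediately by 16-byte addresses, so the first remaining hop sits sizeof (*rthdr) bytes in. A sketch of walking the remaining hops under that assumption (a well-formed, contiguous header is presumed; the userland struct name is from <netinet/ip6.h>):

	#include <stdio.h>
	#include <netinet/ip6.h>

	/* Sketch: visit each remaining hop of a type 0 routing header. */
	static void
	walk_rthdr0(const struct ip6_rthdr0 *rthdr)
	{
		const struct in6_addr *addrs = (const struct in6_addr *)
		    ((const char *)rthdr + sizeof (*rthdr));
		unsigned int i;

		for (i = 0; i < rthdr->ip6r0_segleft; i++)
			(void) printf("segment %u at %p\n",
			    i, (const void *)&addrs[i]);
	}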
15061 15108
15062 15109 zoneid_t
15063 15110 ip_get_zoneid_v4(ipaddr_t addr, mblk_t *mp, ip_recv_attr_t *ira,
15064 15111 zoneid_t lookup_zoneid)
15065 15112 {
15066 15113 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15067 15114 ire_t *ire;
15068 15115 int ire_flags = MATCH_IRE_TYPE;
15069 15116 zoneid_t zoneid = ALL_ZONES;
15070 15117
15071 15118 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15072 15119 return (ALL_ZONES);
15073 15120
15074 15121 if (lookup_zoneid != ALL_ZONES)
15075 15122 ire_flags |= MATCH_IRE_ZONEONLY;
15076 15123 ire = ire_ftable_lookup_v4(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15077 15124 NULL, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15078 15125 if (ire != NULL) {
15079 15126 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15080 15127 ire_refrele(ire);
15081 15128 }
15082 15129 return (zoneid);
15083 15130 }
15084 15131
15085 15132 zoneid_t
15086 15133 ip_get_zoneid_v6(in6_addr_t *addr, mblk_t *mp, const ill_t *ill,
15087 15134 ip_recv_attr_t *ira, zoneid_t lookup_zoneid)
15088 15135 {
15089 15136 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15090 15137 ire_t *ire;
15091 15138 int ire_flags = MATCH_IRE_TYPE;
15092 15139 zoneid_t zoneid = ALL_ZONES;
15093 15140
15094 15141 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15095 15142 return (ALL_ZONES);
15096 15143
15097 15144 if (IN6_IS_ADDR_LINKLOCAL(addr))
15098 15145 ire_flags |= MATCH_IRE_ILL;
15099 15146
15100 15147 if (lookup_zoneid != ALL_ZONES)
15101 15148 ire_flags |= MATCH_IRE_ZONEONLY;
15102 15149 ire = ire_ftable_lookup_v6(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15103 15150 ill, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15104 15151 if (ire != NULL) {
15105 15152 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15106 15153 ire_refrele(ire);
15107 15154 }
15108 15155 return (zoneid);
15109 15156 }
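
Both lookups share one reference discipline: ire_ftable_lookup_v4()/_v6() return a held IRE, so every non-NULL result is paired with ire_refrele() once ire_zoneid has been read. A thin caller-side sketch (the packet variables ipha, mp, and ira here are hypothetical):

	zoneid_t zid;

	zid = ip_get_zoneid_v4(ipha->ipha_dst, mp, ira, ALL_ZONES);
	if (zid == ALL_ZONES)
		zid = GLOBAL_ZONEID;	/* no zone-local IRE matched */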
15110 15157
15111 15158 /*
15112 15159  * IP observability hook support functions.
15113 15160 */
15114 15161 static void
15115 15162 ipobs_init(ip_stack_t *ipst)
15116 15163 {
15117 15164 netid_t id;
15118 15165
15119 15166 id = net_getnetidbynetstackid(ipst->ips_netstack->netstack_stackid);
15120 15167
15121 15168 ipst->ips_ip4_observe_pr = net_protocol_lookup(id, NHF_INET);
15122 15169 VERIFY(ipst->ips_ip4_observe_pr != NULL);
15123 15170
15124 15171 ipst->ips_ip6_observe_pr = net_protocol_lookup(id, NHF_INET6);
15125 15172 VERIFY(ipst->ips_ip6_observe_pr != NULL);
15126 15173 }
15127 15174
15128 15175 static void
15129 15176 ipobs_fini(ip_stack_t *ipst)
15130 15177 {
15131 15178
15132 15179 VERIFY(net_protocol_release(ipst->ips_ip4_observe_pr) == 0);
15133 15180 VERIFY(net_protocol_release(ipst->ips_ip6_observe_pr) == 0);
15134 15181 }
15135 15182
15136 15183 /*
15137 15184 * hook_pkt_observe_t is composed in network byte order so that the
15138 15185 * entire mblk_t chain handed into hook_run can be used as-is.
15139 15186 * The caveat is that use of the fields, such as the zone fields,
15140 15187 * requires conversion into host byte order first.
15141 15188 */
15142 15189 void
15143 15190 ipobs_hook(mblk_t *mp, int htype, zoneid_t zsrc, zoneid_t zdst,
15144 15191 const ill_t *ill, ip_stack_t *ipst)
15145 15192 {
15146 15193 hook_pkt_observe_t *hdr;
15147 15194 uint64_t grifindex;
15148 15195 mblk_t *imp;
15149 15196
15150 15197 imp = allocb(sizeof (*hdr), BPRI_HI);
15151 15198 if (imp == NULL)
15152 15199 return;
15153 15200
15154 15201 hdr = (hook_pkt_observe_t *)imp->b_rptr;
15155 15202 /*
15156 15203 	 * b_wptr is set so that the apparent size of the data in the
15157 15204 	 * mblk_t excludes the pointers at the end of hook_pkt_observe_t.
15158 15205 */
15159 15206 imp->b_wptr = imp->b_rptr + sizeof (dl_ipnetinfo_t);
15160 15207 imp->b_cont = mp;
15161 15208
15162 15209 ASSERT(DB_TYPE(mp) == M_DATA);
15163 15210
15164 15211 if (IS_UNDER_IPMP(ill))
15165 15212 grifindex = ipmp_ill_get_ipmp_ifindex(ill);
15166 15213 else
15167 15214 grifindex = 0;
15168 15215
15169 15216 hdr->hpo_version = 1;
15170 15217 hdr->hpo_htype = htons(htype);
15171 15218 hdr->hpo_pktlen = htonl((ulong_t)msgdsize(mp));
15172 15219 hdr->hpo_ifindex = htonl(ill->ill_phyint->phyint_ifindex);
15173 15220 hdr->hpo_grifindex = htonl(grifindex);
15174 15221 hdr->hpo_zsrc = htonl(zsrc);
15175 15222 hdr->hpo_zdst = htonl(zdst);
15176 15223 hdr->hpo_pkt = imp;
15177 15224 hdr->hpo_ctx = ipst->ips_netstack;
15178 15225
15179 15226 if (ill->ill_isv6) {
15180 15227 hdr->hpo_family = AF_INET6;
15181 15228 (void) hook_run(ipst->ips_ipv6_net_data->netd_hooks,
15182 15229 ipst->ips_ipv6observing, (hook_data_t)hdr);
15183 15230 } else {
15184 15231 hdr->hpo_family = AF_INET;
15185 15232 (void) hook_run(ipst->ips_ipv4_net_data->netd_hooks,
15186 15233 ipst->ips_ipv4observing, (hook_data_t)hdr);
15187 15234 }
15188 15235
15189 15236 imp->b_cont = NULL;
15190 15237 freemsg(imp);
15191 15238 }
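
Per the block comment above ipobs_hook(), every multi-byte field in hook_pkt_observe_t is stored in network byte order; a consumer on the far side of hook_run() must convert before use. A sketch of such a consumer follows; the hook_func_t signature and the cmn_err() reporting are assumptions for illustration, not code from this webrev:

	/*
	 * Sketch of a hook consumer: everything multi-byte in the
	 * header must come back through ntohs()/ntohl() before use.
	 * Assumes the kernel declarations already in scope in ip.c.
	 */
	static int
	my_observe_cb(hook_event_token_t tok, hook_data_t data, void *arg)
	{
		hook_pkt_observe_t *hdr = (hook_pkt_observe_t *)data;

		if (hdr->hpo_version != 1)	/* one byte: no conversion */
			return (0);

		cmn_err(CE_CONT, "obs: htype %u pktlen %u zones %d -> %d\n",
		    ntohs(hdr->hpo_htype), ntohl(hdr->hpo_pktlen),
		    (int)ntohl(hdr->hpo_zsrc), (int)ntohl(hdr->hpo_zdst));
		return (0);
	}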
15192 15239
15193 15240 /*
15194 15241 * Utility routine that checks if `v4srcp' is a valid address on underlying
15195 15242 * interface `ill'. If `ipifp' is non-NULL, it's set to a held ipif
15196 15243 * associated with `v4srcp' on success. NOTE: if this is not called from
15197 15244 * inside the IPSQ (ill_g_lock is not held), `ill' may be removed from the
15198 15245 * group during or after this lookup.
15199 15246 */
15200 15247 boolean_t
15201 15248 ipif_lookup_testaddr_v4(ill_t *ill, const in_addr_t *v4srcp, ipif_t **ipifp)
15202 15249 {
15203 15250 ipif_t *ipif;
15204 15251
15205 15252 ipif = ipif_lookup_addr_exact(*v4srcp, ill, ill->ill_ipst);
15206 15253 if (ipif != NULL) {
15207 15254 if (ipifp != NULL)
15208 15255 *ipifp = ipif;
15209 15256 else
15210 15257 ipif_refrele(ipif);
15211 15258 return (B_TRUE);
15212 15259 }
15213 15260
15214 15261 ip1dbg(("ipif_lookup_testaddr_v4: cannot find ipif for src %x\n",
15215 15262 *v4srcp));
15216 15263 return (B_FALSE);
15217 15264 }
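
Callers that pass a non-NULL ipifp inherit the hold and must release it themselves; passing NULL makes the call a pure existence test. A hypothetical usage sketch (v4src is a stand-in variable, not from this file):

	ipif_t *ipif;

	if (ipif_lookup_testaddr_v4(ill, &v4src, &ipif)) {
		/* ... use the held ipif briefly ... */
		ipif_refrele(ipif);	/* drop the hold from the lookup */
	}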
15218 15265
15219 15266 /*
15220 15267  * Transport protocol callback function for CPU state changes.
15221 15268 */
15222 15269 /* ARGSUSED */
15223 15270 static int
15224 15271 ip_tp_cpu_update(cpu_setup_t what, int id, void *arg)
15225 15272 {
15226 15273 processorid_t cpu_seqid;
15227 15274 netstack_handle_t nh;
15228 15275 netstack_t *ns;
15229 15276
15230 15277 ASSERT(MUTEX_HELD(&cpu_lock));
15231 15278
15232 15279 switch (what) {
15233 15280 case CPU_CONFIG:
15234 15281 case CPU_ON:
15235 15282 case CPU_INIT:
15236 15283 case CPU_CPUPART_IN:
15237 15284 cpu_seqid = cpu[id]->cpu_seqid;
15238 15285 netstack_next_init(&nh);
15239 15286 while ((ns = netstack_next(&nh)) != NULL) {
15287 + dccp_stack_cpu_add(ns->netstack_dccp, cpu_seqid);
15240 15288 tcp_stack_cpu_add(ns->netstack_tcp, cpu_seqid);
15241 15289 sctp_stack_cpu_add(ns->netstack_sctp, cpu_seqid);
15242 15290 udp_stack_cpu_add(ns->netstack_udp, cpu_seqid);
15243 15291 netstack_rele(ns);
15244 15292 }
15245 15293 netstack_next_fini(&nh);
15246 15294 break;
15247 15295 case CPU_UNCONFIG:
15248 15296 case CPU_OFF:
15249 15297 case CPU_CPUPART_OUT:
15250 15298 /*
15251 15299 * Nothing to do. We don't remove the per CPU stats from
15252 15300 * the IP stack even when the CPU goes offline.
15253 15301 */
15254 15302 break;
15255 15303 default:
15256 15304 break;
15257 15305 }
15258 15306 return (0);
15259 15307 }
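
The new dccp_stack_cpu_add() call above mirrors the existing tcp/sctp/udp hooks, letting the DCCP stack size its per-CPU state for an incoming CPU. The callback itself only runs once registered with the CPU framework; a sketch of how that registration typically looks in illumos (placement in the init and fini paths is assumed, not shown in this webrev; register_cpu_setup_func() must be called with cpu_lock held):

	/* Presumed init-path registration, e.g. from ip_ddi_g_init(). */
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(ip_tp_cpu_update, NULL);
	mutex_exit(&cpu_lock);

	/* ... and the matching teardown in the fini path: */
	mutex_enter(&cpu_lock);
	unregister_cpu_setup_func(ip_tp_cpu_update, NULL);
	mutex_exit(&cpu_lock);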