dccp: starting module template
--- old/usr/src/uts/common/inet/ip/ip.c
+++ new/usr/src/uts/common/inet/ip/ip.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 1990 Mentat Inc.
25 25 * Copyright (c) 2011 Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/types.h>
29 29 #include <sys/stream.h>
30 30 #include <sys/dlpi.h>
31 31 #include <sys/stropts.h>
32 32 #include <sys/sysmacros.h>
33 33 #include <sys/strsubr.h>
34 34 #include <sys/strlog.h>
35 35 #include <sys/strsun.h>
36 36 #include <sys/zone.h>
37 37 #define _SUN_TPI_VERSION 2
38 38 #include <sys/tihdr.h>
39 39 #include <sys/xti_inet.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/suntpi.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/debug.h>
44 44 #include <sys/kobj.h>
45 45 #include <sys/modctl.h>
46 46 #include <sys/atomic.h>
47 47 #include <sys/policy.h>
48 48 #include <sys/priv.h>
49 49 #include <sys/taskq.h>
50 50
51 51 #include <sys/systm.h>
52 52 #include <sys/param.h>
53 53 #include <sys/kmem.h>
54 54 #include <sys/sdt.h>
55 55 #include <sys/socket.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/isa_defs.h>
58 58 #include <sys/mac.h>
59 59 #include <net/if.h>
60 60 #include <net/if_arp.h>
61 61 #include <net/route.h>
62 62 #include <sys/sockio.h>
63 63 #include <netinet/in.h>
64 64 #include <net/if_dl.h>
65 65
66 66 #include <inet/common.h>
67 67 #include <inet/mi.h>
68 68 #include <inet/mib2.h>
69 69 #include <inet/nd.h>
70 70 #include <inet/arp.h>
71 71 #include <inet/snmpcom.h>
72 72 #include <inet/optcom.h>
73 73 #include <inet/kstatcom.h>
74 74
75 75 #include <netinet/igmp_var.h>
76 76 #include <netinet/ip6.h>
77 77 #include <netinet/icmp6.h>
78 78 #include <netinet/sctp.h>
79 79
80 80 #include <inet/ip.h>
81 81 #include <inet/ip_impl.h>
82 82 #include <inet/ip6.h>
83 83 #include <inet/ip6_asp.h>
84 84 #include <inet/tcp.h>
85 85 #include <inet/tcp_impl.h>
86 86 #include <inet/ip_multi.h>
87 87 #include <inet/ip_if.h>
88 88 #include <inet/ip_ire.h>
89 89 #include <inet/ip_ftable.h>
90 90 #include <inet/ip_rts.h>
91 91 #include <inet/ip_ndp.h>
92 92 #include <inet/ip_listutils.h>
93 93 #include <netinet/igmp.h>
94 94 #include <netinet/ip_mroute.h>
95 95 #include <inet/ipp_common.h>
96 96
97 97 #include <net/pfkeyv2.h>
98 98 #include <inet/sadb.h>
99 99 #include <inet/ipsec_impl.h>
100 100 #include <inet/iptun/iptun_impl.h>
101 101 #include <inet/ipdrop.h>
102 102 #include <inet/ip_netinfo.h>
103 103 #include <inet/ilb_ip.h>
104 104
105 105 #include <sys/ethernet.h>
106 106 #include <net/if_types.h>
107 107 #include <sys/cpuvar.h>
108 108
109 109 #include <ipp/ipp.h>
110 110 #include <ipp/ipp_impl.h>
111 111 #include <ipp/ipgpc/ipgpc.h>
112 112
113 113 #include <sys/pattr.h>
114 +#include <inet/dccp/dccp_ip.h>
115 +#include <inet/dccp/dccp_impl.h>
114 116 #include <inet/ipclassifier.h>
115 117 #include <inet/sctp_ip.h>
116 118 #include <inet/sctp/sctp_impl.h>
117 119 #include <inet/udp_impl.h>
118 120 #include <inet/rawip_impl.h>
119 121 #include <inet/rts_impl.h>
120 122
121 123 #include <sys/tsol/label.h>
122 124 #include <sys/tsol/tnet.h>
123 125
124 126 #include <sys/squeue_impl.h>
125 127 #include <inet/ip_arp.h>
126 128
127 129 #include <sys/clock_impl.h> /* For LBOLT_FASTPATH{,64} */
128 130
129 131 /*
130 132 * Values for squeue switch:
131 133 * IP_SQUEUE_ENTER_NODRAIN: SQ_NODRAIN
132 134 * IP_SQUEUE_ENTER: SQ_PROCESS
133 135 * IP_SQUEUE_FILL: SQ_FILL
134 136 */
135 137 int ip_squeue_enter = IP_SQUEUE_ENTER;	/* Settable in /etc/system */
136 138
137 139 int ip_squeue_flag;
138 140
139 141 /*
140 142  * Settable in /etc/system
141 143 */
142 144 int ip_poll_normal_ms = 100;
143 145 int ip_poll_normal_ticks = 0;
144 146 int ip_modclose_ackwait_ms = 3000;
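
/*
 * A usage sketch (values here are illustrative, not recommendations):
 * tunables like the above are overridden at boot in /etc/system using
 * module:variable syntax, e.g.
 *
 *	set ip:ip_squeue_enter = 1
 *	set ip:ip_poll_normal_ms = 200
 */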
145 147
146 148 /*
147 149 * It would be nice to have these present only in DEBUG systems, but the
148 150 * current design of the global symbol checking logic requires them to be
149 151 * unconditionally present.
150 152 */
151 153 uint_t ip_thread_data; /* TSD key for debug support */
152 154 krwlock_t ip_thread_rwlock;
153 155 list_t ip_thread_list;
154 156
155 157 /*
156 158 * Structure to represent a linked list of msgblks. Used by ip_snmp_ functions.
157 159 */
158 160
159 161 struct listptr_s {
160 162 mblk_t *lp_head; /* pointer to the head of the list */
161 163 mblk_t *lp_tail; /* pointer to the tail of the list */
162 164 };
163 165
164 166 typedef struct listptr_s listptr_t;
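
/*
 * A minimal sketch of how the ip_snmp_ functions grow such a list by
 * tail-appending an mblk (a hypothetical helper, not part of this file;
 * the real code performs the append via snmp_append_data2()):
 */
static void
listptr_append(listptr_t *lp, mblk_t *mp)
{
	if (lp->lp_head == NULL)
		lp->lp_head = mp;		/* list was empty */
	else
		lp->lp_tail->b_cont = mp;	/* chain after the tail */
	lp->lp_tail = mp;
}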
165 167
166 168 /*
167 169 * This is used by ip_snmp_get_mib2_ip_route_media and
168 170 * ip_snmp_get_mib2_ip6_route_media to carry the lists of return data.
169 171 */
170 172 typedef struct iproutedata_s {
171 173 uint_t ird_idx;
172 174 uint_t ird_flags; /* see below */
173 175 listptr_t ird_route; /* ipRouteEntryTable */
174 176 listptr_t ird_netmedia; /* ipNetToMediaEntryTable */
175 177 listptr_t ird_attrs; /* ipRouteAttributeTable */
176 178 } iproutedata_t;
177 179
178 180 /* Include ire_testhidden and IRE_IF_CLONE routes */
179 181 #define IRD_REPORT_ALL 0x01
180 182
181 183 /*
182 184  * Cluster specific hooks. These should be NULL when booted as a non-cluster system.
183 185 */
184 186
185 187 /*
186 188 * Hook functions to enable cluster networking
187 189 * On non-clustered systems these vectors must always be NULL.
188 190 *
189 191  * Hook function to check whether a specified IP address is a shared IP
190 192  * address in the cluster
191 193 *
192 194 */
193 195 int (*cl_inet_isclusterwide)(netstackid_t stack_id, uint8_t protocol,
194 196 sa_family_t addr_family, uint8_t *laddrp, void *args) = NULL;
195 197
196 198 /*
197 199 * Hook function to generate cluster wide ip fragment identifier
198 200 */
199 201 uint32_t (*cl_inet_ipident)(netstackid_t stack_id, uint8_t protocol,
200 202 sa_family_t addr_family, uint8_t *laddrp, uint8_t *faddrp,
201 203 void *args) = NULL;
202 204
203 205 /*
204 206 * Hook function to generate cluster wide SPI.
205 207 */
206 208 void (*cl_inet_getspi)(netstackid_t, uint8_t, uint8_t *, size_t,
207 209 void *) = NULL;
208 210
209 211 /*
210 212  * Hook function to verify if the SPI is already utilized.
211 213 */
212 214
213 215 int (*cl_inet_checkspi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;
214 216
215 217 /*
216 218 * Hook function to delete the SPI from the cluster wide repository.
217 219 */
218 220
219 221 void (*cl_inet_deletespi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;
220 222
221 223 /*
222 224  * Hook function to inform the cluster when a packet is received on an IDLE SA
223 225 */
224 226
225 227 void (*cl_inet_idlesa)(netstackid_t, uint8_t, uint32_t, sa_family_t,
226 228 in6_addr_t, in6_addr_t, void *) = NULL;
227 229
228 230 /*
229 231 * Synchronization notes:
230 232 *
231 233 * IP is a fully D_MP STREAMS module/driver. Thus it does not depend on any
232 234 * MT level protection given by STREAMS. IP uses a combination of its own
233 235 * internal serialization mechanism and standard Solaris locking techniques.
234 236 * The internal serialization is per phyint. This is used to serialize
235 237 * plumbing operations, IPMP operations, most set ioctls, etc.
236 238 *
237 239 * Plumbing is a long sequence of operations involving message
238 240 * exchanges between IP, ARP and device drivers. Many set ioctls are typically
239 241 * involved in plumbing operations. A natural model is to serialize these
240 242 * ioctls one per ill. For example plumbing of hme0 and qfe0 can go on in
241 243 * parallel without any interference. But various set ioctls on hme0 are best
242 244 * serialized, along with IPMP operations and processing of DLPI control
243 245 * messages received from drivers on a per phyint basis. This serialization is
244 246 * provided by the ipsq_t and primitives operating on this. Details can
245 247 * be found in ip_if.c above the core primitives operating on ipsq_t.
246 248 *
247 249 * Lookups of an ipif or ill by a thread return a refheld ipif / ill.
248 250  * Similarly, lookup of an ire by a thread also returns a refheld ire.
249 251 * In addition ipif's and ill's referenced by the ire are also indirectly
250 252 * refheld. Thus no ipif or ill can vanish as long as an ipif is refheld
251 253 * directly or indirectly. For example an SIOCSLIFADDR ioctl that changes the
252 254 * address of an ipif has to go through the ipsq_t. This ensures that only
253 255 * one such exclusive operation proceeds at any time on the ipif. It then
254 256 * waits for all refcnts
255 257 * associated with this ipif to come down to zero. The address is changed
256 258 * only after the ipif has been quiesced. Then the ipif is brought up again.
257 259 * More details are described above the comment in ip_sioctl_flags.
258 260 *
259 261  * Packet processing is based mostly on IREs and is fully multi-threaded
260 262 * using standard Solaris MT techniques.
261 263 *
262 264 * There are explicit locks in IP to handle:
263 265 * - The ip_g_head list maintained by mi_open_link() and friends.
264 266 *
265 267 * - The reassembly data structures (one lock per hash bucket)
266 268 *
267 269 * - conn_lock is meant to protect conn_t fields. The fields actually
268 270 * protected by conn_lock are documented in the conn_t definition.
269 271 *
270 272 * - ire_lock to protect some of the fields of the ire, IRE tables
271 273 * (one lock per hash bucket). Refer to ip_ire.c for details.
272 274 *
273 275 * - ndp_g_lock and ncec_lock for protecting NCEs.
274 276 *
275 277 * - ill_lock protects fields of the ill and ipif. Details in ip.h
276 278 *
277 279 * - ill_g_lock: This is a global reader/writer lock. Protects the following
278 280 * * The AVL tree based global multi list of all ills.
279 281 * * The linked list of all ipifs of an ill
280 282 * * The <ipsq-xop> mapping
281 283 * * <ill-phyint> association
282 284 * Insertion/deletion of an ill in the system, insertion/deletion of an ipif
283 285 * into an ill, changing the <ipsq-xop> mapping of an ill, changing the
284 286 * <ill-phyint> assoc of an ill will all have to hold the ill_g_lock as
285 287 * writer for the actual duration of the insertion/deletion/change.
286 288 *
287 289 * - ill_lock: This is a per ill mutex.
288 290 * It protects some members of the ill_t struct; see ip.h for details.
289 291 * It also protects the <ill-phyint> assoc.
290 292 * It also protects the list of ipifs hanging off the ill.
291 293 *
292 294 * - ipsq_lock: This is a per ipsq_t mutex lock.
293 295 * This protects some members of the ipsq_t struct; see ip.h for details.
294 296 * It also protects the <ipsq-ipxop> mapping
295 297 *
296 298 * - ipx_lock: This is a per ipxop_t mutex lock.
297 299 * This protects some members of the ipxop_t struct; see ip.h for details.
298 300 *
299 301 * - phyint_lock: This is a per phyint mutex lock. Protects just the
300 302 * phyint_flags
301 303 *
302 304 * - ip_addr_avail_lock: This is used to ensure the uniqueness of IP addresses.
303 305  * This lock is held in ipif_up_done so that the ipif is marked IPIF_UP and
304 306  * the uniqueness check is also done atomically.
305 307 *
306 308 * - ill_g_usesrc_lock: This readers/writer lock protects the usesrc
307 309 * group list linked by ill_usesrc_grp_next. It also protects the
308 310 * ill_usesrc_ifindex field. It is taken as a writer when a member of the
309 311 * group is being added or deleted. This lock is taken as a reader when
310 312  * walking the list/group (e.g. to get the number of members in a usesrc group).
311 313 * Note, it is only necessary to take this lock if the ill_usesrc_grp_next
312 314 * field is changing state i.e from NULL to non-NULL or vice-versa. For
313 315 * example, it is not necessary to take this lock in the initial portion
314 316 * of ip_sioctl_slifusesrc or at all in ip_sioctl_flags since these
315 317 * operations are executed exclusively and that ensures that the "usesrc
316 318 * group state" cannot change. The "usesrc group state" change can happen
317 319 * only in the latter part of ip_sioctl_slifusesrc and in ill_delete.
318 320 *
319 321  * Changing <ill-phyint>, <ipsq-xop> associations:
320 322 *
321 323 * To change the <ill-phyint> association, the ill_g_lock must be held
322 324 * as writer, and the ill_locks of both the v4 and v6 instance of the ill
323 325 * must be held.
324 326 *
325 327 * To change the <ipsq-xop> association, the ill_g_lock must be held as
326 328 * writer, the ipsq_lock must be held, and one must be writer on the ipsq.
327 329 * This is only done when ills are added or removed from IPMP groups.
328 330 *
329 331 * To add or delete an ipif from the list of ipifs hanging off the ill,
330 332 * ill_g_lock (writer) and ill_lock must be held and the thread must be
331 333 * a writer on the associated ipsq.
332 334 *
333 335  * To add an ill to or delete an ill from the system, the ill_g_lock must
334 336  * be held as writer and the thread must be a writer on the associated ipsq.
335 337 *
336 338  * To add an ilm to or delete an ilm from an ill, the ill_lock must be held
337 339  * and the thread must be a writer on the associated ipsq.
338 340 *
339 341 * Lock hierarchy
340 342 *
341 343 * Some lock hierarchy scenarios are listed below.
342 344 *
343 345 * ill_g_lock -> conn_lock -> ill_lock -> ipsq_lock -> ipx_lock
344 346 * ill_g_lock -> ill_lock(s) -> phyint_lock
345 347 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock
346 348 * ill_g_lock -> ip_addr_avail_lock
347 349 * conn_lock -> irb_lock -> ill_lock -> ire_lock
348 350 * ill_g_lock -> ip_g_nd_lock
349 351 * ill_g_lock -> ips_ipmp_lock -> ill_lock -> nce_lock
350 352 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock -> nce_lock
351 353 * arl_lock -> ill_lock
352 354 * ips_ire_dep_lock -> irb_lock
353 355 *
354 356  * When more than one ill lock needs to be held, the ill locks are sorted
355 357  * by address and acquired starting from the highest-addressed lock
356 358  * downward.
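 *
 * A sketch of that address-ordered acquisition for two ills (illustrative
 * only, not code from this file); the higher-addressed lock is taken first:
 *
 *	ill_t *hi = (ill_a > ill_b) ? ill_a : ill_b;
 *	ill_t *lo = (ill_a > ill_b) ? ill_b : ill_a;
 *
 *	mutex_enter(&hi->ill_lock);
 *	if (lo != hi)
 *		mutex_enter(&lo->ill_lock);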
357 359 *
358 360 * Multicast scenarios
359 361 * ips_ill_g_lock -> ill_mcast_lock
360 362 * conn_ilg_lock -> ips_ill_g_lock -> ill_lock
361 363 * ill_mcast_serializer -> ill_mcast_lock -> ips_ipmp_lock -> ill_lock
362 364 * ill_mcast_serializer -> ill_mcast_lock -> connf_lock -> conn_lock
363 365 * ill_mcast_serializer -> ill_mcast_lock -> conn_ilg_lock
364 366 * ill_mcast_serializer -> ill_mcast_lock -> ips_igmp_timer_lock
365 367 *
366 368 * IPsec scenarios
367 369 *
368 370 * ipsa_lock -> ill_g_lock -> ill_lock
369 371 * ill_g_usesrc_lock -> ill_g_lock -> ill_lock
370 372 *
371 373 * Trusted Solaris scenarios
372 374 *
373 375 * igsa_lock -> gcgrp_rwlock -> gcgrp_lock
374 376 * igsa_lock -> gcdb_lock
375 377 * gcgrp_rwlock -> ire_lock
376 378 * gcgrp_rwlock -> gcdb_lock
377 379 *
378 380 * squeue(sq_lock), flow related (ft_lock, fe_lock) locking
379 381 *
380 382 * cpu_lock --> ill_lock --> sqset_lock --> sq_lock
381 383 * sq_lock -> conn_lock -> QLOCK(q)
382 384 * ill_lock -> ft_lock -> fe_lock
383 385 *
384 386 * Routing/forwarding table locking notes:
385 387 *
386 388 * Lock acquisition order: Radix tree lock, irb_lock.
387 389 * Requirements:
388 390 * i. Walker must not hold any locks during the walker callback.
389 391  * ii. Walker must not see a truncated tree during the walk because of any node
390 392 * deletion.
391 393  * iii. Existing code assumes ire_bucket is valid if it is non-null and is used
392 394 * in many places in the code to walk the irb list. Thus even if all the
393 395 * ires in a bucket have been deleted, we still can't free the radix node
394 396 * until the ires have actually been inactive'd (freed).
395 397 *
396 398 * Tree traversal - Need to hold the global tree lock in read mode.
397 399  * Before dropping the global tree lock, need to increment the ire_refcnt
398 400  * to ensure that the radix node can't be deleted.
399 401 *
400 402 * Tree add - Need to hold the global tree lock in write mode to add a
401 403 * radix node. To prevent the node from being deleted, increment the
402 404 * irb_refcnt, after the node is added to the tree. The ire itself is
403 405 * added later while holding the irb_lock, but not the tree lock.
404 406 *
405 407 * Tree delete - Need to hold the global tree lock and irb_lock in write mode.
406 408 * All associated ires must be inactive (i.e. freed), and irb_refcnt
407 409 * must be zero.
408 410 *
409 411 * Walker - Increment irb_refcnt before calling the walker callback. Hold the
410 412 * global tree lock (read mode) for traversal.
411 413 *
412 414 * IRE dependencies - In some cases we hold ips_ire_dep_lock across ire_refrele
413 415 * hence we will acquire irb_lock while holding ips_ire_dep_lock.
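 *
 * Roughly, the walker protocol above is (a sketch only; IRB_REFHOLD and
 * IRB_REFRELE manipulate irb_refcnt, other details elided):
 *
 *	take the global tree lock in read mode
 *	IRB_REFHOLD(irb);
 *	drop the tree lock
 *	(*walk_func)(ire, arg);
 *	IRB_REFRELE(irb);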
414 416 *
415 417 * IPsec notes :
416 418 *
417 419 * IP interacts with the IPsec code (AH/ESP) by storing IPsec attributes
418 420  * in the ip_xmit_attr_t and ip_recv_attr_t. For outbound datagrams, the
419 421 * ip_xmit_attr_t has the
420 422 * information used by the IPsec code for applying the right level of
421 423 * protection. The information initialized by IP in the ip_xmit_attr_t
422 424 * is determined by the per-socket policy or global policy in the system.
423 425 * For inbound datagrams, the ip_recv_attr_t
424 426 * starts out with nothing in it. It gets filled
425 427 * with the right information if it goes through the AH/ESP code, which
426 428 * happens if the incoming packet is secure. The information initialized
427 429 * by AH/ESP, is later used by IP (during fanouts to ULP) to see whether
428 430 * the policy requirements needed by per-socket policy or global policy
429 431  * are met or not.
430 432 *
431 433  * For fully connected sockets, i.e. when dst, src [addr, port] are known,
432 434 * conn_policy_cached is set indicating that policy has been cached.
433 435 * conn_in_enforce_policy may or may not be set depending on whether
434 436 * there is a global policy match or per-socket policy match.
435 437  * Policy inheriting happens in ip_policy_set once the destination is known.
436 438 * Once the right policy is set on the conn_t, policy cannot change for
437 439 * this socket. This makes life simpler for TCP (UDP ?) where
438 440 * re-transmissions go out with the same policy. For symmetry, policy
439 441 * is cached for fully connected UDP sockets also. Thus if policy is cached,
440 442 * it also implies that policy is latched i.e policy cannot change
441 443 * on these sockets. As we have the right policy on the conn, we don't
442 444  * have to look up global policy for every outbound and inbound datagram,
443 445  * which serves as an optimization. Note that a global policy change
444 446 * does not affect fully connected sockets if they have policy. If fully
445 447  * connected sockets do not have any policy associated with them, a global
446 448  * policy change may affect them.
447 449 *
448 450 * IP Flow control notes:
449 451 * ---------------------
450 452 * Non-TCP streams are flow controlled by IP. The way this is accomplished
451 453 * differs when ILL_CAPAB_DLD_DIRECT is enabled for that IP instance. When
452 454 * ILL_DIRECT_CAPABLE(ill) is TRUE, IP can do direct function calls into
453 455 * GLDv3. Otherwise packets are sent down to lower layers using STREAMS
454 456 * functions.
455 457 *
456 458 * Per Tx ring udp flow control:
457 459 * This is applicable only when ILL_CAPAB_DLD_DIRECT capability is set in
458 460 * the ill (i.e. ILL_DIRECT_CAPABLE(ill) is true).
459 461 *
460 462 * The underlying link can expose multiple Tx rings to the GLDv3 mac layer.
461 463  * To achieve best performance, outgoing traffic needs to be fanned out among
462 464  * these Tx rings. mac_tx() is called (via str_mdata_fastpath_put()) to send
463 465 * traffic out of the NIC and it takes a fanout hint. UDP connections pass
464 466  * the address of connp as the fanout hint to mac_tx(). Under flow-controlled
465 467  * conditions, mac_tx() returns a non-NULL cookie (ip_mac_tx_cookie_t). This
466 468 * cookie points to a specific Tx ring that is blocked. The cookie is used to
467 469  * hash into an entry in the idl_tx_list[] array. Each idl_tx_list_t
468 470  * points to drain_lists (idl_t's). These drain lists store the blocked UDP
469 471 * connp's. The drain list is not a single list but a configurable number of
470 472 * lists.
471 473 *
472 474 * The diagram below shows idl_tx_list_t's and their drain_lists. ip_stack_t
473 475 * has an array of idl_tx_list_t. The size of the array is TX_FANOUT_SIZE
474 476 * which is equal to 128. This array in turn contains a pointer to idl_t[],
475 477 * the ip drain list. The idl_t[] array size is MIN(max_ncpus, 8). The drain
476 478 * list will point to the list of connp's that are flow controlled.
477 479 *
478 480 * --------------- ------- ------- -------
479 481 * |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
480 482 * | --------------- ------- ------- -------
481 483 * | --------------- ------- ------- -------
482 484 * |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
483 485 * ---------------- | --------------- ------- ------- -------
484 486 * |idl_tx_list[0]|->| --------------- ------- ------- -------
485 487 * ---------------- |->|drain_list[2]|-->|connp|-->|connp|-->|connp|-->
486 488 * | --------------- ------- ------- -------
487 489 * . . . . .
488 490 * | --------------- ------- ------- -------
489 491 * |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
490 492 * --------------- ------- ------- -------
491 493 * --------------- ------- ------- -------
492 494 * |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
493 495 * | --------------- ------- ------- -------
494 496 * | --------------- ------- ------- -------
495 497 * ---------------- |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
496 498 * |idl_tx_list[1]|->| --------------- ------- ------- -------
497 499 * ---------------- | . . . .
498 500 * | --------------- ------- ------- -------
499 501 * |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
500 502 * --------------- ------- ------- -------
501 503 * .....
502 504 * ----------------
503 505 * |idl_tx_list[n]|-> ...
504 506 * ----------------
505 507 *
506 508 * When mac_tx() returns a cookie, the cookie is hashed into an index into
507 509 * ips_idl_tx_list[], and conn_drain_insert() is called with the idl_tx_list
508 510 * to insert the conn onto. conn_drain_insert() asserts flow control for the
509 511 * sockets via su_txq_full() (non-STREAMS) or QFULL on conn_wq (STREAMS).
510 512 * Further, conn_blocked is set to indicate that the conn is blocked.
511 513 *
512 514 * GLDv3 calls ill_flow_enable() when flow control is relieved. The cookie
513 515 * passed in the call to ill_flow_enable() identifies the blocked Tx ring and
514 516 * is again hashed to locate the appropriate idl_tx_list, which is then
515 517 * drained via conn_walk_drain(). conn_walk_drain() goes through each conn in
516 518 * the drain list and calls conn_drain_remove() to clear flow control (via
517 519 * calling su_txq_full() or clearing QFULL), and remove the conn from the
518 520 * drain list.
519 521 *
520 522 * Note that the drain list is not a single list but a (configurable) array of
521 523 * lists (8 elements by default). Synchronization between drain insertion and
522 524 * flow control wakeup is handled by using idl_txl->txl_lock, and only
523 525 * conn_drain_insert() and conn_drain_remove() manipulate the drain list.
524 526 *
525 527 * Flow control via STREAMS is used when ILL_DIRECT_CAPABLE() returns FALSE.
526 528 * On the send side, if the packet cannot be sent down to the driver by IP
527 529 * (canput() fails), ip_xmit() drops the packet and returns EWOULDBLOCK to the
528 530 * caller, who may then invoke ixa_check_drain_insert() to insert the conn on
529 531 * the 0'th drain list. When ip_wsrv() runs on the ill_wq because flow
530 532 * control has been relieved, the blocked conns in the 0'th drain list are
531 533 * drained as in the non-STREAMS case.
532 534 *
533 535 * In both the STREAMS and non-STREAMS cases, the sockfs upcall to set QFULL
534 536 * is done when the conn is inserted into the drain list (conn_drain_insert())
535 537  * and cleared when the conn is removed from it (conn_drain_remove()).
536 538 *
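 * A condensed sketch of the blocking path described above (illustrative;
 * the modulo hash stands in for the real cookie-to-index computation):
 *
 *	cookie = mac_tx(mch, mp, (uintptr_t)connp, flag, &mp_ret);
 *	if (cookie != 0) {
 *		txl = &ipst->ips_idl_tx_list[cookie % TX_FANOUT_SIZE];
 *		conn_drain_insert(connp, txl);
 *	}
 *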
537 539 * IPQOS notes:
538 540 *
539 541 * IPQoS Policies are applied to packets using IPPF (IP Policy framework)
540 542 * and IPQoS modules. IPPF includes hooks in IP at different control points
541 543 * (callout positions) which direct packets to IPQoS modules for policy
542 544 * processing. Policies, if present, are global.
543 545 *
544 546 * The callout positions are located in the following paths:
545 547 * o local_in (packets destined for this host)
546 548  * o local_out (packets originating from this host)
547 549 * o fwd_in (packets forwarded by this m/c - inbound)
548 550 * o fwd_out (packets forwarded by this m/c - outbound)
549 551 * Hooks at these callout points can be enabled/disabled using the ndd variable
550 552 * ip_policy_mask (a bit mask with the 4 LSB indicating the callout positions).
551 553 * By default all the callout positions are enabled.
552 554 *
553 555 * Outbound (local_out)
554 556 * Hooks are placed in ire_send_wire_v4 and ire_send_wire_v6.
555 557 *
556 558 * Inbound (local_in)
557 559 * Hooks are placed in ip_fanout_v4 and ip_fanout_v6.
558 560 *
559 561 * Forwarding (in and out)
560 562 * Hooks are placed in ire_recv_forward_v4/v6.
561 563 *
562 564 * IP Policy Framework processing (IPPF processing)
563 565 * Policy processing for a packet is initiated by ip_process, which ascertains
564 566 * that the classifier (ipgpc) is loaded and configured, failing which the
565 567  * packet resumes normal processing in IP. If the classifier is present, the
566 568 * packet is acted upon by one or more IPQoS modules (action instances), per
567 569 * filters configured in ipgpc and resumes normal IP processing thereafter.
568 570 * An action instance can drop a packet in course of its processing.
569 571 *
570 572 * Zones notes:
571 573 *
572 574 * The partitioning rules for networking are as follows:
573 575 * 1) Packets coming from a zone must have a source address belonging to that
574 576 * zone.
575 577 * 2) Packets coming from a zone can only be sent on a physical interface on
576 578 * which the zone has an IP address.
577 579 * 3) Between two zones on the same machine, packet delivery is only allowed if
578 580 * there's a matching route for the destination and zone in the forwarding
579 581 * table.
580 582 * 4) The TCP and UDP port spaces are per-zone; that is, two processes in
581 583 * different zones can bind to the same port with the wildcard address
582 584 * (INADDR_ANY).
583 585 *
584 586 * The granularity of interface partitioning is at the logical interface level.
585 587 * Therefore, every zone has its own IP addresses, and incoming packets can be
586 588 * attributed to a zone unambiguously. A logical interface is placed into a zone
587 589 * using the SIOCSLIFZONE ioctl; this sets the ipif_zoneid field in the ipif_t
588 590 * structure. Rule (1) is implemented by modifying the source address selection
589 591 * algorithm so that the list of eligible addresses is filtered based on the
590 592 * sending process zone.
591 593 *
592 594 * The Internet Routing Entries (IREs) are either exclusive to a zone or shared
593 595 * across all zones, depending on their type. Here is the break-up:
594 596 *
595 597 * IRE type Shared/exclusive
596 598 * -------- ----------------
597 599 * IRE_BROADCAST Exclusive
598 600 * IRE_DEFAULT (default routes) Shared (*)
599 601 * IRE_LOCAL Exclusive (x)
600 602 * IRE_LOOPBACK Exclusive
601 603 * IRE_PREFIX (net routes) Shared (*)
602 604 * IRE_IF_NORESOLVER (interface routes) Exclusive
603 605 * IRE_IF_RESOLVER (interface routes) Exclusive
604 606 * IRE_IF_CLONE (interface routes) Exclusive
605 607 * IRE_HOST (host routes) Shared (*)
606 608 *
607 609 * (*) A zone can only use a default or off-subnet route if the gateway is
608 610 * directly reachable from the zone, that is, if the gateway's address matches
609 611 * one of the zone's logical interfaces.
610 612 *
611 613  * (x) IRE_LOCAL entries are handled a bit differently.
612 614 * When ip_restrict_interzone_loopback is set (the default),
613 615 * ire_route_recursive restricts loopback using an IRE_LOCAL
614 616  * between zones to the case when L2 would have conceptually looped the packet
615 617 * back, i.e. the loopback which is required since neither Ethernet drivers
616 618 * nor Ethernet hardware loops them back. This is the case when the normal
617 619 * routes (ignoring IREs with different zoneids) would send out the packet on
618 620  * the same ill as the ill with which the IRE_LOCAL is associated.
619 621 *
620 622 * Multiple zones can share a common broadcast address; typically all zones
621 623 * share the 255.255.255.255 address. Incoming as well as locally originated
622 624 * broadcast packets must be dispatched to all the zones on the broadcast
623 625 * network. For directed broadcasts (e.g. 10.16.72.255) this is not trivial
624 626 * since some zones may not be on the 10.16.72/24 network. To handle this, each
625 627 * zone has its own set of IRE_BROADCAST entries; then, broadcast packets are
626 628 * sent to every zone that has an IRE_BROADCAST entry for the destination
627 629 * address on the input ill, see ip_input_broadcast().
628 630 *
629 631 * Applications in different zones can join the same multicast group address.
630 632 * The same logic applies for multicast as for broadcast. ip_input_multicast
631 633 * dispatches packets to all zones that have members on the physical interface.
632 634 */
633 635
634 636 /*
635 637 * Squeue Fanout flags:
636 638 * 0: No fanout.
637 639 * 1: Fanout across all squeues
638 640 */
639 641 boolean_t ip_squeue_fanout = 0;
640 642
641 643 /*
642 644 * Maximum dups allowed per packet.
643 645 */
644 646 uint_t ip_max_frag_dups = 10;
645 647
646 648 static int ip_open(queue_t *q, dev_t *devp, int flag, int sflag,
647 649 cred_t *credp, boolean_t isv6);
648 650 static mblk_t *ip_xmit_attach_llhdr(mblk_t *, nce_t *);
649 651
650 652 static boolean_t icmp_inbound_verify_v4(mblk_t *, icmph_t *, ip_recv_attr_t *);
651 653 static void icmp_inbound_too_big_v4(icmph_t *, ip_recv_attr_t *);
652 654 static void icmp_inbound_error_fanout_v4(mblk_t *, icmph_t *,
653 655 ip_recv_attr_t *);
654 656 static void icmp_options_update(ipha_t *);
655 657 static void icmp_param_problem(mblk_t *, uint8_t, ip_recv_attr_t *);
656 658 static void icmp_pkt(mblk_t *, void *, size_t, ip_recv_attr_t *);
657 659 static mblk_t *icmp_pkt_err_ok(mblk_t *, ip_recv_attr_t *);
658 660 static void icmp_redirect_v4(mblk_t *mp, ipha_t *, icmph_t *,
659 661 ip_recv_attr_t *);
660 662 static void icmp_send_redirect(mblk_t *, ipaddr_t, ip_recv_attr_t *);
661 663 static void icmp_send_reply_v4(mblk_t *, ipha_t *, icmph_t *,
662 664 ip_recv_attr_t *);
663 665
664 666 mblk_t *ip_dlpi_alloc(size_t, t_uscalar_t);
665 667 char *ip_dot_addr(ipaddr_t, char *);
666 668 mblk_t *ip_carve_mp(mblk_t **, ssize_t);
667 669 int ip_close(queue_t *, int);
668 670 static char *ip_dot_saddr(uchar_t *, char *);
669 671 static void ip_lrput(queue_t *, mblk_t *);
670 672 ipaddr_t ip_net_mask(ipaddr_t);
671 673 char *ip_nv_lookup(nv_t *, int);
672 674 void ip_rput(queue_t *, mblk_t *);
673 675 static void ip_rput_dlpi_writer(ipsq_t *dummy_sq, queue_t *q, mblk_t *mp,
674 676 void *dummy_arg);
675 677 int ip_snmp_get(queue_t *, mblk_t *, int, boolean_t);
676 678 static mblk_t *ip_snmp_get_mib2_ip(queue_t *, mblk_t *,
677 679 mib2_ipIfStatsEntry_t *, ip_stack_t *, boolean_t);
678 680 static mblk_t *ip_snmp_get_mib2_ip_traffic_stats(queue_t *, mblk_t *,
679 681 ip_stack_t *, boolean_t);
680 682 static mblk_t *ip_snmp_get_mib2_ip6(queue_t *, mblk_t *, ip_stack_t *,
681 683 boolean_t);
682 684 static mblk_t *ip_snmp_get_mib2_icmp(queue_t *, mblk_t *, ip_stack_t *ipst);
683 685 static mblk_t *ip_snmp_get_mib2_icmp6(queue_t *, mblk_t *, ip_stack_t *ipst);
684 686 static mblk_t *ip_snmp_get_mib2_igmp(queue_t *, mblk_t *, ip_stack_t *ipst);
685 687 static mblk_t *ip_snmp_get_mib2_multi(queue_t *, mblk_t *, ip_stack_t *ipst);
686 688 static mblk_t *ip_snmp_get_mib2_ip_addr(queue_t *, mblk_t *,
687 689 ip_stack_t *ipst, boolean_t);
688 690 static mblk_t *ip_snmp_get_mib2_ip6_addr(queue_t *, mblk_t *,
689 691 ip_stack_t *ipst, boolean_t);
690 692 static mblk_t *ip_snmp_get_mib2_ip_group_src(queue_t *, mblk_t *,
691 693 ip_stack_t *ipst);
692 694 static mblk_t *ip_snmp_get_mib2_ip6_group_src(queue_t *, mblk_t *,
693 695 ip_stack_t *ipst);
694 696 static mblk_t *ip_snmp_get_mib2_ip_group_mem(queue_t *, mblk_t *,
695 697 ip_stack_t *ipst);
696 698 static mblk_t *ip_snmp_get_mib2_ip6_group_mem(queue_t *, mblk_t *,
697 699 ip_stack_t *ipst);
698 700 static mblk_t *ip_snmp_get_mib2_virt_multi(queue_t *, mblk_t *,
699 701 ip_stack_t *ipst);
700 702 static mblk_t *ip_snmp_get_mib2_multi_rtable(queue_t *, mblk_t *,
701 703 ip_stack_t *ipst);
702 704 static mblk_t *ip_snmp_get_mib2_ip_route_media(queue_t *, mblk_t *, int,
703 705 ip_stack_t *ipst);
704 706 static mblk_t *ip_snmp_get_mib2_ip6_route_media(queue_t *, mblk_t *, int,
705 707 ip_stack_t *ipst);
706 708 static void ip_snmp_get2_v4(ire_t *, iproutedata_t *);
707 709 static void ip_snmp_get2_v6_route(ire_t *, iproutedata_t *);
708 710 static int ip_snmp_get2_v4_media(ncec_t *, iproutedata_t *);
709 711 static int ip_snmp_get2_v6_media(ncec_t *, iproutedata_t *);
710 712 int ip_snmp_set(queue_t *, int, int, uchar_t *, int);
711 713
712 714 static mblk_t *ip_fragment_copyhdr(uchar_t *, int, int, ip_stack_t *,
713 715 mblk_t *);
714 716
715 717 static void conn_drain_init(ip_stack_t *);
716 718 static void conn_drain_fini(ip_stack_t *);
717 719 static void conn_drain(conn_t *connp, boolean_t closing);
718 720
719 721 static void conn_walk_drain(ip_stack_t *, idl_tx_list_t *);
720 722 static void conn_walk_sctp(pfv_t, void *, zoneid_t, netstack_t *);
721 723
722 724 static void *ip_stack_init(netstackid_t stackid, netstack_t *ns);
723 725 static void ip_stack_shutdown(netstackid_t stackid, void *arg);
724 726 static void ip_stack_fini(netstackid_t stackid, void *arg);
725 727
726 728 static int ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
727 729 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
728 730 ire_t *, conn_t *, boolean_t, const in6_addr_t *, mcast_record_t,
729 731 const in6_addr_t *);
730 732
731 733 static int ip_squeue_switch(int);
732 734
733 735 static void *ip_kstat_init(netstackid_t, ip_stack_t *);
734 736 static void ip_kstat_fini(netstackid_t, kstat_t *);
735 737 static int ip_kstat_update(kstat_t *kp, int rw);
736 738 static void *icmp_kstat_init(netstackid_t);
737 739 static void icmp_kstat_fini(netstackid_t, kstat_t *);
738 740 static int icmp_kstat_update(kstat_t *kp, int rw);
739 741 static void *ip_kstat2_init(netstackid_t, ip_stat_t *);
740 742 static void ip_kstat2_fini(netstackid_t, kstat_t *);
741 743
742 744 static void ipobs_init(ip_stack_t *);
743 745 static void ipobs_fini(ip_stack_t *);
744 746
745 747 static int ip_tp_cpu_update(cpu_setup_t, int, void *);
746 748
747 749 ipaddr_t ip_g_all_ones = IP_HOST_MASK;
748 750
749 751 static long ip_rput_pullups;
750 752 int dohwcksum = 1; /* use h/w cksum if supported by the hardware */
751 753
752 754 vmem_t *ip_minor_arena_sa; /* for minor nos. from INET_MIN_DEV+2 thru 2^^18-1 */
753 755 vmem_t *ip_minor_arena_la; /* for minor nos. from 2^^18 thru 2^^32-1 */
754 756
755 757 int ip_debug;
756 758
757 759 /*
758 760 * Multirouting/CGTP stuff
759 761 */
760 762 int ip_cgtp_filter_rev = CGTP_FILTER_REV; /* CGTP hooks version */
761 763
762 764 /*
763 765 * IP tunables related declarations. Definitions are in ip_tunables.c
764 766 */
765 767 extern mod_prop_info_t ip_propinfo_tbl[];
766 768 extern int ip_propinfo_count;
767 769
768 770 /*
769 771 * Table of IP ioctls encoding the various properties of the ioctl and
770 772 * indexed based on the last byte of the ioctl command. Occasionally there
771 773 * is a clash, and there is more than 1 ioctl with the same last byte.
772 774 * In such a case 1 ioctl is encoded in the ndx table and the remaining
773 775 * ioctls are encoded in the misc table. An entry in the ndx table is
774 776 * retrieved by indexing on the last byte of the ioctl command and comparing
775 777 * the ioctl command with the value in the ndx table. In the event of a
776 778 * mismatch the misc table is then searched sequentially for the desired
777 779 * ioctl command.
778 780 *
779 781 * Entry: <command> <copyin_size> <flags> <cmd_type> <function> <restart_func>
780 782 */
781 783 ip_ioctl_cmd_t ip_ndx_ioctl_table[] = {
782 784 /* 000 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
783 785 /* 001 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
784 786 /* 002 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
785 787 /* 003 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
786 788 /* 004 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
787 789 /* 005 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
788 790 /* 006 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
789 791 /* 007 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
790 792 /* 008 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
791 793 /* 009 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
792 794
793 795 /* 010 */ { SIOCADDRT, sizeof (struct rtentry), IPI_PRIV,
794 796 MISC_CMD, ip_siocaddrt, NULL },
795 797 /* 011 */ { SIOCDELRT, sizeof (struct rtentry), IPI_PRIV,
796 798 MISC_CMD, ip_siocdelrt, NULL },
797 799
798 800 /* 012 */ { SIOCSIFADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
799 801 IF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
800 802 /* 013 */ { SIOCGIFADDR, sizeof (struct ifreq), IPI_GET_CMD,
801 803 IF_CMD, ip_sioctl_get_addr, NULL },
802 804
803 805 /* 014 */ { SIOCSIFDSTADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
804 806 IF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
805 807 /* 015 */ { SIOCGIFDSTADDR, sizeof (struct ifreq),
806 808 IPI_GET_CMD, IF_CMD, ip_sioctl_get_dstaddr, NULL },
807 809
808 810 /* 016 */ { SIOCSIFFLAGS, sizeof (struct ifreq),
809 811 IPI_PRIV | IPI_WR,
810 812 IF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
811 813 /* 017 */ { SIOCGIFFLAGS, sizeof (struct ifreq),
812 814 IPI_MODOK | IPI_GET_CMD,
813 815 IF_CMD, ip_sioctl_get_flags, NULL },
814 816
815 817 /* 018 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
816 818 /* 019 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
817 819
818 820 /* copyin size cannot be coded for SIOCGIFCONF */
819 821 /* 020 */ { O_SIOCGIFCONF, 0, IPI_GET_CMD,
820 822 MISC_CMD, ip_sioctl_get_ifconf, NULL },
821 823
822 824 /* 021 */ { SIOCSIFMTU, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
823 825 IF_CMD, ip_sioctl_mtu, NULL },
824 826 /* 022 */ { SIOCGIFMTU, sizeof (struct ifreq), IPI_GET_CMD,
825 827 IF_CMD, ip_sioctl_get_mtu, NULL },
826 828 /* 023 */ { SIOCGIFBRDADDR, sizeof (struct ifreq),
827 829 IPI_GET_CMD, IF_CMD, ip_sioctl_get_brdaddr, NULL },
828 830 /* 024 */ { SIOCSIFBRDADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
829 831 IF_CMD, ip_sioctl_brdaddr, NULL },
830 832 /* 025 */ { SIOCGIFNETMASK, sizeof (struct ifreq),
831 833 IPI_GET_CMD, IF_CMD, ip_sioctl_get_netmask, NULL },
832 834 /* 026 */ { SIOCSIFNETMASK, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
833 835 IF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
834 836 /* 027 */ { SIOCGIFMETRIC, sizeof (struct ifreq),
835 837 IPI_GET_CMD, IF_CMD, ip_sioctl_get_metric, NULL },
836 838 /* 028 */ { SIOCSIFMETRIC, sizeof (struct ifreq), IPI_PRIV,
837 839 IF_CMD, ip_sioctl_metric, NULL },
838 840 /* 029 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
839 841
840 842 /* See 166-168 below for extended SIOC*XARP ioctls */
841 843 /* 030 */ { SIOCSARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
842 844 ARP_CMD, ip_sioctl_arp, NULL },
843 845 /* 031 */ { SIOCGARP, sizeof (struct arpreq), IPI_GET_CMD,
844 846 ARP_CMD, ip_sioctl_arp, NULL },
845 847 /* 032 */ { SIOCDARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
846 848 ARP_CMD, ip_sioctl_arp, NULL },
847 849
848 850 /* 033 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
849 851 /* 034 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
850 852 /* 035 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
851 853 /* 036 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
852 854 /* 037 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
853 855 /* 038 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
854 856 /* 039 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
855 857 /* 040 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
856 858 /* 041 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
857 859 /* 042 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
858 860 /* 043 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
859 861 /* 044 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
860 862 /* 045 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
861 863 /* 046 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
862 864 /* 047 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
863 865 /* 048 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
864 866 /* 049 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
865 867 /* 050 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
866 868 /* 051 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
867 869 /* 052 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
868 870 /* 053 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
869 871
870 872 /* 054 */ { IF_UNITSEL, sizeof (int), IPI_PRIV | IPI_WR | IPI_MODOK,
871 873 MISC_CMD, if_unitsel, if_unitsel_restart },
872 874
873 875 /* 055 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
874 876 /* 056 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
875 877 /* 057 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
876 878 /* 058 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
877 879 /* 059 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
878 880 /* 060 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
879 881 /* 061 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
880 882 /* 062 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
881 883 /* 063 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
882 884 /* 064 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
883 885 /* 065 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
884 886 /* 066 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
885 887 /* 067 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
886 888 /* 068 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
887 889 /* 069 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
888 890 /* 070 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
889 891 /* 071 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
890 892 /* 072 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
891 893
892 894 /* 073 */ { SIOCSIFNAME, sizeof (struct ifreq),
893 895 IPI_PRIV | IPI_WR | IPI_MODOK,
894 896 IF_CMD, ip_sioctl_sifname, NULL },
895 897
896 898 /* 074 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
897 899 /* 075 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
898 900 /* 076 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
899 901 /* 077 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
900 902 /* 078 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
901 903 /* 079 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
902 904 /* 080 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
903 905 /* 081 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
904 906 /* 082 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
905 907 /* 083 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
906 908 /* 084 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
907 909 /* 085 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
908 910 /* 086 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
909 911
910 912 /* 087 */ { SIOCGIFNUM, sizeof (int), IPI_GET_CMD,
911 913 MISC_CMD, ip_sioctl_get_ifnum, NULL },
912 914 /* 088 */ { SIOCGIFMUXID, sizeof (struct ifreq), IPI_GET_CMD,
913 915 IF_CMD, ip_sioctl_get_muxid, NULL },
914 916 /* 089 */ { SIOCSIFMUXID, sizeof (struct ifreq),
915 917 IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_muxid, NULL },
916 918
917 919 /* Both if and lif variants share same func */
918 920 /* 090 */ { SIOCGIFINDEX, sizeof (struct ifreq), IPI_GET_CMD,
919 921 IF_CMD, ip_sioctl_get_lifindex, NULL },
920 922 /* Both if and lif variants share same func */
921 923 /* 091 */ { SIOCSIFINDEX, sizeof (struct ifreq),
922 924 IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_slifindex, NULL },
923 925
924 926 /* copyin size cannot be coded for SIOCGIFCONF */
925 927 /* 092 */ { SIOCGIFCONF, 0, IPI_GET_CMD,
926 928 MISC_CMD, ip_sioctl_get_ifconf, NULL },
927 929 /* 093 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
928 930 /* 094 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
929 931 /* 095 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
930 932 /* 096 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
931 933 /* 097 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
932 934 /* 098 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
933 935 /* 099 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
934 936 /* 100 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
935 937 /* 101 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
936 938 /* 102 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
937 939 /* 103 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
938 940 /* 104 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
939 941 /* 105 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
940 942 /* 106 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
941 943 /* 107 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
942 944 /* 108 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
943 945 /* 109 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
944 946
945 947 /* 110 */ { SIOCLIFREMOVEIF, sizeof (struct lifreq),
946 948 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_removeif,
947 949 ip_sioctl_removeif_restart },
948 950 /* 111 */ { SIOCLIFADDIF, sizeof (struct lifreq),
949 951 IPI_GET_CMD | IPI_PRIV | IPI_WR,
950 952 LIF_CMD, ip_sioctl_addif, NULL },
951 953 #define SIOCLIFADDR_NDX 112
952 954 /* 112 */ { SIOCSLIFADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
953 955 LIF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
954 956 /* 113 */ { SIOCGLIFADDR, sizeof (struct lifreq),
955 957 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_addr, NULL },
956 958 /* 114 */ { SIOCSLIFDSTADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
957 959 LIF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
958 960 /* 115 */ { SIOCGLIFDSTADDR, sizeof (struct lifreq),
959 961 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dstaddr, NULL },
960 962 /* 116 */ { SIOCSLIFFLAGS, sizeof (struct lifreq),
961 963 IPI_PRIV | IPI_WR,
962 964 LIF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
963 965 /* 117 */ { SIOCGLIFFLAGS, sizeof (struct lifreq),
964 966 IPI_GET_CMD | IPI_MODOK,
965 967 LIF_CMD, ip_sioctl_get_flags, NULL },
966 968
967 969 /* 118 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
968 970 /* 119 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
969 971
970 972 /* 120 */ { O_SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
971 973 ip_sioctl_get_lifconf, NULL },
972 974 /* 121 */ { SIOCSLIFMTU, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
973 975 LIF_CMD, ip_sioctl_mtu, NULL },
974 976 /* 122 */ { SIOCGLIFMTU, sizeof (struct lifreq), IPI_GET_CMD,
975 977 LIF_CMD, ip_sioctl_get_mtu, NULL },
976 978 /* 123 */ { SIOCGLIFBRDADDR, sizeof (struct lifreq),
977 979 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_brdaddr, NULL },
978 980 /* 124 */ { SIOCSLIFBRDADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
979 981 LIF_CMD, ip_sioctl_brdaddr, NULL },
980 982 /* 125 */ { SIOCGLIFNETMASK, sizeof (struct lifreq),
981 983 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_netmask, NULL },
982 984 /* 126 */ { SIOCSLIFNETMASK, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
983 985 LIF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
984 986 /* 127 */ { SIOCGLIFMETRIC, sizeof (struct lifreq),
985 987 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_metric, NULL },
986 988 /* 128 */ { SIOCSLIFMETRIC, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
987 989 LIF_CMD, ip_sioctl_metric, NULL },
988 990 /* 129 */ { SIOCSLIFNAME, sizeof (struct lifreq),
989 991 IPI_PRIV | IPI_WR | IPI_MODOK,
990 992 LIF_CMD, ip_sioctl_slifname,
991 993 ip_sioctl_slifname_restart },
992 994
993 995 /* 130 */ { SIOCGLIFNUM, sizeof (struct lifnum), IPI_GET_CMD,
994 996 MISC_CMD, ip_sioctl_get_lifnum, NULL },
995 997 /* 131 */ { SIOCGLIFMUXID, sizeof (struct lifreq),
996 998 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_muxid, NULL },
997 999 /* 132 */ { SIOCSLIFMUXID, sizeof (struct lifreq),
998 1000 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_muxid, NULL },
999 1001 /* 133 */ { SIOCGLIFINDEX, sizeof (struct lifreq),
1000 1002 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifindex, 0 },
1001 1003 /* 134 */ { SIOCSLIFINDEX, sizeof (struct lifreq),
1002 1004 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifindex, 0 },
1003 1005 /* 135 */ { SIOCSLIFTOKEN, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1004 1006 LIF_CMD, ip_sioctl_token, NULL },
1005 1007 /* 136 */ { SIOCGLIFTOKEN, sizeof (struct lifreq),
1006 1008 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_token, NULL },
1007 1009 /* 137 */ { SIOCSLIFSUBNET, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1008 1010 LIF_CMD, ip_sioctl_subnet, ip_sioctl_subnet_restart },
1009 1011 /* 138 */ { SIOCGLIFSUBNET, sizeof (struct lifreq),
1010 1012 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_subnet, NULL },
1011 1013 /* 139 */ { SIOCSLIFLNKINFO, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1012 1014 LIF_CMD, ip_sioctl_lnkinfo, NULL },
1013 1015
1014 1016 /* 140 */ { SIOCGLIFLNKINFO, sizeof (struct lifreq),
1015 1017 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lnkinfo, NULL },
1016 1018 /* 141 */ { SIOCLIFDELND, sizeof (struct lifreq), IPI_PRIV,
1017 1019 LIF_CMD, ip_siocdelndp_v6, NULL },
1018 1020 /* 142 */ { SIOCLIFGETND, sizeof (struct lifreq), IPI_GET_CMD,
1019 1021 LIF_CMD, ip_siocqueryndp_v6, NULL },
1020 1022 /* 143 */ { SIOCLIFSETND, sizeof (struct lifreq), IPI_PRIV,
1021 1023 LIF_CMD, ip_siocsetndp_v6, NULL },
1022 1024 /* 144 */ { SIOCTMYADDR, sizeof (struct sioc_addrreq), IPI_GET_CMD,
1023 1025 MISC_CMD, ip_sioctl_tmyaddr, NULL },
1024 1026 /* 145 */ { SIOCTONLINK, sizeof (struct sioc_addrreq), IPI_GET_CMD,
1025 1027 MISC_CMD, ip_sioctl_tonlink, NULL },
1026 1028 /* 146 */ { SIOCTMYSITE, sizeof (struct sioc_addrreq), 0,
1027 1029 MISC_CMD, ip_sioctl_tmysite, NULL },
1028 1030 /* 147 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1029 1031 /* 148 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1030 1032 /* IPSECioctls handled in ip_sioctl_copyin_setup itself */
1031 1033 /* 149 */ { SIOCFIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1032 1034 /* 150 */ { SIOCSIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1033 1035 /* 151 */ { SIOCDIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1034 1036 /* 152 */ { SIOCLIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
1035 1037
1036 1038 /* 153 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1037 1039
1038 1040 /* 154 */ { SIOCGLIFBINDING, sizeof (struct lifreq), IPI_GET_CMD,
1039 1041 LIF_CMD, ip_sioctl_get_binding, NULL },
1040 1042 /* 155 */ { SIOCSLIFGROUPNAME, sizeof (struct lifreq),
1041 1043 IPI_PRIV | IPI_WR,
1042 1044 LIF_CMD, ip_sioctl_groupname, ip_sioctl_groupname },
1043 1045 /* 156 */ { SIOCGLIFGROUPNAME, sizeof (struct lifreq),
1044 1046 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_groupname, NULL },
1045 1047 /* 157 */ { SIOCGLIFGROUPINFO, sizeof (lifgroupinfo_t),
1046 1048 IPI_GET_CMD, MISC_CMD, ip_sioctl_groupinfo, NULL },
1047 1049
1048 1050 /* Leave 158-160 unused; used to be SIOC*IFARP ioctls */
1049 1051 /* 158 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1050 1052 /* 159 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1051 1053 /* 160 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1052 1054
1053 1055 /* 161 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1054 1056
1055 1057 /* These are handled in ip_sioctl_copyin_setup itself */
1056 1058 /* 162 */ { SIOCGIP6ADDRPOLICY, 0, IPI_NULL_BCONT,
1057 1059 MISC_CMD, NULL, NULL },
1058 1060 /* 163 */ { SIOCSIP6ADDRPOLICY, 0, IPI_PRIV | IPI_NULL_BCONT,
1059 1061 MISC_CMD, NULL, NULL },
1060 1062 /* 164 */ { SIOCGDSTINFO, 0, IPI_GET_CMD, MISC_CMD, NULL, NULL },
1061 1063
1062 1064 /* 165 */ { SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
1063 1065 ip_sioctl_get_lifconf, NULL },
1064 1066
1065 1067 /* 166 */ { SIOCSXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
1066 1068 XARP_CMD, ip_sioctl_arp, NULL },
1067 1069 /* 167 */ { SIOCGXARP, sizeof (struct xarpreq), IPI_GET_CMD,
1068 1070 XARP_CMD, ip_sioctl_arp, NULL },
1069 1071 /* 168 */ { SIOCDXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
1070 1072 XARP_CMD, ip_sioctl_arp, NULL },
1071 1073
1072 1074 /* SIOCPOPSOCKFS is not handled by IP */
1073 1075 /* 169 */ { IPI_DONTCARE /* SIOCPOPSOCKFS */, 0, 0, 0, NULL, NULL },
1074 1076
1075 1077 /* 170 */ { SIOCGLIFZONE, sizeof (struct lifreq),
1076 1078 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifzone, NULL },
1077 1079 /* 171 */ { SIOCSLIFZONE, sizeof (struct lifreq),
1078 1080 IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifzone,
1079 1081 ip_sioctl_slifzone_restart },
1080 1082 /* 172-174 are SCTP ioctls and not handled by IP */
1081 1083 /* 172 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1082 1084 /* 173 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1083 1085 /* 174 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1084 1086 /* 175 */ { SIOCGLIFUSESRC, sizeof (struct lifreq),
1085 1087 IPI_GET_CMD, LIF_CMD,
1086 1088 ip_sioctl_get_lifusesrc, 0 },
1087 1089 /* 176 */ { SIOCSLIFUSESRC, sizeof (struct lifreq),
1088 1090 IPI_PRIV | IPI_WR,
1089 1091 LIF_CMD, ip_sioctl_slifusesrc,
1090 1092 NULL },
1091 1093 /* 177 */ { SIOCGLIFSRCOF, 0, IPI_GET_CMD, MISC_CMD,
1092 1094 ip_sioctl_get_lifsrcof, NULL },
1093 1095 /* 178 */ { SIOCGMSFILTER, sizeof (struct group_filter), IPI_GET_CMD,
1094 1096 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1095 1097 /* 179 */ { SIOCSMSFILTER, sizeof (struct group_filter), 0,
1096 1098 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1097 1099 /* 180 */ { SIOCGIPMSFILTER, sizeof (struct ip_msfilter), IPI_GET_CMD,
1098 1100 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1099 1101 /* 181 */ { SIOCSIPMSFILTER, sizeof (struct ip_msfilter), 0,
1100 1102 MSFILT_CMD, ip_sioctl_msfilter, NULL },
1101 1103 /* 182 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
1102 1104 /* SIOCSENABLESDP is handled by SDP */
1103 1105 /* 183 */ { IPI_DONTCARE /* SIOCSENABLESDP */, 0, 0, 0, NULL, NULL },
1104 1106 /* 184 */ { IPI_DONTCARE /* SIOCSQPTR */, 0, 0, 0, NULL, NULL },
1105 1107 /* 185 */ { SIOCGIFHWADDR, sizeof (struct ifreq), IPI_GET_CMD,
1106 1108 IF_CMD, ip_sioctl_get_ifhwaddr, NULL },
1107 1109 /* 186 */ { IPI_DONTCARE /* SIOCGSTAMP */, 0, 0, 0, NULL, NULL },
1108 1110 /* 187 */ { SIOCILB, 0, IPI_PRIV | IPI_GET_CMD, MISC_CMD,
1109 1111 ip_sioctl_ilb_cmd, NULL },
1110 1112 /* 188 */ { SIOCGETPROP, 0, IPI_GET_CMD, 0, NULL, NULL },
1111 1113 /* 189 */ { SIOCSETPROP, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL},
1112 1114 /* 190 */ { SIOCGLIFDADSTATE, sizeof (struct lifreq),
1113 1115 IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dadstate, NULL },
1114 1116 /* 191 */ { SIOCSLIFPREFIX, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
1115 1117 LIF_CMD, ip_sioctl_prefix, ip_sioctl_prefix_restart },
1116 1118 /* 192 */ { SIOCGLIFHWADDR, sizeof (struct lifreq), IPI_GET_CMD,
1117 1119 LIF_CMD, ip_sioctl_get_lifhwaddr, NULL }
1118 1120 };
1119 1121
1120 1122 int ip_ndx_ioctl_count = sizeof (ip_ndx_ioctl_table) / sizeof (ip_ioctl_cmd_t);
1121 1123
1122 1124 ip_ioctl_cmd_t ip_misc_ioctl_table[] = {
1123 1125 { I_LINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1124 1126 { I_UNLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1125 1127 { I_PLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1126 1128 { I_PUNLINK, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1127 1129 { ND_GET, 0, 0, 0, NULL, NULL },
1128 1130 { ND_SET, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
1129 1131 { IP_IOCTL, 0, 0, 0, NULL, NULL },
1130 1132 { SIOCGETVIFCNT, sizeof (struct sioc_vif_req), IPI_GET_CMD,
1131 1133 MISC_CMD, mrt_ioctl},
1132 1134 { SIOCGETSGCNT, sizeof (struct sioc_sg_req), IPI_GET_CMD,
1133 1135 MISC_CMD, mrt_ioctl},
1134 1136 { SIOCGETLSGCNT, sizeof (struct sioc_lsg_req), IPI_GET_CMD,
1135 1137 MISC_CMD, mrt_ioctl}
1136 1138 };
1137 1139
1138 1140 int ip_misc_ioctl_count =
1139 1141 sizeof (ip_misc_ioctl_table) / sizeof (ip_ioctl_cmd_t);
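
/*
 * An illustrative sketch of the two-stage lookup described above the ndx
 * table (a hypothetical helper, not part of this file; assumes the
 * ipi_cmd member naming):
 */
static ip_ioctl_cmd_t *
ip_ioctl_lookup_example(int cmd)
{
	ip_ioctl_cmd_t *ipip;
	int i;

	/* Fast path: index on the last byte of the ioctl command. */
	if ((cmd & 0xff) < ip_ndx_ioctl_count) {
		ipip = &ip_ndx_ioctl_table[cmd & 0xff];
		if (ipip->ipi_cmd == cmd)
			return (ipip);
	}

	/* Clash or miss: search the misc table sequentially. */
	for (i = 0; i < ip_misc_ioctl_count; i++) {
		if (ip_misc_ioctl_table[i].ipi_cmd == cmd)
			return (&ip_misc_ioctl_table[i]);
	}
	return (NULL);
}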
1140 1142
1141 1143 int conn_drain_nthreads; /* Number of drainers reqd. */
1142 1144 /* Settable in /etc/system */
1143 1145 /* Defined in ip_ire.c */
1144 1146 extern uint32_t ip_ire_max_bucket_cnt, ip6_ire_max_bucket_cnt;
1145 1147 extern uint32_t ip_ire_min_bucket_cnt, ip6_ire_min_bucket_cnt;
1146 1148 extern uint32_t ip_ire_mem_ratio, ip_ire_cpu_ratio;
1147 1149
1148 1150 static nv_t ire_nv_arr[] = {
1149 1151 { IRE_BROADCAST, "BROADCAST" },
1150 1152 { IRE_LOCAL, "LOCAL" },
1151 1153 { IRE_LOOPBACK, "LOOPBACK" },
1152 1154 { IRE_DEFAULT, "DEFAULT" },
1153 1155 { IRE_PREFIX, "PREFIX" },
1154 1156 { IRE_IF_NORESOLVER, "IF_NORESOL" },
1155 1157 { IRE_IF_RESOLVER, "IF_RESOLV" },
1156 1158 { IRE_IF_CLONE, "IF_CLONE" },
1157 1159 { IRE_HOST, "HOST" },
1158 1160 { IRE_MULTICAST, "MULTICAST" },
1159 1161 { IRE_NOROUTE, "NOROUTE" },
1160 1162 { 0 }
1161 1163 };
1162 1164
1163 1165 nv_t *ire_nv_tbl = ire_nv_arr;
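
ire_nv_arr is a { 0 }-terminated name/value table. The usual lookup over such a table is sketched below; the struct is a stand-in, since nv_t's actual field names are not shown in this hunk:

#include <stddef.h>

typedef struct {
	int		value;
	const char	*name;
} mini_nv_t;

/* Walk a { 0 }-terminated table like ire_nv_arr; NULL if not found. */
static const char *
mini_nv_lookup(const mini_nv_t *nv, int value)
{
	for (; nv->value != 0; nv++) {
		if (nv->value == value)
			return (nv->name);
	}
	return (NULL);
}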
1164 1166
1165 1167 /* Simple ICMP IP Header Template */
1166 1168 static ipha_t icmp_ipha = {
1167 1169 IP_SIMPLE_HDR_VERSION, 0, 0, 0, 0, 0, IPPROTO_ICMP
1168 1170 };
1169 1171
1170 1172 struct module_info ip_mod_info = {
1171 1173 IP_MOD_ID, IP_MOD_NAME, IP_MOD_MINPSZ, IP_MOD_MAXPSZ, IP_MOD_HIWAT,
1172 1174 IP_MOD_LOWAT
1173 1175 };
1174 1176
1175 1177 /*
1176 1178  * Duplicate static symbols within a module confuse mdb, so we avoid the
1177 1179  * problem by making the symbols here distinct from those in udp.c.
1178 1180 */
1179 1181
1180 1182 /*
1181 1183 * Entry points for IP as a device and as a module.
1182 1184 * We have separate open functions for the /dev/ip and /dev/ip6 devices.
1183 1185 */
1184 1186 static struct qinit iprinitv4 = {
1185 1187 (pfi_t)ip_rput, NULL, ip_openv4, ip_close, NULL,
1186 1188 &ip_mod_info
1187 1189 };
1188 1190
1189 1191 struct qinit iprinitv6 = {
1190 1192 (pfi_t)ip_rput_v6, NULL, ip_openv6, ip_close, NULL,
1191 1193 &ip_mod_info
1192 1194 };
1193 1195
1194 1196 static struct qinit ipwinit = {
1195 1197 (pfi_t)ip_wput_nondata, (pfi_t)ip_wsrv, NULL, NULL, NULL,
1196 1198 &ip_mod_info
1197 1199 };
1198 1200
1199 1201 static struct qinit iplrinit = {
1200 1202 (pfi_t)ip_lrput, NULL, ip_openv4, ip_close, NULL,
1201 1203 &ip_mod_info
1202 1204 };
1203 1205
1204 1206 static struct qinit iplwinit = {
1205 1207 (pfi_t)ip_lwput, NULL, NULL, NULL, NULL,
1206 1208 &ip_mod_info
1207 1209 };
1208 1210
1209 1211 /* For AF_INET aka /dev/ip */
1210 1212 struct streamtab ipinfov4 = {
1211 1213 &iprinitv4, &ipwinit, &iplrinit, &iplwinit
1212 1214 };
1213 1215
1214 1216 /* For AF_INET6 aka /dev/ip6 */
1215 1217 struct streamtab ipinfov6 = {
1216 1218 &iprinitv6, &ipwinit, &iplrinit, &iplwinit
1217 1219 };
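
These qinit/streamtab definitions are the standard STREAMS wiring that a new protocol module (such as the dccp template this change begins) would mirror: a module_info, one qinit per queue side, and a streamtab tying them together. A minimal self-contained sketch under assumed placeholder names (mymod_*); the id, water marks, and packet-size values are arbitrary examples, not taken from this change:

#include <sys/types.h>
#include <sys/stream.h>

static struct module_info mymod_info = {
	0x7fff, "mymod", 0, INFPSZ, 65536, 1024	/* id, name, min/max psz, hi/lo wat */
};

static int
mymod_rput(queue_t *q, mblk_t *mp)
{
	putnext(q, mp);		/* pass-through read side */
	return (0);
}

static int
mymod_wput(queue_t *q, mblk_t *mp)
{
	putnext(q, mp);		/* pass-through write side */
	return (0);
}

static struct qinit mymod_rinit = {
	(pfi_t)mymod_rput, NULL, NULL, NULL, NULL, &mymod_info
};

static struct qinit mymod_winit = {
	(pfi_t)mymod_wput, NULL, NULL, NULL, NULL, &mymod_info
};

struct streamtab mymod_strtab = {
	&mymod_rinit, &mymod_winit, NULL, NULL	/* rd, wr, muxrd, muxwr */
};

A real module would also supply open/close routines in the qinit, as the iprinitv4/iprinitv6 entries above do.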
1218 1220
1219 1221 #ifdef DEBUG
1220 1222 boolean_t skip_sctp_cksum = B_FALSE;
1221 1223 #endif
1222 1224
1223 1225 /*
1224 1226 * Generate an ICMP fragmentation needed message.
1225 1227  * When called from the ip_output side, a minimal ip_recv_attr_t needs to be
1226 1228  * constructed by the caller.

1227 1229 */
1228 1230 void
1229 1231 icmp_frag_needed(mblk_t *mp, int mtu, ip_recv_attr_t *ira)
1230 1232 {
1231 1233 icmph_t icmph;
1232 1234 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
1233 1235
1234 1236 mp = icmp_pkt_err_ok(mp, ira);
1235 1237 if (mp == NULL)
1236 1238 return;
1237 1239
1238 1240 bzero(&icmph, sizeof (icmph_t));
1239 1241 icmph.icmph_type = ICMP_DEST_UNREACHABLE;
1240 1242 icmph.icmph_code = ICMP_FRAGMENTATION_NEEDED;
1241 1243 icmph.icmph_du_mtu = htons((uint16_t)mtu);
1242 1244 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutFragNeeded);
1243 1245 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);
1244 1246
1245 1247 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
1246 1248 }
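
The message built here is the RFC 792/1191 "fragmentation needed" form: type 3, code 4, with the next-hop MTU carried in the low 16 bits of the second header word. A user-space sketch of that wire layout (the struct name is ours, not the kernel's):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Wire layout of an ICMP "fragmentation needed" header (RFC 792/1191). */
struct icmp_frag_needed_hdr {
	uint8_t		type;	/* 3 = destination unreachable */
	uint8_t		code;	/* 4 = fragmentation needed and DF set */
	uint16_t	cksum;	/* computed over ICMP header + payload */
	uint16_t	unused;	/* must be zero */
	uint16_t	mtu;	/* next-hop MTU, network byte order */
};

static void
build_frag_needed(struct icmp_frag_needed_hdr *h, uint16_t mtu)
{
	memset(h, 0, sizeof (*h));
	h->type = 3;
	h->code = 4;
	h->mtu = htons(mtu);
	/* the checksum is filled in later, over the finished message */
}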
1247 1249
1248 1250 /*
1249 1251 * icmp_inbound_v4 deals with ICMP messages that are handled by IP.
1250 1252 * If the ICMP message is consumed by IP, i.e., it should not be delivered
1251 1253 * to any IPPROTO_ICMP raw sockets, then it returns NULL.
1252 1254  * Likewise, if the ICMP error is malformed (too short, etc.), then it
1253 1255 * returns NULL. The caller uses this to determine whether or not to send
1254 1256 * to raw sockets.
1255 1257 *
1256 1258 * All error messages are passed to the matching transport stream.
1257 1259 *
1258 1260 * The following cases are handled by icmp_inbound:
1259 1261  * 1) It needs to send a reply back and possibly deliver it
1260 1262  *    to the "interested" upper clients.
1261 1263  * 2) Return the mblk so that the caller can pass it to the RAW socket clients.
1262 1264  * 3) It needs to change some values in IP only.
1263 1265  * 4) It needs to change some values in IP and upper layers, e.g. TCP,
1264 1266  *    by delivering an error to the upper layers.
1265 1267 *
1266 1268  * We handle the above four cases in the context of IPsec in the
1267 1269  * following way:
1268 1270 *
1269 1271 * 1) Send the reply back in the same way as the request came in.
1270 1272 * If it came in encrypted, it goes out encrypted. If it came in
1271 1273  * clear, it goes out in clear. Thus, this will prevent a
1272 1274  * chosen-plaintext attack.
1273 1275 * 2) The client may or may not expect things to come in secure.
1274 1276 * If it comes in secure, the policy constraints are checked
1275 1277 * before delivering it to the upper layers. If it comes in
1276 1278 * clear, ipsec_inbound_accept_clear will decide whether to
1277 1279 * accept this in clear or not. In both the cases, if the returned
1278 1280 * message (IP header + 8 bytes) that caused the icmp message has
1279 1281 * AH/ESP headers, it is sent up to AH/ESP for validation before
1280 1282 * sending up. If there are only 8 bytes of returned message, then
1281 1283 * upper client will not be notified.
1282 1284  * 3) Check with global policy to see whether it matches the constraints.
1283 1285 * But this will be done only if icmp_accept_messages_in_clear is
1284 1286 * zero.
1285 1287 * 4) If we need to change both in IP and ULP, then the decision taken
1286 1288 * while affecting the values in IP and while delivering up to TCP
1287 1289 * should be the same.
1288 1290 *
1289 1291 * There are two cases.
1290 1292 *
1291 1293 * a) If we reject data at the IP layer (ipsec_check_global_policy()
1292 1294 * failed), we will not deliver it to the ULP, even though they
1293 1295 * are *willing* to accept in *clear*. This is fine as our global
1294 1296  *    disposition to icmp messages asks us to reject the datagram.
1295 1297 *
1296 1298 * b) If we accept data at the IP layer (ipsec_check_global_policy()
1297 1299  *    succeeded or icmp_accept_messages_in_clear is 1), but are not
1298 1300  *    able to deliver it to the ULP (policy failed), it can lead to
1299 1301  *    consistency problems. The cases known at this time are
1300 1302  *    ICMP_DESTINATION_UNREACHABLE messages with the following code
1301 1303  *    values:
1302 1304 *
1303 1305 * - ICMP_FRAGMENTATION_NEEDED : IP adapts to the new value
1304 1306 * and Upper layer rejects. Then the communication will
1305 1307 * come to a stop. This is solved by making similar decisions
1306 1308 * at both levels. Currently, when we are unable to deliver
1307 1309 * to the Upper Layer (due to policy failures) while IP has
1308 1310 * adjusted dce_pmtu, the next outbound datagram would
1309 1311  *    generate a local ICMP_FRAGMENTATION_NEEDED message - which
1310 1312  *    will have the right level of protection. Thus the right
1311 1313  *    value will be communicated even if we are not able to
1312 1314  *    communicate it when we first get it from the wire. But this
1313 1315 * assumes there would be at least one outbound datagram after
1314 1316 * IP has adjusted its dce_pmtu value. To make things
1315 1317 * simpler, we accept in clear after the validation of
1316 1318 * AH/ESP headers.
1317 1319 *
1318 1320 * - Other ICMP ERRORS : We may not be able to deliver it to the
1319 1321 * upper layer depending on the level of protection the upper
1320 1322 * layer expects and the disposition in ipsec_inbound_accept_clear().
1321 1323 * ipsec_inbound_accept_clear() decides whether a given ICMP error
1322 1324 * should be accepted in clear when the Upper layer expects secure.
1323 1325 * Thus the communication may get aborted by some bad ICMP
1324 1326 * packets.
1325 1327 */
1326 1328 mblk_t *
1327 1329 icmp_inbound_v4(mblk_t *mp, ip_recv_attr_t *ira)
1328 1330 {
1329 1331 icmph_t *icmph;
1330 1332 ipha_t *ipha; /* Outer header */
1331 1333 int ip_hdr_length; /* Outer header length */
1332 1334 boolean_t interested;
1333 1335 ipif_t *ipif;
1334 1336 uint32_t ts;
1335 1337 uint32_t *tsp;
1336 1338 timestruc_t now;
1337 1339 ill_t *ill = ira->ira_ill;
1338 1340 ip_stack_t *ipst = ill->ill_ipst;
1339 1341 zoneid_t zoneid = ira->ira_zoneid;
1340 1342 int len_needed;
1341 1343 mblk_t *mp_ret = NULL;
1342 1344
1343 1345 ipha = (ipha_t *)mp->b_rptr;
1344 1346
1345 1347 BUMP_MIB(&ipst->ips_icmp_mib, icmpInMsgs);
1346 1348
1347 1349 ip_hdr_length = ira->ira_ip_hdr_length;
1348 1350 if ((mp->b_wptr - mp->b_rptr) < (ip_hdr_length + ICMPH_SIZE)) {
1349 1351 if (ira->ira_pktlen < (ip_hdr_length + ICMPH_SIZE)) {
1350 1352 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1351 1353 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1352 1354 freemsg(mp);
1353 1355 return (NULL);
1354 1356 }
1355 1357 /* Last chance to get real. */
1356 1358 ipha = ip_pullup(mp, ip_hdr_length + ICMPH_SIZE, ira);
1357 1359 if (ipha == NULL) {
1358 1360 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
1359 1361 freemsg(mp);
1360 1362 return (NULL);
1361 1363 }
1362 1364 }
1363 1365
1364 1366 /* The IP header will always be a multiple of four bytes */
1365 1367 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1366 1368 ip2dbg(("icmp_inbound_v4: type %d code %d\n", icmph->icmph_type,
1367 1369 icmph->icmph_code));
1368 1370
1369 1371 /*
1370 1372 * We will set "interested" to "true" if we should pass a copy to
1371 1373 * the transport or if we handle the packet locally.
1372 1374 */
1373 1375 interested = B_FALSE;
1374 1376 switch (icmph->icmph_type) {
1375 1377 case ICMP_ECHO_REPLY:
1376 1378 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchoReps);
1377 1379 break;
1378 1380 case ICMP_DEST_UNREACHABLE:
1379 1381 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED)
1380 1382 BUMP_MIB(&ipst->ips_icmp_mib, icmpInFragNeeded);
1381 1383 interested = B_TRUE; /* Pass up to transport */
1382 1384 BUMP_MIB(&ipst->ips_icmp_mib, icmpInDestUnreachs);
1383 1385 break;
1384 1386 case ICMP_SOURCE_QUENCH:
1385 1387 interested = B_TRUE; /* Pass up to transport */
1386 1388 BUMP_MIB(&ipst->ips_icmp_mib, icmpInSrcQuenchs);
1387 1389 break;
1388 1390 case ICMP_REDIRECT:
1389 1391 if (!ipst->ips_ip_ignore_redirect)
1390 1392 interested = B_TRUE;
1391 1393 BUMP_MIB(&ipst->ips_icmp_mib, icmpInRedirects);
1392 1394 break;
1393 1395 case ICMP_ECHO_REQUEST:
1394 1396 /*
1395 1397 * Whether to respond to echo requests that come in as IP
1396 1398 * broadcasts or as IP multicast is subject to debate
1397 1399 * (what isn't?). We aim to please, you pick it.
1398 1400 * Default is do it.
1399 1401 */
1400 1402 if (ira->ira_flags & IRAF_MULTICAST) {
1401 1403 /* multicast: respond based on tunable */
1402 1404 interested = ipst->ips_ip_g_resp_to_echo_mcast;
1403 1405 } else if (ira->ira_flags & IRAF_BROADCAST) {
1404 1406 /* broadcast: respond based on tunable */
1405 1407 interested = ipst->ips_ip_g_resp_to_echo_bcast;
1406 1408 } else {
1407 1409 /* unicast: always respond */
1408 1410 interested = B_TRUE;
1409 1411 }
1410 1412 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchos);
1411 1413 if (!interested) {
1412 1414 /* We never pass these to RAW sockets */
1413 1415 freemsg(mp);
1414 1416 return (NULL);
1415 1417 }
1416 1418
1417 1419 /* Check db_ref to make sure we can modify the packet. */
1418 1420 if (mp->b_datap->db_ref > 1) {
1419 1421 mblk_t *mp1;
1420 1422
1421 1423 mp1 = copymsg(mp);
1422 1424 freemsg(mp);
1423 1425 if (!mp1) {
1424 1426 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1425 1427 return (NULL);
1426 1428 }
1427 1429 mp = mp1;
1428 1430 ipha = (ipha_t *)mp->b_rptr;
1429 1431 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1430 1432 }
1431 1433 icmph->icmph_type = ICMP_ECHO_REPLY;
1432 1434 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutEchoReps);
1433 1435 icmp_send_reply_v4(mp, ipha, icmph, ira);
1434 1436 return (NULL);
1435 1437
1436 1438 case ICMP_ROUTER_ADVERTISEMENT:
1437 1439 case ICMP_ROUTER_SOLICITATION:
1438 1440 break;
1439 1441 case ICMP_TIME_EXCEEDED:
1440 1442 interested = B_TRUE; /* Pass up to transport */
1441 1443 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimeExcds);
1442 1444 break;
1443 1445 case ICMP_PARAM_PROBLEM:
1444 1446 interested = B_TRUE; /* Pass up to transport */
1445 1447 BUMP_MIB(&ipst->ips_icmp_mib, icmpInParmProbs);
1446 1448 break;
1447 1449 case ICMP_TIME_STAMP_REQUEST:
1448 1450 /* Response to Time Stamp Requests is local policy. */
1449 1451 if (ipst->ips_ip_g_resp_to_timestamp) {
1450 1452 if (ira->ira_flags & IRAF_MULTIBROADCAST)
1451 1453 interested =
1452 1454 ipst->ips_ip_g_resp_to_timestamp_bcast;
1453 1455 else
1454 1456 interested = B_TRUE;
1455 1457 }
1456 1458 if (!interested) {
1457 1459 /* We never pass these to RAW sockets */
1458 1460 freemsg(mp);
1459 1461 return (NULL);
1460 1462 }
1461 1463
1462 1464 /* Make sure we have enough of the packet */
1463 1465 len_needed = ip_hdr_length + ICMPH_SIZE +
1464 1466 3 * sizeof (uint32_t);
1465 1467
1466 1468 if (mp->b_wptr - mp->b_rptr < len_needed) {
1467 1469 ipha = ip_pullup(mp, len_needed, ira);
1468 1470 if (ipha == NULL) {
1469 1471 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1470 1472 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1471 1473 mp, ill);
1472 1474 freemsg(mp);
1473 1475 return (NULL);
1474 1476 }
1475 1477 /* Refresh following the pullup. */
1476 1478 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1477 1479 }
1478 1480 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestamps);
1479 1481 /* Check db_ref to make sure we can modify the packet. */
1480 1482 if (mp->b_datap->db_ref > 1) {
1481 1483 mblk_t *mp1;
1482 1484
1483 1485 mp1 = copymsg(mp);
1484 1486 freemsg(mp);
1485 1487 if (!mp1) {
1486 1488 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1487 1489 return (NULL);
1488 1490 }
1489 1491 mp = mp1;
1490 1492 ipha = (ipha_t *)mp->b_rptr;
1491 1493 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1492 1494 }
1493 1495 icmph->icmph_type = ICMP_TIME_STAMP_REPLY;
1494 1496 tsp = (uint32_t *)&icmph[1];
1495 1497 tsp++; /* Skip past 'originate time' */
1496 1498 /* Compute # of milliseconds since midnight */
1497 1499 gethrestime(&now);
1498 1500 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
1499 1501 now.tv_nsec / (NANOSEC / MILLISEC);
1500 1502 *tsp++ = htonl(ts); /* Lay in 'receive time' */
1501 1503 *tsp++ = htonl(ts); /* Lay in 'send time' */
1502 1504 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimestampReps);
1503 1505 icmp_send_reply_v4(mp, ipha, icmph, ira);
1504 1506 return (NULL);
1505 1507
1506 1508 case ICMP_TIME_STAMP_REPLY:
1507 1509 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestampReps);
1508 1510 break;
1509 1511 case ICMP_INFO_REQUEST:
1510 1512 /* Per RFC 1122 3.2.2.7, ignore this. */
1511 1513 case ICMP_INFO_REPLY:
1512 1514 break;
1513 1515 case ICMP_ADDRESS_MASK_REQUEST:
1514 1516 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1515 1517 interested =
1516 1518 ipst->ips_ip_respond_to_address_mask_broadcast;
1517 1519 } else {
1518 1520 interested = B_TRUE;
1519 1521 }
1520 1522 if (!interested) {
1521 1523 /* We never pass these to RAW sockets */
1522 1524 freemsg(mp);
1523 1525 return (NULL);
1524 1526 }
1525 1527 len_needed = ip_hdr_length + ICMPH_SIZE + IP_ADDR_LEN;
1526 1528 if (mp->b_wptr - mp->b_rptr < len_needed) {
1527 1529 ipha = ip_pullup(mp, len_needed, ira);
1528 1530 if (ipha == NULL) {
1529 1531 BUMP_MIB(ill->ill_ip_mib,
1530 1532 ipIfStatsInTruncatedPkts);
1531 1533 ip_drop_input("ipIfStatsInTruncatedPkts", mp,
1532 1534 ill);
1533 1535 freemsg(mp);
1534 1536 return (NULL);
1535 1537 }
1536 1538 /* Refresh following the pullup. */
1537 1539 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1538 1540 }
1539 1541 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMasks);
1540 1542 /* Check db_ref to make sure we can modify the packet. */
1541 1543 if (mp->b_datap->db_ref > 1) {
1542 1544 mblk_t *mp1;
1543 1545
1544 1546 mp1 = copymsg(mp);
1545 1547 freemsg(mp);
1546 1548 if (!mp1) {
1547 1549 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1548 1550 return (NULL);
1549 1551 }
1550 1552 mp = mp1;
1551 1553 ipha = (ipha_t *)mp->b_rptr;
1552 1554 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1553 1555 }
1554 1556 /*
1555 1557		 * Need the ipif with the mask to be the same as the source
1556 1558 * address of the mask reply. For unicast we have a specific
1557 1559 * ipif. For multicast/broadcast we only handle onlink
1558 1560 * senders, and use the source address to pick an ipif.
1559 1561 */
1560 1562 ipif = ipif_lookup_addr(ipha->ipha_dst, ill, zoneid, ipst);
1561 1563 if (ipif == NULL) {
1562 1564 /* Broadcast or multicast */
1563 1565 ipif = ipif_lookup_remote(ill, ipha->ipha_src, zoneid);
1564 1566 if (ipif == NULL) {
1565 1567 freemsg(mp);
1566 1568 return (NULL);
1567 1569 }
1568 1570 }
1569 1571 icmph->icmph_type = ICMP_ADDRESS_MASK_REPLY;
1570 1572 bcopy(&ipif->ipif_net_mask, &icmph[1], IP_ADDR_LEN);
1571 1573 ipif_refrele(ipif);
1572 1574 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutAddrMaskReps);
1573 1575 icmp_send_reply_v4(mp, ipha, icmph, ira);
1574 1576 return (NULL);
1575 1577
1576 1578 case ICMP_ADDRESS_MASK_REPLY:
1577 1579 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMaskReps);
1578 1580 break;
1579 1581 default:
1580 1582 interested = B_TRUE; /* Pass up to transport */
1581 1583 BUMP_MIB(&ipst->ips_icmp_mib, icmpInUnknowns);
1582 1584 break;
1583 1585 }
1584 1586 /*
1585 1587 * See if there is an ICMP client to avoid an extra copymsg/freemsg
1586 1588 * if there isn't one.
1587 1589 */
1588 1590 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_ICMP].connf_head != NULL) {
1589 1591 /* If there is an ICMP client and we want one too, copy it. */
1590 1592
1591 1593 if (!interested) {
1592 1594 /* Caller will deliver to RAW sockets */
1593 1595 return (mp);
1594 1596 }
1595 1597 mp_ret = copymsg(mp);
1596 1598 if (mp_ret == NULL) {
1597 1599 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1598 1600 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1599 1601 }
1600 1602 } else if (!interested) {
1601 1603 /* Neither we nor raw sockets are interested. Drop packet now */
1602 1604 freemsg(mp);
1603 1605 return (NULL);
1604 1606 }
1605 1607
1606 1608 /*
1607 1609 * ICMP error or redirect packet. Make sure we have enough of
1608 1610 * the header and that db_ref == 1 since we might end up modifying
1609 1611 * the packet.
1610 1612 */
1611 1613 if (mp->b_cont != NULL) {
1612 1614 if (ip_pullup(mp, -1, ira) == NULL) {
1613 1615 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1614 1616 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1615 1617 mp, ill);
1616 1618 freemsg(mp);
1617 1619 return (mp_ret);
1618 1620 }
1619 1621 }
1620 1622
1621 1623 if (mp->b_datap->db_ref > 1) {
1622 1624 mblk_t *mp1;
1623 1625
1624 1626 mp1 = copymsg(mp);
1625 1627 if (mp1 == NULL) {
1626 1628 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1627 1629 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1628 1630 freemsg(mp);
1629 1631 return (mp_ret);
1630 1632 }
1631 1633 freemsg(mp);
1632 1634 mp = mp1;
1633 1635 }
1634 1636
1635 1637 /*
1636 1638	 * In case mp has changed, verify the message before any further
1637 1639	 * processing.
1638 1640 */
1639 1641 ipha = (ipha_t *)mp->b_rptr;
1640 1642 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1641 1643 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
1642 1644 freemsg(mp);
1643 1645 return (mp_ret);
1644 1646 }
1645 1647
1646 1648 switch (icmph->icmph_type) {
1647 1649 case ICMP_REDIRECT:
1648 1650 icmp_redirect_v4(mp, ipha, icmph, ira);
1649 1651 break;
1650 1652 case ICMP_DEST_UNREACHABLE:
1651 1653 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED) {
1652 1654			/* Update DCE and adjust MTU in icmp header if needed */
1653 1655 icmp_inbound_too_big_v4(icmph, ira);
1654 1656 }
1655 1657 /* FALLTHRU */
1656 1658 default:
1657 1659 icmp_inbound_error_fanout_v4(mp, icmph, ira);
1658 1660 break;
1659 1661 }
1660 1662 return (mp_ret);
1661 1663 }
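
icmp_inbound_v4 repeats one idiom several times: if db_ref > 1 the data block is shared, so the message must be copied before being modified. The pattern, factored out as a sketch using the real copymsg()/freemsg() DDI routines:

/*
 * Copy-on-write for a STREAMS message: if the data block is shared
 * (db_ref > 1), take a private copy before modifying it.  Returns the
 * writable mblk, or NULL (with the original freed) on allocation failure.
 */
static mblk_t *
mblk_make_writable(mblk_t *mp)
{
	mblk_t *mp1;

	if (mp->b_datap->db_ref <= 1)
		return (mp);		/* sole owner already */

	mp1 = copymsg(mp);
	freemsg(mp);
	return (mp1);			/* NULL if copymsg failed */
}

Note that after any such copy the callers above must also refresh their ipha/icmph pointers, since they now point into the old buffer.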
1662 1664
1663 1665 /*
1664 1666 * Send an ICMP echo, timestamp or address mask reply.
1665 1667 * The caller has already updated the payload part of the packet.
1666 1668 * We handle the ICMP checksum, IP source address selection and feed
1667 1669 * the packet into ip_output_simple.
1668 1670 */
1669 1671 static void
1670 1672 icmp_send_reply_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph,
1671 1673 ip_recv_attr_t *ira)
1672 1674 {
1673 1675 uint_t ip_hdr_length = ira->ira_ip_hdr_length;
1674 1676 ill_t *ill = ira->ira_ill;
1675 1677 ip_stack_t *ipst = ill->ill_ipst;
1676 1678 ip_xmit_attr_t ixas;
1677 1679
1678 1680 /* Send out an ICMP packet */
1679 1681 icmph->icmph_checksum = 0;
1680 1682 icmph->icmph_checksum = IP_CSUM(mp, ip_hdr_length, 0);
1681 1683 /* Reset time to live. */
1682 1684 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
1683 1685 {
1684 1686 /* Swap source and destination addresses */
1685 1687 ipaddr_t tmp;
1686 1688
1687 1689 tmp = ipha->ipha_src;
1688 1690 ipha->ipha_src = ipha->ipha_dst;
1689 1691 ipha->ipha_dst = tmp;
1690 1692 }
1691 1693 ipha->ipha_ident = 0;
1692 1694 if (!IS_SIMPLE_IPH(ipha))
1693 1695 icmp_options_update(ipha);
1694 1696
1695 1697 bzero(&ixas, sizeof (ixas));
1696 1698 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
1697 1699 ixas.ixa_zoneid = ira->ira_zoneid;
1698 1700 ixas.ixa_cred = kcred;
1699 1701 ixas.ixa_cpid = NOPID;
1700 1702 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
1701 1703 ixas.ixa_ifindex = 0;
1702 1704 ixas.ixa_ipst = ipst;
1703 1705 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
1704 1706
1705 1707 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
1706 1708 /*
1707 1709 * This packet should go out the same way as it
1708 1710		 * came in, i.e., in clear, independent of the IPsec policy
1709 1711 * for transmitting packets.
1710 1712 */
1711 1713 ixas.ixa_flags |= IXAF_NO_IPSEC;
1712 1714 } else {
1713 1715 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
1714 1716 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1715 1717 /* Note: mp already consumed and ip_drop_packet done */
1716 1718 return;
1717 1719 }
1718 1720 }
1719 1721 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1720 1722 /*
1721 1723		 * Not one of our addresses (IRE_LOCALs), thus we let
1722 1724 * ip_output_simple pick the source.
1723 1725 */
1724 1726 ipha->ipha_src = INADDR_ANY;
1725 1727 ixas.ixa_flags |= IXAF_SET_SOURCE;
1726 1728 }
1727 1729 /* Should we send with DF and use dce_pmtu? */
1728 1730 if (ipst->ips_ipv4_icmp_return_pmtu) {
1729 1731 ixas.ixa_flags |= IXAF_PMTU_DISCOVERY;
1730 1732 ipha->ipha_fragment_offset_and_flags |= IPH_DF_HTONS;
1731 1733 }
1732 1734
1733 1735 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
1734 1736
1735 1737 (void) ip_output_simple(mp, &ixas);
1736 1738 ixa_cleanup(&ixas);
1737 1739 }
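
The checksum step in icmp_send_reply_v4 (zero the field, then IP_CSUM over the ICMP portion) is the standard RFC 1071 ones-complement sum. A portable sketch follows; the kernel's IP_CSUM is the optimized equivalent, and the caller stores the result back in network byte order:

#include <stdint.h>
#include <stddef.h>

/* RFC 1071 Internet checksum over a buffer of even or odd length. */
static uint16_t
inet_cksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len == 1)
		sum += (uint32_t)p[0] << 8;	/* pad the odd byte with zero */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	return ((uint16_t)~sum);
}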
1738 1740
1739 1741 /*
1740 1742  * Verify the ICMP message, either an ICMP error or a redirect packet.
1741 1743 * The caller should have fully pulled up the message. If it's a redirect
1742 1744 * packet, only basic checks on IP header will be done; otherwise, verify
1743 1745 * the packet by looking at the included ULP header.
1744 1746 *
1745 1747 * Called before icmp_inbound_error_fanout_v4 is called.
1746 1748 */
1747 1749 static boolean_t
1748 1750 icmp_inbound_verify_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
1749 1751 {
1750 1752 ill_t *ill = ira->ira_ill;
1751 1753 int hdr_length;
1752 1754 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
1753 1755 conn_t *connp;
1754 1756 ipha_t *ipha; /* Inner IP header */
1755 1757
1756 1758 ipha = (ipha_t *)&icmph[1];
1757 1759 if ((uchar_t *)ipha + IP_SIMPLE_HDR_LENGTH > mp->b_wptr)
1758 1760 goto truncated;
1759 1761
1760 1762 hdr_length = IPH_HDR_LENGTH(ipha);
1761 1763
1762 1764 if ((IPH_HDR_VERSION(ipha) != IPV4_VERSION))
1763 1765 goto discard_pkt;
1764 1766
1765 1767 if (hdr_length < sizeof (ipha_t))
1766 1768 goto truncated;
1767 1769
1768 1770 if ((uchar_t *)ipha + hdr_length > mp->b_wptr)
1769 1771 goto truncated;
1770 1772
1771 1773 /*
1772 1774 * Stop here for ICMP_REDIRECT.
1773 1775 */
1774 1776 if (icmph->icmph_type == ICMP_REDIRECT)
1775 1777 return (B_TRUE);
1776 1778
1777 1779 /*
1778 1780 * ICMP errors only.
1779 1781 */
1780 1782 switch (ipha->ipha_protocol) {
1781 1783 case IPPROTO_UDP:
1782 1784 /*
1783 1785 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1784 1786 * transport header.
1785 1787 */
1786 1788 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1787 1789 mp->b_wptr)
1788 1790 goto truncated;
1789 1791 break;
1790 1792 case IPPROTO_TCP: {
1791 1793 tcpha_t *tcpha;
1792 1794
1793 1795 /*
1794 1796 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1795 1797 * transport header.
1796 1798 */
1797 1799 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1798 1800 mp->b_wptr)
1799 1801 goto truncated;
1800 1802
1801 1803 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
1802 1804 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
1803 1805 ipst);
1804 1806 if (connp == NULL)
1805 1807 goto discard_pkt;
1806 1808
1807 1809 if ((connp->conn_verifyicmp != NULL) &&
1808 1810 !connp->conn_verifyicmp(connp, tcpha, icmph, NULL, ira)) {
1809 1811 CONN_DEC_REF(connp);
1810 1812 goto discard_pkt;
1811 1813 }
1812 1814 CONN_DEC_REF(connp);
1813 1815 break;
1814 1816 }
1815 1817 case IPPROTO_SCTP:
1816 1818 /*
1817 1819 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1818 1820 * transport header.
1819 1821 */
1820 1822 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1821 1823 mp->b_wptr)
1822 1824 goto truncated;
1823 1825 break;
1824 1826 case IPPROTO_ESP:
1825 1827 case IPPROTO_AH:
1826 1828 break;
1827 1829 case IPPROTO_ENCAP:
1828 1830 if ((uchar_t *)ipha + hdr_length + sizeof (ipha_t) >
1829 1831 mp->b_wptr)
1830 1832 goto truncated;
1831 1833 break;
1832 1834 default:
1833 1835 break;
1834 1836 }
1835 1837
1836 1838 return (B_TRUE);
1837 1839
1838 1840 discard_pkt:
1839 1841 /* Bogus ICMP error. */
1840 1842 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1841 1843 return (B_FALSE);
1842 1844
1843 1845 truncated:
1844 1846	/* We pulled up everything already. Must be truncated */
1845 1847 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1846 1848 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1847 1849 return (B_FALSE);
1848 1850 }
1849 1851
1850 1852 /* Table from RFC 1191 */
1851 1853 static int icmp_frag_size_table[] =
1852 1854 { 32000, 17914, 8166, 4352, 2002, 1496, 1006, 508, 296, 68 };
1853 1855
1854 1856 /*
1855 1857 * Process received ICMP Packet too big.
1856 1858 * Just handles the DCE create/update, including using the above table of
1857 1859 * PMTU guesses. The caller is responsible for validating the packet before
1858 1860 * passing it in and also to fanout the ICMP error to any matching transport
1859 1861 * conns. Assumes the message has been fully pulled up and verified.
1860 1862 *
1861 1863 * Before getting here, the caller has called icmp_inbound_verify_v4()
1862 1864 * that should have verified with ULP to prevent undoing the changes we're
1863 1865 * going to make to DCE. For example, TCP might have verified that the packet
1864 1866  * which generated the error is in the send window.
1865 1867 *
1866 1868  * In some cases this modifies the MTU in the ICMP header of the packet;
1867 1869  * the caller should pass it to the matching ULP after this returns.
1868 1870 */
1869 1871 static void
1870 1872 icmp_inbound_too_big_v4(icmph_t *icmph, ip_recv_attr_t *ira)
1871 1873 {
1872 1874 dce_t *dce;
1873 1875 int old_mtu;
1874 1876 int mtu, orig_mtu;
1875 1877 ipaddr_t dst;
1876 1878 boolean_t disable_pmtud;
1877 1879 ill_t *ill = ira->ira_ill;
1878 1880 ip_stack_t *ipst = ill->ill_ipst;
1879 1881 uint_t hdr_length;
1880 1882 ipha_t *ipha;
1881 1883
1882 1884 /* Caller already pulled up everything. */
1883 1885 ipha = (ipha_t *)&icmph[1];
1884 1886 ASSERT(icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
1885 1887 icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED);
1886 1888 ASSERT(ill != NULL);
1887 1889
1888 1890 hdr_length = IPH_HDR_LENGTH(ipha);
1889 1891
1890 1892 /*
1891 1893 * We handle path MTU for source routed packets since the DCE
1892 1894 * is looked up using the final destination.
1893 1895 */
1894 1896 dst = ip_get_dst(ipha);
1895 1897
1896 1898 dce = dce_lookup_and_add_v4(dst, ipst);
1897 1899 if (dce == NULL) {
1898 1900 /* Couldn't add a unique one - ENOMEM */
1899 1901 ip1dbg(("icmp_inbound_too_big_v4: no dce for 0x%x\n",
1900 1902 ntohl(dst)));
1901 1903 return;
1902 1904 }
1903 1905
1904 1906 /* Check for MTU discovery advice as described in RFC 1191 */
1905 1907 mtu = ntohs(icmph->icmph_du_mtu);
1906 1908 orig_mtu = mtu;
1907 1909 disable_pmtud = B_FALSE;
1908 1910
1909 1911 mutex_enter(&dce->dce_lock);
1910 1912 if (dce->dce_flags & DCEF_PMTU)
1911 1913 old_mtu = dce->dce_pmtu;
1912 1914 else
1913 1915 old_mtu = ill->ill_mtu;
1914 1916
1915 1917 if (icmph->icmph_du_zero != 0 || mtu < ipst->ips_ip_pmtu_min) {
1916 1918 uint32_t length;
1917 1919 int i;
1918 1920
1919 1921 /*
1920 1922 * Use the table from RFC 1191 to figure out
1921 1923 * the next "plateau" based on the length in
1922 1924 * the original IP packet.
1923 1925 */
1924 1926 length = ntohs(ipha->ipha_length);
1925 1927 DTRACE_PROBE2(ip4__pmtu__guess, dce_t *, dce,
1926 1928 uint32_t, length);
1927 1929 if (old_mtu <= length &&
1928 1930 old_mtu >= length - hdr_length) {
1929 1931 /*
1930 1932 * Handle broken BSD 4.2 systems that
1931 1933 * return the wrong ipha_length in ICMP
1932 1934 * errors.
1933 1935 */
1934 1936 ip1dbg(("Wrong mtu: sent %d, dce %d\n",
1935 1937 length, old_mtu));
1936 1938 length -= hdr_length;
1937 1939 }
1938 1940 for (i = 0; i < A_CNT(icmp_frag_size_table); i++) {
1939 1941 if (length > icmp_frag_size_table[i])
1940 1942 break;
1941 1943 }
1942 1944 if (i == A_CNT(icmp_frag_size_table)) {
1943 1945 /* Smaller than IP_MIN_MTU! */
1944 1946 ip1dbg(("Too big for packet size %d\n",
1945 1947 length));
1946 1948 disable_pmtud = B_TRUE;
1947 1949 mtu = ipst->ips_ip_pmtu_min;
1948 1950 } else {
1949 1951 mtu = icmp_frag_size_table[i];
1950 1952 ip1dbg(("Calculated mtu %d, packet size %d, "
1951 1953 "before %d\n", mtu, length, old_mtu));
1952 1954 if (mtu < ipst->ips_ip_pmtu_min) {
1953 1955 mtu = ipst->ips_ip_pmtu_min;
1954 1956 disable_pmtud = B_TRUE;
1955 1957 }
1956 1958 }
1957 1959 }
1958 1960 if (disable_pmtud)
1959 1961 dce->dce_flags |= DCEF_TOO_SMALL_PMTU;
1960 1962 else
1961 1963 dce->dce_flags &= ~DCEF_TOO_SMALL_PMTU;
1962 1964
1963 1965 dce->dce_pmtu = MIN(old_mtu, mtu);
1964 1966 /* Prepare to send the new max frag size for the ULP. */
1965 1967 icmph->icmph_du_zero = 0;
1966 1968 icmph->icmph_du_mtu = htons((uint16_t)dce->dce_pmtu);
1967 1969 DTRACE_PROBE4(ip4__pmtu__change, icmph_t *, icmph, dce_t *,
1968 1970 dce, int, orig_mtu, int, mtu);
1969 1971
1970 1972 /* We now have a PMTU for sure */
1971 1973 dce->dce_flags |= DCEF_PMTU;
1972 1974 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
1973 1975 mutex_exit(&dce->dce_lock);
1974 1976 /*
1975 1977 * After dropping the lock the new value is visible to everyone.
1976 1978 * Then we bump the generation number so any cached values reinspect
1977 1979 * the dce_t.
1978 1980 */
1979 1981 dce_increment_generation(dce);
1980 1982 dce_refrele(dce);
1981 1983 }
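
When the router supplies no MTU (icmph_du_zero != 0, or the value is below the minimum), the code above falls back to the RFC 1191 plateau table: take the largest entry strictly smaller than the offending packet's length. The same search as a standalone sketch:

#include <stdint.h>
#include <stddef.h>

/* Next-lower PMTU plateau from RFC 1191; 0 means "below the minimum". */
static int
pmtu_plateau(uint32_t pktlen)
{
	static const int plateau[] = {
		32000, 17914, 8166, 4352, 2002, 1496, 1006, 508, 296, 68
	};
	size_t i;

	for (i = 0; i < sizeof (plateau) / sizeof (plateau[0]); i++) {
		if (pktlen > (uint32_t)plateau[i])
			return (plateau[i]);
	}
	return (0);	/* caller should clamp to its configured minimum */
}

The kernel version additionally disables PMTU discovery (DCEF_TOO_SMALL_PMTU) when the result falls below ips_ip_pmtu_min.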
1982 1984
1983 1985 /*
1984 1986  * If the packet in error is self-encapsulated, icmp_inbound_error_fanout_v4
1985 1987 * calls this function.
1986 1988 */
1987 1989 static mblk_t *
1988 1990 icmp_inbound_self_encap_error_v4(mblk_t *mp, ipha_t *ipha, ipha_t *in_ipha)
1989 1991 {
1990 1992 int length;
1991 1993
1992 1994 ASSERT(mp->b_datap->db_type == M_DATA);
1993 1995
1994 1996 /* icmp_inbound_v4 has already pulled up the whole error packet */
1995 1997 ASSERT(mp->b_cont == NULL);
1996 1998
1997 1999 /*
1998 2000 * The length that we want to overlay is the inner header
1999 2001 * and what follows it.
2000 2002 */
2001 2003 length = msgdsize(mp) - ((uchar_t *)in_ipha - mp->b_rptr);
2002 2004
2003 2005 /*
2004 2006 * Overlay the inner header and whatever follows it over the
2005 2007 * outer header.
2006 2008 */
2007 2009 bcopy((uchar_t *)in_ipha, (uchar_t *)ipha, length);
2008 2010
2009 2011 /* Adjust for what we removed */
2010 2012 mp->b_wptr -= (uchar_t *)in_ipha - (uchar_t *)ipha;
2011 2013 return (mp);
2012 2014 }
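
The overlay above slides the inner packet down over the outer header; the source and destination regions overlap, which the in-kernel bcopy() is relied upon to handle here. A portable user-space sketch would use memmove():

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Strip an outer header by sliding the inner packet (starting at
 * "inner") down to "outer"; returns the new total length.  The two
 * regions overlap, so portable code must use memmove, not memcpy.
 */
static size_t
strip_outer_header(uint8_t *outer, uint8_t *inner, size_t total_len)
{
	size_t inner_len = total_len - (size_t)(inner - outer);

	memmove(outer, inner, inner_len);
	return (inner_len);
}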
2013 2015
2014 2016 /*
2015 2017 * Try to pass the ICMP message upstream in case the ULP cares.
2016 2018 *
2017 2019 * If the packet that caused the ICMP error is secure, we send
2018 2020 * it to AH/ESP to make sure that the attached packet has a
2019 2021 * valid association. ipha in the code below points to the
2020 2022 * IP header of the packet that caused the error.
2021 2023 *
2022 2024 * For IPsec cases, we let the next-layer-up (which has access to
2023 2025 * cached policy on the conn_t, or can query the SPD directly)
2024 2026 * subtract out any IPsec overhead if they must. We therefore make no
2025 2027 * adjustments here for IPsec overhead.
2026 2028 *
2027 2029 * IFN could have been generated locally or by some router.
2028 2030 *
2029 2031 * LOCAL : ire_send_wire (before calling ipsec_out_process) can call
2030 2032  *	icmp_frag_needed/icmp_pkt2big_v6 to generate a local IFN.
2031 2033  *	This happens because IP adjusted its value of MTU on an
2032 2034  *	earlier IFN message and could not tell the upper layer
2033 2035  *	the new adjusted value of MTU, e.g., the packet was encrypted
2034 2036  *	or there was not enough information to fanout to upper
2035 2037  *	layers. Thus on the next outbound datagram, ire_send_wire
2036 2038 * generates the IFN, where IPsec processing has *not* been
2037 2039 * done.
2038 2040 *
2039 2041  *	Note that we retain ixa_fragsize across IPsec; thus once
2040 2042  *	we have picked ixa_fragsize and entered ipsec_out_process we do
2041 2043  *	not change the fragsize even if the path MTU changes before
2042 2044 * we reach ip_output_post_ipsec.
2043 2045 *
2044 2046 * In the local case, IRAF_LOOPBACK will be set indicating
2045 2047 * that IFN was generated locally.
2046 2048 *
2047 2049 * ROUTER : IFN could be secure or non-secure.
2048 2050 *
2049 2051 * * SECURE : We use the IPSEC_IN to fanout to AH/ESP if the
2050 2052 * packet in error has AH/ESP headers to validate the AH/ESP
2051 2053 * headers. AH/ESP will verify whether there is a valid SA or
2052 2054 * not and send it back. We will fanout again if we have more
2053 2055 * data in the packet.
2054 2056 *
2055 2057 * If the packet in error does not have AH/ESP, we handle it
2056 2058 * like any other case.
2057 2059 *
2058 2060 * * NON_SECURE : If the packet in error has AH/ESP headers, we send it
2059 2061 * up to AH/ESP for validation. AH/ESP will verify whether there is a
2060 2062 * valid SA or not and send it back. We will fanout again if
2061 2063 * we have more data in the packet.
2062 2064 *
2063 2065 * If the packet in error does not have AH/ESP, we handle it
2064 2066 * like any other case.
2065 2067 *
2066 2068 * The caller must have called icmp_inbound_verify_v4.
2067 2069 */
2068 2070 static void
2069 2071 icmp_inbound_error_fanout_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
2070 2072 {
2071 2073 uint16_t *up; /* Pointer to ports in ULP header */
2072 2074 uint32_t ports; /* reversed ports for fanout */
2073 2075 ipha_t ripha; /* With reversed addresses */
2074 2076 ipha_t *ipha; /* Inner IP header */
2075 2077 uint_t hdr_length; /* Inner IP header length */
2076 2078 tcpha_t *tcpha;
2077 2079 conn_t *connp;
2078 2080 ill_t *ill = ira->ira_ill;
2079 2081 ip_stack_t *ipst = ill->ill_ipst;
2080 2082 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
2081 2083 ill_t *rill = ira->ira_rill;
2082 2084
2083 2085 /* Caller already pulled up everything. */
2084 2086 ipha = (ipha_t *)&icmph[1];
2085 2087 ASSERT((uchar_t *)&ipha[1] <= mp->b_wptr);
2086 2088 ASSERT(mp->b_cont == NULL);
2087 2089
2088 2090 hdr_length = IPH_HDR_LENGTH(ipha);
2089 2091 ira->ira_protocol = ipha->ipha_protocol;
2090 2092
2091 2093 /*
2092 2094 * We need a separate IP header with the source and destination
2093 2095 * addresses reversed to do fanout/classification because the ipha in
2094 2096 * the ICMP error is in the form we sent it out.
2095 2097 */
2096 2098 ripha.ipha_src = ipha->ipha_dst;
2097 2099 ripha.ipha_dst = ipha->ipha_src;
2098 2100 ripha.ipha_protocol = ipha->ipha_protocol;
2099 2101 ripha.ipha_version_and_hdr_length = ipha->ipha_version_and_hdr_length;
2100 2102
2101 2103 ip2dbg(("icmp_inbound_error_v4: proto %d %x to %x: %d/%d\n",
2102 2104 ripha.ipha_protocol, ntohl(ipha->ipha_src),
2103 2105 ntohl(ipha->ipha_dst),
2104 2106 icmph->icmph_type, icmph->icmph_code));
2105 2107
2106 2108 switch (ipha->ipha_protocol) {
2107 2109 case IPPROTO_UDP:
2108 2110 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2109 2111
2110 2112 /* Attempt to find a client stream based on port. */
2111 2113 ip2dbg(("icmp_inbound_error_v4: UDP ports %d to %d\n",
2112 2114 ntohs(up[0]), ntohs(up[1])));
2113 2115
2114 2116 /* Note that we send error to all matches. */
2115 2117 ira->ira_flags |= IRAF_ICMP_ERROR;
2116 2118 ip_fanout_udp_multi_v4(mp, &ripha, up[0], up[1], ira);
2117 2119 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2118 2120 return;
2119 2121
2120 2122 case IPPROTO_TCP:
2121 2123 /*
2122 2124 * Find a TCP client stream for this packet.
2123 2125 * Note that we do a reverse lookup since the header is
2124 2126 * in the form we sent it out.
2125 2127 */
2126 2128 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
2127 2129 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
2128 2130 ipst);
2129 2131 if (connp == NULL)
2130 2132 goto discard_pkt;
2131 2133
2132 2134 if (CONN_INBOUND_POLICY_PRESENT(connp, ipss) ||
2133 2135 (ira->ira_flags & IRAF_IPSEC_SECURE)) {
2134 2136 mp = ipsec_check_inbound_policy(mp, connp,
2135 2137 ipha, NULL, ira);
2136 2138 if (mp == NULL) {
2137 2139 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2138 2140 /* Note that mp is NULL */
2139 2141 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2140 2142 CONN_DEC_REF(connp);
2141 2143 return;
2142 2144 }
2143 2145 }
2144 2146
2145 2147 ira->ira_flags |= IRAF_ICMP_ERROR;
2146 2148 ira->ira_ill = ira->ira_rill = NULL;
2147 2149 if (IPCL_IS_TCP(connp)) {
2148 2150 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
2149 2151 connp->conn_recvicmp, connp, ira, SQ_FILL,
2150 2152 SQTAG_TCP_INPUT_ICMP_ERR);
2151 2153 } else {
2152 2154 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
2153 2155 (connp->conn_recv)(connp, mp, NULL, ira);
2154 2156 CONN_DEC_REF(connp);
2155 2157 }
2156 2158 ira->ira_ill = ill;
2157 2159 ira->ira_rill = rill;
2158 2160 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2159 2161 return;
2160 2162
2161 2163 case IPPROTO_SCTP:
2162 2164 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2163 2165 /* Find a SCTP client stream for this packet. */
2164 2166 ((uint16_t *)&ports)[0] = up[1];
2165 2167 ((uint16_t *)&ports)[1] = up[0];
2166 2168
2167 2169 ira->ira_flags |= IRAF_ICMP_ERROR;
2168 2170 ip_fanout_sctp(mp, &ripha, NULL, ports, ira);
2169 2171 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2170 2172 return;
2171 2173
2172 2174 case IPPROTO_ESP:
2173 2175 case IPPROTO_AH:
2174 2176 if (!ipsec_loaded(ipss)) {
2175 2177 ip_proto_not_sup(mp, ira);
2176 2178 return;
2177 2179 }
2178 2180
2179 2181 if (ipha->ipha_protocol == IPPROTO_ESP)
2180 2182 mp = ipsecesp_icmp_error(mp, ira);
2181 2183 else
2182 2184 mp = ipsecah_icmp_error(mp, ira);
2183 2185 if (mp == NULL)
2184 2186 return;
2185 2187
2186 2188 /* Just in case ipsec didn't preserve the NULL b_cont */
2187 2189 if (mp->b_cont != NULL) {
2188 2190 if (!pullupmsg(mp, -1))
2189 2191 goto discard_pkt;
2190 2192 }
2191 2193
2192 2194 /*
2193 2195 * Note that ira_pktlen and ira_ip_hdr_length are no longer
2194 2196 * correct, but we don't use them any more here.
2195 2197 *
2196 2198		 * If successful, the mp has been modified to not include
2197 2199 * the ESP/AH header so we can fanout to the ULP's icmp
2198 2200 * error handler.
2199 2201 */
2200 2202 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2201 2203 goto truncated;
2202 2204
2203 2205		/* Verify the modified message before any further processing. */
2204 2206 ipha = (ipha_t *)mp->b_rptr;
2205 2207 hdr_length = IPH_HDR_LENGTH(ipha);
2206 2208 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2207 2209 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2208 2210 freemsg(mp);
2209 2211 return;
2210 2212 }
2211 2213
2212 2214 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2213 2215 return;
2214 2216
2215 2217 case IPPROTO_ENCAP: {
2216 2218 /* Look for self-encapsulated packets that caused an error */
2217 2219 ipha_t *in_ipha;
2218 2220
2219 2221 /*
2220 2222		 * Caller has verified that the length is
2221 2223		 * at least the size of an IP header.
2222 2224 */
2223 2225 ASSERT(hdr_length >= sizeof (ipha_t));
2224 2226 /*
2225 2227 * Check the sanity of the inner IP header like
2226 2228 * we did for the outer header.
2227 2229 */
2228 2230 in_ipha = (ipha_t *)((uchar_t *)ipha + hdr_length);
2229 2231 if ((IPH_HDR_VERSION(in_ipha) != IPV4_VERSION)) {
2230 2232 goto discard_pkt;
2231 2233 }
2232 2234 if (IPH_HDR_LENGTH(in_ipha) < sizeof (ipha_t)) {
2233 2235 goto discard_pkt;
2234 2236 }
2235 2237 /* Check for Self-encapsulated tunnels */
2236 2238 if (in_ipha->ipha_src == ipha->ipha_src &&
2237 2239 in_ipha->ipha_dst == ipha->ipha_dst) {
2238 2240
2239 2241 mp = icmp_inbound_self_encap_error_v4(mp, ipha,
2240 2242 in_ipha);
2241 2243 if (mp == NULL)
2242 2244 goto discard_pkt;
2243 2245
2244 2246 /*
2245 2247 * Just in case self_encap didn't preserve the NULL
2246 2248 * b_cont
2247 2249 */
2248 2250 if (mp->b_cont != NULL) {
2249 2251 if (!pullupmsg(mp, -1))
2250 2252 goto discard_pkt;
2251 2253 }
2252 2254 /*
2253 2255 * Note that ira_pktlen and ira_ip_hdr_length are no
2254 2256 * longer correct, but we don't use them any more here.
2255 2257 */
2256 2258 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2257 2259 goto truncated;
2258 2260
2259 2261 /*
2260 2262 * Verify the modified message before any further
2261 2263			 * processing.
2262 2264 */
2263 2265 ipha = (ipha_t *)mp->b_rptr;
2264 2266 hdr_length = IPH_HDR_LENGTH(ipha);
2265 2267 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2266 2268 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2267 2269 freemsg(mp);
2268 2270 return;
2269 2271 }
2270 2272
2271 2273 /*
2272 2274			 * The packet in error is self-encapsulated,
2273 2275			 * yet we find it further encapsulated,
2274 2276			 * which we could not possibly have generated.
2275 2277 */
2276 2278 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2277 2279 goto discard_pkt;
2278 2280 }
2279 2281 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2280 2282 return;
2281 2283 }
2282 2284		/* Not self-encapsulated */
2283 2285 /* FALLTHRU */
2284 2286 }
2285 2287 case IPPROTO_IPV6:
2286 2288 if ((connp = ipcl_iptun_classify_v4(&ripha.ipha_src,
2287 2289 &ripha.ipha_dst, ipst)) != NULL) {
2288 2290 ira->ira_flags |= IRAF_ICMP_ERROR;
2289 2291 connp->conn_recvicmp(connp, mp, NULL, ira);
2290 2292 CONN_DEC_REF(connp);
2291 2293 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2292 2294 return;
2293 2295 }
2294 2296 /*
2295 2297 * No IP tunnel is interested, fallthrough and see
2296 2298 * if a raw socket will want it.
2297 2299 */
2298 2300 /* FALLTHRU */
2299 2301 default:
2300 2302 ira->ira_flags |= IRAF_ICMP_ERROR;
2301 2303 ip_fanout_proto_v4(mp, &ripha, ira);
2302 2304 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2303 2305 return;
2304 2306 }
2305 2307 /* NOTREACHED */
2306 2308 discard_pkt:
2307 2309 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2308 2310 ip1dbg(("icmp_inbound_error_fanout_v4: drop pkt\n"));
2309 2311 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2310 2312 freemsg(mp);
2311 2313 return;
2312 2314
2313 2315 truncated:
2314 2316	/* We pulled up everything already. Must be truncated */
2315 2317 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
2316 2318 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
2317 2319 freemsg(mp);
2318 2320 }
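
All the fanout arms above work from ripha, a header with the embedded packet's endpoints reversed, because the quoted packet is in the form we transmitted it. The transformation in isolation, as a sketch over a plain 4-tuple:

#include <stdint.h>

typedef struct {
	uint32_t	src, dst;	/* IPv4 addresses, network order */
	uint16_t	sport, dport;	/* ports, network order */
} flow4_t;

/*
 * The packet quoted in an ICMP error is one we sent, so to classify
 * the local connection it refers to, swap the two endpoints.
 */
static flow4_t
flow_reverse(flow4_t f)
{
	flow4_t r;

	r.src = f.dst;
	r.dst = f.src;
	r.sport = f.dport;
	r.dport = f.sport;
	return (r);
}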
2319 2321
2320 2322 /*
2321 2323 * Common IP options parser.
2322 2324 *
2323 2325 * Setup routine: fill in *optp with options-parsing state, then
2324 2326 * tail-call ipoptp_next to return the first option.
2325 2327 */
2326 2328 uint8_t
2327 2329 ipoptp_first(ipoptp_t *optp, ipha_t *ipha)
2328 2330 {
2329 2331 uint32_t totallen; /* total length of all options */
2330 2332
2331 2333 totallen = ipha->ipha_version_and_hdr_length -
2332 2334 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
2333 2335 totallen <<= 2;
2334 2336 optp->ipoptp_next = (uint8_t *)(&ipha[1]);
2335 2337 optp->ipoptp_end = optp->ipoptp_next + totallen;
2336 2338 optp->ipoptp_flags = 0;
2337 2339 return (ipoptp_next(optp));
2338 2340 }
2339 2341
2340 2342 /* Like above but without an ipha_t */
2341 2343 uint8_t
2342 2344 ipoptp_first2(ipoptp_t *optp, uint32_t totallen, uint8_t *opt)
2343 2345 {
2344 2346 optp->ipoptp_next = opt;
2345 2347 optp->ipoptp_end = optp->ipoptp_next + totallen;
2346 2348 optp->ipoptp_flags = 0;
2347 2349 return (ipoptp_next(optp));
2348 2350 }
2349 2351
2350 2352 /*
2351 2353 * Common IP options parser: extract next option.
2352 2354 */
2353 2355 uint8_t
2354 2356 ipoptp_next(ipoptp_t *optp)
2355 2357 {
2356 2358 uint8_t *end = optp->ipoptp_end;
2357 2359 uint8_t *cur = optp->ipoptp_next;
2358 2360 uint8_t opt, len, pointer;
2359 2361
2360 2362 /*
2361 2363 * If cur > end already, then the ipoptp_end or ipoptp_next pointer
2362 2364 * has been corrupted.
2363 2365 */
2364 2366 ASSERT(cur <= end);
2365 2367
2366 2368 if (cur == end)
2367 2369 return (IPOPT_EOL);
2368 2370
2369 2371 opt = cur[IPOPT_OPTVAL];
2370 2372
2371 2373 /*
2372 2374 * Skip any NOP options.
2373 2375 */
2374 2376 while (opt == IPOPT_NOP) {
2375 2377 cur++;
2376 2378 if (cur == end)
2377 2379 return (IPOPT_EOL);
2378 2380 opt = cur[IPOPT_OPTVAL];
2379 2381 }
2380 2382
2381 2383 if (opt == IPOPT_EOL)
2382 2384 return (IPOPT_EOL);
2383 2385
2384 2386 /*
2385 2387 * Option requiring a length.
2386 2388 */
2387 2389 if ((cur + 1) >= end) {
2388 2390 optp->ipoptp_flags |= IPOPTP_ERROR;
2389 2391 return (IPOPT_EOL);
2390 2392 }
2391 2393 len = cur[IPOPT_OLEN];
2392 2394 if (len < 2) {
2393 2395 optp->ipoptp_flags |= IPOPTP_ERROR;
2394 2396 return (IPOPT_EOL);
2395 2397 }
2396 2398 optp->ipoptp_cur = cur;
2397 2399 optp->ipoptp_len = len;
2398 2400 optp->ipoptp_next = cur + len;
2399 2401 if (cur + len > end) {
2400 2402 optp->ipoptp_flags |= IPOPTP_ERROR;
2401 2403 return (IPOPT_EOL);
2402 2404 }
2403 2405
2404 2406 /*
2405 2407 * For the options which require a pointer field, make sure
2406 2408	 * it's there, and make sure it points to either something
2407 2409 * inside this option, or the end of the option.
2408 2410 */
2409 2411 switch (opt) {
2410 2412 case IPOPT_RR:
2411 2413 case IPOPT_TS:
2412 2414 case IPOPT_LSRR:
2413 2415 case IPOPT_SSRR:
2414 2416 if (len <= IPOPT_OFFSET) {
2415 2417 optp->ipoptp_flags |= IPOPTP_ERROR;
2416 2418 return (opt);
2417 2419 }
2418 2420 pointer = cur[IPOPT_OFFSET];
2419 2421 if (pointer - 1 > len) {
2420 2422 optp->ipoptp_flags |= IPOPTP_ERROR;
2421 2423 return (opt);
2422 2424 }
2423 2425 break;
2424 2426 }
2425 2427
2426 2428 /*
2427 2429 * Sanity check the pointer field based on the type of the
2428 2430 * option.
2429 2431 */
2430 2432 switch (opt) {
2431 2433 case IPOPT_RR:
2432 2434 case IPOPT_SSRR:
2433 2435 case IPOPT_LSRR:
2434 2436 if (pointer < IPOPT_MINOFF_SR)
2435 2437 optp->ipoptp_flags |= IPOPTP_ERROR;
2436 2438 break;
2437 2439 case IPOPT_TS:
2438 2440 if (pointer < IPOPT_MINOFF_IT)
2439 2441 optp->ipoptp_flags |= IPOPTP_ERROR;
2440 2442 /*
2441 2443 * Note that the Internet Timestamp option also
2442 2444		 * contains two four-bit fields (the Overflow field,
2443 2445 * and the Flag field), which follow the pointer
2444 2446 * field. We don't need to check that these fields
2445 2447 * fall within the length of the option because this
2446 2448		 * was implicitly done above. We've checked that the
2447 2449 * pointer value is at least IPOPT_MINOFF_IT, and that
2448 2450 * it falls within the option. Since IPOPT_MINOFF_IT >
2449 2451 * IPOPT_POS_OV_FLG, we don't need the explicit check.
2450 2452 */
2451 2453 ASSERT(len > IPOPT_POS_OV_FLG);
2452 2454 break;
2453 2455 }
2454 2456
2455 2457 return (opt);
2456 2458 }
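
ipoptp_first()/ipoptp_next() are used as an iterator pair. The canonical loop shape, matching ip_opt_get_user() and icmp_options_update() later in this file, is sketched below (the types and macros come from this file; the walker function itself is illustrative):

/*
 * Canonical iteration over IPv4 options with the parser above.
 */
static void
walk_options(ipha_t *ipha)
{
	ipoptp_t opts;
	uint8_t optval;

	for (optval = ipoptp_first(&opts, ipha);
	    optval != IPOPT_EOL;
	    optval = ipoptp_next(&opts)) {
		if (opts.ipoptp_flags & IPOPTP_ERROR)
			break;		/* malformed option; stop parsing */
		/* opts.ipoptp_cur / opts.ipoptp_len describe this option */
	}
}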
2457 2459
2458 2460 /*
2459 2461 * Use the outgoing IP header to create an IP_OPTIONS option the way
2460 2462 * it was passed down from the application.
2461 2463 *
2462 2464 * This is compatible with BSD in that it returns
2463 2465 * the reverse source route with the final destination
2464 2466 * as the last entry. The first 4 bytes of the option
2465 2467 * will contain the final destination.
2466 2468 */
2467 2469 int
2468 2470 ip_opt_get_user(conn_t *connp, uchar_t *buf)
2469 2471 {
2470 2472 ipoptp_t opts;
2471 2473 uchar_t *opt;
2472 2474 uint8_t optval;
2473 2475 uint8_t optlen;
2474 2476 uint32_t len = 0;
2475 2477 uchar_t *buf1 = buf;
2476 2478 uint32_t totallen;
2477 2479 ipaddr_t dst;
2478 2480 ip_pkt_t *ipp = &connp->conn_xmit_ipp;
2479 2481
2480 2482 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
2481 2483 return (0);
2482 2484
2483 2485 totallen = ipp->ipp_ipv4_options_len;
2484 2486 if (totallen & 0x3)
2485 2487 return (0);
2486 2488
2487 2489 buf += IP_ADDR_LEN; /* Leave room for final destination */
2488 2490 len += IP_ADDR_LEN;
2489 2491 bzero(buf1, IP_ADDR_LEN);
2490 2492
2491 2493 dst = connp->conn_faddr_v4;
2492 2494
2493 2495 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
2494 2496 optval != IPOPT_EOL;
2495 2497 optval = ipoptp_next(&opts)) {
2496 2498 int off;
2497 2499
2498 2500 opt = opts.ipoptp_cur;
2499 2501 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
2500 2502 break;
2501 2503 }
2502 2504 optlen = opts.ipoptp_len;
2503 2505
2504 2506 switch (optval) {
2505 2507 case IPOPT_SSRR:
2506 2508 case IPOPT_LSRR:
2507 2509
2508 2510 /*
2509 2511 * Insert destination as the first entry in the source
2510 2512 * route and move down the entries on step.
2511 2513			 * route and move down the entries one step.
2512 2514 */
2513 2515 buf[IPOPT_OPTVAL] = optval;
2514 2516 buf[IPOPT_OLEN] = optlen;
2515 2517 buf[IPOPT_OFFSET] = optlen;
2516 2518
2517 2519 off = optlen - IP_ADDR_LEN;
2518 2520 if (off < 0) {
2519 2521 /* No entries in source route */
2520 2522 break;
2521 2523 }
2522 2524 /* Last entry in source route if not already set */
2523 2525 if (dst == INADDR_ANY)
2524 2526 bcopy(opt + off, buf1, IP_ADDR_LEN);
2525 2527 off -= IP_ADDR_LEN;
2526 2528
2527 2529 while (off > 0) {
2528 2530 bcopy(opt + off,
2529 2531 buf + off + IP_ADDR_LEN,
2530 2532 IP_ADDR_LEN);
2531 2533 off -= IP_ADDR_LEN;
2532 2534 }
2533 2535 /* ipha_dst into first slot */
2534 2536 bcopy(&dst, buf + off + IP_ADDR_LEN,
2535 2537 IP_ADDR_LEN);
2536 2538 buf += optlen;
2537 2539 len += optlen;
2538 2540 break;
2539 2541
2540 2542 default:
2541 2543 bcopy(opt, buf, optlen);
2542 2544 buf += optlen;
2543 2545 len += optlen;
2544 2546 break;
2545 2547 }
2546 2548 }
2547 2549 done:
2548 2550 /* Pad the resulting options */
2549 2551 while (len & 0x3) {
2550 2552 *buf++ = IPOPT_EOL;
2551 2553 len++;
2552 2554 }
2553 2555 return (len);
2554 2556 }
2555 2557
2556 2558 /*
2557 2559 * Update any record route or timestamp options to include this host.
2558 2560 * Reverse any source route option.
2559 2561  * This routine assumes that the options are well formed, i.e., that they
2560 2562 * have already been checked.
2561 2563 */
2562 2564 static void
2563 2565 icmp_options_update(ipha_t *ipha)
2564 2566 {
2565 2567 ipoptp_t opts;
2566 2568 uchar_t *opt;
2567 2569 uint8_t optval;
2568 2570 ipaddr_t src; /* Our local address */
2569 2571 ipaddr_t dst;
2570 2572
2571 2573 ip2dbg(("icmp_options_update\n"));
2572 2574 src = ipha->ipha_src;
2573 2575 dst = ipha->ipha_dst;
2574 2576
2575 2577 for (optval = ipoptp_first(&opts, ipha);
2576 2578 optval != IPOPT_EOL;
2577 2579 optval = ipoptp_next(&opts)) {
2578 2580 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
2579 2581 opt = opts.ipoptp_cur;
2580 2582 ip2dbg(("icmp_options_update: opt %d, len %d\n",
2581 2583 optval, opts.ipoptp_len));
2582 2584 switch (optval) {
2583 2585 int off1, off2;
2584 2586 case IPOPT_SSRR:
2585 2587 case IPOPT_LSRR:
2586 2588 /*
2587 2589 * Reverse the source route. The first entry
2588 2590 * should be the next to last one in the current
2589 2591 * source route (the last entry is our address).
2590 2592 * The last entry should be the final destination.
2591 2593 */
2592 2594 off1 = IPOPT_MINOFF_SR - 1;
2593 2595 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
2594 2596 if (off2 < 0) {
2595 2597 /* No entries in source route */
2596 2598 ip1dbg((
2597 2599 "icmp_options_update: bad src route\n"));
2598 2600 break;
2599 2601 }
2600 2602 bcopy((char *)opt + off2, &dst, IP_ADDR_LEN);
2601 2603 bcopy(&ipha->ipha_dst, (char *)opt + off2, IP_ADDR_LEN);
2602 2604 bcopy(&dst, &ipha->ipha_dst, IP_ADDR_LEN);
2603 2605 off2 -= IP_ADDR_LEN;
2604 2606
2605 2607 while (off1 < off2) {
2606 2608 bcopy((char *)opt + off1, &src, IP_ADDR_LEN);
2607 2609 bcopy((char *)opt + off2, (char *)opt + off1,
2608 2610 IP_ADDR_LEN);
2609 2611 bcopy(&src, (char *)opt + off2, IP_ADDR_LEN);
2610 2612 off1 += IP_ADDR_LEN;
2611 2613 off2 -= IP_ADDR_LEN;
2612 2614 }
2613 2615 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
2614 2616 break;
2615 2617 }
2616 2618 }
2617 2619 }
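
The off1/off2 loop above is an in-place two-pointer reversal of the recorded addresses, done with bcopy because the entries live in an unaligned option buffer. Reduced to a plain aligned array, it is just:

#include <stdint.h>

/* Reverse an array of IPv4 addresses in place, as the off1/off2 loop does. */
static void
reverse_route(uint32_t *addrs, int n)
{
	int lo = 0, hi = n - 1;

	while (lo < hi) {
		uint32_t tmp = addrs[lo];

		addrs[lo] = addrs[hi];
		addrs[hi] = tmp;
		lo++;
		hi--;
	}
}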
2618 2620
2619 2621 /*
2620 2622 * Process received ICMP Redirect messages.
2621 2623 * Assumes the caller has verified that the headers are in the pulled up mblk.
2622 2624 * Consumes mp.
2623 2625 */
2624 2626 static void
2625 2627 icmp_redirect_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph, ip_recv_attr_t *ira)
2626 2628 {
2627 2629 ire_t *ire, *nire;
2628 2630 ire_t *prev_ire;
2629 2631 ipaddr_t src, dst, gateway;
2630 2632 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2631 2633 ipha_t *inner_ipha; /* Inner IP header */
2632 2634
2633 2635 /* Caller already pulled up everything. */
2634 2636 inner_ipha = (ipha_t *)&icmph[1];
2635 2637 src = ipha->ipha_src;
2636 2638 dst = inner_ipha->ipha_dst;
2637 2639 gateway = icmph->icmph_rd_gateway;
2638 2640 /* Make sure the new gateway is reachable somehow. */
2639 2641 ire = ire_ftable_lookup_v4(gateway, 0, 0, IRE_ONLINK, NULL,
2640 2642 ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst, NULL);
2641 2643 /*
2642 2644 * Make sure we had a route for the dest in question and that
2643 2645 * that route was pointing to the old gateway (the source of the
2644 2646  * redirect packet). We do longest match and then compare ire_gateway_addr
2645 2647 * We do longest match and then compare ire_gateway_addr below.
2646 2648 */
2647 2649 prev_ire = ire_ftable_lookup_v4(dst, 0, 0, 0, NULL, ALL_ZONES,
2648 2650 NULL, MATCH_IRE_DSTONLY, 0, ipst, NULL);
2649 2651 /*
2650 2652 * Check that
2651 2653 * the redirect was not from ourselves
2652 2654 * the new gateway and the old gateway are directly reachable
2653 2655 */
2654 2656 if (prev_ire == NULL || ire == NULL ||
2655 2657 (prev_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) ||
2656 2658 (prev_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) ||
2657 2659 !(ire->ire_type & IRE_IF_ALL) ||
2658 2660 prev_ire->ire_gateway_addr != src) {
2659 2661 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2660 2662 ip_drop_input("icmpInBadRedirects - ire", mp, ira->ira_ill);
2661 2663 freemsg(mp);
2662 2664 if (ire != NULL)
2663 2665 ire_refrele(ire);
2664 2666 if (prev_ire != NULL)
2665 2667 ire_refrele(prev_ire);
2666 2668 return;
2667 2669 }
2668 2670
2669 2671 ire_refrele(prev_ire);
2670 2672 ire_refrele(ire);
2671 2673
2672 2674 /*
2673 2675 * TODO: more precise handling for cases 0, 2, 3, the latter two
2674 2676 * require TOS routing
2675 2677 */
2676 2678 switch (icmph->icmph_code) {
2677 2679 case 0:
2678 2680 case 1:
2679 2681 /* TODO: TOS specificity for cases 2 and 3 */
2680 2682 case 2:
2681 2683 case 3:
2682 2684 break;
2683 2685 default:
2684 2686 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2685 2687 ip_drop_input("icmpInBadRedirects - code", mp, ira->ira_ill);
2686 2688 freemsg(mp);
2687 2689 return;
2688 2690 }
2689 2691 /*
2690 2692 * Create a Route Association. This will allow us to remember that
2691 2693 * someone we believe told us to use the particular gateway.
2692 2694 */
2693 2695 ire = ire_create(
2694 2696 (uchar_t *)&dst, /* dest addr */
2695 2697 (uchar_t *)&ip_g_all_ones, /* mask */
2696 2698 (uchar_t *)&gateway, /* gateway addr */
2697 2699 IRE_HOST,
2698 2700 NULL, /* ill */
2699 2701 ALL_ZONES,
2700 2702 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST),
2701 2703 NULL, /* tsol_gc_t */
2702 2704 ipst);
2703 2705
2704 2706 if (ire == NULL) {
2705 2707 freemsg(mp);
2706 2708 return;
2707 2709 }
2708 2710 nire = ire_add(ire);
2709 2711 /* Check if it was a duplicate entry */
2710 2712 if (nire != NULL && nire != ire) {
2711 2713 ASSERT(nire->ire_identical_ref > 1);
2712 2714 ire_delete(nire);
2713 2715 ire_refrele(nire);
2714 2716 nire = NULL;
2715 2717 }
2716 2718 ire = nire;
2717 2719 if (ire != NULL) {
2718 2720 ire_refrele(ire); /* Held in ire_add */
2719 2721
2720 2722 /* tell routing sockets that we received a redirect */
2721 2723 ip_rts_change(RTM_REDIRECT, dst, gateway, IP_HOST_MASK, 0, src,
2722 2724 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST), 0,
2723 2725 (RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_AUTHOR), ipst);
2724 2726 }
2725 2727
2726 2728 /*
2727 2729 * Delete any existing IRE_HOST type redirect ires for this destination.
2728 2730 * This together with the added IRE has the effect of
2729 2731 * modifying an existing redirect.
2730 2732 */
2731 2733 prev_ire = ire_ftable_lookup_v4(dst, 0, src, IRE_HOST, NULL,
2732 2734 ALL_ZONES, NULL, (MATCH_IRE_GW | MATCH_IRE_TYPE), 0, ipst, NULL);
2733 2735 if (prev_ire != NULL) {
2734 2736 		if (prev_ire->ire_flags & RTF_DYNAMIC)
2735 2737 ire_delete(prev_ire);
2736 2738 ire_refrele(prev_ire);
2737 2739 }
2738 2740
2739 2741 freemsg(mp);
2740 2742 }
2741 2743
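The sanity checks above reduce to a single predicate. Here is a standalone, hypothetical restatement with illustrative booleans standing in for the ire_t lookups (none of these names are kernel API):

#include <stdbool.h>
#include <stdint.h>

static bool
redirect_acceptable(bool new_gw_is_onlink, bool have_route_to_dst,
    bool dst_route_is_local_or_loopback, bool dst_route_rejects,
    uint32_t old_gw, uint32_t redirect_src)
{
	/* The proposed gateway must be directly reachable (on-link). */
	if (!new_gw_is_onlink)
		return (false);
	/* We must already hold a usable route for the destination. */
	if (!have_route_to_dst || dst_route_is_local_or_loopback ||
	    dst_route_rejects)
		return (false);
	/* The redirect must come from the router we were actually using. */
	return (old_gw == redirect_src);
}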
2742 2744 /*
2743 2745 * Generate an ICMP parameter problem message.
2744 2746 	 * When called from the ip_output side, a minimal ip_recv_attr_t needs to be
2745 2747 * constructed by the caller.
2746 2748 */
2747 2749 static void
2748 2750 icmp_param_problem(mblk_t *mp, uint8_t ptr, ip_recv_attr_t *ira)
2749 2751 {
2750 2752 icmph_t icmph;
2751 2753 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2752 2754
2753 2755 mp = icmp_pkt_err_ok(mp, ira);
2754 2756 if (mp == NULL)
2755 2757 return;
2756 2758
2757 2759 bzero(&icmph, sizeof (icmph_t));
2758 2760 icmph.icmph_type = ICMP_PARAM_PROBLEM;
2759 2761 icmph.icmph_pp_ptr = ptr;
2760 2762 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutParmProbs);
2761 2763 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
2762 2764 }
2763 2765
2764 2766 /*
2765 2767 * Build and ship an IPv4 ICMP message using the packet data in mp, and
2766 2768 * the ICMP header pointed to by "stuff". (May be called as writer.)
2767 2769 * Note: assumes that icmp_pkt_err_ok has been called to verify that
2768 2770 * an icmp error packet can be sent.
2769 2771 * Assigns an appropriate source address to the packet. If ipha_dst is
2770 2772 	 * one of our addresses, use it as the source. Otherwise let ip_output_simple
2771 2773 * pick the source address.
2772 2774 */
2773 2775 static void
2774 2776 icmp_pkt(mblk_t *mp, void *stuff, size_t len, ip_recv_attr_t *ira)
2775 2777 {
2776 2778 ipaddr_t dst;
2777 2779 icmph_t *icmph;
2778 2780 ipha_t *ipha;
2779 2781 uint_t len_needed;
2780 2782 size_t msg_len;
2781 2783 mblk_t *mp1;
2782 2784 ipaddr_t src;
2783 2785 ire_t *ire;
2784 2786 ip_xmit_attr_t ixas;
2785 2787 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2786 2788
2787 2789 ipha = (ipha_t *)mp->b_rptr;
2788 2790
2789 2791 bzero(&ixas, sizeof (ixas));
2790 2792 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
2791 2793 ixas.ixa_zoneid = ira->ira_zoneid;
2792 2794 ixas.ixa_ifindex = 0;
2793 2795 ixas.ixa_ipst = ipst;
2794 2796 ixas.ixa_cred = kcred;
2795 2797 ixas.ixa_cpid = NOPID;
2796 2798 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
2797 2799 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
2798 2800
2799 2801 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2800 2802 /*
2801 2803 * Apply IPsec based on how IPsec was applied to
2802 2804 * the packet that had the error.
2803 2805 *
2804 2806 * If it was an outbound packet that caused the ICMP
2805 2807 * error, then the caller will have setup the IRA
2806 2808 * appropriately.
2807 2809 */
2808 2810 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
2809 2811 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2810 2812 /* Note: mp already consumed and ip_drop_packet done */
2811 2813 return;
2812 2814 }
2813 2815 } else {
2814 2816 /*
2815 2817 		 * This is in the clear. The ICMP message we are building
2816 2818 		 * here should go out in the clear, independent of our policy.
2817 2819 */
2818 2820 ixas.ixa_flags |= IXAF_NO_IPSEC;
2819 2821 }
2820 2822
2821 2823 /* Remember our eventual destination */
2822 2824 dst = ipha->ipha_src;
2823 2825
2824 2826 /*
2825 2827 * If the packet was for one of our unicast addresses, make
2826 2828 * sure we respond with that as the source. Otherwise
2827 2829 * have ip_output_simple pick the source address.
2828 2830 */
2829 2831 ire = ire_ftable_lookup_v4(ipha->ipha_dst, 0, 0,
2830 2832 (IRE_LOCAL|IRE_LOOPBACK), NULL, ira->ira_zoneid, NULL,
2831 2833 MATCH_IRE_TYPE|MATCH_IRE_ZONEONLY, 0, ipst, NULL);
2832 2834 if (ire != NULL) {
2833 2835 ire_refrele(ire);
2834 2836 src = ipha->ipha_dst;
2835 2837 } else {
2836 2838 src = INADDR_ANY;
2837 2839 ixas.ixa_flags |= IXAF_SET_SOURCE;
2838 2840 }
2839 2841
2840 2842 /*
2841 2843 	 * Check if we can send back more than 8 bytes in addition to
2842 2844 	 * the IP header. We try to send 64 bytes of data and the internal
2843 2845 	 * header in the special cases of IPv4-encapsulated IPv4 or IPv6.
2844 2846 */
2845 2847 len_needed = IPH_HDR_LENGTH(ipha);
2846 2848 if (ipha->ipha_protocol == IPPROTO_ENCAP ||
2847 2849 ipha->ipha_protocol == IPPROTO_IPV6) {
2848 2850 if (!pullupmsg(mp, -1)) {
2849 2851 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2850 2852 ip_drop_output("ipIfStatsOutDiscards", mp, NULL);
2851 2853 freemsg(mp);
2852 2854 return;
2853 2855 }
2854 2856 ipha = (ipha_t *)mp->b_rptr;
2855 2857
2856 2858 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2857 2859 len_needed += IPH_HDR_LENGTH(((uchar_t *)ipha +
2858 2860 len_needed));
2859 2861 } else {
2860 2862 ip6_t *ip6h = (ip6_t *)((uchar_t *)ipha + len_needed);
2861 2863
2862 2864 ASSERT(ipha->ipha_protocol == IPPROTO_IPV6);
2863 2865 len_needed += ip_hdr_length_v6(mp, ip6h);
2864 2866 }
2865 2867 }
2866 2868 len_needed += ipst->ips_ip_icmp_return;
2867 2869 msg_len = msgdsize(mp);
2868 2870 if (msg_len > len_needed) {
2869 2871 (void) adjmsg(mp, len_needed - msg_len);
2870 2872 msg_len = len_needed;
2871 2873 }
2872 2874 mp1 = allocb(sizeof (icmp_ipha) + len, BPRI_MED);
2873 2875 if (mp1 == NULL) {
2874 2876 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutErrors);
2875 2877 freemsg(mp);
2876 2878 return;
2877 2879 }
2878 2880 mp1->b_cont = mp;
2879 2881 mp = mp1;
2880 2882
2881 2883 /*
2882 2884 * Set IXAF_TRUSTED_ICMP so we can let the ICMP messages this
2883 2885 * node generates be accepted in peace by all on-host destinations.
2884 2886 * If we do NOT assume that all on-host destinations trust
2885 2887 * self-generated ICMP messages, then rework here, ip6.c, and spd.c.
2886 2888 * (Look for IXAF_TRUSTED_ICMP).
2887 2889 */
2888 2890 ixas.ixa_flags |= IXAF_TRUSTED_ICMP;
2889 2891
2890 2892 ipha = (ipha_t *)mp->b_rptr;
2891 2893 mp1->b_wptr = (uchar_t *)ipha + (sizeof (icmp_ipha) + len);
2892 2894 *ipha = icmp_ipha;
2893 2895 ipha->ipha_src = src;
2894 2896 ipha->ipha_dst = dst;
2895 2897 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
2896 2898 msg_len += sizeof (icmp_ipha) + len;
2897 2899 if (msg_len > IP_MAXPACKET) {
2898 2900 (void) adjmsg(mp, IP_MAXPACKET - msg_len);
2899 2901 msg_len = IP_MAXPACKET;
2900 2902 }
2901 2903 ipha->ipha_length = htons((uint16_t)msg_len);
2902 2904 icmph = (icmph_t *)&ipha[1];
2903 2905 bcopy(stuff, icmph, len);
2904 2906 icmph->icmph_checksum = 0;
2905 2907 icmph->icmph_checksum = IP_CSUM(mp, (int32_t)sizeof (ipha_t), 0);
2906 2908 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
2907 2909
2908 2910 (void) ip_output_simple(mp, &ixas);
2909 2911 ixa_cleanup(&ixas);
2910 2912 }
2911 2913
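The final checksum is the RFC 1071 internet checksum that IP_CSUM() computes over the mblk chain, starting after the IP header. A minimal flat-buffer sketch of the same 16-bit one's complement sum, assuming the data is already in network byte order:

#include <stddef.h>
#include <stdint.h>

static uint16_t
inet_cksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit big-endian words */
		sum += (uint32_t)((buf[0] << 8) | buf[1]);
		buf += 2;
		len -= 2;
	}
	if (len == 1)			/* zero-pad a trailing odd byte */
		sum += (uint32_t)(buf[0] << 8);
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}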
2912 2914 /*
2913 2915 * Determine if an ICMP error packet can be sent given the rate limit.
2914 2916 * The limit consists of an average frequency (icmp_pkt_err_interval measured
2915 2917 	 * in milliseconds) and a burst size. A burst size worth of packets can
2916 2918 	 * be sent arbitrarily closely spaced.
2917 2919 * The state is tracked using two variables to implement an approximate
2918 2920 * token bucket filter:
2919 2921 * icmp_pkt_err_last - lbolt value when the last burst started
2920 2922 * icmp_pkt_err_sent - number of packets sent in current burst
2921 2923 */
2922 2924 boolean_t
2923 2925 icmp_err_rate_limit(ip_stack_t *ipst)
2924 2926 {
2925 2927 clock_t now = TICK_TO_MSEC(ddi_get_lbolt());
2926 2928 uint_t refilled; /* Number of packets refilled in tbf since last */
2927 2929 /* Guard against changes by loading into local variable */
2928 2930 uint_t err_interval = ipst->ips_ip_icmp_err_interval;
2929 2931
2930 2932 if (err_interval == 0)
2931 2933 return (B_FALSE);
2932 2934
2933 2935 if (ipst->ips_icmp_pkt_err_last > now) {
2934 2936 		/* 100 Hz lbolt in ms on a 32-bit arch wraps every 49.7 days */
2935 2937 ipst->ips_icmp_pkt_err_last = 0;
2936 2938 ipst->ips_icmp_pkt_err_sent = 0;
2937 2939 }
2938 2940 /*
2939 2941 	 * If we are in a burst, update the token bucket filter.
2940 2942 	 * Update the "last" time to be close to "now" but make sure
2941 2943 	 * we don't lose precision.
2942 2944 */
2943 2945 if (ipst->ips_icmp_pkt_err_sent != 0) {
2944 2946 refilled = (now - ipst->ips_icmp_pkt_err_last)/err_interval;
2945 2947 if (refilled > ipst->ips_icmp_pkt_err_sent) {
2946 2948 ipst->ips_icmp_pkt_err_sent = 0;
2947 2949 } else {
2948 2950 ipst->ips_icmp_pkt_err_sent -= refilled;
2949 2951 ipst->ips_icmp_pkt_err_last += refilled * err_interval;
2950 2952 }
2951 2953 }
2952 2954 if (ipst->ips_icmp_pkt_err_sent == 0) {
2953 2955 /* Start of new burst */
2954 2956 ipst->ips_icmp_pkt_err_last = now;
2955 2957 }
2956 2958 if (ipst->ips_icmp_pkt_err_sent < ipst->ips_ip_icmp_err_burst) {
2957 2959 ipst->ips_icmp_pkt_err_sent++;
2958 2960 ip1dbg(("icmp_err_rate_limit: %d sent in burst\n",
2959 2961 ipst->ips_icmp_pkt_err_sent));
2960 2962 return (B_FALSE);
2961 2963 }
2962 2964 ip1dbg(("icmp_err_rate_limit: dropped\n"));
2963 2965 return (B_TRUE);
2964 2966 }
2965 2967
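For reference, the same approximate token bucket can be written as a self-contained routine; the struct and the millisecond clock parameter below are illustrative stand-ins for the ip_stack_t fields and lbolt, not kernel API:

#include <stdbool.h>
#include <stdint.h>

struct tbf {
	int64_t		last;	/* ms timestamp when the burst started */
	uint32_t	sent;	/* packets charged to the current burst */
};

/* Returns true when the caller should drop (rate limit) the packet. */
static bool
tbf_rate_limited(struct tbf *t, int64_t now_ms, uint32_t interval_ms,
    uint32_t burst)
{
	if (interval_ms == 0)
		return (false);			/* limiting disabled */
	if (t->sent != 0) {			/* refill mid-burst */
		uint32_t refilled =
		    (uint32_t)((now_ms - t->last) / interval_ms);
		if (refilled > t->sent) {
			t->sent = 0;
		} else {
			t->sent -= refilled;
			t->last += (int64_t)refilled * interval_ms;
		}
	}
	if (t->sent == 0)
		t->last = now_ms;		/* start of a new burst */
	if (t->sent < burst) {
		t->sent++;
		return (false);			/* ok to send */
	}
	return (true);				/* bucket exhausted: drop */
}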
2966 2968 /*
2967 2969 * Check if it is ok to send an IPv4 ICMP error packet in
2968 2970 * response to the IPv4 packet in mp.
2969 2971 	 * Free the message and return NULL if no
2970 2972 * ICMP error packet should be sent.
2971 2973 */
2972 2974 static mblk_t *
2973 2975 icmp_pkt_err_ok(mblk_t *mp, ip_recv_attr_t *ira)
2974 2976 {
2975 2977 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2976 2978 icmph_t *icmph;
2977 2979 ipha_t *ipha;
2978 2980 uint_t len_needed;
2979 2981
2980 2982 if (!mp)
2981 2983 return (NULL);
2982 2984 ipha = (ipha_t *)mp->b_rptr;
2983 2985 if (ip_csum_hdr(ipha)) {
2984 2986 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInCksumErrs);
2985 2987 ip_drop_input("ipIfStatsInCksumErrs", mp, NULL);
2986 2988 freemsg(mp);
2987 2989 return (NULL);
2988 2990 }
2989 2991 if (ip_type_v4(ipha->ipha_dst, ipst) == IRE_BROADCAST ||
2990 2992 ip_type_v4(ipha->ipha_src, ipst) == IRE_BROADCAST ||
2991 2993 CLASSD(ipha->ipha_dst) ||
2992 2994 CLASSD(ipha->ipha_src) ||
2993 2995 (ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET)) {
2994 2996 /* Note: only errors to the fragment with offset 0 */
2995 2997 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
2996 2998 freemsg(mp);
2997 2999 return (NULL);
2998 3000 }
2999 3001 if (ipha->ipha_protocol == IPPROTO_ICMP) {
3000 3002 /*
3001 3003 * Check the ICMP type. RFC 1122 sez: don't send ICMP
3002 3004 * errors in response to any ICMP errors.
3003 3005 */
3004 3006 len_needed = IPH_HDR_LENGTH(ipha) + ICMPH_SIZE;
3005 3007 if (mp->b_wptr - mp->b_rptr < len_needed) {
3006 3008 if (!pullupmsg(mp, len_needed)) {
3007 3009 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
3008 3010 freemsg(mp);
3009 3011 return (NULL);
3010 3012 }
3011 3013 ipha = (ipha_t *)mp->b_rptr;
3012 3014 }
3013 3015 icmph = (icmph_t *)
3014 3016 (&((char *)ipha)[IPH_HDR_LENGTH(ipha)]);
3015 3017 switch (icmph->icmph_type) {
3016 3018 case ICMP_DEST_UNREACHABLE:
3017 3019 case ICMP_SOURCE_QUENCH:
3018 3020 case ICMP_TIME_EXCEEDED:
3019 3021 case ICMP_PARAM_PROBLEM:
3020 3022 case ICMP_REDIRECT:
3021 3023 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3022 3024 freemsg(mp);
3023 3025 return (NULL);
3024 3026 default:
3025 3027 break;
3026 3028 }
3027 3029 }
3028 3030 /*
3029 3031 * If this is a labeled system, then check to see if we're allowed to
3030 3032 * send a response to this particular sender. If not, then just drop.
3031 3033 */
3032 3034 if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
3033 3035 ip2dbg(("icmp_pkt_err_ok: can't respond to packet\n"));
3034 3036 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3035 3037 freemsg(mp);
3036 3038 return (NULL);
3037 3039 }
3038 3040 if (icmp_err_rate_limit(ipst)) {
3039 3041 /*
3040 3042 * Only send ICMP error packets every so often.
3041 3043 * This should be done on a per port/source basis,
3042 3044 * but for now this will suffice.
3043 3045 */
3044 3046 freemsg(mp);
3045 3047 return (NULL);
3046 3048 }
3047 3049 return (mp);
3048 3050 }
3049 3051
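Condensed, the function asks one question: may we send an ICMP error about this packet? A hypothetical pure-predicate restatement, with the individual tests folded into illustrative booleans:

#include <stdbool.h>

static bool
may_send_icmp_error(bool bad_ip_cksum, bool src_or_dst_bcast_or_mcast,
    bool nonzero_frag_offset, bool payload_is_icmp_error,
    bool label_forbids_reply, bool rate_limited)
{
	return (!bad_ip_cksum && !src_or_dst_bcast_or_mcast &&
	    !nonzero_frag_offset && !payload_is_icmp_error &&
	    !label_forbids_reply && !rate_limited);
}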
3050 3052 /*
3051 3053 * Called when a packet was sent out the same link that it arrived on.
3052 3054 * Check if it is ok to send a redirect and then send it.
3053 3055 */
3054 3056 void
3055 3057 ip_send_potential_redirect_v4(mblk_t *mp, ipha_t *ipha, ire_t *ire,
3056 3058 ip_recv_attr_t *ira)
3057 3059 {
3058 3060 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3059 3061 ipaddr_t src, nhop;
3060 3062 mblk_t *mp1;
3061 3063 ire_t *nhop_ire;
3062 3064
3063 3065 /*
3064 3066 * Check the source address to see if it originated
3065 3067 * on the same logical subnet it is going back out on.
3066 3068 * If so, we should be able to send it a redirect.
3067 3069 * Avoid sending a redirect if the destination
3068 3070 * is directly connected (i.e., we matched an IRE_ONLINK),
3069 3071 * or if the packet was source routed out this interface.
3070 3072 *
3071 3073 * We avoid sending a redirect if the
3072 3074 * destination is directly connected
3073 3075 * because it is possible that multiple
3074 3076 * IP subnets may have been configured on
3075 3077 * the link, and the source may not
3076 3078 	 * be on the same subnet as the IP destination,
3077 3079 * even though they are on the same
3078 3080 * physical link.
3079 3081 */
3080 3082 if ((ire->ire_type & IRE_ONLINK) ||
3081 3083 ip_source_routed(ipha, ipst))
3082 3084 return;
3083 3085
3084 3086 nhop_ire = ire_nexthop(ire);
3085 3087 if (nhop_ire == NULL)
3086 3088 return;
3087 3089
3088 3090 nhop = nhop_ire->ire_addr;
3089 3091
3090 3092 if (nhop_ire->ire_type & IRE_IF_CLONE) {
3091 3093 ire_t *ire2;
3092 3094
3093 3095 /* Follow ire_dep_parent to find non-clone IRE_INTERFACE */
3094 3096 mutex_enter(&nhop_ire->ire_lock);
3095 3097 ire2 = nhop_ire->ire_dep_parent;
3096 3098 if (ire2 != NULL)
3097 3099 ire_refhold(ire2);
3098 3100 mutex_exit(&nhop_ire->ire_lock);
3099 3101 ire_refrele(nhop_ire);
3100 3102 nhop_ire = ire2;
3101 3103 }
3102 3104 if (nhop_ire == NULL)
3103 3105 return;
3104 3106
3105 3107 ASSERT(!(nhop_ire->ire_type & IRE_IF_CLONE));
3106 3108
3107 3109 src = ipha->ipha_src;
3108 3110
3109 3111 /*
3110 3112 * We look at the interface ire for the nexthop,
3111 3113 * to see if ipha_src is in the same subnet
3112 3114 * as the nexthop.
3113 3115 */
3114 3116 if ((src & nhop_ire->ire_mask) == (nhop & nhop_ire->ire_mask)) {
3115 3117 /*
3116 3118 * The source is directly connected.
3117 3119 */
3118 3120 mp1 = copymsg(mp);
3119 3121 if (mp1 != NULL) {
3120 3122 icmp_send_redirect(mp1, nhop, ira);
3121 3123 }
3122 3124 }
3123 3125 ire_refrele(nhop_ire);
3124 3126 }
3125 3127
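The on-link test above is plain mask agreement. A tiny standalone sketch (host byte order; the example values are illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool
same_subnet(uint32_t a, uint32_t b, uint32_t mask)
{
	return ((a & mask) == (b & mask));
}

/*
 * e.g. same_subnet(0xc0a80105, 0xc0a80142, 0xffffff00) is true:
 * 192.168.1.5 and 192.168.1.66 share the /24 prefix.
 */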
3126 3128 /*
3127 3129 * Generate an ICMP redirect message.
3128 3130 */
3129 3131 static void
3130 3132 icmp_send_redirect(mblk_t *mp, ipaddr_t gateway, ip_recv_attr_t *ira)
3131 3133 {
3132 3134 icmph_t icmph;
3133 3135 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3134 3136
3135 3137 mp = icmp_pkt_err_ok(mp, ira);
3136 3138 if (mp == NULL)
3137 3139 return;
3138 3140
3139 3141 bzero(&icmph, sizeof (icmph_t));
3140 3142 icmph.icmph_type = ICMP_REDIRECT;
3141 3143 icmph.icmph_code = 1;
3142 3144 icmph.icmph_rd_gateway = gateway;
3143 3145 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutRedirects);
3144 3146 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3145 3147 }
3146 3148
3147 3149 /*
3148 3150 * Generate an ICMP time exceeded message.
3149 3151 */
3150 3152 void
3151 3153 icmp_time_exceeded(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3152 3154 {
3153 3155 icmph_t icmph;
3154 3156 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3155 3157
3156 3158 mp = icmp_pkt_err_ok(mp, ira);
3157 3159 if (mp == NULL)
3158 3160 return;
3159 3161
3160 3162 bzero(&icmph, sizeof (icmph_t));
3161 3163 icmph.icmph_type = ICMP_TIME_EXCEEDED;
3162 3164 icmph.icmph_code = code;
3163 3165 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimeExcds);
3164 3166 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3165 3167 }
3166 3168
3167 3169 /*
3168 3170 * Generate an ICMP unreachable message.
3169 3171 	 * When called from the ip_output side, a minimal ip_recv_attr_t needs to be
3170 3172 * constructed by the caller.
3171 3173 */
3172 3174 void
3173 3175 icmp_unreachable(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3174 3176 {
3175 3177 icmph_t icmph;
3176 3178 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3177 3179
3178 3180 mp = icmp_pkt_err_ok(mp, ira);
3179 3181 if (mp == NULL)
3180 3182 return;
3181 3183
3182 3184 bzero(&icmph, sizeof (icmph_t));
3183 3185 icmph.icmph_type = ICMP_DEST_UNREACHABLE;
3184 3186 icmph.icmph_code = code;
3185 3187 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);
3186 3188 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3187 3189 }
3188 3190
3189 3191 /*
3190 3192 	 * Latch in the IPsec state for a stream based on the policy in the listener
3191 3193 * and the actions in the ip_recv_attr_t.
3192 3194 * Called directly from TCP and SCTP.
3193 3195 */
3194 3196 boolean_t
3195 3197 ip_ipsec_policy_inherit(conn_t *connp, conn_t *lconnp, ip_recv_attr_t *ira)
3196 3198 {
3197 3199 ASSERT(lconnp->conn_policy != NULL);
3198 3200 ASSERT(connp->conn_policy == NULL);
3199 3201
3200 3202 IPPH_REFHOLD(lconnp->conn_policy);
3201 3203 connp->conn_policy = lconnp->conn_policy;
3202 3204
3203 3205 if (ira->ira_ipsec_action != NULL) {
3204 3206 if (connp->conn_latch == NULL) {
3205 3207 connp->conn_latch = iplatch_create();
3206 3208 if (connp->conn_latch == NULL)
3207 3209 return (B_FALSE);
3208 3210 }
3209 3211 ipsec_latch_inbound(connp, ira);
3210 3212 }
3211 3213 return (B_TRUE);
3212 3214 }
3213 3215
3214 3216 /*
3215 3217 * Verify whether or not the IP address is a valid local address.
3216 3218 * Could be a unicast, including one for a down interface.
3217 3219 * If allow_mcbc then a multicast or broadcast address is also
3218 3220 * acceptable.
3219 3221 *
3220 3222 * In the case of a broadcast/multicast address, however, the
3221 3223 * upper protocol is expected to reset the src address
3222 3224 * to zero when we return IPVL_MCAST/IPVL_BCAST so that
3223 3225 	 * no packets are emitted with a broadcast/multicast address as
3224 3226 	 * the source address (which would violate RFC 1122, Host Requirements).
3225 3227 * The addresses valid for bind are:
3226 3228 * (1) - INADDR_ANY (0)
3227 3229 * (2) - IP address of an UP interface
3228 3230 * (3) - IP address of a DOWN interface
3229 3231 * (4) - valid local IP broadcast addresses. In this case
3230 3232 * the conn will only receive packets destined to
3231 3233 * the specified broadcast address.
3232 3234 * (5) - a multicast address. In this case
3233 3235 * the conn will only receive packets destined to
3234 3236 * the specified multicast address. Note: the
3235 3237 * application still has to issue an
3236 3238 * IP_ADD_MEMBERSHIP socket option.
3237 3239 *
3238 3240 * In all the above cases, the bound address must be valid in the current zone.
3239 3241 * When the address is loopback, multicast or broadcast, there might be many
3240 3242 * matching IREs so bind has to look up based on the zone.
3241 3243 */
3242 3244 ip_laddr_t
3243 3245 ip_laddr_verify_v4(ipaddr_t src_addr, zoneid_t zoneid,
3244 3246 ip_stack_t *ipst, boolean_t allow_mcbc)
3245 3247 {
3246 3248 ire_t *src_ire;
3247 3249
3248 3250 ASSERT(src_addr != INADDR_ANY);
3249 3251
3250 3252 src_ire = ire_ftable_lookup_v4(src_addr, 0, 0, 0,
3251 3253 NULL, zoneid, NULL, MATCH_IRE_ZONEONLY, 0, ipst, NULL);
3252 3254
3253 3255 /*
3254 3256 	 * If an address other than INADDR_ANY is requested,
3255 3257 	 * we verify that it is a valid address for bind.
3256 3258 	 * Note: the following code is in if-else-if form for
3257 3259 	 * readability compared to one combined condition check.
3258 3260 */
3259 3261 if (src_ire != NULL && (src_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK))) {
3260 3262 /*
3261 3263 * (2) Bind to address of local UP interface
3262 3264 */
3263 3265 ire_refrele(src_ire);
3264 3266 return (IPVL_UNICAST_UP);
3265 3267 } else if (src_ire != NULL && src_ire->ire_type & IRE_BROADCAST) {
3266 3268 /*
3267 3269 * (4) Bind to broadcast address
3268 3270 */
3269 3271 ire_refrele(src_ire);
3270 3272 if (allow_mcbc)
3271 3273 return (IPVL_BCAST);
3272 3274 else
3273 3275 return (IPVL_BAD);
3274 3276 } else if (CLASSD(src_addr)) {
3275 3277 /* (5) bind to multicast address. */
3276 3278 if (src_ire != NULL)
3277 3279 ire_refrele(src_ire);
3278 3280
3279 3281 if (allow_mcbc)
3280 3282 return (IPVL_MCAST);
3281 3283 else
3282 3284 return (IPVL_BAD);
3283 3285 } else {
3284 3286 ipif_t *ipif;
3285 3287
3286 3288 /*
3287 3289 * (3) Bind to address of local DOWN interface?
3288 3290 * (ipif_lookup_addr() looks up all interfaces
3289 3291 * but we do not get here for UP interfaces
3290 3292 * - case (2) above)
3291 3293 */
3292 3294 if (src_ire != NULL)
3293 3295 ire_refrele(src_ire);
3294 3296
3295 3297 ipif = ipif_lookup_addr(src_addr, NULL, zoneid, ipst);
3296 3298 if (ipif == NULL)
3297 3299 return (IPVL_BAD);
3298 3300
3299 3301 /* Not a useful source? */
3300 3302 if (ipif->ipif_flags & (IPIF_NOLOCAL | IPIF_ANYCAST)) {
3301 3303 ipif_refrele(ipif);
3302 3304 return (IPVL_BAD);
3303 3305 }
3304 3306 ipif_refrele(ipif);
3305 3307 return (IPVL_UNICAST_DOWN);
3306 3308 }
3307 3309 }
3308 3310
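A hypothetical caller-side sketch of how a transport might act on the classification when validating a bind() address; the wrapper function and its policy choices are invented for illustration, and only the ip_laddr_verify_v4() call itself comes from the code above:

/* Illustrative only; not kernel source. */
static int
bind_laddr_check(ipaddr_t v4src, zoneid_t zoneid, ip_stack_t *ipst)
{
	switch (ip_laddr_verify_v4(v4src, zoneid, ipst, B_TRUE)) {
	case IPVL_UNICAST_UP:
	case IPVL_UNICAST_DOWN:
		return (0);	/* usable unicast source address */
	case IPVL_BCAST:
	case IPVL_MCAST:
		/*
		 * Usable for receive only; per the block comment above,
		 * the ULP must reset its source address to zero.
		 */
		return (0);
	case IPVL_BAD:
	default:
		return (EADDRNOTAVAIL);
	}
}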
3309 3311 /*
3310 3312 * Insert in the bind fanout for IPv4 and IPv6.
3311 3313 * The caller should already have used ip_laddr_verify_v*() before calling
3312 3314 * this.
3313 3315 */
3314 3316 int
3315 3317 ip_laddr_fanout_insert(conn_t *connp)
3316 3318 {
3317 3319 int error;
3318 3320
3319 3321 /*
3320 3322 * Allow setting new policies. For example, disconnects result
3321 3323 * in us being called. As we would have set conn_policy_cached
3322 3324 * to B_TRUE before, we should set it to B_FALSE, so that policy
3323 3325 * can change after the disconnect.
3324 3326 */
3325 3327 connp->conn_policy_cached = B_FALSE;
3326 3328
3327 3329 error = ipcl_bind_insert(connp);
3328 3330 if (error != 0) {
3329 3331 if (connp->conn_anon_port) {
3330 3332 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
3331 3333 connp->conn_mlp_type, connp->conn_proto,
3332 3334 ntohs(connp->conn_lport), B_FALSE);
3333 3335 }
3334 3336 connp->conn_mlp_type = mlptSingle;
3335 3337 }
3336 3338 return (error);
3337 3339 }
3338 3340
3339 3341 /*
3340 3342 * Verify that both the source and destination addresses are valid. If
3341 3343 * IPDF_VERIFY_DST is not set, then the destination address may be unreachable,
3342 3344 * i.e. have no route to it. Protocols like TCP want to verify destination
3343 3345 * reachability, while tunnels do not.
3344 3346 *
3345 3347 * Determine the route, the interface, and (optionally) the source address
3346 3348 * to use to reach a given destination.
3347 3349 * Note that we allow connect to broadcast and multicast addresses when
3348 3350 * IPDF_ALLOW_MCBC is set.
3349 3351 	 * first_hop and dst_addr are normally the same, but with source routing
3350 3352 	 * they will differ; in that case first_hop is what we'll use for the
3351 3353 	 * routing lookup, but the dce and label checks will be done on dst_addr.
3352 3354 *
3353 3355 * If uinfo is set, then we fill in the best available information
3354 3356 * we have for the destination. This is based on (in priority order) any
3355 3357 * metrics and path MTU stored in a dce_t, route metrics, and finally the
3356 3358 * ill_mtu/ill_mc_mtu.
3357 3359 *
3358 3360 * Tsol note: If we have a source route then dst_addr != firsthop. But we
3359 3361 * always do the label check on dst_addr.
3360 3362 */
3361 3363 int
3362 3364 ip_set_destination_v4(ipaddr_t *src_addrp, ipaddr_t dst_addr, ipaddr_t firsthop,
3363 3365 ip_xmit_attr_t *ixa, iulp_t *uinfo, uint32_t flags, uint_t mac_mode)
3364 3366 {
3365 3367 ire_t *ire = NULL;
3366 3368 int error = 0;
3367 3369 ipaddr_t setsrc; /* RTF_SETSRC */
3368 3370 zoneid_t zoneid = ixa->ixa_zoneid; /* Honors SO_ALLZONES */
3369 3371 ip_stack_t *ipst = ixa->ixa_ipst;
3370 3372 dce_t *dce;
3371 3373 uint_t pmtu;
3372 3374 uint_t generation;
3373 3375 nce_t *nce;
3374 3376 ill_t *ill = NULL;
3375 3377 boolean_t multirt = B_FALSE;
3376 3378
3377 3379 ASSERT(ixa->ixa_flags & IXAF_IS_IPV4);
3378 3380
3379 3381 /*
3380 3382 * We never send to zero; the ULPs map it to the loopback address.
3381 3383 	 * We can't allow it since we use zero to mean uninitialized in some
3382 3384 * places.
3383 3385 */
3384 3386 ASSERT(dst_addr != INADDR_ANY);
3385 3387
3386 3388 if (is_system_labeled()) {
3387 3389 ts_label_t *tsl = NULL;
3388 3390
3389 3391 error = tsol_check_dest(ixa->ixa_tsl, &dst_addr, IPV4_VERSION,
3390 3392 mac_mode, (flags & IPDF_ZONE_IS_GLOBAL) != 0, &tsl);
3391 3393 if (error != 0)
3392 3394 return (error);
3393 3395 if (tsl != NULL) {
3394 3396 /* Update the label */
3395 3397 ip_xmit_attr_replace_tsl(ixa, tsl);
3396 3398 }
3397 3399 }
3398 3400
3399 3401 setsrc = INADDR_ANY;
3400 3402 /*
3401 3403 	 * Select a route; for IPMP interfaces, we would only select
3402 3404 * a "hidden" route (i.e., going through a specific under_ill)
3403 3405 * if ixa_ifindex has been specified.
3404 3406 */
3405 3407 ire = ip_select_route_v4(firsthop, *src_addrp, ixa,
3406 3408 &generation, &setsrc, &error, &multirt);
3407 3409 ASSERT(ire != NULL); /* IRE_NOROUTE if none found */
3408 3410 if (error != 0)
3409 3411 goto bad_addr;
3410 3412
3411 3413 /*
3412 3414 * ire can't be a broadcast or multicast unless IPDF_ALLOW_MCBC is set.
3413 3415 * If IPDF_VERIFY_DST is set, the destination must be reachable;
3414 3416 	 * otherwise the destination needn't be reachable.
3415 3417 *
3416 3418 * If we match on a reject or black hole, then we've got a
3417 3419 * local failure. May as well fail out the connect() attempt,
3418 3420 * since it's never going to succeed.
3419 3421 */
3420 3422 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
3421 3423 /*
3422 3424 * If we're verifying destination reachability, we always want
3423 3425 * to complain here.
3424 3426 *
3425 3427 * If we're not verifying destination reachability but the
3426 3428 * destination has a route, we still want to fail on the
3427 3429 * temporary address and broadcast address tests.
3428 3430 *
3429 3431 		 * In both cases we let the code continue so that some reasonable
3430 3432 		 * information is returned to the caller. That enables the
3431 3433 		 * caller to use (and even cache) the IRE. conn_ip_output will
3432 3434 * use the generation mismatch path to check for the unreachable
3433 3435 * case thereby avoiding any specific check in the main path.
3434 3436 */
3435 3437 ASSERT(generation == IRE_GENERATION_VERIFY);
3436 3438 if (flags & IPDF_VERIFY_DST) {
3437 3439 /*
3438 3440 * Set errno but continue to set up ixa_ire to be
3439 3441 * the RTF_REJECT|RTF_BLACKHOLE IRE.
3440 3442 * That allows callers to use ip_output to get an
3441 3443 * ICMP error back.
3442 3444 */
3443 3445 if (!(ire->ire_type & IRE_HOST))
3444 3446 error = ENETUNREACH;
3445 3447 else
3446 3448 error = EHOSTUNREACH;
3447 3449 }
3448 3450 }
3449 3451
3450 3452 if ((ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST)) &&
3451 3453 !(flags & IPDF_ALLOW_MCBC)) {
3452 3454 ire_refrele(ire);
3453 3455 ire = ire_reject(ipst, B_FALSE);
3454 3456 generation = IRE_GENERATION_VERIFY;
3455 3457 error = ENETUNREACH;
3456 3458 }
3457 3459
3458 3460 /* Cache things */
3459 3461 if (ixa->ixa_ire != NULL)
3460 3462 ire_refrele_notr(ixa->ixa_ire);
3461 3463 #ifdef DEBUG
3462 3464 ire_refhold_notr(ire);
3463 3465 ire_refrele(ire);
3464 3466 #endif
3465 3467 ixa->ixa_ire = ire;
3466 3468 ixa->ixa_ire_generation = generation;
3467 3469
3468 3470 /*
3469 3471 * Ensure that ixa_dce is always set any time that ixa_ire is set,
3470 3472 * since some callers will send a packet to conn_ip_output() even if
3471 3473 * there's an error.
3472 3474 */
3473 3475 if (flags & IPDF_UNIQUE_DCE) {
3474 3476 		/* Fall back to the default dce if allocation fails */
3475 3477 dce = dce_lookup_and_add_v4(dst_addr, ipst);
3476 3478 if (dce != NULL)
3477 3479 generation = dce->dce_generation;
3478 3480 else
3479 3481 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3480 3482 } else {
3481 3483 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3482 3484 }
3483 3485 ASSERT(dce != NULL);
3484 3486 if (ixa->ixa_dce != NULL)
3485 3487 dce_refrele_notr(ixa->ixa_dce);
3486 3488 #ifdef DEBUG
3487 3489 dce_refhold_notr(dce);
3488 3490 dce_refrele(dce);
3489 3491 #endif
3490 3492 ixa->ixa_dce = dce;
3491 3493 ixa->ixa_dce_generation = generation;
3492 3494
3493 3495 /*
3494 3496 * For multicast with multirt we have a flag passed back from
3495 3497 * ire_lookup_multi_ill_v4 since we don't have an IRE for each
3496 3498 * possible multicast address.
3497 3499 * We also need a flag for multicast since we can't check
3498 3500 * whether RTF_MULTIRT is set in ixa_ire for multicast.
3499 3501 */
3500 3502 if (multirt) {
3501 3503 ixa->ixa_postfragfn = ip_postfrag_multirt_v4;
3502 3504 ixa->ixa_flags |= IXAF_MULTIRT_MULTICAST;
3503 3505 } else {
3504 3506 ixa->ixa_postfragfn = ire->ire_postfragfn;
3505 3507 ixa->ixa_flags &= ~IXAF_MULTIRT_MULTICAST;
3506 3508 }
3507 3509 if (!(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3508 3510 /* Get an nce to cache. */
3509 3511 nce = ire_to_nce(ire, firsthop, NULL);
3510 3512 if (nce == NULL) {
3511 3513 /* Allocation failure? */
3512 3514 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3513 3515 } else {
3514 3516 if (ixa->ixa_nce != NULL)
3515 3517 nce_refrele(ixa->ixa_nce);
3516 3518 ixa->ixa_nce = nce;
3517 3519 }
3518 3520 }
3519 3521
3520 3522 /*
3521 3523 * If the source address is a loopback address, the
3522 3524 * destination had best be local or multicast.
3523 3525 * If we are sending to an IRE_LOCAL using a loopback source then
3524 3526 * it had better be the same zoneid.
3525 3527 */
3526 3528 if (*src_addrp == htonl(INADDR_LOOPBACK)) {
3527 3529 if ((ire->ire_type & IRE_LOCAL) && ire->ire_zoneid != zoneid) {
3528 3530 ire = NULL; /* Stored in ixa_ire */
3529 3531 error = EADDRNOTAVAIL;
3530 3532 goto bad_addr;
3531 3533 }
3532 3534 if (!(ire->ire_type & (IRE_LOOPBACK|IRE_LOCAL|IRE_MULTICAST))) {
3533 3535 ire = NULL; /* Stored in ixa_ire */
3534 3536 error = EADDRNOTAVAIL;
3535 3537 goto bad_addr;
3536 3538 }
3537 3539 }
3538 3540 if (ire->ire_type & IRE_BROADCAST) {
3539 3541 /*
3540 3542 * If the ULP didn't have a specified source, then we
3541 3543 * make sure we reselect the source when sending
3542 3544 * broadcasts out different interfaces.
3543 3545 */
3544 3546 if (flags & IPDF_SELECT_SRC)
3545 3547 ixa->ixa_flags |= IXAF_SET_SOURCE;
3546 3548 else
3547 3549 ixa->ixa_flags &= ~IXAF_SET_SOURCE;
3548 3550 }
3549 3551
3550 3552 /*
3551 3553 * Does the caller want us to pick a source address?
3552 3554 */
3553 3555 if (flags & IPDF_SELECT_SRC) {
3554 3556 ipaddr_t src_addr;
3555 3557
3556 3558 /*
3557 3559 		 * We use ire_nexthop_ill to avoid the under IPMP
3558 3560 		 * interface for source address selection. Note that for IPMP
3559 3561 		 * probe packets, ixa_ifindex would have been specified, and
3560 3562 		 * the ip_select_route() invocation would have picked an ire
3561 3563 		 * with ire_ill pointing at an under interface.
3562 3564 */
3563 3565 ill = ire_nexthop_ill(ire);
3564 3566
3565 3567 /* If unreachable we have no ill but need some source */
3566 3568 if (ill == NULL) {
3567 3569 src_addr = htonl(INADDR_LOOPBACK);
3568 3570 /* Make sure we look for a better source address */
3569 3571 generation = SRC_GENERATION_VERIFY;
3570 3572 } else {
3571 3573 error = ip_select_source_v4(ill, setsrc, dst_addr,
3572 3574 ixa->ixa_multicast_ifaddr, zoneid,
3573 3575 ipst, &src_addr, &generation, NULL);
3574 3576 if (error != 0) {
3575 3577 ire = NULL; /* Stored in ixa_ire */
3576 3578 goto bad_addr;
3577 3579 }
3578 3580 }
3579 3581
3580 3582 /*
3581 3583 		 * We allow the source address to be down.
3582 3584 * However, we check that we don't use the loopback address
3583 3585 * as a source when sending out on the wire.
3584 3586 */
3585 3587 if ((src_addr == htonl(INADDR_LOOPBACK)) &&
3586 3588 !(ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK|IRE_MULTICAST)) &&
3587 3589 !(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3588 3590 ire = NULL; /* Stored in ixa_ire */
3589 3591 error = EADDRNOTAVAIL;
3590 3592 goto bad_addr;
3591 3593 }
3592 3594
3593 3595 *src_addrp = src_addr;
3594 3596 ixa->ixa_src_generation = generation;
3595 3597 }
3596 3598
3597 3599 /*
3598 3600 * Make sure we don't leave an unreachable ixa_nce in place
3599 3601 	 * since ip_select_route is used when we unplumb, i.e., remove
3600 3602 * references on ixa_ire, ixa_nce, and ixa_dce.
3601 3603 */
3602 3604 nce = ixa->ixa_nce;
3603 3605 if (nce != NULL && nce->nce_is_condemned) {
3604 3606 nce_refrele(nce);
3605 3607 ixa->ixa_nce = NULL;
3606 3608 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3607 3609 }
3608 3610
3609 3611 /*
3610 3612 	 * The caller has set IXAF_PMTU_DISCOVERY if path MTU discovery is desired.
3611 3613 * However, we can't do it for IPv4 multicast or broadcast.
3612 3614 */
3613 3615 if (ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST))
3614 3616 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3615 3617
3616 3618 /*
3617 3619 * Set initial value for fragmentation limit. Either conn_ip_output
3618 3620 	 * or the ULP might update it when there are routing changes.
3619 3621 * Handles a NULL ixa_ire->ire_ill or a NULL ixa_nce for RTF_REJECT.
3620 3622 */
3621 3623 pmtu = ip_get_pmtu(ixa);
3622 3624 ixa->ixa_fragsize = pmtu;
3623 3625 /* Make sure ixa_fragsize and ixa_pmtu remain identical */
3624 3626 if (ixa->ixa_flags & IXAF_VERIFY_PMTU)
3625 3627 ixa->ixa_pmtu = pmtu;
3626 3628
3627 3629 /*
3628 3630 * Extract information useful for some transports.
3629 3631 	 * First we look for DCE metrics. Then we fall back to the
3630 3632 	 * metrics on the route, using the offlink route's metrics if
3631 3633 	 * we have one.
3632 3634 */
3633 3635 if (uinfo != NULL) {
3634 3636 bzero(uinfo, sizeof (*uinfo));
3635 3637
3636 3638 if (dce->dce_flags & DCEF_UINFO)
3637 3639 *uinfo = dce->dce_uinfo;
3638 3640
3639 3641 rts_merge_metrics(uinfo, &ire->ire_metrics);
3640 3642
3641 3643 /* Allow ire_metrics to decrease the path MTU from above */
3642 3644 if (uinfo->iulp_mtu == 0 || uinfo->iulp_mtu > pmtu)
3643 3645 uinfo->iulp_mtu = pmtu;
3644 3646
3645 3647 uinfo->iulp_localnet = (ire->ire_type & IRE_ONLINK) != 0;
3646 3648 uinfo->iulp_loopback = (ire->ire_type & IRE_LOOPBACK) != 0;
3647 3649 uinfo->iulp_local = (ire->ire_type & IRE_LOCAL) != 0;
3648 3650 }
3649 3651
3650 3652 if (ill != NULL)
3651 3653 ill_refrele(ill);
3652 3654
3653 3655 return (error);
3654 3656
3655 3657 bad_addr:
3656 3658 if (ire != NULL)
3657 3659 ire_refrele(ire);
3658 3660
3659 3661 if (ill != NULL)
3660 3662 ill_refrele(ill);
3661 3663
3662 3664 /*
3663 3665 * Make sure we don't leave an unreachable ixa_nce in place
3664 3666 	 * since ip_select_route is used when we unplumb, i.e., remove
3665 3667 * references on ixa_ire, ixa_nce, and ixa_dce.
3666 3668 */
3667 3669 nce = ixa->ixa_nce;
3668 3670 if (nce != NULL && nce->nce_is_condemned) {
3669 3671 nce_refrele(nce);
3670 3672 ixa->ixa_nce = NULL;
3671 3673 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3672 3674 }
3673 3675
3674 3676 return (error);
3675 3677 }
3676 3678
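The cached ixa_ire/ixa_dce/ixa_nce triple relies on generation numbers for invalidation: a cached pointer is trusted only while the generation recorded at caching time still matches the live object, and the IRE_GENERATION_VERIFY sentinel forces revalidation. A minimal sketch of that pattern, with hypothetical types in place of the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define	GEN_VERIFY	0	/* sentinel: always revalidate */

struct cached_ref {
	void		*obj;		/* e.g. the cached ire */
	uint32_t	gen_seen;	/* generation when cached */
};

static bool
cache_still_valid(const struct cached_ref *c, uint32_t current_gen)
{
	return (c->obj != NULL && c->gen_seen != GEN_VERIFY &&
	    c->gen_seen == current_gen);
}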
3677 3679
3678 3680 /*
3679 3681 * Get the base MTU for the case when path MTU discovery is not used.
3680 3682 * Takes the MTU of the IRE into account.
3681 3683 */
3682 3684 uint_t
3683 3685 ip_get_base_mtu(ill_t *ill, ire_t *ire)
3684 3686 {
3685 3687 uint_t mtu;
3686 3688 uint_t iremtu = ire->ire_metrics.iulp_mtu;
3687 3689
3688 3690 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST))
3689 3691 mtu = ill->ill_mc_mtu;
3690 3692 else
3691 3693 mtu = ill->ill_mtu;
3692 3694
3693 3695 if (iremtu != 0 && iremtu < mtu)
3694 3696 mtu = iremtu;
3695 3697
3696 3698 return (mtu);
3697 3699 }
3698 3700
3699 3701 /*
3700 3702 * Get the PMTU for the attributes. Handles both IPv4 and IPv6.
3701 3703 * Assumes that ixa_ire, dce, and nce have already been set up.
3702 3704 *
3703 3705 * The caller has set IXAF_PMTU_DISCOVERY if path MTU discovery is desired.
3704 3706 * We avoid path MTU discovery if it is disabled with ndd.
3705 3707 	 * Furthermore, if the path MTU is too small, then we don't set DF for IPv4.
3706 3708 *
3707 3709 * NOTE: We also used to turn it off for source routed packets. That
3708 3710 * is no longer required since the dce is per final destination.
3709 3711 */
3710 3712 uint_t
3711 3713 ip_get_pmtu(ip_xmit_attr_t *ixa)
3712 3714 {
3713 3715 ip_stack_t *ipst = ixa->ixa_ipst;
3714 3716 dce_t *dce;
3715 3717 nce_t *nce;
3716 3718 ire_t *ire;
3717 3719 uint_t pmtu;
3718 3720
3719 3721 ire = ixa->ixa_ire;
3720 3722 dce = ixa->ixa_dce;
3721 3723 nce = ixa->ixa_nce;
3722 3724
3723 3725 /*
3724 3726 * If path MTU discovery has been turned off by ndd, then we ignore
3725 3727 * any dce_pmtu and for IPv4 we will not set DF.
3726 3728 */
3727 3729 if (!ipst->ips_ip_path_mtu_discovery)
3728 3730 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3729 3731
3730 3732 pmtu = IP_MAXPACKET;
3731 3733 /*
3732 3734 * Decide whether whether IPv4 sets DF
3733 3735 	 * Decide whether IPv4 sets DF.
3734 3736 	 * For IPv6, "no DF" means to use the 1280-byte minimum MTU.
3735 3737 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3736 3738 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3737 3739 } else {
3738 3740 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3739 3741 if (!(ixa->ixa_flags & IXAF_IS_IPV4))
3740 3742 pmtu = IPV6_MIN_MTU;
3741 3743 }
3742 3744
3743 3745 	/* Check if the PMTU is too old before we use it */
3744 3746 if ((dce->dce_flags & DCEF_PMTU) &&
3745 3747 TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time >
3746 3748 ipst->ips_ip_pathmtu_interval) {
3747 3749 /*
3748 3750 * Older than 20 minutes. Drop the path MTU information.
3749 3751 */
3750 3752 mutex_enter(&dce->dce_lock);
3751 3753 dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
3752 3754 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
3753 3755 mutex_exit(&dce->dce_lock);
3754 3756 dce_increment_generation(dce);
3755 3757 }
3756 3758
3757 3759 /* The metrics on the route can lower the path MTU */
3758 3760 if (ire->ire_metrics.iulp_mtu != 0 &&
3759 3761 ire->ire_metrics.iulp_mtu < pmtu)
3760 3762 pmtu = ire->ire_metrics.iulp_mtu;
3761 3763
3762 3764 /*
3763 3765 * If the path MTU is smaller than some minimum, we still use dce_pmtu
3764 3766 * above (would be 576 for IPv4 and 1280 for IPv6), but we clear
3765 3767 * IXAF_PMTU_IPV4_DF so that we avoid setting DF for IPv4.
3766 3768 */
3767 3769 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3768 3770 if (dce->dce_flags & DCEF_PMTU) {
3769 3771 if (dce->dce_pmtu < pmtu)
3770 3772 pmtu = dce->dce_pmtu;
3771 3773
3772 3774 if (dce->dce_flags & DCEF_TOO_SMALL_PMTU) {
3773 3775 ixa->ixa_flags |= IXAF_PMTU_TOO_SMALL;
3774 3776 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3775 3777 } else {
3776 3778 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3777 3779 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3778 3780 }
3779 3781 } else {
3780 3782 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3781 3783 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3782 3784 }
3783 3785 }
3784 3786
3785 3787 /*
3786 3788 	 * If we have an IRE_LOCAL we use the loopback mtu instead of
3787 3789 	 * the mtu of the ill used for going out on the wire, i.e.,
3788 3790 	 * IRE_LOCAL gets the same mtu as IRE_LOOPBACK.
3789 3791 */
3790 3792 if (ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) {
3791 3793 uint_t loopback_mtu;
3792 3794
3793 3795 loopback_mtu = (ire->ire_ipversion == IPV6_VERSION) ?
3794 3796 ip_loopback_mtu_v6plus : ip_loopback_mtuplus;
3795 3797
3796 3798 if (loopback_mtu < pmtu)
3797 3799 pmtu = loopback_mtu;
3798 3800 } else if (nce != NULL) {
3799 3801 /*
3800 3802 * Make sure we don't exceed the interface MTU.
3801 3803 * In the case of RTF_REJECT or RTF_BLACKHOLE we might not have
3802 3804 * an ill. We'd use the above IP_MAXPACKET in that case just
3803 3805 * to tell the transport something larger than zero.
3804 3806 */
3805 3807 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST)) {
3806 3808 if (nce->nce_common->ncec_ill->ill_mc_mtu < pmtu)
3807 3809 pmtu = nce->nce_common->ncec_ill->ill_mc_mtu;
3808 3810 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3809 3811 nce->nce_ill->ill_mc_mtu < pmtu) {
3810 3812 /*
3811 3813 * for interfaces in an IPMP group, the mtu of
3812 3814 * the nce_ill (under_ill) could be different
3813 3815 * from the mtu of the ncec_ill, so we take the
3814 3816 * min of the two.
3815 3817 */
3816 3818 pmtu = nce->nce_ill->ill_mc_mtu;
3817 3819 }
3818 3820 } else {
3819 3821 if (nce->nce_common->ncec_ill->ill_mtu < pmtu)
3820 3822 pmtu = nce->nce_common->ncec_ill->ill_mtu;
3821 3823 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3822 3824 nce->nce_ill->ill_mtu < pmtu) {
3823 3825 /*
3824 3826 * for interfaces in an IPMP group, the mtu of
3825 3827 * the nce_ill (under_ill) could be different
3826 3828 * from the mtu of the ncec_ill, so we take the
3827 3829 * min of the two.
3828 3830 */
3829 3831 pmtu = nce->nce_ill->ill_mtu;
3830 3832 }
3831 3833 }
3832 3834 }
3833 3835
3834 3836 /*
3835 3837 * Handle the IPV6_USE_MIN_MTU socket option or ancillary data.
3836 3838 * Only applies to IPv6.
3837 3839 */
3838 3840 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3839 3841 if (ixa->ixa_flags & IXAF_USE_MIN_MTU) {
3840 3842 switch (ixa->ixa_use_min_mtu) {
3841 3843 case IPV6_USE_MIN_MTU_MULTICAST:
3842 3844 if (ire->ire_type & IRE_MULTICAST)
3843 3845 pmtu = IPV6_MIN_MTU;
3844 3846 break;
3845 3847 case IPV6_USE_MIN_MTU_ALWAYS:
3846 3848 pmtu = IPV6_MIN_MTU;
3847 3849 break;
3848 3850 case IPV6_USE_MIN_MTU_NEVER:
3849 3851 break;
3850 3852 }
3851 3853 } else {
3852 3854 /* Default is IPV6_USE_MIN_MTU_MULTICAST */
3853 3855 if (ire->ire_type & IRE_MULTICAST)
3854 3856 pmtu = IPV6_MIN_MTU;
3855 3857 }
3856 3858 }
3857 3859
3858 3860 /*
3859 3861 	 * After receiving an ICMPv6 "packet too big" message with an
3860 3862 	 * MTU < 1280, and for multirouted IPv6 packets, the IP layer
3861 3863 	 * will insert an 8-byte fragment header in every packet. We compensate
3862 3864 * for those cases by returning a smaller path MTU to the ULP.
3863 3865 *
3864 3866 * In the case of CGTP then ip_output will add a fragment header.
3865 3867 * Make sure there is room for it by telling a smaller number
3866 3868 * to the transport.
3867 3869 *
3868 3870 	 * When IXAF_IPV6_ADD_FRAGHDR is set we subtract the frag hdr here
3869 3871 	 * so the ULPs consistently see an iulp_pmtu and ip_get_pmtu()
3870 3872 	 * value that is the size of the packets they can send.
3871 3873 */
3872 3874 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3873 3875 if ((dce->dce_flags & DCEF_TOO_SMALL_PMTU) ||
3874 3876 (ire->ire_flags & RTF_MULTIRT) ||
3875 3877 (ixa->ixa_flags & IXAF_MULTIRT_MULTICAST)) {
3876 3878 pmtu -= sizeof (ip6_frag_t);
3877 3879 ixa->ixa_flags |= IXAF_IPV6_ADD_FRAGHDR;
3878 3880 }
3879 3881 }
3880 3882
3881 3883 return (pmtu);
3882 3884 }
3883 3885
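The IPV6_USE_MIN_MTU handling above corresponds to the RFC 3542 socket option, where 1 means always use the IPv6 minimum MTU, 0 means never (do path MTU discovery), and -1, the default, means minimum MTU for multicast only. A userland sketch, assuming an RFC 3542 stack; error handling elided:

#include <sys/socket.h>
#include <netinet/in.h>

int
use_min_mtu_always(int s)
{
	int v = 1;	/* 1 == always use the 1280-byte minimum MTU */

	return (setsockopt(s, IPPROTO_IPV6, IPV6_USE_MIN_MTU,
	    &v, sizeof (v)));
}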
3884 3886 /*
3885 3887 * Carve "len" bytes out of an mblk chain, consuming any we empty, and duping
3886 3888 * the final piece where we don't. Return a pointer to the first mblk in the
3887 3889 * result, and update the pointer to the next mblk to chew on. If anything
3888 3890 * goes wrong (i.e., dupb fails), we waste everything in sight and return a
3889 3891 * NULL pointer.
3890 3892 */
3891 3893 mblk_t *
3892 3894 ip_carve_mp(mblk_t **mpp, ssize_t len)
3893 3895 {
3894 3896 mblk_t *mp0;
3895 3897 mblk_t *mp1;
3896 3898 mblk_t *mp2;
3897 3899
3898 3900 if (!len || !mpp || !(mp0 = *mpp))
3899 3901 return (NULL);
3900 3902 /* If we aren't going to consume the first mblk, we need a dup. */
3901 3903 if (mp0->b_wptr - mp0->b_rptr > len) {
3902 3904 mp1 = dupb(mp0);
3903 3905 if (mp1) {
3904 3906 /* Partition the data between the two mblks. */
3905 3907 mp1->b_wptr = mp1->b_rptr + len;
3906 3908 mp0->b_rptr = mp1->b_wptr;
3907 3909 /*
3908 3910 			 * After the adjustments, if the mblk not consumed
3909 3911 			 * is now unaligned, try to align it. If this fails,
3910 3912 			 * free all messages and let the upper layer recover.
3911 3913 */
3912 3914 if (!OK_32PTR(mp0->b_rptr)) {
3913 3915 if (!pullupmsg(mp0, -1)) {
3914 3916 freemsg(mp0);
3915 3917 freemsg(mp1);
3916 3918 *mpp = NULL;
3917 3919 return (NULL);
3918 3920 }
3919 3921 }
3920 3922 }
3921 3923 return (mp1);
3922 3924 }
3923 3925 /* Eat through as many mblks as we need to get len bytes. */
3924 3926 len -= mp0->b_wptr - mp0->b_rptr;
3925 3927 for (mp2 = mp1 = mp0; (mp2 = mp2->b_cont) != 0 && len; mp1 = mp2) {
3926 3928 if (mp2->b_wptr - mp2->b_rptr > len) {
3927 3929 /*
3928 3930 * We won't consume the entire last mblk. Like
3929 3931 * above, dup and partition it.
3930 3932 */
3931 3933 mp1->b_cont = dupb(mp2);
3932 3934 mp1 = mp1->b_cont;
3933 3935 if (!mp1) {
3934 3936 /*
3935 3937 * Trouble. Rather than go to a lot of
3936 3938 * trouble to clean up, we free the messages.
3937 3939 * This won't be any worse than losing it on
3938 3940 * the wire.
3939 3941 */
3940 3942 freemsg(mp0);
3941 3943 freemsg(mp2);
3942 3944 *mpp = NULL;
3943 3945 return (NULL);
3944 3946 }
3945 3947 mp1->b_wptr = mp1->b_rptr + len;
3946 3948 mp2->b_rptr = mp1->b_wptr;
3947 3949 /*
3948 3950 			 * After the adjustments, if the mblk not consumed
3949 3951 			 * is now unaligned, try to align it. If this fails,
3950 3952 			 * free all messages and let the upper layer recover.
3951 3953 */
3952 3954 if (!OK_32PTR(mp2->b_rptr)) {
3953 3955 if (!pullupmsg(mp2, -1)) {
3954 3956 freemsg(mp0);
3955 3957 freemsg(mp2);
3956 3958 *mpp = NULL;
3957 3959 return (NULL);
3958 3960 }
3959 3961 }
3960 3962 *mpp = mp2;
3961 3963 return (mp0);
3962 3964 }
3963 3965 /* Decrement len by the amount we just got. */
3964 3966 len -= mp2->b_wptr - mp2->b_rptr;
3965 3967 }
3966 3968 /*
3967 3969 * len should be reduced to zero now. If not our caller has
3968 3970 * screwed up.
3969 3971 */
3970 3972 if (len) {
3971 3973 /* Shouldn't happen! */
3972 3974 freemsg(mp0);
3973 3975 *mpp = NULL;
3974 3976 return (NULL);
3975 3977 }
3976 3978 /*
3977 3979 * We consumed up to exactly the end of an mblk. Detach the part
3978 3980 * we are returning from the rest of the chain.
3979 3981 */
3980 3982 mp1->b_cont = NULL;
3981 3983 *mpp = mp2;
3982 3984 return (mp0);
3983 3985 }
3984 3986
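A userland analogue of the carve operation, copying into a flat buffer instead of dup'ing the split block; the node type is hypothetical and, unlike the kernel version, exhausted blocks are merely unlinked here rather than freed:

#include <string.h>

struct buf {
	unsigned char	*rptr, *wptr;
	struct buf	*next;
};

/* Consume up to len bytes from *chain into dst; returns bytes copied. */
static size_t
carve(struct buf **chain, unsigned char *dst, size_t len)
{
	size_t done = 0;

	while (*chain != NULL && done < len) {
		struct buf *b = *chain;
		size_t avail = (size_t)(b->wptr - b->rptr);
		size_t take = (len - done < avail) ? len - done : avail;

		memcpy(dst + done, b->rptr, take);
		b->rptr += take;
		done += take;
		if (b->rptr == b->wptr)		/* block fully consumed */
			*chain = b->next;
	}
	return (done);
}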
3985 3987 /* The ill stream is being unplumbed. Called from ip_close */
3986 3988 int
3987 3989 ip_modclose(ill_t *ill)
3988 3990 {
3989 3991 boolean_t success;
3990 3992 ipsq_t *ipsq;
3991 3993 ipif_t *ipif;
3992 3994 queue_t *q = ill->ill_rq;
3993 3995 ip_stack_t *ipst = ill->ill_ipst;
3994 3996 int i;
3995 3997 arl_ill_common_t *ai = ill->ill_common;
3996 3998
3997 3999 /*
3998 4000 * The punlink prior to this may have initiated a capability
3999 4001 * negotiation. But ipsq_enter will block until that finishes or
4000 4002 * times out.
4001 4003 */
4002 4004 success = ipsq_enter(ill, B_FALSE, NEW_OP);
4003 4005
4004 4006 /*
4005 4007 * Open/close/push/pop is guaranteed to be single threaded
4006 4008 * per stream by STREAMS. FS guarantees that all references
4007 4009 * from top are gone before close is called. So there can't
4008 4010 * be another close thread that has set CONDEMNED on this ill.
4009 4011 	 * be another close thread that has set CONDEMNED on this ill
4010 4012 	 * and caused ipsq_enter to return failure.
4011 4013 ASSERT(success);
4012 4014 ipsq = ill->ill_phyint->phyint_ipsq;
4013 4015
4014 4016 /*
4015 4017 * Mark it condemned. No new reference will be made to this ill.
4016 4018 * Lookup functions will return an error. Threads that try to
4017 4019 * increment the refcnt must check for ILL_CAN_LOOKUP. This ensures
4018 4020 * that the refcnt will drop down to zero.
4019 4021 */
4020 4022 mutex_enter(&ill->ill_lock);
4021 4023 ill->ill_state_flags |= ILL_CONDEMNED;
4022 4024 for (ipif = ill->ill_ipif; ipif != NULL;
4023 4025 ipif = ipif->ipif_next) {
4024 4026 ipif->ipif_state_flags |= IPIF_CONDEMNED;
4025 4027 }
4026 4028 /*
4027 4029 * Wake up anybody waiting to enter the ipsq. ipsq_enter
4028 4030 	 * returns an error if ILL_CONDEMNED is set.
4029 4031 */
4030 4032 cv_broadcast(&ill->ill_cv);
4031 4033 mutex_exit(&ill->ill_lock);
4032 4034
4033 4035 /*
4034 4036 * Send all the deferred DLPI messages downstream which came in
4035 4037 * during the small window right before ipsq_enter(). We do this
4036 4038 * without waiting for the ACKs because all the ACKs for M_PROTO
4037 4039 * messages are ignored in ip_rput() when ILL_CONDEMNED is set.
4038 4040 */
4039 4041 ill_dlpi_send_deferred(ill);
4040 4042
4041 4043 /*
4042 4044 * Shut down fragmentation reassembly.
4043 4045 * ill_frag_timer won't start a timer again.
4044 4046 	 * Now cancel any existing timer.
4045 4047 */
4046 4048 (void) untimeout(ill->ill_frag_timer_id);
4047 4049 (void) ill_frag_timeout(ill, 0);
4048 4050
4049 4051 /*
4050 4052 * Call ill_delete to bring down the ipifs, ilms and ill on
4051 4053 * this ill. Then wait for the refcnts to drop to zero.
4052 4054 * ill_is_freeable checks whether the ill is really quiescent.
4053 4055 * Then make sure that threads that are waiting to enter the
4054 4056 * ipsq have seen the error returned by ipsq_enter and have
4055 4057 * gone away. Then we call ill_delete_tail which does the
4056 4058 * DL_UNBIND_REQ with the driver and then qprocsoff.
4057 4059 */
4058 4060 ill_delete(ill);
4059 4061 mutex_enter(&ill->ill_lock);
4060 4062 while (!ill_is_freeable(ill))
4061 4063 cv_wait(&ill->ill_cv, &ill->ill_lock);
4062 4064
4063 4065 while (ill->ill_waiters)
4064 4066 cv_wait(&ill->ill_cv, &ill->ill_lock);
4065 4067
4066 4068 mutex_exit(&ill->ill_lock);
4067 4069
4068 4070 /*
4069 4071 * ill_delete_tail drops reference on ill_ipst, but we need to keep
4070 4072 * it held until the end of the function since the cleanup
4071 4073 * below needs to be able to use the ip_stack_t.
4072 4074 */
4073 4075 netstack_hold(ipst->ips_netstack);
4074 4076
4075 4077 /* qprocsoff is done via ill_delete_tail */
4076 4078 ill_delete_tail(ill);
4077 4079 /*
4078 4080 * synchronously wait for arp stream to unbind. After this, we
4079 4081 	 * Synchronously wait for the ARP stream to unbind. After this, we
4080 4082 */
4081 4083 arp_unbind_complete(ill);
4082 4084 ASSERT(ill->ill_ipst == NULL);
4083 4085
4084 4086 /*
4085 4087 * Walk through all conns and qenable those that have queued data.
4086 4088 * Close synchronization needs this to
4087 4089 * be done to ensure that all upper layers blocked
4088 4090 * due to flow control to the closing device
4089 4091 * get unblocked.
4090 4092 */
4091 4093 	ip1dbg(("ip_modclose: walking\n"));
4092 4094 for (i = 0; i < TX_FANOUT_SIZE; i++) {
4093 4095 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[i]);
4094 4096 }
4095 4097
4096 4098 /*
4097 4099 * ai can be null if this is an IPv6 ill, or if the IPv4
4098 4100 * stream is being torn down before ARP was plumbed (e.g.,
4099 4101 * /sbin/ifconfig plumbing a stream twice, and encountering
4100 4102 	 * an error).
4101 4103 */
4102 4104 if (ai != NULL) {
4103 4105 ASSERT(!ill->ill_isv6);
4104 4106 mutex_enter(&ai->ai_lock);
4105 4107 ai->ai_ill = NULL;
4106 4108 if (ai->ai_arl == NULL) {
4107 4109 mutex_destroy(&ai->ai_lock);
4108 4110 kmem_free(ai, sizeof (*ai));
4109 4111 } else {
4110 4112 cv_signal(&ai->ai_ill_unplumb_done);
4111 4113 mutex_exit(&ai->ai_lock);
4112 4114 }
4113 4115 }
4114 4116
4115 4117 mutex_enter(&ipst->ips_ip_mi_lock);
4116 4118 mi_close_unlink(&ipst->ips_ip_g_head, (IDP)ill);
4117 4119 mutex_exit(&ipst->ips_ip_mi_lock);
4118 4120
4119 4121 /*
4120 4122 * credp could be null if the open didn't succeed and ip_modopen
4121 4123 * itself calls ip_close.
4122 4124 */
4123 4125 if (ill->ill_credp != NULL)
4124 4126 crfree(ill->ill_credp);
4125 4127
4126 4128 mutex_destroy(&ill->ill_saved_ire_lock);
4127 4129 mutex_destroy(&ill->ill_lock);
4128 4130 rw_destroy(&ill->ill_mcast_lock);
4129 4131 mutex_destroy(&ill->ill_mcast_serializer);
4130 4132 list_destroy(&ill->ill_nce);
4131 4133
4132 4134 /*
4133 4135 * Now we are done with the module close pieces that
4134 4136 * need the netstack_t.
4135 4137 */
4136 4138 netstack_rele(ipst->ips_netstack);
4137 4139
4138 4140 mi_close_free((IDP)ill);
4139 4141 q->q_ptr = WR(q)->q_ptr = NULL;
4140 4142
4141 4143 ipsq_exit(ipsq);
4142 4144
4143 4145 return (0);
4144 4146 }
4145 4147
4146 4148 /*
4147 4149 * This is called as part of close() for IP, UDP, ICMP, and RTS
4148 4150 * in order to quiesce the conn.
4149 4151 */
4150 4152 void
4151 4153 ip_quiesce_conn(conn_t *connp)
4152 4154 {
4153 4155 boolean_t drain_cleanup_reqd = B_FALSE;
4154 4156 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
4155 4157 boolean_t ilg_cleanup_reqd = B_FALSE;
4156 4158 ip_stack_t *ipst;
4157 4159
4158 4160 ASSERT(!IPCL_IS_TCP(connp));
4159 4161 ipst = connp->conn_netstack->netstack_ip;
4160 4162
4161 4163 /*
4162 4164 	 * Mark the conn as closing; this conn must not be
4163 4165 	 * inserted into any list in the future. E.g., conn_drain_insert()
4164 4166 	 * won't insert this conn into the conn_drain_list.
4165 4167 	 *
4166 4168 	 * conn_idl and conn_ilg cannot get set henceforth.
4167 4169 */
4168 4170 mutex_enter(&connp->conn_lock);
4169 4171 ASSERT(!(connp->conn_state_flags & CONN_QUIESCED));
4170 4172 connp->conn_state_flags |= CONN_CLOSING;
4171 4173 if (connp->conn_idl != NULL)
4172 4174 drain_cleanup_reqd = B_TRUE;
4173 4175 if (connp->conn_oper_pending_ill != NULL)
4174 4176 conn_ioctl_cleanup_reqd = B_TRUE;
4175 4177 if (connp->conn_dhcpinit_ill != NULL) {
4176 4178 ASSERT(connp->conn_dhcpinit_ill->ill_dhcpinit != 0);
4177 4179 atomic_dec_32(&connp->conn_dhcpinit_ill->ill_dhcpinit);
4178 4180 ill_set_inputfn(connp->conn_dhcpinit_ill);
4179 4181 connp->conn_dhcpinit_ill = NULL;
4180 4182 }
4181 4183 if (connp->conn_ilg != NULL)
4182 4184 ilg_cleanup_reqd = B_TRUE;
4183 4185 mutex_exit(&connp->conn_lock);
4184 4186
4185 4187 if (conn_ioctl_cleanup_reqd)
4186 4188 conn_ioctl_cleanup(connp);
4187 4189
4188 4190 if (is_system_labeled() && connp->conn_anon_port) {
4189 4191 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
4190 4192 connp->conn_mlp_type, connp->conn_proto,
4191 4193 ntohs(connp->conn_lport), B_FALSE);
4192 4194 connp->conn_anon_port = 0;
4193 4195 }
4194 4196 connp->conn_mlp_type = mlptSingle;
4195 4197
4196 4198 /*
4197 4199 	 * Remove this conn from any fanout list it is on,
4198 4200 	 * and then wait for any threads currently operating
4199 4201 	 * on this endpoint to finish.
4200 4202 */
4201 4203 ipcl_hash_remove(connp);
4202 4204
4203 4205 /*
4204 4206 * Remove this conn from the drain list, and do any other cleanup that
4205 4207 * may be required. (TCP conns are never flow controlled, and
4206 4208 * conn_idl will be NULL.)
4207 4209 */
4208 4210 if (drain_cleanup_reqd && connp->conn_idl != NULL) {
4209 4211 idl_t *idl = connp->conn_idl;
4210 4212
4211 4213 mutex_enter(&idl->idl_lock);
4212 4214 conn_drain(connp, B_TRUE);
4213 4215 mutex_exit(&idl->idl_lock);
4214 4216 }
4215 4217
4216 4218 if (connp == ipst->ips_ip_g_mrouter)
4217 4219 (void) ip_mrouter_done(ipst);
4218 4220
4219 4221 if (ilg_cleanup_reqd)
4220 4222 ilg_delete_all(connp);
4221 4223
4222 4224 /*
4223 4225 * Now conn refcnt can increase only thru CONN_INC_REF_LOCKED.
4224 4226 	 * Callers from the write side can't be there now because close
4225 4227 * is in progress. The only other caller is ipcl_walk
4226 4228 * which checks for the condemned flag.
4227 4229 */
4228 4230 mutex_enter(&connp->conn_lock);
4229 4231 connp->conn_state_flags |= CONN_CONDEMNED;
4230 4232 while (connp->conn_ref != 1)
4231 4233 cv_wait(&connp->conn_cv, &connp->conn_lock);
4232 4234 connp->conn_state_flags |= CONN_QUIESCED;
4233 4235 mutex_exit(&connp->conn_lock);
4234 4236 }
4235 4237
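The condemn-then-drain idiom at the end of ip_quiesce_conn is a general pattern: mark the object so no new references can be taken, then sleep on a condition variable until the count drains to the caller's own reference. A minimal pthread-based sketch, assuming releasers broadcast on the condition variable when they drop a reference:

#include <pthread.h>

struct qobj {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		ref;		/* the caller holds one ref */
	int		condemned;
};

static void
quiesce(struct qobj *o)
{
	pthread_mutex_lock(&o->lock);
	o->condemned = 1;	/* lookups now refuse new references */
	while (o->ref != 1)	/* wait for the other holders to drop */
		pthread_cond_wait(&o->cv, &o->lock);
	pthread_mutex_unlock(&o->lock);
}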
4236 4238 /* ARGSUSED */
4237 4239 int
4238 4240 ip_close(queue_t *q, int flags)
4239 4241 {
4240 4242 conn_t *connp;
4241 4243
4242 4244 /*
4243 4245 * Call the appropriate delete routine depending on whether this is
4244 4246 * a module or device.
4245 4247 */
4246 4248 if (WR(q)->q_next != NULL) {
4247 4249 /* This is a module close */
4248 4250 return (ip_modclose((ill_t *)q->q_ptr));
4249 4251 }
4250 4252
4251 4253 connp = q->q_ptr;
4252 4254 ip_quiesce_conn(connp);
4253 4255
4254 4256 qprocsoff(q);
4255 4257
4256 4258 /*
4257 4259 * Now we are truly single threaded on this stream, and can
4258 4260 * delete the things hanging off the connp, and finally the connp.
4259 4261 	 * We removed this connp from the fanout list, so it cannot be
4260 4262 * accessed thru the fanouts, and we already waited for the
4261 4263 * conn_ref to drop to 0. We are already in close, so
4262 4264 * there cannot be any other thread from the top. qprocsoff
4263 4265 * has completed, and service has completed or won't run in
4264 4266 * future.
4265 4267 */
4266 4268 ASSERT(connp->conn_ref == 1);
4267 4269
4268 4270 inet_minor_free(connp->conn_minor_arena, connp->conn_dev);
4269 4271
4270 4272 connp->conn_ref--;
4271 4273 ipcl_conn_destroy(connp);
4272 4274
4273 4275 q->q_ptr = WR(q)->q_ptr = NULL;
4274 4276 return (0);
4275 4277 }
4276 4278
4277 4279 /*
4278 4280 * Wrapper around putnext() so that ip_rts_request can merely use
4279 4281 * conn_recv.
4280 4282 */
4281 4283 /*ARGSUSED2*/
4282 4284 static void
4283 4285 ip_conn_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4284 4286 {
4285 4287 conn_t *connp = (conn_t *)arg1;
4286 4288
4287 4289 putnext(connp->conn_rq, mp);
4288 4290 }
4289 4291
4290 4292 /* Dummy in case ICMP error delivery is attempted to a /dev/ip instance */
4291 4293 /* ARGSUSED */
4292 4294 static void
4293 4295 ip_conn_input_icmp(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4294 4296 {
4295 4297 freemsg(mp);
4296 4298 }
4297 4299
4298 4300 /*
4299 4301 * Called when the module is about to be unloaded
4300 4302 */
4301 4303 void
4302 4304 ip_ddi_destroy(void)
4303 4305 {
4180 lines elided
4304 4306 /* This needs to be called before destroying any transports. */
4305 4307 mutex_enter(&cpu_lock);
4306 4308 unregister_cpu_setup_func(ip_tp_cpu_update, NULL);
4307 4309 mutex_exit(&cpu_lock);
4308 4310
4309 4311 tnet_fini();
4310 4312
4311 4313 icmp_ddi_g_destroy();
4312 4314 rts_ddi_g_destroy();
4313 4315 udp_ddi_g_destroy();
4316 + dccp_ddi_g_destroy();
4314 4317 sctp_ddi_g_destroy();
4315 4318 tcp_ddi_g_destroy();
4316 4319 ilb_ddi_g_destroy();
4317 4320 dce_g_destroy();
4318 4321 ipsec_policy_g_destroy();
4319 4322 ipcl_g_destroy();
4320 4323 ip_net_g_destroy();
4321 4324 ip_ire_g_fini();
4322 4325 inet_minor_destroy(ip_minor_arena_sa);
4323 4326 #if defined(_LP64)
4324 4327 inet_minor_destroy(ip_minor_arena_la);
4325 4328 #endif
4326 4329
4327 4330 #ifdef DEBUG
4328 4331 list_destroy(&ip_thread_list);
4329 4332 rw_destroy(&ip_thread_rwlock);
4330 4333 tsd_destroy(&ip_thread_data);
4331 4334 #endif
4332 4335
4333 4336 netstack_unregister(NS_IP);
4334 4337 }
4335 4338
4336 4339 /*
4337 4340 * First step in cleanup.
4338 4341 */
4339 4342 /* ARGSUSED */
4340 4343 static void
4341 4344 ip_stack_shutdown(netstackid_t stackid, void *arg)
4342 4345 {
4343 4346 ip_stack_t *ipst = (ip_stack_t *)arg;
4344 4347
4345 4348 #ifdef NS_DEBUG
4346 4349 printf("ip_stack_shutdown(%p, stack %d)\n", (void *)ipst, stackid);
4347 4350 #endif
4348 4351
4349 4352 /*
4350 4353 * Perform cleanup for special interfaces (loopback and IPMP).
4351 4354 */
4352 4355 ip_interface_cleanup(ipst);
4353 4356
4354 4357 /*
4355 4358 * The *_hook_shutdown()s start the process of notifying any
4356 4359 * consumers that things are going away.... nothing is destroyed.
4357 4360 */
4358 4361 ipv4_hook_shutdown(ipst);
4359 4362 ipv6_hook_shutdown(ipst);
4360 4363 arp_hook_shutdown(ipst);
4361 4364
4362 4365 mutex_enter(&ipst->ips_capab_taskq_lock);
4363 4366 ipst->ips_capab_taskq_quit = B_TRUE;
4364 4367 cv_signal(&ipst->ips_capab_taskq_cv);
4365 4368 mutex_exit(&ipst->ips_capab_taskq_lock);
4366 4369 }
4367 4370
4368 4371 /*
4369 4372 * Free the IP stack instance.
4370 4373 */
4371 4374 static void
4372 4375 ip_stack_fini(netstackid_t stackid, void *arg)
4373 4376 {
4374 4377 ip_stack_t *ipst = (ip_stack_t *)arg;
4375 4378 int ret;
4376 4379
4377 4380 #ifdef NS_DEBUG
4378 4381 printf("ip_stack_fini(%p, stack %d)\n", (void *)ipst, stackid);
4379 4382 #endif
4380 4383 /*
4381 4384 * At this point, all of the notifications that the events and
4382 4385 * protocols are going away have been run, meaning that we can
4383 4386 * now set about starting to clean things up.
4384 4387 */
4385 4388 ipobs_fini(ipst);
4386 4389 ipv4_hook_destroy(ipst);
4387 4390 ipv6_hook_destroy(ipst);
4388 4391 arp_hook_destroy(ipst);
4389 4392 ip_net_destroy(ipst);
4390 4393
4391 4394 ipmp_destroy(ipst);
4392 4395
4393 4396 ip_kstat_fini(stackid, ipst->ips_ip_mibkp);
4394 4397 ipst->ips_ip_mibkp = NULL;
4395 4398 icmp_kstat_fini(stackid, ipst->ips_icmp_mibkp);
4396 4399 ipst->ips_icmp_mibkp = NULL;
4397 4400 ip_kstat2_fini(stackid, ipst->ips_ip_kstat);
4398 4401 ipst->ips_ip_kstat = NULL;
4399 4402 bzero(&ipst->ips_ip_statistics, sizeof (ipst->ips_ip_statistics));
4400 4403 ip6_kstat_fini(stackid, ipst->ips_ip6_kstat);
4401 4404 ipst->ips_ip6_kstat = NULL;
4402 4405 bzero(&ipst->ips_ip6_statistics, sizeof (ipst->ips_ip6_statistics));
4403 4406
4404 4407 kmem_free(ipst->ips_propinfo_tbl,
4405 4408 ip_propinfo_count * sizeof (mod_prop_info_t));
4406 4409 ipst->ips_propinfo_tbl = NULL;
4407 4410
4408 4411 dce_stack_destroy(ipst);
4409 4412 ip_mrouter_stack_destroy(ipst);
4410 4413
4411 4414 ret = untimeout(ipst->ips_igmp_timeout_id);
4412 4415 if (ret == -1) {
4413 4416 ASSERT(ipst->ips_igmp_timeout_id == 0);
4414 4417 } else {
4415 4418 ASSERT(ipst->ips_igmp_timeout_id != 0);
4416 4419 ipst->ips_igmp_timeout_id = 0;
4417 4420 }
4418 4421 ret = untimeout(ipst->ips_igmp_slowtimeout_id);
4419 4422 if (ret == -1) {
4420 4423 ASSERT(ipst->ips_igmp_slowtimeout_id == 0);
4421 4424 } else {
4422 4425 ASSERT(ipst->ips_igmp_slowtimeout_id != 0);
4423 4426 ipst->ips_igmp_slowtimeout_id = 0;
4424 4427 }
4425 4428 ret = untimeout(ipst->ips_mld_timeout_id);
4426 4429 if (ret == -1) {
4427 4430 ASSERT(ipst->ips_mld_timeout_id == 0);
4428 4431 } else {
4429 4432 ASSERT(ipst->ips_mld_timeout_id != 0);
4430 4433 ipst->ips_mld_timeout_id = 0;
4431 4434 }
4432 4435 ret = untimeout(ipst->ips_mld_slowtimeout_id);
4433 4436 if (ret == -1) {
4434 4437 ASSERT(ipst->ips_mld_slowtimeout_id == 0);
4435 4438 } else {
4436 4439 ASSERT(ipst->ips_mld_slowtimeout_id != 0);
4437 4440 ipst->ips_mld_slowtimeout_id = 0;
4438 4441 }
4439 4442
4440 4443 ip_ire_fini(ipst);
4441 4444 ip6_asp_free(ipst);
4442 4445 conn_drain_fini(ipst);
4443 4446 ipcl_destroy(ipst);
4444 4447
4445 4448 mutex_destroy(&ipst->ips_ndp4->ndp_g_lock);
4446 4449 mutex_destroy(&ipst->ips_ndp6->ndp_g_lock);
4447 4450 kmem_free(ipst->ips_ndp4, sizeof (ndp_g_t));
4448 4451 ipst->ips_ndp4 = NULL;
4449 4452 kmem_free(ipst->ips_ndp6, sizeof (ndp_g_t));
4450 4453 ipst->ips_ndp6 = NULL;
4451 4454
4452 4455 if (ipst->ips_loopback_ksp != NULL) {
4453 4456 kstat_delete_netstack(ipst->ips_loopback_ksp, stackid);
4454 4457 ipst->ips_loopback_ksp = NULL;
4455 4458 }
4456 4459
4457 4460 mutex_destroy(&ipst->ips_capab_taskq_lock);
4458 4461 cv_destroy(&ipst->ips_capab_taskq_cv);
4459 4462
4460 4463 rw_destroy(&ipst->ips_srcid_lock);
4461 4464
4462 4465 mutex_destroy(&ipst->ips_ip_mi_lock);
4463 4466 rw_destroy(&ipst->ips_ill_g_usesrc_lock);
4464 4467
4465 4468 mutex_destroy(&ipst->ips_igmp_timer_lock);
4466 4469 mutex_destroy(&ipst->ips_mld_timer_lock);
4467 4470 mutex_destroy(&ipst->ips_igmp_slowtimeout_lock);
4468 4471 mutex_destroy(&ipst->ips_mld_slowtimeout_lock);
4469 4472 mutex_destroy(&ipst->ips_ip_addr_avail_lock);
4470 4473 rw_destroy(&ipst->ips_ill_g_lock);
4471 4474
4472 4475 kmem_free(ipst->ips_phyint_g_list, sizeof (phyint_list_t));
4473 4476 ipst->ips_phyint_g_list = NULL;
4474 4477 kmem_free(ipst->ips_ill_g_heads, sizeof (ill_g_head_t) * MAX_G_HEADS);
4475 4478 ipst->ips_ill_g_heads = NULL;
4476 4479
4477 4480 ldi_ident_release(ipst->ips_ldi_ident);
4478 4481 kmem_free(ipst, sizeof (*ipst));
4479 4482 }
4480 4483
4481 4484 /*
4482 4485 * This function is called from the TSD destructor, and is used to debug
4483 4486 * reference count issues in IP. See block comment in <inet/ip_if.h> for
4484 4487 * details.
4485 4488 */
4486 4489 static void
4487 4490 ip_thread_exit(void *phash)
4488 4491 {
4489 4492 th_hash_t *thh = phash;
4490 4493
4491 4494 rw_enter(&ip_thread_rwlock, RW_WRITER);
4492 4495 list_remove(&ip_thread_list, thh);
4493 4496 rw_exit(&ip_thread_rwlock);
4494 4497 mod_hash_destroy_hash(thh->thh_hash);
4495 4498 kmem_free(thh, sizeof (*thh));
4496 4499 }
4497 4500
4498 4501 /*
4499 4502 * Called when the IP kernel module is loaded into the kernel
4500 4503 */
4501 4504 void
4502 4505 ip_ddi_init(void)
4503 4506 {
4504 4507 ip_squeue_flag = ip_squeue_switch(ip_squeue_enter);
4505 4508
4506 4509 /*
4507 4510 * For IP and TCP the minor numbers should start from 2 since we have 4
4508 4511 * initial devices: ip, ip6, tcp, tcp6.
4509 4512 */
4510 4513 /*
4511 4514 * If this is a 64-bit kernel, then create two separate arenas -
4512 4515 * one for TLIs in the range of INET_MIN_DEV+2 through 2^^18-1, and the
4513 4516 * other for socket apps in the range 2^^18 through 2^^32-1.
4514 4517 */
4515 4518 ip_minor_arena_la = NULL;
4516 4519 ip_minor_arena_sa = NULL;
4517 4520 #if defined(_LP64)
4518 4521 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4519 4522 INET_MIN_DEV + 2, MAXMIN32, KM_SLEEP)) == NULL) {
4520 4523 cmn_err(CE_PANIC,
4521 4524 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4522 4525 }
4523 4526 if ((ip_minor_arena_la = inet_minor_create("ip_minor_arena_la",
4524 4527 MAXMIN32 + 1, MAXMIN64, KM_SLEEP)) == NULL) {
4525 4528 cmn_err(CE_PANIC,
4526 4529 "ip_ddi_init: ip_minor_arena_la creation failed\n");
4527 4530 }
4528 4531 #else
4529 4532 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4530 4533 INET_MIN_DEV + 2, MAXMIN, KM_SLEEP)) == NULL) {
4531 4534 cmn_err(CE_PANIC,
4532 4535 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4533 4536 }
4534 4537 #endif
4535 4538 ip_poll_normal_ticks = MSEC_TO_TICK_ROUNDUP(ip_poll_normal_ms);
4536 4539
4537 4540 ipcl_g_init();
4538 4541 ip_ire_g_init();
4539 4542 ip_net_g_init();
216 lines elided
4540 4543
4541 4544 #ifdef DEBUG
4542 4545 tsd_create(&ip_thread_data, ip_thread_exit);
4543 4546 rw_init(&ip_thread_rwlock, NULL, RW_DEFAULT, NULL);
4544 4547 list_create(&ip_thread_list, sizeof (th_hash_t),
4545 4548 offsetof(th_hash_t, thh_link));
4546 4549 #endif
4547 4550 ipsec_policy_g_init();
4548 4551 tcp_ddi_g_init();
4549 4552 sctp_ddi_g_init();
4553 + dccp_ddi_g_init();
4550 4554 dce_g_init();
4551 4555
4552 4556 /*
4553 4557 * We want to be informed each time a stack is created or
4554 4558 * destroyed in the kernel, so we can maintain the
4555 4559 * set of ip_stack_t's.
4556 4560 */
4557 4561 netstack_register(NS_IP, ip_stack_init, ip_stack_shutdown,
4558 4562 ip_stack_fini);
4559 4563
4560 4564 tnet_init();
4561 4565
4562 4566 udp_ddi_g_init();
4563 4567 rts_ddi_g_init();
4564 4568 icmp_ddi_g_init();
4565 4569 ilb_ddi_g_init();
4566 4570
4567 4571 /* This needs to be called after all transports are initialized. */
4568 4572 mutex_enter(&cpu_lock);
4569 4573 register_cpu_setup_func(ip_tp_cpu_update, NULL);
4570 4574 mutex_exit(&cpu_lock);
4571 4575 }
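The dccp hunks above (in ip_ddi_destroy() and ip_ddi_init()) only wire DCCP into IP's global setup and teardown; the registered routines themselves live in the new dccp module. A minimal sketch of what the pair is expected to look like, assuming the usual netstack pattern (NS_DCCP, dccp_stack_init and dccp_stack_fini are placeholder names following the udp/sctp convention, not part of this diff):

void
dccp_ddi_g_init(void)
{
	/*
	 * Register with the netstack framework so that a per-stack
	 * dccp_stack_t is created and destroyed alongside each
	 * netstack, just as udp_ddi_g_init() arranges for UDP.
	 */
	netstack_register(NS_DCCP, dccp_stack_init, NULL, dccp_stack_fini);
}

void
dccp_ddi_g_destroy(void)
{
	/* Undo the registration performed by dccp_ddi_g_init(). */
	netstack_unregister(NS_DCCP);
}

Note that dccp_ddi_g_destroy() is called between the udp and sctp teardown in ip_ddi_destroy(), the reverse of the init order here, so the transports shut down in the opposite order to the one in which they came up.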
4572 4576
4573 4577 /*
4574 4578 * Initialize the IP stack instance.
4575 4579 */
4576 4580 static void *
4577 4581 ip_stack_init(netstackid_t stackid, netstack_t *ns)
4578 4582 {
4579 4583 ip_stack_t *ipst;
4580 4584 size_t arrsz;
4581 4585 major_t major;
4582 4586
4583 4587 #ifdef NS_DEBUG
4584 4588 printf("ip_stack_init(stack %d)\n", stackid);
4585 4589 #endif
4586 4590
4587 4591 ipst = (ip_stack_t *)kmem_zalloc(sizeof (*ipst), KM_SLEEP);
4588 4592 ipst->ips_netstack = ns;
4589 4593
4590 4594 ipst->ips_ill_g_heads = kmem_zalloc(sizeof (ill_g_head_t) * MAX_G_HEADS,
4591 4595 KM_SLEEP);
4592 4596 ipst->ips_phyint_g_list = kmem_zalloc(sizeof (phyint_list_t),
4593 4597 KM_SLEEP);
4594 4598 ipst->ips_ndp4 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4595 4599 ipst->ips_ndp6 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4596 4600 mutex_init(&ipst->ips_ndp4->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4597 4601 mutex_init(&ipst->ips_ndp6->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4598 4602
4599 4603 mutex_init(&ipst->ips_igmp_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4600 4604 ipst->ips_igmp_deferred_next = INFINITY;
4601 4605 mutex_init(&ipst->ips_mld_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4602 4606 ipst->ips_mld_deferred_next = INFINITY;
4603 4607 mutex_init(&ipst->ips_igmp_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4604 4608 mutex_init(&ipst->ips_mld_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4605 4609 mutex_init(&ipst->ips_ip_mi_lock, NULL, MUTEX_DEFAULT, NULL);
4606 4610 mutex_init(&ipst->ips_ip_addr_avail_lock, NULL, MUTEX_DEFAULT, NULL);
4607 4611 rw_init(&ipst->ips_ill_g_lock, NULL, RW_DEFAULT, NULL);
4608 4612 rw_init(&ipst->ips_ill_g_usesrc_lock, NULL, RW_DEFAULT, NULL);
4609 4613
4610 4614 ipcl_init(ipst);
4611 4615 ip_ire_init(ipst);
4612 4616 ip6_asp_init(ipst);
4613 4617 ipif_init(ipst);
4614 4618 conn_drain_init(ipst);
4615 4619 ip_mrouter_stack_init(ipst);
4616 4620 dce_stack_init(ipst);
4617 4621
4618 4622 ipst->ips_ip_multirt_log_interval = 1000;
4619 4623
4620 4624 ipst->ips_ill_index = 1;
4621 4625
4622 4626 ipst->ips_saved_ip_forwarding = -1;
4623 4627 ipst->ips_reg_vif_num = ALL_VIFS; /* Index to Register vif */
4624 4628
4625 4629 arrsz = ip_propinfo_count * sizeof (mod_prop_info_t);
4626 4630 ipst->ips_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz, KM_SLEEP);
4627 4631 bcopy(ip_propinfo_tbl, ipst->ips_propinfo_tbl, arrsz);
4628 4632
4629 4633 ipst->ips_ip_mibkp = ip_kstat_init(stackid, ipst);
4630 4634 ipst->ips_icmp_mibkp = icmp_kstat_init(stackid);
4631 4635 ipst->ips_ip_kstat = ip_kstat2_init(stackid, &ipst->ips_ip_statistics);
4632 4636 ipst->ips_ip6_kstat =
4633 4637 ip6_kstat_init(stackid, &ipst->ips_ip6_statistics);
4634 4638
4635 4639 ipst->ips_ip_src_id = 1;
4636 4640 rw_init(&ipst->ips_srcid_lock, NULL, RW_DEFAULT, NULL);
4637 4641
4638 4642 ipst->ips_src_generation = SRC_GENERATION_INITIAL;
4639 4643
4640 4644 ip_net_init(ipst, ns);
4641 4645 ipv4_hook_init(ipst);
4642 4646 ipv6_hook_init(ipst);
4643 4647 arp_hook_init(ipst);
4644 4648 ipmp_init(ipst);
4645 4649 ipobs_init(ipst);
4646 4650
4647 4651 /*
4648 4652 * Create the taskq dispatcher thread and initialize related stuff.
4649 4653 */
4650 4654 mutex_init(&ipst->ips_capab_taskq_lock, NULL, MUTEX_DEFAULT, NULL);
4651 4655 cv_init(&ipst->ips_capab_taskq_cv, NULL, CV_DEFAULT, NULL);
4652 4656 ipst->ips_capab_taskq_thread = thread_create(NULL, 0,
4653 4657 ill_taskq_dispatch, ipst, 0, &p0, TS_RUN, minclsyspri);
4654 4658
4655 4659 major = mod_name_to_major(INET_NAME);
4656 4660 (void) ldi_ident_from_major(major, &ipst->ips_ldi_ident);
4657 4661 return (ipst);
4658 4662 }
4659 4663
4660 4664 /*
4661 4665 * Allocate and initialize a DLPI template of the specified length. (May be
4662 4666 * called as writer.)
4663 4667 */
4664 4668 mblk_t *
4665 4669 ip_dlpi_alloc(size_t len, t_uscalar_t prim)
4666 4670 {
4667 4671 mblk_t *mp;
4668 4672
4669 4673 mp = allocb(len, BPRI_MED);
4670 4674 if (!mp)
4671 4675 return (NULL);
4672 4676
4673 4677 /*
4674 4678 * DLPIv2 says that DL_INFO_REQ and DL_TOKEN_REQ (the latter
4675 4679 * of which we don't seem to use) are sent with M_PCPROTO, and
4676 4680 * that other DLPI messages are M_PROTO.
4677 4681 */
4678 4682 if (prim == DL_INFO_REQ) {
4679 4683 mp->b_datap->db_type = M_PCPROTO;
4680 4684 } else {
4681 4685 mp->b_datap->db_type = M_PROTO;
4682 4686 }
4683 4687
4684 4688 mp->b_wptr = mp->b_rptr + len;
4685 4689 bzero(mp->b_rptr, len);
4686 4690 ((dl_unitdata_req_t *)mp->b_rptr)->dl_primitive = prim;
4687 4691 return (mp);
4688 4692 }
4689 4693
4690 4694 /*
4691 4695 * Allocate and initialize a DLPI notification. (May be called as writer.)
4692 4696 */
4693 4697 mblk_t *
4694 4698 ip_dlnotify_alloc(uint_t notification, uint_t data)
4695 4699 {
4696 4700 dl_notify_ind_t *notifyp;
4697 4701 mblk_t *mp;
4698 4702
4699 4703 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4700 4704 return (NULL);
4701 4705
4702 4706 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4703 4707 notifyp->dl_notification = notification;
4704 4708 notifyp->dl_data = data;
4705 4709 return (mp);
4706 4710 }
4707 4711
4708 4712 mblk_t *
4709 4713 ip_dlnotify_alloc2(uint_t notification, uint_t data1, uint_t data2)
4710 4714 {
4711 4715 dl_notify_ind_t *notifyp;
4712 4716 mblk_t *mp;
4713 4717
4714 4718 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4715 4719 return (NULL);
4716 4720
4717 4721 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4718 4722 notifyp->dl_notification = notification;
4719 4723 notifyp->dl_data1 = data1;
4720 4724 notifyp->dl_data2 = data2;
4721 4725 return (mp);
4722 4726 }
4723 4727
4724 4728 /*
4725 4729 * Debug formatting routine. Returns a character string representation of the
4726 4730 * addr in buf, of the form xxx.xxx.xxx.xxx. This routine takes the address
4727 4731 * in the form of a ipaddr_t and calls ip_dot_saddr with a pointer.
4728 4732 *
4729 4733 * Once the ndd table-printing interfaces are removed, this can be changed to
4730 4734 * standard dotted-decimal form.
4731 4735 */
4732 4736 char *
4733 4737 ip_dot_addr(ipaddr_t addr, char *buf)
4734 4738 {
4735 4739 uint8_t *ap = (uint8_t *)&addr;
4736 4740
4737 4741 (void) mi_sprintf(buf, "%03d.%03d.%03d.%03d",
4738 4742 ap[0] & 0xFF, ap[1] & 0xFF, ap[2] & 0xFF, ap[3] & 0xFF);
4739 4743 return (buf);
4740 4744 }
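A worked example of the fixed-width form produced above (a usage sketch, not part of the diff):

	ipaddr_t addr = htonl(0xC0A80001);	/* 192.168.0.1 */
	char buf[16];		/* 4 x 3 digits + 3 dots + NUL = 16 */

	(void) ip_dot_addr(addr, buf);	/* buf = "192.168.000.001" */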
4741 4745
4742 4746 /*
4743 4747 * Write the given MAC address as a printable string in the usual colon-
4744 4748 * separated format.
4745 4749 */
4746 4750 const char *
4747 4751 mac_colon_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
4748 4752 {
4749 4753 char *bp;
4750 4754
4751 4755 if (alen == 0 || buflen < 4)
4752 4756 return ("?");
4753 4757 bp = buf;
4754 4758 for (;;) {
4755 4759 /*
4756 4760 * If there are more MAC address bytes available, but we won't
4757 4761 * have any room to print them, then add "..." to the string
4758 4762 * instead. See below for the 'magic number' explanation.
4759 4763 */
4760 4764 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
4761 4765 (void) strcpy(bp, "...");
4762 4766 break;
4763 4767 }
4764 4768 (void) sprintf(bp, "%02x", *addr++);
4765 4769 bp += 2;
4766 4770 if (--alen == 0)
4767 4771 break;
4768 4772 *bp++ = ':';
4769 4773 buflen -= 3;
4770 4774 /*
4771 4775 * At this point, based on the first 'if' statement above,
4772 4776 * either alen == 1 and buflen >= 3, or alen > 1 and
4773 4777 * buflen >= 4. The first case leaves room for the final "xx"
4774 4778 * number and trailing NUL byte. The second leaves room for at
4775 4779 * least "...". Thus the apparently 'magic' numbers chosen for
4776 4780 * that statement.
4777 4781 */
4778 4782 }
4779 4783 return (buf);
4780 4784 }
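For sizing the caller's buffer: a six-byte Ethernet address needs 6 x 2 hex digits, 5 colons and the terminating NUL, i.e. 18 bytes (a usage sketch):

	uint8_t mac[6] = { 0x00, 0x14, 0x4f, 0x01, 0x02, 0x03 };
	char buf[18];

	(void) mac_colon_addr(mac, sizeof (mac), buf, sizeof (buf));
	/*
	 * buf now holds "00:14:4f:01:02:03"; with a smaller buffer the
	 * loop above would have truncated the output with "...".
	 */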
4781 4785
4782 4786 /*
4783 4787 * Called when it is conceptually a ULP that would send the packet,
4784 4788 * e.g., port unreachable and protocol unreachable. Check that the packet
4785 4789 * would have passed the IPsec global policy before sending the error.
4786 4790 *
4787 4791 * Send an ICMP error after patching up the packet appropriately.
4788 4792 * Uses ip_drop_input and bumps the appropriate MIB.
4789 4793 */
4790 4794 void
4791 4795 ip_fanout_send_icmp_v4(mblk_t *mp, uint_t icmp_type, uint_t icmp_code,
4792 4796 ip_recv_attr_t *ira)
4793 4797 {
4794 4798 ipha_t *ipha;
4795 4799 boolean_t secure;
4796 4800 ill_t *ill = ira->ira_ill;
4797 4801 ip_stack_t *ipst = ill->ill_ipst;
4798 4802 netstack_t *ns = ipst->ips_netstack;
4799 4803 ipsec_stack_t *ipss = ns->netstack_ipsec;
4800 4804
4801 4805 secure = ira->ira_flags & IRAF_IPSEC_SECURE;
4802 4806
4803 4807 /*
4804 4808 * We are generating an icmp error for some inbound packet.
4805 4809 * Called from all ip_fanout_(udp, tcp, proto) functions.
4806 4810 * Before we generate an error, check with global policy
4807 4811 * to see whether this is allowed to enter the system. As
4808 4812 * there is no "conn", we are checking with global policy.
4809 4813 */
4810 4814 ipha = (ipha_t *)mp->b_rptr;
4811 4815 if (secure || ipss->ipsec_inbound_v4_policy_present) {
4812 4816 mp = ipsec_check_global_policy(mp, NULL, ipha, NULL, ira, ns);
4813 4817 if (mp == NULL)
4814 4818 return;
4815 4819 }
4816 4820
4817 4821 /* We never send errors for protocols that we do implement */
4818 4822 if (ira->ira_protocol == IPPROTO_ICMP ||
4819 4823 ira->ira_protocol == IPPROTO_IGMP) {
4820 4824 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4821 4825 ip_drop_input("ip_fanout_send_icmp_v4", mp, ill);
4822 4826 freemsg(mp);
4823 4827 return;
4824 4828 }
4825 4829 /*
4826 4830 * Have to correct checksum since
4827 4831 * the packet might have been
4828 4832 * fragmented and the reassembly code in ip_rput
4829 4833 * does not restore the IP checksum.
4830 4834 */
4831 4835 ipha->ipha_hdr_checksum = 0;
4832 4836 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
4833 4837
4834 4838 switch (icmp_type) {
4835 4839 case ICMP_DEST_UNREACHABLE:
4836 4840 switch (icmp_code) {
4837 4841 case ICMP_PROTOCOL_UNREACHABLE:
4838 4842 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInUnknownProtos);
4839 4843 ip_drop_input("ipIfStatsInUnknownProtos", mp, ill);
4840 4844 break;
4841 4845 case ICMP_PORT_UNREACHABLE:
4842 4846 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
4843 4847 ip_drop_input("ipIfStatsNoPorts", mp, ill);
4844 4848 break;
4845 4849 }
4846 4850
4847 4851 icmp_unreachable(mp, icmp_code, ira);
4848 4852 break;
4849 4853 default:
4850 4854 #ifdef DEBUG
4851 4855 panic("ip_fanout_send_icmp_v4: wrong type");
4852 4856 /*NOTREACHED*/
4853 4857 #else
4854 4858 freemsg(mp);
4855 4859 break;
4856 4860 #endif
4857 4861 }
4858 4862 }
4859 4863
4860 4864 /*
4861 4865 * Used to send an ICMP error message when a packet is received for
4862 4866 * a protocol that is not supported. The mblk passed as argument
4863 4867 * is consumed by this function.
4864 4868 */
4865 4869 void
4866 4870 ip_proto_not_sup(mblk_t *mp, ip_recv_attr_t *ira)
4867 4871 {
4868 4872 ipha_t *ipha;
4869 4873
4870 4874 ipha = (ipha_t *)mp->b_rptr;
4871 4875 if (ira->ira_flags & IRAF_IS_IPV4) {
4872 4876 ASSERT(IPH_HDR_VERSION(ipha) == IP_VERSION);
4873 4877 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
4874 4878 ICMP_PROTOCOL_UNREACHABLE, ira);
4875 4879 } else {
4876 4880 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION);
4877 4881 ip_fanout_send_icmp_v6(mp, ICMP6_PARAM_PROB,
4878 4882 ICMP6_PARAMPROB_NEXTHEADER, ira);
4879 4883 }
4880 4884 }
4881 4885
4882 4886 /*
4883 4887 * Deliver a rawip packet to the given conn, possibly applying ipsec policy.
4884 4888 * Handles IPv4 and IPv6.
4885 4889 * We are responsible for disposing of mp, such as by freemsg() or putnext()
4886 4890 * Caller is responsible for dropping references to the conn.
4887 4891 */
4888 4892 void
4889 4893 ip_fanout_proto_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4890 4894 ip_recv_attr_t *ira)
4891 4895 {
4892 4896 ill_t *ill = ira->ira_ill;
4893 4897 ip_stack_t *ipst = ill->ill_ipst;
4894 4898 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
4895 4899 boolean_t secure;
4896 4900 uint_t protocol = ira->ira_protocol;
4897 4901 iaflags_t iraflags = ira->ira_flags;
4898 4902 queue_t *rq;
4899 4903
4900 4904 secure = iraflags & IRAF_IPSEC_SECURE;
4901 4905
4902 4906 rq = connp->conn_rq;
4903 4907 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
4904 4908 switch (protocol) {
4905 4909 case IPPROTO_ICMPV6:
4906 4910 BUMP_MIB(ill->ill_icmp6_mib, ipv6IfIcmpInOverflows);
4907 4911 break;
4908 4912 case IPPROTO_ICMP:
4909 4913 BUMP_MIB(&ipst->ips_icmp_mib, icmpInOverflows);
4910 4914 break;
4911 4915 default:
4912 4916 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
4913 4917 break;
4914 4918 }
4915 4919 freemsg(mp);
4916 4920 return;
4917 4921 }
4918 4922
4919 4923 ASSERT(!(IPCL_IS_IPTUN(connp)));
4920 4924
4921 4925 if (((iraflags & IRAF_IS_IPV4) ?
4922 4926 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
4923 4927 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
4924 4928 secure) {
4925 4929 mp = ipsec_check_inbound_policy(mp, connp, ipha,
4926 4930 ip6h, ira);
4927 4931 if (mp == NULL) {
4928 4932 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4929 4933 /* Note that mp is NULL */
4930 4934 ip_drop_input("ipIfStatsInDiscards", mp, ill);
4931 4935 return;
4932 4936 }
4933 4937 }
4934 4938
4935 4939 if (iraflags & IRAF_ICMP_ERROR) {
4936 4940 (connp->conn_recvicmp)(connp, mp, NULL, ira);
4937 4941 } else {
4938 4942 ill_t *rill = ira->ira_rill;
4939 4943
4940 4944 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
4941 4945 ira->ira_ill = ira->ira_rill = NULL;
4942 4946 /* Send it upstream */
4943 4947 (connp->conn_recv)(connp, mp, NULL, ira);
4944 4948 ira->ira_ill = ill;
4945 4949 ira->ira_rill = rill;
4946 4950 }
4947 4951 }
4948 4952
4949 4953 /*
4950 4954 * Handle protocols with which IP is less intimate. There
4951 4955 * can be more than one stream bound to a particular
4952 4956 * protocol. When this is the case, normally each one gets a copy
4953 4957 * of any incoming packets.
4954 4958 *
4955 4959 * IPsec NOTE :
4956 4960 *
4957 4961 * Don't allow a secure packet going up a non-secure connection.
4958 4962 * We don't allow this because
4959 4963 *
4960 4964 * 1) Reply might go out in clear which will be dropped at
4961 4965 * the sending side.
4962 4966 * 2) If the reply goes out in clear it will give the
4963 4967 * adversary enough information for getting the key in
4964 4968 * most of the cases.
4965 4969 *
4966 4970 * Moreover getting a secure packet when we expect clear
4967 4971 * implies that SAs were added without checking for
4968 4972 * policy on both ends. This should not happen once ISAKMP
4969 4973 * is used to negotiate SAs as SAs will be added only after
4970 4974 * verifying the policy.
4971 4975 *
4972 4976 * Zones notes:
4973 4977 * Earlier in ip_input on a system with multiple shared-IP zones we
4974 4978 * duplicate the multicast and broadcast packets and send them up
4975 4979 * with each explicit zoneid that exists on that ill.
4976 4980 * This means that here we can match the zoneid with SO_ALLZONES being special.
4977 4981 */
4978 4982 void
4979 4983 ip_fanout_proto_v4(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
4980 4984 {
4981 4985 mblk_t *mp1;
4982 4986 ipaddr_t laddr;
4983 4987 conn_t *connp, *first_connp, *next_connp;
4984 4988 connf_t *connfp;
4985 4989 ill_t *ill = ira->ira_ill;
4986 4990 ip_stack_t *ipst = ill->ill_ipst;
4987 4991
4988 4992 laddr = ipha->ipha_dst;
4989 4993
4990 4994 connfp = &ipst->ips_ipcl_proto_fanout_v4[ira->ira_protocol];
4991 4995 mutex_enter(&connfp->connf_lock);
4992 4996 connp = connfp->connf_head;
4993 4997 for (connp = connfp->connf_head; connp != NULL;
4994 4998 connp = connp->conn_next) {
4995 4999 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
4996 5000 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
4997 5001 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
4998 5002 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) {
4999 5003 break;
5000 5004 }
5001 5005 }
5002 5006
5003 5007 if (connp == NULL) {
5004 5008 /*
5005 5009 * No one bound to these addresses. Is
5006 5010 * there a client that wants all
5007 5011 * unclaimed datagrams?
5008 5012 */
5009 5013 mutex_exit(&connfp->connf_lock);
5010 5014 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
5011 5015 ICMP_PROTOCOL_UNREACHABLE, ira);
5012 5016 return;
5013 5017 }
5014 5018
5015 5019 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5016 5020
5017 5021 CONN_INC_REF(connp);
5018 5022 first_connp = connp;
5019 5023 connp = connp->conn_next;
5020 5024
5021 5025 for (;;) {
5022 5026 while (connp != NULL) {
5023 5027 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
5024 5028 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
5025 5029 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5026 5030 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5027 5031 ira, connp)))
5028 5032 break;
5029 5033 connp = connp->conn_next;
5030 5034 }
5031 5035
5032 5036 if (connp == NULL) {
5033 5037 /* No more interested clients */
5034 5038 connp = first_connp;
5035 5039 break;
5036 5040 }
5037 5041 if (((mp1 = dupmsg(mp)) == NULL) &&
5038 5042 ((mp1 = copymsg(mp)) == NULL)) {
5039 5043 /* Memory allocation failed */
5040 5044 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5041 5045 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5042 5046 connp = first_connp;
5043 5047 break;
5044 5048 }
5045 5049
5046 5050 CONN_INC_REF(connp);
5047 5051 mutex_exit(&connfp->connf_lock);
5048 5052
5049 5053 ip_fanout_proto_conn(connp, mp1, (ipha_t *)mp1->b_rptr, NULL,
5050 5054 ira);
5051 5055
5052 5056 mutex_enter(&connfp->connf_lock);
5053 5057 /* Follow the next pointer before releasing the conn. */
5054 5058 next_connp = connp->conn_next;
5055 5059 CONN_DEC_REF(connp);
5056 5060 connp = next_connp;
5057 5061 }
5058 5062
5059 5063 /* Last one. Send it upstream. */
5060 5064 mutex_exit(&connfp->connf_lock);
5061 5065
5062 5066 ip_fanout_proto_conn(connp, mp, ipha, NULL, ira);
5063 5067
5064 5068 CONN_DEC_REF(connp);
5065 5069 }
5066 5070
5067 5071 /*
5068 5072 * If we have an IPsec NAT-Traversal packet, strip the zero-SPI or
5069 5073 * pass it along to ESP if the SPI is non-zero. Returns the mblk if it
5070 5074 * is not consumed.
5071 5075 *
5072 5076 * One of three things can happen, all of which affect the passed-in mblk:
5073 5077 *
5074 5078 * 1.) The packet is stock UDP and gets its zero-SPI stripped. Return mblk.
5075 5079 *
5076 5080 * 2.) The packet is ESP-in-UDP, gets transformed into an equivalent
5077 5081 * ESP packet, and is passed along to ESP for consumption. Return NULL.
5078 5082 *
5079 5083 * 3.) The packet is an ESP-in-UDP Keepalive. Drop it and return NULL.
5080 5084 */
5081 5085 mblk_t *
5082 5086 zero_spi_check(mblk_t *mp, ip_recv_attr_t *ira)
5083 5087 {
5084 5088 int shift, plen, iph_len;
5085 5089 ipha_t *ipha;
5086 5090 udpha_t *udpha;
5087 5091 uint32_t *spi;
5088 5092 uint32_t esp_ports;
5089 5093 uint8_t *orptr;
5090 5094 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
5091 5095 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5092 5096
5093 5097 ipha = (ipha_t *)mp->b_rptr;
5094 5098 iph_len = ira->ira_ip_hdr_length;
5095 5099 plen = ira->ira_pktlen;
5096 5100
5097 5101 if (plen - iph_len - sizeof (udpha_t) < sizeof (uint32_t)) {
5098 5102 /*
5099 5103 * Most likely a keepalive for the benefit of an intervening
5100 5104 * NAT. These aren't for us, per se, so drop it.
5101 5105 *
5102 5106 * RFC 3947/8 doesn't say for sure what to do for 2-3
5103 5107 * byte packets (keepalives are 1-byte), but we'll drop them
5104 5108 * also.
5105 5109 */
5106 5110 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5107 5111 DROPPER(ipss, ipds_esp_nat_t_ka), &ipss->ipsec_dropper);
5108 5112 return (NULL);
5109 5113 }
5110 5114
5111 5115 if (MBLKL(mp) < iph_len + sizeof (udpha_t) + sizeof (*spi)) {
5112 5116 /* might as well pull it all up - it might be ESP. */
5113 5117 if (!pullupmsg(mp, -1)) {
5114 5118 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5115 5119 DROPPER(ipss, ipds_esp_nomem),
5116 5120 &ipss->ipsec_dropper);
5117 5121 return (NULL);
5118 5122 }
5119 5123
5120 5124 ipha = (ipha_t *)mp->b_rptr;
5121 5125 }
5122 5126 spi = (uint32_t *)(mp->b_rptr + iph_len + sizeof (udpha_t));
5123 5127 if (*spi == 0) {
5124 5128 /* UDP packet - remove 0-spi. */
5125 5129 shift = sizeof (uint32_t);
5126 5130 } else {
5127 5131 /* ESP-in-UDP packet - reduce to ESP. */
5128 5132 ipha->ipha_protocol = IPPROTO_ESP;
5129 5133 shift = sizeof (udpha_t);
5130 5134 }
5131 5135
5132 5136 /* Fix IP header */
5133 5137 ira->ira_pktlen = (plen - shift);
5134 5138 ipha->ipha_length = htons(ira->ira_pktlen);
5135 5139 ipha->ipha_hdr_checksum = 0;
5136 5140
5137 5141 orptr = mp->b_rptr;
5138 5142 mp->b_rptr += shift;
5139 5143
5140 5144 udpha = (udpha_t *)(orptr + iph_len);
5141 5145 if (*spi == 0) {
5142 5146 ASSERT((uint8_t *)ipha == orptr);
5143 5147 udpha->uha_length = htons(plen - shift - iph_len);
5144 5148 iph_len += sizeof (udpha_t); /* For the call to ovbcopy(). */
5145 5149 esp_ports = 0;
5146 5150 } else {
5147 5151 esp_ports = *((uint32_t *)udpha);
5148 5152 ASSERT(esp_ports != 0);
5149 5153 }
5150 5154 ovbcopy(orptr, orptr + shift, iph_len);
5151 5155 if (esp_ports != 0) /* Punt up for ESP processing. */ {
5152 5156 ipha = (ipha_t *)(orptr + shift);
5153 5157
5154 5158 ira->ira_flags |= IRAF_ESP_UDP_PORTS;
5155 5159 ira->ira_esp_udp_ports = esp_ports;
5156 5160 ip_fanout_v4(mp, ipha, ira);
5157 5161 return (NULL);
5158 5162 }
5159 5163 return (mp);
5160 5164 }
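To make the in-place header shuffle above concrete, the two non-drop cases look like this (illustration only):

	/*
	 * Case 1, zero SPI (shift = sizeof (uint32_t)):
	 *
	 *   before:  [ IP hdr ][ UDP hdr ][ 0-SPI ][ payload ]
	 *   after:            [ IP hdr ][ UDP hdr ][ payload ]
	 *
	 * Case 2, ESP-in-UDP (shift = sizeof (udpha_t)):
	 *
	 *   before:  [ IP hdr ][ UDP hdr ][ ESP ... ]
	 *   after:             [ IP hdr ][ ESP ... ]
	 *
	 * ovbcopy() slides the retained header(s) forward by 'shift'
	 * bytes and b_rptr advances to match, so the packet shrinks in
	 * place without allocating a new mblk.
	 */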
5161 5165
5162 5166 /*
5163 5167 * Deliver a udp packet to the given conn, possibly applying ipsec policy.
5164 5168 * Handles IPv4 and IPv6.
5165 5169 * We are responsible for disposing of mp, such as by freemsg() or putnext()
5166 5170 * Caller is responsible for dropping references to the conn.
5167 5171 */
5168 5172 void
5169 5173 ip_fanout_udp_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
5170 5174 ip_recv_attr_t *ira)
5171 5175 {
5172 5176 ill_t *ill = ira->ira_ill;
5173 5177 ip_stack_t *ipst = ill->ill_ipst;
5174 5178 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5175 5179 boolean_t secure;
5176 5180 iaflags_t iraflags = ira->ira_flags;
5177 5181
5178 5182 secure = iraflags & IRAF_IPSEC_SECURE;
5179 5183
5180 5184 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld :
5181 5185 !canputnext(connp->conn_rq)) {
5182 5186 BUMP_MIB(ill->ill_ip_mib, udpIfStatsInOverflows);
5183 5187 freemsg(mp);
5184 5188 return;
5185 5189 }
5186 5190
5187 5191 if (((iraflags & IRAF_IS_IPV4) ?
5188 5192 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
5189 5193 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
5190 5194 secure) {
5191 5195 mp = ipsec_check_inbound_policy(mp, connp, ipha,
5192 5196 ip6h, ira);
5193 5197 if (mp == NULL) {
5194 5198 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5195 5199 /* Note that mp is NULL */
5196 5200 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5197 5201 return;
5198 5202 }
5199 5203 }
5200 5204
5201 5205 /*
5202 5206 * Since this code is not used for UDP unicast we don't need a NAT_T
5203 5207 * check. Only ip_fanout_v4 has that check.
5204 5208 */
5205 5209 if (ira->ira_flags & IRAF_ICMP_ERROR) {
5206 5210 (connp->conn_recvicmp)(connp, mp, NULL, ira);
5207 5211 } else {
5208 5212 ill_t *rill = ira->ira_rill;
5209 5213
5210 5214 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
5211 5215 ira->ira_ill = ira->ira_rill = NULL;
5212 5216 /* Send it upstream */
5213 5217 (connp->conn_recv)(connp, mp, NULL, ira);
5214 5218 ira->ira_ill = ill;
5215 5219 ira->ira_rill = rill;
5216 5220 }
5217 5221 }
5218 5222
5219 5223 /*
5220 5224 * Fanout for UDP packets that are multicast or broadcast, and ICMP errors.
5221 5225 * (Unicast fanout is handled in ip_input_v4.)
5222 5226 *
5223 5227 * If SO_REUSEADDR is set all multicast and broadcast packets
5224 5228 * will be delivered to all conns bound to the same port.
5225 5229 *
5226 5230 * If there is at least one matching AF_INET receiver, then we will
5227 5231 * ignore any AF_INET6 receivers.
5228 5232 * In the special case where an AF_INET socket binds to 0.0.0.0/<port> and an
5229 5233 * AF_INET6 socket binds to ::/<port>, only the AF_INET socket receives the IPv4
5230 5234 * packets.
5231 5235 *
5232 5236 * Zones notes:
5233 5237 * Earlier in ip_input on a system with multiple shared-IP zones we
5234 5238 * duplicate the multicast and broadcast packets and send them up
5235 5239 * with each explicit zoneid that exists on that ill.
5236 5240 * This means that here we can match the zoneid with SO_ALLZONES being special.
5237 5241 */
5238 5242 void
5239 5243 ip_fanout_udp_multi_v4(mblk_t *mp, ipha_t *ipha, uint16_t lport, uint16_t fport,
5240 5244 ip_recv_attr_t *ira)
5241 5245 {
5242 5246 ipaddr_t laddr;
5243 5247 in6_addr_t v6faddr;
5244 5248 conn_t *connp;
5245 5249 connf_t *connfp;
5246 5250 ipaddr_t faddr;
5247 5251 ill_t *ill = ira->ira_ill;
5248 5252 ip_stack_t *ipst = ill->ill_ipst;
5249 5253
5250 5254 ASSERT(ira->ira_flags & (IRAF_MULTIBROADCAST|IRAF_ICMP_ERROR));
5251 5255
5252 5256 laddr = ipha->ipha_dst;
5253 5257 faddr = ipha->ipha_src;
5254 5258
5255 5259 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5256 5260 mutex_enter(&connfp->connf_lock);
5257 5261 connp = connfp->connf_head;
5258 5262
5259 5263 /*
5260 5264 * If SO_REUSEADDR has been set on the first matching conn, we send the
5261 5265 * packet to all clients that have joined the group and
5262 5266 * match the port.
5263 5267 */
5264 5268 while (connp != NULL) {
5265 5269 if ((IPCL_UDP_MATCH(connp, lport, laddr, fport, faddr)) &&
5266 5270 conn_wantpacket(connp, ira, ipha) &&
5267 5271 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5268 5272 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5269 5273 break;
5270 5274 connp = connp->conn_next;
5271 5275 }
5272 5276
5273 5277 if (connp == NULL)
5274 5278 goto notfound;
5275 5279
5276 5280 CONN_INC_REF(connp);
5277 5281
5278 5282 if (connp->conn_reuseaddr) {
5279 5283 conn_t *first_connp = connp;
5280 5284 conn_t *next_connp;
5281 5285 mblk_t *mp1;
5282 5286
5283 5287 connp = connp->conn_next;
5284 5288 for (;;) {
5285 5289 while (connp != NULL) {
5286 5290 if (IPCL_UDP_MATCH(connp, lport, laddr,
5287 5291 fport, faddr) &&
5288 5292 conn_wantpacket(connp, ira, ipha) &&
5289 5293 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5290 5294 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5291 5295 ira, connp)))
5292 5296 break;
5293 5297 connp = connp->conn_next;
5294 5298 }
5295 5299 if (connp == NULL) {
5296 5300 /* No more interested clients */
5297 5301 connp = first_connp;
5298 5302 break;
5299 5303 }
5300 5304 if (((mp1 = dupmsg(mp)) == NULL) &&
5301 5305 ((mp1 = copymsg(mp)) == NULL)) {
5302 5306 /* Memory allocation failed */
5303 5307 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5304 5308 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5305 5309 connp = first_connp;
5306 5310 break;
5307 5311 }
5308 5312 CONN_INC_REF(connp);
5309 5313 mutex_exit(&connfp->connf_lock);
5310 5314
5311 5315 IP_STAT(ipst, ip_udp_fanmb);
5312 5316 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5313 5317 NULL, ira);
5314 5318 mutex_enter(&connfp->connf_lock);
5315 5319 /* Follow the next pointer before releasing the conn */
5316 5320 next_connp = connp->conn_next;
5317 5321 CONN_DEC_REF(connp);
5318 5322 connp = next_connp;
5319 5323 }
5320 5324 }
5321 5325
5322 5326 /* Last one. Send it upstream. */
5323 5327 mutex_exit(&connfp->connf_lock);
5324 5328 IP_STAT(ipst, ip_udp_fanmb);
5325 5329 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5326 5330 CONN_DEC_REF(connp);
5327 5331 return;
5328 5332
5329 5333 notfound:
5330 5334 mutex_exit(&connfp->connf_lock);
5331 5335 /*
5332 5336 * IPv6 endpoints bound to multicast IPv4-mapped addresses
5333 5337 * have already been matched above, since they live in the IPv4
5334 5338 * fanout tables. This implies we only need to
5335 5339 * check for IPv6 in6addr_any endpoints here.
5336 5340 * Thus we compare using ipv6_all_zeros instead of the destination
5337 5341 * address, except for the multicast group membership lookup which
5338 5342 * uses the IPv4 destination.
5339 5343 */
5340 5344 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &v6faddr);
5341 5345 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5342 5346 mutex_enter(&connfp->connf_lock);
5343 5347 connp = connfp->connf_head;
5344 5348 /*
5345 5349 * IPv4 multicast packet being delivered to an AF_INET6
5346 5350 * in6addr_any endpoint.
5347 5351 * Need to check conn_wantpacket(). Note that we use conn_wantpacket()
5348 5352 * and not conn_wantpacket_v6() since any multicast membership is
5349 5353 * for an IPv4-mapped multicast address.
5350 5354 */
5351 5355 while (connp != NULL) {
5352 5356 if (IPCL_UDP_MATCH_V6(connp, lport, ipv6_all_zeros,
5353 5357 fport, v6faddr) &&
5354 5358 conn_wantpacket(connp, ira, ipha) &&
5355 5359 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5356 5360 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5357 5361 break;
5358 5362 connp = connp->conn_next;
5359 5363 }
5360 5364
5361 5365 if (connp == NULL) {
5362 5366 /*
5363 5367 * No one bound to this port. Is
5364 5368 * there a client that wants all
5365 5369 * unclaimed datagrams?
5366 5370 */
5367 5371 mutex_exit(&connfp->connf_lock);
5368 5372
5369 5373 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_UDP].connf_head !=
5370 5374 NULL) {
5371 5375 ASSERT(ira->ira_protocol == IPPROTO_UDP);
5372 5376 ip_fanout_proto_v4(mp, ipha, ira);
5373 5377 } else {
5374 5378 /*
5375 5379 * We used to attempt to send an icmp error here, but
5376 5380 * since this is known to be a multicast packet
5377 5381 * and we don't send icmp errors in response to
5378 5382 * multicast, just drop the packet and give up sooner.
5379 5383 */
5380 5384 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
5381 5385 freemsg(mp);
5382 5386 }
5383 5387 return;
5384 5388 }
5385 5389 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5386 5390
5387 5391 /*
5388 5392 * If SO_REUSEADDR has been set on the first matching conn, we send the
5389 5393 * packet to all clients that have joined the group and
5390 5394 * match the port.
5391 5395 */
5392 5396 if (connp->conn_reuseaddr) {
5393 5397 conn_t *first_connp = connp;
5394 5398 conn_t *next_connp;
5395 5399 mblk_t *mp1;
5396 5400
5397 5401 CONN_INC_REF(connp);
5398 5402 connp = connp->conn_next;
5399 5403 for (;;) {
5400 5404 while (connp != NULL) {
5401 5405 if (IPCL_UDP_MATCH_V6(connp, lport,
5402 5406 ipv6_all_zeros, fport, v6faddr) &&
5403 5407 conn_wantpacket(connp, ira, ipha) &&
5404 5408 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5405 5409 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5406 5410 ira, connp)))
5407 5411 break;
5408 5412 connp = connp->conn_next;
5409 5413 }
5410 5414 if (connp == NULL) {
5411 5415 /* No more interested clients */
5412 5416 connp = first_connp;
5413 5417 break;
5414 5418 }
5415 5419 if (((mp1 = dupmsg(mp)) == NULL) &&
5416 5420 ((mp1 = copymsg(mp)) == NULL)) {
5417 5421 /* Memory allocation failed */
5418 5422 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5419 5423 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5420 5424 connp = first_connp;
5421 5425 break;
5422 5426 }
5423 5427 CONN_INC_REF(connp);
5424 5428 mutex_exit(&connfp->connf_lock);
5425 5429
5426 5430 IP_STAT(ipst, ip_udp_fanmb);
5427 5431 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5428 5432 NULL, ira);
5429 5433 mutex_enter(&connfp->connf_lock);
5430 5434 /* Follow the next pointer before releasing the conn */
5431 5435 next_connp = connp->conn_next;
5432 5436 CONN_DEC_REF(connp);
5433 5437 connp = next_connp;
5434 5438 }
5435 5439 }
5436 5440
5437 5441 /* Last one. Send it upstream. */
5438 5442 mutex_exit(&connfp->connf_lock);
5439 5443 IP_STAT(ipst, ip_udp_fanmb);
5440 5444 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5441 5445 CONN_DEC_REF(connp);
5442 5446 }
5443 5447
5444 5448 /*
5445 5449 * Split an incoming packet's IPv4 options into the label and the other options.
5446 5450 * If 'allocate' is set it does memory allocation for the ip_pkt_t, including
5447 5451 * clearing out any leftover label or options.
5448 5452 * Otherwise it just makes ipp point into the packet.
5449 5453 *
5450 5454 * Returns zero if ok; ENOMEM if the buffer couldn't be allocated.
5451 5455 */
5452 5456 int
5453 5457 ip_find_hdr_v4(ipha_t *ipha, ip_pkt_t *ipp, boolean_t allocate)
5454 5458 {
5455 5459 uchar_t *opt;
5456 5460 uint32_t totallen;
5457 5461 uint32_t optval;
5458 5462 uint32_t optlen;
5459 5463
5460 5464 ipp->ipp_fields |= IPPF_HOPLIMIT | IPPF_TCLASS | IPPF_ADDR;
5461 5465 ipp->ipp_hoplimit = ipha->ipha_ttl;
5462 5466 ipp->ipp_type_of_service = ipha->ipha_type_of_service;
5463 5467 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &ipp->ipp_addr);
5464 5468
5465 5469 /*
5466 5470 * Get length (in 4 byte octets) of IP header options.
5467 5471 */
5468 5472 totallen = ipha->ipha_version_and_hdr_length -
5469 5473 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5470 5474
5471 5475 if (totallen == 0) {
5472 5476 if (!allocate)
5473 5477 return (0);
5474 5478
5475 5479 /* Clear out anything from a previous packet */
5476 5480 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5477 5481 kmem_free(ipp->ipp_ipv4_options,
5478 5482 ipp->ipp_ipv4_options_len);
5479 5483 ipp->ipp_ipv4_options = NULL;
5480 5484 ipp->ipp_ipv4_options_len = 0;
5481 5485 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5482 5486 }
5483 5487 if (ipp->ipp_fields & IPPF_LABEL_V4) {
5484 5488 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5485 5489 ipp->ipp_label_v4 = NULL;
5486 5490 ipp->ipp_label_len_v4 = 0;
5487 5491 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5488 5492 }
5489 5493 return (0);
5490 5494 }
5491 5495
5492 5496 totallen <<= 2;
5493 5497 opt = (uchar_t *)&ipha[1];
5494 5498 if (!is_system_labeled()) {
5495 5499
5496 5500 copyall:
5497 5501 if (!allocate) {
5498 5502 if (totallen != 0) {
5499 5503 ipp->ipp_ipv4_options = opt;
5500 5504 ipp->ipp_ipv4_options_len = totallen;
5501 5505 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5502 5506 }
5503 5507 return (0);
5504 5508 }
5505 5509 /* Just copy all of the options */
5506 5510 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5507 5511 if (totallen == ipp->ipp_ipv4_options_len) {
5508 5512 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5509 5513 return (0);
5510 5514 }
5511 5515 kmem_free(ipp->ipp_ipv4_options,
5512 5516 ipp->ipp_ipv4_options_len);
5513 5517 ipp->ipp_ipv4_options = NULL;
5514 5518 ipp->ipp_ipv4_options_len = 0;
5515 5519 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5516 5520 }
5517 5521 if (totallen == 0)
5518 5522 return (0);
5519 5523
5520 5524 ipp->ipp_ipv4_options = kmem_alloc(totallen, KM_NOSLEEP);
5521 5525 if (ipp->ipp_ipv4_options == NULL)
5522 5526 return (ENOMEM);
5523 5527 ipp->ipp_ipv4_options_len = totallen;
5524 5528 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5525 5529 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5526 5530 return (0);
5527 5531 }
5528 5532
5529 5533 if (allocate && (ipp->ipp_fields & IPPF_LABEL_V4)) {
5530 5534 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5531 5535 ipp->ipp_label_v4 = NULL;
5532 5536 ipp->ipp_label_len_v4 = 0;
5533 5537 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5534 5538 }
5535 5539
5536 5540 /*
5537 5541 * Search for CIPSO option.
5538 5542 * We assume CIPSO is first in options if it is present.
5539 5543 * If it isn't, then ipp_ipv4_options will not include the options
5540 5544 * prior to the CIPSO option.
5541 5545 */
5542 5546 while (totallen != 0) {
5543 5547 switch (optval = opt[IPOPT_OPTVAL]) {
5544 5548 case IPOPT_EOL:
5545 5549 return (0);
5546 5550 case IPOPT_NOP:
5547 5551 optlen = 1;
5548 5552 break;
5549 5553 default:
5550 5554 if (totallen <= IPOPT_OLEN)
5551 5555 return (EINVAL);
5552 5556 optlen = opt[IPOPT_OLEN];
5553 5557 if (optlen < 2)
5554 5558 return (EINVAL);
5555 5559 }
5556 5560 if (optlen > totallen)
5557 5561 return (EINVAL);
5558 5562
5559 5563 switch (optval) {
5560 5564 case IPOPT_COMSEC:
5561 5565 if (!allocate) {
5562 5566 ipp->ipp_label_v4 = opt;
5563 5567 ipp->ipp_label_len_v4 = optlen;
5564 5568 ipp->ipp_fields |= IPPF_LABEL_V4;
5565 5569 } else {
5566 5570 ipp->ipp_label_v4 = kmem_alloc(optlen,
5567 5571 KM_NOSLEEP);
5568 5572 if (ipp->ipp_label_v4 == NULL)
5569 5573 return (ENOMEM);
5570 5574 ipp->ipp_label_len_v4 = optlen;
5571 5575 ipp->ipp_fields |= IPPF_LABEL_V4;
5572 5576 bcopy(opt, ipp->ipp_label_v4, optlen);
5573 5577 }
5574 5578 totallen -= optlen;
5575 5579 opt += optlen;
5576 5580
5577 5581 /* Skip padding bytes until we get to a multiple of 4 */
5578 5582 while ((totallen & 3) != 0 && opt[0] == IPOPT_NOP) {
5579 5583 totallen--;
5580 5584 opt++;
5581 5585 }
5582 5586 /* Remaining as ipp_ipv4_options */
5583 5587 goto copyall;
5584 5588 }
5585 5589 totallen -= optlen;
5586 5590 opt += optlen;
5587 5591 }
5588 5592 /* No CIPSO found; return everything as ipp_ipv4_options */
5589 5593 totallen = ipha->ipha_version_and_hdr_length -
5590 5594 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5591 5595 totallen <<= 2;
5592 5596 opt = (uchar_t *)&ipha[1];
5593 5597 goto copyall;
5594 5598 }
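A worked example of the options-length arithmetic at the top of this function:

	/*
	 * For a 24-byte IPv4 header, ipha_version_and_hdr_length is 0x46
	 * (version 4 in the high nibble, six 4-byte words in the low).
	 * Subtracting (IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS,
	 * i.e. 0x45, leaves totallen = 1 word; "totallen <<= 2" then
	 * yields the 4 bytes of options after the 20-byte fixed header.
	 */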
5595 5599
5596 5600 /*
5597 5601 * Efficient versions of lookup for an IRE when we only
5598 5602 * match the address.
5599 5603 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5600 5604 * Does not handle multicast addresses.
5601 5605 */
5602 5606 uint_t
5603 5607 ip_type_v4(ipaddr_t addr, ip_stack_t *ipst)
5604 5608 {
5605 5609 ire_t *ire;
5606 5610 uint_t result;
5607 5611
5608 5612 ire = ire_ftable_lookup_simple_v4(addr, 0, ipst, NULL);
5609 5613 ASSERT(ire != NULL);
5610 5614 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5611 5615 result = IRE_NOROUTE;
5612 5616 else
5613 5617 result = ire->ire_type;
5614 5618 ire_refrele(ire);
5615 5619 return (result);
5616 5620 }
5617 5621
5618 5622 /*
5619 5623 * Efficient versions of lookup for an IRE when we only
5620 5624 * match the address.
5621 5625 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5622 5626 * Does not handle multicast addresses.
5623 5627 */
5624 5628 uint_t
5625 5629 ip_type_v6(const in6_addr_t *addr, ip_stack_t *ipst)
5626 5630 {
5627 5631 ire_t *ire;
5628 5632 uint_t result;
5629 5633
5630 5634 ire = ire_ftable_lookup_simple_v6(addr, 0, ipst, NULL);
5631 5635 ASSERT(ire != NULL);
5632 5636 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5633 5637 result = IRE_NOROUTE;
5634 5638 else
5635 5639 result = ire->ire_type;
5636 5640 ire_refrele(ire);
5637 5641 return (result);
5638 5642 }
5639 5643
5640 5644 /*
5641 5645 * Nobody should be sending
5642 5646 * packets up this stream
5643 5647 */
5644 5648 static void
5645 5649 ip_lrput(queue_t *q, mblk_t *mp)
5646 5650 {
5647 5651 switch (mp->b_datap->db_type) {
5648 5652 case M_FLUSH:
5649 5653 /* Turn around */
5650 5654 if (*mp->b_rptr & FLUSHW) {
5651 5655 *mp->b_rptr &= ~FLUSHR;
5652 5656 qreply(q, mp);
5653 5657 return;
5654 5658 }
5655 5659 break;
5656 5660 }
5657 5661 freemsg(mp);
5658 5662 }
5659 5663
5660 5664 /* Nobody should be sending packets down this stream */
5661 5665 /* ARGSUSED */
5662 5666 void
5663 5667 ip_lwput(queue_t *q, mblk_t *mp)
5664 5668 {
5665 5669 freemsg(mp);
5666 5670 }
5667 5671
5668 5672 /*
5669 5673 * Move the first hop in any source route to ipha_dst and remove that part of
5670 5674 * the source route. Called by other protocols. Errors in option formatting
5671 5675 * are ignored - will be handled by ip_output_options. Return the final
5672 5676 * destination (either ipha_dst or the last entry in a source route).
5673 5677 */
5674 5678 ipaddr_t
5675 5679 ip_massage_options(ipha_t *ipha, netstack_t *ns)
5676 5680 {
5677 5681 ipoptp_t opts;
5678 5682 uchar_t *opt;
5679 5683 uint8_t optval;
5680 5684 uint8_t optlen;
5681 5685 ipaddr_t dst;
5682 5686 int i;
5683 5687 ip_stack_t *ipst = ns->netstack_ip;
5684 5688
5685 5689 ip2dbg(("ip_massage_options\n"));
5686 5690 dst = ipha->ipha_dst;
5687 5691 for (optval = ipoptp_first(&opts, ipha);
5688 5692 optval != IPOPT_EOL;
5689 5693 optval = ipoptp_next(&opts)) {
5690 5694 opt = opts.ipoptp_cur;
5691 5695 switch (optval) {
5692 5696 uint8_t off;
5693 5697 case IPOPT_SSRR:
5694 5698 case IPOPT_LSRR:
5695 5699 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
5696 5700 ip1dbg(("ip_massage_options: bad src route\n"));
5697 5701 break;
5698 5702 }
5699 5703 optlen = opts.ipoptp_len;
5700 5704 off = opt[IPOPT_OFFSET];
5701 5705 off--;
5702 5706 redo_srr:
5703 5707 if (optlen < IP_ADDR_LEN ||
5704 5708 off > optlen - IP_ADDR_LEN) {
5705 5709 /* End of source route */
5706 5710 ip1dbg(("ip_massage_options: end of SR\n"));
5707 5711 break;
5708 5712 }
5709 5713 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
5710 5714 ip1dbg(("ip_massage_options: next hop 0x%x\n",
5711 5715 ntohl(dst)));
5712 5716 /*
5713 5717 * Check if our address is present more than
5714 5718 * once as consecutive hops in source route.
5715 5719 * XXX verify per-interface ip_forwarding
5716 5720 * for source route?
5717 5721 */
5718 5722 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
5719 5723 off += IP_ADDR_LEN;
5720 5724 goto redo_srr;
5721 5725 }
5722 5726 if (dst == htonl(INADDR_LOOPBACK)) {
5723 5727 ip1dbg(("ip_massage_options: loopback addr in "
5724 5728 "source route!\n"));
5725 5729 break;
5726 5730 }
5727 5731 /*
5728 5732 * Update ipha_dst to be the first hop and remove the
5729 5733 * first hop from the source route (by overwriting
5730 5734 * part of the option with NOP options).
5731 5735 */
5732 5736 ipha->ipha_dst = dst;
5733 5737 /* Put the last entry in dst */
5734 5738 off = ((optlen - IP_ADDR_LEN - 3) & ~(IP_ADDR_LEN-1)) +
5735 5739 3;
5736 5740 bcopy(&opt[off], &dst, IP_ADDR_LEN);
5737 5741
5738 5742 ip1dbg(("ip_massage_options: last hop 0x%x\n",
5739 5743 ntohl(dst)));
5740 5744 /* Move down and overwrite */
5741 5745 opt[IP_ADDR_LEN] = opt[0];
5742 5746 opt[IP_ADDR_LEN+1] = opt[IPOPT_OLEN] - IP_ADDR_LEN;
5743 5747 opt[IP_ADDR_LEN+2] = opt[IPOPT_OFFSET];
5744 5748 for (i = 0; i < IP_ADDR_LEN; i++)
5745 5749 opt[i] = IPOPT_NOP;
5746 5750 break;
5747 5751 }
5748 5752 }
5749 5753 return (dst);
5750 5754 }
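The "last entry" offset computation in ip_massage_options() is dense, so a worked example:

	/*
	 * An LSRR option carrying three addresses has optlen = 3 + 3*4
	 * = 15 (type, length and pointer bytes, then the address list
	 * starting at offset 3).  Then
	 *
	 *	off = ((optlen - IP_ADDR_LEN - 3) & ~(IP_ADDR_LEN - 1)) + 3
	 *	    = ((15 - 4 - 3) & ~3) + 3 = (8 & ~3) + 3 = 11,
	 *
	 * which is where the third and last address begins: the three
	 * addresses occupy option offsets 3-6, 7-10 and 11-14.
	 */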
5751 5755
5752 5756 /*
5753 5757 * Return the network mask
5754 5758 * associated with the specified address.
5755 5759 */
5756 5760 ipaddr_t
5757 5761 ip_net_mask(ipaddr_t addr)
5758 5762 {
5759 5763 uchar_t *up = (uchar_t *)&addr;
5760 5764 ipaddr_t mask = 0;
5761 5765 uchar_t *maskp = (uchar_t *)&mask;
5762 5766
5763 5767 #if defined(__i386) || defined(__amd64)
5764 5768 #define TOTALLY_BRAIN_DAMAGED_C_COMPILER
5765 5769 #endif
5766 5770 #ifdef TOTALLY_BRAIN_DAMAGED_C_COMPILER
5767 5771 maskp[0] = maskp[1] = maskp[2] = maskp[3] = 0;
5768 5772 #endif
5769 5773 if (CLASSD(addr)) {
5770 5774 maskp[0] = 0xF0;
5771 5775 return (mask);
5772 5776 }
5773 5777
5774 5778 /* We assume Class E default netmask to be 32 */
5775 5779 if (CLASSE(addr))
5776 5780 return (0xffffffffU);
5777 5781
5778 5782 if (addr == 0)
5779 5783 return (0);
5780 5784 maskp[0] = 0xFF;
5781 5785 if ((up[0] & 0x80) == 0)
5782 5786 return (mask);
5783 5787
5784 5788 maskp[1] = 0xFF;
5785 5789 if ((up[0] & 0xC0) == 0x80)
5786 5790 return (mask);
5787 5791
5788 5792 maskp[2] = 0xFF;
5789 5793 if ((up[0] & 0xE0) == 0xC0)
5790 5794 return (mask);
5791 5795
5792 5796 /* Otherwise return no mask */
5793 5797 return ((ipaddr_t)0);
5794 5798 }
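The classful behavior, illustrated (both the argument and the result are in network byte order; a sketch, not in the source):

	ASSERT(ip_net_mask(htonl(0x0A010203)) == htonl(0xFF000000));
						/* 10.1.2.3   -> /8 */
	ASSERT(ip_net_mask(htonl(0xAC100001)) == htonl(0xFFFF0000));
						/* 172.16.0.1 -> /16 */
	ASSERT(ip_net_mask(htonl(0xC0000201)) == htonl(0xFFFFFF00));
						/* 192.0.2.1  -> /24 */
	ASSERT(ip_net_mask(htonl(0xE0000001)) == htonl(0xF0000000));
						/* class D multicast */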
5795 5799
5796 5800 /* Name/Value Table Lookup Routine */
5797 5801 char *
5798 5802 ip_nv_lookup(nv_t *nv, int value)
5799 5803 {
5800 5804 if (!nv)
5801 5805 return (NULL);
5802 5806 for (; nv->nv_name; nv++) {
5803 5807 if (nv->nv_value == value)
5804 5808 return (nv->nv_name);
5805 5809 }
5806 5810 return ("unknown");
5807 5811 }
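Typical use of the lookup (a sketch; example_nv is hypothetical, and the designated initializers sidestep any assumption about nv_t field order):

	static nv_t example_nv[] = {
		{ .nv_value = 1, .nv_name = "one" },
		{ .nv_value = 2, .nv_name = "two" },
		{ .nv_name = NULL }	/* nv_name == NULL terminates */
	};

	char *two = ip_nv_lookup(example_nv, 2);	/* returns "two" */
	char *unk = ip_nv_lookup(example_nv, 9);	/* returns "unknown" */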
5808 5812
5809 5813 static int
5810 5814 ip_wait_for_info_ack(ill_t *ill)
5811 5815 {
5812 5816 int err;
5813 5817
5814 5818 mutex_enter(&ill->ill_lock);
5815 5819 while (ill->ill_state_flags & ILL_LL_SUBNET_PENDING) {
5816 5820 /*
5817 5821 * Return value of 0 indicates a pending signal.
5818 5822 */
5819 5823 err = cv_wait_sig(&ill->ill_cv, &ill->ill_lock);
5820 5824 if (err == 0) {
5821 5825 mutex_exit(&ill->ill_lock);
5822 5826 return (EINTR);
5823 5827 }
5824 5828 }
5825 5829 mutex_exit(&ill->ill_lock);
5826 5830 /*
5827 5831 * ip_rput_other could have set an error in ill_error on
5828 5832 * receipt of M_ERROR.
5829 5833 */
5830 5834 return (ill->ill_error);
5831 5835 }
5832 5836
5833 5837 /*
5834 5838 * This is a module open, i.e. this is a control stream for access
5835 5839 * to a DLPI device. We allocate an ill_t as the instance data in
5836 5840 * this case.
5837 5841 */
5838 5842 static int
5839 5843 ip_modopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5840 5844 {
5841 5845 ill_t *ill;
5842 5846 int err;
5843 5847 zoneid_t zoneid;
5844 5848 netstack_t *ns;
5845 5849 ip_stack_t *ipst;
5846 5850
5847 5851 /*
5848 5852 * Prevent unprivileged processes from pushing IP so that
5849 5853 * they can't send raw IP.
5850 5854 */
5851 5855 if (secpolicy_net_rawaccess(credp) != 0)
5852 5856 return (EPERM);
5853 5857
5854 5858 ns = netstack_find_by_cred(credp);
5855 5859 ASSERT(ns != NULL);
5856 5860 ipst = ns->netstack_ip;
5857 5861 ASSERT(ipst != NULL);
5858 5862
5859 5863 /*
5860 5864 * For exclusive stacks we set the zoneid to zero
5861 5865 * to make IP operate as if in the global zone.
5862 5866 */
5863 5867 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
5864 5868 zoneid = GLOBAL_ZONEID;
5865 5869 else
5866 5870 zoneid = crgetzoneid(credp);
5867 5871
5868 5872 ill = (ill_t *)mi_open_alloc_sleep(sizeof (ill_t));
5869 5873 q->q_ptr = WR(q)->q_ptr = ill;
5870 5874 ill->ill_ipst = ipst;
5871 5875 ill->ill_zoneid = zoneid;
5872 5876
5873 5877 /*
5874 5878 * ill_init initializes the ill fields and then sends down
5875 5879 * a DL_INFO_REQ after calling qprocson.
5876 5880 */
5877 5881 err = ill_init(q, ill);
5878 5882
5879 5883 if (err != 0) {
5880 5884 mi_free(ill);
5881 5885 netstack_rele(ipst->ips_netstack);
5882 5886 q->q_ptr = NULL;
5883 5887 WR(q)->q_ptr = NULL;
5884 5888 return (err);
5885 5889 }
5886 5890
5887 5891 /*
5888 5892 * Wait for the DL_INFO_ACK if a DL_INFO_REQ was sent.
5889 5893 *
5890 5894 	 * ill_init initializes the ipsq, marking this thread as
5891 5895 	 * the writer.
5892 5896 */
5893 5897 ipsq_exit(ill->ill_phyint->phyint_ipsq);
5894 5898 err = ip_wait_for_info_ack(ill);
5895 5899 if (err == 0)
5896 5900 ill->ill_credp = credp;
5897 5901 else
5898 5902 goto fail;
5899 5903
5900 5904 crhold(credp);
5901 5905
5902 5906 mutex_enter(&ipst->ips_ip_mi_lock);
5903 5907 err = mi_open_link(&ipst->ips_ip_g_head, (IDP)q->q_ptr, devp, flag,
5904 5908 sflag, credp);
5905 5909 mutex_exit(&ipst->ips_ip_mi_lock);
5906 5910 fail:
5907 5911 if (err) {
5908 5912 (void) ip_close(q, 0);
5909 5913 return (err);
5910 5914 }
5911 5915 return (0);
5912 5916 }
5913 5917
5914 5918 /* For /dev/ip aka AF_INET open */
5915 5919 int
5916 5920 ip_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5917 5921 {
5918 5922 return (ip_open(q, devp, flag, sflag, credp, B_FALSE));
5919 5923 }
5920 5924
5921 5925 /* For /dev/ip6 aka AF_INET6 open */
5922 5926 int
5923 5927 ip_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5924 5928 {
5925 5929 return (ip_open(q, devp, flag, sflag, credp, B_TRUE));
5926 5930 }
5927 5931
5928 5932 /* IP open routine. */
5929 5933 int
5930 5934 ip_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
5931 5935 boolean_t isv6)
5932 5936 {
5933 5937 conn_t *connp;
5934 5938 major_t maj;
5935 5939 zoneid_t zoneid;
5936 5940 netstack_t *ns;
5937 5941 ip_stack_t *ipst;
5938 5942
5939 5943 /* Allow reopen. */
5940 5944 if (q->q_ptr != NULL)
5941 5945 return (0);
5942 5946
5943 5947 if (sflag & MODOPEN) {
5944 5948 /* This is a module open */
5945 5949 return (ip_modopen(q, devp, flag, sflag, credp));
5946 5950 }
5947 5951
5948 5952 if ((flag & ~(FKLYR)) == IP_HELPER_STR) {
5949 5953 /*
5950 5954 		 * Non-STREAMS-based socket looking for a stream
5951 5955 		 * to access IP.
5952 5956 */
5953 5957 return (ip_helper_stream_setup(q, devp, flag, sflag,
5954 5958 credp, isv6));
5955 5959 }
5956 5960
5957 5961 ns = netstack_find_by_cred(credp);
5958 5962 ASSERT(ns != NULL);
5959 5963 ipst = ns->netstack_ip;
5960 5964 ASSERT(ipst != NULL);
5961 5965
5962 5966 /*
5963 5967 * For exclusive stacks we set the zoneid to zero
5964 5968 * to make IP operate as if in the global zone.
5965 5969 */
5966 5970 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
5967 5971 zoneid = GLOBAL_ZONEID;
5968 5972 else
5969 5973 zoneid = crgetzoneid(credp);
5970 5974
5971 5975 /*
5972 5976 * We are opening as a device. This is an IP client stream, and we
5973 5977 	 * allocate a conn_t as the instance data.
5974 5978 */
5975 5979 connp = ipcl_conn_create(IPCL_IPCCONN, KM_SLEEP, ipst->ips_netstack);
5976 5980
5977 5981 /*
5978 5982 * ipcl_conn_create did a netstack_hold. Undo the hold that was
5979 5983 * done by netstack_find_by_cred()
5980 5984 */
5981 5985 netstack_rele(ipst->ips_netstack);
5982 5986
5983 5987 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM;
5984 5988 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */
5985 5989 connp->conn_ixa->ixa_zoneid = zoneid;
5986 5990 connp->conn_zoneid = zoneid;
5987 5991
5988 5992 connp->conn_rq = q;
5989 5993 q->q_ptr = WR(q)->q_ptr = connp;
5990 5994
5991 5995 /* Minor tells us which /dev entry was opened */
5992 5996 if (isv6) {
5993 5997 connp->conn_family = AF_INET6;
5994 5998 connp->conn_ipversion = IPV6_VERSION;
5995 5999 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4;
5996 6000 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
5997 6001 } else {
5998 6002 connp->conn_family = AF_INET;
5999 6003 connp->conn_ipversion = IPV4_VERSION;
6000 6004 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4;
6001 6005 }
6002 6006
6003 6007 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
6004 6008 ((connp->conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
6005 6009 connp->conn_minor_arena = ip_minor_arena_la;
6006 6010 } else {
6007 6011 /*
6008 6012 * Either minor numbers in the large arena were exhausted
6009 6013 		 * or a non-socket application is doing the open.
6010 6014 * Try to allocate from the small arena.
6011 6015 */
6012 6016 if ((connp->conn_dev =
6013 6017 inet_minor_alloc(ip_minor_arena_sa)) == 0) {
6014 6018 /* CONN_DEC_REF takes care of netstack_rele() */
6015 6019 q->q_ptr = WR(q)->q_ptr = NULL;
6016 6020 CONN_DEC_REF(connp);
6017 6021 return (EBUSY);
6018 6022 }
6019 6023 connp->conn_minor_arena = ip_minor_arena_sa;
6020 6024 }
6021 6025
6022 6026 maj = getemajor(*devp);
6023 6027 *devp = makedevice(maj, (minor_t)connp->conn_dev);
6024 6028
6025 6029 /*
6026 6030 * connp->conn_cred is crfree()ed in ipcl_conn_destroy()
6027 6031 */
6028 6032 connp->conn_cred = credp;
6029 6033 connp->conn_cpid = curproc->p_pid;
6030 6034 /* Cache things in ixa without an extra refhold */
6031 6035 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
6032 6036 connp->conn_ixa->ixa_cred = connp->conn_cred;
6033 6037 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
6034 6038 if (is_system_labeled())
6035 6039 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred);
6036 6040
6037 6041 /*
6038 6042 * Handle IP_IOC_RTS_REQUEST and other ioctls which use conn_recv
6039 6043 */
6040 6044 connp->conn_recv = ip_conn_input;
6041 6045 connp->conn_recvicmp = ip_conn_input_icmp;
6042 6046
6043 6047 crhold(connp->conn_cred);
6044 6048
6045 6049 /*
6046 6050 * If the caller has the process-wide flag set, then default to MAC
6047 6051 * exempt mode. This allows read-down to unlabeled hosts.
6048 6052 */
6049 6053 if (getpflags(NET_MAC_AWARE, credp) != 0)
6050 6054 connp->conn_mac_mode = CONN_MAC_AWARE;
6051 6055
6052 6056 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
6053 6057
6054 6058 connp->conn_rq = q;
6055 6059 connp->conn_wq = WR(q);
6056 6060
6057 6061 /* Non-zero default values */
6058 6062 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP;
6059 6063
6060 6064 /*
6061 6065 * Make the conn globally visible to walkers
6062 6066 */
6063 6067 ASSERT(connp->conn_ref == 1);
6064 6068 mutex_enter(&connp->conn_lock);
6065 6069 connp->conn_state_flags &= ~CONN_INCIPIENT;
6066 6070 mutex_exit(&connp->conn_lock);
6067 6071
6068 6072 qprocson(q);
6069 6073
6070 6074 return (0);
6071 6075 }
6072 6076
6073 6077 /*
6074 6078  * Set IPsec policy from an ipsec_req_t. If the req is not "zero" and is
6075 6079  * valid, its settings are copied to the conn_t. If the req is "zero", the
6076 6080  * policy is zeroed out. A "zero" policy has zero
6077 6081  * ipsr_{ah,esp,self_encap}_req fields.
6078 6082 * We keep only the latest setting of the policy and thus policy setting
6079 6083 * is not incremental/cumulative.
6080 6084 *
6081 6085 * Requests to set policies with multiple alternative actions will
6082 6086 * go through a different API.
6083 6087 */
6084 6088 int
6085 6089 ipsec_set_req(cred_t *cr, conn_t *connp, ipsec_req_t *req)
6086 6090 {
6087 6091 uint_t ah_req = 0;
6088 6092 uint_t esp_req = 0;
6089 6093 uint_t se_req = 0;
6090 6094 ipsec_act_t *actp = NULL;
6091 6095 uint_t nact;
6092 6096 ipsec_policy_head_t *ph;
6093 6097 boolean_t is_pol_reset, is_pol_inserted = B_FALSE;
6094 6098 int error = 0;
6095 6099 netstack_t *ns = connp->conn_netstack;
6096 6100 ip_stack_t *ipst = ns->netstack_ip;
6097 6101 ipsec_stack_t *ipss = ns->netstack_ipsec;
6098 6102
6099 6103 #define REQ_MASK (IPSEC_PREF_REQUIRED|IPSEC_PREF_NEVER)
6100 6104
6101 6105 /*
6102 6106 * The IP_SEC_OPT option does not allow variable length parameters,
6103 6107 * hence a request cannot be NULL.
6104 6108 */
6105 6109 if (req == NULL)
6106 6110 return (EINVAL);
6107 6111
6108 6112 ah_req = req->ipsr_ah_req;
6109 6113 esp_req = req->ipsr_esp_req;
6110 6114 se_req = req->ipsr_self_encap_req;
6111 6115
6112 6116 /* Don't allow setting self-encap without one or more of AH/ESP. */
6113 6117 if (se_req != 0 && esp_req == 0 && ah_req == 0)
6114 6118 return (EINVAL);
6115 6119
6116 6120 /*
6117 6121 * Are we dealing with a request to reset the policy (i.e.
6118 6122 	 * zero requests)?
6119 6123 */
6120 6124 is_pol_reset = ((ah_req & REQ_MASK) == 0 &&
6121 6125 (esp_req & REQ_MASK) == 0 &&
6122 6126 (se_req & REQ_MASK) == 0);
6123 6127
6124 6128 if (!is_pol_reset) {
6125 6129 /*
6126 6130 * If we couldn't load IPsec, fail with "protocol
6127 6131 * not supported".
6128 6132 * IPsec may not have been loaded for a request with zero
6129 6133 * policies, so we don't fail in this case.
6130 6134 */
6131 6135 mutex_enter(&ipss->ipsec_loader_lock);
6132 6136 if (ipss->ipsec_loader_state != IPSEC_LOADER_SUCCEEDED) {
6133 6137 mutex_exit(&ipss->ipsec_loader_lock);
6134 6138 return (EPROTONOSUPPORT);
6135 6139 }
6136 6140 mutex_exit(&ipss->ipsec_loader_lock);
6137 6141
6138 6142 /*
6139 6143 * Test for valid requests. Invalid algorithms
6140 6144 * need to be tested by IPsec code because new
6141 6145 * algorithms can be added dynamically.
6142 6146 */
6143 6147 if ((ah_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6144 6148 (esp_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6145 6149 (se_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0) {
6146 6150 return (EINVAL);
6147 6151 }
6148 6152
6149 6153 /*
6150 6154 * Only privileged users can issue these
6151 6155 * requests.
6152 6156 */
6153 6157 if (((ah_req & IPSEC_PREF_NEVER) ||
6154 6158 (esp_req & IPSEC_PREF_NEVER) ||
6155 6159 (se_req & IPSEC_PREF_NEVER)) &&
6156 6160 secpolicy_ip_config(cr, B_FALSE) != 0) {
6157 6161 return (EPERM);
6158 6162 }
6159 6163
6160 6164 /*
6161 6165 * The IPSEC_PREF_REQUIRED and IPSEC_PREF_NEVER
6162 6166 * are mutually exclusive.
6163 6167 */
6164 6168 if (((ah_req & REQ_MASK) == REQ_MASK) ||
6165 6169 ((esp_req & REQ_MASK) == REQ_MASK) ||
6166 6170 ((se_req & REQ_MASK) == REQ_MASK)) {
6167 6171 /* Both of them are set */
6168 6172 return (EINVAL);
6169 6173 }
6170 6174 }
6171 6175
6172 6176 ASSERT(MUTEX_HELD(&connp->conn_lock));
6173 6177
6174 6178 /*
6175 6179 * If we have already cached policies in conn_connect(), don't
6176 6180 * let them change now. We cache policies for connections
6177 6181 * whose src,dst [addr, port] is known.
6178 6182 */
6179 6183 if (connp->conn_policy_cached) {
6180 6184 return (EINVAL);
6181 6185 }
6182 6186
6183 6187 /*
6184 6188 	 * We have zero policies; reset the connection policy if already
6185 6189 * set. This will cause the connection to inherit the
6186 6190 * global policy, if any.
6187 6191 */
6188 6192 if (is_pol_reset) {
6189 6193 if (connp->conn_policy != NULL) {
6190 6194 IPPH_REFRELE(connp->conn_policy, ipst->ips_netstack);
6191 6195 connp->conn_policy = NULL;
6192 6196 }
6193 6197 connp->conn_in_enforce_policy = B_FALSE;
6194 6198 connp->conn_out_enforce_policy = B_FALSE;
6195 6199 return (0);
6196 6200 }
6197 6201
6198 6202 ph = connp->conn_policy = ipsec_polhead_split(connp->conn_policy,
6199 6203 ipst->ips_netstack);
6200 6204 if (ph == NULL)
6201 6205 goto enomem;
6202 6206
6203 6207 ipsec_actvec_from_req(req, &actp, &nact, ipst->ips_netstack);
6204 6208 if (actp == NULL)
6205 6209 goto enomem;
6206 6210
6207 6211 /*
6208 6212 * Always insert IPv4 policy entries, since they can also apply to
6209 6213 * ipv6 sockets being used in ipv4-compat mode.
6210 6214 */
6211 6215 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6212 6216 IPSEC_TYPE_INBOUND, ns))
6213 6217 goto enomem;
6214 6218 is_pol_inserted = B_TRUE;
6215 6219 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6216 6220 IPSEC_TYPE_OUTBOUND, ns))
6217 6221 goto enomem;
6218 6222
6219 6223 /*
6220 6224 	 * If we're looking at a v6 socket, also insert the v6-specific
6221 6225 * entries.
6222 6226 */
6223 6227 if (connp->conn_family == AF_INET6) {
6224 6228 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6225 6229 IPSEC_TYPE_INBOUND, ns))
6226 6230 goto enomem;
6227 6231 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6228 6232 IPSEC_TYPE_OUTBOUND, ns))
6229 6233 goto enomem;
6230 6234 }
6231 6235
6232 6236 ipsec_actvec_free(actp, nact);
6233 6237
6234 6238 /*
6235 6239 * If the requests need security, set enforce_policy.
6236 6240 * If the requests are IPSEC_PREF_NEVER, one should
6237 6241 * still set conn_out_enforce_policy so that ip_set_destination
6238 6242 	 * marks the ip_xmit_attr_t appropriately. This is needed so that
6239 6243 * for connections that we don't cache policy in at connect time,
6240 6244 * if global policy matches in ip_output_attach_policy, we
6241 6245 * don't wrongly inherit global policy. Similarly, we need
6242 6246 * to set conn_in_enforce_policy also so that we don't verify
6243 6247 * policy wrongly.
6244 6248 */
6245 6249 if ((ah_req & REQ_MASK) != 0 ||
6246 6250 (esp_req & REQ_MASK) != 0 ||
6247 6251 (se_req & REQ_MASK) != 0) {
6248 6252 connp->conn_in_enforce_policy = B_TRUE;
6249 6253 connp->conn_out_enforce_policy = B_TRUE;
6250 6254 }
6251 6255
6252 6256 return (error);
6253 6257 #undef REQ_MASK
6254 6258
6255 6259 /*
6256 6260 * Common memory-allocation-failure exit path.
6257 6261 */
6258 6262 enomem:
6259 6263 if (actp != NULL)
6260 6264 ipsec_actvec_free(actp, nact);
6261 6265 if (is_pol_inserted)
6262 6266 ipsec_polhead_flush(ph, ns);
6263 6267 return (ENOMEM);
6264 6268 }
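
/*
 * Userland view of the interface validated above (illustrative only, not
 * part of this change): an application requests per-socket IPsec with the
 * IP_SEC_OPT option and an ipsec_req_t. Per the checks in ipsec_set_req(),
 * IPSEC_PREF_REQUIRED and IPSEC_PREF_NEVER are mutually exclusive, and an
 * all-zero request resets the socket to the global policy.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
example_require_esp(int sock)
{
	ipsec_req_t req;

	(void) memset(&req, 0, sizeof (req));
	req.ipsr_esp_req = IPSEC_PREF_REQUIRED;	/* must not also set _NEVER */
	return (setsockopt(sock, IPPROTO_IP, IP_SEC_OPT,
	    &req, sizeof (req)));
}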
6265 6269
6266 6270 /*
6267 6271 * Set socket options for joining and leaving multicast groups.
6268 6272 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6269 6273  * The caller has already checked that the option name is consistent with
6270 6274 * the address family of the socket.
6271 6275 */
6272 6276 int
6273 6277 ip_opt_set_multicast_group(conn_t *connp, t_scalar_t name,
6274 6278 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6275 6279 {
6276 6280 int *i1 = (int *)invalp;
6277 6281 int error = 0;
6278 6282 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6279 6283 struct ip_mreq *v4_mreqp;
6280 6284 struct ipv6_mreq *v6_mreqp;
6281 6285 struct group_req *greqp;
6282 6286 ire_t *ire;
6283 6287 boolean_t done = B_FALSE;
6284 6288 ipaddr_t ifaddr;
6285 6289 in6_addr_t v6group;
6286 6290 uint_t ifindex;
6287 6291 boolean_t mcast_opt = B_TRUE;
6288 6292 mcast_record_t fmode;
6289 6293 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6290 6294 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6291 6295
6292 6296 switch (name) {
6293 6297 case IP_ADD_MEMBERSHIP:
6294 6298 case IPV6_JOIN_GROUP:
6295 6299 mcast_opt = B_FALSE;
6296 6300 /* FALLTHRU */
6297 6301 case MCAST_JOIN_GROUP:
6298 6302 fmode = MODE_IS_EXCLUDE;
6299 6303 optfn = ip_opt_add_group;
6300 6304 break;
6301 6305
6302 6306 case IP_DROP_MEMBERSHIP:
6303 6307 case IPV6_LEAVE_GROUP:
6304 6308 mcast_opt = B_FALSE;
6305 6309 /* FALLTHRU */
6306 6310 case MCAST_LEAVE_GROUP:
6307 6311 fmode = MODE_IS_INCLUDE;
6308 6312 optfn = ip_opt_delete_group;
6309 6313 break;
6310 6314 default:
6311 6315 ASSERT(0);
6312 6316 }
6313 6317
6314 6318 if (mcast_opt) {
6315 6319 struct sockaddr_in *sin;
6316 6320 struct sockaddr_in6 *sin6;
6317 6321
6318 6322 greqp = (struct group_req *)i1;
6319 6323 if (greqp->gr_group.ss_family == AF_INET) {
6320 6324 sin = (struct sockaddr_in *)&(greqp->gr_group);
6321 6325 IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &v6group);
6322 6326 } else {
6323 6327 if (!inet6)
6324 6328 return (EINVAL); /* Not on INET socket */
6325 6329
6326 6330 sin6 = (struct sockaddr_in6 *)&(greqp->gr_group);
6327 6331 v6group = sin6->sin6_addr;
6328 6332 }
6329 6333 ifaddr = INADDR_ANY;
6330 6334 ifindex = greqp->gr_interface;
6331 6335 } else if (inet6) {
6332 6336 v6_mreqp = (struct ipv6_mreq *)i1;
6333 6337 v6group = v6_mreqp->ipv6mr_multiaddr;
6334 6338 ifaddr = INADDR_ANY;
6335 6339 ifindex = v6_mreqp->ipv6mr_interface;
6336 6340 } else {
6337 6341 v4_mreqp = (struct ip_mreq *)i1;
6338 6342 IN6_INADDR_TO_V4MAPPED(&v4_mreqp->imr_multiaddr, &v6group);
6339 6343 ifaddr = (ipaddr_t)v4_mreqp->imr_interface.s_addr;
6340 6344 ifindex = 0;
6341 6345 }
6342 6346
6343 6347 /*
6344 6348 * In the multirouting case, we need to replicate
6345 6349 * the request on all interfaces that will take part
6346 6350 * in replication. We do so because multirouting is
6347 6351 * reflective, thus we will probably receive multi-
6348 6352 * casts on those interfaces.
6349 6353 * The ip_multirt_apply_membership() succeeds if
6350 6354 * the operation succeeds on at least one interface.
6351 6355 */
6352 6356 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6353 6357 ipaddr_t group;
6354 6358
6355 6359 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6356 6360
6357 6361 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6358 6362 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6359 6363 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6360 6364 } else {
6361 6365 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6362 6366 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6363 6367 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6364 6368 }
6365 6369 if (ire != NULL) {
6366 6370 if (ire->ire_flags & RTF_MULTIRT) {
6367 6371 error = ip_multirt_apply_membership(optfn, ire, connp,
6368 6372 checkonly, &v6group, fmode, &ipv6_all_zeros);
6369 6373 done = B_TRUE;
6370 6374 }
6371 6375 ire_refrele(ire);
6372 6376 }
6373 6377
6374 6378 if (!done) {
6375 6379 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6376 6380 fmode, &ipv6_all_zeros);
6377 6381 }
6378 6382 return (error);
6379 6383 }
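
/*
 * Userland view (illustrative only, not part of this change): the
 * protocol-independent join handled above arrives as a struct group_req;
 * the classic IP_ADD_MEMBERSHIP/IPV6_JOIN_GROUP forms use struct ip_mreq
 * and struct ipv6_mreq instead. A v4 join via the generic option:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
example_mcast_join(int sock, const struct sockaddr_in *group,
    unsigned int ifindex)
{
	struct group_req gr;

	(void) memset(&gr, 0, sizeof (gr));
	gr.gr_interface = ifindex;	/* 0 lets the kernel choose */
	(void) memcpy(&gr.gr_group, group, sizeof (*group));
	return (setsockopt(sock, IPPROTO_IP, MCAST_JOIN_GROUP,
	    &gr, sizeof (gr)));
}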
6380 6384
6381 6385 /*
6382 6386 * Set socket options for joining and leaving multicast groups
6383 6387 * for specific sources.
6384 6388 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6385 6389  * The caller has already checked that the option name is consistent with
6386 6390 * the address family of the socket.
6387 6391 */
6388 6392 int
6389 6393 ip_opt_set_multicast_sources(conn_t *connp, t_scalar_t name,
6390 6394 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6391 6395 {
6392 6396 int *i1 = (int *)invalp;
6393 6397 int error = 0;
6394 6398 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6395 6399 struct ip_mreq_source *imreqp;
6396 6400 struct group_source_req *gsreqp;
6397 6401 in6_addr_t v6group, v6src;
6398 6402 uint32_t ifindex;
6399 6403 ipaddr_t ifaddr;
6400 6404 boolean_t mcast_opt = B_TRUE;
6401 6405 mcast_record_t fmode;
6402 6406 ire_t *ire;
6403 6407 boolean_t done = B_FALSE;
6404 6408 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6405 6409 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6406 6410
6407 6411 switch (name) {
6408 6412 case IP_BLOCK_SOURCE:
6409 6413 mcast_opt = B_FALSE;
6410 6414 /* FALLTHRU */
6411 6415 case MCAST_BLOCK_SOURCE:
6412 6416 fmode = MODE_IS_EXCLUDE;
6413 6417 optfn = ip_opt_add_group;
6414 6418 break;
6415 6419
6416 6420 case IP_UNBLOCK_SOURCE:
6417 6421 mcast_opt = B_FALSE;
6418 6422 /* FALLTHRU */
6419 6423 case MCAST_UNBLOCK_SOURCE:
6420 6424 fmode = MODE_IS_EXCLUDE;
6421 6425 optfn = ip_opt_delete_group;
6422 6426 break;
6423 6427
6424 6428 case IP_ADD_SOURCE_MEMBERSHIP:
6425 6429 mcast_opt = B_FALSE;
6426 6430 /* FALLTHRU */
6427 6431 case MCAST_JOIN_SOURCE_GROUP:
6428 6432 fmode = MODE_IS_INCLUDE;
6429 6433 optfn = ip_opt_add_group;
6430 6434 break;
6431 6435
6432 6436 case IP_DROP_SOURCE_MEMBERSHIP:
6433 6437 mcast_opt = B_FALSE;
6434 6438 /* FALLTHRU */
6435 6439 case MCAST_LEAVE_SOURCE_GROUP:
6436 6440 fmode = MODE_IS_INCLUDE;
6437 6441 optfn = ip_opt_delete_group;
6438 6442 break;
6439 6443 default:
6440 6444 ASSERT(0);
6441 6445 }
6442 6446
6443 6447 if (mcast_opt) {
6444 6448 gsreqp = (struct group_source_req *)i1;
6445 6449 ifindex = gsreqp->gsr_interface;
6446 6450 if (gsreqp->gsr_group.ss_family == AF_INET) {
6447 6451 struct sockaddr_in *s;
6448 6452 s = (struct sockaddr_in *)&gsreqp->gsr_group;
6449 6453 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6group);
6450 6454 s = (struct sockaddr_in *)&gsreqp->gsr_source;
6451 6455 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6src);
6452 6456 } else {
6453 6457 struct sockaddr_in6 *s6;
6454 6458
6455 6459 if (!inet6)
6456 6460 return (EINVAL); /* Not on INET socket */
6457 6461
6458 6462 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_group;
6459 6463 v6group = s6->sin6_addr;
6460 6464 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_source;
6461 6465 v6src = s6->sin6_addr;
6462 6466 }
6463 6467 ifaddr = INADDR_ANY;
6464 6468 } else {
6465 6469 imreqp = (struct ip_mreq_source *)i1;
6466 6470 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_multiaddr, &v6group);
6467 6471 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_sourceaddr, &v6src);
6468 6472 ifaddr = (ipaddr_t)imreqp->imr_interface.s_addr;
6469 6473 ifindex = 0;
6470 6474 }
6471 6475
6472 6476 /*
6473 6477 * Handle src being mapped INADDR_ANY by changing it to unspecified.
6474 6478 */
6475 6479 if (IN6_IS_ADDR_V4MAPPED_ANY(&v6src))
6476 6480 v6src = ipv6_all_zeros;
6477 6481
6478 6482 /*
6479 6483 * In the multirouting case, we need to replicate
6480 6484 * the request as noted in the mcast cases above.
6481 6485 */
6482 6486 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6483 6487 ipaddr_t group;
6484 6488
6485 6489 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6486 6490
6487 6491 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6488 6492 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6489 6493 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6490 6494 } else {
6491 6495 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6492 6496 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6493 6497 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6494 6498 }
6495 6499 if (ire != NULL) {
6496 6500 if (ire->ire_flags & RTF_MULTIRT) {
6497 6501 error = ip_multirt_apply_membership(optfn, ire, connp,
6498 6502 checkonly, &v6group, fmode, &v6src);
6499 6503 done = B_TRUE;
6500 6504 }
6501 6505 ire_refrele(ire);
6502 6506 }
6503 6507 if (!done) {
6504 6508 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6505 6509 fmode, &v6src);
6506 6510 }
6507 6511 return (error);
6508 6512 }
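
/*
 * Companion userland sketch (illustrative only, not part of this change):
 * the source-specific variants parsed above use struct group_source_req,
 * which carries both the group and a single source address.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
example_ssm_join(int sock, const struct sockaddr_in *group,
    const struct sockaddr_in *source, unsigned int ifindex)
{
	struct group_source_req gsr;

	(void) memset(&gsr, 0, sizeof (gsr));
	gsr.gsr_interface = ifindex;
	(void) memcpy(&gsr.gsr_group, group, sizeof (*group));
	(void) memcpy(&gsr.gsr_source, source, sizeof (*source));
	return (setsockopt(sock, IPPROTO_IP, MCAST_JOIN_SOURCE_GROUP,
	    &gsr, sizeof (gsr)));
}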
6509 6513
6510 6514 /*
6511 6515  * Given a destination address and a pointer to where to put the information,
6512 6516  * this routine fills in the mtuinfo.
6513 6517  * The socket must be connected.
6514 6518  * For SCTP, conn_faddr is the primary address.
6515 6519 */
6516 6520 int
6517 6521 ip_fill_mtuinfo(conn_t *connp, ip_xmit_attr_t *ixa, struct ip6_mtuinfo *mtuinfo)
6518 6522 {
6519 6523 uint32_t pmtu = IP_MAXPACKET;
6520 6524 uint_t scopeid;
6521 6525
6522 6526 if (IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6))
6523 6527 return (-1);
6524 6528
6525 6529 /* In case we never sent or called ip_set_destination_v4/v6 */
6526 6530 if (ixa->ixa_ire != NULL)
6527 6531 pmtu = ip_get_pmtu(ixa);
6528 6532
6529 6533 if (ixa->ixa_flags & IXAF_SCOPEID_SET)
6530 6534 scopeid = ixa->ixa_scopeid;
6531 6535 else
6532 6536 scopeid = 0;
6533 6537
6534 6538 bzero(mtuinfo, sizeof (*mtuinfo));
6535 6539 mtuinfo->ip6m_addr.sin6_family = AF_INET6;
6536 6540 mtuinfo->ip6m_addr.sin6_port = connp->conn_fport;
6537 6541 mtuinfo->ip6m_addr.sin6_addr = connp->conn_faddr_v6;
6538 6542 mtuinfo->ip6m_addr.sin6_scope_id = scopeid;
6539 6543 mtuinfo->ip6m_mtu = pmtu;
6540 6544
6541 6545 return (sizeof (struct ip6_mtuinfo));
6542 6546 }
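
/*
 * Userland consumer of the data filled in above (illustrative only, not
 * part of this change): a connected IPv6 socket can read the path MTU via
 * IPV6_PATHMTU, which returns the struct ip6_mtuinfo that
 * ip_fill_mtuinfo() populates.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdint.h>

static int
example_get_path_mtu(int sock, uint32_t *mtup)
{
	struct ip6_mtuinfo mtuinfo;
	socklen_t optlen = sizeof (mtuinfo);

	if (getsockopt(sock, IPPROTO_IPV6, IPV6_PATHMTU,
	    &mtuinfo, &optlen) != 0)
		return (-1);		/* e.g. socket not connected */
	*mtup = mtuinfo.ip6m_mtu;
	return (0);
}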
6543 6547
6544 6548 /*
6545 6549  * When the src multihoming is changed from weak to [strong, preferred],
6546 6550 * ip_ire_rebind_walker is called to walk the list of all ire_t entries
6547 6551 * and identify routes that were created by user-applications in the
6548 6552 * unbound state (i.e., without RTA_IFP), and for which an ire_ill is not
6549 6553 * currently defined. These routes are then 'rebound', i.e., their ire_ill
6550 6554 * is selected by finding an interface route for the gateway.
6551 6555 */
6552 6556 /* ARGSUSED */
6553 6557 void
6554 6558 ip_ire_rebind_walker(ire_t *ire, void *notused)
6555 6559 {
6556 6560 if (!ire->ire_unbound || ire->ire_ill != NULL)
6557 6561 return;
6558 6562 ire_rebind(ire);
6559 6563 ire_delete(ire);
6560 6564 }
6561 6565
6562 6566 /*
6563 6567 * When the src multihoming is changed from [strong, preferred] to weak,
6564 6568 * ip_ire_unbind_walker is called to walk the list of all ire_t entries, and
6565 6569 * set any entries that were created by user-applications in the unbound state
6566 6570 * (i.e., without RTA_IFP) back to having a NULL ire_ill.
6567 6571 */
6568 6572 /* ARGSUSED */
6569 6573 void
6570 6574 ip_ire_unbind_walker(ire_t *ire, void *notused)
6571 6575 {
6572 6576 ire_t *new_ire;
6573 6577
6574 6578 if (!ire->ire_unbound || ire->ire_ill == NULL)
6575 6579 return;
6576 6580 if (ire->ire_ipversion == IPV6_VERSION) {
6577 6581 new_ire = ire_create_v6(&ire->ire_addr_v6, &ire->ire_mask_v6,
6578 6582 &ire->ire_gateway_addr_v6, ire->ire_type, NULL,
6579 6583 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6580 6584 } else {
6581 6585 new_ire = ire_create((uchar_t *)&ire->ire_addr,
6582 6586 (uchar_t *)&ire->ire_mask,
6583 6587 (uchar_t *)&ire->ire_gateway_addr, ire->ire_type, NULL,
6584 6588 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6585 6589 }
6586 6590 if (new_ire == NULL)
6587 6591 return;
6588 6592 new_ire->ire_unbound = B_TRUE;
6589 6593 /*
6590 6594 * The bound ire must first be deleted so that we don't return
6591 6595 * the existing one on the attempt to add the unbound new_ire.
6592 6596 */
6593 6597 ire_delete(ire);
6594 6598 new_ire = ire_add(new_ire);
6595 6599 if (new_ire != NULL)
6596 6600 ire_refrele(new_ire);
6597 6601 }
6598 6602
6599 6603 /*
6600 6604 * When the settings of ip*_strict_src_multihoming tunables are changed,
6601 6605 * all cached routes need to be recomputed. This recomputation needs to be
6602 6606 * done when going from weaker to stronger modes so that the cached ire
6603 6607 * for the connection does not violate the current ip*_strict_src_multihoming
6604 6608 * setting. It also needs to be done when going from stronger to weaker modes,
6605 6609 * so that we fall back to matching on the longest-matching-route (as opposed
6606 6610 * to a shorter match that may have been selected in the strong mode
6607 6611 * to satisfy src_multihoming settings).
6608 6612 *
6609 6613  * The cached ixa_ire entries for all conn_t entries are marked as
6610 6614 * "verify" so that they will be recomputed for the next packet.
6611 6615 */
6612 6616 void
6613 6617 conn_ire_revalidate(conn_t *connp, void *arg)
6614 6618 {
6615 6619 boolean_t isv6 = (boolean_t)arg;
6616 6620
6617 6621 if ((isv6 && connp->conn_ipversion != IPV6_VERSION) ||
6618 6622 (!isv6 && connp->conn_ipversion != IPV4_VERSION))
6619 6623 return;
6620 6624 connp->conn_ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
6621 6625 }
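
/*
 * Sketch of the check that makes the VERIFY marking above effective
 * (hypothetical consumer, illustrative only): before reusing a cached
 * route, the transmit path compares the generation stashed in the
 * ip_xmit_attr_t with the ire's current one. IRE_GENERATION_VERIFY never
 * matches, so the next send is forced to recompute the route.
 */
static boolean_t
example_cached_ire_usable(ip_xmit_attr_t *ixa)
{
	return (ixa->ixa_ire != NULL &&
	    ixa->ixa_ire_generation == ixa->ixa_ire->ire_generation);
}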
6622 6626
6623 6627  * Handles both IPv4 and IPv6 reassembly, doing the out-of-order cases.
6624 6628  * When an ipf is passed here for the first time, if
6625 6629 * When an ipf is passed here for the first time, if
6626 6630 * we already have in-order fragments on the queue, we convert from the fast-
6627 6631 * path reassembly scheme to the hard-case scheme. From then on, additional
6628 6632 * fragments are reassembled here. We keep track of the start and end offsets
6629 6633 * of each piece, and the number of holes in the chain. When the hole count
6630 6634 * goes to zero, we are done!
6631 6635 *
6632 6636 * The ipf_count will be updated to account for any mblk(s) added (pointed to
6633 6637 * by mp) or subtracted (freeb()ed dups), upon return the caller must update
6634 6638 * ipfb_count and ill_frag_count by the difference of ipf_count before and
6635 6639 * after the call to ip_reassemble().
6636 6640 */
6637 6641 int
6638 6642 ip_reassemble(mblk_t *mp, ipf_t *ipf, uint_t start, boolean_t more, ill_t *ill,
6639 6643 size_t msg_len)
6640 6644 {
6641 6645 uint_t end;
6642 6646 mblk_t *next_mp;
6643 6647 mblk_t *mp1;
6644 6648 uint_t offset;
6645 6649 boolean_t incr_dups = B_TRUE;
6646 6650 boolean_t offset_zero_seen = B_FALSE;
6647 6651 boolean_t pkt_boundary_checked = B_FALSE;
6648 6652
6649 6653 /* If start == 0 then ipf_nf_hdr_len has to be set. */
6650 6654 ASSERT(start != 0 || ipf->ipf_nf_hdr_len != 0);
6651 6655
6652 6656 /* Add in byte count */
6653 6657 ipf->ipf_count += msg_len;
6654 6658 if (ipf->ipf_end) {
6655 6659 /*
6656 6660 * We were part way through in-order reassembly, but now there
6657 6661 * is a hole. We walk through messages already queued, and
6658 6662 * mark them for hard case reassembly. We know that up till
6659 6663 * now they were in order starting from offset zero.
6660 6664 */
6661 6665 offset = 0;
6662 6666 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
6663 6667 IP_REASS_SET_START(mp1, offset);
6664 6668 if (offset == 0) {
6665 6669 ASSERT(ipf->ipf_nf_hdr_len != 0);
6666 6670 offset = -ipf->ipf_nf_hdr_len;
6667 6671 }
6668 6672 offset += mp1->b_wptr - mp1->b_rptr;
6669 6673 IP_REASS_SET_END(mp1, offset);
6670 6674 }
6671 6675 /* One hole at the end. */
6672 6676 ipf->ipf_hole_cnt = 1;
6673 6677 /* Brand it as a hard case, forever. */
6674 6678 ipf->ipf_end = 0;
6675 6679 }
6676 6680 /* Walk through all the new pieces. */
6677 6681 do {
6678 6682 end = start + (mp->b_wptr - mp->b_rptr);
6679 6683 /*
6680 6684 * If start is 0, decrease 'end' only for the first mblk of
6681 6685 		 * the fragment. Otherwise 'end' can get a wrong value in the
6682 6686 		 * second pass of the loop if the first mblk is exactly the
6683 6687 * size of ipf_nf_hdr_len.
6684 6688 */
6685 6689 if (start == 0 && !offset_zero_seen) {
6686 6690 /* First segment */
6687 6691 ASSERT(ipf->ipf_nf_hdr_len != 0);
6688 6692 end -= ipf->ipf_nf_hdr_len;
6689 6693 offset_zero_seen = B_TRUE;
6690 6694 }
6691 6695 next_mp = mp->b_cont;
6692 6696 /*
6693 6697 		 * We are checking to see if there is any interesting data
6694 6698 * to process. If there isn't and the mblk isn't the
6695 6699 * one which carries the unfragmentable header then we
6696 6700 * drop it. It's possible to have just the unfragmentable
6697 6701 * header come through without any data. That needs to be
6698 6702 * saved.
6699 6703 *
6700 6704 * If the assert at the top of this function holds then the
6701 6705 * term "ipf->ipf_nf_hdr_len != 0" isn't needed. This code
6702 6706 * is infrequently traveled enough that the test is left in
6703 6707 * to protect against future code changes which break that
6704 6708 * invariant.
6705 6709 */
6706 6710 if (start == end && start != 0 && ipf->ipf_nf_hdr_len != 0) {
6707 6711 /* Empty. Blast it. */
6708 6712 IP_REASS_SET_START(mp, 0);
6709 6713 IP_REASS_SET_END(mp, 0);
6710 6714 /*
6711 6715 * If the ipf points to the mblk we are about to free,
6712 6716 * update ipf to point to the next mblk (or NULL
6713 6717 * if none).
6714 6718 */
6715 6719 if (ipf->ipf_mp->b_cont == mp)
6716 6720 ipf->ipf_mp->b_cont = next_mp;
6717 6721 freeb(mp);
6718 6722 continue;
6719 6723 }
6720 6724 mp->b_cont = NULL;
6721 6725 IP_REASS_SET_START(mp, start);
6722 6726 IP_REASS_SET_END(mp, end);
6723 6727 if (!ipf->ipf_tail_mp) {
6724 6728 ipf->ipf_tail_mp = mp;
6725 6729 ipf->ipf_mp->b_cont = mp;
6726 6730 if (start == 0 || !more) {
6727 6731 ipf->ipf_hole_cnt = 1;
6728 6732 /*
6729 6733 			 * If the first fragment comes in more than one
6730 6734 			 * mblk, this loop will be executed for each
6731 6735 			 * mblk. We need to adjust the hole count so that
6732 6736 			 * exiting this routine leaves the hole count at 1.
6733 6737 */
6734 6738 if (next_mp)
6735 6739 ipf->ipf_hole_cnt++;
6736 6740 } else
6737 6741 ipf->ipf_hole_cnt = 2;
6738 6742 continue;
6739 6743 } else if (ipf->ipf_last_frag_seen && !more &&
6740 6744 !pkt_boundary_checked) {
6741 6745 /*
6742 6746 * We check datagram boundary only if this fragment
6743 6747 * claims to be the last fragment and we have seen a
6744 6748 * last fragment in the past too. We do this only
6745 6749 * once for a given fragment.
6746 6750 *
6747 6751 * start cannot be 0 here as fragments with start=0
6748 6752 			 * and MF=0 get handled as complete packets. These
6749 6753 * fragments should not reach here.
6750 6754 */
6751 6755
6752 6756 if (start + msgdsize(mp) !=
6753 6757 IP_REASS_END(ipf->ipf_tail_mp)) {
6754 6758 /*
6755 6759 * We have two fragments both of which claim
6756 6760 				 * to be the last fragment but give conflicting
6757 6761 * information about the whole datagram size.
6758 6762 * Something fishy is going on. Drop the
6759 6763 * fragment and free up the reassembly list.
6760 6764 */
6761 6765 return (IP_REASS_FAILED);
6762 6766 }
6763 6767
6764 6768 /*
6765 6769 * We shouldn't come to this code block again for this
6766 6770 * particular fragment.
6767 6771 */
6768 6772 pkt_boundary_checked = B_TRUE;
6769 6773 }
6770 6774
6771 6775 /* New stuff at or beyond tail? */
6772 6776 offset = IP_REASS_END(ipf->ipf_tail_mp);
6773 6777 if (start >= offset) {
6774 6778 if (ipf->ipf_last_frag_seen) {
6775 6779 /* current fragment is beyond last fragment */
6776 6780 return (IP_REASS_FAILED);
6777 6781 }
6778 6782 /* Link it on end. */
6779 6783 ipf->ipf_tail_mp->b_cont = mp;
6780 6784 ipf->ipf_tail_mp = mp;
6781 6785 if (more) {
6782 6786 if (start != offset)
6783 6787 ipf->ipf_hole_cnt++;
6784 6788 } else if (start == offset && next_mp == NULL)
6785 6789 ipf->ipf_hole_cnt--;
6786 6790 continue;
6787 6791 }
6788 6792 mp1 = ipf->ipf_mp->b_cont;
6789 6793 offset = IP_REASS_START(mp1);
6790 6794 /* New stuff at the front? */
6791 6795 if (start < offset) {
6792 6796 if (start == 0) {
6793 6797 if (end >= offset) {
6794 6798 				/* Nailed the hole at the beginning. */
6795 6799 ipf->ipf_hole_cnt--;
6796 6800 }
6797 6801 } else if (end < offset) {
6798 6802 /*
6799 6803 * A hole, stuff, and a hole where there used
6800 6804 * to be just a hole.
6801 6805 */
6802 6806 ipf->ipf_hole_cnt++;
6803 6807 }
6804 6808 mp->b_cont = mp1;
6805 6809 /* Check for overlap. */
6806 6810 while (end > offset) {
6807 6811 if (end < IP_REASS_END(mp1)) {
6808 6812 mp->b_wptr -= end - offset;
6809 6813 IP_REASS_SET_END(mp, offset);
6810 6814 BUMP_MIB(ill->ill_ip_mib,
6811 6815 ipIfStatsReasmPartDups);
6812 6816 break;
6813 6817 }
6814 6818 /* Did we cover another hole? */
6815 6819 if ((mp1->b_cont &&
6816 6820 IP_REASS_END(mp1) !=
6817 6821 IP_REASS_START(mp1->b_cont) &&
6818 6822 end >= IP_REASS_START(mp1->b_cont)) ||
6819 6823 (!ipf->ipf_last_frag_seen && !more)) {
6820 6824 ipf->ipf_hole_cnt--;
6821 6825 }
6822 6826 /* Clip out mp1. */
6823 6827 if ((mp->b_cont = mp1->b_cont) == NULL) {
6824 6828 /*
6825 6829 * After clipping out mp1, this guy
6826 6830 * is now hanging off the end.
6827 6831 */
6828 6832 ipf->ipf_tail_mp = mp;
6829 6833 }
6830 6834 IP_REASS_SET_START(mp1, 0);
6831 6835 IP_REASS_SET_END(mp1, 0);
6832 6836 /* Subtract byte count */
6833 6837 ipf->ipf_count -= mp1->b_datap->db_lim -
6834 6838 mp1->b_datap->db_base;
6835 6839 freeb(mp1);
6836 6840 BUMP_MIB(ill->ill_ip_mib,
6837 6841 ipIfStatsReasmPartDups);
6838 6842 mp1 = mp->b_cont;
6839 6843 if (!mp1)
6840 6844 break;
6841 6845 offset = IP_REASS_START(mp1);
6842 6846 }
6843 6847 ipf->ipf_mp->b_cont = mp;
6844 6848 continue;
6845 6849 }
6846 6850 /*
6847 6851 * The new piece starts somewhere between the start of the head
6848 6852 * and before the end of the tail.
6849 6853 */
6850 6854 for (; mp1; mp1 = mp1->b_cont) {
6851 6855 offset = IP_REASS_END(mp1);
6852 6856 if (start < offset) {
6853 6857 if (end <= offset) {
6854 6858 /* Nothing new. */
6855 6859 IP_REASS_SET_START(mp, 0);
6856 6860 IP_REASS_SET_END(mp, 0);
6857 6861 /* Subtract byte count */
6858 6862 ipf->ipf_count -= mp->b_datap->db_lim -
6859 6863 mp->b_datap->db_base;
6860 6864 if (incr_dups) {
6861 6865 ipf->ipf_num_dups++;
6862 6866 incr_dups = B_FALSE;
6863 6867 }
6864 6868 freeb(mp);
6865 6869 BUMP_MIB(ill->ill_ip_mib,
6866 6870 ipIfStatsReasmDuplicates);
6867 6871 break;
6868 6872 }
6869 6873 /*
6870 6874 * Trim redundant stuff off beginning of new
6871 6875 * piece.
6872 6876 */
6873 6877 IP_REASS_SET_START(mp, offset);
6874 6878 mp->b_rptr += offset - start;
6875 6879 BUMP_MIB(ill->ill_ip_mib,
6876 6880 ipIfStatsReasmPartDups);
6877 6881 start = offset;
6878 6882 if (!mp1->b_cont) {
6879 6883 /*
6880 6884 * After trimming, this guy is now
6881 6885 * hanging off the end.
6882 6886 */
6883 6887 mp1->b_cont = mp;
6884 6888 ipf->ipf_tail_mp = mp;
6885 6889 if (!more) {
6886 6890 ipf->ipf_hole_cnt--;
6887 6891 }
6888 6892 break;
6889 6893 }
6890 6894 }
6891 6895 if (start >= IP_REASS_START(mp1->b_cont))
6892 6896 continue;
6893 6897 /* Fill a hole */
6894 6898 if (start > offset)
6895 6899 ipf->ipf_hole_cnt++;
6896 6900 mp->b_cont = mp1->b_cont;
6897 6901 mp1->b_cont = mp;
6898 6902 mp1 = mp->b_cont;
6899 6903 offset = IP_REASS_START(mp1);
6900 6904 if (end >= offset) {
6901 6905 ipf->ipf_hole_cnt--;
6902 6906 /* Check for overlap. */
6903 6907 while (end > offset) {
6904 6908 if (end < IP_REASS_END(mp1)) {
6905 6909 mp->b_wptr -= end - offset;
6906 6910 IP_REASS_SET_END(mp, offset);
6907 6911 /*
6908 6912 * TODO we might bump
6909 6913 * this up twice if there is
6910 6914 * overlap at both ends.
6911 6915 */
6912 6916 BUMP_MIB(ill->ill_ip_mib,
6913 6917 ipIfStatsReasmPartDups);
6914 6918 break;
6915 6919 }
6916 6920 /* Did we cover another hole? */
6917 6921 if ((mp1->b_cont &&
6918 6922 IP_REASS_END(mp1)
6919 6923 != IP_REASS_START(mp1->b_cont) &&
6920 6924 end >=
6921 6925 IP_REASS_START(mp1->b_cont)) ||
6922 6926 (!ipf->ipf_last_frag_seen &&
6923 6927 !more)) {
6924 6928 ipf->ipf_hole_cnt--;
6925 6929 }
6926 6930 /* Clip out mp1. */
6927 6931 if ((mp->b_cont = mp1->b_cont) ==
6928 6932 NULL) {
6929 6933 /*
6930 6934 * After clipping out mp1,
6931 6935 * this guy is now hanging
6932 6936 * off the end.
6933 6937 */
6934 6938 ipf->ipf_tail_mp = mp;
6935 6939 }
6936 6940 IP_REASS_SET_START(mp1, 0);
6937 6941 IP_REASS_SET_END(mp1, 0);
6938 6942 /* Subtract byte count */
6939 6943 ipf->ipf_count -=
6940 6944 mp1->b_datap->db_lim -
6941 6945 mp1->b_datap->db_base;
6942 6946 freeb(mp1);
6943 6947 BUMP_MIB(ill->ill_ip_mib,
6944 6948 ipIfStatsReasmPartDups);
6945 6949 mp1 = mp->b_cont;
6946 6950 if (!mp1)
6947 6951 break;
6948 6952 offset = IP_REASS_START(mp1);
6949 6953 }
6950 6954 }
6951 6955 break;
6952 6956 }
6953 6957 } while (start = end, mp = next_mp);
6954 6958
6955 6959 	/* The fragment just processed could be the last one; remember this. */
6956 6960 if (!more)
6957 6961 ipf->ipf_last_frag_seen = B_TRUE;
6958 6962
6959 6963 /* Still got holes? */
6960 6964 if (ipf->ipf_hole_cnt)
6961 6965 return (IP_REASS_PARTIAL);
6962 6966 /* Clean up overloaded fields to avoid upstream disasters. */
6963 6967 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
6964 6968 IP_REASS_SET_START(mp1, 0);
6965 6969 IP_REASS_SET_END(mp1, 0);
6966 6970 }
6967 6971 return (IP_REASS_COMPLETE);
6968 6972 }
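
/*
 * The hole accounting above, restated standalone (illustrative only, not
 * part of this change): each queued piece covers [start, end); a gap
 * between consecutive pieces is a hole, as is a missing head or tail.
 * ip_reassemble() maintains the count incrementally; this hypothetical
 * helper recomputes it from scratch over sorted, non-overlapping extents.
 */
static uint_t
example_count_holes(const uint_t *starts, const uint_t *ends, uint_t n,
    boolean_t last_frag_seen)
{
	uint_t i, holes = 0;

	if (n == 0 || starts[0] != 0)
		holes++;			/* missing data at offset 0 */
	for (i = 1; i < n; i++) {
		if (starts[i] > ends[i - 1])
			holes++;		/* gap between pieces */
	}
	if (!last_frag_seen)
		holes++;			/* tail not yet seen */
	return (holes);
}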
6969 6973
6970 6974 /*
6971 6975 * Fragmentation reassembly. Each ILL has a hash table for
6972 6976 * queuing packets undergoing reassembly for all IPIFs
6973 6977 * associated with the ILL. The hash is based on the packet
6974 6978 * IP ident field. The ILL frag hash table was allocated
6975 6979 * as a timer block at the time the ILL was created. Whenever
6976 6980 * there is anything on the reassembly queue, the timer will
6977 6981 * be running. Returns the reassembled packet if reassembly completes.
6978 6982 */
6979 6983 mblk_t *
6980 6984 ip_input_fragment(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
6981 6985 {
6982 6986 uint32_t frag_offset_flags;
6983 6987 mblk_t *t_mp;
6984 6988 ipaddr_t dst;
6985 6989 uint8_t proto = ipha->ipha_protocol;
6986 6990 uint32_t sum_val;
6987 6991 uint16_t sum_flags;
6988 6992 ipf_t *ipf;
6989 6993 ipf_t **ipfp;
6990 6994 ipfb_t *ipfb;
6991 6995 uint16_t ident;
6992 6996 uint32_t offset;
6993 6997 ipaddr_t src;
6994 6998 uint_t hdr_length;
6995 6999 uint32_t end;
6996 7000 mblk_t *mp1;
6997 7001 mblk_t *tail_mp;
6998 7002 size_t count;
6999 7003 size_t msg_len;
7000 7004 uint8_t ecn_info = 0;
7001 7005 uint32_t packet_size;
7002 7006 boolean_t pruned = B_FALSE;
7003 7007 ill_t *ill = ira->ira_ill;
7004 7008 ip_stack_t *ipst = ill->ill_ipst;
7005 7009
7006 7010 /*
7007 7011 	 * Drop the fragmented packet as early as possible if
7008 7012 	 * we don't have the resources to reassemble it.
7009 7013 */
7010 7014 if (ipst->ips_ip_reass_queue_bytes == 0) {
7011 7015 freemsg(mp);
7012 7016 return (NULL);
7013 7017 }
7014 7018
7015 7019 	/* If the packet isn't a fragment (no MF flag, zero offset), return it */
7016 7020 if ((frag_offset_flags = ntohs(ipha->ipha_fragment_offset_and_flags) &
7017 7021 (IPH_MF | IPH_OFFSET)) == 0)
7018 7022 return (mp);
7019 7023
7020 7024 /*
7021 7025 * We utilize hardware computed checksum info only for UDP since
7022 7026 * IP fragmentation is a normal occurrence for the protocol. In
7023 7027 * addition, checksum offload support for IP fragments carrying
7024 7028 * UDP payload is commonly implemented across network adapters.
7025 7029 */
7026 7030 ASSERT(ira->ira_rill != NULL);
7027 7031 if (proto == IPPROTO_UDP && dohwcksum &&
7028 7032 ILL_HCKSUM_CAPABLE(ira->ira_rill) &&
7029 7033 (DB_CKSUMFLAGS(mp) & (HCK_FULLCKSUM | HCK_PARTIALCKSUM))) {
7030 7034 mblk_t *mp1 = mp->b_cont;
7031 7035 int32_t len;
7032 7036
7033 7037 /* Record checksum information from the packet */
7034 7038 sum_val = (uint32_t)DB_CKSUM16(mp);
7035 7039 sum_flags = DB_CKSUMFLAGS(mp);
7036 7040
7037 7041 /* IP payload offset from beginning of mblk */
7038 7042 offset = ((uchar_t *)ipha + IPH_HDR_LENGTH(ipha)) - mp->b_rptr;
7039 7043
7040 7044 if ((sum_flags & HCK_PARTIALCKSUM) &&
7041 7045 (mp1 == NULL || mp1->b_cont == NULL) &&
7042 7046 offset >= DB_CKSUMSTART(mp) &&
7043 7047 ((len = offset - DB_CKSUMSTART(mp)) & 1) == 0) {
7044 7048 uint32_t adj;
7045 7049 /*
7046 7050 * Partial checksum has been calculated by hardware
7047 7051 * and attached to the packet; in addition, any
7048 7052 * prepended extraneous data is even byte aligned.
7049 7053 * If any such data exists, we adjust the checksum;
7050 7054 * this would also handle any postpended data.
7051 7055 */
7052 7056 IP_ADJCKSUM_PARTIAL(mp->b_rptr + DB_CKSUMSTART(mp),
7053 7057 mp, mp1, len, adj);
7054 7058
7055 7059 /* One's complement subtract extraneous checksum */
7056 7060 if (adj >= sum_val)
7057 7061 sum_val = ~(adj - sum_val) & 0xFFFF;
7058 7062 else
7059 7063 sum_val -= adj;
7060 7064 }
7061 7065 } else {
7062 7066 sum_val = 0;
7063 7067 sum_flags = 0;
7064 7068 }
7065 7069
7066 7070 /* Clear hardware checksumming flag */
7067 7071 DB_CKSUMFLAGS(mp) = 0;
7068 7072
7069 7073 ident = ipha->ipha_ident;
7070 7074 offset = (frag_offset_flags << 3) & 0xFFFF;
7071 7075 src = ipha->ipha_src;
7072 7076 dst = ipha->ipha_dst;
7073 7077 hdr_length = IPH_HDR_LENGTH(ipha);
7074 7078 end = ntohs(ipha->ipha_length) - hdr_length;
7075 7079
7076 7080 /* If end == 0 then we have a packet with no data, so just free it */
7077 7081 if (end == 0) {
7078 7082 freemsg(mp);
7079 7083 return (NULL);
7080 7084 }
7081 7085
7082 7086 /* Record the ECN field info. */
7083 7087 ecn_info = (ipha->ipha_type_of_service & 0x3);
7084 7088 if (offset != 0) {
7085 7089 /*
7086 7090 * If this isn't the first piece, strip the header, and
7087 7091 * add the offset to the end value.
7088 7092 */
7089 7093 mp->b_rptr += hdr_length;
7090 7094 end += offset;
7091 7095 }
7092 7096
7093 7097 /* Handle vnic loopback of fragments */
7094 7098 if (mp->b_datap->db_ref > 2)
7095 7099 msg_len = 0;
7096 7100 else
7097 7101 msg_len = MBLKSIZE(mp);
7098 7102
7099 7103 tail_mp = mp;
7100 7104 while (tail_mp->b_cont != NULL) {
7101 7105 tail_mp = tail_mp->b_cont;
7102 7106 if (tail_mp->b_datap->db_ref <= 2)
7103 7107 msg_len += MBLKSIZE(tail_mp);
7104 7108 }
7105 7109
7106 7110 /* If the reassembly list for this ILL will get too big, prune it */
7107 7111 if ((msg_len + sizeof (*ipf) + ill->ill_frag_count) >=
7108 7112 ipst->ips_ip_reass_queue_bytes) {
7109 7113 DTRACE_PROBE3(ip_reass_queue_bytes, uint_t, msg_len,
7110 7114 uint_t, ill->ill_frag_count,
7111 7115 uint_t, ipst->ips_ip_reass_queue_bytes);
7112 7116 ill_frag_prune(ill,
7113 7117 (ipst->ips_ip_reass_queue_bytes < msg_len) ? 0 :
7114 7118 (ipst->ips_ip_reass_queue_bytes - msg_len));
7115 7119 pruned = B_TRUE;
7116 7120 }
7117 7121
7118 7122 ipfb = &ill->ill_frag_hash_tbl[ILL_FRAG_HASH(src, ident)];
7119 7123 mutex_enter(&ipfb->ipfb_lock);
7120 7124
7121 7125 ipfp = &ipfb->ipfb_ipf;
7122 7126 /* Try to find an existing fragment queue for this packet. */
7123 7127 for (;;) {
7124 7128 ipf = ipfp[0];
7125 7129 if (ipf != NULL) {
7126 7130 /*
7127 7131 * It has to match on ident and src/dst address.
7128 7132 */
7129 7133 if (ipf->ipf_ident == ident &&
7130 7134 ipf->ipf_src == src &&
7131 7135 ipf->ipf_dst == dst &&
7132 7136 ipf->ipf_protocol == proto) {
7133 7137 /*
7134 7138 * If we have received too many
7135 7139 				 * duplicate fragments for this packet,
7136 7140 * free it.
7137 7141 */
7138 7142 if (ipf->ipf_num_dups > ip_max_frag_dups) {
7139 7143 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7140 7144 freemsg(mp);
7141 7145 mutex_exit(&ipfb->ipfb_lock);
7142 7146 return (NULL);
7143 7147 }
7144 7148 /* Found it. */
7145 7149 break;
7146 7150 }
7147 7151 ipfp = &ipf->ipf_hash_next;
7148 7152 continue;
7149 7153 }
7150 7154
7151 7155 /*
7152 7156 * If we pruned the list, do we want to store this new
7153 7157 		 * fragment? We apply an optimization here based on the
7154 7158 * fact that most fragments will be received in order.
7155 7159 * So if the offset of this incoming fragment is zero,
7156 7160 * it is the first fragment of a new packet. We will
7157 7161 * keep it. Otherwise drop the fragment, as we have
7158 7162 * probably pruned the packet already (since the
7159 7163 * packet cannot be found).
7160 7164 */
7161 7165 if (pruned && offset != 0) {
7162 7166 mutex_exit(&ipfb->ipfb_lock);
7163 7167 freemsg(mp);
7164 7168 return (NULL);
7165 7169 }
7166 7170
7167 7171 if (ipfb->ipfb_frag_pkts >= MAX_FRAG_PKTS(ipst)) {
7168 7172 /*
7169 7173 * Too many fragmented packets in this hash
7170 7174 * bucket. Free the oldest.
7171 7175 */
7172 7176 ill_frag_free_pkts(ill, ipfb, ipfb->ipfb_ipf, 1);
7173 7177 }
7174 7178
7175 7179 /* New guy. Allocate a frag message. */
7176 7180 mp1 = allocb(sizeof (*ipf), BPRI_MED);
7177 7181 if (mp1 == NULL) {
7178 7182 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7179 7183 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7180 7184 freemsg(mp);
7181 7185 reass_done:
7182 7186 mutex_exit(&ipfb->ipfb_lock);
7183 7187 return (NULL);
7184 7188 }
7185 7189
7186 7190 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmReqds);
7187 7191 mp1->b_cont = mp;
7188 7192
7189 7193 /* Initialize the fragment header. */
7190 7194 ipf = (ipf_t *)mp1->b_rptr;
7191 7195 ipf->ipf_mp = mp1;
7192 7196 ipf->ipf_ptphn = ipfp;
7193 7197 ipfp[0] = ipf;
7194 7198 ipf->ipf_hash_next = NULL;
7195 7199 ipf->ipf_ident = ident;
7196 7200 ipf->ipf_protocol = proto;
7197 7201 ipf->ipf_src = src;
7198 7202 ipf->ipf_dst = dst;
7199 7203 ipf->ipf_nf_hdr_len = 0;
7200 7204 /* Record reassembly start time. */
7201 7205 ipf->ipf_timestamp = gethrestime_sec();
7202 7206 /* Record ipf generation and account for frag header */
7203 7207 ipf->ipf_gen = ill->ill_ipf_gen++;
7204 7208 ipf->ipf_count = MBLKSIZE(mp1);
7205 7209 ipf->ipf_last_frag_seen = B_FALSE;
7206 7210 ipf->ipf_ecn = ecn_info;
7207 7211 ipf->ipf_num_dups = 0;
7208 7212 ipfb->ipfb_frag_pkts++;
7209 7213 ipf->ipf_checksum = 0;
7210 7214 ipf->ipf_checksum_flags = 0;
7211 7215
7212 7216 /* Store checksum value in fragment header */
7213 7217 if (sum_flags != 0) {
7214 7218 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7215 7219 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7216 7220 ipf->ipf_checksum = sum_val;
7217 7221 ipf->ipf_checksum_flags = sum_flags;
7218 7222 }
7219 7223
7220 7224 /*
7221 7225 * We handle reassembly two ways. In the easy case,
7222 7226 * where all the fragments show up in order, we do
7223 7227 * minimal bookkeeping, and just clip new pieces on
7224 7228 * the end. If we ever see a hole, then we go off
7225 7229 * to ip_reassemble which has to mark the pieces and
7226 7230 * keep track of the number of holes, etc. Obviously,
7227 7231 * the point of having both mechanisms is so we can
7228 7232 * handle the easy case as efficiently as possible.
7229 7233 */
7230 7234 if (offset == 0) {
7231 7235 /* Easy case, in-order reassembly so far. */
7232 7236 ipf->ipf_count += msg_len;
7233 7237 ipf->ipf_tail_mp = tail_mp;
7234 7238 /*
7235 7239 * Keep track of next expected offset in
7236 7240 * ipf_end.
7237 7241 */
7238 7242 ipf->ipf_end = end;
7239 7243 ipf->ipf_nf_hdr_len = hdr_length;
7240 7244 } else {
7241 7245 /* Hard case, hole at the beginning. */
7242 7246 ipf->ipf_tail_mp = NULL;
7243 7247 /*
7244 7248 * ipf_end == 0 means that we have given up
7245 7249 * on easy reassembly.
7246 7250 */
7247 7251 ipf->ipf_end = 0;
7248 7252
7249 7253 /* Forget checksum offload from now on */
7250 7254 ipf->ipf_checksum_flags = 0;
7251 7255
7252 7256 /*
7253 7257 * ipf_hole_cnt is set by ip_reassemble.
7254 7258 * ipf_count is updated by ip_reassemble.
7255 7259 * No need to check for return value here
7256 7260 * as we don't expect reassembly to complete
7257 7261 * or fail for the first fragment itself.
7258 7262 */
7259 7263 (void) ip_reassemble(mp, ipf,
7260 7264 (frag_offset_flags & IPH_OFFSET) << 3,
7261 7265 (frag_offset_flags & IPH_MF), ill, msg_len);
7262 7266 }
7263 7267 /* Update per ipfb and ill byte counts */
7264 7268 ipfb->ipfb_count += ipf->ipf_count;
7265 7269 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7266 7270 atomic_add_32(&ill->ill_frag_count, ipf->ipf_count);
7267 7271 /* If the frag timer wasn't already going, start it. */
7268 7272 mutex_enter(&ill->ill_lock);
7269 7273 ill_frag_timer_start(ill);
7270 7274 mutex_exit(&ill->ill_lock);
7271 7275 goto reass_done;
7272 7276 }
7273 7277
7274 7278 /*
7275 7279 	 * If the packet's checksum flags have changed (it could be coming up
7276 7280 	 * from a different interface than the previous fragments, therefore
7277 7281 	 * with possibly different checksum capability), then forget
7278 7282 	 * any stored checksum state. Otherwise add the value to
7279 7283 * the existing one stored in the fragment header.
7280 7284 */
7281 7285 if (sum_flags != 0 && sum_flags == ipf->ipf_checksum_flags) {
7282 7286 sum_val += ipf->ipf_checksum;
7283 7287 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7284 7288 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7285 7289 ipf->ipf_checksum = sum_val;
7286 7290 } else if (ipf->ipf_checksum_flags != 0) {
7287 7291 /* Forget checksum offload from now on */
7288 7292 ipf->ipf_checksum_flags = 0;
7289 7293 }
7290 7294
7291 7295 /*
7292 7296 * We have a new piece of a datagram which is already being
7293 7297 * reassembled. Update the ECN info if all IP fragments
7294 7298 * are ECN capable. If there is one which is not, clear
7295 7299 * all the info. If there is at least one which has CE
7296 7300 * code point, IP needs to report that up to transport.
7297 7301 */
7298 7302 if (ecn_info != IPH_ECN_NECT && ipf->ipf_ecn != IPH_ECN_NECT) {
7299 7303 if (ecn_info == IPH_ECN_CE)
7300 7304 ipf->ipf_ecn = IPH_ECN_CE;
7301 7305 } else {
7302 7306 ipf->ipf_ecn = IPH_ECN_NECT;
7303 7307 }
7304 7308 if (offset && ipf->ipf_end == offset) {
7305 7309 /* The new fragment fits at the end */
7306 7310 ipf->ipf_tail_mp->b_cont = mp;
7307 7311 /* Update the byte count */
7308 7312 ipf->ipf_count += msg_len;
7309 7313 /* Update per ipfb and ill byte counts */
7310 7314 ipfb->ipfb_count += msg_len;
7311 7315 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7312 7316 atomic_add_32(&ill->ill_frag_count, msg_len);
7313 7317 if (frag_offset_flags & IPH_MF) {
7314 7318 /* More to come. */
7315 7319 ipf->ipf_end = end;
7316 7320 ipf->ipf_tail_mp = tail_mp;
7317 7321 goto reass_done;
7318 7322 }
7319 7323 } else {
7320 7324 /* Go do the hard cases. */
7321 7325 int ret;
7322 7326
7323 7327 if (offset == 0)
7324 7328 ipf->ipf_nf_hdr_len = hdr_length;
7325 7329
7326 7330 /* Save current byte count */
7327 7331 count = ipf->ipf_count;
7328 7332 ret = ip_reassemble(mp, ipf,
7329 7333 (frag_offset_flags & IPH_OFFSET) << 3,
7330 7334 (frag_offset_flags & IPH_MF), ill, msg_len);
7331 7335 /* Count of bytes added and subtracted (freeb()ed) */
7332 7336 count = ipf->ipf_count - count;
7333 7337 if (count) {
7334 7338 /* Update per ipfb and ill byte counts */
7335 7339 ipfb->ipfb_count += count;
7336 7340 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7337 7341 atomic_add_32(&ill->ill_frag_count, count);
7338 7342 }
7339 7343 if (ret == IP_REASS_PARTIAL) {
7340 7344 goto reass_done;
7341 7345 } else if (ret == IP_REASS_FAILED) {
7342 7346 /* Reassembly failed. Free up all resources */
7343 7347 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7344 7348 for (t_mp = mp; t_mp != NULL; t_mp = t_mp->b_cont) {
7345 7349 IP_REASS_SET_START(t_mp, 0);
7346 7350 IP_REASS_SET_END(t_mp, 0);
7347 7351 }
7348 7352 freemsg(mp);
7349 7353 goto reass_done;
7350 7354 }
7351 7355 /* We will reach here iff 'ret' is IP_REASS_COMPLETE */
7352 7356 }
7353 7357 /*
7354 7358 * We have completed reassembly. Unhook the frag header from
7355 7359 * the reassembly list.
7356 7360 *
7357 7361 * Before we free the frag header, record the ECN info
7358 7362 * to report back to the transport.
7359 7363 */
7360 7364 ecn_info = ipf->ipf_ecn;
7361 7365 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmOKs);
7362 7366 ipfp = ipf->ipf_ptphn;
7363 7367
7364 7368 /* We need to supply these to caller */
7365 7369 if ((sum_flags = ipf->ipf_checksum_flags) != 0)
7366 7370 sum_val = ipf->ipf_checksum;
7367 7371 else
7368 7372 sum_val = 0;
7369 7373
7370 7374 mp1 = ipf->ipf_mp;
7371 7375 count = ipf->ipf_count;
7372 7376 ipf = ipf->ipf_hash_next;
7373 7377 if (ipf != NULL)
7374 7378 ipf->ipf_ptphn = ipfp;
7375 7379 ipfp[0] = ipf;
7376 7380 atomic_add_32(&ill->ill_frag_count, -count);
7377 7381 ASSERT(ipfb->ipfb_count >= count);
7378 7382 ipfb->ipfb_count -= count;
7379 7383 ipfb->ipfb_frag_pkts--;
7380 7384 mutex_exit(&ipfb->ipfb_lock);
7381 7385 /* Ditch the frag header. */
7382 7386 mp = mp1->b_cont;
7383 7387
7384 7388 freeb(mp1);
7385 7389
7386 7390 /* Restore original IP length in header. */
7387 7391 packet_size = (uint32_t)msgdsize(mp);
7388 7392 if (packet_size > IP_MAXPACKET) {
7389 7393 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7390 7394 ip_drop_input("Reassembled packet too large", mp, ill);
7391 7395 freemsg(mp);
7392 7396 return (NULL);
7393 7397 }
7394 7398
7395 7399 if (DB_REF(mp) > 1) {
7396 7400 mblk_t *mp2 = copymsg(mp);
7397 7401
7398 7402 if (mp2 == NULL) {
7399 7403 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7400 7404 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7401 7405 freemsg(mp);
7402 7406 return (NULL);
7403 7407 }
7404 7408 freemsg(mp);
7405 7409 mp = mp2;
7406 7410 }
7407 7411 ipha = (ipha_t *)mp->b_rptr;
7408 7412
7409 7413 ipha->ipha_length = htons((uint16_t)packet_size);
7410 7414 /* We're now complete, zip the frag state */
7411 7415 ipha->ipha_fragment_offset_and_flags = 0;
7412 7416 /* Record the ECN info. */
7413 7417 ipha->ipha_type_of_service &= 0xFC;
7414 7418 ipha->ipha_type_of_service |= ecn_info;
7415 7419
7416 7420 /* Update the receive attributes */
7417 7421 ira->ira_pktlen = packet_size;
7418 7422 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
7419 7423
7420 7424 /* Reassembly is successful; set checksum information in packet */
7421 7425 DB_CKSUM16(mp) = (uint16_t)sum_val;
7422 7426 DB_CKSUMFLAGS(mp) = sum_flags;
7423 7427 DB_CKSUMSTART(mp) = ira->ira_ip_hdr_length;
7424 7428
7425 7429 return (mp);
7426 7430 }
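
/*
 * The checksum arithmetic used twice above folds a 32-bit accumulated sum
 * back into 16 bits, one's-complement style; two folds suffice because the
 * first fold can carry at most once. Standalone version (illustrative
 * only, not part of this change):
 */
static uint16_t
example_cksum_fold(uint32_t sum)
{
	sum = (sum & 0xFFFF) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xFFFF) + (sum >> 16);	/* absorb any carry */
	return ((uint16_t)sum);
}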
7427 7431
7428 7432 /*
7429 7433 * Pullup function that should be used for IP input in order to
7430 7434  * ensure we do not lose the L2 source address; we need the L2 source
7431 7435 * address for IP_RECVSLLA and for ndp_input.
7432 7436 *
7433 7437 * We return either NULL or b_rptr.
7434 7438 */
7435 7439 void *
7436 7440 ip_pullup(mblk_t *mp, ssize_t len, ip_recv_attr_t *ira)
7437 7441 {
7438 7442 ill_t *ill = ira->ira_ill;
7439 7443
7440 7444 if (ip_rput_pullups++ == 0) {
7441 7445 (void) mi_strlog(ill->ill_rq, 1, SL_ERROR|SL_TRACE,
7442 7446 "ip_pullup: %s forced us to "
7443 7447 		    "pullup pkt, hdr len %ld, hdr addr %p",
7444 7448 ill->ill_name, len, (void *)mp->b_rptr);
7445 7449 }
7446 7450 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7447 7451 ip_setl2src(mp, ira, ira->ira_rill);
7448 7452 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7449 7453 if (!pullupmsg(mp, len))
7450 7454 return (NULL);
7451 7455 else
7452 7456 return (mp->b_rptr);
7453 7457 }
7454 7458
7455 7459 /*
7456 7460 * Make sure ira_l2src has an address. If we don't have one fill with zeros.
7457 7461 * When called from the ULP ira_rill will be NULL hence the caller has to
7458 7462 * pass in the ill.
7459 7463 */
7460 7464 /* ARGSUSED */
7461 7465 void
7462 7466 ip_setl2src(mblk_t *mp, ip_recv_attr_t *ira, ill_t *ill)
7463 7467 {
7464 7468 const uchar_t *addr;
7465 7469 int alen;
7466 7470
7467 7471 if (ira->ira_flags & IRAF_L2SRC_SET)
7468 7472 return;
7469 7473
7470 7474 ASSERT(ill != NULL);
7471 7475 alen = ill->ill_phys_addr_length;
7472 7476 ASSERT(alen <= sizeof (ira->ira_l2src));
7473 7477 if (ira->ira_mhip != NULL &&
7474 7478 (addr = ira->ira_mhip->mhi_saddr) != NULL) {
7475 7479 bcopy(addr, ira->ira_l2src, alen);
7476 7480 } else if ((ira->ira_flags & IRAF_L2SRC_LOOPBACK) &&
7477 7481 (addr = ill->ill_phys_addr) != NULL) {
7478 7482 bcopy(addr, ira->ira_l2src, alen);
7479 7483 } else {
7480 7484 bzero(ira->ira_l2src, alen);
7481 7485 }
7482 7486 ira->ira_flags |= IRAF_L2SRC_SET;
7483 7487 }
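/*
 * Editorial sketch (not part of the build): a ULP-level consumer has no
 * ira_rill, so it supplies the ill itself and can then copy the saved
 * link-layer source address, e.g. to satisfy IP_RECVSLLA ("buf" is a
 * hypothetical destination buffer):
 *
 *	if (!(ira->ira_flags & IRAF_L2SRC_SET))
 *		ip_setl2src(mp, ira, ill);
 *	bcopy(ira->ira_l2src, buf, ill->ill_phys_addr_length);
 */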
7484 7488
7485 7489 /*
7486 7490  * Check the IP header length and align the header.
7487 7491 */
7488 7492 mblk_t *
7489 7493 ip_check_and_align_header(mblk_t *mp, uint_t min_size, ip_recv_attr_t *ira)
7490 7494 {
7491 7495 ill_t *ill = ira->ira_ill;
7492 7496 ssize_t len;
7493 7497
7494 7498 len = MBLKL(mp);
7495 7499
7496 7500 if (!OK_32PTR(mp->b_rptr))
7497 7501 IP_STAT(ill->ill_ipst, ip_notaligned);
7498 7502 else
7499 7503 IP_STAT(ill->ill_ipst, ip_recv_pullup);
7500 7504
7501 7505 /* Guard against bogus device drivers */
7502 7506 if (len < 0) {
7503 7507 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7504 7508 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7505 7509 freemsg(mp);
7506 7510 return (NULL);
7507 7511 }
7508 7512
7509 7513 if (len == 0) {
7510 7514 /* GLD sometimes sends up mblk with b_rptr == b_wptr! */
7511 7515 mblk_t *mp1 = mp->b_cont;
7512 7516
7513 7517 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7514 7518 ip_setl2src(mp, ira, ira->ira_rill);
7515 7519 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7516 7520
7517 7521 freeb(mp);
7518 7522 mp = mp1;
7519 7523 if (mp == NULL)
7520 7524 return (NULL);
7521 7525
7522 7526 if (OK_32PTR(mp->b_rptr) && MBLKL(mp) >= min_size)
7523 7527 return (mp);
7524 7528 }
7525 7529 if (ip_pullup(mp, min_size, ira) == NULL) {
7526 7530 if (msgdsize(mp) < min_size) {
7527 7531 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7528 7532 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7529 7533 } else {
7530 7534 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7531 7535 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7532 7536 }
7533 7537 freemsg(mp);
7534 7538 return (NULL);
7535 7539 }
7536 7540 return (mp);
7537 7541 }
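/*
 * Editorial sketch (not part of the build): IPv4 input would invoke
 * this with the fixed header size before reading any header field:
 *
 *	mp = ip_check_and_align_header(mp, IP_SIMPLE_HDR_LENGTH, ira);
 *	if (mp == NULL)
 *		return;		(already counted and freed)
 *	ipha = (ipha_t *)mp->b_rptr;
 */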
7538 7542
7539 7543 /*
7540 7544 * Common code for IPv4 and IPv6 to check and pullup multi-mblks
7541 7545 */
7542 7546 mblk_t *
7543 7547 ip_check_length(mblk_t *mp, uchar_t *rptr, ssize_t len, uint_t pkt_len,
7544 7548 uint_t min_size, ip_recv_attr_t *ira)
7545 7549 {
7546 7550 ill_t *ill = ira->ira_ill;
7547 7551
7548 7552 /*
7549 7553 * Make sure we have data length consistent
7550 7554 * with the IP header.
7551 7555 */
7552 7556 if (mp->b_cont == NULL) {
7553 7557 /* pkt_len is based on ipha_len, not the mblk length */
7554 7558 if (pkt_len < min_size) {
7555 7559 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7556 7560 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7557 7561 freemsg(mp);
7558 7562 return (NULL);
7559 7563 }
7560 7564 if (len < 0) {
7561 7565 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7562 7566 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7563 7567 freemsg(mp);
7564 7568 return (NULL);
7565 7569 }
7566 7570 /* Drop any pad */
7567 7571 mp->b_wptr = rptr + pkt_len;
7568 7572 } else if ((len += msgdsize(mp->b_cont)) != 0) {
7569 7573 ASSERT(pkt_len >= min_size);
7570 7574 if (pkt_len < min_size) {
7571 7575 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7572 7576 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7573 7577 freemsg(mp);
7574 7578 return (NULL);
7575 7579 }
7576 7580 if (len < 0) {
7577 7581 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7578 7582 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7579 7583 freemsg(mp);
7580 7584 return (NULL);
7581 7585 }
7582 7586 /* Drop any pad */
7583 7587 (void) adjmsg(mp, -len);
7584 7588 /*
7585 7589 * adjmsg may have freed an mblk from the chain, hence
7586 7590 * invalidate any hw checksum here. This will force IP to
7587 7591 * calculate the checksum in sw, but only for this packet.
7588 7592 */
7589 7593 DB_CKSUMFLAGS(mp) = 0;
7590 7594 IP_STAT(ill->ill_ipst, ip_multimblk);
7591 7595 }
7592 7596 return (mp);
7593 7597 }
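/*
 * Editorial sketch (not part of the build): the caller is expected to
 * pass len = MBLKL(mp) - pkt_len, i.e. how far the first mblk over- or
 * undershoots what ipha_length claims. With pkt_len = 60 and a 54-byte
 * single mblk, len = -6 and the packet counts as truncated; if instead
 * a 14-byte b_cont follows, the multi-mblk arm computes
 * len = -6 + 14 = 8 bytes of link-layer pad and trims it with
 * adjmsg(mp, -8).
 */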
7594 7598
7595 7599 /*
7596 7600 * Check that the IPv4 opt_len is consistent with the packet and pullup
7597 7601 * the options.
7598 7602 */
7599 7603 mblk_t *
7600 7604 ip_check_optlen(mblk_t *mp, ipha_t *ipha, uint_t opt_len, uint_t pkt_len,
7601 7605 ip_recv_attr_t *ira)
7602 7606 {
7603 7607 ill_t *ill = ira->ira_ill;
7604 7608 ssize_t len;
7605 7609
7606 7610 /* Assume no IPv6 packets arrive over the IPv4 queue */
7607 7611 if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) {
7608 7612 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7609 7613 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInWrongIPVersion);
7610 7614 ip_drop_input("IPvN packet on IPv4 ill", mp, ill);
7611 7615 freemsg(mp);
7612 7616 return (NULL);
7613 7617 }
7614 7618
7615 7619 if (opt_len > (15 - IP_SIMPLE_HDR_LENGTH_IN_WORDS)) {
7616 7620 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7617 7621 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7618 7622 freemsg(mp);
7619 7623 return (NULL);
7620 7624 }
7621 7625 /*
7622 7626 * Recompute complete header length and make sure we
7623 7627 * have access to all of it.
7624 7628 */
7625 7629 len = ((size_t)opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2;
7626 7630 if (len > (mp->b_wptr - mp->b_rptr)) {
7627 7631 if (len > pkt_len) {
7628 7632 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7629 7633 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7630 7634 freemsg(mp);
7631 7635 return (NULL);
7632 7636 }
7633 7637 if (ip_pullup(mp, len, ira) == NULL) {
7634 7638 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7635 7639 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7636 7640 freemsg(mp);
7637 7641 return (NULL);
7638 7642 }
7639 7643 }
7640 7644 return (mp);
7641 7645 }
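/*
 * Editorial sketch (not part of the build): opt_len counts 32-bit words
 * beyond the 5-word fixed header, so the complete header length is
 *
 *	len = (opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2;
 *
 * e.g. the maximum opt_len of 10 (the IHL field holds at most 15
 * words) yields a 60-byte header.
 */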
7642 7646
7643 7647 /*
7644 7648 * Returns a new ire, or the same ire, or NULL.
7645 7649 * If a different IRE is returned, then it is held; the caller
7646 7650 * needs to release it.
7647 7651 * In no case is there any hold/release on the ire argument.
7648 7652 */
7649 7653 ire_t *
7650 7654 ip_check_multihome(void *addr, ire_t *ire, ill_t *ill)
7651 7655 {
7652 7656 ire_t *new_ire;
7653 7657 ill_t *ire_ill;
7654 7658 uint_t ifindex;
7655 7659 ip_stack_t *ipst = ill->ill_ipst;
7656 7660 boolean_t strict_check = B_FALSE;
7657 7661
7658 7662 /*
7659 7663 * IPMP common case: if IRE and ILL are in the same group, there's no
7660 7664 * issue (e.g. packet received on an underlying interface matched an
7661 7665 * IRE_LOCAL on its associated group interface).
7662 7666 */
7663 7667 ASSERT(ire->ire_ill != NULL);
7664 7668 if (IS_IN_SAME_ILLGRP(ill, ire->ire_ill))
7665 7669 return (ire);
7666 7670
7667 7671 /*
7668 7672 * Do another ire lookup here, using the ingress ill, to see if the
7669 7673 * interface is in a usesrc group.
7670 7674 * As long as the ills belong to the same group, we don't consider
7671 7675 * them to be arriving on the wrong interface. Thus, if the switch
7672 7676 * is doing inbound load spreading, we won't drop packets when the
7673 7677 * ip*_strict_dst_multihoming switch is on.
7674 7678 * We also need to check for IPIF_UNNUMBERED point2point interfaces
7675 7679 * where the local address may not be unique. In this case we were
7676 7680 * at the mercy of the initial ire lookup and the IRE_LOCAL it
7677 7681 * actually returned. The new lookup, which is more specific, should
7678 7682 * only find the IRE_LOCAL associated with the ingress ill if one
7679 7683 * exists.
7680 7684 */
7681 7685 if (ire->ire_ipversion == IPV4_VERSION) {
7682 7686 if (ipst->ips_ip_strict_dst_multihoming)
7683 7687 strict_check = B_TRUE;
7684 7688 new_ire = ire_ftable_lookup_v4(*((ipaddr_t *)addr), 0, 0,
7685 7689 IRE_LOCAL, ill, ALL_ZONES, NULL,
7686 7690 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7687 7691 } else {
7688 7692 ASSERT(!IN6_IS_ADDR_MULTICAST((in6_addr_t *)addr));
7689 7693 if (ipst->ips_ipv6_strict_dst_multihoming)
7690 7694 strict_check = B_TRUE;
7691 7695 new_ire = ire_ftable_lookup_v6((in6_addr_t *)addr, NULL, NULL,
7692 7696 IRE_LOCAL, ill, ALL_ZONES, NULL,
7693 7697 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7694 7698 }
7695 7699 /*
7696 7700 * If the same ire that was returned in ip_input() is found then this
7697 7701 * is an indication that usesrc groups are in use. The packet
7698 7702 * arrived on a different ill in the group than the one associated with
7699 7703 * the destination address. If a different ire was found then the same
7700 7704 * IP address must be hosted on multiple ills. This is possible with
7701 7705 * unnumbered point2point interfaces. We switch to use this new ire in
7702 7706 * order to have accurate interface statistics.
7703 7707 */
7704 7708 if (new_ire != NULL) {
7705 7709 		/* new_ire is held; if it differs from ire, the caller must release it */
7706 7710 if (new_ire != ire)
7707 7711 return (new_ire);
7708 7712 /* Unchanged */
7709 7713 ire_refrele(new_ire);
7710 7714 return (ire);
7711 7715 }
7712 7716
7713 7717 /*
7714 7718 * Chase pointers once and store locally.
7715 7719 */
7716 7720 ASSERT(ire->ire_ill != NULL);
7717 7721 ire_ill = ire->ire_ill;
7718 7722 ifindex = ill->ill_usesrc_ifindex;
7719 7723
7720 7724 /*
7721 7725 * Check if it's a legal address on the 'usesrc' interface.
7722 7726 * For IPMP data addresses the IRE_LOCAL is the upper, hence we
7723 7727 * can just check phyint_ifindex.
7724 7728 */
7725 7729 if (ifindex != 0 && ifindex == ire_ill->ill_phyint->phyint_ifindex) {
7726 7730 return (ire);
7727 7731 }
7728 7732
7729 7733 /*
7730 7734 * If the ip*_strict_dst_multihoming switch is on then we can
7731 7735 * only accept this packet if the interface is marked as routing.
7732 7736 */
7733 7737 if (!(strict_check))
7734 7738 return (ire);
7735 7739
7736 7740 if ((ill->ill_flags & ire->ire_ill->ill_flags & ILLF_ROUTER) != 0) {
7737 7741 return (ire);
7738 7742 }
7739 7743 return (NULL);
7740 7744 }
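/*
 * Editorial sketch (not part of the build): the caller-side contract,
 * releasing the substituted ire when one comes back held:
 *
 *	new_ire = ip_check_multihome(&ipha->ipha_dst, ire, ill);
 *	if (new_ire == NULL)
 *		goto drop;	(strict multihoming rejected it)
 *	if (new_ire != ire) {
 *		... account against new_ire instead ...
 *		ire_refrele(new_ire);
 *	}
 */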
7741 7745
7742 7746 /*
7743 7747 * This function is used to construct a mac_header_info_s from a
7744 7748 * DL_UNITDATA_IND message.
7745 7749  * The address fields in the mhi structure point into the message;
7746 7750 * thus the caller can't use those fields after freeing the message.
7747 7751 *
7748 7752 * We determine whether the packet received is a non-unicast packet
7749 7753 * and in doing so, determine whether or not it is broadcast vs multicast.
7750 7754 * For it to be a broadcast packet, we must have the appropriate mblk_t
7751 7755 * hanging off the ill_t. If this is either not present or doesn't match
7752 7756  * the destination MAC address in the DL_UNITDATA_IND, the packet is deemed
7753 7757  * to be multicast. Thus on NICs that have no broadcast address (or no
7754 7758  * capability for one, such as point-to-point links) the packet can never
7755 7759  * be reported as broadcast.
7756 7760 */
7757 7761 void
7758 7762 ip_dlur_to_mhi(ill_t *ill, mblk_t *mb, struct mac_header_info_s *mhip)
7759 7763 {
7760 7764 dl_unitdata_ind_t *ind = (dl_unitdata_ind_t *)mb->b_rptr;
7761 7765 mblk_t *bmp;
7762 7766 uint_t extra_offset;
7763 7767
7764 7768 bzero(mhip, sizeof (struct mac_header_info_s));
7765 7769
7766 7770 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7767 7771
7768 7772 if (ill->ill_sap_length < 0)
7769 7773 extra_offset = 0;
7770 7774 else
7771 7775 extra_offset = ill->ill_sap_length;
7772 7776
7773 7777 mhip->mhi_daddr = (uchar_t *)ind + ind->dl_dest_addr_offset +
7774 7778 extra_offset;
7775 7779 mhip->mhi_saddr = (uchar_t *)ind + ind->dl_src_addr_offset +
7776 7780 extra_offset;
7777 7781
7778 7782 if (!ind->dl_group_address)
7779 7783 return;
7780 7784
7781 7785 /* Multicast or broadcast */
7782 7786 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7783 7787
7784 7788 if (ind->dl_dest_addr_offset > sizeof (*ind) &&
7785 7789 ind->dl_dest_addr_offset + ind->dl_dest_addr_length < MBLKL(mb) &&
7786 7790 (bmp = ill->ill_bcast_mp) != NULL) {
7787 7791 dl_unitdata_req_t *dlur;
7788 7792 uint8_t *bphys_addr;
7789 7793
7790 7794 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7791 7795 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset +
7792 7796 extra_offset;
7793 7797
7794 7798 if (bcmp(mhip->mhi_daddr, bphys_addr,
7795 7799 ind->dl_dest_addr_length) == 0)
7796 7800 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7797 7801 }
7798 7802 }
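/*
 * Editorial sketch (not part of the build, and an assumption about the
 * DLPI convention): a negative ill_sap_length indicates the SAP trails
 * the physical address within the DLPI address, while a positive value
 * means it precedes it. For Ethernet (sap length -2) the address
 * starts right at dl_dest_addr_offset, so extra_offset is 0; with a
 * 2-byte leading SAP, extra_offset would be 2.
 */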
7799 7803
7800 7804 /*
7801 7805 * This function is used to construct a mac_header_info_s from a
7802 7806 * M_DATA fastpath message from a DLPI driver.
7803 7807  * The address fields in the mhi structure point into the message;
7804 7808 * thus the caller can't use those fields after freeing the message.
7805 7809 *
7806 7810 * We determine whether the packet received is a non-unicast packet
7807 7811 * and in doing so, determine whether or not it is broadcast vs multicast.
7808 7812 * For it to be a broadcast packet, we must have the appropriate mblk_t
7809 7813 * hanging off the ill_t. If this is either not present or doesn't match
7810 7814  * the destination MAC address in the frame header, the packet is deemed
7811 7815  * to be multicast. Thus on NICs that have no broadcast address (or no
7812 7816  * capability for one, such as point-to-point links) the packet can never
7813 7817  * be reported as broadcast.
7814 7818 */
7815 7819 void
7816 7820 ip_mdata_to_mhi(ill_t *ill, mblk_t *mp, struct mac_header_info_s *mhip)
7817 7821 {
7818 7822 mblk_t *bmp;
7819 7823 struct ether_header *pether;
7820 7824
7821 7825 bzero(mhip, sizeof (struct mac_header_info_s));
7822 7826
7823 7827 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7824 7828
7825 7829 pether = (struct ether_header *)((char *)mp->b_rptr
7826 7830 - sizeof (struct ether_header));
7827 7831
7828 7832 /*
7829 7833 * Make sure the interface is an ethernet type, since we don't
7830 7834 * know the header format for anything but Ethernet. Also make
7831 7835 * sure we are pointing correctly above db_base.
7832 7836 */
7833 7837 if (ill->ill_type != IFT_ETHER)
7834 7838 return;
7835 7839
7836 7840 retry:
7837 7841 if ((uchar_t *)pether < mp->b_datap->db_base)
7838 7842 return;
7839 7843
7840 7844 /* Is there a VLAN tag? */
7841 7845 if (ill->ill_isv6) {
7842 7846 if (pether->ether_type != htons(ETHERTYPE_IPV6)) {
7843 7847 pether = (struct ether_header *)((char *)pether - 4);
7844 7848 goto retry;
7845 7849 }
7846 7850 } else {
7847 7851 if (pether->ether_type != htons(ETHERTYPE_IP)) {
7848 7852 pether = (struct ether_header *)((char *)pether - 4);
7849 7853 goto retry;
7850 7854 }
7851 7855 }
7852 7856 mhip->mhi_daddr = (uchar_t *)&pether->ether_dhost;
7853 7857 mhip->mhi_saddr = (uchar_t *)&pether->ether_shost;
7854 7858
7855 7859 if (!(mhip->mhi_daddr[0] & 0x01))
7856 7860 return;
7857 7861
7858 7862 /* Multicast or broadcast */
7859 7863 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7860 7864
7861 7865 if ((bmp = ill->ill_bcast_mp) != NULL) {
7862 7866 dl_unitdata_req_t *dlur;
7863 7867 uint8_t *bphys_addr;
7864 7868 uint_t addrlen;
7865 7869
7866 7870 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7867 7871 addrlen = dlur->dl_dest_addr_length;
7868 7872 if (ill->ill_sap_length < 0) {
7869 7873 bphys_addr = (uchar_t *)dlur +
7870 7874 dlur->dl_dest_addr_offset;
7871 7875 addrlen += ill->ill_sap_length;
7872 7876 } else {
7873 7877 bphys_addr = (uchar_t *)dlur +
7874 7878 dlur->dl_dest_addr_offset +
7875 7879 ill->ill_sap_length;
7876 7880 addrlen -= ill->ill_sap_length;
7877 7881 }
7878 7882 if (bcmp(mhip->mhi_daddr, bphys_addr, addrlen) == 0)
7879 7883 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7880 7884 }
7881 7885 }
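/*
 * Editorial sketch (not part of the build): the retry loop above backs
 * the presumed Ethernet header out from under b_rptr one 802.1Q tag
 * (4 bytes) at a time until the 2-byte type field lines up with
 * ETHERTYPE_IP or ETHERTYPE_IPV6, giving up once it would dereference
 * below db_base. For an untagged frame the first probe at
 * b_rptr - sizeof (struct ether_header) matches immediately.
 */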
7882 7886
7883 7887 /*
7884 7888 * Handle anything but M_DATA messages
7885 7889 * We see the DL_UNITDATA_IND which are part
7886 7890 * of the data path, and also the other messages from the driver.
7887 7891 */
7888 7892 void
7889 7893 ip_rput_notdata(ill_t *ill, mblk_t *mp)
7890 7894 {
7891 7895 mblk_t *first_mp;
7892 7896 struct iocblk *iocp;
7893 7897 struct mac_header_info_s mhi;
7894 7898
7895 7899 switch (DB_TYPE(mp)) {
7896 7900 case M_PROTO:
7897 7901 case M_PCPROTO: {
7898 7902 if (((dl_unitdata_ind_t *)mp->b_rptr)->dl_primitive !=
7899 7903 DL_UNITDATA_IND) {
7900 7904 /* Go handle anything other than data elsewhere. */
7901 7905 ip_rput_dlpi(ill, mp);
7902 7906 return;
7903 7907 }
7904 7908
7905 7909 first_mp = mp;
7906 7910 mp = first_mp->b_cont;
7907 7911 first_mp->b_cont = NULL;
7908 7912
7909 7913 if (mp == NULL) {
7910 7914 freeb(first_mp);
7911 7915 return;
7912 7916 }
7913 7917 ip_dlur_to_mhi(ill, first_mp, &mhi);
7914 7918 if (ill->ill_isv6)
7915 7919 ip_input_v6(ill, NULL, mp, &mhi);
7916 7920 else
7917 7921 ip_input(ill, NULL, mp, &mhi);
7918 7922
7919 7923 /* Ditch the DLPI header. */
7920 7924 freeb(first_mp);
7921 7925 return;
7922 7926 }
7923 7927 case M_IOCACK:
7924 7928 iocp = (struct iocblk *)mp->b_rptr;
7925 7929 switch (iocp->ioc_cmd) {
7926 7930 case DL_IOC_HDR_INFO:
7927 7931 ill_fastpath_ack(ill, mp);
7928 7932 return;
7929 7933 default:
7930 7934 putnext(ill->ill_rq, mp);
7931 7935 return;
7932 7936 }
7933 7937 /* FALLTHRU */
7934 7938 case M_ERROR:
7935 7939 case M_HANGUP:
7936 7940 mutex_enter(&ill->ill_lock);
7937 7941 if (ill->ill_state_flags & ILL_CONDEMNED) {
7938 7942 mutex_exit(&ill->ill_lock);
7939 7943 freemsg(mp);
7940 7944 return;
7941 7945 }
7942 7946 ill_refhold_locked(ill);
7943 7947 mutex_exit(&ill->ill_lock);
7944 7948 qwriter_ip(ill, ill->ill_rq, mp, ip_rput_other, CUR_OP,
7945 7949 B_FALSE);
7946 7950 return;
7947 7951 case M_CTL:
7948 7952 putnext(ill->ill_rq, mp);
7949 7953 return;
7950 7954 case M_IOCNAK:
7951 7955 ip1dbg(("got iocnak "));
7952 7956 iocp = (struct iocblk *)mp->b_rptr;
7953 7957 switch (iocp->ioc_cmd) {
7954 7958 case DL_IOC_HDR_INFO:
7955 7959 ip_rput_other(NULL, ill->ill_rq, mp, NULL);
7956 7960 return;
7957 7961 default:
7958 7962 break;
7959 7963 }
7960 7964 /* FALLTHRU */
7961 7965 default:
7962 7966 putnext(ill->ill_rq, mp);
7963 7967 return;
7964 7968 }
7965 7969 }
7966 7970
7967 7971 /* Read side put procedure. Packets coming from the wire arrive here. */
7968 7972 void
7969 7973 ip_rput(queue_t *q, mblk_t *mp)
7970 7974 {
7971 7975 ill_t *ill;
7972 7976 union DL_primitives *dl;
7973 7977
7974 7978 ill = (ill_t *)q->q_ptr;
7975 7979
7976 7980 if (ill->ill_state_flags & (ILL_CONDEMNED | ILL_LL_SUBNET_PENDING)) {
7977 7981 /*
7978 7982 * If things are opening or closing, only accept high-priority
7979 7983 * DLPI messages. (On open ill->ill_ipif has not yet been
7980 7984 * created; on close, things hanging off the ill may have been
7981 7985 * freed already.)
7982 7986 */
7983 7987 dl = (union DL_primitives *)mp->b_rptr;
7984 7988 if (DB_TYPE(mp) != M_PCPROTO ||
7985 7989 dl->dl_primitive == DL_UNITDATA_IND) {
7986 7990 inet_freemsg(mp);
7987 7991 return;
7988 7992 }
7989 7993 }
7990 7994 if (DB_TYPE(mp) == M_DATA) {
7991 7995 struct mac_header_info_s mhi;
7992 7996
7993 7997 ip_mdata_to_mhi(ill, mp, &mhi);
7994 7998 ip_input(ill, NULL, mp, &mhi);
7995 7999 } else {
7996 8000 ip_rput_notdata(ill, mp);
7997 8001 }
7998 8002 }
7999 8003
8000 8004 /*
8001 8005 * Move the information to a copy.
8002 8006 */
8003 8007 mblk_t *
8004 8008 ip_fix_dbref(mblk_t *mp, ip_recv_attr_t *ira)
8005 8009 {
8006 8010 mblk_t *mp1;
8007 8011 ill_t *ill = ira->ira_ill;
8008 8012 ip_stack_t *ipst = ill->ill_ipst;
8009 8013
8010 8014 IP_STAT(ipst, ip_db_ref);
8011 8015
8012 8016 	/* Make sure we have ira_l2src before we lose the original mblk */
8013 8017 if (!(ira->ira_flags & IRAF_L2SRC_SET))
8014 8018 ip_setl2src(mp, ira, ira->ira_rill);
8015 8019
8016 8020 mp1 = copymsg(mp);
8017 8021 if (mp1 == NULL) {
8018 8022 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
8019 8023 ip_drop_input("ipIfStatsInDiscards", mp, ill);
8020 8024 freemsg(mp);
8021 8025 return (NULL);
8022 8026 }
8023 8027 /* preserve the hardware checksum flags and data, if present */
8024 8028 if (DB_CKSUMFLAGS(mp) != 0) {
8025 8029 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
8026 8030 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
8027 8031 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
8028 8032 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
8029 8033 DB_CKSUM16(mp1) = DB_CKSUM16(mp);
8030 8034 }
8031 8035 freemsg(mp);
8032 8036 return (mp1);
8033 8037 }
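/*
 * Editorial sketch (not part of the build): callers copy only when the
 * dblk is shared, since a header that is about to be modified must be
 * private (compare the DB_REF check in the reassembly path above):
 *
 *	if (DB_REF(mp) > 1) {
 *		mp = ip_fix_dbref(mp, ira);
 *		if (mp == NULL)
 *			return;		(already counted and freed)
 *	}
 */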
8034 8038
8035 8039 static void
8036 8040 ip_dlpi_error(ill_t *ill, t_uscalar_t prim, t_uscalar_t dl_err,
8037 8041 t_uscalar_t err)
8038 8042 {
8039 8043 if (dl_err == DL_SYSERR) {
8040 8044 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8041 8045 "%s: %s failed: DL_SYSERR (errno %u)\n",
8042 8046 ill->ill_name, dl_primstr(prim), err);
8043 8047 return;
8044 8048 }
8045 8049
8046 8050 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8047 8051 "%s: %s failed: %s\n", ill->ill_name, dl_primstr(prim),
8048 8052 dl_errstr(dl_err));
8049 8053 }
8050 8054
8051 8055 /*
8052 8056 * ip_rput_dlpi is called by ip_rput to handle all DLPI messages other
8053 8057 * than DL_UNITDATA_IND messages. If we need to process this message
8054 8058 * exclusively, we call qwriter_ip, in which case we also need to call
8055 8059 * ill_refhold before that, since qwriter_ip does an ill_refrele.
8056 8060 */
8057 8061 void
8058 8062 ip_rput_dlpi(ill_t *ill, mblk_t *mp)
8059 8063 {
8060 8064 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8061 8065 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8062 8066 queue_t *q = ill->ill_rq;
8063 8067 t_uscalar_t prim = dloa->dl_primitive;
8064 8068 t_uscalar_t reqprim = DL_PRIM_INVAL;
8065 8069
8066 8070 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi",
8067 8071 char *, dl_primstr(prim), ill_t *, ill);
8068 8072 ip1dbg(("ip_rput_dlpi"));
8069 8073
8070 8074 /*
8071 8075 * If we received an ACK but didn't send a request for it, then it
8072 8076 * can't be part of any pending operation; discard up-front.
8073 8077 */
8074 8078 switch (prim) {
8075 8079 case DL_ERROR_ACK:
8076 8080 reqprim = dlea->dl_error_primitive;
8077 8081 ip2dbg(("ip_rput_dlpi(%s): DL_ERROR_ACK for %s (0x%x): %s "
8078 8082 "(0x%x), unix %u\n", ill->ill_name, dl_primstr(reqprim),
8079 8083 reqprim, dl_errstr(dlea->dl_errno), dlea->dl_errno,
8080 8084 dlea->dl_unix_errno));
8081 8085 break;
8082 8086 case DL_OK_ACK:
8083 8087 reqprim = dloa->dl_correct_primitive;
8084 8088 break;
8085 8089 case DL_INFO_ACK:
8086 8090 reqprim = DL_INFO_REQ;
8087 8091 break;
8088 8092 case DL_BIND_ACK:
8089 8093 reqprim = DL_BIND_REQ;
8090 8094 break;
8091 8095 case DL_PHYS_ADDR_ACK:
8092 8096 reqprim = DL_PHYS_ADDR_REQ;
8093 8097 break;
8094 8098 case DL_NOTIFY_ACK:
8095 8099 reqprim = DL_NOTIFY_REQ;
8096 8100 break;
8097 8101 case DL_CAPABILITY_ACK:
8098 8102 reqprim = DL_CAPABILITY_REQ;
8099 8103 break;
8100 8104 }
8101 8105
8102 8106 if (prim != DL_NOTIFY_IND) {
8103 8107 if (reqprim == DL_PRIM_INVAL ||
8104 8108 !ill_dlpi_pending(ill, reqprim)) {
8105 8109 /* Not a DLPI message we support or expected */
8106 8110 freemsg(mp);
8107 8111 return;
8108 8112 }
8109 8113 ip1dbg(("ip_rput: received %s for %s\n", dl_primstr(prim),
8110 8114 dl_primstr(reqprim)));
8111 8115 }
8112 8116
8113 8117 switch (reqprim) {
8114 8118 case DL_UNBIND_REQ:
8115 8119 /*
8116 8120 * NOTE: we mark the unbind as complete even if we got a
8117 8121 * DL_ERROR_ACK, since there's not much else we can do.
8118 8122 */
8119 8123 mutex_enter(&ill->ill_lock);
8120 8124 ill->ill_state_flags &= ~ILL_DL_UNBIND_IN_PROGRESS;
8121 8125 cv_signal(&ill->ill_cv);
8122 8126 mutex_exit(&ill->ill_lock);
8123 8127 break;
8124 8128
8125 8129 case DL_ENABMULTI_REQ:
8126 8130 if (prim == DL_OK_ACK) {
8127 8131 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8128 8132 ill->ill_dlpi_multicast_state = IDS_OK;
8129 8133 }
8130 8134 break;
8131 8135 }
8132 8136
8133 8137 /*
8134 8138 * The message is one we're waiting for (or DL_NOTIFY_IND), but we
8135 8139 * need to become writer to continue to process it. Because an
8136 8140 * exclusive operation doesn't complete until replies to all queued
8137 8141 * DLPI messages have been received, we know we're in the middle of an
8138 8142 * exclusive operation and pass CUR_OP (except for DL_NOTIFY_IND).
8139 8143 *
8140 8144 * As required by qwriter_ip(), we refhold the ill; it will refrele.
8141 8145 * Since this is on the ill stream we unconditionally bump up the
8142 8146 * refcount without doing ILL_CAN_LOOKUP().
8143 8147 */
8144 8148 ill_refhold(ill);
8145 8149 if (prim == DL_NOTIFY_IND)
8146 8150 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, NEW_OP, B_FALSE);
8147 8151 else
8148 8152 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, CUR_OP, B_FALSE);
8149 8153 }
8150 8154
8151 8155 /*
8152 8156 * Handling of DLPI messages that require exclusive access to the ipsq.
8153 8157 *
8154 8158 * Need to do ipsq_pending_mp_get on ioctl completion, which could
8155 8159 * happen here. (along with mi_copy_done)
8156 8160 */
8157 8161 /* ARGSUSED */
8158 8162 static void
8159 8163 ip_rput_dlpi_writer(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8160 8164 {
8161 8165 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8162 8166 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8163 8167 int err = 0;
8164 8168 ill_t *ill = (ill_t *)q->q_ptr;
8165 8169 ipif_t *ipif = NULL;
8166 8170 mblk_t *mp1 = NULL;
8167 8171 conn_t *connp = NULL;
8168 8172 t_uscalar_t paddrreq;
8169 8173 mblk_t *mp_hw;
8170 8174 boolean_t success;
8171 8175 boolean_t ioctl_aborted = B_FALSE;
8172 8176 boolean_t log = B_TRUE;
8173 8177
8174 8178 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer",
8175 8179 char *, dl_primstr(dloa->dl_primitive), ill_t *, ill);
8176 8180
8177 8181 ip1dbg(("ip_rput_dlpi_writer .."));
8178 8182 ASSERT(ipsq->ipsq_xop == ill->ill_phyint->phyint_ipsq->ipsq_xop);
8179 8183 ASSERT(IAM_WRITER_ILL(ill));
8180 8184
8181 8185 ipif = ipsq->ipsq_xop->ipx_pending_ipif;
8182 8186 /*
8183 8187 * The current ioctl could have been aborted by the user and a new
8184 8188 * ioctl to bring up another ill could have started. We could still
8185 8189 * get a response from the driver later.
8186 8190 */
8187 8191 if (ipif != NULL && ipif->ipif_ill != ill)
8188 8192 ioctl_aborted = B_TRUE;
8189 8193
8190 8194 switch (dloa->dl_primitive) {
8191 8195 case DL_ERROR_ACK:
8192 8196 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for %s\n",
8193 8197 dl_primstr(dlea->dl_error_primitive)));
8194 8198
8195 8199 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer error",
8196 8200 char *, dl_primstr(dlea->dl_error_primitive),
8197 8201 ill_t *, ill);
8198 8202
8199 8203 switch (dlea->dl_error_primitive) {
8200 8204 case DL_DISABMULTI_REQ:
8201 8205 ill_dlpi_done(ill, dlea->dl_error_primitive);
8202 8206 break;
8203 8207 case DL_PROMISCON_REQ:
8204 8208 case DL_PROMISCOFF_REQ:
8205 8209 case DL_UNBIND_REQ:
8206 8210 case DL_ATTACH_REQ:
8207 8211 case DL_INFO_REQ:
8208 8212 ill_dlpi_done(ill, dlea->dl_error_primitive);
8209 8213 break;
8210 8214 case DL_NOTIFY_REQ:
8211 8215 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8212 8216 log = B_FALSE;
8213 8217 break;
8214 8218 case DL_PHYS_ADDR_REQ:
8215 8219 /*
8216 8220 * For IPv6 only, there are two additional
8217 8221 * phys_addr_req's sent to the driver to get the
8218 8222 * IPv6 token and lla. This allows IP to acquire
8219 8223 * the hardware address format for a given interface
8220 8224 * without having built in knowledge of the hardware
8221 8225 * address. ill_phys_addr_pend keeps track of the last
8222 8226 * DL_PAR sent so we know which response we are
8223 8227 * dealing with. ill_dlpi_done will update
8224 8228 * ill_phys_addr_pend when it sends the next req.
8225 8229 * We don't complete the IOCTL until all three DL_PARs
8226 8230 * have been attempted, so set *_len to 0 and break.
8227 8231 */
8228 8232 paddrreq = ill->ill_phys_addr_pend;
8229 8233 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8230 8234 if (paddrreq == DL_IPV6_TOKEN) {
8231 8235 ill->ill_token_length = 0;
8232 8236 log = B_FALSE;
8233 8237 break;
8234 8238 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8235 8239 ill->ill_nd_lla_len = 0;
8236 8240 log = B_FALSE;
8237 8241 break;
8238 8242 }
8239 8243 /*
8240 8244 * Something went wrong with the DL_PHYS_ADDR_REQ.
8241 8245 * We presumably have an IOCTL hanging out waiting
8242 8246 * for completion. Find it and complete the IOCTL
8243 8247 * with the error noted.
8244 8248 * However, ill_dl_phys was called on an ill queue
8245 8249 * (from SIOCSLIFNAME), thus conn_pending_ill is not
8246 8250 * set. But the ioctl is known to be pending on ill_wq.
8247 8251 */
8248 8252 if (!ill->ill_ifname_pending)
8249 8253 break;
8250 8254 ill->ill_ifname_pending = 0;
8251 8255 if (!ioctl_aborted)
8252 8256 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8253 8257 if (mp1 != NULL) {
8254 8258 /*
8255 8259 * This operation (SIOCSLIFNAME) must have
8256 8260 * happened on the ill. Assert there is no conn
8257 8261 */
8258 8262 ASSERT(connp == NULL);
8259 8263 q = ill->ill_wq;
8260 8264 }
8261 8265 break;
8262 8266 case DL_BIND_REQ:
8263 8267 ill_dlpi_done(ill, DL_BIND_REQ);
8264 8268 if (ill->ill_ifname_pending)
8265 8269 break;
8266 8270 mutex_enter(&ill->ill_lock);
8267 8271 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8268 8272 mutex_exit(&ill->ill_lock);
8269 8273 /*
8270 8274 * Something went wrong with the bind. We presumably
8271 8275 * have an IOCTL hanging out waiting for completion.
8272 8276 * Find it, take down the interface that was coming
8273 8277 * up, and complete the IOCTL with the error noted.
8274 8278 */
8275 8279 if (!ioctl_aborted)
8276 8280 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8277 8281 if (mp1 != NULL) {
8278 8282 /*
8279 8283 * This might be a result of a DL_NOTE_REPLUMB
8280 8284 * notification. In that case, connp is NULL.
8281 8285 */
8282 8286 if (connp != NULL)
8283 8287 q = CONNP_TO_WQ(connp);
8284 8288
8285 8289 (void) ipif_down(ipif, NULL, NULL);
8286 8290 /* error is set below the switch */
8287 8291 }
8288 8292 break;
8289 8293 case DL_ENABMULTI_REQ:
8290 8294 ill_dlpi_done(ill, DL_ENABMULTI_REQ);
8291 8295
8292 8296 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8293 8297 ill->ill_dlpi_multicast_state = IDS_FAILED;
8294 8298 if (ill->ill_dlpi_multicast_state == IDS_FAILED) {
8295 8299
8296 8300 printf("ip: joining multicasts failed (%d)"
8297 8301 " on %s - will use link layer "
8298 8302 "broadcasts for multicast\n",
8299 8303 dlea->dl_errno, ill->ill_name);
8300 8304
8301 8305 /*
8302 8306 * Set up for multi_bcast; We are the
8303 8307 * writer, so ok to access ill->ill_ipif
8304 8308 * without any lock.
8305 8309 */
8306 8310 mutex_enter(&ill->ill_phyint->phyint_lock);
8307 8311 ill->ill_phyint->phyint_flags |=
8308 8312 PHYI_MULTI_BCAST;
8309 8313 mutex_exit(&ill->ill_phyint->phyint_lock);
8310 8314
8311 8315 }
8312 8316 freemsg(mp); /* Don't want to pass this up */
8313 8317 return;
8314 8318 case DL_CAPABILITY_REQ:
8315 8319 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for "
8316 8320 "DL_CAPABILITY REQ\n"));
8317 8321 if (ill->ill_dlpi_capab_state == IDCS_PROBE_SENT)
8318 8322 ill->ill_dlpi_capab_state = IDCS_FAILED;
8319 8323 ill_capability_done(ill);
8320 8324 freemsg(mp);
8321 8325 return;
8322 8326 }
8323 8327 /*
8324 8328 * Note the error for IOCTL completion (mp1 is set when
8325 8329 * ready to complete ioctl). If ill_ifname_pending_err is
8326 8330 		 * set, an error occurred during plumbing (ill_ifname_pending),
8327 8331 * so we want to report that error.
8328 8332 *
8329 8333 		 * NOTE: there are two additional DL_PHYS_ADDR_REQ's
8330 8334 * (DL_IPV6_TOKEN and DL_IPV6_LINK_LAYER_ADDR) that are
8331 8335 * expected to get errack'd if the driver doesn't support
8332 8336 * these flags (e.g. ethernet). log will be set to B_FALSE
8333 8337 * if these error conditions are encountered.
8334 8338 */
8335 8339 if (mp1 != NULL) {
8336 8340 if (ill->ill_ifname_pending_err != 0) {
8337 8341 err = ill->ill_ifname_pending_err;
8338 8342 ill->ill_ifname_pending_err = 0;
8339 8343 } else {
8340 8344 err = dlea->dl_unix_errno ?
8341 8345 dlea->dl_unix_errno : ENXIO;
8342 8346 }
8343 8347 /*
8344 8348 * If we're plumbing an interface and an error hasn't already
8345 8349 * been saved, set ill_ifname_pending_err to the error passed
8346 8350 * up. Ignore the error if log is B_FALSE (see comment above).
8347 8351 */
8348 8352 } else if (log && ill->ill_ifname_pending &&
8349 8353 ill->ill_ifname_pending_err == 0) {
8350 8354 ill->ill_ifname_pending_err = dlea->dl_unix_errno ?
8351 8355 dlea->dl_unix_errno : ENXIO;
8352 8356 }
8353 8357
8354 8358 if (log)
8355 8359 ip_dlpi_error(ill, dlea->dl_error_primitive,
8356 8360 dlea->dl_errno, dlea->dl_unix_errno);
8357 8361 break;
8358 8362 case DL_CAPABILITY_ACK:
8359 8363 ill_capability_ack(ill, mp);
8360 8364 /*
8361 8365 * The message has been handed off to ill_capability_ack
8362 8366 * and must not be freed below
8363 8367 */
8364 8368 mp = NULL;
8365 8369 break;
8366 8370
8367 8371 case DL_INFO_ACK:
8368 8372 /* Call a routine to handle this one. */
8369 8373 ill_dlpi_done(ill, DL_INFO_REQ);
8370 8374 ip_ll_subnet_defaults(ill, mp);
8371 8375 ASSERT(!MUTEX_HELD(&ill->ill_phyint->phyint_ipsq->ipsq_lock));
8372 8376 return;
8373 8377 case DL_BIND_ACK:
8374 8378 /*
8375 8379 * We should have an IOCTL waiting on this unless
8376 8380 * sent by ill_dl_phys, in which case just return
8377 8381 */
8378 8382 ill_dlpi_done(ill, DL_BIND_REQ);
8379 8383
8380 8384 if (ill->ill_ifname_pending) {
8381 8385 DTRACE_PROBE2(ip__rput__dlpi__ifname__pending,
8382 8386 ill_t *, ill, mblk_t *, mp);
8383 8387 break;
8384 8388 }
8385 8389 mutex_enter(&ill->ill_lock);
8386 8390 ill->ill_dl_up = 1;
8387 8391 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8388 8392 mutex_exit(&ill->ill_lock);
8389 8393
8390 8394 if (!ioctl_aborted)
8391 8395 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8392 8396 if (mp1 == NULL) {
8393 8397 DTRACE_PROBE1(ip__rput__dlpi__no__mblk, ill_t *, ill);
8394 8398 break;
8395 8399 }
8396 8400 /*
8397 8401 		 * mp1 was added by ill_dl_up(). If that is a result of
8398 8402 * a DL_NOTE_REPLUMB notification, connp could be NULL.
8399 8403 */
8400 8404 if (connp != NULL)
8401 8405 q = CONNP_TO_WQ(connp);
8402 8406 /*
8403 8407 * We are exclusive. So nothing can change even after
8404 8408 * we get the pending mp.
8405 8409 */
8406 8410 ip1dbg(("ip_rput_dlpi: bind_ack %s\n", ill->ill_name));
8407 8411 DTRACE_PROBE1(ip__rput__dlpi__bind__ack, ill_t *, ill);
8408 8412 ill_nic_event_dispatch(ill, 0, NE_UP, NULL, 0);
8409 8413
8410 8414 /*
8411 8415 * Now bring up the resolver; when that is complete, we'll
8412 8416 * create IREs. Note that we intentionally mirror what
8413 8417 * ipif_up() would have done, because we got here by way of
8414 8418 * ill_dl_up(), which stopped ipif_up()'s processing.
8415 8419 */
8416 8420 if (ill->ill_isv6) {
8417 8421 /*
8418 8422 * v6 interfaces.
8419 8423 * Unlike ARP which has to do another bind
8420 8424 * and attach, once we get here we are
8421 8425 			 * done with NDP.
8422 8426 */
8423 8427 (void) ipif_resolver_up(ipif, Res_act_initial);
8424 8428 if ((err = ipif_ndp_up(ipif, B_TRUE)) == 0)
8425 8429 err = ipif_up_done_v6(ipif);
8426 8430 } else if (ill->ill_net_type == IRE_IF_RESOLVER) {
8427 8431 /*
8428 8432 * ARP and other v4 external resolvers.
8429 8433 * Leave the pending mblk intact so that
8430 8434 * the ioctl completes in ip_rput().
8431 8435 */
8432 8436 if (connp != NULL)
8433 8437 mutex_enter(&connp->conn_lock);
8434 8438 mutex_enter(&ill->ill_lock);
8435 8439 success = ipsq_pending_mp_add(connp, ipif, q, mp1, 0);
8436 8440 mutex_exit(&ill->ill_lock);
8437 8441 if (connp != NULL)
8438 8442 mutex_exit(&connp->conn_lock);
8439 8443 if (success) {
8440 8444 err = ipif_resolver_up(ipif, Res_act_initial);
8441 8445 if (err == EINPROGRESS) {
8442 8446 freemsg(mp);
8443 8447 return;
8444 8448 }
8445 8449 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8446 8450 } else {
8447 8451 /* The conn has started closing */
8448 8452 err = EINTR;
8449 8453 }
8450 8454 } else {
8451 8455 /*
8452 8456 * This one is complete. Reply to pending ioctl.
8453 8457 */
8454 8458 (void) ipif_resolver_up(ipif, Res_act_initial);
8455 8459 err = ipif_up_done(ipif);
8456 8460 }
8457 8461
8458 8462 if ((err == 0) && (ill->ill_up_ipifs)) {
8459 8463 err = ill_up_ipifs(ill, q, mp1);
8460 8464 if (err == EINPROGRESS) {
8461 8465 freemsg(mp);
8462 8466 return;
8463 8467 }
8464 8468 }
8465 8469
8466 8470 /*
8467 8471 * If we have a moved ipif to bring up, and everything has
8468 8472 * succeeded to this point, bring it up on the IPMP ill.
8469 8473 * Otherwise, leave it down -- the admin can try to bring it
8470 8474 * up by hand if need be.
8471 8475 */
8472 8476 if (ill->ill_move_ipif != NULL) {
8473 8477 if (err != 0) {
8474 8478 ill->ill_move_ipif = NULL;
8475 8479 } else {
8476 8480 ipif = ill->ill_move_ipif;
8477 8481 ill->ill_move_ipif = NULL;
8478 8482 err = ipif_up(ipif, q, mp1);
8479 8483 if (err == EINPROGRESS) {
8480 8484 freemsg(mp);
8481 8485 return;
8482 8486 }
8483 8487 }
8484 8488 }
8485 8489 break;
8486 8490
8487 8491 case DL_NOTIFY_IND: {
8488 8492 dl_notify_ind_t *notify = (dl_notify_ind_t *)mp->b_rptr;
8489 8493 uint_t orig_mtu, orig_mc_mtu;
8490 8494
8491 8495 switch (notify->dl_notification) {
8492 8496 case DL_NOTE_PHYS_ADDR:
8493 8497 err = ill_set_phys_addr(ill, mp);
8494 8498 break;
8495 8499
8496 8500 case DL_NOTE_REPLUMB:
8497 8501 /*
8498 8502 * Directly return after calling ill_replumb().
8499 8503 * Note that we should not free mp as it is reused
8500 8504 * in the ill_replumb() function.
8501 8505 */
8502 8506 err = ill_replumb(ill, mp);
8503 8507 return;
8504 8508
8505 8509 case DL_NOTE_FASTPATH_FLUSH:
8506 8510 nce_flush(ill, B_FALSE);
8507 8511 break;
8508 8512
8509 8513 case DL_NOTE_SDU_SIZE:
8510 8514 case DL_NOTE_SDU_SIZE2:
8511 8515 /*
8512 8516 * The dce and fragmentation code can cope with
8513 8517 * this changing while packets are being sent.
8514 8518 * When packets are sent ip_output will discover
8515 8519 * a change.
8516 8520 *
8517 8521 * Change the MTU size of the interface.
8518 8522 */
8519 8523 mutex_enter(&ill->ill_lock);
8520 8524 orig_mtu = ill->ill_mtu;
8521 8525 orig_mc_mtu = ill->ill_mc_mtu;
8522 8526 switch (notify->dl_notification) {
8523 8527 case DL_NOTE_SDU_SIZE:
8524 8528 ill->ill_current_frag =
8525 8529 (uint_t)notify->dl_data;
8526 8530 ill->ill_mc_mtu = (uint_t)notify->dl_data;
8527 8531 break;
8528 8532 case DL_NOTE_SDU_SIZE2:
8529 8533 ill->ill_current_frag =
8530 8534 (uint_t)notify->dl_data1;
8531 8535 ill->ill_mc_mtu = (uint_t)notify->dl_data2;
8532 8536 break;
8533 8537 }
8534 8538 if (ill->ill_current_frag > ill->ill_max_frag)
8535 8539 ill->ill_max_frag = ill->ill_current_frag;
8536 8540
8537 8541 if (!(ill->ill_flags & ILLF_FIXEDMTU)) {
8538 8542 ill->ill_mtu = ill->ill_current_frag;
8539 8543
8540 8544 /*
8541 8545 * If ill_user_mtu was set (via
8542 8546 * SIOCSLIFLNKINFO), clamp ill_mtu at it.
8543 8547 */
8544 8548 if (ill->ill_user_mtu != 0 &&
8545 8549 ill->ill_user_mtu < ill->ill_mtu)
8546 8550 ill->ill_mtu = ill->ill_user_mtu;
8547 8551
8548 8552 if (ill->ill_user_mtu != 0 &&
8549 8553 ill->ill_user_mtu < ill->ill_mc_mtu)
8550 8554 ill->ill_mc_mtu = ill->ill_user_mtu;
8551 8555
8552 8556 if (ill->ill_isv6) {
8553 8557 if (ill->ill_mtu < IPV6_MIN_MTU)
8554 8558 ill->ill_mtu = IPV6_MIN_MTU;
8555 8559 if (ill->ill_mc_mtu < IPV6_MIN_MTU)
8556 8560 ill->ill_mc_mtu = IPV6_MIN_MTU;
8557 8561 } else {
8558 8562 if (ill->ill_mtu < IP_MIN_MTU)
8559 8563 ill->ill_mtu = IP_MIN_MTU;
8560 8564 if (ill->ill_mc_mtu < IP_MIN_MTU)
8561 8565 ill->ill_mc_mtu = IP_MIN_MTU;
8562 8566 }
8563 8567 } else if (ill->ill_mc_mtu > ill->ill_mtu) {
8564 8568 ill->ill_mc_mtu = ill->ill_mtu;
8565 8569 }
8566 8570
8567 8571 mutex_exit(&ill->ill_lock);
8568 8572 /*
8569 8573 * Make sure all dce_generation checks find out
8570 8574 * that ill_mtu/ill_mc_mtu has changed.
8571 8575 */
8572 8576 if (orig_mtu != ill->ill_mtu ||
8573 8577 orig_mc_mtu != ill->ill_mc_mtu) {
8574 8578 dce_increment_all_generations(ill->ill_isv6,
8575 8579 ill->ill_ipst);
8576 8580 }
8577 8581
8578 8582 /*
8579 8583 * Refresh IPMP meta-interface MTU if necessary.
8580 8584 */
8581 8585 if (IS_UNDER_IPMP(ill))
8582 8586 ipmp_illgrp_refresh_mtu(ill->ill_grp);
8583 8587 break;
8584 8588
8585 8589 case DL_NOTE_LINK_UP:
8586 8590 case DL_NOTE_LINK_DOWN: {
8587 8591 /*
8588 8592 * We are writer. ill / phyint / ipsq assocs stable.
8589 8593 * The RUNNING flag reflects the state of the link.
8590 8594 */
8591 8595 phyint_t *phyint = ill->ill_phyint;
8592 8596 uint64_t new_phyint_flags;
8593 8597 boolean_t changed = B_FALSE;
8594 8598 boolean_t went_up;
8595 8599
8596 8600 went_up = notify->dl_notification == DL_NOTE_LINK_UP;
8597 8601 mutex_enter(&phyint->phyint_lock);
8598 8602
8599 8603 new_phyint_flags = went_up ?
8600 8604 phyint->phyint_flags | PHYI_RUNNING :
8601 8605 phyint->phyint_flags & ~PHYI_RUNNING;
8602 8606
8603 8607 if (IS_IPMP(ill)) {
8604 8608 new_phyint_flags = went_up ?
8605 8609 new_phyint_flags & ~PHYI_FAILED :
8606 8610 new_phyint_flags | PHYI_FAILED;
8607 8611 }
8608 8612
8609 8613 if (new_phyint_flags != phyint->phyint_flags) {
8610 8614 phyint->phyint_flags = new_phyint_flags;
8611 8615 changed = B_TRUE;
8612 8616 }
8613 8617 mutex_exit(&phyint->phyint_lock);
8614 8618 /*
8615 8619 * ill_restart_dad handles the DAD restart and routing
8616 8620 * socket notification logic.
8617 8621 */
8618 8622 if (changed) {
8619 8623 ill_restart_dad(phyint->phyint_illv4, went_up);
8620 8624 ill_restart_dad(phyint->phyint_illv6, went_up);
8621 8625 }
8622 8626 break;
8623 8627 }
8624 8628 case DL_NOTE_PROMISC_ON_PHYS: {
8625 8629 phyint_t *phyint = ill->ill_phyint;
8626 8630
8627 8631 mutex_enter(&phyint->phyint_lock);
8628 8632 phyint->phyint_flags |= PHYI_PROMISC;
8629 8633 mutex_exit(&phyint->phyint_lock);
8630 8634 break;
8631 8635 }
8632 8636 case DL_NOTE_PROMISC_OFF_PHYS: {
8633 8637 phyint_t *phyint = ill->ill_phyint;
8634 8638
8635 8639 mutex_enter(&phyint->phyint_lock);
8636 8640 phyint->phyint_flags &= ~PHYI_PROMISC;
8637 8641 mutex_exit(&phyint->phyint_lock);
8638 8642 break;
8639 8643 }
8640 8644 case DL_NOTE_CAPAB_RENEG:
8641 8645 /*
8642 8646 * Something changed on the driver side.
8643 8647 * It wants us to renegotiate the capabilities
8644 8648 * on this ill. One possible cause is the aggregation
8645 8649 * interface under us where a port got added or
8646 8650 * went away.
8647 8651 *
8648 8652 * If the capability negotiation is already done
8649 8653 * or is in progress, reset the capabilities and
8650 8654 * mark the ill's ill_capab_reneg to be B_TRUE,
8651 8655 * so that when the ack comes back, we can start
8652 8656 * the renegotiation process.
8653 8657 *
8654 8658 * Note that if ill_capab_reneg is already B_TRUE
8655 8659 * (ill_dlpi_capab_state is IDS_UNKNOWN in this case),
8656 8660 * the capability resetting request has been sent
8657 8661 * and the renegotiation has not been started yet;
8658 8662 * nothing needs to be done in this case.
8659 8663 */
8660 8664 ipsq_current_start(ipsq, ill->ill_ipif, 0);
8661 8665 ill_capability_reset(ill, B_TRUE);
8662 8666 ipsq_current_finish(ipsq);
8663 8667 break;
8664 8668
8665 8669 case DL_NOTE_ALLOWED_IPS:
8666 8670 ill_set_allowed_ips(ill, mp);
8667 8671 break;
8668 8672 default:
8669 8673 ip0dbg(("ip_rput_dlpi_writer: unknown notification "
8670 8674 "type 0x%x for DL_NOTIFY_IND\n",
8671 8675 notify->dl_notification));
8672 8676 break;
8673 8677 }
8674 8678
8675 8679 /*
8676 8680 * As this is an asynchronous operation, we
8677 8681 * should not call ill_dlpi_done
8678 8682 */
8679 8683 break;
8680 8684 }
8681 8685 case DL_NOTIFY_ACK: {
8682 8686 dl_notify_ack_t *noteack = (dl_notify_ack_t *)mp->b_rptr;
8683 8687
8684 8688 if (noteack->dl_notifications & DL_NOTE_LINK_UP)
8685 8689 ill->ill_note_link = 1;
8686 8690 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8687 8691 break;
8688 8692 }
8689 8693 case DL_PHYS_ADDR_ACK: {
8690 8694 /*
8691 8695 * As part of plumbing the interface via SIOCSLIFNAME,
8692 8696 * ill_dl_phys() will queue a series of DL_PHYS_ADDR_REQs,
8693 8697 * whose answers we receive here. As each answer is received,
8694 8698 * we call ill_dlpi_done() to dispatch the next request as
8695 8699 * we're processing the current one. Once all answers have
8696 8700 * been received, we use ipsq_pending_mp_get() to dequeue the
8697 8701 * outstanding IOCTL and reply to it. (Because ill_dl_phys()
8698 8702 * is invoked from an ill queue, conn_oper_pending_ill is not
8699 8703 * available, but we know the ioctl is pending on ill_wq.)
8700 8704 */
8701 8705 uint_t paddrlen, paddroff;
8702 8706 uint8_t *addr;
8703 8707
8704 8708 paddrreq = ill->ill_phys_addr_pend;
8705 8709 paddrlen = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_length;
8706 8710 paddroff = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_offset;
8707 8711 addr = mp->b_rptr + paddroff;
8708 8712
8709 8713 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8710 8714 if (paddrreq == DL_IPV6_TOKEN) {
8711 8715 /*
8712 8716 * bcopy to low-order bits of ill_token
8713 8717 *
8714 8718 * XXX Temporary hack - currently, all known tokens
8715 8719 * are 64 bits, so I'll cheat for the moment.
8716 8720 */
8717 8721 bcopy(addr, &ill->ill_token.s6_addr32[2], paddrlen);
8718 8722 ill->ill_token_length = paddrlen;
8719 8723 break;
8720 8724 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8721 8725 ASSERT(ill->ill_nd_lla_mp == NULL);
8722 8726 ill_set_ndmp(ill, mp, paddroff, paddrlen);
8723 8727 mp = NULL;
8724 8728 break;
8725 8729 } else if (paddrreq == DL_CURR_DEST_ADDR) {
8726 8730 ASSERT(ill->ill_dest_addr_mp == NULL);
8727 8731 ill->ill_dest_addr_mp = mp;
8728 8732 ill->ill_dest_addr = addr;
8729 8733 mp = NULL;
8730 8734 if (ill->ill_isv6) {
8731 8735 ill_setdesttoken(ill);
8732 8736 ipif_setdestlinklocal(ill->ill_ipif);
8733 8737 }
8734 8738 break;
8735 8739 }
8736 8740
8737 8741 ASSERT(paddrreq == DL_CURR_PHYS_ADDR);
8738 8742 ASSERT(ill->ill_phys_addr_mp == NULL);
8739 8743 if (!ill->ill_ifname_pending)
8740 8744 break;
8741 8745 ill->ill_ifname_pending = 0;
8742 8746 if (!ioctl_aborted)
8743 8747 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8744 8748 if (mp1 != NULL) {
8745 8749 ASSERT(connp == NULL);
8746 8750 q = ill->ill_wq;
8747 8751 }
8748 8752 /*
8749 8753 * If any error acks received during the plumbing sequence,
8750 8754 * ill_ifname_pending_err will be set. Break out and send up
8751 8755 * the error to the pending ioctl.
8752 8756 */
8753 8757 if (ill->ill_ifname_pending_err != 0) {
8754 8758 err = ill->ill_ifname_pending_err;
8755 8759 ill->ill_ifname_pending_err = 0;
8756 8760 break;
8757 8761 }
8758 8762
8759 8763 ill->ill_phys_addr_mp = mp;
8760 8764 ill->ill_phys_addr = (paddrlen == 0 ? NULL : addr);
8761 8765 mp = NULL;
8762 8766
8763 8767 /*
8764 8768 * If paddrlen or ill_phys_addr_length is zero, the DLPI
8765 8769 * provider doesn't support physical addresses. We check both
8766 8770 * paddrlen and ill_phys_addr_length because sppp (PPP) does
8767 8771 		 * not have physical addresses, but historically advertises a
8768 8772 * physical address length of 0 in its DL_INFO_ACK, but 6 in
8769 8773 * its DL_PHYS_ADDR_ACK.
8770 8774 */
8771 8775 if (paddrlen == 0 || ill->ill_phys_addr_length == 0) {
8772 8776 ill->ill_phys_addr = NULL;
8773 8777 } else if (paddrlen != ill->ill_phys_addr_length) {
8774 8778 ip0dbg(("DL_PHYS_ADDR_ACK: got addrlen %d, expected %d",
8775 8779 paddrlen, ill->ill_phys_addr_length));
8776 8780 err = EINVAL;
8777 8781 break;
8778 8782 }
8779 8783
8780 8784 if (ill->ill_nd_lla_mp == NULL) {
8781 8785 if ((mp_hw = copyb(ill->ill_phys_addr_mp)) == NULL) {
8782 8786 err = ENOMEM;
8783 8787 break;
8784 8788 }
8785 8789 ill_set_ndmp(ill, mp_hw, paddroff, paddrlen);
8786 8790 }
8787 8791
8788 8792 if (ill->ill_isv6) {
8789 8793 ill_setdefaulttoken(ill);
8790 8794 ipif_setlinklocal(ill->ill_ipif);
8791 8795 }
8792 8796 break;
8793 8797 }
8794 8798 case DL_OK_ACK:
8795 8799 ip2dbg(("DL_OK_ACK %s (0x%x)\n",
8796 8800 dl_primstr((int)dloa->dl_correct_primitive),
8797 8801 dloa->dl_correct_primitive));
8798 8802 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer ok",
8799 8803 char *, dl_primstr(dloa->dl_correct_primitive),
8800 8804 ill_t *, ill);
8801 8805
8802 8806 switch (dloa->dl_correct_primitive) {
8803 8807 case DL_ENABMULTI_REQ:
8804 8808 case DL_DISABMULTI_REQ:
8805 8809 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8806 8810 break;
8807 8811 case DL_PROMISCON_REQ:
8808 8812 case DL_PROMISCOFF_REQ:
8809 8813 case DL_UNBIND_REQ:
8810 8814 case DL_ATTACH_REQ:
8811 8815 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8812 8816 break;
8813 8817 }
8814 8818 break;
8815 8819 default:
8816 8820 break;
8817 8821 }
8818 8822
8819 8823 freemsg(mp);
8820 8824 if (mp1 == NULL)
8821 8825 return;
8822 8826
8823 8827 /*
8824 8828 * The operation must complete without EINPROGRESS since
8825 8829 * ipsq_pending_mp_get() has removed the mblk (mp1). Otherwise,
8826 8830 * the operation will be stuck forever inside the IPSQ.
8827 8831 */
8828 8832 ASSERT(err != EINPROGRESS);
8829 8833
8830 8834 DTRACE_PROBE4(ipif__ioctl, char *, "ip_rput_dlpi_writer finish",
8831 8835 int, ipsq->ipsq_xop->ipx_current_ioctl, ill_t *, ill,
8832 8836 ipif_t *, NULL);
8833 8837
8834 8838 switch (ipsq->ipsq_xop->ipx_current_ioctl) {
8835 8839 case 0:
8836 8840 ipsq_current_finish(ipsq);
8837 8841 break;
8838 8842
8839 8843 case SIOCSLIFNAME:
8840 8844 case IF_UNITSEL: {
8841 8845 ill_t *ill_other = ILL_OTHER(ill);
8842 8846
8843 8847 /*
8844 8848 * If SIOCSLIFNAME or IF_UNITSEL is about to succeed, and the
8845 8849 * ill has a peer which is in an IPMP group, then place ill
8846 8850 * into the same group. One catch: although ifconfig plumbs
8847 8851 * the appropriate IPMP meta-interface prior to plumbing this
8848 8852 * ill, it is possible for multiple ifconfig applications to
8849 8853 * race (or for another application to adjust plumbing), in
8850 8854 * which case the IPMP meta-interface we need will be missing.
8851 8855 * If so, kick the phyint out of the group.
8852 8856 */
8853 8857 if (err == 0 && ill_other != NULL && IS_UNDER_IPMP(ill_other)) {
8854 8858 ipmp_grp_t *grp = ill->ill_phyint->phyint_grp;
8855 8859 ipmp_illgrp_t *illg;
8856 8860
8857 8861 illg = ill->ill_isv6 ? grp->gr_v6 : grp->gr_v4;
8858 8862 if (illg == NULL)
8859 8863 ipmp_phyint_leave_grp(ill->ill_phyint);
8860 8864 else
8861 8865 ipmp_ill_join_illgrp(ill, illg);
8862 8866 }
8863 8867
8864 8868 if (ipsq->ipsq_xop->ipx_current_ioctl == IF_UNITSEL)
8865 8869 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8866 8870 else
8867 8871 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8868 8872 break;
8869 8873 }
8870 8874 case SIOCLIFADDIF:
8871 8875 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8872 8876 break;
8873 8877
8874 8878 default:
8875 8879 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8876 8880 break;
8877 8881 }
8878 8882 }
8879 8883
8880 8884 /*
8881 8885 * ip_rput_other is called by ip_rput to handle messages modifying the global
8882 8886 * state in IP. If 'ipsq' is non-NULL, caller is writer on it.
8883 8887 */
8884 8888 /* ARGSUSED */
8885 8889 void
8886 8890 ip_rput_other(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8887 8891 {
8888 8892 ill_t *ill = q->q_ptr;
8889 8893 struct iocblk *iocp;
8890 8894
8891 8895 ip1dbg(("ip_rput_other "));
8892 8896 if (ipsq != NULL) {
8893 8897 ASSERT(IAM_WRITER_IPSQ(ipsq));
8894 8898 ASSERT(ipsq->ipsq_xop ==
8895 8899 ill->ill_phyint->phyint_ipsq->ipsq_xop);
8896 8900 }
8897 8901
8898 8902 switch (mp->b_datap->db_type) {
8899 8903 case M_ERROR:
8900 8904 case M_HANGUP:
8901 8905 /*
8902 8906 * The device has a problem. We force the ILL down. It can
8903 8907 * be brought up again manually using SIOCSIFFLAGS (via
8904 8908 * ifconfig or equivalent).
8905 8909 */
8906 8910 ASSERT(ipsq != NULL);
8907 8911 if (mp->b_rptr < mp->b_wptr)
8908 8912 ill->ill_error = (int)(*mp->b_rptr & 0xFF);
8909 8913 if (ill->ill_error == 0)
8910 8914 ill->ill_error = ENXIO;
8911 8915 if (!ill_down_start(q, mp))
8912 8916 return;
8913 8917 ipif_all_down_tail(ipsq, q, mp, NULL);
8914 8918 break;
8915 8919 case M_IOCNAK: {
8916 8920 iocp = (struct iocblk *)mp->b_rptr;
8917 8921
8918 8922 ASSERT(iocp->ioc_cmd == DL_IOC_HDR_INFO);
8919 8923 /*
8920 8924 * If this was the first attempt, turn off the fastpath
8921 8925 * probing.
8922 8926 */
8923 8927 mutex_enter(&ill->ill_lock);
8924 8928 if (ill->ill_dlpi_fastpath_state == IDS_INPROGRESS) {
8925 8929 ill->ill_dlpi_fastpath_state = IDS_FAILED;
8926 8930 mutex_exit(&ill->ill_lock);
8927 8931 /*
8928 8932 * don't flush the nce_t entries: we use them
8929 8933 * as an index to the ncec itself.
8930 8934 */
8931 8935 ip1dbg(("ip_rput: DLPI fastpath off on interface %s\n",
8932 8936 ill->ill_name));
8933 8937 } else {
8934 8938 mutex_exit(&ill->ill_lock);
8935 8939 }
8936 8940 freemsg(mp);
8937 8941 break;
8938 8942 }
8939 8943 default:
8940 8944 ASSERT(0);
8941 8945 break;
8942 8946 }
8943 8947 }
8944 8948
8945 8949 /*
8946 8950 * Update any source route, record route or timestamp options
8947 8951 * When it fails it has consumed the message and BUMPed the MIB.
8948 8952 */
8949 8953 boolean_t
8950 8954 ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
8951 8955 ip_recv_attr_t *ira)
8952 8956 {
8953 8957 ipoptp_t opts;
8954 8958 uchar_t *opt;
8955 8959 uint8_t optval;
8956 8960 uint8_t optlen;
8957 8961 ipaddr_t dst;
8958 8962 ipaddr_t ifaddr;
8959 8963 uint32_t ts;
8960 8964 timestruc_t now;
8961 8965 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
8962 8966
8963 8967 ip2dbg(("ip_forward_options\n"));
8964 8968 dst = ipha->ipha_dst;
8965 8969 for (optval = ipoptp_first(&opts, ipha);
8966 8970 optval != IPOPT_EOL;
8967 8971 optval = ipoptp_next(&opts)) {
8968 8972 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
8969 8973 opt = opts.ipoptp_cur;
8970 8974 optlen = opts.ipoptp_len;
8971 8975 ip2dbg(("ip_forward_options: opt %d, len %d\n",
8972 8976 optval, opts.ipoptp_len));
8973 8977 switch (optval) {
8974 8978 uint32_t off;
8975 8979 case IPOPT_SSRR:
8976 8980 case IPOPT_LSRR:
8977 8981 			/* Check if administratively disabled */
8978 8982 if (!ipst->ips_ip_forward_src_routed) {
8979 8983 BUMP_MIB(dst_ill->ill_ip_mib,
8980 8984 ipIfStatsForwProhibits);
8981 8985 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED",
8982 8986 mp, dst_ill);
8983 8987 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED,
8984 8988 ira);
8985 8989 return (B_FALSE);
8986 8990 }
8987 8991 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
8988 8992 /*
8989 8993 * Must be partial since ip_input_options
8990 8994 * checked for strict.
8991 8995 */
8992 8996 break;
8993 8997 }
8994 8998 off = opt[IPOPT_OFFSET];
8995 8999 off--;
8996 9000 redo_srr:
8997 9001 if (optlen < IP_ADDR_LEN ||
8998 9002 off > optlen - IP_ADDR_LEN) {
8999 9003 /* End of source route */
9000 9004 ip1dbg((
9001 9005 "ip_forward_options: end of SR\n"));
9002 9006 break;
9003 9007 }
9004 9008 /* Pick a reasonable address on the outbound if */
9005 9009 ASSERT(dst_ill != NULL);
9006 9010 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9007 9011 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9008 9012 NULL) != 0) {
9009 9013 /* No source! Shouldn't happen */
9010 9014 ifaddr = INADDR_ANY;
9011 9015 }
9012 9016 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9013 9017 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9014 9018 ip1dbg(("ip_forward_options: next hop 0x%x\n",
9015 9019 ntohl(dst)));
9016 9020
9017 9021 /*
9018 9022 * Check if our address is present more than
9019 9023 * once as consecutive hops in source route.
9020 9024 */
9021 9025 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9022 9026 off += IP_ADDR_LEN;
9023 9027 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9024 9028 goto redo_srr;
9025 9029 }
9026 9030 ipha->ipha_dst = dst;
9027 9031 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9028 9032 break;
9029 9033 case IPOPT_RR:
9030 9034 off = opt[IPOPT_OFFSET];
9031 9035 off--;
9032 9036 if (optlen < IP_ADDR_LEN ||
9033 9037 off > optlen - IP_ADDR_LEN) {
9034 9038 /* No more room - ignore */
9035 9039 ip1dbg((
9036 9040 "ip_forward_options: end of RR\n"));
9037 9041 break;
9038 9042 }
9039 9043 /* Pick a reasonable address on the outbound if */
9040 9044 ASSERT(dst_ill != NULL);
9041 9045 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9042 9046 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9043 9047 NULL) != 0) {
9044 9048 /* No source! Shouldn't happen */
9045 9049 ifaddr = INADDR_ANY;
9046 9050 }
9047 9051 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9048 9052 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9049 9053 break;
9050 9054 case IPOPT_TS:
9051 9055 /* Insert timestamp if there is room */
9052 9056 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9053 9057 case IPOPT_TS_TSONLY:
9054 9058 off = IPOPT_TS_TIMELEN;
9055 9059 break;
9056 9060 case IPOPT_TS_PRESPEC:
9057 9061 case IPOPT_TS_PRESPEC_RFC791:
9058 9062 /* Verify that the address matched */
9059 9063 off = opt[IPOPT_OFFSET] - 1;
9060 9064 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9061 9065 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9062 9066 /* Not for us */
9063 9067 break;
9064 9068 }
9065 9069 /* FALLTHRU */
9066 9070 case IPOPT_TS_TSANDADDR:
9067 9071 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9068 9072 break;
9069 9073 default:
9070 9074 /*
9071 9075 * ip_*put_options should have already
9072 9076 * dropped this packet.
9073 9077 */
9074 9078 cmn_err(CE_PANIC, "ip_forward_options: "
9075 9079 "unknown IT - bug in ip_input_options?\n");
9076 9080 return (B_TRUE); /* Keep "lint" happy */
9077 9081 }
9078 9082 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9079 9083 /* Increase overflow counter */
9080 9084 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9081 9085 opt[IPOPT_POS_OV_FLG] =
9082 9086 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9083 9087 (off << 4));
9084 9088 break;
9085 9089 }
9086 9090 off = opt[IPOPT_OFFSET] - 1;
9087 9091 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9088 9092 case IPOPT_TS_PRESPEC:
9089 9093 case IPOPT_TS_PRESPEC_RFC791:
9090 9094 case IPOPT_TS_TSANDADDR:
9091 9095 /* Pick a reasonable addr on the outbound if */
9092 9096 ASSERT(dst_ill != NULL);
9093 9097 if (ip_select_source_v4(dst_ill, INADDR_ANY,
9094 9098 dst, INADDR_ANY, ALL_ZONES, ipst, &ifaddr,
9095 9099 NULL, NULL) != 0) {
9096 9100 /* No source! Shouldn't happen */
9097 9101 ifaddr = INADDR_ANY;
9098 9102 }
9099 9103 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9100 9104 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9101 9105 /* FALLTHRU */
9102 9106 case IPOPT_TS_TSONLY:
9103 9107 off = opt[IPOPT_OFFSET] - 1;
9104 9108 /* Compute # of milliseconds since midnight */
9105 9109 gethrestime(&now);
9106 9110 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9107 9111 now.tv_nsec / (NANOSEC / MILLISEC);
9108 9112 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9109 9113 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9110 9114 break;
9111 9115 }
9112 9116 break;
9113 9117 }
9114 9118 }
9115 9119 return (B_TRUE);
9116 9120 }
9117 9121
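The timestamp insertion above encodes the time of day as milliseconds since midnight UT, per RFC 791. A minimal userland sketch of the same arithmetic, assuming POSIX clock_gettime() in place of the kernel's gethrestime():

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec now;
	unsigned int ts;

	(void) clock_gettime(CLOCK_REALTIME, &now);
	/* Seconds-since-midnight scaled to ms, plus the ns remainder in ms. */
	ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
	    now.tv_nsec / (1000 * 1000);
	(void) printf("ms since midnight UT: %u\n", ts);
	return (0);
}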
9118 9122 /*
9119 9123 * Call ill_frag_timeout to do garbage collection. ill_frag_timeout
9120 9124 * returns 'true' if there are still fragments left on the queue, in
9121 9125 * which case we restart the timer.
9122 9126 */
9123 9127 void
9124 9128 ill_frag_timer(void *arg)
9125 9129 {
9126 9130 ill_t *ill = (ill_t *)arg;
9127 9131 boolean_t frag_pending;
9128 9132 ip_stack_t *ipst = ill->ill_ipst;
9129 9133 time_t timeout;
9130 9134
9131 9135 mutex_enter(&ill->ill_lock);
9132 9136 ASSERT(!ill->ill_fragtimer_executing);
9133 9137 if (ill->ill_state_flags & ILL_CONDEMNED) {
9134 9138 ill->ill_frag_timer_id = 0;
9135 9139 mutex_exit(&ill->ill_lock);
9136 9140 return;
9137 9141 }
9138 9142 ill->ill_fragtimer_executing = 1;
9139 9143 mutex_exit(&ill->ill_lock);
9140 9144
9141 9145 timeout = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9142 9146 ipst->ips_ip_reassembly_timeout);
9143 9147
9144 9148 frag_pending = ill_frag_timeout(ill, timeout);
9145 9149
9146 9150 /*
9147 9151 * Restart the timer, if we have fragments pending or if someone
9148 9152 * wanted us to be scheduled again.
9149 9153 */
9150 9154 mutex_enter(&ill->ill_lock);
9151 9155 ill->ill_fragtimer_executing = 0;
9152 9156 ill->ill_frag_timer_id = 0;
9153 9157 if (frag_pending || ill->ill_fragtimer_needrestart)
9154 9158 ill_frag_timer_start(ill);
9155 9159 mutex_exit(&ill->ill_lock);
9156 9160 }
9157 9161
9158 9162 void
9159 9163 ill_frag_timer_start(ill_t *ill)
9160 9164 {
9161 9165 ip_stack_t *ipst = ill->ill_ipst;
9162 9166 clock_t timeo_ms;
9163 9167
9164 9168 ASSERT(MUTEX_HELD(&ill->ill_lock));
9165 9169
9166 9170 /* If the ill is closing or opening don't proceed */
9167 9171 if (ill->ill_state_flags & ILL_CONDEMNED)
9168 9172 return;
9169 9173
9170 9174 if (ill->ill_fragtimer_executing) {
9171 9175 /*
9172 9176 * ill_frag_timer is currently executing. Just record
9173 9177 * the fact that we want the timer to be restarted.
9174 9178 * ill_frag_timer will post a timeout before it returns,
9175 9179 * ensuring it will be called again.
9176 9180 */
9177 9181 ill->ill_fragtimer_needrestart = 1;
9178 9182 return;
9179 9183 }
9180 9184
9181 9185 if (ill->ill_frag_timer_id == 0) {
9182 9186 timeo_ms = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9183 9187 ipst->ips_ip_reassembly_timeout) * SECONDS;
9184 9188
9185 9189 /*
9186 9190 * The timer is neither running nor is the timeout handler
9187 9191 * executing. Post a timeout so that ill_frag_timer will be
9188 9192 * called.
9189 9193 */
9190 9194 ill->ill_frag_timer_id = timeout(ill_frag_timer, ill,
9191 9195 MSEC_TO_TICK(timeo_ms >> 1));
9192 9196 ill->ill_fragtimer_needrestart = 0;
9193 9197 }
9194 9198 }
9195 9199
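ill_frag_timer() and ill_frag_timer_start() cooperate so the reassembly timer is re-armed exactly once even when a start request races with a running handler. A hypothetical pthreads model of that handshake; the executing/needrestart flags mirror the kernel's ill_fragtimer_executing/ill_fragtimer_needrestart, everything else is invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int timer_armed, executing, needrestart;

static void timer_start(void);		/* caller must hold lock */

static void *
timer_fire(void *arg)
{
	(void) usleep(1000);		/* stand-in for the timeout delay */

	pthread_mutex_lock(&lock);
	executing = 1;
	timer_armed = 0;
	pthread_mutex_unlock(&lock);

	/* ... periodic work would run here (cf. ill_frag_timeout) ... */

	pthread_mutex_lock(&lock);
	executing = 0;
	if (needrestart)
		timer_start();		/* re-arm exactly once */
	pthread_mutex_unlock(&lock);
	return (arg);
}

static void
timer_start(void)
{
	pthread_t tid;

	if (executing) {
		needrestart = 1;	/* handler re-arms on its way out */
		return;
	}
	if (!timer_armed) {
		timer_armed = 1;
		needrestart = 0;
		(void) pthread_create(&tid, NULL, timer_fire, NULL);
		(void) pthread_detach(tid);
	}
}

int
main(void)
{
	pthread_mutex_lock(&lock);
	timer_start();
	timer_start();			/* coalesced: timer already armed */
	pthread_mutex_unlock(&lock);
	(void) sleep(1);		/* let the handler run */
	pthread_mutex_lock(&lock);
	(void) printf("armed=%d executing=%d\n", timer_armed, executing);
	pthread_mutex_unlock(&lock);
	return (0);
}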
9196 9200 /*
9197 9201 * Update any source route, record route or timestamp options.
9198 9202 * Check that we are at end of strict source route.
9199 9203 * The options have already been checked for sanity in ip_input_options().
9200 9204 */
9201 9205 boolean_t
9202 9206 ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
9203 9207 {
9204 9208 ipoptp_t opts;
9205 9209 uchar_t *opt;
9206 9210 uint8_t optval;
9207 9211 uint8_t optlen;
9208 9212 ipaddr_t dst;
9209 9213 ipaddr_t ifaddr;
9210 9214 uint32_t ts;
9211 9215 timestruc_t now;
9212 9216 ill_t *ill = ira->ira_ill;
9213 9217 ip_stack_t *ipst = ill->ill_ipst;
9214 9218
9215 9219 ip2dbg(("ip_input_local_options\n"));
9216 9220
9217 9221 for (optval = ipoptp_first(&opts, ipha);
9218 9222 optval != IPOPT_EOL;
9219 9223 optval = ipoptp_next(&opts)) {
9220 9224 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9221 9225 opt = opts.ipoptp_cur;
9222 9226 optlen = opts.ipoptp_len;
9223 9227 ip2dbg(("ip_input_local_options: opt %d, len %d\n",
9224 9228 optval, optlen));
9225 9229 switch (optval) {
9226 9230 uint32_t off;
9227 9231 case IPOPT_SSRR:
9228 9232 case IPOPT_LSRR:
9229 9233 off = opt[IPOPT_OFFSET];
9230 9234 off--;
9231 9235 if (optlen < IP_ADDR_LEN ||
9232 9236 off > optlen - IP_ADDR_LEN) {
9233 9237 /* End of source route */
9234 9238 ip1dbg(("ip_input_local_options: end of SR\n"));
9235 9239 break;
9236 9240 }
9237 9241 /*
9238 9242 * This will only happen if two consecutive entries
9239 9243 * in the source route contain our address, or if
9240 9244 * the packet has a loose source route which
9241 9245 * reaches us before the whole source route is consumed.
9242 9246 */
9243 9247 ip1dbg(("ip_input_local_options: not end of SR\n"));
9244 9248 if (optval == IPOPT_SSRR) {
9245 9249 goto bad_src_route;
9246 9250 }
9247 9251 /*
9248 9252 * Hack: instead of dropping the packet truncate the
9249 9253 * source route to what has been used by filling the
9250 9254 * rest with IPOPT_NOP.
9251 9255 */
9252 9256 opt[IPOPT_OLEN] = (uint8_t)off;
9253 9257 while (off < optlen) {
9254 9258 opt[off++] = IPOPT_NOP;
9255 9259 }
9256 9260 break;
9257 9261 case IPOPT_RR:
9258 9262 off = opt[IPOPT_OFFSET];
9259 9263 off--;
9260 9264 if (optlen < IP_ADDR_LEN ||
9261 9265 off > optlen - IP_ADDR_LEN) {
9262 9266 /* No more room - ignore */
9263 9267 ip1dbg((
9264 9268 "ip_input_local_options: end of RR\n"));
9265 9269 break;
9266 9270 }
9267 9271 /* Pick a reasonable address on the outbound if */
9268 9272 if (ip_select_source_v4(ill, INADDR_ANY, ipha->ipha_dst,
9269 9273 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9270 9274 NULL) != 0) {
9271 9275 /* No source! Shouldn't happen */
9272 9276 ifaddr = INADDR_ANY;
9273 9277 }
9274 9278 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9275 9279 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9276 9280 break;
9277 9281 case IPOPT_TS:
9278 9282 /* Insert timestamp if there is room */
9279 9283 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9280 9284 case IPOPT_TS_TSONLY:
9281 9285 off = IPOPT_TS_TIMELEN;
9282 9286 break;
9283 9287 case IPOPT_TS_PRESPEC:
9284 9288 case IPOPT_TS_PRESPEC_RFC791:
9285 9289 /* Verify that the address matched */
9286 9290 off = opt[IPOPT_OFFSET] - 1;
9287 9291 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9288 9292 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9289 9293 /* Not for us */
9290 9294 break;
9291 9295 }
9292 9296 /* FALLTHRU */
9293 9297 case IPOPT_TS_TSANDADDR:
9294 9298 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9295 9299 break;
9296 9300 default:
9297 9301 /*
9298 9302 * ip_*put_options should have already
9299 9303 * dropped this packet.
9300 9304 */
9301 9305 cmn_err(CE_PANIC, "ip_input_local_options: "
9302 9306 "unknown IT - bug in ip_input_options?\n");
9303 9307 return (B_TRUE); /* Keep "lint" happy */
9304 9308 }
9305 9309 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9306 9310 /* Increase overflow counter */
9307 9311 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9308 9312 opt[IPOPT_POS_OV_FLG] =
9309 9313 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9310 9314 (off << 4));
9311 9315 break;
9312 9316 }
9313 9317 off = opt[IPOPT_OFFSET] - 1;
9314 9318 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9315 9319 case IPOPT_TS_PRESPEC:
9316 9320 case IPOPT_TS_PRESPEC_RFC791:
9317 9321 case IPOPT_TS_TSANDADDR:
9318 9322 /* Pick a reasonable addr on the outbound if */
9319 9323 if (ip_select_source_v4(ill, INADDR_ANY,
9320 9324 ipha->ipha_dst, INADDR_ANY, ALL_ZONES, ipst,
9321 9325 &ifaddr, NULL, NULL) != 0) {
9322 9326 /* No source! Shouldn't happen */
9323 9327 ifaddr = INADDR_ANY;
9324 9328 }
9325 9329 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9326 9330 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9327 9331 /* FALLTHRU */
9328 9332 case IPOPT_TS_TSONLY:
9329 9333 off = opt[IPOPT_OFFSET] - 1;
9330 9334 /* Compute # of milliseconds since midnight */
9331 9335 gethrestime(&now);
9332 9336 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9333 9337 now.tv_nsec / (NANOSEC / MILLISEC);
9334 9338 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9335 9339 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9336 9340 break;
9337 9341 }
9338 9342 break;
9339 9343 }
9340 9344 }
9341 9345 return (B_TRUE);
9342 9346
9343 9347 bad_src_route:
9344 9348 /* make sure we clear any indication of a hardware checksum */
9345 9349 DB_CKSUMFLAGS(mp) = 0;
9346 9350 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
9347 9351 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9348 9352 return (B_FALSE);
9349 9353
9350 9354 }
9351 9355
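The IPOPT_NOP truncation above rewrites a partially consumed loose source route in place: the option's length byte is shrunk to the consumed part and the leftover hops are overwritten with NOPs so later option parsers see nothing left to route through. A small userland sketch, assuming an RFC 791 LSRR layout with a made-up hop list:

#include <stdio.h>

#define	IPOPT_NOP	0x01
#define	IPOPT_OLEN	1	/* offset of the length byte */

int
main(void)
{
	/* LSRR: type 0x83, len 11, ptr 8 -> first hop consumed, one left */
	unsigned char opt[] = { 0x83, 11, 8,
	    192, 168, 1, 1, 10, 0, 0, 1 };
	unsigned int optlen = sizeof (opt);
	unsigned int off = opt[2] - 1;	/* 7: first unconsumed byte */

	opt[IPOPT_OLEN] = (unsigned char)off;
	while (off < optlen)
		opt[off++] = IPOPT_NOP;

	for (off = 0; off < optlen; off++)
		(void) printf("%02x ", opt[off]);
	(void) printf("\n");	/* 83 07 08 c0 a8 01 01 01 01 01 01 */
	return (0);
}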
9352 9356 /*
9353 9357 * Process IP options in an inbound packet. Always returns the nexthop.
9354 9358 * Normally this is the passed-in nexthop, but if there is an option
9355 9359 * that affects the nexthop (such as a source route), that will be returned.
9356 9360 * Sets *errorp if there is an error, in which case an ICMP error has been sent
9357 9361 * and mp freed.
9358 9362 */
9359 9363 ipaddr_t
9360 9364 ip_input_options(ipha_t *ipha, ipaddr_t dst, mblk_t *mp,
9361 9365 ip_recv_attr_t *ira, int *errorp)
9362 9366 {
9363 9367 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
9364 9368 ipoptp_t opts;
9365 9369 uchar_t *opt;
9366 9370 uint8_t optval;
9367 9371 uint8_t optlen;
9368 9372 intptr_t code = 0;
9369 9373 ire_t *ire;
9370 9374
9371 9375 ip2dbg(("ip_input_options\n"));
9372 9376 *errorp = 0;
9373 9377 for (optval = ipoptp_first(&opts, ipha);
9374 9378 optval != IPOPT_EOL;
9375 9379 optval = ipoptp_next(&opts)) {
9376 9380 opt = opts.ipoptp_cur;
9377 9381 optlen = opts.ipoptp_len;
9378 9382 ip2dbg(("ip_input_options: opt %d, len %d\n",
9379 9383 optval, optlen));
9380 9384 /*
9381 9385 * Note: we need to verify the checksum before we
9382 9386 * modify anything; thus this routine only extracts the next
9383 9387 * hop dst from any source route.
9384 9388 */
9385 9389 switch (optval) {
9386 9390 uint32_t off;
9387 9391 case IPOPT_SSRR:
9388 9392 case IPOPT_LSRR:
9389 9393 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9390 9394 if (optval == IPOPT_SSRR) {
9391 9395 ip1dbg(("ip_input_options: not next"
9392 9396 " strict source route 0x%x\n",
9393 9397 ntohl(dst)));
9394 9398 code = (char *)&ipha->ipha_dst -
9395 9399 (char *)ipha;
9396 9400 goto param_prob; /* RouterReq's */
9397 9401 }
9398 9402 ip2dbg(("ip_input_options: "
9399 9403 "not next source route 0x%x\n",
9400 9404 ntohl(dst)));
9401 9405 break;
9402 9406 }
9403 9407
9404 9408 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9405 9409 ip1dbg((
9406 9410 "ip_input_options: bad option offset\n"));
9407 9411 code = (char *)&opt[IPOPT_OLEN] -
9408 9412 (char *)ipha;
9409 9413 goto param_prob;
9410 9414 }
9411 9415 off = opt[IPOPT_OFFSET];
9412 9416 off--;
9413 9417 redo_srr:
9414 9418 if (optlen < IP_ADDR_LEN ||
9415 9419 off > optlen - IP_ADDR_LEN) {
9416 9420 /* End of source route */
9417 9421 ip1dbg(("ip_input_options: end of SR\n"));
9418 9422 break;
9419 9423 }
9420 9424 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9421 9425 ip1dbg(("ip_input_options: next hop 0x%x\n",
9422 9426 ntohl(dst)));
9423 9427
9424 9428 /*
9425 9429 * Check if our address is present more than
9426 9430 * once as consecutive hops in source route.
9427 9431 * XXX verify per-interface ip_forwarding
9428 9432 * for source route?
9429 9433 */
9430 9434 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9431 9435 off += IP_ADDR_LEN;
9432 9436 goto redo_srr;
9433 9437 }
9434 9438
9435 9439 if (dst == htonl(INADDR_LOOPBACK)) {
9436 9440 ip1dbg(("ip_input_options: loopback addr in "
9437 9441 "source route!\n"));
9438 9442 goto bad_src_route;
9439 9443 }
9440 9444 /*
9441 9445 * For strict: verify that dst is directly
9442 9446 * reachable.
9443 9447 */
9444 9448 if (optval == IPOPT_SSRR) {
9445 9449 ire = ire_ftable_lookup_v4(dst, 0, 0,
9446 9450 IRE_INTERFACE, NULL, ALL_ZONES,
9447 9451 ira->ira_tsl,
9448 9452 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
9449 9453 NULL);
9450 9454 if (ire == NULL) {
9451 9455 ip1dbg(("ip_input_options: SSRR not "
9452 9456 "directly reachable: 0x%x\n",
9453 9457 ntohl(dst)));
9454 9458 goto bad_src_route;
9455 9459 }
9456 9460 ire_refrele(ire);
9457 9461 }
9458 9462 /*
9459 9463 * Defer update of the offset and the record route
9460 9464 * until the packet is forwarded.
9461 9465 */
9462 9466 break;
9463 9467 case IPOPT_RR:
9464 9468 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9465 9469 ip1dbg((
9466 9470 "ip_input_options: bad option offset\n"));
9467 9471 code = (char *)&opt[IPOPT_OLEN] -
9468 9472 (char *)ipha;
9469 9473 goto param_prob;
9470 9474 }
9471 9475 break;
9472 9476 case IPOPT_TS:
9473 9477 /*
9474 9478 * Verify that length >= 5 and that there is either
9475 9479 * room for another timestamp or that the overflow
9476 9480 * counter is not maxed out.
9477 9481 */
9478 9482 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
9479 9483 if (optlen < IPOPT_MINLEN_IT) {
9480 9484 goto param_prob;
9481 9485 }
9482 9486 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9483 9487 ip1dbg((
9484 9488 "ip_input_options: bad option offset\n"));
9485 9489 code = (char *)&opt[IPOPT_OFFSET] -
9486 9490 (char *)ipha;
9487 9491 goto param_prob;
9488 9492 }
9489 9493 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9490 9494 case IPOPT_TS_TSONLY:
9491 9495 off = IPOPT_TS_TIMELEN;
9492 9496 break;
9493 9497 case IPOPT_TS_TSANDADDR:
9494 9498 case IPOPT_TS_PRESPEC:
9495 9499 case IPOPT_TS_PRESPEC_RFC791:
9496 9500 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9497 9501 break;
9498 9502 default:
9499 9503 code = (char *)&opt[IPOPT_POS_OV_FLG] -
9500 9504 (char *)ipha;
9501 9505 goto param_prob;
9502 9506 }
9503 9507 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
9504 9508 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
9505 9509 /*
9506 9510 * No room and the overflow counter is 15
9507 9511 * already.
9508 9512 */
9509 9513 goto param_prob;
9510 9514 }
9511 9515 break;
9512 9516 }
9513 9517 }
9514 9518
9515 9519 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0) {
9516 9520 return (dst);
9517 9521 }
9518 9522
9519 9523 ip1dbg(("ip_input_options: error processing IP options."));
9520 9524 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
9521 9525
9522 9526 param_prob:
9523 9527 /* make sure we clear any indication of a hardware checksum */
9524 9528 DB_CKSUMFLAGS(mp) = 0;
9525 9529 ip_drop_input("ICMP_PARAM_PROBLEM", mp, ira->ira_ill);
9526 9530 icmp_param_problem(mp, (uint8_t)code, ira);
9527 9531 *errorp = -1;
9528 9532 return (dst);
9529 9533
9530 9534 bad_src_route:
9531 9535 /* make sure we clear any indication of a hardware checksum */
9532 9536 DB_CKSUMFLAGS(mp) = 0;
9533 9537 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ira->ira_ill);
9534 9538 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9535 9539 *errorp = -1;
9536 9540 return (dst);
9537 9541 }
9538 9542
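The timestamp checks above hinge on the option's pointer/overflow/flags byte: the flags live in the low nibble and a 4-bit overflow counter in the high nibble counts hosts that had no room to record a timestamp, saturating at 15 (at which point the packet draws a parameter problem). A standalone sketch of that counter arithmetic, with illustrative values:

#include <stdio.h>

int
main(void)
{
	unsigned char pos_ov_flg = 0x01;	/* flags = TSONLY, oflw = 0 */
	int i;

	for (i = 0; i < 17; i++) {
		unsigned int oflw = pos_ov_flg >> 4;

		if ((pos_ov_flg & 0xF0) == 0xF0) {
			(void) printf("overflow maxed at 15: param problem\n");
			break;
		}
		/* Bump the high-nibble counter, preserve the flags. */
		pos_ov_flg = (unsigned char)((pos_ov_flg & 0x0F) |
		    ((oflw + 1) << 4));
	}
	(void) printf("final byte 0x%02x, oflw %u\n", pos_ov_flg,
	    pos_ov_flg >> 4);
	return (0);
}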
9539 9543 /*
9540 9544 * IP & ICMP info in >=14 msg's ...
9541 9545 * - ip fixed part (mib2_ip_t)
9542 9546 * - icmp fixed part (mib2_icmp_t)
9543 9547 * - ipAddrEntryTable (ip 20) all IPv4 ipifs
9544 9548 * - ipRouteEntryTable (ip 21) all IPv4 IREs
9545 9549 * - ipNetToMediaEntryTable (ip 22) all IPv4 Neighbor Cache entries
9546 9550 * - ipRouteAttributeTable (ip 102) labeled routes
9547 9551 * - ip multicast membership (ip_member_t)
9548 9552 * - ip multicast source filtering (ip_grpsrc_t)
9549 9553 * - igmp fixed part (struct igmpstat)
9550 9554 * - multicast routing stats (struct mrtstat)
9551 9555 * - multicast routing vifs (array of struct vifctl)
9552 9556 * - multicast routing routes (array of struct mfcctl)
9553 9557 * - ip6 fixed part (mib2_ipv6IfStatsEntry_t)
9554 9558 * One per ill plus one generic
9555 9559 * - icmp6 fixed part (mib2_ipv6IfIcmpEntry_t)
9556 9560 * One per ill plus one generic
9557 9561 * - ipv6RouteEntry all IPv6 IREs
9558 9562 * - ipv6RouteAttributeTable (ip6 102) labeled routes
9559 9563 * - ipv6NetToMediaEntry all IPv6 Neighbor Cache entries
9560 9564 * - ipv6AddrEntry all IPv6 ipifs
9561 9565 * - ipv6 multicast membership (ipv6_member_t)
9562 9566 * - ipv6 multicast source filtering (ipv6_grpsrc_t)
9563 9567 *
9564 9568 * NOTE: original mpctl is copied for msg's 2..N, since its ctl part is
9565 9569 * already filled in by the caller.
9566 9570 * If legacy_req is true then MIB structures need to be truncated to their
9567 9571 * legacy sizes before being returned.
9568 9572 * Return value of 0 indicates that no messages were sent and caller
9569 9573 * should free mpctl.
9570 9574 */
9571 9575 int
9572 9576 ip_snmp_get(queue_t *q, mblk_t *mpctl, int level, boolean_t legacy_req)
9573 9577 {
9574 9578 ip_stack_t *ipst;
9575 9579 sctp_stack_t *sctps;
9576 9580
9577 9581 if (q->q_next != NULL) {
9578 9582 ipst = ILLQ_TO_IPST(q);
9579 9583 } else {
9580 9584 ipst = CONNQ_TO_IPST(q);
9581 9585 }
9582 9586 ASSERT(ipst != NULL);
9583 9587 sctps = ipst->ips_netstack->netstack_sctp;
9584 9588
9585 9589 if (mpctl == NULL || mpctl->b_cont == NULL) {
9586 9590 return (0);
9587 9591 }
9588 9592
9589 9593 /*
9590 9594 * To accommodate the (broken) packet shell's use of the
9591 9595 * level, we make sure MIB2_TCP/MIB2_UDP can be used
9592 9596 * to make TCP and UDP appear first in the list of mib items.
9593 9597 * TBD: We could expand this and use it in netstat so that
9594 9598 * the kernel doesn't have to produce large tables (connections,
9595 9599 * routes, etc) when netstat only wants the statistics or a particular
9596 9600 * table.
9597 9601 */
9598 9602 if (!(level == MIB2_TCP || level == MIB2_UDP)) {
9599 9603 if ((mpctl = icmp_snmp_get(q, mpctl)) == NULL) {
9600 9604 return (1);
9601 9605 }
9602 9606 }
9603 9607
9604 9608 if (level != MIB2_TCP) {
9605 9609 if ((mpctl = udp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9606 9610 return (1);
9607 9611 }
9608 9612 }
9609 9613
9610 9614 if (level != MIB2_UDP) {
9611 9615 if ((mpctl = tcp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9612 9616 return (1);
9613 9617 }
9614 9618 }
9615 9619
9616 9620 if ((mpctl = ip_snmp_get_mib2_ip_traffic_stats(q, mpctl,
9617 9621 ipst, legacy_req)) == NULL) {
9618 9622 return (1);
9619 9623 }
9620 9624
9621 9625 if ((mpctl = ip_snmp_get_mib2_ip6(q, mpctl, ipst,
9622 9626 legacy_req)) == NULL) {
9623 9627 return (1);
9624 9628 }
9625 9629
9626 9630 if ((mpctl = ip_snmp_get_mib2_icmp(q, mpctl, ipst)) == NULL) {
9627 9631 return (1);
9628 9632 }
9629 9633
9630 9634 if ((mpctl = ip_snmp_get_mib2_icmp6(q, mpctl, ipst)) == NULL) {
9631 9635 return (1);
9632 9636 }
9633 9637
9634 9638 if ((mpctl = ip_snmp_get_mib2_igmp(q, mpctl, ipst)) == NULL) {
9635 9639 return (1);
9636 9640 }
9637 9641
9638 9642 if ((mpctl = ip_snmp_get_mib2_multi(q, mpctl, ipst)) == NULL) {
9639 9643 return (1);
9640 9644 }
9641 9645
9642 9646 if ((mpctl = ip_snmp_get_mib2_ip_addr(q, mpctl, ipst,
9643 9647 legacy_req)) == NULL) {
9644 9648 return (1);
9645 9649 }
9646 9650
9647 9651 if ((mpctl = ip_snmp_get_mib2_ip6_addr(q, mpctl, ipst,
9648 9652 legacy_req)) == NULL) {
9649 9653 return (1);
9650 9654 }
9651 9655
9652 9656 if ((mpctl = ip_snmp_get_mib2_ip_group_mem(q, mpctl, ipst)) == NULL) {
9653 9657 return (1);
9654 9658 }
9655 9659
9656 9660 if ((mpctl = ip_snmp_get_mib2_ip6_group_mem(q, mpctl, ipst)) == NULL) {
9657 9661 return (1);
9658 9662 }
9659 9663
9660 9664 if ((mpctl = ip_snmp_get_mib2_ip_group_src(q, mpctl, ipst)) == NULL) {
9661 9665 return (1);
9662 9666 }
9663 9667
9664 9668 if ((mpctl = ip_snmp_get_mib2_ip6_group_src(q, mpctl, ipst)) == NULL) {
9665 9669 return (1);
9666 9670 }
9667 9671
9668 9672 if ((mpctl = ip_snmp_get_mib2_virt_multi(q, mpctl, ipst)) == NULL) {
9669 9673 return (1);
9670 9674 }
9671 9675
9672 9676 if ((mpctl = ip_snmp_get_mib2_multi_rtable(q, mpctl, ipst)) == NULL) {
9673 9677 return (1);
9674 9678 }
9675 9679
9676 9680 mpctl = ip_snmp_get_mib2_ip_route_media(q, mpctl, level, ipst);
9677 9681 if (mpctl == NULL)
9678 9682 return (1);
9679 9683
9680 9684 mpctl = ip_snmp_get_mib2_ip6_route_media(q, mpctl, level, ipst);
9681 9685 if (mpctl == NULL)
9682 9686 return (1);
9683 9687
9684 9688 if ((mpctl = sctp_snmp_get_mib2(q, mpctl, sctps)) == NULL) {
9685 9689 return (1);
9686 9690 }
9691 +
9687 9692 if ((mpctl = ip_snmp_get_mib2_ip_dce(q, mpctl, ipst)) == NULL) {
9688 9693 return (1);
9689 9694 }
9695 +
9696 + if ((mpctl = dccp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9697 + return (1);
9698 + }
9699 +
9690 9700 freemsg(mpctl);
9691 9701 return (1);
9692 9702 }
9693 9703
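Each getter in the chain above follows the same contract: send the current control message upstream and hand back a fresh copy for the next getter, or return NULL to end the walk so the caller knows the message was consumed. A loose userland analogy of that contract, with strdup() standing in for copymsg(); all names here are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef char *(*mib_getter_t)(char *);

static char *
getter(char *msg)
{
	char *next = strdup(msg);	/* copy for the next stage */

	(void) printf("sent upstream: %s\n", msg);
	free(msg);			/* this stage consumed its copy */
	return (next);			/* NULL here would stop the chain */
}

int
main(void)
{
	mib_getter_t chain[] = { getter, getter, getter };
	char *msg = strdup("mib-ctl");
	size_t i;

	for (i = 0; msg != NULL && i < sizeof (chain) / sizeof (chain[0]);
	    i++)
		msg = chain[i](msg);
	free(msg);			/* final copy (or NULL) */
	return (0);
}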
9694 9704 /* Get global (legacy) IPv4 statistics */
9695 9705 static mblk_t *
9696 9706 ip_snmp_get_mib2_ip(queue_t *q, mblk_t *mpctl, mib2_ipIfStatsEntry_t *ipmib,
9697 9707 ip_stack_t *ipst, boolean_t legacy_req)
9698 9708 {
9699 9709 mib2_ip_t old_ip_mib;
9700 9710 struct opthdr *optp;
9701 9711 mblk_t *mp2ctl;
9702 9712 mib2_ipAddrEntry_t mae;
9703 9713
9704 9714 /*
9705 9715 * make a copy of the original message
9706 9716 */
9707 9717 mp2ctl = copymsg(mpctl);
9708 9718
9709 9719 /* fixed length IP structure... */
9710 9720 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9711 9721 optp->level = MIB2_IP;
9712 9722 optp->name = 0;
9713 9723 SET_MIB(old_ip_mib.ipForwarding,
9714 9724 (WE_ARE_FORWARDING(ipst) ? 1 : 2));
9715 9725 SET_MIB(old_ip_mib.ipDefaultTTL,
9716 9726 (uint32_t)ipst->ips_ip_def_ttl);
9717 9727 SET_MIB(old_ip_mib.ipReasmTimeout,
9718 9728 ipst->ips_ip_reassembly_timeout);
9719 9729 SET_MIB(old_ip_mib.ipAddrEntrySize,
9720 9730 (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
9721 9731 sizeof (mib2_ipAddrEntry_t));
9722 9732 SET_MIB(old_ip_mib.ipRouteEntrySize,
9723 9733 sizeof (mib2_ipRouteEntry_t));
9724 9734 SET_MIB(old_ip_mib.ipNetToMediaEntrySize,
9725 9735 sizeof (mib2_ipNetToMediaEntry_t));
9726 9736 SET_MIB(old_ip_mib.ipMemberEntrySize, sizeof (ip_member_t));
9727 9737 SET_MIB(old_ip_mib.ipGroupSourceEntrySize, sizeof (ip_grpsrc_t));
9728 9738 SET_MIB(old_ip_mib.ipRouteAttributeSize,
9729 9739 sizeof (mib2_ipAttributeEntry_t));
9730 9740 SET_MIB(old_ip_mib.transportMLPSize, sizeof (mib2_transportMLPEntry_t));
9731 9741 SET_MIB(old_ip_mib.ipDestEntrySize, sizeof (dest_cache_entry_t));
9732 9742
9733 9743 /*
9734 9744 * Grab the statistics from the new IP MIB
9735 9745 */
9736 9746 SET_MIB(old_ip_mib.ipInReceives,
9737 9747 (uint32_t)ipmib->ipIfStatsHCInReceives);
9738 9748 SET_MIB(old_ip_mib.ipInHdrErrors, ipmib->ipIfStatsInHdrErrors);
9739 9749 SET_MIB(old_ip_mib.ipInAddrErrors, ipmib->ipIfStatsInAddrErrors);
9740 9750 SET_MIB(old_ip_mib.ipForwDatagrams,
9741 9751 (uint32_t)ipmib->ipIfStatsHCOutForwDatagrams);
9742 9752 SET_MIB(old_ip_mib.ipInUnknownProtos,
9743 9753 ipmib->ipIfStatsInUnknownProtos);
9744 9754 SET_MIB(old_ip_mib.ipInDiscards, ipmib->ipIfStatsInDiscards);
9745 9755 SET_MIB(old_ip_mib.ipInDelivers,
9746 9756 (uint32_t)ipmib->ipIfStatsHCInDelivers);
9747 9757 SET_MIB(old_ip_mib.ipOutRequests,
9748 9758 (uint32_t)ipmib->ipIfStatsHCOutRequests);
9749 9759 SET_MIB(old_ip_mib.ipOutDiscards, ipmib->ipIfStatsOutDiscards);
9750 9760 SET_MIB(old_ip_mib.ipOutNoRoutes, ipmib->ipIfStatsOutNoRoutes);
9751 9761 SET_MIB(old_ip_mib.ipReasmReqds, ipmib->ipIfStatsReasmReqds);
9752 9762 SET_MIB(old_ip_mib.ipReasmOKs, ipmib->ipIfStatsReasmOKs);
9753 9763 SET_MIB(old_ip_mib.ipReasmFails, ipmib->ipIfStatsReasmFails);
9754 9764 SET_MIB(old_ip_mib.ipFragOKs, ipmib->ipIfStatsOutFragOKs);
9755 9765 SET_MIB(old_ip_mib.ipFragFails, ipmib->ipIfStatsOutFragFails);
9756 9766 SET_MIB(old_ip_mib.ipFragCreates, ipmib->ipIfStatsOutFragCreates);
9757 9767
9758 9768 /* ipRoutingDiscards is not being used */
9759 9769 SET_MIB(old_ip_mib.ipRoutingDiscards, 0);
9760 9770 SET_MIB(old_ip_mib.tcpInErrs, ipmib->tcpIfStatsInErrs);
9761 9771 SET_MIB(old_ip_mib.udpNoPorts, ipmib->udpIfStatsNoPorts);
9762 9772 SET_MIB(old_ip_mib.ipInCksumErrs, ipmib->ipIfStatsInCksumErrs);
9763 9773 SET_MIB(old_ip_mib.ipReasmDuplicates,
9764 9774 ipmib->ipIfStatsReasmDuplicates);
9765 9775 SET_MIB(old_ip_mib.ipReasmPartDups, ipmib->ipIfStatsReasmPartDups);
9766 9776 SET_MIB(old_ip_mib.ipForwProhibits, ipmib->ipIfStatsForwProhibits);
9767 9777 SET_MIB(old_ip_mib.udpInCksumErrs, ipmib->udpIfStatsInCksumErrs);
9768 9778 SET_MIB(old_ip_mib.udpInOverflows, ipmib->udpIfStatsInOverflows);
9769 9779 SET_MIB(old_ip_mib.rawipInOverflows,
9770 9780 ipmib->rawipIfStatsInOverflows);
9771 9781
9772 9782 SET_MIB(old_ip_mib.ipsecInSucceeded, ipmib->ipsecIfStatsInSucceeded);
9773 9783 SET_MIB(old_ip_mib.ipsecInFailed, ipmib->ipsecIfStatsInFailed);
9774 9784 SET_MIB(old_ip_mib.ipInIPv6, ipmib->ipIfStatsInWrongIPVersion);
9775 9785 SET_MIB(old_ip_mib.ipOutIPv6, ipmib->ipIfStatsOutWrongIPVersion);
9776 9786 SET_MIB(old_ip_mib.ipOutSwitchIPv6,
9777 9787 ipmib->ipIfStatsOutSwitchIPVersion);
9778 9788
9779 9789 if (!snmp_append_data(mpctl->b_cont, (char *)&old_ip_mib,
9780 9790 (int)sizeof (old_ip_mib))) {
9781 9791 ip1dbg(("ip_snmp_get_mib2_ip: failed to allocate %u bytes\n",
9782 9792 (uint_t)sizeof (old_ip_mib)));
9783 9793 }
9784 9794
9785 9795 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9786 9796 ip3dbg(("ip_snmp_get_mib2_ip: level %d, name %d, len %d\n",
9787 9797 (int)optp->level, (int)optp->name, (int)optp->len));
9788 9798 qreply(q, mpctl);
9789 9799 return (mp2ctl);
9790 9800 }
9791 9801
9792 9802 /* Per interface IPv4 statistics */
9793 9803 static mblk_t *
9794 9804 ip_snmp_get_mib2_ip_traffic_stats(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
9795 9805 boolean_t legacy_req)
9796 9806 {
9797 9807 struct opthdr *optp;
9798 9808 mblk_t *mp2ctl;
9799 9809 ill_t *ill;
9800 9810 ill_walk_context_t ctx;
9801 9811 mblk_t *mp_tail = NULL;
9802 9812 mib2_ipIfStatsEntry_t global_ip_mib;
9803 9813 mib2_ipAddrEntry_t mae;
9804 9814
9805 9815 /*
9806 9816 * Make a copy of the original message
9807 9817 */
9808 9818 mp2ctl = copymsg(mpctl);
9809 9819
9810 9820 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9811 9821 optp->level = MIB2_IP;
9812 9822 optp->name = MIB2_IP_TRAFFIC_STATS;
9813 9823 /* Include "unknown interface" ip_mib */
9814 9824 ipst->ips_ip_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv4;
9815 9825 ipst->ips_ip_mib.ipIfStatsIfIndex =
9816 9826 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
9817 9827 SET_MIB(ipst->ips_ip_mib.ipIfStatsForwarding,
9818 9828 (ipst->ips_ip_forwarding ? 1 : 2));
9819 9829 SET_MIB(ipst->ips_ip_mib.ipIfStatsDefaultTTL,
9820 9830 (uint32_t)ipst->ips_ip_def_ttl);
9821 9831 SET_MIB(ipst->ips_ip_mib.ipIfStatsEntrySize,
9822 9832 sizeof (mib2_ipIfStatsEntry_t));
9823 9833 SET_MIB(ipst->ips_ip_mib.ipIfStatsAddrEntrySize,
9824 9834 sizeof (mib2_ipAddrEntry_t));
9825 9835 SET_MIB(ipst->ips_ip_mib.ipIfStatsRouteEntrySize,
9826 9836 sizeof (mib2_ipRouteEntry_t));
9827 9837 SET_MIB(ipst->ips_ip_mib.ipIfStatsNetToMediaEntrySize,
9828 9838 sizeof (mib2_ipNetToMediaEntry_t));
9829 9839 SET_MIB(ipst->ips_ip_mib.ipIfStatsMemberEntrySize,
9830 9840 sizeof (ip_member_t));
9831 9841 SET_MIB(ipst->ips_ip_mib.ipIfStatsGroupSourceEntrySize,
9832 9842 sizeof (ip_grpsrc_t));
9833 9843
9834 9844 bcopy(&ipst->ips_ip_mib, &global_ip_mib, sizeof (global_ip_mib));
9835 9845
9836 9846 if (legacy_req) {
9837 9847 SET_MIB(global_ip_mib.ipIfStatsAddrEntrySize,
9838 9848 LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t));
9839 9849 }
9840 9850
9841 9851 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9842 9852 (char *)&global_ip_mib, (int)sizeof (global_ip_mib))) {
9843 9853 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9844 9854 "failed to allocate %u bytes\n",
9845 9855 (uint_t)sizeof (global_ip_mib)));
9846 9856 }
9847 9857
9848 9858 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
9849 9859 ill = ILL_START_WALK_V4(&ctx, ipst);
9850 9860 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
9851 9861 ill->ill_ip_mib->ipIfStatsIfIndex =
9852 9862 ill->ill_phyint->phyint_ifindex;
9853 9863 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
9854 9864 (ipst->ips_ip_forwarding ? 1 : 2));
9855 9865 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultTTL,
9856 9866 (uint32_t)ipst->ips_ip_def_ttl);
9857 9867
9858 9868 ip_mib2_add_ip_stats(&global_ip_mib, ill->ill_ip_mib);
9859 9869 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9860 9870 (char *)ill->ill_ip_mib,
9861 9871 (int)sizeof (*ill->ill_ip_mib))) {
9862 9872 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9863 9873 "failed to allocate %u bytes\n",
9864 9874 (uint_t)sizeof (*ill->ill_ip_mib)));
9865 9875 }
9866 9876 }
9867 9877 rw_exit(&ipst->ips_ill_g_lock);
9868 9878
9869 9879 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9870 9880 ip3dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9871 9881 "level %d, name %d, len %d\n",
9872 9882 (int)optp->level, (int)optp->name, (int)optp->len));
9873 9883 qreply(q, mpctl);
9874 9884
9875 9885 if (mp2ctl == NULL)
9876 9886 return (NULL);
9877 9887
9878 9888 return (ip_snmp_get_mib2_ip(q, mp2ctl, &global_ip_mib, ipst,
9879 9889 legacy_req));
9880 9890 }
9881 9891
9882 9892 /* Global IPv4 ICMP statistics */
9883 9893 static mblk_t *
9884 9894 ip_snmp_get_mib2_icmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9885 9895 {
9886 9896 struct opthdr *optp;
9887 9897 mblk_t *mp2ctl;
9888 9898
9889 9899 /*
9890 9900 * Make a copy of the original message
9891 9901 */
9892 9902 mp2ctl = copymsg(mpctl);
9893 9903
9894 9904 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9895 9905 optp->level = MIB2_ICMP;
9896 9906 optp->name = 0;
9897 9907 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_icmp_mib,
9898 9908 (int)sizeof (ipst->ips_icmp_mib))) {
9899 9909 ip1dbg(("ip_snmp_get_mib2_icmp: failed to allocate %u bytes\n",
9900 9910 (uint_t)sizeof (ipst->ips_icmp_mib)));
9901 9911 }
9902 9912 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9903 9913 ip3dbg(("ip_snmp_get_mib2_icmp: level %d, name %d, len %d\n",
9904 9914 (int)optp->level, (int)optp->name, (int)optp->len));
9905 9915 qreply(q, mpctl);
9906 9916 return (mp2ctl);
9907 9917 }
9908 9918
9909 9919 /* Global IPv4 IGMP statistics */
9910 9920 static mblk_t *
9911 9921 ip_snmp_get_mib2_igmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9912 9922 {
9913 9923 struct opthdr *optp;
9914 9924 mblk_t *mp2ctl;
9915 9925
9916 9926 /*
9917 9927 * make a copy of the original message
9918 9928 */
9919 9929 mp2ctl = copymsg(mpctl);
9920 9930
9921 9931 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9922 9932 optp->level = EXPER_IGMP;
9923 9933 optp->name = 0;
9924 9934 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_igmpstat,
9925 9935 (int)sizeof (ipst->ips_igmpstat))) {
9926 9936 ip1dbg(("ip_snmp_get_mib2_igmp: failed to allocate %u bytes\n",
9927 9937 (uint_t)sizeof (ipst->ips_igmpstat)));
9928 9938 }
9929 9939 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9930 9940 ip3dbg(("ip_snmp_get_mib2_igmp: level %d, name %d, len %d\n",
9931 9941 (int)optp->level, (int)optp->name, (int)optp->len));
9932 9942 qreply(q, mpctl);
9933 9943 return (mp2ctl);
9934 9944 }
9935 9945
9936 9946 /* Global IPv4 Multicast Routing statistics */
9937 9947 static mblk_t *
9938 9948 ip_snmp_get_mib2_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9939 9949 {
9940 9950 struct opthdr *optp;
9941 9951 mblk_t *mp2ctl;
9942 9952
9943 9953 /*
9944 9954 * make a copy of the original message
9945 9955 */
9946 9956 mp2ctl = copymsg(mpctl);
9947 9957
9948 9958 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9949 9959 optp->level = EXPER_DVMRP;
9950 9960 optp->name = 0;
9951 9961 if (!ip_mroute_stats(mpctl->b_cont, ipst)) {
9952 9962 ip0dbg(("ip_mroute_stats: failed\n"));
9953 9963 }
9954 9964 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9955 9965 ip3dbg(("ip_snmp_get_mib2_multi: level %d, name %d, len %d\n",
9956 9966 (int)optp->level, (int)optp->name, (int)optp->len));
9957 9967 qreply(q, mpctl);
9958 9968 return (mp2ctl);
9959 9969 }
9960 9970
9961 9971 /* IPv4 address information */
9962 9972 static mblk_t *
9963 9973 ip_snmp_get_mib2_ip_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
9964 9974 boolean_t legacy_req)
9965 9975 {
9966 9976 struct opthdr *optp;
9967 9977 mblk_t *mp2ctl;
9968 9978 mblk_t *mp_tail = NULL;
9969 9979 ill_t *ill;
9970 9980 ipif_t *ipif;
9971 9981 uint_t bitval;
9972 9982 mib2_ipAddrEntry_t mae;
9973 9983 size_t mae_size;
9974 9984 zoneid_t zoneid;
9975 9985 ill_walk_context_t ctx;
9976 9986
9977 9987 /*
9978 9988 * make a copy of the original message
9979 9989 */
9980 9990 mp2ctl = copymsg(mpctl);
9981 9991
9982 9992 mae_size = (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
9983 9993 sizeof (mib2_ipAddrEntry_t);
9984 9994
9985 9995 /* ipAddrEntryTable */
9986 9996
9987 9997 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9988 9998 optp->level = MIB2_IP;
9989 9999 optp->name = MIB2_IP_ADDR;
9990 10000 zoneid = Q_TO_CONN(q)->conn_zoneid;
9991 10001
9992 10002 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
9993 10003 ill = ILL_START_WALK_V4(&ctx, ipst);
9994 10004 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
9995 10005 for (ipif = ill->ill_ipif; ipif != NULL;
9996 10006 ipif = ipif->ipif_next) {
9997 10007 if (ipif->ipif_zoneid != zoneid &&
9998 10008 ipif->ipif_zoneid != ALL_ZONES)
9999 10009 continue;
10000 10010 /* Sum of count from dead IRE_LO* and our current */
10001 10011 mae.ipAdEntInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10002 10012 if (ipif->ipif_ire_local != NULL) {
10003 10013 mae.ipAdEntInfo.ae_ibcnt +=
10004 10014 ipif->ipif_ire_local->ire_ib_pkt_count;
10005 10015 }
10006 10016 mae.ipAdEntInfo.ae_obcnt = 0;
10007 10017 mae.ipAdEntInfo.ae_focnt = 0;
10008 10018
10009 10019 ipif_get_name(ipif, mae.ipAdEntIfIndex.o_bytes,
10010 10020 OCTET_LENGTH);
10011 10021 mae.ipAdEntIfIndex.o_length =
10012 10022 mi_strlen(mae.ipAdEntIfIndex.o_bytes);
10013 10023 mae.ipAdEntAddr = ipif->ipif_lcl_addr;
10014 10024 mae.ipAdEntNetMask = ipif->ipif_net_mask;
10015 10025 mae.ipAdEntInfo.ae_subnet = ipif->ipif_subnet;
10016 10026 mae.ipAdEntInfo.ae_subnet_len =
10017 10027 ip_mask_to_plen(ipif->ipif_net_mask);
10018 10028 mae.ipAdEntInfo.ae_src_addr = ipif->ipif_lcl_addr;
10019 10029 for (bitval = 1;
10020 10030 bitval &&
10021 10031 !(bitval & ipif->ipif_brd_addr);
10022 10032 bitval <<= 1)
10023 10033 noop;
10024 10034 mae.ipAdEntBcastAddr = bitval;
10025 10035 mae.ipAdEntReasmMaxSize = IP_MAXPACKET;
10026 10036 mae.ipAdEntInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10027 10037 mae.ipAdEntInfo.ae_metric = ipif->ipif_ill->ill_metric;
10028 10038 mae.ipAdEntInfo.ae_broadcast_addr =
10029 10039 ipif->ipif_brd_addr;
10030 10040 mae.ipAdEntInfo.ae_pp_dst_addr =
10031 10041 ipif->ipif_pp_dst_addr;
10032 10042 mae.ipAdEntInfo.ae_flags = ipif->ipif_flags |
10033 10043 ill->ill_flags | ill->ill_phyint->phyint_flags;
10034 10044 mae.ipAdEntRetransmitTime =
10035 10045 ill->ill_reachable_retrans_time;
10036 10046
10037 10047 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10038 10048 (char *)&mae, (int)mae_size)) {
10039 10049 ip1dbg(("ip_snmp_get_mib2_ip_addr: failed to "
10040 10050 "allocate %u bytes\n", (uint_t)mae_size));
10041 10051 }
10042 10052 }
10043 10053 }
10044 10054 rw_exit(&ipst->ips_ill_g_lock);
10045 10055
10046 10056 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10047 10057 ip3dbg(("ip_snmp_get_mib2_ip_addr: level %d, name %d, len %d\n",
10048 10058 (int)optp->level, (int)optp->name, (int)optp->len));
10049 10059 qreply(q, mpctl);
10050 10060 return (mp2ctl);
10051 10061 }
10052 10062
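The bitval loop above implements RFC 1213's ipAdEntBcastAddr encoding: only the value of the least-significant set bit of the broadcast address is reported. A standalone sketch with a sample address:

#include <stdio.h>

int
main(void)
{
	unsigned int brd = 0xC0A801FFU;	/* 192.168.1.255 */
	unsigned int bitval;

	/* Terminates with bitval == 0 if brd has no bits set at all. */
	for (bitval = 1; bitval && !(bitval & brd); bitval <<= 1)
		;
	(void) printf("ipAdEntBcastAddr: 0x%x\n", bitval);	/* 0x1 */
	return (0);
}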
10053 10063 /* IPv6 address information */
10054 10064 static mblk_t *
10055 10065 ip_snmp_get_mib2_ip6_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10056 10066 boolean_t legacy_req)
10057 10067 {
10058 10068 struct opthdr *optp;
10059 10069 mblk_t *mp2ctl;
10060 10070 mblk_t *mp_tail = NULL;
10061 10071 ill_t *ill;
10062 10072 ipif_t *ipif;
10063 10073 mib2_ipv6AddrEntry_t mae6;
10064 10074 size_t mae6_size;
10065 10075 zoneid_t zoneid;
10066 10076 ill_walk_context_t ctx;
10067 10077
10068 10078 /*
10069 10079 * make a copy of the original message
10070 10080 */
10071 10081 mp2ctl = copymsg(mpctl);
10072 10082
10073 10083 mae6_size = (legacy_req) ?
10074 10084 LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t) :
10075 10085 sizeof (mib2_ipv6AddrEntry_t);
10076 10086
10077 10087 /* ipv6AddrEntryTable */
10078 10088
10079 10089 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10080 10090 optp->level = MIB2_IP6;
10081 10091 optp->name = MIB2_IP6_ADDR;
10082 10092 zoneid = Q_TO_CONN(q)->conn_zoneid;
10083 10093
10084 10094 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10085 10095 ill = ILL_START_WALK_V6(&ctx, ipst);
10086 10096 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10087 10097 for (ipif = ill->ill_ipif; ipif != NULL;
10088 10098 ipif = ipif->ipif_next) {
10089 10099 if (ipif->ipif_zoneid != zoneid &&
10090 10100 ipif->ipif_zoneid != ALL_ZONES)
10091 10101 continue;
10092 10102 /* Sum of count from dead IRE_LO* and our current */
10093 10103 mae6.ipv6AddrInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10094 10104 if (ipif->ipif_ire_local != NULL) {
10095 10105 mae6.ipv6AddrInfo.ae_ibcnt +=
10096 10106 ipif->ipif_ire_local->ire_ib_pkt_count;
10097 10107 }
10098 10108 mae6.ipv6AddrInfo.ae_obcnt = 0;
10099 10109 mae6.ipv6AddrInfo.ae_focnt = 0;
10100 10110
10101 10111 ipif_get_name(ipif, mae6.ipv6AddrIfIndex.o_bytes,
10102 10112 OCTET_LENGTH);
10103 10113 mae6.ipv6AddrIfIndex.o_length =
10104 10114 mi_strlen(mae6.ipv6AddrIfIndex.o_bytes);
10105 10115 mae6.ipv6AddrAddress = ipif->ipif_v6lcl_addr;
10106 10116 mae6.ipv6AddrPfxLength =
10107 10117 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
10108 10118 mae6.ipv6AddrInfo.ae_subnet = ipif->ipif_v6subnet;
10109 10119 mae6.ipv6AddrInfo.ae_subnet_len =
10110 10120 mae6.ipv6AddrPfxLength;
10111 10121 mae6.ipv6AddrInfo.ae_src_addr = ipif->ipif_v6lcl_addr;
10112 10122
10113 10123 /* Type: stateless(1), stateful(2), unknown(3) */
10114 10124 if (ipif->ipif_flags & IPIF_ADDRCONF)
10115 10125 mae6.ipv6AddrType = 1;
10116 10126 else
10117 10127 mae6.ipv6AddrType = 2;
10118 10128 /* Anycast: true(1), false(2) */
10119 10129 if (ipif->ipif_flags & IPIF_ANYCAST)
10120 10130 mae6.ipv6AddrAnycastFlag = 1;
10121 10131 else
10122 10132 mae6.ipv6AddrAnycastFlag = 2;
10123 10133
10124 10134 /*
10125 10135 * Address status: preferred(1), deprecated(2),
10126 10136 * invalid(3), inaccessible(4), unknown(5)
10127 10137 */
10128 10138 if (ipif->ipif_flags & IPIF_NOLOCAL)
10129 10139 mae6.ipv6AddrStatus = 3;
10130 10140 else if (ipif->ipif_flags & IPIF_DEPRECATED)
10131 10141 mae6.ipv6AddrStatus = 2;
10132 10142 else
10133 10143 mae6.ipv6AddrStatus = 1;
10134 10144 mae6.ipv6AddrInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10135 10145 mae6.ipv6AddrInfo.ae_metric =
10136 10146 ipif->ipif_ill->ill_metric;
10137 10147 mae6.ipv6AddrInfo.ae_pp_dst_addr =
10138 10148 ipif->ipif_v6pp_dst_addr;
10139 10149 mae6.ipv6AddrInfo.ae_flags = ipif->ipif_flags |
10140 10150 ill->ill_flags | ill->ill_phyint->phyint_flags;
10141 10151 mae6.ipv6AddrReasmMaxSize = IP_MAXPACKET;
10142 10152 mae6.ipv6AddrIdentifier = ill->ill_token;
10143 10153 mae6.ipv6AddrIdentifierLen = ill->ill_token_length;
10144 10154 mae6.ipv6AddrReachableTime = ill->ill_reachable_time;
10145 10155 mae6.ipv6AddrRetransmitTime =
10146 10156 ill->ill_reachable_retrans_time;
10147 10157 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10148 10158 (char *)&mae6, (int)mae6_size)) {
10149 10159 ip1dbg(("ip_snmp_get_mib2_ip6_addr: failed to "
10150 10160 "allocate %u bytes\n",
10151 10161 (uint_t)mae6_size));
10152 10162 }
10153 10163 }
10154 10164 }
10155 10165 rw_exit(&ipst->ips_ill_g_lock);
10156 10166
10157 10167 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10158 10168 ip3dbg(("ip_snmp_get_mib2_ip6_addr: level %d, name %d, len %d\n",
10159 10169 (int)optp->level, (int)optp->name, (int)optp->len));
10160 10170 qreply(q, mpctl);
10161 10171 return (mp2ctl);
10162 10172 }
10163 10173
10164 10174 /* IPv4 multicast group membership. */
10165 10175 static mblk_t *
10166 10176 ip_snmp_get_mib2_ip_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10167 10177 {
10168 10178 struct opthdr *optp;
10169 10179 mblk_t *mp2ctl;
10170 10180 ill_t *ill;
10171 10181 ipif_t *ipif;
10172 10182 ilm_t *ilm;
10173 10183 ip_member_t ipm;
10174 10184 mblk_t *mp_tail = NULL;
10175 10185 ill_walk_context_t ctx;
10176 10186 zoneid_t zoneid;
10177 10187
10178 10188 /*
10179 10189 * make a copy of the original message
10180 10190 */
10181 10191 mp2ctl = copymsg(mpctl);
10182 10192 zoneid = Q_TO_CONN(q)->conn_zoneid;
10183 10193
10184 10194 /* ipGroupMember table */
10185 10195 optp = (struct opthdr *)&mpctl->b_rptr[
10186 10196 sizeof (struct T_optmgmt_ack)];
10187 10197 optp->level = MIB2_IP;
10188 10198 optp->name = EXPER_IP_GROUP_MEMBERSHIP;
10189 10199
10190 10200 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10191 10201 ill = ILL_START_WALK_V4(&ctx, ipst);
10192 10202 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10193 10203 /* Make sure the ill isn't going away. */
10194 10204 if (!ill_check_and_refhold(ill))
10195 10205 continue;
10196 10206 rw_exit(&ipst->ips_ill_g_lock);
10197 10207 rw_enter(&ill->ill_mcast_lock, RW_READER);
10198 10208 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10199 10209 if (ilm->ilm_zoneid != zoneid &&
10200 10210 ilm->ilm_zoneid != ALL_ZONES)
10201 10211 continue;
10202 10212
10203 10213 /* Is there an ipif for ilm_ifaddr? */
10204 10214 for (ipif = ill->ill_ipif; ipif != NULL;
10205 10215 ipif = ipif->ipif_next) {
10206 10216 if (!IPIF_IS_CONDEMNED(ipif) &&
10207 10217 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10208 10218 ilm->ilm_ifaddr != INADDR_ANY)
10209 10219 break;
10210 10220 }
10211 10221 if (ipif != NULL) {
10212 10222 ipif_get_name(ipif,
10213 10223 ipm.ipGroupMemberIfIndex.o_bytes,
10214 10224 OCTET_LENGTH);
10215 10225 } else {
10216 10226 ill_get_name(ill,
10217 10227 ipm.ipGroupMemberIfIndex.o_bytes,
10218 10228 OCTET_LENGTH);
10219 10229 }
10220 10230 ipm.ipGroupMemberIfIndex.o_length =
10221 10231 mi_strlen(ipm.ipGroupMemberIfIndex.o_bytes);
10222 10232
10223 10233 ipm.ipGroupMemberAddress = ilm->ilm_addr;
10224 10234 ipm.ipGroupMemberRefCnt = ilm->ilm_refcnt;
10225 10235 ipm.ipGroupMemberFilterMode = ilm->ilm_fmode;
10226 10236 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10227 10237 (char *)&ipm, (int)sizeof (ipm))) {
10228 10238 ip1dbg(("ip_snmp_get_mib2_ip_group: "
10229 10239 "failed to allocate %u bytes\n",
10230 10240 (uint_t)sizeof (ipm)));
10231 10241 }
10232 10242 }
10233 10243 rw_exit(&ill->ill_mcast_lock);
10234 10244 ill_refrele(ill);
10235 10245 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10236 10246 }
10237 10247 rw_exit(&ipst->ips_ill_g_lock);
10238 10248 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10239 10249 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10240 10250 (int)optp->level, (int)optp->name, (int)optp->len));
10241 10251 qreply(q, mpctl);
10242 10252 return (mp2ctl);
10243 10253 }
10244 10254
10245 10255 /* IPv6 multicast group membership. */
10246 10256 static mblk_t *
10247 10257 ip_snmp_get_mib2_ip6_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10248 10258 {
10249 10259 struct opthdr *optp;
10250 10260 mblk_t *mp2ctl;
10251 10261 ill_t *ill;
10252 10262 ilm_t *ilm;
10253 10263 ipv6_member_t ipm6;
10254 10264 mblk_t *mp_tail = NULL;
10255 10265 ill_walk_context_t ctx;
10256 10266 zoneid_t zoneid;
10257 10267
10258 10268 /*
10259 10269 * make a copy of the original message
10260 10270 */
10261 10271 mp2ctl = copymsg(mpctl);
10262 10272 zoneid = Q_TO_CONN(q)->conn_zoneid;
10263 10273
10264 10274 /* ip6GroupMember table */
10265 10275 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10266 10276 optp->level = MIB2_IP6;
10267 10277 optp->name = EXPER_IP6_GROUP_MEMBERSHIP;
10268 10278
10269 10279 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10270 10280 ill = ILL_START_WALK_V6(&ctx, ipst);
10271 10281 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10272 10282 /* Make sure the ill isn't going away. */
10273 10283 if (!ill_check_and_refhold(ill))
10274 10284 continue;
10275 10285 rw_exit(&ipst->ips_ill_g_lock);
10276 10286 /*
10277 10287 * Normally we don't have any members on underlying IPMP interfaces.
10278 10288 * We report them as a debugging aid.
10279 10289 */
10280 10290 rw_enter(&ill->ill_mcast_lock, RW_READER);
10281 10291 ipm6.ipv6GroupMemberIfIndex = ill->ill_phyint->phyint_ifindex;
10282 10292 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10283 10293 if (ilm->ilm_zoneid != zoneid &&
10284 10294 ilm->ilm_zoneid != ALL_ZONES)
10285 10295 continue; /* not this zone */
10286 10296 ipm6.ipv6GroupMemberAddress = ilm->ilm_v6addr;
10287 10297 ipm6.ipv6GroupMemberRefCnt = ilm->ilm_refcnt;
10288 10298 ipm6.ipv6GroupMemberFilterMode = ilm->ilm_fmode;
10289 10299 if (!snmp_append_data2(mpctl->b_cont,
10290 10300 &mp_tail,
10291 10301 (char *)&ipm6, (int)sizeof (ipm6))) {
10292 10302 ip1dbg(("ip_snmp_get_mib2_ip6_group: "
10293 10303 "failed to allocate %u bytes\n",
10294 10304 (uint_t)sizeof (ipm6)));
10295 10305 }
10296 10306 }
10297 10307 rw_exit(&ill->ill_mcast_lock);
10298 10308 ill_refrele(ill);
10299 10309 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10300 10310 }
10301 10311 rw_exit(&ipst->ips_ill_g_lock);
10302 10312
10303 10313 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10304 10314 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10305 10315 (int)optp->level, (int)optp->name, (int)optp->len));
10306 10316 qreply(q, mpctl);
10307 10317 return (mp2ctl);
10308 10318 }
10309 10319
10310 10320 /* IP multicast filtered sources */
10311 10321 static mblk_t *
10312 10322 ip_snmp_get_mib2_ip_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10313 10323 {
10314 10324 struct opthdr *optp;
10315 10325 mblk_t *mp2ctl;
10316 10326 ill_t *ill;
10317 10327 ipif_t *ipif;
10318 10328 ilm_t *ilm;
10319 10329 ip_grpsrc_t ips;
10320 10330 mblk_t *mp_tail = NULL;
10321 10331 ill_walk_context_t ctx;
10322 10332 zoneid_t zoneid;
10323 10333 int i;
10324 10334 slist_t *sl;
10325 10335
10326 10336 /*
10327 10337 * make a copy of the original message
10328 10338 */
10329 10339 mp2ctl = copymsg(mpctl);
10330 10340 zoneid = Q_TO_CONN(q)->conn_zoneid;
10331 10341
10332 10342 /* ipGroupSource table */
10333 10343 optp = (struct opthdr *)&mpctl->b_rptr[
10334 10344 sizeof (struct T_optmgmt_ack)];
10335 10345 optp->level = MIB2_IP;
10336 10346 optp->name = EXPER_IP_GROUP_SOURCES;
10337 10347
10338 10348 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10339 10349 ill = ILL_START_WALK_V4(&ctx, ipst);
10340 10350 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10341 10351 /* Make sure the ill isn't going away. */
10342 10352 if (!ill_check_and_refhold(ill))
10343 10353 continue;
10344 10354 rw_exit(&ipst->ips_ill_g_lock);
10345 10355 rw_enter(&ill->ill_mcast_lock, RW_READER);
10346 10356 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10347 10357 sl = ilm->ilm_filter;
10348 10358 if (ilm->ilm_zoneid != zoneid &&
10349 10359 ilm->ilm_zoneid != ALL_ZONES)
10350 10360 continue;
10351 10361 if (SLIST_IS_EMPTY(sl))
10352 10362 continue;
10353 10363
10354 10364 /* Is there an ipif for ilm_ifaddr? */
10355 10365 for (ipif = ill->ill_ipif; ipif != NULL;
10356 10366 ipif = ipif->ipif_next) {
10357 10367 if (!IPIF_IS_CONDEMNED(ipif) &&
10358 10368 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10359 10369 ilm->ilm_ifaddr != INADDR_ANY)
10360 10370 break;
10361 10371 }
10362 10372 if (ipif != NULL) {
10363 10373 ipif_get_name(ipif,
10364 10374 ips.ipGroupSourceIfIndex.o_bytes,
10365 10375 OCTET_LENGTH);
10366 10376 } else {
10367 10377 ill_get_name(ill,
10368 10378 ips.ipGroupSourceIfIndex.o_bytes,
10369 10379 OCTET_LENGTH);
10370 10380 }
10371 10381 ips.ipGroupSourceIfIndex.o_length =
10372 10382 mi_strlen(ips.ipGroupSourceIfIndex.o_bytes);
10373 10383
10374 10384 ips.ipGroupSourceGroup = ilm->ilm_addr;
10375 10385 for (i = 0; i < sl->sl_numsrc; i++) {
10376 10386 if (!IN6_IS_ADDR_V4MAPPED(&sl->sl_addr[i]))
10377 10387 continue;
10378 10388 IN6_V4MAPPED_TO_IPADDR(&sl->sl_addr[i],
10379 10389 ips.ipGroupSourceAddress);
10380 10390 if (snmp_append_data2(mpctl->b_cont, &mp_tail,
10381 10391 (char *)&ips, (int)sizeof (ips)) == 0) {
10382 10392 ip1dbg(("ip_snmp_get_mib2_ip_group_src:"
10383 10393 " failed to allocate %u bytes\n",
10384 10394 (uint_t)sizeof (ips)));
10385 10395 }
10386 10396 }
10387 10397 }
10388 10398 rw_exit(&ill->ill_mcast_lock);
10389 10399 ill_refrele(ill);
10390 10400 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10391 10401 }
10392 10402 rw_exit(&ipst->ips_ill_g_lock);
10393 10403 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10394 10404 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10395 10405 (int)optp->level, (int)optp->name, (int)optp->len));
10396 10406 qreply(q, mpctl);
10397 10407 return (mp2ctl);
10398 10408 }
10399 10409
10400 10410 /* IPv6 multicast filtered sources. */
10401 10411 static mblk_t *
10402 10412 ip_snmp_get_mib2_ip6_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10403 10413 {
10404 10414 struct opthdr *optp;
10405 10415 mblk_t *mp2ctl;
10406 10416 ill_t *ill;
10407 10417 ilm_t *ilm;
10408 10418 ipv6_grpsrc_t ips6;
10409 10419 mblk_t *mp_tail = NULL;
10410 10420 ill_walk_context_t ctx;
10411 10421 zoneid_t zoneid;
10412 10422 int i;
10413 10423 slist_t *sl;
10414 10424
10415 10425 /*
10416 10426 * make a copy of the original message
10417 10427 */
10418 10428 mp2ctl = copymsg(mpctl);
10419 10429 zoneid = Q_TO_CONN(q)->conn_zoneid;
10420 10430
10421 10431 /* ip6GroupMember table */
10422 10432 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10423 10433 optp->level = MIB2_IP6;
10424 10434 optp->name = EXPER_IP6_GROUP_SOURCES;
10425 10435
10426 10436 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10427 10437 ill = ILL_START_WALK_V6(&ctx, ipst);
10428 10438 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10429 10439 /* Make sure the ill isn't going away. */
10430 10440 if (!ill_check_and_refhold(ill))
10431 10441 continue;
10432 10442 rw_exit(&ipst->ips_ill_g_lock);
10433 10443 /*
10434 10444 * Normally we don't have any members on underlying IPMP interfaces.
10435 10445 * We report them as a debugging aid.
10436 10446 */
10437 10447 rw_enter(&ill->ill_mcast_lock, RW_READER);
10438 10448 ips6.ipv6GroupSourceIfIndex = ill->ill_phyint->phyint_ifindex;
10439 10449 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10440 10450 sl = ilm->ilm_filter;
10441 10451 if (ilm->ilm_zoneid != zoneid &&
10442 10452 ilm->ilm_zoneid != ALL_ZONES)
10443 10453 continue;
10444 10454 if (SLIST_IS_EMPTY(sl))
10445 10455 continue;
10446 10456 ips6.ipv6GroupSourceGroup = ilm->ilm_v6addr;
10447 10457 for (i = 0; i < sl->sl_numsrc; i++) {
10448 10458 ips6.ipv6GroupSourceAddress = sl->sl_addr[i];
10449 10459 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10450 10460 (char *)&ips6, (int)sizeof (ips6))) {
10451 10461 ip1dbg(("ip_snmp_get_mib2_ip6_"
10452 10462 "group_src: failed to allocate "
10453 10463 "%u bytes\n",
10454 10464 (uint_t)sizeof (ips6)));
10455 10465 }
10456 10466 }
10457 10467 }
10458 10468 rw_exit(&ill->ill_mcast_lock);
10459 10469 ill_refrele(ill);
10460 10470 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10461 10471 }
10462 10472 rw_exit(&ipst->ips_ill_g_lock);
10463 10473
10464 10474 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10465 10475 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10466 10476 (int)optp->level, (int)optp->name, (int)optp->len));
10467 10477 qreply(q, mpctl);
10468 10478 return (mp2ctl);
10469 10479 }
10470 10480
10471 10481 /* Multicast routing virtual interface table. */
10472 10482 static mblk_t *
10473 10483 ip_snmp_get_mib2_virt_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10474 10484 {
10475 10485 struct opthdr *optp;
10476 10486 mblk_t *mp2ctl;
10477 10487
10478 10488 /*
10479 10489 * make a copy of the original message
10480 10490 */
10481 10491 mp2ctl = copymsg(mpctl);
10482 10492
10483 10493 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10484 10494 optp->level = EXPER_DVMRP;
10485 10495 optp->name = EXPER_DVMRP_VIF;
10486 10496 if (!ip_mroute_vif(mpctl->b_cont, ipst)) {
10487 10497 ip0dbg(("ip_mroute_vif: failed\n"));
10488 10498 }
10489 10499 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10490 10500 ip3dbg(("ip_snmp_get_mib2_virt_multi: level %d, name %d, len %d\n",
10491 10501 (int)optp->level, (int)optp->name, (int)optp->len));
10492 10502 qreply(q, mpctl);
10493 10503 return (mp2ctl);
10494 10504 }
10495 10505
10496 10506 /* Multicast routing table. */
10497 10507 static mblk_t *
10498 10508 ip_snmp_get_mib2_multi_rtable(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10499 10509 {
10500 10510 struct opthdr *optp;
10501 10511 mblk_t *mp2ctl;
10502 10512
10503 10513 /*
10504 10514 * make a copy of the original message
10505 10515 */
10506 10516 mp2ctl = copymsg(mpctl);
10507 10517
10508 10518 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10509 10519 optp->level = EXPER_DVMRP;
10510 10520 optp->name = EXPER_DVMRP_MRT;
10511 10521 if (!ip_mroute_mrt(mpctl->b_cont, ipst)) {
10512 10522 ip0dbg(("ip_mroute_mrt: failed\n"));
10513 10523 }
10514 10524 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10515 10525 ip3dbg(("ip_snmp_get_mib2_multi_rtable: level %d, name %d, len %d\n",
10516 10526 (int)optp->level, (int)optp->name, (int)optp->len));
10517 10527 qreply(q, mpctl);
10518 10528 return (mp2ctl);
10519 10529 }
10520 10530
10521 10531 /*
10522 10532 * Return ipRouteEntryTable, ipNetToMediaEntryTable, and ipRouteAttributeTable
10523 10533 * in one IRE walk.
10524 10534 */
10525 10535 static mblk_t *
10526 10536 ip_snmp_get_mib2_ip_route_media(queue_t *q, mblk_t *mpctl, int level,
10527 10537 ip_stack_t *ipst)
10528 10538 {
10529 10539 struct opthdr *optp;
10530 10540 mblk_t *mp2ctl; /* Returned */
10531 10541 mblk_t *mp3ctl; /* nettomedia */
10532 10542 mblk_t *mp4ctl; /* routeattrs */
10533 10543 iproutedata_t ird;
10534 10544 zoneid_t zoneid;
10535 10545
10536 10546 /*
10537 10547 * make copies of the original message
10538 10548 * - mp2ctl is returned unchanged to the caller for his use
10539 10549 * - mpctl is sent upstream as ipRouteEntryTable
10540 10550 * - mp3ctl is sent upstream as ipNetToMediaEntryTable
10541 10551 * - mp4ctl is sent upstream as ipRouteAttributeTable
10542 10552 */
10543 10553 mp2ctl = copymsg(mpctl);
10544 10554 mp3ctl = copymsg(mpctl);
10545 10555 mp4ctl = copymsg(mpctl);
10546 10556 if (mp3ctl == NULL || mp4ctl == NULL) {
10547 10557 freemsg(mp4ctl);
10548 10558 freemsg(mp3ctl);
10549 10559 freemsg(mp2ctl);
10550 10560 freemsg(mpctl);
10551 10561 return (NULL);
10552 10562 }
10553 10563
10554 10564 bzero(&ird, sizeof (ird));
10555 10565
10556 10566 ird.ird_route.lp_head = mpctl->b_cont;
10557 10567 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10558 10568 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10559 10569 /*
10560 10570 * If the level has been set to the special EXPER_IP_AND_ALL_IRES value,
10561 10571 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is
10562 10572 * intended as a temporary solution until a proper MIB API is provided
10563 10573 * that offers complete filtering/caller-opt-in.
10564 10574 */
10565 10575 if (level == EXPER_IP_AND_ALL_IRES)
10566 10576 ird.ird_flags |= IRD_REPORT_ALL;
10567 10577
10568 10578 zoneid = Q_TO_CONN(q)->conn_zoneid;
10569 10579 ire_walk_v4(ip_snmp_get2_v4, &ird, zoneid, ipst);
10570 10580
10571 10581 /* ipRouteEntryTable in mpctl */
10572 10582 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10573 10583 optp->level = MIB2_IP;
10574 10584 optp->name = MIB2_IP_ROUTE;
10575 10585 optp->len = msgdsize(ird.ird_route.lp_head);
10576 10586 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10577 10587 (int)optp->level, (int)optp->name, (int)optp->len));
10578 10588 qreply(q, mpctl);
10579 10589
10580 10590 /* ipNetToMediaEntryTable in mp3ctl */
10581 10591 ncec_walk(NULL, ip_snmp_get2_v4_media, &ird, ipst);
10582 10592
10583 10593 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10584 10594 optp->level = MIB2_IP;
10585 10595 optp->name = MIB2_IP_MEDIA;
10586 10596 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10587 10597 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10588 10598 (int)optp->level, (int)optp->name, (int)optp->len));
10589 10599 qreply(q, mp3ctl);
10590 10600
10591 10601 /* ipRouteAttributeTable in mp4ctl */
10592 10602 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10593 10603 optp->level = MIB2_IP;
10594 10604 optp->name = EXPER_IP_RTATTR;
10595 10605 optp->len = msgdsize(ird.ird_attrs.lp_head);
10596 10606 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10597 10607 (int)optp->level, (int)optp->name, (int)optp->len));
10598 10608 if (optp->len == 0)
10599 10609 freemsg(mp4ctl);
10600 10610 else
10601 10611 qreply(q, mp4ctl);
10602 10612
10603 10613 return (mp2ctl);
10604 10614 }
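
Each of the three replies built above uses the same layout: a struct
opthdr sits directly after the T_optmgmt_ack in the control mblk, and
its level/name/len triple tells the MIB consumer which table follows
and how long it is. The sketch below shows that placement in
standalone C; the structs and constants are simplified stand-ins, not
the real <sys/tihdr.h> and <inet/mib2.h> definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ex_ack_hdr {		/* stand-in for struct T_optmgmt_ack */
	uint32_t prim_type;
	uint32_t opt_length;
	uint32_t opt_offset;
	uint32_t mgmt_flags;
};

struct ex_opt_hdr {		/* stand-in for struct opthdr */
	uint32_t level;		/* which MIB group, e.g. MIB2_IP */
	uint32_t name;		/* which table within the group */
	uint32_t len;		/* bytes of table data that follow */
};

int
main(void)
{
	unsigned char buf[sizeof (struct ex_ack_hdr) +
	    sizeof (struct ex_opt_hdr)];
	struct ex_opt_hdr *optp;

	memset(buf, 0, sizeof (buf));
	/* Same pointer arithmetic as the qreply() paths above. */
	optp = (struct ex_opt_hdr *)&buf[sizeof (struct ex_ack_hdr)];
	optp->level = 0;	/* placeholder for MIB2_IP */
	optp->name = 0;		/* placeholder for MIB2_IP_ROUTE */
	optp->len = 0;		/* set from msgdsize() once data is appended */
	printf("opthdr lives at offset %zu\n",
	    (size_t)((unsigned char *)optp - buf));
	return (0);
}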
10605 10615
10606 10616 /*
10607 10617 * Return ipv6RouteEntryTable and ipv6RouteAttributeTable in one IRE walk, and
10608 10618 * ipv6NetToMediaEntryTable in an NDP walk.
10609 10619 */
10610 10620 static mblk_t *
10611 10621 ip_snmp_get_mib2_ip6_route_media(queue_t *q, mblk_t *mpctl, int level,
10612 10622 ip_stack_t *ipst)
10613 10623 {
10614 10624 struct opthdr *optp;
10615 10625 mblk_t *mp2ctl; /* Returned */
10616 10626 mblk_t *mp3ctl; /* nettomedia */
10617 10627 mblk_t *mp4ctl; /* routeattrs */
10618 10628 iproutedata_t ird;
10619 10629 zoneid_t zoneid;
10620 10630
10621 10631 /*
10622 10632 * make copies of the original message
10623 10633 * - mp2ctl is returned unchanged to the caller for his use
10624 10634 * - mpctl is sent upstream as ipv6RouteEntryTable
10625 10635 * - mp3ctl is sent upstream as ipv6NetToMediaEntryTable
10626 10636 * - mp4ctl is sent upstream as ipv6RouteAttributeTable
10627 10637 */
10628 10638 mp2ctl = copymsg(mpctl);
10629 10639 mp3ctl = copymsg(mpctl);
10630 10640 mp4ctl = copymsg(mpctl);
10631 10641 if (mp3ctl == NULL || mp4ctl == NULL) {
10632 10642 freemsg(mp4ctl);
10633 10643 freemsg(mp3ctl);
10634 10644 freemsg(mp2ctl);
10635 10645 freemsg(mpctl);
10636 10646 return (NULL);
10637 10647 }
10638 10648
10639 10649 bzero(&ird, sizeof (ird));
10640 10650
10641 10651 ird.ird_route.lp_head = mpctl->b_cont;
10642 10652 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10643 10653 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10644 10654 /*
10645 10655 	 * If the level has been set to the special EXPER_IP_AND_ALL_IRES
10646 10656 	 * value, then also include ire_testhidden IREs and IRE_IF_CLONE.
10647 10657 	 * This is intended as a temporary solution until a proper MIB API
10648 10658 	 * is provided that supports complete filtering/caller-opt-in.
10649 10659 */
10650 10660 if (level == EXPER_IP_AND_ALL_IRES)
10651 10661 ird.ird_flags |= IRD_REPORT_ALL;
10652 10662
10653 10663 zoneid = Q_TO_CONN(q)->conn_zoneid;
10654 10664 ire_walk_v6(ip_snmp_get2_v6_route, &ird, zoneid, ipst);
10655 10665
10656 10666 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10657 10667 optp->level = MIB2_IP6;
10658 10668 optp->name = MIB2_IP6_ROUTE;
10659 10669 optp->len = msgdsize(ird.ird_route.lp_head);
10660 10670 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10661 10671 (int)optp->level, (int)optp->name, (int)optp->len));
10662 10672 qreply(q, mpctl);
10663 10673
10664 10674 /* ipv6NetToMediaEntryTable in mp3ctl */
10665 10675 ncec_walk(NULL, ip_snmp_get2_v6_media, &ird, ipst);
10666 10676
10667 10677 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10668 10678 optp->level = MIB2_IP6;
10669 10679 optp->name = MIB2_IP6_MEDIA;
10670 10680 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10671 10681 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10672 10682 (int)optp->level, (int)optp->name, (int)optp->len));
10673 10683 qreply(q, mp3ctl);
10674 10684
10675 10685 /* ipv6RouteAttributeTable in mp4ctl */
10676 10686 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10677 10687 optp->level = MIB2_IP6;
10678 10688 optp->name = EXPER_IP_RTATTR;
10679 10689 optp->len = msgdsize(ird.ird_attrs.lp_head);
10680 10690 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10681 10691 (int)optp->level, (int)optp->name, (int)optp->len));
10682 10692 if (optp->len == 0)
10683 10693 freemsg(mp4ctl);
10684 10694 else
10685 10695 qreply(q, mp4ctl);
10686 10696
10687 10697 return (mp2ctl);
10688 10698 }
10689 10699
10690 10700 /*
10691 10701 * IPv6 mib: One per ill
10692 10702 */
10693 10703 static mblk_t *
10694 10704 ip_snmp_get_mib2_ip6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10695 10705 boolean_t legacy_req)
10696 10706 {
10697 10707 struct opthdr *optp;
10698 10708 mblk_t *mp2ctl;
10699 10709 ill_t *ill;
10700 10710 ill_walk_context_t ctx;
10701 10711 mblk_t *mp_tail = NULL;
10702 10712 mib2_ipv6AddrEntry_t mae6;
10703 10713 mib2_ipIfStatsEntry_t *ise;
10704 10714 size_t ise_size, iae_size;
10705 10715
10706 10716 /*
10707 10717 * Make a copy of the original message
10708 10718 */
10709 10719 mp2ctl = copymsg(mpctl);
10710 10720
10711 10721 /* fixed length IPv6 structure ... */
10712 10722
10713 10723 if (legacy_req) {
10714 10724 ise_size = LEGACY_MIB_SIZE(&ipst->ips_ip6_mib,
10715 10725 mib2_ipIfStatsEntry_t);
10716 10726 iae_size = LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t);
10717 10727 } else {
10718 10728 ise_size = sizeof (mib2_ipIfStatsEntry_t);
10719 10729 iae_size = sizeof (mib2_ipv6AddrEntry_t);
10720 10730 }
10721 10731
10722 10732 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10723 10733 optp->level = MIB2_IP6;
10724 10734 optp->name = 0;
10725 10735 /* Include "unknown interface" ip6_mib */
10726 10736 ipst->ips_ip6_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv6;
10727 10737 ipst->ips_ip6_mib.ipIfStatsIfIndex =
10728 10738 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
10729 10739 SET_MIB(ipst->ips_ip6_mib.ipIfStatsForwarding,
10730 10740 ipst->ips_ipv6_forwarding ? 1 : 2);
10731 10741 SET_MIB(ipst->ips_ip6_mib.ipIfStatsDefaultHopLimit,
10732 10742 ipst->ips_ipv6_def_hops);
10733 10743 SET_MIB(ipst->ips_ip6_mib.ipIfStatsEntrySize,
10734 10744 sizeof (mib2_ipIfStatsEntry_t));
10735 10745 SET_MIB(ipst->ips_ip6_mib.ipIfStatsAddrEntrySize,
10736 10746 sizeof (mib2_ipv6AddrEntry_t));
10737 10747 SET_MIB(ipst->ips_ip6_mib.ipIfStatsRouteEntrySize,
10738 10748 sizeof (mib2_ipv6RouteEntry_t));
10739 10749 SET_MIB(ipst->ips_ip6_mib.ipIfStatsNetToMediaEntrySize,
10740 10750 sizeof (mib2_ipv6NetToMediaEntry_t));
10741 10751 SET_MIB(ipst->ips_ip6_mib.ipIfStatsMemberEntrySize,
10742 10752 sizeof (ipv6_member_t));
10743 10753 SET_MIB(ipst->ips_ip6_mib.ipIfStatsGroupSourceEntrySize,
10744 10754 sizeof (ipv6_grpsrc_t));
10745 10755
10746 10756 /*
10747 10757 * Synchronize 64- and 32-bit counters
10748 10758 */
10749 10759 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInReceives,
10750 10760 ipIfStatsHCInReceives);
10751 10761 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInDelivers,
10752 10762 ipIfStatsHCInDelivers);
10753 10763 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutRequests,
10754 10764 ipIfStatsHCOutRequests);
10755 10765 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutForwDatagrams,
10756 10766 ipIfStatsHCOutForwDatagrams);
10757 10767 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutMcastPkts,
10758 10768 ipIfStatsHCOutMcastPkts);
10759 10769 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInMcastPkts,
10760 10770 ipIfStatsHCInMcastPkts);
10761 10771
10762 10772 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10763 10773 (char *)&ipst->ips_ip6_mib, (int)ise_size)) {
10764 10774 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate %u bytes\n",
10765 10775 (uint_t)ise_size));
10766 10776 } else if (legacy_req) {
10767 10777 /* Adjust the EntrySize fields for legacy requests. */
10768 10778 ise =
10769 10779 (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr - (int)ise_size);
10770 10780 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10771 10781 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10772 10782 }
10773 10783
10774 10784 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10775 10785 ill = ILL_START_WALK_V6(&ctx, ipst);
10776 10786 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10777 10787 ill->ill_ip_mib->ipIfStatsIfIndex =
10778 10788 ill->ill_phyint->phyint_ifindex;
10779 10789 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
10780 10790 ipst->ips_ipv6_forwarding ? 1 : 2);
10781 10791 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultHopLimit,
10782 10792 ill->ill_max_hops);
10783 10793
10784 10794 /*
10785 10795 * Synchronize 64- and 32-bit counters
10786 10796 */
10787 10797 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInReceives,
10788 10798 ipIfStatsHCInReceives);
10789 10799 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInDelivers,
10790 10800 ipIfStatsHCInDelivers);
10791 10801 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutRequests,
10792 10802 ipIfStatsHCOutRequests);
10793 10803 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutForwDatagrams,
10794 10804 ipIfStatsHCOutForwDatagrams);
10795 10805 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutMcastPkts,
10796 10806 ipIfStatsHCOutMcastPkts);
10797 10807 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInMcastPkts,
10798 10808 ipIfStatsHCInMcastPkts);
10799 10809
10800 10810 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10801 10811 (char *)ill->ill_ip_mib, (int)ise_size)) {
10802 10812 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate "
10803 10813 "%u bytes\n", (uint_t)ise_size));
10804 10814 } else if (legacy_req) {
10805 10815 /* Adjust the EntrySize fields for legacy requests. */
10806 10816 ise = (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr -
10807 10817 (int)ise_size);
10808 10818 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10809 10819 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10810 10820 }
10811 10821 }
10812 10822 rw_exit(&ipst->ips_ill_g_lock);
10813 10823
10814 10824 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10815 10825 ip3dbg(("ip_snmp_get_mib2_ip6: level %d, name %d, len %d\n",
10816 10826 (int)optp->level, (int)optp->name, (int)optp->len));
10817 10827 qreply(q, mpctl);
10818 10828 return (mp2ctl);
10819 10829 }
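
The SYNC32_MIB() calls above keep each 32-bit legacy counter consistent
with its 64-bit high-capacity twin just before the record is exported.
A minimal standalone sketch of the effect, with an illustrative struct
and macro rather than the kernel definitions:

#include <stdint.h>
#include <stdio.h>

struct ex_mib {
	uint64_t hc_in_receives;	/* authoritative 64-bit counter */
	uint32_t in_receives;		/* 32-bit view for old consumers */
};

#define	EX_SYNC32(m, f32, f64)	((m)->f32 = (uint32_t)(m)->f64)

int
main(void)
{
	struct ex_mib m = { .hc_in_receives = 0x100000002ULL };

	EX_SYNC32(&m, in_receives, hc_in_receives);
	/* Prints 2: only the low 32 bits survive, as with SYNC32_MIB. */
	printf("%u\n", m.in_receives);
	return (0);
}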
10820 10830
10821 10831 /*
10822 10832 * ICMPv6 mib: One per ill
10823 10833 */
10824 10834 static mblk_t *
10825 10835 ip_snmp_get_mib2_icmp6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10826 10836 {
10827 10837 struct opthdr *optp;
10828 10838 mblk_t *mp2ctl;
10829 10839 ill_t *ill;
10830 10840 ill_walk_context_t ctx;
10831 10841 mblk_t *mp_tail = NULL;
10832 10842 /*
10833 10843 * Make a copy of the original message
10834 10844 */
10835 10845 mp2ctl = copymsg(mpctl);
10836 10846
10837 10847 /* fixed length ICMPv6 structure ... */
10838 10848
10839 10849 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10840 10850 optp->level = MIB2_ICMP6;
10841 10851 optp->name = 0;
10842 10852 /* Include "unknown interface" icmp6_mib */
10843 10853 ipst->ips_icmp6_mib.ipv6IfIcmpIfIndex =
10844 10854 MIB2_UNKNOWN_INTERFACE; /* netstat flag */
10845 10855 ipst->ips_icmp6_mib.ipv6IfIcmpEntrySize =
10846 10856 sizeof (mib2_ipv6IfIcmpEntry_t);
10847 10857 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10848 10858 (char *)&ipst->ips_icmp6_mib,
10849 10859 (int)sizeof (ipst->ips_icmp6_mib))) {
10850 10860 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate %u bytes\n",
10851 10861 (uint_t)sizeof (ipst->ips_icmp6_mib)));
10852 10862 }
10853 10863
10854 10864 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10855 10865 ill = ILL_START_WALK_V6(&ctx, ipst);
10856 10866 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10857 10867 ill->ill_icmp6_mib->ipv6IfIcmpIfIndex =
10858 10868 ill->ill_phyint->phyint_ifindex;
10859 10869 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10860 10870 (char *)ill->ill_icmp6_mib,
10861 10871 (int)sizeof (*ill->ill_icmp6_mib))) {
10862 10872 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate "
10863 10873 "%u bytes\n",
10864 10874 (uint_t)sizeof (*ill->ill_icmp6_mib)));
10865 10875 }
10866 10876 }
10867 10877 rw_exit(&ipst->ips_ill_g_lock);
10868 10878
10869 10879 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10870 10880 ip3dbg(("ip_snmp_get_mib2_icmp6: level %d, name %d, len %d\n",
10871 10881 (int)optp->level, (int)optp->name, (int)optp->len));
10872 10882 qreply(q, mpctl);
10873 10883 return (mp2ctl);
10874 10884 }
10875 10885
10876 10886 /*
10877 10887 * ire_walk routine to create both ipRouteEntryTable and
10878 10888 * ipRouteAttributeTable in one IRE walk
10879 10889 */
10880 10890 static void
10881 10891 ip_snmp_get2_v4(ire_t *ire, iproutedata_t *ird)
10882 10892 {
10883 10893 ill_t *ill;
10884 10894 mib2_ipRouteEntry_t *re;
10885 10895 mib2_ipAttributeEntry_t iaes;
10886 10896 tsol_ire_gw_secattr_t *attrp;
10887 10897 tsol_gc_t *gc = NULL;
10888 10898 tsol_gcgrp_t *gcgrp = NULL;
10889 10899 ip_stack_t *ipst = ire->ire_ipst;
10890 10900
10891 10901 ASSERT(ire->ire_ipversion == IPV4_VERSION);
10892 10902
10893 10903 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
10894 10904 if (ire->ire_testhidden)
10895 10905 return;
10896 10906 if (ire->ire_type & IRE_IF_CLONE)
10897 10907 return;
10898 10908 }
10899 10909
10900 10910 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
10901 10911 return;
10902 10912
10903 10913 if ((attrp = ire->ire_gw_secattr) != NULL) {
10904 10914 mutex_enter(&attrp->igsa_lock);
10905 10915 if ((gc = attrp->igsa_gc) != NULL) {
10906 10916 gcgrp = gc->gc_grp;
10907 10917 ASSERT(gcgrp != NULL);
10908 10918 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
10909 10919 }
10910 10920 mutex_exit(&attrp->igsa_lock);
10911 10921 }
10912 10922 /*
10913 10923 * Return all IRE types for route table... let caller pick and choose
10914 10924 */
10915 10925 re->ipRouteDest = ire->ire_addr;
10916 10926 ill = ire->ire_ill;
10917 10927 re->ipRouteIfIndex.o_length = 0;
10918 10928 if (ill != NULL) {
10919 10929 ill_get_name(ill, re->ipRouteIfIndex.o_bytes, OCTET_LENGTH);
10920 10930 re->ipRouteIfIndex.o_length =
10921 10931 mi_strlen(re->ipRouteIfIndex.o_bytes);
10922 10932 }
10923 10933 re->ipRouteMetric1 = -1;
10924 10934 re->ipRouteMetric2 = -1;
10925 10935 re->ipRouteMetric3 = -1;
10926 10936 re->ipRouteMetric4 = -1;
10927 10937
10928 10938 re->ipRouteNextHop = ire->ire_gateway_addr;
10929 10939 /* indirect(4), direct(3), or invalid(2) */
10930 10940 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
10931 10941 re->ipRouteType = 2;
10932 10942 else if (ire->ire_type & IRE_ONLINK)
10933 10943 re->ipRouteType = 3;
10934 10944 else
10935 10945 re->ipRouteType = 4;
10936 10946
10937 10947 re->ipRouteProto = -1;
10938 10948 re->ipRouteAge = gethrestime_sec() - ire->ire_create_time;
10939 10949 re->ipRouteMask = ire->ire_mask;
10940 10950 re->ipRouteMetric5 = -1;
10941 10951 re->ipRouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
10942 10952 if (ire->ire_ill != NULL && re->ipRouteInfo.re_max_frag == 0)
10943 10953 re->ipRouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
10944 10954
10945 10955 re->ipRouteInfo.re_frag_flag = 0;
10946 10956 re->ipRouteInfo.re_rtt = 0;
10947 10957 re->ipRouteInfo.re_src_addr = 0;
10948 10958 re->ipRouteInfo.re_ref = ire->ire_refcnt;
10949 10959 re->ipRouteInfo.re_obpkt = ire->ire_ob_pkt_count;
10950 10960 re->ipRouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
10951 10961 re->ipRouteInfo.re_flags = ire->ire_flags;
10952 10962
10953 10963 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
10954 10964 if (ire->ire_type & IRE_INTERFACE) {
10955 10965 ire_t *child;
10956 10966
10957 10967 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
10958 10968 child = ire->ire_dep_children;
10959 10969 while (child != NULL) {
10960 10970 re->ipRouteInfo.re_obpkt += child->ire_ob_pkt_count;
10961 10971 re->ipRouteInfo.re_ibpkt += child->ire_ib_pkt_count;
10962 10972 child = child->ire_dep_sib_next;
10963 10973 }
10964 10974 rw_exit(&ipst->ips_ire_dep_lock);
10965 10975 }
10966 10976
10967 10977 if (ire->ire_flags & RTF_DYNAMIC) {
10968 10978 re->ipRouteInfo.re_ire_type = IRE_HOST_REDIRECT;
10969 10979 } else {
10970 10980 re->ipRouteInfo.re_ire_type = ire->ire_type;
10971 10981 }
10972 10982
10973 10983 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
10974 10984 (char *)re, (int)sizeof (*re))) {
10975 10985 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u bytes\n",
10976 10986 (uint_t)sizeof (*re)));
10977 10987 }
10978 10988
10979 10989 if (gc != NULL) {
10980 10990 iaes.iae_routeidx = ird->ird_idx;
10981 10991 iaes.iae_doi = gc->gc_db->gcdb_doi;
10982 10992 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
10983 10993
10984 10994 if (!snmp_append_data2(ird->ird_attrs.lp_head,
10985 10995 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
10986 10996 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u "
10987 10997 "bytes\n", (uint_t)sizeof (iaes)));
10988 10998 }
10989 10999 }
10990 11000
10991 11001 /* bump route index for next pass */
10992 11002 ird->ird_idx++;
10993 11003
10994 11004 kmem_free(re, sizeof (*re));
10995 11005 if (gcgrp != NULL)
10996 11006 rw_exit(&gcgrp->gcgrp_rwlock);
10997 11007 }
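
The ipRouteType assignment above follows the RFC 1213 encoding:
invalid(2) for rejected/blackholed routes, direct(3) for on-link
destinations, indirect(4) for everything reached through a gateway. A
standalone sketch, with placeholder flag bits standing in for
RTF_REJECT, RTF_BLACKHOLE, and IRE_ONLINK:

#include <stdio.h>

#define	EX_RTF_REJECT		0x1	/* placeholders for the real flags */
#define	EX_RTF_BLACKHOLE	0x2
#define	EX_IRE_ONLINK		0x4

static int
ex_route_type(unsigned flags, unsigned type)
{
	if (flags & (EX_RTF_REJECT | EX_RTF_BLACKHOLE))
		return (2);	/* invalid */
	if (type & EX_IRE_ONLINK)
		return (3);	/* direct: destination is on-link */
	return (4);		/* indirect: reached via a gateway */
}

int
main(void)
{
	printf("%d\n", ex_route_type(0, EX_IRE_ONLINK));	/* 3 */
	printf("%d\n", ex_route_type(EX_RTF_REJECT, 0));	/* 2 */
	printf("%d\n", ex_route_type(0, 0));			/* 4 */
	return (0);
}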
10998 11008
10999 11009 /*
11000 11010 * ire_walk routine to create ipv6RouteEntryTable and ipRouteEntryTable.
11001 11011 */
11002 11012 static void
11003 11013 ip_snmp_get2_v6_route(ire_t *ire, iproutedata_t *ird)
11004 11014 {
11005 11015 ill_t *ill;
11006 11016 mib2_ipv6RouteEntry_t *re;
11007 11017 mib2_ipAttributeEntry_t iaes;
11008 11018 tsol_ire_gw_secattr_t *attrp;
11009 11019 tsol_gc_t *gc = NULL;
11010 11020 tsol_gcgrp_t *gcgrp = NULL;
11011 11021 ip_stack_t *ipst = ire->ire_ipst;
11012 11022
11013 11023 ASSERT(ire->ire_ipversion == IPV6_VERSION);
11014 11024
11015 11025 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
11016 11026 if (ire->ire_testhidden)
11017 11027 return;
11018 11028 if (ire->ire_type & IRE_IF_CLONE)
11019 11029 return;
11020 11030 }
11021 11031
11022 11032 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
11023 11033 return;
11024 11034
11025 11035 if ((attrp = ire->ire_gw_secattr) != NULL) {
11026 11036 mutex_enter(&attrp->igsa_lock);
11027 11037 if ((gc = attrp->igsa_gc) != NULL) {
11028 11038 gcgrp = gc->gc_grp;
11029 11039 ASSERT(gcgrp != NULL);
11030 11040 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
11031 11041 }
11032 11042 mutex_exit(&attrp->igsa_lock);
11033 11043 }
11034 11044 /*
11035 11045 * Return all IRE types for route table... let caller pick and choose
11036 11046 */
11037 11047 re->ipv6RouteDest = ire->ire_addr_v6;
11038 11048 re->ipv6RoutePfxLength = ip_mask_to_plen_v6(&ire->ire_mask_v6);
11039 11049 re->ipv6RouteIndex = 0; /* Unique when multiple with same dest/plen */
11040 11050 re->ipv6RouteIfIndex.o_length = 0;
11041 11051 ill = ire->ire_ill;
11042 11052 if (ill != NULL) {
11043 11053 ill_get_name(ill, re->ipv6RouteIfIndex.o_bytes, OCTET_LENGTH);
11044 11054 re->ipv6RouteIfIndex.o_length =
11045 11055 mi_strlen(re->ipv6RouteIfIndex.o_bytes);
11046 11056 }
11047 11057
11048 11058 ASSERT(!(ire->ire_type & IRE_BROADCAST));
11049 11059
11050 11060 mutex_enter(&ire->ire_lock);
11051 11061 re->ipv6RouteNextHop = ire->ire_gateway_addr_v6;
11052 11062 mutex_exit(&ire->ire_lock);
11053 11063
11054 11064 /* remote(4), local(3), or discard(2) */
11055 11065 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
11056 11066 re->ipv6RouteType = 2;
11057 11067 else if (ire->ire_type & IRE_ONLINK)
11058 11068 re->ipv6RouteType = 3;
11059 11069 else
11060 11070 re->ipv6RouteType = 4;
11061 11071
11062 11072 re->ipv6RouteProtocol = -1;
11063 11073 re->ipv6RoutePolicy = 0;
11064 11074 re->ipv6RouteAge = gethrestime_sec() - ire->ire_create_time;
11065 11075 re->ipv6RouteNextHopRDI = 0;
11066 11076 re->ipv6RouteWeight = 0;
11067 11077 re->ipv6RouteMetric = 0;
11068 11078 re->ipv6RouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
11069 11079 if (ire->ire_ill != NULL && re->ipv6RouteInfo.re_max_frag == 0)
11070 11080 re->ipv6RouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
11071 11081
11072 11082 re->ipv6RouteInfo.re_frag_flag = 0;
11073 11083 re->ipv6RouteInfo.re_rtt = 0;
11074 11084 re->ipv6RouteInfo.re_src_addr = ipv6_all_zeros;
11075 11085 re->ipv6RouteInfo.re_obpkt = ire->ire_ob_pkt_count;
11076 11086 re->ipv6RouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
11077 11087 re->ipv6RouteInfo.re_ref = ire->ire_refcnt;
11078 11088 re->ipv6RouteInfo.re_flags = ire->ire_flags;
11079 11089
11080 11090 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
11081 11091 if (ire->ire_type & IRE_INTERFACE) {
11082 11092 ire_t *child;
11083 11093
11084 11094 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
11085 11095 child = ire->ire_dep_children;
11086 11096 while (child != NULL) {
11087 11097 re->ipv6RouteInfo.re_obpkt += child->ire_ob_pkt_count;
11088 11098 re->ipv6RouteInfo.re_ibpkt += child->ire_ib_pkt_count;
11089 11099 child = child->ire_dep_sib_next;
11090 11100 }
11091 11101 rw_exit(&ipst->ips_ire_dep_lock);
11092 11102 }
11093 11103 if (ire->ire_flags & RTF_DYNAMIC) {
11094 11104 re->ipv6RouteInfo.re_ire_type = IRE_HOST_REDIRECT;
11095 11105 } else {
11096 11106 re->ipv6RouteInfo.re_ire_type = ire->ire_type;
11097 11107 }
11098 11108
11099 11109 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
11100 11110 (char *)re, (int)sizeof (*re))) {
11101 11111 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u bytes\n",
11102 11112 (uint_t)sizeof (*re)));
11103 11113 }
11104 11114
11105 11115 if (gc != NULL) {
11106 11116 iaes.iae_routeidx = ird->ird_idx;
11107 11117 iaes.iae_doi = gc->gc_db->gcdb_doi;
11108 11118 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
11109 11119
11110 11120 if (!snmp_append_data2(ird->ird_attrs.lp_head,
11111 11121 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
11112 11122 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u "
11113 11123 "bytes\n", (uint_t)sizeof (iaes)));
11114 11124 }
11115 11125 }
11116 11126
11117 11127 /* bump route index for next pass */
11118 11128 ird->ird_idx++;
11119 11129
11120 11130 kmem_free(re, sizeof (*re));
11121 11131 if (gcgrp != NULL)
11122 11132 rw_exit(&gcgrp->gcgrp_rwlock);
11123 11133 }
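
The ipv6RoutePfxLength above is derived from the route's netmask via
ip_mask_to_plen_v6(). For a contiguous mask the prefix length is simply
the number of set bits; a self-contained sketch of that computation
(the helper name ex_mask_to_plen is illustrative):

#include <stdint.h>
#include <stdio.h>

static int
ex_mask_to_plen(const uint8_t mask[16])
{
	int plen = 0, i;

	for (i = 0; i < 16; i++) {
		uint8_t b = mask[i];

		while (b) {		/* count set bits per byte */
			plen += b & 1;
			b >>= 1;
		}
	}
	return (plen);
}

int
main(void)
{
	/* ffff:ffff:ffff:ffe0:: is a /59 */
	uint8_t mask[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0 };

	printf("/%d\n", ex_mask_to_plen(mask));
	return (0);
}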
11124 11134
11125 11135 /*
11126 11136 * ncec_walk routine to create ipv6NetToMediaEntryTable
11127 11137 */
11128 11138 static int
11129 11139 ip_snmp_get2_v6_media(ncec_t *ncec, iproutedata_t *ird)
11130 11140 {
11131 11141 ill_t *ill;
11132 11142 mib2_ipv6NetToMediaEntry_t ntme;
11133 11143
11134 11144 ill = ncec->ncec_ill;
11135 11145 /* skip arpce entries, and loopback ncec entries */
11136 11146 if (ill->ill_isv6 == B_FALSE || ill->ill_net_type == IRE_LOOPBACK)
11137 11147 return (0);
11138 11148 /*
11139 11149 * Neighbor cache entry attached to IRE with on-link
11140 11150 * destination.
11141 11151 * We report all IPMP groups on ncec_ill which is normally the upper.
11142 11152 */
11143 11153 ntme.ipv6NetToMediaIfIndex = ill->ill_phyint->phyint_ifindex;
11144 11154 ntme.ipv6NetToMediaNetAddress = ncec->ncec_addr;
11145 11155 ntme.ipv6NetToMediaPhysAddress.o_length = ill->ill_phys_addr_length;
11146 11156 if (ncec->ncec_lladdr != NULL) {
11147 11157 bcopy(ncec->ncec_lladdr, ntme.ipv6NetToMediaPhysAddress.o_bytes,
11148 11158 ntme.ipv6NetToMediaPhysAddress.o_length);
11149 11159 }
11150 11160 /*
11151 11161 * Note: Returns ND_* states. Should be:
11152 11162 * reachable(1), stale(2), delay(3), probe(4),
11153 11163 * invalid(5), unknown(6)
11154 11164 */
11155 11165 ntme.ipv6NetToMediaState = ncec->ncec_state;
11156 11166 ntme.ipv6NetToMediaLastUpdated = 0;
11157 11167
11158 11168 /* other(1), dynamic(2), static(3), local(4) */
11159 11169 if (NCE_MYADDR(ncec)) {
11160 11170 ntme.ipv6NetToMediaType = 4;
11161 11171 } else if (ncec->ncec_flags & NCE_F_PUBLISH) {
11162 11172 ntme.ipv6NetToMediaType = 1; /* proxy */
11163 11173 } else if (ncec->ncec_flags & NCE_F_STATIC) {
11164 11174 ntme.ipv6NetToMediaType = 3;
11165 11175 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST)) {
11166 11176 ntme.ipv6NetToMediaType = 1;
11167 11177 } else {
11168 11178 ntme.ipv6NetToMediaType = 2;
11169 11179 }
11170 11180
11171 11181 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11172 11182 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11173 11183 ip1dbg(("ip_snmp_get2_v6_media: failed to allocate %u bytes\n",
11174 11184 (uint_t)sizeof (ntme)));
11175 11185 }
11176 11186 return (0);
11177 11187 }
11178 11188
11179 11189 int
11180 11190 nce2ace(ncec_t *ncec)
11181 11191 {
11182 11192 int flags = 0;
11183 11193
11184 11194 if (NCE_ISREACHABLE(ncec))
11185 11195 flags |= ACE_F_RESOLVED;
11186 11196 if (ncec->ncec_flags & NCE_F_AUTHORITY)
11187 11197 flags |= ACE_F_AUTHORITY;
11188 11198 if (ncec->ncec_flags & NCE_F_PUBLISH)
11189 11199 flags |= ACE_F_PUBLISH;
11190 11200 if ((ncec->ncec_flags & NCE_F_NONUD) != 0)
11191 11201 flags |= ACE_F_PERMANENT;
11192 11202 if (NCE_MYADDR(ncec))
11193 11203 flags |= (ACE_F_MYADDR | ACE_F_AUTHORITY);
11194 11204 if (ncec->ncec_flags & NCE_F_UNVERIFIED)
11195 11205 flags |= ACE_F_UNVERIFIED;
11198 11208 if (ncec->ncec_flags & NCE_F_DELAYED)
11199 11209 flags |= ACE_F_DELAYED;
11200 11210 return (flags);
11201 11211 }
11202 11212
11203 11213 /*
11204 11214 * ncec_walk routine to create ipNetToMediaEntryTable
11205 11215 */
11206 11216 static int
11207 11217 ip_snmp_get2_v4_media(ncec_t *ncec, iproutedata_t *ird)
11208 11218 {
11209 11219 ill_t *ill;
11210 11220 mib2_ipNetToMediaEntry_t ntme;
11211 11221 const char *name = "unknown";
11212 11222 ipaddr_t ncec_addr;
11213 11223
11214 11224 ill = ncec->ncec_ill;
11215 11225 if (ill->ill_isv6 || (ncec->ncec_flags & NCE_F_BCAST) ||
11216 11226 ill->ill_net_type == IRE_LOOPBACK)
11217 11227 return (0);
11218 11228
11219 11229 /* We report all IPMP groups on ncec_ill which is normally the upper. */
11220 11230 name = ill->ill_name;
11221 11231 /* Based on RFC 4293: other(1), inval(2), dyn(3), stat(4) */
11222 11232 if (NCE_MYADDR(ncec)) {
11223 11233 ntme.ipNetToMediaType = 4;
11224 11234 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST|NCE_F_PUBLISH)) {
11225 11235 ntme.ipNetToMediaType = 1;
11226 11236 } else {
11227 11237 ntme.ipNetToMediaType = 3;
11228 11238 }
11229 11239 ntme.ipNetToMediaIfIndex.o_length = MIN(OCTET_LENGTH, strlen(name));
11230 11240 bcopy(name, ntme.ipNetToMediaIfIndex.o_bytes,
11231 11241 ntme.ipNetToMediaIfIndex.o_length);
11232 11242
11233 11243 IN6_V4MAPPED_TO_IPADDR(&ncec->ncec_addr, ncec_addr);
11234 11244 bcopy(&ncec_addr, &ntme.ipNetToMediaNetAddress, sizeof (ncec_addr));
11235 11245
11236 11246 ntme.ipNetToMediaInfo.ntm_mask.o_length = sizeof (ipaddr_t);
11237 11247 ncec_addr = INADDR_BROADCAST;
11238 11248 bcopy(&ncec_addr, ntme.ipNetToMediaInfo.ntm_mask.o_bytes,
11239 11249 sizeof (ncec_addr));
11240 11250 /*
11241 11251 * map all the flags to the ACE counterpart.
11242 11252 */
11243 11253 ntme.ipNetToMediaInfo.ntm_flags = nce2ace(ncec);
11244 11254
11245 11255 ntme.ipNetToMediaPhysAddress.o_length =
11246 11256 MIN(OCTET_LENGTH, ill->ill_phys_addr_length);
11247 11257
11248 11258 if (!NCE_ISREACHABLE(ncec))
11249 11259 ntme.ipNetToMediaPhysAddress.o_length = 0;
11250 11260 else {
11251 11261 if (ncec->ncec_lladdr != NULL) {
11252 11262 bcopy(ncec->ncec_lladdr,
11253 11263 ntme.ipNetToMediaPhysAddress.o_bytes,
11254 11264 ntme.ipNetToMediaPhysAddress.o_length);
11255 11265 }
11256 11266 }
11257 11267
11258 11268 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11259 11269 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11260 11270 ip1dbg(("ip_snmp_get2_v4_media: failed to allocate %u bytes\n",
11261 11271 (uint_t)sizeof (ntme)));
11262 11272 }
11263 11273 return (0);
11264 11274 }
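
IN6_V4MAPPED_TO_IPADDR() in the walk above recovers an IPv4 address
from its v4-mapped IPv6 form (::ffff:a.b.c.d) by taking the last four
bytes. A standalone sketch of the same extraction:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct in6_addr v6;
	struct in_addr v4;
	char buf[INET_ADDRSTRLEN];

	/* ::ffff:192.0.2.1 is the v4-mapped form of 192.0.2.1 */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &v6);
	/* Equivalent of IN6_V4MAPPED_TO_IPADDR(): copy the low 32 bits. */
	memcpy(&v4, &v6.s6_addr[12], sizeof (v4));
	printf("%s\n", inet_ntop(AF_INET, &v4, buf, sizeof (buf)));
	return (0);
}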
11265 11275
11266 11276 /*
11267 11277  * Return 0 for an invalid set request, 1 otherwise (including non-TCP requests).
11268 11278 */
11269 11279 /* ARGSUSED */
11270 11280 int
11271 11281 ip_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
11272 11282 {
11273 11283 switch (level) {
11274 11284 case MIB2_IP:
11275 11285 case MIB2_ICMP:
11276 11286 switch (name) {
11277 11287 default:
11278 11288 break;
11279 11289 }
11280 11290 return (1);
11281 11291 default:
11282 11292 return (1);
11283 11293 }
11284 11294 }
11285 11295
11286 11296 /*
11287 11297 * When there exists both a 64- and 32-bit counter of a particular type
11288 11298  * (e.g., InReceives), only the 64-bit counters are added.
11289 11299 */
11290 11300 void
11291 11301 ip_mib2_add_ip_stats(mib2_ipIfStatsEntry_t *o1, mib2_ipIfStatsEntry_t *o2)
11292 11302 {
11293 11303 UPDATE_MIB(o1, ipIfStatsInHdrErrors, o2->ipIfStatsInHdrErrors);
11294 11304 UPDATE_MIB(o1, ipIfStatsInTooBigErrors, o2->ipIfStatsInTooBigErrors);
11295 11305 UPDATE_MIB(o1, ipIfStatsInNoRoutes, o2->ipIfStatsInNoRoutes);
11296 11306 UPDATE_MIB(o1, ipIfStatsInAddrErrors, o2->ipIfStatsInAddrErrors);
11297 11307 UPDATE_MIB(o1, ipIfStatsInUnknownProtos, o2->ipIfStatsInUnknownProtos);
11298 11308 UPDATE_MIB(o1, ipIfStatsInTruncatedPkts, o2->ipIfStatsInTruncatedPkts);
11299 11309 UPDATE_MIB(o1, ipIfStatsInDiscards, o2->ipIfStatsInDiscards);
11300 11310 UPDATE_MIB(o1, ipIfStatsOutDiscards, o2->ipIfStatsOutDiscards);
11301 11311 UPDATE_MIB(o1, ipIfStatsOutFragOKs, o2->ipIfStatsOutFragOKs);
11302 11312 UPDATE_MIB(o1, ipIfStatsOutFragFails, o2->ipIfStatsOutFragFails);
11303 11313 UPDATE_MIB(o1, ipIfStatsOutFragCreates, o2->ipIfStatsOutFragCreates);
11304 11314 UPDATE_MIB(o1, ipIfStatsReasmReqds, o2->ipIfStatsReasmReqds);
11305 11315 UPDATE_MIB(o1, ipIfStatsReasmOKs, o2->ipIfStatsReasmOKs);
11306 11316 UPDATE_MIB(o1, ipIfStatsReasmFails, o2->ipIfStatsReasmFails);
11307 11317 UPDATE_MIB(o1, ipIfStatsOutNoRoutes, o2->ipIfStatsOutNoRoutes);
11308 11318 UPDATE_MIB(o1, ipIfStatsReasmDuplicates, o2->ipIfStatsReasmDuplicates);
11309 11319 UPDATE_MIB(o1, ipIfStatsReasmPartDups, o2->ipIfStatsReasmPartDups);
11310 11320 UPDATE_MIB(o1, ipIfStatsForwProhibits, o2->ipIfStatsForwProhibits);
11311 11321 UPDATE_MIB(o1, udpInCksumErrs, o2->udpInCksumErrs);
11312 11322 UPDATE_MIB(o1, udpInOverflows, o2->udpInOverflows);
11313 11323 UPDATE_MIB(o1, rawipInOverflows, o2->rawipInOverflows);
11314 11324 UPDATE_MIB(o1, ipIfStatsInWrongIPVersion,
11315 11325 o2->ipIfStatsInWrongIPVersion);
11316 11326 UPDATE_MIB(o1, ipIfStatsOutWrongIPVersion,
11317 11327 	    o2->ipIfStatsOutWrongIPVersion);
11318 11328 UPDATE_MIB(o1, ipIfStatsOutSwitchIPVersion,
11319 11329 o2->ipIfStatsOutSwitchIPVersion);
11320 11330 UPDATE_MIB(o1, ipIfStatsHCInReceives, o2->ipIfStatsHCInReceives);
11321 11331 UPDATE_MIB(o1, ipIfStatsHCInOctets, o2->ipIfStatsHCInOctets);
11322 11332 UPDATE_MIB(o1, ipIfStatsHCInForwDatagrams,
11323 11333 o2->ipIfStatsHCInForwDatagrams);
11324 11334 UPDATE_MIB(o1, ipIfStatsHCInDelivers, o2->ipIfStatsHCInDelivers);
11325 11335 UPDATE_MIB(o1, ipIfStatsHCOutRequests, o2->ipIfStatsHCOutRequests);
11326 11336 UPDATE_MIB(o1, ipIfStatsHCOutForwDatagrams,
11327 11337 o2->ipIfStatsHCOutForwDatagrams);
11328 11338 UPDATE_MIB(o1, ipIfStatsOutFragReqds, o2->ipIfStatsOutFragReqds);
11329 11339 UPDATE_MIB(o1, ipIfStatsHCOutTransmits, o2->ipIfStatsHCOutTransmits);
11330 11340 UPDATE_MIB(o1, ipIfStatsHCOutOctets, o2->ipIfStatsHCOutOctets);
11331 11341 UPDATE_MIB(o1, ipIfStatsHCInMcastPkts, o2->ipIfStatsHCInMcastPkts);
11332 11342 UPDATE_MIB(o1, ipIfStatsHCInMcastOctets, o2->ipIfStatsHCInMcastOctets);
11333 11343 UPDATE_MIB(o1, ipIfStatsHCOutMcastPkts, o2->ipIfStatsHCOutMcastPkts);
11334 11344 UPDATE_MIB(o1, ipIfStatsHCOutMcastOctets,
11335 11345 o2->ipIfStatsHCOutMcastOctets);
11336 11346 UPDATE_MIB(o1, ipIfStatsHCInBcastPkts, o2->ipIfStatsHCInBcastPkts);
11337 11347 UPDATE_MIB(o1, ipIfStatsHCOutBcastPkts, o2->ipIfStatsHCOutBcastPkts);
11338 11348 UPDATE_MIB(o1, ipsecInSucceeded, o2->ipsecInSucceeded);
11339 11349 UPDATE_MIB(o1, ipsecInFailed, o2->ipsecInFailed);
11340 11350 UPDATE_MIB(o1, ipInCksumErrs, o2->ipInCksumErrs);
11341 11351 UPDATE_MIB(o1, tcpInErrs, o2->tcpInErrs);
11342 11352 UPDATE_MIB(o1, udpNoPorts, o2->udpNoPorts);
11343 11353 }
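
The rule stated in the comment above, adding only the 64-bit counter
when both widths exist, works because the 32-bit twin is just a
truncated view that can be re-derived from the 64-bit sum, so it is
never accumulated separately. A small sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct ex_stats {
	uint64_t hc_in_receives;
	uint32_t in_receives;	/* derived, never summed directly */
};

static void
ex_add_stats(struct ex_stats *o1, const struct ex_stats *o2)
{
	o1->hc_in_receives += o2->hc_in_receives;	/* 64-bit only */
	o1->in_receives = (uint32_t)o1->hc_in_receives;	/* resync */
}

int
main(void)
{
	struct ex_stats a = { 10, 10 }, b = { 7, 7 };

	ex_add_stats(&a, &b);
	printf("%llu %u\n",
	    (unsigned long long)a.hc_in_receives, a.in_receives);
	return (0);
}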
11344 11354
11345 11355 void
11346 11356 ip_mib2_add_icmp6_stats(mib2_ipv6IfIcmpEntry_t *o1, mib2_ipv6IfIcmpEntry_t *o2)
11347 11357 {
11348 11358 UPDATE_MIB(o1, ipv6IfIcmpInMsgs, o2->ipv6IfIcmpInMsgs);
11349 11359 UPDATE_MIB(o1, ipv6IfIcmpInErrors, o2->ipv6IfIcmpInErrors);
11350 11360 UPDATE_MIB(o1, ipv6IfIcmpInDestUnreachs, o2->ipv6IfIcmpInDestUnreachs);
11351 11361 UPDATE_MIB(o1, ipv6IfIcmpInAdminProhibs, o2->ipv6IfIcmpInAdminProhibs);
11352 11362 UPDATE_MIB(o1, ipv6IfIcmpInTimeExcds, o2->ipv6IfIcmpInTimeExcds);
11353 11363 UPDATE_MIB(o1, ipv6IfIcmpInParmProblems, o2->ipv6IfIcmpInParmProblems);
11354 11364 UPDATE_MIB(o1, ipv6IfIcmpInPktTooBigs, o2->ipv6IfIcmpInPktTooBigs);
11355 11365 UPDATE_MIB(o1, ipv6IfIcmpInEchos, o2->ipv6IfIcmpInEchos);
11356 11366 UPDATE_MIB(o1, ipv6IfIcmpInEchoReplies, o2->ipv6IfIcmpInEchoReplies);
11357 11367 UPDATE_MIB(o1, ipv6IfIcmpInRouterSolicits,
11358 11368 o2->ipv6IfIcmpInRouterSolicits);
11359 11369 UPDATE_MIB(o1, ipv6IfIcmpInRouterAdvertisements,
11360 11370 o2->ipv6IfIcmpInRouterAdvertisements);
11361 11371 UPDATE_MIB(o1, ipv6IfIcmpInNeighborSolicits,
11362 11372 o2->ipv6IfIcmpInNeighborSolicits);
11363 11373 UPDATE_MIB(o1, ipv6IfIcmpInNeighborAdvertisements,
11364 11374 o2->ipv6IfIcmpInNeighborAdvertisements);
11365 11375 UPDATE_MIB(o1, ipv6IfIcmpInRedirects, o2->ipv6IfIcmpInRedirects);
11366 11376 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembQueries,
11367 11377 o2->ipv6IfIcmpInGroupMembQueries);
11368 11378 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembResponses,
11369 11379 o2->ipv6IfIcmpInGroupMembResponses);
11370 11380 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembReductions,
11371 11381 o2->ipv6IfIcmpInGroupMembReductions);
11372 11382 UPDATE_MIB(o1, ipv6IfIcmpOutMsgs, o2->ipv6IfIcmpOutMsgs);
11373 11383 UPDATE_MIB(o1, ipv6IfIcmpOutErrors, o2->ipv6IfIcmpOutErrors);
11374 11384 UPDATE_MIB(o1, ipv6IfIcmpOutDestUnreachs,
11375 11385 o2->ipv6IfIcmpOutDestUnreachs);
11376 11386 UPDATE_MIB(o1, ipv6IfIcmpOutAdminProhibs,
11377 11387 o2->ipv6IfIcmpOutAdminProhibs);
11378 11388 UPDATE_MIB(o1, ipv6IfIcmpOutTimeExcds, o2->ipv6IfIcmpOutTimeExcds);
11379 11389 UPDATE_MIB(o1, ipv6IfIcmpOutParmProblems,
11380 11390 o2->ipv6IfIcmpOutParmProblems);
11381 11391 UPDATE_MIB(o1, ipv6IfIcmpOutPktTooBigs, o2->ipv6IfIcmpOutPktTooBigs);
11382 11392 UPDATE_MIB(o1, ipv6IfIcmpOutEchos, o2->ipv6IfIcmpOutEchos);
11383 11393 UPDATE_MIB(o1, ipv6IfIcmpOutEchoReplies, o2->ipv6IfIcmpOutEchoReplies);
11384 11394 UPDATE_MIB(o1, ipv6IfIcmpOutRouterSolicits,
11385 11395 o2->ipv6IfIcmpOutRouterSolicits);
11386 11396 UPDATE_MIB(o1, ipv6IfIcmpOutRouterAdvertisements,
11387 11397 o2->ipv6IfIcmpOutRouterAdvertisements);
11388 11398 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborSolicits,
11389 11399 o2->ipv6IfIcmpOutNeighborSolicits);
11390 11400 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborAdvertisements,
11391 11401 o2->ipv6IfIcmpOutNeighborAdvertisements);
11392 11402 UPDATE_MIB(o1, ipv6IfIcmpOutRedirects, o2->ipv6IfIcmpOutRedirects);
11393 11403 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembQueries,
11394 11404 o2->ipv6IfIcmpOutGroupMembQueries);
11395 11405 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembResponses,
11396 11406 o2->ipv6IfIcmpOutGroupMembResponses);
11397 11407 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembReductions,
11398 11408 o2->ipv6IfIcmpOutGroupMembReductions);
11399 11409 UPDATE_MIB(o1, ipv6IfIcmpInOverflows, o2->ipv6IfIcmpInOverflows);
11400 11410 UPDATE_MIB(o1, ipv6IfIcmpBadHoplimit, o2->ipv6IfIcmpBadHoplimit);
11401 11411 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborAdvertisements,
11402 11412 o2->ipv6IfIcmpInBadNeighborAdvertisements);
11403 11413 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborSolicitations,
11404 11414 o2->ipv6IfIcmpInBadNeighborSolicitations);
11405 11415 UPDATE_MIB(o1, ipv6IfIcmpInBadRedirects, o2->ipv6IfIcmpInBadRedirects);
11406 11416 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembTotal,
11407 11417 o2->ipv6IfIcmpInGroupMembTotal);
11408 11418 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadQueries,
11409 11419 o2->ipv6IfIcmpInGroupMembBadQueries);
11410 11420 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadReports,
11411 11421 o2->ipv6IfIcmpInGroupMembBadReports);
11412 11422 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembOurReports,
11413 11423 o2->ipv6IfIcmpInGroupMembOurReports);
11414 11424 }
11415 11425
11416 11426 /*
11417 11427 * Called before the options are updated to check if this packet will
11418 11428 * be source routed from here.
11419 11429 * This routine assumes that the options are well formed i.e. that they
11420 11430 * have already been checked.
11421 11431 */
11422 11432 boolean_t
11423 11433 ip_source_routed(ipha_t *ipha, ip_stack_t *ipst)
11424 11434 {
11425 11435 ipoptp_t opts;
11426 11436 uchar_t *opt;
11427 11437 uint8_t optval;
11428 11438 uint8_t optlen;
11429 11439 ipaddr_t dst;
11430 11440
11431 11441 if (IS_SIMPLE_IPH(ipha)) {
11432 11442 ip2dbg(("not source routed\n"));
11433 11443 return (B_FALSE);
11434 11444 }
11435 11445 dst = ipha->ipha_dst;
11436 11446 for (optval = ipoptp_first(&opts, ipha);
11437 11447 optval != IPOPT_EOL;
11438 11448 optval = ipoptp_next(&opts)) {
11439 11449 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11440 11450 opt = opts.ipoptp_cur;
11441 11451 optlen = opts.ipoptp_len;
11442 11452 ip2dbg(("ip_source_routed: opt %d, len %d\n",
11443 11453 optval, optlen));
11444 11454 switch (optval) {
11445 11455 uint32_t off;
11446 11456 case IPOPT_SSRR:
11447 11457 case IPOPT_LSRR:
11448 11458 /*
11449 11459 * If dst is one of our addresses and there are some
11450 11460 * entries left in the source route return (true).
11451 11461 */
11452 11462 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
11453 11463 ip2dbg(("ip_source_routed: not next"
11454 11464 " source route 0x%x\n",
11455 11465 ntohl(dst)));
11456 11466 return (B_FALSE);
11457 11467 }
11458 11468 off = opt[IPOPT_OFFSET];
11459 11469 off--;
11460 11470 if (optlen < IP_ADDR_LEN ||
11461 11471 off > optlen - IP_ADDR_LEN) {
11462 11472 /* End of source route */
11463 11473 ip1dbg(("ip_source_routed: end of SR\n"));
11464 11474 return (B_FALSE);
11465 11475 }
11466 11476 return (B_TRUE);
11467 11477 }
11468 11478 }
11469 11479 ip2dbg(("not source routed\n"));
11470 11480 return (B_FALSE);
11471 11481 }
11472 11482
11473 11483 /*
11474 11484 * ip_unbind is called by the transports to remove a conn from
11475 11485 * the fanout table.
11476 11486 */
11477 11487 void
11478 11488 ip_unbind(conn_t *connp)
11479 11489 {
11480 11490
11481 11491 ASSERT(!MUTEX_HELD(&connp->conn_lock));
11482 11492
11483 11493 if (is_system_labeled() && connp->conn_anon_port) {
11484 11494 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
11485 11495 connp->conn_mlp_type, connp->conn_proto,
11486 11496 ntohs(connp->conn_lport), B_FALSE);
11487 11497 connp->conn_anon_port = 0;
11488 11498 }
11489 11499 connp->conn_mlp_type = mlptSingle;
11490 11500
11491 11501 ipcl_hash_remove(connp);
11492 11502 }
11493 11503
11494 11504 /*
11495 11505 * Used for deciding the MSS size for the upper layer. Thus
11496 11506 * we need to check the outbound policy values in the conn.
11497 11507 */
11498 11508 int
11499 11509 conn_ipsec_length(conn_t *connp)
11500 11510 {
11501 11511 ipsec_latch_t *ipl;
11502 11512
11503 11513 ipl = connp->conn_latch;
11504 11514 if (ipl == NULL)
11505 11515 return (0);
11506 11516
11507 11517 if (connp->conn_ixa->ixa_ipsec_policy == NULL)
11508 11518 return (0);
11509 11519
11510 11520 return (connp->conn_ixa->ixa_ipsec_policy->ipsp_act->ipa_ovhd);
11511 11521 }
11512 11522
11513 11523 /*
11514 11524 	 * Returns an estimate of the size of the IPsec headers. This is used
11515 11525 	 * if we don't want to call into IPsec to get the exact size.
11516 11526 */
11517 11527 int
11518 11528 ipsec_out_extra_length(ip_xmit_attr_t *ixa)
11519 11529 {
11520 11530 ipsec_action_t *a;
11521 11531
11522 11532 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
11523 11533 return (0);
11524 11534
11525 11535 a = ixa->ixa_ipsec_action;
11526 11536 if (a == NULL) {
11527 11537 ASSERT(ixa->ixa_ipsec_policy != NULL);
11528 11538 a = ixa->ixa_ipsec_policy->ipsp_act;
11529 11539 }
11530 11540 ASSERT(a != NULL);
11531 11541
11532 11542 return (a->ipa_ovhd);
11533 11543 }
11534 11544
11535 11545 /*
11536 11546 * If there are any source route options, return the true final
11537 11547 * destination. Otherwise, return the destination.
11538 11548 */
11539 11549 ipaddr_t
11540 11550 ip_get_dst(ipha_t *ipha)
11541 11551 {
11542 11552 ipoptp_t opts;
11543 11553 uchar_t *opt;
11544 11554 uint8_t optval;
11545 11555 uint8_t optlen;
11546 11556 ipaddr_t dst;
11547 11557 uint32_t off;
11548 11558
11549 11559 dst = ipha->ipha_dst;
11550 11560
11551 11561 if (IS_SIMPLE_IPH(ipha))
11552 11562 return (dst);
11553 11563
11554 11564 for (optval = ipoptp_first(&opts, ipha);
11555 11565 optval != IPOPT_EOL;
11556 11566 optval = ipoptp_next(&opts)) {
11557 11567 opt = opts.ipoptp_cur;
11558 11568 optlen = opts.ipoptp_len;
11559 11569 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11560 11570 switch (optval) {
11561 11571 case IPOPT_SSRR:
11562 11572 case IPOPT_LSRR:
11563 11573 off = opt[IPOPT_OFFSET];
11564 11574 /*
11565 11575 * If one of the conditions is true, it means
11566 11576 * end of options and dst already has the right
11567 11577 * value.
11568 11578 */
11569 11579 if (!(optlen < IP_ADDR_LEN || off > optlen - 3)) {
11570 11580 off = optlen - IP_ADDR_LEN;
11571 11581 bcopy(&opt[off], &dst, IP_ADDR_LEN);
11572 11582 }
11573 11583 return (dst);
11574 11584 default:
11575 11585 break;
11576 11586 }
11577 11587 }
11578 11588
11579 11589 return (dst);
11580 11590 }
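
A worked standalone sketch of the extraction above: within an LSRR or
SSRR option the true final destination occupies the last IP_ADDR_LEN
bytes, so the copy comes from offset optlen - IP_ADDR_LEN. The option
bytes below are hand-built for illustration.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

#define	EX_IP_ADDR_LEN	4

int
main(void)
{
	/* type (131 = LSRR), len, ptr, then two 4-byte route entries */
	unsigned char opt[] = {
		131, 11, 4,
		198, 51, 100, 7,	/* intermediate hop */
		203, 0, 113, 9		/* final destination */
	};
	unsigned optlen = opt[1];	/* whole-option length, as ipoptp_len */
	struct in_addr dst;
	char buf[INET_ADDRSTRLEN];

	memcpy(&dst, &opt[optlen - EX_IP_ADDR_LEN], EX_IP_ADDR_LEN);
	/* Prints 203.0.113.9 */
	printf("final dst %s\n", inet_ntop(AF_INET, &dst, buf, sizeof (buf)));
	return (0);
}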
11581 11591
11582 11592 /*
11583 11593 * Outbound IP fragmentation routine.
11584 11594 * Assumes the caller has checked whether or not fragmentation should
11585 11595 * be allowed. Here we copy the DF bit from the header to all the generated
11586 11596 * fragments.
11587 11597 */
11588 11598 int
11589 11599 ip_fragment_v4(mblk_t *mp_orig, nce_t *nce, iaflags_t ixaflags,
11590 11600 uint_t pkt_len, uint32_t max_frag, uint32_t xmit_hint, zoneid_t szone,
11591 11601 zoneid_t nolzid, pfirepostfrag_t postfragfn, uintptr_t *ixa_cookie)
11592 11602 {
11593 11603 int i1;
11594 11604 int hdr_len;
11595 11605 mblk_t *hdr_mp;
11596 11606 ipha_t *ipha;
11597 11607 int ip_data_end;
11598 11608 int len;
11599 11609 mblk_t *mp = mp_orig;
11600 11610 int offset;
11601 11611 ill_t *ill = nce->nce_ill;
11602 11612 ip_stack_t *ipst = ill->ill_ipst;
11603 11613 mblk_t *carve_mp;
11604 11614 uint32_t frag_flag;
11605 11615 uint_t priority = mp->b_band;
11606 11616 int error = 0;
11607 11617
11608 11618 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragReqds);
11609 11619
11610 11620 if (pkt_len != msgdsize(mp)) {
11611 11621 ip0dbg(("Packet length mismatch: %d, %ld\n",
11612 11622 pkt_len, msgdsize(mp)));
11613 11623 freemsg(mp);
11614 11624 return (EINVAL);
11615 11625 }
11616 11626
11617 11627 if (max_frag == 0) {
11618 11628 ip1dbg(("ip_fragment_v4: max_frag is zero. Dropping packet\n"));
11619 11629 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11620 11630 ip_drop_output("FragFails: zero max_frag", mp, ill);
11621 11631 freemsg(mp);
11622 11632 return (EINVAL);
11623 11633 }
11624 11634
11625 11635 ASSERT(MBLKL(mp) >= sizeof (ipha_t));
11626 11636 ipha = (ipha_t *)mp->b_rptr;
11627 11637 ASSERT(ntohs(ipha->ipha_length) == pkt_len);
11628 11638 frag_flag = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_DF;
11629 11639
11630 11640 /*
11631 11641 * Establish the starting offset. May not be zero if we are fragging
11632 11642 * a fragment that is being forwarded.
11633 11643 */
11634 11644 offset = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET;
11635 11645
11636 11646 /* TODO why is this test needed? */
11637 11647 if (((max_frag - ntohs(ipha->ipha_length)) & ~7) < 8) {
11638 11648 /* TODO: notify ulp somehow */
11639 11649 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11640 11650 ip_drop_output("FragFails: bad starting offset", mp, ill);
11641 11651 freemsg(mp);
11642 11652 return (EINVAL);
11643 11653 }
11644 11654
11645 11655 hdr_len = IPH_HDR_LENGTH(ipha);
11646 11656 ipha->ipha_hdr_checksum = 0;
11647 11657
11648 11658 /*
11649 11659 * Establish the number of bytes maximum per frag, after putting
11650 11660 * in the header.
11651 11661 */
11652 11662 len = (max_frag - hdr_len) & ~7;
11653 11663
11654 11664 /* Get a copy of the header for the trailing frags */
11655 11665 hdr_mp = ip_fragment_copyhdr((uchar_t *)ipha, hdr_len, offset, ipst,
11656 11666 mp);
11657 11667 if (hdr_mp == NULL) {
11658 11668 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11659 11669 ip_drop_output("FragFails: no hdr_mp", mp, ill);
11660 11670 freemsg(mp);
11661 11671 return (ENOBUFS);
11662 11672 }
11663 11673
11664 11674 /* Store the starting offset, with the MoreFrags flag. */
11665 11675 i1 = offset | IPH_MF | frag_flag;
11666 11676 ipha->ipha_fragment_offset_and_flags = htons((uint16_t)i1);
11667 11677
11668 11678 /* Establish the ending byte offset, based on the starting offset. */
11669 11679 offset <<= 3;
11670 11680 ip_data_end = offset + ntohs(ipha->ipha_length) - hdr_len;
11671 11681
11672 11682 /* Store the length of the first fragment in the IP header. */
11673 11683 i1 = len + hdr_len;
11674 11684 ASSERT(i1 <= IP_MAXPACKET);
11675 11685 ipha->ipha_length = htons((uint16_t)i1);
11676 11686
11677 11687 /*
11678 11688 * Compute the IP header checksum for the first frag. We have to
11679 11689 * watch out that we stop at the end of the header.
11680 11690 */
11681 11691 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11682 11692
11683 11693 /*
11684 11694 * Now carve off the first frag. Note that this will include the
11685 11695 * original IP header.
11686 11696 */
11687 11697 if (!(mp = ip_carve_mp(&mp_orig, i1))) {
11688 11698 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11689 11699 ip_drop_output("FragFails: could not carve mp", mp_orig, ill);
11690 11700 freeb(hdr_mp);
11691 11701 freemsg(mp_orig);
11692 11702 return (ENOBUFS);
11693 11703 }
11694 11704
11695 11705 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11696 11706
11697 11707 error = postfragfn(mp, nce, ixaflags, i1, xmit_hint, szone, nolzid,
11698 11708 ixa_cookie);
11699 11709 if (error != 0 && error != EWOULDBLOCK) {
11700 11710 /* No point in sending the other fragments */
11701 11711 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11702 11712 ip_drop_output("FragFails: postfragfn failed", mp_orig, ill);
11703 11713 freeb(hdr_mp);
11704 11714 freemsg(mp_orig);
11705 11715 return (error);
11706 11716 }
11707 11717
11708 11718 /* No need to redo state machine in loop */
11709 11719 ixaflags &= ~IXAF_REACH_CONF;
11710 11720
11711 11721 /* Advance the offset to the second frag starting point. */
11712 11722 offset += len;
11713 11723 /*
11714 11724 	 * Update hdr_len from the copied header - there might be fewer options
11715 11725 * in the later fragments.
11716 11726 */
11717 11727 hdr_len = IPH_HDR_LENGTH(hdr_mp->b_rptr);
11718 11728 /* Loop until done. */
11719 11729 for (;;) {
11720 11730 uint16_t offset_and_flags;
11721 11731 uint16_t ip_len;
11722 11732
11723 11733 if (ip_data_end - offset > len) {
11724 11734 /*
11725 11735 * Carve off the appropriate amount from the original
11726 11736 * datagram.
11727 11737 */
11728 11738 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11729 11739 mp = NULL;
11730 11740 break;
11731 11741 }
11732 11742 /*
11733 11743 * More frags after this one. Get another copy
11734 11744 * of the header.
11735 11745 */
11736 11746 if (carve_mp->b_datap->db_ref == 1 &&
11737 11747 hdr_mp->b_wptr - hdr_mp->b_rptr <
11738 11748 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11739 11749 /* Inline IP header */
11740 11750 carve_mp->b_rptr -= hdr_mp->b_wptr -
11741 11751 hdr_mp->b_rptr;
11742 11752 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11743 11753 hdr_mp->b_wptr - hdr_mp->b_rptr);
11744 11754 mp = carve_mp;
11745 11755 } else {
11746 11756 if (!(mp = copyb(hdr_mp))) {
11747 11757 freemsg(carve_mp);
11748 11758 break;
11749 11759 }
11750 11760 /* Get priority marking, if any. */
11751 11761 mp->b_band = priority;
11752 11762 mp->b_cont = carve_mp;
11753 11763 }
11754 11764 ipha = (ipha_t *)mp->b_rptr;
11755 11765 offset_and_flags = IPH_MF;
11756 11766 } else {
11757 11767 /*
11758 11768 * Last frag. Consume the header. Set len to
11759 11769 * the length of this last piece.
11760 11770 */
11761 11771 len = ip_data_end - offset;
11762 11772
11763 11773 /*
11764 11774 * Carve off the appropriate amount from the original
11765 11775 * datagram.
11766 11776 */
11767 11777 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11768 11778 mp = NULL;
11769 11779 break;
11770 11780 }
11771 11781 if (carve_mp->b_datap->db_ref == 1 &&
11772 11782 hdr_mp->b_wptr - hdr_mp->b_rptr <
11773 11783 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11774 11784 /* Inline IP header */
11775 11785 carve_mp->b_rptr -= hdr_mp->b_wptr -
11776 11786 hdr_mp->b_rptr;
11777 11787 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11778 11788 hdr_mp->b_wptr - hdr_mp->b_rptr);
11779 11789 mp = carve_mp;
11780 11790 freeb(hdr_mp);
11781 11791 hdr_mp = mp;
11782 11792 } else {
11783 11793 mp = hdr_mp;
11784 11794 /* Get priority marking, if any. */
11785 11795 mp->b_band = priority;
11786 11796 mp->b_cont = carve_mp;
11787 11797 }
11788 11798 ipha = (ipha_t *)mp->b_rptr;
11789 11799 /* A frag of a frag might have IPH_MF non-zero */
11790 11800 offset_and_flags =
11791 11801 ntohs(ipha->ipha_fragment_offset_and_flags) &
11792 11802 IPH_MF;
11793 11803 }
11794 11804 offset_and_flags |= (uint16_t)(offset >> 3);
11795 11805 offset_and_flags |= (uint16_t)frag_flag;
11796 11806 /* Store the offset and flags in the IP header. */
11797 11807 ipha->ipha_fragment_offset_and_flags = htons(offset_and_flags);
11798 11808
11799 11809 /* Store the length in the IP header. */
11800 11810 ip_len = (uint16_t)(len + hdr_len);
11801 11811 ipha->ipha_length = htons(ip_len);
11802 11812
11803 11813 /*
11804 11814 * Set the IP header checksum. Note that mp is just
11805 11815 * the header, so this is easy to pass to ip_csum.
11806 11816 */
11807 11817 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11808 11818
11809 11819 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11810 11820
11811 11821 error = postfragfn(mp, nce, ixaflags, ip_len, xmit_hint, szone,
11812 11822 nolzid, ixa_cookie);
11813 11823 /* All done if we just consumed the hdr_mp. */
11814 11824 if (mp == hdr_mp) {
11815 11825 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragOKs);
11816 11826 return (error);
11817 11827 }
11818 11828 if (error != 0 && error != EWOULDBLOCK) {
11819 11829 DTRACE_PROBE2(ip__xmit__frag__fail, ill_t *, ill,
11820 11830 mblk_t *, hdr_mp);
11821 11831 /* No point in sending the other fragments */
11822 11832 break;
11823 11833 }
11824 11834
11825 11835 /* Otherwise, advance and loop. */
11826 11836 offset += len;
11827 11837 }
11828 11838 /* Clean up following allocation failure. */
11829 11839 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11830 11840 ip_drop_output("FragFails: loop ended", NULL, ill);
11831 11841 if (mp != hdr_mp)
11832 11842 freeb(hdr_mp);
11833 11843 if (mp != mp_orig)
11834 11844 freemsg(mp_orig);
11835 11845 return (error);
11836 11846 }
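
The carving loop above relies on two arithmetic invariants: every
fragment but the last carries a payload of (max_frag - hdr_len) & ~7
bytes (fragment payloads must be multiples of 8), and the offset field
counts 8-byte units (hence the offset >> 3). A standalone sketch of the
boundary math, with illustrative numbers:

#include <stdio.h>

int
main(void)
{
	unsigned max_frag = 1500;	/* path MTU */
	unsigned hdr_len = 20;		/* IPv4 header, no options */
	unsigned pkt_len = 4000;	/* total datagram length */
	unsigned data_len = pkt_len - hdr_len;
	unsigned len = (max_frag - hdr_len) & ~7U;	/* 1480 */
	unsigned off;

	for (off = 0; off < data_len; off += len) {
		unsigned chunk = (data_len - off < len) ?
		    data_len - off : len;
		int mf = (off + chunk < data_len);	/* MF on all but last */

		printf("frag: offset=%u (field %u), len=%u, MF=%d\n",
		    off, off >> 3, chunk + hdr_len, mf);
	}
	return (0);
}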
11837 11847
11838 11848 /*
11839 11849 * Copy the header plus those options which have the copy bit set
11840 11850 */
11841 11851 static mblk_t *
11842 11852 ip_fragment_copyhdr(uchar_t *rptr, int hdr_len, int offset, ip_stack_t *ipst,
11843 11853 mblk_t *src)
11844 11854 {
11845 11855 mblk_t *mp;
11846 11856 uchar_t *up;
11847 11857
11848 11858 /*
11849 11859 * Quick check if we need to look for options without the copy bit
11850 11860 * set
11851 11861 */
11852 11862 mp = allocb_tmpl(ipst->ips_ip_wroff_extra + hdr_len, src);
11853 11863 if (!mp)
11854 11864 return (mp);
11855 11865 mp->b_rptr += ipst->ips_ip_wroff_extra;
11856 11866 if (hdr_len == IP_SIMPLE_HDR_LENGTH || offset != 0) {
11857 11867 bcopy(rptr, mp->b_rptr, hdr_len);
11858 11868 mp->b_wptr += hdr_len + ipst->ips_ip_wroff_extra;
11859 11869 return (mp);
11860 11870 }
11861 11871 up = mp->b_rptr;
11862 11872 bcopy(rptr, up, IP_SIMPLE_HDR_LENGTH);
11863 11873 up += IP_SIMPLE_HDR_LENGTH;
11864 11874 rptr += IP_SIMPLE_HDR_LENGTH;
11865 11875 hdr_len -= IP_SIMPLE_HDR_LENGTH;
11866 11876 while (hdr_len > 0) {
11867 11877 uint32_t optval;
11868 11878 uint32_t optlen;
11869 11879
11870 11880 optval = *rptr;
11871 11881 if (optval == IPOPT_EOL)
11872 11882 break;
11873 11883 if (optval == IPOPT_NOP)
11874 11884 optlen = 1;
11875 11885 else
11876 11886 optlen = rptr[1];
11877 11887 if (optval & IPOPT_COPY) {
11878 11888 bcopy(rptr, up, optlen);
11879 11889 up += optlen;
11880 11890 }
11881 11891 rptr += optlen;
11882 11892 hdr_len -= optlen;
11883 11893 }
11884 11894 /*
11885 11895 * Make sure that we drop an even number of words by filling
11886 11896 * with EOL to the next word boundary.
11887 11897 */
11888 11898 for (hdr_len = up - (mp->b_rptr + IP_SIMPLE_HDR_LENGTH);
11889 11899 hdr_len & 0x3; hdr_len++)
11890 11900 *up++ = IPOPT_EOL;
11891 11901 mp->b_wptr = up;
11892 11902 /* Update header length */
11893 11903 mp->b_rptr[0] = (uint8_t)((IP_VERSION << 4) | ((up - mp->b_rptr) >> 2));
11894 11904 return (mp);
11895 11905 }
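
A standalone sketch of the filter above: only options whose high bit
(IPOPT_COPY, 0x80) is set are replicated into trailing fragments, NOP
has no length byte, and the result is padded with EOL to the next word
boundary. The option bytes are hand-built for illustration.

#include <stdio.h>

#define	EX_IPOPT_COPY	0x80
#define	EX_IPOPT_EOL	0
#define	EX_IPOPT_NOP	1

int
main(void)
{
	/* NOP, RR (type 7, copy bit clear), LSRR (type 131, copy bit set) */
	unsigned char in[] = { 1, 7, 7, 4, 0, 0, 0, 0,
	    131, 11, 4, 198, 51, 100, 7, 203, 0, 113, 9 };
	unsigned char out[40];
	unsigned i = 0, o = 0, j, optlen;

	while (i < sizeof (in) && in[i] != EX_IPOPT_EOL) {
		optlen = (in[i] == EX_IPOPT_NOP) ? 1 : in[i + 1];
		if (in[i] & EX_IPOPT_COPY) {
			for (j = 0; j < optlen; j++)
				out[o + j] = in[i + j];
			o += optlen;
		}
		i += optlen;
	}
	while (o & 3)			/* pad to a word boundary */
		out[o++] = EX_IPOPT_EOL;
	/* Prints 12: the 11-byte LSRR plus one byte of EOL padding. */
	printf("copied %u option bytes\n", o);
	return (0);
}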
11896 11906
11897 11907 /*
11898 11908 * Update any source route, record route, or timestamp options when
11899 11909 * sending a packet back to ourselves.
11900 11910 * Check that we are at end of strict source route.
11901 11911 * The options have been sanity checked by ip_output_options().
11902 11912 */
11903 11913 void
11904 11914 ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst)
11905 11915 {
11906 11916 ipoptp_t opts;
11907 11917 uchar_t *opt;
11908 11918 uint8_t optval;
11909 11919 uint8_t optlen;
11910 11920 ipaddr_t dst;
11911 11921 uint32_t ts;
11912 11922 timestruc_t now;
11913 11923
11914 11924 for (optval = ipoptp_first(&opts, ipha);
11915 11925 optval != IPOPT_EOL;
11916 11926 optval = ipoptp_next(&opts)) {
11917 11927 opt = opts.ipoptp_cur;
11918 11928 optlen = opts.ipoptp_len;
11919 11929 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11920 11930 switch (optval) {
11921 11931 uint32_t off;
11922 11932 case IPOPT_SSRR:
11923 11933 case IPOPT_LSRR:
11924 11934 off = opt[IPOPT_OFFSET];
11925 11935 off--;
11926 11936 if (optlen < IP_ADDR_LEN ||
11927 11937 off > optlen - IP_ADDR_LEN) {
11928 11938 /* End of source route */
11929 11939 break;
11930 11940 }
11931 11941 /*
11932 11942 	 * This will only happen if two consecutive entries
11933 11943 	 * in the source route contain our address, or if
11934 11944 	 * the packet has a loose source route and reaches us
11935 11945 	 * before consuming the whole source route.
11936 11946 */
11937 11947
11938 11948 if (optval == IPOPT_SSRR) {
11939 11949 return;
11940 11950 }
11941 11951 /*
11942 11952 	 * Hack: instead of dropping the packet, truncate the
11943 11953 * source route to what has been used by filling the
11944 11954 * rest with IPOPT_NOP.
11945 11955 */
11946 11956 opt[IPOPT_OLEN] = (uint8_t)off;
11947 11957 while (off < optlen) {
11948 11958 opt[off++] = IPOPT_NOP;
11949 11959 }
11950 11960 break;
11951 11961 case IPOPT_RR:
11952 11962 off = opt[IPOPT_OFFSET];
11953 11963 off--;
11954 11964 if (optlen < IP_ADDR_LEN ||
11955 11965 off > optlen - IP_ADDR_LEN) {
11956 11966 /* No more room - ignore */
11957 11967 ip1dbg((
11958 11968 "ip_output_local_options: end of RR\n"));
11959 11969 break;
11960 11970 }
11961 11971 dst = htonl(INADDR_LOOPBACK);
11962 11972 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
11963 11973 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
11964 11974 break;
11965 11975 case IPOPT_TS:
11966 11976 		/* Insert timestamp if there is room */
11967 11977 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
11968 11978 case IPOPT_TS_TSONLY:
11969 11979 off = IPOPT_TS_TIMELEN;
11970 11980 break;
11971 11981 case IPOPT_TS_PRESPEC:
11972 11982 case IPOPT_TS_PRESPEC_RFC791:
11973 11983 /* Verify that the address matched */
11974 11984 off = opt[IPOPT_OFFSET] - 1;
11975 11985 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
11976 11986 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
11977 11987 /* Not for us */
11978 11988 break;
11979 11989 }
11980 11990 /* FALLTHRU */
11981 11991 case IPOPT_TS_TSANDADDR:
11982 11992 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
11983 11993 break;
11984 11994 default:
11985 11995 /*
11986 11996 * ip_*put_options should have already
11987 11997 * dropped this packet.
11988 11998 */
11989 11999 cmn_err(CE_PANIC, "ip_output_local_options: "
11990 12000 "unknown IT - bug in ip_output_options?\n");
11991 12001 return; /* Keep "lint" happy */
11992 12002 }
11993 12003 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
11994 12004 /* Increase overflow counter */
11995 12005 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
11996 12006 opt[IPOPT_POS_OV_FLG] = (uint8_t)
11997 12007 (opt[IPOPT_POS_OV_FLG] & 0x0F) |
11998 12008 (off << 4);
11999 12009 break;
12000 12010 }
12001 12011 off = opt[IPOPT_OFFSET] - 1;
12002 12012 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
12003 12013 case IPOPT_TS_PRESPEC:
12004 12014 case IPOPT_TS_PRESPEC_RFC791:
12005 12015 case IPOPT_TS_TSANDADDR:
12006 12016 dst = htonl(INADDR_LOOPBACK);
12007 12017 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
12008 12018 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
12009 12019 /* FALLTHRU */
12010 12020 case IPOPT_TS_TSONLY:
12011 12021 off = opt[IPOPT_OFFSET] - 1;
12012 12022 /* Compute # of milliseconds since midnight */
12013 12023 gethrestime(&now);
12014 12024 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
12015 12025 now.tv_nsec / (NANOSEC / MILLISEC);
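				/*
				 * Worked example: at 01:00:00.500 UTC,
				 * tv_sec % 86400 == 3600, so ts is
				 * 3600 * 1000 + 500 == 3600500.
				 */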
12016 12026 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
12017 12027 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
12018 12028 break;
12019 12029 }
12020 12030 break;
12021 12031 }
12022 12032 }
12023 12033 }
12024 12034
12025 12035 /*
12026 12036 * Prepend an M_DATA fastpath header, and if none present prepend a
12027 12037 * DL_UNITDATA_REQ. Frees the mblk on failure.
12028 12038 *
12029 12039 * nce_dlur_mp and nce_fp_mp can not disappear once they have been set.
12030 12040 * If there is a change to them, the nce will be deleted (condemned) and
12031 12041 * a new nce_t will be created when packets are sent. Thus we need no locks
12032 12042 * to access those fields.
12033 12043 *
12034 12044 * We preserve b_band to support IPQoS. If a DL_UNITDATA_REQ is prepended
12035 12045 * we place b_band in dl_priority.dl_max.
12036 12046 */
12037 12047 static mblk_t *
12038 12048 ip_xmit_attach_llhdr(mblk_t *mp, nce_t *nce)
12039 12049 {
12040 12050 uint_t hlen;
12041 12051 mblk_t *mp1;
12042 12052 uint_t priority;
12043 12053 uchar_t *rptr;
12044 12054
12045 12055 rptr = mp->b_rptr;
12046 12056
12047 12057 ASSERT(DB_TYPE(mp) == M_DATA);
12048 12058 priority = mp->b_band;
12049 12059
12050 12060 ASSERT(nce != NULL);
12051 12061 if ((mp1 = nce->nce_fp_mp) != NULL) {
12052 12062 hlen = MBLKL(mp1);
12053 12063 /*
12054 12064 * Check if we have enough room to prepend fastpath
12055 12065 * header
12056 12066 */
12057 12067 if (hlen != 0 && (rptr - mp->b_datap->db_base) >= hlen) {
12058 12068 rptr -= hlen;
12059 12069 bcopy(mp1->b_rptr, rptr, hlen);
12060 12070 /*
12061 12071 * Set the b_rptr to the start of the link layer
12062 12072 * header
12063 12073 */
12064 12074 mp->b_rptr = rptr;
12065 12075 return (mp);
12066 12076 }
12067 12077 mp1 = copyb(mp1);
12068 12078 if (mp1 == NULL) {
12069 12079 ill_t *ill = nce->nce_ill;
12070 12080
12071 12081 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12072 12082 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12073 12083 freemsg(mp);
12074 12084 return (NULL);
12075 12085 }
12076 12086 mp1->b_band = priority;
12077 12087 mp1->b_cont = mp;
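		/*
		 * Carry the hardware checksum and LSO state over to the
		 * new head mblk.
		 */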
12078 12088 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
12079 12089 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
12080 12090 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
12081 12091 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
12082 12092 DB_LSOMSS(mp1) = DB_LSOMSS(mp);
12083 12093 DTRACE_PROBE1(ip__xmit__copyb, (mblk_t *), mp1);
12084 12094 /*
12085 12095 * XXX disable ICK_VALID and compute checksum
12086 12096 * here; can happen if nce_fp_mp changes and
12087 12097 * it can't be copied now due to insufficient
12088 12098 * space. (unlikely, fp mp can change, but it
12089 12099 * does not increase in length)
12090 12100 */
12091 12101 return (mp1);
12092 12102 }
12093 12103 mp1 = copyb(nce->nce_dlur_mp);
12094 12104
12095 12105 if (mp1 == NULL) {
12096 12106 ill_t *ill = nce->nce_ill;
12097 12107
12098 12108 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12099 12109 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12100 12110 freemsg(mp);
12101 12111 return (NULL);
12102 12112 }
12103 12113 mp1->b_cont = mp;
12104 12114 if (priority != 0) {
12105 12115 mp1->b_band = priority;
12106 12116 ((dl_unitdata_req_t *)(mp1->b_rptr))->dl_priority.dl_max =
12107 12117 priority;
12108 12118 }
12109 12119 return (mp1);
12110 12120 #undef rptr
12111 12121 }
12112 12122
12113 12123 /*
12114 12124 * Finish the outbound IPsec processing. This function is called from
12115 12125 * ipsec_out_process() if the IPsec packet was processed
12116 12126 * synchronously, or from {ah,esp}_kcf_callback_outbound() if it was processed
12117 12127 * asynchronously.
12118 12128 *
12119 12129 * This is common to IPv4 and IPv6.
12120 12130 */
12121 12131 int
12122 12132 ip_output_post_ipsec(mblk_t *mp, ip_xmit_attr_t *ixa)
12123 12133 {
12124 12134 iaflags_t ixaflags = ixa->ixa_flags;
12125 12135 uint_t pktlen;
12126 12136
12128 12138 /* AH/ESP don't update ixa_pktlen when they modify the packet */
12129 12139 if (ixaflags & IXAF_IS_IPV4) {
12130 12140 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12131 12141
12132 12142 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12133 12143 pktlen = ntohs(ipha->ipha_length);
12134 12144 } else {
12135 12145 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12136 12146
12137 12147 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12138 12148 pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12139 12149 }
12140 12150
12141 12151 /*
12142 12152 * We release any hard reference on the SAs here to make
12143 12153 * sure the SAs can be garbage collected. ipsr_sa has a soft reference
12144 12154 * on the SAs.
12145 12155 	 * If, in the future, we want hard latching of the SAs in the
12146 12156 	 * ip_xmit_attr_t, then we should remove this.
12147 12157 */
12148 12158 if (ixa->ixa_ipsec_esp_sa != NULL) {
12149 12159 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12150 12160 ixa->ixa_ipsec_esp_sa = NULL;
12151 12161 }
12152 12162 if (ixa->ixa_ipsec_ah_sa != NULL) {
12153 12163 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12154 12164 ixa->ixa_ipsec_ah_sa = NULL;
12155 12165 }
12156 12166
12157 12167 /* Do we need to fragment? */
12158 12168 if ((ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR) ||
12159 12169 pktlen > ixa->ixa_fragsize) {
12160 12170 if (ixaflags & IXAF_IS_IPV4) {
12161 12171 ASSERT(!(ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR));
12162 12172 /*
12163 12173 			 * We check for the DF case in ipsec_out_process,
12164 12174 * hence this only handles the non-DF case.
12165 12175 */
12166 12176 return (ip_fragment_v4(mp, ixa->ixa_nce, ixa->ixa_flags,
12167 12177 pktlen, ixa->ixa_fragsize,
12168 12178 ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12169 12179 ixa->ixa_no_loop_zoneid, ixa->ixa_postfragfn,
12170 12180 &ixa->ixa_cookie));
12171 12181 } else {
12172 12182 mp = ip_fraghdr_add_v6(mp, ixa->ixa_ident, ixa);
12173 12183 if (mp == NULL) {
12174 12184 /* MIB and ip_drop_output already done */
12175 12185 return (ENOMEM);
12176 12186 }
12177 12187 pktlen += sizeof (ip6_frag_t);
12178 12188 if (pktlen > ixa->ixa_fragsize) {
12179 12189 return (ip_fragment_v6(mp, ixa->ixa_nce,
12180 12190 ixa->ixa_flags, pktlen,
12181 12191 ixa->ixa_fragsize, ixa->ixa_xmit_hint,
12182 12192 ixa->ixa_zoneid, ixa->ixa_no_loop_zoneid,
12183 12193 ixa->ixa_postfragfn, &ixa->ixa_cookie));
12184 12194 }
12185 12195 }
12186 12196 }
12187 12197 return ((ixa->ixa_postfragfn)(mp, ixa->ixa_nce, ixa->ixa_flags,
12188 12198 pktlen, ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12189 12199 ixa->ixa_no_loop_zoneid, NULL));
12190 12200 }
12191 12201
12192 12202 /*
12193 12203  * Finish the inbound IPsec processing. This function is called
12194 12204  * once inbound AH/ESP processing of the packet has completed,
12195 12205  * whether that processing ran synchronously or asynchronously
12196 12206  * via kEF.
12197 12207 *
12198 12208 * This is common to IPv4 and IPv6.
12199 12209 */
12200 12210 void
12201 12211 ip_input_post_ipsec(mblk_t *mp, ip_recv_attr_t *ira)
12202 12212 {
12203 12213 iaflags_t iraflags = ira->ira_flags;
12204 12214
12205 12215 /* Length might have changed */
12206 12216 if (iraflags & IRAF_IS_IPV4) {
12207 12217 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12208 12218
12209 12219 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12210 12220 ira->ira_pktlen = ntohs(ipha->ipha_length);
12211 12221 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
12212 12222 ira->ira_protocol = ipha->ipha_protocol;
12213 12223
12214 12224 ip_fanout_v4(mp, ipha, ira);
12215 12225 } else {
12216 12226 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12217 12227 uint8_t *nexthdrp;
12218 12228
12219 12229 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12220 12230 ira->ira_pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12221 12231 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ira->ira_ip_hdr_length,
12222 12232 &nexthdrp)) {
12223 12233 /* Malformed packet */
12224 12234 BUMP_MIB(ira->ira_ill->ill_ip_mib, ipIfStatsInDiscards);
12225 12235 ip_drop_input("ipIfStatsInDiscards", mp, ira->ira_ill);
12226 12236 freemsg(mp);
12227 12237 return;
12228 12238 }
12229 12239 ira->ira_protocol = *nexthdrp;
12230 12240 ip_fanout_v6(mp, ip6h, ira);
12231 12241 }
12232 12242 }
12233 12243
12234 12244 /*
12235 12245 * Select which AH & ESP SA's to use (if any) for the outbound packet.
12236 12246 *
12237 12247 * If this function returns B_TRUE, the requested SA's have been filled
12238 12248 * into the ixa_ipsec_*_sa pointers.
12239 12249 *
12240 12250 * If the function returns B_FALSE, the packet has been "consumed", most
12241 12251 * likely by an ACQUIRE sent up via PF_KEY to a key management daemon.
12242 12252 *
12243 12253 * The SA references created by the protocol-specific "select"
12244 12254 * function will be released in ip_output_post_ipsec.
12245 12255 */
12246 12256 static boolean_t
12247 12257 ipsec_out_select_sa(mblk_t *mp, ip_xmit_attr_t *ixa)
12248 12258 {
12249 12259 boolean_t need_ah_acquire = B_FALSE, need_esp_acquire = B_FALSE;
12250 12260 ipsec_policy_t *pp;
12251 12261 ipsec_action_t *ap;
12252 12262
12253 12263 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12254 12264 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12255 12265 (ixa->ixa_ipsec_action != NULL));
12256 12266
12257 12267 ap = ixa->ixa_ipsec_action;
12258 12268 if (ap == NULL) {
12259 12269 pp = ixa->ixa_ipsec_policy;
12260 12270 ASSERT(pp != NULL);
12261 12271 ap = pp->ipsp_act;
12262 12272 ASSERT(ap != NULL);
12263 12273 }
12264 12274
12265 12275 /*
12266 12276 	 * We have an action. Now, let's select SA's.
12267 12277 * A side effect of setting ixa_ipsec_*_sa is that it will
12268 12278 * be cached in the conn_t.
12269 12279 */
12270 12280 if (ap->ipa_want_esp) {
12271 12281 if (ixa->ixa_ipsec_esp_sa == NULL) {
12272 12282 need_esp_acquire = !ipsec_outbound_sa(mp, ixa,
12273 12283 IPPROTO_ESP);
12274 12284 }
12275 12285 ASSERT(need_esp_acquire || ixa->ixa_ipsec_esp_sa != NULL);
12276 12286 }
12277 12287
12278 12288 if (ap->ipa_want_ah) {
12279 12289 if (ixa->ixa_ipsec_ah_sa == NULL) {
12280 12290 need_ah_acquire = !ipsec_outbound_sa(mp, ixa,
12281 12291 IPPROTO_AH);
12282 12292 }
12283 12293 ASSERT(need_ah_acquire || ixa->ixa_ipsec_ah_sa != NULL);
12284 12294 /*
12285 12295 * The ESP and AH processing order needs to be preserved
12286 12296 * when both protocols are required (ESP should be applied
12287 12297 * before AH for an outbound packet). Force an ESP ACQUIRE
12288 12298 		 * when both ESP and AH are required and an AH ACQUIRE
12289 12299 		 * is needed.
12290 12300 */
12291 12301 if (ap->ipa_want_esp && need_ah_acquire)
12292 12302 need_esp_acquire = B_TRUE;
12293 12303 }
12294 12304
12295 12305 /*
12296 12306 * Send an ACQUIRE (extended, regular, or both) if we need one.
12297 12307 * Release SAs that got referenced, but will not be used until we
12298 12308 * acquire _all_ of the SAs we need.
12299 12309 */
12300 12310 if (need_ah_acquire || need_esp_acquire) {
12301 12311 if (ixa->ixa_ipsec_ah_sa != NULL) {
12302 12312 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12303 12313 ixa->ixa_ipsec_ah_sa = NULL;
12304 12314 }
12305 12315 if (ixa->ixa_ipsec_esp_sa != NULL) {
12306 12316 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12307 12317 ixa->ixa_ipsec_esp_sa = NULL;
12308 12318 }
12309 12319
12310 12320 sadb_acquire(mp, ixa, need_ah_acquire, need_esp_acquire);
12311 12321 return (B_FALSE);
12312 12322 }
12313 12323
12314 12324 return (B_TRUE);
12315 12325 }
12316 12326
12317 12327 /*
12318 12328 * Handle IPsec output processing.
12319 12329 * This function is only entered once for a given packet.
12320 12330  * We try to do things synchronously, but if we need user-level to
12321 12331  * set up SAs, or if ESP or AH uses asynchronous kEF, then the operation
12322 12332  * will be completed
12323 12333 * - when the SAs are added in esp_add_sa_finish/ah_add_sa_finish
12324 12334 * - when asynchronous ESP is done it will do AH
12325 12335 *
12326 12336 * In all cases we come back in ip_output_post_ipsec() to fragment and
12327 12337 * send out the packet.
12328 12338 */
12329 12339 int
12330 12340 ipsec_out_process(mblk_t *mp, ip_xmit_attr_t *ixa)
12331 12341 {
12332 12342 ill_t *ill = ixa->ixa_nce->nce_ill;
12333 12343 ip_stack_t *ipst = ixa->ixa_ipst;
12334 12344 ipsec_stack_t *ipss;
12335 12345 ipsec_policy_t *pp;
12336 12346 ipsec_action_t *ap;
12337 12347
12338 12348 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12339 12349
12340 12350 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12341 12351 (ixa->ixa_ipsec_action != NULL));
12342 12352
12343 12353 ipss = ipst->ips_netstack->netstack_ipsec;
12344 12354 if (!ipsec_loaded(ipss)) {
12345 12355 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12346 12356 ip_drop_packet(mp, B_TRUE, ill,
12347 12357 DROPPER(ipss, ipds_ip_ipsec_not_loaded),
12348 12358 &ipss->ipsec_dropper);
12349 12359 return (ENOTSUP);
12350 12360 }
12351 12361
12352 12362 ap = ixa->ixa_ipsec_action;
12353 12363 if (ap == NULL) {
12354 12364 pp = ixa->ixa_ipsec_policy;
12355 12365 ASSERT(pp != NULL);
12356 12366 ap = pp->ipsp_act;
12357 12367 ASSERT(ap != NULL);
12358 12368 }
12359 12369
12360 12370 /* Handle explicit drop action and bypass. */
12361 12371 switch (ap->ipa_act.ipa_type) {
12362 12372 case IPSEC_ACT_DISCARD:
12363 12373 case IPSEC_ACT_REJECT:
12364 12374 ip_drop_packet(mp, B_FALSE, ill,
12365 12375 DROPPER(ipss, ipds_spd_explicit), &ipss->ipsec_spd_dropper);
12366 12376 return (EHOSTUNREACH); /* IPsec policy failure */
12367 12377 case IPSEC_ACT_BYPASS:
12368 12378 return (ip_output_post_ipsec(mp, ixa));
12369 12379 }
12370 12380
12371 12381 /*
12372 12382 	 * The order of processing is to first insert an IP header if needed,
12373 12383 	 * then the ESP header, and then the AH header.
12374 12384 */
12375 12385 if ((ixa->ixa_flags & IXAF_IS_IPV4) && ap->ipa_want_se) {
12376 12386 /*
12377 12387 * First get the outer IP header before sending
12378 12388 * it to ESP.
12379 12389 */
12380 12390 ipha_t *oipha, *iipha;
12381 12391 mblk_t *outer_mp, *inner_mp;
12382 12392
12383 12393 if ((outer_mp = allocb(sizeof (ipha_t), BPRI_HI)) == NULL) {
12384 12394 (void) mi_strlog(ill->ill_rq, 0,
12385 12395 SL_ERROR|SL_TRACE|SL_CONSOLE,
12386 12396 "ipsec_out_process: "
12387 12397 "Self-Encapsulation failed: Out of memory\n");
12388 12398 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12389 12399 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12390 12400 freemsg(mp);
12391 12401 return (ENOBUFS);
12392 12402 }
12393 12403 inner_mp = mp;
12394 12404 ASSERT(inner_mp->b_datap->db_type == M_DATA);
12395 12405 oipha = (ipha_t *)outer_mp->b_rptr;
12396 12406 iipha = (ipha_t *)inner_mp->b_rptr;
12397 12407 *oipha = *iipha;
12398 12408 outer_mp->b_wptr += sizeof (ipha_t);
12399 12409 oipha->ipha_length = htons(ntohs(iipha->ipha_length) +
12400 12410 sizeof (ipha_t));
12401 12411 oipha->ipha_protocol = IPPROTO_ENCAP;
12402 12412 oipha->ipha_version_and_hdr_length =
12403 12413 IP_SIMPLE_HDR_VERSION;
12404 12414 oipha->ipha_hdr_checksum = 0;
12405 12415 oipha->ipha_hdr_checksum = ip_csum_hdr(oipha);
12406 12416 outer_mp->b_cont = inner_mp;
12407 12417 mp = outer_mp;
12408 12418
12409 12419 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
12410 12420 }
12411 12421
12412 12422 /* If we need to wait for a SA then we can't return any errno */
12413 12423 if (((ap->ipa_want_ah && (ixa->ixa_ipsec_ah_sa == NULL)) ||
12414 12424 (ap->ipa_want_esp && (ixa->ixa_ipsec_esp_sa == NULL))) &&
12415 12425 !ipsec_out_select_sa(mp, ixa))
12416 12426 return (0);
12417 12427
12418 12428 /*
12419 12429 * By now, we know what SA's to use. Toss over to ESP & AH
12420 12430 * to do the heavy lifting.
12421 12431 */
12422 12432 if (ap->ipa_want_esp) {
12423 12433 ASSERT(ixa->ixa_ipsec_esp_sa != NULL);
12424 12434
12425 12435 mp = ixa->ixa_ipsec_esp_sa->ipsa_output_func(mp, ixa);
12426 12436 if (mp == NULL) {
12427 12437 /*
12428 12438 * Either it failed or is pending. In the former case
12429 12439 			 * ipIfStatsOutDiscards was increased.
12430 12440 */
12431 12441 return (0);
12432 12442 }
12433 12443 }
12434 12444
12435 12445 if (ap->ipa_want_ah) {
12436 12446 ASSERT(ixa->ixa_ipsec_ah_sa != NULL);
12437 12447
12438 12448 mp = ixa->ixa_ipsec_ah_sa->ipsa_output_func(mp, ixa);
12439 12449 if (mp == NULL) {
12440 12450 /*
12441 12451 * Either it failed or is pending. In the former case
12442 12452 			 * ipIfStatsOutDiscards was increased.
12443 12453 */
12444 12454 return (0);
12445 12455 }
12446 12456 }
12447 12457 /*
12448 12458 * We are done with IPsec processing. Send it over
12449 12459 * the wire.
12450 12460 */
12451 12461 return (ip_output_post_ipsec(mp, ixa));
12452 12462 }
12453 12463
12454 12464 /*
12455 12465 * ioctls that go through a down/up sequence may need to wait for the down
12456 12466 * to complete. This involves waiting for the ire and ipif refcnts to go down
12457 12467 * to zero. Subsequently the ioctl is restarted from ipif_ill_refrele_tail.
12458 12468 */
12459 12469 /* ARGSUSED */
12460 12470 void
12461 12471 ip_reprocess_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
12462 12472 {
12463 12473 struct iocblk *iocp;
12464 12474 mblk_t *mp1;
12465 12475 ip_ioctl_cmd_t *ipip;
12466 12476 int err;
12467 12477 sin_t *sin;
12468 12478 struct lifreq *lifr;
12469 12479 struct ifreq *ifr;
12470 12480
12471 12481 iocp = (struct iocblk *)mp->b_rptr;
12472 12482 ASSERT(ipsq != NULL);
12473 12483 /* Existence of mp1 verified in ip_wput_nondata */
12474 12484 mp1 = mp->b_cont->b_cont;
12475 12485 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12476 12486 if (ipip->ipi_cmd == SIOCSLIFNAME || ipip->ipi_cmd == IF_UNITSEL) {
12477 12487 /*
12478 12488 * Special case where ipx_current_ipif is not set:
12479 12489 * ill_phyint_reinit merged the v4 and v6 into a single ipsq.
12480 12490 		 * We are here as we were not able to complete the operation in
12481 12491 * ipif_set_values because we could not become exclusive on
12482 12492 * the new ipsq.
12483 12493 */
12484 12494 ill_t *ill = q->q_ptr;
12485 12495 ipsq_current_start(ipsq, ill->ill_ipif, ipip->ipi_cmd);
12486 12496 }
12487 12497 ASSERT(ipsq->ipsq_xop->ipx_current_ipif != NULL);
12488 12498
12489 12499 if (ipip->ipi_cmd_type == IF_CMD) {
12490 12500 		/* This is an old style SIOC[GS]IF* command */
12491 12501 ifr = (struct ifreq *)mp1->b_rptr;
12492 12502 sin = (sin_t *)&ifr->ifr_addr;
12493 12503 } else if (ipip->ipi_cmd_type == LIF_CMD) {
12494 12504 		/* This is a new style SIOC[GS]LIF* command */
12495 12505 lifr = (struct lifreq *)mp1->b_rptr;
12496 12506 sin = (sin_t *)&lifr->lifr_addr;
12497 12507 } else {
12498 12508 sin = NULL;
12499 12509 }
12500 12510
12501 12511 err = (*ipip->ipi_func_restart)(ipsq->ipsq_xop->ipx_current_ipif, sin,
12502 12512 q, mp, ipip, mp1->b_rptr);
12503 12513
12504 12514 DTRACE_PROBE4(ipif__ioctl, char *, "ip_reprocess_ioctl finish",
12505 12515 int, ipip->ipi_cmd,
12506 12516 ill_t *, ipsq->ipsq_xop->ipx_current_ipif->ipif_ill,
12507 12517 ipif_t *, ipsq->ipsq_xop->ipx_current_ipif);
12508 12518
12509 12519 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12510 12520 }
12511 12521
12512 12522 /*
12513 12523 * ioctl processing
12514 12524 *
12515 12525 * ioctl processing starts with ip_sioctl_copyin_setup(), which looks up
12516 12526 * the ioctl command in the ioctl tables, determines the copyin data size
12517 12527 * from the ipi_copyin_size field, and does an mi_copyin() of that size.
12518 12528 *
12519 12529 * ioctl processing then continues when the M_IOCDATA makes its way down to
12520 12530 * ip_wput_nondata(). The ioctl is looked up again in the ioctl table, its
12521 12531 * associated 'conn' is refheld till the end of the ioctl and the general
12522 12532 * ioctl processing function ip_process_ioctl() is called to extract the
12523 12533 * arguments and process the ioctl. To simplify extraction, ioctl commands
12524 12534 * are "typed" based on the arguments they take (e.g., LIF_CMD which takes a
12525 12535 * `struct lifreq'), and a common extract function (e.g., ip_extract_lifreq())
12526 12536 * is used to extract the ioctl's arguments.
12527 12537 *
12528 12538 * ip_process_ioctl determines if the ioctl needs to be serialized, and if
12529 12539 * so goes thru the serialization primitive ipsq_try_enter. Then the
12530 12540 * appropriate function to handle the ioctl is called based on the entry in
12531 12541 * the ioctl table. ioctl completion is encapsulated in ip_ioctl_finish
12532 12542 * which also refreleases the 'conn' that was refheld at the start of the
12533 12543 * ioctl. Finally ipsq_exit is called if needed to exit the ipsq.
12534 12544 *
12535 12545  * Many exclusive ioctls go thru an internal down/up sequence as part of
12536 12546 * the operation. For example an attempt to change the IP address of an
12537 12547 * ipif entails ipif_down, set address, ipif_up. Bringing down the interface
12538 12548 * does all the cleanup such as deleting all ires that use this address.
12539 12549 * Then we need to wait till all references to the interface go away.
12540 12550 */
12541 12551 void
12542 12552 ip_process_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg)
12543 12553 {
12544 12554 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
12545 12555 ip_ioctl_cmd_t *ipip = arg;
12546 12556 ip_extract_func_t *extract_funcp;
12547 12557 cmd_info_t ci;
12548 12558 int err;
12549 12559 boolean_t entered_ipsq = B_FALSE;
12550 12560
12551 12561 ip3dbg(("ip_process_ioctl: ioctl %X\n", iocp->ioc_cmd));
12552 12562
12553 12563 if (ipip == NULL)
12554 12564 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12555 12565
12556 12566 /*
12557 12567 * SIOCLIFADDIF needs to go thru a special path since the
12558 12568 * ill may not exist yet. This happens in the case of lo0
12559 12569 * which is created using this ioctl.
12560 12570 */
12561 12571 if (ipip->ipi_cmd == SIOCLIFADDIF) {
12562 12572 err = ip_sioctl_addif(NULL, NULL, q, mp, NULL, NULL);
12563 12573 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish",
12564 12574 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12565 12575 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12566 12576 return;
12567 12577 }
12568 12578
12569 12579 ci.ci_ipif = NULL;
12570 12580 switch (ipip->ipi_cmd_type) {
12571 12581 case MISC_CMD:
12572 12582 case MSFILT_CMD:
12573 12583 /*
12574 12584 * All MISC_CMD ioctls come in here -- e.g. SIOCGLIFCONF.
12575 12585 */
12576 12586 if (ipip->ipi_cmd == IF_UNITSEL) {
12577 12587 /* ioctl comes down the ill */
12578 12588 ci.ci_ipif = ((ill_t *)q->q_ptr)->ill_ipif;
12579 12589 ipif_refhold(ci.ci_ipif);
12580 12590 }
12581 12591 err = 0;
12582 12592 ci.ci_sin = NULL;
12583 12593 ci.ci_sin6 = NULL;
12584 12594 ci.ci_lifr = NULL;
12585 12595 extract_funcp = NULL;
12586 12596 break;
12587 12597
12588 12598 case IF_CMD:
12589 12599 case LIF_CMD:
12590 12600 extract_funcp = ip_extract_lifreq;
12591 12601 break;
12592 12602
12593 12603 case ARP_CMD:
12594 12604 case XARP_CMD:
12595 12605 extract_funcp = ip_extract_arpreq;
12596 12606 break;
12597 12607
12598 12608 default:
12599 12609 ASSERT(0);
12600 12610 }
12601 12611
12602 12612 if (extract_funcp != NULL) {
12603 12613 err = (*extract_funcp)(q, mp, ipip, &ci);
12604 12614 if (err != 0) {
12605 12615 DTRACE_PROBE4(ipif__ioctl,
12606 12616 char *, "ip_process_ioctl finish err",
12607 12617 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12608 12618 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12609 12619 return;
12610 12620 }
12611 12621
12612 12622 /*
12613 12623 * All of the extraction functions return a refheld ipif.
12614 12624 */
12615 12625 ASSERT(ci.ci_ipif != NULL);
12616 12626 }
12617 12627
12618 12628 if (!(ipip->ipi_flags & IPI_WR)) {
12619 12629 /*
12620 12630 * A return value of EINPROGRESS means the ioctl is
12621 12631 * either queued and waiting for some reason or has
12622 12632 * already completed.
12623 12633 */
12624 12634 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip,
12625 12635 ci.ci_lifr);
12626 12636 if (ci.ci_ipif != NULL) {
12627 12637 DTRACE_PROBE4(ipif__ioctl,
12628 12638 char *, "ip_process_ioctl finish RD",
12629 12639 int, ipip->ipi_cmd, ill_t *, ci.ci_ipif->ipif_ill,
12630 12640 ipif_t *, ci.ci_ipif);
12631 12641 ipif_refrele(ci.ci_ipif);
12632 12642 } else {
12633 12643 DTRACE_PROBE4(ipif__ioctl,
12634 12644 char *, "ip_process_ioctl finish RD",
12635 12645 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12636 12646 }
12637 12647 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12638 12648 return;
12639 12649 }
12640 12650
12641 12651 ASSERT(ci.ci_ipif != NULL);
12642 12652
12643 12653 /*
12644 12654 * If ipsq is non-NULL, we are already being called exclusively
12645 12655 */
12646 12656 ASSERT(ipsq == NULL || IAM_WRITER_IPSQ(ipsq));
12647 12657 if (ipsq == NULL) {
12648 12658 ipsq = ipsq_try_enter(ci.ci_ipif, NULL, q, mp, ip_process_ioctl,
12649 12659 NEW_OP, B_TRUE);
12650 12660 if (ipsq == NULL) {
12651 12661 ipif_refrele(ci.ci_ipif);
12652 12662 return;
12653 12663 }
12654 12664 entered_ipsq = B_TRUE;
12655 12665 }
12656 12666 /*
12657 12667 * Release the ipif so that ipif_down and friends that wait for
12658 12668 * references to go away are not misled about the current ipif_refcnt
12659 12669 * values. We are writer so we can access the ipif even after releasing
12660 12670 * the ipif.
12661 12671 */
12662 12672 ipif_refrele(ci.ci_ipif);
12663 12673
12664 12674 ipsq_current_start(ipsq, ci.ci_ipif, ipip->ipi_cmd);
12665 12675
12666 12676 /*
12667 12677 * A return value of EINPROGRESS means the ioctl is
12668 12678 * either queued and waiting for some reason or has
12669 12679 * already completed.
12670 12680 */
12671 12681 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip, ci.ci_lifr);
12672 12682
12673 12683 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish WR",
12674 12684 int, ipip->ipi_cmd,
12675 12685 ill_t *, ci.ci_ipif == NULL ? NULL : ci.ci_ipif->ipif_ill,
12676 12686 ipif_t *, ci.ci_ipif);
12677 12687 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12678 12688
12679 12689 if (entered_ipsq)
12680 12690 ipsq_exit(ipsq);
12681 12691 }
12682 12692
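/*
 * Illustrative sketch (not part of this file): how a transparent LIF_CMD
 * ioctl such as SIOCGLIFADDR typically enters ip_process_ioctl() from
 * userland. The function name and the "lo0" interface are assumptions
 * made for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <stropts.h>
 *	#include <unistd.h>
 *
 *	static int
 *	example_get_lif_addr(void)
 *	{
 *		struct lifreq lifr;
 *		int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (s == -1)
 *			return (-1);
 *		(void) memset(&lifr, 0, sizeof (lifr));
 *		(void) strlcpy(lifr.lifr_name, "lo0", sizeof (lifr.lifr_name));
 *		if (ioctl(s, SIOCGLIFADDR, (caddr_t)&lifr) == -1) {
 *			(void) close(s);
 *			return (-1);
 *		}
 *		(void) close(s);
 *		return (0);
 *	}
 */
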
12683 12693 /*
12684 12694 * Complete the ioctl. Typically ioctls use the mi package and need to
12685 12695 * do mi_copyout/mi_copy_done.
12686 12696 */
12687 12697 void
12688 12698 ip_ioctl_finish(queue_t *q, mblk_t *mp, int err, int mode, ipsq_t *ipsq)
12689 12699 {
12690 12700 conn_t *connp = NULL;
12691 12701
12692 12702 if (err == EINPROGRESS)
12693 12703 return;
12694 12704
12695 12705 if (CONN_Q(q)) {
12696 12706 connp = Q_TO_CONN(q);
12697 12707 ASSERT(connp->conn_ref >= 2);
12698 12708 }
12699 12709
12700 12710 switch (mode) {
12701 12711 case COPYOUT:
12702 12712 if (err == 0)
12703 12713 mi_copyout(q, mp);
12704 12714 else
12705 12715 mi_copy_done(q, mp, err);
12706 12716 break;
12707 12717
12708 12718 case NO_COPYOUT:
12709 12719 mi_copy_done(q, mp, err);
12710 12720 break;
12711 12721
12712 12722 default:
12713 12723 ASSERT(mode == CONN_CLOSE); /* aborted through CONN_CLOSE */
12714 12724 break;
12715 12725 }
12716 12726
12717 12727 /*
12718 12728 * The conn refhold and ioctlref placed on the conn at the start of the
12719 12729 * ioctl are released here.
12720 12730 */
12721 12731 if (connp != NULL) {
12722 12732 CONN_DEC_IOCTLREF(connp);
12723 12733 CONN_OPER_PENDING_DONE(connp);
12724 12734 }
12725 12735
12726 12736 if (ipsq != NULL)
12727 12737 ipsq_current_finish(ipsq);
12728 12738 }
12729 12739
12730 12740 /* Handles all non data messages */
12731 12741 void
12732 12742 ip_wput_nondata(queue_t *q, mblk_t *mp)
12733 12743 {
12734 12744 mblk_t *mp1;
12735 12745 struct iocblk *iocp;
12736 12746 ip_ioctl_cmd_t *ipip;
12737 12747 conn_t *connp;
12738 12748 cred_t *cr;
12739 12749 char *proto_str;
12740 12750
12741 12751 if (CONN_Q(q))
12742 12752 connp = Q_TO_CONN(q);
12743 12753 else
12744 12754 connp = NULL;
12745 12755
12746 12756 switch (DB_TYPE(mp)) {
12747 12757 case M_IOCTL:
12748 12758 /*
12749 12759 * IOCTL processing begins in ip_sioctl_copyin_setup which
12750 12760 * will arrange to copy in associated control structures.
12751 12761 */
12752 12762 ip_sioctl_copyin_setup(q, mp);
12753 12763 return;
12754 12764 case M_IOCDATA:
12755 12765 /*
12756 12766 * Ensure that this is associated with one of our trans-
12757 12767 * parent ioctls. If it's not ours, discard it if we're
12758 12768 * running as a driver, or pass it on if we're a module.
12759 12769 */
12760 12770 iocp = (struct iocblk *)mp->b_rptr;
12761 12771 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12762 12772 if (ipip == NULL) {
12763 12773 if (q->q_next == NULL) {
12764 12774 goto nak;
12765 12775 } else {
12766 12776 putnext(q, mp);
12767 12777 }
12768 12778 return;
12769 12779 }
12770 12780 if ((q->q_next != NULL) && !(ipip->ipi_flags & IPI_MODOK)) {
12771 12781 /*
12772 12782 * The ioctl is one we recognise, but is not consumed
12773 12783 			 * by IP as a module and we are a module, so we drop it.
12774 12784 */
12775 12785 goto nak;
12776 12786 }
12777 12787
12778 12788 /* IOCTL continuation following copyin or copyout. */
12779 12789 if (mi_copy_state(q, mp, NULL) == -1) {
12780 12790 /*
12781 12791 * The copy operation failed. mi_copy_state already
12782 12792 * cleaned up, so we're out of here.
12783 12793 */
12784 12794 return;
12785 12795 }
12786 12796 /*
12787 12797 * If we just completed a copy in, we become writer and
12788 12798 		 * continue processing in ip_process_ioctl(). If it
12789 12799 * was a copy out, we call mi_copyout again. If there is
12790 12800 * nothing more to copy out, it will complete the IOCTL.
12791 12801 */
12792 12802 if (MI_COPY_DIRECTION(mp) == MI_COPY_IN) {
12793 12803 if (!(mp1 = mp->b_cont) || !(mp1 = mp1->b_cont)) {
12794 12804 mi_copy_done(q, mp, EPROTO);
12795 12805 return;
12796 12806 }
12797 12807 /*
12798 12808 * Check for cases that need more copying. A return
12799 12809 * value of 0 means a second copyin has been started,
12800 12810 * so we return; a return value of 1 means no more
12801 12811 * copying is needed, so we continue.
12802 12812 */
12803 12813 if (ipip->ipi_cmd_type == MSFILT_CMD &&
12804 12814 MI_COPY_COUNT(mp) == 1) {
12805 12815 if (ip_copyin_msfilter(q, mp) == 0)
12806 12816 return;
12807 12817 }
12808 12818 /*
12809 12819 * Refhold the conn, till the ioctl completes. This is
12810 12820 * needed in case the ioctl ends up in the pending mp
12811 12821 * list. Every mp in the ipx_pending_mp list must have
12812 12822 * a refhold on the conn to resume processing. The
12813 12823 * refhold is released when the ioctl completes
12814 12824 * (whether normally or abnormally). An ioctlref is also
12815 12825 * placed on the conn to prevent TCP from removing the
12816 12826 * queue needed to send the ioctl reply back.
12817 12827 * In all cases ip_ioctl_finish is called to finish
12818 12828 * the ioctl and release the refholds.
12819 12829 */
12820 12830 if (connp != NULL) {
12821 12831 /* This is not a reentry */
12822 12832 CONN_INC_REF(connp);
12823 12833 CONN_INC_IOCTLREF(connp);
12824 12834 } else {
12825 12835 if (!(ipip->ipi_flags & IPI_MODOK)) {
12826 12836 mi_copy_done(q, mp, EINVAL);
12827 12837 return;
12828 12838 }
12829 12839 }
12830 12840
12831 12841 ip_process_ioctl(NULL, q, mp, ipip);
12832 12842
12833 12843 } else {
12834 12844 mi_copyout(q, mp);
12835 12845 }
12836 12846 return;
12837 12847
12838 12848 case M_IOCNAK:
12839 12849 /*
12840 12850 * The only way we could get here is if a resolver didn't like
12841 12851 * an IOCTL we sent it. This shouldn't happen.
12842 12852 */
12843 12853 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
12844 12854 "ip_wput_nondata: unexpected M_IOCNAK, ioc_cmd 0x%x",
12845 12855 ((struct iocblk *)mp->b_rptr)->ioc_cmd);
12846 12856 freemsg(mp);
12847 12857 return;
12848 12858 case M_IOCACK:
12849 12859 /* /dev/ip shouldn't see this */
12850 12860 goto nak;
12851 12861 case M_FLUSH:
12852 12862 if (*mp->b_rptr & FLUSHW)
12853 12863 flushq(q, FLUSHALL);
12854 12864 if (q->q_next) {
12855 12865 putnext(q, mp);
12856 12866 return;
12857 12867 }
12858 12868 if (*mp->b_rptr & FLUSHR) {
12859 12869 *mp->b_rptr &= ~FLUSHW;
12860 12870 qreply(q, mp);
12861 12871 return;
12862 12872 }
12863 12873 freemsg(mp);
12864 12874 return;
12865 12875 case M_CTL:
12866 12876 break;
12867 12877 case M_PROTO:
12868 12878 case M_PCPROTO:
12869 12879 /*
12870 12880 * The only PROTO messages we expect are SNMP-related.
12871 12881 */
12872 12882 switch (((union T_primitives *)mp->b_rptr)->type) {
12873 12883 case T_SVR4_OPTMGMT_REQ:
12874 12884 ip2dbg(("ip_wput_nondata: T_SVR4_OPTMGMT_REQ "
12875 12885 "flags %x\n",
12876 12886 ((struct T_optmgmt_req *)mp->b_rptr)->MGMT_flags));
12877 12887
12878 12888 if (connp == NULL) {
12879 12889 proto_str = "T_SVR4_OPTMGMT_REQ";
12880 12890 goto protonak;
12881 12891 }
12882 12892
12883 12893 /*
12884 12894 * All Solaris components should pass a db_credp
12885 12895 * for this TPI message, hence we ASSERT.
12886 12896 * But in case there is some other M_PROTO that looks
12887 12897 * like a TPI message sent by some other kernel
12888 12898 * component, we check and return an error.
12889 12899 */
12890 12900 cr = msg_getcred(mp, NULL);
12891 12901 ASSERT(cr != NULL);
12892 12902 if (cr == NULL) {
12893 12903 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL);
12894 12904 if (mp != NULL)
12895 12905 qreply(q, mp);
12896 12906 return;
12897 12907 }
12898 12908
12899 12909 if (!snmpcom_req(q, mp, ip_snmp_set, ip_snmp_get, cr)) {
12900 12910 proto_str = "Bad SNMPCOM request?";
12901 12911 goto protonak;
12902 12912 }
12903 12913 return;
12904 12914 default:
12905 12915 ip1dbg(("ip_wput_nondata: dropping M_PROTO prim %u\n",
12906 12916 (int)*(uint_t *)mp->b_rptr));
12907 12917 freemsg(mp);
12908 12918 return;
12909 12919 }
12910 12920 default:
12911 12921 break;
12912 12922 }
12913 12923 if (q->q_next) {
12914 12924 putnext(q, mp);
12915 12925 } else
12916 12926 freemsg(mp);
12917 12927 return;
12918 12928
12919 12929 nak:
12920 12930 iocp->ioc_error = EINVAL;
12921 12931 mp->b_datap->db_type = M_IOCNAK;
12922 12932 iocp->ioc_count = 0;
12923 12933 qreply(q, mp);
12924 12934 return;
12925 12935
12926 12936 protonak:
12927 12937 cmn_err(CE_NOTE, "IP doesn't process %s as a module", proto_str);
12928 12938 if ((mp = mi_tpi_err_ack_alloc(mp, TPROTO, EINVAL)) != NULL)
12929 12939 qreply(q, mp);
12930 12940 }
12931 12941
12932 12942 /*
12933 12943 * Process IP options in an outbound packet. Verify that the nexthop in a
12934 12944 * strict source route is onlink.
12935 12945 * Returns non-zero if something fails in which case an ICMP error has been
12936 12946 * sent and mp freed.
12937 12947 *
12938 12948 * Assumes the ULP has called ip_massage_options to move nexthop into ipha_dst.
12939 12949 */
12940 12950 int
12941 12951 ip_output_options(mblk_t *mp, ipha_t *ipha, ip_xmit_attr_t *ixa, ill_t *ill)
12942 12952 {
12943 12953 ipoptp_t opts;
12944 12954 uchar_t *opt;
12945 12955 uint8_t optval;
12946 12956 uint8_t optlen;
12947 12957 ipaddr_t dst;
12948 12958 intptr_t code = 0;
12949 12959 ire_t *ire;
12950 12960 ip_stack_t *ipst = ixa->ixa_ipst;
12951 12961 ip_recv_attr_t iras;
12952 12962
12953 12963 ip2dbg(("ip_output_options\n"));
12954 12964
12955 12965 dst = ipha->ipha_dst;
12956 12966 for (optval = ipoptp_first(&opts, ipha);
12957 12967 optval != IPOPT_EOL;
12958 12968 optval = ipoptp_next(&opts)) {
12959 12969 opt = opts.ipoptp_cur;
12960 12970 optlen = opts.ipoptp_len;
12961 12971 ip2dbg(("ip_output_options: opt %d, len %d\n",
12962 12972 optval, optlen));
12963 12973 switch (optval) {
12964 12974 uint32_t off;
12965 12975 case IPOPT_SSRR:
12966 12976 case IPOPT_LSRR:
12967 12977 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
12968 12978 ip1dbg((
12969 12979 "ip_output_options: bad option offset\n"));
12970 12980 code = (char *)&opt[IPOPT_OLEN] -
12971 12981 (char *)ipha;
12972 12982 goto param_prob;
12973 12983 }
12974 12984 off = opt[IPOPT_OFFSET];
12975 12985 ip1dbg(("ip_output_options: next hop 0x%x\n",
12976 12986 ntohl(dst)));
12977 12987 /*
12978 12988 * For strict: verify that dst is directly
12979 12989 * reachable.
12980 12990 */
12981 12991 if (optval == IPOPT_SSRR) {
12982 12992 ire = ire_ftable_lookup_v4(dst, 0, 0,
12983 12993 IRE_INTERFACE, NULL, ALL_ZONES,
12984 12994 ixa->ixa_tsl,
12985 12995 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
12986 12996 NULL);
12987 12997 if (ire == NULL) {
12988 12998 ip1dbg(("ip_output_options: SSRR not"
12989 12999 " directly reachable: 0x%x\n",
12990 13000 ntohl(dst)));
12991 13001 goto bad_src_route;
12992 13002 }
12993 13003 ire_refrele(ire);
12994 13004 }
12995 13005 break;
12996 13006 case IPOPT_RR:
12997 13007 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
12998 13008 ip1dbg((
12999 13009 "ip_output_options: bad option offset\n"));
13000 13010 code = (char *)&opt[IPOPT_OLEN] -
13001 13011 (char *)ipha;
13002 13012 goto param_prob;
13003 13013 }
13004 13014 break;
13005 13015 case IPOPT_TS:
13006 13016 /*
13007 13017 * Verify that length >=5 and that there is either
13008 13018 * room for another timestamp or that the overflow
13009 13019 * counter is not maxed out.
13010 13020 */
13011 13021 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
13012 13022 if (optlen < IPOPT_MINLEN_IT) {
13013 13023 goto param_prob;
13014 13024 }
13015 13025 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
13016 13026 ip1dbg((
13017 13027 "ip_output_options: bad option offset\n"));
13018 13028 code = (char *)&opt[IPOPT_OFFSET] -
13019 13029 (char *)ipha;
13020 13030 goto param_prob;
13021 13031 }
13022 13032 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
13023 13033 case IPOPT_TS_TSONLY:
13024 13034 off = IPOPT_TS_TIMELEN;
13025 13035 break;
13026 13036 case IPOPT_TS_TSANDADDR:
13027 13037 case IPOPT_TS_PRESPEC:
13028 13038 case IPOPT_TS_PRESPEC_RFC791:
13029 13039 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
13030 13040 break;
13031 13041 default:
13032 13042 code = (char *)&opt[IPOPT_POS_OV_FLG] -
13033 13043 (char *)ipha;
13034 13044 goto param_prob;
13035 13045 }
13036 13046 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
13037 13047 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
13038 13048 /*
13039 13049 * No room and the overflow counter is 15
13040 13050 * already.
13041 13051 */
13042 13052 goto param_prob;
13043 13053 }
13044 13054 break;
13045 13055 }
13046 13056 }
13047 13057
13048 13058 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0)
13049 13059 return (0);
13050 13060
13051 13061 ip1dbg(("ip_output_options: error processing IP options."));
13052 13062 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
13053 13063
13054 13064 param_prob:
13055 13065 bzero(&iras, sizeof (iras));
13056 13066 iras.ira_ill = iras.ira_rill = ill;
13057 13067 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13058 13068 iras.ira_rifindex = iras.ira_ruifindex;
13059 13069 iras.ira_flags = IRAF_IS_IPV4;
13060 13070
13061 13071 ip_drop_output("ip_output_options", mp, ill);
13062 13072 icmp_param_problem(mp, (uint8_t)code, &iras);
13063 13073 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13064 13074 return (-1);
13065 13075
13066 13076 bad_src_route:
13067 13077 bzero(&iras, sizeof (iras));
13068 13078 iras.ira_ill = iras.ira_rill = ill;
13069 13079 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13070 13080 iras.ira_rifindex = iras.ira_ruifindex;
13071 13081 iras.ira_flags = IRAF_IS_IPV4;
13072 13082
13073 13083 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
13074 13084 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, &iras);
13075 13085 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13076 13086 return (-1);
13077 13087 }
13078 13088
13079 13089 /*
13080 13090 * The maximum value of conn_drain_list_cnt is CONN_MAXDRAINCNT.
13081 13091 * conn_drain_list_cnt can be changed by setting conn_drain_nthreads
13082 13092 * thru /etc/system.
13083 13093 */
13084 13094 #define CONN_MAXDRAINCNT 64
13085 13095
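/*
 * Tuning sketch (assuming the usual /etc/system module:variable syntax):
 * conn_drain_nthreads is a global in the ip module, so it can typically
 * be set with a line such as
 *
 *	set ip:conn_drain_nthreads = 8
 *
 * followed by a reboot. A value of 0, or anything above CONN_MAXDRAINCNT,
 * falls back to the cpu-count based default computed in conn_drain_init().
 */
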
13086 13096 static void
13087 13097 conn_drain_init(ip_stack_t *ipst)
13088 13098 {
13089 13099 int i, j;
13090 13100 idl_tx_list_t *itl_tx;
13091 13101
13092 13102 ipst->ips_conn_drain_list_cnt = conn_drain_nthreads;
13093 13103
13094 13104 if ((ipst->ips_conn_drain_list_cnt == 0) ||
13095 13105 (ipst->ips_conn_drain_list_cnt > CONN_MAXDRAINCNT)) {
13096 13106 /*
13097 13107 * Default value of the number of drainers is the
13098 13108 		 * number of cpus, subject to a maximum of 8 drainers.
13099 13109 */
13100 13110 if (boot_max_ncpus != -1)
13101 13111 ipst->ips_conn_drain_list_cnt = MIN(boot_max_ncpus, 8);
13102 13112 else
13103 13113 ipst->ips_conn_drain_list_cnt = MIN(max_ncpus, 8);
13104 13114 }
13105 13115
13106 13116 ipst->ips_idl_tx_list =
13107 13117 kmem_zalloc(TX_FANOUT_SIZE * sizeof (idl_tx_list_t), KM_SLEEP);
13108 13118 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13109 13119 itl_tx = &ipst->ips_idl_tx_list[i];
13110 13120 itl_tx->txl_drain_list =
13111 13121 kmem_zalloc(ipst->ips_conn_drain_list_cnt *
13112 13122 sizeof (idl_t), KM_SLEEP);
13113 13123 mutex_init(&itl_tx->txl_lock, NULL, MUTEX_DEFAULT, NULL);
13114 13124 for (j = 0; j < ipst->ips_conn_drain_list_cnt; j++) {
13115 13125 mutex_init(&itl_tx->txl_drain_list[j].idl_lock, NULL,
13116 13126 MUTEX_DEFAULT, NULL);
13117 13127 itl_tx->txl_drain_list[j].idl_itl = itl_tx;
13118 13128 }
13119 13129 }
13120 13130 }
13121 13131
13122 13132 static void
13123 13133 conn_drain_fini(ip_stack_t *ipst)
13124 13134 {
13125 13135 int i;
13126 13136 idl_tx_list_t *itl_tx;
13127 13137
13128 13138 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13129 13139 itl_tx = &ipst->ips_idl_tx_list[i];
13130 13140 kmem_free(itl_tx->txl_drain_list,
13131 13141 ipst->ips_conn_drain_list_cnt * sizeof (idl_t));
13132 13142 }
13133 13143 kmem_free(ipst->ips_idl_tx_list,
13134 13144 TX_FANOUT_SIZE * sizeof (idl_tx_list_t));
13135 13145 ipst->ips_idl_tx_list = NULL;
13136 13146 }
13137 13147
13138 13148 /*
13139 13149 * Flow control has blocked us from proceeding. Insert the given conn in one
13140 13150 * of the conn drain lists. When flow control is unblocked, either ip_wsrv()
13141 13151 * (STREAMS) or ill_flow_enable() (direct) will be called back, which in turn
13142 13152 * will call conn_walk_drain(). See the flow control notes at the top of this
13143 13153 * file for more details.
13144 13154 */
13145 13155 void
13146 13156 conn_drain_insert(conn_t *connp, idl_tx_list_t *tx_list)
13147 13157 {
13148 13158 idl_t *idl = tx_list->txl_drain_list;
13149 13159 uint_t index;
13150 13160 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
13151 13161
13152 13162 mutex_enter(&connp->conn_lock);
13153 13163 if (connp->conn_state_flags & CONN_CLOSING) {
13154 13164 /*
13155 13165 * The conn is closing as a result of which CONN_CLOSING
13156 13166 * is set. Return.
13157 13167 */
13158 13168 mutex_exit(&connp->conn_lock);
13159 13169 return;
13160 13170 } else if (connp->conn_idl == NULL) {
13161 13171 /*
13162 13172 		 * Assign the next drain list round robin. We don't use
13163 13173 		 * a lock, and thus it may not be strictly round robin.
13164 13174 		 * Atomicity of loads/stores is enough to make sure that
13165 13175 		 * txl_drain_index is always within bounds.
13166 13176 */
13167 13177 index = tx_list->txl_drain_index;
13168 13178 ASSERT(index < ipst->ips_conn_drain_list_cnt);
13169 13179 connp->conn_idl = &tx_list->txl_drain_list[index];
13170 13180 index++;
13171 13181 if (index == ipst->ips_conn_drain_list_cnt)
13172 13182 index = 0;
13173 13183 tx_list->txl_drain_index = index;
13174 13184 } else {
13175 13185 ASSERT(connp->conn_idl->idl_itl == tx_list);
13176 13186 }
13177 13187 mutex_exit(&connp->conn_lock);
13178 13188
13179 13189 idl = connp->conn_idl;
13180 13190 mutex_enter(&idl->idl_lock);
13181 13191 if ((connp->conn_drain_prev != NULL) ||
13182 13192 (connp->conn_state_flags & CONN_CLOSING)) {
13183 13193 /*
13184 13194 * The conn is either already in the drain list or closing.
13185 13195 * (We needed to check for CONN_CLOSING again since close can
13186 13196 * sneak in between dropping conn_lock and acquiring idl_lock.)
13187 13197 */
13188 13198 mutex_exit(&idl->idl_lock);
13189 13199 return;
13190 13200 }
13191 13201
13192 13202 /*
13193 13203 * The conn is not in the drain list. Insert it at the
13194 13204 * tail of the drain list. The drain list is circular
13195 13205 * and doubly linked. idl_conn points to the 1st element
13196 13206 * in the list.
13197 13207 */
13198 13208 if (idl->idl_conn == NULL) {
13199 13209 idl->idl_conn = connp;
13200 13210 connp->conn_drain_next = connp;
13201 13211 connp->conn_drain_prev = connp;
13202 13212 } else {
13203 13213 conn_t *head = idl->idl_conn;
13204 13214
13205 13215 connp->conn_drain_next = head;
13206 13216 connp->conn_drain_prev = head->conn_drain_prev;
13207 13217 head->conn_drain_prev->conn_drain_next = connp;
13208 13218 head->conn_drain_prev = connp;
13209 13219 }
13210 13220 /*
13211 13221 	 * For non-streams-based sockets, assert flow control.
13212 13222 */
13213 13223 conn_setqfull(connp, NULL);
13214 13224 mutex_exit(&idl->idl_lock);
13215 13225 }
13216 13226
13217 13227 static void
13218 13228 conn_drain_remove(conn_t *connp)
13219 13229 {
13220 13230 idl_t *idl = connp->conn_idl;
13221 13231
13222 13232 if (idl != NULL) {
13223 13233 /*
13224 13234 * Remove ourself from the drain list.
13225 13235 */
13226 13236 if (connp->conn_drain_next == connp) {
13227 13237 /* Singleton in the list */
13228 13238 ASSERT(connp->conn_drain_prev == connp);
13229 13239 idl->idl_conn = NULL;
13230 13240 } else {
13231 13241 connp->conn_drain_prev->conn_drain_next =
13232 13242 connp->conn_drain_next;
13233 13243 connp->conn_drain_next->conn_drain_prev =
13234 13244 connp->conn_drain_prev;
13235 13245 if (idl->idl_conn == connp)
13236 13246 idl->idl_conn = connp->conn_drain_next;
13237 13247 }
13238 13248
13239 13249 /*
13240 13250 * NOTE: because conn_idl is associated with a specific drain
13241 13251 * list which in turn is tied to the index the TX ring
13242 13252 * (txl_cookie) hashes to, and because the TX ring can change
13243 13253 * over the lifetime of the conn_t, we must clear conn_idl so
13244 13254 * a subsequent conn_drain_insert() will set conn_idl again
13245 13255 * based on the latest txl_cookie.
13246 13256 */
13247 13257 connp->conn_idl = NULL;
13248 13258 }
13249 13259 connp->conn_drain_next = NULL;
13250 13260 connp->conn_drain_prev = NULL;
13251 13261
13252 13262 conn_clrqfull(connp, NULL);
13253 13263 /*
13254 13264 	 * For streams-based sockets, open up flow control.
13255 13265 */
13256 13266 if (!IPCL_IS_NONSTR(connp))
13257 13267 enableok(connp->conn_wq);
13258 13268 }
13259 13269
13260 13270 /*
13261 13271 * This conn is closing, and we are called from ip_close. OR
13262 13272 * this conn is draining because flow-control on the ill has been relieved.
13263 13273 *
13264 13274  * We also need to remove conns on this idl from the list, and
13265 13275  * inform the sockfs upcalls about the change in flow-control.
13266 13276 */
13267 13277 static void
13268 13278 conn_drain(conn_t *connp, boolean_t closing)
13269 13279 {
13270 13280 idl_t *idl;
13271 13281 conn_t *next_connp;
13272 13282
13273 13283 /*
13274 13284 * connp->conn_idl is stable at this point, and no lock is needed
13275 13285 * to check it. If we are called from ip_close, close has already
13276 13286 * set CONN_CLOSING, thus freezing the value of conn_idl, and
13277 13287 * called us only because conn_idl is non-null. If we are called thru
13278 13288 * service, conn_idl could be null, but it cannot change because
13279 13289 * service is single-threaded per queue, and there cannot be another
13280 13290 * instance of service trying to call conn_drain_insert on this conn
13281 13291 * now.
13282 13292 */
13283 13293 ASSERT(!closing || connp == NULL || connp->conn_idl != NULL);
13284 13294
13285 13295 /*
13286 13296 * If the conn doesn't exist or is not on a drain list, bail.
13287 13297 */
13288 13298 if (connp == NULL || connp->conn_idl == NULL ||
13289 13299 connp->conn_drain_prev == NULL) {
13290 13300 return;
13291 13301 }
13292 13302
13293 13303 idl = connp->conn_idl;
13294 13304 ASSERT(MUTEX_HELD(&idl->idl_lock));
13295 13305
13296 13306 if (!closing) {
13297 13307 next_connp = connp->conn_drain_next;
13298 13308 while (next_connp != connp) {
13299 13309 conn_t *delconnp = next_connp;
13300 13310
13301 13311 next_connp = next_connp->conn_drain_next;
13302 13312 conn_drain_remove(delconnp);
13303 13313 }
13304 13314 ASSERT(connp->conn_drain_next == idl->idl_conn);
13305 13315 }
13306 13316 conn_drain_remove(connp);
13307 13317 }
13308 13318
13309 13319 /*
13310 13320 * Write service routine. Shared perimeter entry point.
13311 13321  * The device queue's messages have fallen below the low water mark and STREAMS
13312 13322 * has backenabled the ill_wq. Send sockfs notification about flow-control on
13313 13323 * each waiting conn.
13314 13324 */
13315 13325 void
13316 13326 ip_wsrv(queue_t *q)
13317 13327 {
13318 13328 ill_t *ill;
13319 13329
13320 13330 ill = (ill_t *)q->q_ptr;
13321 13331 if (ill->ill_state_flags == 0) {
13322 13332 ip_stack_t *ipst = ill->ill_ipst;
13323 13333
13324 13334 /*
13325 13335 * The device flow control has opened up.
13326 13336 * Walk through conn drain lists and qenable the
13327 13337 * first conn in each list. This makes sense only
13328 13338 * if the stream is fully plumbed and setup.
13329 13339 * Hence the ill_state_flags check above.
13330 13340 */
13331 13341 ip1dbg(("ip_wsrv: walking\n"));
13332 13342 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[0]);
13333 13343 enableok(ill->ill_wq);
13334 13344 }
13335 13345 }
13336 13346
13337 13347 /*
13338 13348 * Callback to disable flow control in IP.
13339 13349 *
13340 13350 * This is a mac client callback added when the DLD_CAPAB_DIRECT capability
13341 13351 * is enabled.
13342 13352 *
13343 13353 * When MAC_TX() is not able to send any more packets, dld sets its queue
13344 13354  * to QFULL and enables the STREAMS flow control. Later, when the underlying
13345 13355  * driver is able to continue to send packets, it calls the
13346 13356  * mac_tx_(ring_)update() function and wakes up the corresponding mac worker
13347 13357  * threads, which in turn call this callback function and disable flow control.
13348 13358 */
13349 13359 void
13350 13360 ill_flow_enable(void *arg, ip_mac_tx_cookie_t cookie)
13351 13361 {
13352 13362 ill_t *ill = (ill_t *)arg;
13353 13363 ip_stack_t *ipst = ill->ill_ipst;
13354 13364 idl_tx_list_t *idl_txl;
13355 13365
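	/* Hash the TX ring cookie to its idl_tx_list bucket */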
13356 13366 idl_txl = &ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
13357 13367 mutex_enter(&idl_txl->txl_lock);
13358 13368 	/* add code to set a flag to indicate idl_txl is enabled */
13359 13369 conn_walk_drain(ipst, idl_txl);
13360 13370 mutex_exit(&idl_txl->txl_lock);
13361 13371 }
13362 13372
13363 13373 /*
13364 13374 * Flow control has been relieved and STREAMS has backenabled us; drain
13365 13375 * all the conn lists on `tx_list'.
13366 13376 */
13367 13377 static void
13368 13378 conn_walk_drain(ip_stack_t *ipst, idl_tx_list_t *tx_list)
13369 13379 {
13370 13380 int i;
13371 13381 idl_t *idl;
13372 13382
13373 13383 IP_STAT(ipst, ip_conn_walk_drain);
13374 13384
13375 13385 for (i = 0; i < ipst->ips_conn_drain_list_cnt; i++) {
13376 13386 idl = &tx_list->txl_drain_list[i];
13377 13387 mutex_enter(&idl->idl_lock);
13378 13388 conn_drain(idl->idl_conn, B_FALSE);
13379 13389 mutex_exit(&idl->idl_lock);
13380 13390 }
13381 13391 }
13382 13392
13383 13393 /*
13384 13394  * Determine if the ill and multicast aspects of the packet
13385 13395  * "match" the conn.
13386 13396 */
13387 13397 boolean_t
13388 13398 conn_wantpacket(conn_t *connp, ip_recv_attr_t *ira, ipha_t *ipha)
13389 13399 {
13390 13400 ill_t *ill = ira->ira_rill;
13391 13401 zoneid_t zoneid = ira->ira_zoneid;
13392 13402 uint_t in_ifindex;
13393 13403 ipaddr_t dst, src;
13394 13404
13395 13405 dst = ipha->ipha_dst;
13396 13406 src = ipha->ipha_src;
13397 13407
13398 13408 /*
13399 13409 * conn_incoming_ifindex is set by IP_BOUND_IF which limits
13400 13410 * unicast, broadcast and multicast reception to
13401 13411 * conn_incoming_ifindex.
13402 13412 * conn_wantpacket is called for unicast, broadcast and
13403 13413 * multicast packets.
13404 13414 */
13405 13415 in_ifindex = connp->conn_incoming_ifindex;
13406 13416
13407 13417 /* mpathd can bind to the under IPMP interface, which we allow */
13408 13418 if (in_ifindex != 0 && in_ifindex != ill->ill_phyint->phyint_ifindex) {
13409 13419 if (!IS_UNDER_IPMP(ill))
13410 13420 return (B_FALSE);
13411 13421
13412 13422 if (in_ifindex != ipmp_ill_get_ipmp_ifindex(ill))
13413 13423 return (B_FALSE);
13414 13424 }
13415 13425
13416 13426 if (!IPCL_ZONE_MATCH(connp, zoneid))
13417 13427 return (B_FALSE);
13418 13428
13419 13429 if (!(ira->ira_flags & IRAF_MULTICAST))
13420 13430 return (B_TRUE);
13421 13431
13422 13432 if (connp->conn_multi_router) {
13423 13433 /* multicast packet and multicast router socket: send up */
13424 13434 return (B_TRUE);
13425 13435 }
13426 13436
13427 13437 if (ipha->ipha_protocol == IPPROTO_PIM ||
13428 13438 ipha->ipha_protocol == IPPROTO_RSVP)
13429 13439 return (B_TRUE);
13430 13440
13431 13441 return (conn_hasmembers_ill_withsrc_v4(connp, dst, src, ira->ira_ill));
13432 13442 }
13433 13443
13434 13444 void
13435 13445 conn_setqfull(conn_t *connp, boolean_t *flow_stopped)
13436 13446 {
13437 13447 if (IPCL_IS_NONSTR(connp)) {
13438 13448 (*connp->conn_upcalls->su_txq_full)
13439 13449 (connp->conn_upper_handle, B_TRUE);
13440 13450 if (flow_stopped != NULL)
13441 13451 *flow_stopped = B_TRUE;
13442 13452 } else {
13443 13453 queue_t *q = connp->conn_wq;
13444 13454
13445 13455 ASSERT(q != NULL);
13446 13456 if (!(q->q_flag & QFULL)) {
13447 13457 mutex_enter(QLOCK(q));
13448 13458 if (!(q->q_flag & QFULL)) {
13449 13459 /* still need to set QFULL */
13450 13460 q->q_flag |= QFULL;
13451 13461 /* set flow_stopped to true under QLOCK */
13452 13462 if (flow_stopped != NULL)
13453 13463 *flow_stopped = B_TRUE;
13454 13464 mutex_exit(QLOCK(q));
13455 13465 } else {
13456 13466 /* flow_stopped is left unchanged */
13457 13467 mutex_exit(QLOCK(q));
13458 13468 }
13459 13469 }
13460 13470 }
13461 13471 }
13462 13472
13463 13473 void
13464 13474 conn_clrqfull(conn_t *connp, boolean_t *flow_stopped)
13465 13475 {
13466 13476 if (IPCL_IS_NONSTR(connp)) {
13467 13477 (*connp->conn_upcalls->su_txq_full)
13468 13478 (connp->conn_upper_handle, B_FALSE);
13469 13479 if (flow_stopped != NULL)
13470 13480 *flow_stopped = B_FALSE;
13471 13481 } else {
13472 13482 queue_t *q = connp->conn_wq;
13473 13483
13474 13484 ASSERT(q != NULL);
13475 13485 if (q->q_flag & QFULL) {
13476 13486 mutex_enter(QLOCK(q));
13477 13487 if (q->q_flag & QFULL) {
13478 13488 q->q_flag &= ~QFULL;
13479 13489 /* set flow_stopped to false under QLOCK */
13480 13490 if (flow_stopped != NULL)
13481 13491 *flow_stopped = B_FALSE;
13482 13492 mutex_exit(QLOCK(q));
13483 13493 if (q->q_flag & QWANTW)
13484 13494 qbackenable(q, 0);
13485 13495 } else {
13486 13496 /* flow_stopped is left unchanged */
13487 13497 mutex_exit(QLOCK(q));
13488 13498 }
13489 13499 }
13490 13500 }
13491 13501
13492 13502 mutex_enter(&connp->conn_lock);
13493 13503 connp->conn_blocked = B_FALSE;
13494 13504 mutex_exit(&connp->conn_lock);
13495 13505 }
13496 13506
13497 13507 /*
13498 13508 * Return the length in bytes of the IPv4 headers (base header, label, and
13499 13509 * other IP options) that will be needed based on the
13500 13510 * ip_pkt_t structure passed by the caller.
13501 13511 *
13502 13512 * The returned length does not include the length of the upper level
13503 13513 * protocol (ULP) header.
13504 13514 * The caller needs to check that the length doesn't exceed the max for IPv4.
13505 13515 */
13506 13516 int
13507 13517 ip_total_hdrs_len_v4(const ip_pkt_t *ipp)
13508 13518 {
13509 13519 int len;
13510 13520
13511 13521 len = IP_SIMPLE_HDR_LENGTH;
13512 13522 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13513 13523 ASSERT(ipp->ipp_label_len_v4 != 0);
13514 13524 /* We need to round up here */
13515 13525 len += (ipp->ipp_label_len_v4 + 3) & ~3;
13516 13526 }
13517 13527
13518 13528 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13519 13529 ASSERT(ipp->ipp_ipv4_options_len != 0);
13520 13530 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13521 13531 len += ipp->ipp_ipv4_options_len;
13522 13532 }
13523 13533 return (len);
13524 13534 }
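/*
 * Worked example (illustrative): with a 10-byte label and no other
 * options, the label is rounded up via (10 + 3) & ~3 == 12, so the
 * routine above returns IP_SIMPLE_HDR_LENGTH (20) + 12 == 32 bytes.
 */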
13525 13535
13526 13536 /*
13527 13537 * All-purpose routine to build an IPv4 header with options based
13528 13538 * on the abstract ip_pkt_t.
13529 13539 *
13530 13540 * The caller has to set the source and destination address as well as
13531 13541 * ipha_length. The caller has to massage any source route and compensate
13532 13542 * for the ULP pseudo-header checksum due to the source route.
13533 13543 */
13534 13544 void
13535 13545 ip_build_hdrs_v4(uchar_t *buf, uint_t buf_len, const ip_pkt_t *ipp,
13536 13546 uint8_t protocol)
13537 13547 {
13538 13548 ipha_t *ipha = (ipha_t *)buf;
13539 13549 uint8_t *cp;
13540 13550
13541 13551 /* Initialize IPv4 header */
13542 13552 ipha->ipha_type_of_service = ipp->ipp_type_of_service;
13543 13553 ipha->ipha_length = 0; /* Caller will set later */
13544 13554 ipha->ipha_ident = 0;
13545 13555 ipha->ipha_fragment_offset_and_flags = 0;
13546 13556 ipha->ipha_ttl = ipp->ipp_unicast_hops;
13547 13557 ipha->ipha_protocol = protocol;
13548 13558 ipha->ipha_hdr_checksum = 0;
13549 13559
13550 13560 if ((ipp->ipp_fields & IPPF_ADDR) &&
13551 13561 IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr))
13552 13562 ipha->ipha_src = ipp->ipp_addr_v4;
13553 13563
13554 13564 cp = (uint8_t *)&ipha[1];
13555 13565 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13556 13566 ASSERT(ipp->ipp_label_len_v4 != 0);
13557 13567 bcopy(ipp->ipp_label_v4, cp, ipp->ipp_label_len_v4);
13558 13568 cp += ipp->ipp_label_len_v4;
13559 13569 /* We need to round up here */
13560 13570 while ((uintptr_t)cp & 0x3) {
13561 13571 *cp++ = IPOPT_NOP;
13562 13572 }
13563 13573 }
13564 13574
13565 13575 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13566 13576 ASSERT(ipp->ipp_ipv4_options_len != 0);
13567 13577 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13568 13578 bcopy(ipp->ipp_ipv4_options, cp, ipp->ipp_ipv4_options_len);
13569 13579 cp += ipp->ipp_ipv4_options_len;
13570 13580 }
13571 13581 ipha->ipha_version_and_hdr_length =
13572 13582 (uint8_t)((IP_VERSION << 4) + buf_len / 4);
13573 13583
13574 13584 ASSERT((int)(cp - buf) == buf_len);
13575 13585 }
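/*
 * Pairing sketch (assumption, not part of this change): a caller
 * sizes the buffer with ip_total_hdrs_len_v4() and then fills it in
 * with ip_build_hdrs_v4(); the protocol and allocation policy below
 * are placeholders.
 */
#if 0
static uchar_t *
ip_hdr_build_sketch(const ip_pkt_t *ipp, uint_t *lenp)
{
	uint_t len = ip_total_hdrs_len_v4(ipp);
	uchar_t *buf;

	if (len > 60)		/* max IPv4 header: 15 words << 2 */
		return (NULL);
	if ((buf = kmem_alloc(len, KM_NOSLEEP)) == NULL)
		return (NULL);
	ip_build_hdrs_v4(buf, len, ipp, IPPROTO_UDP);
	/* ipha_src, ipha_dst and ipha_length are still the caller's job */
	*lenp = len;
	return (buf);
}
#endif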
13576 13586
13577 13587 /* Allocate the private structure */
13578 13588 static int
13579 13589 ip_priv_alloc(void **bufp)
13580 13590 {
13581 13591 void *buf;
13582 13592
13583 13593 if ((buf = kmem_alloc(sizeof (ip_priv_t), KM_NOSLEEP)) == NULL)
13584 13594 return (ENOMEM);
13585 13595
13586 13596 *bufp = buf;
13587 13597 return (0);
13588 13598 }
13589 13599
13590 13600 /* Function to delete the private structure */
13591 13601 void
13592 13602 ip_priv_free(void *buf)
13593 13603 {
13594 13604 ASSERT(buf != NULL);
13595 13605 kmem_free(buf, sizeof (ip_priv_t));
13596 13606 }
13597 13607
13598 13608 /*
13599 13609 * The entry point for IPPF processing.
13600 13610 * If the classifier (IPGPC_CLASSIFY) is not loaded and configured, the
13601 13611 * routine just returns.
13602 13612 *
13603 13613 * When called, ip_process generates an ipp_packet_t structure
13604 13614 * which holds the state information for this packet and invokes the
13605 13615 * classifier (via ipp_packet_process). The classification, depending on
13606 13616 * configured filters, results in a list of actions for this packet. Invoking
13607 13617 * an action may cause the packet to be dropped, in which case we return NULL.
13608 13618 * proc indicates the callout position for
13609 13619 * this packet and ill is the interface this packet arrived on or will leave
13610 13620 * on (inbound and outbound resp.).
13611 13621 *
13612 13622 * We do the processing on the rill (mapped to the upper if ipmp), but MIB
13613 13623 * on the ill corresponding to the destination IP address.
13614 13624 */
13615 13625 mblk_t *
13616 13626 ip_process(ip_proc_t proc, mblk_t *mp, ill_t *rill, ill_t *ill)
13617 13627 {
13618 13628 ip_priv_t *priv;
13619 13629 ipp_action_id_t aid;
13620 13630 int rc = 0;
13621 13631 ipp_packet_t *pp;
13622 13632
13623 13633 /* If the classifier is not loaded, return */
13624 13634 if ((aid = ipp_action_lookup(IPGPC_CLASSIFY)) == IPP_ACTION_INVAL) {
13625 13635 return (mp);
13626 13636 }
13627 13637
13628 13638 ASSERT(mp != NULL);
13629 13639
13630 13640 /* Allocate the packet structure */
13631 13641 rc = ipp_packet_alloc(&pp, "ip", aid);
13632 13642 if (rc != 0)
13633 13643 goto drop;
13634 13644
13635 13645 /* Allocate the private structure */
13636 13646 rc = ip_priv_alloc((void **)&priv);
13637 13647 if (rc != 0) {
13638 13648 ipp_packet_free(pp);
13639 13649 goto drop;
13640 13650 }
13641 13651 priv->proc = proc;
13642 13652 priv->ill_index = ill_get_upper_ifindex(rill);
13643 13653
13644 13654 ipp_packet_set_private(pp, priv, ip_priv_free);
13645 13655 ipp_packet_set_data(pp, mp);
13646 13656
13647 13657 /* Invoke the classifier */
13648 13658 rc = ipp_packet_process(&pp);
13649 13659 if (pp != NULL) {
13650 13660 mp = ipp_packet_get_data(pp);
13651 13661 ipp_packet_free(pp);
13652 13662 if (rc != 0)
13653 13663 goto drop;
13654 13664 return (mp);
13655 13665 } else {
13656 13666 /* No mp to trace in ip_drop_input/ip_drop_output */
13657 13667 mp = NULL;
13658 13668 }
13659 13669 drop:
13660 13670 if (proc == IPP_LOCAL_IN || proc == IPP_FWD_IN) {
13661 13671 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
13662 13672 ip_drop_input("ip_process", mp, ill);
13663 13673 } else {
13664 13674 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
13665 13675 ip_drop_output("ip_process", mp, ill);
13666 13676 }
13667 13677 freemsg(mp);
13668 13678 return (NULL);
13669 13679 }
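/*
 * Call pattern sketch (illustrative): at a callout position the caller
 * does
 *
 *	mp = ip_process(IPP_LOCAL_IN, mp, rill, ill);
 *	if (mp == NULL)
 *		return;
 *
 * i.e. ip_process() either hands back the (possibly modified) packet
 * or consumes it, in which case the drop has already been counted.
 */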
13670 13680
13671 13681 /*
13672 13682 * Propagate a multicast group membership operation (add/drop) on
13673 13683 * all the interfaces crossed by the related multirt routes.
13674 13684 * The call is considered successful if the operation succeeds
13675 13685 * on at least one interface.
13676 13686 *
13677 13687 * This assumes that a set of IRE_HOST/RTF_MULTIRT has been created for the
13678 13688 * multicast addresses with the ire argument being the first one.
13679 13689 * We walk the bucket to find all of those.
13680 13690 *
13681 13691 * Common to IPv4 and IPv6.
13682 13692 */
13683 13693 static int
13684 13694 ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
13685 13695 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
13686 13696 ire_t *ire, conn_t *connp, boolean_t checkonly, const in6_addr_t *v6group,
13687 13697 mcast_record_t fmode, const in6_addr_t *v6src)
13688 13698 {
13689 13699 ire_t *ire_gw;
13690 13700 irb_t *irb;
13691 13701 int ifindex;
13692 13702 int error = 0;
13693 13703 int result;
13694 13704 ip_stack_t *ipst = ire->ire_ipst;
13695 13705 ipaddr_t group;
13696 13706 boolean_t isv6;
13697 13707 int match_flags;
13698 13708
13699 13709 if (IN6_IS_ADDR_V4MAPPED(v6group)) {
13700 13710 IN6_V4MAPPED_TO_IPADDR(v6group, group);
13701 13711 isv6 = B_FALSE;
13702 13712 } else {
13703 13713 isv6 = B_TRUE;
13704 13714 }
13705 13715
13706 13716 irb = ire->ire_bucket;
13707 13717 ASSERT(irb != NULL);
13708 13718
13709 13719 result = 0;
13710 13720 irb_refhold(irb);
13711 13721 for (; ire != NULL; ire = ire->ire_next) {
13712 13722 if ((ire->ire_flags & RTF_MULTIRT) == 0)
13713 13723 continue;
13714 13724
13715 13725 /* We handle -ifp routes by matching on the ill if set */
13716 13726 match_flags = MATCH_IRE_TYPE;
13717 13727 if (ire->ire_ill != NULL)
13718 13728 match_flags |= MATCH_IRE_ILL;
13719 13729
13720 13730 if (isv6) {
13721 13731 if (!IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, v6group))
13722 13732 continue;
13723 13733
13724 13734 ire_gw = ire_ftable_lookup_v6(&ire->ire_gateway_addr_v6,
13725 13735 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13726 13736 match_flags, 0, ipst, NULL);
13727 13737 } else {
13728 13738 if (ire->ire_addr != group)
13729 13739 continue;
13730 13740
13731 13741 ire_gw = ire_ftable_lookup_v4(ire->ire_gateway_addr,
13732 13742 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13733 13743 match_flags, 0, ipst, NULL);
13734 13744 }
13735 13745 /* No interface route exists for the gateway; skip this ire. */
13736 13746 if (ire_gw == NULL)
13737 13747 continue;
13738 13748 if (ire_gw->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
13739 13749 ire_refrele(ire_gw);
13740 13750 continue;
13741 13751 }
13742 13752 ASSERT(ire_gw->ire_ill != NULL); /* IRE_INTERFACE */
13743 13753 ifindex = ire_gw->ire_ill->ill_phyint->phyint_ifindex;
13744 13754
13745 13755 /*
13746 13756 * The operation is considered a success if
13747 13757 * it succeeds at least once on any one interface.
13748 13758 */
13749 13759 error = fn(connp, checkonly, v6group, INADDR_ANY, ifindex,
13750 13760 fmode, v6src);
13751 13761 if (error == 0)
13752 13762 result = CGTP_MCAST_SUCCESS;
13753 13763
13754 13764 ire_refrele(ire_gw);
13755 13765 }
13756 13766 irb_refrele(irb);
13757 13767 /*
13758 13768 * Consider the call as successful if we succeeded on at least
13759 13769 * one interface. Otherwise, return the last encountered error.
13760 13770 */
13761 13771 return (result == CGTP_MCAST_SUCCESS ? 0 : error);
13762 13772 }
13763 13773
13764 13774 /*
13765 13775 * Return the expected CGTP hooks version number.
13766 13776 */
13767 13777 int
13768 13778 ip_cgtp_filter_supported(void)
13769 13779 {
13770 13780 return (ip_cgtp_filter_rev);
13771 13781 }
13772 13782
13773 13783 /*
13774 13784 * CGTP hooks can be registered by invoking this function.
13775 13785 * Checks that the version number matches.
13776 13786 */
13777 13787 int
13778 13788 ip_cgtp_filter_register(netstackid_t stackid, cgtp_filter_ops_t *ops)
13779 13789 {
13780 13790 netstack_t *ns;
13781 13791 ip_stack_t *ipst;
13782 13792
13783 13793 if (ops->cfo_filter_rev != CGTP_FILTER_REV)
13784 13794 return (ENOTSUP);
13785 13795
13786 13796 ns = netstack_find_by_stackid(stackid);
13787 13797 if (ns == NULL)
13788 13798 return (EINVAL);
13789 13799 ipst = ns->netstack_ip;
13790 13800 ASSERT(ipst != NULL);
13791 13801
13792 13802 if (ipst->ips_ip_cgtp_filter_ops != NULL) {
13793 13803 netstack_rele(ns);
13794 13804 return (EALREADY);
13795 13805 }
13796 13806
13797 13807 ipst->ips_ip_cgtp_filter_ops = ops;
13798 13808
13799 13809 ill_set_inputfn_all(ipst);
13800 13810
13801 13811 netstack_rele(ns);
13802 13812 return (0);
13803 13813 }
13804 13814
13805 13815 /*
13806 13816 * CGTP hooks can be unregistered by invoking this function.
13807 13817 * Returns ENXIO if there was no registration.
13808 13818 * Returns EBUSY if the ndd variable has not been turned off.
13809 13819 */
13810 13820 int
13811 13821 ip_cgtp_filter_unregister(netstackid_t stackid)
13812 13822 {
13813 13823 netstack_t *ns;
13814 13824 ip_stack_t *ipst;
13815 13825
13816 13826 ns = netstack_find_by_stackid(stackid);
13817 13827 if (ns == NULL)
13818 13828 return (EINVAL);
13819 13829 ipst = ns->netstack_ip;
13820 13830 ASSERT(ipst != NULL);
13821 13831
13822 13832 if (ipst->ips_ip_cgtp_filter) {
13823 13833 netstack_rele(ns);
13824 13834 return (EBUSY);
13825 13835 }
13826 13836
13827 13837 if (ipst->ips_ip_cgtp_filter_ops == NULL) {
13828 13838 netstack_rele(ns);
13829 13839 return (ENXIO);
13830 13840 }
13831 13841 ipst->ips_ip_cgtp_filter_ops = NULL;
13832 13842
13833 13843 ill_set_inputfn_all(ipst);
13834 13844
13835 13845 netstack_rele(ns);
13836 13846 return (0);
13837 13847 }
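/*
 * Registration sketch (hypothetical filter module, not part of this
 * change). Only cfo_filter_rev is spelled out; the remaining
 * cgtp_filter_ops_t callbacks are elided.
 */
#if 0
static cgtp_filter_ops_t cgtp_ops_sketch = {
	CGTP_FILTER_REV,	/* must match or registration gets ENOTSUP */
	/* ... filter callbacks ... */
};

static int
cgtp_attach_sketch(netstackid_t stackid)
{
	/* EALREADY means another module beat us to this netstack */
	return (ip_cgtp_filter_register(stackid, &cgtp_ops_sketch));
}
#endif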
13838 13848
13839 13849 /*
13840 13850 * Check whether there is a CGTP filter registration.
13841 13851 * Returns non-zero if there is a registration, otherwise returns zero.
13842 13852 * Note: returns zero if bad stackid.
13843 13853 */
13844 13854 int
13845 13855 ip_cgtp_filter_is_registered(netstackid_t stackid)
13846 13856 {
13847 13857 netstack_t *ns;
13848 13858 ip_stack_t *ipst;
13849 13859 int ret;
13850 13860
13851 13861 ns = netstack_find_by_stackid(stackid);
13852 13862 if (ns == NULL)
13853 13863 return (0);
13854 13864 ipst = ns->netstack_ip;
13855 13865 ASSERT(ipst != NULL);
13856 13866
13857 13867 if (ipst->ips_ip_cgtp_filter_ops != NULL)
13858 13868 ret = 1;
13859 13869 else
13860 13870 ret = 0;
13861 13871
13862 13872 netstack_rele(ns);
13863 13873 return (ret);
13864 13874 }
13865 13875
13866 13876 static int
13867 13877 ip_squeue_switch(int val)
13868 13878 {
13869 13879 int rval;
13870 13880
13871 13881 switch (val) {
13872 13882 case IP_SQUEUE_ENTER_NODRAIN:
13873 13883 rval = SQ_NODRAIN;
13874 13884 break;
13875 13885 case IP_SQUEUE_ENTER:
13876 13886 rval = SQ_PROCESS;
13877 13887 break;
13878 13888 case IP_SQUEUE_FILL:
13879 13889 default:
13880 13890 rval = SQ_FILL;
13881 13891 break;
13882 13892 }
13883 13893 return (rval);
13884 13894 }
13885 13895
13886 13896 static void *
13887 13897 ip_kstat2_init(netstackid_t stackid, ip_stat_t *ip_statisticsp)
13888 13898 {
13889 13899 kstat_t *ksp;
13890 13900
13891 13901 ip_stat_t template = {
13892 13902 { "ip_udp_fannorm", KSTAT_DATA_UINT64 },
13893 13903 { "ip_udp_fanmb", KSTAT_DATA_UINT64 },
13894 13904 { "ip_recv_pullup", KSTAT_DATA_UINT64 },
13895 13905 { "ip_db_ref", KSTAT_DATA_UINT64 },
13896 13906 { "ip_notaligned", KSTAT_DATA_UINT64 },
13897 13907 { "ip_multimblk", KSTAT_DATA_UINT64 },
13898 13908 { "ip_opt", KSTAT_DATA_UINT64 },
13899 13909 { "ipsec_proto_ahesp", KSTAT_DATA_UINT64 },
13900 13910 { "ip_conn_flputbq", KSTAT_DATA_UINT64 },
13901 13911 { "ip_conn_walk_drain", KSTAT_DATA_UINT64 },
13902 13912 { "ip_out_sw_cksum", KSTAT_DATA_UINT64 },
13903 13913 { "ip_out_sw_cksum_bytes", KSTAT_DATA_UINT64 },
13904 13914 { "ip_in_sw_cksum", KSTAT_DATA_UINT64 },
13905 13915 { "ip_ire_reclaim_calls", KSTAT_DATA_UINT64 },
13906 13916 { "ip_ire_reclaim_deleted", KSTAT_DATA_UINT64 },
13907 13917 { "ip_nce_reclaim_calls", KSTAT_DATA_UINT64 },
13908 13918 { "ip_nce_reclaim_deleted", KSTAT_DATA_UINT64 },
13909 13919 { "ip_dce_reclaim_calls", KSTAT_DATA_UINT64 },
13910 13920 { "ip_dce_reclaim_deleted", KSTAT_DATA_UINT64 },
13911 13921 { "ip_tcp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13912 13922 { "ip_tcp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13913 13923 { "ip_tcp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13914 13924 { "ip_udp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13915 13925 { "ip_udp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13916 13926 { "ip_udp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13917 13927 { "conn_in_recvdstaddr", KSTAT_DATA_UINT64 },
13918 13928 { "conn_in_recvopts", KSTAT_DATA_UINT64 },
13919 13929 { "conn_in_recvif", KSTAT_DATA_UINT64 },
13920 13930 { "conn_in_recvslla", KSTAT_DATA_UINT64 },
13921 13931 { "conn_in_recvucred", KSTAT_DATA_UINT64 },
13922 13932 { "conn_in_recvttl", KSTAT_DATA_UINT64 },
13923 13933 { "conn_in_recvhopopts", KSTAT_DATA_UINT64 },
13924 13934 { "conn_in_recvhoplimit", KSTAT_DATA_UINT64 },
13925 13935 { "conn_in_recvdstopts", KSTAT_DATA_UINT64 },
13926 13936 { "conn_in_recvrthdrdstopts", KSTAT_DATA_UINT64 },
13927 13937 { "conn_in_recvrthdr", KSTAT_DATA_UINT64 },
13928 13938 { "conn_in_recvpktinfo", KSTAT_DATA_UINT64 },
13929 13939 { "conn_in_recvtclass", KSTAT_DATA_UINT64 },
13930 13940 { "conn_in_timestamp", KSTAT_DATA_UINT64 },
13931 13941 };
13932 13942
13933 13943 ksp = kstat_create_netstack("ip", 0, "ipstat", "net",
13934 13944 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
13935 13945 KSTAT_FLAG_VIRTUAL, stackid);
13936 13946
13937 13947 if (ksp == NULL)
13938 13948 return (NULL);
13939 13949
13940 13950 bcopy(&template, ip_statisticsp, sizeof (template));
13941 13951 ksp->ks_data = (void *)ip_statisticsp;
13942 13952 ksp->ks_private = (void *)(uintptr_t)stackid;
13943 13953
13944 13954 kstat_install(ksp);
13945 13955 return (ksp);
13946 13956 }
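/*
 * Observability note (sketch): since these counters are published per
 * netstack under module "ip", name "ipstat", they can be read from
 * the global zone with e.g. "kstat -m ip -n ipstat".
 */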
13947 13957
13948 13958 static void
13949 13959 ip_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
13950 13960 {
13951 13961 if (ksp != NULL) {
13952 13962 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
13953 13963 kstat_delete_netstack(ksp, stackid);
13954 13964 }
13955 13965 }
13956 13966
13957 13967 static void *
13958 13968 ip_kstat_init(netstackid_t stackid, ip_stack_t *ipst)
13959 13969 {
13960 13970 kstat_t *ksp;
13961 13971
13962 13972 ip_named_kstat_t template = {
13963 13973 { "forwarding", KSTAT_DATA_UINT32, 0 },
13964 13974 { "defaultTTL", KSTAT_DATA_UINT32, 0 },
13965 13975 { "inReceives", KSTAT_DATA_UINT64, 0 },
13966 13976 { "inHdrErrors", KSTAT_DATA_UINT32, 0 },
13967 13977 { "inAddrErrors", KSTAT_DATA_UINT32, 0 },
13968 13978 { "forwDatagrams", KSTAT_DATA_UINT64, 0 },
13969 13979 { "inUnknownProtos", KSTAT_DATA_UINT32, 0 },
13970 13980 { "inDiscards", KSTAT_DATA_UINT32, 0 },
13971 13981 { "inDelivers", KSTAT_DATA_UINT64, 0 },
13972 13982 { "outRequests", KSTAT_DATA_UINT64, 0 },
13973 13983 { "outDiscards", KSTAT_DATA_UINT32, 0 },
13974 13984 { "outNoRoutes", KSTAT_DATA_UINT32, 0 },
13975 13985 { "reasmTimeout", KSTAT_DATA_UINT32, 0 },
13976 13986 { "reasmReqds", KSTAT_DATA_UINT32, 0 },
13977 13987 { "reasmOKs", KSTAT_DATA_UINT32, 0 },
13978 13988 { "reasmFails", KSTAT_DATA_UINT32, 0 },
13979 13989 { "fragOKs", KSTAT_DATA_UINT32, 0 },
13980 13990 { "fragFails", KSTAT_DATA_UINT32, 0 },
13981 13991 { "fragCreates", KSTAT_DATA_UINT32, 0 },
13982 13992 { "addrEntrySize", KSTAT_DATA_INT32, 0 },
13983 13993 { "routeEntrySize", KSTAT_DATA_INT32, 0 },
13984 13994 { "netToMediaEntrySize", KSTAT_DATA_INT32, 0 },
13985 13995 { "routingDiscards", KSTAT_DATA_UINT32, 0 },
13986 13996 { "inErrs", KSTAT_DATA_UINT32, 0 },
13987 13997 { "noPorts", KSTAT_DATA_UINT32, 0 },
13988 13998 { "inCksumErrs", KSTAT_DATA_UINT32, 0 },
13989 13999 { "reasmDuplicates", KSTAT_DATA_UINT32, 0 },
13990 14000 { "reasmPartDups", KSTAT_DATA_UINT32, 0 },
13991 14001 { "forwProhibits", KSTAT_DATA_UINT32, 0 },
13992 14002 { "udpInCksumErrs", KSTAT_DATA_UINT32, 0 },
13993 14003 { "udpInOverflows", KSTAT_DATA_UINT32, 0 },
13994 14004 { "rawipInOverflows", KSTAT_DATA_UINT32, 0 },
13995 14005 { "ipsecInSucceeded", KSTAT_DATA_UINT32, 0 },
13996 14006 { "ipsecInFailed", KSTAT_DATA_INT32, 0 },
13997 14007 { "memberEntrySize", KSTAT_DATA_INT32, 0 },
13998 14008 { "inIPv6", KSTAT_DATA_UINT32, 0 },
13999 14009 { "outIPv6", KSTAT_DATA_UINT32, 0 },
14000 14010 { "outSwitchIPv6", KSTAT_DATA_UINT32, 0 },
14001 14011 };
14002 14012
14003 14013 ksp = kstat_create_netstack("ip", 0, "ip", "mib2", KSTAT_TYPE_NAMED,
14004 14014 NUM_OF_FIELDS(ip_named_kstat_t), 0, stackid);
14005 14015 if (ksp == NULL || ksp->ks_data == NULL)
14006 14016 return (NULL);
14007 14017
14008 14018 template.forwarding.value.ui32 = WE_ARE_FORWARDING(ipst) ? 1:2;
14009 14019 template.defaultTTL.value.ui32 = (uint32_t)ipst->ips_ip_def_ttl;
14010 14020 template.reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14011 14021 template.addrEntrySize.value.i32 = sizeof (mib2_ipAddrEntry_t);
14012 14022 template.routeEntrySize.value.i32 = sizeof (mib2_ipRouteEntry_t);
14013 14023
14014 14024 template.netToMediaEntrySize.value.i32 =
14015 14025 sizeof (mib2_ipNetToMediaEntry_t);
14016 14026
14017 14027 template.memberEntrySize.value.i32 = sizeof (ipv6_member_t);
14018 14028
14019 14029 bcopy(&template, ksp->ks_data, sizeof (template));
14020 14030 ksp->ks_update = ip_kstat_update;
14021 14031 ksp->ks_private = (void *)(uintptr_t)stackid;
14022 14032
14023 14033 kstat_install(ksp);
14024 14034 return (ksp);
14025 14035 }
14026 14036
14027 14037 static void
14028 14038 ip_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14029 14039 {
14030 14040 if (ksp != NULL) {
14031 14041 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14032 14042 kstat_delete_netstack(ksp, stackid);
14033 14043 }
14034 14044 }
14035 14045
14036 14046 static int
14037 14047 ip_kstat_update(kstat_t *kp, int rw)
14038 14048 {
14039 14049 ip_named_kstat_t *ipkp;
14040 14050 mib2_ipIfStatsEntry_t ipmib;
14041 14051 ill_walk_context_t ctx;
14042 14052 ill_t *ill;
14043 14053 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14044 14054 netstack_t *ns;
14045 14055 ip_stack_t *ipst;
14046 14056
14047 14057 if (kp == NULL || kp->ks_data == NULL)
14048 14058 return (EIO);
14049 14059
14050 14060 if (rw == KSTAT_WRITE)
14051 14061 return (EACCES);
14052 14062
14053 14063 ns = netstack_find_by_stackid(stackid);
14054 14064 if (ns == NULL)
14055 14065 return (-1);
14056 14066 ipst = ns->netstack_ip;
14057 14067 if (ipst == NULL) {
14058 14068 netstack_rele(ns);
14059 14069 return (-1);
14060 14070 }
14061 14071 ipkp = (ip_named_kstat_t *)kp->ks_data;
14062 14072
14063 14073 bcopy(&ipst->ips_ip_mib, &ipmib, sizeof (ipmib));
14064 14074 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
14065 14075 ill = ILL_START_WALK_V4(&ctx, ipst);
14066 14076 for (; ill != NULL; ill = ill_next(&ctx, ill))
14067 14077 ip_mib2_add_ip_stats(&ipmib, ill->ill_ip_mib);
14068 14078 rw_exit(&ipst->ips_ill_g_lock);
14069 14079
14070 14080 ipkp->forwarding.value.ui32 = ipmib.ipIfStatsForwarding;
14071 14081 ipkp->defaultTTL.value.ui32 = ipmib.ipIfStatsDefaultTTL;
14072 14082 ipkp->inReceives.value.ui64 = ipmib.ipIfStatsHCInReceives;
14073 14083 ipkp->inHdrErrors.value.ui32 = ipmib.ipIfStatsInHdrErrors;
14074 14084 ipkp->inAddrErrors.value.ui32 = ipmib.ipIfStatsInAddrErrors;
14075 14085 ipkp->forwDatagrams.value.ui64 = ipmib.ipIfStatsHCOutForwDatagrams;
14076 14086 ipkp->inUnknownProtos.value.ui32 = ipmib.ipIfStatsInUnknownProtos;
14077 14087 ipkp->inDiscards.value.ui32 = ipmib.ipIfStatsInDiscards;
14078 14088 ipkp->inDelivers.value.ui64 = ipmib.ipIfStatsHCInDelivers;
14079 14089 ipkp->outRequests.value.ui64 = ipmib.ipIfStatsHCOutRequests;
14080 14090 ipkp->outDiscards.value.ui32 = ipmib.ipIfStatsOutDiscards;
14081 14091 ipkp->outNoRoutes.value.ui32 = ipmib.ipIfStatsOutNoRoutes;
14082 14092 ipkp->reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14083 14093 ipkp->reasmReqds.value.ui32 = ipmib.ipIfStatsReasmReqds;
14084 14094 ipkp->reasmOKs.value.ui32 = ipmib.ipIfStatsReasmOKs;
14085 14095 ipkp->reasmFails.value.ui32 = ipmib.ipIfStatsReasmFails;
14086 14096 ipkp->fragOKs.value.ui32 = ipmib.ipIfStatsOutFragOKs;
14087 14097 ipkp->fragFails.value.ui32 = ipmib.ipIfStatsOutFragFails;
14088 14098 ipkp->fragCreates.value.ui32 = ipmib.ipIfStatsOutFragCreates;
14089 14099
14090 14100 ipkp->routingDiscards.value.ui32 = 0;
14091 14101 ipkp->inErrs.value.ui32 = ipmib.tcpIfStatsInErrs;
14092 14102 ipkp->noPorts.value.ui32 = ipmib.udpIfStatsNoPorts;
14093 14103 ipkp->inCksumErrs.value.ui32 = ipmib.ipIfStatsInCksumErrs;
14094 14104 ipkp->reasmDuplicates.value.ui32 = ipmib.ipIfStatsReasmDuplicates;
14095 14105 ipkp->reasmPartDups.value.ui32 = ipmib.ipIfStatsReasmPartDups;
14096 14106 ipkp->forwProhibits.value.ui32 = ipmib.ipIfStatsForwProhibits;
14097 14107 ipkp->udpInCksumErrs.value.ui32 = ipmib.udpIfStatsInCksumErrs;
14098 14108 ipkp->udpInOverflows.value.ui32 = ipmib.udpIfStatsInOverflows;
14099 14109 ipkp->rawipInOverflows.value.ui32 = ipmib.rawipIfStatsInOverflows;
14100 14110 ipkp->ipsecInSucceeded.value.ui32 = ipmib.ipsecIfStatsInSucceeded;
14101 14111 ipkp->ipsecInFailed.value.i32 = ipmib.ipsecIfStatsInFailed;
14102 14112
14103 14113 ipkp->inIPv6.value.ui32 = ipmib.ipIfStatsInWrongIPVersion;
14104 14114 ipkp->outIPv6.value.ui32 = ipmib.ipIfStatsOutWrongIPVersion;
14105 14115 ipkp->outSwitchIPv6.value.ui32 = ipmib.ipIfStatsOutSwitchIPVersion;
14106 14116
14107 14117 netstack_rele(ns);
14108 14118
14109 14119 return (0);
14110 14120 }
14111 14121
14112 14122 static void *
14113 14123 icmp_kstat_init(netstackid_t stackid)
14114 14124 {
14115 14125 kstat_t *ksp;
14116 14126
14117 14127 icmp_named_kstat_t template = {
14118 14128 { "inMsgs", KSTAT_DATA_UINT32 },
14119 14129 { "inErrors", KSTAT_DATA_UINT32 },
14120 14130 { "inDestUnreachs", KSTAT_DATA_UINT32 },
14121 14131 { "inTimeExcds", KSTAT_DATA_UINT32 },
14122 14132 { "inParmProbs", KSTAT_DATA_UINT32 },
14123 14133 { "inSrcQuenchs", KSTAT_DATA_UINT32 },
14124 14134 { "inRedirects", KSTAT_DATA_UINT32 },
14125 14135 { "inEchos", KSTAT_DATA_UINT32 },
14126 14136 { "inEchoReps", KSTAT_DATA_UINT32 },
14127 14137 { "inTimestamps", KSTAT_DATA_UINT32 },
14128 14138 { "inTimestampReps", KSTAT_DATA_UINT32 },
14129 14139 { "inAddrMasks", KSTAT_DATA_UINT32 },
14130 14140 { "inAddrMaskReps", KSTAT_DATA_UINT32 },
14131 14141 { "outMsgs", KSTAT_DATA_UINT32 },
14132 14142 { "outErrors", KSTAT_DATA_UINT32 },
14133 14143 { "outDestUnreachs", KSTAT_DATA_UINT32 },
14134 14144 { "outTimeExcds", KSTAT_DATA_UINT32 },
14135 14145 { "outParmProbs", KSTAT_DATA_UINT32 },
14136 14146 { "outSrcQuenchs", KSTAT_DATA_UINT32 },
14137 14147 { "outRedirects", KSTAT_DATA_UINT32 },
14138 14148 { "outEchos", KSTAT_DATA_UINT32 },
14139 14149 { "outEchoReps", KSTAT_DATA_UINT32 },
14140 14150 { "outTimestamps", KSTAT_DATA_UINT32 },
14141 14151 { "outTimestampReps", KSTAT_DATA_UINT32 },
14142 14152 { "outAddrMasks", KSTAT_DATA_UINT32 },
14143 14153 { "outAddrMaskReps", KSTAT_DATA_UINT32 },
14144 14154 { "inCksumErrs", KSTAT_DATA_UINT32 },
14145 14155 { "inUnknowns", KSTAT_DATA_UINT32 },
14146 14156 { "inFragNeeded", KSTAT_DATA_UINT32 },
14147 14157 { "outFragNeeded", KSTAT_DATA_UINT32 },
14148 14158 { "outDrops", KSTAT_DATA_UINT32 },
14149 14159 { "inOverflows", KSTAT_DATA_UINT32 },
14150 14160 { "inBadRedirects", KSTAT_DATA_UINT32 },
14151 14161 };
14152 14162
14153 14163 ksp = kstat_create_netstack("ip", 0, "icmp", "mib2", KSTAT_TYPE_NAMED,
14154 14164 NUM_OF_FIELDS(icmp_named_kstat_t), 0, stackid);
14155 14165 if (ksp == NULL || ksp->ks_data == NULL)
14156 14166 return (NULL);
14157 14167
14158 14168 bcopy(&template, ksp->ks_data, sizeof (template));
14159 14169
14160 14170 ksp->ks_update = icmp_kstat_update;
14161 14171 ksp->ks_private = (void *)(uintptr_t)stackid;
14162 14172
14163 14173 kstat_install(ksp);
14164 14174 return (ksp);
14165 14175 }
14166 14176
14167 14177 static void
14168 14178 icmp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14169 14179 {
14170 14180 if (ksp != NULL) {
14171 14181 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14172 14182 kstat_delete_netstack(ksp, stackid);
14173 14183 }
14174 14184 }
14175 14185
14176 14186 static int
14177 14187 icmp_kstat_update(kstat_t *kp, int rw)
14178 14188 {
14179 14189 icmp_named_kstat_t *icmpkp;
14180 14190 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14181 14191 netstack_t *ns;
14182 14192 ip_stack_t *ipst;
14183 14193
14184 14194 if ((kp == NULL) || (kp->ks_data == NULL))
14185 14195 return (EIO);
14186 14196
14187 14197 if (rw == KSTAT_WRITE)
14188 14198 return (EACCES);
14189 14199
14190 14200 ns = netstack_find_by_stackid(stackid);
14191 14201 if (ns == NULL)
14192 14202 return (-1);
14193 14203 ipst = ns->netstack_ip;
14194 14204 if (ipst == NULL) {
14195 14205 netstack_rele(ns);
14196 14206 return (-1);
14197 14207 }
14198 14208 icmpkp = (icmp_named_kstat_t *)kp->ks_data;
14199 14209
14200 14210 icmpkp->inMsgs.value.ui32 = ipst->ips_icmp_mib.icmpInMsgs;
14201 14211 icmpkp->inErrors.value.ui32 = ipst->ips_icmp_mib.icmpInErrors;
14202 14212 icmpkp->inDestUnreachs.value.ui32 =
14203 14213 ipst->ips_icmp_mib.icmpInDestUnreachs;
14204 14214 icmpkp->inTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpInTimeExcds;
14205 14215 icmpkp->inParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpInParmProbs;
14206 14216 icmpkp->inSrcQuenchs.value.ui32 = ipst->ips_icmp_mib.icmpInSrcQuenchs;
14207 14217 icmpkp->inRedirects.value.ui32 = ipst->ips_icmp_mib.icmpInRedirects;
14208 14218 icmpkp->inEchos.value.ui32 = ipst->ips_icmp_mib.icmpInEchos;
14209 14219 icmpkp->inEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpInEchoReps;
14210 14220 icmpkp->inTimestamps.value.ui32 = ipst->ips_icmp_mib.icmpInTimestamps;
14211 14221 icmpkp->inTimestampReps.value.ui32 =
14212 14222 ipst->ips_icmp_mib.icmpInTimestampReps;
14213 14223 icmpkp->inAddrMasks.value.ui32 = ipst->ips_icmp_mib.icmpInAddrMasks;
14214 14224 icmpkp->inAddrMaskReps.value.ui32 =
14215 14225 ipst->ips_icmp_mib.icmpInAddrMaskReps;
14216 14226 icmpkp->outMsgs.value.ui32 = ipst->ips_icmp_mib.icmpOutMsgs;
14217 14227 icmpkp->outErrors.value.ui32 = ipst->ips_icmp_mib.icmpOutErrors;
14218 14228 icmpkp->outDestUnreachs.value.ui32 =
14219 14229 ipst->ips_icmp_mib.icmpOutDestUnreachs;
14220 14230 icmpkp->outTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpOutTimeExcds;
14221 14231 icmpkp->outParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpOutParmProbs;
14222 14232 icmpkp->outSrcQuenchs.value.ui32 =
14223 14233 ipst->ips_icmp_mib.icmpOutSrcQuenchs;
14224 14234 icmpkp->outRedirects.value.ui32 = ipst->ips_icmp_mib.icmpOutRedirects;
14225 14235 icmpkp->outEchos.value.ui32 = ipst->ips_icmp_mib.icmpOutEchos;
14226 14236 icmpkp->outEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpOutEchoReps;
14227 14237 icmpkp->outTimestamps.value.ui32 =
14228 14238 ipst->ips_icmp_mib.icmpOutTimestamps;
14229 14239 icmpkp->outTimestampReps.value.ui32 =
14230 14240 ipst->ips_icmp_mib.icmpOutTimestampReps;
14231 14241 icmpkp->outAddrMasks.value.ui32 =
14232 14242 ipst->ips_icmp_mib.icmpOutAddrMasks;
14233 14243 icmpkp->outAddrMaskReps.value.ui32 =
14234 14244 ipst->ips_icmp_mib.icmpOutAddrMaskReps;
14235 14245 icmpkp->inCksumErrs.value.ui32 = ipst->ips_icmp_mib.icmpInCksumErrs;
14236 14246 icmpkp->inUnknowns.value.ui32 = ipst->ips_icmp_mib.icmpInUnknowns;
14237 14247 icmpkp->inFragNeeded.value.ui32 = ipst->ips_icmp_mib.icmpInFragNeeded;
14238 14248 icmpkp->outFragNeeded.value.ui32 =
14239 14249 ipst->ips_icmp_mib.icmpOutFragNeeded;
14240 14250 icmpkp->outDrops.value.ui32 = ipst->ips_icmp_mib.icmpOutDrops;
14241 14251 icmpkp->inOverflows.value.ui32 = ipst->ips_icmp_mib.icmpInOverflows;
14242 14252 icmpkp->inBadRedirects.value.ui32 =
14243 14253 ipst->ips_icmp_mib.icmpInBadRedirects;
14244 14254
14245 14255 netstack_rele(ns);
14246 14256 return (0);
14247 14257 }
14248 14258
14249 14259 /*
14250 14260 * This is the fanout function for raw socket opened for SCTP. Note
14251 14261 * that it is called after SCTP checks that there is no socket which
14252 14262 * wants a packet. Then before SCTP handles this out of the blue packet,
14253 14263 * this function is called to see if there is any raw socket for SCTP.
14254 14264 * If there is and it is bound to the correct address, the packet will
14255 14265 * be sent to that socket. Note that only one raw socket can be bound to
14256 14266 * a port. This is assured in ipcl_sctp_hash_insert().
14257 14267 */
14258 14268 void
14259 14269 ip_fanout_sctp_raw(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, uint32_t ports,
14260 14270 ip_recv_attr_t *ira)
14261 14271 {
14262 14272 conn_t *connp;
14263 14273 queue_t *rq;
14264 14274 boolean_t secure;
14265 14275 ill_t *ill = ira->ira_ill;
14266 14276 ip_stack_t *ipst = ill->ill_ipst;
14267 14277 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
14268 14278 sctp_stack_t *sctps = ipst->ips_netstack->netstack_sctp;
14269 14279 iaflags_t iraflags = ira->ira_flags;
14270 14280 ill_t *rill = ira->ira_rill;
14271 14281
14272 14282 secure = iraflags & IRAF_IPSEC_SECURE;
14273 14283
14274 14284 connp = ipcl_classify_raw(mp, IPPROTO_SCTP, ports, ipha, ip6h,
14275 14285 ira, ipst);
14276 14286 if (connp == NULL) {
14277 14287 /*
14278 14288 * Although raw sctp is not summed, OOB chunks must be.
14279 14289 * Drop the packet here if the sctp checksum failed.
14280 14290 */
14281 14291 if (iraflags & IRAF_SCTP_CSUM_ERR) {
14282 14292 SCTPS_BUMP_MIB(sctps, sctpChecksumError);
14283 14293 freemsg(mp);
14284 14294 return;
14285 14295 }
14286 14296 ira->ira_ill = ira->ira_rill = NULL;
14287 14297 sctp_ootb_input(mp, ira, ipst);
14288 14298 ira->ira_ill = ill;
14289 14299 ira->ira_rill = rill;
14290 14300 return;
14291 14301 }
14292 14302 rq = connp->conn_rq;
14293 14303 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
14294 14304 CONN_DEC_REF(connp);
14295 14305 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
14296 14306 freemsg(mp);
14297 14307 return;
14298 14308 }
14299 14309 if (((iraflags & IRAF_IS_IPV4) ?
14300 14310 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
14301 14311 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
14302 14312 secure) {
14303 14313 mp = ipsec_check_inbound_policy(mp, connp, ipha,
14304 14314 ip6h, ira);
14305 14315 if (mp == NULL) {
14306 14316 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
14307 14317 /* Note that mp is NULL */
14308 14318 ip_drop_input("ipIfStatsInDiscards", mp, ill);
14309 14319 CONN_DEC_REF(connp);
14310 14320 return;
14311 14321 }
14312 14322 }
14313 14323
14314 14324 if (iraflags & IRAF_ICMP_ERROR) {
14315 14325 (connp->conn_recvicmp)(connp, mp, NULL, ira);
14316 14326 } else {
14317 14327 ill_t *rill = ira->ira_rill;
14318 14328
14319 14329 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
14320 14330 /* This is the SOCK_RAW, IPPROTO_SCTP case. */
14321 14331 ira->ira_ill = ira->ira_rill = NULL;
14322 14332 (connp->conn_recv)(connp, mp, NULL, ira);
14323 14333 ira->ira_ill = ill;
14324 14334 ira->ira_rill = rill;
14325 14335 }
14326 14336 CONN_DEC_REF(connp);
14327 14337 }
14328 14338
14329 14339 /*
14330 14340 * Free a packet that has the link-layer dl_unitdata_req_t or fast-path
14331 14341 * header before the ip payload.
14332 14342 */
14333 14343 static void
14334 14344 ip_xmit_flowctl_drop(ill_t *ill, mblk_t *mp, boolean_t is_fp_mp, int fp_mp_len)
14335 14345 {
14336 14346 int len = (mp->b_wptr - mp->b_rptr);
14337 14347 mblk_t *ip_mp;
14338 14348
14339 14349 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14340 14350 if (is_fp_mp || len != fp_mp_len) {
14341 14351 if (len > fp_mp_len) {
14342 14352 /*
14343 14353 * fastpath header and ip header in the first mblk
14344 14354 */
14345 14355 mp->b_rptr += fp_mp_len;
14346 14356 } else {
14347 14357 /*
14348 14358 * ip_xmit_attach_llhdr had to prepend an mblk to
14349 14359 * attach the fastpath header before ip header.
14350 14360 */
14351 14361 ip_mp = mp->b_cont;
14352 14362 freeb(mp);
14353 14363 mp = ip_mp;
14354 14364 mp->b_rptr += (fp_mp_len - len);
14355 14365 }
14356 14366 } else {
14357 14367 ip_mp = mp->b_cont;
14358 14368 freeb(mp);
14359 14369 mp = ip_mp;
14360 14370 }
14361 14371 ip_drop_output("ipIfStatsOutDiscards - flow ctl", mp, ill);
14362 14372 freemsg(mp);
14363 14373 }
14364 14374
14365 14375 /*
14366 14376 * Normal post fragmentation function.
14367 14377 *
14368 14378 * Send a packet using the passed in nce. This handles both IPv4 and IPv6
14369 14379 * using the same state machine.
14370 14380 *
14371 14381 * We return an error on failure. In particular we return EWOULDBLOCK
14372 14382 * when the driver flow controls. In that case this ensures that ip_wsrv runs
14373 14383 * (currently by canputnext failure resulting in backenabling from GLD).
14374 14384 * This allows the callers of conn_ip_output() to use EWOULDBLOCK as an
14375 14385 * indication that they can flow control until ip_wsrv() tells them to restart.
14376 14386 *
14377 14387 * If the nce passed by caller is incomplete, this function
14378 14388 * queues the packet and if necessary, sends ARP request and bails.
14379 14389 * If the Neighbor Cache passed is fully resolved, we simply prepend
14380 14390 * the link-layer header to the packet, do ipsec hw acceleration
14381 14391 * work if necessary, and send the packet out on the wire.
14382 14392 */
14383 14393 /* ARGSUSED6 */
14384 14394 int
14385 14395 ip_xmit(mblk_t *mp, nce_t *nce, iaflags_t ixaflags, uint_t pkt_len,
14386 14396 uint32_t xmit_hint, zoneid_t szone, zoneid_t nolzid, uintptr_t *ixacookie)
14387 14397 {
14388 14398 queue_t *wq;
14389 14399 ill_t *ill = nce->nce_ill;
14390 14400 ip_stack_t *ipst = ill->ill_ipst;
14391 14401 uint64_t delta;
14392 14402 boolean_t isv6 = ill->ill_isv6;
14393 14403 boolean_t fp_mp;
14394 14404 ncec_t *ncec = nce->nce_common;
14395 14405 int64_t now = LBOLT_FASTPATH64;
14396 14406 boolean_t is_probe;
14397 14407
14398 14408 DTRACE_PROBE1(ip__xmit, nce_t *, nce);
14399 14409
14400 14410 ASSERT(mp != NULL);
14401 14411 ASSERT(mp->b_datap->db_type == M_DATA);
14402 14412 ASSERT(pkt_len == msgdsize(mp));
14403 14413
14404 14414 /*
14405 14415 * If we have already been here and are coming back after ARP/ND,
14406 14416 * the IXAF_NO_TRACE flag is set. We skip FW_HOOKS, DTRACE and ipobs
14407 14417 * in that case since they have seen the packet when it came here
14408 14418 * the first time.
14409 14419 */
14410 14420 if (ixaflags & IXAF_NO_TRACE)
14411 14421 goto sendit;
14412 14422
14413 14423 if (ixaflags & IXAF_IS_IPV4) {
14414 14424 ipha_t *ipha = (ipha_t *)mp->b_rptr;
14415 14425
14416 14426 ASSERT(!isv6);
14417 14427 ASSERT(pkt_len == ntohs(((ipha_t *)mp->b_rptr)->ipha_length));
14418 14428 if (HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) &&
14419 14429 !(ixaflags & IXAF_NO_PFHOOK)) {
14420 14430 int error;
14421 14431
14422 14432 FW_HOOKS(ipst->ips_ip4_physical_out_event,
14423 14433 ipst->ips_ipv4firewall_physical_out,
14424 14434 NULL, ill, ipha, mp, mp, 0, ipst, error);
14425 14435 DTRACE_PROBE1(ip4__physical__out__end,
14426 14436 mblk_t *, mp);
14427 14437 if (mp == NULL)
14428 14438 return (error);
14429 14439
14430 14440 /* The length could have changed */
14431 14441 pkt_len = msgdsize(mp);
14432 14442 }
14433 14443 if (ipst->ips_ip4_observe.he_interested) {
14434 14444 /*
14435 14445 * Note that for TX the zoneid is the sending
14436 14446 * zone, whether or not MLP is in play.
14437 14447 * Since the szone argument is the IP zoneid (i.e.,
14438 14448 * zero for exclusive-IP zones) and ipobs wants
14439 14449 * the system zoneid, we map it here.
14440 14450 */
14441 14451 szone = IP_REAL_ZONEID(szone, ipst);
14442 14452
14443 14453 /*
14444 14454 * On the outbound path the destination zone will be
14445 14455 * unknown as we're sending this packet out on the
14446 14456 * wire.
14447 14457 */
14448 14458 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14449 14459 ill, ipst);
14450 14460 }
14451 14461 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14452 14462 void_ip_t *, ipha, __dtrace_ipsr_ill_t *, ill,
14453 14463 ipha_t *, ipha, ip6_t *, NULL, int, 0);
14454 14464 } else {
14455 14465 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
14456 14466
14457 14467 ASSERT(isv6);
14458 14468 ASSERT(pkt_len ==
14459 14469 ntohs(((ip6_t *)mp->b_rptr)->ip6_plen) + IPV6_HDR_LEN);
14460 14470 if (HOOKS6_INTERESTED_PHYSICAL_OUT(ipst) &&
14461 14471 !(ixaflags & IXAF_NO_PFHOOK)) {
14462 14472 int error;
14463 14473
14464 14474 FW_HOOKS6(ipst->ips_ip6_physical_out_event,
14465 14475 ipst->ips_ipv6firewall_physical_out,
14466 14476 NULL, ill, ip6h, mp, mp, 0, ipst, error);
14467 14477 DTRACE_PROBE1(ip6__physical__out__end,
14468 14478 mblk_t *, mp);
14469 14479 if (mp == NULL)
14470 14480 return (error);
14471 14481
14472 14482 /* The length could have changed */
14473 14483 pkt_len = msgdsize(mp);
14474 14484 }
14475 14485 if (ipst->ips_ip6_observe.he_interested) {
14476 14486 /* See above */
14477 14487 szone = IP_REAL_ZONEID(szone, ipst);
14478 14488
14479 14489 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14480 14490 ill, ipst);
14481 14491 }
14482 14492 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14483 14493 void_ip_t *, ip6h, __dtrace_ipsr_ill_t *, ill,
14484 14494 ipha_t *, NULL, ip6_t *, ip6h, int, 0);
14485 14495 }
14486 14496
14487 14497 sendit:
14488 14498 /*
14489 14499 * We check the state without a lock because the state can never
14490 14500 * move "backwards" to initial or incomplete.
14491 14501 */
14492 14502 switch (ncec->ncec_state) {
14493 14503 case ND_REACHABLE:
14494 14504 case ND_STALE:
14495 14505 case ND_DELAY:
14496 14506 case ND_PROBE:
14497 14507 mp = ip_xmit_attach_llhdr(mp, nce);
14498 14508 if (mp == NULL) {
14499 14509 /*
14500 14510 * ip_xmit_attach_llhdr has increased
14501 14511 * ipIfStatsOutDiscards and called ip_drop_output()
14502 14512 */
14503 14513 return (ENOBUFS);
14504 14514 }
14505 14515 /*
14506 14516 * check if nce_fastpath completed and we tagged on a
14507 14517 * copy of nce_fp_mp in ip_xmit_attach_llhdr().
14508 14518 */
14509 14519 fp_mp = (mp->b_datap->db_type == M_DATA);
14510 14520
14511 14521 if (fp_mp &&
14512 14522 (ill->ill_capabilities & ILL_CAPAB_DLD_DIRECT)) {
14513 14523 ill_dld_direct_t *idd;
14514 14524
14515 14525 idd = &ill->ill_dld_capab->idc_direct;
14516 14526 /*
14517 14527 * Send the packet directly to DLD, where it
14518 14528 * may be queued depending on the availability
14519 14529 * of transmit resources at the media layer.
14520 14530 * The return value should be taken into
14521 14531 * account and used to flow control the TCP.
14522 14532 */
14523 14533 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14524 14534 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14525 14535 pkt_len);
14526 14536
14527 14537 if (ixaflags & IXAF_NO_DEV_FLOW_CTL) {
14528 14538 (void) idd->idd_tx_df(idd->idd_tx_dh, mp,
14529 14539 (uintptr_t)xmit_hint, IP_DROP_ON_NO_DESC);
14530 14540 } else {
14531 14541 uintptr_t cookie;
14532 14542
14533 14543 if ((cookie = idd->idd_tx_df(idd->idd_tx_dh,
14534 14544 mp, (uintptr_t)xmit_hint, 0)) != 0) {
14535 14545 if (ixacookie != NULL)
14536 14546 *ixacookie = cookie;
14537 14547 return (EWOULDBLOCK);
14538 14548 }
14539 14549 }
14540 14550 } else {
14541 14551 wq = ill->ill_wq;
14542 14552
14543 14553 if (!(ixaflags & IXAF_NO_DEV_FLOW_CTL) &&
14544 14554 !canputnext(wq)) {
14545 14555 if (ixacookie != NULL)
14546 14556 *ixacookie = 0;
14547 14557 ip_xmit_flowctl_drop(ill, mp, fp_mp,
14548 14558 nce->nce_fp_mp != NULL ?
14549 14559 MBLKL(nce->nce_fp_mp) : 0);
14550 14560 return (EWOULDBLOCK);
14551 14561 }
14552 14562 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14553 14563 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14554 14564 pkt_len);
14555 14565 putnext(wq, mp);
14556 14566 }
14557 14567
14558 14568 /*
14559 14569 * The rest of this function implements Neighbor Unreachability
14560 14570 * detection. Determine if the ncec is eligible for NUD.
14561 14571 */
14562 14572 if (ncec->ncec_flags & NCE_F_NONUD)
14563 14573 return (0);
14564 14574
14565 14575 ASSERT(ncec->ncec_state != ND_INCOMPLETE);
14566 14576
14567 14577 /*
14568 14578 * Check for upper layer advice
14569 14579 */
14570 14580 if (ixaflags & IXAF_REACH_CONF) {
14571 14581 timeout_id_t tid;
14572 14582
14573 14583 /*
14574 14584 * It should be o.k. to check the state without
14575 14585 * a lock here; at worst we lose a reachability confirmation.
14576 14586 */
14577 14587 ncec->ncec_last = TICK_TO_MSEC(now);
14578 14588 if (ncec->ncec_state != ND_REACHABLE) {
14579 14589 mutex_enter(&ncec->ncec_lock);
14580 14590 ncec->ncec_state = ND_REACHABLE;
14581 14591 tid = ncec->ncec_timeout_id;
14582 14592 ncec->ncec_timeout_id = 0;
14583 14593 mutex_exit(&ncec->ncec_lock);
14584 14594 (void) untimeout(tid);
14585 14595 if (ip_debug > 2) {
14586 14596 /* ip1dbg */
14587 14597 pr_addr_dbg("ip_xmit: state"
14588 14598 " for %s changed to"
14589 14599 " REACHABLE\n", AF_INET6,
14590 14600 &ncec->ncec_addr);
14591 14601 }
14592 14602 }
14593 14603 return (0);
14594 14604 }
14595 14605
14596 14606 delta = TICK_TO_MSEC(now) - ncec->ncec_last;
14597 14607 ip1dbg(("ip_xmit: delta = %" PRId64
14598 14608 " ill_reachable_time = %d \n", delta,
14599 14609 ill->ill_reachable_time));
14600 14610 if (delta > (uint64_t)ill->ill_reachable_time) {
14601 14611 mutex_enter(&ncec->ncec_lock);
14602 14612 switch (ncec->ncec_state) {
14603 14613 case ND_REACHABLE:
14604 14614 ASSERT((ncec->ncec_flags & NCE_F_NONUD) == 0);
14605 14615 /* FALLTHROUGH */
14606 14616 case ND_STALE:
14607 14617 /*
14608 14618 * ND_REACHABLE is identical to
14609 14619 * ND_STALE in this specific case. If
14610 14620 * reachable time has expired for this
14611 14621 * neighbor (delta is greater than
14612 14622 * reachable time), conceptually, the
14613 14623 * neighbor cache is no longer in
14614 14624 * REACHABLE state, but already in
14615 14625 * STALE state. So the correct
14616 14626 * transition here is to ND_DELAY.
14617 14627 */
14618 14628 ncec->ncec_state = ND_DELAY;
14619 14629 mutex_exit(&ncec->ncec_lock);
14620 14630 nce_restart_timer(ncec,
14621 14631 ipst->ips_delay_first_probe_time);
14622 14632 if (ip_debug > 3) {
14623 14633 /* ip2dbg */
14624 14634 pr_addr_dbg("ip_xmit: state"
14625 14635 " for %s changed to"
14626 14636 " DELAY\n", AF_INET6,
14627 14637 &ncec->ncec_addr);
14628 14638 }
14629 14639 break;
14630 14640 case ND_DELAY:
14631 14641 case ND_PROBE:
14632 14642 mutex_exit(&ncec->ncec_lock);
14633 14643 /* Timers have already started */
14634 14644 break;
14635 14645 case ND_UNREACHABLE:
14636 14646 /*
14637 14647 * nce_timer has detected that this ncec
14638 14648 * is unreachable and initiated deleting
14639 14649 * this ncec.
14640 14650 * This is a harmless race where we found the
14641 14651 * ncec before it was deleted and have
14642 14652 * just sent out a packet using this
14643 14653 * unreachable ncec.
14644 14654 */
14645 14655 mutex_exit(&ncec->ncec_lock);
14646 14656 break;
14647 14657 default:
14648 14658 ASSERT(0);
14649 14659 mutex_exit(&ncec->ncec_lock);
14650 14660 }
14651 14661 }
14652 14662 return (0);
14653 14663
14654 14664 case ND_INCOMPLETE:
14655 14665 /*
14656 14666 * The state could have changed since we didn't hold the lock.
14657 14667 * Re-verify state under lock.
14658 14668 */
14659 14669 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14660 14670 mutex_enter(&ncec->ncec_lock);
14661 14671 if (NCE_ISREACHABLE(ncec)) {
14662 14672 mutex_exit(&ncec->ncec_lock);
14663 14673 goto sendit;
14664 14674 }
14665 14675 /* queue the packet */
14666 14676 nce_queue_mp(ncec, mp, is_probe);
14667 14677 mutex_exit(&ncec->ncec_lock);
14668 14678 DTRACE_PROBE2(ip__xmit__incomplete,
14669 14679 (ncec_t *), ncec, (mblk_t *), mp);
14670 14680 return (0);
14671 14681
14672 14682 case ND_INITIAL:
14673 14683 /*
14674 14684 * State could have changed since we didn't hold the lock, so
14675 14685 * re-verify state.
14676 14686 */
14677 14687 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14678 14688 mutex_enter(&ncec->ncec_lock);
14679 14689 if (NCE_ISREACHABLE(ncec)) {
14680 14690 mutex_exit(&ncec->ncec_lock);
14681 14691 goto sendit;
14682 14692 }
14683 14693 nce_queue_mp(ncec, mp, is_probe);
14684 14694 if (ncec->ncec_state == ND_INITIAL) {
14685 14695 ncec->ncec_state = ND_INCOMPLETE;
14686 14696 mutex_exit(&ncec->ncec_lock);
14687 14697 /*
14688 14698 * figure out the source we want to use
14689 14699 * and resolve it.
14690 14700 */
14691 14701 ip_ndp_resolve(ncec);
14692 14702 } else {
14693 14703 mutex_exit(&ncec->ncec_lock);
14694 14704 }
14695 14705 return (0);
14696 14706
14697 14707 case ND_UNREACHABLE:
14698 14708 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14699 14709 ip_drop_output("ipIfStatsOutDiscards - ND_UNREACHABLE",
14700 14710 mp, ill);
14701 14711 freemsg(mp);
14702 14712 return (0);
14703 14713
14704 14714 default:
14705 14715 ASSERT(0);
14706 14716 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14707 14717 ip_drop_output("ipIfStatsOutDiscards - ND_other",
14708 14718 mp, ill);
14709 14719 freemsg(mp);
14710 14720 return (ENETUNREACH);
14711 14721 }
14712 14722 }
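/*
 * Caller sketch (illustrative, not part of this change): a transport
 * typically reacts as
 *
 *	error = ip_xmit(mp, nce, ixaflags, pkt_len, hint, szone,
 *	    nolzid, &cookie);
 *	if (error == EWOULDBLOCK)
 *		remember cookie and stop sending;
 *
 * and resumes once the backenable arrives (ip_wsrv() on the STREAMS
 * path, the DLD direct-call notify otherwise).
 */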
14713 14723
14714 14724 /*
14715 14725 * Return B_TRUE if the buffers differ in length or content.
14716 14726 * This is used for comparing extension header buffers.
14717 14727 * Note that an extension header would be declared different
14718 14728 * even if all that changed was the next header value in that header, i.e.,
14719 14729 * what really changed is the next extension header.
14720 14730 */
14721 14731 boolean_t
14722 14732 ip_cmpbuf(const void *abuf, uint_t alen, boolean_t b_valid, const void *bbuf,
14723 14733 uint_t blen)
14724 14734 {
14725 14735 if (!b_valid)
14726 14736 blen = 0;
14727 14737
14728 14738 if (alen != blen)
14729 14739 return (B_TRUE);
14730 14740 if (alen == 0)
14731 14741 return (B_FALSE); /* Both zero length */
14732 14742 return (bcmp(abuf, bbuf, alen));
14733 14743 }
14734 14744
14735 14745 /*
14736 14746 * Preallocate memory for ip_savebuf(). Returns B_TRUE if ok.
14737 14747 * Return B_FALSE if memory allocation fails - don't change any state!
14738 14748 */
14739 14749 boolean_t
14740 14750 ip_allocbuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14741 14751 const void *src, uint_t srclen)
14742 14752 {
14743 14753 void *dst;
14744 14754
14745 14755 if (!src_valid)
14746 14756 srclen = 0;
14747 14757
14748 14758 ASSERT(*dstlenp == 0);
14749 14759 if (src != NULL && srclen != 0) {
14750 14760 dst = mi_alloc(srclen, BPRI_MED);
14751 14761 if (dst == NULL)
14752 14762 return (B_FALSE);
14753 14763 } else {
14754 14764 dst = NULL;
14755 14765 }
14756 14766 if (*dstp != NULL)
14757 14767 mi_free(*dstp);
14758 14768 *dstp = dst;
14759 14769 *dstlenp = dst == NULL ? 0 : srclen;
14760 14770 return (B_TRUE);
14761 14771 }
14762 14772
14763 14773 /*
14764 14774 * Replace what is in *dst, *dstlen with the source.
14765 14775 * Assumes ip_allocbuf has already been called.
14766 14776 */
14767 14777 void
14768 14778 ip_savebuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14769 14779 const void *src, uint_t srclen)
14770 14780 {
14771 14781 if (!src_valid)
14772 14782 srclen = 0;
14773 14783
14774 14784 ASSERT(*dstlenp == srclen);
14775 14785 if (src != NULL && srclen != 0)
14776 14786 bcopy(src, *dstp, srclen);
14777 14787 }
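/*
 * Two-phase sketch (not in the original source): ip_allocbuf() runs
 * while failure is still recoverable, ip_savebuf() runs after the
 * point of no return. The caller guarantees *dstlenp == 0 on entry
 * (see the ASSERT in ip_allocbuf()).
 */
#if 0
static int
optbuf_replace_sketch(void **dstp, uint_t *dstlenp, const void *src,
    uint_t srclen)
{
	if (!ip_allocbuf(dstp, dstlenp, B_TRUE, src, srclen))
		return (ENOMEM);	/* no state has been changed */
	/* ... commit other state changes that cannot fail ... */
	ip_savebuf(dstp, dstlenp, B_TRUE, src, srclen);
	return (0);
}
#endif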
14778 14788
14779 14789 /*
14780 14790 * Free the storage pointed to by the members of an ip_pkt_t.
14781 14791 */
14782 14792 void
14783 14793 ip_pkt_free(ip_pkt_t *ipp)
14784 14794 {
14785 14795 uint_t fields = ipp->ipp_fields;
14786 14796
14787 14797 if (fields & IPPF_HOPOPTS) {
14788 14798 kmem_free(ipp->ipp_hopopts, ipp->ipp_hopoptslen);
14789 14799 ipp->ipp_hopopts = NULL;
14790 14800 ipp->ipp_hopoptslen = 0;
14791 14801 }
14792 14802 if (fields & IPPF_RTHDRDSTOPTS) {
14793 14803 kmem_free(ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
14794 14804 ipp->ipp_rthdrdstopts = NULL;
14795 14805 ipp->ipp_rthdrdstoptslen = 0;
14796 14806 }
14797 14807 if (fields & IPPF_DSTOPTS) {
14798 14808 kmem_free(ipp->ipp_dstopts, ipp->ipp_dstoptslen);
14799 14809 ipp->ipp_dstopts = NULL;
14800 14810 ipp->ipp_dstoptslen = 0;
14801 14811 }
14802 14812 if (fields & IPPF_RTHDR) {
14803 14813 kmem_free(ipp->ipp_rthdr, ipp->ipp_rthdrlen);
14804 14814 ipp->ipp_rthdr = NULL;
14805 14815 ipp->ipp_rthdrlen = 0;
14806 14816 }
14807 14817 if (fields & IPPF_IPV4_OPTIONS) {
14808 14818 kmem_free(ipp->ipp_ipv4_options, ipp->ipp_ipv4_options_len);
14809 14819 ipp->ipp_ipv4_options = NULL;
14810 14820 ipp->ipp_ipv4_options_len = 0;
14811 14821 }
14812 14822 if (fields & IPPF_LABEL_V4) {
14813 14823 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
14814 14824 ipp->ipp_label_v4 = NULL;
14815 14825 ipp->ipp_label_len_v4 = 0;
14816 14826 }
14817 14827 if (fields & IPPF_LABEL_V6) {
14818 14828 kmem_free(ipp->ipp_label_v6, ipp->ipp_label_len_v6);
14819 14829 ipp->ipp_label_v6 = NULL;
14820 14830 ipp->ipp_label_len_v6 = 0;
14821 14831 }
14822 14832 ipp->ipp_fields &= ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14823 14833 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14824 14834 }
14825 14835
14826 14836 /*
14827 14837 * Copy from src to dst and allocate as needed.
14828 14838 * Returns zero or ENOMEM.
14829 14839 *
14830 14840 * The caller must initialize dst to zero.
14831 14841 */
14832 14842 int
14833 14843 ip_pkt_copy(ip_pkt_t *src, ip_pkt_t *dst, int kmflag)
14834 14844 {
14835 14845 uint_t fields = src->ipp_fields;
14836 14846
14837 14847 /* Start with fields that don't require memory allocation */
14838 14848 dst->ipp_fields = fields &
14839 14849 ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14840 14850 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14841 14851
14842 14852 dst->ipp_addr = src->ipp_addr;
14843 14853 dst->ipp_unicast_hops = src->ipp_unicast_hops;
14844 14854 dst->ipp_hoplimit = src->ipp_hoplimit;
14845 14855 dst->ipp_tclass = src->ipp_tclass;
14846 14856 dst->ipp_type_of_service = src->ipp_type_of_service;
14847 14857
14848 14858 if (!(fields & (IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14849 14859 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6)))
14850 14860 return (0);
14851 14861
14852 14862 if (fields & IPPF_HOPOPTS) {
14853 14863 dst->ipp_hopopts = kmem_alloc(src->ipp_hopoptslen, kmflag);
14854 14864 if (dst->ipp_hopopts == NULL) {
14855 14865 ip_pkt_free(dst);
14856 14866 return (ENOMEM);
14857 14867 }
14858 14868 dst->ipp_fields |= IPPF_HOPOPTS;
14859 14869 bcopy(src->ipp_hopopts, dst->ipp_hopopts,
14860 14870 src->ipp_hopoptslen);
14861 14871 dst->ipp_hopoptslen = src->ipp_hopoptslen;
14862 14872 }
14863 14873 if (fields & IPPF_RTHDRDSTOPTS) {
14864 14874 dst->ipp_rthdrdstopts = kmem_alloc(src->ipp_rthdrdstoptslen,
14865 14875 kmflag);
14866 14876 if (dst->ipp_rthdrdstopts == NULL) {
14867 14877 ip_pkt_free(dst);
14868 14878 return (ENOMEM);
14869 14879 }
14870 14880 dst->ipp_fields |= IPPF_RTHDRDSTOPTS;
14871 14881 bcopy(src->ipp_rthdrdstopts, dst->ipp_rthdrdstopts,
14872 14882 src->ipp_rthdrdstoptslen);
14873 14883 dst->ipp_rthdrdstoptslen = src->ipp_rthdrdstoptslen;
14874 14884 }
14875 14885 if (fields & IPPF_DSTOPTS) {
14876 14886 dst->ipp_dstopts = kmem_alloc(src->ipp_dstoptslen, kmflag);
14877 14887 if (dst->ipp_dstopts == NULL) {
14878 14888 ip_pkt_free(dst);
14879 14889 return (ENOMEM);
14880 14890 }
14881 14891 dst->ipp_fields |= IPPF_DSTOPTS;
14882 14892 bcopy(src->ipp_dstopts, dst->ipp_dstopts,
14883 14893 src->ipp_dstoptslen);
14884 14894 dst->ipp_dstoptslen = src->ipp_dstoptslen;
14885 14895 }
14886 14896 if (fields & IPPF_RTHDR) {
14887 14897 dst->ipp_rthdr = kmem_alloc(src->ipp_rthdrlen, kmflag);
14888 14898 if (dst->ipp_rthdr == NULL) {
14889 14899 ip_pkt_free(dst);
14890 14900 return (ENOMEM);
14891 14901 }
14892 14902 dst->ipp_fields |= IPPF_RTHDR;
14893 14903 bcopy(src->ipp_rthdr, dst->ipp_rthdr,
14894 14904 src->ipp_rthdrlen);
14895 14905 dst->ipp_rthdrlen = src->ipp_rthdrlen;
14896 14906 }
14897 14907 if (fields & IPPF_IPV4_OPTIONS) {
14898 14908 dst->ipp_ipv4_options = kmem_alloc(src->ipp_ipv4_options_len,
14899 14909 kmflag);
14900 14910 if (dst->ipp_ipv4_options == NULL) {
14901 14911 ip_pkt_free(dst);
14902 14912 return (ENOMEM);
14903 14913 }
14904 14914 dst->ipp_fields |= IPPF_IPV4_OPTIONS;
14905 14915 bcopy(src->ipp_ipv4_options, dst->ipp_ipv4_options,
14906 14916 src->ipp_ipv4_options_len);
14907 14917 dst->ipp_ipv4_options_len = src->ipp_ipv4_options_len;
14908 14918 }
14909 14919 if (fields & IPPF_LABEL_V4) {
14910 14920 dst->ipp_label_v4 = kmem_alloc(src->ipp_label_len_v4, kmflag);
14911 14921 if (dst->ipp_label_v4 == NULL) {
14912 14922 ip_pkt_free(dst);
14913 14923 return (ENOMEM);
14914 14924 }
14915 14925 dst->ipp_fields |= IPPF_LABEL_V4;
14916 14926 bcopy(src->ipp_label_v4, dst->ipp_label_v4,
14917 14927 src->ipp_label_len_v4);
14918 14928 dst->ipp_label_len_v4 = src->ipp_label_len_v4;
14919 14929 }
14920 14930 if (fields & IPPF_LABEL_V6) {
14921 14931 dst->ipp_label_v6 = kmem_alloc(src->ipp_label_len_v6, kmflag);
14922 14932 if (dst->ipp_label_v6 == NULL) {
14923 14933 ip_pkt_free(dst);
14924 14934 return (ENOMEM);
14925 14935 }
14926 14936 dst->ipp_fields |= IPPF_LABEL_V6;
14927 14937 bcopy(src->ipp_label_v6, dst->ipp_label_v6,
14928 14938 src->ipp_label_len_v6);
14929 14939 dst->ipp_label_len_v6 = src->ipp_label_len_v6;
14930 14940 }
14931 14941 if (fields & IPPF_FRAGHDR) {
14932 14942 dst->ipp_fraghdr = kmem_alloc(src->ipp_fraghdrlen, kmflag);
14933 14943 if (dst->ipp_fraghdr == NULL) {
14934 14944 ip_pkt_free(dst);
14935 14945 return (ENOMEM);
14936 14946 }
14937 14947 dst->ipp_fields |= IPPF_FRAGHDR;
14938 14948 bcopy(src->ipp_fraghdr, dst->ipp_fraghdr,
14939 14949 src->ipp_fraghdrlen);
14940 14950 dst->ipp_fraghdrlen = src->ipp_fraghdrlen;
14941 14951 }
14942 14952 return (0);
14943 14953 }
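/*
 * Usage sketch (assumption): the destination must start zeroed; on
 * ENOMEM ip_pkt_copy() has already released any partial allocations
 * through ip_pkt_free(), so the caller frees only after success.
 */
#if 0
static int
pkt_opts_clone_sketch(ip_pkt_t *src, ip_pkt_t *dst)
{
	bzero(dst, sizeof (*dst));
	if (ip_pkt_copy(src, dst, KM_NOSLEEP) != 0)
		return (ENOMEM);
	/* ... use dst; call ip_pkt_free(dst) when finished ... */
	return (0);
}
#endif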
14944 14954
14945 14955 /*
14946 14956 * Returns INADDR_ANY if no source route
14947 14957 */
14948 14958 ipaddr_t
14949 14959 ip_pkt_source_route_v4(const ip_pkt_t *ipp)
14950 14960 {
14951 14961 ipaddr_t nexthop = INADDR_ANY;
14952 14962 ipoptp_t opts;
14953 14963 uchar_t *opt;
14954 14964 uint8_t optval;
14955 14965 uint8_t optlen;
14956 14966 uint32_t totallen;
14957 14967
14958 14968 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
14959 14969 return (INADDR_ANY);
14960 14970
14961 14971 totallen = ipp->ipp_ipv4_options_len;
14962 14972 if (totallen & 0x3)
14963 14973 return (INADDR_ANY);
14964 14974
14965 14975 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
14966 14976 optval != IPOPT_EOL;
14967 14977 optval = ipoptp_next(&opts)) {
14968 14978 opt = opts.ipoptp_cur;
14969 14979 switch (optval) {
14970 14980 uint8_t off;
14971 14981 case IPOPT_SSRR:
14972 14982 case IPOPT_LSRR:
14973 14983 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
14974 14984 break;
14975 14985 }
14976 14986 optlen = opts.ipoptp_len;
14977 14987 off = opt[IPOPT_OFFSET];
14978 14988 off--;
14979 14989 if (optlen < IP_ADDR_LEN ||
14980 14990 off > optlen - IP_ADDR_LEN) {
14981 14991 /* End of source route */
14982 14992 break;
14983 14993 }
14984 14994 bcopy((char *)opt + off, &nexthop, IP_ADDR_LEN);
14985 14995 if (nexthop == htonl(INADDR_LOOPBACK)) {
14986 14996 /* Ignore */
14987 14997 nexthop = INADDR_ANY;
14988 14998 break;
14989 14999 }
14990 15000 break;
14991 15001 }
14992 15002 }
14993 15003 return (nexthop);
14994 15004 }
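
A worked example with illustrative addresses makes the pointer arithmetic concrete. IPOPT_OFFSET is 2 and the option's pointer octet is 1-based, so off = opt[IPOPT_OFFSET] - 1 indexes into the option itself. For an LSRR option carrying 10.0.0.1 and 10.0.0.2 with the pointer still at the first slot:

	opt[0] = 0x83 (IPOPT_LSRR)    opt[1] = 11 (total option length)
	opt[2] = 4   (1-based pointer into the option)
	opt[3..6]  = 10.0.0.1         opt[7..10] = 10.0.0.2

With the pointer at 4, off = 3 and the function copies opt[3..6], returning 10.0.0.1 as the next unvisited hop. Once the route is consumed (opt[2] = 12, so off = 11 > optlen - IP_ADDR_LEN = 7), the bounds check above fires and INADDR_ANY is returned instead.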
14995 15005
14996 15006 /*
14997 15007 * Reverse a source route.
14998 15008 */
14999 15009 void
15000 15010 ip_pkt_source_route_reverse_v4(ip_pkt_t *ipp)
15001 15011 {
15002 15012 ipaddr_t tmp;
15003 15013 ipoptp_t opts;
15004 15014 uchar_t *opt;
15005 15015 uint8_t optval;
15006 15016 uint32_t totallen;
15007 15017
15008 15018 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
15009 15019 return;
15010 15020
15011 15021 totallen = ipp->ipp_ipv4_options_len;
15012 15022 if (totallen & 0x3)
15013 15023 return;
15014 15024
15015 15025 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
15016 15026 optval != IPOPT_EOL;
15017 15027 optval = ipoptp_next(&opts)) {
15018 15028 uint8_t off1, off2;
15019 15029
15020 15030 opt = opts.ipoptp_cur;
15021 15031 switch (optval) {
15022 15032 case IPOPT_SSRR:
15023 15033 case IPOPT_LSRR:
15024 15034 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
15025 15035 break;
15026 15036 }
15027 15037 off1 = IPOPT_MINOFF_SR - 1;
15028 15038 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
15029 15039 while (off2 > off1) {
15030 15040 bcopy(opt + off2, &tmp, IP_ADDR_LEN);
15031 15041 bcopy(opt + off1, opt + off2, IP_ADDR_LEN);
15032 15042 bcopy(&tmp, opt + off1, IP_ADDR_LEN);
15033 15043 off2 -= IP_ADDR_LEN;
15034 15044 off1 += IP_ADDR_LEN;
15035 15045 }
15036 15046 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
15037 15047 break;
15038 15048 }
15039 15049 }
15040 15050 }
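
The reversal walks two indices toward each other, swapping one 4-byte address at a time, then rewinds the pointer octet to IPOPT_MINOFF_SR. A standalone user-space sketch of the same loop over a fully consumed route of three illustrative hops:

#include <stdio.h>
#include <string.h>

#define	ADDRLEN	4	/* IP_ADDR_LEN */
#define	MINOFF	4	/* IPOPT_MINOFF_SR: 1-based offset of first slot */

int
main(void)
{
	/* type, len, ptr (= 16: all three hops consumed), then A, B, C */
	unsigned char opt[15] = { 0x83, 15, 16,
	    10, 0, 0, 1,  10, 0, 0, 2,  10, 0, 0, 3 };
	unsigned char tmp[ADDRLEN];
	int off1 = MINOFF - 1;			/* first address slot */
	int off2 = opt[2] - ADDRLEN - 1;	/* last recorded address */

	while (off2 > off1) {
		memcpy(tmp, opt + off2, ADDRLEN);
		memcpy(opt + off2, opt + off1, ADDRLEN);
		memcpy(opt + off1, tmp, ADDRLEN);
		off2 -= ADDRLEN;
		off1 += ADDRLEN;
	}
	opt[2] = MINOFF;	/* rewind pointer to the first slot */
	printf("first hop is now 10.0.0.%d\n", opt[6]);	/* prints 3 */
	return (0);
}

The outer pair (A, C) is swapped, the indices meet at the middle address B, and the loop exits, leaving the list C, B, A with the pointer reset for the return trip.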
15041 15051
15042 15052 /*
15043 15053 * Returns NULL if no routing header
15044 15054 */
15045 15055 in6_addr_t *
15046 15056 ip_pkt_source_route_v6(const ip_pkt_t *ipp)
15047 15057 {
15048 15058 in6_addr_t *nexthop = NULL;
15049 15059 ip6_rthdr0_t *rthdr;
15050 15060
15051 15061 if (!(ipp->ipp_fields & IPPF_RTHDR))
15052 15062 return (NULL);
15053 15063
15054 15064 rthdr = (ip6_rthdr0_t *)ipp->ipp_rthdr;
15055 15065 if (rthdr->ip6r0_segleft == 0)
15056 15066 return (NULL);
15057 15067
15058 15068 nexthop = (in6_addr_t *)((char *)rthdr + sizeof (*rthdr));
15059 15069 return (nexthop);
15060 15070 }
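
For a type 0 routing header the next hop is simply the first 16-byte address after the 8-byte fixed header; no byte swapping is needed to find it. A layout sketch, assuming the standard ip6_rthdr0_t definition and two illustrative addresses:

	+0   ip6r0_nxt       +1  ip6r0_len (= 4, in units of 8 bytes)
	+2   ip6r0_type (0)  +3  ip6r0_segleft (= 2 hops still to visit)
	+4   reserved (4 bytes)
	+8   address[0]   <- pointer returned by ip_pkt_source_route_v6()
	+24  address[1]

When ip6r0_segleft has dropped to 0 the route is exhausted and NULL is returned, mirroring the INADDR_ANY result of the v4 variant.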
15061 15071
15062 15072 zoneid_t
15063 15073 ip_get_zoneid_v4(ipaddr_t addr, mblk_t *mp, ip_recv_attr_t *ira,
15064 15074 zoneid_t lookup_zoneid)
15065 15075 {
15066 15076 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15067 15077 ire_t *ire;
15068 15078 int ire_flags = MATCH_IRE_TYPE;
15069 15079 zoneid_t zoneid = ALL_ZONES;
15070 15080
15071 15081 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15072 15082 return (ALL_ZONES);
15073 15083
15074 15084 if (lookup_zoneid != ALL_ZONES)
15075 15085 ire_flags |= MATCH_IRE_ZONEONLY;
15076 15086 ire = ire_ftable_lookup_v4(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15077 15087 NULL, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15078 15088 if (ire != NULL) {
15079 15089 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15080 15090 ire_refrele(ire);
15081 15091 }
15082 15092 return (zoneid);
15083 15093 }
15084 15094
15085 15095 zoneid_t
15086 15096 ip_get_zoneid_v6(in6_addr_t *addr, mblk_t *mp, const ill_t *ill,
15087 15097 ip_recv_attr_t *ira, zoneid_t lookup_zoneid)
15088 15098 {
15089 15099 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15090 15100 ire_t *ire;
15091 15101 int ire_flags = MATCH_IRE_TYPE;
15092 15102 zoneid_t zoneid = ALL_ZONES;
15093 15103
15094 15104 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15095 15105 return (ALL_ZONES);
15096 15106
15097 15107 if (IN6_IS_ADDR_LINKLOCAL(addr))
15098 15108 ire_flags |= MATCH_IRE_ILL;
15099 15109
15100 15110 if (lookup_zoneid != ALL_ZONES)
15101 15111 ire_flags |= MATCH_IRE_ZONEONLY;
15102 15112 ire = ire_ftable_lookup_v6(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15103 15113 ill, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15104 15114 if (ire != NULL) {
15105 15115 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15106 15116 ire_refrele(ire);
15107 15117 }
15108 15118 return (zoneid);
15109 15119 }
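
Both lookups answer "which zone owns this local address?" by searching the forwarding table for an IRE_LOCAL or IRE_LOOPBACK entry and unwrapping its zoneid with IP_REAL_ZONEID. A hypothetical receive-path caller; the GLOBAL_ZONEID fallback here is this sketch's policy choice, not something the function mandates:

	zoneid_t zid;

	/* ipha points at the inbound IPv4 header; mp and ira come with it */
	zid = ip_get_zoneid_v4(ipha->ipha_dst, mp, ira, ALL_ZONES);
	if (zid == ALL_ZONES)
		zid = GLOBAL_ZONEID;	/* no zone claims the address */

Passing ALL_ZONES as lookup_zoneid leaves MATCH_IRE_ZONEONLY unset, so the lookup may match an entry visible from any zone.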
15110 15120
15111 15121 /*
15112 15122 * IP observability hook support functions.
15113 15123 */
15114 15124 static void
15115 15125 ipobs_init(ip_stack_t *ipst)
15116 15126 {
15117 15127 netid_t id;
15118 15128
15119 15129 id = net_getnetidbynetstackid(ipst->ips_netstack->netstack_stackid);
15120 15130
15121 15131 ipst->ips_ip4_observe_pr = net_protocol_lookup(id, NHF_INET);
15122 15132 VERIFY(ipst->ips_ip4_observe_pr != NULL);
15123 15133
15124 15134 ipst->ips_ip6_observe_pr = net_protocol_lookup(id, NHF_INET6);
15125 15135 VERIFY(ipst->ips_ip6_observe_pr != NULL);
15126 15136 }
15127 15137
15128 15138 static void
15129 15139 ipobs_fini(ip_stack_t *ipst)
15130 15140 {
15131 15141
15132 15142 VERIFY(net_protocol_release(ipst->ips_ip4_observe_pr) == 0);
15133 15143 VERIFY(net_protocol_release(ipst->ips_ip6_observe_pr) == 0);
15134 15144 }
15135 15145
15136 15146 /*
15137 15147 * hook_pkt_observe_t is composed in network byte order so that the
15138 15148 * entire mblk_t chain handed into hook_run can be used as-is.
15139 15149 * The caveat is that use of the fields, such as the zone fields,
15140 15150 * requires conversion into host byte order first.
15141 15151 */
15142 15152 void
15143 15153 ipobs_hook(mblk_t *mp, int htype, zoneid_t zsrc, zoneid_t zdst,
15144 15154 const ill_t *ill, ip_stack_t *ipst)
15145 15155 {
15146 15156 hook_pkt_observe_t *hdr;
15147 15157 uint64_t grifindex;
15148 15158 mblk_t *imp;
15149 15159
15150 15160 imp = allocb(sizeof (*hdr), BPRI_HI);
15151 15161 if (imp == NULL)
15152 15162 return;
15153 15163
15154 15164 hdr = (hook_pkt_observe_t *)imp->b_rptr;
15155 15165 /*
15156 15166 * b_wptr is set so that the apparent size of the data in the
15157 15167 * mblk_t excludes the pointers at the end of hook_pkt_observe_t.
15158 15168 */
15159 15169 imp->b_wptr = imp->b_rptr + sizeof (dl_ipnetinfo_t);
15160 15170 imp->b_cont = mp;
15161 15171
15162 15172 ASSERT(DB_TYPE(mp) == M_DATA);
15163 15173
15164 15174 if (IS_UNDER_IPMP(ill))
15165 15175 grifindex = ipmp_ill_get_ipmp_ifindex(ill);
15166 15176 else
15167 15177 grifindex = 0;
15168 15178
15169 15179 hdr->hpo_version = 1;
15170 15180 hdr->hpo_htype = htons(htype);
15171 15181 hdr->hpo_pktlen = htonl((ulong_t)msgdsize(mp));
15172 15182 hdr->hpo_ifindex = htonl(ill->ill_phyint->phyint_ifindex);
15173 15183 hdr->hpo_grifindex = htonl(grifindex);
15174 15184 hdr->hpo_zsrc = htonl(zsrc);
15175 15185 hdr->hpo_zdst = htonl(zdst);
15176 15186 hdr->hpo_pkt = imp;
15177 15187 hdr->hpo_ctx = ipst->ips_netstack;
15178 15188
15179 15189 if (ill->ill_isv6) {
15180 15190 hdr->hpo_family = AF_INET6;
15181 15191 (void) hook_run(ipst->ips_ipv6_net_data->netd_hooks,
15182 15192 ipst->ips_ipv6observing, (hook_data_t)hdr);
15183 15193 } else {
15184 15194 hdr->hpo_family = AF_INET;
15185 15195 (void) hook_run(ipst->ips_ipv4_net_data->netd_hooks,
15186 15196 ipst->ips_ipv4observing, (hook_data_t)hdr);
15187 15197 }
15188 15198
15189 15199 imp->b_cont = NULL;
15190 15200 freemsg(imp);
15191 15201 }
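
On the consuming side, the byte-order contract above means every multi-byte header field must come back through ntohs()/ntohl() before use. A hedged sketch of an observability callback, assuming the generic hook_func_t signature (hook_event_token_t, hook_data_t, void *); the callback name is hypothetical:

static int
my_observe_cb(hook_event_token_t tok, hook_data_t data, void *arg)
{
	hook_pkt_observe_t *hdr = (hook_pkt_observe_t *)data;
	zoneid_t zsrc = (zoneid_t)ntohl(hdr->hpo_zsrc);	/* host order */
	uint32_t pktlen = ntohl(hdr->hpo_pktlen);

	/* hdr->hpo_pkt->b_cont is the observed packet itself */
	cmn_err(CE_CONT, "observed %u bytes from zone %d\n", pktlen, zsrc);
	return (0);
}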
15192 15202
15193 15203 /*
15194 15204 * Utility routine that checks if `v4srcp' is a valid address on underlying
15195 15205 * interface `ill'. If `ipifp' is non-NULL, it's set to a held ipif
15196 15206 * associated with `v4srcp' on success. NOTE: if this is not called from
15197 15207 * inside the IPSQ (ill_g_lock is not held), `ill' may be removed from the
15198 15208 * group during or after this lookup.
15199 15209 */
15200 15210 boolean_t
15201 15211 ipif_lookup_testaddr_v4(ill_t *ill, const in_addr_t *v4srcp, ipif_t **ipifp)
15202 15212 {
15203 15213 ipif_t *ipif;
15204 15214
15205 15215 ipif = ipif_lookup_addr_exact(*v4srcp, ill, ill->ill_ipst);
15206 15216 if (ipif != NULL) {
15207 15217 if (ipifp != NULL)
15208 15218 *ipifp = ipif;
15209 15219 else
15210 15220 ipif_refrele(ipif);
15211 15221 return (B_TRUE);
15212 15222 }
15213 15223
15214 15224 ip1dbg(("ipif_lookup_testaddr_v4: cannot find ipif for src %x\n",
15215 15225 *v4srcp));
15216 15226 return (B_FALSE);
15217 15227 }
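
The held-ipif contract means that when `ipifp' is supplied and the lookup succeeds, the caller owns the reference and must drop it. A hypothetical caller, where `testaddr' is some in_addr_t of interest:

	ipif_t *ipif;

	if (ipif_lookup_testaddr_v4(ill, &testaddr, &ipif)) {
		/* inspect ipif while the hold keeps it from going away */
		ipif_refrele(ipif);
	}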
15218 15228
15219 15229 /*
15220 15230 * Transport protocol call back function for CPU state change.
15221 15231 */
15222 15232 /* ARGSUSED */
15223 15233 static int
15224 15234 ip_tp_cpu_update(cpu_setup_t what, int id, void *arg)
15225 15235 {
15226 15236 processorid_t cpu_seqid;
15227 15237 netstack_handle_t nh;
15228 15238 netstack_t *ns;
15229 15239
15229 15239
15230 15240 ASSERT(MUTEX_HELD(&cpu_lock));
15231 15241
15232 15242 switch (what) {
15233 15243 case CPU_CONFIG:
15234 15244 case CPU_ON:
15235 15245 case CPU_INIT:
15236 15246 case CPU_CPUPART_IN:
15237 15247 cpu_seqid = cpu[id]->cpu_seqid;
15238 15248 netstack_next_init(&nh);
15239 15249 while ((ns = netstack_next(&nh)) != NULL) {
15250 + dccp_stack_cpu_add(ns->netstack_dccp, cpu_seqid);
15240 15251 tcp_stack_cpu_add(ns->netstack_tcp, cpu_seqid);
15241 15252 sctp_stack_cpu_add(ns->netstack_sctp, cpu_seqid);
15242 15253 udp_stack_cpu_add(ns->netstack_udp, cpu_seqid);
15243 15254 netstack_rele(ns);
15244 15255 }
15245 15256 netstack_next_fini(&nh);
15246 15257 break;
15247 15258 case CPU_UNCONFIG:
15248 15259 case CPU_OFF:
15249 15260 case CPU_CPUPART_OUT:
15250 15261 /*
15251 15262 * Nothing to do. We don't remove the per CPU stats from
15252 15263 * the IP stack even when the CPU goes offline.
15253 15264 */
15254 15265 break;
15255 15266 default:
15256 15267 break;
15257 15268 }
15258 15269 return (0);
15259 15270 }
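
The one functional change in this hunk is the new dccp_stack_cpu_add() call, which gives DCCP the same per-CPU-stats growth hook that TCP, SCTP, and UDP already receive when a CPU is configured, brought online, or moved into a partition. Since this webrev only starts the DCCP module template, the following is a hedged sketch of what such a hook might look like if modeled on tcp_stack_cpu_add(); dccp_stack_t, dccps_sc, dccps_sc_cnt, and dccp_stats_cpu_t are assumed names, not confirmed here:

/*
 * Sketch only: grow the per-stack array of per-CPU stat records up to
 * and including cpu_seqid.  As the comment above notes, records are
 * never freed when a CPU goes offline.
 */
void
dccp_stack_cpu_add(dccp_stack_t *dccps, processorid_t cpu_seqid)
{
	int i;

	if (cpu_seqid < dccps->dccps_sc_cnt)
		return;		/* this CPU's slot already exists */
	for (i = dccps->dccps_sc_cnt; i <= cpu_seqid; i++) {
		dccps->dccps_sc[i] = kmem_zalloc(sizeof (dccp_stats_cpu_t),
		    KM_SLEEP);
	}
	membar_producer();	/* publish slots before raising the count */
	dccps->dccps_sc_cnt = cpu_seqid + 1;
}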