7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/gldutil.c
+++ new/usr/src/uts/common/io/gldutil.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 *
21 21 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
22 22 * Use is subject to license terms.
23 23 */
24 24
25 -#pragma ident "%Z%%M% %I% %E% SMI"
26 -
27 25 /*
28 26 * gld - Generic LAN Driver
29 27 * media dependent routines
30 28 */
31 29
32 30 #include <sys/types.h>
33 31 #include <sys/errno.h>
34 32 #include <sys/stropts.h>
35 33 #include <sys/stream.h>
36 34 #include <sys/kmem.h>
37 35 #include <sys/stat.h>
38 36 #include <sys/modctl.h>
39 37 #include <sys/kstat.h>
40 38 #include <sys/debug.h>
41 39
42 40 #include <sys/byteorder.h>
43 41 #include <sys/strsun.h>
44 42 #include <sys/dlpi.h>
45 43 #include <sys/ethernet.h>
46 44 #include <sys/multidata.h>
47 45 #include <sys/gld.h>
48 46 #include <sys/gldpriv.h>
49 47 #include <sys/ddi.h>
50 48 #include <sys/sunddi.h>
51 49 #include <sys/sysmacros.h>
52 50 #include <sys/ib/clients/ibd/ibd.h>
53 51 #include <sys/pattr.h>
54 52
55 53 #define DLSAPLENGTH(macinfo) \
56 54 ((macinfo)->gldm_addrlen + ABS((macinfo)->gldm_saplen))
57 55
58 56 #ifdef GLD_DEBUG
59 57 extern int gld_debug;
60 58 #endif
61 59
62 60 extern void gld_bitrevcopy(caddr_t src, caddr_t target, size_t n);
63 61 extern char *gld_macaddr_sprintf(char *, unsigned char *, int);
64 62 extern gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
65 63 extern uint32_t gld_global_options;
66 64
67 65 static struct llc_snap_hdr llc_snap_def = {
68 66 LSAP_SNAP, /* DLSAP 0xaa */
69 67 LSAP_SNAP, /* SLSAP 0xaa */
70 68 CNTL_LLC_UI, /* Control 0x03 */
71 - 0x00, 0x00, 0x00, /* Org[3] */
69 + { 0x00, 0x00, 0x00 }, /* Org[3] */
72 70 0x00 /* Type */
73 71 };
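
This hunk is the substance of the change for this file: with -Wno-missing-braces dropped from Makefile.uts, GCC's -Wmissing-braces warning is in effect, so the three bytes filling org[3] in the llc_snap_def initializer need their own inner braces. A minimal sketch of what the warning catches and how the bracing silences it (the struct below is an illustrative stand-in, not the real struct llc_snap_hdr definition):

	/* Illustrative layout only -- mirrors the shape of llc_snap_hdr. */
	struct snap_like {
		unsigned char	d_lsap;
		unsigned char	s_lsap;
		unsigned char	control;
		unsigned char	org[3];
		unsigned short	type;
	};

	/*
	 * Flat initializer: valid C, but gcc -Wmissing-braces warns that
	 * the values destined for org[3] lack their own inner braces.
	 */
	static struct snap_like flat = {
		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x00
	};

	/* Fully braced initializer: same values, no warning. */
	static struct snap_like braced = {
		0xaa, 0xaa, 0x03, { 0x00, 0x00, 0x00 }, 0x00
	};

Both forms initialize the structure identically; the extra braces only satisfy the now-enabled warning, which is why the fix is confined to the initializer and changes no behavior.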
74 72
75 73 #define ISETHERTYPE(snaphdr) \
76 74 (snaphdr->d_lsap == LSAP_SNAP && \
77 75 snaphdr->s_lsap == LSAP_SNAP && \
78 76 snaphdr->control == CNTL_LLC_UI && \
79 77 snaphdr->org[0] == 0 && \
80 78 snaphdr->org[1] == 0 && \
81 79 snaphdr->org[2] == 0)
82 80
83 81 /* ======== */
84 82 /* Ethernet */
85 83 /* ======== */
86 84
87 85 static mac_addr_t ether_broadcast = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
88 86
89 87 void
90 88 gld_init_ether(gld_mac_info_t *macinfo)
91 89 {
92 90 struct gldkstats *sp =
93 91 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
94 92
95 93 /* Assumptions we make for this medium */
96 94 ASSERT(macinfo->gldm_type == DL_ETHER);
97 95 ASSERT(macinfo->gldm_addrlen == 6);
98 96 ASSERT(macinfo->gldm_saplen == -2);
99 97 #ifndef lint
100 98 ASSERT(sizeof (struct ether_header) == 14);
101 99 ASSERT(sizeof (mac_addr_t) == 6);
102 100 #endif
103 101
104 102 kstat_named_init(&sp->glds_frame, "align_errors", KSTAT_DATA_ULONG);
105 103 kstat_named_init(&sp->glds_crc, "fcs_errors", KSTAT_DATA_ULONG);
106 104 kstat_named_init(&sp->glds_collisions, "collisions", KSTAT_DATA_ULONG);
107 105 kstat_named_init(&sp->glds_nocarrier, "carrier_errors",
108 106 KSTAT_DATA_ULONG);
109 107 kstat_named_init(&sp->glds_defer, "defer_xmts", KSTAT_DATA_ULONG);
110 108 kstat_named_init(&sp->glds_xmtlatecoll, "tx_late_collisions",
111 109 KSTAT_DATA_ULONG);
112 110 kstat_named_init(&sp->glds_short, "runt_errors", KSTAT_DATA_ULONG);
113 111 kstat_named_init(&sp->glds_excoll, "ex_collisions", KSTAT_DATA_ULONG);
114 112
115 113 /*
116 114 * only initialize the new statistics if the driver
117 115 * knows about them.
118 116 */
119 117 if (macinfo->gldm_driver_version != GLD_VERSION_200)
120 118 return;
121 119
122 120 kstat_named_init(&sp->glds_dot3_first_coll,
123 121 "first_collisions", KSTAT_DATA_UINT32);
124 122 kstat_named_init(&sp->glds_dot3_multi_coll,
125 123 "multi_collisions", KSTAT_DATA_UINT32);
126 124 kstat_named_init(&sp->glds_dot3_sqe_error,
127 125 "sqe_errors", KSTAT_DATA_UINT32);
128 126 kstat_named_init(&sp->glds_dot3_mac_xmt_error,
129 127 "macxmt_errors", KSTAT_DATA_UINT32);
130 128 kstat_named_init(&sp->glds_dot3_mac_rcv_error,
131 129 "macrcv_errors", KSTAT_DATA_UINT32);
132 130 kstat_named_init(&sp->glds_dot3_frame_too_long,
133 131 "toolong_errors", KSTAT_DATA_UINT32);
134 132 kstat_named_init(&sp->glds_duplex, "duplex", KSTAT_DATA_CHAR);
135 133 }
136 134
137 135 /*ARGSUSED*/
138 136 void
139 137 gld_uninit_ether(gld_mac_info_t *macinfo)
140 138 {
141 139 }
142 140
143 141 int
144 142 gld_interpret_ether(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
145 143 packet_flag_t flags)
146 144 {
147 145 struct ether_header *mh;
148 146 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
149 147 struct llc_snap_hdr *snaphdr;
150 148 mblk_t *pmp = NULL, *savemp = mp;
151 149 unsigned short typelen;
152 150 int ret = 0;
153 151
154 152 /*
155 153 * Quickly handle receive fastpath for IPQ hack.
156 154 */
157 155 if (flags == GLD_RXQUICK) {
158 156 pktinfo->pktLen = msgdsize(mp);
159 157 /*
160 158 * Check whether the header is contiguous, which
161 159 * also implicitly makes sure the packet is big enough.
162 160 */
163 161 if (MBLKL(mp) < sizeof (struct ether_header))
164 162 return (-1);
165 163 mh = (struct ether_header *)mp->b_rptr;
166 164 pktinfo->ethertype = REF_NET_USHORT(mh->ether_type);
167 165 pktinfo->isForMe = mac_eq(&mh->ether_dhost,
168 166 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
169 167 pktinfo->macLen = sizeof (struct ether_header);
170 168
171 169 return (0);
172 170 }
173 171
174 172 bzero((void *)pktinfo, sizeof (*pktinfo));
175 173
176 174 pktinfo->pktLen = msgdsize(mp);
177 175
178 176 /* make sure packet has at least a whole mac header */
179 177 if (pktinfo->pktLen < sizeof (struct ether_header))
180 178 return (-1);
181 179
182 180 /* make sure the mac header falls into contiguous memory */
183 181 if (MBLKL(mp) < sizeof (struct ether_header)) {
184 182 if ((pmp = msgpullup(mp, -1)) == NULL) {
185 183 #ifdef GLD_DEBUG
186 184 if (gld_debug & GLDERRS)
187 185 cmn_err(CE_WARN,
188 186 "GLD: interpret_ether cannot msgpullup");
189 187 #endif
190 188 return (-1);
191 189 }
192 190 mp = pmp; /* this mblk contains the whole mac header */
193 191 }
194 192
195 193 mh = (struct ether_header *)mp->b_rptr;
196 194
197 195 /* Check to see if the mac is a broadcast or multicast address. */
198 196 if (mac_eq(&mh->ether_dhost, ether_broadcast, macinfo->gldm_addrlen))
199 197 pktinfo->isBroadcast = 1;
200 198 else if (mh->ether_dhost.ether_addr_octet[0] & 1)
201 199 pktinfo->isMulticast = 1;
202 200
203 201 typelen = REF_NET_USHORT(mh->ether_type);
204 202 /*
205 203	 * If the hardware is capable of VLAN tag insertion,
206 204	 * strip out the VLAN tag info. Knowing the hardware is
207 205	 * capable of VLAN can be established by the presence
208 206	 * of a non-null 'macinfo->gldm_send_tagged'.
209 207 */
210 208 if (flags == GLD_TX) {
211 209 if ((typelen == ETHERTYPE_VLAN) &&
212 210 (macinfo->gldm_send_tagged != NULL)) {
213 211 struct ether_vlan_header *evhp;
214 212 uint16_t tci;
215 213
216 214 if ((MBLKL(mp) < sizeof (struct ether_vlan_header)) &&
217 215 (pullupmsg(mp, sizeof (struct ether_vlan_header))
218 216 == 0)) {
219 217 ret = -1;
220 218 goto out;
221 219 }
222 220 evhp = (struct ether_vlan_header *)mp->b_rptr;
223 221 tci = REF_NET_USHORT(evhp->ether_tci);
224 222
225 223 /*
226 224	 * We don't allow both the VID and the priority to be zero.
227 225 */
228 226 if ((GLD_VTAG_PRI((int32_t)tci) == 0 &&
229 227 GLD_VTAG_VID((int32_t)tci) == VLAN_VID_NONE) ||
230 228 (GLD_VTAG_CFI((uint32_t)tci)) != VLAN_CFI_ETHER) {
231 229 ret = -1;
232 230 goto out;
233 231 }
234 232
235 233 /*
236 234 * Remember the VTAG info in order to reinsert it,
237 235	 * then strip the tag. This is required because some
238 236 * drivers do not allow the size of message (passed
239 237 * by the gldm_send_tagged() function) to be greater
240 238 * than ETHERMAX.
241 239 */
242 240 GLD_SAVE_MBLK_VTAG(savemp, GLD_TCI2VTAG(tci));
243 241 ovbcopy(mp->b_rptr, mp->b_rptr + VTAG_SIZE,
244 242 2 * ETHERADDRL);
245 243 mp->b_rptr += VTAG_SIZE;
246 244 }
247 245 goto out; /* Got all info we need for xmit case */
248 246 }
249 247
250 248 ASSERT(GLDM_LOCK_HELD(macinfo));
251 249
252 250 /*
253 251 * Deal with the mac header
254 252 */
255 253
256 254 mac_copy(&mh->ether_dhost, pktinfo->dhost, macinfo->gldm_addrlen);
257 255 mac_copy(&mh->ether_shost, pktinfo->shost, macinfo->gldm_addrlen);
258 256
259 257 pktinfo->isLooped = mac_eq(pktinfo->shost,
260 258 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
261 259 pktinfo->isForMe = mac_eq(pktinfo->dhost,
262 260 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
263 261
264 262 pktinfo->macLen = sizeof (struct ether_header);
265 263
266 264 if (typelen > ETHERMTU) {
267 265 pktinfo->ethertype = typelen; /* use type interpretation */
268 266 goto out;
269 267 }
270 268
271 269 /*
272 270 * Packet is 802.3 so the ether type/length field
273 271 * specifies the number of bytes that should be present
274 272 * in the data field. Additional bytes are padding, and
275 273 * should be removed
276 274 */
277 275 {
278 276 int delta = pktinfo->pktLen -
279 277 (sizeof (struct ether_header) + typelen);
280 278
281 279 if (delta > 0 && adjmsg(mp, -delta))
282 280 pktinfo->pktLen -= delta;
283 281 }
284 282
285 283 /*
286 284 * Before trying to look beyond the MAC header, make sure the LLC
287 285 * header exists, and that both it and any SNAP header are contiguous.
288 286 */
289 287 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
290 288 goto out; /* LLC hdr should have been there! */
291 289
292 290 pktinfo->isLLC = 1;
293 291
294 292 if (gld_global_options & GLD_OPT_NO_ETHRXSNAP ||
295 293 pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
296 294 goto out;
297 295
298 296 if (MBLKL(mp) < sizeof (struct ether_header) + LLC_SNAP_HDR_LEN &&
299 297 MBLKL(mp) < pktinfo->pktLen) {
300 298 /*
301 299 * we don't have the entire packet within the first mblk (and
302 300 * therefore we didn't do the msgpullup above), AND the first
303 301 * mblk may not contain all the data we need to look at.
304 302 */
305 303 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
306 304 if ((pmp = msgpullup(mp, -1)) == NULL) {
307 305 #ifdef GLD_DEBUG
308 306 if (gld_debug & GLDERRS)
309 307 cmn_err(CE_WARN,
310 308 "GLD: interpret_ether cannot msgpullup2");
311 309 #endif
312 310 goto out; /* can't interpret this pkt further */
313 311 }
314 312 mp = pmp; /* this mblk should contain everything needed */
315 313 }
316 314
317 315 /*
318 316 * Check SAP/SNAP information for EtherType.
319 317 */
320 318
321 319 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
322 320 if (ISETHERTYPE(snaphdr)) {
323 321 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
324 322 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
325 323 }
326 324 out:
327 325 if (pmp != NULL)
328 326 freemsg(pmp);
329 327
330 328 return (ret);
331 329 }
332 330
333 331 mblk_t *
334 332 gld_unitdata_ether(gld_t *gld, mblk_t *mp)
335 333 {
336 334 gld_mac_info_t *macinfo = gld->gld_mac_info;
337 335 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
338 336 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
339 337 mac_addr_t dhost;
340 338 unsigned short typelen;
341 339 mblk_t *nmp;
342 340 struct ether_header *mh;
343 341 int hdrlen;
344 342 uint32_t vptag;
345 343 gld_vlan_t *gld_vlan;
346 344
347 345 ASSERT(macinfo);
348 346
349 347 /* extract needed info from the mblk before we maybe reuse it */
350 348 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
351 349
352 350 /* look in the unitdata request for a sap, else use bound one */
353 351 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
354 352 REF_HOST_USHORT(gldp->glda_sap) != 0)
355 353 typelen = REF_HOST_USHORT(gldp->glda_sap);
356 354 else
357 355 typelen = gld->gld_sap;
358 356
359 357 /*
360 358 * We take values less than or equal to ETHERMTU to mean that the
361 359 * packet should not have an encoded EtherType and so we use the
362 360 * IEEE 802.3 length interpretation of the type/length field.
363 361 */
364 362 if (typelen <= ETHERMTU)
365 363 typelen = msgdsize(mp);
366 364
367 365 hdrlen = sizeof (struct ether_header);
368 366
369 367 /*
370 368 * Check to see if VLAN is enabled on this stream
371 369 * if so then make the header bigger to hold a clone
372 370 * vlan tag.
373 371 */
374 372 gld_vlan = (gld_vlan_t *)gld->gld_vlan;
375 373 if (gld_vlan && (gld_vlan->gldv_id != VLAN_VID_NONE)) {
376 374 hdrlen += VTAG_SIZE;
377 375 vptag = gld_vlan->gldv_ptag;
378 376 }
379 377
380 378 /* need a buffer big enough for the headers */
381 379 nmp = mp->b_cont; /* where the packet payload M_DATA is */
382 380 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
383 381 /* it fits at the beginning of the first M_DATA block */
384 382 freeb(mp); /* don't need the M_PROTO anymore */
385 383 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
386 384 /* we can reuse the dl_unitdata_req M_PROTO mblk */
387 385 nmp = mp;
388 386 DB_TYPE(nmp) = M_DATA;
389 387 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
390 388 } else {
391 389 /* we need to allocate one */
392 390 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
393 391 return (NULL);
394 392 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
395 393 linkb(nmp, mp->b_cont);
396 394 freeb(mp);
397 395 }
398 396
399 397 /* Got the space, now copy in the header components */
400 398
401 399 nmp->b_rptr -= sizeof (typelen);
402 400 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, typelen);
403 401 if (hdrlen > sizeof (struct ether_header)) {
404 402 nmp->b_rptr -= sizeof (uint16_t);
405 403 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
406 404 vptag >>= 16;
407 405 nmp->b_rptr -= sizeof (uint16_t);
408 406 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
409 407 }
410 408 nmp->b_rptr -= (ETHERADDRL * 2);
411 409 mh = (struct ether_header *)nmp->b_rptr;
412 410 mac_copy(dhost, &mh->ether_dhost, macinfo->gldm_addrlen);
413 411
414 412 /*
415 413 * We access the mac address without the mutex to prevent
416 414 * mutex contention (BUG 4211361)
417 415 */
418 416 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
419 417 &mh->ether_shost, macinfo->gldm_addrlen);
420 418
421 419 return (nmp);
422 420 }
423 421
424 422 /*
425 423 * Insert the VLAN tag into the packet. The packet now is an Ethernet header
426 424 * without VLAN tag information.
427 425 */
428 426 mblk_t *
429 427 gld_insert_vtag_ether(mblk_t *mp, uint32_t vtag)
430 428 {
431 429 struct ether_vlan_header *evhp;
432 430 struct ether_header *ehp;
433 431 mblk_t *nmp;
434 432
435 433 if (vtag == VLAN_VID_NONE)
436 434 return (mp);
437 435
438 436 if (DB_REF(mp) == 1 && MBLKHEAD(mp) >= VTAG_SIZE) {
439 437 /* it fits at the beginning of the message block */
440 438 nmp = mp;
441 439 ovbcopy(nmp->b_rptr, nmp->b_rptr - VTAG_SIZE, 2 * ETHERADDRL);
442 440 nmp->b_rptr -= VTAG_SIZE;
443 441 evhp = (struct ether_vlan_header *)nmp->b_rptr;
444 442 } else {
445 443 /* we need to allocate one */
446 444 if ((nmp = allocb(sizeof (struct ether_vlan_header),
447 445 BPRI_MED)) == NULL) {
448 446 return (NULL);
449 447 }
450 448 nmp->b_wptr += sizeof (struct ether_vlan_header);
451 449
452 450 /* transfer the ether_header fields */
453 451 evhp = (struct ether_vlan_header *)nmp->b_rptr;
454 452 ehp = (struct ether_header *)mp->b_rptr;
455 453 mac_copy(&ehp->ether_dhost, &evhp->ether_dhost, ETHERADDRL);
456 454 mac_copy(&ehp->ether_shost, &evhp->ether_shost, ETHERADDRL);
457 455 bcopy(&ehp->ether_type, &evhp->ether_type, sizeof (uint16_t));
458 456
459 457 /* offset the mp of the MAC header length. */
460 458 mp->b_rptr += sizeof (struct ether_header);
461 459 if (MBLKL(mp) == 0) {
462 460 nmp->b_cont = mp->b_cont;
463 461 freeb(mp);
464 462 } else {
465 463 nmp->b_cont = mp;
466 464 }
467 465 }
468 466
469 467 SET_NET_USHORT(evhp->ether_tci, vtag);
470 468 vtag >>= 16;
471 469 SET_NET_USHORT(evhp->ether_tpid, vtag);
472 470 return (nmp);
473 471 }
474 472
475 473 mblk_t *
476 474 gld_fastpath_ether(gld_t *gld, mblk_t *mp)
477 475 {
478 476 gld_mac_info_t *macinfo = gld->gld_mac_info;
479 477 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
480 478 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
481 479 unsigned short typelen;
482 480 mblk_t *nmp;
483 481 struct ether_header *mh;
484 482 int hdrlen;
485 483 uint32_t vptag;
486 484 gld_vlan_t *gld_vlan;
487 485
488 486 ASSERT(macinfo);
489 487
490 488 /* look in the unitdata request for a sap, else use bound one */
491 489 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
492 490 REF_HOST_USHORT(gldp->glda_sap) != 0)
493 491 typelen = REF_HOST_USHORT(gldp->glda_sap);
494 492 else
495 493 typelen = gld->gld_sap;
496 494
497 495 /*
498 496 * We only do fast-path for EtherType encoding because this is the only
499 497 * case where the media header will be consistent from packet to packet.
500 498 */
501 499 if (typelen <= ETHERMTU)
502 500 return (NULL);
503 501
504 502 /*
505 503 * Initialize the fast path header to include the
506 504 * basic source address information and type field.
507 505 */
508 506 hdrlen = sizeof (struct ether_header);
509 507
510 508 /*
511 509 * Check to see if VLAN is enabled on this stream
512 510 * if so then make the header bigger to hold a clone
513 511 * vlan tag.
514 512 */
515 513 gld_vlan = (gld_vlan_t *)gld->gld_vlan;
516 514 if (gld_vlan && (gld_vlan->gldv_id != VLAN_VID_NONE)) {
517 515 hdrlen += VTAG_SIZE;
518 516 vptag = gld_vlan->gldv_ptag;
519 517 }
520 518
521 519 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
522 520 return (NULL);
523 521
524 522 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
525 523
526 524 /* Got the space, now copy in the header components */
527 525
528 526 nmp->b_rptr -= sizeof (typelen);
529 527 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, typelen);
530 528
531 529 /*
532 530 * If the header is for a VLAN stream, then add
533 531 * in the VLAN tag to the clone header.
534 532 */
535 533 if (hdrlen > sizeof (struct ether_header)) {
536 534 nmp->b_rptr -= sizeof (uint16_t);
537 535 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
538 536 vptag >>= 16;
539 537 nmp->b_rptr -= sizeof (uint16_t);
540 538 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
541 539 }
542 540 nmp->b_rptr -= (ETHERADDRL * 2);
543 541 mh = (struct ether_header *)nmp->b_rptr;
544 542 mac_copy(gldp->glda_addr, &mh->ether_dhost, macinfo->gldm_addrlen);
545 543
546 544 GLDM_LOCK(macinfo, RW_WRITER);
547 545 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
548 546 &mh->ether_shost, macinfo->gldm_addrlen);
549 547 GLDM_UNLOCK(macinfo);
550 548
551 549 return (nmp);
552 550 }
553 551
554 552 /* == */
555 553 /* IB */
556 554 /* == */
557 555
558 556 void
559 557 gld_init_ib(gld_mac_info_t *macinfo)
560 558 {
561 559 /*
562 560 * Currently, the generic stats maintained by GLD is
563 561 * sufficient for IPoIB.
564 562 */
565 563
566 564 /* Assumptions we make for this medium */
567 565 ASSERT(macinfo->gldm_type == DL_IB);
568 566 ASSERT(macinfo->gldm_addrlen == IPOIB_ADDRL);
569 567 ASSERT(macinfo->gldm_saplen == -2);
570 568 }
571 569
572 570 /* ARGSUSED */
573 571 void
574 572 gld_uninit_ib(gld_mac_info_t *macinfo)
575 573 {
576 574 }
577 575
578 576 /*
579 577 * The packet format sent to the driver is:
580 578 * IPOIB_ADDRL bytes dest addr :: 2b sap :: 2b 0s :: data
581 579 * The packet format received from the driver is:
582 580 * IPOIB_GRH_SIZE bytes pseudo GRH :: 2b sap :: 2b 0s :: data.
583 581 */
584 582 int
585 583 gld_interpret_ib(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
586 584 packet_flag_t flags)
587 585 {
588 586 ipoib_pgrh_t *grh;
589 587 ipoib_ptxhdr_t *gldp;
590 588 mblk_t *pmp = NULL;
591 589 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
592 590
593 591 /*
594 592 * Quickly handle receive fastpath for IPQ hack.
595 593 */
596 594 if (flags == GLD_RXQUICK) {
597 595 pktinfo->pktLen = msgdsize(mp) - IPOIB_GRH_SIZE;
598 596
599 597 /*
600 598 * Check whether the header is contiguous, which
601 599 * also implicitly makes sure the packet is big enough.
602 600 */
603 601 if (MBLKL(mp) < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE))
604 602 return (-1);
605 603
606 604 /*
607 605 * Almost all times, unicast will not have
608 606 * a valid pgrh; quickly identify and ask for
609 607 * IPQ hack optimization only in that case.
610 608 */
611 609 grh = (ipoib_pgrh_t *)mp->b_rptr;
612 610 if (grh->ipoib_vertcflow == 0) {
613 611 struct ipoib_header *ihp = (struct ipoib_header *)
614 612 (mp->b_rptr + IPOIB_GRH_SIZE);
615 613
616 614 pktinfo->isForMe = 1;
617 615 pktinfo->ethertype = REF_NET_USHORT(ihp->ipoib_type);
618 616 pktinfo->macLen = IPOIB_GRH_SIZE + IPOIB_HDRSIZE;
619 617 return (0);
620 618 } else {
621 619 return (-1);
622 620 }
623 621 }
624 622
625 623 /*
626 624 * Handle the GLD_TX, GLD_RX, GLD_RXLOOP cases now.
627 625 */
628 626 ASSERT(flags != GLD_RXQUICK);
629 627 bzero((void *)pktinfo, sizeof (*pktinfo));
630 628
631 629 if (flags != GLD_RX) {
632 630 /*
633 631 * GLD_TX and GLD_RXLOOP cases.
634 632 */
635 633 gldp = (ipoib_ptxhdr_t *)mp->b_rptr;
636 634 pktinfo->pktLen = msgdsize(mp);
637 635
638 636 /* make sure packet has at least a pseudo header */
639 637 if (pktinfo->pktLen < sizeof (ipoib_ptxhdr_t))
640 638 return (-1);
641 639
642 640 /* make sure the mac header falls into contiguous memory */
643 641 if (MBLKL(mp) < sizeof (ipoib_ptxhdr_t)) {
644 642 if ((pmp = msgpullup(mp, -1)) == NULL) {
645 643 #ifdef GLD_DEBUG
646 644 if (gld_debug & GLDERRS)
647 645 cmn_err(CE_WARN,
648 646 "GLD: interpret_ib "
649 647 "cannot msgpullup");
650 648 #endif
651 649 return (-1);
652 650 }
653 651 /* this mblk contains the whole mac header */
654 652 mp = pmp;
655 653 }
656 654
657 655 /*
658 656 * Check if mac is broadcast or multicast address; all these
659 657 * types of address have the top 4 bytes as 0x00FFFFFF.
660 658 */
661 659 if (mac_eq(&gldp->ipoib_dest, macinfo->gldm_broadcast_addr,
662 660 sizeof (uint32_t))) {
663 661 if (mac_eq(&gldp->ipoib_dest,
664 662 macinfo->gldm_broadcast_addr, IPOIB_ADDRL))
665 663 pktinfo->isBroadcast = 1;
666 664 else
667 665 pktinfo->isMulticast = 1;
668 666 }
669 667
670 668 /*
671 669 * Only count bytes we will be sending over the wire
672 670 * or looping back.
673 671 */
674 672 pktinfo->pktLen -= IPOIB_ADDRL;
675 673 if (flags == GLD_TX)
676 674 goto out; /* Got all info we need for xmit case */
677 675
678 676 /*
679 677 * Loopback case: this is a dup'ed message.
680 678 */
681 679 mp->b_rptr += IPOIB_ADDRL;
682 680 mac_copy(&gldp->ipoib_dest, pktinfo->dhost, IPOIB_ADDRL);
683 681 mac_copy(mac_pvt->curr_macaddr, pktinfo->shost, IPOIB_ADDRL);
684 682 } else {
685 683 /*
686 684 * GLD_RX case; process packet sent from driver.
687 685 */
688 686 ipoib_mac_t *mact, *tact;
689 687 ib_qpn_t dqpn;
690 688
691 689 pktinfo->pktLen = msgdsize(mp);
692 690 /* make sure packet has at least pgrh and mac header */
693 691 if (pktinfo->pktLen < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE))
694 692 return (-1);
695 693
696 694 /* make sure the header falls into contiguous memory */
697 695 if (MBLKL(mp) < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE)) {
698 696 if ((pmp = msgpullup(mp, -1)) == NULL) {
699 697 #ifdef GLD_DEBUG
700 698 if (gld_debug & GLDERRS)
701 699 cmn_err(CE_WARN,
702 700 "GLD: interpret_ib "
703 701 "cannot msgpullup2");
704 702 #endif
705 703 return (-1);
706 704 }
707 705 /* this mblk contains the whole mac header */
708 706 mp = pmp;
709 707 }
710 708
711 709 grh = (ipoib_pgrh_t *)mp->b_rptr;
712 710 mp->b_rptr += IPOIB_GRH_SIZE;
713 711 pktinfo->pktLen -= IPOIB_GRH_SIZE;
714 712 if (grh->ipoib_vertcflow) {
715 713 /*
716 714 * First, copy source address from grh.
717 715 */
718 716 mact = (ipoib_mac_t *)pktinfo->shost;
719 717 mac_copy(&grh->ipoib_sqpn, &mact->ipoib_qpn,
720 718 IPOIB_ADDRL);
721 719
722 720 /*
723 721 * Then copy destination address from grh;
724 722 * first, the 16 bytes of GID.
725 723 */
726 724 mact = (ipoib_mac_t *)pktinfo->dhost;
727 725 mac_copy(&grh->ipoib_dgid_pref,
728 726 &mact->ipoib_gidpref, IPOIB_ADDRL -
729 727 sizeof (mact->ipoib_qpn));
730 728 tact = (ipoib_mac_t *)mac_pvt->curr_macaddr;
731 729
732 730 /* Is this a multicast address */
733 731 if (*(uchar_t *)(grh->ipoib_dgid_pref) == 0xFF) {
734 732 /*
735 733 * Only check for hardware looping in
736 734 * multicast case. It is assumed higher
737 735 * layer code (IP) will stop unicast loops;
738 736	 * i.e., it will prevent a transmit to self.
739 737 */
740 738 if (bcmp(&grh->ipoib_sqpn, tact,
741 739 IPOIB_ADDRL) == 0)
742 740 pktinfo->isLooped = 1;
743 741
744 742 tact = (ipoib_mac_t *)macinfo->
745 743 gldm_broadcast_addr;
746 744 if (mac_eq(tact->ipoib_gidpref,
747 745 grh->ipoib_dgid_pref,
748 746 IPOIB_ADDRL - sizeof (tact->ipoib_qpn)))
749 747 pktinfo->isBroadcast = 1;
750 748 else
751 749 pktinfo->isMulticast = 1;
752 750 /*
753 751 * Now copy the 4 bytes QPN part of the
754 752 * destination address.
755 753 */
756 754 dqpn = htonl(IB_MC_QPN);
757 755 mac_copy(&dqpn, &mact->ipoib_qpn,
758 756 sizeof (mact->ipoib_qpn));
759 757 } else {
760 758 /*
761 759 * Now copy the 4 bytes QPN part of the
762 760 * destination address.
763 761 */
764 762 mac_copy(&tact->ipoib_qpn, &mact->ipoib_qpn,
765 763 sizeof (mact->ipoib_qpn));
766 764 /*
767 765 * Any unicast packets received on IBA are
768 766 * for the node.
769 767 */
770 768 pktinfo->isForMe = 1;
771 769 }
772 770 } else {
773 771 /*
774 772	 * It cannot be an IBA multicast packet.
775 773 * Must have been unicast to us. We do not
776 774 * have shost information, which is used in
777 775 * gld_addudind(); IP/ARP does not care.
778 776 */
779 777 pktinfo->nosource = 1;
780 778 mac_copy(mac_pvt->curr_macaddr, pktinfo->dhost,
781 779 IPOIB_ADDRL);
782 780 /*
783 781 * Any unicast packets received on IBA are
784 782 * for the node.
785 783 */
786 784 pktinfo->isForMe = 1;
787 785 }
788 786 }
789 787
790 788 ASSERT((flags == GLD_RX) || (flags == GLD_RXLOOP));
791 789 ASSERT(GLDM_LOCK_HELD(macinfo));
792 790 pktinfo->ethertype = REF_NET_USHORT(((ipoib_hdr_t *)
793 791 (mp->b_rptr))->ipoib_type);
794 792 pktinfo->macLen = IPOIB_HDRSIZE;
795 793
796 794 out:
797 795 if (pmp != NULL)
798 796 freemsg(pmp);
799 797
800 798 return (0);
801 799 }
802 800
803 801 /*
804 802 * The packet format sent to the driver is: 2b sap :: 2b 0s :: data
805 803 */
806 804 void
807 805 gld_interpret_mdt_ib(gld_mac_info_t *macinfo, mblk_t *mp, pdescinfo_t *pinfo,
808 806 pktinfo_t *pktinfo, mdt_packet_flag_t flags)
809 807 {
810 808 gld_mac_pvt_t *mac_pvt;
811 809 multidata_t *dlmdp;
812 810 pattrinfo_t attr_info = { PATTR_DSTADDRSAP, };
813 811 pattr_t *patr;
814 812 ipoib_ptxhdr_t *dlap = NULL;
815 813
816 814 /*
817 815 * Per packet formatting.
818 816 */
819 817 if (flags == GLD_MDT_TXPKT) {
820 818 ipoib_hdr_t *hptr;
821 819 uint_t seg;
822 820
823 821 if (PDESC_HDRL(pinfo) == 0)
824 822 return;
825 823
826 824 /*
827 825 * Update packet's link header.
828 826 */
829 827 pinfo->hdr_rptr -= IPOIB_HDRSIZE;
830 828 hptr = (ipoib_hdr_t *)pinfo->hdr_rptr;
831 829 hptr->ipoib_mbz = htons(0);
832 830 hptr->ipoib_type = pktinfo->ethertype;
833 831
834 832 /*
835 833 * Total #bytes that will be put on wire.
836 834 */
837 835 pktinfo->pktLen = PDESC_HDRL(pinfo);
838 836 for (seg = 0; seg < pinfo->pld_cnt; seg++)
839 837 pktinfo->pktLen += PDESC_PLDL(pinfo, seg);
840 838
841 839 return;
842 840 }
843 841
844 842 /*
845 843 * The following two cases of GLD_MDT_TX and GLD_MDT_RXLOOP are per
846 844 * MDT message processing.
847 845 */
848 846 dlmdp = mmd_getmultidata(mp);
849 847 patr = mmd_getpattr(dlmdp, NULL, &attr_info);
850 848 ASSERT(patr != NULL);
851 849 ASSERT(macinfo->gldm_saplen == -2);
852 850 if (patr != NULL)
853 851 dlap = (ipoib_ptxhdr_t *)((pattr_addr_t *)attr_info.buf)->addr;
854 852
855 853 if (flags == GLD_MDT_TX) {
856 854 bzero((void *)pktinfo, sizeof (*pktinfo));
857 855 if (dlap == NULL)
858 856 return;
859 857
860 858 /*
861 859 * Check if mac is broadcast or multicast address; all these
862 860 * types of address have the top 4 bytes as 0x00FFFFFF.
863 861 */
864 862 if (mac_eq(dlap, macinfo->gldm_broadcast_addr,
865 863 sizeof (uint32_t))) {
866 864 if (mac_eq(dlap, macinfo->gldm_broadcast_addr,
867 865 IPOIB_ADDRL))
868 866 pktinfo->isBroadcast = 1;
869 867 else
870 868 pktinfo->isMulticast = 1;
871 869 }
872 870 pktinfo->ethertype = REF_NET_USHORT(dlap->
873 871 ipoib_rhdr.ipoib_type);
874 872 } else {
875 873 ASSERT(flags == GLD_MDT_RXLOOP);
876 874 pktinfo->macLen = IPOIB_HDRSIZE;
877 875 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
878 876 mac_copy(mac_pvt->curr_macaddr, pktinfo->shost, IPOIB_ADDRL);
879 877 if (dlap == NULL)
880 878 return;
881 879 mac_copy(&dlap->ipoib_dest, pktinfo->dhost, IPOIB_ADDRL);
882 880 }
883 881 }
884 882
885 883 mblk_t *
886 884 gld_unitdata_ib(gld_t *gld, mblk_t *mp)
887 885 {
888 886 gld_mac_info_t *macinfo = gld->gld_mac_info;
889 887 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
890 888 ipoib_ptxhdr_t *gldp = IPOIBDLSAP(dlp, dlp->dl_dest_addr_offset);
891 889 ipoib_mac_t dhost;
892 890 unsigned short type;
893 891 mblk_t *nmp;
894 892 int hdrlen;
895 893
896 894 ASSERT(macinfo != NULL);
897 895
898 896 /* extract needed info from the mblk before we maybe reuse it */
899 897 mac_copy(&gldp->ipoib_dest, &dhost, IPOIB_ADDRL);
900 898
901 899 /* look in the unitdata request for a sap, else use bound one */
902 900 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
903 901 REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type) != 0)
904 902 type = REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type);
905 903 else
906 904 type = gld->gld_sap;
907 905
908 906 hdrlen = sizeof (ipoib_ptxhdr_t);
909 907
910 908 /* need a buffer big enough for the headers */
911 909 nmp = mp->b_cont; /* where the packet payload M_DATA is */
912 910 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
913 911 /* it fits at the beginning of the first M_DATA block */
914 912 freeb(mp); /* don't need the M_PROTO anymore */
915 913 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
916 914 /* we can reuse the dl_unitdata_req M_PROTO mblk */
917 915 nmp = mp;
918 916 DB_TYPE(nmp) = M_DATA;
919 917 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
920 918 } else {
921 919 /* we need to allocate one */
922 920 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
923 921 return (NULL);
924 922 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
925 923 linkb(nmp, mp->b_cont);
926 924 freeb(mp);
927 925 }
928 926
929 927 /* Got the space, now copy in the header components */
930 928
931 929 nmp->b_rptr -= sizeof (ipoib_ptxhdr_t);
932 930 gldp = (ipoib_ptxhdr_t *)nmp->b_rptr;
933 931 SET_NET_USHORT(gldp->ipoib_rhdr.ipoib_type, type);
934 932 gldp->ipoib_rhdr.ipoib_mbz = 0;
935 933 mac_copy(&dhost, &gldp->ipoib_dest, IPOIB_ADDRL);
936 934
937 935 return (nmp);
938 936 }
939 937
940 938 mblk_t *
941 939 gld_fastpath_ib(gld_t *gld, mblk_t *mp)
942 940 {
943 941 gld_mac_info_t *macinfo = gld->gld_mac_info;
944 942 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
945 943 ipoib_ptxhdr_t *gldp = IPOIBDLSAP(dlp, dlp->dl_dest_addr_offset);
946 944 unsigned short type;
947 945 mblk_t *nmp;
948 946 ipoib_ptxhdr_t *tgldp;
949 947 int hdrlen;
950 948
951 949 ASSERT(macinfo != NULL);
952 950
953 951 /* look in the unitdata request for a sap, else use bound one */
954 952 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
955 953 REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type) != 0)
956 954 type = REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type);
957 955 else
958 956 type = gld->gld_sap;
959 957
960 958 hdrlen = sizeof (ipoib_ptxhdr_t);
961 959
962 960 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
963 961 return (NULL);
964 962
965 963 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
966 964
967 965 /* Got the space, now copy in the header components */
968 966
969 967 nmp->b_rptr -= sizeof (ipoib_ptxhdr_t);
970 968 tgldp = (ipoib_ptxhdr_t *)nmp->b_rptr;
971 969 tgldp->ipoib_rhdr.ipoib_type = htons(type);
972 970 tgldp->ipoib_rhdr.ipoib_mbz = 0;
973 971 mac_copy(&gldp->ipoib_dest, &tgldp->ipoib_dest, IPOIB_ADDRL);
974 972
975 973 return (nmp);
976 974 }
977 975
978 976 /* ==== */
979 977 /* FDDI */
980 978 /* ==== */
981 979
982 980 void
983 981 gld_init_fddi(gld_mac_info_t *macinfo)
984 982 {
985 983 struct gldkstats *sp =
986 984 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
987 985
988 986 /* Assumptions we make for this medium */
989 987 ASSERT(macinfo->gldm_type == DL_FDDI);
990 988 ASSERT(macinfo->gldm_addrlen == 6);
991 989 ASSERT(macinfo->gldm_saplen == -2);
992 990 #ifndef lint
993 991 ASSERT(sizeof (struct fddi_mac_frm) == 13);
994 992 ASSERT(sizeof (mac_addr_t) == 6);
995 993 #endif
996 994
997 995 /* Wire address format is bit reversed from canonical format */
998 996 macinfo->gldm_options |= GLDOPT_CANONICAL_ADDR;
999 997
1000 998 kstat_named_init(&sp->glds_fddi_mac_error,
1001 999 "mac_errors", KSTAT_DATA_UINT32);
1002 1000 kstat_named_init(&sp->glds_fddi_mac_lost,
1003 1001 "mac_lost_errors", KSTAT_DATA_UINT32);
1004 1002 kstat_named_init(&sp->glds_fddi_mac_token,
1005 1003 "mac_tokens", KSTAT_DATA_UINT32);
1006 1004 kstat_named_init(&sp->glds_fddi_mac_tvx_expired,
1007 1005 "mac_tvx_expired", KSTAT_DATA_UINT32);
1008 1006 kstat_named_init(&sp->glds_fddi_mac_late,
1009 1007 "mac_late", KSTAT_DATA_UINT32);
1010 1008 kstat_named_init(&sp->glds_fddi_mac_ring_op,
1011 1009 "mac_ring_ops", KSTAT_DATA_UINT32);
1012 1010 }
1013 1011
1014 1012 /*ARGSUSED*/
1015 1013 void
1016 1014 gld_uninit_fddi(gld_mac_info_t *macinfo)
1017 1015 {
1018 1016 }
1019 1017
1020 1018 int
1021 1019 gld_interpret_fddi(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
1022 1020 packet_flag_t flags)
1023 1021 {
1024 1022 struct fddi_mac_frm *mh;
1025 1023 gld_mac_pvt_t *mac_pvt;
1026 1024 struct llc_snap_hdr *snaphdr;
1027 1025 mblk_t *pmp = NULL;
1028 1026
1029 1027 /*
1030 1028 * Quickly handle receive fastpath; FDDI does not support IPQ hack.
1031 1029 */
1032 1030 if (flags == GLD_RXQUICK) {
1033 1031 pktinfo->pktLen = msgdsize(mp);
1034 1032 return (-1);
1035 1033 }
1036 1034
1037 1035 bzero((void *)pktinfo, sizeof (*pktinfo));
1038 1036
1039 1037 pktinfo->pktLen = msgdsize(mp);
1040 1038
1041 1039 /* make sure packet has at least a whole mac header */
1042 1040 if (pktinfo->pktLen < sizeof (struct fddi_mac_frm))
1043 1041 return (-1);
1044 1042
1045 1043 /* make sure the mac header falls into contiguous memory */
1046 1044 if (MBLKL(mp) < sizeof (struct fddi_mac_frm)) {
1047 1045 if ((pmp = msgpullup(mp, -1)) == NULL) {
1048 1046 #ifdef GLD_DEBUG
1049 1047 if (gld_debug & GLDERRS)
1050 1048 cmn_err(CE_WARN,
1051 1049 "GLD: interpret_fddi cannot msgpullup");
1052 1050 #endif
1053 1051 return (-1);
1054 1052 }
1055 1053 mp = pmp; /* this mblk contains the whole mac header */
1056 1054 }
1057 1055
1058 1056 mh = (struct fddi_mac_frm *)mp->b_rptr;
1059 1057
1060 1058 /* Check to see if the mac is a broadcast or multicast address. */
1061 1059 /* NB we are still in wire format (non canonical) */
1062 1060 /* mac_eq works because ether_broadcast is the same either way */
1063 1061 if (mac_eq(mh->fddi_dhost, ether_broadcast, macinfo->gldm_addrlen))
1064 1062 pktinfo->isBroadcast = 1;
1065 1063 else if (mh->fddi_dhost[0] & 0x80)
1066 1064 pktinfo->isMulticast = 1;
1067 1065
1068 1066 if (flags == GLD_TX)
1069 1067 goto out; /* Got all info we need for xmit case */
1070 1068
1071 1069 ASSERT(GLDM_LOCK_HELD(macinfo));
1072 1070
1073 1071 /*
1074 1072 * Deal with the mac header
1075 1073 */
1076 1074
1077 1075 cmac_copy(mh->fddi_dhost, pktinfo->dhost,
1078 1076 macinfo->gldm_addrlen, macinfo);
1079 1077 cmac_copy(mh->fddi_shost, pktinfo->shost,
1080 1078 macinfo->gldm_addrlen, macinfo);
1081 1079
1082 1080 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1083 1081 pktinfo->isLooped = mac_eq(pktinfo->shost,
1084 1082 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1085 1083 pktinfo->isForMe = mac_eq(pktinfo->dhost,
1086 1084 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1087 1085
1088 1086 pktinfo->macLen = sizeof (struct fddi_mac_frm);
1089 1087
1090 1088 /*
1091 1089 * Before trying to look beyond the MAC header, make sure the LLC
1092 1090 * header exists, and that both it and any SNAP header are contiguous.
1093 1091 */
1094 1092 if (MBLKL(mp) < sizeof (struct fddi_mac_frm) + LLC_SNAP_HDR_LEN &&
1095 1093 MBLKL(mp) < pktinfo->pktLen) {
1096 1094 /*
1097 1095 * we don't have the entire packet within the first mblk (and
1098 1096 * therefore we didn't do the msgpullup above), AND the first
1099 1097 * mblk may not contain all the data we need to look at.
1100 1098 */
1101 1099 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
1102 1100 if ((pmp = msgpullup(mp, -1)) == NULL) {
1103 1101 #ifdef GLD_DEBUG
1104 1102 if (gld_debug & GLDERRS)
1105 1103 cmn_err(CE_WARN,
1106 1104 "GLD: interpret_fddi cannot msgpullup2");
1107 1105 #endif
1108 1106 goto out; /* can't interpret this pkt further */
1109 1107 }
1110 1108 mp = pmp; /* this mblk should contain everything needed */
1111 1109 }
1112 1110
1113 1111 /*
1114 1112 * Check SAP/SNAP information.
1115 1113 */
1116 1114 if ((mh->fddi_fc & 0x70) == 0x50) {
1117 1115 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
1118 1116 goto out;
1119 1117
1120 1118 pktinfo->isLLC = 1;
1121 1119
1122 1120 if (pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
1123 1121 goto out;
1124 1122
1125 1123 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
1126 1124 if (ISETHERTYPE(snaphdr)) {
1127 1125 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
1128 1126 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
1129 1127 }
1130 1128 }
1131 1129 out:
1132 1130 if (pmp != NULL)
1133 1131 freemsg(pmp);
1134 1132
1135 1133 return (0);
1136 1134 }
1137 1135
1138 1136 mblk_t *
1139 1137 gld_unitdata_fddi(gld_t *gld, mblk_t *mp)
1140 1138 {
1141 1139 gld_mac_info_t *macinfo = gld->gld_mac_info;
1142 1140 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
1143 1141 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1144 1142 mac_addr_t dhost;
1145 1143 unsigned short type;
1146 1144 mblk_t *nmp;
1147 1145 struct fddi_mac_frm *mh;
1148 1146 int hdrlen;
1149 1147
1150 1148 ASSERT(macinfo);
1151 1149
1152 1150 /* extract needed info from the mblk before we maybe reuse it */
1153 1151 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
1154 1152
1155 1153 /* look in the unitdata request for a sap, else use bound one */
1156 1154 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1157 1155 REF_HOST_USHORT(gldp->glda_sap) != 0)
1158 1156 type = REF_HOST_USHORT(gldp->glda_sap);
1159 1157 else
1160 1158 type = gld->gld_sap;
1161 1159
1162 1160
1163 1161 hdrlen = sizeof (struct fddi_mac_frm);
1164 1162
1165 1163 /*
1166 1164 * Check whether we need to do EtherType encoding or whether the packet
1167 1165 * is LLC.
1168 1166 */
1169 1167 if (type > GLD_MAX_802_SAP)
1170 1168 hdrlen += sizeof (struct llc_snap_hdr);
1171 1169
1172 1170 /* need a buffer big enough for the headers */
1173 1171 nmp = mp->b_cont; /* where the packet payload M_DATA is */
1174 1172 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
1175 1173 /* it fits at the beginning of the first M_DATA block */
1176 1174 freeb(mp); /* don't need the M_PROTO anymore */
1177 1175 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
1178 1176 /* we can reuse the dl_unitdata_req M_PROTO mblk */
1179 1177 nmp = mp;
1180 1178 DB_TYPE(nmp) = M_DATA;
1181 1179 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1182 1180 } else {
1183 1181 /* we need to allocate one */
1184 1182 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1185 1183 return (NULL);
1186 1184 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1187 1185 linkb(nmp, mp->b_cont);
1188 1186 freeb(mp);
1189 1187 }
1190 1188
1191 1189
1192 1190 /* Got the space, now copy in the header components */
1193 1191 if (type > GLD_MAX_802_SAP) {
1194 1192 /* create the snap header */
1195 1193 struct llc_snap_hdr *snap;
1196 1194 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1197 1195 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1198 1196 *snap = llc_snap_def;
1199 1197 SET_NET_USHORT(snap->type, type);
1200 1198 }
1201 1199
1202 1200 nmp->b_rptr -= sizeof (struct fddi_mac_frm);
1203 1201
1204 1202 mh = (struct fddi_mac_frm *)nmp->b_rptr;
1205 1203
1206 1204 mh->fddi_fc = 0x50;
1207 1205 cmac_copy(dhost, mh->fddi_dhost, macinfo->gldm_addrlen, macinfo);
1208 1206
1209 1207 /*
1210 1208 * We access the mac address without the mutex to prevent
1211 1209 * mutex contention (BUG 4211361)
1212 1210 */
1213 1211 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1214 1212 mh->fddi_shost, macinfo->gldm_addrlen, macinfo);
1215 1213 return (nmp);
1216 1214 }
1217 1215
1218 1216 mblk_t *
1219 1217 gld_fastpath_fddi(gld_t *gld, mblk_t *mp)
1220 1218 {
1221 1219 gld_mac_info_t *macinfo = gld->gld_mac_info;
1222 1220 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
1223 1221 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1224 1222 unsigned short type;
1225 1223 mblk_t *nmp;
1226 1224 struct fddi_mac_frm *mh;
1227 1225 int hdrlen;
1228 1226
1229 1227 ASSERT(macinfo);
1230 1228
1231 1229 /* look in the unitdata request for a sap, else use bound one */
1232 1230 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1233 1231 REF_HOST_USHORT(gldp->glda_sap) != 0)
1234 1232 type = REF_HOST_USHORT(gldp->glda_sap);
1235 1233 else
1236 1234 type = gld->gld_sap;
1237 1235
1238 1236 hdrlen = sizeof (struct fddi_mac_frm);
1239 1237
1240 1238 /*
1241 1239 * Check whether we need to do EtherType encoding or whether the packet
1242 1240 * will be LLC.
1243 1241 */
1244 1242 if (type > GLD_MAX_802_SAP)
1245 1243 hdrlen += sizeof (struct llc_snap_hdr);
1246 1244
1247 1245 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1248 1246 return (NULL);
1249 1247
1250 1248 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1251 1249
1252 1250 /* Got the space, now copy in the header components */
1253 1251
1254 1252 if (type > GLD_MAX_802_SAP) {
1255 1253 /* create the snap header */
1256 1254 struct llc_snap_hdr *snap;
1257 1255 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1258 1256 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1259 1257 *snap = llc_snap_def;
1260 1258 snap->type = htons(type); /* we know it's aligned */
1261 1259 }
1262 1260
1263 1261 nmp->b_rptr -= sizeof (struct fddi_mac_frm);
1264 1262
1265 1263 mh = (struct fddi_mac_frm *)nmp->b_rptr;
1266 1264 mh->fddi_fc = 0x50;
1267 1265 cmac_copy(gldp->glda_addr, mh->fddi_dhost,
1268 1266 macinfo->gldm_addrlen, macinfo);
1269 1267
1270 1268 GLDM_LOCK(macinfo, RW_WRITER);
1271 1269 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1272 1270 mh->fddi_shost, macinfo->gldm_addrlen, macinfo);
1273 1271 GLDM_UNLOCK(macinfo);
1274 1272
1275 1273 return (nmp);
1276 1274 }
1277 1275
1278 1276 /* ========== */
1279 1277 /* Token Ring */
1280 1278 /* ========== */
1281 1279
1282 1280 #define GLD_SR_VAR(macinfo) \
1283 1281 (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->data)
1284 1282
1285 1283 #define GLD_SR_HASH(macinfo) ((struct srtab **)GLD_SR_VAR(macinfo))
1286 1284
1287 1285 #define GLD_SR_MUTEX(macinfo) \
1288 1286 (&((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->datalock)
1289 1287
1290 1288 static void gld_sr_clear(gld_mac_info_t *);
1291 1289 static void gld_rcc_receive(gld_mac_info_t *, pktinfo_t *, struct gld_ri *,
1292 1290 uchar_t *, int);
1293 1291 static void gld_rcc_send(gld_mac_info_t *, queue_t *, uchar_t *,
1294 1292 struct gld_ri **, uchar_t *);
1295 1293
1296 1294 static mac_addr_t tokenbroadcastaddr2 = { 0xc0, 0x00, 0xff, 0xff, 0xff, 0xff };
1297 1295 static struct gld_ri ri_ste_def;
1298 1296
1299 1297 void
1300 1298 gld_init_tr(gld_mac_info_t *macinfo)
1301 1299 {
1302 1300 struct gldkstats *sp =
1303 1301 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
1304 1302
1305 1303 /* avoid endian-dependent code by initializing here instead of static */
1306 1304 ri_ste_def.len = 2;
1307 1305 ri_ste_def.rt = RT_STE;
1308 1306 ri_ste_def.mtu = RT_MTU_MAX;
1309 1307 ri_ste_def.dir = 0;
1310 1308 ri_ste_def.res = 0;
1311 1309
1312 1310 /* Assumptions we make for this medium */
1313 1311 ASSERT(macinfo->gldm_type == DL_TPR);
1314 1312 ASSERT(macinfo->gldm_addrlen == 6);
1315 1313 ASSERT(macinfo->gldm_saplen == -2);
1316 1314 #ifndef lint
1317 1315 ASSERT(sizeof (struct tr_mac_frm_nori) == 14);
1318 1316 ASSERT(sizeof (mac_addr_t) == 6);
1319 1317 #endif
1320 1318
1321 1319 mutex_init(GLD_SR_MUTEX(macinfo), NULL, MUTEX_DRIVER, NULL);
1322 1320
1323 1321 GLD_SR_VAR(macinfo) = kmem_zalloc(sizeof (struct srtab *)*SR_HASH_SIZE,
1324 1322 KM_SLEEP);
1325 1323
1326 1324 /* Default is RDE enabled for this medium */
1327 1325 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled =
1328 1326 ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1329 1327 "gld_rde_enable", 1);
1330 1328
1331 1329 /*
1332 1330 * Default is to use STE for unknown paths if RDE is enabled.
1333 1331 * If RDE is disabled, default is to use NULL RIF fields.
1334 1332 *
1335 1333 * It's possible to force use of STE for ALL packets:
1336 1334 * disable RDE but enable STE. This may be useful for
1337 1335 * non-transparent bridges, when it is not desired to run
1338 1336 * the RDE algorithms.
1339 1337 */
1340 1338 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste =
1341 1339 ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1342 1340 "gld_rde_str_indicator_ste",
1343 1341 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled);
1344 1342
1345 1343 /* Default 10 second route timeout on lack of activity */
1346 1344 {
1347 1345 int t = ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1348 1346 "gld_rde_timeout", 10);
1349 1347 if (t < 1)
1350 1348 t = 1; /* Let's be reasonable */
1351 1349 if (t > 600)
1352 1350 t = 600; /* Let's be reasonable */
1353 1351 /* We're using ticks (lbolts) for our timeout -- convert from seconds */
1354 1352 t = drv_usectohz(1000000 * t);
1355 1353 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout = t;
1356 1354 }
1357 1355
1358 1356 kstat_named_init(&sp->glds_dot5_line_error,
1359 1357 "line_errors", KSTAT_DATA_UINT32);
1360 1358 kstat_named_init(&sp->glds_dot5_burst_error,
1361 1359 "burst_errors", KSTAT_DATA_UINT32);
1362 1360 kstat_named_init(&sp->glds_dot5_signal_loss,
1363 1361 "signal_losses", KSTAT_DATA_UINT32);
1364 1362
1365 1363 /*
1366 1364 * only initialize the new statistics if the driver
1367 1365 * knows about them.
1368 1366 */
1369 1367 if (macinfo->gldm_driver_version != GLD_VERSION_200)
1370 1368 return;
1371 1369
1372 1370 kstat_named_init(&sp->glds_dot5_ace_error,
1373 1371 "ace_errors", KSTAT_DATA_UINT32);
1374 1372 kstat_named_init(&sp->glds_dot5_internal_error,
1375 1373 "internal_errors", KSTAT_DATA_UINT32);
1376 1374 kstat_named_init(&sp->glds_dot5_lost_frame_error,
1377 1375 "lost_frame_errors", KSTAT_DATA_UINT32);
1378 1376 kstat_named_init(&sp->glds_dot5_frame_copied_error,
1379 1377 "frame_copied_errors", KSTAT_DATA_UINT32);
1380 1378 kstat_named_init(&sp->glds_dot5_token_error,
1381 1379 "token_errors", KSTAT_DATA_UINT32);
1382 1380 kstat_named_init(&sp->glds_dot5_freq_error,
1383 1381 "freq_errors", KSTAT_DATA_UINT32);
1384 1382 }
1385 1383
1386 1384 void
1387 1385 gld_uninit_tr(gld_mac_info_t *macinfo)
1388 1386 {
1389 1387 mutex_destroy(GLD_SR_MUTEX(macinfo));
1390 1388 gld_sr_clear(macinfo);
1391 1389 kmem_free(GLD_SR_VAR(macinfo), sizeof (struct srtab *) * SR_HASH_SIZE);
1392 1390 }
1393 1391
1394 1392 int
1395 1393 gld_interpret_tr(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
1396 1394 packet_flag_t flags)
1397 1395 {
1398 1396 struct tr_mac_frm *mh;
1399 1397 gld_mac_pvt_t *mac_pvt;
1400 1398 struct llc_snap_hdr *snaphdr;
1401 1399 mblk_t *pmp = NULL;
1402 1400 struct gld_ri *rh;
1403 1401
1404 1402 /*
1405 1403 * Quickly handle receive fastpath; TR does not support IPQ hack.
1406 1404 */
1407 1405 if (flags == GLD_RXQUICK) {
1408 1406 pktinfo->pktLen = msgdsize(mp);
1409 1407 return (-1);
1410 1408 }
1411 1409
1412 1410 bzero((void *)pktinfo, sizeof (*pktinfo));
1413 1411
1414 1412 pktinfo->pktLen = msgdsize(mp);
1415 1413
1416 1414 /* make sure packet has at least a whole mac header */
1417 1415 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori))
1418 1416 return (-1);
1419 1417
1420 1418 /* make sure the mac header falls into contiguous memory */
1421 1419 if (MBLKL(mp) < sizeof (struct tr_mac_frm_nori)) {
1422 1420 if ((pmp = msgpullup(mp, -1)) == NULL) {
1423 1421 #ifdef GLD_DEBUG
1424 1422 if (gld_debug & GLDERRS)
1425 1423 cmn_err(CE_WARN,
1426 1424 "GLD: interpret_tr cannot msgpullup");
1427 1425 #endif
1428 1426 return (-1);
1429 1427 }
1430 1428 mp = pmp; /* this mblk contains the whole mac header */
1431 1429 }
1432 1430
1433 1431 mh = (struct tr_mac_frm *)mp->b_rptr;
1434 1432
1435 1433 /* Check to see if the mac is a broadcast or multicast address. */
1436 1434 if (mac_eq(mh->tr_dhost, ether_broadcast, macinfo->gldm_addrlen) ||
1437 1435 mac_eq(mh->tr_dhost, tokenbroadcastaddr2, macinfo->gldm_addrlen))
1438 1436 pktinfo->isBroadcast = 1;
1439 1437 else if (mh->tr_dhost[0] & 0x80)
1440 1438 pktinfo->isMulticast = 1;
1441 1439
1442 1440 if (flags == GLD_TX)
1443 1441 goto out; /* Got all info we need for xmit case */
1444 1442
1445 1443 ASSERT(GLDM_LOCK_HELD(macinfo));
1446 1444
1447 1445 /*
1448 1446 * Deal with the mac header
1449 1447 */
1450 1448
1451 1449 mac_copy(mh->tr_dhost, pktinfo->dhost, macinfo->gldm_addrlen);
1452 1450 mac_copy(mh->tr_shost, pktinfo->shost, macinfo->gldm_addrlen);
1453 1451 pktinfo->shost[0] &= ~0x80; /* turn off RIF indicator */
1454 1452
1455 1453 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1456 1454 pktinfo->isLooped = mac_eq(pktinfo->shost,
1457 1455 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1458 1456 pktinfo->isForMe = mac_eq(pktinfo->dhost,
1459 1457 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1460 1458
1461 1459 rh = (struct gld_ri *)NULL;
1462 1460 pktinfo->macLen = sizeof (struct tr_mac_frm_nori);
1463 1461
1464 1462 /*
1465 1463 * Before trying to look beyond the MAC header, make sure the data
1466 1464 * structures are all contiguously where we can conveniently look at
1467 1465 * them. We'll use a worst-case estimate of how many bytes into the
1468 1466 * packet data we'll be needing to look. Things will be more efficient
1469 1467 * if the driver puts at least this much into the first mblk.
1470 1468 *
1471 1469 * Even after this, we still will have to do checks against the total
1472 1470 * length of the packet. A bad incoming packet may not hold all the
1473 1471 * data structures it says it does.
1474 1472 */
1475 1473 if (MBLKL(mp) < sizeof (struct tr_mac_frm) +
1476 1474 LLC_HDR1_LEN + sizeof (struct rde_pdu) &&
1477 1475 MBLKL(mp) < pktinfo->pktLen) {
1478 1476 /*
1479 1477 * we don't have the entire packet within the first mblk (and
1480 1478 * therefore we didn't do the msgpullup above), AND the first
1481 1479 * mblk may not contain all the data we need to look at.
1482 1480 */
1483 1481 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
1484 1482 if ((pmp = msgpullup(mp, -1)) == NULL) {
1485 1483 #ifdef GLD_DEBUG
1486 1484 if (gld_debug & GLDERRS)
1487 1485 cmn_err(CE_WARN,
1488 1486 "GLD: interpret_tr cannot msgpullup2");
1489 1487 #endif
1490 1488 goto out; /* can't interpret this pkt further */
1491 1489 }
1492 1490 mp = pmp; /* this mblk should contain everything needed */
1493 1491 mh = (struct tr_mac_frm *)mp->b_rptr; /* to look at RIF */
1494 1492 }
1495 1493
1496 1494 if (mh->tr_shost[0] & 0x80) {
1497 1495 /* Routing Information Field (RIF) is present */
1498 1496 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori) + 2)
1499 1497 goto out; /* RIF should have been there! */
1500 1498 rh = (struct gld_ri *)&mh->tr_ri;
1501 1499 if ((rh->len & 1) || rh->len < 2) {
1502 1500 /* Bogus RIF, don't handle this packet */
1503 1501 #ifdef GLD_DEBUG
1504 1502 if (gld_debug & GLDERRS)
1505 1503 cmn_err(CE_WARN,
1506 1504 "GLD: received TR packet with "
1507 1505 "bogus RIF length %d",
1508 1506 rh->len);
1509 1507 #endif
1510 1508 goto out;
1511 1509 }
1512 1510 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori) + rh->len)
1513 1511 goto out; /* RIF should have been there! */
1514 1512 pktinfo->macLen += rh->len;
1515 1513 }
1516 1514
1517 1515 if ((mh->tr_fc & 0xc0) == 0x40) {
1518 1516 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
1519 1517 goto out;
1520 1518
1521 1519 pktinfo->isLLC = 1;
1522 1520
1523 1521 if (pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
1524 1522 goto out;
1525 1523
1526 1524 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
1527 1525 if (ISETHERTYPE(snaphdr)) {
1528 1526 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
1529 1527 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
1530 1528 }
1531 1529
1532 1530 /* Inform the Route Control Component of received LLC frame */
1533 1531 gld_rcc_receive(macinfo, pktinfo, rh,
1534 1532 mp->b_rptr + pktinfo->macLen,
1535 1533 pktinfo->pktLen - pktinfo->macLen);
1536 1534 }
1537 1535 out:
1538 1536 if (pmp != NULL)
1539 1537 freemsg(pmp);
1540 1538
1541 1539 return (0);
1542 1540 }
1543 1541
1544 1542 mblk_t *
1545 1543 gld_unitdata_tr(gld_t *gld, mblk_t *mp)
1546 1544 {
1547 1545 gld_mac_info_t *macinfo = gld->gld_mac_info;
1548 1546 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
1549 1547 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1550 1548 mac_addr_t dhost;
1551 1549 unsigned short type;
1552 1550 mblk_t *nmp, *llcmp, *pmp = NULL;
1553 1551 struct tr_mac_frm_nori *mh;
1554 1552 int hdrlen;
1555 1553 struct gld_ri *rh;
1556 1554
1557 1555 ASSERT(macinfo);
1558 1556
1559 1557 /* extract needed info from the mblk before we maybe reuse it */
1560 1558 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
1561 1559
1562 1560 /* look in the unitdata request for a sap, else use bound one */
1563 1561 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1564 1562 REF_HOST_USHORT(gldp->glda_sap) != 0)
1565 1563 type = REF_HOST_USHORT(gldp->glda_sap);
1566 1564 else
1567 1565 type = gld->gld_sap;
1568 1566
1569 1567 /* includes maximum possible Routing Information Field (RIF) size */
1570 1568 hdrlen = sizeof (struct tr_mac_frm);
1571 1569
1572 1570 /*
1573 1571 * Check whether we need to do EtherType encoding or whether the packet
1574 1572 * is LLC.
1575 1573 */
1576 1574 if (type > GLD_MAX_802_SAP)
1577 1575 hdrlen += sizeof (struct llc_snap_hdr);
1578 1576
1579 1577 /* need a buffer big enough for the headers */
1580 1578 llcmp = nmp = mp->b_cont; /* where the packet payload M_DATA is */
1581 1579
1582 1580 /*
1583 1581 * We are going to need to look at the LLC header, so make sure it
1584 1582 * is contiguously in a single mblk. If we're the ones who create
1585 1583 * the LLC header (below, in the case where sap > 0xff) then we don't
1586 1584 * have to worry about it here.
1587 1585 */
1588 1586 ASSERT(nmp != NULL); /* gld_unitdata guarantees msgdsize > 0 */
1589 1587 if (type <= GLD_MAX_802_SAP) {
1590 1588 if (MBLKL(llcmp) < LLC_HDR1_LEN) {
1591 1589 llcmp = pmp = msgpullup(nmp, LLC_HDR1_LEN);
1592 1590 if (pmp == NULL) {
1593 1591 #ifdef GLD_DEBUG
1594 1592 if (gld_debug & GLDERRS)
1595 1593 cmn_err(CE_WARN,
1596 1594 "GLD: unitdata_tr "
1597 1595 "cannot msgpullup");
1598 1596 #endif
1599 1597 return (NULL);
1600 1598 }
1601 1599 }
1602 1600 }
1603 1601
1604 1602 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
1605 1603 /* it fits at the beginning of the first M_DATA block */
1606 1604 freeb(mp); /* don't need the M_PROTO anymore */
1607 1605 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
1608 1606 /* we can reuse the dl_unitdata_req M_PROTO mblk */
1609 1607 nmp = mp;
1610 1608 DB_TYPE(nmp) = M_DATA;
1611 1609 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1612 1610 } else {
1613 1611 /* we need to allocate one */
1614 1612 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
1615 1613 if (pmp != NULL)
1616 1614 freemsg(pmp);
1617 1615 return (NULL);
1618 1616 }
1619 1617 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1620 1618 linkb(nmp, mp->b_cont);
1621 1619 freeb(mp);
1622 1620 }
1623 1621
1624 1622 /* Got the space, now copy in the header components */
1625 1623 if (type > GLD_MAX_802_SAP) {
1626 1624 /* create the snap header */
1627 1625 struct llc_snap_hdr *snap;
1628 1626 llcmp = nmp; /* LLC header is going to be in this mblk */
1629 1627 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1630 1628 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1631 1629 *snap = llc_snap_def;
1632 1630 SET_NET_USHORT(snap->type, type);
1633 1631 }
1634 1632
1635 1633 /* Hold SR tables still while we maybe point at an entry */
1636 1634 mutex_enter(GLD_SR_MUTEX(macinfo));
1637 1635
1638 1636 gld_rcc_send(macinfo, WR(gld->gld_qptr), dhost, &rh, llcmp->b_rptr);
1639 1637
1640 1638 if (rh != NULL) {
1641 1639 /* copy in the RIF */
1642 1640 ASSERT(rh->len <= sizeof (struct gld_ri));
1643 1641 nmp->b_rptr -= rh->len;
1644 1642 bcopy((caddr_t)rh, (caddr_t)nmp->b_rptr, rh->len);
1645 1643 }
1646 1644
1647 1645 mutex_exit(GLD_SR_MUTEX(macinfo));
1648 1646
1649 1647 /* no longer need the pulled-up mblk */
1650 1648 if (pmp != NULL)
1651 1649 freemsg(pmp);
1652 1650
1653 1651 /*
1654 1652 * fill in token ring header
1655 1653 */
1656 1654 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1657 1655 mh = (struct tr_mac_frm_nori *)nmp->b_rptr;
1658 1656 mh->tr_ac = 0x10;
1659 1657 mh->tr_fc = 0x40;
1660 1658 mac_copy(dhost, mh->tr_dhost, macinfo->gldm_addrlen);
1661 1659
1662 1660 /*
1663 1661 * We access the mac address without the mutex to prevent
1664 1662 * mutex contention (BUG 4211361)
1665 1663 */
1666 1664 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1667 1665 mh->tr_shost, macinfo->gldm_addrlen);
1668 1666
1669 1667 if (rh != NULL)
1670 1668 mh->tr_shost[0] |= 0x80;
1671 1669 else
1672 1670 mh->tr_shost[0] &= ~0x80;
1673 1671
1674 1672 return (nmp);
1675 1673 }
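
The routine above builds its headers back to front: it parks the read pointer at the end of the reserved header room (DB_LIM) and backs it up once for each header it lays down (SNAP, then RIF, then the Token Ring MAC header). Below is a minimal userland sketch of that prepend-by-decrement idea; it is illustrative only (a plain buffer instead of an mblk, with made-up sizes and field values) and is not part of this change.

    #include <stdio.h>
    #include <string.h>

    #define HDRROOM 64      /* assumed worst-case header room, like hdrlen above */

    int
    main(void)
    {
            unsigned char buf[HDRROOM];
            unsigned char *rptr = buf + HDRROOM;    /* start at the limit, like DB_LIM() */
            const unsigned char snap[8] = { 0xaa, 0xaa, 0x03, 0, 0, 0, 0x08, 0x00 };
            const unsigned char rif[2] = { 0x02, 0x30 };    /* illustrative 2-byte RIF */
            unsigned char mac[14] = { 0x10, 0x40 };         /* AC/FC, then dhost/shost (zeroed) */

            /* each header is prepended by moving the pointer down, then copying */
            rptr -= sizeof (snap);
            (void) memcpy(rptr, snap, sizeof (snap));
            rptr -= sizeof (rif);
            (void) memcpy(rptr, rif, sizeof (rif));
            rptr -= sizeof (mac);
            (void) memcpy(rptr, mac, sizeof (mac));

            (void) printf("built %d header bytes\n", (int)(buf + HDRROOM - rptr));
            return (0);
    }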
1676 1674
1677 1675 /*
1678 1676 * We cannot have our client sending us "fastpath" M_DATA messages,
1679 1677 * because to do that we must provide to him a fixed MAC header to
1680 1678 * be prepended to each outgoing packet. But with Source Routing
1681 1679 * media, the length and content of the MAC header changes as the
1682 1680 * routes change, so there is no fixed header we can provide. So
1683 1681 * we decline to accept M_DATA messages if Source Routing is enabled.
1684 1682 */
1685 1683 mblk_t *
1686 1684 gld_fastpath_tr(gld_t *gld, mblk_t *mp)
1687 1685 {
1688 1686 gld_mac_info_t *macinfo = gld->gld_mac_info;
1689 1687 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
1690 1688 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1691 1689 unsigned short type;
1692 1690 mblk_t *nmp;
1693 1691 struct tr_mac_frm_nori *mh;
1694 1692 int hdrlen;
1695 1693
1696 1694 ASSERT(macinfo);
1697 1695
1698 1696 /*
1699 1697 * If we are doing Source Routing, then we cannot provide a fixed
1700 1698 * MAC header, so fail.
1701 1699 */
1702 1700 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled)
1703 1701 return (NULL);
1704 1702
1705 1703 /* look in the unitdata request for a sap, else use bound one */
1706 1704 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1707 1705 REF_HOST_USHORT(gldp->glda_sap) != 0)
1708 1706 type = REF_HOST_USHORT(gldp->glda_sap);
1709 1707 else
1710 1708 type = gld->gld_sap;
1711 1709
1712 1710 hdrlen = sizeof (struct tr_mac_frm_nori);
1713 1711
1714 1712 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste)
1715 1713 hdrlen += ri_ste_def.len;
1716 1714
1717 1715 /*
1718 1716 * Check whether we need to do EtherType encoding or whether the packet
1719 1717 * will be LLC.
1720 1718 */
1721 1719 if (type > GLD_MAX_802_SAP)
1722 1720 hdrlen += sizeof (struct llc_snap_hdr);
1723 1721
1724 1722 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1725 1723 return (NULL);
1726 1724
1727 1725 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1728 1726
1729 1727 /* Got the space, now copy in the header components */
1730 1728
1731 1729 if (type > GLD_MAX_802_SAP) {
1732 1730 /* create the snap header */
1733 1731 struct llc_snap_hdr *snap;
1734 1732 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1735 1733 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1736 1734 *snap = llc_snap_def;
1737 1735 snap->type = htons(type); /* we know it's aligned */
1738 1736 }
1739 1737
1740 1738 /* RDE is disabled, use NULL RIF, or STE RIF */
1741 1739 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste) {
1742 1740 nmp->b_rptr -= ri_ste_def.len;
1743 1741 bcopy((caddr_t)&ri_ste_def, (caddr_t)nmp->b_rptr,
1744 1742 ri_ste_def.len);
1745 1743 }
1746 1744
1747 1745 /*
1748 1746 * fill in token ring header
1749 1747 */
1750 1748 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1751 1749 mh = (struct tr_mac_frm_nori *)nmp->b_rptr;
1752 1750 mh->tr_ac = 0x10;
1753 1751 mh->tr_fc = 0x40;
1754 1752 mac_copy(gldp->glda_addr, mh->tr_dhost, macinfo->gldm_addrlen);
1755 1753
1756 1754 GLDM_LOCK(macinfo, RW_WRITER);
1757 1755 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1758 1756 mh->tr_shost, macinfo->gldm_addrlen);
1759 1757 GLDM_UNLOCK(macinfo);
1760 1758
1761 1759 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste)
1762 1760 mh->tr_shost[0] |= 0x80;
1763 1761 else
1764 1762 mh->tr_shost[0] &= ~0x80;
1765 1763
1766 1764 return (nmp);
1767 1765 }
1768 1766
1769 1767 /*
1770 1768 * Route Determination Entity (ISO 8802-2 / IEEE 802.2 : 1994, Section 9)
1771 1769 *
1772 1770 * RDE is an LLC layer entity. GLD is a MAC layer entity. The proper
1773 1771 * solution to this architectural anomaly is to move RDE support out of GLD
1774 1772 * and into LLC where it belongs. In particular, only LLC has the knowledge
1775 1773 * necessary to reply to XID and TEST packets. If and when it comes time to
1776 1774 * move RDE out of GLD to LLC, the LLC-to-GLD interface should be modified
1777 1775 * to use MA_UNITDATA structures rather than DL_UNITDATA structures. Of
1778 1776  * course, GLD will still have to support the DL_ structures
1779 1777 * as long as IP is not layered over LLC. Another, perhaps better, idea
1780 1778 * would be to make RDE an autopush module on top of the token ring drivers:
1781 1779 * RDE would sit between LLC and GLD. It would then also sit between IP and
1782 1780 * GLD, providing services to all clients of GLD/tokenring. In that case,
1783 1781  * GLD would still have to support the DL_ interface for non-
1784 1782  * Token Ring interfaces, using the MA_ interface only for media supporting
1785 1783  * Source Routing.
1786 1784 *
1787 1785 * At present, Token Ring is the only source routing medium we support.
1788 1786 * Since Token Ring is not at this time a strategic network medium for Sun,
1789 1787 * rather than devote a large amount of resources to creating a proper
1790 1788 * architecture and implementation of RDE, we do the minimum necessary to
1791 1789 * get it to work. The interface between the above token ring code and the
1792 1790 * below RDE code is designed to make it relatively easy to change to an
1793 1791 * MA_UNITDATA model later should this ever become a priority.
1794 1792 */
1795 1793
1796 1794 static void gld_send_rqr(gld_mac_info_t *, uchar_t *, struct gld_ri *,
1797 1795 struct rde_pdu *, int);
1798 1796 static void gld_rde_pdu_req(gld_mac_info_t *, queue_t *, uchar_t *,
1799 1797 struct gld_ri *, uchar_t, uchar_t, uchar_t);
1800 1798 static void gld_get_route(gld_mac_info_t *, queue_t *, uchar_t *,
1801 1799 struct gld_ri **, uchar_t, uchar_t);
1802 1800 static void gld_reset_route(gld_mac_info_t *, queue_t *,
1803 1801 uchar_t *, uchar_t, uchar_t);
1804 1802 static void gld_rde_pdu_ind(gld_mac_info_t *, struct gld_ri *, struct rde_pdu *,
1805 1803 int);
1806 1804 static void gld_rif_ind(gld_mac_info_t *, struct gld_ri *, uchar_t *,
1807 1805 uchar_t, uchar_t);
1808 1806 static struct srtab **gld_sr_hash(struct srtab **, uchar_t *, int);
1809 1807 static struct srtab *gld_sr_lookup_entry(gld_mac_info_t *, uchar_t *);
1810 1808 static struct srtab *gld_sr_create_entry(gld_mac_info_t *, uchar_t *);
1811 1809
1812 1810 /*
1813 1811 * This routine implements a modified subset of the 802.2 RDE RCC receive
1814 1812 * actions:
1815 1813 * we implement RCC receive events 3 to 12 (ISO 8802-2:1994 9.6.3.4);
1816 1814 * we omit special handling for the NULL SAP;
1817 1815 * we omit XID/TEST handling;
1818 1816 * we pass all packets (including RDE) upstream to LLC.
1819 1817 */
1820 1818 static void
1821 1819 gld_rcc_receive(gld_mac_info_t *macinfo, pktinfo_t *pktinfo, struct gld_ri *rh,
1822 1820 uchar_t *llcpkt, int llcpktlen)
1823 1821 {
1824 1822 struct llc_snap_hdr *snaphdr = (struct llc_snap_hdr *)(llcpkt);
1825 1823
1826 1824 if (!((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled)
1827 1825 return;
1828 1826
1829 1827 /*
1830 1828 * First, ensure this packet wasn't something we received just
1831 1829 * because we were in promiscuous mode. Since none of the below
1832 1830 * code wants to see group addressed packets anyway, we can do
1833 1831 * this check up front. Since we're doing that, we can omit the
1834 1832 * checks for group addressed packets below.
1835 1833 */
1836 1834 if (!pktinfo->isForMe)
1837 1835 return; /* Event 6 */
1838 1836
1839 1837 /* Process a subset of Route Determination Entity (RDE) packets */
1840 1838 if (snaphdr->d_lsap == LSAP_RDE) {
1841 1839 struct rde_pdu *pdu = (struct rde_pdu *)(llcpkt + LLC_HDR1_LEN);
1842 1840 int pdulen = llcpktlen - LLC_HDR1_LEN;
1843 1841
1844 1842 /* sanity check the PDU */
1845 1843 if ((pdulen < sizeof (struct rde_pdu)) ||
1846 1844 (snaphdr->s_lsap != LSAP_RDE))
1847 1845 return;
1848 1846
1849 1847 /* we only handle route discovery PDUs, not XID/TEST/other */
1850 1848 if (snaphdr->control != CNTL_LLC_UI)
1851 1849 return;
1852 1850
1853 1851 switch (pdu->rde_ptype) {
1854 1852 case RDE_RQC: /* Route Query Command; Events 8 - 11 */
1855 1853 gld_send_rqr(macinfo, pktinfo->shost, rh, pdu, pdulen);
1856 1854 /* FALLTHROUGH */
1857 1855 case RDE_RQR: /* Route Query Response; Event 12 */
1858 1856 case RDE_RS: /* Route Selected; Event 7 */
1859 1857 gld_rde_pdu_ind(macinfo, rh, pdu, pdulen);
1860 1858 break;
1861 1859 default: /* ignore if unrecognized ptype */
1862 1860 return;
1863 1861 }
1864 1862
1865 1863 return;
1866 1864 }
1867 1865
1868 1866 /* Consider routes seen in other IA SRF packets */
1869 1867
1870 1868 if (rh == NULL)
1871 1869 return; /* no RIF; Event 3 */
1872 1870
1873 1871 if ((rh->rt & 0x04) != 0)
1874 1872 return; /* not SRF; Event 5 */
1875 1873
1876 1874 gld_rif_ind(macinfo, rh, pktinfo->shost, snaphdr->s_lsap,
1877 1875 snaphdr->d_lsap); /* Event 4 */
1878 1876 }
1879 1877
1880 1878 /*
1881 1879 * Send RQR: 802.2 9.6.3.4.2(9) RCC Receive Events 8-11
1882 1880 *
1883 1881 * The routing processing really doesn't belong here; it should be handled in
1884 1882 * the LLC layer above. If that were the case then RDE could just send down
1885 1883 * an extra MA_UNITDATA_REQ with the info needed to construct the packet. But
1886 1884 * at the time we get control here, it's not a particularly good time to be
1887 1885 * constructing packets and trying to send them. Specifically, at this layer
1888 1886 * we need to construct the full media packet, which means the below routine
1889 1887 * knows that it is dealing with Token Ring media. If this were instead done
1890 1888 * via a proper MA_UNITDATA interface, the RDE stuff could all be completely
1891 1889 * media independent. But since TR is the only source routing medium we
1892 1890 * support, this works even though it is not clean.
1893 1891 *
1894 1892 * We "know" that the only time we can get here is from the "interpret"
1895 1893 * routine, and only when it was called at receive time.
1896 1894 */
1897 1895 static void
1898 1896 gld_send_rqr(gld_mac_info_t *macinfo, uchar_t *shost, struct gld_ri *rh,
1899 1897 struct rde_pdu *pdu, int pdulen)
1900 1898 {
1901 1899 mblk_t *nmp;
1902 1900 int nlen;
1903 1901 struct tr_mac_frm_nori *nmh;
1904 1902 struct gld_ri *nrh;
1905 1903 struct llc_snap_hdr *nsnaphdr;
1906 1904 struct rde_pdu *npdu;
1907 1905
1908 1906 /* We know and assume we're on the receive path */
1909 1907 ASSERT(GLDM_LOCK_HELD(macinfo));
1910 1908
1911 1909 if (pdulen < sizeof (struct rde_pdu))
1912 1910 return; /* Bad incoming PDU */
1913 1911
1914 1912 nlen = sizeof (struct tr_mac_frm) + LLC_HDR1_LEN +
1915 1913 sizeof (struct rde_pdu);
1916 1914
1917 1915 if ((nmp = allocb(nlen, BPRI_MED)) == NULL)
1918 1916 return;
1919 1917
1920 1918 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1921 1919
1922 1920 nmp->b_rptr -= sizeof (struct rde_pdu);
1923 1921 npdu = (struct rde_pdu *)(nmp->b_rptr);
1924 1922 *npdu = *pdu; /* copy orig/target macaddr/saps */
1925 1923 npdu->rde_ver = 1;
1926 1924 npdu->rde_ptype = RDE_RQR;
1927 1925 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1928 1926 npdu->rde_target_mac, macinfo->gldm_addrlen);
1929 1927
1930 1928 nmp->b_rptr -= LLC_HDR1_LEN;
1931 1929 nsnaphdr = (struct llc_snap_hdr *)(nmp->b_rptr);
1932 1930 nsnaphdr->s_lsap = nsnaphdr->d_lsap = LSAP_RDE;
1933 1931 nsnaphdr->control = CNTL_LLC_UI;
1934 1932
1935 1933 if (rh == NULL || (rh->rt & 0x06) == 0x06 ||
1936 1934 rh->len > sizeof (struct gld_ri)) {
1937 1935 /* no RIF (Event 8), or RIF type STE (Event 9): send ARE RQR */
1938 1936 nmp->b_rptr -= 2;
1939 1937 nrh = (struct gld_ri *)(nmp->b_rptr);
1940 1938 nrh->len = 2;
1941 1939 nrh->rt = RT_ARE;
1942 1940 nrh->dir = 0;
1943 1941 nrh->res = 0;
1944 1942 nrh->mtu = RT_MTU_MAX;
1945 1943 } else {
1946 1944 /*
1947 1945 * RIF must be ARE (Event 10) or SRF (Event 11):
1948 1946 * send SRF (reverse) RQR
1949 1947 */
1950 1948 ASSERT(rh->len <= sizeof (struct gld_ri));
1951 1949 nmp->b_rptr -= rh->len;
1952 1950 nrh = (struct gld_ri *)(nmp->b_rptr);
1953 1951 bcopy(rh, nrh, rh->len); /* copy incoming RIF */
1954 1952 nrh->rt = RT_SRF; /* make it SRF */
1955 1953 nrh->dir ^= 1; /* reverse direction */
1956 1954 }
1957 1955
1958 1956 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1959 1957 nmh = (struct tr_mac_frm_nori *)(nmp->b_rptr);
1960 1958 nmh->tr_ac = 0x10;
1961 1959 nmh->tr_fc = 0x40;
1962 1960 mac_copy(shost, nmh->tr_dhost, macinfo->gldm_addrlen);
1963 1961 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1964 1962 nmh->tr_shost, macinfo->gldm_addrlen);
1965 1963 nmh->tr_shost[0] |= 0x80; /* indicate RIF present */
1966 1964
1967 1965 /*
1968 1966 * Packet assembled; send it.
1969 1967 *
1970 1968 * As noted before, this is not really a good time to be trying to
1971 1969 * send out packets. We have no obvious queue to use if the packet
1972 1970 * can't be sent right away. We pick one arbitrarily.
1973 1971 */
1974 1972 {
1975 1973 gld_vlan_t *vlan;
1976 1974 queue_t *q;
1977 1975
1978 1976 if ((vlan = gld_find_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1979 1977 /* oops, no vlan on the list for this macinfo! */
1980 1978 /* this should not happen */
1981 1979 freeb(nmp);
1982 1980 return;
1983 1981 }
1984 1982 q = vlan->gldv_str_next->gld_qptr;
1985 1983
1986 1984 /*
1987 1985 * Queue the packet and let gld_wsrv
1988 1986 * handle it, thus preventing a panic
1989 1987 * caused by v2 TR in promiscuous mode
1990 1988 * where it attempts to get the mutex
1991 1989 * in this thread while already holding
1992 1990 * it.
1993 1991 */
1994 1992 (void) putbq(WR(q), nmp);
1995 1993 qenable(WR(q));
1996 1994 }
1997 1995 }
1998 1996
1999 1997 /*
2000 1998 * This routine implements a modified subset of the 802.2 RDE RCC send actions:
2001 1999 * we implement RCC send events 5 to 10 (ISO 8802-2:1994 9.6.3.5);
2002 2000 * we omit special handling for the NULL SAP;
2003 2001 * events 11 to 12 are handled by gld_rde_pdu_req below;
2004 2002 * we require an immediate response to our GET_ROUTE_REQUEST.
2005 2003 */
2006 2004 static void
2007 2005 gld_rcc_send(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2008 2006 struct gld_ri **rhp, uchar_t *llcpkt)
2009 2007 {
2010 2008 struct llc_snap_hdr *snaphdr = (struct llc_snap_hdr *)(llcpkt);
2011 2009
2012 2010 /*
2013 2011      * Our caller has to take the mutex because, to avoid an extra bcopy
2014 2012 * of the RIF on every transmit, we pass back a pointer to our sr
2015 2013 * table entry via rhp. He has to keep the mutex until he has a
2016 2014 * chance to copy the RIF out into the outgoing packet, so that we
2017 2015 * don't modify the entry while he's trying to copy it. This is a
2018 2016 * little ugly, but saves the extra bcopy.
2019 2017 */
2020 2018 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2021 2019
2022 2020 *rhp = (struct gld_ri *)NULL; /* start off clean (no RIF) */
2023 2021
2024 2022 if (!((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled) {
2025 2023 /* RDE is disabled -- use NULL or STE always */
2026 2024 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->
2027 2025 rde_str_indicator_ste)
2028 2026 *rhp = &ri_ste_def; /* STE option */
2029 2027 return;
2030 2028 }
2031 2029
2032 2030 if (!(dhost[0] & 0x80)) {
2033 2031 /* individual address; Events 7 - 10 */
2034 2032 if ((snaphdr->control & 0xef) == 0xe3) {
2035 2033 /* TEST command, reset the route */
2036 2034 gld_reset_route(macinfo, q,
2037 2035 dhost, snaphdr->d_lsap, snaphdr->s_lsap);
2038 2036 }
2039 2037 gld_get_route(macinfo, q,
2040 2038 dhost, rhp, snaphdr->d_lsap, snaphdr->s_lsap);
2041 2039 }
2042 2040
2043 2041 if (*rhp == NULL) {
2044 2042 /*
2045 2043 * group address (Events 5 - 6),
2046 2044 * or no route available (Events 8 - 9):
2047 2045 * Need to send NSR or STE, as configured.
2048 2046 */
2049 2047 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->
2050 2048 rde_str_indicator_ste)
2051 2049 *rhp = &ri_ste_def; /* STE option */
2052 2050 }
2053 2051 }
2054 2052
2055 2053 /*
2056 2054 * RCC send events 11 - 12
2057 2055 *
2058 2056 * At present we only handle the RQC ptype.
2059 2057 *
2060 2058 * We "know" that the only time we can get here is from the "unitdata"
2061 2059 * routine, called at wsrv time.
2062 2060 *
2063 2061 * If we ever implement the RS ptype (Event 13), this may no longer be true!
2064 2062 */
2065 2063 static void
2066 2064 gld_rde_pdu_req(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2067 2065 struct gld_ri *rh, uchar_t dsap, uchar_t ssap, uchar_t ptype)
2068 2066 {
2069 2067 mblk_t *nmp;
2070 2068 int nlen;
2071 2069 struct tr_mac_frm_nori *nmh;
2072 2070 struct gld_ri *nrh;
2073 2071 struct llc_snap_hdr *nsnaphdr;
2074 2072 struct rde_pdu *npdu;
2075 2073 int srpresent = 0;
2076 2074
2077 2075 /* if you change this to process other types, review all code below */
2078 2076 ASSERT(ptype == RDE_RQC);
2079 2077 ASSERT(rh == NULL); /* RQC never uses SRF */
2080 2078
2081 2079 nlen = sizeof (struct tr_mac_frm) + LLC_HDR1_LEN +
2082 2080 sizeof (struct rde_pdu);
2083 2081
2084 2082 if ((nmp = allocb(nlen, BPRI_MED)) == NULL)
2085 2083 return;
2086 2084
2087 2085 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
2088 2086
2089 2087 nmp->b_rptr -= sizeof (struct rde_pdu);
2090 2088 npdu = (struct rde_pdu *)(nmp->b_rptr);
2091 2089 npdu->rde_ver = 1;
2092 2090 npdu->rde_ptype = ptype;
2093 2091 mac_copy(dhost, &npdu->rde_target_mac, 6);
2094 2092
2095 2093 /*
2096 2094 * access the mac address without a mutex - take a risk -
2097 2095 * to prevent mutex contention (BUG 4211361)
2098 2096 */
2099 2097 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
2100 2098 &npdu->rde_orig_mac, 6);
2101 2099 npdu->rde_target_sap = dsap;
2102 2100 npdu->rde_orig_sap = ssap;
2103 2101
2104 2102 nmp->b_rptr -= LLC_HDR1_LEN;
2105 2103 nsnaphdr = (struct llc_snap_hdr *)(nmp->b_rptr);
2106 2104 nsnaphdr->s_lsap = nsnaphdr->d_lsap = LSAP_RDE;
2107 2105 nsnaphdr->control = CNTL_LLC_UI;
2108 2106
2109 2107 #if 0 /* we don't need this for now */
2110 2108 if (rh != NULL) {
2111 2109 /* send an SRF frame with specified RIF */
2112 2110 ASSERT(rh->len <= sizeof (struct gld_ri));
2113 2111 nmp->b_rptr -= rh->len;
2114 2112 nrh = (struct gld_ri *)(nmp->b_rptr);
2115 2113 bcopy(rh, nrh, rh->len);
2116 2114 ASSERT(nrh->rt == RT_SRF);
2117 2115 srpresent = 1;
2118 2116 } else
2119 2117 #endif
2120 2118
2121 2119 /* Need to send NSR or STE, as configured. */
2122 2120 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste) {
2123 2121 /* send an STE frame */
2124 2122 nmp->b_rptr -= 2;
2125 2123 nrh = (struct gld_ri *)(nmp->b_rptr);
2126 2124 nrh->len = 2;
2127 2125 nrh->rt = RT_STE;
2128 2126 nrh->dir = 0;
2129 2127 nrh->res = 0;
2130 2128 nrh->mtu = RT_MTU_MAX;
2131 2129 srpresent = 1;
2132 2130 } /* else send an NSR frame */
2133 2131
2134 2132 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
2135 2133 nmh = (struct tr_mac_frm_nori *)(nmp->b_rptr);
2136 2134 nmh->tr_ac = 0x10;
2137 2135 nmh->tr_fc = 0x40;
2138 2136 mac_copy(dhost, nmh->tr_dhost, macinfo->gldm_addrlen);
2139 2137 /*
2140 2138 * access the mac address without a mutex - take a risk -
2141 2139 * to prevent mutex contention - BUG 4211361
2142 2140 */
2143 2141 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
2144 2142 nmh->tr_shost, macinfo->gldm_addrlen);
2145 2143
2146 2144 if (srpresent)
2147 2145 nmh->tr_shost[0] |= 0x80;
2148 2146 else
2149 2147 nmh->tr_shost[0] &= ~0x80;
2150 2148
2151 2149 /*
2152 2150 * Packet assembled; send it.
2153 2151 *
2154 2152 * Since we own the SR_MUTEX, we don't want to take the maclock
2155 2153 * mutex (since they are acquired in the opposite order on the
2156 2154 * receive path, so deadlock could occur). We could rearrange
2157 2155 * the code in gld_get_route() and drop the SR_MUTEX around the
2158 2156 * call to gld_rde_pdu_req(), but that's kind of ugly. Rather,
2159 2157 * we just refrain from calling gld_start() from here, and
2160 2158 * instead just queue the packet for wsrv to send next. Besides,
2161 2159 * it's more important to get the packet we're working on out
2162 2160 * quickly than this RQC.
2163 2161 */
2164 2162 (void) putbq(WR(q), nmp);
2165 2163 qenable(WR(q));
2166 2164 }
2167 2165
2168 2166 /*
2169 2167 * Route Determination Component (RDC)
2170 2168 *
2171 2169 * We do not implement separate routes for each SAP, as specified by
2172 2170 * ISO 8802-2; instead we implement only one route per remote mac address.
2173 2171 */
2174 2172 static void
2175 2173 gld_get_route(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2176 2174 struct gld_ri **rhp, uchar_t dsap, uchar_t ssap)
2177 2175 {
2178 2176 struct srtab *sr;
2179 2177 clock_t t = ddi_get_lbolt();
2180 2178
2181 2179 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2182 2180
2183 2181 sr = gld_sr_lookup_entry(macinfo, dhost);
2184 2182
2185 2183 if (sr == NULL) {
2186 2184 /*
2187 2185 * we have no entry -- never heard of this address:
2188 2186 * create an empty entry and initiate RQC
2189 2187 */
2190 2188 sr = gld_sr_create_entry(macinfo, dhost);
2191 2189 gld_rde_pdu_req(macinfo, q, dhost, (struct gld_ri *)NULL,
2192 2190 dsap, ssap, RDE_RQC);
2193 2191 if (sr)
2194 2192 sr->sr_timer = t;
2195 2193 *rhp = NULL; /* we have no route yet */
2196 2194 return;
2197 2195 }
2198 2196
2199 2197 /* we have an entry; see if we know a route yet */
2200 2198
2201 2199 if (sr->sr_ri.len == 0) {
2202 2200 /* Have asked RQC, but no reply (yet) */
2203 2201 if (t - sr->sr_timer >
2204 2202 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout) {
2205 2203 /* RQR overdue, resend RQC */
2206 2204 gld_rde_pdu_req(macinfo, q, dhost,
2207 2205 (struct gld_ri *)NULL, dsap, ssap, RDE_RQC);
2208 2206 sr->sr_timer = t;
2209 2207 }
2210 2208 *rhp = NULL; /* we have no route yet */
2211 2209 return;
2212 2210 }
2213 2211
2214 2212 /* we know a route, or it's local */
2215 2213
2216 2214 /* if it might be stale, reset and get a new one */
2217 2215 if (t - sr->sr_timer >
2218 2216 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout) {
2219 2217 gld_rde_pdu_req(macinfo, q, dhost,
2220 2218 (struct gld_ri *)NULL, dsap, ssap, RDE_RQC);
2221 2219 sr->sr_ri.len = 0;
2222 2220 sr->sr_timer = t;
2223 2221 *rhp = NULL; /* we have no route */
2224 2222 return;
2225 2223 }
2226 2224
2227 2225 if (sr->sr_ri.len == 2) {
2228 2226 /* the remote site is on our local ring -- no route needed */
2229 2227 *rhp = NULL;
2230 2228 return;
2231 2229 }
2232 2230
2233 2231 *rhp = &sr->sr_ri; /* we have a route, return it */
2234 2232 }
2235 2233
2236 2234 /*
2237 2235 * zap the specified entry and reinitiate RQC
2238 2236 */
2239 2237 static void
2240 2238 gld_reset_route(gld_mac_info_t *macinfo, queue_t *q,
2241 2239 uchar_t *dhost, uchar_t dsap, uchar_t ssap)
2242 2240 {
2243 2241 struct srtab *sr;
2244 2242
2245 2243 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2246 2244
2247 2245 sr = gld_sr_create_entry(macinfo, dhost);
2248 2246 gld_rde_pdu_req(macinfo, q, dhost, (struct gld_ri *)NULL,
2249 2247 dsap, ssap, RDE_RQC);
2250 2248 if (sr == NULL)
2251 2249 return;
2252 2250
2253 2251 sr->sr_ri.len = 0;
2254 2252 sr->sr_timer = ddi_get_lbolt();
2255 2253 }
2256 2254
2257 2255 /*
2258 2256 * This routine is called when an RDE PDU is received from our peer.
2259 2257 * If it is an RS (Route Selected) PDU, we adopt the specified route.
2260 2258 * If it is an RQR (reply to our previous RQC), we evaluate the
2261 2259 * specified route in comparison with our current known route, if any,
2262 2260 * and we keep the "better" of the two routes.
2263 2261 */
2264 2262 static void
2265 2263 gld_rde_pdu_ind(gld_mac_info_t *macinfo, struct gld_ri *rh, struct rde_pdu *pdu,
2266 2264 int pdulen)
2267 2265 {
2268 2266 struct srtab *sr;
2269 2267 uchar_t *otherhost;
2270 2268
2271 2269 if (pdulen < sizeof (struct rde_pdu))
2272 2270 return; /* Bad incoming PDU */
2273 2271
2274 2272 if (pdu->rde_ptype == RDE_RQC)
2275 2273 return; /* ignore RQC */
2276 2274
2277 2275 if (pdu->rde_ptype != RDE_RQR && pdu->rde_ptype != RDE_RS) {
2278 2276 #ifdef GLD_DEBUG
2279 2277 if (gld_debug & GLDERRS)
2280 2278 cmn_err(CE_WARN, "gld: bogus RDE ptype 0x%x received",
2281 2279 pdu->rde_ptype);
2282 2280 #endif
2283 2281 return;
2284 2282 }
2285 2283
2286 2284 if (rh == NULL) {
2287 2285 #ifdef GLD_DEBUG
2288 2286 if (gld_debug & GLDERRS)
2289 2287 cmn_err(CE_WARN,
2290 2288 "gld: bogus NULL RIF, ptype 0x%x received",
2291 2289 pdu->rde_ptype);
2292 2290 #endif
2293 2291 return;
2294 2292 }
2295 2293
2296 2294 ASSERT(rh->len >= 2);
2297 2295 ASSERT(rh->len <= sizeof (struct gld_ri));
2298 2296 ASSERT((rh->len & 1) == 0);
2299 2297
2300 2298 if (pdu->rde_ptype == RDE_RQR) {
2301 2299 /* A reply to our RQC has his address as target mac */
2302 2300 otherhost = pdu->rde_target_mac;
2303 2301 } else {
2304 2302 ASSERT(pdu->rde_ptype == RDE_RS);
2305 2303 /* An RS has his address as orig mac */
2306 2304 otherhost = pdu->rde_orig_mac;
2307 2305 }
2308 2306
2309 2307 mutex_enter(GLD_SR_MUTEX(macinfo));
2310 2308
2311 2309 if ((sr = gld_sr_create_entry(macinfo, otherhost)) == NULL) {
2312 2310 mutex_exit(GLD_SR_MUTEX(macinfo));
2313 2311 return; /* oh well, out of memory */
2314 2312 }
2315 2313
2316 2314 if (pdu->rde_ptype == RDE_RQR) {
2317 2315 /* see if new route is better than what we may already have */
2318 2316 if (sr->sr_ri.len != 0 &&
2319 2317 sr->sr_ri.len <= rh->len) {
2320 2318 mutex_exit(GLD_SR_MUTEX(macinfo));
2321 2319 return; /* we have one, and new one is no shorter */
2322 2320 }
2323 2321 }
2324 2322
2325 2323 /* adopt the new route */
2326 2324 bcopy((caddr_t)rh, (caddr_t)&sr->sr_ri, rh->len); /* copy incom RIF */
2327 2325 sr->sr_ri.rt = RT_SRF; /* make it a clean SRF */
2328 2326 sr->sr_ri.dir ^= 1; /* reverse direction */
2329 2327 sr->sr_timer = ddi_get_lbolt();
2330 2328
2331 2329 mutex_exit(GLD_SR_MUTEX(macinfo));
2332 2330 }
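
gld_rde_pdu_ind() keeps an existing route whenever it is no longer than the one offered in an RQR. A tiny stand-alone sketch of that comparison follows; it is illustrative only (lengths are RIF byte counts, as in sr_ri.len), not part of this change.

    #include <stdio.h>

    /*
     * Mirror of the RQR decision above: adopt the offered route only if we
     * have none yet, or if the offered RIF is strictly shorter than ours.
     */
    static const char *
    rqr_decision(int have_len, int offered_len)
    {
            if (have_len != 0 && have_len <= offered_len)
                    return ("keep existing route");
            return ("adopt offered route");
    }

    int
    main(void)
    {
            (void) printf("%s\n", rqr_decision(0, 8));      /* no route yet */
            (void) printf("%s\n", rqr_decision(8, 6));      /* shorter offered */
            (void) printf("%s\n", rqr_decision(6, 8));      /* longer offered */
            return (0);
    }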
2333 2331
2334 2332 /*
2335 2333 * This routine is called when a packet with a RIF is received. Our
2336 2334 * policy is to adopt the route.
2337 2335 */
2338 2336 /* ARGSUSED3 */
2339 2337 static void
2340 2338 gld_rif_ind(gld_mac_info_t *macinfo, struct gld_ri *rh, uchar_t *shost,
2341 2339 uchar_t ssap, uchar_t dsap)
2342 2340 {
2343 2341 struct srtab *sr;
2344 2342
2345 2343 ASSERT(rh != NULL); /* ensure RIF */
2346 2344 ASSERT((rh->rt & 0x04) == 0); /* ensure SRF */
2347 2345 ASSERT(rh->len >= 2);
2348 2346 ASSERT(rh->len <= sizeof (struct gld_ri));
2349 2347 ASSERT((rh->len & 1) == 0);
2350 2348
2351 2349 mutex_enter(GLD_SR_MUTEX(macinfo));
2352 2350
2353 2351 if ((sr = gld_sr_create_entry(macinfo, shost)) == NULL) {
2354 2352 mutex_exit(GLD_SR_MUTEX(macinfo));
2355 2353 return; /* oh well, out of memory */
2356 2354 }
2357 2355
2358 2356 /* we have an entry; fill it in */
2359 2357 bcopy((caddr_t)rh, (caddr_t)&sr->sr_ri, rh->len); /* copy incom RIF */
2360 2358 sr->sr_ri.rt = RT_SRF; /* make it a clean SRF */
2361 2359 sr->sr_ri.dir ^= 1; /* reverse direction */
2362 2360 sr->sr_timer = ddi_get_lbolt();
2363 2361
2364 2362 mutex_exit(GLD_SR_MUTEX(macinfo));
2365 2363 }
2366 2364
2367 2365 static struct srtab **
2368 2366 gld_sr_hash(struct srtab **sr_hash_tbl, uchar_t *addr, int addr_length)
2369 2367 {
2370 2368 uint_t hashval = 0;
2371 2369
2372 2370 while (--addr_length >= 0)
2373 2371 hashval ^= *addr++;
2374 2372
2375 2373 return (&sr_hash_tbl[hashval % SR_HASH_SIZE]);
2376 2374 }
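
gld_sr_hash() folds the MAC address down to a single byte by XOR and picks a bucket modulo SR_HASH_SIZE. A hedged worked example follows; the address and the SR_HASH_SIZE value here are illustrative only (the real constant lives in the GLD private headers).

    #include <stdio.h>

    #define SR_HASH_SIZE    256     /* assumed value, for illustration only */

    int
    main(void)
    {
            unsigned char mac[6] = { 0x00, 0x60, 0x3e, 0x11, 0x22, 0x33 };
            unsigned int hashval = 0;
            int i;

            /* same XOR fold as gld_sr_hash() */
            for (i = 0; i < 6; i++)
                    hashval ^= mac[i];

            /* 0x00^0x60^0x3e^0x11^0x22^0x33 == 0x5e, so bucket 94 here */
            (void) printf("bucket = %u\n", hashval % SR_HASH_SIZE);
            return (0);
    }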
2377 2375
2378 2376 static struct srtab *
2379 2377 gld_sr_lookup_entry(gld_mac_info_t *macinfo, uchar_t *macaddr)
2380 2378 {
2381 2379 struct srtab *sr;
2382 2380
2383 2381 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2384 2382
2385 2383 for (sr = *gld_sr_hash(GLD_SR_HASH(macinfo), macaddr,
2386 2384 macinfo->gldm_addrlen); sr; sr = sr->sr_next)
2387 2385 if (mac_eq(macaddr, sr->sr_mac, macinfo->gldm_addrlen))
2388 2386 return (sr);
2389 2387
2390 2388 return ((struct srtab *)0);
2391 2389 }
2392 2390
2393 2391 static struct srtab *
2394 2392 gld_sr_create_entry(gld_mac_info_t *macinfo, uchar_t *macaddr)
2395 2393 {
2396 2394 struct srtab *sr;
2397 2395 struct srtab **srp;
2398 2396
2399 2397 ASSERT(!(macaddr[0] & 0x80)); /* no group addresses here */
2400 2398 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2401 2399
2402 2400 srp = gld_sr_hash(GLD_SR_HASH(macinfo), macaddr, macinfo->gldm_addrlen);
2403 2401
2404 2402 for (sr = *srp; sr; sr = sr->sr_next)
2405 2403 if (mac_eq(macaddr, sr->sr_mac, macinfo->gldm_addrlen))
2406 2404 return (sr);
2407 2405
2408 2406 if (!(sr = kmem_zalloc(sizeof (struct srtab), KM_NOSLEEP))) {
2409 2407 #ifdef GLD_DEBUG
2410 2408 if (gld_debug & GLDERRS)
2411 2409 cmn_err(CE_WARN,
2412 2410                          "gld: gld_sr_create_entry kmem_zalloc failed");
2413 2411 #endif
2414 2412 return ((struct srtab *)0);
2415 2413 }
2416 2414
2417 2415 bcopy((caddr_t)macaddr, (caddr_t)sr->sr_mac, macinfo->gldm_addrlen);
2418 2416
2419 2417 sr->sr_next = *srp;
2420 2418 *srp = sr;
2421 2419 return (sr);
2422 2420 }
2423 2421
2424 2422 static void
2425 2423 gld_sr_clear(gld_mac_info_t *macinfo)
2426 2424 {
2427 2425 int i;
2428 2426 struct srtab **sr_hash_tbl = GLD_SR_HASH(macinfo);
2429 2427 struct srtab **srp, *sr;
2430 2428
2431 2429 /*
2432 2430 * Walk through the table, deleting all entries.
2433 2431 *
2434 2432 * Only called from uninit, so don't need the mutex.
2435 2433 */
2436 2434 for (i = 0; i < SR_HASH_SIZE; i++) {
2437 2435 for (srp = &sr_hash_tbl[i]; (sr = *srp) != NULL; ) {
2438 2436 *srp = sr->sr_next;
2439 2437 kmem_free((char *)sr, sizeof (struct srtab));
2440 2438 }
2441 2439 }
2442 2440 }
2443 2441
2444 2442 #ifdef DEBUG
2445 2443 void
2446 2444 gld_sr_dump(gld_mac_info_t *macinfo)
2447 2445 {
2448 2446 int i, j;
2449 2447 struct srtab **sr_hash_tbl;
2450 2448 struct srtab *sr;
2451 2449
2452 2450 sr_hash_tbl = GLD_SR_HASH(macinfo);
2453 2451 if (sr_hash_tbl == NULL)
2454 2452 return;
2455 2453
2456 2454 mutex_enter(GLD_SR_MUTEX(macinfo));
2457 2455
2458 2456 /*
2459 2457 * Walk through the table, printing all entries
2460 2458 */
2461 2459 cmn_err(CE_NOTE, "GLD Source Routing Table (0x%p):", (void *)macinfo);
2462 2460 cmn_err(CE_CONT, "Addr len,rt,dir,mtu,res rng,brg0 rng,brg1...\n");
2463 2461 for (i = 0; i < SR_HASH_SIZE; i++) {
2464 2462 for (sr = sr_hash_tbl[i]; sr; sr = sr->sr_next) {
2465 2463 cmn_err(CE_CONT,
2466 2464 "%x:%x:%x:%x:%x:%x %d,%x,%x,%x,%x ",
2467 2465 sr->sr_mac[0], sr->sr_mac[1], sr->sr_mac[2],
2468 2466 sr->sr_mac[3], sr->sr_mac[4], sr->sr_mac[5],
2469 2467 sr->sr_ri.len, sr->sr_ri.rt, sr->sr_ri.dir,
2470 2468 sr->sr_ri.mtu, sr->sr_ri.res);
2471 2469 if (sr->sr_ri.len)
2472 2470 for (j = 0; j < (sr->sr_ri.len - 2) / 2; j++)
2473 2471 cmn_err(CE_CONT, "%x ",
2474 2472 REF_NET_USHORT(*(unsigned short *)
2475 2473 &sr->sr_ri.rd[j]));
2476 2474 cmn_err(CE_CONT, "\n");
2477 2475 }
2478 2476 }
2479 2477
2480 2478 mutex_exit(GLD_SR_MUTEX(macinfo));
2481 2479 }
2482 2480 #endif