Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/bge/bge_main2.c
+++ new/usr/src/uts/common/io/bge/bge_main2.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010-2013, by Broadcom, Inc.
24 24 * All Rights Reserved.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2002, 2010, Oracle and/or its affiliates.
29 29 * All rights reserved.
30 30 */
31 31
32 32 #include "bge_impl.h"
33 33 #include <sys/sdt.h>
34 34 #include <sys/mac_provider.h>
35 35 #include <sys/mac.h>
36 36 #include <sys/mac_flow.h>
37 37
38 38
39 39 #ifndef STRINGIFY
40 40 #define XSTRINGIFY(x) #x
41 41 #define STRINGIFY(x) XSTRINGIFY(x)
42 42 #endif
43 43
44 44 /*
45 45 * This is the string displayed by modinfo, etc.
46 46 */
47 47 static char bge_ident[] = "Broadcom Gb Ethernet";
48 48
49 49 /*
50 50 * Property names
51 51 */
52 52 static char debug_propname[] = "bge-debug-flags";
53 53 static char clsize_propname[] = "cache-line-size";
54 54 static char latency_propname[] = "latency-timer";
55 55 static char localmac_boolname[] = "local-mac-address?";
56 56 static char localmac_propname[] = "local-mac-address";
57 57 static char macaddr_propname[] = "mac-address";
58 58 static char subdev_propname[] = "subsystem-id";
59 59 static char subven_propname[] = "subsystem-vendor-id";
60 60 static char rxrings_propname[] = "bge-rx-rings";
61 61 static char txrings_propname[] = "bge-tx-rings";
62 62 static char eee_propname[] = "bge-eee";
63 63 static char fm_cap[] = "fm-capable";
64 64 static char default_mtu[] = "default_mtu";
65 65
66 66 static int bge_add_intrs(bge_t *, int);
67 67 static void bge_rem_intrs(bge_t *);
68 68 static int bge_unicst_set(void *, const uint8_t *, int);
69 69 static int bge_addmac(void *, const uint8_t *);
70 70 static int bge_remmac(void *, const uint8_t *);
71 71
72 72 /*
73 73 * Describes the chip's DMA engine
74 74 */
75 75 static ddi_dma_attr_t dma_attr = {
76 76 DMA_ATTR_V0, /* dma_attr_version */
77 77 0x0000000000000000ull, /* dma_attr_addr_lo */
78 78 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
79 79 0x00000000FFFFFFFFull, /* dma_attr_count_max */
80 80 0x0000000000000001ull, /* dma_attr_align */
81 81 0x00000FFF, /* dma_attr_burstsizes */
82 82 0x00000001, /* dma_attr_minxfer */
83 83 0x000000000000FFFFull, /* dma_attr_maxxfer */
84 84 0x00000000FFFFFFFFull, /* dma_attr_seg */
85 85 1, /* dma_attr_sgllen */
86 86 0x00000001, /* dma_attr_granular */
87 87 DDI_DMA_FLAGERR /* dma_attr_flags */
88 88 };
89 89
90 90 /*
91 91 * PIO access attributes for registers
92 92 */
93 93 static ddi_device_acc_attr_t bge_reg_accattr = {
94 94 DDI_DEVICE_ATTR_V1,
95 95 DDI_NEVERSWAP_ACC,
96 96 DDI_STRICTORDER_ACC,
97 97 DDI_FLAGERR_ACC
98 98 };
99 99
100 100 /*
101 101 * DMA access attributes for descriptors: NOT to be byte swapped.
102 102 */
103 103 static ddi_device_acc_attr_t bge_desc_accattr = {
104 104 DDI_DEVICE_ATTR_V0,
105 105 DDI_NEVERSWAP_ACC,
106 106 DDI_STRICTORDER_ACC
107 107 };
108 108
109 109 /*
110 110 * DMA access attributes for data: NOT to be byte swapped.
111 111 */
112 112 static ddi_device_acc_attr_t bge_data_accattr = {
113 113 DDI_DEVICE_ATTR_V0,
114 114 DDI_NEVERSWAP_ACC,
115 115 DDI_STRICTORDER_ACC
116 116 };
117 117
118 118 static int bge_m_start(void *);
119 119 static void bge_m_stop(void *);
120 120 static int bge_m_promisc(void *, boolean_t);
121 121 static int bge_m_unicst(void * pArg, const uint8_t *);
122 122 static int bge_m_multicst(void *, boolean_t, const uint8_t *);
123 123 static void bge_m_resources(void * arg);
124 124 static void bge_m_ioctl(void *, queue_t *, mblk_t *);
125 125 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *);
126 126 static int bge_unicst_set(void *, const uint8_t *,
127 127 int);
128 128 static int bge_m_setprop(void *, const char *, mac_prop_id_t,
129 129 uint_t, const void *);
130 130 static int bge_m_getprop(void *, const char *, mac_prop_id_t,
131 131 uint_t, void *);
132 132 static void bge_m_propinfo(void *, const char *, mac_prop_id_t,
133 133 mac_prop_info_handle_t);
134 134 static int bge_set_priv_prop(bge_t *, const char *, uint_t,
135 135 const void *);
136 136 static int bge_get_priv_prop(bge_t *, const char *, uint_t,
137 137 void *);
138 138 static void bge_priv_propinfo(const char *,
139 139 mac_prop_info_handle_t);
140 140
/*
 * GLDv3 entry-point vector registered with mac_register(9F).
 *
 * The first member is the mc_callbacks flag word; the optional
 * MC_* capability bits (and their matching function slots below)
 * are conditionally compiled so this source builds against MAC
 * framework versions that may lack some of them.
 */
static mac_callbacks_t bge_m_callbacks = {
	MC_IOCTL
#ifdef MC_RESOURCES
	    | MC_RESOURCES
#endif
#ifdef MC_SETPROP
	    | MC_SETPROP
#endif
#ifdef MC_GETPROP
	    | MC_GETPROP
#endif
#ifdef MC_PROPINFO
	    | MC_PROPINFO
#endif
	    | MC_GETCAPAB,
	bge_m_stat,		/* mc_getstat */
	bge_m_start,		/* mc_start */
	bge_m_stop,		/* mc_stop */
	bge_m_promisc,		/* mc_setpromisc */
	bge_m_multicst,		/* mc_multicst */
	bge_m_unicst,		/* mc_unicst */
	bge_m_tx,		/* mc_tx */
#ifdef MC_RESOURCES
	bge_m_resources,	/* mc_resources */
#else
	NULL,
#endif
	bge_m_ioctl,		/* mc_ioctl */
	bge_m_getcapab,		/* mc_getcapab */
#ifdef MC_OPEN
	NULL,			/* mc_open */
	NULL,			/* mc_close */
#endif
#ifdef MC_SETPROP
	bge_m_setprop,		/* mc_setprop */
#endif
#ifdef MC_GETPROP
	bge_m_getprop,		/* mc_getprop */
#endif
#ifdef MC_PROPINFO
	bge_m_propinfo		/* mc_propinfo */
#endif
};
184 184
/*
 * Driver-private property names (MAC_PROP_PRIVATE), handled by
 * bge_set_priv_prop()/bge_get_priv_prop(); NULL-terminated as
 * required by the MAC framework.
 */
char *bge_priv_prop[] = {
	"_adv_asym_pause_cap",
	"_adv_pause_cap",
	"_drain_max",
	"_msi_cnt",
	"_rx_intr_coalesce_blank_time",
	"_tx_intr_coalesce_blank_time",
	"_rx_intr_coalesce_pkt_cnt",
	"_tx_intr_coalesce_pkt_cnt",
	NULL
};

/* All-zero Ethernet address, used as an "empty MAC slot" marker. */
uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
198 198 /*
199 199 * ========== Transmit and receive ring reinitialisation ==========
200 200 */
201 201
202 202 /*
203 203 * These <reinit> routines each reset the specified ring to an initial
204 204 * state, assuming that the corresponding <init> routine has already
205 205 * been called exactly once.
206 206 */
207 207
/*
 * bge_reinit_send_ring() -- return a send ring to its post-<init> state.
 *
 * Resets the ring's software indexes and statistics, rebuilds the free
 * tx-buffer push/pop queues from the txbuf array, and clears all the
 * hardware Send Buffer Descriptors.  Caller must hold srp->tc_lock
 * (asserted below); the txbuf queue locks are taken here.
 */
static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue.
	 * NB: lock order here is freetxbuf_lock then txbuf_lock;
	 * released below in the reverse order.
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue: every tx buffer is
	 * pushed (LIFO) onto this queue so all buffers start free.
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}
273 273
274 274 static void
275 275 bge_reinit_recv_ring(recv_ring_t *rrp)
276 276 {
277 277 /*
278 278 * Reinitialise control variables ...
279 279 */
280 280 rrp->rx_next = 0;
281 281 }
282 282
/*
 * bge_reinit_buff_ring() -- return a receive producer (buffer) ring to
 * its initial state.
 *
 * Rewrites every hardware Receive Buffer Descriptor from the software
 * shadow ring (buffer DMA address, index, length, opaque token and the
 * ring-type flag selected by <ring>), syncs the descriptors to the
 * device, and resets the ring's refill index.
 */
static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	/* RBD flag to stamp on each descriptor, indexed by ring number */
	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 * (rf_next points at the last slot, i.e. the whole ring is full)
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}
327 327
328 328 /*
329 329 * Reinitialize all rings
330 330 */
331 331 static void
332 332 bge_reinit_rings(bge_t *bgep)
333 333 {
334 334 uint32_t ring;
335 335
336 336 ASSERT(mutex_owned(bgep->genlock));
337 337
338 338 /*
339 339 * Send Rings ...
340 340 */
341 341 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
342 342 bge_reinit_send_ring(&bgep->send[ring]);
343 343
344 344 /*
345 345 * Receive Return Rings ...
346 346 */
347 347 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
348 348 bge_reinit_recv_ring(&bgep->recv[ring]);
349 349
350 350 /*
351 351 * Receive Producer Rings ...
352 352 */
353 353 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
354 354 bge_reinit_buff_ring(&bgep->buff[ring], ring);
355 355 }
356 356
357 357 /*
358 358 * ========== Internal state management entry points ==========
359 359 */
360 360
361 361 #undef BGE_DBG
362 362 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */
363 363
364 364 /*
365 365 * These routines provide all the functionality required by the
366 366 * corresponding GLD entry points, but don't update the GLD state
367 367 * so they can be called internally without disturbing our record
368 368 * of what GLD thinks we should be doing ...
369 369 */
370 370
371 371 /*
372 372 * bge_reset() -- reset h/w & rings to initial state
373 373 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state).
	 * Acquisition order: rx locks, rf locks, errlock, tx locks,
	 * tc locks; released below in exactly the reverse order.
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
425 425
426 426 /*
427 427 * bge_stop() -- stop processing, don't reset h/w or rings
428 428 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * When IPMI/ASF is enabled the chip must keep running for the
	 * sideband interface, so we only pretend to stop (pseudostop);
	 * otherwise really stop the chip (without a reset).
	 * NB: the braces below deliberately span the #ifdef.
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}
448 448
449 449 /*
450 450 * bge_start() -- start transmitting/receiving
451 451 */
452 452 static int
453 453 bge_start(bge_t *bgep, boolean_t reset_phys)
454 454 {
455 455 int retval;
456 456
457 457 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));
458 458
459 459 ASSERT(mutex_owned(bgep->genlock));
460 460
461 461 /*
462 462 * Start chip processing, including enabling interrupts
463 463 */
464 464 retval = bge_chip_start(bgep, reset_phys);
465 465
466 466 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
467 467 return (retval);
468 468 }
469 469
470 470 /*
471 471 * bge_restart - restart transmitting/receiving after error or suspend
472 472 */
/*
 * bge_restart - restart transmitting/receiving after error or suspend
 *
 * Resets the chip (choosing the ASF reset mode when ASF is enabled),
 * then restarts the MAC if it was previously started.  Caller must
 * hold genlock.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		/* restart the watchdog and drain any queued tx work */
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}
500 500
501 501
502 502 /*
503 503 * ========== Nemo-required management entry points ==========
504 504 */
505 505
506 506 #undef BGE_DBG
507 507 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */
508 508
509 509 /*
510 510 * bge_m_stop() -- stop transmitting/receiving
511 511 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
	} else
		bge_stop(bgep);

	/* tell the MAC layer the link is no longer known */
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 * Skipped under ASF pseudostop since the chip keeps running.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
557 557
558 558 /*
559 559 * bge_m_start() -- start transmitting/receiving
560 560 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	/*
	 * NB: the if-brace opened inside the #ifdef below is closed by
	 * the matching brace after #endif -- take care when editing.
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			/*
			 * Chip was never really stopped (ASF pseudostop),
			 * so just mark it started and refresh link state.
			 */
			bgep->bge_mac_state = BGE_MAC_STARTED;
			/* forcing a mac link update here */
			bge_phys_check(bgep);
			bgep->link_state = (bgep->param_link_up) ?
			    LINK_STATE_UP : LINK_STATE_DOWN;
			mac_link_update(bgep->mh, bgep->link_state);
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->watchdog = 0;
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	/* verify the access handles are still healthy before returning */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
638 638
639 639 /*
640 640 * bge_unicst_set() -- set the physical network address
641 641 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade server)
			 * may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_unicst_set($%p) done", arg));
	/* verify the access handles are still healthy before returning */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
729 729
730 730 extern void bge_wake_factotum(bge_t *);
731 731
732 732 static boolean_t
733 733 bge_param_locked(mac_prop_id_t pr_num)
734 734 {
735 735 /*
736 736 * All adv_* parameters are locked (read-only) while
737 737 * the device is in any sort of loopback mode ...
738 738 */
739 739 switch (pr_num) {
740 740 case MAC_PROP_ADV_1000FDX_CAP:
741 741 case MAC_PROP_EN_1000FDX_CAP:
742 742 case MAC_PROP_ADV_1000HDX_CAP:
743 743 case MAC_PROP_EN_1000HDX_CAP:
744 744 case MAC_PROP_ADV_100FDX_CAP:
745 745 case MAC_PROP_EN_100FDX_CAP:
746 746 case MAC_PROP_ADV_100HDX_CAP:
747 747 case MAC_PROP_EN_100HDX_CAP:
748 748 case MAC_PROP_ADV_10FDX_CAP:
749 749 case MAC_PROP_EN_10FDX_CAP:
750 750 case MAC_PROP_ADV_10HDX_CAP:
751 751 case MAC_PROP_EN_10HDX_CAP:
752 752 case MAC_PROP_AUTONEG:
753 753 case MAC_PROP_FLOWCTRL:
754 754 return (B_TRUE);
755 755 }
756 756 return (B_FALSE);
757 757 }
758 758 /*
759 759 * callback functions for set/get of properties
760 760 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		/* 5906-series parts cannot enable gigabit modes */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	/*
	 * The en_* speed/duplex settings update both the enabled and
	 * advertised parameters, then reprogram the PHY (below).
	 */
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
	reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		/* no-op if the MTU is unchanged */
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		/* jumbo frames not supported on all chip variants */
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		/* MTU can only be changed while the MAC is stopped */
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		/*
		 * DMA buffers must be re-sized for the new MTU; flag the
		 * condition and let the factotum thread do the reset.
		 */
		bgep->bge_dma_error = B_TRUE;
		bgep->manual_reset = B_TRUE;
		bge_chip_stop(bgep, B_TRUE);
		bge_wake_factotum(bgep);
		err = 0;
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}
929 929
/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED: {
		/* param_link_speed is in Mb/s; MAC expects bits/s */
		uint64_t speed = bgep->param_link_speed * 1000000ull;

		ASSERT(pr_valsize >= sizeof (speed));
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	}
	case MAC_PROP_STATUS:
		ASSERT(pr_valsize >= sizeof (link_state_t));
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL: {
		link_flowctrl_t fl;

		ASSERT(pr_valsize >= sizeof (fl));

		/*
		 * Map the rx/tx pause booleans onto the four
		 * link_flowctrl_t values; exactly one of the
		 * conditions below matches, so fl is always set.
		 */
		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	}
	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		break;
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_100fdx;
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		break;
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_100hdx;
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		break;
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_10fdx;
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		break;
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_10hdx;
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		/* 100BASE-T4 is never supported by this hardware */
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}
1031 1031
/*
 * mc_propinfo(9E) entry point: report permission, default value and
 * range metadata for a MAC property.  The switch sets per-property
 * defaults; afterwards, perms are downgraded to read-only under
 * genlock for properties that cannot currently be changed (loopback
 * active, SERDES-only links, or 5906-series 1000Mb limits).
 */
static void
bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	bge_t *bgep = barg;
	int flags = bgep->chipid.flags;

	/*
	 * By default permissions are read/write unless specified
	 * otherwise by the driver.
	 */

	switch (pr_num) {
	/* Link status/speed and ADV_* capabilities are always read-only */
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_STATUS:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
		/* 5906-series parts are 10/100 only: default 1000Mb off */
		if (DEVICE_5906_SERIES_CHIPSETS(bgep))
			mac_prop_info_set_default_uint8(prh, 0);
		else
			mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
		/* SERDES (fibre) links don't do 10/100: default them off */
		mac_prop_info_set_default_uint8(prh,
		    (flags & CHIP_FLAG_SERDES) ? 0 : 1);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_FLOWCTRL:
		mac_prop_info_set_default_link_flowctrl(prh,
		    LINK_FLOWCTRL_BI);
		break;

	case MAC_PROP_MTU:
		/* Jumbo frames only when the chip supports them */
		mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
		    (flags & CHIP_FLAG_NO_JUMBO) ?
		    BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
		break;

	case MAC_PROP_PRIVATE:
		bge_priv_propinfo(pr_name, prh);
		break;
	}

	/*
	 * Downgrade to read-only any property that cannot be changed in
	 * the current configuration; must hold genlock to examine
	 * param_loop_mode consistently.
	 */
	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
	mutex_exit(bgep->genlock);
}
1109 1109
1110 1110 /* ARGSUSED */
1111 1111 static int
1112 1112 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
1113 1113 const void *pr_val)
1114 1114 {
1115 1115 int err = 0;
1116 1116 long result;
1117 1117
1118 1118 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1119 1119 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1120 1120 if (result > 1 || result < 0) {
1121 1121 err = EINVAL;
1122 1122 } else {
1123 1123 bgep->param_adv_pause = (uint32_t)result;
1124 1124 if (bge_reprogram(bgep) == IOC_INVAL)
1125 1125 err = EINVAL;
1126 1126 }
1127 1127 return (err);
1128 1128 }
1129 1129 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1130 1130 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1131 1131 if (result > 1 || result < 0) {
1132 1132 err = EINVAL;
1133 1133 } else {
1134 1134 bgep->param_adv_asym_pause = (uint32_t)result;
1135 1135 if (bge_reprogram(bgep) == IOC_INVAL)
1136 1136 err = EINVAL;
1137 1137 }
1138 1138 return (err);
1139 1139 }
1140 1140 if (strcmp(pr_name, "_drain_max") == 0) {
1141 1141
1142 1142 /*
1143 1143 * on the Tx side, we need to update the h/w register for
1144 1144 * real packet transmission per packet. The drain_max parameter
1145 1145 * is used to reduce the register access. This parameter
1146 1146 * controls the max number of packets that we will hold before
1147 1147 * updating the bge h/w to trigger h/w transmit. The bge
1148 1148 * chipset usually has a max of 512 Tx descriptors, thus
1149 1149 * the upper bound on drain_max is 512.
1150 1150 */
1151 1151 if (pr_val == NULL) {
1152 1152 err = EINVAL;
1153 1153 return (err);
1154 1154 }
1155 1155 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1156 1156 if (result > 512 || result < 1)
1157 1157 err = EINVAL;
1158 1158 else {
1159 1159 bgep->param_drain_max = (uint32_t)result;
1160 1160 if (bge_reprogram(bgep) == IOC_INVAL)
1161 1161 err = EINVAL;
1162 1162 }
1163 1163 return (err);
1164 1164 }
1165 1165 if (strcmp(pr_name, "_msi_cnt") == 0) {
1166 1166
1167 1167 if (pr_val == NULL) {
1168 1168 err = EINVAL;
1169 1169 return (err);
1170 1170 }
1171 1171 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1172 1172 if (result > 7 || result < 0)
1173 1173 err = EINVAL;
1174 1174 else {
1175 1175 bgep->param_msi_cnt = (uint32_t)result;
1176 1176 if (bge_reprogram(bgep) == IOC_INVAL)
1177 1177 err = EINVAL;
1178 1178 }
1179 1179 return (err);
1180 1180 }
1181 1181 if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
1182 1182 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1183 1183 return (EINVAL);
1184 1184 if (result < 0)
1185 1185 err = EINVAL;
1186 1186 else {
1187 1187 bgep->chipid.rx_ticks_norm = (uint32_t)result;
1188 1188 bge_chip_coalesce_update(bgep);
1189 1189 }
1190 1190 return (err);
1191 1191 }
1192 1192
1193 1193 if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
1194 1194 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1195 1195 return (EINVAL);
1196 1196
1197 1197 if (result < 0)
1198 1198 err = EINVAL;
1199 1199 else {
1200 1200 bgep->chipid.rx_count_norm = (uint32_t)result;
1201 1201 bge_chip_coalesce_update(bgep);
1202 1202 }
1203 1203 return (err);
1204 1204 }
1205 1205 if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
1206 1206 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1207 1207 return (EINVAL);
1208 1208 if (result < 0)
1209 1209 err = EINVAL;
1210 1210 else {
1211 1211 bgep->chipid.tx_ticks_norm = (uint32_t)result;
1212 1212 bge_chip_coalesce_update(bgep);
1213 1213 }
1214 1214 return (err);
1215 1215 }
1216 1216
1217 1217 if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
1218 1218 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1219 1219 return (EINVAL);
1220 1220
1221 1221 if (result < 0)
1222 1222 err = EINVAL;
1223 1223 else {
1224 1224 bgep->chipid.tx_count_norm = (uint32_t)result;
1225 1225 bge_chip_coalesce_update(bgep);
1226 1226 }
1227 1227 return (err);
1228 1228 }
1229 1229 return (ENOTSUP);
1230 1230 }
1231 1231
1232 1232 static int
1233 1233 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
1234 1234 void *pr_val)
1235 1235 {
1236 1236 int value;
1237 1237
1238 1238 if (strcmp(pr_name, "_adv_pause_cap") == 0)
1239 1239 value = bge->param_adv_pause;
1240 1240 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
1241 1241 value = bge->param_adv_asym_pause;
1242 1242 else if (strcmp(pr_name, "_drain_max") == 0)
1243 1243 value = bge->param_drain_max;
1244 1244 else if (strcmp(pr_name, "_msi_cnt") == 0)
1245 1245 value = bge->param_msi_cnt;
1246 1246 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
1247 1247 value = bge->chipid.rx_ticks_norm;
1248 1248 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
1249 1249 value = bge->chipid.tx_ticks_norm;
1250 1250 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
1251 1251 value = bge->chipid.rx_count_norm;
1252 1252 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
1253 1253 value = bge->chipid.tx_count_norm;
1254 1254 else
1255 1255 return (ENOTSUP);
1256 1256
1257 1257 (void) snprintf(pr_val, pr_valsize, "%d", value);
1258 1258 return (0);
1259 1259 }
1260 1260
1261 1261 static void
1262 1262 bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph)
1263 1263 {
1264 1264 char valstr[64];
1265 1265 int value;
1266 1266
1267 1267 if (strcmp(pr_name, "_adv_pause_cap") == 0)
1268 1268 value = 1;
1269 1269 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
1270 1270 value = 1;
1271 1271 else if (strcmp(pr_name, "_drain_max") == 0)
1272 1272 value = 64;
1273 1273 else if (strcmp(pr_name, "_msi_cnt") == 0)
1274 1274 value = 0;
1275 1275 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
1276 1276 value = bge_rx_ticks_norm;
1277 1277 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
1278 1278 value = bge_tx_ticks_norm;
1279 1279 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
1280 1280 value = bge_rx_count_norm;
1281 1281 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
1282 1282 value = bge_tx_count_norm;
1283 1283 else
1284 1284 return;
1285 1285
1286 1286 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1287 1287 mac_prop_info_set_default_str(mph, valstr);
1288 1288 }
1289 1289
1290 1290
1291 1291 static int
1292 1292 bge_m_unicst(void * arg, const uint8_t * mac_addr)
1293 1293 {
1294 1294 bge_t *bgep = arg;
1295 1295 int i;
1296 1296
1297 1297 /* XXX sets the mac address for all ring slots... OK? */
1298 1298 for (i = 0; i < MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); i++)
1299 1299 bge_addmac(&bgep->recv[i], mac_addr);
1300 1300
1301 1301 return (0);
1302 1302 }
1303 1303
1304 1304
1305 1305 /*
1306 1306 * Compute the index of the required bit in the multicast hash map.
1307 1307 * This must mirror the way the hardware actually does it!
1308 1308 * See Broadcom document 570X-PG102-R page 125.
1309 1309 */
1310 1310 static uint32_t
1311 1311 bge_hash_index(const uint8_t *mca)
1312 1312 {
1313 1313 uint32_t hash;
1314 1314
1315 1315 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);
1316 1316
1317 1317 return (hash);
1318 1318 }
1319 1319
1320 1320 /*
1321 1321 * bge_m_multicst_add() -- enable/disable a multicast address
1322 1322 */
/*
 * mc_multicst(9E) entry point: enable (add) or disable (!add) receipt
 * of the multicast address 'mca'.  Maintains a per-bucket reference
 * count so the hash bit is only set on the 0 -> 1 transition and
 * cleared on the 1 -> 0 transition; any change to the hash map is
 * pushed to the chip via bge_chip_sync().
 *
 * Returns 0 on success, EIO if the chip is in autorecovery or a
 * register access check fails.
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0). If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		/*
		 * NOTE(review): --(*refp) on an uint8_t refcount would
		 * wrap to 255 if remove were ever called without a
		 * matching add — presumably the MAC layer guarantees
		 * balanced add/remove calls; confirm.
		 */
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	/* Final FMA access checks on the config and register handles */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
1414 1414
1415 1415 /*
1416 1416 * bge_m_promisc() -- set or reset promiscuous mode on the board
1417 1417 *
1418 1418 * Program the hardware to enable/disable promiscuous and/or
1419 1419 * receive-all-multicast modes.
1420 1420 */
/*
 * mc_setpromisc(9E) entry point: store the requested promiscuous
 * state in the soft state and push it to the chip via bge_chip_sync().
 * Returns 0 on success, EIO during autorecovery or on a failed FMA
 * access check.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	/* Final FMA access checks on the config and register handles */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}
1464 1464
1465 1465 #ifdef MC_RESOURCES
1466 1466
1467 1467 static void
1468 1468 bge_blank(void * arg, time_t tick_cnt, uint_t pkt_cnt)
1469 1469 {
1470 1470 (void)arg;
1471 1471 (void)tick_cnt;
1472 1472 (void)pkt_cnt;
1473 1473 }
1474 1474
1475 1475 static void
1476 1476 bge_m_resources(void * arg)
1477 1477 {
1478 1478 bge_t *bgep = arg;
1479 1479 mac_rx_fifo_t mrf;
1480 1480 int i;
1481 1481
1482 1482 mrf.mrf_type = MAC_RX_FIFO;
1483 1483 mrf.mrf_blank = bge_blank;
1484 1484 mrf.mrf_arg = (void *)bgep;
1485 1485 mrf.mrf_normal_blank_time = 25;
1486 1486 mrf.mrf_normal_pkt_count = 8;
1487 1487
1488 1488 for (i = 0; i < BGE_RECV_RINGS_MAX; i++) {
1489 1489 bgep->macRxResourceHandles[i] =
1490 1490 mac_resource_add(bgep->mh, (mac_resource_t *)&mrf);
1491 1491 }
1492 1492 }
1493 1493
1494 1494 #endif /* MC_RESOURCES */
1495 1495
1496 1496 /*
1497 1497 * Find the slot for the specified unicast address
1498 1498 */
1499 1499 int
1500 1500 bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
1501 1501 {
1502 1502 int slot;
1503 1503
1504 1504 ASSERT(mutex_owned(bgep->genlock));
1505 1505
1506 1506 for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
1507 1507 if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
1508 1508 return (slot);
1509 1509 }
1510 1510
1511 1511 return (-1);
1512 1512 }
1513 1513
1514 1514 /*
1515 1515 * Programs the classifier to start steering packets matching 'mac_addr' to the
1516 1516 * specified ring 'arg'.
1517 1517 */
/*
 * Group-addmac callback (mgi_addmac): program 'mac_addr' into a free
 * unicast address slot and install a two-slot receive-rule pair that
 * steers matching packets to this ring.  Rule ring numbers are 1-based
 * (ring 0 is the default), hence the +1 below.
 *
 * Returns 0 on success; ENOSPC when no address slot is free, EEXIST/
 * EBUSY when this ring already has a rule, ENOMEM on allocation
 * failure, or the error from bge_unicst_set().  On failure the slot
 * just taken is cleared and returned to the free pool.
 */
static int
bge_addmac(void *arg, const uint8_t * mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to an available slot.
	 * The address must not already be programmed anywhere.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	/* Claim the first unused slot; avail > 0 guarantees one exists */
	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	/* First rule: match the high 4 bytes of the destination MAC */
	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	/* Second rule (ANDed with the first): the low 2 bytes */
	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}
1614 1614
1615 1615 /*
1616 1616 * Stop classifying packets matching the MAC address to the specified ring.
1617 1617 */
/*
 * Group-remmac callback (mgi_remmac): undo bge_addmac() — clear the
 * unicast address slot holding 'mac_addr', disable the ring's two
 * receive-rule registers, free the rule bookkeeping and return the
 * slot to the free pool.
 *
 * Returns 0 on success, EINVAL if the address is not programmed or
 * does not match this ring's rule, or the error from bge_unicst_set().
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	/* Disable both rule slots in soft state and in the chip */
	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	/* Return the unicast slot to the free pool */
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}
1670 1670
1671 1671
1672 1672 static int
1673 1673 bge_flag_intr_enable(mac_ring_driver_t ih)
1674 1674 {
1675 1675 recv_ring_t *rrp = (recv_ring_t *)ih;
1676 1676 bge_t *bgep = rrp->bgep;
1677 1677
1678 1678 mutex_enter(bgep->genlock);
1679 1679 rrp->poll_flag = 0;
1680 1680 mutex_exit(bgep->genlock);
1681 1681
1682 1682 return (0);
1683 1683 }
1684 1684
1685 1685 static int
1686 1686 bge_flag_intr_disable(mac_ring_driver_t ih)
1687 1687 {
1688 1688 recv_ring_t *rrp = (recv_ring_t *)ih;
1689 1689 bge_t *bgep = rrp->bgep;
1690 1690
1691 1691 mutex_enter(bgep->genlock);
1692 1692 rrp->poll_flag = 1;
1693 1693 mutex_exit(bgep->genlock);
1694 1694
1695 1695 return (0);
1696 1696 }
1697 1697
1698 1698 static int
1699 1699 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1700 1700 {
1701 1701 recv_ring_t *rx_ring;
1702 1702
1703 1703 rx_ring = (recv_ring_t *)rh;
1704 1704 mutex_enter(rx_ring->rx_lock);
1705 1705 rx_ring->ring_gen_num = mr_gen_num;
1706 1706 mutex_exit(rx_ring->rx_lock);
1707 1707 return (0);
1708 1708 }
1709 1709
1710 1710
1711 1711 /*
 * Callback function for MAC layer to register all rings
1713 1713 * for given ring_group, noted by rg_index.
1714 1714 */
1715 1715 void
1716 1716 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1717 1717 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1718 1718 {
1719 1719 bge_t *bgep = arg;
1720 1720 mac_intr_t *mintr;
1721 1721
1722 1722 switch (rtype) {
1723 1723 case MAC_RING_TYPE_RX: {
1724 1724 recv_ring_t *rx_ring;
1725 1725 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1726 1726 MAC_ADDRESS_REGS_MAX) && index == 0);
1727 1727
1728 1728 rx_ring = &bgep->recv[rg_index];
1729 1729 rx_ring->ring_handle = rh;
1730 1730
1731 1731 infop->mri_driver = (mac_ring_driver_t)rx_ring;
1732 1732 infop->mri_start = bge_ring_start;
1733 1733 infop->mri_stop = NULL;
1734 1734 infop->mri_poll = bge_poll_ring;
1735 1735 infop->mri_stat = bge_rx_ring_stat;
1736 1736
1737 1737 mintr = &infop->mri_intr;
1738 1738 mintr->mi_enable = (mac_intr_enable_t)bge_flag_intr_enable;
1739 1739 mintr->mi_disable = (mac_intr_disable_t)bge_flag_intr_disable;
1740 1740
1741 1741 break;
1742 1742 }
1743 1743 case MAC_RING_TYPE_TX:
1744 1744 default:
1745 1745 ASSERT(0);
1746 1746 break;
1747 1747 }
1748 1748 }
1749 1749
1750 1750 /*
1751 1751 * Fill infop passed as argument
1752 1752 * fill in respective ring_group info
1753 1753 * Each group has a single ring in it. We keep it simple
1754 1754 * and use the same internal handle for rings and groups.
1755 1755 */
1756 1756 void
1757 1757 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
1758 1758 mac_group_info_t * infop, mac_group_handle_t gh)
1759 1759 {
1760 1760 bge_t *bgep = arg;
1761 1761
1762 1762 switch (rtype) {
1763 1763 case MAC_RING_TYPE_RX: {
1764 1764 recv_ring_t *rx_ring;
1765 1765
1766 1766 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1767 1767 MAC_ADDRESS_REGS_MAX));
1768 1768 rx_ring = &bgep->recv[rg_index];
1769 1769 rx_ring->ring_group_handle = gh;
1770 1770
1771 1771 infop->mgi_driver = (mac_group_driver_t)rx_ring;
1772 1772 infop->mgi_start = NULL;
1773 1773 infop->mgi_stop = NULL;
1774 1774 infop->mgi_addmac = bge_addmac;
1775 1775 infop->mgi_remmac = bge_remmac;
1776 1776 infop->mgi_count = 1;
1777 1777 break;
1778 1778 }
1779 1779 case MAC_RING_TYPE_TX:
1780 1780 default:
1781 1781 ASSERT(0);
1782 1782 break;
1783 1783 }
1784 1784 }
1785 1785
1786 1786
1787 1787 /*ARGSUSED*/
1788 1788 static boolean_t
1789 1789 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1790 1790 {
1791 1791 bge_t *bgep = arg;
1792 1792 mac_capab_rings_t *cap_rings;
1793 1793
1794 1794 switch (cap) {
1795 1795 case MAC_CAPAB_HCKSUM: {
1796 1796 uint32_t *txflags = cap_data;
1797 1797
1798 1798 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
1799 1799 break;
1800 1800 }
1801 1801
1802 1802 case MAC_CAPAB_RINGS:
1803 1803 cap_rings = (mac_capab_rings_t *)cap_data;
1804 1804
1805 1805 /* Temporarily disable multiple tx rings. */
1806 1806 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
1807 1807 return (B_FALSE);
1808 1808
1809 1809 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
1810 1810 cap_rings->mr_rnum =
1811 1811 cap_rings->mr_gnum =
1812 1812 MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
1813 1813 cap_rings->mr_rget = bge_fill_ring;
1814 1814 cap_rings->mr_gget = bge_fill_group;
1815 1815 break;
1816 1816
1817 1817 default:
1818 1818 return (B_FALSE);
1819 1819 }
1820 1820 return (B_TRUE);
1821 1821 }
1822 1822
1823 1823 #ifdef NOT_SUPPORTED_XXX
1824 1824
1825 1825 /*
1826 1826 * Loopback ioctl code
1827 1827 */
1828 1828
/*
 * Table of loopback modes reported/accepted via the LB_* ioctls;
 * each entry maps a mode class and name to its BGE_LOOP_* code.
 */
static lb_property_t loopmodes[] = {
	{ normal, "normal", BGE_LOOP_NONE },
	{ external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 },
	{ external, "100Mbps", BGE_LOOP_EXTERNAL_100 },
	{ external, "10Mbps", BGE_LOOP_EXTERNAL_10 },
	{ internal, "PHY", BGE_LOOP_INTERNAL_PHY },
	{ internal, "MAC", BGE_LOOP_INTERNAL_MAC }
};
1837 1837
1838 1838 static enum ioc_reply
1839 1839 bge_set_loop_mode(bge_t *bgep, uint32_t mode)
1840 1840 {
1841 1841 /*
1842 1842 * If the mode isn't being changed, there's nothing to do ...
1843 1843 */
1844 1844 if (mode == bgep->param_loop_mode)
1845 1845 return (IOC_ACK);
1846 1846
1847 1847 /*
1848 1848 * Validate the requested mode and prepare a suitable message
1849 1849 * to explain the link down/up cycle that the change will
1850 1850 * probably induce ...
1851 1851 */
1852 1852 switch (mode) {
1853 1853 default:
1854 1854 return (IOC_INVAL);
1855 1855
1856 1856 case BGE_LOOP_NONE:
1857 1857 case BGE_LOOP_EXTERNAL_1000:
1858 1858 case BGE_LOOP_EXTERNAL_100:
1859 1859 case BGE_LOOP_EXTERNAL_10:
1860 1860 case BGE_LOOP_INTERNAL_PHY:
1861 1861 case BGE_LOOP_INTERNAL_MAC:
1862 1862 break;
1863 1863 }
1864 1864
1865 1865 /*
1866 1866 * All OK; tell the caller to reprogram
1867 1867 * the PHY and/or MAC for the new mode ...
1868 1868 */
1869 1869 bgep->param_loop_mode = mode;
1870 1870 return (IOC_RESTART_ACK);
1871 1871 }
1872 1872
/*
 * Handle the LB_* loopback ioctls.  The payload lives in the second
 * mblk (mp->b_cont); each command's expected payload size is checked
 * against ioc_count before the data is touched.  Returns IOC_REPLY
 * when the payload has been filled in for return to the caller,
 * IOC_INVAL on malformed requests, or the result of
 * bge_set_loop_mode() for LB_SET_MODE.
 */
static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* Report the size of the loopmodes[] table */
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		/* Copy out the whole loopmodes[] table */
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}
1924 1924
1925 1925 #endif /* NOT_SUPPORTED_XXX */
1926 1926
1927 1927 /*
1928 1928 * Specific bge IOCTLs, the gld module handles the generic ones.
1929 1929 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;		/* per-instance soft state */
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* Not one of ours: NAK without taking the lock */
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		/* All chip-level ioctls require privilege (checked below) */
		break;

#ifdef NOT_SUPPORTED_XXX
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		/* Loopback queries are read-only, so unprivileged */
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;
#endif

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	/*
	 * Dispatch to the ioctl handler; every command admitted by the
	 * first switch above is covered here, so the default arm is
	 * unreachable (hence the lint annotation).
	 */
	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

#ifdef NOT_SUPPORTED_XXX
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;
#endif

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 * Only the IOC_RESTART_* statuses request this; all
	 * other statuses deliberately fall through untouched.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	/*
	 * FMA: any access fault on the config or I/O register handles
	 * degrades the service and converts the result to a NAK.
	 */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
2092 2092
2093 2093 /*
2094 2094 * ========== Per-instance setup/teardown code ==========
2095 2095 */
2096 2096
2097 2097 #undef BGE_DBG
2098 2098 #define BGE_DBG BGE_DBG_MEM /* debug flag for this code */
2099 2099 /*
2100 2100 * Allocate an area of memory and a DMA handle for accessing it
2101 2101 */
2102 2102 static int
2103 2103 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
2104 2104 uint_t dma_flags, dma_area_t *dma_p)
2105 2105 {
2106 2106 caddr_t va;
2107 2107 int err;
2108 2108
2109 2109 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
2110 2110 (void *)bgep, memsize, attr_p, dma_flags, dma_p));
2111 2111
2112 2112 /*
2113 2113 * Allocate handle
2114 2114 */
2115 2115 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
2116 2116 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
2117 2117 if (err != DDI_SUCCESS)
2118 2118 return (DDI_FAILURE);
2119 2119
2120 2120 /*
2121 2121 * Allocate memory
2122 2122 */
2123 2123 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
2124 2124 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
2125 2125 &dma_p->acc_hdl);
2126 2126 if (err != DDI_SUCCESS)
2127 2127 return (DDI_FAILURE);
2128 2128
2129 2129 /*
2130 2130 * Bind the two together
2131 2131 */
2132 2132 dma_p->mem_va = va;
2133 2133 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2134 2134 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
2135 2135 &dma_p->cookie, &dma_p->ncookies);
2136 2136
2137 2137 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
2138 2138 dma_p->alength, err, dma_p->ncookies));
2139 2139
2140 2140 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
2141 2141 return (DDI_FAILURE);
2142 2142
2143 2143 dma_p->nslots = ~0U;
2144 2144 dma_p->size = ~0U;
2145 2145 dma_p->token = ~0U;
2146 2146 dma_p->offset = 0;
2147 2147 return (DDI_SUCCESS);
2148 2148 }
2149 2149
2150 2150 /*
2151 2151 * Free one allocated area of DMAable memory
2152 2152 */
2153 2153 static void
2154 2154 bge_free_dma_mem(dma_area_t *dma_p)
2155 2155 {
2156 2156 if (dma_p->dma_hdl != NULL) {
2157 2157 if (dma_p->ncookies) {
2158 2158 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2159 2159 dma_p->ncookies = 0;
2160 2160 }
2161 2161 ddi_dma_free_handle(&dma_p->dma_hdl);
2162 2162 dma_p->dma_hdl = NULL;
2163 2163 }
2164 2164
2165 2165 if (dma_p->acc_hdl != NULL) {
2166 2166 ddi_dma_mem_free(&dma_p->acc_hdl);
2167 2167 dma_p->acc_hdl = NULL;
2168 2168 }
2169 2169 }
2170 2170 /*
2171 2171 * Utility routine to carve a slice off a chunk of allocated memory,
2172 2172 * updating the chunk descriptor accordingly. The size of the slice
2173 2173 * is given by the product of the <qty> and <size> parameters.
2174 2174 */
2175 2175 static void
2176 2176 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
2177 2177 uint32_t qty, uint32_t size)
2178 2178 {
2179 2179 static uint32_t sequence = 0xbcd5704a;
2180 2180 size_t totsize;
2181 2181
2182 2182 totsize = qty*size;
2183 2183 ASSERT(totsize <= chunk->alength);
2184 2184
2185 2185 *slice = *chunk;
2186 2186 slice->nslots = qty;
2187 2187 slice->size = size;
2188 2188 slice->alength = totsize;
2189 2189 slice->token = ++sequence;
2190 2190
2191 2191 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
2192 2192 chunk->alength -= totsize;
2193 2193 chunk->offset += totsize;
2194 2194 chunk->cookie.dmac_laddress += totsize;
2195 2195 chunk->cookie.dmac_size -= totsize;
2196 2196 }
2197 2197
2198 2198 /*
2199 2199 * Initialise the specified Receive Producer (Buffer) Ring, using
2200 2200 * the information in the <dma_area> descriptors that it contains
2201 2201 * to set up all the other fields. This routine should be called
2202 2202 * only once for each ring.
2203 2203 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	/*
	 * Per-ring-type constant tables, indexed by <ring>
	 * (standard / jumbo / mini, in that order): the NIC-memory
	 * shadow ring address, the producer-index mailbox register,
	 * and the slot of this ring's consumer index within the
	 * status block's buff_cons_index[] array.
	 */
	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	/* buffers were split evenly across the BGE_SPLIT DMA areas */
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = (uint16_t)bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all: carve
	 * one bufsize-sized slice per descriptor out of each of the
	 * BGE_SPLIT backing DMA areas in turn.  <pbuf> is a local copy
	 * of the area descriptor, so consuming it here leaves the
	 * master copy in brp->buf[split] untouched.
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}
2278 2278
2279 2279 /*
2280 2280 * Clean up initialisation done above before the memory is freed
2281 2281 */
2282 2282 static void
2283 2283 bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
2284 2284 {
2285 2285 buff_ring_t *brp;
2286 2286 sw_rbd_t *srbdp;
2287 2287
2288 2288 BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
2289 2289 (void *)bgep, ring));
2290 2290
2291 2291 brp = &bgep->buff[ring];
2292 2292 srbdp = brp->sw_rbds;
2293 2293 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));
2294 2294
2295 2295 mutex_destroy(brp->rf_lock);
2296 2296 }
2297 2297
2298 2298 /*
2299 2299 * Initialise the specified Receive (Return) Ring, using the
2300 2300 * information in the <dma_area> descriptors that it contains
2301 2301 * to set up all the other fields. This routine should be called
2302 2302 * only once for each ring.
2303 2303 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103.
	 * (0 means this ring is unused and will be marked disabled below.)
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB.  For a return ring, max_len
	 * holds the slot count; return rings live in host memory only,
	 * so there is no NIC-memory shadow address.
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = (uint16_t)nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data:
	 * back-pointer to the soft state, pointer to this ring's
	 * producer index within the status block, the consumer-index
	 * mailbox register, and the receive lock.
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}
2341 2341
2342 2342
2343 2343 /*
2344 2344 * Clean up initialisation done above before the memory is freed
2345 2345 */
2346 2346 static void
2347 2347 bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
2348 2348 {
2349 2349 recv_ring_t *rrp;
2350 2350
2351 2351 BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
2352 2352 (void *)bgep, ring));
2353 2353
2354 2354 rrp = &bgep->recv[ring];
2355 2355 if (rrp->rx_softint)
2356 2356 ddi_remove_softintr(rrp->rx_softint);
2357 2357 mutex_destroy(rrp->rx_lock);
2358 2358 }
2359 2359
2360 2360 /*
2361 2361 * Initialise the specified Send Ring, using the information in the
2362 2362 * <dma_area> descriptors that it contains to set up all the other
2363 2363 * fields. This routine should be called only once for each ring.
2364 2364 */
static void
bge_init_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	bge_status_t *bsp;
	sw_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;
	sw_txbuf_t *txbuf;

	BGE_TRACE(("bge_init_send_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that host-based send rings
	 * have 512 elements per ring. See 570X-PG102-R page 56.
	 * (0 means the ring is unused and gets marked disabled below.)
	 */
	srp = &bgep->send[ring];
	nslots = srp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512);

	/*
	 * Set up the copy of the h/w RCB
	 */
	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
	srp->hw_rcb.max_len = (uint16_t)nslots;
	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);

	/*
	 * Other one-off initialisation of per-ring data.  All four
	 * locks are initialised even for a disabled (nslots == 0)
	 * ring, because bge_fini_send_ring() destroys them
	 * unconditionally.
	 */
	srp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	if (nslots == 0)
		return;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors, plus the
	 * tx buffer bookkeeping arrays (sized for the maximum number
	 * of tx buffers the ring can ever grow to, BGE_SEND_BUF_MAX).
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
	srp->txbuf_head =
	    kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
	srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
	srp->sw_sbds = ssbdp;
	srp->txbuf = txbuf;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	/* Jumbo-sized buffers allow fewer dynamically-added arrays */
	if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
		srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
	else
		srp->tx_array_max = BGE_SEND_BUF_ARRAY;
	srp->tx_array = 1;

	/*
	 * Chunk tx desc area: one bge_sbd_t slice per ring slot,
	 * carved from a local copy of the descriptor area.
	 */
	desc = srp->desc;
	for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
		bge_slice_chunk(&ssbdp->desc, &desc, 1,
		    sizeof (bge_sbd_t));
	}
	ASSERT(desc.alength == 0);

	/*
	 * Chunk tx buffer area: the initial array (index 0) of tx
	 * buffers is carved evenly from the BGE_SPLIT backing DMA
	 * areas set up by bge_alloc_bufs().
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = srp->buf[0][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &pbuf, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
		ASSERT(pbuf.alength == 0);
	}
}
2456 2456
2457 2457 /*
2458 2458 * Clean up initialisation done above before the memory is freed
2459 2459 */
static void
bge_fini_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	uint32_t array;
	uint32_t split;
	uint32_t nslots;

	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The four locks are destroyed unconditionally: they were
	 * initialised even for a disabled ring (see
	 * bge_init_send_ring()).
	 */
	srp = &bgep->send[ring];
	mutex_destroy(srp->tc_lock);
	mutex_destroy(srp->freetxbuf_lock);
	mutex_destroy(srp->txbuf_lock);
	mutex_destroy(srp->tx_lock);
	nslots = srp->desc.nslots;
	/* disabled ring: no descriptor/buffer arrays were allocated */
	if (nslots == 0)
		return;

	/*
	 * Free the extra tx buffer DMA areas (array indices >= 1)
	 * added at runtime by bge_alloc_txbuf_array().  Array 0 was
	 * carved out of the shared tx buffer area and is deliberately
	 * NOT freed here.
	 */
	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);
	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
	srp->sw_sbds = NULL;
	srp->txbuf_head = NULL;
	srp->txbuf = NULL;
	srp->pktp = NULL;
}
2492 2492
2493 2493 /*
2494 2494 * Initialise all transmit, receive, and buffer rings.
2495 2495 */
2496 2496 void
2497 2497 bge_init_rings(bge_t *bgep)
2498 2498 {
2499 2499 uint32_t ring;
2500 2500
2501 2501 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2502 2502
2503 2503 /*
2504 2504 * Perform one-off initialisation of each ring ...
2505 2505 */
2506 2506 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2507 2507 bge_init_send_ring(bgep, ring);
2508 2508 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2509 2509 bge_init_recv_ring(bgep, ring);
2510 2510 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2511 2511 bge_init_buff_ring(bgep, ring);
2512 2512 }
2513 2513
2514 2514 /*
2515 2515 * Undo the work of bge_init_rings() above before the memory is freed
2516 2516 */
2517 2517 void
2518 2518 bge_fini_rings(bge_t *bgep)
2519 2519 {
2520 2520 uint32_t ring;
2521 2521
2522 2522 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2523 2523
2524 2524 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2525 2525 bge_fini_buff_ring(bgep, ring);
2526 2526 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2527 2527 bge_fini_recv_ring(bgep, ring);
2528 2528 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2529 2529 bge_fini_send_ring(bgep, ring);
2530 2530 }
2531 2531
2532 2532 /*
2533 2533 * Called from the bge_m_stop() to free the tx buffers which are
2534 2534 * allocated from the tx process.
2535 2535 */
2536 2536 void
2537 2537 bge_free_txbuf_arrays(send_ring_t *srp)
2538 2538 {
2539 2539 uint32_t array;
2540 2540 uint32_t split;
2541 2541
2542 2542 ASSERT(mutex_owned(srp->tx_lock));
2543 2543
2544 2544 /*
2545 2545 * Free the extra tx buffer DMA area
2546 2546 */
2547 2547 for (array = 1; array < srp->tx_array; ++array)
2548 2548 for (split = 0; split < BGE_SPLIT; ++split)
2549 2549 bge_free_dma_mem(&srp->buf[array][split]);
2550 2550
2551 2551 /*
2552 2552 * Restore initial tx buffer numbers
2553 2553 */
2554 2554 srp->tx_array = 1;
2555 2555 srp->tx_buffers = BGE_SEND_BUF_NUM;
2556 2556 srp->tx_buffers_low = srp->tx_buffers / 4;
2557 2557 srp->tx_flow = 0;
2558 2558 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2559 2559 }
2560 2560
2561 2561 /*
2562 2562 * Called from tx process to allocate more tx buffers
2563 2563 */
bge_queue_item_t *
bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_item_last;
	bge_queue_item_t *txbuf_item;
	bge_queue_item_t *txbuf_item_rtn;
	sw_txbuf_t *txbuf;
	dma_area_t area;
	size_t txbuffsize;
	uint32_t slot;
	uint32_t array;
	uint32_t split;
	uint32_t err;

	ASSERT(mutex_owned(srp->tx_lock));

	/* Refuse to grow beyond the per-ring array limit */
	array = srp->tx_array;
	if (array >= srp->tx_array_max)
		return (NULL);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &srp->buf[array][split]);
		if (err != DDI_SUCCESS) {
			/* Free the last already allocated OK chunks */
			for (slot = 0; slot <= split; ++slot)
				bge_free_dma_mem(&srp->buf[array][slot]);
			srp->tx_alloc_fail++;
			return (NULL);
		}
	}

	/*
	 * Chunk tx buffer area: carve one buffer per slot out of the
	 * new DMA areas, filling this array's section of srp->txbuf.
	 */
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = srp->buf[array][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &area, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
	}

	/*
	 * Add above buffers to the tx buffer pop queue.
	 *
	 * The loop below links the new queue items in reverse:
	 * item[k].next = item[k-1], with item[0].next = NULL, so
	 * txbuf_item_last ends up as the head of a chain running back
	 * to the first item.
	 */
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	txbuf_item_last = NULL;
	for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
		txbuf_item->item = txbuf;
		txbuf_item->next = txbuf_item_last;
		txbuf_item_last = txbuf_item;
		txbuf++;
		txbuf_item++;
	}
	/*
	 * Hold the first item back to return it directly to the caller;
	 * splice the remaining BGE_SEND_BUF_NUM - 1 items onto the pop
	 * queue by redirecting the second item's next pointer (which
	 * currently points at the first item) to the old queue head.
	 */
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf_item_rtn = txbuf_item;
	txbuf_item++;
	txbuf_queue = srp->txbuf_pop_queue;
	mutex_enter(txbuf_queue->lock);
	txbuf_item->next = txbuf_queue->head;
	txbuf_queue->head = txbuf_item_last;
	txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
	mutex_exit(txbuf_queue->lock);

	/* Account for the newly-added array of buffers */
	srp->tx_array++;
	srp->tx_buffers += BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;

	return (txbuf_item_rtn);
}
2645 2645
2646 2646 /*
2647 2647 * This function allocates all the transmit and receive buffers
2648 2648 * and descriptors, in four chunks.
2649 2649 */
2650 2650 int
2651 2651 bge_alloc_bufs(bge_t *bgep)
2652 2652 {
2653 2653 dma_area_t area;
2654 2654 size_t rxbuffsize;
2655 2655 size_t txbuffsize;
2656 2656 size_t rxbuffdescsize;
2657 2657 size_t rxdescsize;
2658 2658 size_t txdescsize;
2659 2659 uint32_t ring;
2660 2660 uint32_t rx_rings = bgep->chipid.rx_rings;
2661 2661 uint32_t tx_rings = bgep->chipid.tx_rings;
2662 2662 int split;
2663 2663 int err;
2664 2664
2665 2665 BGE_TRACE(("bge_alloc_bufs($%p)",
2666 2666 (void *)bgep));
2667 2667
2668 2668 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
2669 2669 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
2670 2670 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;
2671 2671
2672 2672 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2673 2673 txbuffsize *= tx_rings;
2674 2674
2675 2675 rxdescsize = rx_rings*bgep->chipid.recv_slots;
2676 2676 rxdescsize *= sizeof (bge_rbd_t);
2677 2677
2678 2678 rxbuffdescsize = BGE_STD_SLOTS_USED;
2679 2679 rxbuffdescsize += bgep->chipid.jumbo_slots;
2680 2680 rxbuffdescsize += BGE_MINI_SLOTS_USED;
2681 2681 rxbuffdescsize *= sizeof (bge_rbd_t);
2682 2682
2683 2683 txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
2684 2684 txdescsize *= sizeof (bge_sbd_t);
2685 2685 txdescsize += sizeof (bge_statistics_t);
2686 2686 txdescsize += sizeof (bge_status_t);
2687 2687 txdescsize += BGE_STATUS_PADDING;
2688 2688
2689 2689 /*
2690 2690 * Enable PCI relaxed ordering only for RX/TX data buffers
2691 2691 */
2692 2692 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2693 2693 DEVICE_5725_SERIES_CHIPSETS(bgep))) {
2694 2694 if (bge_relaxed_ordering)
2695 2695 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2696 2696 }
2697 2697
2698 2698 /*
2699 2699 * Allocate memory & handles for RX buffers
2700 2700 */
2701 2701 ASSERT((rxbuffsize % BGE_SPLIT) == 0);
2702 2702 for (split = 0; split < BGE_SPLIT; ++split) {
2703 2703 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
2704 2704 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
2705 2705 &bgep->rx_buff[split]);
2706 2706 if (err != DDI_SUCCESS)
2707 2707 return (DDI_FAILURE);
2708 2708 }
2709 2709 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Buffers (rxbuffsize = %d)",
2710 2710 rxbuffsize/BGE_SPLIT,
2711 2711 rxbuffsize));
2712 2712
2713 2713 /*
2714 2714 * Allocate memory & handles for TX buffers
2715 2715 */
2716 2716 ASSERT((txbuffsize % BGE_SPLIT) == 0);
2717 2717 for (split = 0; split < BGE_SPLIT; ++split) {
2718 2718 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2719 2719 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2720 2720 &bgep->tx_buff[split]);
2721 2721 if (err != DDI_SUCCESS)
2722 2722 return (DDI_FAILURE);
2723 2723 }
2724 2724 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Tx Buffers (txbuffsize = %d)",
2725 2725 txbuffsize/BGE_SPLIT,
2726 2726 txbuffsize));
2727 2727
2728 2728 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2729 2729 DEVICE_5725_SERIES_CHIPSETS(bgep))) {
2730 2730 /* no relaxed ordering for descriptors rings? */
2731 2731 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;
2732 2732 }
2733 2733
2734 2734 /*
2735 2735 * Allocate memory & handles for receive return rings
2736 2736 */
2737 2737 ASSERT((rxdescsize % rx_rings) == 0);
2738 2738 for (split = 0; split < rx_rings; ++split) {
2739 2739 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2740 2740 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2741 2741 &bgep->rx_desc[split]);
2742 2742 if (err != DDI_SUCCESS)
2743 2743 return (DDI_FAILURE);
2744 2744 }
2745 2745 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Descs cons (rx_rings = %d, rxdescsize = %d)",
2746 2746 rxdescsize/rx_rings,
2747 2747 rx_rings,
2748 2748 rxdescsize));
2749 2749
2750 2750 /*
2751 2751 * Allocate memory & handles for buffer (producer) descriptor rings.
2752 2752 * Note that split=rx_rings.
2753 2753 */
2754 2754 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2755 2755 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2756 2756 if (err != DDI_SUCCESS)
2757 2757 return (DDI_FAILURE);
2758 2758 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Rx Descs prod (rxbuffdescsize = %d)",
2759 2759 rxdescsize));
2760 2760
2761 2761 /*
2762 2762 * Allocate memory & handles for TX descriptor rings,
2763 2763 * status block, and statistics area
2764 2764 */
2765 2765 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2766 2766 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2767 2767 if (err != DDI_SUCCESS)
2768 2768 return (DDI_FAILURE);
2769 2769 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Tx Descs / Status Block / Stats (txdescdize = %d)",
2770 2770 txdescsize));
2771 2771
2772 2772 /*
2773 2773 * Now carve up each of the allocated areas ...
2774 2774 */
2775 2775
2776 2776 /* rx buffers */
2777 2777 for (split = 0; split < BGE_SPLIT; ++split) {
2778 2778 area = bgep->rx_buff[split];
2779 2779
2780 2780 BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2781 2781 split,
2782 2782 area.mem_va,
2783 2783 area.alength,
2784 2784 area.offset,
2785 2785 area.cookie.dmac_laddress,
2786 2786 area.cookie.dmac_size));
2787 2787
2788 2788 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2789 2789 &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2790 2790 bgep->chipid.std_buf_size);
2791 2791
2792 2792 BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2793 2793 split,
2794 2794 bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va,
2795 2795 bgep->buff[BGE_STD_BUFF_RING].buf[split].alength,
2796 2796 bgep->buff[BGE_STD_BUFF_RING].buf[split].offset,
2797 2797 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress,
2798 2798 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size,
2799 2799 BGE_STD_SLOTS_USED/BGE_SPLIT,
2800 2800 bgep->chipid.std_buf_size));
2801 2801
2802 2802 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2803 2803 &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2804 2804 bgep->chipid.recv_jumbo_size);
2805 2805
2806 2806 if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0)
2807 2807 {
2808 2808 BGE_DEBUG(("RXB SLCE %d JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2809 2809 split,
2810 2810 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va,
2811 2811 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength,
2812 2812 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset,
2813 2813 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress,
2814 2814 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size,
2815 2815 bgep->chipid.jumbo_slots/BGE_SPLIT,
2816 2816 bgep->chipid.recv_jumbo_size));
2817 2817 }
2818 2818
2819 2819 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2820 2820 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2821 2821 BGE_MINI_BUFF_SIZE);
2822 2822
2823 2823 if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0)
2824 2824 {
2825 2825 BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2826 2826 split,
2827 2827 bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va,
2828 2828 bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength,
2829 2829 bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset,
2830 2830 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress,
2831 2831 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size,
2832 2832 BGE_MINI_SLOTS_USED/BGE_SPLIT,
2833 2833 BGE_MINI_BUFF_SIZE));
2834 2834 }
2835 2835
2836 2836 BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2837 2837 split,
2838 2838 area.mem_va,
2839 2839 area.alength,
2840 2840 area.offset,
2841 2841 area.cookie.dmac_laddress,
2842 2842 area.cookie.dmac_size));
2843 2843 }
2844 2844
2845 2845 /* tx buffers */
2846 2846 for (split = 0; split < BGE_SPLIT; ++split) {
2847 2847 area = bgep->tx_buff[split];
2848 2848
2849 2849 BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2850 2850 split,
2851 2851 area.mem_va,
2852 2852 area.alength,
2853 2853 area.offset,
2854 2854 area.cookie.dmac_laddress,
2855 2855 area.cookie.dmac_size));
2856 2856
2857 2857 for (ring = 0; ring < tx_rings; ++ring) {
2858 2858 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2859 2859 &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2860 2860 bgep->chipid.snd_buff_size);
2861 2861
2862 2862 BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2863 2863 split, ring,
2864 2864 bgep->send[ring].buf[0][split].mem_va,
2865 2865 bgep->send[ring].buf[0][split].alength,
2866 2866 bgep->send[ring].buf[0][split].offset,
2867 2867 bgep->send[ring].buf[0][split].cookie.dmac_laddress,
2868 2868 bgep->send[ring].buf[0][split].cookie.dmac_size,
2869 2869 BGE_SEND_BUF_NUM/BGE_SPLIT,
2870 2870 bgep->chipid.snd_buff_size));
2871 2871 }
2872 2872
2873 2873 for (; ring < BGE_SEND_RINGS_MAX; ++ring) {
2874 2874 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2875 2875 &area, 0, bgep->chipid.snd_buff_size);
2876 2876 }
2877 2877
2878 2878 BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2879 2879 split,
2880 2880 area.mem_va,
2881 2881 area.alength,
2882 2882 area.offset,
2883 2883 area.cookie.dmac_laddress,
2884 2884 area.cookie.dmac_size));
2885 2885 }
2886 2886
2887 2887 for (ring = 0; ring < rx_rings; ++ring) {
2888 2888 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2889 2889 bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2890 2890
2891 2891 BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2892 2892 ring,
2893 2893 bgep->recv[ring].desc.mem_va,
2894 2894 bgep->recv[ring].desc.alength,
2895 2895 bgep->recv[ring].desc.offset,
2896 2896 bgep->recv[ring].desc.cookie.dmac_laddress,
2897 2897 bgep->recv[ring].desc.cookie.dmac_size,
2898 2898 bgep->chipid.recv_slots,
2899 2899 sizeof(bge_rbd_t)));
2900 2900 }
2901 2901
2902 2902 /* dma alloc for rxbuffdescsize is located at bgep->rx_desc[#rings] */
2903 2903 area = bgep->rx_desc[rx_rings]; /* note rx_rings = one beyond rings */
2904 2904
2905 2905 for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */
2906 2906 bge_slice_chunk(&bgep->recv[ring].desc, &area,
2907 2907 0, sizeof (bge_rbd_t));
2908 2908
2909 2909 BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2910 2910 area.mem_va,
2911 2911 area.alength,
2912 2912 area.offset,
2913 2913 area.cookie.dmac_laddress,
2914 2914 area.cookie.dmac_size));
2915 2915
2916 2916 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2917 2917 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2918 2918 BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2919 2919 bgep->buff[BGE_STD_BUFF_RING].desc.mem_va,
2920 2920 bgep->buff[BGE_STD_BUFF_RING].desc.alength,
2921 2921 bgep->buff[BGE_STD_BUFF_RING].desc.offset,
2922 2922 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress,
2923 2923 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size,
2924 2924 BGE_STD_SLOTS_USED,
2925 2925 sizeof(bge_rbd_t)));
2926 2926
2927 2927 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2928 2928 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2929 2929 BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2930 2930 bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va,
2931 2931 bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength,
2932 2932 bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset,
2933 2933 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress,
2934 2934 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size,
2935 2935 bgep->chipid.jumbo_slots,
2936 2936 sizeof(bge_rbd_t)));
2937 2937
2938 2938 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2939 2939 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2940 2940 BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2941 2941 bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va,
2942 2942 bgep->buff[BGE_MINI_BUFF_RING].desc.alength,
2943 2943 bgep->buff[BGE_MINI_BUFF_RING].desc.offset,
2944 2944 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress,
2945 2945 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size,
2946 2946 BGE_MINI_SLOTS_USED,
2947 2947 sizeof(bge_rbd_t)));
2948 2948
2949 2949 BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2950 2950 area.mem_va,
2951 2951 area.alength,
2952 2952 area.offset,
2953 2953 area.cookie.dmac_laddress,
2954 2954 area.cookie.dmac_size));
2955 2955
2956 2956 ASSERT(area.alength == 0);
2957 2957
2958 2958 area = bgep->tx_desc;
2959 2959
2960 2960 BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2961 2961 area.mem_va,
2962 2962 area.alength,
2963 2963 area.offset,
2964 2964 area.cookie.dmac_laddress,
2965 2965 area.cookie.dmac_size));
2966 2966
2967 2967 for (ring = 0; ring < tx_rings; ++ring) {
2968 2968 bge_slice_chunk(&bgep->send[ring].desc, &area,
2969 2969 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2970 2970
2971 2971 BGE_DEBUG(("TXD RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2972 2972 ring,
2973 2973 bgep->send[ring].desc.mem_va,
2974 2974 bgep->send[ring].desc.alength,
2975 2975 bgep->send[ring].desc.offset,
2976 2976 bgep->send[ring].desc.cookie.dmac_laddress,
2977 2977 bgep->send[ring].desc.cookie.dmac_size,
2978 2978 BGE_SEND_SLOTS_USED,
2979 2979 sizeof(bge_sbd_t)));
2980 2980 }
2981 2981
2982 2982 for (; ring < BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */
2983 2983 bge_slice_chunk(&bgep->send[ring].desc, &area,
2984 2984 0, sizeof (bge_sbd_t));
2985 2985
2986 2986 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2987 2987 BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2988 2988 bgep->statistics.mem_va,
2989 2989 bgep->statistics.alength,
2990 2990 bgep->statistics.offset,
2991 2991 bgep->statistics.cookie.dmac_laddress,
2992 2992 bgep->statistics.cookie.dmac_size,
2993 2993 1,
2994 2994 sizeof(bge_statistics_t)));
2995 2995
2996 2996 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2997 2997 BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2998 2998 bgep->status_block.mem_va,
2999 2999 bgep->status_block.alength,
3000 3000 bgep->status_block.offset,
3001 3001 bgep->status_block.cookie.dmac_laddress,
3002 3002 bgep->status_block.cookie.dmac_size,
3003 3003 1,
3004 3004 sizeof(bge_status_t)));
3005 3005
3006 3006 BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
3007 3007 area.mem_va,
3008 3008 area.alength,
3009 3009 area.offset,
3010 3010 area.cookie.dmac_laddress,
3011 3011 area.cookie.dmac_size));
3012 3012
3013 3013 ASSERT(area.alength == BGE_STATUS_PADDING);
3014 3014
3015 3015 DMA_ZERO(bgep->status_block);
3016 3016
3017 3017 return (DDI_SUCCESS);
3018 3018 }
3019 3019
3020 3020 #undef BGE_DBG
3021 3021 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */
3022 3022
3023 3023 /*
3024 3024 * This routine frees the transmit and receive buffers and descriptors.
3025 3025 * Make sure the chip is stopped before calling it!
3026 3026 */
3027 3027 void
3028 3028 bge_free_bufs(bge_t *bgep)
3029 3029 {
3030 3030 int split;
3031 3031
3032 3032 BGE_TRACE(("bge_free_bufs($%p)",
3033 3033 (void *)bgep));
3034 3034
3035 3035 bge_free_dma_mem(&bgep->tx_desc);
3036 3036 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
3037 3037 bge_free_dma_mem(&bgep->rx_desc[split]);
3038 3038 for (split = 0; split < BGE_SPLIT; ++split)
3039 3039 bge_free_dma_mem(&bgep->tx_buff[split]);
3040 3040 for (split = 0; split < BGE_SPLIT; ++split)
3041 3041 bge_free_dma_mem(&bgep->rx_buff[split]);
3042 3042 }
3043 3043
3044 3044 /*
3045 3045 * Determine (initial) MAC address ("BIA") to use for this interface
3046 3046 */
3047 3047
/*
 * Select the initial MAC address for this interface and record it in
 * cidp->vendor_addr.  Candidate sources are consulted in increasing
 * priority order; each later source, if present and well-formed,
 * overwrites the result of the earlier ones:
 *
 *	1. the factory address already extracted from the chip (if any)
 *	2. the "local-mac-address" property (int-array or byte-array form)
 *	3. the system address from the IDPROM, if the address is still
 *	   unset or "local-mac-address?" is "false"
 *	4. the "mac-address" property (set when netbooted from this NIC)
 */
static void
bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
{
	struct ether_addr sysaddr;
	char propbuf[8];		/* "true" or "false", plus NUL	*/
	uchar_t *bytes;
	int *ints;
	uint_t nelts;
	int err;

	BGE_TRACE(("bge_find_mac_address($%p)",
	    (void *)bgep));

	BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
	    cidp->hw_mac_addr,
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead. It
	 * will normally be set by OBP, but it could also be
	 * specified in a .conf file(!)
	 *
	 * There doesn't seem to be a way to define byte-array
	 * properties in a .conf, so we check whether it looks
	 * like an array of 6 ints instead.
	 *
	 * Then, we check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it either way, we'll ignore it.
	 */
	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			/* copy low byte of each int into the address */
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = ints[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(ints);
	}

	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Look up the OBP property "local-mac-address?". Note that even
	 * though its value is a string (which should be "true" or "false"),
	 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero
	 * the buffer first and then fetch the property as an untyped array;
	 * this may or may not include a final NUL, but since there will
	 * always be one left at the end of the buffer we can now treat it
	 * as a string anyway.
	 *
	 * The return value is deliberately not checked: on failure propbuf
	 * stays all-zero, which does not compare equal to "false" below.
	 */
	nelts = sizeof (propbuf);
	bzero(propbuf, nelts--);
	err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);

	/*
	 * Now, if the address still isn't set from the hardware (SEEPROM)
	 * or the OBP or .conf property, OR if the user has foolishly set
	 * 'local-mac-address? = false', use "the system address" instead
	 * (but only if it's non-null i.e. has been set from the IDPROM).
	 */
	if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
		if (localetheraddr(NULL, &sysaddr) != 0) {
			ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
			cidp->vendor_addr.set = B_TRUE;
		}

	BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));
}
3158 3158
3159 3159 /*ARGSUSED*/
3160 3160 int
3161 3161 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
3162 3162 {
3163 3163 ddi_fm_error_t de;
3164 3164
3165 3165 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
3166 3166 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
3167 3167 return (de.fme_status);
3168 3168 }
3169 3169
3170 3170 /*ARGSUSED*/
3171 3171 int
3172 3172 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
3173 3173 {
3174 3174 ddi_fm_error_t de;
3175 3175
3176 3176 ASSERT(bgep->progress & PROGRESS_BUFS);
3177 3177 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
3178 3178 return (de.fme_status);
3179 3179 }
3180 3180
3181 3181 /*
3182 3182 * The IO fault service error handling callback function
3183 3183 */
3184 3184 /*ARGSUSED*/
3185 3185 static int
3186 3186 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3187 3187 {
3188 3188 /*
3189 3189 * as the driver can always deal with an error in any dma or
3190 3190 * access handle, we can just return the fme_status value.
3191 3191 */
3192 3192 pci_ereport_post(dip, err, NULL);
3193 3193 return (err->fme_status);
3194 3194 }
3195 3195
3196 3196 static void
3197 3197 bge_fm_init(bge_t *bgep)
3198 3198 {
3199 3199 ddi_iblock_cookie_t iblk;
3200 3200
3201 3201 /* Only register with IO Fault Services if we have some capability */
3202 3202 if (bgep->fm_capabilities) {
3203 3203 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
3204 3204 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
3205 3205
3206 3206 /* Register capabilities with IO Fault Services */
3207 3207 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);
3208 3208
3209 3209 /*
3210 3210 * Initialize pci ereport capabilities if ereport capable
3211 3211 */
3212 3212 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3213 3213 DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3214 3214 pci_ereport_setup(bgep->devinfo);
3215 3215
3216 3216 /*
3217 3217 * Register error callback if error callback capable
3218 3218 */
3219 3219 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3220 3220 ddi_fm_handler_register(bgep->devinfo,
3221 3221 bge_fm_error_cb, (void*) bgep);
3222 3222 } else {
3223 3223 /*
3224 3224 * These fields have to be cleared of FMA if there are no
3225 3225 * FMA capabilities at runtime.
3226 3226 */
3227 3227 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
3228 3228 dma_attr.dma_attr_flags = 0;
3229 3229 }
3230 3230 }
3231 3231
3232 3232 static void
3233 3233 bge_fm_fini(bge_t *bgep)
3234 3234 {
3235 3235 /* Only unregister FMA capabilities if we registered some */
3236 3236 if (bgep->fm_capabilities) {
3237 3237
3238 3238 /*
3239 3239 * Release any resources allocated by pci_ereport_setup()
3240 3240 */
3241 3241 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3242 3242 DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3243 3243 pci_ereport_teardown(bgep->devinfo);
3244 3244
3245 3245 /*
3246 3246 * Un-register error callback if error callback capable
3247 3247 */
3248 3248 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3249 3249 ddi_fm_handler_unregister(bgep->devinfo);
3250 3250
3251 3251 /* Unregister from IO Fault Services */
3252 3252 ddi_fm_fini(bgep->devinfo);
3253 3253 }
3254 3254 }
3255 3255
/*
 * Tear down everything bge_attach() set up, guided by the bgep->progress
 * bits so only the stages that actually completed are undone.  The chip
 * is quiesced first (PHY/MAC reset but still powered), then interrupts,
 * locks, buffers, register mappings and config space are released, and
 * finally the soft state itself is freed.
 *
 * asf_mode (ASF builds only) is passed through to bge_chip_reset() to
 * tell it how to handle the ASF/IPMI firmware during the reset.
 */
static void
#ifdef BGE_IPMI_ASF
bge_unattach(bge_t *bgep, uint_t asf_mode)
#else
bge_unattach(bge_t *bgep)
#endif
{
	BGE_TRACE(("bge_unattach($%p)",
	    (void *)bgep));

	/*
	 * Flag that no more activity may be initiated
	 */
	bgep->progress &= ~PROGRESS_READY;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all BGE data structures
	 */
	if (bgep->periodic_id != NULL) {
		ddi_periodic_delete(bgep->periodic_id);
		bgep->periodic_id = NULL;
	}

	if (bgep->progress & PROGRESS_KSTATS)
		bge_fini_kstats(bgep);
	if (bgep->progress & PROGRESS_PHY)
		bge_phys_reset(bgep);
	if (bgep->progress & PROGRESS_HWINT) {
		mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
		if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
#else
		if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
#endif
			/* reset failure is non-fatal during teardown */
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
#ifdef BGE_IPMI_ASF
		if (bgep->asf_enabled) {
			/*
			 * This register has been overlaid. We restore its
			 * initial value here.
			 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		/* check the handles one last time; faults are non-fatal */
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->drain_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS) {
		ddi_regs_map_free(&bgep->io_handle);
		if (bgep->ape_enabled)
			ddi_regs_map_free(&bgep->ape_handle);
	}
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
	kmem_free(bgep, sizeof (*bgep));
}
3341 3341
3342 3342 static int
3343 3343 bge_resume(dev_info_t *devinfo)
3344 3344 {
3345 3345 bge_t *bgep; /* Our private data */
3346 3346 chip_id_t *cidp;
3347 3347 chip_id_t chipid;
3348 3348
3349 3349 bgep = ddi_get_driver_private(devinfo);
3350 3350 if (bgep == NULL)
3351 3351 return (DDI_FAILURE);
3352 3352
3353 3353 /*
3354 3354 * Refuse to resume if the data structures aren't consistent
3355 3355 */
3356 3356 if (bgep->devinfo != devinfo)
3357 3357 return (DDI_FAILURE);
3358 3358
3359 3359 #ifdef BGE_IPMI_ASF
3360 3360 /*
3361 3361 * Power management hasn't been supported in BGE now. If you
3362 3362 * want to implement it, please add the ASF/IPMI related
3363 3363 * code here.
3364 3364 */
3365 3365
3366 3366 #endif
3367 3367
3368 3368 /*
3369 3369 * Read chip ID & set up config space command register(s)
3370 3370 * Refuse to resume if the chip has changed its identity!
3371 3371 */
3372 3372 cidp = &bgep->chipid;
3373 3373 mutex_enter(bgep->genlock);
3374 3374 bge_chip_cfg_init(bgep, &chipid, B_FALSE);
3375 3375 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3376 3376 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3377 3377 mutex_exit(bgep->genlock);
3378 3378 return (DDI_FAILURE);
3379 3379 }
3380 3380 mutex_exit(bgep->genlock);
3381 3381 if (chipid.vendor != cidp->vendor)
3382 3382 return (DDI_FAILURE);
3383 3383 if (chipid.device != cidp->device)
3384 3384 return (DDI_FAILURE);
3385 3385 if (chipid.revision != cidp->revision)
3386 3386 return (DDI_FAILURE);
3387 3387 if (chipid.asic_rev != cidp->asic_rev)
3388 3388 return (DDI_FAILURE);
3389 3389
3390 3390 /*
3391 3391 * All OK, reinitialise h/w & kick off GLD scheduling
3392 3392 */
3393 3393 mutex_enter(bgep->genlock);
3394 3394 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
3395 3395 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3396 3396 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3397 3397 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3398 3398 mutex_exit(bgep->genlock);
3399 3399 return (DDI_FAILURE);
3400 3400 }
3401 3401 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3402 3402 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3403 3403 mutex_exit(bgep->genlock);
3404 3404 return (DDI_FAILURE);
3405 3405 }
3406 3406 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3407 3407 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3408 3408 mutex_exit(bgep->genlock);
3409 3409 return (DDI_FAILURE);
3410 3410 }
3411 3411 mutex_exit(bgep->genlock);
3412 3412 return (DDI_SUCCESS);
3413 3413 }
3414 3414
3415 3415 static int
3416 3416 bge_fw_img_is_valid(bge_t *bgep, uint32_t offset)
3417 3417 {
3418 3418 uint32_t val;
3419 3419
3420 3420 if (bge_nvmem_read32(bgep, offset, &val) ||
3421 3421 (val & 0xfc000000) != 0x0c000000 ||
3422 3422 bge_nvmem_read32(bgep, offset + 4, &val) ||
3423 3423 val != 0)
3424 3424 return (0);
3425 3425
3426 3426 return (1);
3427 3427 }
3428 3428
/*
 * Locate the ASF management-firmware image via the NVM directory and
 * append its version string (up to 16 bytes, big-endian words) to
 * bgep->fw_version, separated by ", ".  Silently returns on any NVM
 * read error or if no ASF directory entry is found.
 *
 * NOTE(review): the two-byte ", " append below is not bounds-checked
 * against BGE_FW_VER_SIZE; presumably the preceding bootcode version
 * always leaves enough room — confirm against the callers.
 */
static void
bge_read_mgmtfw_ver(bge_t *bgep)
{
	uint32_t val;
	uint32_t offset;
	uint32_t start;
	int i, vlen;

	/* Scan the NVM directory for the ASF init entry */
	for (offset = NVM_DIR_START;
	    offset < NVM_DIR_END;
	    offset += NVM_DIRENT_SIZE) {
		if (bge_nvmem_read32(bgep, offset, &val))
			return;

		if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI)
			break;
	}

	/* No ASF entry present */
	if (offset == NVM_DIR_END)
		return;

	/* Word before the entry holds the image load address */
	if (bge_nvmem_read32(bgep, offset - 4, &start))
		return;

	/*
	 * Follow the entry to the image, validate its header, and read
	 * the (load-address-relative) pointer to the version string.
	 */
	if (bge_nvmem_read32(bgep, offset + 4, &offset) ||
	    !bge_fw_img_is_valid(bgep, offset) ||
	    bge_nvmem_read32(bgep, offset + 8, &val))
		return;

	/* Convert the load-address-relative pointer into an NVM offset */
	offset += val - start;

	vlen = strlen(bgep->fw_version);

	bgep->fw_version[vlen++] = ',';
	bgep->fw_version[vlen++] = ' ';

	/* Copy up to four big-endian words of version text */
	for (i = 0; i < 4; i++) {
		uint32_t v;

		if (bge_nvmem_read32(bgep, offset, &v))
			return;

		v = BE_32(v);

		offset += sizeof(v);

		/* Partial final word: copy what fits and stop */
		if (vlen > BGE_FW_VER_SIZE - sizeof(v)) {
			memcpy(&bgep->fw_version[vlen], &v, BGE_FW_VER_SIZE - vlen);
			break;
		}

		memcpy(&bgep->fw_version[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
3484 3484
3485 3485 static void
3486 3486 bge_read_dash_ver(bge_t *bgep)
3487 3487 {
3488 3488 int vlen;
3489 3489 uint32_t apedata;
3490 3490 char *fwtype;
3491 3491
3492 3492 if (!bgep->ape_enabled || !bgep->asf_enabled)
3493 3493 return;
3494 3494
3495 3495 apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
3496 3496 if (apedata != APE_SEG_SIG_MAGIC)
3497 3497 return;
3498 3498
3499 3499 apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
3500 3500 if (!(apedata & APE_FW_STATUS_READY))
3501 3501 return;
3502 3502
3503 3503 apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION);
3504 3504
3505 3505 if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) &
3506 3506 BGE_APE_FW_FEATURE_NCSI) {
3507 3507 bgep->ape_has_ncsi = B_TRUE;
3508 3508 fwtype = "NCSI";
3509 3509 } else if ((bgep->chipid.device == DEVICE_ID_5725) ||
3510 3510 (bgep->chipid.device == DEVICE_ID_5727)) {
3511 3511 fwtype = "SMASH";
3512 3512 } else {
3513 3513 fwtype = "DASH";
3514 3514 }
3515 3515
3516 3516 vlen = strlen(bgep->fw_version);
3517 3517
3518 3518 snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen,
3519 3519 " %s v%d.%d.%d.%d", fwtype,
3520 3520 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
3521 3521 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
3522 3522 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
3523 3523 (apedata & APE_FW_VERSION_BLDMSK));
3524 3524 }
3525 3525
/*
 * Append the bootcode version to bgep->fw_version.  Newer images carry
 * a 16-byte ASCII version string inside the image; older ones encode a
 * major.minor pair in the NVM pointer table.  Silently returns on any
 * NVM read error.
 */
static void
bge_read_bc_ver(bge_t *bgep)
{
	uint32_t val;
	uint32_t offset;
	uint32_t start;
	uint32_t ver_offset;
	int i, dst_off;
	uint32_t major;
	uint32_t minor;
	boolean_t newver = B_FALSE;

	/* Bootcode image offset (0xc) and its load address (0x4) */
	if (bge_nvmem_read32(bgep, 0xc, &offset) ||
	    bge_nvmem_read32(bgep, 0x4, &start))
		return;

	if (bge_nvmem_read32(bgep, offset, &val))
		return;

	/*
	 * A header signature of 0x0c000000 with a zero second word
	 * marks the newer image format with an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (bge_nvmem_read32(bgep, offset + 4, &val))
			return;

		if (val == 0)
			newver = B_TRUE;
	}

	dst_off = strlen(bgep->fw_version);

	if (newver) {
		/* Need room for the full 16-byte version string */
		if (((BGE_FW_VER_SIZE - dst_off) < 16) ||
		    bge_nvmem_read32(bgep, offset + 8, &ver_offset))
			return;

		/* ver_offset is load-address-relative; rebase into NVM */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (bge_nvmem_read32(bgep, offset + i, &val))
				return;
			val = BE_32(val);
			memcpy(bgep->fw_version + dst_off + i, &val,
			    sizeof(val));
		}
	} else {
		/* Old format: version packed into the pointer table */
		if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT;
		minor = ver_offset & NVM_BCVER_MINMSK;
		snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off,
		    "v%d.%02d", major, minor);
	}
}
3578 3578
3579 3579 static void
3580 3580 bge_read_fw_ver(bge_t *bgep)
3581 3581 {
3582 3582 uint32_t val;
3583 3583 uint32_t magic;
3584 3584
3585 3585 *bgep->fw_version = 0;
3586 3586
3587 3587 if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) ||
3588 3588 (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) {
3589 3589 snprintf(bgep->fw_version, sizeof(bgep->fw_version), "sb");
3590 3590 return;
3591 3591 }
3592 3592
3593 3593 mutex_enter(bgep->genlock);
3594 3594
3595 3595 bge_nvmem_read32(bgep, 0, &magic);
3596 3596
3597 3597 if (magic == EEPROM_MAGIC) {
3598 3598 bge_read_bc_ver(bgep);
3599 3599 } else {
3600 3600 /* ignore other configs for now */
3601 3601 mutex_exit(bgep->genlock);
3602 3602 return;
3603 3603 }
3604 3604
3605 3605 if (bgep->ape_enabled) {
3606 3606 if (bgep->asf_enabled) {
3607 3607 bge_read_dash_ver(bgep);
3608 3608 }
3609 3609 } else if (bgep->asf_enabled) {
3610 3610 bge_read_mgmtfw_ver(bgep);
3611 3611 }
3612 3612
3613 3613 mutex_exit(bgep->genlock);
3614 3614
3615 3615 bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */
3616 3616 }
3617 3617
3618 3618 /*
3619 3619 * attach(9E) -- Attach a device to the system
3620 3620 *
3621 3621 * Called once for each board successfully probed.
3622 3622 */
3623 3623 static int
3624 3624 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3625 3625 {
3626 3626 bge_t *bgep; /* Our private data */
3627 3627 mac_register_t *macp;
3628 3628 chip_id_t *cidp;
3629 3629 caddr_t regs;
3630 3630 int instance;
3631 3631 int err;
3632 3632 int intr_types;
3633 3633 int *props = NULL;
3634 3634 uint_t numProps;
3635 3635 uint32_t regval;
3636 3636 uint32_t pci_state_reg;
3637 3637 #ifdef BGE_IPMI_ASF
3638 3638 uint32_t mhcrValue;
3639 3639 #ifdef __sparc
3640 3640 uint16_t value16;
3641 3641 #endif
3642 3642 #ifdef BGE_NETCONSOLE
3643 3643 int retval;
3644 3644 #endif
3645 3645 #endif
3646 3646
3647 3647 instance = ddi_get_instance(devinfo);
3648 3648
3649 3649 BGE_GTRACE(("bge_attach($%p, %d) instance %d",
3650 3650 (void *)devinfo, cmd, instance));
3651 3651 BGE_BRKPT(NULL, "bge_attach");
3652 3652
3653 3653 switch (cmd) {
3654 3654 default:
3655 3655 return (DDI_FAILURE);
3656 3656
3657 3657 case DDI_RESUME:
3658 3658 return (bge_resume(devinfo));
3659 3659
3660 3660 case DDI_ATTACH:
3661 3661 break;
3662 3662 }
3663 3663
3664 3664 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
3665 3665 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
3666 3666 ddi_set_driver_private(devinfo, bgep);
3667 3667 bgep->bge_guard = BGE_GUARD;
3668 3668 bgep->devinfo = devinfo;
3669 3669 bgep->param_drain_max = 64;
3670 3670 bgep->param_msi_cnt = 0;
3671 3671 bgep->param_loop_mode = 0;
3672 3672
3673 3673 /*
3674 3674 * Initialize more fields in BGE private data
3675 3675 */
3676 3676 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3677 3677 DDI_PROP_DONTPASS, debug_propname, bge_debug);
3678 3678 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
3679 3679 BGE_DRIVER_NAME, instance);
3680 3680
3681 3681 /*
3682 3682 * Initialize for fma support
3683 3683 */
3684 3684 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3685 3685 DDI_PROP_DONTPASS, fm_cap,
3686 3686 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3687 3687 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3688 3688 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
3689 3689 bge_fm_init(bgep);
3690 3690
3691 3691 /*
3692 3692 * Look up the IOMMU's page size for DVMA mappings (must be
3693 3693 * a power of 2) and convert to a mask. This can be used to
3694 3694 * determine whether a message buffer crosses a page boundary.
3695 3695 * Note: in 2s complement binary notation, if X is a power of
3696 3696 * 2, then -X has the representation "11...1100...00".
3697 3697 */
3698 3698 bgep->pagemask = dvma_pagesize(devinfo);
3699 3699 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
3700 3700 bgep->pagemask = -bgep->pagemask;
3701 3701
3702 3702 /*
3703 3703 * Map config space registers
3704 3704 * Read chip ID & set up config space command register(s)
3705 3705 *
3706 3706 * Note: this leaves the chip accessible by Memory Space
3707 3707 * accesses, but with interrupts and Bus Mastering off.
3708 3708 * This should ensure that nothing untoward will happen
3709 3709 * if it has been left active by the (net-)bootloader.
3710 3710 * We'll re-enable Bus Mastering once we've reset the chip,
3711 3711 * and allow interrupts only when everything else is set up.
3712 3712 */
3713 3713 err = pci_config_setup(devinfo, &bgep->cfg_handle);
3714 3714 #ifdef BGE_IPMI_ASF
3715 3715 #ifdef __sparc
3716 3716 /*
3717 3717 * We need to determine the type of chipset for accessing some configure
3718 3718 * registers. (This information will be used by bge_ind_put32,
3719 3719 * bge_ind_get32 and bge_nic_read32)
3720 3720 */
3721 3721 bgep->chipid.device = pci_config_get16(bgep->cfg_handle,
3722 3722 PCI_CONF_DEVID);
3723 3723 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
3724 3724 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
3725 3725 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
3726 3726 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3727 3727 MHCR_ENABLE_TAGGED_STATUS_MODE |
3728 3728 MHCR_MASK_INTERRUPT_MODE |
3729 3729 MHCR_MASK_PCI_INT_OUTPUT |
3730 3730 MHCR_CLEAR_INTERRUPT_INTA |
3731 3731 MHCR_ENABLE_ENDIAN_WORD_SWAP |
3732 3732 MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3733 3733 /*
3734 3734 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
3735 3735 * has been set in PCI_CONF_COMM already, we need to write the
3736 3736 * byte-swapped value to it. So we just write zero first for simplicity.
3737 3737 */
3738 3738 if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3739 3739 DEVICE_5725_SERIES_CHIPSETS(bgep))
3740 3740 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3741 3741 #else
3742 3742 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3743 3743 MHCR_ENABLE_TAGGED_STATUS_MODE |
3744 3744 MHCR_MASK_INTERRUPT_MODE |
3745 3745 MHCR_MASK_PCI_INT_OUTPUT |
3746 3746 MHCR_CLEAR_INTERRUPT_INTA;
3747 3747 #endif
3748 3748 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
3749 3749 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
3750 3750 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3751 3751 MEMORY_ARBITER_ENABLE);
3752 3752 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
3753 3753 bgep->asf_wordswapped = B_TRUE;
3754 3754 } else {
3755 3755 bgep->asf_wordswapped = B_FALSE;
3756 3756 }
3757 3757 bge_asf_get_config(bgep);
3758 3758 #endif
3759 3759 if (err != DDI_SUCCESS) {
3760 3760 bge_problem(bgep, "pci_config_setup() failed");
3761 3761 goto attach_fail;
3762 3762 }
3763 3763 bgep->progress |= PROGRESS_CFG;
3764 3764 cidp = &bgep->chipid;
3765 3765 bzero(cidp, sizeof(*cidp));
3766 3766 bge_chip_cfg_init(bgep, cidp, B_FALSE);
3767 3767 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3768 3768 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3769 3769 goto attach_fail;
3770 3770 }
3771 3771
3772 3772 #ifdef BGE_IPMI_ASF
3773 3773 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
3774 3774 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
3775 3775 bgep->asf_newhandshake = B_TRUE;
3776 3776 } else {
3777 3777 bgep->asf_newhandshake = B_FALSE;
3778 3778 }
3779 3779 #endif
3780 3780
3781 3781 /*
3782 3782 * Update those parts of the chip ID derived from volatile
3783 3783 * registers with the values seen by OBP (in case the chip
3784 3784 * has been reset externally and therefore lost them).
3785 3785 */
3786 3786 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3787 3787 DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3788 3788 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3789 3789 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3790 3790 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3791 3791 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3792 3792 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3793 3793 DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3794 3794 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3795 3795 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3796 3796 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3797 3797 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3798 3798 cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3799 3799 DDI_PROP_DONTPASS, eee_propname, cidp->eee);
3800 3800
3801 3801 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3802 3802 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3803 3803 if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3804 3804 (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3805 3805 cidp->default_mtu = BGE_DEFAULT_MTU;
3806 3806 }
3807 3807
3808 3808 /*
3809 3809 * Map operating registers
3810 3810 */
3811 3811 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3812 3812 	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3813 3813 if (err != DDI_SUCCESS) {
3814 3814 bge_problem(bgep, "ddi_regs_map_setup() failed");
3815 3815 goto attach_fail;
3816 3816 }
3817 3817 bgep->io_regs = regs;
3818 3818
3819 3819 bgep->ape_enabled = B_FALSE;
3820 3820 bgep->ape_regs = NULL;
3821 3821 if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3822 3822 DEVICE_5725_SERIES_CHIPSETS(bgep)) {
3823 3823 err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER,
3824 3824 		    &regs, 0, 0, &bge_reg_accattr, &bgep->ape_handle);
3825 3825 if (err != DDI_SUCCESS) {
3826 3826 ddi_regs_map_free(&bgep->io_handle);
3827 3827 bge_problem(bgep, "ddi_regs_map_setup() failed");
3828 3828 goto attach_fail;
3829 3829 }
3830 3830 bgep->ape_regs = regs;
3831 3831 bgep->ape_enabled = B_TRUE;
3832 3832
3833 3833 /*
3834 3834 * Allow reads and writes to the
3835 3835 * APE register and memory space.
3836 3836 */
3837 3837
3838 3838 pci_state_reg = pci_config_get32(bgep->cfg_handle,
3839 3839 PCI_CONF_BGE_PCISTATE);
3840 3840 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
3841 3841 PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
3842 3842 pci_config_put32(bgep->cfg_handle,
3843 3843 PCI_CONF_BGE_PCISTATE, pci_state_reg);
3844 3844
3845 3845 bge_ape_lock_init(bgep);
3846 3846 }
3847 3847
3848 3848 bgep->progress |= PROGRESS_REGS;
3849 3849
3850 3850 /*
3851 3851 * Characterise the device, so we know its requirements.
3852 3852 * Then allocate the appropriate TX and RX descriptors & buffers.
3853 3853 */
3854 3854 if (bge_chip_id_init(bgep) == EIO) {
3855 3855 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3856 3856 goto attach_fail;
3857 3857 }
3858 3858
3859 3859 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
3860 3860 0, "reg", &props, &numProps);
3861 3861 if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) {
3862 3862 bgep->pci_bus = PCI_REG_BUS_G(props[0]);
3863 3863 bgep->pci_dev = PCI_REG_DEV_G(props[0]);
3864 3864 bgep->pci_func = PCI_REG_FUNC_G(props[0]);
3865 3865 ddi_prop_free(props);
3866 3866 }
3867 3867
3868 3868 if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3869 3869 DEVICE_5725_SERIES_CHIPSETS(bgep)) {
3870 3870 regval = bge_reg_get32(bgep, CPMU_STATUS_REG);
3871 3871 if ((bgep->chipid.device == DEVICE_ID_5719) ||
3872 3872 (bgep->chipid.device == DEVICE_ID_5720)) {
3873 3873 bgep->pci_func =
3874 3874 ((regval & CPMU_STATUS_FUNC_NUM_5719) >>
3875 3875 CPMU_STATUS_FUNC_NUM_5719_SHIFT);
3876 3876 } else {
3877 3877 bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >>
3878 3878 CPMU_STATUS_FUNC_NUM_SHIFT);
3879 3879 }
3880 3880 }
3881 3881
3882 3882 err = bge_alloc_bufs(bgep);
3883 3883 if (err != DDI_SUCCESS) {
3884 3884 bge_problem(bgep, "DMA buffer allocation failed");
3885 3885 goto attach_fail;
3886 3886 }
3887 3887 bgep->progress |= PROGRESS_BUFS;
3888 3888
3889 3889 /*
3890 3890 * Add the softint handlers:
3891 3891 *
3892 3892 * Both of these handlers are used to avoid restrictions on the
3893 3893 * context and/or mutexes required for some operations. In
3894 3894 * particular, the hardware interrupt handler and its subfunctions
3895 3895 * can detect a number of conditions that we don't want to handle
3896 3896 * in that context or with that set of mutexes held. So, these
3897 3897 * softints are triggered instead:
3898 3898 *
3899 3899 * the <resched> softint is triggered if we have previously
3900 3900 * had to refuse to send a packet because of resource shortage
3901 3901 * (we've run out of transmit buffers), but the send completion
3902 3902 * interrupt handler has now detected that more buffers have
3903 3903 * become available.
3904 3904 *
3905 3905 * the <factotum> is triggered if the h/w interrupt handler
3906 3906 * sees the <link state changed> or <error> bits in the status
3907 3907 * block. It's also triggered periodically to poll the link
3908 3908 * state, just in case we aren't getting link status change
3909 3909 * interrupts ...
3910 3910 */
3911 3911 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
3912 3912 NULL, NULL, bge_send_drain, (caddr_t)bgep);
3913 3913 if (err != DDI_SUCCESS) {
3914 3914 bge_problem(bgep, "ddi_add_softintr() failed");
3915 3915 goto attach_fail;
3916 3916 }
3917 3917 bgep->progress |= PROGRESS_RESCHED;
3918 3918 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
3919 3919 NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
3920 3920 if (err != DDI_SUCCESS) {
3921 3921 bge_problem(bgep, "ddi_add_softintr() failed");
3922 3922 goto attach_fail;
3923 3923 }
3924 3924 bgep->progress |= PROGRESS_FACTOTUM;
3925 3925
3926 3926 /* Get supported interrupt types */
3927 3927 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
3928 3928 bge_error(bgep, "ddi_intr_get_supported_types failed\n");
3929 3929
3930 3930 goto attach_fail;
3931 3931 }
3932 3932
3933 3933 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
3934 3934 bgep->ifname, intr_types));
3935 3935
3936 3936 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
3937 3937 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
3938 3938 bge_error(bgep, "MSI registration failed, "
3939 3939 "trying FIXED interrupt type\n");
3940 3940 } else {
3941 3941 BGE_DEBUG(("%s: Using MSI interrupt type",
3942 3942 bgep->ifname));
3943 3943 bgep->intr_type = DDI_INTR_TYPE_MSI;
3944 3944 bgep->progress |= PROGRESS_HWINT;
3945 3945 }
3946 3946 }
3947 3947
3948 3948 if (!(bgep->progress & PROGRESS_HWINT) &&
3949 3949 (intr_types & DDI_INTR_TYPE_FIXED)) {
3950 3950 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
3951 3951 bge_error(bgep, "FIXED interrupt "
3952 3952 "registration failed\n");
3953 3953 goto attach_fail;
3954 3954 }
3955 3955
3956 3956 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));
3957 3957
3958 3958 bgep->intr_type = DDI_INTR_TYPE_FIXED;
3959 3959 bgep->progress |= PROGRESS_HWINT;
3960 3960 }
3961 3961
3962 3962 if (!(bgep->progress & PROGRESS_HWINT)) {
3963 3963 bge_error(bgep, "No interrupts registered\n");
3964 3964 goto attach_fail;
3965 3965 }
3966 3966
3967 3967 /*
3968 3968 * Note that interrupts are not enabled yet as
3969 3969 * mutex locks are not initialized. Initialize mutex locks.
3970 3970 */
3971 3971 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
3972 3972 DDI_INTR_PRI(bgep->intr_pri));
3973 3973 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
3974 3974 DDI_INTR_PRI(bgep->intr_pri));
3975 3975 rw_init(bgep->errlock, NULL, RW_DRIVER,
3976 3976 DDI_INTR_PRI(bgep->intr_pri));
3977 3977
3978 3978 /*
3979 3979 * Initialize rings.
3980 3980 */
3981 3981 bge_init_rings(bgep);
3982 3982
3983 3983 /*
3984 3984 * Now that mutex locks are initialized, enable interrupts.
3985 3985 */
3986 3986 bge_intr_enable(bgep);
3987 3987 bgep->progress |= PROGRESS_INTR;
3988 3988
3989 3989 /*
3990 3990 * Initialise link state variables
3991 3991 * Stop, reset & reinitialise the chip.
3992 3992 * Initialise the (internal) PHY.
3993 3993 */
3994 3994 bgep->link_state = LINK_STATE_UNKNOWN;
3995 3995
3996 3996 mutex_enter(bgep->genlock);
3997 3997
3998 3998 /*
3999 3999 * Reset chip & rings to initial state; also reset address
4000 4000 * filtering, promiscuity, loopback mode.
4001 4001 */
4002 4002 #ifdef BGE_IPMI_ASF
4003 4003 #ifdef BGE_NETCONSOLE
4004 4004 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
4005 4005 #else
4006 4006 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
4007 4007 #endif
4008 4008 #else
4009 4009 if (bge_reset(bgep) != DDI_SUCCESS) {
4010 4010 #endif
4011 4011 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
4012 4012 (void) bge_check_acc_handle(bgep, bgep->io_handle);
4013 4013 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4014 4014 mutex_exit(bgep->genlock);
4015 4015 goto attach_fail;
4016 4016 }
4017 4017
4018 4018 #ifdef BGE_IPMI_ASF
4019 4019 if (bgep->asf_enabled) {
4020 4020 bgep->asf_status = ASF_STAT_RUN_INIT;
4021 4021 }
4022 4022 #endif
4023 4023
4024 4024 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
4025 4025 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
4026 4026 bgep->promisc = B_FALSE;
4027 4027 bgep->param_loop_mode = BGE_LOOP_NONE;
4028 4028 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
4029 4029 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4030 4030 mutex_exit(bgep->genlock);
4031 4031 goto attach_fail;
4032 4032 }
4033 4033 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
4034 4034 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4035 4035 mutex_exit(bgep->genlock);
4036 4036 goto attach_fail;
4037 4037 }
4038 4038
4039 4039 mutex_exit(bgep->genlock);
4040 4040
4041 4041 if (bge_phys_init(bgep) == EIO) {
4042 4042 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4043 4043 goto attach_fail;
4044 4044 }
4045 4045 bgep->progress |= PROGRESS_PHY;
4046 4046
4047 4047 /*
4048 4048 * initialize NDD-tweakable parameters
4049 4049 */
4050 4050 if (bge_nd_init(bgep)) {
4051 4051 bge_problem(bgep, "bge_nd_init() failed");
4052 4052 goto attach_fail;
4053 4053 }
4054 4054 bgep->progress |= PROGRESS_NDD;
4055 4055
4056 4056 /*
4057 4057 * Create & initialise named kstats
4058 4058 */
4059 4059 bge_init_kstats(bgep, instance);
4060 4060 bgep->progress |= PROGRESS_KSTATS;
4061 4061
4062 4062 /*
4063 4063 * Determine whether to override the chip's own MAC address
4064 4064 */
4065 4065 bge_find_mac_address(bgep, cidp);
4066 4066 {
4067 4067 int slot;
4068 4068 for (slot = 0; slot < MAC_ADDRESS_REGS_MAX; slot++) {
4069 4069 ethaddr_copy(cidp->vendor_addr.addr,
4070 4070 bgep->curr_addr[slot].addr);
4071 4071 bgep->curr_addr[slot].set = 1;
4072 4072 }
4073 4073 }
4074 4074
4075 4075 bge_read_fw_ver(bgep);
4076 4076
4077 4077 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
4078 4078 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;
4079 4079
4080 4080 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4081 4081 goto attach_fail;
4082 4082 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4083 4083 macp->m_driver = bgep;
4084 4084 macp->m_dip = devinfo;
4085 4085 macp->m_src_addr = cidp->vendor_addr.addr;
4086 4086 macp->m_callbacks = &bge_m_callbacks;
4087 4087 macp->m_min_sdu = 0;
4088 4088 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
4089 4089 macp->m_margin = VLAN_TAGSZ;
4090 4090 macp->m_priv_props = bge_priv_prop;
4091 4091
4092 4092 #if defined(ILLUMOS)
4093 4093 bge_m_unicst(bgep, cidp->vendor_addr.addr);
4094 4094 #endif
4095 4095
4096 4096 /*
4097 4097 * Finally, we're ready to register ourselves with the MAC layer
4098 4098 * interface; if this succeeds, we're all ready to start()
4099 4099 */
4100 4100 err = mac_register(macp, &bgep->mh);
4101 4101 mac_free(macp);
4102 4102 if (err != 0)
4103 4103 goto attach_fail;
4104 4104
4105 4105 mac_link_update(bgep->mh, LINK_STATE_UNKNOWN);
4106 4106
4107 4107 /*
4108 4108 * Register a periodical handler.
4109 4109 * bge_chip_cyclic() is invoked in kernel context.
4110 4110 */
4111 4111 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
4112 4112 BGE_CYCLIC_PERIOD, DDI_IPL_0);
4113 4113
4114 4114 bgep->progress |= PROGRESS_READY;
4115 4115 ASSERT(bgep->bge_guard == BGE_GUARD);
4116 4116 #ifdef BGE_IPMI_ASF
4117 4117 #ifdef BGE_NETCONSOLE
4118 4118 if (bgep->asf_enabled) {
4119 4119 mutex_enter(bgep->genlock);
4120 4120 retval = bge_chip_start(bgep, B_TRUE);
4121 4121 mutex_exit(bgep->genlock);
4122 4122 if (retval != DDI_SUCCESS)
4123 4123 goto attach_fail;
4124 4124 }
4125 4125 #endif
4126 4126 #endif
4127 4127
4128 4128 ddi_report_dev(devinfo);
4129 4129
4130 4130 return (DDI_SUCCESS);
4131 4131
4132 4132 attach_fail:
4133 4133 #ifdef BGE_IPMI_ASF
4134 4134 bge_unattach(bgep, ASF_MODE_SHUTDOWN);
4135 4135 #else
4136 4136 bge_unattach(bgep);
4137 4137 #endif
4138 4138 return (DDI_FAILURE);
4139 4139 }
4140 4140
4141 4141 /*
4142 4142 * bge_suspend() -- suspend transmit/receive for powerdown
4143 4143 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 * All hardware manipulation is done under genlock; on any
	 * failure the lock is dropped and DDI_FAILURE returned so the
	 * framework aborts the suspend.
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management hasn't been supported in BGE now. If you
	 * want to implement it, please add the ASF/IPMI related
	 * code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		/* PHY refused to idle: note degraded FMA state and bail */
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	/* verify the register accesses made above all completed cleanly */
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}
4174 4174
4175 4175 /*
4176 4176 * quiesce(9E) entry point.
4177 4177 *
4178 4178 * This function is called when the system is single-threaded at high
4179 4179 * PIL with preemption disabled. Therefore, this function must not be
4180 4180 * blocked.
4181 4181 *
4182 4182 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4183 4183 * DDI_FAILURE indicates an error condition and should almost never happen.
4184 4184 */
#ifdef __sparc
/* quiesce(9E) is not supported on sparc for this driver */
#define	bge_quiesce	ddi_quiesce_not_supported
#else
static int
bge_quiesce(dev_info_t *devinfo)
{
	bge_t *bgep = ddi_get_driver_private(devinfo);

	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Mask further interrupts from the device.  Running single-threaded
	 * at high PIL, so no mutexes may be taken here; which register to
	 * poke depends on the interrupt type that was registered at attach.
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    MHCR_MASK_PCI_INT_OUTPUT);
	} else {
		bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
	}

	/* Stop the chip */
	bge_chip_stop_nonblocking(bgep);

	return (DDI_SUCCESS);
}
#endif
4209 4209
4210 4210 /*
4211 4211 * detach(9E) -- Detach a device from the system
4212 4212 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/* suspend is handled separately; nothing is torn down */
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	/*
	 * If ASF (management firmware) is running, bring it to a clean
	 * stop before the chip is shut down, and remember that a
	 * post-shutdown reset mode must be used by bge_unattach().
	 */
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {

		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		/* a pseudo-stopped MAC must be truly stopped now */
		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		/* access-handle faults here don't block detach */
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}
4287 4287
4288 4288
4289 4289 /*
4290 4290 * ========== Module Loading Data & Entry Points ==========
4291 4291 */
4292 4292
4293 4293 #undef BGE_DBG
4294 4294 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */
4295 4295
4296 4296 DDI_DEFINE_STREAM_OPS(bge_dev_ops,
4297 4297 nulldev, /* identify */
4298 4298 nulldev, /* probe */
4299 4299 bge_attach, /* attach */
4300 4300 bge_detach, /* detach */
4301 4301 nodev, /* reset */
4302 4302 NULL, /* cb_ops */
4303 4303 D_MP, /* bus_ops */
4304 4304 NULL, /* power */
↓ open down ↓ |
4304 lines elided |
↑ open up ↑ |
4305 4305 bge_quiesce /* quiesce */
4306 4306 );
4307 4307
4308 4308 static struct modldrv bge_modldrv = {
4309 4309 &mod_driverops, /* Type of module. This one is a driver */
4310 4310 bge_ident, /* short description */
4311 4311 &bge_dev_ops /* driver specific ops */
4312 4312 };
4313 4313
4314 4314 static struct modlinkage modlinkage = {
4315 - MODREV_1, (void *)&bge_modldrv, NULL
4315 + MODREV_1, { (void *)&bge_modldrv, NULL }
4316 4316 };
4317 4317
4318 4318
4319 4319 int
4320 4320 _info(struct modinfo *modinfop)
4321 4321 {
4322 4322 return (mod_info(&modlinkage, modinfop));
4323 4323 }
4324 4324
4325 4325 int
4326 4326 _init(void)
4327 4327 {
4328 4328 int status;
4329 4329
4330 4330 mac_init_ops(&bge_dev_ops, "bge");
4331 4331 status = mod_install(&modlinkage);
4332 4332 if (status == DDI_SUCCESS)
4333 4333 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
4334 4334 else
4335 4335 mac_fini_ops(&bge_dev_ops);
4336 4336 return (status);
4337 4337 }
4338 4338
4339 4339 int
4340 4340 _fini(void)
4341 4341 {
4342 4342 int status;
4343 4343
4344 4344 status = mod_remove(&modlinkage);
4345 4345 if (status == DDI_SUCCESS) {
4346 4346 mac_fini_ops(&bge_dev_ops);
4347 4347 mutex_destroy(bge_log_mutex);
4348 4348 }
4349 4349 return (status);
4350 4350 }
4351 4351
4352 4352
4353 4353 /*
4354 4354 * bge_add_intrs:
4355 4355 *
4356 4356 * Register FIXED or MSI interrupts.
4357 4357 */
4358 4358 static int
4359 4359 bge_add_intrs(bge_t *bgep, int intr_type)
4360 4360 {
4361 4361 dev_info_t *dip = bgep->devinfo;
4362 4362 int avail, actual, intr_size, count = 0;
4363 4363 int i, flag, ret;
4364 4364
4365 4365 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));
4366 4366
4367 4367 /* Get number of interrupts */
4368 4368 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
4369 4369 if ((ret != DDI_SUCCESS) || (count == 0)) {
4370 4370 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
4371 4371 "count: %d", ret, count);
4372 4372
4373 4373 return (DDI_FAILURE);
4374 4374 }
4375 4375
4376 4376 /* Get number of available interrupts */
4377 4377 ret = ddi_intr_get_navail(dip, intr_type, &avail);
4378 4378 if ((ret != DDI_SUCCESS) || (avail == 0)) {
4379 4379 bge_error(bgep, "ddi_intr_get_navail() failure, "
4380 4380 "ret: %d, avail: %d\n", ret, avail);
4381 4381
4382 4382 return (DDI_FAILURE);
4383 4383 }
4384 4384
4385 4385 if (avail < count) {
4386 4386 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
4387 4387 bgep->ifname, count, avail));
4388 4388 }
4389 4389
4390 4390 /*
4391 4391 * BGE hardware generates only single MSI even though it claims
4392 4392 * to support multiple MSIs. So, hard code MSI count value to 1.
4393 4393 */
4394 4394 if (intr_type == DDI_INTR_TYPE_MSI) {
4395 4395 count = 1;
4396 4396 flag = DDI_INTR_ALLOC_STRICT;
4397 4397 } else {
4398 4398 flag = DDI_INTR_ALLOC_NORMAL;
4399 4399 }
4400 4400
4401 4401 /* Allocate an array of interrupt handles */
4402 4402 intr_size = count * sizeof (ddi_intr_handle_t);
4403 4403 bgep->htable = kmem_alloc(intr_size, KM_SLEEP);
4404 4404
4405 4405 /* Call ddi_intr_alloc() */
4406 4406 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
4407 4407 count, &actual, flag);
4408 4408
4409 4409 if ((ret != DDI_SUCCESS) || (actual == 0)) {
4410 4410 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
4411 4411
4412 4412 kmem_free(bgep->htable, intr_size);
4413 4413 return (DDI_FAILURE);
4414 4414 }
4415 4415
4416 4416 if (actual < count) {
4417 4417 BGE_DEBUG(("%s: Requested: %d, Received: %d",
4418 4418 bgep->ifname, count, actual));
4419 4419 }
4420 4420
4421 4421 bgep->intr_cnt = actual;
4422 4422
4423 4423 /*
4424 4424 * Get priority for first msi, assume remaining are all the same
4425 4425 */
4426 4426 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
4427 4427 DDI_SUCCESS) {
4428 4428 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);
4429 4429
4430 4430 /* Free already allocated intr */
4431 4431 for (i = 0; i < actual; i++) {
4432 4432 (void) ddi_intr_free(bgep->htable[i]);
4433 4433 }
4434 4434
4435 4435 kmem_free(bgep->htable, intr_size);
4436 4436 return (DDI_FAILURE);
4437 4437 }
4438 4438
4439 4439 /* Call ddi_intr_add_handler() */
4440 4440 for (i = 0; i < actual; i++) {
4441 4441 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
4442 4442 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
4443 4443 bge_error(bgep, "ddi_intr_add_handler() "
4444 4444 "failed %d\n", ret);
4445 4445
4446 4446 /* Free already allocated intr */
4447 4447 for (i = 0; i < actual; i++) {
4448 4448 (void) ddi_intr_free(bgep->htable[i]);
4449 4449 }
4450 4450
4451 4451 kmem_free(bgep->htable, intr_size);
4452 4452 return (DDI_FAILURE);
4453 4453 }
4454 4454 }
4455 4455
4456 4456 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
4457 4457 != DDI_SUCCESS) {
4458 4458 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);
4459 4459
4460 4460 for (i = 0; i < actual; i++) {
4461 4461 (void) ddi_intr_remove_handler(bgep->htable[i]);
4462 4462 (void) ddi_intr_free(bgep->htable[i]);
4463 4463 }
4464 4464
4465 4465 kmem_free(bgep->htable, intr_size);
4466 4466 return (DDI_FAILURE);
4467 4467 }
4468 4468
4469 4469 return (DDI_SUCCESS);
4470 4470 }
4471 4471
4472 4472 /*
4473 4473 * bge_rem_intrs:
4474 4474 *
4475 4475 * Unregister FIXED or MSI interrupts
4476 4476 */
4477 4477 static void
4478 4478 bge_rem_intrs(bge_t *bgep)
4479 4479 {
4480 4480 int i;
4481 4481
4482 4482 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));
4483 4483
4484 4484 /* Call ddi_intr_remove_handler() */
4485 4485 for (i = 0; i < bgep->intr_cnt; i++) {
4486 4486 (void) ddi_intr_remove_handler(bgep->htable[i]);
4487 4487 (void) ddi_intr_free(bgep->htable[i]);
4488 4488 }
4489 4489
4490 4490 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
4491 4491 }
4492 4492
4493 4493
4494 4494 void
4495 4495 bge_intr_enable(bge_t *bgep)
4496 4496 {
4497 4497 int i;
4498 4498
4499 4499 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
4500 4500 /* Call ddi_intr_block_enable() for MSI interrupts */
4501 4501 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
4502 4502 } else {
4503 4503 /* Call ddi_intr_enable for MSI or FIXED interrupts */
4504 4504 for (i = 0; i < bgep->intr_cnt; i++) {
4505 4505 (void) ddi_intr_enable(bgep->htable[i]);
4506 4506 }
4507 4507 }
4508 4508 }
4509 4509
4510 4510
4511 4511 void
4512 4512 bge_intr_disable(bge_t *bgep)
4513 4513 {
4514 4514 int i;
4515 4515
4516 4516 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
4517 4517 /* Call ddi_intr_block_disable() */
4518 4518 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
4519 4519 } else {
4520 4520 for (i = 0; i < bgep->intr_cnt; i++) {
4521 4521 (void) ddi_intr_disable(bgep->htable[i]);
4522 4522 }
4523 4523 }
4524 4524 }
4525 4525
/*
 * bge_reprogram: push the current PHY and chip settings to the
 * hardware.  Caller must hold genlock.  Returns 0 on success or
 * IOC_INVAL if either the PHY update or the chip sync failed (both
 * are still attempted; the first failure is not short-circuited).
 */
int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

	ASSERT(mutex_owned(bgep->genlock));

	if (bge_phys_update(bgep) != DDI_SUCCESS) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	/* chip sync takes an extra ASF argument when IPMI/ASF is built in */
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	/* MSI needs a manual trigger to pick up pending status updates */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_chip_msi_trig(bgep);
	return (status);
}
↓ open down ↓ |
223 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX