Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/bfe/bfe.c
+++ new/usr/src/uts/common/io/bfe/bfe.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 #include <sys/stream.h>
26 26 #include <sys/strsun.h>
27 27 #include <sys/stat.h>
28 28 #include <sys/pci.h>
29 29 #include <sys/modctl.h>
30 30 #include <sys/kstat.h>
31 31 #include <sys/ethernet.h>
32 32 #include <sys/devops.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/conf.h>
35 35 #include <sys/sysmacros.h>
36 36 #include <sys/dditypes.h>
37 37 #include <sys/ddi.h>
38 38 #include <sys/sunddi.h>
39 39 #include <sys/miiregs.h>
40 40 #include <sys/byteorder.h>
41 41 #include <sys/cyclic.h>
42 42 #include <sys/note.h>
43 43 #include <sys/crc32.h>
44 44 #include <sys/mac_provider.h>
45 45 #include <sys/mac_ether.h>
46 46 #include <sys/vlan.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/sdt.h>
49 49 #include <sys/strsubr.h>
50 50
51 51 #include "bfe.h"
52 52 #include "bfe_hw.h"
53 53
54 54
55 55 /*
56 56 * Broadcom BCM4401 chipsets use two rings :
57 57 *
58 58 * - One TX : For sending packets down the wire.
59 59 * - One RX : For receving packets.
60 60 *
61 61 * Each ring can have any number of descriptors (configured during attach).
62 62 * As of now we configure only 128 descriptor per ring (TX/RX). Each descriptor
63 63 * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for
64 64 * the packet and control information (like start/end of frame or end of table).
65 65 * The descriptor table is allocated first and then a DMA buffer (for a packet)
66 66 * is allocated and linked to each descriptor.
67 67 *
68 68 * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX
69 69 * interrupt, the stat register will point to current descriptor being
70 70 * processed.
71 71 *
72 72 * Here's an example of TX and RX ring :
73 73 *
74 74 * TX:
75 75 *
76 76 * Base of the descriptor table is programmed using BFE_DMATX_CTRL control
77 77 * register. Each 'addr' points to DMA buffer (or packet data buffer) to
78 78 * be transmitted and 'ctl' has the length of the packet (usually MTU).
79 79 *
80 80 * ----------------------|
81 81 * | addr |Descriptor 0 |
82 82 * | ctl | |
83 83 * ----------------------|
84 84 * | addr |Descriptor 1 | SOF (start of the frame)
85 85 * | ctl | |
86 86 * ----------------------|
87 87 * | ... |Descriptor... | EOF (end of the frame)
88 88 * | ... | |
89 89 * ----------------------|
90 90 * | addr |Descritor 127 |
91 91 * | ctl | EOT | EOT (End of Table)
92 92 * ----------------------|
93 93 *
94 94 * 'r_curr_desc' : pointer to current descriptor which can be used to transmit
95 95 * a packet.
96 96 * 'r_avail_desc' : decremented whenever a packet is being sent.
97 97 * 'r_cons_desc' : incremented whenever a packet is sent down the wire and
98 98 * notified by an interrupt to bfe driver.
99 99 *
100 100 * RX:
101 101 *
102 102 * Base of the descriptor table is programmed using BFE_DMARX_CTRL control
103 103 * register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
104 104 * contains the size of the DMA buffer and all the DMA buffers are
105 105 * pre-allocated during attach and hence the maxmium size of the packet is
106 106 * also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
107 107 * the packet length is embedded in bfe_header_t which is added by the
108 108 * chip in the beginning of the packet.
109 109 *
110 110 * ----------------------|
111 111 * | addr |Descriptor 0 |
112 112 * | ctl | |
113 113 * ----------------------|
114 114 * | addr |Descriptor 1 |
115 115 * | ctl | |
116 116 * ----------------------|
117 117 * | ... |Descriptor... |
118 118 * | ... | |
119 119 * ----------------------|
120 120 * | addr |Descriptor 127|
121 121 * | ctl | EOT | EOT (End of Table)
122 122 * ----------------------|
123 123 *
124 124 * 'r_curr_desc' : pointer to current descriptor while receving a packet.
125 125 *
126 126 */
127 127
128 128 #define MODULE_NAME "bfe"
129 129
130 130 /*
131 131 * Used for checking PHY (link state, speed)
132 132 */
133 133 #define BFE_TIMEOUT_INTERVAL (1000 * 1000 * 1000)
134 134
135 135
136 136 /*
137 137 * Chip restart action and reason for restart
138 138 */
139 139 #define BFE_ACTION_RESTART 0x1 /* For restarting the chip */
140 140 #define BFE_ACTION_RESTART_SETPROP 0x2 /* restart due to setprop */
141 141 #define BFE_ACTION_RESTART_FAULT 0x4 /* restart due to fault */
142 142 #define BFE_ACTION_RESTART_PKT 0x8 /* restart due to pkt timeout */
143 143
144 144 static char bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
145 145
146 146 /*
147 147 * Function Prototypes for bfe driver.
148 148 */
149 149 static int bfe_check_link(bfe_t *);
150 150 static void bfe_report_link(bfe_t *);
151 151 static void bfe_chip_halt(bfe_t *);
152 152 static void bfe_chip_reset(bfe_t *);
153 153 static void bfe_tx_desc_init(bfe_ring_t *);
154 154 static void bfe_rx_desc_init(bfe_ring_t *);
155 155 static void bfe_set_rx_mode(bfe_t *);
156 156 static void bfe_enable_chip_intrs(bfe_t *);
157 157 static void bfe_chip_restart(bfe_t *);
158 158 static void bfe_init_vars(bfe_t *);
159 159 static void bfe_clear_stats(bfe_t *);
160 160 static void bfe_gather_stats(bfe_t *);
161 161 static void bfe_error(dev_info_t *, char *, ...);
162 162 static int bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
163 163 void *);
164 164 static int bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
165 165 const void *);
166 166 static int bfe_tx_reclaim(bfe_ring_t *);
167 167 int bfe_mac_set_ether_addr(void *, const uint8_t *);
168 168
169 169
/*
 * Macros for ddi_dma_sync().
 *
 * SYNC_DESC(r, s, l, d): syncs 'l' descriptors of ring 'r' starting at
 * descriptor index 's' in direction 'd' (DDI_DMA_SYNC_* flag); offsets
 * are scaled by sizeof (bfe_desc_t).
 *
 * SYNC_BUF(r, s, b, l, d): syncs 'l' bytes at byte offset 'b' of the
 * packet buffer attached to descriptor 's' of ring 'r'.
 */
#define	SYNC_DESC(r, s, l, d) \
	(void) ddi_dma_sync(r->r_desc_dma_handle, \
	    (off_t)(s * sizeof (bfe_desc_t)), \
	    (size_t)(l * sizeof (bfe_desc_t)), \
	    d)

#define	SYNC_BUF(r, s, b, l, d) \
	(void) ddi_dma_sync(r->r_buf_dma[s].handle, \
	    (off_t)(b), (size_t)(l), d)
182 182
/*
 * Supported Broadcom BCM4401 Cards.
 */
static bfe_cards_t bfe_cards[] = {
	/* PCI vendor 0x14e4 (Broadcom), device 0x170c */
	{ 0x14e4, 0x170c, "BCM4401 100Base-TX"},
};
189 189
190 190
/*
 * DMA attributes for device registers, packet data (buffer) and
 * descriptor table.
 */

/* Register access: device registers are little-endian, strictly ordered. */
static struct ddi_device_acc_attr bfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Packet buffer access: raw bytes, no endian swapping. */
static struct ddi_device_acc_attr bfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native endianness */
	DDI_STRICTORDER_ACC
};
206 206
/*
 * DMA attributes for packet data buffers.  The chip can only address
 * below BFE_PCI_DMA (30-bit DMA window), hence the addr_hi/seg limits.
 */
static ddi_dma_attr_t bfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
	0x1fff,			/* dma_attr_count_max */
	8,			/* dma_attr_align */
	0,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x1fff,			/* dma_attr_maxxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA attributes for the descriptor tables; the table base must be
 * aligned to BFE_DESC_ALIGN as required by the DMA engine.
 */
static ddi_dma_attr_t bfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
	BFE_PCI_DMA - 1,	/* dma_attr_count_max */
	BFE_DESC_ALIGN,		/* dma_attr_align */
	0,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_maxxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
236 236
/*
 * Ethernet broadcast addresses.
 */
static uchar_t bfe_broadcast[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * Asserts that the caller holds every lock that quiesces the chip:
 * the TX ring lock and the chip-wide rwlock as writer (see
 * bfe_grab_locks()).  Debug-only sanity check.
 */
#define	ASSERT_ALL_LOCKS(bfe) {	\
	ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock));	\
	ASSERT(rw_write_held(&bfe->bfe_rwlock)); \
}
248 248
249 249 /*
250 250 * Debugging and error reproting code.
251 251 */
252 252 static void
253 253 bfe_error(dev_info_t *dip, char *fmt, ...)
254 254 {
255 255 va_list ap;
256 256 char buf[256];
257 257
258 258 va_start(ap, fmt);
259 259 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
260 260 va_end(ap);
261 261
262 262 if (dip) {
263 263 cmn_err(CE_WARN, "%s%d: %s",
264 264 ddi_driver_name(dip), ddi_get_instance(dip), buf);
265 265 } else {
266 266 cmn_err(CE_WARN, "bfe: %s", buf);
267 267 }
268 268 }
269 269
270 270 /*
271 271 * Grabs all necessary locks to block any other operation on the chip.
272 272 */
273 273 static void
274 274 bfe_grab_locks(bfe_t *bfe)
275 275 {
276 276 bfe_ring_t *tx = &bfe->bfe_tx_ring;
277 277
278 278 /*
279 279 * Grab all the locks.
280 280 * - bfe_rwlock : locks down whole chip including RX.
281 281 * - tx's r_lock : locks down only TX side.
282 282 */
283 283 rw_enter(&bfe->bfe_rwlock, RW_WRITER);
284 284 mutex_enter(&tx->r_lock);
285 285
286 286 /*
287 287 * Note that we don't use RX's r_lock.
288 288 */
289 289 }
290 290
291 291 /*
292 292 * Release lock on chip/drver.
293 293 */
294 294 static void
295 295 bfe_release_locks(bfe_t *bfe)
296 296 {
297 297 bfe_ring_t *tx = &bfe->bfe_tx_ring;
298 298
299 299 /*
300 300 * Release all the locks in the order in which they were grabbed.
301 301 */
302 302 mutex_exit(&tx->r_lock);
303 303 rw_exit(&bfe->bfe_rwlock);
304 304 }
305 305
306 306
307 307 /*
308 308 * It's used to make sure that the write to device register was successful.
309 309 */
310 310 static int
311 311 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit,
312 312 ulong_t t, const int clear)
313 313 {
314 314 ulong_t i;
315 315 uint32_t v;
316 316
317 317 for (i = 0; i < t; i++) {
318 318 v = INL(bfe, reg);
319 319
320 320 if (clear && !(v & bit))
321 321 break;
322 322
323 323 if (!clear && (v & bit))
324 324 break;
325 325
326 326 drv_usecwait(10);
327 327 }
328 328
329 329 /* if device still didn't see the value */
330 330 if (i == t)
331 331 return (-1);
332 332
333 333 return (0);
334 334 }
335 335
/*
 * PHY functions (read, write, stop, reset and startup)
 */

/*
 * Reads a PHY/MII register through the MDIO interface.
 *
 * The MII-done interrupt status bit is cleared first, then a read
 * transaction frame (start | read opcode | PHY address | register |
 * turnaround) is written to the MDIO data register.  Completion is
 * polled (up to 10 tries) via the MII interrupt bit; a timeout from
 * bfe_wait_bit() is ignored and whatever is in the data register is
 * returned.  Returns the 16-bit data field of MDIO_DATA.
 */
static int
bfe_read_phy(bfe_t *bfe, uint32_t reg)
{
	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));

	/* Wait for the MII transaction-complete interrupt bit to set. */
	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);

	return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA));
}
353 353
/*
 * Writes 'val' to a PHY/MII register through the MDIO interface.
 *
 * Mirrors bfe_read_phy(): clears the MII-done interrupt bit, writes a
 * write-opcode transaction frame carrying the 16-bit data, then polls
 * (up to 10 tries) for completion.  A poll timeout is ignored.
 */
static void
bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val)
{
	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
	    (val & BFE_MDIO_DATA_DATA)));

	/* Wait for the MII transaction-complete interrupt bit to set. */
	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
}
367 367
368 368 /*
369 369 * It resets the PHY layer.
370 370 */
371 371 static int
372 372 bfe_reset_phy(bfe_t *bfe)
373 373 {
374 374 uint32_t i;
375 375
376 376 bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
377 377 drv_usecwait(100);
378 378 for (i = 0; i < 10; i++) {
379 379 if (bfe_read_phy(bfe, MII_CONTROL) &
380 380 MII_CONTROL_RESET) {
381 381 drv_usecwait(500);
382 382 continue;
383 383 }
384 384
385 385 break;
386 386 }
387 387
388 388 if (i == 10) {
389 389 bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
390 390 bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
391 391 return (BFE_FAILURE);
392 392 }
393 393
394 394 bfe->bfe_phy_state = BFE_PHY_RESET_DONE;
395 395
396 396 return (BFE_SUCCESS);
397 397 }
398 398
399 399 /*
400 400 * Make sure timer function is out of our way and especially during
401 401 * detach.
402 402 */
403 403 static void
404 404 bfe_stop_timer(bfe_t *bfe)
405 405 {
406 406 if (bfe->bfe_periodic_id) {
407 407 ddi_periodic_delete(bfe->bfe_periodic_id);
408 408 bfe->bfe_periodic_id = NULL;
409 409 }
410 410 }
411 411
412 412 /*
413 413 * Stops the PHY
414 414 */
415 415 static void
416 416 bfe_stop_phy(bfe_t *bfe)
417 417 {
418 418 bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
419 419 MII_CONTROL_ISOLATE);
420 420
421 421 bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
422 422 bfe->bfe_chip.speed = 0;
423 423 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
424 424
425 425 bfe->bfe_phy_state = BFE_PHY_STOPPED;
426 426
427 427 /*
428 428 * Report the link status to MAC layer.
429 429 */
430 430 if (bfe->bfe_machdl != NULL)
431 431 (void) bfe_report_link(bfe);
432 432 }
433 433
434 434 static int
435 435 bfe_probe_phy(bfe_t *bfe)
436 436 {
437 437 int phy;
438 438 uint32_t status;
439 439
440 440 if (bfe->bfe_phy_addr) {
441 441 status = bfe_read_phy(bfe, MII_STATUS);
442 442 if (status != 0xffff && status != 0) {
443 443 bfe_write_phy(bfe, MII_CONTROL, 0);
444 444 return (BFE_SUCCESS);
445 445 }
446 446 }
447 447
448 448 for (phy = 0; phy < 32; phy++) {
449 449 bfe->bfe_phy_addr = phy;
450 450 status = bfe_read_phy(bfe, MII_STATUS);
451 451 if (status != 0xffff && status != 0) {
452 452 bfe_write_phy(bfe, MII_CONTROL, 0);
453 453 return (BFE_SUCCESS);
454 454 }
455 455 }
456 456
457 457 return (BFE_FAILURE);
458 458 }
459 459
/*
 * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
 * status.  It also services deferred chip-restart requests and detects
 * TX stalls.
 */
static void
bfe_timeout(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;
	int resched = 0;	/* set when MAC should retry queued TX */

	/*
	 * We don't grab any lock because bfe can't go away.
	 * untimeout() will wait for this timeout instance to complete.
	 */
	if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
		/*
		 * Restart the chip.  All restart-related action flags are
		 * cleared under the full lock set, then MAC is told it may
		 * resume transmitting.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_restart(bfe);
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
		bfe_release_locks(bfe);
		mac_tx_update(bfe->bfe_machdl);
		/* Restart will register a new timeout */
		return;
	}

	/* Reader lock is enough for the checks below. */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
		hrtime_t hr;

		/*
		 * TX stall detection: if a packet deadline was armed and
		 * has passed, schedule a chip restart for the next tick.
		 */
		hr = gethrtime();
		if (bfe->bfe_tx_stall_time != 0 &&
		    hr > bfe->bfe_tx_stall_time) {
			DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
			    char *, "pkt timeout");
			bfe->bfe_chip_action |=
			    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
			bfe->bfe_tx_stall_time = 0;
		}
	}

	if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
		/*
		 * Report the link status to MAC layer if link status changed.
		 */
		if (bfe_check_link(bfe)) {
			bfe_report_link(bfe);
			if (bfe->bfe_chip.link == LINK_STATE_UP) {
				uint32_t val, flow;

				/*
				 * Program MAC duplex to match the PHY
				 * resolution; for full duplex also turn off
				 * RX flow control.
				 */
				val = INL(bfe, BFE_TX_CTRL);
				val &= ~BFE_TX_DUPLEX;
				if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
					val |= BFE_TX_DUPLEX;
					flow = INL(bfe, BFE_RXCONF);
					flow &= ~BFE_RXCONF_FLOW;
					OUTL(bfe, BFE_RXCONF, flow);

					flow = INL(bfe, BFE_MAC_FLOW);
					flow &= ~(BFE_FLOW_RX_HIWAT);
					OUTL(bfe, BFE_MAC_FLOW, flow);
				}

				resched = 1;

				OUTL(bfe, BFE_TX_CTRL, val);
				DTRACE_PROBE1(link__up,
				    int, bfe->bfe_unit);
			}
		}
	}

	rw_exit(&bfe->bfe_rwlock);

	/* Link just came up: ask MAC to push any queued packets. */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);
}
541 541
/*
 * Starts PHY layer.
 *
 * Probes and resets the PHY, reads its capabilities (BMSR), then builds
 * the advertisement (ANAR) and control (BMCR) values either from the
 * hardware defaults or from user-set properties (bfe_mac_setprop()).
 * Also arms the periodic link-check callback on first start.
 *
 * Returns BFE_SUCCESS, or BFE_FAILURE when no PHY responds or no valid
 * link mode is selected.
 */
static int
bfe_startup_phy(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar;
	int prog, s;
	int phyid1, phyid2;

	if (bfe_probe_phy(bfe) == BFE_FAILURE) {
		bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
		return (BFE_FAILURE);
	}

	(void) bfe_reset_phy(bfe);

	/* Compose the 32-bit PHY identifier from the two MII ID registers. */
	phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
	phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
	bfe->bfe_phy_id = (phyid1 << 16) | phyid2;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	anar = bfe_read_phy(bfe, MII_AN_ADVERT);

again:
	/* Start from a clean advertisement mask. */
	anar &= ~(MII_ABILITY_100BASE_T4 |
	    MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
	    MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);

	/*
	 * Supported hardware modes are in bmsr.
	 */
	bfe->bfe_chip.bmsr = bmsr;

	/*
	 * Assume no capabilities are supported in the hardware.
	 */
	bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
	    bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
	    bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;

	/*
	 * Assume property is set.
	 */
	s = 1;
	if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
		/*
		 * Property is not set which means bfe_mac_setprop()
		 * is not called on us.
		 */
		s = 0;
	}

	bmcr = prog = 0;

	/*
	 * For each mode the hardware supports (per BMSR): record the
	 * capability, and advertise it either by default (s == 0) or
	 * when the corresponding bfe_adv_* property was requested.
	 * 'prog' counts the modes actually advertised.
	 */
	if (bmsr & MII_STATUS_100_BASEX_FD) {
		bfe->bfe_cap_100fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			bfe->bfe_adv_100fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100fdx) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASE_T4) {
		bfe->bfe_cap_100T4 = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_T4;
			bfe->bfe_adv_100T4 = 1;
			prog++;
		} else if (bfe->bfe_adv_100T4) {
			anar |= MII_ABILITY_100BASE_T4;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASEX) {
		bfe->bfe_cap_100hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX;
			bfe->bfe_adv_100hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100hdx) {
			anar |= MII_ABILITY_100BASE_TX;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10_FD) {
		bfe->bfe_cap_10fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T_FD;
			bfe->bfe_adv_10fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10fdx) {
			anar |= MII_ABILITY_10BASE_T_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10) {
		bfe->bfe_cap_10hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T;
			bfe->bfe_adv_10hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10hdx) {
			anar |= MII_ABILITY_10BASE_T;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_CANAUTONEG) {
		bfe->bfe_cap_aneg = 1;
		if (s == 0) {
			bfe->bfe_adv_aneg = 1;
		}
	}

	if (prog == 0) {
		if (s == 0) {
			/* Hardware advertised nothing usable: give up. */
			bfe_error(bfe->bfe_dip,
			    "No valid link mode selected. Powering down PHY");
			bfe_stop_phy(bfe);
			bfe_report_link(bfe);
			return (BFE_FAILURE);
		}

		/*
		 * If property is set then user would have goofed up. So we
		 * go back to default properties.
		 */
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
		goto again;
	}

	/*
	 * Use autonegotiation when both requested and supported;
	 * otherwise force the fastest advertised mode.
	 */
	if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
		bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
	} else {
		if (bfe->bfe_adv_100fdx)
			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
		else if (bfe->bfe_adv_100hdx)
			bmcr = MII_CONTROL_100MB;
		else if (bfe->bfe_adv_10fdx)
			bmcr = MII_CONTROL_FDUPLEX;
		else
			bmcr = 0;		/* 10HDX */
	}

	if (prog)
		bfe_write_phy(bfe, MII_AN_ADVERT, anar);

	if (bmcr)
		bfe_write_phy(bfe, MII_CONTROL, bmcr);

	bfe->bfe_mii_anar = anar;
	bfe->bfe_mii_bmcr = bmcr;
	bfe->bfe_phy_state = BFE_PHY_STARTED;

	/* Arm the periodic link-check timer on first start only. */
	if (bfe->bfe_periodic_id == NULL) {
		bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
		    (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);

		DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
	}

	DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
	    int, bmsr, int, bmcr, int, anar);

	return (BFE_SUCCESS);
}
716 716
717 717 /*
718 718 * Reports link status back to MAC Layer.
719 719 */
720 720 static void
721 721 bfe_report_link(bfe_t *bfe)
722 722 {
723 723 mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
724 724 }
725 725
/*
 * Reads PHY/MII registers and get the link status for us.
 *
 * Updates the cached link/speed/duplex from BMSR/BMCR/ANAR/ANLPAR.
 * When autonegotiation is off, speed/duplex come straight from BMCR;
 * otherwise the highest mode common to both link partners wins
 * (100FDX > 100T4 > 100HDX > 10FDX > 10HDX).
 *
 * Returns 1 if speed, duplex or link state changed since the last call
 * (caller is expected to report to MAC), 0 otherwise.
 */
static int
bfe_check_link(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar, anlpar;
	int speed, duplex, link;	/* previous values, for change detection */

	speed = bfe->bfe_chip.speed;
	duplex = bfe->bfe_chip.duplex;
	link = bfe->bfe_chip.link;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	bfe->bfe_mii_bmsr = bmsr;

	bmcr = bfe_read_phy(bfe, MII_CONTROL);

	anar = bfe_read_phy(bfe, MII_AN_ADVERT);
	bfe->bfe_mii_anar = anar;

	anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
	bfe->bfe_mii_anlpar = anlpar;

	bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);

	/*
	 * If exp register is not present in PHY.
	 */
	if (bfe->bfe_mii_exp == 0xffff) {
		bfe->bfe_mii_exp = 0;
	}

	if ((bmsr & MII_STATUS_LINKUP) == 0) {
		/* No link: invalidate cached speed/duplex. */
		bfe->bfe_chip.link = LINK_STATE_DOWN;
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
		goto done;
	}

	bfe->bfe_chip.link = LINK_STATE_UP;

	if (!(bmcr & MII_CONTROL_ANE)) {
		/* Forced mode */
		if (bmcr & MII_CONTROL_100MB)
			bfe->bfe_chip.speed = 100000000;
		else
			bfe->bfe_chip.speed = 10000000;

		if (bmcr & MII_CONTROL_FDUPLEX)
			bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
		else
			bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;

	} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
	    (!(bmsr & MII_STATUS_ANDONE))) {
		/* Autoneg enabled but unsupported or not yet complete. */
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else {
		/* No common ability with the link partner. */
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	}

done:
	/*
	 * If speed or link status or duplex mode changed then report to
	 * MAC layer which is done by the caller.
	 */
	if (speed != bfe->bfe_chip.speed ||
	    duplex != bfe->bfe_chip.duplex ||
	    link != bfe->bfe_chip.link) {
		return (1);
	}

	return (0);
}
817 817
818 818 static void
819 819 bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
820 820 {
821 821 uint32_t v;
822 822
823 823 v = ((uint32_t)d[2] << 24);
824 824 v |= ((uint32_t)d[3] << 16);
825 825 v |= ((uint32_t)d[4] << 8);
826 826 v |= (uint32_t)d[5];
827 827
828 828 OUTL(bfe, BFE_CAM_DATA_LO, v);
829 829 v = (BFE_CAM_HI_VALID |
830 830 (((uint32_t)d[0]) << 8) |
831 831 (((uint32_t)d[1])));
832 832
833 833 OUTL(bfe, BFE_CAM_DATA_HI, v);
834 834 OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
835 835 ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
836 836 (void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
837 837 }
838 838
/*
 * Chip related functions (halt, reset, start).
 */

/*
 * Halts the chip: masks interrupts, disables the ethernet core, waits
 * for in-flight TX/RX to drain, and stops both DMA engines.  Leaves
 * the chip state as BFE_CHIP_HALT.
 */
static void
bfe_chip_halt(bfe_t *bfe)
{
	/*
	 * Disables interrupts.
	 */
	OUTL(bfe, BFE_INTR_MASK, 0);
	FLUSH(bfe, BFE_INTR_MASK);

	OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);

	/*
	 * Wait until TX and RX finish their job.
	 */
	(void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);

	/*
	 * Disables DMA engine.
	 */
	OUTL(bfe, BFE_DMARX_CTRL, 0);
	OUTL(bfe, BFE_DMATX_CTRL, 0);

	drv_usecwait(10);

	bfe->bfe_chip_state = BFE_CHIP_HALT;
}
868 868
/*
 * Restarts the chip from scratch: halt chip and PHY, reinitialize
 * software state, reset the chip (which also restarts the PHY),
 * rebuild both DMA descriptor rings, reapply the RX filter and
 * re-enable interrupts.  Caller must hold all chip locks
 * (see bfe_grab_locks()).
 */
static void
bfe_chip_restart(bfe_t *bfe)
{
	DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
	    int, bfe->bfe_chip_action);

	/*
	 * Halt chip and PHY.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * Init variables.
	 */
	bfe_init_vars(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * DMA descriptor rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);
}
902 902
/*
 * Disables core by stopping the clock.
 *
 * Follows the Sonics SiliconBackplane disable sequence: if the core is
 * already held in reset we are done; otherwise assert reject, wait for
 * reject to latch and busy to clear, then force-clock-gate with reset
 * asserted before finally dropping the clock.  The exact ordering and
 * delays below are required by the backplane; do not reorder.
 */
static void
bfe_core_disable(bfe_t *bfe)
{
	if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
		return;

	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	(void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
	(void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
	OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(10);
	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	drv_usecwait(10);
}
921 921
/*
 * Resets core.
 *
 * Disables the core first, then re-enables its clock with reset and
 * force-gating asserted, clears any latched serror/timeout state, and
 * finally releases reset and force-gating in two steps.  Each step is
 * flushed and given 1us to settle, per the backplane requirements.
 */
static void
bfe_core_reset(bfe_t *bfe)
{
	uint32_t val;

	/*
	 * First disable the core.
	 */
	bfe_core_disable(bfe);

	OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	/* Clear a latched backplane serror, if any. */
	if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
		OUTL(bfe, BFE_SBTMSHIGH, 0);

	/* Clear inband-error / timeout state. */
	val = INL(bfe, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Release reset, keep force-gating one more cycle. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	/* Normal clocked operation. */
	OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);
}
954 954
955 955 static void
956 956 bfe_setup_config(bfe_t *bfe, uint32_t cores)
957 957 {
958 958 uint32_t bar_orig, val;
959 959
960 960 /*
961 961 * Change bar0 window to map sbtopci registers.
962 962 */
963 963 bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN);
964 964 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI);
965 965
966 966 /* Just read it and don't do anything */
967 967 val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE;
968 968
969 969 val = INL(bfe, BFE_SBINTVEC);
970 970 val |= cores;
971 971 OUTL(bfe, BFE_SBINTVEC, val);
972 972
973 973 val = INL(bfe, BFE_SSB_PCI_TRANS_2);
974 974 val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
975 975 OUTL(bfe, BFE_SSB_PCI_TRANS_2, val);
976 976
977 977 /*
978 978 * Restore bar0 window mapping.
979 979 */
980 980 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig);
981 981 }
982 982
983 983 /*
984 984 * Resets chip and starts PHY.
985 985 */
986 986 static void
987 987 bfe_chip_reset(bfe_t *bfe)
988 988 {
989 989 uint32_t val;
990 990
991 991 /* Set the interrupt vector for the enet core */
992 992 bfe_setup_config(bfe, BFE_INTVEC_ENET0);
993 993
994 994 /* check if core is up */
995 995 val = INL(bfe, BFE_SBTMSLOW) &
996 996 (BFE_RESET | BFE_REJECT | BFE_CLOCK);
997 997
998 998 if (val == BFE_CLOCK) {
999 999 OUTL(bfe, BFE_RCV_LAZY, 0);
1000 1000 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
1001 1001 (void) bfe_wait_bit(bfe, BFE_ENET_CTRL,
1002 1002 BFE_ENET_DISABLE, 10, 1);
1003 1003 OUTL(bfe, BFE_DMATX_CTRL, 0);
1004 1004 FLUSH(bfe, BFE_DMARX_STAT);
1005 1005 drv_usecwait(20000); /* 20 milli seconds */
1006 1006 if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) {
1007 1007 (void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE,
1008 1008 10, 0);
1009 1009 }
1010 1010 OUTL(bfe, BFE_DMARX_CTRL, 0);
1011 1011 }
1012 1012
1013 1013 bfe_core_reset(bfe);
1014 1014 bfe_clear_stats(bfe);
1015 1015
1016 1016 OUTL(bfe, BFE_MDIO_CTRL, 0x8d);
1017 1017 val = INL(bfe, BFE_DEVCTRL);
1018 1018 if (!(val & BFE_IPP))
1019 1019 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL);
1020 1020 else if (INL(bfe, BFE_DEVCTRL & BFE_EPR)) {
1021 1021 OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR);
1022 1022 drv_usecwait(20000); /* 20 milli seconds */
1023 1023 }
1024 1024
1025 1025 OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
1026 1026
1027 1027 OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
1028 1028
1029 1029 OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
1030 1030 BFE_LAZY_FC_MASK));
1031 1031
1032 1032 OUTL_OR(bfe, BFE_RCV_LAZY, 0);
1033 1033
1034 1034 OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
1035 1035 OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);
1036 1036
1037 1037 OUTL(bfe, BFE_TX_WMARK, 56);
1038 1038
1039 1039 /* Program DMA channels */
1040 1040 OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
1041 1041
1042 1042 /*
1043 1043 * DMA addresses need to be added to BFE_PCI_DMA
1044 1044 */
1045 1045 OUTL(bfe, BFE_DMATX_ADDR,
1046 1046 bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1047 1047
1048 1048 OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
1049 1049 | BFE_RX_CTRL_ENABLE);
1050 1050
1051 1051 OUTL(bfe, BFE_DMARX_ADDR,
1052 1052 bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1053 1053
1054 1054 (void) bfe_startup_phy(bfe);
1055 1055
1056 1056 bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
1057 1057 }
1058 1058
1059 1059 /*
1060 1060 * It enables interrupts. Should be the last step while starting chip.
1061 1061 */
1062 1062 static void
1063 1063 bfe_enable_chip_intrs(bfe_t *bfe)
1064 1064 {
1065 1065 /* Enable the chip and core */
1066 1066 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);
1067 1067
1068 1068 /* Enable interrupts */
1069 1069 OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
↓ open down ↓ |
1069 lines elided |
↑ open up ↑ |
1070 1070 }
1071 1071
1072 1072 /*
1073 1073 * Common code to take care of setting RX side mode (filter).
1074 1074 */
1075 1075 static void
1076 1076 bfe_set_rx_mode(bfe_t *bfe)
1077 1077 {
1078 1078 uint32_t val;
1079 1079 int i;
1080 - ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
1080 + ether_addr_t mac[ETHERADDRL] = {{0, 0, 0, 0, 0, 0}};
1081 1081
1082 1082 /*
1083 1083 * We don't touch RX filter if we were asked to suspend. It's fine
1084 1084 * if chip is not active (no interface is plumbed on us).
1085 1085 */
1086 1086 if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
1087 1087 return;
1088 1088
1089 1089 val = INL(bfe, BFE_RXCONF);
1090 1090
1091 1091 val &= ~BFE_RXCONF_PROMISC;
1092 1092 val &= ~BFE_RXCONF_DBCAST;
1093 1093
1094 1094 if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
1095 1095 OUTL(bfe, BFE_CAM_CTRL, 0);
1096 1096 FLUSH(bfe, BFE_CAM_CTRL);
1097 1097 } else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
1098 1098 val |= BFE_RXCONF_PROMISC;
1099 1099 val &= ~BFE_RXCONF_DBCAST;
1100 1100 } else {
1101 1101 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1102 1102 /* Flush everything */
1103 1103 OUTL(bfe, BFE_RXCONF, val |
1104 1104 BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
1105 1105 FLUSH(bfe, BFE_RXCONF);
1106 1106 }
1107 1107
1108 1108 /* Disable CAM */
1109 1109 OUTL(bfe, BFE_CAM_CTRL, 0);
1110 1110 FLUSH(bfe, BFE_CAM_CTRL);
1111 1111
1112 1112 /*
1113 1113 * We receive all multicast packets.
1114 1114 */
1115 1115 val |= BFE_RXCONF_ALLMULTI;
1116 1116
1117 1117 for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
1118 1118 bfe_cam_write(bfe, (uchar_t *)mac, i);
1119 1119 }
1120 1120
1121 1121 bfe_cam_write(bfe, bfe->bfe_ether_addr, i);
1122 1122
1123 1123 /* Enable CAM */
1124 1124 OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
1125 1125 FLUSH(bfe, BFE_CAM_CTRL);
1126 1126 }
1127 1127
1128 1128 DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
1129 1129 int, val);
1130 1130
1131 1131 OUTL(bfe, BFE_RXCONF, val);
1132 1132 FLUSH(bfe, BFE_RXCONF);
1133 1133 }
1134 1134
1135 1135 /*
1136 1136 * Reset various variable values to initial state.
1137 1137 */
1138 1138 static void
1139 1139 bfe_init_vars(bfe_t *bfe)
1140 1140 {
1141 1141 bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;
1142 1142
1143 1143 /* Initial assumption */
1144 1144 bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
1145 1145 bfe->bfe_chip.speed = 0;
1146 1146 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
1147 1147
1148 1148 bfe->bfe_periodic_id = NULL;
1149 1149 bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;
1150 1150
1151 1151 bfe->bfe_tx_stall_time = 0;
1152 1152 }
1153 1153
/*
 * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
 * has control (desc_ctl) and address (desc_addr) member.
 *
 * Programs every descriptor with its buffer length and DMA address,
 * marks the last descriptor with end-of-table (EOT), syncs the ring
 * for the device and resets the producer/consumer indices.
 */
static void
bfe_tx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	for (i = 0; i < r->r_ndesc; i++) {
		/* Buffer length, masked to the hardware length field. */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len & BFE_DESC_LEN));

		/*
		 * DMA addresses need to be added to BFE_PCI_DMA
		 */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
	}

	/* Flag the last descriptor so the chip wraps back to slot 0. */
	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	/* Ring starts empty: producer/consumer at 0, all slots free. */
	r->r_curr_desc = 0;
	r->r_avail_desc = TX_NUM_DESC;
	r->r_cons_desc = 0;
}
1185 1185
/*
 * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
 * has control (desc_ctl) and address (desc_addr) member.
 *
 * Also clears the software-visible RX header at the head of each
 * buffer, marks the last descriptor EOT, and programs the RX tail
 * pointer so the whole ring is available to the chip.
 */
static void
bfe_rx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	for (i = 0; i < r->r_ndesc; i++) {
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len& BFE_DESC_LEN));

		/* DMA addresses need to be added to BFE_PCI_DMA. */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Initialize rx header (len, flags) */
		bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));

		(void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Flag the last descriptor so the chip wraps back to slot 0. */
	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	/* TAIL of RX Descriptor */
	OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));

	r->r_curr_desc = 0;
	r->r_avail_desc = RX_NUM_DESC;
}
1222 1222
/*
 * Brings the chip to a fully running state: halt, reset, program the
 * descriptor rings and RX filter, then enable interrupts.
 * All driver locks must be held (ASSERT_ALL_LOCKS).
 * Always returns DDI_SUCCESS in the current implementation.
 */
static int
bfe_chip_start(bfe_t *bfe)
{
	ASSERT_ALL_LOCKS(bfe);

	/*
	 * Stop the chip first & then Reset the chip. At last enable interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Initialize Descriptor Rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);

	/* Check link, speed and duplex mode */
	(void) bfe_check_link(bfe);

	return (DDI_SUCCESS);
}
1255 1255
1256 1256
1257 1257 /*
1258 1258 * Clear chip statistics.
1259 1259 */
1260 1260 static void
1261 1261 bfe_clear_stats(bfe_t *bfe)
1262 1262 {
1263 1263 ulong_t r;
1264 1264
1265 1265 OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
1266 1266
1267 1267 /*
1268 1268 * Stat registers are cleared by reading.
1269 1269 */
1270 1270 for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
1271 1271 (void) INL(bfe, r);
1272 1272
1273 1273 for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
1274 1274 (void) INL(bfe, r);
1275 1275 }
1276 1276
/*
 * Collect chip statistics.
 *
 * Accumulates the clear-on-read hardware MIB counters into
 * bfe_hw_stats, then derives the MAC-layer error and collision
 * counters (bfe_stats) from them.
 *
 * NOTE(review): the pointer walk below relies on the
 * tx_good_octets..tx_pause and rx_good_octets..rx_nonpause_pkts
 * members of bfe_hw_stats being contiguous uint32_t fields laid out
 * in the same order as the hardware registers -- confirm against
 * the structure definition in bfe.h before changing either.
 */
static void
bfe_gather_stats(bfe_t *bfe)
{
	ulong_t r;
	uint32_t *v;
	uint32_t txerr = 0, rxerr = 0, coll = 0;

	/* Accumulate TX counters (registers clear on read). */
	v = &bfe->bfe_hw_stats.tx_good_octets;
	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/* Accumulate RX counters. */
	v = &bfe->bfe_hw_stats.rx_good_octets;
	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/*
	 * TX :
	 * -------
	 * tx_good_octets, tx_good_pkts, tx_octets
	 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
	 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
	 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
	 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
	 * tx_underruns, tx_total_cols, tx_single_cols
	 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
	 * tx_defered, tx_carrier_lost, tx_pause_pkts
	 *
	 * RX :
	 * -------
	 * rx_good_octets, rx_good_pkts, rx_octets
	 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
	 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
	 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
	 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
	 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
	 * rx_crc_errs, rx_align_errs, rx_symbol_errs
	 * rx_pause_pkts, rx_nonpause_pkts
	 */

	bfe->bfe_stats.ether_stat_carrier_errors =
	    bfe->bfe_hw_stats.tx_carrier_lost;

	/* Carrier loss is deliberately not counted as a TX error here. */
	/* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */

	bfe->bfe_stats.ether_stat_ex_collisions =
	    bfe->bfe_hw_stats.tx_excessive_cols;
	txerr += bfe->bfe_hw_stats.tx_excessive_cols;
	coll += bfe->bfe_hw_stats.tx_excessive_cols;

	bfe->bfe_stats.ether_stat_fcs_errors =
	    bfe->bfe_hw_stats.rx_crc_errs;
	rxerr += bfe->bfe_hw_stats.rx_crc_errs;

	bfe->bfe_stats.ether_stat_first_collisions =
	    bfe->bfe_hw_stats.tx_single_cols;
	coll += bfe->bfe_hw_stats.tx_single_cols;
	bfe->bfe_stats.ether_stat_multi_collisions =
	    bfe->bfe_hw_stats.tx_multiple_cols;
	coll += bfe->bfe_hw_stats.tx_multiple_cols;

	bfe->bfe_stats.ether_stat_toolong_errors =
	    bfe->bfe_hw_stats.rx_oversize_pkts;
	rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;

	bfe->bfe_stats.ether_stat_tooshort_errors =
	    bfe->bfe_hw_stats.rx_undersize;
	rxerr += bfe->bfe_hw_stats.rx_undersize;

	bfe->bfe_stats.ether_stat_tx_late_collisions +=
	    bfe->bfe_hw_stats.tx_late_cols;

	bfe->bfe_stats.ether_stat_defer_xmts +=
	    bfe->bfe_hw_stats.tx_defered;

	bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
	bfe->bfe_stats.ether_stat_macxmt_errors += txerr;

	bfe->bfe_stats.collisions += coll;
}
1363 1363
1364 1364 /*
1365 1365 * Gets the state for dladm command and all.
1366 1366 */
1367 1367 int
1368 1368 bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
1369 1369 {
1370 1370 bfe_t *bfe = (bfe_t *)arg;
1371 1371 uint64_t v;
1372 1372 int err = 0;
1373 1373
1374 1374 rw_enter(&bfe->bfe_rwlock, RW_READER);
1375 1375
1376 1376
1377 1377 switch (stat) {
1378 1378 default:
1379 1379 err = ENOTSUP;
1380 1380 break;
1381 1381
1382 1382 case MAC_STAT_IFSPEED:
1383 1383 /*
1384 1384 * MAC layer will ask for IFSPEED first and hence we
1385 1385 * collect it only once.
1386 1386 */
1387 1387 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1388 1388 /*
1389 1389 * Update stats from the hardware.
1390 1390 */
1391 1391 bfe_gather_stats(bfe);
1392 1392 }
1393 1393 v = bfe->bfe_chip.speed;
1394 1394 break;
1395 1395
1396 1396 case ETHER_STAT_ADV_CAP_100T4:
1397 1397 v = bfe->bfe_adv_100T4;
1398 1398 break;
1399 1399
1400 1400 case ETHER_STAT_ADV_CAP_100FDX:
1401 1401 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0;
1402 1402 break;
1403 1403
1404 1404 case ETHER_STAT_ADV_CAP_100HDX:
1405 1405 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0;
1406 1406 break;
1407 1407
1408 1408 case ETHER_STAT_ADV_CAP_10FDX:
1409 1409 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0;
1410 1410 break;
1411 1411
1412 1412 case ETHER_STAT_ADV_CAP_10HDX:
1413 1413 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0;
1414 1414 break;
1415 1415
1416 1416 case ETHER_STAT_ADV_CAP_ASMPAUSE:
1417 1417 v = 0;
1418 1418 break;
1419 1419
1420 1420 case ETHER_STAT_ADV_CAP_AUTONEG:
1421 1421 v = bfe->bfe_adv_aneg;
1422 1422 break;
1423 1423
1424 1424 case ETHER_STAT_ADV_CAP_PAUSE:
1425 1425 v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0;
1426 1426 break;
1427 1427
1428 1428 case ETHER_STAT_ADV_REMFAULT:
1429 1429 v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0;
1430 1430 break;
1431 1431
1432 1432 case ETHER_STAT_ALIGN_ERRORS:
1433 1433 /* MIB */
1434 1434 v = bfe->bfe_stats.ether_stat_align_errors;
1435 1435 break;
1436 1436
1437 1437 case ETHER_STAT_CAP_100T4:
1438 1438 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0;
1439 1439 break;
1440 1440
1441 1441 case ETHER_STAT_CAP_100FDX:
1442 1442 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0;
1443 1443 break;
1444 1444
1445 1445 case ETHER_STAT_CAP_100HDX:
1446 1446 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0;
1447 1447 break;
1448 1448
1449 1449 case ETHER_STAT_CAP_10FDX:
1450 1450 v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0;
1451 1451 break;
1452 1452
1453 1453 case ETHER_STAT_CAP_10HDX:
1454 1454 v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0;
1455 1455 break;
1456 1456
1457 1457 case ETHER_STAT_CAP_ASMPAUSE:
1458 1458 v = 0;
1459 1459 break;
1460 1460
1461 1461 case ETHER_STAT_CAP_AUTONEG:
1462 1462 v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0);
1463 1463 break;
1464 1464
1465 1465 case ETHER_STAT_CAP_PAUSE:
1466 1466 v = 1;
1467 1467 break;
1468 1468
1469 1469 case ETHER_STAT_CAP_REMFAULT:
1470 1470 v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0;
1471 1471 break;
1472 1472
1473 1473 case ETHER_STAT_CARRIER_ERRORS:
1474 1474 v = bfe->bfe_stats.ether_stat_carrier_errors;
1475 1475 break;
1476 1476
1477 1477 case ETHER_STAT_JABBER_ERRORS:
1478 1478 err = ENOTSUP;
1479 1479 break;
1480 1480
1481 1481 case ETHER_STAT_DEFER_XMTS:
1482 1482 v = bfe->bfe_stats.ether_stat_defer_xmts;
1483 1483 break;
1484 1484
1485 1485 case ETHER_STAT_EX_COLLISIONS:
1486 1486 /* MIB */
1487 1487 v = bfe->bfe_stats.ether_stat_ex_collisions;
1488 1488 break;
1489 1489
1490 1490 case ETHER_STAT_FCS_ERRORS:
1491 1491 /* MIB */
1492 1492 v = bfe->bfe_stats.ether_stat_fcs_errors;
1493 1493 break;
1494 1494
1495 1495 case ETHER_STAT_FIRST_COLLISIONS:
1496 1496 /* MIB */
1497 1497 v = bfe->bfe_stats.ether_stat_first_collisions;
1498 1498 break;
1499 1499
1500 1500 case ETHER_STAT_LINK_ASMPAUSE:
1501 1501 v = 0;
1502 1502 break;
1503 1503
1504 1504 case ETHER_STAT_LINK_AUTONEG:
1505 1505 v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 &&
1506 1506 (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0;
1507 1507 break;
1508 1508
1509 1509 case ETHER_STAT_LINK_DUPLEX:
1510 1510 v = bfe->bfe_chip.duplex;
1511 1511 break;
1512 1512
1513 1513 case ETHER_STAT_LP_CAP_100T4:
1514 1514 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0;
1515 1515 break;
1516 1516
1517 1517 case ETHER_STAT_LP_CAP_100FDX:
1518 1518 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0;
1519 1519 break;
1520 1520
1521 1521 case ETHER_STAT_LP_CAP_100HDX:
1522 1522 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0;
1523 1523 break;
1524 1524
1525 1525 case ETHER_STAT_LP_CAP_10FDX:
1526 1526 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0;
1527 1527 break;
1528 1528
1529 1529 case ETHER_STAT_LP_CAP_10HDX:
1530 1530 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0;
1531 1531 break;
1532 1532
1533 1533 case ETHER_STAT_LP_CAP_ASMPAUSE:
1534 1534 v = 0;
1535 1535 break;
1536 1536
1537 1537 case ETHER_STAT_LP_CAP_AUTONEG:
1538 1538 v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0;
1539 1539 break;
1540 1540
1541 1541 case ETHER_STAT_LP_CAP_PAUSE:
1542 1542 v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0;
1543 1543 break;
1544 1544
1545 1545 case ETHER_STAT_LP_REMFAULT:
1546 1546 v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0;
1547 1547 break;
1548 1548
1549 1549 case ETHER_STAT_MACRCV_ERRORS:
1550 1550 v = bfe->bfe_stats.ether_stat_macrcv_errors;
1551 1551 break;
1552 1552
1553 1553 case ETHER_STAT_MACXMT_ERRORS:
1554 1554 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1555 1555 break;
1556 1556
1557 1557 case ETHER_STAT_MULTI_COLLISIONS:
1558 1558 v = bfe->bfe_stats.ether_stat_multi_collisions;
1559 1559 break;
1560 1560
1561 1561 case ETHER_STAT_SQE_ERRORS:
1562 1562 err = ENOTSUP;
1563 1563 break;
1564 1564
1565 1565 case ETHER_STAT_TOOLONG_ERRORS:
1566 1566 v = bfe->bfe_stats.ether_stat_toolong_errors;
1567 1567 break;
1568 1568
1569 1569 case ETHER_STAT_TOOSHORT_ERRORS:
1570 1570 v = bfe->bfe_stats.ether_stat_tooshort_errors;
1571 1571 break;
1572 1572
1573 1573 case ETHER_STAT_TX_LATE_COLLISIONS:
1574 1574 v = bfe->bfe_stats.ether_stat_tx_late_collisions;
1575 1575 break;
1576 1576
1577 1577 case ETHER_STAT_XCVR_ADDR:
1578 1578 v = bfe->bfe_phy_addr;
1579 1579 break;
1580 1580
1581 1581 case ETHER_STAT_XCVR_ID:
1582 1582 v = bfe->bfe_phy_id;
1583 1583 break;
1584 1584
1585 1585 case MAC_STAT_BRDCSTRCV:
1586 1586 v = bfe->bfe_stats.brdcstrcv;
1587 1587 break;
1588 1588
1589 1589 case MAC_STAT_BRDCSTXMT:
1590 1590 v = bfe->bfe_stats.brdcstxmt;
1591 1591 break;
1592 1592
1593 1593 case MAC_STAT_MULTIXMT:
1594 1594 v = bfe->bfe_stats.multixmt;
1595 1595 break;
1596 1596
1597 1597 case MAC_STAT_COLLISIONS:
1598 1598 v = bfe->bfe_stats.collisions;
1599 1599 break;
1600 1600
1601 1601 case MAC_STAT_IERRORS:
1602 1602 v = bfe->bfe_stats.ierrors;
1603 1603 break;
1604 1604
1605 1605 case MAC_STAT_IPACKETS:
1606 1606 v = bfe->bfe_stats.ipackets;
1607 1607 break;
1608 1608
1609 1609 case MAC_STAT_MULTIRCV:
1610 1610 v = bfe->bfe_stats.multircv;
1611 1611 break;
1612 1612
1613 1613 case MAC_STAT_NORCVBUF:
1614 1614 v = bfe->bfe_stats.norcvbuf;
1615 1615 break;
1616 1616
1617 1617 case MAC_STAT_NOXMTBUF:
1618 1618 v = bfe->bfe_stats.noxmtbuf;
1619 1619 break;
1620 1620
1621 1621 case MAC_STAT_OBYTES:
1622 1622 v = bfe->bfe_stats.obytes;
1623 1623 break;
1624 1624
1625 1625 case MAC_STAT_OERRORS:
1626 1626 /* MIB */
1627 1627 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1628 1628 break;
1629 1629
1630 1630 case MAC_STAT_OPACKETS:
1631 1631 v = bfe->bfe_stats.opackets;
1632 1632 break;
1633 1633
1634 1634 case MAC_STAT_RBYTES:
1635 1635 v = bfe->bfe_stats.rbytes;
1636 1636 break;
1637 1637
1638 1638 case MAC_STAT_UNDERFLOWS:
1639 1639 v = bfe->bfe_stats.underflows;
1640 1640 break;
1641 1641
1642 1642 case MAC_STAT_OVERFLOWS:
1643 1643 v = bfe->bfe_stats.overflows;
1644 1644 break;
1645 1645 }
1646 1646
1647 1647 rw_exit(&bfe->bfe_rwlock);
1648 1648
1649 1649 *val = v;
1650 1650 return (err);
1651 1651 }
1652 1652
/*
 * MAC callback: read the current value of a link property into 'val'.
 * Returns 0 on success or ENOTSUP for properties this driver does not
 * implement.  The ASSERTs document the minimum buffer size the MAC
 * framework guarantees for each property.
 */
int
bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	int err = 0;

	switch (num) {
	case MAC_PROP_DUPLEX:
		ASSERT(sz >= sizeof (link_duplex_t));
		bcopy(&bfe->bfe_chip.duplex, val, sizeof (link_duplex_t));
		break;

	case MAC_PROP_SPEED:
		ASSERT(sz >= sizeof (uint64_t));
		bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)val = bfe->bfe_adv_aneg;
		break;

	/*
	 * The ADV_ (advertised) and EN_ (enabled) variants report the
	 * same bfe_adv_* soft-state fields.
	 */
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_ADV_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	default:
		err = ENOTSUP;
	}

	return (err);
}
1721 1721
1722 1722
/*
 * MAC callback: describe a link property's permissions and defaults.
 * Read-only properties (duplex, speed, all ADV_* capabilities and the
 * unsupported 100T4 modes) are marked MAC_PROP_PERM_READ; the settable
 * EN_* properties report the hardware capability as their default.
 */
static void
bfe_mac_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t prh)
{
	bfe_t *bfe = (bfe_t *)arg;

	switch (num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_aneg);
		break;

	case MAC_PROP_EN_100FDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100fdx);
		break;

	case MAC_PROP_EN_100HDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100hdx);
		break;

	case MAC_PROP_EN_10FDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10fdx);
		break;

	case MAC_PROP_EN_10HDX_CAP:
		mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10hdx);
		break;
	}
}
1762 1762
1763 1763
/*ARGSUSED*/
/*
 * MAC callback: set a link property (autonegotiation or one of the
 * EN_* speed/duplex advertisements).
 *
 * Returns ENOTSUP for unknown properties or when the hardware lacks
 * the capability; otherwise updates the advertisement and, if the
 * chip is active, restarts it so the new advertisement takes effect.
 * Always returns 0 once the property is accepted.
 */
int
bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	uint8_t *advp;		/* advertised-value soft state to update */
	uint8_t *capp;		/* matching hardware capability flag */
	int r = 0;

	switch (num) {
	case MAC_PROP_EN_100FDX_CAP:
		advp = &bfe->bfe_adv_100fdx;
		capp = &bfe->bfe_cap_100fdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		advp = &bfe->bfe_adv_100hdx;
		capp = &bfe->bfe_cap_100hdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		advp = &bfe->bfe_adv_10fdx;
		capp = &bfe->bfe_cap_10fdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		advp = &bfe->bfe_adv_10hdx;
		capp = &bfe->bfe_cap_10hdx;
		break;

	case MAC_PROP_AUTONEG:
		advp = &bfe->bfe_adv_aneg;
		capp = &bfe->bfe_cap_aneg;
		break;

	default:
		return (ENOTSUP);
	}

	/* Hardware cannot do this mode at all. */
	if (*capp == 0)
		return (ENOTSUP);

	bfe_grab_locks(bfe);

	if (*advp != *(const uint8_t *)val) {
		*advp = *(const uint8_t *)val;

		bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP;
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/*
			 * We need to stop the timer before grabbing locks
			 * otherwise we can land-up in deadlock with untimeout.
			 */
			bfe_stop_timer(bfe);

			bfe->bfe_chip_action |= BFE_ACTION_RESTART;

			bfe_chip_restart(bfe);

			/*
			 * We leave SETPROP because properties can be
			 * temporary.
			 */
			bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART);
			r = 1;
		}
	}

	bfe_release_locks(bfe);

	/* kick-off a potential stopped downstream */
	if (r)
		mac_tx_update(bfe->bfe_machdl);

	return (0);
}
1841 1841
1842 1842
1843 1843 int
1844 1844 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea)
1845 1845 {
1846 1846 bfe_t *bfe = (bfe_t *)arg;
1847 1847
1848 1848 bfe_grab_locks(bfe);
1849 1849 bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL);
1850 1850 bfe_set_rx_mode(bfe);
1851 1851 bfe_release_locks(bfe);
1852 1852 return (0);
1853 1853 }
1854 1854
1855 1855 int
1856 1856 bfe_mac_start(void *arg)
1857 1857 {
1858 1858 bfe_t *bfe = (bfe_t *)arg;
1859 1859
1860 1860 bfe_grab_locks(bfe);
1861 1861 if (bfe_chip_start(bfe) == DDI_FAILURE) {
1862 1862 bfe_release_locks(bfe);
1863 1863 return (EINVAL);
1864 1864 }
1865 1865
1866 1866 bfe_release_locks(bfe);
1867 1867
1868 1868 mac_tx_update(bfe->bfe_machdl);
1869 1869
1870 1870 return (0);
1871 1871 }
1872 1872
/*
 * MAC callback: bring the interface down.  Halts the chip and PHY,
 * resets the core (leaving the PHY running for link detection) and
 * disables the RX filter.  The periodic timer is stopped before the
 * locks are taken to avoid deadlocking against untimeout().
 */
void
bfe_mac_stop(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;

	/*
	 * We need to stop the timer before grabbing locks otherwise
	 * we can land-up in deadlock with untimeout.
	 */
	bfe_stop_timer(bfe);

	bfe_grab_locks(bfe);

	/*
	 * First halt the chip by disabling interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * This will leave the PHY running.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Disable RX register.
	 */
	bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);

	bfe_release_locks(bfe);
}
1907 1907
/*
 * Send a packet down the wire.
 *
 * Copies the message into the pre-allocated DMA buffer of the current
 * TX descriptor slot, programs the descriptor and advances the TX
 * tail pointer.  Caller must hold the TX ring lock.
 *
 * Returns BFE_SUCCESS when the packet was consumed (including the
 * too-large case, where it is silently dropped) and BFE_FAILURE when
 * the ring is full -- the caller keeps ownership of 'mp' and
 * bfe_tx_resched is set so the reclaim path restarts the stream.
 */
static int
bfe_send_a_packet(bfe_t *bfe, mblk_t *mp)
{
	bfe_ring_t *r = &bfe->bfe_tx_ring;
	uint32_t cur = r->r_curr_desc;
	uint32_t next;
	size_t pktlen = msgsize(mp);
	uchar_t *buf;
	uint32_t v;

	ASSERT(MUTEX_HELD(&r->r_lock));
	ASSERT(mp != NULL);

	/* Oversized packet: drop it rather than overrun the DMA buffer. */
	if (pktlen > r->r_buf_len) {
		freemsg(mp);
		return (BFE_SUCCESS);
	}

	/*
	 * There is a big reason why we don't check for '0'. It becomes easy
	 * for us to not roll over the ring since we are based on producer (tx)
	 * and consumer (reclaim by an interrupt) model. Especially when we
	 * run out of TX descriptor, chip will send a single interrupt and
	 * both producer and consumer counter will be same. So we keep a
	 * difference of 1 always.
	 */
	if (r->r_avail_desc <= 1) {
		bfe->bfe_stats.noxmtbuf++;
		bfe->bfe_tx_resched = 1;
		return (BFE_FAILURE);
	}

	/*
	 * Get the DMA buffer to hold packet.
	 */
	buf = (uchar_t *)r->r_buf_dma[cur].addr;

	mcopymsg(mp, buf);	/* it also frees mp */

	/*
	 * Gather statistics.
	 */
	if (buf[0] & 0x1) {
		/* Group bit set: multicast unless it's the broadcast addr. */
		if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0)
			bfe->bfe_stats.multixmt++;
		else
			bfe->bfe_stats.brdcstxmt++;
	}
	bfe->bfe_stats.opackets++;
	bfe->bfe_stats.obytes += pktlen;


	/*
	 * Program the DMA descriptor (start and end of frame are same).
	 */
	next = cur;
	v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF |
	    BFE_DESC_EOF;

	/* Last slot also carries end-of-table so the chip wraps. */
	if (cur == (TX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr),
	    (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA));

	/*
	 * Sync the packet data for the device.
	 */
	(void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV);

	/* Move to next descriptor slot */
	BFE_INC_SLOT(next, TX_NUM_DESC);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	r->r_curr_desc = next;

	/*
	 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
	 * descriptor slot are being programmed.
	 */
	OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t));
	FLUSH(bfe, BFE_DMATX_PTR);

	r->r_avail_desc--;

	/*
	 * Let timeout know that it must reset the chip if a
	 * packet is not sent down the wire for more than 5 seconds.
	 */
	bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL);

	return (BFE_SUCCESS);
}
2010 2010
2011 2011 mblk_t *
2012 2012 bfe_mac_transmit_packet(void *arg, mblk_t *mp)
2013 2013 {
2014 2014 bfe_t *bfe = (bfe_t *)arg;
2015 2015 bfe_ring_t *r = &bfe->bfe_tx_ring;
2016 2016 mblk_t *nmp;
2017 2017
2018 2018 mutex_enter(&r->r_lock);
2019 2019
2020 2020 if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2021 2021 DTRACE_PROBE1(tx__chip__not__active, int, bfe->bfe_unit);
2022 2022
2023 2023 freemsgchain(mp);
2024 2024 mutex_exit(&r->r_lock);
2025 2025 return (NULL);
2026 2026 }
2027 2027
2028 2028
2029 2029 while (mp != NULL) {
2030 2030 nmp = mp->b_next;
2031 2031 mp->b_next = NULL;
2032 2032
2033 2033 if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) {
2034 2034 mp->b_next = nmp;
2035 2035 break;
2036 2036 }
2037 2037 mp = nmp;
2038 2038 }
2039 2039
2040 2040 mutex_exit(&r->r_lock);
2041 2041
2042 2042 return (mp);
2043 2043 }
2044 2044
2045 2045 int
2046 2046 bfe_mac_set_promisc(void *arg, boolean_t promiscflag)
2047 2047 {
2048 2048 bfe_t *bfe = (bfe_t *)arg;
2049 2049
2050 2050 bfe_grab_locks(bfe);
2051 2051 if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2052 2052 bfe_release_locks(bfe);
2053 2053 return (EIO);
2054 2054 }
2055 2055
2056 2056 if (promiscflag) {
2057 2057 /* Set Promiscous on */
2058 2058 bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC;
2059 2059 } else {
2060 2060 bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC;
2061 2061 }
2062 2062
2063 2063 bfe_set_rx_mode(bfe);
2064 2064 bfe_release_locks(bfe);
2065 2065
2066 2066 return (0);
2067 2067 }
2068 2068
/*
 * MAC callback: add/remove a multicast address.  Intentionally a
 * no-op: bfe_set_rx_mode() always enables all-multicast reception
 * (BFE_RXCONF_ALLMULTI), so per-address CAM filtering is unnecessary.
 */
int
bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
{
	/*
	 * It was too much of pain to implement multicast in CAM. Instead
	 * we never disable multicast filter.
	 */
	return (0);
}
2078 2078
/*
 * GLDv3 entry-point table for this driver.  The first member is the
 * callback-presence mask advertising the optional setprop/getprop/
 * propinfo entry points implemented below.
 */
static mac_callbacks_t bfe_mac_callbacks = {
	MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	bfe_mac_getstat,	/* gets stats */
	bfe_mac_start,		/* starts mac */
	bfe_mac_stop,		/* stops mac */
	bfe_mac_set_promisc,	/* sets promisc mode for snoop */
	bfe_mac_set_multicast,	/* multicast implementation */
	bfe_mac_set_ether_addr,	/* sets ethernet address (unicast) */
	bfe_mac_transmit_packet, /* transmits packet */
	NULL,			/* reserved */
	NULL,			/* ioctl */
	NULL,			/* getcap */
	NULL,			/* open */
	NULL,			/* close */
	bfe_mac_setprop,
	bfe_mac_getprop,
	bfe_mac_propinfo
};
2097 2097
/*
 * Handle error interrupt conditions (intr_mask is the interrupt
 * status).  Most errors schedule a chip restart (via the chip_action
 * flags) and halt the chip; a TX FIFO underflow is only counted.
 *
 * NOTE(review): if none of the early cases match, the tail of this
 * function sets the RESTART flags and halts the chip unconditionally,
 * even when neither DMA status register shows an error -- confirm
 * this is intended before relying on it.
 */
static void
bfe_error_handler(bfe_t *bfe, int intr_mask)
{
	uint32_t v;

	/* RX FIFO overflow: restart required. */
	if (intr_mask & BFE_ISTAT_RFO) {
		bfe->bfe_stats.overflows++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* TX FIFO underflow: count it, no restart. */
	if (intr_mask & BFE_ISTAT_TFU) {
		bfe->bfe_stats.underflows++;
		return;
	}

	/* Descriptor Protocol Error */
	if (intr_mask & BFE_ISTAT_DPE) {
		bfe_error(bfe->bfe_dip,
		    "Descriptor Protocol Error. Halting Chip");
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* Descriptor Error */
	if (intr_mask & BFE_ISTAT_DSCE) {
		bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
		goto action;
	}

	/* Receive Descr. Underflow */
	if (intr_mask & BFE_ISTAT_RDU) {
		bfe_error(bfe->bfe_dip,
		    "Receive Descriptor Underflow. Restarting Chip");
		bfe->bfe_stats.ether_stat_macrcv_errors++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	v = INL(bfe, BFE_DMATX_STAT);

	/* Error while sending a packet */
	if (v & BFE_STAT_EMASK) {
		bfe->bfe_stats.ether_stat_macxmt_errors++;
		bfe_error(bfe->bfe_dip,
		    "Error while sending a packet. Restarting Chip");
	}

	/* Error while receiving a packet */
	v = INL(bfe, BFE_DMARX_STAT);
	if (v & BFE_RX_FLAG_ERRORS) {
		bfe->bfe_stats.ierrors++;
		bfe_error(bfe->bfe_dip,
		    "Error while receiving a packet. Restarting Chip");
	}


	bfe->bfe_chip_action |=
	    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);

action:
	bfe_chip_halt(bfe);
}
2164 2164
/*
 * It will recycle a RX descriptor slot: the slot's RX header is zeroed
 * and synced for the device, then the descriptor's control and address
 * words are rewritten so the chip can fill the slot again.
 */
static void
bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
{
	bfe_ring_t *r = &bfe->bfe_rx_ring;
	uint32_t v;

	/* Callers may pass a raw running index; wrap it into the ring. */
	slot %= RX_NUM_DESC;

	/* Clear the header area the chip writes at the buffer start. */
	bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));

	(void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);

	v = r->r_buf_dma[slot].len & BFE_DESC_LEN;
	/* The last slot carries the end-of-table marker. */
	if (slot == (RX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
	    (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
}
2192 2192
2193 2193 /*
2194 2194 * Gets called from interrupt context to handle RX interrupt.
2195 2195 */
2196 2196 static mblk_t *
2197 2197 bfe_receive(bfe_t *bfe, int intr_mask)
2198 2198 {
2199 2199 int rxstat, current;
2200 2200 mblk_t *mp = NULL, *rx_head, *rx_tail;
2201 2201 uchar_t *rx_header;
2202 2202 uint16_t len;
2203 2203 uchar_t *bp;
2204 2204 bfe_ring_t *r = &bfe->bfe_rx_ring;
2205 2205 int i;
2206 2206
2207 2207 rxstat = INL(bfe, BFE_DMARX_STAT);
2208 2208 current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t);
2209 2209 i = r->r_curr_desc;
2210 2210
2211 2211 rx_head = rx_tail = NULL;
2212 2212
2213 2213 DTRACE_PROBE3(receive, int, bfe->bfe_unit,
2214 2214 int, r->r_curr_desc,
2215 2215 int, current);
2216 2216
2217 2217 for (i = r->r_curr_desc; i != current;
2218 2218 BFE_INC_SLOT(i, RX_NUM_DESC)) {
2219 2219
2220 2220 /*
2221 2221 * Sync the buffer associated with the descriptor table entry.
2222 2222 */
2223 2223 (void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len,
2224 2224 DDI_DMA_SYNC_FORKERNEL);
2225 2225
2226 2226 rx_header = (void *)r->r_buf_dma[i].addr;
2227 2227
2228 2228 /*
2229 2229 * We do this to make sure we are endian neutral. Chip is
2230 2230 * big endian.
2231 2231 *
2232 2232 * The header looks like :-
2233 2233 *
2234 2234 * Offset 0 -> uint16_t len
2235 2235 * Offset 2 -> uint16_t flags
2236 2236 * Offset 4 -> uint16_t pad[12]
2237 2237 */
2238 2238 len = (rx_header[1] << 8) | rx_header[0];
2239 2239 len -= 4; /* CRC bytes need to be removed */
2240 2240
2241 2241 /*
2242 2242 * Don't receive this packet if pkt length is greater than
2243 2243 * MTU + VLAN_TAGSZ.
2244 2244 */
2245 2245 if (len > r->r_buf_len) {
2246 2246 /* Recycle slot for later use */
2247 2247 bfe_rx_desc_buf_reinit(bfe, i);
2248 2248 continue;
2249 2249 }
2250 2250
2251 2251 if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) {
2252 2252 mp->b_rptr += VLAN_TAGSZ;
2253 2253 bp = mp->b_rptr;
2254 2254 mp->b_wptr = bp + len;
2255 2255
2256 2256 /* sizeof (bfe_rx_header_t) + 2 */
2257 2257 bcopy(r->r_buf_dma[i].addr +
2258 2258 BFE_RX_OFFSET, bp, len);
2259 2259
2260 2260 mp->b_next = NULL;
2261 2261 if (rx_tail == NULL)
2262 2262 rx_head = rx_tail = mp;
2263 2263 else {
2264 2264 rx_tail->b_next = mp;
2265 2265 rx_tail = mp;
2266 2266 }
2267 2267
2268 2268 /* Number of packets received so far */
2269 2269 bfe->bfe_stats.ipackets++;
2270 2270
2271 2271 /* Total bytes of packets received so far */
2272 2272 bfe->bfe_stats.rbytes += len;
2273 2273
2274 2274 if (bcmp(mp->b_rptr, bfe_broadcast, ETHERADDRL) == 0)
2275 2275 bfe->bfe_stats.brdcstrcv++;
2276 2276 else
2277 2277 bfe->bfe_stats.multircv++;
2278 2278 } else {
2279 2279 bfe->bfe_stats.norcvbuf++;
2280 2280 /* Recycle the slot for later use */
2281 2281 bfe_rx_desc_buf_reinit(bfe, i);
2282 2282 break;
2283 2283 }
2284 2284
2285 2285 /*
2286 2286 * Reinitialize the current descriptor slot's buffer so that
2287 2287 * it can be reused.
2288 2288 */
2289 2289 bfe_rx_desc_buf_reinit(bfe, i);
2290 2290 }
2291 2291
2292 2292 r->r_curr_desc = i;
2293 2293
2294 2294 (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
2295 2295
2296 2296 return (rx_head);
2297 2297 }
2298 2298
/*
 * Reclaims TX descriptors the chip has finished with.  Every slot from
 * the last consumed position up to the chip's current position (read
 * from BFE_DMATX_STAT) is rewritten so it can be reused, and the count
 * of available descriptors is bumped.  Returns the chip's current slot.
 *
 * Caller must hold the TX ring lock (see bfe_tx_done()).
 */
static int
bfe_tx_reclaim(bfe_ring_t *r)
{
	uint32_t cur, start;
	uint32_t v;

	/* Convert the chip's byte offset into a descriptor index. */
	cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	cur = cur / sizeof (bfe_desc_t);

	/*
	 * Start with the last descriptor consumed by the chip.
	 */
	start = r->r_cons_desc;

	DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit,
	    int, start,
	    int, cur);

	/*
	 * There will be at least one descriptor to process.
	 */
	while (start != cur) {
		r->r_avail_desc++;
		v = r->r_buf_dma[start].len & BFE_DESC_LEN;
		/* The last slot carries the end-of-table marker. */
		if (start == (TX_NUM_DESC - 1))
			v |= BFE_DESC_EOT;

		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
		    (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Move to next descriptor in TX ring */
		BFE_INC_SLOT(start, TX_NUM_DESC);
	}

	(void) ddi_dma_sync(r->r_desc_dma_handle,
	    0, (r->r_ndesc * sizeof (bfe_desc_t)),
	    DDI_DMA_SYNC_FORDEV);

	r->r_cons_desc = start; 	/* consumed pointer */
	/* TX made progress, so clear the stall-detection clock. */
	r->r_bfe->bfe_tx_stall_time = 0;

	return (cur);
}
2343 2343
2344 2344 static int
2345 2345 bfe_tx_done(bfe_t *bfe, int intr_mask)
2346 2346 {
2347 2347 bfe_ring_t *r = &bfe->bfe_tx_ring;
2348 2348 int resched = 0;
2349 2349
2350 2350 mutex_enter(&r->r_lock);
2351 2351 (void) bfe_tx_reclaim(r);
2352 2352
2353 2353 if (bfe->bfe_tx_resched) {
2354 2354 resched = 1;
2355 2355 bfe->bfe_tx_resched = 0;
2356 2356 }
2357 2357 mutex_exit(&r->r_lock);
2358 2358
2359 2359 return (resched);
2360 2360 }
2361 2361
/*
 * ISR for interrupt handling.  Acknowledges the interrupt, then
 * dispatches RX, TX-done and error work.  Returns DDI_INTR_UNCLAIMED
 * only when no interesting status bit was set.
 */
static uint_t
bfe_interrupt(caddr_t arg1, caddr_t arg2)
{
	bfe_t *bfe = (void *)arg1;
	uint32_t intr_stat;
	mblk_t *rx_head = NULL;
	int resched = 0;

	/*
	 * Grab the lock to avoid stopping the chip while this interrupt
	 * is handled.
	 */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	/*
	 * It's necessary to read intr stat again because masking interrupt
	 * register does not really mask interrupts coming from the chip.
	 */
	intr_stat = INL(bfe, BFE_INTR_STAT);
	intr_stat &= BFE_IMASK_DEF;
	/* Ack the bits we are about to service ... */
	OUTL(bfe, BFE_INTR_STAT, intr_stat);
	/* ... and flush the write with a read-back. */
	(void) INL(bfe, BFE_INTR_STAT);

	/* Nothing for us: the interrupt belongs to someone else. */
	if (intr_stat == 0) {
		rw_exit(&bfe->bfe_rwlock);
		return (DDI_INTR_UNCLAIMED);
	}

	DTRACE_PROBE2(bfe__interrupt, int, bfe->bfe_unit,
	    int, intr_stat);

	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
		/*
		 * If chip is suspended then we just return.
		 */
		if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
			rw_exit(&bfe->bfe_rwlock);
			DTRACE_PROBE1(interrupt__chip__is__suspend, int,
			    bfe->bfe_unit);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Halt the chip again i.e basically disable interrupts.
		 */
		bfe_chip_halt(bfe);
		rw_exit(&bfe->bfe_rwlock);
		DTRACE_PROBE1(interrupt__chip__not__active, int,
		    bfe->bfe_unit);
		return (DDI_INTR_CLAIMED);
	}

	/* A packet was received */
	if (intr_stat & BFE_ISTAT_RX) {
		rx_head = bfe_receive(bfe, intr_stat);
	}

	/* A packet was sent down the wire */
	if (intr_stat & BFE_ISTAT_TX) {
		resched = bfe_tx_done(bfe, intr_stat);
	}

	/* There was an error */
	if (intr_stat & BFE_ISTAT_ERRORS) {
		bfe_error_handler(bfe, intr_stat);
	}

	rw_exit(&bfe->bfe_rwlock);

	/*
	 * Pass the list of packets received from chip to MAC layer.
	 * (Done after dropping the lock to avoid holding it across
	 * upcalls.)
	 */
	if (rx_head) {
		mac_rx(bfe->bfe_machdl, 0, rx_head);
	}

	/*
	 * Let the MAC start sending pkts to a potential stopped stream.
	 */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);

	return (DDI_INTR_CLAIMED);
}
2449 2449
/*
 * Removes registered interrupt handler and frees the interrupt handle
 * allocated by bfe_add_intr().
 */
static void
bfe_remove_intr(bfe_t *bfe)
{
	(void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
	(void) ddi_intr_free(bfe->bfe_intrhdl);
}
2459 2459
2460 2460 /*
2461 2461 * Add an interrupt for the driver.
2462 2462 */
2463 2463 static int
2464 2464 bfe_add_intr(bfe_t *bfe)
2465 2465 {
2466 2466 int nintrs = 1;
2467 2467 int ret;
2468 2468
2469 2469 ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl,
2470 2470 DDI_INTR_TYPE_FIXED, /* type */
2471 2471 0, /* inumber */
2472 2472 1, /* count */
2473 2473 &nintrs, /* actual nintrs */
2474 2474 DDI_INTR_ALLOC_STRICT);
2475 2475
2476 2476 if (ret != DDI_SUCCESS) {
2477 2477 bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed"
2478 2478 " : ret : %d", ret);
2479 2479 return (DDI_FAILURE);
2480 2480 }
2481 2481
2482 2482 ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL);
2483 2483 if (ret != DDI_SUCCESS) {
2484 2484 bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed");
2485 2485 (void) ddi_intr_free(bfe->bfe_intrhdl);
2486 2486 return (DDI_FAILURE);
2487 2487 }
2488 2488
2489 2489 ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri);
2490 2490 if (ret != DDI_SUCCESS) {
2491 2491 bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed");
2492 2492 bfe_remove_intr(bfe);
2493 2493 return (DDI_FAILURE);
2494 2494 }
2495 2495
2496 2496 return (DDI_SUCCESS);
2497 2497 }
2498 2498
2499 2499
2500 2500 /*
2501 2501 * Identify chipset family.
2502 2502 */
2503 2503 static int
2504 2504 bfe_identify_hardware(bfe_t *bfe)
2505 2505 {
2506 2506 uint16_t vid, did;
2507 2507 int i;
2508 2508
2509 2509 vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID);
2510 2510 did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID);
2511 2511
2512 2512 for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
2513 2513 if (bfe_cards[i].vendor_id == vid &&
2514 2514 bfe_cards[i].device_id == did) {
2515 2515 return (BFE_SUCCESS);
2516 2516 }
2517 2517 }
2518 2518
2519 2519 bfe_error(bfe->bfe_dip, "bfe driver is attaching to unknown pci%d,%d"
2520 2520 " vendor/device-id card", vid, did);
2521 2521
2522 2522 return (BFE_SUCCESS);
2523 2523 }
2524 2524
2525 2525 /*
2526 2526 * Maps device registers.
2527 2527 */
2528 2528 static int
2529 2529 bfe_regs_map(bfe_t *bfe)
2530 2530 {
2531 2531 dev_info_t *dip = bfe->bfe_dip;
2532 2532 int ret;
2533 2533
2534 2534 ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0,
2535 2535 &bfe_dev_attr, &bfe->bfe_mem_regset.hdl);
2536 2536
2537 2537 if (ret != DDI_SUCCESS) {
2538 2538 bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed");
2539 2539 return (DDI_FAILURE);
2540 2540 }
2541 2541
2542 2542 return (DDI_SUCCESS);
2543 2543 }
2544 2544
/*
 * Undoes bfe_regs_map(): releases the register mapping.
 */
static void
bfe_unmap_regs(bfe_t *bfe)
{
	ddi_regs_map_free(&bfe->bfe_mem_regset.hdl);
}
2550 2550
/*
 * Reads the chip's EEPROM and extracts the factory MAC address into
 * both bfe_dev_addr and bfe_ether_addr.  Always returns DDI_SUCCESS.
 */
static int
bfe_get_chip_config(bfe_t *bfe)
{
	uint32_t prom[BFE_EEPROM_SIZE];
	int i;

	/*
	 * Read EEPROM in prom[]
	 *
	 * NOTE(review): prom[] is filled but never referenced afterwards;
	 * the MAC address below is re-read byte-by-byte with INB().
	 * Possibly the bulk read is only a warm-up pass -- confirm before
	 * removing it.
	 */
	for (i = 0; i < BFE_EEPROM_SIZE; i++) {
		prom[i] = INL(bfe, BFE_EEPROM_BASE + i * sizeof (uint32_t));
	}

	/*
	 * Each pair of MAC-address bytes is stored swapped in EEPROM,
	 * hence the 79/78, 81/80, 83/82 offset order below.
	 */
	bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] =
	    INB(bfe, BFE_EEPROM_BASE + 79);

	bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] =
	    INB(bfe, BFE_EEPROM_BASE + 78);

	bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] =
	    INB(bfe, BFE_EEPROM_BASE + 81);

	bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] =
	    INB(bfe, BFE_EEPROM_BASE + 80);

	bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] =
	    INB(bfe, BFE_EEPROM_BASE + 83);

	bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] =
	    INB(bfe, BFE_EEPROM_BASE + 82);

	/* -1 acts as a "not yet known" sentinel for the PHY address. */
	bfe->bfe_phy_addr = -1;

	return (DDI_SUCCESS);
}
2586 2586
/*
 * Ring Management routines
 */

/*
 * Allocates and binds one DMA buffer for ring slot 'slot': DMA handle,
 * backing memory and address binding, unwinding in reverse order on
 * failure.  Requires the binding to produce exactly one cookie.
 *
 * NOTE(review): 'd' (presumably the DMA direction) is unused here --
 * the binding is always DDI_DMA_RDWR.
 */
static int
bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d)
{
	int err;
	uint_t count = 0;

	err = ddi_dma_alloc_handle(bfe->bfe_dip,
	    &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].handle);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " alloc_handle failed");
		goto fail0;
	}

	err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle,
	    r->r_buf_len, &bfe_buf_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
	    &r->r_buf_dma[slot].len,
	    &r->r_buf_dma[slot].acchdl);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " mem_alloc failed :%d", err);
		goto fail1;
	}

	err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
	    NULL, r->r_buf_dma[slot].addr,
	    r->r_buf_dma[slot].len,
	    (DDI_DMA_RDWR | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].cookie,
	    &count);

	if (err != DDI_DMA_MAPPED) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " bind_handle failed");
		goto fail2;
	}

	/* The chip needs a single contiguous DMA segment per buffer. */
	if (count > 1) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " more than one DMA cookie");
		(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
		goto fail2;
	}

	return (DDI_SUCCESS);
fail2:
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
fail1:
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
fail0:
	return (DDI_FAILURE);
}
2647 2647
2648 2648 static void
2649 2649 bfe_ring_buf_free(bfe_ring_t *r, int slot)
2650 2650 {
2651 2651 if (r->r_buf_dma == NULL)
2652 2652 return;
2653 2653
2654 2654 (void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
2655 2655 ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
2656 2656 ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
2657 2657 }
2658 2658
2659 2659 static void
2660 2660 bfe_buffer_free(bfe_ring_t *r)
2661 2661 {
2662 2662 int i;
2663 2663
2664 2664 for (i = 0; i < r->r_ndesc; i++) {
2665 2665 bfe_ring_buf_free(r, i);
2666 2666 }
2667 2667 }
2668 2668
/*
 * Releases the descriptor ring itself plus the per-slot bookkeeping
 * array.  The per-slot data buffers must already have been freed
 * (bfe_buffer_free()/bfe_ring_buf_free()).
 */
static void
bfe_ring_desc_free(bfe_ring_t *r)
{
	(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
	ddi_dma_mem_free(&r->r_desc_acc_handle);
	ddi_dma_free_handle(&r->r_desc_dma_handle);
	kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));

	/* Mark both as gone so a stale pointer can't be reused. */
	r->r_buf_dma = NULL;
	r->r_desc = NULL;
}
2680 2680
2681 2681
2682 2682 static int
2683 2683 bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
2684 2684 {
2685 2685 int err, i, fail = 0;
2686 2686 caddr_t ring;
2687 2687 size_t size_krnl = 0, size_dma = 0, ring_len = 0;
2688 2688 ddi_dma_cookie_t cookie;
2689 2689 uint_t count = 0;
2690 2690
2691 2691 ASSERT(bfe != NULL);
2692 2692
2693 2693 size_krnl = r->r_ndesc * sizeof (bfe_dma_t);
2694 2694 size_dma = r->r_ndesc * sizeof (bfe_desc_t);
2695 2695 r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP);
2696 2696
2697 2697
2698 2698 err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc,
2699 2699 DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle);
2700 2700
2701 2701 if (err != DDI_SUCCESS) {
2702 2702 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2703 2703 " ddi_dma_alloc_handle()");
2704 2704 kmem_free(r->r_buf_dma, size_krnl);
2705 2705 return (DDI_FAILURE);
2706 2706 }
2707 2707
2708 2708
2709 2709 err = ddi_dma_mem_alloc(r->r_desc_dma_handle,
2710 2710 size_dma, &bfe_buf_attr,
2711 2711 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2712 2712 &ring, &ring_len, &r->r_desc_acc_handle);
2713 2713
2714 2714 if (err != DDI_SUCCESS) {
2715 2715 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2716 2716 " ddi_dma_mem_alloc()");
2717 2717 ddi_dma_free_handle(&r->r_desc_dma_handle);
2718 2718 kmem_free(r->r_buf_dma, size_krnl);
2719 2719 return (DDI_FAILURE);
2720 2720 }
2721 2721
2722 2722 err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle,
2723 2723 NULL, ring, ring_len,
2724 2724 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2725 2725 DDI_DMA_SLEEP, NULL,
2726 2726 &cookie, &count);
2727 2727
2728 2728 if (err != DDI_SUCCESS) {
2729 2729 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2730 2730 " ddi_dma_addr_bind_handle()");
2731 2731 ddi_dma_mem_free(&r->r_desc_acc_handle);
2732 2732 ddi_dma_free_handle(&r->r_desc_dma_handle);
2733 2733 kmem_free(r->r_buf_dma, size_krnl);
2734 2734 return (DDI_FAILURE);
2735 2735 }
2736 2736
2737 2737 /*
2738 2738 * We don't want to have multiple cookies. Descriptor should be
2739 2739 * aligned to PAGESIZE boundary.
2740 2740 */
2741 2741 ASSERT(count == 1);
2742 2742
2743 2743 /* The actual descriptor for the ring */
2744 2744 r->r_desc_len = ring_len;
2745 2745 r->r_desc_cookie = cookie;
2746 2746
2747 2747 r->r_desc = (void *)ring;
2748 2748
2749 2749 bzero(r->r_desc, size_dma);
2750 2750 bzero(r->r_desc, ring_len);
2751 2751
2752 2752 /* For each descriptor, allocate a DMA buffer */
2753 2753 fail = 0;
2754 2754 for (i = 0; i < r->r_ndesc; i++) {
2755 2755 if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) {
2756 2756 i--;
2757 2757 fail = 1;
2758 2758 break;
2759 2759 }
2760 2760 }
2761 2761
2762 2762 if (fail) {
2763 2763 while (i-- >= 0) {
2764 2764 bfe_ring_buf_free(r, i);
2765 2765 }
2766 2766
2767 2767 /* We don't need the descriptor anymore */
2768 2768 bfe_ring_desc_free(r);
2769 2769 return (DDI_FAILURE);
2770 2770 }
2771 2771
2772 2772 return (DDI_SUCCESS);
2773 2773 }
2774 2774
2775 2775 static int
2776 2776 bfe_rings_alloc(bfe_t *bfe)
2777 2777 {
2778 2778 /* TX */
2779 2779 mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2780 2780 bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock;
2781 2781 bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2782 2782 VLAN_TAGSZ + ETHERFCSL;
2783 2783 bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC;
2784 2784 bfe->bfe_tx_ring.r_bfe = bfe;
2785 2785 bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC;
2786 2786
2787 2787 /* RX */
2788 2788 mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2789 2789 bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock;
2790 2790 bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2791 2791 VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM;
2792 2792 bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC;
2793 2793 bfe->bfe_rx_ring.r_bfe = bfe;
2794 2794 bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC;
2795 2795
2796 2796 /* Allocate TX Ring */
2797 2797 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring,
2798 2798 DDI_DMA_WRITE) != DDI_SUCCESS)
2799 2799 return (DDI_FAILURE);
2800 2800
2801 2801 /* Allocate RX Ring */
2802 2802 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring,
2803 2803 DDI_DMA_READ) != DDI_SUCCESS) {
2804 2804 cmn_err(CE_NOTE, "RX ring allocation failed");
2805 2805 bfe_ring_desc_free(&bfe->bfe_tx_ring);
2806 2806 return (DDI_FAILURE);
2807 2807 }
2808 2808
2809 2809 bfe->bfe_tx_ring.r_flags = BFE_RING_ALLOCATED;
2810 2810 bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;
2811 2811
2812 2812 return (DDI_SUCCESS);
2813 2813 }
2814 2814
/*
 * DDI_RESUME handler: re-initializes driver state and restarts the
 * chip and PHY after a suspend.  All driver locks are held across the
 * reset/start sequence.
 */
static int
bfe_resume(dev_info_t *dip)
{
	bfe_t *bfe;
	int err = DDI_SUCCESS;

	if ((bfe = ddi_get_driver_private(dip)) == NULL) {
		bfe_error(dip, "Unexpected error (no driver private data)"
		    " while resume");
		return (DDI_FAILURE);
	}

	/*
	 * Grab all the locks first.
	 */
	bfe_grab_locks(bfe);
	bfe->bfe_chip_state = BFE_CHIP_RESUME;

	bfe_init_vars(bfe);
	/* PHY will also start running */
	bfe_chip_reset(bfe);
	if (bfe_chip_start(bfe) == DDI_FAILURE) {
		bfe_error(dip, "Could not resume chip");
		err = DDI_FAILURE;
	}

	bfe_release_locks(bfe);

	/* Kick TX in case MAC had stopped sending while suspended. */
	if (err == DDI_SUCCESS)
		mac_tx_update(bfe->bfe_machdl);

	return (err);
}
2848 2848
/*
 * attach(9E) entry point.  Sets up PCI config access, maps registers,
 * registers with the MAC layer, adds the interrupt and allocates the
 * TX/RX rings; unwinds in reverse order through the failN labels.
 */
static int
bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int unit;
	bfe_t *bfe;
	mac_register_t *macreg;
	int ret;

	switch (cmd) {
	case DDI_RESUME:
		return (bfe_resume(dip));

	case DDI_ATTACH:
		break;

	default:
		return (DDI_FAILURE);
	}


	unit = ddi_get_instance(dip);

	bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
	bfe->bfe_dip = dip;
	bfe->bfe_unit = unit;

	if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
		bfe_error(dip, "pci_config_setup failed");
		goto fail0;
	}

	/*
	 * Enable IO space, Bus Master and Memory Space accessess.
	 */
	ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM);
	pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM,
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret);

	ddi_set_driver_private(dip, bfe);

	/* Identify hardware */
	if (bfe_identify_hardware(bfe) == BFE_FAILURE) {
		bfe_error(dip, "Could not identify device");
		goto fail1;
	}

	if (bfe_regs_map(bfe) != DDI_SUCCESS) {
		bfe_error(dip, "Could not map device registers");
		goto fail1;
	}

	/* Reads the factory MAC address; always succeeds. */
	(void) bfe_get_chip_config(bfe);

	/*
	 * Register with MAC layer
	 */
	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
		bfe_error(dip, "mac_alloc() failed");
		goto fail2;
	}

	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macreg->m_driver = bfe;
	macreg->m_dip = dip;
	macreg->m_instance = unit;
	macreg->m_src_addr = bfe->bfe_ether_addr;
	macreg->m_callbacks = &bfe_mac_callbacks;
	macreg->m_min_sdu = 0;
	macreg->m_max_sdu = ETHERMTU;
	macreg->m_margin = VLAN_TAGSZ;

	if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) {
		bfe_error(dip, "mac_register() failed with %d error", ret);
		mac_free(macreg);
		goto fail2;
	}

	mac_free(macreg);

	/*
	 * NOTE(review): bfe_intrpri is only filled in by bfe_add_intr()
	 * below, so this rw_init() sees the zero left by kmem_zalloc() --
	 * confirm the intended init order.  Also note no failure path
	 * below calls rw_destroy() -- verify.
	 */
	rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(bfe->bfe_intrpri));

	if (bfe_add_intr(bfe) != DDI_SUCCESS) {
		bfe_error(dip, "Could not add interrupt");
		goto fail3;
	}

	if (bfe_rings_alloc(bfe) != DDI_SUCCESS) {
		bfe_error(dip, "Could not allocate TX/RX Ring");
		goto fail4;
	}

	/* Init and then reset the chip */
	bfe->bfe_chip_action = 0;
	bfe_init_vars(bfe);

	/* PHY will also start running */
	bfe_chip_reset(bfe);

	/*
	 * Even though we enable the interrupts here but chip's interrupt
	 * is not enabled yet. It will be enabled once we plumb the interface.
	 */
	if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
		bfe_error(dip, "Could not enable interrupt");
		goto fail4;
	}

	return (DDI_SUCCESS);

fail4:
	bfe_remove_intr(bfe);
fail3:
	(void) mac_unregister(bfe->bfe_machdl);
fail2:
	bfe_unmap_regs(bfe);
fail1:
	pci_config_teardown(&bfe->bfe_conf_handle);
fail0:
	kmem_free(bfe, sizeof (bfe_t));
	return (DDI_FAILURE);
}
2971 2971
/*
 * detach(9E) entry point.  DDI_DETACH tears everything down in the
 * reverse order of bfe_attach(); DDI_SUSPEND only quiesces the chip
 * and marks it suspended so bfe_resume() can restart it.
 */
static int
bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * First unregister with MAC layer before stopping DMA
		 * engine.
		 */
		if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		bfe->bfe_machdl = NULL;

		/*
		 * Quiesce the chip first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe_release_locks(bfe);

		(void) ddi_intr_disable(bfe->bfe_intrhdl);

		/* Make sure timer is gone. */
		bfe_stop_timer(bfe);

		/*
		 * Free the DMA resources for buffer and then descriptors
		 */
		if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* TX */
			bfe_buffer_free(&bfe->bfe_tx_ring);
			bfe_ring_desc_free(&bfe->bfe_tx_ring);
		}

		if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* RX */
			bfe_buffer_free(&bfe->bfe_rx_ring);
			bfe_ring_desc_free(&bfe->bfe_rx_ring);
		}

		bfe_remove_intr(bfe);
		bfe_unmap_regs(bfe);
		pci_config_teardown(&bfe->bfe_conf_handle);

		mutex_destroy(&bfe->bfe_tx_ring.r_lock);
		mutex_destroy(&bfe->bfe_rx_ring.r_lock);
		rw_destroy(&bfe->bfe_rwlock);

		kmem_free(bfe, sizeof (bfe_t));

		ddi_set_driver_private(devinfo, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * Grab all the locks first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
		bfe_release_locks(bfe);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
3059 3059
3060 3060 /*
3061 3061 * Quiesce the card for fast reboot
3062 3062 */
3063 3063 int
3064 3064 bfe_quiesce(dev_info_t *dev_info)
3065 3065 {
3066 3066 bfe_t *bfe;
3067 3067
3068 3068 bfe = ddi_get_driver_private(dev_info);
3069 3069
3070 3070 bfe_chip_halt(bfe);
3071 3071 bfe_stop_phy(bfe);
3072 3072 bfe->bfe_chip_state = BFE_CHIP_QUIESCED;
3073 3073
3074 3074 return (DDI_SUCCESS);
3075 3075 }
3076 3076
/*
 * Character/block entry points: the driver has no real cb interface,
 * so everything is stubbed out with nulldev/nodev.
 */
static struct cb_ops bfe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3097 3097
/*
 * Device operations vector referenced from bfe_modldrv.
 */
static struct dev_ops bfe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	bfe_attach,		/* devo_attach */
	bfe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&bfe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	bfe_quiesce		/* devo_quiesce */
};
3112 3112
/*
 * Driver linkage structure: identification string plus dev_ops vector.
 */
static struct modldrv bfe_modldrv = {
	&mod_driverops,
	bfe_ident,
	&bfe_dev_ops
};
3118 3118
3119 3119 static struct modlinkage modlinkage = {
3120 - MODREV_1, (void *)&bfe_modldrv, NULL
3120 + MODREV_1, { (void *)&bfe_modldrv, NULL }
3121 3121 };
3122 3122
/*
 * _info(9E): reports module information via mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3128 3128
3129 3129 int
3130 3130 _init(void)
3131 3131 {
3132 3132 int status;
3133 3133
3134 3134 mac_init_ops(&bfe_dev_ops, MODULE_NAME);
3135 3135 status = mod_install(&modlinkage);
3136 3136 if (status == DDI_FAILURE)
3137 3137 mac_fini_ops(&bfe_dev_ops);
3138 3138 return (status);
3139 3139 }
3140 3140
3141 3141 int
3142 3142 _fini(void)
3143 3143 {
3144 3144 int status;
3145 3145
3146 3146 status = mod_remove(&modlinkage);
3147 3147 if (status == 0) {
3148 3148 mac_fini_ops(&bfe_dev_ops);
3149 3149 }
3150 3150 return (status);
3151 3151 }
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX