1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 #include <sys/stream.h>
  26 #include <sys/strsun.h>
  27 #include <sys/stat.h>
  28 #include <sys/pci.h>
  29 #include <sys/modctl.h>
  30 #include <sys/kstat.h>
  31 #include <sys/ethernet.h>
  32 #include <sys/devops.h>
  33 #include <sys/debug.h>
  34 #include <sys/conf.h>
  35 #include <sys/sysmacros.h>
  36 #include <sys/dditypes.h>
  37 #include <sys/ddi.h>
  38 #include <sys/sunddi.h>
  39 #include <sys/miiregs.h>
  40 #include <sys/byteorder.h>
  41 #include <sys/cyclic.h>
  42 #include <sys/note.h>
  43 #include <sys/crc32.h>
  44 #include <sys/mac_provider.h>
  45 #include <sys/mac_ether.h>
  46 #include <sys/vlan.h>
  47 #include <sys/errno.h>
  48 #include <sys/sdt.h>
  49 #include <sys/strsubr.h>
  50 
  51 #include "bfe.h"
  52 #include "bfe_hw.h"
  53 
  54 
  55 /*
   56  * Broadcom BCM4401 chipsets use two rings:
   57  *
   58  * - One TX : For sending packets down the wire.
   59  * - One RX : For receiving packets.
   60  *
   61  * Each ring can have any number of descriptors (configured during attach).
   62  * As of now we configure only 128 descriptors per ring (TX/RX). Each descriptor
   63  * has an address (desc_addr) pointing to the DMA buffer for the packet and a
   64  * control word (desc_ctl) with control information (like start/end of frame
   65  * or end of table). The descriptor table is allocated first and then a DMA
   66  * buffer (for a packet) is allocated and linked to each descriptor.
   67  *
   68  * Each descriptor entry is a bfe_desc_t structure in bfe. During a TX/RX
   69  * interrupt, the stat register points to the current descriptor being
   70  * processed.
   71  *
   72  * Here's an example of the TX and RX rings:
  73  *
  74  * TX:
  75  *
   76  *   Base of the descriptor table is programmed using the BFE_DMATX_ADDR
   77  *   register. Each 'addr' points to a DMA buffer (or packet data buffer) to
   78  *   be transmitted and 'ctl' has the length of the packet (usually MTU).
  79  *
  80  *  ----------------------|
  81  *  | addr |Descriptor 0  |
  82  *  | ctl  |              |
  83  *  ----------------------|
  84  *  | addr |Descriptor 1  |    SOF (start of the frame)
  85  *  | ctl  |              |
  86  *  ----------------------|
  87  *  | ...  |Descriptor... |    EOF (end of the frame)
  88  *  | ...  |              |
  89  *  ----------------------|
   90  *  | addr |Descriptor 127|
  91  *  | ctl  | EOT          |    EOT (End of Table)
  92  *  ----------------------|
  93  *
  94  * 'r_curr_desc'  : pointer to current descriptor which can be used to transmit
  95  *                  a packet.
  96  * 'r_avail_desc' : decremented whenever a packet is being sent.
   97  * 'r_cons_desc'  : incremented whenever a packet is sent down the wire and
   98  *                  the bfe driver is notified by an interrupt.
  99  *
 100  * RX:
 101  *
  102  *   Base of the descriptor table is programmed using the BFE_DMARX_ADDR
  103  *   register. Each 'addr' points to a DMA buffer (or packet data buffer).
  104  *   'ctl' contains the size of the DMA buffer. All the DMA buffers are
  105  *   pre-allocated during attach, hence the maximum size of the packet is
  106  *   also known (r_buf_len from the bfe_ring_t structure). During an RX
  107  *   interrupt the packet length is embedded in a bfe_rx_header_t which the
  108  *   chip prepends to the packet.
 109  *
 110  *  ----------------------|
 111  *  | addr |Descriptor 0  |
 112  *  | ctl  |              |
 113  *  ----------------------|
 114  *  | addr |Descriptor 1  |
 115  *  | ctl  |              |
 116  *  ----------------------|
 117  *  | ...  |Descriptor... |
 118  *  | ...  |              |
 119  *  ----------------------|
 120  *  | addr |Descriptor 127|
 121  *  | ctl  | EOT          |    EOT (End of Table)
 122  *  ----------------------|
 123  *
  124  * 'r_curr_desc'  : pointer to current descriptor while receiving a packet.
 125  *
 126  */
 127 
 128 #define MODULE_NAME     "bfe"
 129 
 130 /*
 131  * Used for checking PHY (link state, speed)
 132  */
 133 #define BFE_TIMEOUT_INTERVAL    (1000 * 1000 * 1000)
 134 
 135 
 136 /*
 137  * Chip restart action and reason for restart
 138  */
 139 #define BFE_ACTION_RESTART              0x1     /* For restarting the chip */
 140 #define BFE_ACTION_RESTART_SETPROP      0x2     /* restart due to setprop */
 141 #define BFE_ACTION_RESTART_FAULT        0x4     /* restart due to fault */
 142 #define BFE_ACTION_RESTART_PKT          0x8     /* restart due to pkt timeout */
 143 
 144 static  char    bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
 145 
 146 /*
 147  * Function Prototypes for bfe driver.
 148  */
 149 static  int     bfe_check_link(bfe_t *);
 150 static  void    bfe_report_link(bfe_t *);
 151 static  void    bfe_chip_halt(bfe_t *);
 152 static  void    bfe_chip_reset(bfe_t *);
 153 static  void    bfe_tx_desc_init(bfe_ring_t *);
 154 static  void    bfe_rx_desc_init(bfe_ring_t *);
 155 static  void    bfe_set_rx_mode(bfe_t *);
 156 static  void    bfe_enable_chip_intrs(bfe_t *);
 157 static  void    bfe_chip_restart(bfe_t *);
 158 static  void    bfe_init_vars(bfe_t *);
 159 static  void    bfe_clear_stats(bfe_t *);
 160 static  void    bfe_gather_stats(bfe_t *);
 161 static  void    bfe_error(dev_info_t *, char *, ...);
 162 static  int     bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
 163     void *);
 164 static  int     bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
 165     const void *);
 166 static  int     bfe_tx_reclaim(bfe_ring_t *);
 167 int     bfe_mac_set_ether_addr(void *, const uint8_t *);
 168 
 169 
 170 /*
 171  * Macros for ddi_dma_sync().
 172  */
 173 #define SYNC_DESC(r, s, l, d)   \
 174         (void) ddi_dma_sync(r->r_desc_dma_handle, \
 175             (off_t)(s * sizeof (bfe_desc_t)), \
 176             (size_t)(l * sizeof (bfe_desc_t)), \
 177             d)
 178 
 179 #define SYNC_BUF(r, s, b, l, d) \
 180         (void) ddi_dma_sync(r->r_buf_dma[s].handle, \
 181             (off_t)(b), (size_t)(l), d)
 182 
 183 /*
 184  * Supported Broadcom BCM4401 Cards.
 185  */
 186 static bfe_cards_t bfe_cards[] = {
 187         { 0x14e4, 0x170c, "BCM4401 100Base-TX"},
 188 };
 189 
 190 
 191 /*
 192  * DMA attributes for device registers, packet data (buffer) and
 193  * descriptor table.
 194  */
 195 static struct ddi_device_acc_attr bfe_dev_attr = {
 196         DDI_DEVICE_ATTR_V0,
 197         DDI_STRUCTURE_LE_ACC,
 198         DDI_STRICTORDER_ACC
 199 };
 200 
 201 static struct ddi_device_acc_attr bfe_buf_attr = {
 202         DDI_DEVICE_ATTR_V0,
 203         DDI_NEVERSWAP_ACC,      /* native endianness */
 204         DDI_STRICTORDER_ACC
 205 };
 206 
 207 static ddi_dma_attr_t bfe_dma_attr_buf = {
 208         DMA_ATTR_V0,            /* dma_attr_version */
 209         0,                      /* dma_attr_addr_lo */
 210         BFE_PCI_DMA - 1,        /* dma_attr_addr_hi */
 211         0x1fff,                 /* dma_attr_count_max */
 212         8,                      /* dma_attr_align */
 213         0,                      /* dma_attr_burstsizes */
 214         1,                      /* dma_attr_minxfer */
 215         0x1fff,                 /* dma_attr_maxxfer */
 216         BFE_PCI_DMA - 1,        /* dma_attr_seg */
 217         1,                      /* dma_attr_sgllen */
 218         1,                      /* dma_attr_granular */
 219         0                       /* dma_attr_flags */
 220 };
 221 
 222 static ddi_dma_attr_t bfe_dma_attr_desc = {
 223         DMA_ATTR_V0,            /* dma_attr_version */
 224         0,                      /* dma_attr_addr_lo */
 225         BFE_PCI_DMA - 1,        /* dma_attr_addr_hi */
 226         BFE_PCI_DMA - 1,        /* dma_attr_count_max */
 227         BFE_DESC_ALIGN,         /* dma_attr_align */
 228         0,                      /* dma_attr_burstsizes */
 229         1,                      /* dma_attr_minxfer */
 230         BFE_PCI_DMA - 1,        /* dma_attr_maxxfer */
 231         BFE_PCI_DMA - 1,        /* dma_attr_seg */
 232         1,                      /* dma_attr_sgllen */
 233         1,                      /* dma_attr_granular */
 234         0                       /* dma_attr_flags */
 235 };
 236 
 237 /*
 238  * Ethernet broadcast addresses.
 239  */
 240 static uchar_t bfe_broadcast[ETHERADDRL] = {
 241         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 242 };
 243 
 244 #define ASSERT_ALL_LOCKS(bfe) { \
 245         ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock));   \
 246         ASSERT(rw_write_held(&bfe->bfe_rwlock)); \
 247 }
 248 
 249 /*
  250  * Debugging and error reporting code.
 251  */
 252 static void
 253 bfe_error(dev_info_t *dip, char *fmt, ...)
 254 {
 255         va_list ap;
 256         char    buf[256];
 257 
 258         va_start(ap, fmt);
 259         (void) vsnprintf(buf, sizeof (buf), fmt, ap);
 260         va_end(ap);
 261 
 262         if (dip) {
 263                 cmn_err(CE_WARN, "%s%d: %s",
 264                     ddi_driver_name(dip), ddi_get_instance(dip), buf);
 265         } else {
 266                 cmn_err(CE_WARN, "bfe: %s", buf);
 267         }
 268 }
 269 
 270 /*
 271  * Grabs all necessary locks to block any other operation on the chip.
 272  */
 273 static void
 274 bfe_grab_locks(bfe_t *bfe)
 275 {
 276         bfe_ring_t *tx = &bfe->bfe_tx_ring;
 277 
 278         /*
 279          * Grab all the locks.
 280          * - bfe_rwlock : locks down whole chip including RX.
 281          * - tx's r_lock : locks down only TX side.
 282          */
 283         rw_enter(&bfe->bfe_rwlock, RW_WRITER);
 284         mutex_enter(&tx->r_lock);
 285 
 286         /*
 287          * Note that we don't use RX's r_lock.
 288          */
 289 }
 290 
 291 /*
  292  * Releases locks on the chip/driver.
 293  */
 294 static void
 295 bfe_release_locks(bfe_t *bfe)
 296 {
 297         bfe_ring_t *tx = &bfe->bfe_tx_ring;
 298 
 299         /*
 300          * Release all the locks in the order in which they were grabbed.
 301          */
 302         mutex_exit(&tx->r_lock);
 303         rw_exit(&bfe->bfe_rwlock);
 304 }
 305 
 306 
 307 /*
  308  * Used to make sure that a write to a device register was successful.
 309  */
 310 static int
 311 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit,
 312     ulong_t t, const int clear)
 313 {
 314         ulong_t i;
 315         uint32_t v;
 316 
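              /*
               * Poll the register every 10 microseconds, for at most 't'
               * iterations, until 'bit' is cleared (clear != 0) or set
               * (clear == 0).
               */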
 317         for (i = 0; i < t; i++) {
 318                 v = INL(bfe, reg);
 319 
 320                 if (clear && !(v & bit))
 321                         break;
 322 
 323                 if (!clear && (v & bit))
 324                         break;
 325 
 326                 drv_usecwait(10);
 327         }
 328 
  329         /* The bit never reached the expected state within the timeout. */
 330         if (i == t)
 331                 return (-1);
 332 
 333         return (0);
 334 }
 335 
 336 /*
 337  * PHY functions (read, write, stop, reset and startup)
 338  */
 339 static int
 340 bfe_read_phy(bfe_t *bfe, uint32_t reg)
 341 {
 342         OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
 343         OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
 344             (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
 345             (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
 346             (reg << BFE_MDIO_RA_SHIFT) |
 347             (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
 348 
 349         (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
 350 
 351         return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA));
 352 }
 353 
 354 static void
 355 bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val)
 356 {
 357         OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
 358         OUTL(bfe,  BFE_MDIO_DATA, (BFE_MDIO_SB_START |
 359             (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
 360             (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
 361             (reg << BFE_MDIO_RA_SHIFT) |
 362             (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
 363             (val & BFE_MDIO_DATA_DATA)));
 364 
 365         (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
 366 }
 367 
 368 /*
 369  * It resets the PHY layer.
 370  */
 371 static int
 372 bfe_reset_phy(bfe_t *bfe)
 373 {
 374         uint32_t i;
 375 
 376         bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
 377         drv_usecwait(100);
 378         for (i = 0; i < 10; i++) {
 379                 if (bfe_read_phy(bfe, MII_CONTROL) &
 380                     MII_CONTROL_RESET) {
 381                         drv_usecwait(500);
 382                         continue;
 383                 }
 384 
 385                 break;
 386         }
 387 
 388         if (i == 10) {
 389                 bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
 390                 bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
 391                 return (BFE_FAILURE);
 392         }
 393 
 394         bfe->bfe_phy_state = BFE_PHY_RESET_DONE;
 395 
 396         return (BFE_SUCCESS);
 397 }
 398 
 399 /*
  400  * Makes sure the timer function is out of our way, especially during
  401  * detach.
 402  */
 403 static void
 404 bfe_stop_timer(bfe_t *bfe)
 405 {
 406         if (bfe->bfe_periodic_id) {
 407                 ddi_periodic_delete(bfe->bfe_periodic_id);
 408                 bfe->bfe_periodic_id = NULL;
 409         }
 410 }
 411 
 412 /*
 413  * Stops the PHY
 414  */
 415 static void
 416 bfe_stop_phy(bfe_t *bfe)
 417 {
 418         bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
 419             MII_CONTROL_ISOLATE);
 420 
 421         bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
 422         bfe->bfe_chip.speed = 0;
 423         bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
 424 
 425         bfe->bfe_phy_state = BFE_PHY_STOPPED;
 426 
 427         /*
 428          * Report the link status to MAC layer.
 429          */
 430         if (bfe->bfe_machdl != NULL)
 431                 (void) bfe_report_link(bfe);
 432 }
 433 
 434 static int
 435 bfe_probe_phy(bfe_t *bfe)
 436 {
 437         int phy;
 438         uint32_t status;
 439 
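              /*
               * Try the PHY address we already have; if it does not respond,
               * scan all 32 MII addresses for a usable PHY.
               */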
 440         if (bfe->bfe_phy_addr) {
 441                 status = bfe_read_phy(bfe, MII_STATUS);
 442                 if (status != 0xffff && status != 0) {
 443                         bfe_write_phy(bfe, MII_CONTROL, 0);
 444                         return (BFE_SUCCESS);
 445                 }
 446         }
 447 
 448         for (phy = 0; phy < 32; phy++) {
 449                 bfe->bfe_phy_addr = phy;
 450                 status = bfe_read_phy(bfe, MII_STATUS);
 451                 if (status != 0xffff && status != 0) {
 452                         bfe_write_phy(bfe, MII_CONTROL, 0);
 453                         return (BFE_SUCCESS);
 454                 }
 455         }
 456 
 457         return (BFE_FAILURE);
 458 }
 459 
 460 /*
  461  * This timeout function fires every BFE_TIMEOUT_INTERVAL to check the link
  462  * status.
 463  */
 464 static void
 465 bfe_timeout(void *arg)
 466 {
 467         bfe_t *bfe = (bfe_t *)arg;
 468         int resched = 0;
 469 
 470         /*
 471          * We don't grab any lock because bfe can't go away.
  472          * ddi_periodic_delete() waits for this timeout instance to complete.
 473          */
 474         if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
 475                 /*
 476                  * Restart the chip.
 477                  */
 478                 bfe_grab_locks(bfe);
 479                 bfe_chip_restart(bfe);
 480                 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
 481                 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
 482                 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
 483                 bfe_release_locks(bfe);
 484                 mac_tx_update(bfe->bfe_machdl);
 485                 /* Restart will register a new timeout */
 486                 return;
 487         }
 488 
 489         rw_enter(&bfe->bfe_rwlock, RW_READER);
 490 
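              /*
               * Check for a TX stall: if a transmit has been pending past
               * bfe_tx_stall_time, flag the chip for a restart. The restart
               * itself is done at the top of the next timeout invocation.
               */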
 491         if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
 492                 hrtime_t hr;
 493 
 494                 hr = gethrtime();
 495                 if (bfe->bfe_tx_stall_time != 0 &&
 496                     hr > bfe->bfe_tx_stall_time) {
 497                         DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
 498                             char *, "pkt timeout");
 499                         bfe->bfe_chip_action |=
 500                             (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
 501                         bfe->bfe_tx_stall_time = 0;
 502                 }
 503         }
 504 
 505         if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
 506                 /*
 507                  * Report the link status to MAC layer if link status changed.
 508                  */
 509                 if (bfe_check_link(bfe)) {
 510                         bfe_report_link(bfe);
 511                         if (bfe->bfe_chip.link == LINK_STATE_UP) {
 512                                 uint32_t val, flow;
 513 
 514                                 val = INL(bfe, BFE_TX_CTRL);
 515                                 val &= ~BFE_TX_DUPLEX;
 516                                 if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
 517                                         val |= BFE_TX_DUPLEX;
 518                                         flow = INL(bfe, BFE_RXCONF);
 519                                         flow &= ~BFE_RXCONF_FLOW;
 520                                         OUTL(bfe, BFE_RXCONF, flow);
 521 
 522                                         flow = INL(bfe, BFE_MAC_FLOW);
 523                                         flow &= ~(BFE_FLOW_RX_HIWAT);
 524                                         OUTL(bfe, BFE_MAC_FLOW, flow);
 525                                 }
 526 
 527                                 resched = 1;
 528 
 529                                 OUTL(bfe, BFE_TX_CTRL, val);
 530                                 DTRACE_PROBE1(link__up,
 531                                     int, bfe->bfe_unit);
 532                         }
 533                 }
 534         }
 535 
 536         rw_exit(&bfe->bfe_rwlock);
 537 
 538         if (resched)
 539                 mac_tx_update(bfe->bfe_machdl);
 540 }
 541 
 542 /*
 543  * Starts PHY layer.
 544  */
 545 static int
 546 bfe_startup_phy(bfe_t *bfe)
 547 {
 548         uint16_t bmsr, bmcr, anar;
 549         int     prog, s;
 550         int phyid1, phyid2;
 551 
 552         if (bfe_probe_phy(bfe) == BFE_FAILURE) {
 553                 bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
 554                 return (BFE_FAILURE);
 555         }
 556 
 557         (void) bfe_reset_phy(bfe);
 558 
 559         phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
 560         phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
 561         bfe->bfe_phy_id = (phyid1 << 16) | phyid2;
 562 
 563         bmsr = bfe_read_phy(bfe, MII_STATUS);
 564         anar = bfe_read_phy(bfe, MII_AN_ADVERT);
 565 
 566 again:
 567         anar &= ~(MII_ABILITY_100BASE_T4 |
 568             MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
 569             MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);
 570 
 571         /*
 572          * Supported hardware modes are in bmsr.
 573          */
 574         bfe->bfe_chip.bmsr = bmsr;
 575 
 576         /*
 577          * Assume no capabilities are supported in the hardware.
 578          */
 579         bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
 580             bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
 581             bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;
 582 
 583         /*
 584          * Assume property is set.
 585          */
 586         s = 1;
 587         if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
 588                 /*
  589                  * Property is not set, which means bfe_mac_setprop()
  590                  * was not called on us.
 591                  */
 592                 s = 0;
 593         }
 594 
 595         bmcr = prog = 0;
 596 
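              /*
               * Build the advertisement (anar) and control (bmcr) values from
               * the capabilities reported in bmsr. If no property was set
               * (s == 0) we advertise everything the PHY supports; otherwise
               * only the modes enabled through bfe_mac_setprop().
               */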
 597         if (bmsr & MII_STATUS_100_BASEX_FD) {
 598                 bfe->bfe_cap_100fdx = 1;
 599                 if (s == 0) {
 600                         anar |= MII_ABILITY_100BASE_TX_FD;
 601                         bfe->bfe_adv_100fdx = 1;
 602                         prog++;
 603                 } else if (bfe->bfe_adv_100fdx) {
 604                         anar |= MII_ABILITY_100BASE_TX_FD;
 605                         prog++;
 606                 }
 607         }
 608 
 609         if (bmsr & MII_STATUS_100_BASE_T4) {
 610                 bfe->bfe_cap_100T4 = 1;
 611                 if (s == 0) {
 612                         anar |= MII_ABILITY_100BASE_T4;
 613                         bfe->bfe_adv_100T4 = 1;
 614                         prog++;
 615                 } else if (bfe->bfe_adv_100T4) {
 616                         anar |= MII_ABILITY_100BASE_T4;
 617                         prog++;
 618                 }
 619         }
 620 
 621         if (bmsr & MII_STATUS_100_BASEX) {
 622                 bfe->bfe_cap_100hdx = 1;
 623                 if (s == 0) {
 624                         anar |= MII_ABILITY_100BASE_TX;
 625                         bfe->bfe_adv_100hdx = 1;
 626                         prog++;
 627                 } else if (bfe->bfe_adv_100hdx) {
 628                         anar |= MII_ABILITY_100BASE_TX;
 629                         prog++;
 630                 }
 631         }
 632 
 633         if (bmsr & MII_STATUS_10_FD) {
 634                 bfe->bfe_cap_10fdx = 1;
 635                 if (s == 0) {
 636                         anar |= MII_ABILITY_10BASE_T_FD;
 637                         bfe->bfe_adv_10fdx = 1;
 638                         prog++;
 639                 } else if (bfe->bfe_adv_10fdx) {
 640                         anar |= MII_ABILITY_10BASE_T_FD;
 641                         prog++;
 642                 }
 643         }
 644 
 645         if (bmsr & MII_STATUS_10) {
 646                 bfe->bfe_cap_10hdx = 1;
 647                 if (s == 0) {
 648                         anar |= MII_ABILITY_10BASE_T;
 649                         bfe->bfe_adv_10hdx = 1;
 650                         prog++;
 651                 } else if (bfe->bfe_adv_10hdx) {
 652                         anar |= MII_ABILITY_10BASE_T;
 653                         prog++;
 654                 }
 655         }
 656 
 657         if (bmsr & MII_STATUS_CANAUTONEG) {
 658                 bfe->bfe_cap_aneg = 1;
 659                 if (s == 0) {
 660                         bfe->bfe_adv_aneg = 1;
 661                 }
 662         }
 663 
 664         if (prog == 0) {
 665                 if (s == 0) {
 666                         bfe_error(bfe->bfe_dip,
 667                             "No valid link mode selected. Powering down PHY");
 668                         bfe_stop_phy(bfe);
 669                         bfe_report_link(bfe);
 670                         return (BFE_FAILURE);
 671                 }
 672 
 673                 /*
  674                  * If a property was set then the user goofed up, so we
  675                  * go back to the default properties.
 676                  */
 677                 bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
 678                 goto again;
 679         }
 680 
 681         if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
 682                 bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
 683         } else {
 684                 if (bfe->bfe_adv_100fdx)
 685                         bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
 686                 else if (bfe->bfe_adv_100hdx)
 687                         bmcr = MII_CONTROL_100MB;
 688                 else if (bfe->bfe_adv_10fdx)
 689                         bmcr = MII_CONTROL_FDUPLEX;
 690                 else
 691                         bmcr = 0;               /* 10HDX */
 692         }
 693 
 694         if (prog)
 695                 bfe_write_phy(bfe, MII_AN_ADVERT, anar);
 696 
 697         if (bmcr)
 698                 bfe_write_phy(bfe, MII_CONTROL, bmcr);
 699 
 700         bfe->bfe_mii_anar = anar;
 701         bfe->bfe_mii_bmcr = bmcr;
 702         bfe->bfe_phy_state = BFE_PHY_STARTED;
 703 
 704         if (bfe->bfe_periodic_id == NULL) {
 705                 bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
 706                     (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);
 707 
 708                 DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
 709         }
 710 
 711         DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
 712             int, bmsr, int, bmcr, int, anar);
 713 
 714         return (BFE_SUCCESS);
 715 }
 716 
 717 /*
 718  * Reports link status back to MAC Layer.
 719  */
 720 static void
 721 bfe_report_link(bfe_t *bfe)
 722 {
 723         mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
 724 }
 725 
 726 /*
  727  * Reads PHY/MII registers and gets the link status for us.
 728  */
 729 static int
 730 bfe_check_link(bfe_t *bfe)
 731 {
 732         uint16_t bmsr, bmcr, anar, anlpar;
 733         int speed, duplex, link;
 734 
 735         speed = bfe->bfe_chip.speed;
 736         duplex = bfe->bfe_chip.duplex;
 737         link = bfe->bfe_chip.link;
 738 
 739         bmsr = bfe_read_phy(bfe, MII_STATUS);
 740         bfe->bfe_mii_bmsr = bmsr;
 741 
 742         bmcr = bfe_read_phy(bfe, MII_CONTROL);
 743 
 744         anar = bfe_read_phy(bfe, MII_AN_ADVERT);
 745         bfe->bfe_mii_anar = anar;
 746 
 747         anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
 748         bfe->bfe_mii_anlpar = anlpar;
 749 
 750         bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);
 751 
 752         /*
 753          * If exp register is not present in PHY.
 754          */
 755         if (bfe->bfe_mii_exp == 0xffff) {
 756                 bfe->bfe_mii_exp = 0;
 757         }
 758 
 759         if ((bmsr & MII_STATUS_LINKUP) == 0) {
 760                 bfe->bfe_chip.link = LINK_STATE_DOWN;
 761                 bfe->bfe_chip.speed = 0;
 762                 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
 763                 goto done;
 764         }
 765 
 766         bfe->bfe_chip.link = LINK_STATE_UP;
 767 
 768         if (!(bmcr & MII_CONTROL_ANE)) {
 769                 /* Forced mode */
 770                 if (bmcr & MII_CONTROL_100MB)
 771                         bfe->bfe_chip.speed = 100000000;
 772                 else
 773                         bfe->bfe_chip.speed = 10000000;
 774 
 775                 if (bmcr & MII_CONTROL_FDUPLEX)
 776                         bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
 777                 else
 778                         bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
 779 
 780         } else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
 781             (!(bmsr & MII_STATUS_ANDONE))) {
 782                 bfe->bfe_chip.speed = 0;
 783                 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
 784         } else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
 785                 bfe->bfe_chip.speed = 100000000;
 786                 bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
 787         } else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
 788                 bfe->bfe_chip.speed = 100000000;
 789                 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
 790         } else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
 791                 bfe->bfe_chip.speed = 100000000;
 792                 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
 793         } else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
 794                 bfe->bfe_chip.speed = 10000000;
 795                 bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
 796         } else if (anar & anlpar & MII_ABILITY_10BASE_T) {
 797                 bfe->bfe_chip.speed = 10000000;
 798                 bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
 799         } else {
 800                 bfe->bfe_chip.speed = 0;
 801                 bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
 802         }
 803 
 804 done:
 805         /*
  806          * If the speed, link status or duplex mode changed then report it
  807          * to the MAC layer (done by the caller).
 808          */
 809         if (speed != bfe->bfe_chip.speed ||
 810             duplex != bfe->bfe_chip.duplex ||
 811             link != bfe->bfe_chip.link) {
 812                 return (1);
 813         }
 814 
 815         return (0);
 816 }
 817 
 818 static void
 819 bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
 820 {
 821         uint32_t v;
 822 
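              /*
               * Pack the last four bytes of the MAC address into
               * BFE_CAM_DATA_LO and the first two bytes (plus the valid bit)
               * into BFE_CAM_DATA_HI, then issue a CAM write to the given
               * index and wait for it to finish.
               */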
 823         v = ((uint32_t)d[2] << 24);
 824         v |= ((uint32_t)d[3] << 16);
 825         v |= ((uint32_t)d[4] << 8);
 826         v |= (uint32_t)d[5];
 827 
 828         OUTL(bfe, BFE_CAM_DATA_LO, v);
 829         v = (BFE_CAM_HI_VALID |
 830             (((uint32_t)d[0]) << 8) |
 831             (((uint32_t)d[1])));
 832 
 833         OUTL(bfe, BFE_CAM_DATA_HI, v);
 834         OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
 835             ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
 836         (void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
 837 }
 838 
 839 /*
 840  * Chip related functions (halt, reset, start).
 841  */
 842 static void
 843 bfe_chip_halt(bfe_t *bfe)
 844 {
 845         /*
 846          * Disables interrupts.
 847          */
 848         OUTL(bfe, BFE_INTR_MASK, 0);
 849         FLUSH(bfe, BFE_INTR_MASK);
 850 
 851         OUTL(bfe,  BFE_ENET_CTRL, BFE_ENET_DISABLE);
 852 
 853         /*
 854          * Wait until TX and RX finish their job.
 855          */
 856         (void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);
 857 
 858         /*
 859          * Disables DMA engine.
 860          */
 861         OUTL(bfe, BFE_DMARX_CTRL, 0);
 862         OUTL(bfe, BFE_DMATX_CTRL, 0);
 863 
 864         drv_usecwait(10);
 865 
 866         bfe->bfe_chip_state = BFE_CHIP_HALT;
 867 }
 868 
 869 static void
 870 bfe_chip_restart(bfe_t *bfe)
 871 {
 872         DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
 873             int, bfe->bfe_chip_action);
 874 
 875         /*
 876          * Halt chip and PHY.
 877          */
 878         bfe_chip_halt(bfe);
 879         bfe_stop_phy(bfe);
 880         bfe->bfe_chip_state = BFE_CHIP_STOPPED;
 881 
 882         /*
 883          * Init variables.
 884          */
 885         bfe_init_vars(bfe);
 886 
 887         /*
 888          * Reset chip and start PHY.
 889          */
 890         bfe_chip_reset(bfe);
 891 
 892         /*
 893          * DMA descriptor rings.
 894          */
 895         bfe_tx_desc_init(&bfe->bfe_tx_ring);
 896         bfe_rx_desc_init(&bfe->bfe_rx_ring);
 897 
 898         bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
 899         bfe_set_rx_mode(bfe);
 900         bfe_enable_chip_intrs(bfe);
 901 }
 902 
 903 /*
 904  * Disables core by stopping the clock.
 905  */
 906 static void
 907 bfe_core_disable(bfe_t *bfe)
 908 {
 909         if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
 910                 return;
 911 
 912         OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
 913         (void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
 914         (void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
 915         OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
 916         FLUSH(bfe, BFE_SBTMSLOW);
 917         drv_usecwait(10);
 918         OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
 919         drv_usecwait(10);
 920 }
 921 
 922 /*
 923  * Resets core.
 924  */
 925 static void
 926 bfe_core_reset(bfe_t *bfe)
 927 {
 928         uint32_t val;
 929 
 930         /*
 931          * First disable the core.
 932          */
 933         bfe_core_disable(bfe);
 934 
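              /*
               * Put the core into reset with the clock forced on, clear any
               * pending SERR/timeout state, then take the core back out of
               * reset.
               */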
 935         OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
 936         FLUSH(bfe, BFE_SBTMSLOW);
 937         drv_usecwait(1);
 938 
 939         if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
 940                 OUTL(bfe, BFE_SBTMSHIGH, 0);
 941 
 942         val = INL(bfe, BFE_SBIMSTATE);
 943         if (val & (BFE_IBE | BFE_TO))
 944                 OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));
 945 
 946         OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
 947         FLUSH(bfe, BFE_SBTMSLOW);
 948         drv_usecwait(1);
 949 
 950         OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
 951         FLUSH(bfe, BFE_SBTMSLOW);
 952         drv_usecwait(1);
 953 }
 954 
 955 static void
 956 bfe_setup_config(bfe_t *bfe, uint32_t cores)
 957 {
 958         uint32_t bar_orig, val;
 959 
 960         /*
 961          * Change bar0 window to map sbtopci registers.
 962          */
 963         bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN);
 964         pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI);
 965 
 966         /* Just read it and don't do anything */
 967         val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE;
 968 
 969         val = INL(bfe, BFE_SBINTVEC);
 970         val |= cores;
 971         OUTL(bfe, BFE_SBINTVEC, val);
 972 
 973         val = INL(bfe, BFE_SSB_PCI_TRANS_2);
 974         val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
 975         OUTL(bfe, BFE_SSB_PCI_TRANS_2, val);
 976 
 977         /*
 978          * Restore bar0 window mapping.
 979          */
 980         pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig);
 981 }
 982 
 983 /*
 984  * Resets chip and starts PHY.
 985  */
 986 static void
 987 bfe_chip_reset(bfe_t *bfe)
 988 {
 989         uint32_t val;
 990 
 991         /* Set the interrupt vector for the enet core */
 992         bfe_setup_config(bfe, BFE_INTVEC_ENET0);
 993 
 994         /* check if core is up */
 995         val = INL(bfe, BFE_SBTMSLOW) &
 996             (BFE_RESET | BFE_REJECT | BFE_CLOCK);
 997 
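              /*
               * If the core is already up (clock running, not in reset),
               * quiesce the MAC and drain the DMA engines before resetting
               * the core.
               */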
 998         if (val == BFE_CLOCK) {
 999                 OUTL(bfe, BFE_RCV_LAZY, 0);
1000                 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
1001                 (void) bfe_wait_bit(bfe, BFE_ENET_CTRL,
1002                     BFE_ENET_DISABLE, 10, 1);
1003                 OUTL(bfe, BFE_DMATX_CTRL, 0);
1004                 FLUSH(bfe, BFE_DMARX_STAT);
 1005                 drv_usecwait(20000);    /* 20 milliseconds */
1006                 if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) {
1007                         (void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE,
1008                             10, 0);
1009                 }
1010                 OUTL(bfe, BFE_DMARX_CTRL, 0);
1011         }
1012 
1013         bfe_core_reset(bfe);
1014         bfe_clear_stats(bfe);
1015 
1016         OUTL(bfe, BFE_MDIO_CTRL, 0x8d);
1017         val = INL(bfe, BFE_DEVCTRL);
1018         if (!(val & BFE_IPP))
1019                 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL);
 1020         else if (INL(bfe, BFE_DEVCTRL) & BFE_EPR) {
 1021                 OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR);
 1022                 drv_usecwait(20000);    /* 20 milliseconds */
1023         }
1024 
1025         OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
1026 
1027         OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
1028 
1029         OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
1030             BFE_LAZY_FC_MASK));
1031 
1032         OUTL_OR(bfe, BFE_RCV_LAZY, 0);
1033 
1034         OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
1035         OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);
1036 
1037         OUTL(bfe, BFE_TX_WMARK, 56);
1038 
1039         /* Program DMA channels */
1040         OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
1041 
1042         /*
1043          * DMA addresses need to be added to BFE_PCI_DMA
1044          */
1045         OUTL(bfe, BFE_DMATX_ADDR,
1046             bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1047 
1048         OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
1049             | BFE_RX_CTRL_ENABLE);
1050 
1051         OUTL(bfe, BFE_DMARX_ADDR,
1052             bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1053 
1054         (void) bfe_startup_phy(bfe);
1055 
1056         bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
1057 }
1058 
1059 /*
 1060  * Enables interrupts. Should be the last step when starting the chip.
1061  */
1062 static void
1063 bfe_enable_chip_intrs(bfe_t *bfe)
1064 {
1065         /* Enable the chip and core */
1066         OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);
1067 
1068         /* Enable interrupts */
1069         OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
1070 }
1071 
1072 /*
1073  * Common code to take care of setting RX side mode (filter).
1074  */
1075 static void
1076 bfe_set_rx_mode(bfe_t *bfe)
1077 {
1078         uint32_t val;
1079         int i;
1080         ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
1081 
1082         /*
 1083          * We don't touch the RX filter if we were asked to suspend. It's
 1084          * fine if the chip is not active (no interface is plumbed on us).
1085          */
1086         if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
1087                 return;
1088 
1089         val = INL(bfe, BFE_RXCONF);
1090 
1091         val &= ~BFE_RXCONF_PROMISC;
1092         val &= ~BFE_RXCONF_DBCAST;
1093 
1094         if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
1095                 OUTL(bfe, BFE_CAM_CTRL, 0);
1096                 FLUSH(bfe, BFE_CAM_CTRL);
1097         } else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
1098                 val |= BFE_RXCONF_PROMISC;
1099                 val &= ~BFE_RXCONF_DBCAST;
1100         } else {
1101                 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1102                         /* Flush everything */
1103                         OUTL(bfe, BFE_RXCONF, val |
1104                             BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
1105                         FLUSH(bfe, BFE_RXCONF);
1106                 }
1107 
1108                 /* Disable CAM */
1109                 OUTL(bfe, BFE_CAM_CTRL, 0);
1110                 FLUSH(bfe, BFE_CAM_CTRL);
1111 
1112                 /*
1113                  * We receive all multicast packets.
1114                  */
1115                 val |= BFE_RXCONF_ALLMULTI;
1116 
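                      /*
                       * Clear all CAM entries except the last one, which is
                       * programmed with our unicast address below.
                       */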
1117                 for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
1118                         bfe_cam_write(bfe, (uchar_t *)mac, i);
1119                 }
1120 
1121                 bfe_cam_write(bfe, bfe->bfe_ether_addr, i);
1122 
1123                 /* Enable CAM */
1124                 OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
1125                 FLUSH(bfe, BFE_CAM_CTRL);
1126         }
1127 
1128         DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
1129             int, val);
1130 
1131         OUTL(bfe, BFE_RXCONF, val);
1132         FLUSH(bfe, BFE_RXCONF);
1133 }
1134 
1135 /*
 1136  * Resets various variables to their initial state.
1137  */
1138 static void
1139 bfe_init_vars(bfe_t *bfe)
1140 {
1141         bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;
1142 
1143         /* Initial assumption */
1144         bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
1145         bfe->bfe_chip.speed = 0;
1146         bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
1147 
1148         bfe->bfe_periodic_id = NULL;
1149         bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;
1150 
1151         bfe->bfe_tx_stall_time = 0;
1152 }
1153 
1154 /*
1155  * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
 1156  * has control (desc_ctl) and address (desc_addr) members.
1157  */
1158 static void
1159 bfe_tx_desc_init(bfe_ring_t *r)
1160 {
1161         int i;
1162         uint32_t v;
1163 
1164         for (i = 0; i < r->r_ndesc; i++) {
1165                 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
1166                     (r->r_buf_dma[i].len & BFE_DESC_LEN));
1167 
1168                 /*
1169                  * DMA addresses need to be added to BFE_PCI_DMA
1170                  */
1171                 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1172                     (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1173         }
1174 
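              /*
               * Mark the last descriptor with End-Of-Table so that the chip
               * wraps back to descriptor 0.
               */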
1175         v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1176         PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1177             v | BFE_DESC_EOT);
1178 
1179         (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1180 
1181         r->r_curr_desc = 0;
1182         r->r_avail_desc = TX_NUM_DESC;
1183         r->r_cons_desc = 0;
1184 }
1185 
1186 /*
1187  * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
 1188  * has control (desc_ctl) and address (desc_addr) members.
1189  */
1190 static void
1191 bfe_rx_desc_init(bfe_ring_t *r)
1192 {
1193         int i;
1194         uint32_t v;
1195 
1196         for (i = 0; i < r->r_ndesc; i++) {
1197                 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
 1198                     (r->r_buf_dma[i].len & BFE_DESC_LEN));
1199 
1200                 PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1201                     (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1202 
1203                 /* Initialize rx header (len, flags) */
1204                 bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));
1205 
1206                 (void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
1207                     DDI_DMA_SYNC_FORDEV);
1208         }
1209 
1210         v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1211         PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1212             v | BFE_DESC_EOT);
1213 
1214         (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1215 
1216         /* TAIL of RX Descriptor */
1217         OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));
1218 
1219         r->r_curr_desc = 0;
1220         r->r_avail_desc = RX_NUM_DESC;
1221 }
1222 
1223 static int
1224 bfe_chip_start(bfe_t *bfe)
1225 {
1226         ASSERT_ALL_LOCKS(bfe);
1227 
1228         /*
 1229          * Stop the chip first and then reset it. Enable interrupts last.
1230          */
1231         bfe_chip_halt(bfe);
1232         bfe_stop_phy(bfe);
1233 
1234         /*
1235          * Reset chip and start PHY.
1236          */
1237         bfe_chip_reset(bfe);
1238 
1239         /*
 1240          * Initialize the descriptor rings.
1241          */
1242         bfe_tx_desc_init(&bfe->bfe_tx_ring);
1243         bfe_rx_desc_init(&bfe->bfe_rx_ring);
1244 
1245         bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
1246         bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
1247         bfe_set_rx_mode(bfe);
1248         bfe_enable_chip_intrs(bfe);
1249 
1250         /* Check link, speed and duplex mode */
1251         (void) bfe_check_link(bfe);
1252 
1253         return (DDI_SUCCESS);
1254 }
1255 
1256 
1257 /*
1258  * Clear chip statistics.
1259  */
1260 static void
1261 bfe_clear_stats(bfe_t *bfe)
1262 {
1263         ulong_t r;
1264 
1265         OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
1266 
1267         /*
1268          * Stat registers are cleared by reading.
1269          */
1270         for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
1271                 (void) INL(bfe, r);
1272 
1273         for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
1274                 (void) INL(bfe, r);
1275 }
1276 
1277 /*
1278  * Collect chip statistics.
1279  */
1280 static void
1281 bfe_gather_stats(bfe_t *bfe)
1282 {
1283         ulong_t r;
1284         uint32_t *v;
1285         uint32_t txerr = 0, rxerr = 0, coll = 0;
1286 
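              /*
               * The counters in bfe_hw_stats mirror the layout of the
               * hardware MIB registers, so both register blocks can be
               * accumulated by walking a pointer over consecutive registers.
               */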
1287         v = &bfe->bfe_hw_stats.tx_good_octets;
1288         for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
1289                 *v += INL(bfe, r);
1290                 v++;
1291         }
1292 
1293         v = &bfe->bfe_hw_stats.rx_good_octets;
1294         for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
1295                 *v += INL(bfe, r);
1296                 v++;
1297         }
1298 
1299         /*
1300          * TX :
1301          * -------
1302          * tx_good_octets, tx_good_pkts, tx_octets
1303          * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
1304          * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
1305          * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
1306          * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
1307          * tx_underruns, tx_total_cols, tx_single_cols
1308          * tx_multiple_cols, tx_excessive_cols, tx_late_cols
1309          * tx_defered, tx_carrier_lost, tx_pause_pkts
1310          *
1311          * RX :
1312          * -------
1313          * rx_good_octets, rx_good_pkts, rx_octets
1314          * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
1315          * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
1316          * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
1317          * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
1318          * rx_missed_pkts, rx_crc_align_errs, rx_undersize
1319          * rx_crc_errs, rx_align_errs, rx_symbol_errs
1320          * rx_pause_pkts, rx_nonpause_pkts
1321          */
1322 
1323         bfe->bfe_stats.ether_stat_carrier_errors =
1324             bfe->bfe_hw_stats.tx_carrier_lost;
1325 
1326         /* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */
1327 
1328         bfe->bfe_stats.ether_stat_ex_collisions =
1329             bfe->bfe_hw_stats.tx_excessive_cols;
1330         txerr += bfe->bfe_hw_stats.tx_excessive_cols;
1331         coll += bfe->bfe_hw_stats.tx_excessive_cols;
1332 
1333         bfe->bfe_stats.ether_stat_fcs_errors =
1334             bfe->bfe_hw_stats.rx_crc_errs;
1335         rxerr += bfe->bfe_hw_stats.rx_crc_errs;
1336 
1337         bfe->bfe_stats.ether_stat_first_collisions =
1338             bfe->bfe_hw_stats.tx_single_cols;
1339         coll += bfe->bfe_hw_stats.tx_single_cols;
1340         bfe->bfe_stats.ether_stat_multi_collisions =
1341             bfe->bfe_hw_stats.tx_multiple_cols;
1342         coll += bfe->bfe_hw_stats.tx_multiple_cols;
1343 
1344         bfe->bfe_stats.ether_stat_toolong_errors =
1345             bfe->bfe_hw_stats.rx_oversize_pkts;
1346         rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;
1347 
1348         bfe->bfe_stats.ether_stat_tooshort_errors =
1349             bfe->bfe_hw_stats.rx_undersize;
1350         rxerr += bfe->bfe_hw_stats.rx_undersize;
1351 
1352         bfe->bfe_stats.ether_stat_tx_late_collisions +=
1353             bfe->bfe_hw_stats.tx_late_cols;
1354 
1355         bfe->bfe_stats.ether_stat_defer_xmts +=
1356             bfe->bfe_hw_stats.tx_defered;
1357 
1358         bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
1359         bfe->bfe_stats.ether_stat_macxmt_errors += txerr;
1360 
1361         bfe->bfe_stats.collisions += coll;
1362 }
1363 
1364 /*
 1365  * Gets the statistics for the dladm command and other consumers.
1366  */
1367 int
1368 bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
1369 {
1370         bfe_t *bfe = (bfe_t *)arg;
1371         uint64_t        v;
1372         int err = 0;
1373 
1374         rw_enter(&bfe->bfe_rwlock, RW_READER);
1375 
1376 
1377         switch (stat) {
1378         default:
1379                 err = ENOTSUP;
1380                 break;
1381 
1382         case MAC_STAT_IFSPEED:
1383                 /*
 1384                  * The MAC layer will ask for IFSPEED first and hence we
 1385                  * collect the hardware stats only once here.
1386                  */
1387                 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1388                         /*
1389                          * Update stats from the hardware.
1390                          */
1391                         bfe_gather_stats(bfe);
1392                 }
1393                 v = bfe->bfe_chip.speed;
1394                 break;
1395 
1396         case ETHER_STAT_ADV_CAP_100T4:
1397                 v = bfe->bfe_adv_100T4;
1398                 break;
1399 
1400         case ETHER_STAT_ADV_CAP_100FDX:
1401                 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0;
1402                 break;
1403 
1404         case ETHER_STAT_ADV_CAP_100HDX:
1405                 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0;
1406                 break;
1407 
1408         case ETHER_STAT_ADV_CAP_10FDX:
1409                 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0;
1410                 break;
1411 
1412         case ETHER_STAT_ADV_CAP_10HDX:
1413                 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0;
1414                 break;
1415 
1416         case ETHER_STAT_ADV_CAP_ASMPAUSE:
1417                 v = 0;
1418                 break;
1419 
1420         case ETHER_STAT_ADV_CAP_AUTONEG:
1421                 v = bfe->bfe_adv_aneg;
1422                 break;
1423 
1424         case ETHER_STAT_ADV_CAP_PAUSE:
1425                 v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0;
1426                 break;
1427 
1428         case ETHER_STAT_ADV_REMFAULT:
1429                 v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0;
1430                 break;
1431 
1432         case ETHER_STAT_ALIGN_ERRORS:
1433                 /* MIB */
1434                 v = bfe->bfe_stats.ether_stat_align_errors;
1435                 break;
1436 
1437         case ETHER_STAT_CAP_100T4:
1438                 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0;
1439                 break;
1440 
1441         case ETHER_STAT_CAP_100FDX:
1442                 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0;
1443                 break;
1444 
1445         case ETHER_STAT_CAP_100HDX:
1446                 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0;
1447                 break;
1448 
1449         case ETHER_STAT_CAP_10FDX:
1450                 v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0;
1451                 break;
1452 
1453         case ETHER_STAT_CAP_10HDX:
1454                 v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0;
1455                 break;
1456 
1457         case ETHER_STAT_CAP_ASMPAUSE:
1458                 v = 0;
1459                 break;
1460 
1461         case ETHER_STAT_CAP_AUTONEG:
1462                 v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0);
1463                 break;
1464 
1465         case ETHER_STAT_CAP_PAUSE:
1466                 v = 1;
1467                 break;
1468 
1469         case ETHER_STAT_CAP_REMFAULT:
1470                 v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0;
1471                 break;
1472 
1473         case ETHER_STAT_CARRIER_ERRORS:
1474                 v = bfe->bfe_stats.ether_stat_carrier_errors;
1475                 break;
1476 
1477         case ETHER_STAT_JABBER_ERRORS:
1478                 err = ENOTSUP;
1479                 break;
1480 
1481         case ETHER_STAT_DEFER_XMTS:
1482                 v = bfe->bfe_stats.ether_stat_defer_xmts;
1483                 break;
1484 
1485         case ETHER_STAT_EX_COLLISIONS:
1486                 /* MIB */
1487                 v = bfe->bfe_stats.ether_stat_ex_collisions;
1488                 break;
1489 
1490         case ETHER_STAT_FCS_ERRORS:
1491                 /* MIB */
1492                 v = bfe->bfe_stats.ether_stat_fcs_errors;
1493                 break;
1494 
1495         case ETHER_STAT_FIRST_COLLISIONS:
1496                 /* MIB */
1497                 v = bfe->bfe_stats.ether_stat_first_collisions;
1498                 break;
1499 
1500         case ETHER_STAT_LINK_ASMPAUSE:
1501                 v = 0;
1502                 break;
1503 
1504         case ETHER_STAT_LINK_AUTONEG:
1505                 v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 &&
1506                     (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0;
1507                 break;
1508 
1509         case ETHER_STAT_LINK_DUPLEX:
1510                 v = bfe->bfe_chip.duplex;
1511                 break;
1512 
1513         case ETHER_STAT_LP_CAP_100T4:
1514                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0;
1515                 break;
1516 
1517         case ETHER_STAT_LP_CAP_100FDX:
1518                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0;
1519                 break;
1520 
1521         case ETHER_STAT_LP_CAP_100HDX:
1522                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0;
1523                 break;
1524 
1525         case ETHER_STAT_LP_CAP_10FDX:
1526                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0;
1527                 break;
1528 
1529         case ETHER_STAT_LP_CAP_10HDX:
1530                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0;
1531                 break;
1532 
1533         case ETHER_STAT_LP_CAP_ASMPAUSE:
1534                 v = 0;
1535                 break;
1536 
1537         case ETHER_STAT_LP_CAP_AUTONEG:
1538                 v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0;
1539                 break;
1540 
1541         case ETHER_STAT_LP_CAP_PAUSE:
1542                 v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0;
1543                 break;
1544 
1545         case ETHER_STAT_LP_REMFAULT:
1546                 v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0;
1547                 break;
1548 
1549         case ETHER_STAT_MACRCV_ERRORS:
1550                 v = bfe->bfe_stats.ether_stat_macrcv_errors;
1551                 break;
1552 
1553         case ETHER_STAT_MACXMT_ERRORS:
1554                 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1555                 break;
1556 
1557         case ETHER_STAT_MULTI_COLLISIONS:
1558                 v = bfe->bfe_stats.ether_stat_multi_collisions;
1559                 break;
1560 
1561         case ETHER_STAT_SQE_ERRORS:
1562                 err = ENOTSUP;
1563                 break;
1564 
1565         case ETHER_STAT_TOOLONG_ERRORS:
1566                 v = bfe->bfe_stats.ether_stat_toolong_errors;
1567                 break;
1568 
1569         case ETHER_STAT_TOOSHORT_ERRORS:
1570                 v = bfe->bfe_stats.ether_stat_tooshort_errors;
1571                 break;
1572 
1573         case ETHER_STAT_TX_LATE_COLLISIONS:
1574                 v = bfe->bfe_stats.ether_stat_tx_late_collisions;
1575                 break;
1576 
1577         case ETHER_STAT_XCVR_ADDR:
1578                 v = bfe->bfe_phy_addr;
1579                 break;
1580 
1581         case ETHER_STAT_XCVR_ID:
1582                 v = bfe->bfe_phy_id;
1583                 break;
1584 
1585         case MAC_STAT_BRDCSTRCV:
1586                 v = bfe->bfe_stats.brdcstrcv;
1587                 break;
1588 
1589         case MAC_STAT_BRDCSTXMT:
1590                 v = bfe->bfe_stats.brdcstxmt;
1591                 break;
1592 
1593         case MAC_STAT_MULTIXMT:
1594                 v = bfe->bfe_stats.multixmt;
1595                 break;
1596 
1597         case MAC_STAT_COLLISIONS:
1598                 v = bfe->bfe_stats.collisions;
1599                 break;
1600 
1601         case MAC_STAT_IERRORS:
1602                 v = bfe->bfe_stats.ierrors;
1603                 break;
1604 
1605         case MAC_STAT_IPACKETS:
1606                 v = bfe->bfe_stats.ipackets;
1607                 break;
1608 
1609         case MAC_STAT_MULTIRCV:
1610                 v = bfe->bfe_stats.multircv;
1611                 break;
1612 
1613         case MAC_STAT_NORCVBUF:
1614                 v = bfe->bfe_stats.norcvbuf;
1615                 break;
1616 
1617         case MAC_STAT_NOXMTBUF:
1618                 v = bfe->bfe_stats.noxmtbuf;
1619                 break;
1620 
1621         case MAC_STAT_OBYTES:
1622                 v = bfe->bfe_stats.obytes;
1623                 break;
1624 
1625         case MAC_STAT_OERRORS:
1626                 /* MIB */
1627                 v = bfe->bfe_stats.ether_stat_macxmt_errors;
1628                 break;
1629 
1630         case MAC_STAT_OPACKETS:
1631                 v = bfe->bfe_stats.opackets;
1632                 break;
1633 
1634         case MAC_STAT_RBYTES:
1635                 v = bfe->bfe_stats.rbytes;
1636                 break;
1637 
1638         case MAC_STAT_UNDERFLOWS:
1639                 v = bfe->bfe_stats.underflows;
1640                 break;
1641 
1642         case MAC_STAT_OVERFLOWS:
1643                 v = bfe->bfe_stats.overflows;
1644                 break;
1645         }
1646 
1647         rw_exit(&bfe->bfe_rwlock);
1648 
1649         *val = v;
1650         return (err);
1651 }
1652 
1653 int
1654 bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1655     void *val)
1656 {
1657         bfe_t           *bfe = (bfe_t *)arg;
1658         int             err = 0;
1659 
1660         switch (num) {
1661         case MAC_PROP_DUPLEX:
1662                 ASSERT(sz >= sizeof (link_duplex_t));
1663                 bcopy(&bfe->bfe_chip.duplex, val, sizeof (link_duplex_t));
1664                 break;
1665 
1666         case MAC_PROP_SPEED:
1667                 ASSERT(sz >= sizeof (uint64_t));
1668                 bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
1669                 break;
1670 
1671         case MAC_PROP_AUTONEG:
1672                 *(uint8_t *)val = bfe->bfe_adv_aneg;
1673                 break;
1674 
1675         case MAC_PROP_ADV_100FDX_CAP:
1676                 *(uint8_t *)val = bfe->bfe_adv_100fdx;
1677                 break;
1678 
1679         case MAC_PROP_EN_100FDX_CAP:
1680                 *(uint8_t *)val = bfe->bfe_adv_100fdx;
1681                 break;
1682 
1683         case MAC_PROP_ADV_100HDX_CAP:
1684                 *(uint8_t *)val = bfe->bfe_adv_100hdx;
1685                 break;
1686 
1687         case MAC_PROP_EN_100HDX_CAP:
1688                 *(uint8_t *)val = bfe->bfe_adv_100hdx;
1689                 break;
1690 
1691         case MAC_PROP_ADV_10FDX_CAP:
1692                 *(uint8_t *)val = bfe->bfe_adv_10fdx;
1693                 break;
1694 
1695         case MAC_PROP_EN_10FDX_CAP:
1696                 *(uint8_t *)val = bfe->bfe_adv_10fdx;
1697                 break;
1698 
1699         case MAC_PROP_ADV_10HDX_CAP:
1700                 *(uint8_t *)val = bfe->bfe_adv_10hdx;
1701                 break;
1702 
1703         case MAC_PROP_EN_10HDX_CAP:
1704                 *(uint8_t *)val = bfe->bfe_adv_10hdx;
1705                 break;
1706 
1707         case MAC_PROP_ADV_100T4_CAP:
1708                 *(uint8_t *)val = bfe->bfe_adv_100T4;
1709                 break;
1710 
1711         case MAC_PROP_EN_100T4_CAP:
1712                 *(uint8_t *)val = bfe->bfe_adv_100T4;
1713                 break;
1714 
1715         default:
1716                 err = ENOTSUP;
1717         }
1718 
1719         return (err);
1720 }
1721 
1722 
1723 static void
1724 bfe_mac_propinfo(void *arg, const char *name, mac_prop_id_t num,
1725     mac_prop_info_handle_t prh)
1726 {
1727         bfe_t           *bfe = (bfe_t *)arg;
1728 
1729         switch (num) {
1730         case MAC_PROP_DUPLEX:
1731         case MAC_PROP_SPEED:
1732         case MAC_PROP_ADV_100FDX_CAP:
1733         case MAC_PROP_ADV_100HDX_CAP:
1734         case MAC_PROP_ADV_10FDX_CAP:
1735         case MAC_PROP_ADV_10HDX_CAP:
1736         case MAC_PROP_ADV_100T4_CAP:
1737         case MAC_PROP_EN_100T4_CAP:
1738                 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1739                 break;
1740 
1741         case MAC_PROP_AUTONEG:
1742                 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_aneg);
1743                 break;
1744 
1745         case MAC_PROP_EN_100FDX_CAP:
1746                 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100fdx);
1747                 break;
1748 
1749         case MAC_PROP_EN_100HDX_CAP:
1750                 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100hdx);
1751                 break;
1752 
1753         case MAC_PROP_EN_10FDX_CAP:
1754                 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10fdx);
1755                 break;
1756 
1757         case MAC_PROP_EN_10HDX_CAP:
1758                 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10hdx);
1759                 break;
1760         }
1761 }
1762 
1763 
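     /*
      * MAC property set callback: only autonegotiation and the EN_* link
      * capabilities are writable, and only when the underlying capability
      * is supported. A change restarts the chip if it is currently active.
      */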
1764 /*ARGSUSED*/
1765 int
1766 bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1767     const void *val)
1768 {
1769         bfe_t           *bfe = (bfe_t *)arg;
1770         uint8_t         *advp;
1771         uint8_t         *capp;
1772         int             r = 0;
1773 
1774         switch (num) {
1775         case MAC_PROP_EN_100FDX_CAP:
1776                 advp = &bfe->bfe_adv_100fdx;
1777                 capp = &bfe->bfe_cap_100fdx;
1778                 break;
1779 
1780         case MAC_PROP_EN_100HDX_CAP:
1781                 advp = &bfe->bfe_adv_100hdx;
1782                 capp = &bfe->bfe_cap_100hdx;
1783                 break;
1784 
1785         case MAC_PROP_EN_10FDX_CAP:
1786                 advp = &bfe->bfe_adv_10fdx;
1787                 capp = &bfe->bfe_cap_10fdx;
1788                 break;
1789 
1790         case MAC_PROP_EN_10HDX_CAP:
1791                 advp = &bfe->bfe_adv_10hdx;
1792                 capp = &bfe->bfe_cap_10hdx;
1793                 break;
1794 
1795         case MAC_PROP_AUTONEG:
1796                 advp = &bfe->bfe_adv_aneg;
1797                 capp = &bfe->bfe_cap_aneg;
1798                 break;
1799 
1800         default:
1801                 return (ENOTSUP);
1802         }
1803 
1804         if (*capp == 0)
1805                 return (ENOTSUP);
1806 
1807         bfe_grab_locks(bfe);
1808 
1809         if (*advp != *(const uint8_t *)val) {
1810                 *advp = *(const uint8_t *)val;
1811 
1812                 bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP;
1813                 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1814                         /*
1815                          * We need to stop the timer before grabbing locks,
1816                          * otherwise we can end up deadlocking with untimeout.
1817                          */
1818                         bfe_stop_timer(bfe);
1819 
1820                         bfe->bfe_chip_action |= BFE_ACTION_RESTART;
1821 
1822                         bfe_chip_restart(bfe);
1823 
1824                         /*
1825                          * We leave the SETPROP flag set because property
1826                          * changes can be temporary.
1827                          */
1828                         bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART);
1829                         r = 1;
1830                 }
1831         }
1832 
1833         bfe_release_locks(bfe);
1834 
1835         /* kick off a potentially stopped transmit stream */
1836         if (r)
1837                 mac_tx_update(bfe->bfe_machdl);
1838 
1839         return (0);
1840 }
1841 
1842 
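     /*
      * MAC unicast-address callback: record the new station address and
      * reprogram the receive filter.
      */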
1843 int
1844 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea)
1845 {
1846         bfe_t *bfe = (bfe_t *)arg;
1847 
1848         bfe_grab_locks(bfe);
1849         bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL);
1850         bfe_set_rx_mode(bfe);
1851         bfe_release_locks(bfe);
1852         return (0);
1853 }
1854 
1855 int
1856 bfe_mac_start(void *arg)
1857 {
1858         bfe_t *bfe = (bfe_t *)arg;
1859 
1860         bfe_grab_locks(bfe);
1861         if (bfe_chip_start(bfe) == DDI_FAILURE) {
1862                 bfe_release_locks(bfe);
1863                 return (EINVAL);
1864         }
1865 
1866         bfe_release_locks(bfe);
1867 
1868         mac_tx_update(bfe->bfe_machdl);
1869 
1870         return (0);
1871 }
1872 
1873 void
1874 bfe_mac_stop(void *arg)
1875 {
1876         bfe_t *bfe = (bfe_t *)arg;
1877 
1878         /*
1879          * We need to stop the timer before grabbing locks, otherwise
1880          * we can end up deadlocking with untimeout.
1881          */
1882         bfe_stop_timer(bfe);
1883 
1884         bfe_grab_locks(bfe);
1885 
1886         /*
1887          * First halt the chip by disabling interrupts.
1888          */
1889         bfe_chip_halt(bfe);
1890         bfe_stop_phy(bfe);
1891 
1892         bfe->bfe_chip_state = BFE_CHIP_STOPPED;
1893 
1894         /*
1895          * This will leave the PHY running.
1896          */
1897         bfe_chip_reset(bfe);
1898 
1899         /*
1900          * Disable RX register.
1901          */
1902         bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE;
1903         bfe_set_rx_mode(bfe);
1904 
1905         bfe_release_locks(bfe);
1906 }
1907 
1908 /*
1909  * Send a packet down the wire.
1910  */
1911 static int
1912 bfe_send_a_packet(bfe_t *bfe, mblk_t *mp)
1913 {
1914         bfe_ring_t *r = &bfe->bfe_tx_ring;
1915         uint32_t cur = r->r_curr_desc;
1916         uint32_t next;
1917         size_t  pktlen = msgsize(mp);
1918         uchar_t *buf;
1919         uint32_t v;
1920 
1921         ASSERT(MUTEX_HELD(&r->r_lock));
1922         ASSERT(mp != NULL);
1923 
1924         if (pktlen > r->r_buf_len) {
1925                 freemsg(mp);
1926                 return (BFE_SUCCESS);
1927         }
1928 
1929         /*
1930          * We deliberately never let the available-descriptor count reach
1931          * '0'. The ring follows a producer (tx) / consumer (reclaim in the
1932          * interrupt handler) model; if every descriptor were used, the chip
1933          * would raise a single interrupt and the producer and consumer
1934          * counters would be equal, making a full ring indistinguishable
1935          * from an empty one. So we always keep a difference of 1.
1936          */
1937         if (r->r_avail_desc <= 1) {
1938                 bfe->bfe_stats.noxmtbuf++;
1939                 bfe->bfe_tx_resched = 1;
1940                 return (BFE_FAILURE);
1941         }
1942 
1943         /*
1944          * Get the DMA buffer to hold packet.
1945          */
1946         buf = (uchar_t *)r->r_buf_dma[cur].addr;
1947 
1948         mcopymsg(mp, buf);      /* it also frees mp */
1949 
1950         /*
1951          * Gather statistics.
1952          */
1953         if (buf[0] & 0x1) {
1954                 if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0)
1955                         bfe->bfe_stats.multixmt++;
1956                 else
1957                         bfe->bfe_stats.brdcstxmt++;
1958         }
1959         bfe->bfe_stats.opackets++;
1960         bfe->bfe_stats.obytes += pktlen;
1961 
1962 
1963         /*
1964          * Program the DMA descriptor (start and end of frame are same).
1965          */
1966         next = cur;
1967         v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF |
1968             BFE_DESC_EOF;
1969 
1970         if (cur == (TX_NUM_DESC - 1))
1971                 v |= BFE_DESC_EOT;
1972 
1973         PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v);
1974 
1975         /*
1976          * The DMA address must be offset by BFE_PCI_DMA before use.
1977          */
1978         PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr),
1979             (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA));
1980 
1981         /*
1982          * Sync the packet data for the device.
1983          */
1984         (void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV);
1985 
1986         /* Move to next descriptor slot */
1987         BFE_INC_SLOT(next, TX_NUM_DESC);
1988 
1989         (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1990 
1991         r->r_curr_desc = next;
1992 
1993         /*
1994          * BFE_DMATX_PTR must point one slot past the descriptor just
1995          * programmed: write 1,2,3,... when slots 0,1,2,... are used.
1996          */
1997         OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t));
1998         FLUSH(bfe, BFE_DMATX_PTR);
1999 
2000         r->r_avail_desc--;
2001 
2002         /*
2003          * Let timeout know that it must reset the chip if a
2004          * packet is not sent down the wire for more than 5 seconds.
2005          */
2006         bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL);
2007 
2008         return (BFE_SUCCESS);
2009 }
2010 
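     /*
      * MAC transmit entry point. Packets from the chain are queued until the
      * TX ring fills; whatever could not be queued is returned so the MAC
      * layer can resubmit it after mac_tx_update().
      */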
2011 mblk_t *
2012 bfe_mac_transmit_packet(void *arg, mblk_t *mp)
2013 {
2014         bfe_t *bfe = (bfe_t *)arg;
2015         bfe_ring_t *r = &bfe->bfe_tx_ring;
2016         mblk_t  *nmp;
2017 
2018         mutex_enter(&r->r_lock);
2019 
2020         if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2021                 DTRACE_PROBE1(tx__chip__not__active, int, bfe->bfe_unit);
2022 
2023                 freemsgchain(mp);
2024                 mutex_exit(&r->r_lock);
2025                 return (NULL);
2026         }
2027 
2028 
2029         while (mp != NULL) {
2030                 nmp = mp->b_next;
2031                 mp->b_next = NULL;
2032 
2033                 if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) {
2034                         mp->b_next = nmp;
2035                         break;
2036                 }
2037                 mp = nmp;
2038         }
2039 
2040         mutex_exit(&r->r_lock);
2041 
2042         return (mp);
2043 }
2044 
2045 int
2046 bfe_mac_set_promisc(void *arg, boolean_t promiscflag)
2047 {
2048         bfe_t *bfe = (bfe_t *)arg;
2049 
2050         bfe_grab_locks(bfe);
2051         if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2052                 bfe_release_locks(bfe);
2053                 return (EIO);
2054         }
2055 
2056         if (promiscflag) {
2057                 /* Set Promiscuous mode on */
2058                 bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC;
2059         } else {
2060                 bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC;
2061         }
2062 
2063         bfe_set_rx_mode(bfe);
2064         bfe_release_locks(bfe);
2065 
2066         return (0);
2067 }
2068 
2069 int
2070 bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
2071 {
2072         /*
2073          * Implementing multicast filtering in the CAM was too painful;
2074          * instead we simply never disable the multicast filter.
2075          */
2076         return (0);
2077 }
2078 
2079 static mac_callbacks_t bfe_mac_callbacks = {
2080         MC_SETPROP | MC_GETPROP | MC_PROPINFO,
2081         bfe_mac_getstat,        /* gets stats */
2082         bfe_mac_start,          /* starts mac */
2083         bfe_mac_stop,           /* stops mac */
2084         bfe_mac_set_promisc,    /* sets promisc mode for snoop */
2085         bfe_mac_set_multicast,  /* multicast implementation */
2086         bfe_mac_set_ether_addr, /* sets ethernet address (unicast) */
2087         bfe_mac_transmit_packet, /* transmits packet */
2088         NULL,                   /* reserved */
2089         NULL,                   /* ioctl */
2090         NULL,                   /* getcap */
2091         NULL,                   /* open */
2092         NULL,                   /* close */
2093         bfe_mac_setprop,
2094         bfe_mac_getprop,
2095         bfe_mac_propinfo
2096 };
2097 
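     /*
      * Handle error interrupts. Most errors mark the chip for a restart
      * (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT) and halt it; a TX
      * FIFO underflow is only counted.
      */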
2098 static void
2099 bfe_error_handler(bfe_t *bfe, int intr_mask)
2100 {
2101         uint32_t v;
2102 
2103         if (intr_mask & BFE_ISTAT_RFO) {
2104                 bfe->bfe_stats.overflows++;
2105                 bfe->bfe_chip_action |=
2106                     (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
2107                 goto action;
2108         }
2109 
2110         if (intr_mask & BFE_ISTAT_TFU) {
2111                 bfe->bfe_stats.underflows++;
2112                 return;
2113         }
2114 
2115         /* Descriptor Protocol Error */
2116         if (intr_mask & BFE_ISTAT_DPE) {
2117                 bfe_error(bfe->bfe_dip,
2118                     "Descriptor Protocol Error. Halting Chip");
2119                 bfe->bfe_chip_action |=
2120                     (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
2121                 goto action;
2122         }
2123 
2124         /* Descriptor Error */
2125         if (intr_mask & BFE_ISTAT_DSCE) {
2126                 bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
2127                 goto action;
2128         }
2129 
2130         /* Receive Descr. Underflow */
2131         if (intr_mask & BFE_ISTAT_RDU) {
2132                 bfe_error(bfe->bfe_dip,
2133                     "Receive Descriptor Underflow. Restarting Chip");
2134                 bfe->bfe_stats.ether_stat_macrcv_errors++;
2135                 bfe->bfe_chip_action |=
2136                     (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
2137                 goto action;
2138         }
2139 
2140         v = INL(bfe, BFE_DMATX_STAT);
2141 
2142         /* Error while sending a packet */
2143         if (v & BFE_STAT_EMASK) {
2144                 bfe->bfe_stats.ether_stat_macxmt_errors++;
2145                 bfe_error(bfe->bfe_dip,
2146                     "Error while sending a packet. Restarting Chip");
2147         }
2148 
2149         /* Error while receiving a packet */
2150         v = INL(bfe, BFE_DMARX_STAT);
2151         if (v & BFE_RX_FLAG_ERRORS) {
2152                 bfe->bfe_stats.ierrors++;
2153                 bfe_error(bfe->bfe_dip,
2154                     "Error while receiving a packet. Restarting Chip");
2155         }
2156 
2157 
2158         bfe->bfe_chip_action |=
2159             (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
2160 
2161 action:
2162         bfe_chip_halt(bfe);
2163 }
2164 
2165 /*
2166  * Recycle an RX descriptor slot so it can receive another packet.
2167  */
2168 static void
2169 bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
2170 {
2171         bfe_ring_t *r = &bfe->bfe_rx_ring;
2172         uint32_t v;
2173 
2174         slot %= RX_NUM_DESC;
2175 
2176         bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));
2177 
2178         (void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);
2179 
2180         v = r->r_buf_dma[slot].len  & BFE_DESC_LEN;
2181         if (slot == (RX_NUM_DESC - 1))
2182                 v |= BFE_DESC_EOT;
2183 
2184         PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);
2185 
2186         /*
2187          * The DMA address must be offset by BFE_PCI_DMA before use.
2188          */
2189         PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
2190             (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
2191 }
2192 
2193 /*
2194  * Gets called from interrupt context to handle RX interrupt.
2195  */
2196 static mblk_t *
2197 bfe_receive(bfe_t *bfe, int intr_mask)
2198 {
2199         int rxstat, current;
2200         mblk_t  *mp = NULL, *rx_head, *rx_tail;
2201         uchar_t *rx_header;
2202         uint16_t len;
2203         uchar_t *bp;
2204         bfe_ring_t *r = &bfe->bfe_rx_ring;
2205         int i;
2206 
2207         rxstat = INL(bfe, BFE_DMARX_STAT);
2208         current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t);
2209         i = r->r_curr_desc;
2210 
2211         rx_head = rx_tail = NULL;
2212 
2213         DTRACE_PROBE3(receive, int, bfe->bfe_unit,
2214             int, r->r_curr_desc,
2215             int, current);
2216 
2217         for (i = r->r_curr_desc; i != current;
2218             BFE_INC_SLOT(i, RX_NUM_DESC)) {
2219 
2220                 /*
2221                  * Sync the buffer associated with the descriptor table entry.
2222                  */
2223                 (void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len,
2224                     DDI_DMA_SYNC_FORKERNEL);
2225 
2226                 rx_header = (void *)r->r_buf_dma[i].addr;
2227 
2228                 /*
2229                  * Assemble the length byte-by-byte so the read is endian
2230                  * neutral regardless of the host's byte order.
2231                  *
2232                  * The RX header layout is:
2233                  *
2234                  *  Offset 0  -> uint16_t len
2235                  *  Offset 2  -> uint16_t flags
2236                  *  Offset 4  -> uint16_t pad[12]
2237                  */
2238                 len = (rx_header[1] << 8) | rx_header[0];
2239                 len -= 4;       /* CRC bytes need to be removed */
2240 
2241                 /*
2242                  * Drop the packet if its length exceeds the size of the
2243                  * receive buffer.
2244                  */
2245                 if (len > r->r_buf_len) {
2246                         /* Recycle slot for later use */
2247                         bfe_rx_desc_buf_reinit(bfe, i);
2248                         continue;
2249                 }
2250 
2251                 if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) {
2252                         mp->b_rptr += VLAN_TAGSZ;
2253                         bp = mp->b_rptr;
2254                         mp->b_wptr = bp + len;
2255 
2256                         /* skip the RX header: BFE_RX_OFFSET = sizeof (bfe_rx_header_t) + 2 */
2257                         bcopy(r->r_buf_dma[i].addr +
2258                             BFE_RX_OFFSET, bp, len);
2259 
2260                         mp->b_next = NULL;
2261                         if (rx_tail == NULL)
2262                                 rx_head = rx_tail = mp;
2263                         else {
2264                                 rx_tail->b_next = mp;
2265                                 rx_tail = mp;
2266                         }
2267 
2268                         /* Number of packets received so far */
2269                         bfe->bfe_stats.ipackets++;
2270 
2271                         /* Total bytes of packets received so far */
2272                         bfe->bfe_stats.rbytes += len;
2273 
2274                         /* count only broadcast and multicast frames */
2275                         if (mp->b_rptr[0] & 0x1) {
2276                                 if (bcmp(mp->b_rptr, bfe_broadcast,
2277                                     ETHERADDRL) == 0)
                                             bfe->bfe_stats.brdcstrcv++;
                                     else
                                             bfe->bfe_stats.multircv++;
                             }
2278                 } else {
2279                         bfe->bfe_stats.norcvbuf++;
2280                         /* Recycle the slot for later use */
2281                         bfe_rx_desc_buf_reinit(bfe, i);
2282                         break;
2283                 }
2284 
2285                 /*
2286                  * Reinitialize the current descriptor slot's buffer so that
2287                  * it can be reused.
2288                  */
2289                 bfe_rx_desc_buf_reinit(bfe, i);
2290         }
2291 
2292         r->r_curr_desc = i;
2293 
2294         (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
2295 
2296         return (rx_head);
2297 }
2298 
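     /*
      * Reclaim TX descriptors that the chip has finished with. Walk from the
      * consumer index up to the chip's current descriptor, re-arming each
      * slot and returning it to the pool of available descriptors.
      */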
2299 static int
2300 bfe_tx_reclaim(bfe_ring_t *r)
2301 {
2302         uint32_t cur, start;
2303         uint32_t v;
2304 
2305         cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
2306         cur = cur / sizeof (bfe_desc_t);
2307 
2308         /*
2309          * Start from the first descriptor not yet reclaimed (the consumer index).
2310          */
2311         start = r->r_cons_desc;
2312 
2313         DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit,
2314             int, start,
2315             int, cur);
2316 
2317         /*
2318          * Reclaim every descriptor the chip has finished with.
2319          */
2320         while (start != cur) {
2321                 r->r_avail_desc++;
2322                 v = r->r_buf_dma[start].len  & BFE_DESC_LEN;
2323                 if (start == (TX_NUM_DESC - 1))
2324                         v |= BFE_DESC_EOT;
2325 
2326                 PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
2327                 PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
2328                     (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));
2329 
2330                 /* Move to next descriptor in TX ring */
2331                 BFE_INC_SLOT(start, TX_NUM_DESC);
2332         }
2333 
2334         (void) ddi_dma_sync(r->r_desc_dma_handle,
2335             0, (r->r_ndesc * sizeof (bfe_desc_t)),
2336             DDI_DMA_SYNC_FORDEV);
2337 
2338         r->r_cons_desc = start;      /* consumed pointer */
2339         r->r_bfe->bfe_tx_stall_time = 0;
2340 
2341         return (cur);
2342 }
2343 
2344 static int
2345 bfe_tx_done(bfe_t *bfe, int intr_mask)
2346 {
2347         bfe_ring_t *r = &bfe->bfe_tx_ring;
2348         int resched = 0;
2349 
2350         mutex_enter(&r->r_lock);
2351         (void) bfe_tx_reclaim(r);
2352 
2353         if (bfe->bfe_tx_resched) {
2354                 resched = 1;
2355                 bfe->bfe_tx_resched = 0;
2356         }
2357         mutex_exit(&r->r_lock);
2358 
2359         return (resched);
2360 }
2361 
2362 /*
2363  * Interrupt service routine.
2364  */
2365 static uint_t
2366 bfe_interrupt(caddr_t arg1, caddr_t arg2)
2367 {
2368         bfe_t *bfe =  (void *)arg1;
2369         uint32_t        intr_stat;
2370         mblk_t *rx_head = NULL;
2371         int resched = 0;
2372 
2373         /*
2374          * Grab the lock to avoid stopping the chip while this interrupt
2375          * is handled.
2376          */
2377         rw_enter(&bfe->bfe_rwlock, RW_READER);
2378 
2379         /*
2380          * Read and acknowledge the interrupt status; masking the interrupt
2381          * register does not really stop interrupts coming from the chip.
2382          */
2383         intr_stat = INL(bfe, BFE_INTR_STAT);
2384         intr_stat &= BFE_IMASK_DEF;
2385         OUTL(bfe, BFE_INTR_STAT, intr_stat);
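             /* read back to flush the acknowledgement write to the chip */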
2386         (void) INL(bfe, BFE_INTR_STAT);
2387 
2388         if (intr_stat == 0) {
2389                 rw_exit(&bfe->bfe_rwlock);
2390                 return (DDI_INTR_UNCLAIMED);
2391         }
2392 
2393         DTRACE_PROBE2(bfe__interrupt, int, bfe->bfe_unit,
2394             int, intr_stat);
2395 
2396         if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2397                 /*
2398                  * If the chip is suspended then we just return.
2399                  */
2400                 if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
2401                         rw_exit(&bfe->bfe_rwlock);
2402                         DTRACE_PROBE1(interrupt__chip__is__suspend, int,
2403                             bfe->bfe_unit);
2404                         return (DDI_INTR_CLAIMED);
2405                 }
2406 
2407                 /*
2408                  * Halt the chip again, i.e. disable interrupts.
2409                  */
2410                 bfe_chip_halt(bfe);
2411                 rw_exit(&bfe->bfe_rwlock);
2412                 DTRACE_PROBE1(interrupt__chip__not__active, int,
2413                     bfe->bfe_unit);
2414                 return (DDI_INTR_CLAIMED);
2415         }
2416 
2417         /* A packet was received */
2418         if (intr_stat & BFE_ISTAT_RX) {
2419                 rx_head = bfe_receive(bfe, intr_stat);
2420         }
2421 
2422         /* A packet was sent down the wire */
2423         if (intr_stat & BFE_ISTAT_TX) {
2424                 resched = bfe_tx_done(bfe, intr_stat);
2425         }
2426 
2427         /* There was an error */
2428         if (intr_stat & BFE_ISTAT_ERRORS) {
2429                 bfe_error_handler(bfe, intr_stat);
2430         }
2431 
2432         rw_exit(&bfe->bfe_rwlock);
2433 
2434         /*
2435          * Pass the list of received packets up to the MAC layer.
2436          */
2437         if (rx_head) {
2438                 mac_rx(bfe->bfe_machdl, 0, rx_head);
2439         }
2440 
2441         /*
2442          * Let the MAC layer resume sending on a potentially stopped stream.
2443          */
2444         if (resched)
2445                 mac_tx_update(bfe->bfe_machdl);
2446 
2447         return (DDI_INTR_CLAIMED);
2448 }
2449 
2450 /*
2451  * Removes registered interrupt handler.
2452  */
2453 static void
2454 bfe_remove_intr(bfe_t *bfe)
2455 {
2456         (void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
2457         (void) ddi_intr_free(bfe->bfe_intrhdl);
2458 }
2459 
2460 /*
2461  * Add an interrupt for the driver.
2462  */
2463 static int
2464 bfe_add_intr(bfe_t *bfe)
2465 {
2466         int     nintrs = 1;
2467         int ret;
2468 
2469         ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl,
2470             DDI_INTR_TYPE_FIXED,        /* type */
2471             0,  /* inumber */
2472             1,  /* count */
2473             &nintrs,        /* actual nintrs */
2474             DDI_INTR_ALLOC_STRICT);
2475 
2476         if (ret != DDI_SUCCESS) {
2477                 bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed"
2478                     " : ret : %d", ret);
2479                 return (DDI_FAILURE);
2480         }
2481 
2482         ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL);
2483         if (ret != DDI_SUCCESS) {
2484                 bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed");
2485                 (void) ddi_intr_free(bfe->bfe_intrhdl);
2486                 return (DDI_FAILURE);
2487         }
2488 
2489         ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri);
2490         if (ret != DDI_SUCCESS) {
2491                 bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed");
2492                 bfe_remove_intr(bfe);
2493                 return (DDI_FAILURE);
2494         }
2495 
2496         return (DDI_SUCCESS);
2497 }
2498 
2499 
2500 /*
2501  * Identify chipset family.
2502  */
2503 static int
2504 bfe_identify_hardware(bfe_t *bfe)
2505 {
2506         uint16_t        vid, did;
2507         int i;
2508 
2509         vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID);
2510         did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID);
2511 
2512         for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
2513                 if (bfe_cards[i].vendor_id == vid &&
2514                     bfe_cards[i].device_id == did) {
2515                         return (BFE_SUCCESS);
2516                 }
2517         }
2518 
2519         bfe_error(bfe->bfe_dip, "bfe driver is attaching to an unknown card"
2520             " (pci vendor-id 0x%x, device-id 0x%x)", vid, did);
2521 
2522         return (BFE_SUCCESS);
2523 }
2524 
2525 /*
2526  * Maps device registers.
2527  */
2528 static int
2529 bfe_regs_map(bfe_t *bfe)
2530 {
2531         dev_info_t *dip = bfe->bfe_dip;
2532         int ret;
2533 
2534         ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0,
2535             &bfe_dev_attr, &bfe->bfe_mem_regset.hdl);
2536 
2537         if (ret != DDI_SUCCESS) {
2538                 bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed");
2539                 return (DDI_FAILURE);
2540         }
2541 
2542         return (DDI_SUCCESS);
2543 }
2544 
2545 static void
2546 bfe_unmap_regs(bfe_t *bfe)
2547 {
2548         ddi_regs_map_free(&bfe->bfe_mem_regset.hdl);
2549 }
2550 
2551 static int
2552 bfe_get_chip_config(bfe_t *bfe)
2553 {
2554         uint32_t        prom[BFE_EEPROM_SIZE];
2555         int i;
2556 
2557         /*
2558          * Read the EEPROM contents into prom[].
2559          */
2560         for (i = 0; i < BFE_EEPROM_SIZE; i++) {
2561                 prom[i] = INL(bfe, BFE_EEPROM_BASE + i * sizeof (uint32_t));
2562         }
2563 
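             /*
              * The station address bytes are stored pairwise swapped in the
              * EEPROM, hence the 79/78, 81/80, 83/82 byte offsets below.
              */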
2564         bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] =
2565             INB(bfe, BFE_EEPROM_BASE + 79);
2566 
2567         bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] =
2568             INB(bfe, BFE_EEPROM_BASE + 78);
2569 
2570         bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] =
2571             INB(bfe, BFE_EEPROM_BASE + 81);
2572 
2573         bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] =
2574             INB(bfe, BFE_EEPROM_BASE + 80);
2575 
2576         bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] =
2577             INB(bfe, BFE_EEPROM_BASE + 83);
2578 
2579         bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] =
2580             INB(bfe, BFE_EEPROM_BASE + 82);
2581 
2582         bfe->bfe_phy_addr = -1;
2583 
2584         return (DDI_SUCCESS);
2585 }
2586 
2587 /*
2588  * Ring Management routines
2589  */
2590 static int
2591 bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d)
2592 {
2593         int err;
2594         uint_t count = 0;
2595 
2596         err = ddi_dma_alloc_handle(bfe->bfe_dip,
2597             &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL,
2598             &r->r_buf_dma[slot].handle);
2599 
2600         if (err != DDI_SUCCESS) {
2601                 bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
2602                     " alloc_handle failed");
2603                 goto fail0;
2604         }
2605 
2606         err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle,
2607             r->r_buf_len, &bfe_buf_attr, DDI_DMA_STREAMING,
2608             DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
2609             &r->r_buf_dma[slot].len,
2610             &r->r_buf_dma[slot].acchdl);
2611 
2612         if (err != DDI_SUCCESS) {
2613                 bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
2614                     " mem_alloc failed :%d", err);
2615                 goto fail1;
2616         }
2617 
2618         err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
2619             NULL, r->r_buf_dma[slot].addr,
2620             r->r_buf_dma[slot].len,
2621             (DDI_DMA_RDWR | DDI_DMA_STREAMING),
2622             DDI_DMA_SLEEP, NULL,
2623             &r->r_buf_dma[slot].cookie,
2624             &count);
2625 
2626         if (err != DDI_DMA_MAPPED) {
2627                 bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
2628                     " bind_handle failed");
2629                 goto fail2;
2630         }
2631 
2632         if (count > 1) {
2633                 bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
2634                     " more than one DMA cookie");
2635                 (void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
2636                 goto fail2;
2637         }
2638 
2639         return (DDI_SUCCESS);
2640 fail2:
2641         ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
2642 fail1:
2643         ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
2644 fail0:
2645         return (DDI_FAILURE);
2646 }
2647 
2648 static void
2649 bfe_ring_buf_free(bfe_ring_t *r, int slot)
2650 {
2651         if (r->r_buf_dma == NULL)
2652                 return;
2653 
2654         (void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
2655         ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
2656         ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
2657 }
2658 
2659 static void
2660 bfe_buffer_free(bfe_ring_t *r)
2661 {
2662         int i;
2663 
2664         for (i = 0; i < r->r_ndesc; i++) {
2665                 bfe_ring_buf_free(r, i);
2666         }
2667 }
2668 
2669 static void
2670 bfe_ring_desc_free(bfe_ring_t *r)
2671 {
2672         (void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
2673         ddi_dma_mem_free(&r->r_desc_acc_handle);
2674         ddi_dma_free_handle(&r->r_desc_dma_handle);
2675         kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));
2676 
2677         r->r_buf_dma = NULL;
2678         r->r_desc = NULL;
2679 }
2680 
2681 
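     /*
      * Allocate the descriptor table for a ring and a DMA buffer for each
      * slot. The descriptor table must bind to a single DMA cookie.
      */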
2682 static int
2683 bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
2684 {
2685         int err, i, fail = 0;
2686         caddr_t ring;
2687         size_t  size_krnl = 0, size_dma = 0, ring_len = 0;
2688         ddi_dma_cookie_t cookie;
2689         uint_t  count = 0;
2690 
2691         ASSERT(bfe != NULL);
2692 
2693         size_krnl = r->r_ndesc * sizeof (bfe_dma_t);
2694         size_dma = r->r_ndesc * sizeof (bfe_desc_t);
2695         r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP);
2696 
2697 
2698         err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc,
2699             DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle);
2700 
2701         if (err != DDI_SUCCESS) {
2702                 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2703                     " ddi_dma_alloc_handle()");
2704                 kmem_free(r->r_buf_dma, size_krnl);
2705                 return (DDI_FAILURE);
2706         }
2707 
2708 
2709         err = ddi_dma_mem_alloc(r->r_desc_dma_handle,
2710             size_dma, &bfe_buf_attr,
2711             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2712             &ring, &ring_len, &r->r_desc_acc_handle);
2713 
2714         if (err != DDI_SUCCESS) {
2715                 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2716                     " ddi_dma_mem_alloc()");
2717                 ddi_dma_free_handle(&r->r_desc_dma_handle);
2718                 kmem_free(r->r_buf_dma, size_krnl);
2719                 return (DDI_FAILURE);
2720         }
2721 
2722         err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle,
2723             NULL, ring, ring_len,
2724             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2725             DDI_DMA_SLEEP, NULL,
2726             &cookie, &count);
2727 
2728         if (err != DDI_SUCCESS) {
2729                 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2730                     " ddi_dma_addr_bind_handle()");
2731                 ddi_dma_mem_free(&r->r_desc_acc_handle);
2732                 ddi_dma_free_handle(&r->r_desc_dma_handle);
2733                 kmem_free(r->r_buf_dma, size_krnl);
2734                 return (DDI_FAILURE);
2735         }
2736 
2737         /*
2738          * We don't want multiple cookies; the descriptor table should be
2739          * aligned to a PAGESIZE boundary so it binds as a single segment.
2740          */
2741         ASSERT(count == 1);
2742 
2743         /* The actual descriptor for the ring */
2744         r->r_desc_len = ring_len;
2745         r->r_desc_cookie = cookie;
2746 
2747         r->r_desc = (void *)ring;
2748 
2749         /* ring_len >= size_dma, so clearing ring_len covers the whole table */
2750         bzero(r->r_desc, ring_len);
2751 
2752         /* For each descriptor, allocate a DMA buffer */
2753         fail = 0;
2754         for (i = 0; i < r->r_ndesc; i++) {
2755                 if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) {
2756                         fail = 1;
2757                         break;
2758                 }
2759         }
2760 
2761         if (fail) {
2762                 /* Free only the slots that were successfully allocated */
2763                 while (--i >= 0) {
2764                         bfe_ring_buf_free(r, i);
2765                 }
2766 
2767                 /* We don't need the descriptor anymore */
2768                 bfe_ring_desc_free(r);
2769                 return (DDI_FAILURE);
2770         }
2771 
2772         return (DDI_SUCCESS);
2773 }
2774 
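     /*
      * Set up the TX and RX rings. The per-packet buffer length covers the
      * MTU plus the Ethernet header, VLAN tag and FCS; the RX ring
      * additionally reserves RX_HEAD_ROOM.
      */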
2775 static int
2776 bfe_rings_alloc(bfe_t *bfe)
2777 {
2778         /* TX */
2779         mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2780         bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock;
2781         bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2782             VLAN_TAGSZ + ETHERFCSL;
2783         bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC;
2784         bfe->bfe_tx_ring.r_bfe = bfe;
2785         bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC;
2786 
2787         /* RX */
2788         mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2789         bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock;
2790         bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2791             VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM;
2792         bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC;
2793         bfe->bfe_rx_ring.r_bfe = bfe;
2794         bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC;
2795 
2796         /* Allocate TX Ring */
2797         if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring,
2798             DDI_DMA_WRITE) != DDI_SUCCESS)
2799                 return (DDI_FAILURE);
2800 
2801         /* Allocate RX Ring */
2802         if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring,
2803             DDI_DMA_READ) != DDI_SUCCESS) {
2804                 cmn_err(CE_NOTE, "RX ring allocation failed");
2805                 bfe_ring_desc_free(&bfe->bfe_tx_ring);
2806                 return (DDI_FAILURE);
2807         }
2808 
2809         bfe->bfe_tx_ring.r_flags = BFE_RING_ALLOCATED;
2810         bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;
2811 
2812         return (DDI_SUCCESS);
2813 }
2814 
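     /*
      * DDI_RESUME handler: reinitialize the driver state and restart the
      * chip after a suspend.
      */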
2815 static int
2816 bfe_resume(dev_info_t *dip)
2817 {
2818         bfe_t *bfe;
2819         int err = DDI_SUCCESS;
2820 
2821         if ((bfe = ddi_get_driver_private(dip)) == NULL) {
2822                 bfe_error(dip, "Unexpected error (no driver private data)"
2823                     " while resuming");
2824                 return (DDI_FAILURE);
2825         }
2826 
2827         /*
2828          * Grab all the locks first.
2829          */
2830         bfe_grab_locks(bfe);
2831         bfe->bfe_chip_state = BFE_CHIP_RESUME;
2832 
2833         bfe_init_vars(bfe);
2834         /* PHY will also start running */
2835         bfe_chip_reset(bfe);
2836         if (bfe_chip_start(bfe) == DDI_FAILURE) {
2837                 bfe_error(dip, "Could not resume chip");
2838                 err = DDI_FAILURE;
2839         }
2840 
2841         bfe_release_locks(bfe);
2842 
2843         if (err == DDI_SUCCESS)
2844                 mac_tx_update(bfe->bfe_machdl);
2845 
2846         return (err);
2847 }
2848 
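     /*
      * attach(9E) entry point. For DDI_ATTACH: set up PCI config access, map
      * the device registers, read the station address, register with the MAC
      * layer, add the interrupt handler and allocate the TX/RX rings.
      */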
2849 static int
2850 bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2851 {
2852         int     unit;
2853         bfe_t   *bfe;
2854         mac_register_t  *macreg;
2855         int     ret;
2856 
2857         switch (cmd) {
2858         case DDI_RESUME:
2859                 return (bfe_resume(dip));
2860 
2861         case DDI_ATTACH:
2862                 break;
2863 
2864         default:
2865                 return (DDI_FAILURE);
2866         }
2867 
2868 
2869         unit = ddi_get_instance(dip);
2870 
2871         bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
2872         bfe->bfe_dip = dip;
2873         bfe->bfe_unit = unit;
2874 
2875         if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
2876                 bfe_error(dip, "pci_config_setup failed");
2877                 goto fail0;
2878         }
2879 
2880         /*
2881          * Enable I/O space, Bus Master and Memory Space accesses.
2882          */
2883         ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM);
2884         pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM,
2885             PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret);
2886 
2887         ddi_set_driver_private(dip, bfe);
2888 
2889         /* Identify hardware */
2890         if (bfe_identify_hardware(bfe) == BFE_FAILURE) {
2891                 bfe_error(dip, "Could not identify device");
2892                 goto fail1;
2893         }
2894 
2895         if (bfe_regs_map(bfe) != DDI_SUCCESS) {
2896                 bfe_error(dip, "Could not map device registers");
2897                 goto fail1;
2898         }
2899 
2900         (void) bfe_get_chip_config(bfe);
2901 
2902         /*
2903          * Register with MAC layer
2904          */
2905         if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
2906                 bfe_error(dip, "mac_alloc() failed");
2907                 goto fail2;
2908         }
2909 
2910         macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2911         macreg->m_driver = bfe;
2912         macreg->m_dip = dip;
2913         macreg->m_instance = unit;
2914         macreg->m_src_addr = bfe->bfe_ether_addr;
2915         macreg->m_callbacks = &bfe_mac_callbacks;
2916         macreg->m_min_sdu = 0;
2917         macreg->m_max_sdu = ETHERMTU;
2918         macreg->m_margin = VLAN_TAGSZ;
2919 
2920         if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) {
2921                 bfe_error(dip, "mac_register() failed with %d error", ret);
2922                 mac_free(macreg);
2923                 goto fail2;
2924         }
2925 
2926         mac_free(macreg);
2927 
2928         rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER,
2929             DDI_INTR_PRI(bfe->bfe_intrpri));
2930 
2931         if (bfe_add_intr(bfe) != DDI_SUCCESS) {
2932                 bfe_error(dip, "Could not add interrupt");
2933                 goto fail3;
2934         }
2935 
2936         if (bfe_rings_alloc(bfe) != DDI_SUCCESS) {
2937                 bfe_error(dip, "Could not allocate TX/RX Ring");
2938                 goto fail4;
2939         }
2940 
2941         /* Init and then reset the chip */
2942         bfe->bfe_chip_action = 0;
2943         bfe_init_vars(bfe);
2944 
2945         /* PHY will also start running */
2946         bfe_chip_reset(bfe);
2947 
2948         /*
2949          * Although we enable the interrupt handler here, the chip's interrupts
2950          * are not enabled yet; they are enabled once the interface is plumbed.
2951          */
2952         if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
2953                 bfe_error(dip, "Could not enable interrupt");
2954                 goto fail4;
2955         }
2956 
2957         return (DDI_SUCCESS);
2958 
2959 fail4:
2960         bfe_remove_intr(bfe);
2961 fail3:
2962         (void) mac_unregister(bfe->bfe_machdl);
2963 fail2:
2964         bfe_unmap_regs(bfe);
2965 fail1:
2966         pci_config_teardown(&bfe->bfe_conf_handle);
2967 fail0:
2968         kmem_free(bfe, sizeof (bfe_t));
2969         return (DDI_FAILURE);
2970 }
2971 
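     /*
      * detach(9E) entry point. DDI_DETACH tears everything down; DDI_SUSPEND
      * halts the chip and the PHY but keeps driver state for a later resume.
      */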
2972 static int
2973 bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2974 {
2975         bfe_t *bfe;
2976 
2977         bfe = ddi_get_driver_private(devinfo);
2978 
2979         switch (cmd) {
2980         case DDI_DETACH:
2981                 /*
2982                  * We need to stop the timer before grabbing locks, otherwise
2983                  * we can end up deadlocking with untimeout.
2984                  */
2985                 bfe_stop_timer(bfe);
2986 
2987                 /*
2988                  * First unregister with MAC layer before stopping DMA
2989                  * engine.
2990                  */
2991                 if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
2992                         return (DDI_FAILURE);
2993 
2994                 bfe->bfe_machdl = NULL;
2995 
2996                 /*
2997                  * Quiesce the chip first.
2998                  */
2999                 bfe_grab_locks(bfe);
3000                 bfe_chip_halt(bfe);
3001                 bfe_stop_phy(bfe);
3002                 bfe_release_locks(bfe);
3003 
3004                 (void) ddi_intr_disable(bfe->bfe_intrhdl);
3005 
3006                 /* Make sure timer is gone. */
3007                 bfe_stop_timer(bfe);
3008 
3009                 /*
3010                  * Free the DMA resources for the buffers and then the descriptors.
3011                  */
3012                 if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
3013                         /* TX */
3014                         bfe_buffer_free(&bfe->bfe_tx_ring);
3015                         bfe_ring_desc_free(&bfe->bfe_tx_ring);
3016                 }
3017 
3018                 if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
3019                         /* RX */
3020                         bfe_buffer_free(&bfe->bfe_rx_ring);
3021                         bfe_ring_desc_free(&bfe->bfe_rx_ring);
3022                 }
3023 
3024                 bfe_remove_intr(bfe);
3025                 bfe_unmap_regs(bfe);
3026                 pci_config_teardown(&bfe->bfe_conf_handle);
3027 
3028                 mutex_destroy(&bfe->bfe_tx_ring.r_lock);
3029                 mutex_destroy(&bfe->bfe_rx_ring.r_lock);
3030                 rw_destroy(&bfe->bfe_rwlock);
3031 
3032                 kmem_free(bfe, sizeof (bfe_t));
3033 
3034                 ddi_set_driver_private(devinfo, NULL);
3035                 return (DDI_SUCCESS);
3036 
3037         case DDI_SUSPEND:
3038                 /*
3039                  * We need to stop the timer before grabbing locks, otherwise
3040                  * we can end up deadlocking with untimeout.
3041                  */
3042                 bfe_stop_timer(bfe);
3043 
3044                 /*
3045                  * Grab all the locks first.
3046                  */
3047                 bfe_grab_locks(bfe);
3048                 bfe_chip_halt(bfe);
3049                 bfe_stop_phy(bfe);
3050                 bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
3051                 bfe_release_locks(bfe);
3052 
3053                 return (DDI_SUCCESS);
3054 
3055         default:
3056                 return (DDI_FAILURE);
3057         }
3058 }
3059 
3060 /*
3061  * Quiesce the card for fast reboot
3062  */
3063 int
3064 bfe_quiesce(dev_info_t *dev_info)
3065 {
3066         bfe_t *bfe;
3067 
3068         bfe = ddi_get_driver_private(dev_info);
3069 
3070         bfe_chip_halt(bfe);
3071         bfe_stop_phy(bfe);
3072         bfe->bfe_chip_state = BFE_CHIP_QUIESCED;
3073 
3074         return (DDI_SUCCESS);
3075 }
3076 
3077 static struct cb_ops bfe_cb_ops = {
3078         nulldev,                /* cb_open */
3079         nulldev,                /* cb_close */
3080         nodev,                  /* cb_strategy */
3081         nodev,                  /* cb_print */
3082         nodev,                  /* cb_dump */
3083         nodev,                  /* cb_read */
3084         nodev,                  /* cb_write */
3085         nodev,                  /* cb_ioctl */
3086         nodev,                  /* cb_devmap */
3087         nodev,                  /* cb_mmap */
3088         nodev,                  /* cb_segmap */
3089         nochpoll,               /* cb_chpoll */
3090         ddi_prop_op,            /* cb_prop_op */
3091         NULL,                   /* cb_stream */
3092         D_MP | D_HOTPLUG,       /* cb_flag */
3093         CB_REV,                 /* cb_rev */
3094         nodev,                  /* cb_aread */
3095         nodev                   /* cb_awrite */
3096 };
3097 
3098 static struct dev_ops bfe_dev_ops = {
3099         DEVO_REV,       /* devo_rev */
3100         0,              /* devo_refcnt */
3101         NULL,           /* devo_getinfo */
3102         nulldev,        /* devo_identify */
3103         nulldev,        /* devo_probe */
3104         bfe_attach,     /* devo_attach */
3105         bfe_detach,     /* devo_detach */
3106         nodev,          /* devo_reset */
3107         &bfe_cb_ops,        /* devo_cb_ops */
3108         NULL,           /* devo_bus_ops */
3109         ddi_power,      /* devo_power */
3110         bfe_quiesce     /* devo_quiesce */
3111 };
3112 
3113 static struct modldrv bfe_modldrv = {
3114         &mod_driverops,
3115         bfe_ident,
3116         &bfe_dev_ops
3117 };
3118 
3119 static struct modlinkage modlinkage = {
3120         MODREV_1, (void *)&bfe_modldrv, NULL
3121 };
3122 
3123 int
3124 _info(struct modinfo *modinfop)
3125 {
3126         return (mod_info(&modlinkage, modinfop));
3127 }
3128 
3129 int
3130 _init(void)
3131 {
3132         int     status;
3133 
3134         mac_init_ops(&bfe_dev_ops, MODULE_NAME);
3135         status = mod_install(&modlinkage);
3136         if (status == DDI_FAILURE)
3137                 mac_fini_ops(&bfe_dev_ops);
3138         return (status);
3139 }
3140 
3141 int
3142 _fini(void)
3143 {
3144         int status;
3145 
3146         status = mod_remove(&modlinkage);
3147         if (status == 0) {
3148                 mac_fini_ops(&bfe_dev_ops);
3149         }
3150         return (status);
3151 }