6064 ixgbe needs X550 support


  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  28  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  29  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
  30  * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
  31  * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.

  32  */
  33 
  34 #include "ixgbe_sw.h"
  35 
  36 static char ixgbe_ident[] = "Intel 10Gb Ethernet";

  37 static char ixgbe_version[] = "ixgbe 1.1.7";
  38 
  39 /*
  40  * Local function prototypes
  41  */
  42 static int ixgbe_register_mac(ixgbe_t *);
  43 static int ixgbe_identify_hardware(ixgbe_t *);
  44 static int ixgbe_regs_map(ixgbe_t *);
  45 static void ixgbe_init_properties(ixgbe_t *);
  46 static int ixgbe_init_driver_settings(ixgbe_t *);
  47 static void ixgbe_init_locks(ixgbe_t *);
  48 static void ixgbe_destroy_locks(ixgbe_t *);
  49 static int ixgbe_init(ixgbe_t *);
  50 static int ixgbe_chip_start(ixgbe_t *);
  51 static void ixgbe_chip_stop(ixgbe_t *);
  52 static int ixgbe_reset(ixgbe_t *);
  53 static void ixgbe_tx_clean(ixgbe_t *);
  54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
  55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
  56 static int ixgbe_alloc_rings(ixgbe_t *);


 297 };
 298 
 299 static adapter_info_t ixgbe_X540_cap = {
 300         128,            /* maximum number of rx queues */
 301         1,              /* minimum number of rx queues */
 302         128,            /* default number of rx queues */
 303         64,             /* maximum number of rx groups */
 304         1,              /* minimum number of rx groups */
 305         1,              /* default number of rx groups */
 306         128,            /* maximum number of tx queues */
 307         1,              /* minimum number of tx queues */
 308         8,              /* default number of tx queues */
 309         15500,          /* maximum MTU size */
 310         0xFF8,          /* maximum interrupt throttle rate */
 311         0,              /* minimum interrupt throttle rate */
 312         200,            /* default interrupt throttle rate */
 313         64,             /* maximum total msix vectors */
 314         16,             /* maximum number of ring vectors */
 315         2,              /* maximum number of other vectors */
 316         (IXGBE_EICR_LSC
 317         | IXGBE_EICR_GPI_SDP1
 318         | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
 319 
 320         (IXGBE_SDP1_GPIEN
 321         | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
 322 
 323         (IXGBE_FLAG_DCA_CAPABLE
 324         | IXGBE_FLAG_RSS_CAPABLE
 325         | IXGBE_FLAG_VMDQ_CAPABLE
 326         | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
 327 };
 328 

 329 /*
 330  * Module Initialization Functions.
 331  */
 332 
 333 int
 334 _init(void)
 335 {
 336         int status;
 337 
 338         mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
 339 
 340         status = mod_install(&ixgbe_modlinkage);
 341 
 342         if (status != DDI_SUCCESS) {
 343                 mac_fini_ops(&ixgbe_dev_ops);
 344         }
 345 
 346         return (status);
 347 }
 348 


 588         ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
 589 
 590         ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
 591             IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
 592         if (ixgbe->periodic_id == 0) {
 593                 ixgbe_error(ixgbe, "Failed to add the link check timer");
 594                 goto attach_fail;
 595         }
 596         ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
 597 
 598         /*
 599          * Now that mutex locks are initialized, and the chip is also
 600          * initialized, enable interrupts.
 601          */
 602         if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
 603                 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
 604                 goto attach_fail;
 605         }
 606         ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
 607 
 608         ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
 609         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
 610 
 611         return (DDI_SUCCESS);
 612 
 613 attach_fail:
 614         ixgbe_unconfigure(devinfo, ixgbe);
 615         return (DDI_FAILURE);
 616 }
 617 
 618 /*
 619  * ixgbe_detach - Driver detach.
 620  *
 621  * The detach() function is the complement of the attach routine.
 622  * If cmd is set to DDI_DETACH, detach() is used to remove  the
 623  * state  associated  with  a  given  instance of a device node
 624  * prior to the removal of that instance from the system.
 625  *
 626  * The detach() function will be called once for each  instance
 627  * of the device for which there has been a successful attach()
 628  * once there are no longer  any  opens  on  the  device.


 940         case ixgbe_mac_82599EB:
 941                 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
 942                 ixgbe->capab = &ixgbe_82599eb_cap;
 943 
 944                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
 945                         ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
 946                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
 947                         ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
 948                 }
 949                 break;
 950 
 951         case ixgbe_mac_X540:
 952                 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
 953                 ixgbe->capab = &ixgbe_X540_cap;
 954                 /*
 955                  * For now, X540 is all set in its capab structure.
 956                  * As other X540 variants show up, things can change here.
 957                  */
 958                 break;
 959 

 960         default:
 961                 IXGBE_DEBUGLOG_1(ixgbe,
 962                     "adapter not supported in ixgbe_identify_hardware(): %d\n",
 963                     hw->mac.type);
 964                 return (IXGBE_FAILURE);
 965         }
 966 
 967         return (IXGBE_SUCCESS);
 968 }
 969 
 970 /*
 971  * ixgbe_regs_map - Map the device registers.
 972  *
 973  */
 974 static int
 975 ixgbe_regs_map(ixgbe_t *ixgbe)
 976 {
 977         dev_info_t *devinfo = ixgbe->dip;
 978         struct ixgbe_hw *hw = &ixgbe->hw;
 979         struct ixgbe_osdep *osdep = &ixgbe->osdep;


2174 
2175         /*
2176          * Setup head & tail pointers
2177          */
2178         IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2179             rx_data->ring_size - 1);
2180         IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2181 
2182         rx_data->rbd_next = 0;
2183         rx_data->lro_first = 0;
2184 
2185         /*
2186          * Setup the Receive Descriptor Control Register (RXDCTL)
2187          * PTHRESH=32 descriptors (half the internal cache)
2188          * HTHRESH=0 descriptors (to minimize latency on fetch)
2189          * WTHRESH defaults to 1 (writeback each descriptor)
2190          */
2191         reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2192         reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2193 
2194         /* Not a valid value for 82599 or X540 */
2195         if (hw->mac.type == ixgbe_mac_82598EB) {
2196                 reg_val |= 0x0020;      /* pthresh */
2197         }
2198         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2199 
2200         if (hw->mac.type == ixgbe_mac_82599EB ||
2201             hw->mac.type == ixgbe_mac_X540) {


2202                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2203                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2204                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2205         }
2206 
2207         /*
2208          * Setup the Split and Replication Receive Control Register.
2209          * Set the rx buffer size and the advanced descriptor type.
2210          */
2211         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2212             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2213         reg_val |= IXGBE_SRRCTL_DROP_EN;
2214         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2215 }
2216 
2217 static void
2218 ixgbe_setup_rx(ixgbe_t *ixgbe)
2219 {
2220         ixgbe_rx_ring_t *rx_ring;
2221         struct ixgbe_hw *hw = &ixgbe->hw;


2479         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2480                 tx_ring = &ixgbe->tx_rings[i];
2481                 ixgbe_setup_tx_ring(tx_ring);
2482         }
2483 
2484         /*
2485          * Setup the per-ring statistics mapping.
2486          */
2487         ring_mapping = 0;
2488         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2489                 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2490                 if ((i & 0x3) == 0x3) {
2491                         switch (hw->mac.type) {
2492                         case ixgbe_mac_82598EB:
2493                                 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2494                                     ring_mapping);
2495                                 break;
2496 
2497                         case ixgbe_mac_82599EB:
2498                         case ixgbe_mac_X540:


2499                                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2500                                     ring_mapping);
2501                                 break;
2502 
2503                         default:
2504                                 break;
2505                         }
2506 
2507                         ring_mapping = 0;
2508                 }
2509         }
2510         if (i & 0x3) {
2511                 switch (hw->mac.type) {
2512                 case ixgbe_mac_82598EB:
2513                         IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2514                         break;
2515 
2516                 case ixgbe_mac_82599EB:
2517                 case ixgbe_mac_X540:


2518                         IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2519                         break;
2520 
2521                 default:
2522                         break;
2523                 }
2524         }
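             /*
              * Worked example, for illustration: each TQSMR/TQSM register
              * packs four 8-bit statistics-map fields, so ring i lands in
              * byte (i & 0x3) of register (i >> 2).  With six tx rings,
              * the loop above writes 0x03020100 to register 0, and the
              * trailing "if (i & 0x3)" flush writes 0x00000504 (rings 4
              * and 5) to register 1.
              */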
2525 
2526         /*
2527          * Enable CRC appending and TX padding (for short tx frames)
2528          */
2529         reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2530         reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2531         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2532 
2533         /*
2534          * enable DMA for 82599 and X540 parts
2535          */
2536         if (hw->mac.type == ixgbe_mac_82599EB ||
2537             hw->mac.type == ixgbe_mac_X540) {


2538                 /* DMATXCTL.TE must be set after all Tx config is complete */
2539                 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2540                 reg_val |= IXGBE_DMATXCTL_TE;
2541                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2542 
2543                 /* Disable arbiter to set MTQC */
2544                 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2545                 reg_val |= IXGBE_RTTDCS_ARBDIS;
2546                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2547                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2548                 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2549                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2550         }
2551 
2552         /*
2553          * Enable tx queues.
2554          * For 82599, this must be done after DMATXCTL.TE is set.
2555          */
2556         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2557                 tx_ring = &ixgbe->tx_rings[i];


2630         struct ixgbe_hw *hw = &ixgbe->hw;
2631         uint32_t vmdctl, i, vtctl;
2632 
2633         /*
2634          * Setup the VMDq Control register, enable VMDq based on
2635          * packet destination MAC address:
2636          */
2637         switch (hw->mac.type) {
2638         case ixgbe_mac_82598EB:
2639                 /*
2640                  * VMDq Enable = 1;
2641                  * VMDq Filter = 0; MAC filtering
2642                  * Default VMDq output index = 0;
2643                  */
2644                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2645                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2646                 break;
2647 
2648         case ixgbe_mac_82599EB:
2649         case ixgbe_mac_X540:


2650                 /*
2651                  * Enable VMDq-only.
2652                  */
2653                 vmdctl = IXGBE_MRQC_VMDQEN;
2654                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2655 
2656                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2657                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2658                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2659                 }
2660 
2661                 /*
2662                  * Enable Virtualization and Replication.
2663                  */
2664                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2665                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2666 
2667                 /*
2668                  * Enable receiving packets to all VFs
2669                  */


2724                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2725                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2726                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2727                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2728                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2729                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2730                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2731                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2732 
2733                 /*
2734                  * Enable and Setup VMDq
2735                  * VMDq Filter = 0; MAC filtering
2736                  * Default VMDq output index = 0;
2737                  */
2738                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2739                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2740                 break;
2741 
2742         case ixgbe_mac_82599EB:
2743         case ixgbe_mac_X540:


2744                 /*
2745                  * Enable RSS & Setup RSS Hash functions
2746                  */
2747                 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2748                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2749                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2750                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2751                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2752                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2753                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2754                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2755                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2756 
2757                 /*
2758                  * Enable VMDq+RSS.
2759                  */
2760                 if (ixgbe->num_rx_groups > 32)  {
2761                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2762                 } else {
2763                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;


2770                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2771                 }
2772                 break;
2773 
2774         default:
2775                 break;
2776 
2777         }
2778 
2779         /*
2780          * Disable Packet Checksum to enable RSS for multiple receive queues.
2781          * It is an adapter hardware limitation that Packet Checksum is
2782          * mutually exclusive with RSS.
2783          */
2784         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2785         rxcsum |= IXGBE_RXCSUM_PCSD;
2786         rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2787         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2788 
2789         if (hw->mac.type == ixgbe_mac_82599EB ||
2790             hw->mac.type == ixgbe_mac_X540) {


2791                 /*
2792                  * Enable Virtualization and Replication.
2793                  */
2794                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2795                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2796 
2797                 /*
2798                  * Enable receiving packets to all VFs
2799                  */
2800                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2801                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2802         }
2803 }
2804 
2805 /*
2806  * ixgbe_init_unicst - Initialize the unicast addresses.
2807  */
2808 static void
2809 ixgbe_init_unicst(ixgbe_t *ixgbe)
2810 {


2982                 /*
2983                  * 82598 supports the following combination:
2984                  * vmdq no. x rss no.
2985                  * [5..16]  x 1
2986                  * [1..4]   x [1..16]
2987                  * However, 8 RSS queues per pool (vmdq) are sufficient for
2988                  * most cases.
2989                  */
2990                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2991                 if (ixgbe->num_rx_groups > 4) {
2992                         ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2993                 } else {
2994                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2995                             min(8, ring_per_group);
2996                 }
2997 
2998                 break;
2999 
3000         case ixgbe_mac_82599EB:
3001         case ixgbe_mac_X540:


3002                 /*
3003                  * 82599 supports the following combination:
3004                  * vmdq no. x rss no.
3005                  * [33..64] x [1..2]
3006                  * [2..32]  x [1..4]
3007                  * 1 x [1..16]
3008                  * However, 8 RSS queues per pool (vmdq) are sufficient for
3009                  * most cases.
3010                  *
3011                  * For now, treat X540 like the 82599.
3012                  */
3013                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3014                 if (ixgbe->num_rx_groups == 1) {
3015                         ixgbe->num_rx_rings = min(8, ring_per_group);
3016                 } else if (ixgbe->num_rx_groups <= 32) {
3017                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3018                             min(4, ring_per_group);
3019                 } else if (ixgbe->num_rx_groups <= 64) {
3020                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3021                             min(2, ring_per_group);
3022                 }
3023                 break;
3024 
3025         default:
3026                 break;
3027         }
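             /*
              * Worked example, for illustration: with num_rx_groups = 8 and
              * num_rx_rings = 64 on an 82599/X540, ring_per_group is 8, the
              * "<= 32 groups" branch applies, and num_rx_rings is trimmed to
              * 8 * min(4, 8) = 32, i.e. 4 RSS rings per VMDq pool.
              */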
3028 
3029         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3030 
3031         if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {


3154          * 1 = force interrupt type MSI-X
3155          * 2 = force interrupt type MSI
3156          * 3 = force interrupt type Legacy
3157          */
3158         ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3159             IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3160 
3161         ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3162             0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3163         ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3164             0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3165         ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3166             0, 1, DEFAULT_LSO_ENABLE);
3167         ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3168             0, 1, DEFAULT_LRO_ENABLE);
3169         ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3170             0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3171         ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3172             PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3173 
3174         /* Head Write Back not recommended for 82599 and X540 */
3175         if (hw->mac.type == ixgbe_mac_82599EB ||
3176             hw->mac.type == ixgbe_mac_X540) {


3177                 ixgbe->tx_head_wb_enable = B_FALSE;
3178         }
3179 
3180         /*
3181          * ixgbe LSO needs the tx h/w checksum support.
3182          * LSO will be disabled if tx h/w checksum is not
3183          * enabled.
3184          */
3185         if (ixgbe->tx_hcksum_enable == B_FALSE) {
3186                 ixgbe->lso_enable = B_FALSE;
3187         }
3188 
3189         /*
3190          * ixgbe LRO needs the rx h/w checksum support.
3191          * LRO will be disabled if rx h/w checksum is not
3192          * enabled.
3193          */
3194         if (ixgbe->rx_hcksum_enable == B_FALSE) {
3195                 ixgbe->lro_enable = B_FALSE;
3196         }
3197 
3198         /*
3199          * LRO is currently supported only on 82599 and X540.
3200          */
3201         if (hw->mac.type == ixgbe_mac_82598EB) {
3202                 ixgbe->lro_enable = B_FALSE;
3203         }
3204         ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3205             MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3206             DEFAULT_TX_COPY_THRESHOLD);
3207         ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3208             PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3209             MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3210         ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3211             PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3212             MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3213         ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3214             PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3215             MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3216 
3217         ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3218             MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3219             DEFAULT_RX_COPY_THRESHOLD);
3220         ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3221             MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3222             DEFAULT_RX_LIMIT_PER_INTR);
3223 
3224         ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3225             ixgbe->capab->min_intr_throttle,
3226             ixgbe->capab->max_intr_throttle,
3227             ixgbe->capab->def_intr_throttle);
3228         /*
3229          * 82599 and X540 require the interrupt throttling rate to be
3230          * a multiple of 8. This is enforced by the register
3231          * definition.
3232          */
3233         if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)



3234                 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
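             /*
              * Worked example, for illustration: the 0xFF8 mask clears the
              * low three bits, so a configured rate of 1000 (0x3E8) is kept
              * as-is, while values 1001 through 1007 all round down to 1000.
              */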
3235 
3236         hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3237             PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3238 }
3239 
3240 static void
3241 ixgbe_init_params(ixgbe_t *ixgbe)
3242 {
3243         ixgbe->param_en_10000fdx_cap = 1;
3244         ixgbe->param_en_1000fdx_cap = 1;
3245         ixgbe->param_en_100fdx_cap = 1;
3246         ixgbe->param_adv_10000fdx_cap = 1;
3247         ixgbe->param_adv_1000fdx_cap = 1;
3248         ixgbe->param_adv_100fdx_cap = 1;
3249 
3250         ixgbe->param_pause_cap = 1;
3251         ixgbe->param_asym_pause_cap = 1;
3252         ixgbe->param_rem_fault = 0;
3253 


3312         if (ixgbe->param_adv_10000fdx_cap == 1)
3313                 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3314 
3315         if (ixgbe->param_adv_1000fdx_cap == 1)
3316                 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3317 
3318         if (ixgbe->param_adv_100fdx_cap == 1)
3319                 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3320 
3321         if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3322                 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3323                     "to autonegotiation with full link capabilities.");
3324 
3325                 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3326                     IXGBE_LINK_SPEED_1GB_FULL |
3327                     IXGBE_LINK_SPEED_100_FULL;
3328         }
3329 
3330         if (setup_hw) {
3331                 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3332                     ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3333                         ixgbe_notice(ixgbe, "Setup link failed on this "
3334                             "device.");
3335                         return (IXGBE_FAILURE);
3336                 }
3337         }
3338 
3339         return (IXGBE_SUCCESS);
3340 }
3341 
3342 /*
3343  * ixgbe_driver_link_check - Link status processing.
3344  *
3345  * This function can be called in both kernel context and interrupt context
3346  */
3347 static void
3348 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3349 {
3350         struct ixgbe_hw *hw = &ixgbe->hw;
3351         ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3352         boolean_t link_up = B_FALSE;


3405                 ixgbe->eims |= IXGBE_EICR_LSC;
3406                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3407         }
3408 
3409         if (link_changed) {
3410                 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3411         }
3412 }
3413 
3414 /*
3415  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3416  */
3417 static void
3418 ixgbe_sfp_check(void *arg)
3419 {
3420         ixgbe_t *ixgbe = (ixgbe_t *)arg;
3421         uint32_t eicr = ixgbe->eicr;
3422         struct ixgbe_hw *hw = &ixgbe->hw;
3423 
3424         mutex_enter(&ixgbe->gen_lock);
3425         if (eicr & IXGBE_EICR_GPI_SDP1) {
3426                 /* clear the interrupt */
3427                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3428 
3429                 /* if link up, do multispeed fiber setup */
3430                 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3431                     B_TRUE, B_TRUE);
3432                 ixgbe_driver_link_check(ixgbe);
3433                 ixgbe_get_hw_state(ixgbe);
3434         } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3435                 /* clear the interrupt */
3436                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3437 
3438                 /* if link up, do sfp module setup */
3439                 (void) hw->mac.ops.setup_sfp(hw);
3440 
3441                 /* do multispeed fiber setup */
3442                 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3443                     B_TRUE, B_TRUE);
3444                 ixgbe_driver_link_check(ixgbe);
3445                 ixgbe_get_hw_state(ixgbe);
3446         }
3447         mutex_exit(&ixgbe->gen_lock);
3448 
3449         /*
3450          * We need to fully re-check the link later.
3451          */
3452         ixgbe->link_check_complete = B_FALSE;
3453         ixgbe->link_check_hrtime = gethrtime() +
3454             (IXGBE_LINK_UP_TIME * 100000000ULL);
3455 }
3456 
3457 /*
3458  * ixgbe_overtemp_check - overtemp module processing done in taskq
3459  *
3460  * This routine will only be called on adapters with temperature sensor.
3461  * The indication of over-temperature can be either SDP0 interrupt or the link
3462  * status change interrupt.
3463  */
3464 static void
3465 ixgbe_overtemp_check(void *arg)
3466 {
3467         ixgbe_t *ixgbe = (ixgbe_t *)arg;
3468         struct ixgbe_hw *hw = &ixgbe->hw;
3469         uint32_t eicr = ixgbe->eicr;
3470         ixgbe_link_speed speed;
3471         boolean_t link_up;
3472 
3473         mutex_enter(&ixgbe->gen_lock);
3474 
3475         /* make sure we know current state of link */
3476         (void) ixgbe_check_link(hw, &speed, &link_up, false);
3477 
3478         /* check over-temp condition */
3479         if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3480             (eicr & IXGBE_EICR_LSC)) {
3481                 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3482                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3483 
3484                         /*
3485                          * Disable the adapter interrupts
3486                          */
3487                         ixgbe_disable_adapter_interrupts(ixgbe);
3488 
3489                         /*
3490                          * Disable Rx/Tx units
3491                          */
3492                         (void) ixgbe_stop_adapter(hw);
3493 
3494                         ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3495                         ixgbe_error(ixgbe,
3496                             "Problem: Network adapter has been stopped "
3497                             "because it has overheated");
3498                         ixgbe_error(ixgbe,
3499                             "Action: Restart the computer. "


3655 
3656         /*
3657          * Look up the OBP property "local-mac-address?". If the user has set
3658          * 'local-mac-address? = false', use "the system address" instead.
3659          */
3660         if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3661             "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3662                 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3663                         if (localetheraddr(NULL, &sysaddr) != 0) {
3664                                 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3665                                 found = B_TRUE;
3666                         }
3667                 }
3668                 ddi_prop_free(bytes);
3669         }
3670 
3671         /*
3672          * Finally(!), if there's a valid "mac-address" property (created
3673          * if we netbooted from this interface), we must use this instead
3674          * of any of the above to ensure that the NFS/install server doesn't
3675          * get confused by the address changing as Solaris takes over!
3676          */
3677         err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3678             DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3679         if (err == DDI_PROP_SUCCESS) {
3680                 if (nelts == ETHERADDRL) {
3681                         while (nelts--)
3682                                 hw->mac.addr[nelts] = bytes[nelts];
3683                         found = B_TRUE;
3684                 }
3685                 ddi_prop_free(bytes);
3686         }
3687 
3688         if (found) {
3689                 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3690                 return (B_TRUE);
3691         }
3692 #else
3693         _NOTE(ARGUNUSED(ixgbe));
3694 #endif
3695 


3845          */
3846         if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3847                 /* enable autoclear but not on bits 29:20 */
3848                 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3849 
3850                 /* general purpose interrupt enable */
3851                 gpie |= (IXGBE_GPIE_MSIX_MODE
3852                     | IXGBE_GPIE_PBA_SUPPORT
3853                     | IXGBE_GPIE_OCD
3854                     | IXGBE_GPIE_EIAME);
3855         /*
3856          * non-msi-x mode
3857          */
3858         } else {
3859 
3860                 /* disable autoclear, leave gpie at default */
3861                 eiac = 0;
3862 
3863                 /*
3864                  * General purpose interrupt enable.
3865                  * For 82599 or X540, extended interrupt automask enable
3866                  * only in MSI or MSI-X mode
3867                  */
3868                 if ((hw->mac.type == ixgbe_mac_82598EB) ||
3869                     (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3870                         gpie |= IXGBE_GPIE_EIAME;
3871                 }
3872         }
3873 
3874         /* Enable specific "other" interrupt types */
3875         switch (hw->mac.type) {
3876         case ixgbe_mac_82598EB:
3877                 gpie |= ixgbe->capab->other_gpie;
3878                 break;
3879 
3880         case ixgbe_mac_82599EB:
3881         case ixgbe_mac_X540:


3882                 gpie |= ixgbe->capab->other_gpie;
3883 
3884                 /* Enable RSC Delay 8us when LRO enabled  */
3885                 if (ixgbe->lro_enable) {
3886                         gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3887                 }
3888                 break;
3889 
3890         default:
3891                 break;
3892         }
3893 
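             /*
              * Note, for illustration: the writes below load the mask of
              * enabled causes into EIMS, the auto-clear selection computed
              * above into EIAC, the auto-mask selection into EIAM, and the
              * MSI-X mode / SDP enables into GPIE.
              */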
3894         /* write to interrupt control registers */
3895         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3896         IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3897         IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3898         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3899         IXGBE_WRITE_FLUSH(hw);
3900 }
3901 


4056                     &atlas);
4057                 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4058                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4059                     atlas);
4060 
4061                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4062                     &atlas);
4063                 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4064                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4065                     atlas);
4066 
4067                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4068                     &atlas);
4069                 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4070                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4071                     atlas);
4072                 break;
4073 
4074         case ixgbe_mac_82599EB:
4075         case ixgbe_mac_X540:


4076                 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4077                 reg |= (IXGBE_AUTOC_FLU |
4078                     IXGBE_AUTOC_10G_KX4);
4079                 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4080 
4081                 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4082                     B_FALSE, B_TRUE);
4083                 break;
4084 
4085         default:
4086                 break;
4087         }
4088 }
4089 
4090 #pragma inline(ixgbe_intr_rx_work)
4091 /*
4092  * ixgbe_intr_rx_work - RX processing of ISR.
4093  */
4094 static void
4095 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4096 {
4097         mblk_t *mp;
4098 
4099         mutex_enter(&rx_ring->rx_lock);
4100 
4101         mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4102         mutex_exit(&rx_ring->rx_lock);


4122 
4123         /*
4124          * Schedule the re-transmit
4125          */
4126         if (tx_ring->reschedule &&
4127             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4128                 tx_ring->reschedule = B_FALSE;
4129                 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4130                     tx_ring->ring_handle);
4131                 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4132         }
4133 }
4134 
4135 #pragma inline(ixgbe_intr_other_work)
4136 /*
4137  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4138  */
4139 static void
4140 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4141 {


4142         ASSERT(mutex_owned(&ixgbe->gen_lock));
4143 
4144         /*
4145          * handle link status change
4146          */
4147         if (eicr & IXGBE_EICR_LSC) {
4148                 ixgbe_driver_link_check(ixgbe);
4149                 ixgbe_get_hw_state(ixgbe);
4150         }
4151 
4152         /*
4153          * check for fan failure on adapters with fans
4154          */
4155         if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4156             (eicr & IXGBE_EICR_GPI_SDP1)) {
4157                 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4158 
4159                 /*
4160                  * Disable the adapter interrupts
4161                  */
4162                 ixgbe_disable_adapter_interrupts(ixgbe);
4163 
4164                 /*
4165                  * Disable Rx/Tx units
4166                  */
4167                 (void) ixgbe_stop_adapter(&ixgbe->hw);
4168 
4169                 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4170                 ixgbe_error(ixgbe,
4171                     "Problem: Network adapter has been stopped "
4172                     "because the fan has stopped.\n");
4173                 ixgbe_error(ixgbe,
4174                     "Action: Replace the adapter.\n");
4175 
4176                 /* re-enable the interrupt, which was automasked */
4177                 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4178         }
4179 
4180         /*
4181          * Do SFP check for adapters with hot-plug capability
4182          */
4183         if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4184             ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {

4185                 ixgbe->eicr = eicr;
4186                 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4187                     ixgbe_sfp_check, (void *)ixgbe,
4188                     DDI_NOSLEEP)) != DDI_SUCCESS) {
4189                         ixgbe_log(ixgbe, "No memory available to dispatch "
4190                             "taskq for SFP check");
4191                 }
4192         }
4193 
4194         /*
4195          * Do over-temperature check for adapters with temp sensor
4196          */
4197         if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4198             ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4199                 ixgbe->eicr = eicr;
4200                 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4201                     ixgbe_overtemp_check, (void *)ixgbe,
4202                     DDI_NOSLEEP)) != DDI_SUCCESS) {
4203                         ixgbe_log(ixgbe, "No memory available to dispatch "
4204                             "taskq for overtemp check");
4205                 }
4206         }
4207 }
4208 
4209 /*
4210  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4211  */
4212 static uint_t
4213 ixgbe_intr_legacy(void *arg1, void *arg2)
4214 {
4215         ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4216         struct ixgbe_hw *hw = &ixgbe->hw;
4217         ixgbe_tx_ring_t *tx_ring;
4218         ixgbe_rx_ring_t *rx_ring;


4275                          */
4276                         tx_ring = &ixgbe->tx_rings[0];
4277                         tx_ring->tx_recycle(tx_ring);
4278 
4279                         /*
4280                          * Schedule the re-transmit
4281                          */
4282                         tx_reschedule = (tx_ring->reschedule &&
4283                             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4284                 }
4285 
4286                 /* any interrupt type other than tx/rx */
4287                 if (eicr & ixgbe->capab->other_intr) {
4288                         switch (hw->mac.type) {
4289                         case ixgbe_mac_82598EB:
4290                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4291                                 break;
4292 
4293                         case ixgbe_mac_82599EB:
4294                         case ixgbe_mac_X540:


4295                                 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4296                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4297                                 break;
4298 
4299                         default:
4300                                 break;
4301                         }
4302                         ixgbe_intr_other_work(ixgbe, eicr);
4303                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4304                 }
4305 
4306                 mutex_exit(&ixgbe->gen_lock);
4307 
4308                 result = DDI_INTR_CLAIMED;
4309         } else {
4310                 mutex_exit(&ixgbe->gen_lock);
4311 
4312                 /*
4313                  * No interrupt cause bits set: don't claim this interrupt.
4314                  */


4369                 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4370         }
4371 
4372         /*
4373          * For MSI interrupt, tx rings[0] will use RTxQ[1].
4374          */
4375         if (eicr & 0x2) {
4376                 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4377         }
4378 
4379         /* any interrupt type other than tx/rx */
4380         if (eicr & ixgbe->capab->other_intr) {
4381                 mutex_enter(&ixgbe->gen_lock);
4382                 switch (hw->mac.type) {
4383                 case ixgbe_mac_82598EB:
4384                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4385                         break;
4386 
4387                 case ixgbe_mac_82599EB:
4388                 case ixgbe_mac_X540:


4389                         ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4390                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4391                         break;
4392 
4393                 default:
4394                         break;
4395                 }
4396                 ixgbe_intr_other_work(ixgbe, eicr);
4397                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4398                 mutex_exit(&ixgbe->gen_lock);
4399         }
4400 
4401         /* re-enable the interrupts which were automasked */
4402         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4403 
4404         return (DDI_INTR_CLAIMED);
4405 }
4406 
4407 /*
4408  * ixgbe_intr_msix - Interrupt handler for MSI-X.


4449                     DDI_FM_OK) {
4450                         ddi_fm_service_impact(ixgbe->dip,
4451                             DDI_SERVICE_DEGRADED);
4452                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4453                         return (DDI_INTR_CLAIMED);
4454                 }
4455 
4456                 /*
4457                  * Check "other" cause bits: any interrupt type other than tx/rx
4458                  */
4459                 if (eicr & ixgbe->capab->other_intr) {
4460                         mutex_enter(&ixgbe->gen_lock);
4461                         switch (hw->mac.type) {
4462                         case ixgbe_mac_82598EB:
4463                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4464                                 ixgbe_intr_other_work(ixgbe, eicr);
4465                                 break;
4466 
4467                         case ixgbe_mac_82599EB:
4468                         case ixgbe_mac_X540:


4469                                 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4470                                 ixgbe_intr_other_work(ixgbe, eicr);
4471                                 break;
4472 
4473                         default:
4474                                 break;
4475                         }
4476                         mutex_exit(&ixgbe->gen_lock);
4477                 }
4478 
4479                 /* re-enable the interrupts which were automasked */
4480                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4481         }
4482 
4483         return (DDI_INTR_CLAIMED);
4484 }
4485 
4486 /*
4487  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4488  *


4850     int8_t cause)
4851 {
4852         struct ixgbe_hw *hw = &ixgbe->hw;
4853         u32 ivar, index;
4854 
4855         switch (hw->mac.type) {
4856         case ixgbe_mac_82598EB:
4857                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4858                 if (cause == -1) {
4859                         cause = 0;
4860                 }
4861                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4862                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4863                 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4864                 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4865                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4866                 break;
4867 
4868         case ixgbe_mac_82599EB:
4869         case ixgbe_mac_X540:


4870                 if (cause == -1) {
4871                         /* other causes */
4872                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4873                         index = (intr_alloc_entry & 1) * 8;
4874                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4875                         ivar &= ~(0xFF << index);
4876                         ivar |= (msix_vector << index);
4877                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4878                 } else {
4879                         /* tx or rx causes */
4880                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4881                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4882                         ivar = IXGBE_READ_REG(hw,
4883                             IXGBE_IVAR(intr_alloc_entry >> 1));
4884                         ivar &= ~(0xFF << index);
4885                         ivar |= (msix_vector << index);
4886                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4887                             ivar);
4888                 }
4889                 break;
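                             /*
                              * Worked example, for illustration: for an rx
                              * cause (cause = 0) on queue entry 5, index is
                              * 16 * (5 & 1) + 8 * 0 = 16, so the vector byte
                              * lands in bits 23:16 of IVAR(5 >> 1) = IVAR(2);
                              * the tx cause (cause = 1) for the same entry
                              * lands in bits 31:24 of the same register.
                              */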


4904 static void
4905 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4906 {
4907         struct ixgbe_hw *hw = &ixgbe->hw;
4908         u32 ivar, index;
4909 
4910         switch (hw->mac.type) {
4911         case ixgbe_mac_82598EB:
4912                 if (cause == -1) {
4913                         cause = 0;
4914                 }
4915                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4916                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4917                 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4918                     (intr_alloc_entry & 0x3)));
4919                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4920                 break;
4921 
4922         case ixgbe_mac_82599EB:
4923         case ixgbe_mac_X540:


4924                 if (cause == -1) {
4925                         /* other causes */
4926                         index = (intr_alloc_entry & 1) * 8;
4927                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4928                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4929                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4930                 } else {
4931                         /* tx or rx causes */
4932                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4933                         ivar = IXGBE_READ_REG(hw,
4934                             IXGBE_IVAR(intr_alloc_entry >> 1));
4935                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4936                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4937                             ivar);
4938                 }
4939                 break;
4940 
4941         default:
4942                 break;
4943         }


4954 static void
4955 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4956 {
4957         struct ixgbe_hw *hw = &ixgbe->hw;
4958         u32 ivar, index;
4959 
4960         switch (hw->mac.type) {
4961         case ixgbe_mac_82598EB:
4962                 if (cause == -1) {
4963                         cause = 0;
4964                 }
4965                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4966                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4967                 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4968                     (intr_alloc_entry & 0x3)));
4969                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4970                 break;
4971 
4972         case ixgbe_mac_82599EB:
4973         case ixgbe_mac_X540:


4974                 if (cause == -1) {
4975                         /* other causes */
4976                         index = (intr_alloc_entry & 1) * 8;
4977                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4978                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4979                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4980                 } else {
4981                         /* tx or rx causes */
4982                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4983                         ivar = IXGBE_READ_REG(hw,
4984                             IXGBE_IVAR(intr_alloc_entry >> 1));
4985                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4986                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4987                             ivar);
4988                 }
4989                 break;
4990 
4991         default:
4992                 break;
4993         }


4997  * Convert the rx ring index driver maintained to the rx ring index
4998  * in h/w.
4999  */
5000 static uint32_t
5001 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5002 {
5003 
5004         struct ixgbe_hw *hw = &ixgbe->hw;
5005         uint32_t rx_ring_per_group, hw_rx_index;
5006 
5007         if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5008             ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5009                 return (sw_rx_index);
5010         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5011                 switch (hw->mac.type) {
5012                 case ixgbe_mac_82598EB:
5013                         return (sw_rx_index);
5014 
5015                 case ixgbe_mac_82599EB:
5016                 case ixgbe_mac_X540:


5017                         return (sw_rx_index * 2);
5018 
5019                 default:
5020                         break;
5021                 }
5022         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5023                 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5024 
5025                 switch (hw->mac.type) {
5026                 case ixgbe_mac_82598EB:
5027                         hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5028                             16 + (sw_rx_index % rx_ring_per_group);
5029                         return (hw_rx_index);
5030 
5031                 case ixgbe_mac_82599EB:
5032                 case ixgbe_mac_X540:


5033                         if (ixgbe->num_rx_groups > 32) {
5034                                 hw_rx_index = (sw_rx_index /
5035                                     rx_ring_per_group) * 2 +
5036                                     (sw_rx_index % rx_ring_per_group);
5037                         } else {
5038                                 hw_rx_index = (sw_rx_index /
5039                                     rx_ring_per_group) * 4 +
5040                                     (sw_rx_index % rx_ring_per_group);
5041                         }
5042                         return (hw_rx_index);
5043 
5044                 default:
5045                         break;
5046                 }
5047         }
5048 
5049         /*
5050          * Should never be reached. Present only to satisfy the compiler.
5051          */
5052         return (sw_rx_index);
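             /*
              * Worked example, for illustration: in VMDQ_RSS mode on an
              * 82599/X540 with 16 rx groups (<= 32, so 4 hardware rings per
              * pool) and 2 software rings per group, software ring 5 maps to
              * hardware ring (5 / 2) * 4 + (5 % 2) = 9.
              */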


5118 static void
5119 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5120 {
5121         struct ixgbe_hw *hw = &ixgbe->hw;
5122         ixgbe_intr_vector_t *vect;      /* vector bitmap */
5123         int r_idx;      /* ring index */
5124         int v_idx;      /* vector index */
5125         uint32_t hw_index;
5126 
5127         /*
5128          * Clear any previous entries
5129          */
5130         switch (hw->mac.type) {
5131         case ixgbe_mac_82598EB:
5132                 for (v_idx = 0; v_idx < 25; v_idx++)
5133                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5134                 break;
5135 
5136         case ixgbe_mac_82599EB:
5137         case ixgbe_mac_X540:


5138                 for (v_idx = 0; v_idx < 64; v_idx++)
5139                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5140                 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5141                 break;
5142 
5143         default:
5144                 break;
5145         }
5146 
5147         /*
5148          * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5149          * tx rings[0] will use RTxQ[1].
5150          */
5151         if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5152                 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5153                 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5154                 return;
5155         }
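             /*
              * Note, for illustration: the two calls above bind rx ring 0 to
              * RTxQ[0] and tx ring 0 to RTxQ[1]; this matches the MSI
              * handler, which tests EICR bit 0x2 for tx ring 0 work (and,
              * correspondingly, bit 0x1 for rx).
              */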
5156 
5157         /*




  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  28  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  29  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
  30  * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
  31  * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
  32  * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
  33  */
  34 
  35 #include "ixgbe_sw.h"
  36 
  37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
  38 /* LINTED E_STATIC_UNUSED */
  39 static char ixgbe_version[] = "ixgbe 1.1.7";
  40 
  41 /*
  42  * Local function prototypes
  43  */
  44 static int ixgbe_register_mac(ixgbe_t *);
  45 static int ixgbe_identify_hardware(ixgbe_t *);
  46 static int ixgbe_regs_map(ixgbe_t *);
  47 static void ixgbe_init_properties(ixgbe_t *);
  48 static int ixgbe_init_driver_settings(ixgbe_t *);
  49 static void ixgbe_init_locks(ixgbe_t *);
  50 static void ixgbe_destroy_locks(ixgbe_t *);
  51 static int ixgbe_init(ixgbe_t *);
  52 static int ixgbe_chip_start(ixgbe_t *);
  53 static void ixgbe_chip_stop(ixgbe_t *);
  54 static int ixgbe_reset(ixgbe_t *);
  55 static void ixgbe_tx_clean(ixgbe_t *);
  56 static boolean_t ixgbe_tx_drain(ixgbe_t *);
  57 static boolean_t ixgbe_rx_drain(ixgbe_t *);
  58 static int ixgbe_alloc_rings(ixgbe_t *);


 299 };
 300 
 301 static adapter_info_t ixgbe_X540_cap = {
 302         128,            /* maximum number of rx queues */
 303         1,              /* minimum number of rx queues */
 304         128,            /* default number of rx queues */
 305         64,             /* maximum number of rx groups */
 306         1,              /* minimum number of rx groups */
 307         1,              /* default number of rx groups */
 308         128,            /* maximum number of tx queues */
 309         1,              /* minimum number of tx queues */
 310         8,              /* default number of tx queues */
 311         15500,          /* maximum MTU size */
 312         0xFF8,          /* maximum interrupt throttle rate */
 313         0,              /* minimum interrupt throttle rate */
 314         200,            /* default interrupt throttle rate */
 315         64,             /* maximum total msix vectors */
 316         16,             /* maximum number of ring vectors */
 317         2,              /* maximum number of other vectors */
 318         (IXGBE_EICR_LSC
 319         | IXGBE_EICR_GPI_SDP1_X540
 320         | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */
 321 
 322         (IXGBE_SDP1_GPIEN_X540
 323         | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */
 324 
 325         (IXGBE_FLAG_DCA_CAPABLE
 326         | IXGBE_FLAG_RSS_CAPABLE
 327         | IXGBE_FLAG_VMDQ_CAPABLE
 328         | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
 329 };
 330 
 331 static adapter_info_t ixgbe_X550_cap = {
 332         128,            /* maximum number of rx queues */
 333         1,              /* minimum number of rx queues */
 334         128,            /* default number of rx queues */
 335         64,             /* maximum number of rx groups */
 336         1,              /* minimum number of rx groups */
 337         1,              /* default number of rx groups */
 338         128,            /* maximum number of tx queues */
 339         1,              /* minimum number of tx queues */
 340         8,              /* default number of tx queues */
 341         15500,          /* maximum MTU size */
 342         0xFF8,          /* maximum interrupt throttle rate */
 343         0,              /* minimum interrupt throttle rate */
 344         200,            /* default interrupt throttle rate */
 345         64,             /* maximum total msix vectors */
 346         16,             /* maximum number of ring vectors */
 347         2,              /* maximum number of other vectors */
 348         (IXGBE_EICR_LSC
 349         | IXGBE_SDP1_GPIEN_X550
 350         | IXGBE_SDP2_GPIEN_X550), /* "other" interrupt types handled */
 351 
 352         (IXGBE_SDP1_GPIEN_X550
 353         | IXGBE_SDP2_GPIEN_X550), /* "other" interrupt types enable mask */
 354 
 355         (IXGBE_FLAG_RSS_CAPABLE
 356         | IXGBE_FLAG_VMDQ_CAPABLE
 357         | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
 358 };
 359 
 360 /*
 361  * Module Initialization Functions.
 362  */
 363 
 364 int
 365 _init(void)
 366 {
 367         int status;
 368 
 369         mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
 370 
 371         status = mod_install(&ixgbe_modlinkage);
 372 
 373         if (status != DDI_SUCCESS) {
 374                 mac_fini_ops(&ixgbe_dev_ops);
 375         }
 376 
 377         return (status);
 378 }
 379 


 619         ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
 620 
 621         ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
 622             IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
 623         if (ixgbe->periodic_id == 0) {
 624                 ixgbe_error(ixgbe, "Failed to add the link check timer");
 625                 goto attach_fail;
 626         }
 627         ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
 628 
 629         /*
 630          * Now that mutex locks are initialized, and the chip is also
 631          * initialized, enable interrupts.
 632          */
 633         if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
 634                 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
 635                 goto attach_fail;
 636         }
 637         ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
 638 
 639         ixgbe_log(ixgbe, "%s", ixgbe_ident);
 640         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
 641 
 642         return (DDI_SUCCESS);
 643 
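     /*
      * Common failure path: ixgbe_unconfigure() consults attach_progress
      * and tears down only the attach stages that completed above.
      */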
 644 attach_fail:
 645         ixgbe_unconfigure(devinfo, ixgbe);
 646         return (DDI_FAILURE);
 647 }
 648 
 649 /*
 650  * ixgbe_detach - Driver detach.
 651  *
 652  * The detach() function is the complement of the attach routine.
 653  * If cmd is set to DDI_DETACH, detach() is used to remove the
 654  * state associated with a given instance of a device node
 655  * prior to the removal of that instance from the system.
 656  *
 657  * The detach() function will be called once for each instance
 658  * of the device for which there has been a successful attach(),
 659  * once there are no longer any opens on the device.


 971         case ixgbe_mac_82599EB:
 972                 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
 973                 ixgbe->capab = &ixgbe_82599eb_cap;
 974 
 975                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
 976                         ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
 977                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
 978                         ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
 979                 }
 980                 break;
 981 
 982         case ixgbe_mac_X540:
 983                 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
 984                 ixgbe->capab = &ixgbe_X540_cap;
 985                 /*
 986                  * For now, X540 is all set in its capab structure.
 987                  * As other X540 variants show up, things can change here.
 988                  */
 989                 break;
 990 
 991         case ixgbe_mac_X550:
 992         case ixgbe_mac_X550EM_x:
 993                 IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
 994                 ixgbe->capab = &ixgbe_X550_cap;
 995 
 996                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
 997                         ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;
 998 
 999                 break;
1000 
1001         default:
1002                 IXGBE_DEBUGLOG_1(ixgbe,
1003                     "adapter not supported in ixgbe_identify_hardware(): %d\n",
1004                     hw->mac.type);
1005                 return (IXGBE_FAILURE);
1006         }
1007 
1008         return (IXGBE_SUCCESS);
1009 }
1010 
1011 /*
1012  * ixgbe_regs_map - Map the device registers.
1013  *
1014  */
1015 static int
1016 ixgbe_regs_map(ixgbe_t *ixgbe)
1017 {
1018         dev_info_t *devinfo = ixgbe->dip;
1019         struct ixgbe_hw *hw = &ixgbe->hw;
1020         struct ixgbe_osdep *osdep = &ixgbe->osdep;


2215 
2216         /*
2217          * Setup head & tail pointers
2218          */
2219         IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2220             rx_data->ring_size - 1);
2221         IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2222 
2223         rx_data->rbd_next = 0;
2224         rx_data->lro_first = 0;
2225 
2226         /*
2227          * Setup the Receive Descriptor Control Register (RXDCTL)
2228          * PTHRESH=32 descriptors (half the internal cache)
2229          * HTHRESH=0 descriptors (to minimize latency on fetch)
2230          * WTHRESH defaults to 1 (writeback each descriptor)
2231          */
2232         reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2233         reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2234 
2235         /* Not a valid value for 82599, X540 or X550 */
2236         if (hw->mac.type == ixgbe_mac_82598EB) {
2237                 reg_val |= 0x0020;      /* pthresh */
2238         }
2239         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2240 
2241         if (hw->mac.type == ixgbe_mac_82599EB ||
2242             hw->mac.type == ixgbe_mac_X540 ||
2243             hw->mac.type == ixgbe_mac_X550 ||
2244             hw->mac.type == ixgbe_mac_X550EM_x) {
2245                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2246                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2247                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2248         }
2249 
2250         /*
2251          * Setup the Split and Replication Receive Control Register.
2252          * Set the rx buffer size and the advanced descriptor type.
2253          */
2254         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2255             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
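             /*
              * DROP_EN lets the hardware drop packets destined for this ring
              * when no receive descriptors are available, rather than
              * stalling reception for the other rings.
              */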
2256         reg_val |= IXGBE_SRRCTL_DROP_EN;
2257         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2258 }
2259 
2260 static void
2261 ixgbe_setup_rx(ixgbe_t *ixgbe)
2262 {
2263         ixgbe_rx_ring_t *rx_ring;
2264         struct ixgbe_hw *hw = &ixgbe->hw;


2522         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2523                 tx_ring = &ixgbe->tx_rings[i];
2524                 ixgbe_setup_tx_ring(tx_ring);
2525         }
2526 
2527         /*
2528          * Setup the per-ring statistics mapping.
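              *
              * Each TQSM(R) register packs the statistics mapping for four
              * consecutive tx rings, one byte per ring: (i & 0xF) supplies
              * ring i's mapping value and (8 * (i & 0x3)) selects its byte
              * lane.  A register is written after every fourth ring, with a
              * final write below for any partially filled register.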
2529          */
2530         ring_mapping = 0;
2531         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2532                 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2533                 if ((i & 0x3) == 0x3) {
2534                         switch (hw->mac.type) {
2535                         case ixgbe_mac_82598EB:
2536                                 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2537                                     ring_mapping);
2538                                 break;
2539 
2540                         case ixgbe_mac_82599EB:
2541                         case ixgbe_mac_X540:
2542                         case ixgbe_mac_X550:
2543                         case ixgbe_mac_X550EM_x:
2544                                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2545                                     ring_mapping);
2546                                 break;
2547 
2548                         default:
2549                                 break;
2550                         }
2551 
2552                         ring_mapping = 0;
2553                 }
2554         }
2555         if (i & 0x3) {
2556                 switch (hw->mac.type) {
2557                 case ixgbe_mac_82598EB:
2558                         IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2559                         break;
2560 
2561                 case ixgbe_mac_82599EB:
2562                 case ixgbe_mac_X540:
2563                 case ixgbe_mac_X550:
2564                 case ixgbe_mac_X550EM_x:
2565                         IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2566                         break;
2567 
2568                 default:
2569                         break;
2570                 }
2571         }
2572 
2573         /*
2574          * Enable CRC appending and TX padding (for short tx frames)
2575          */
2576         reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2577         reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2578         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2579 
2580         /*
2581          * enable DMA for 82599, X540 and X550 parts
2582          */
2583         if (hw->mac.type == ixgbe_mac_82599EB ||
2584             hw->mac.type == ixgbe_mac_X540 ||
2585             hw->mac.type == ixgbe_mac_X550 ||
2586             hw->mac.type == ixgbe_mac_X550EM_x) {
2587                 /* DMATXCTL.TE must be set after all Tx config is complete */
2588                 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2589                 reg_val |= IXGBE_DMATXCTL_TE;
2590                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2591 
2592                 /* Disable arbiter to set MTQC */
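                      /* (MTQC may only be written while RTTDCS.ARBDIS is set.) */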
2593                 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2594                 reg_val |= IXGBE_RTTDCS_ARBDIS;
2595                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2596                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2597                 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2598                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2599         }
2600 
2601         /*
 2602          * Enable tx queues.
 2603          * For 82599 and later this must be done after DMATXCTL.TE is set.
2604          */
2605         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2606                 tx_ring = &ixgbe->tx_rings[i];


2679         struct ixgbe_hw *hw = &ixgbe->hw;
2680         uint32_t vmdctl, i, vtctl;
2681 
2682         /*
2683          * Setup the VMDq Control register, enable VMDq based on
2684          * packet destination MAC address:
2685          */
2686         switch (hw->mac.type) {
2687         case ixgbe_mac_82598EB:
2688                 /*
2689                  * VMDq Enable = 1;
2690                  * VMDq Filter = 0; MAC filtering
2691                  * Default VMDq output index = 0;
2692                  */
2693                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2694                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2695                 break;
2696 
2697         case ixgbe_mac_82599EB:
2698         case ixgbe_mac_X540:
2699         case ixgbe_mac_X550:
2700         case ixgbe_mac_X550EM_x:
2701                 /*
2702                  * Enable VMDq-only.
2703                  */
2704                 vmdctl = IXGBE_MRQC_VMDQEN;
2705                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2706 
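                      /*
                       * Clear the MAC Pool Select Array so that no pool is
                       * bound to any receive address until the unicast
                       * addresses are programmed later.
                       */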
2707                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2708                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2709                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2710                 }
2711 
2712                 /*
2713                  * Enable Virtualization and Replication.
2714                  */
2715                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2716                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2717 
2718                 /*
2719                  * Enable receiving packets to all VFs
2720                  */


2775                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2776                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2777                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2778                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2779                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2780                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2781                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2782                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2783 
2784                 /*
2785                  * Enable and Setup VMDq
2786                  * VMDq Filter = 0; MAC filtering
2787                  * Default VMDq output index = 0;
2788                  */
2789                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2790                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2791                 break;
2792 
2793         case ixgbe_mac_82599EB:
2794         case ixgbe_mac_X540:
2795         case ixgbe_mac_X550:
2796         case ixgbe_mac_X550EM_x:
2797                 /*
2798                  * Enable RSS & Setup RSS Hash functions
2799                  */
2800                 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2801                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2802                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2803                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2804                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2805                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2806                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2807                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2808                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2809 
2810                 /*
2811                  * Enable VMDq+RSS.
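                       *
                       * More than 32 groups selects 64-pool mode (at most two
                       * RSS queues per pool); otherwise 32-pool mode is used
                       * (at most four RSS queues per pool), matching the
                       * vmdq/rss combinations described later in this file.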
2812                  */
 2813                 if (ixgbe->num_rx_groups > 32) {
2814                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2815                 } else {
2816                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;


2823                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2824                 }
2825                 break;
2826 
2827         default:
2828                 break;
2829 
2830         }
2831 
2832         /*
2833          * Disable Packet Checksum to enable RSS for multiple receive queues.
2834          * It is an adapter hardware limitation that Packet Checksum is
2835          * mutually exclusive with RSS.
2836          */
2837         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2838         rxcsum |= IXGBE_RXCSUM_PCSD;
2839         rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2840         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2841 
2842         if (hw->mac.type == ixgbe_mac_82599EB ||
2843             hw->mac.type == ixgbe_mac_X540 ||
2844             hw->mac.type == ixgbe_mac_X550 ||
2845             hw->mac.type == ixgbe_mac_X550EM_x) {
2846                 /*
2847                  * Enable Virtualization and Replication.
2848                  */
2849                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2850                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2851 
2852                 /*
2853                  * Enable receiving packets to all VFs
2854                  */
2855                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2856                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2857         }
2858 }
2859 
2860 /*
2861  * ixgbe_init_unicst - Initialize the unicast addresses.
2862  */
2863 static void
2864 ixgbe_init_unicst(ixgbe_t *ixgbe)
2865 {


3037                 /*
3038                  * 82598 supports the following combination:
3039                  * vmdq no. x rss no.
3040                  * [5..16]  x 1
3041                  * [1..4]   x [1..16]
3042                  * However 8 rss queue per pool (vmdq) is sufficient for
3043                  * most cases.
3044                  */
3045                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3046                 if (ixgbe->num_rx_groups > 4) {
3047                         ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3048                 } else {
3049                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3050                             min(8, ring_per_group);
3051                 }
3052 
3053                 break;
3054 
3055         case ixgbe_mac_82599EB:
3056         case ixgbe_mac_X540:
3057         case ixgbe_mac_X550:
3058         case ixgbe_mac_X550EM_x:
3059                 /*
3060                  * 82599 supports the following combination:
3061                  * vmdq no. x rss no.
3062                  * [33..64] x [1..2]
3063                  * [2..32]  x [1..4]
3064                  * 1 x [1..16]
3065                  * However 8 rss queue per pool (vmdq) is sufficient for
3066                  * most cases.
3067                  *
3068                  * For now, treat X540 and X550 like the 82599.
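                       *
                       * For example, with 8 groups and 64 rings requested,
                       * ring_per_group is 8, so num_rx_rings is clamped to
                       * 8 * min(4, 8) = 32; with 40 groups and 120 rings it
                       * is clamped to 40 * min(2, 3) = 80.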
3069                  */
3070                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3071                 if (ixgbe->num_rx_groups == 1) {
3072                         ixgbe->num_rx_rings = min(8, ring_per_group);
3073                 } else if (ixgbe->num_rx_groups <= 32) {
3074                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3075                             min(4, ring_per_group);
3076                 } else if (ixgbe->num_rx_groups <= 64) {
3077                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3078                             min(2, ring_per_group);
3079                 }
3080                 break;
3081 
3082         default:
3083                 break;
3084         }
3085 
3086         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3087 
3088         if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {


3211          * 1 = force interrupt type MSI-X
3212          * 2 = force interrupt type MSI
3213          * 3 = force interrupt type Legacy
3214          */
3215         ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3216             IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3217 
3218         ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3219             0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3220         ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3221             0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3222         ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3223             0, 1, DEFAULT_LSO_ENABLE);
3224         ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3225             0, 1, DEFAULT_LRO_ENABLE);
3226         ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3227             0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3228         ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3229             PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3230 
3231         /* Head Write Back not recommended for 82599, X540 and X550 */
3232         if (hw->mac.type == ixgbe_mac_82599EB ||
3233             hw->mac.type == ixgbe_mac_X540 ||
3234             hw->mac.type == ixgbe_mac_X550 ||
3235             hw->mac.type == ixgbe_mac_X550EM_x) {
3236                 ixgbe->tx_head_wb_enable = B_FALSE;
3237         }
3238 
3239         /*
3240          * ixgbe LSO needs the tx h/w checksum support.
3241          * LSO will be disabled if tx h/w checksum is not
3242          * enabled.
3243          */
3244         if (ixgbe->tx_hcksum_enable == B_FALSE) {
3245                 ixgbe->lso_enable = B_FALSE;
3246         }
3247 
3248         /*
3249          * ixgbe LRO needs the rx h/w checksum support.
3250          * LRO will be disabled if rx h/w checksum is not
3251          * enabled.
3252          */
3253         if (ixgbe->rx_hcksum_enable == B_FALSE) {
3254                 ixgbe->lro_enable = B_FALSE;
3255         }
3256 
3257         /*
3258          * ixgbe LRO only supported by 82599, X540 and X550
3259          */
3260         if (hw->mac.type == ixgbe_mac_82598EB) {
3261                 ixgbe->lro_enable = B_FALSE;
3262         }
3263         ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3264             MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3265             DEFAULT_TX_COPY_THRESHOLD);
3266         ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3267             PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3268             MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3269         ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3270             PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3271             MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3272         ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3273             PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3274             MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3275 
3276         ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3277             MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3278             DEFAULT_RX_COPY_THRESHOLD);
3279         ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3280             MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3281             DEFAULT_RX_LIMIT_PER_INTR);
3282 
3283         ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3284             ixgbe->capab->min_intr_throttle,
3285             ixgbe->capab->max_intr_throttle,
3286             ixgbe->capab->def_intr_throttle);
3287         /*
 3288          * 82599, X540 and X550 require the interrupt throttling rate to
 3289          * be a multiple of 8. This is enforced by the register definition.
 3290          */
3291         if (hw->mac.type == ixgbe_mac_82599EB ||
3292             hw->mac.type == ixgbe_mac_X540 ||
3293             hw->mac.type == ixgbe_mac_X550 ||
3294             hw->mac.type == ixgbe_mac_X550EM_x)
3295                 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3296 
3297         hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3298             PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3299 }
3300 
3301 static void
3302 ixgbe_init_params(ixgbe_t *ixgbe)
3303 {
3304         ixgbe->param_en_10000fdx_cap = 1;
3305         ixgbe->param_en_1000fdx_cap = 1;
3306         ixgbe->param_en_100fdx_cap = 1;
3307         ixgbe->param_adv_10000fdx_cap = 1;
3308         ixgbe->param_adv_1000fdx_cap = 1;
3309         ixgbe->param_adv_100fdx_cap = 1;
3310 
3311         ixgbe->param_pause_cap = 1;
3312         ixgbe->param_asym_pause_cap = 1;
3313         ixgbe->param_rem_fault = 0;
3314 


3373         if (ixgbe->param_adv_10000fdx_cap == 1)
3374                 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3375 
3376         if (ixgbe->param_adv_1000fdx_cap == 1)
3377                 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3378 
3379         if (ixgbe->param_adv_100fdx_cap == 1)
3380                 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3381 
3382         if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3383                 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3384                     "to autonegotiation with full link capabilities.");
3385 
3386                 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3387                     IXGBE_LINK_SPEED_1GB_FULL |
3388                     IXGBE_LINK_SPEED_100_FULL;
3389         }
3390 
3391         if (setup_hw) {
3392                 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3393                     ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3394                         ixgbe_notice(ixgbe, "Setup link failed on this "
3395                             "device.");
3396                         return (IXGBE_FAILURE);
3397                 }
3398         }
3399 
3400         return (IXGBE_SUCCESS);
3401 }
3402 
3403 /*
3404  * ixgbe_driver_link_check - Link status processing.
3405  *
3406  * This function can be called in both kernel context and interrupt context
3407  */
3408 static void
3409 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3410 {
3411         struct ixgbe_hw *hw = &ixgbe->hw;
3412         ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3413         boolean_t link_up = B_FALSE;


3466                 ixgbe->eims |= IXGBE_EICR_LSC;
3467                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3468         }
3469 
3470         if (link_changed) {
3471                 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3472         }
3473 }
3474 
3475 /*
 3476  * ixgbe_sfp_check - SFP processing done in taskq for SFP-capable adapters.
3477  */
3478 static void
3479 ixgbe_sfp_check(void *arg)
3480 {
3481         ixgbe_t *ixgbe = (ixgbe_t *)arg;
3482         uint32_t eicr = ixgbe->eicr;
3483         struct ixgbe_hw *hw = &ixgbe->hw;
3484 
3485         mutex_enter(&ixgbe->gen_lock);
3486         if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3487                 /* clear the interrupt */
3488                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3489 
3490                 /* if link up, do multispeed fiber setup */
3491                 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3492                     B_TRUE);
3493                 ixgbe_driver_link_check(ixgbe);
3494                 ixgbe_get_hw_state(ixgbe);
3495         } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3496                 /* clear the interrupt */
3497                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3498 
3499                 /* if link up, do sfp module setup */
3500                 (void) hw->mac.ops.setup_sfp(hw);
3501 
3502                 /* do multispeed fiber setup */
3503                 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3504                     B_TRUE);
3505                 ixgbe_driver_link_check(ixgbe);
3506                 ixgbe_get_hw_state(ixgbe);
3507         }
3508         mutex_exit(&ixgbe->gen_lock);
3509 
3510         /*
3511          * We need to fully re-check the link later.
3512          */
3513         ixgbe->link_check_complete = B_FALSE;
3514         ixgbe->link_check_hrtime = gethrtime() +
3515             (IXGBE_LINK_UP_TIME * 100000000ULL);
3516 }
3517 
3518 /*
3519  * ixgbe_overtemp_check - overtemp module processing done in taskq
3520  *
 3521  * This routine will only be called on adapters with a temperature sensor.
3522  * The indication of over-temperature can be either SDP0 interrupt or the link
3523  * status change interrupt.
3524  */
3525 static void
3526 ixgbe_overtemp_check(void *arg)
3527 {
3528         ixgbe_t *ixgbe = (ixgbe_t *)arg;
3529         struct ixgbe_hw *hw = &ixgbe->hw;
3530         uint32_t eicr = ixgbe->eicr;
3531         ixgbe_link_speed speed;
3532         boolean_t link_up;
3533 
3534         mutex_enter(&ixgbe->gen_lock);
3535 
3536         /* make sure we know current state of link */
3537         (void) ixgbe_check_link(hw, &speed, &link_up, false);
3538 
3539         /* check over-temp condition */
3540         if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3541             (eicr & IXGBE_EICR_LSC)) {
3542                 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3543                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3544 
3545                         /*
3546                          * Disable the adapter interrupts
3547                          */
3548                         ixgbe_disable_adapter_interrupts(ixgbe);
3549 
3550                         /*
3551                          * Disable Rx/Tx units
3552                          */
3553                         (void) ixgbe_stop_adapter(hw);
3554 
3555                         ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3556                         ixgbe_error(ixgbe,
3557                             "Problem: Network adapter has been stopped "
3558                             "because it has overheated");
3559                         ixgbe_error(ixgbe,
3560                             "Action: Restart the computer. "


3716 
3717         /*
3718          * Look up the OBP property "local-mac-address?". If the user has set
3719          * 'local-mac-address? = false', use "the system address" instead.
3720          */
3721         if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3722             "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3723                 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3724                         if (localetheraddr(NULL, &sysaddr) != 0) {
3725                                 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3726                                 found = B_TRUE;
3727                         }
3728                 }
3729                 ddi_prop_free(bytes);
3730         }
3731 
3732         /*
3733          * Finally(!), if there's a valid "mac-address" property (created
3734          * if we netbooted from this interface), we must use this instead
3735          * of any of the above to ensure that the NFS/install server doesn't
3736          * get confused by the address changing as illumos takes over!
3737          */
3738         err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3739             DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3740         if (err == DDI_PROP_SUCCESS) {
3741                 if (nelts == ETHERADDRL) {
3742                         while (nelts--)
3743                                 hw->mac.addr[nelts] = bytes[nelts];
3744                         found = B_TRUE;
3745                 }
3746                 ddi_prop_free(bytes);
3747         }
3748 
3749         if (found) {
3750                 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3751                 return (B_TRUE);
3752         }
3753 #else
3754         _NOTE(ARGUNUSED(ixgbe));
3755 #endif
3756 


3906          */
3907         if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3908                 /* enable autoclear but not on bits 29:20 */
3909                 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3910 
3911                 /* general purpose interrupt enable */
3912                 gpie |= (IXGBE_GPIE_MSIX_MODE
3913                     | IXGBE_GPIE_PBA_SUPPORT
3914                     | IXGBE_GPIE_OCD
3915                     | IXGBE_GPIE_EIAME);
3916         /*
3917          * non-msi-x mode
3918          */
3919         } else {
3920 
3921                 /* disable autoclear, leave gpie at default */
3922                 eiac = 0;
3923 
3924                 /*
3925                  * General purpose interrupt enable.
3926                  * For 82599, X540 and X550, extended interrupt
3927                  * automask enable only in MSI or MSI-X mode
3928                  */
3929                 if ((hw->mac.type == ixgbe_mac_82598EB) ||
3930                     (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3931                         gpie |= IXGBE_GPIE_EIAME;
3932                 }
3933         }
3934 
3935         /* Enable specific "other" interrupt types */
3936         switch (hw->mac.type) {
3937         case ixgbe_mac_82598EB:
3938                 gpie |= ixgbe->capab->other_gpie;
3939                 break;
3940 
3941         case ixgbe_mac_82599EB:
3942         case ixgbe_mac_X540:
3943         case ixgbe_mac_X550:
3944         case ixgbe_mac_X550EM_x:
3945                 gpie |= ixgbe->capab->other_gpie;
3946 
3947                 /* Enable RSC Delay 8us when LRO enabled  */
3948                 if (ixgbe->lro_enable) {
3949                         gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3950                 }
3951                 break;
3952 
3953         default:
3954                 break;
3955         }
3956 
3957         /* write to interrupt control registers */
3958         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3959         IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3960         IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3961         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3962         IXGBE_WRITE_FLUSH(hw);
3963 }
3964 


4119                     &atlas);
4120                 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4121                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4122                     atlas);
4123 
4124                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4125                     &atlas);
4126                 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4127                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4128                     atlas);
4129 
4130                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4131                     &atlas);
4132                 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4133                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4134                     atlas);
4135                 break;
4136 
4137         case ixgbe_mac_82599EB:
4138         case ixgbe_mac_X540:
4139         case ixgbe_mac_X550:
4140         case ixgbe_mac_X550EM_x:
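                      /*
                       * AUTOC.FLU (force link-up) and the KX4 PMA/PMD
                       * selection let the MAC report link without link
                       * negotiation with a peer.
                       */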
4141                 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4142                 reg |= (IXGBE_AUTOC_FLU |
4143                     IXGBE_AUTOC_10G_KX4);
4144                 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4145 
4146                 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4147                     B_FALSE);
4148                 break;
4149 
4150         default:
4151                 break;
4152         }
4153 }
4154 
4155 #pragma inline(ixgbe_intr_rx_work)
4156 /*
4157  * ixgbe_intr_rx_work - RX processing of ISR.
4158  */
4159 static void
4160 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4161 {
4162         mblk_t *mp;
4163 
4164         mutex_enter(&rx_ring->rx_lock);
4165 
4166         mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4167         mutex_exit(&rx_ring->rx_lock);


4187 
4188         /*
4189          * Schedule the re-transmit
4190          */
4191         if (tx_ring->reschedule &&
4192             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4193                 tx_ring->reschedule = B_FALSE;
4194                 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4195                     tx_ring->ring_handle);
4196                 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4197         }
4198 }
4199 
4200 #pragma inline(ixgbe_intr_other_work)
4201 /*
4202  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4203  */
4204 static void
4205 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4206 {
4207         struct ixgbe_hw *hw = &ixgbe->hw;
4208 
4209         ASSERT(mutex_owned(&ixgbe->gen_lock));
4210 
4211         /*
4212          * handle link status change
4213          */
4214         if (eicr & IXGBE_EICR_LSC) {
4215                 ixgbe_driver_link_check(ixgbe);
4216                 ixgbe_get_hw_state(ixgbe);
4217         }
4218 
4219         /*
4220          * check for fan failure on adapters with fans
4221          */
4222         if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4223             (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4224                 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4225 
4226                 /*
4227                  * Disable the adapter interrupts
4228                  */
4229                 ixgbe_disable_adapter_interrupts(ixgbe);
4230 
4231                 /*
4232                  * Disable Rx/Tx units
4233                  */
4234                 (void) ixgbe_stop_adapter(&ixgbe->hw);
4235 
4236                 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4237                 ixgbe_error(ixgbe,
4238                     "Problem: Network adapter has been stopped "
4239                     "because the fan has stopped.\n");
4240                 ixgbe_error(ixgbe,
4241                     "Action: Replace the adapter.\n");
4242 
4243                 /* re-enable the interrupt, which was automasked */
4244                 ixgbe->eims |= IXGBE_EICR_GPI_SDP1_BY_MAC(hw);
4245         }
4246 
4247         /*
4248          * Do SFP check for adapters with hot-plug capability
4249          */
4250         if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4251             ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
4252              (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
4253                 ixgbe->eicr = eicr;
4254                 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4255                     ixgbe_sfp_check, (void *)ixgbe,
4256                     DDI_NOSLEEP)) != DDI_SUCCESS) {
4257                         ixgbe_log(ixgbe, "No memory available to dispatch "
4258                             "taskq for SFP check");
4259                 }
4260         }
4261 
4262         /*
4263          * Do over-temperature check for adapters with temp sensor
4264          */
4265         if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4266             ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || (eicr & IXGBE_EICR_LSC))) {
4267                 ixgbe->eicr = eicr;
4268                 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4269                     ixgbe_overtemp_check, (void *)ixgbe,
4270                     DDI_NOSLEEP)) != DDI_SUCCESS) {
4271                         ixgbe_log(ixgbe, "No memory available to dispatch "
4272                             "taskq for overtemp check");
4273                 }
4274         }
4275 }
4276 
4277 /*
4278  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4279  */
4280 static uint_t
4281 ixgbe_intr_legacy(void *arg1, void *arg2)
4282 {
4283         ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4284         struct ixgbe_hw *hw = &ixgbe->hw;
4285         ixgbe_tx_ring_t *tx_ring;
4286         ixgbe_rx_ring_t *rx_ring;


4343                          */
4344                         tx_ring = &ixgbe->tx_rings[0];
4345                         tx_ring->tx_recycle(tx_ring);
4346 
4347                         /*
4348                          * Schedule the re-transmit
4349                          */
4350                         tx_reschedule = (tx_ring->reschedule &&
4351                             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4352                 }
4353 
4354                 /* any interrupt type other than tx/rx */
4355                 if (eicr & ixgbe->capab->other_intr) {
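                              /*
                               * 82598 masks serviced "other" causes by
                               * clearing them from the software EIMS copy;
                               * 82599 and later disable them explicitly
                               * through EIMC before servicing.
                               */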
4356                         switch (hw->mac.type) {
4357                         case ixgbe_mac_82598EB:
4358                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4359                                 break;
4360 
4361                         case ixgbe_mac_82599EB:
4362                         case ixgbe_mac_X540:
4363                         case ixgbe_mac_X550:
4364                         case ixgbe_mac_X550EM_x:
4365                                 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4366                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4367                                 break;
4368 
4369                         default:
4370                                 break;
4371                         }
4372                         ixgbe_intr_other_work(ixgbe, eicr);
4373                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4374                 }
4375 
4376                 mutex_exit(&ixgbe->gen_lock);
4377 
4378                 result = DDI_INTR_CLAIMED;
4379         } else {
4380                 mutex_exit(&ixgbe->gen_lock);
4381 
4382                 /*
4383                  * No interrupt cause bits set: don't claim this interrupt.
4384                  */


4439                 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4440         }
4441 
4442         /*
4443          * For MSI interrupt, tx rings[0] will use RTxQ[1].
4444          */
4445         if (eicr & 0x2) {
4446                 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4447         }
4448 
4449         /* any interrupt type other than tx/rx */
4450         if (eicr & ixgbe->capab->other_intr) {
4451                 mutex_enter(&ixgbe->gen_lock);
4452                 switch (hw->mac.type) {
4453                 case ixgbe_mac_82598EB:
4454                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4455                         break;
4456 
4457                 case ixgbe_mac_82599EB:
4458                 case ixgbe_mac_X540:
4459                 case ixgbe_mac_X550:
4460                 case ixgbe_mac_X550EM_x:
4461                         ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4462                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4463                         break;
4464 
4465                 default:
4466                         break;
4467                 }
4468                 ixgbe_intr_other_work(ixgbe, eicr);
4469                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4470                 mutex_exit(&ixgbe->gen_lock);
4471         }
4472 
4473         /* re-enable the interrupts which were automasked */
4474         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4475 
4476         return (DDI_INTR_CLAIMED);
4477 }
4478 
4479 /*
4480  * ixgbe_intr_msix - Interrupt handler for MSI-X.


4521                     DDI_FM_OK) {
4522                         ddi_fm_service_impact(ixgbe->dip,
4523                             DDI_SERVICE_DEGRADED);
4524                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4525                         return (DDI_INTR_CLAIMED);
4526                 }
4527 
4528                 /*
4529                  * Check "other" cause bits: any interrupt type other than tx/rx
4530                  */
4531                 if (eicr & ixgbe->capab->other_intr) {
4532                         mutex_enter(&ixgbe->gen_lock);
4533                         switch (hw->mac.type) {
4534                         case ixgbe_mac_82598EB:
4535                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4536                                 ixgbe_intr_other_work(ixgbe, eicr);
4537                                 break;
4538 
4539                         case ixgbe_mac_82599EB:
4540                         case ixgbe_mac_X540:
4541                         case ixgbe_mac_X550:
4542                         case ixgbe_mac_X550EM_x:
4543                                 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4544                                 ixgbe_intr_other_work(ixgbe, eicr);
4545                                 break;
4546 
4547                         default:
4548                                 break;
4549                         }
4550                         mutex_exit(&ixgbe->gen_lock);
4551                 }
4552 
4553                 /* re-enable the interrupts which were automasked */
4554                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4555         }
4556 
4557         return (DDI_INTR_CLAIMED);
4558 }
4559 
4560 /*
4561  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4562  *


4924     int8_t cause)
4925 {
4926         struct ixgbe_hw *hw = &ixgbe->hw;
4927         u32 ivar, index;
4928 
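             /*
              * IVAR layout: on 82598 each IVAR register holds four one-byte
              * entries, with rx causes occupying entries 0-63 and tx causes
              * entries 64-127.  On 82599 and later, IVAR(entry >> 1) holds
              * the rx/tx vectors for a queue pair and IVAR_MISC holds the
              * "other cause" vectors.
              */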
4929         switch (hw->mac.type) {
4930         case ixgbe_mac_82598EB:
4931                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4932                 if (cause == -1) {
4933                         cause = 0;
4934                 }
4935                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4936                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4937                 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4938                 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4939                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4940                 break;
4941 
4942         case ixgbe_mac_82599EB:
4943         case ixgbe_mac_X540:
4944         case ixgbe_mac_X550:
4945         case ixgbe_mac_X550EM_x:
4946                 if (cause == -1) {
4947                         /* other causes */
4948                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4949                         index = (intr_alloc_entry & 1) * 8;
4950                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4951                         ivar &= ~(0xFF << index);
4952                         ivar |= (msix_vector << index);
4953                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4954                 } else {
4955                         /* tx or rx causes */
4956                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4957                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4958                         ivar = IXGBE_READ_REG(hw,
4959                             IXGBE_IVAR(intr_alloc_entry >> 1));
4960                         ivar &= ~(0xFF << index);
4961                         ivar |= (msix_vector << index);
4962                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4963                             ivar);
4964                 }
4965                 break;


4980 static void
4981 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4982 {
4983         struct ixgbe_hw *hw = &ixgbe->hw;
4984         u32 ivar, index;
4985 
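             /*
              * Only the IXGBE_IVAR_ALLOC_VAL (entry valid) bit is toggled by
              * the enable/disable routines; the vector number written by
              * ixgbe_setup_ivar() is left intact.
              */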
4986         switch (hw->mac.type) {
4987         case ixgbe_mac_82598EB:
4988                 if (cause == -1) {
4989                         cause = 0;
4990                 }
4991                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4992                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4993                 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4994                     (intr_alloc_entry & 0x3)));
4995                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4996                 break;
4997 
4998         case ixgbe_mac_82599EB:
4999         case ixgbe_mac_X540:
5000         case ixgbe_mac_X550:
5001         case ixgbe_mac_X550EM_x:
5002                 if (cause == -1) {
5003                         /* other causes */
5004                         index = (intr_alloc_entry & 1) * 8;
5005                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5006                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5007                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5008                 } else {
5009                         /* tx or rx causes */
5010                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5011                         ivar = IXGBE_READ_REG(hw,
5012                             IXGBE_IVAR(intr_alloc_entry >> 1));
5013                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5014                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5015                             ivar);
5016                 }
5017                 break;
5018 
5019         default:
5020                 break;
5021         }


5032 static void
5033 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5034 {
5035         struct ixgbe_hw *hw = &ixgbe->hw;
5036         u32 ivar, index;
5037 
5038         switch (hw->mac.type) {
5039         case ixgbe_mac_82598EB:
5040                 if (cause == -1) {
5041                         cause = 0;
5042                 }
5043                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5044                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5045                 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
5046                     (intr_alloc_entry & 0x3)));
5047                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5048                 break;
5049 
5050         case ixgbe_mac_82599EB:
5051         case ixgbe_mac_X540:
5052         case ixgbe_mac_X550:
5053         case ixgbe_mac_X550EM_x:
5054                 if (cause == -1) {
5055                         /* other causes */
5056                         index = (intr_alloc_entry & 1) * 8;
5057                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5058                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5059                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5060                 } else {
5061                         /* tx or rx causes */
5062                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5063                         ivar = IXGBE_READ_REG(hw,
5064                             IXGBE_IVAR(intr_alloc_entry >> 1));
5065                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5066                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5067                             ivar);
5068                 }
5069                 break;
5070 
5071         default:
5072                 break;
5073         }


 5077  * Convert the rx ring index maintained by the driver to the rx ring
 5078  * index used by the hardware.
5079  */
5080 static uint32_t
5081 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5082 {
5083 
5084         struct ixgbe_hw *hw = &ixgbe->hw;
5085         uint32_t rx_ring_per_group, hw_rx_index;
5086 
5087         if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5088             ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5089                 return (sw_rx_index);
5090         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
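                      /*
                       * In VMDq-only mode on 82599 and later, each pool is
                       * backed by a pair of hardware queues and the driver
                       * uses the first (even-numbered) queue of each pool,
                       * hence the multiply by two.
                       */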
5091                 switch (hw->mac.type) {
5092                 case ixgbe_mac_82598EB:
5093                         return (sw_rx_index);
5094 
5095                 case ixgbe_mac_82599EB:
5096                 case ixgbe_mac_X540:
5097                 case ixgbe_mac_X550:
5098                 case ixgbe_mac_X550EM_x:
5099                         return (sw_rx_index * 2);
5100 
5101                 default:
5102                         break;
5103                 }
5104         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
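                      /*
                       * In VMDq+RSS mode each pool owns a contiguous block of
                       * hardware queues: 16 per pool on 82598, and 2 or 4 per
                       * pool on 82599 and later depending on whether more
                       * than 32 pools are configured.  The arithmetic below
                       * is (pool index * block size) + position in the group.
                       */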
5105                 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5106 
5107                 switch (hw->mac.type) {
5108                 case ixgbe_mac_82598EB:
5109                         hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5110                             16 + (sw_rx_index % rx_ring_per_group);
5111                         return (hw_rx_index);
5112 
5113                 case ixgbe_mac_82599EB:
5114                 case ixgbe_mac_X540:
5115                 case ixgbe_mac_X550:
5116                 case ixgbe_mac_X550EM_x:
5117                         if (ixgbe->num_rx_groups > 32) {
5118                                 hw_rx_index = (sw_rx_index /
5119                                     rx_ring_per_group) * 2 +
5120                                     (sw_rx_index % rx_ring_per_group);
5121                         } else {
5122                                 hw_rx_index = (sw_rx_index /
5123                                     rx_ring_per_group) * 4 +
5124                                     (sw_rx_index % rx_ring_per_group);
5125                         }
5126                         return (hw_rx_index);
5127 
5128                 default:
5129                         break;
5130                 }
5131         }
5132 
5133         /*
 5134          * Should never be reached. Just to keep the compiler happy.
5135          */
5136         return (sw_rx_index);
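
     /*
      * Illustration only -- not part of the driver.  A minimal user-space
      * sketch that mirrors the sw->hw rx index arithmetic above for the
      * 82599/X540/X550 family in VMDq+RSS mode; the names below are
      * invented for the example.
      */
     #include <stdio.h>

     static unsigned
     demo_hw_rx_index_vmdq_rss(unsigned sw, unsigned rings_per_group,
         unsigned num_groups)
     {
             /* 64-pool mode: blocks of 2 queues; 32-pool mode: blocks of 4 */
             unsigned block = (num_groups > 32) ? 2 : 4;

             return ((sw / rings_per_group) * block + (sw % rings_per_group));
     }

     int
     main(void)
     {
             unsigned i;

             /* 8 groups, 2 rings per group: 32-pool mode */
             for (i = 0; i < 8; i++)
                     printf("sw %u -> hw %u\n", i,
                         demo_hw_rx_index_vmdq_rss(i, 2, 8));
             return (0);
     }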


5202 static void
5203 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5204 {
5205         struct ixgbe_hw *hw = &ixgbe->hw;
5206         ixgbe_intr_vector_t *vect;      /* vector bitmap */
5207         int r_idx;      /* ring index */
5208         int v_idx;      /* vector index */
5209         uint32_t hw_index;
5210 
5211         /*
5212          * Clear any previous entries
5213          */
5214         switch (hw->mac.type) {
5215         case ixgbe_mac_82598EB:
5216                 for (v_idx = 0; v_idx < 25; v_idx++)
5217                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5218                 break;
5219 
5220         case ixgbe_mac_82599EB:
5221         case ixgbe_mac_X540:
5222         case ixgbe_mac_X550:
5223         case ixgbe_mac_X550EM_x:
5224                 for (v_idx = 0; v_idx < 64; v_idx++)
5225                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5226                 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5227                 break;
5228 
5229         default:
5230                 break;
5231         }
5232 
5233         /*
5234          * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5235          * tx rings[0] will use RTxQ[1].
5236          */
5237         if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5238                 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5239                 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5240                 return;
5241         }
5242 
5243         /*