1 /******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
48 ixgbe_link_speed *speed, bool *link_up,
49 bool link_up_wait_to_complete);
50 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
51 ixgbe_link_speed speed,
52 bool autoneg,
53 bool autoneg_wait_to_complete);
54 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 ixgbe_link_speed speed,
56 bool autoneg,
57 bool autoneg_wait_to_complete);
58 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
59 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
60 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 u32 headroom, int strategy);
63
64 /**
65 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66 * @hw: pointer to the HW structure
67 *
68 * The defaults for 82598 should be in the range of 50us to 50ms;
69 * however, the hardware default for these parts is 500us to 1ms, which is less
70 * than the 10ms recommended by the PCIe spec. To address this we need to
71 * increase the value to either 10ms to 250ms for capability version 1 config,
72 * or 16ms to 55ms for version 2.
73 **/
74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 {
76 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 u16 pcie_devctl2;
78
79 /* only take action if timeout value is defaulted to 0 */
80 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 goto out;
82
83 /*
105
106 /**
107 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
108 * @hw: pointer to hardware structure
109 *
110 * Initialize the function pointers and assign the MAC type for 82598.
111 * Does not touch the hardware.
112 **/
113 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
114 {
115 struct ixgbe_mac_info *mac = &hw->mac;
116 struct ixgbe_phy_info *phy = &hw->phy;
117 s32 ret_val;
118
119 DEBUGFUNC("ixgbe_init_ops_82598");
120
121 ret_val = ixgbe_init_phy_ops_generic(hw);
122 ret_val = ixgbe_init_ops_generic(hw);
123
124 /* PHY */
125 phy->ops.init = &ixgbe_init_phy_ops_82598;
126
127 /* MAC */
128 mac->ops.start_hw = &ixgbe_start_hw_82598;
129 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
130 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
131 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
132 mac->ops.get_supported_physical_layer =
133 &ixgbe_get_supported_physical_layer_82598;
134 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
135 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
136 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
137
138 /* RAR, Multicast, VLAN */
139 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
140 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
141 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
142 mac->ops.set_vlvf = NULL;
143 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
144
145 /* Flow Control */
146 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
147
148 mac->mcft_size = 128;
149 mac->vft_size = 128;
150 mac->num_rar_entries = 16;
151 mac->rx_pb_size = 512;
152 mac->max_tx_queues = 32;
153 mac->max_rx_queues = 64;
154 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
155
156 /* SFP+ Module */
157 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
158
159 /* Link */
160 mac->ops.check_link = &ixgbe_check_mac_link_82598;
161 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
162 mac->ops.flap_tx_laser = NULL;
163 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
164 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
165
166 /* Manageability interface */
167 mac->ops.set_fw_drv_ver = NULL;
168
169 return ret_val;
170 }
171
172 /**
173 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
174 * @hw: pointer to hardware structure
175 *
176 * Initialize any function pointers that were not able to be
177 * set during init_shared_code because the PHY/SFP type was
178 * not known. Perform the SFP init if necessary.
179 *
180 **/
181 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
182 {
183 struct ixgbe_mac_info *mac = &hw->mac;
184 struct ixgbe_phy_info *phy = &hw->phy;
185 s32 ret_val = IXGBE_SUCCESS;
186 u16 list_offset, data_offset;
187
188 DEBUGFUNC("ixgbe_init_phy_ops_82598");
189
190 /* Identify the PHY */
191 phy->ops.identify(hw);
192
193 /* Overwrite the link function pointers if copper PHY */
194 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
195 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
196 mac->ops.get_link_capabilities =
197 &ixgbe_get_copper_link_capabilities_generic;
198 }
199
200 switch (hw->phy.type) {
201 case ixgbe_phy_tn:
202 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
203 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
204 phy->ops.get_firmware_version =
205 &ixgbe_get_phy_firmware_version_tnx;
206 break;
207 case ixgbe_phy_nl:
208 phy->ops.reset = &ixgbe_reset_phy_nl;
209
210 /* Call SFP+ identify routine to get the SFP+ module type */
211 ret_val = phy->ops.identify_sfp(hw);
212 if (ret_val != IXGBE_SUCCESS)
213 goto out;
214 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
215 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
216 goto out;
217 }
218
219 /* Check to see if SFP+ module is supported */
220 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
221 &list_offset,
222 &data_offset);
223 if (ret_val != IXGBE_SUCCESS) {
224 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
225 goto out;
226 }
227 break;
228 default:
233 return ret_val;
234 }
235
236 /**
237 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
238 * @hw: pointer to hardware structure
239 *
240 * Starts the hardware using the generic start_hw function.
241 * Disables relaxed ordering, then sets the PCIe completion timeout.
242 *
243 **/
244 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
245 {
246 u32 regval;
247 u32 i;
248 s32 ret_val = IXGBE_SUCCESS;
249
250 DEBUGFUNC("ixgbe_start_hw_82598");
251
252 ret_val = ixgbe_start_hw_generic(hw);
253
254 /* Disable relaxed ordering */
255 for (i = 0; ((i < hw->mac.max_tx_queues) &&
256 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
257 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
258 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
259 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
260 }
261
262 for (i = 0; ((i < hw->mac.max_rx_queues) &&
263 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
264 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
265 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
266 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
267 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
268 }
269
270 /* set the completion timeout for the interface */
271 if (ret_val == IXGBE_SUCCESS)
272 ixgbe_set_pcie_completion_timeout(hw);
273
274 return ret_val;
275 }
276
277 /**
278 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
279 * @hw: pointer to hardware structure
280 * @speed: pointer to link speed
281 * @autoneg: boolean auto-negotiation value
282 *
283 * Determines the link capabilities by reading the AUTOC register.
284 **/
285 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
286 ixgbe_link_speed *speed,
287 bool *autoneg)
288 {
289 s32 status = IXGBE_SUCCESS;
290 u32 autoc = 0;
291
483 * disable the adapter's ability to send PAUSE frames.
484 */
485 fctrl_reg |= IXGBE_FCTRL_RFCE;
486 break;
487 case ixgbe_fc_tx_pause:
488 /*
489 * Tx Flow control is enabled, and Rx Flow control is
490 * disabled by software override.
491 */
492 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
493 break;
494 case ixgbe_fc_full:
495 /* Flow control (both Rx and Tx) is enabled by SW override. */
496 fctrl_reg |= IXGBE_FCTRL_RFCE;
497 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
498 break;
499 default:
500 DEBUGOUT("Flow control param set incorrectly\n");
501 ret_val = IXGBE_ERR_CONFIG;
502 goto out;
503 }
504
505 /* Set 802.3x based flow control settings. */
506 fctrl_reg |= IXGBE_FCTRL_DPF;
507 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
508 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
509
510 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
511 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
512 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
513 hw->fc.high_water[i]) {
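/* water marks are kept in KB; the FCRTL/FCRTH registers take byte counts, hence the shift by 10 */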
514 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
515 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
516 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
517 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
518 } else {
519 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
520 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
521 }
522
631 bool link_up_wait_to_complete)
632 {
633 u32 links_reg;
634 u32 i;
635 u16 link_reg, adapt_comp_reg;
636
637 DEBUGFUNC("ixgbe_check_mac_link_82598");
638
639 /*
640 * SERDES PHY requires us to read link status from undocumented
641 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
642 * indicates link down. 0xC00C is read to check that the XAUI lanes
643 * are active. Bit 0 clear indicates active; set indicates inactive.
644 */
645 if (hw->phy.type == ixgbe_phy_nl) {
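/* read the link register twice; the status bit appears to be latched, so the first read can return stale data */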
646 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
647 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
648 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
649 &adapt_comp_reg);
650 if (link_up_wait_to_complete) {
651 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
652 if ((link_reg & 1) &&
653 ((adapt_comp_reg & 1) == 0)) {
654 *link_up = TRUE;
655 break;
656 } else {
657 *link_up = FALSE;
658 }
659 msec_delay(100);
660 hw->phy.ops.read_reg(hw, 0xC79F,
661 IXGBE_TWINAX_DEV,
662 &link_reg);
663 hw->phy.ops.read_reg(hw, 0xC00C,
664 IXGBE_TWINAX_DEV,
665 &adapt_comp_reg);
666 }
667 } else {
668 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
669 *link_up = TRUE;
670 else
671 *link_up = FALSE;
672 }
673
674 if (*link_up == FALSE)
675 goto out;
676 }
677
678 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
679 if (link_up_wait_to_complete) {
680 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
681 if (links_reg & IXGBE_LINKS_UP) {
682 *link_up = TRUE;
683 break;
684 } else {
685 *link_up = FALSE;
686 }
687 msec_delay(100);
688 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
689 }
690 } else {
691 if (links_reg & IXGBE_LINKS_UP)
692 *link_up = TRUE;
693 else
694 *link_up = FALSE;
695 }
696
697 if (links_reg & IXGBE_LINKS_SPEED)
698 *speed = IXGBE_LINK_SPEED_10GB_FULL;
699 else
700 *speed = IXGBE_LINK_SPEED_1GB_FULL;
701
702 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
703 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
704 *link_up = FALSE;
705
706 out:
707 return IXGBE_SUCCESS;
708 }
709
710 /**
711 * ixgbe_setup_mac_link_82598 - Set MAC link speed
712 * @hw: pointer to hardware structure
713 * @speed: new link speed
714 * @autoneg: TRUE if autonegotiation enabled
715 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
716 *
717 * Set the link speed in the AUTOC register and restarts link.
718 **/
719 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
720 ixgbe_link_speed speed, bool autoneg,
721 bool autoneg_wait_to_complete)
722 {
723 s32 status;
724 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
725 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
726 u32 autoc = curr_autoc;
727 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
728
729 DEBUGFUNC("ixgbe_setup_mac_link_82598");
730
731 /* Check to see if speed passed in is supported. */
732 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
733 if (status != IXGBE_SUCCESS)
734 return (status);
735 speed &= link_capabilities;
736
737 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
738 status = IXGBE_ERR_LINK_SETUP;
739
740 /* Set KX4/KX support according to speed requested */
741 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
742 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
743 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
744 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
745 autoc |= IXGBE_AUTOC_KX4_SUPP;
746 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
747 autoc |= IXGBE_AUTOC_KX_SUPP;
748 if (autoc != curr_autoc)
749 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
750 }
751
752 if (status == IXGBE_SUCCESS) {
753 /*
754 * Setup and restart the link based on the new values in
755 * ixgbe_hw This will write the AUTOC register based on the new
756 * stored values
757 */
758 status = ixgbe_start_mac_link_82598(hw,
759 autoneg_wait_to_complete);
760 }
761
762 return status;
763 }
764
765
766 /**
767 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
768 * @hw: pointer to hardware structure
769 * @speed: new link speed
770 * @autoneg: TRUE if autonegotiation enabled
771 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
772 *
773 * Sets the link speed in the AUTOC register in the MAC and restarts link.
774 **/
775 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
776 ixgbe_link_speed speed,
777 bool autoneg,
778 bool autoneg_wait_to_complete)
779 {
780 s32 status;
781
782 DEBUGFUNC("ixgbe_setup_copper_link_82598");
783
784 /* Setup the PHY according to input speed */
785 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
786 autoneg_wait_to_complete);
787 if (status == IXGBE_SUCCESS) {
788 /* Set up MAC */
789 status =
790 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
791 }
792
793 return status;
794 }
795
796 /**
797 * ixgbe_reset_hw_82598 - Performs hardware reset
798 * @hw: pointer to hardware structure
799 *
800 * Resets the hardware by resetting the transmit and receive units, masks and
801 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
802 * reset.
803 **/
804 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
805 {
806 s32 status = IXGBE_SUCCESS;
807 s32 phy_status = IXGBE_SUCCESS;
808 u32 ctrl;
809 u32 gheccr;
810 u32 i;
811 u32 autoc;
1089 * @reg: atlas register to write
1090 * @val: value to write
1091 *
1092 * Performs write operation to Atlas analog register specified.
1093 **/
1094 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1095 {
1096 u32 atlas_ctl;
1097
1098 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1099
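/* ATLASCTL write format: analog register offset in bits 15:8, data byte in bits 7:0 */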
1100 atlas_ctl = (reg << 8) | val;
1101 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1102 IXGBE_WRITE_FLUSH(hw);
1103 usec_delay(10);
1104
1105 return IXGBE_SUCCESS;
1106 }
1107
1108 /**
1109 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1110 * @hw: pointer to hardware structure
1111 * @byte_offset: EEPROM byte offset to read
1112 * @eeprom_data: value read
1113 *
1114 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1115 **/
1116 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1117 u8 *eeprom_data)
1118 {
1119 s32 status = IXGBE_SUCCESS;
1120 u16 sfp_addr = 0;
1121 u16 sfp_data = 0;
1122 u16 sfp_stat = 0;
1123 u32 i;
1124
1125 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1126
1127 if (hw->phy.type == ixgbe_phy_nl) {
1128 /*
1129 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1130 * 0xC30D. These registers are used to talk to the SFP+
1131 * module's EEPROM through the SDA/SCL (I2C) interface.
1132 */
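/* build the address word: device address in the upper byte, EEPROM byte offset in the lower, then set the read bit */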
1133 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1134 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1135 hw->phy.ops.write_reg(hw,
1136 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1137 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1138 sfp_addr);
1139
1140 /* Poll status */
1141 for (i = 0; i < 100; i++) {
1142 hw->phy.ops.read_reg(hw,
1143 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1144 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1145 &sfp_stat);
1146 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1147 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1148 break;
1149 msec_delay(10);
1150 }
1151
1152 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1153 DEBUGOUT("EEPROM read did not pass.\n");
1154 status = IXGBE_ERR_SFP_NOT_PRESENT;
1155 goto out;
1156 }
1157
1158 /* Read data */
1159 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1160 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1161
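/* the EEPROM byte is returned in the upper byte of the 16-bit data register */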
1162 *eeprom_data = (u8)(sfp_data >> 8);
1163 } else {
1164 status = IXGBE_ERR_PHY;
1165 goto out;
1166 }
1167
1168 out:
1169 return status;
1170 }
1171
1172 /**
1173 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1174 * @hw: pointer to hardware structure
1175 *
1176 * Determines physical layer capabilities of the current configuration.
1177 **/
1178 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1179 {
1180 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1181 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1182 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1183 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1184 u16 ext_ability = 0;
1185
1186 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1187
1188 hw->phy.ops.identify(hw);
1189
1190 /* Copper PHY must be checked before AUTOC LMS to determine correct
1191 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1192 switch (hw->phy.type) {
1345 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1346 u32 headroom, int strategy)
1347 {
1348 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1349 u8 i = 0;
1350 UNREFERENCED_1PARAMETER(headroom);
1351
1352 if (!num_pb)
1353 return;
1354
1355 /* Setup Rx packet buffer sizes */
1356 switch (strategy) {
1357 case PBA_STRATEGY_WEIGHTED:
1358 /* Setup the first four at 80KB */
1359 rxpktsize = IXGBE_RXPBSIZE_80KB;
1360 for (; i < 4; i++)
1361 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1362 /* Setup the last four at 48KB...don't re-init i */
1363 rxpktsize = IXGBE_RXPBSIZE_48KB;
1364 /* Fall Through */
1365 /* FALLTHRU */
1366 case PBA_STRATEGY_EQUAL:
1367 default:
1368 /* Divide the remaining Rx packet buffer evenly among the TCs */
1369 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1370 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1371 break;
1372 }
1373
1374 /* Setup Tx packet buffer sizes */
1375 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1376 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1377 }
|
1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 #define IXGBE_82598_MAX_TX_QUEUES 32
42 #define IXGBE_82598_MAX_RX_QUEUES 64
43 #define IXGBE_82598_RAR_ENTRIES 16
44 #define IXGBE_82598_MC_TBL_SIZE 128
45 #define IXGBE_82598_VFT_TBL_SIZE 128
46 #define IXGBE_82598_RX_PB_SIZE 512
47
48 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
49 ixgbe_link_speed *speed,
50 bool *autoneg);
51 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
52 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
53 bool autoneg_wait_to_complete);
54 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
55 ixgbe_link_speed *speed, bool *link_up,
56 bool link_up_wait_to_complete);
57 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
58 ixgbe_link_speed speed,
59 bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
61 ixgbe_link_speed speed,
62 bool autoneg_wait_to_complete);
63 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
66 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
67 u32 headroom, int strategy);
68 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 u8 *sff8472_data);
70 /**
71 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
72 * @hw: pointer to the HW structure
73 *
74 * The defaults for 82598 should be in the range of 50us to 50ms;
75 * however, the hardware default for these parts is 500us to 1ms, which is less
76 * than the 10ms recommended by the PCIe spec. To address this we need to
77 * increase the value to either 10ms to 250ms for capability version 1 config,
78 * or 16ms to 55ms for version 2.
79 **/
80 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
81 {
82 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
83 u16 pcie_devctl2;
84
85 /* only take action if timeout value is defaulted to 0 */
86 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
87 goto out;
88
89 /*
111
112 /**
113 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
114 * @hw: pointer to hardware structure
115 *
116 * Initialize the function pointers and assign the MAC type for 82598.
117 * Does not touch the hardware.
118 **/
119 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
120 {
121 struct ixgbe_mac_info *mac = &hw->mac;
122 struct ixgbe_phy_info *phy = &hw->phy;
123 s32 ret_val;
124
125 DEBUGFUNC("ixgbe_init_ops_82598");
126
127 ret_val = ixgbe_init_phy_ops_generic(hw);
128 ret_val = ixgbe_init_ops_generic(hw);
129
130 /* PHY */
131 phy->ops.init = ixgbe_init_phy_ops_82598;
132
133 /* MAC */
134 mac->ops.start_hw = ixgbe_start_hw_82598;
135 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
136 mac->ops.reset_hw = ixgbe_reset_hw_82598;
137 mac->ops.get_media_type = ixgbe_get_media_type_82598;
138 mac->ops.get_supported_physical_layer =
139 ixgbe_get_supported_physical_layer_82598;
140 mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
141 mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
142 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
143 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
144
145 /* RAR, Multicast, VLAN */
146 mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
147 mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
148 mac->ops.set_vfta = ixgbe_set_vfta_82598;
149 mac->ops.set_vlvf = NULL;
150 mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
151
152 /* Flow Control */
153 mac->ops.fc_enable = ixgbe_fc_enable_82598;
154
155 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
156 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
157 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
158 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
159 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
160 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
161 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
162
163 /* SFP+ Module */
164 phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
165 phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
166
167 /* Link */
168 mac->ops.check_link = ixgbe_check_mac_link_82598;
169 mac->ops.setup_link = ixgbe_setup_mac_link_82598;
170 mac->ops.flap_tx_laser = NULL;
171 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
172 mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
173
174 /* Manageability interface */
175 mac->ops.set_fw_drv_ver = NULL;
176
177 mac->ops.get_rtrup2tc = NULL;
178
179 return ret_val;
180 }
181
182 /**
183 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
184 * @hw: pointer to hardware structure
185 *
186 * Initialize any function pointers that were not able to be
187 * set during init_shared_code because the PHY/SFP type was
188 * not known. Perform the SFP init if necessary.
189 *
190 **/
191 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
192 {
193 struct ixgbe_mac_info *mac = &hw->mac;
194 struct ixgbe_phy_info *phy = &hw->phy;
195 s32 ret_val = IXGBE_SUCCESS;
196 u16 list_offset, data_offset;
197
198 DEBUGFUNC("ixgbe_init_phy_ops_82598");
199
200 /* Identify the PHY */
201 phy->ops.identify(hw);
202
203 /* Overwrite the link function pointers if copper PHY */
204 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
205 mac->ops.setup_link = ixgbe_setup_copper_link_82598;
206 mac->ops.get_link_capabilities =
207 ixgbe_get_copper_link_capabilities_generic;
208 }
209
210 switch (hw->phy.type) {
211 case ixgbe_phy_tn:
212 phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
213 phy->ops.check_link = ixgbe_check_phy_link_tnx;
214 phy->ops.get_firmware_version =
215 ixgbe_get_phy_firmware_version_tnx;
216 break;
217 case ixgbe_phy_nl:
218 phy->ops.reset = ixgbe_reset_phy_nl;
219
220 /* Call SFP+ identify routine to get the SFP+ module type */
221 ret_val = phy->ops.identify_sfp(hw);
222 if (ret_val != IXGBE_SUCCESS)
223 goto out;
224 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
225 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
226 goto out;
227 }
228
229 /* Check to see if SFP+ module is supported */
230 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
231 &list_offset,
232 &data_offset);
233 if (ret_val != IXGBE_SUCCESS) {
234 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
235 goto out;
236 }
237 break;
238 default:
243 return ret_val;
244 }
245
246 /**
247 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
248 * @hw: pointer to hardware structure
249 *
250 * Starts the hardware using the generic start_hw function.
251 * Disables relaxed ordering, then sets the PCIe completion timeout.
252 *
253 **/
254 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
255 {
256 u32 regval;
257 u32 i;
258 s32 ret_val = IXGBE_SUCCESS;
259
260 DEBUGFUNC("ixgbe_start_hw_82598");
261
262 ret_val = ixgbe_start_hw_generic(hw);
263 if (ret_val)
264 return ret_val;
265
266 /* Disable relaxed ordering */
267 for (i = 0; ((i < hw->mac.max_tx_queues) &&
268 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
269 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
270 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
271 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
272 }
273
274 for (i = 0; ((i < hw->mac.max_rx_queues) &&
275 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
276 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
277 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
278 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
279 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
280 }
281
282 /* set the completion timeout for the interface */
283 ixgbe_set_pcie_completion_timeout(hw);
284
285 return ret_val;
286 }
287
288 /**
289 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
290 * @hw: pointer to hardware structure
291 * @speed: pointer to link speed
292 * @autoneg: boolean auto-negotiation value
293 *
294 * Determines the link capabilities by reading the AUTOC register.
295 **/
296 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
297 ixgbe_link_speed *speed,
298 bool *autoneg)
299 {
300 s32 status = IXGBE_SUCCESS;
301 u32 autoc = 0;
302
494 * disable the adapter's ability to send PAUSE frames.
495 */
496 fctrl_reg |= IXGBE_FCTRL_RFCE;
497 break;
498 case ixgbe_fc_tx_pause:
499 /*
500 * Tx Flow control is enabled, and Rx Flow control is
501 * disabled by software override.
502 */
503 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
504 break;
505 case ixgbe_fc_full:
506 /* Flow control (both Rx and Tx) is enabled by SW override. */
507 fctrl_reg |= IXGBE_FCTRL_RFCE;
508 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
509 break;
510 default:
511 DEBUGOUT("Flow control param set incorrectly\n");
512 ret_val = IXGBE_ERR_CONFIG;
513 goto out;
514 break;
515 }
516
517 /* Set 802.3x based flow control settings. */
518 fctrl_reg |= IXGBE_FCTRL_DPF;
519 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
520 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
521
522 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
523 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
524 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
525 hw->fc.high_water[i]) {
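/* water marks are kept in KB; the FCRTL/FCRTH registers take byte counts, hence the shift by 10 */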
526 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
527 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
528 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
529 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
530 } else {
531 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
532 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
533 }
534
643 bool link_up_wait_to_complete)
644 {
645 u32 links_reg;
646 u32 i;
647 u16 link_reg, adapt_comp_reg;
648
649 DEBUGFUNC("ixgbe_check_mac_link_82598");
650
651 /*
652 * SERDES PHY requires us to read link status from undocumented
653 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
654 * indicates link down. 0xC00C is read to check that the XAUI lanes
655 * are active. Bit 0 clear indicates active; set indicates inactive.
656 */
657 if (hw->phy.type == ixgbe_phy_nl) {
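/* read the link register twice; the status bit appears to be latched, so the first read can return stale data */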
658 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
659 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
660 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
661 &adapt_comp_reg);
662 if (link_up_wait_to_complete) {
663 for (i = 0; i < hw->mac.max_link_up_time; i++) {
664 if ((link_reg & 1) &&
665 ((adapt_comp_reg & 1) == 0)) {
666 *link_up = TRUE;
667 break;
668 } else {
669 *link_up = FALSE;
670 }
671 msec_delay(100);
672 hw->phy.ops.read_reg(hw, 0xC79F,
673 IXGBE_TWINAX_DEV,
674 &link_reg);
675 hw->phy.ops.read_reg(hw, 0xC00C,
676 IXGBE_TWINAX_DEV,
677 &adapt_comp_reg);
678 }
679 } else {
680 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
681 *link_up = TRUE;
682 else
683 *link_up = FALSE;
684 }
685
686 if (*link_up == FALSE)
687 goto out;
688 }
689
690 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
691 if (link_up_wait_to_complete) {
692 for (i = 0; i < hw->mac.max_link_up_time; i++) {
693 if (links_reg & IXGBE_LINKS_UP) {
694 *link_up = TRUE;
695 break;
696 } else {
697 *link_up = FALSE;
698 }
699 msec_delay(100);
700 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
701 }
702 } else {
703 if (links_reg & IXGBE_LINKS_UP)
704 *link_up = TRUE;
705 else
706 *link_up = FALSE;
707 }
708
709 if (links_reg & IXGBE_LINKS_SPEED)
710 *speed = IXGBE_LINK_SPEED_10GB_FULL;
711 else
712 *speed = IXGBE_LINK_SPEED_1GB_FULL;
713
714 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
715 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
716 *link_up = FALSE;
717
718 out:
719 return IXGBE_SUCCESS;
720 }
721
722 /**
723 * ixgbe_setup_mac_link_82598 - Set MAC link speed
724 * @hw: pointer to hardware structure
725 * @speed: new link speed
726 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
727 *
728 * Set the link speed in the AUTOC register and restarts link.
729 **/
730 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
731 ixgbe_link_speed speed,
732 bool autoneg_wait_to_complete)
733 {
734 bool autoneg = FALSE;
735 s32 status = IXGBE_SUCCESS;
736 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
737 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
738 u32 autoc = curr_autoc;
739 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
740
741 DEBUGFUNC("ixgbe_setup_mac_link_82598");
742
743 /* Check to see if speed passed in is supported. */
744 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
745 speed &= link_capabilities;
746
747 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
748 status = IXGBE_ERR_LINK_SETUP;
749
750 /* Set KX4/KX support according to speed requested */
751 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
752 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
753 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
754 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
755 autoc |= IXGBE_AUTOC_KX4_SUPP;
756 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
757 autoc |= IXGBE_AUTOC_KX_SUPP;
758 if (autoc != curr_autoc)
759 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
760 }
761
762 if (status == IXGBE_SUCCESS) {
763 /*
764 * Setup and restart the link based on the new values in
765 * ixgbe_hw This will write the AUTOC register based on the new
766 * stored values
767 */
768 status = ixgbe_start_mac_link_82598(hw,
769 autoneg_wait_to_complete);
770 }
771
772 return status;
773 }
774
775
776 /**
777 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
778 * @hw: pointer to hardware structure
779 * @speed: new link speed
780 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
781 *
782 * Sets the link speed in the AUTOC register in the MAC and restarts link.
783 **/
784 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
785 ixgbe_link_speed speed,
786 bool autoneg_wait_to_complete)
787 {
788 s32 status;
789
790 DEBUGFUNC("ixgbe_setup_copper_link_82598");
791
792 /* Setup the PHY according to input speed */
793 status = hw->phy.ops.setup_link_speed(hw, speed,
794 autoneg_wait_to_complete);
795 /* Set up MAC */
796 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
797
798 return status;
799 }
800
801 /**
802 * ixgbe_reset_hw_82598 - Performs hardware reset
803 * @hw: pointer to hardware structure
804 *
805 * Resets the hardware by resetting the transmit and receive units, masks and
806 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
807 * reset.
808 **/
809 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
810 {
811 s32 status = IXGBE_SUCCESS;
812 s32 phy_status = IXGBE_SUCCESS;
813 u32 ctrl;
814 u32 gheccr;
815 u32 i;
816 u32 autoc;
1094 * @reg: atlas register to write
1095 * @val: value to write
1096 *
1097 * Performs write operation to Atlas analog register specified.
1098 **/
1099 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1100 {
1101 u32 atlas_ctl;
1102
1103 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1104
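/* ATLASCTL write format: analog register offset in bits 15:8, data byte in bits 7:0 */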
1105 atlas_ctl = (reg << 8) | val;
1106 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1107 IXGBE_WRITE_FLUSH(hw);
1108 usec_delay(10);
1109
1110 return IXGBE_SUCCESS;
1111 }
1112
1113 /**
1114 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1115 * @hw: pointer to hardware structure
1116 * @dev_addr: address to read from
1117 * @byte_offset: byte offset to read from dev_addr
1118 * @eeprom_data: value read
1119 *
1120 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1121 **/
1122 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1123 u8 byte_offset, u8 *eeprom_data)
1124 {
1125 s32 status = IXGBE_SUCCESS;
1126 u16 sfp_addr = 0;
1127 u16 sfp_data = 0;
1128 u16 sfp_stat = 0;
1129 u16 gssr;
1130 u32 i;
1131
1132 DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1133
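/* select the PHY semaphore that matches this port's LAN ID before using the MDIO interface */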
1134 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1135 gssr = IXGBE_GSSR_PHY1_SM;
1136 else
1137 gssr = IXGBE_GSSR_PHY0_SM;
1138
1139 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1140 return IXGBE_ERR_SWFW_SYNC;
1141
1142 if (hw->phy.type == ixgbe_phy_nl) {
1143 /*
1144 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1145 * 0xC30D. These registers are used to talk to the SFP+
1146 * module's EEPROM through the SDA/SCL (I2C) interface.
1147 */
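/* build the address word: device address in the upper byte, EEPROM byte offset in the lower, then set the read bit */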
1148 sfp_addr = (dev_addr << 8) + byte_offset;
1149 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1150 hw->phy.ops.write_reg_mdi(hw,
1151 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1152 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1153 sfp_addr);
1154
1155 /* Poll status */
1156 for (i = 0; i < 100; i++) {
1157 hw->phy.ops.read_reg_mdi(hw,
1158 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1159 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1160 &sfp_stat);
1161 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1162 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1163 break;
1164 msec_delay(10);
1165 }
1166
1167 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1168 DEBUGOUT("EEPROM read did not pass.\n");
1169 status = IXGBE_ERR_SFP_NOT_PRESENT;
1170 goto out;
1171 }
1172
1173 /* Read data */
1174 hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1175 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1176
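/* the EEPROM byte is returned in the upper byte of the 16-bit data register */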
1177 *eeprom_data = (u8)(sfp_data >> 8);
1178 } else {
1179 status = IXGBE_ERR_PHY;
1180 }
1181
1182 out:
1183 hw->mac.ops.release_swfw_sync(hw, gssr);
1184 return status;
1185 }
1186
1187 /**
1188 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1189 * @hw: pointer to hardware structure
1190 * @byte_offset: EEPROM byte offset to read
1191 * @eeprom_data: value read
1192 *
1193 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1194 **/
1195 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1196 u8 *eeprom_data)
1197 {
1198 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1199 byte_offset, eeprom_data);
1200 }
1201
1202 /**
1203 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1204 * @hw: pointer to hardware structure
1205 * @byte_offset: byte offset at address 0xA2
1206 * @sff8472_data: value read
1207 *
1208 * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
1209 **/
1210 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1211 u8 *sff8472_data)
1212 {
1213 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1214 byte_offset, sff8472_data);
1215 }
1216
1217 /**
1218 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1219 * @hw: pointer to hardware structure
1220 *
1221 * Determines physical layer capabilities of the current configuration.
1222 **/
1223 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1224 {
1225 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1226 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1227 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1228 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1229 u16 ext_ability = 0;
1230
1231 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1232
1233 hw->phy.ops.identify(hw);
1234
1235 /* Copper PHY must be checked before AUTOC LMS to determine correct
1236 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1237 switch (hw->phy.type) {
1390 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1391 u32 headroom, int strategy)
1392 {
1393 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1394 u8 i = 0;
1395 UNREFERENCED_1PARAMETER(headroom);
1396
1397 if (!num_pb)
1398 return;
1399
1400 /* Setup Rx packet buffer sizes */
1401 switch (strategy) {
1402 case PBA_STRATEGY_WEIGHTED:
1403 /* Setup the first four at 80KB */
1404 rxpktsize = IXGBE_RXPBSIZE_80KB;
1405 for (; i < 4; i++)
1406 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1407 /* Setup the last four at 48KB...don't re-init i */
1408 rxpktsize = IXGBE_RXPBSIZE_48KB;
1409 /* Fall Through */
1410 case PBA_STRATEGY_EQUAL:
1411 default:
1412 /* Divide the remaining Rx packet buffer evenly among the TCs */
1413 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1414 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1415 break;
1416 }
1417
1418 /* Setup Tx packet buffer sizes */
1419 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1420 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1421 }
1422
1423 /**
1424 * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1425 * @hw: pointer to hardware structure
1426 * @regval: register value to write to RXCTRL
1427 *
1428 * Enables the Rx DMA unit
1429 **/
1430 s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1431 {
1432 DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1433
1434 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1435
1436 return IXGBE_SUCCESS;
1437 }
|