Print this page
6064 ixgbe needs X550 support
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_common.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_common.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2012, Intel Corporation
3 + Copyright (c) 2001-2015, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 -/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
33 +/*$FreeBSD$*/
34 34
35 35 #include "ixgbe_common.h"
36 36 #include "ixgbe_phy.h"
37 +#include "ixgbe_dcb.h"
38 +#include "ixgbe_dcb_82599.h"
37 39 #include "ixgbe_api.h"
38 40
39 41 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 42 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 43 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 44 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 45 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 46 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 47 u16 count);
46 48 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 49 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 50 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 51 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 52
51 53 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 54 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 55 u16 *san_mac_offset);
54 56 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 57 u16 words, u16 *data);
56 58 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 59 u16 words, u16 *data);
58 60 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 61 u16 offset);
60 62
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
61 63 /**
62 64 * ixgbe_init_ops_generic - Inits function ptrs
63 65 * @hw: pointer to the hardware structure
64 66 *
65 67 * Initialize the function pointers.
66 68 **/
67 69 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
68 70 {
69 71 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 72 struct ixgbe_mac_info *mac = &hw->mac;
71 - u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
73 + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
72 74
73 75 DEBUGFUNC("ixgbe_init_ops_generic");
74 76
75 77 /* EEPROM */
76 - eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
78 + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
77 79 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 80 if (eec & IXGBE_EEC_PRES) {
79 - eeprom->ops.read = &ixgbe_read_eerd_generic;
80 - eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 + eeprom->ops.read = ixgbe_read_eerd_generic;
82 + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
81 83 } else {
82 - eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
84 + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
83 85 eeprom->ops.read_buffer =
84 - &ixgbe_read_eeprom_buffer_bit_bang_generic;
86 + ixgbe_read_eeprom_buffer_bit_bang_generic;
85 87 }
86 - eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 - eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 + eeprom->ops.write = ixgbe_write_eeprom_generic;
89 + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
88 90 eeprom->ops.validate_checksum =
89 - &ixgbe_validate_eeprom_checksum_generic;
90 - eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 - eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
91 + ixgbe_validate_eeprom_checksum_generic;
92 + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
93 + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
92 94
93 95 /* MAC */
94 - mac->ops.init_hw = &ixgbe_init_hw_generic;
96 + mac->ops.init_hw = ixgbe_init_hw_generic;
95 97 mac->ops.reset_hw = NULL;
96 - mac->ops.start_hw = &ixgbe_start_hw_generic;
97 - mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 + mac->ops.start_hw = ixgbe_start_hw_generic;
99 + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
98 100 mac->ops.get_media_type = NULL;
99 101 mac->ops.get_supported_physical_layer = NULL;
100 - mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 - mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 - mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 - mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 - mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 - mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 - mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
102 + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
103 + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
104 + mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
105 + mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
106 + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
107 + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
108 + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
109 + mac->ops.prot_autoc_read = prot_autoc_read_generic;
110 + mac->ops.prot_autoc_write = prot_autoc_write_generic;
107 111
108 112 /* LEDs */
109 - mac->ops.led_on = &ixgbe_led_on_generic;
110 - mac->ops.led_off = &ixgbe_led_off_generic;
111 - mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 - mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
113 + mac->ops.led_on = ixgbe_led_on_generic;
114 + mac->ops.led_off = ixgbe_led_off_generic;
115 + mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
116 + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
113 117
114 118 /* RAR, Multicast, VLAN */
115 - mac->ops.set_rar = &ixgbe_set_rar_generic;
116 - mac->ops.clear_rar = &ixgbe_clear_rar_generic;
119 + mac->ops.set_rar = ixgbe_set_rar_generic;
120 + mac->ops.clear_rar = ixgbe_clear_rar_generic;
117 121 mac->ops.insert_mac_addr = NULL;
118 122 mac->ops.set_vmdq = NULL;
119 123 mac->ops.clear_vmdq = NULL;
120 - mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 - mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 - mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 - mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 - mac->ops.disable_mc = &ixgbe_disable_mc_generic;
124 + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
125 + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
126 + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
127 + mac->ops.enable_mc = ixgbe_enable_mc_generic;
128 + mac->ops.disable_mc = ixgbe_disable_mc_generic;
125 129 mac->ops.clear_vfta = NULL;
126 130 mac->ops.set_vfta = NULL;
127 131 mac->ops.set_vlvf = NULL;
128 132 mac->ops.init_uta_tables = NULL;
133 + mac->ops.enable_rx = ixgbe_enable_rx_generic;
134 + mac->ops.disable_rx = ixgbe_disable_rx_generic;
129 135
130 136 /* Flow Control */
131 - mac->ops.fc_enable = &ixgbe_fc_enable_generic;
137 + mac->ops.fc_enable = ixgbe_fc_enable_generic;
138 + mac->ops.setup_fc = ixgbe_setup_fc_generic;
132 139
133 140 /* Link */
134 141 mac->ops.get_link_capabilities = NULL;
135 142 mac->ops.setup_link = NULL;
136 143 mac->ops.check_link = NULL;
144 + mac->ops.dmac_config = NULL;
145 + mac->ops.dmac_update_tcs = NULL;
146 + mac->ops.dmac_config_tcs = NULL;
137 147
138 148 return IXGBE_SUCCESS;
139 149 }
140 150
141 151 /**
142 - * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143 - * control
144 - * @hw: pointer to hardware structure
152 + * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
153 + * of flow control
154 + * @hw: pointer to hardware structure
145 155 *
146 - * There are several phys that do not support autoneg flow control. This
147 - * function check the device id to see if the associated phy supports
148 - * autoneg flow control.
156 + * This function returns TRUE if the device supports flow control
157 + * autonegotiation, and FALSE if it does not.
158 + *
149 159 **/
150 -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
160 +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 161 {
162 + bool supported = FALSE;
163 + ixgbe_link_speed speed;
164 + bool link_up;
152 165
153 166 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154 167
155 - switch (hw->device_id) {
156 - case IXGBE_DEV_ID_X540T:
157 - case IXGBE_DEV_ID_X540T1:
158 - return IXGBE_SUCCESS;
159 - case IXGBE_DEV_ID_82599_T3_LOM:
160 - return IXGBE_SUCCESS;
168 + switch (hw->phy.media_type) {
169 + case ixgbe_media_type_fiber_fixed:
170 + case ixgbe_media_type_fiber_qsfp:
171 + case ixgbe_media_type_fiber:
172 + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
173 + /* if link is down, assume supported */
174 + if (link_up)
175 + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
176 + TRUE : FALSE;
177 + else
178 + supported = TRUE;
179 + break;
180 + case ixgbe_media_type_backplane:
181 + supported = TRUE;
182 + break;
183 + case ixgbe_media_type_copper:
184 + /* only some copper devices support flow control autoneg */
185 + switch (hw->device_id) {
186 + case IXGBE_DEV_ID_82599_T3_LOM:
187 + case IXGBE_DEV_ID_X540T:
188 + case IXGBE_DEV_ID_X540T1:
189 + case IXGBE_DEV_ID_X540_BYPASS:
190 + case IXGBE_DEV_ID_X550T:
191 + case IXGBE_DEV_ID_X550T1:
192 + case IXGBE_DEV_ID_X550EM_X_10G_T:
193 + supported = TRUE;
194 + break;
195 + default:
196 + supported = FALSE;
197 + }
161 198 default:
162 - return IXGBE_ERR_FC_NOT_SUPPORTED;
199 + break;
163 200 }
201 +
202 + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
203 + "Device %x does not support flow control autoneg",
204 + hw->device_id);
205 + return supported;
164 206 }
165 207
166 208 /**
167 - * ixgbe_setup_fc - Set up flow control
209 + * ixgbe_setup_fc_generic - Set up flow control
168 210 * @hw: pointer to hardware structure
169 211 *
170 212 * Called at init time to set up flow control.
171 213 **/
172 -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
214 +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
173 215 {
174 216 s32 ret_val = IXGBE_SUCCESS;
175 217 u32 reg = 0, reg_bp = 0;
176 218 u16 reg_cu = 0;
219 + bool locked = FALSE;
177 220
178 - DEBUGFUNC("ixgbe_setup_fc");
221 + DEBUGFUNC("ixgbe_setup_fc_generic");
179 222
180 - /*
181 - * Validate the requested mode. Strict IEEE mode does not allow
182 - * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 - */
223 + /* Validate the requested mode */
184 224 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 - DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
225 + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
226 + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 227 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 228 goto out;
188 229 }
189 230
190 231 /*
191 232 * 10gig parts do not have a word in the EEPROM to determine the
192 233 * default flow control setting, so we explicitly set it to full.
193 234 */
194 235 if (hw->fc.requested_mode == ixgbe_fc_default)
195 236 hw->fc.requested_mode = ixgbe_fc_full;
196 237
197 238 /*
198 239 * Set up the 1G and 10G flow control advertisement registers so the
199 240 * HW will be able to do fc autoneg once the cable is plugged in. If
200 241 * we link at 10G, the 1G advertisement is harmless and vice versa.
201 242 */
202 243 switch (hw->phy.media_type) {
203 - case ixgbe_media_type_fiber:
204 244 case ixgbe_media_type_backplane:
245 + /* some MAC's need RMW protection on AUTOC */
246 + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
247 + if (ret_val != IXGBE_SUCCESS)
248 + goto out;
249 +
250 + /* only backplane uses autoc so fall though */
251 + case ixgbe_media_type_fiber_fixed:
252 + case ixgbe_media_type_fiber_qsfp:
253 + case ixgbe_media_type_fiber:
205 254 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
255 +
207 256 break;
208 257 case ixgbe_media_type_copper:
209 258 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
210 259 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
211 260 break;
212 261 default:
213 262 break;
214 263 }
215 264
216 265 /*
217 266 * The possible values of fc.requested_mode are:
218 267 * 0: Flow control is completely disabled
219 268 * 1: Rx flow control is enabled (we can receive pause frames,
220 269 * but not send pause frames).
221 270 * 2: Tx flow control is enabled (we can send pause frames but
222 271 * we do not support receiving pause frames).
223 272 * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 273 * other: Invalid.
225 274 */
226 275 switch (hw->fc.requested_mode) {
227 276 case ixgbe_fc_none:
228 277 /* Flow control completely disabled by software override. */
229 278 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 279 if (hw->phy.media_type == ixgbe_media_type_backplane)
231 280 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 281 IXGBE_AUTOC_ASM_PAUSE);
233 282 else if (hw->phy.media_type == ixgbe_media_type_copper)
234 283 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 284 break;
236 285 case ixgbe_fc_tx_pause:
237 286 /*
238 287 * Tx Flow control is enabled, and Rx Flow control is
239 288 * disabled by software override.
240 289 */
241 290 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 291 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 292 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 293 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 294 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 295 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 296 reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 297 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 298 }
250 299 break;
251 300 case ixgbe_fc_rx_pause:
252 301 /*
253 302 * Rx Flow control is enabled and Tx Flow control is
254 303 * disabled by software override. Since there really
255 304 * isn't a way to advertise that we are capable of RX
256 305 * Pause ONLY, we will advertise that we support both
257 306 * symmetric and asymmetric Rx PAUSE, as such we fall
258 307 * through to the fc_full statement. Later, we will
259 308 * disable the adapter's ability to send PAUSE frames.
260 309 */
↓ open down ↓ |
44 lines elided |
↑ open up ↑ |
261 310 case ixgbe_fc_full:
262 311 /* Flow control (both Rx and Tx) is enabled by SW override. */
263 312 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 313 if (hw->phy.media_type == ixgbe_media_type_backplane)
265 314 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 315 IXGBE_AUTOC_ASM_PAUSE;
267 316 else if (hw->phy.media_type == ixgbe_media_type_copper)
268 317 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 318 break;
270 319 default:
271 - DEBUGOUT("Flow control param set incorrectly\n");
320 + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
321 + "Flow control param set incorrectly\n");
272 322 ret_val = IXGBE_ERR_CONFIG;
273 323 goto out;
324 + break;
274 325 }
275 326
276 - if (hw->mac.type != ixgbe_mac_X540) {
327 + if (hw->mac.type < ixgbe_mac_X540) {
277 328 /*
278 329 * Enable auto-negotiation between the MAC & PHY;
279 330 * the MAC will advertise clause 37 flow control.
280 331 */
281 332 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 333 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283 334
284 335 /* Disable AN timeout */
285 336 if (hw->fc.strict_ieee)
286 337 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287 338
288 339 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
289 340 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 341 }
291 342
292 343 /*
293 344 * AUTOC restart handles negotiation of 1G and 10G on backplane
294 345 * and copper. There is no need to set the PCS1GCTL register.
295 346 *
296 347 */
297 348 if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 349 reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
350 + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
351 + if (ret_val)
352 + goto out;
300 353 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
301 - (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
354 + (ixgbe_device_supports_autoneg_fc(hw))) {
302 355 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
303 356 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
304 357 }
305 358
306 - DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
359 + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
307 360 out:
308 361 return ret_val;
309 362 }
310 363
311 364 /**
312 365 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
313 366 * @hw: pointer to hardware structure
314 367 *
315 368 * Starts the hardware by filling the bus info structure and media type, clears
316 369 * all on chip counters, initializes receive address registers, multicast
317 370 * table, VLAN filter table, calls routine to set up link and flow control
318 371 * settings, and leaves transmit and receive units disabled and uninitialized
319 372 **/
320 373 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
321 374 {
322 375 s32 ret_val;
323 376 u32 ctrl_ext;
324 377
325 378 DEBUGFUNC("ixgbe_start_hw_generic");
326 379
327 380 /* Set the media type */
328 381 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
329 382
330 383 /* PHY ops initialization must be done in reset_hw() */
331 384
332 385 /* Clear the VLAN filter table */
333 386 hw->mac.ops.clear_vfta(hw);
334 387
335 388 /* Clear statistics registers */
336 389 hw->mac.ops.clear_hw_cntrs(hw);
337 390
338 391 /* Set No Snoop Disable */
339 392 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
340 393 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
341 394 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
342 395 IXGBE_WRITE_FLUSH(hw);
343 396
344 397 /* Setup flow control */
345 398 ret_val = ixgbe_setup_fc(hw);
346 399 if (ret_val != IXGBE_SUCCESS)
347 400 goto out;
348 401
349 402 /* Clear adapter stopped flag */
350 403 hw->adapter_stopped = FALSE;
351 404
352 405 out:
353 406 return ret_val;
354 407 }
355 408
356 409 /**
357 410 * ixgbe_start_hw_gen2 - Init sequence for common device family
358 411 * @hw: pointer to hw structure
359 412 *
360 413 * Performs the init sequence common to the second generation
361 414 * of 10 GbE devices.
362 415 * Devices in the second generation:
363 416 * 82599
364 417 * X540
365 418 **/
366 419 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
367 420 {
368 421 u32 i;
369 422 u32 regval;
370 423
371 424 /* Clear the rate limiters */
372 425 for (i = 0; i < hw->mac.max_tx_queues; i++) {
373 426 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
374 427 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
375 428 }
376 429 IXGBE_WRITE_FLUSH(hw);
377 430
378 431 /* Disable relaxed ordering */
379 432 for (i = 0; i < hw->mac.max_tx_queues; i++) {
380 433 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
381 434 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
382 435 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
383 436 }
384 437
385 438 for (i = 0; i < hw->mac.max_rx_queues; i++) {
386 439 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
387 440 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
388 441 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
389 442 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
390 443 }
391 444
392 445 return IXGBE_SUCCESS;
393 446 }
394 447
395 448 /**
396 449 * ixgbe_init_hw_generic - Generic hardware initialization
397 450 * @hw: pointer to hardware structure
398 451 *
399 452 * Initialize the hardware by resetting the hardware, filling the bus info
400 453 * structure and media type, clears all on chip counters, initializes receive
401 454 * address registers, multicast table, VLAN filter table, calls routine to set
402 455 * up link and flow control settings, and leaves transmit and receive units
403 456 * disabled and uninitialized
404 457 **/
405 458 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
406 459 {
407 460 s32 status;
408 461
409 462 DEBUGFUNC("ixgbe_init_hw_generic");
410 463
411 464 /* Reset the hardware */
412 465 status = hw->mac.ops.reset_hw(hw);
413 466
414 467 if (status == IXGBE_SUCCESS) {
415 468 /* Start the HW */
416 469 status = hw->mac.ops.start_hw(hw);
417 470 }
418 471
419 472 return status;
420 473 }
421 474
422 475 /**
423 476 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
424 477 * @hw: pointer to hardware structure
↓ open down ↓ |
108 lines elided |
↑ open up ↑ |
425 478 *
426 479 * Clears all hardware statistics counters by reading them from the hardware
427 480 * Statistics counters are clear on read.
428 481 **/
429 482 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
430 483 {
431 484 u16 i = 0;
432 485
433 486 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
434 487
435 - (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 - (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 - (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 - (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
488 + IXGBE_READ_REG(hw, IXGBE_CRCERRS);
489 + IXGBE_READ_REG(hw, IXGBE_ILLERRC);
490 + IXGBE_READ_REG(hw, IXGBE_ERRBC);
491 + IXGBE_READ_REG(hw, IXGBE_MSPDC);
439 492 for (i = 0; i < 8; i++)
440 - (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
493 + IXGBE_READ_REG(hw, IXGBE_MPC(i));
441 494
442 - (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
443 - (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
444 - (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
445 - (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
495 + IXGBE_READ_REG(hw, IXGBE_MLFC);
496 + IXGBE_READ_REG(hw, IXGBE_MRFC);
497 + IXGBE_READ_REG(hw, IXGBE_RLEC);
498 + IXGBE_READ_REG(hw, IXGBE_LXONTXC);
499 + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
447 500 if (hw->mac.type >= ixgbe_mac_82599EB) {
448 - (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
501 + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
502 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
450 503 } else {
451 - (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
504 + IXGBE_READ_REG(hw, IXGBE_LXONRXC);
505 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
453 506 }
454 507
455 508 for (i = 0; i < 8; i++) {
456 - (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
509 + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
510 + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
458 511 if (hw->mac.type >= ixgbe_mac_82599EB) {
459 - (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
512 + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
513 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
461 514 } else {
462 - (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
515 + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
516 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
464 517 }
465 518 }
466 519 if (hw->mac.type >= ixgbe_mac_82599EB)
467 520 for (i = 0; i < 8; i++)
468 - (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 - (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
470 - (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
471 - (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
472 - (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
473 - (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 - (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 - (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
476 - (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
477 - (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
478 - (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
479 - (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
480 - (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
481 - (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 - (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
521 + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
522 + IXGBE_READ_REG(hw, IXGBE_PRC64);
523 + IXGBE_READ_REG(hw, IXGBE_PRC127);
524 + IXGBE_READ_REG(hw, IXGBE_PRC255);
525 + IXGBE_READ_REG(hw, IXGBE_PRC511);
526 + IXGBE_READ_REG(hw, IXGBE_PRC1023);
527 + IXGBE_READ_REG(hw, IXGBE_PRC1522);
528 + IXGBE_READ_REG(hw, IXGBE_GPRC);
529 + IXGBE_READ_REG(hw, IXGBE_BPRC);
530 + IXGBE_READ_REG(hw, IXGBE_MPRC);
531 + IXGBE_READ_REG(hw, IXGBE_GPTC);
532 + IXGBE_READ_REG(hw, IXGBE_GORCL);
533 + IXGBE_READ_REG(hw, IXGBE_GORCH);
534 + IXGBE_READ_REG(hw, IXGBE_GOTCL);
535 + IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 536 if (hw->mac.type == ixgbe_mac_82598EB)
484 537 for (i = 0; i < 8; i++)
485 - (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 - (void) IXGBE_READ_REG(hw, IXGBE_RUC);
487 - (void) IXGBE_READ_REG(hw, IXGBE_RFC);
488 - (void) IXGBE_READ_REG(hw, IXGBE_ROC);
489 - (void) IXGBE_READ_REG(hw, IXGBE_RJC);
490 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 - (void) IXGBE_READ_REG(hw, IXGBE_TORL);
494 - (void) IXGBE_READ_REG(hw, IXGBE_TORH);
495 - (void) IXGBE_READ_REG(hw, IXGBE_TPR);
496 - (void) IXGBE_READ_REG(hw, IXGBE_TPT);
497 - (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
498 - (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
499 - (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
500 - (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
501 - (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 - (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 - (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
504 - (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
538 + IXGBE_READ_REG(hw, IXGBE_RNBC(i));
539 + IXGBE_READ_REG(hw, IXGBE_RUC);
540 + IXGBE_READ_REG(hw, IXGBE_RFC);
541 + IXGBE_READ_REG(hw, IXGBE_ROC);
542 + IXGBE_READ_REG(hw, IXGBE_RJC);
543 + IXGBE_READ_REG(hw, IXGBE_MNGPRC);
544 + IXGBE_READ_REG(hw, IXGBE_MNGPDC);
545 + IXGBE_READ_REG(hw, IXGBE_MNGPTC);
546 + IXGBE_READ_REG(hw, IXGBE_TORL);
547 + IXGBE_READ_REG(hw, IXGBE_TORH);
548 + IXGBE_READ_REG(hw, IXGBE_TPR);
549 + IXGBE_READ_REG(hw, IXGBE_TPT);
550 + IXGBE_READ_REG(hw, IXGBE_PTC64);
551 + IXGBE_READ_REG(hw, IXGBE_PTC127);
552 + IXGBE_READ_REG(hw, IXGBE_PTC255);
553 + IXGBE_READ_REG(hw, IXGBE_PTC511);
554 + IXGBE_READ_REG(hw, IXGBE_PTC1023);
555 + IXGBE_READ_REG(hw, IXGBE_PTC1522);
556 + IXGBE_READ_REG(hw, IXGBE_MPTC);
557 + IXGBE_READ_REG(hw, IXGBE_BPTC);
505 558 for (i = 0; i < 16; i++) {
506 - (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 - (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
559 + IXGBE_READ_REG(hw, IXGBE_QPRC(i));
560 + IXGBE_READ_REG(hw, IXGBE_QPTC(i));
508 561 if (hw->mac.type >= ixgbe_mac_82599EB) {
509 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 - (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
562 + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
563 + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
564 + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
565 + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
566 + IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
514 567 } else {
515 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
568 + IXGBE_READ_REG(hw, IXGBE_QBRC(i));
569 + IXGBE_READ_REG(hw, IXGBE_QBTC(i));
517 570 }
518 571 }
519 572
520 - if (hw->mac.type == ixgbe_mac_X540) {
573 + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
521 574 if (hw->phy.id == 0)
522 - (void) ixgbe_identify_phy(hw);
575 + ixgbe_identify_phy(hw);
523 576 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 577 IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 578 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 579 IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 580 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 581 IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 582 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 583 IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 584 }
532 585
533 586 return IXGBE_SUCCESS;
534 587 }
535 588
536 589 /**
537 590 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538 591 * @hw: pointer to hardware structure
539 592 * @pba_num: stores the part number string from the EEPROM
540 593 * @pba_num_size: part number string buffer length
541 594 *
542 595 * Reads the part number string from the EEPROM.
543 596 **/
544 597 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
545 598 u32 pba_num_size)
546 599 {
547 600 s32 ret_val;
548 601 u16 data;
549 602 u16 pba_ptr;
550 603 u16 offset;
551 604 u16 length;
552 605
553 606 DEBUGFUNC("ixgbe_read_pba_string_generic");
554 607
555 608 if (pba_num == NULL) {
556 609 DEBUGOUT("PBA string buffer was null\n");
557 610 return IXGBE_ERR_INVALID_ARGUMENT;
558 611 }
559 612
560 613 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
561 614 if (ret_val) {
562 615 DEBUGOUT("NVM Read Error\n");
563 616 return ret_val;
564 617 }
565 618
566 619 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
567 620 if (ret_val) {
568 621 DEBUGOUT("NVM Read Error\n");
569 622 return ret_val;
570 623 }
571 624
572 625 /*
573 626 * if data is not ptr guard the PBA must be in legacy format which
574 627 * means pba_ptr is actually our second data word for the PBA number
575 628 * and we can decode it into an ascii string
576 629 */
577 630 if (data != IXGBE_PBANUM_PTR_GUARD) {
578 631 DEBUGOUT("NVM PBA number is not stored as string\n");
579 632
580 633 /* we will need 11 characters to store the PBA */
581 634 if (pba_num_size < 11) {
582 635 DEBUGOUT("PBA string buffer too small\n");
583 636 return IXGBE_ERR_NO_SPACE;
584 637 }
585 638
586 639 /* extract hex string from data and pba_ptr */
587 640 pba_num[0] = (data >> 12) & 0xF;
588 641 pba_num[1] = (data >> 8) & 0xF;
589 642 pba_num[2] = (data >> 4) & 0xF;
590 643 pba_num[3] = data & 0xF;
591 644 pba_num[4] = (pba_ptr >> 12) & 0xF;
592 645 pba_num[5] = (pba_ptr >> 8) & 0xF;
593 646 pba_num[6] = '-';
594 647 pba_num[7] = 0;
595 648 pba_num[8] = (pba_ptr >> 4) & 0xF;
596 649 pba_num[9] = pba_ptr & 0xF;
597 650
598 651 /* put a null character on the end of our string */
599 652 pba_num[10] = '\0';
600 653
601 654 /* switch all the data but the '-' to hex char */
602 655 for (offset = 0; offset < 10; offset++) {
603 656 if (pba_num[offset] < 0xA)
604 657 pba_num[offset] += '0';
605 658 else if (pba_num[offset] < 0x10)
606 659 pba_num[offset] += 'A' - 0xA;
607 660 }
608 661
609 662 return IXGBE_SUCCESS;
610 663 }
611 664
612 665 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
613 666 if (ret_val) {
614 667 DEBUGOUT("NVM Read Error\n");
615 668 return ret_val;
616 669 }
617 670
618 671 if (length == 0xFFFF || length == 0) {
619 672 DEBUGOUT("NVM PBA number section invalid length\n");
620 673 return IXGBE_ERR_PBA_SECTION;
621 674 }
622 675
623 676 /* check if pba_num buffer is big enough */
624 677 if (pba_num_size < (((u32)length * 2) - 1)) {
625 678 DEBUGOUT("PBA string buffer too small\n");
626 679 return IXGBE_ERR_NO_SPACE;
627 680 }
628 681
629 682 /* trim pba length from start of string */
630 683 pba_ptr++;
631 684 length--;
632 685
633 686 for (offset = 0; offset < length; offset++) {
634 687 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
635 688 if (ret_val) {
636 689 DEBUGOUT("NVM Read Error\n");
637 690 return ret_val;
638 691 }
639 692 pba_num[offset * 2] = (u8)(data >> 8);
640 693 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
641 694 }
642 695 pba_num[offset * 2] = '\0';
643 696
644 697 return IXGBE_SUCCESS;
645 698 }
646 699
647 700 /**
648 701 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
649 702 * @hw: pointer to hardware structure
650 703 * @pba_num: stores the part number from the EEPROM
651 704 *
652 705 * Reads the part number from the EEPROM.
653 706 **/
654 707 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
655 708 {
656 709 s32 ret_val;
657 710 u16 data;
658 711
659 712 DEBUGFUNC("ixgbe_read_pba_num_generic");
660 713
661 714 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
662 715 if (ret_val) {
663 716 DEBUGOUT("NVM Read Error\n");
664 717 return ret_val;
665 718 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
666 719 DEBUGOUT("NVM Not supported\n");
667 720 return IXGBE_NOT_IMPLEMENTED;
668 721 }
669 722 *pba_num = (u32)(data << 16);
670 723
671 724 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
↓ open down ↓ |
139 lines elided |
↑ open up ↑ |
672 725 if (ret_val) {
673 726 DEBUGOUT("NVM Read Error\n");
674 727 return ret_val;
675 728 }
676 729 *pba_num |= data;
677 730
678 731 return IXGBE_SUCCESS;
679 732 }
680 733
681 734 /**
735 + * ixgbe_read_pba_raw
736 + * @hw: pointer to the HW structure
737 + * @eeprom_buf: optional pointer to EEPROM image
738 + * @eeprom_buf_size: size of EEPROM image in words
739 + * @max_pba_block_size: PBA block size limit
740 + * @pba: pointer to output PBA structure
741 + *
742 + * Reads PBA from EEPROM image when eeprom_buf is not NULL.
743 + * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
744 + *
745 + **/
746 +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
747 + u32 eeprom_buf_size, u16 max_pba_block_size,
748 + struct ixgbe_pba *pba)
749 +{
750 + s32 ret_val;
751 + u16 pba_block_size;
752 +
753 + if (pba == NULL)
754 + return IXGBE_ERR_PARAM;
755 +
756 + if (eeprom_buf == NULL) {
757 + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
758 + &pba->word[0]);
759 + if (ret_val)
760 + return ret_val;
761 + } else {
762 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
763 + pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
764 + pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
765 + } else {
766 + return IXGBE_ERR_PARAM;
767 + }
768 + }
769 +
770 + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
771 + if (pba->pba_block == NULL)
772 + return IXGBE_ERR_PARAM;
773 +
774 + ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
775 + eeprom_buf_size,
776 + &pba_block_size);
777 + if (ret_val)
778 + return ret_val;
779 +
780 + if (pba_block_size > max_pba_block_size)
781 + return IXGBE_ERR_PARAM;
782 +
783 + if (eeprom_buf == NULL) {
784 + ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
785 + pba_block_size,
786 + pba->pba_block);
787 + if (ret_val)
788 + return ret_val;
789 + } else {
790 + if (eeprom_buf_size > (u32)(pba->word[1] +
791 + pba_block_size)) {
792 + memcpy(pba->pba_block,
793 + &eeprom_buf[pba->word[1]],
794 + pba_block_size * sizeof(u16));
795 + } else {
796 + return IXGBE_ERR_PARAM;
797 + }
798 + }
799 + }
800 +
801 + return IXGBE_SUCCESS;
802 +}
803 +
804 +/**
805 + * ixgbe_write_pba_raw
806 + * @hw: pointer to the HW structure
807 + * @eeprom_buf: optional pointer to EEPROM image
808 + * @eeprom_buf_size: size of EEPROM image in words
809 + * @pba: pointer to PBA structure
810 + *
811 + * Writes PBA to EEPROM image when eeprom_buf is not NULL.
812 + * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
813 + *
814 + **/
815 +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
816 + u32 eeprom_buf_size, struct ixgbe_pba *pba)
817 +{
818 + s32 ret_val;
819 +
820 + if (pba == NULL)
821 + return IXGBE_ERR_PARAM;
822 +
823 + if (eeprom_buf == NULL) {
824 + ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
825 + &pba->word[0]);
826 + if (ret_val)
827 + return ret_val;
828 + } else {
829 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
830 + eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
831 + eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
832 + } else {
833 + return IXGBE_ERR_PARAM;
834 + }
835 + }
836 +
837 + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
838 + if (pba->pba_block == NULL)
839 + return IXGBE_ERR_PARAM;
840 +
841 + if (eeprom_buf == NULL) {
842 + ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
843 + pba->pba_block[0],
844 + pba->pba_block);
845 + if (ret_val)
846 + return ret_val;
847 + } else {
848 + if (eeprom_buf_size > (u32)(pba->word[1] +
849 + pba->pba_block[0])) {
850 + memcpy(&eeprom_buf[pba->word[1]],
851 + pba->pba_block,
852 + pba->pba_block[0] * sizeof(u16));
853 + } else {
854 + return IXGBE_ERR_PARAM;
855 + }
856 + }
857 + }
858 +
859 + return IXGBE_SUCCESS;
860 +}
861 +
862 +/**
863 + * ixgbe_get_pba_block_size
864 + * @hw: pointer to the HW structure
865 + * @eeprom_buf: optional pointer to EEPROM image
866 + * @eeprom_buf_size: size of EEPROM image in words
867 + * @pba_data_size: pointer to output variable
868 + *
869 + * Returns the size of the PBA block in words. Function operates on EEPROM
870 + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
871 + * EEPROM device.
872 + *
873 + **/
874 +s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
875 + u32 eeprom_buf_size, u16 *pba_block_size)
876 +{
877 + s32 ret_val;
878 + u16 pba_word[2];
879 + u16 length;
880 +
881 + DEBUGFUNC("ixgbe_get_pba_block_size");
882 +
883 + if (eeprom_buf == NULL) {
884 + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
885 + &pba_word[0]);
886 + if (ret_val)
887 + return ret_val;
888 + } else {
889 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
890 + pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
891 + pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
892 + } else {
893 + return IXGBE_ERR_PARAM;
894 + }
895 + }
896 +
897 + if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
898 + if (eeprom_buf == NULL) {
899 + ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
900 + &length);
901 + if (ret_val)
902 + return ret_val;
903 + } else {
904 + if (eeprom_buf_size > pba_word[1])
905 + length = eeprom_buf[pba_word[1] + 0];
906 + else
907 + return IXGBE_ERR_PARAM;
908 + }
909 +
910 + if (length == 0xFFFF || length == 0)
911 + return IXGBE_ERR_PBA_SECTION;
912 + } else {
913 + /* PBA number in legacy format, there is no PBA Block. */
914 + length = 0;
915 + }
916 +
917 + if (pba_block_size != NULL)
918 + *pba_block_size = length;
919 +
920 + return IXGBE_SUCCESS;
921 +}
922 +
923 +/**
682 924 * ixgbe_get_mac_addr_generic - Generic get MAC address
683 925 * @hw: pointer to hardware structure
684 926 * @mac_addr: Adapter MAC address
685 927 *
686 928 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
687 929 * A reset of the adapter must be performed prior to calling this function
688 930 * in order for the MAC address to have been loaded from the EEPROM into RAR0
689 931 **/
690 932 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
691 933 {
692 934 u32 rar_high;
693 935 u32 rar_low;
694 936 u16 i;
695 937
696 938 DEBUGFUNC("ixgbe_get_mac_addr_generic");
697 939
698 940 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
699 941 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
700 942
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
701 943 for (i = 0; i < 4; i++)
702 944 mac_addr[i] = (u8)(rar_low >> (i*8));
703 945
704 946 for (i = 0; i < 2; i++)
705 947 mac_addr[i+4] = (u8)(rar_high >> (i*8));
706 948
707 949 return IXGBE_SUCCESS;
708 950 }
709 951
710 952 /**
711 - * ixgbe_get_bus_info_generic - Generic set PCI bus info
953 + * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
712 954 * @hw: pointer to hardware structure
955 + * @link_status: the link status returned by the PCI config space
713 956 *
714 - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
957 + * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
715 958 **/
716 -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
959 +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
717 960 {
718 961 struct ixgbe_mac_info *mac = &hw->mac;
719 - u16 link_status;
720 962
721 - DEBUGFUNC("ixgbe_get_bus_info_generic");
963 + if (hw->bus.type == ixgbe_bus_type_unknown)
964 + hw->bus.type = ixgbe_bus_type_pci_express;
722 965
723 - hw->bus.type = ixgbe_bus_type_pci_express;
724 -
725 - /* Get the negotiated link width and speed from PCI config space */
726 - link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
727 -
728 966 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
729 967 case IXGBE_PCI_LINK_WIDTH_1:
730 968 hw->bus.width = ixgbe_bus_width_pcie_x1;
731 969 break;
732 970 case IXGBE_PCI_LINK_WIDTH_2:
733 971 hw->bus.width = ixgbe_bus_width_pcie_x2;
734 972 break;
735 973 case IXGBE_PCI_LINK_WIDTH_4:
736 974 hw->bus.width = ixgbe_bus_width_pcie_x4;
737 975 break;
738 976 case IXGBE_PCI_LINK_WIDTH_8:
739 977 hw->bus.width = ixgbe_bus_width_pcie_x8;
740 978 break;
741 979 default:
742 980 hw->bus.width = ixgbe_bus_width_unknown;
743 981 break;
744 982 }
745 983
746 984 switch (link_status & IXGBE_PCI_LINK_SPEED) {
747 985 case IXGBE_PCI_LINK_SPEED_2500:
748 986 hw->bus.speed = ixgbe_bus_speed_2500;
749 987 break;
750 988 case IXGBE_PCI_LINK_SPEED_5000:
751 989 hw->bus.speed = ixgbe_bus_speed_5000;
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
752 990 break;
753 991 case IXGBE_PCI_LINK_SPEED_8000:
754 992 hw->bus.speed = ixgbe_bus_speed_8000;
755 993 break;
756 994 default:
757 995 hw->bus.speed = ixgbe_bus_speed_unknown;
758 996 break;
759 997 }
760 998
761 999 mac->ops.set_lan_id(hw);
1000 +}
762 1001
1002 +/**
1003 + * ixgbe_get_bus_info_generic - Generic set PCI bus info
1004 + * @hw: pointer to hardware structure
1005 + *
1006 + * Gets the PCI bus info (speed, width, type) then calls helper function to
1007 + * store this data within the ixgbe_hw structure.
1008 + **/
1009 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1010 +{
1011 + u16 link_status;
1012 +
1013 + DEBUGFUNC("ixgbe_get_bus_info_generic");
1014 +
1015 + /* Get the negotiated link width and speed from PCI config space */
1016 + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1017 +
1018 + ixgbe_set_pci_config_data_generic(hw, link_status);
1019 +
763 1020 return IXGBE_SUCCESS;
764 1021 }
765 1022
766 1023 /**
767 1024 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
768 1025 * @hw: pointer to the HW structure
769 1026 *
770 1027 * Determines the LAN function id by reading memory-mapped registers
771 1028 * and swaps the port value if requested.
772 1029 **/
773 1030 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
774 1031 {
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
775 1032 struct ixgbe_bus_info *bus = &hw->bus;
776 1033 u32 reg;
777 1034
778 1035 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
779 1036
780 1037 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
781 1038 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
782 1039 bus->lan_id = bus->func;
783 1040
784 1041 /* check for a port swap */
785 - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1042 + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
786 1043 if (reg & IXGBE_FACTPS_LFS)
787 1044 bus->func ^= 0x1;
788 1045 }
789 1046
790 1047 /**
791 1048 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
792 1049 * @hw: pointer to hardware structure
793 1050 *
794 1051 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
795 1052 * disables transmit and receive units. The adapter_stopped flag is used by
796 1053 * the shared code and drivers to determine if the adapter is in a stopped
797 1054 * state and should not touch the hardware.
798 1055 **/
799 1056 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
800 1057 {
801 1058 u32 reg_val;
802 1059 u16 i;
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
803 1060
804 1061 DEBUGFUNC("ixgbe_stop_adapter_generic");
805 1062
806 1063 /*
807 1064 * Set the adapter_stopped flag so other driver functions stop touching
808 1065 * the hardware
809 1066 */
810 1067 hw->adapter_stopped = TRUE;
811 1068
812 1069 /* Disable the receive unit */
813 - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1070 + ixgbe_disable_rx(hw);
814 1071
815 1072 /* Clear interrupt mask to stop interrupts from being generated */
816 1073 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
817 1074
818 1075 /* Clear any pending interrupts, flush previous writes */
819 - (void) IXGBE_READ_REG(hw, IXGBE_EICR);
1076 + IXGBE_READ_REG(hw, IXGBE_EICR);
820 1077
821 1078 /* Disable the transmit unit. Each queue must be disabled. */
822 1079 for (i = 0; i < hw->mac.max_tx_queues; i++)
823 1080 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824 1081
825 1082 /* Disable the receive unit by stopping each queue */
826 1083 for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 1084 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 1085 reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 1086 reg_val |= IXGBE_RXDCTL_SWFLSH;
830 1087 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
831 1088 }
832 1089
833 1090 /* flush all queues disables */
834 1091 IXGBE_WRITE_FLUSH(hw);
835 1092 msec_delay(2);
836 1093
837 1094 /*
838 - * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1095 + * Prevent the PCI-E bus from hanging by disabling PCI-E master
839 1096 * access and verify no pending requests
840 1097 */
841 1098 return ixgbe_disable_pcie_master(hw);
842 1099 }
843 1100
844 1101 /**
845 1102 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
846 1103 * @hw: pointer to hardware structure
847 1104 * @index: led number to turn on
848 1105 **/
849 1106 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
850 1107 {
851 1108 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
852 1109
853 1110 DEBUGFUNC("ixgbe_led_on_generic");
854 1111
855 1112 /* To turn on the LED, set mode to ON. */
856 1113 led_reg &= ~IXGBE_LED_MODE_MASK(index);
857 1114 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
858 1115 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
859 1116 IXGBE_WRITE_FLUSH(hw);
860 1117
861 1118 return IXGBE_SUCCESS;
862 1119 }
863 1120
864 1121 /**
865 1122 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
866 1123 * @hw: pointer to hardware structure
867 1124 * @index: led number to turn off
868 1125 **/
869 1126 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
870 1127 {
871 1128 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
872 1129
873 1130 DEBUGFUNC("ixgbe_led_off_generic");
874 1131
875 1132 /* To turn off the LED, set mode to OFF. */
876 1133 led_reg &= ~IXGBE_LED_MODE_MASK(index);
877 1134 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
878 1135 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
879 1136 IXGBE_WRITE_FLUSH(hw);
880 1137
881 1138 return IXGBE_SUCCESS;
882 1139 }
883 1140
884 1141 /**
885 1142 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
886 1143 * @hw: pointer to hardware structure
887 1144 *
888 1145 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
889 1146 * ixgbe_hw struct in order to set up EEPROM access.
890 1147 **/
891 1148 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
892 1149 {
893 1150 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
894 1151 u32 eec;
895 1152 u16 eeprom_size;
896 1153
897 1154 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
898 1155
899 1156 if (eeprom->type == ixgbe_eeprom_uninitialized) {
900 1157 eeprom->type = ixgbe_eeprom_none;
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
901 1158 /* Set default semaphore delay to 10ms which is a well
902 1159 * tested value */
903 1160 eeprom->semaphore_delay = 10;
904 1161 /* Clear EEPROM page size, it will be initialized as needed */
905 1162 eeprom->word_page_size = 0;
906 1163
907 1164 /*
908 1165 * Check for EEPROM present first.
909 1166 * If not present leave as none
910 1167 */
911 - eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1168 + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
912 1169 if (eec & IXGBE_EEC_PRES) {
913 1170 eeprom->type = ixgbe_eeprom_spi;
914 1171
915 1172 /*
916 1173 * SPI EEPROM is assumed here. This code would need to
917 1174 * change if a future EEPROM is not SPI.
918 1175 */
919 1176 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
920 1177 IXGBE_EEC_SIZE_SHIFT);
921 1178 eeprom->word_size = 1 << (eeprom_size +
922 1179 IXGBE_EEPROM_WORD_SIZE_SHIFT);
923 1180 }
924 1181
925 1182 if (eec & IXGBE_EEC_ADDR_SIZE)
926 1183 eeprom->address_bits = 16;
927 1184 else
928 1185 eeprom->address_bits = 8;
929 1186 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
930 1187 "%d\n", eeprom->type, eeprom->word_size,
931 1188 eeprom->address_bits);
932 1189 }
933 1190
934 1191 return IXGBE_SUCCESS;
935 1192 }
936 1193
937 1194 /**
938 1195 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
939 1196 * @hw: pointer to hardware structure
940 1197 * @offset: offset within the EEPROM to write
941 1198 * @words: number of word(s)
942 1199 * @data: 16 bit word(s) to write to EEPROM
943 1200 *
944 1201 * Reads 16 bit word(s) from EEPROM through bit-bang method
945 1202 **/
946 1203 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
947 1204 u16 words, u16 *data)
948 1205 {
949 1206 s32 status = IXGBE_SUCCESS;
950 1207 u16 i, count;
951 1208
952 1209 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
953 1210
954 1211 hw->eeprom.ops.init_params(hw);
955 1212
956 1213 if (words == 0) {
957 1214 status = IXGBE_ERR_INVALID_ARGUMENT;
958 1215 goto out;
959 1216 }
960 1217
961 1218 if (offset + words > hw->eeprom.word_size) {
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
962 1219 status = IXGBE_ERR_EEPROM;
963 1220 goto out;
964 1221 }
965 1222
966 1223 /*
967 1224 * The EEPROM page size cannot be queried from the chip. We do lazy
968 1225 * initialization. It is worth to do that when we write large buffer.
969 1226 */
970 1227 if ((hw->eeprom.word_page_size == 0) &&
971 1228 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 - status = ixgbe_detect_eeprom_page_size_generic(hw, offset);
973 - if (status != IXGBE_SUCCESS)
974 - goto out;
1229 + ixgbe_detect_eeprom_page_size_generic(hw, offset);
975 1230
976 1231 /*
977 1232 * We cannot hold synchronization semaphores for too long
978 1233 * to avoid other entity starvation. However it is more efficient
979 1234 * to read in bursts than synchronizing access for each word.
980 1235 */
981 1236 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
982 1237 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
983 1238 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
984 1239 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
985 1240 count, &data[i]);
986 1241
987 1242 if (status != IXGBE_SUCCESS)
988 1243 break;
989 1244 }
990 1245
991 1246 out:
992 1247 return status;
993 1248 }
994 1249
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			/* Device not ready: give the interface back */
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * NOTE: `i` is also advanced inside the inner do/while
		 * below, so one outer iteration may consume several words
		 * (one EEPROM page per iteration).
		 */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* swap the two bytes of the outgoing word */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* page_size 0 = not detected: one word only */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/*
			 * NOTE(review): 10 ms pause after each page -
			 * presumably the device's write-cycle time; confirm
			 * against the SPI part's datasheet.
			 */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1077 1332
1078 1333 /**
1079 1334 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1080 1335 * @hw: pointer to hardware structure
1081 1336 * @offset: offset within the EEPROM to be written to
1082 1337 * @data: 16 bit word to be written to the EEPROM
1083 1338 *
1084 1339 * If ixgbe_eeprom_update_checksum is not called after this function, the
1085 1340 * EEPROM will most likely contain an invalid checksum.
1086 1341 **/
1087 1342 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1088 1343 {
1089 1344 s32 status;
1090 1345
1091 1346 DEBUGFUNC("ixgbe_write_eeprom_generic");
1092 1347
1093 1348 hw->eeprom.ops.init_params(hw);
1094 1349
1095 1350 if (offset >= hw->eeprom.word_size) {
1096 1351 status = IXGBE_ERR_EEPROM;
1097 1352 goto out;
1098 1353 }
1099 1354
1100 1355 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1101 1356
1102 1357 out:
1103 1358 return status;
1104 1359 }
1105 1360
1106 1361 /**
1107 1362 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1108 1363 * @hw: pointer to hardware structure
1109 1364 * @offset: offset within the EEPROM to be read
1110 1365 * @data: read 16 bit words(s) from EEPROM
1111 1366 * @words: number of word(s)
1112 1367 *
1113 1368 * Reads 16 bit word(s) from EEPROM through bit-bang method
1114 1369 **/
1115 1370 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1116 1371 u16 words, u16 *data)
1117 1372 {
1118 1373 s32 status = IXGBE_SUCCESS;
1119 1374 u16 i, count;
1120 1375
1121 1376 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1122 1377
1123 1378 hw->eeprom.ops.init_params(hw);
1124 1379
1125 1380 if (words == 0) {
1126 1381 status = IXGBE_ERR_INVALID_ARGUMENT;
1127 1382 goto out;
1128 1383 }
1129 1384
1130 1385 if (offset + words > hw->eeprom.word_size) {
1131 1386 status = IXGBE_ERR_EEPROM;
1132 1387 goto out;
1133 1388 }
1134 1389
1135 1390 /*
1136 1391 * We cannot hold synchronization semaphores for too long
1137 1392 * to avoid other entity starvation. However it is more efficient
1138 1393 * to read in bursts than synchronizing access for each word.
1139 1394 */
1140 1395 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1141 1396 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1142 1397 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1143 1398
1144 1399 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1145 1400 count, &data[i]);
1146 1401
1147 1402 if (status != IXGBE_SUCCESS)
1148 1403 break;
1149 1404 }
1150 1405
1151 1406 out:
1152 1407 return status;
1153 1408 }
1154 1409
/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			/* Device not ready: give the interface back */
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			/* swap the two bytes of the word shifted in */
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1212 1467
1213 1468 /**
1214 1469 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1215 1470 * @hw: pointer to hardware structure
1216 1471 * @offset: offset within the EEPROM to be read
1217 1472 * @data: read 16 bit value from EEPROM
1218 1473 *
1219 1474 * Reads 16 bit value from EEPROM through bit-bang method
1220 1475 **/
1221 1476 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1222 1477 u16 *data)
1223 1478 {
1224 1479 s32 status;
1225 1480
1226 1481 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1227 1482
1228 1483 hw->eeprom.ops.init_params(hw);
1229 1484
1230 1485 if (offset >= hw->eeprom.word_size) {
1231 1486 status = IXGBE_ERR_EEPROM;
1232 1487 goto out;
1233 1488 }
1234 1489
1235 1490 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1236 1491
1237 1492 out:
1238 1493 return status;
1239 1494 }
1240 1495
1241 1496 /**
1242 1497 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1243 1498 * @hw: pointer to hardware structure
1244 1499 * @offset: offset of word in the EEPROM to read
1245 1500 * @words: number of word(s)
1246 1501 * @data: 16 bit word(s) from the EEPROM
1247 1502 *
1248 1503 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1249 1504 **/
1250 1505 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1251 1506 u16 words, u16 *data)
1252 1507 {
↓ open down ↓ |
268 lines elided |
↑ open up ↑ |
1253 1508 u32 eerd;
1254 1509 s32 status = IXGBE_SUCCESS;
1255 1510 u32 i;
1256 1511
1257 1512 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1258 1513
1259 1514 hw->eeprom.ops.init_params(hw);
1260 1515
1261 1516 if (words == 0) {
1262 1517 status = IXGBE_ERR_INVALID_ARGUMENT;
1518 + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1263 1519 goto out;
1264 1520 }
1265 1521
1266 1522 if (offset >= hw->eeprom.word_size) {
1267 1523 status = IXGBE_ERR_EEPROM;
1524 + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1268 1525 goto out;
1269 1526 }
1270 1527
1271 1528 for (i = 0; i < words; i++) {
1272 - eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1529 + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1273 1530 IXGBE_EEPROM_RW_REG_START;
1274 1531
1275 1532 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1276 1533 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1277 1534
1278 1535 if (status == IXGBE_SUCCESS) {
1279 1536 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1280 1537 IXGBE_EEPROM_RW_REG_DATA);
1281 1538 } else {
1282 1539 DEBUGOUT("Eeprom read timed out\n");
1283 1540 goto out;
1284 1541 }
1285 1542 }
1286 1543 out:
1287 1544 return status;
1288 1545 }
1289 1546
1290 1547 /**
1291 1548 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1292 1549 * @hw: pointer to hardware structure
1293 1550 * @offset: offset within the EEPROM to be used as a scratch pad
1294 1551 *
1295 1552 * Discover EEPROM page size by writing marching data at given offset.
1296 1553 * This function is called only when we are writing a new large buffer
1297 1554 * at given offset so the data would be overwritten anyway.
1298 1555 **/
1299 1556 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1300 1557 u16 offset)
1301 1558 {
1302 1559 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1303 1560 s32 status = IXGBE_SUCCESS;
1304 1561 u16 i;
1305 1562
1306 1563 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1307 1564
1308 1565 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1309 1566 data[i] = i;
1310 1567
1311 1568 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1312 1569 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1313 1570 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1314 1571 hw->eeprom.word_page_size = 0;
1315 1572 if (status != IXGBE_SUCCESS)
1316 1573 goto out;
1317 1574
1318 1575 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1319 1576 if (status != IXGBE_SUCCESS)
1320 1577 goto out;
1321 1578
1322 1579 /*
1323 1580 * When writing in burst more than the actual page size
1324 1581 * EEPROM address wraps around current page.
1325 1582 */
1326 1583 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1327 1584
1328 1585 DEBUGOUT1("Detected EEPROM page size = %d words.",
1329 1586 hw->eeprom.word_page_size);
1330 1587 out:
1331 1588 return status;
1332 1589 }
1333 1590
/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* Single-word convenience wrapper around the buffered EERD read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1346 1603
1347 1604 /**
1348 1605 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1349 1606 * @hw: pointer to hardware structure
1350 1607 * @offset: offset of word in the EEPROM to write
1351 1608 * @words: number of word(s)
1352 1609 * @data: word(s) write to the EEPROM
1353 1610 *
1354 1611 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1355 1612 **/
1356 1613 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1357 1614 u16 words, u16 *data)
1358 1615 {
↓ open down ↓ |
76 lines elided |
↑ open up ↑ |
1359 1616 u32 eewr;
1360 1617 s32 status = IXGBE_SUCCESS;
1361 1618 u16 i;
1362 1619
1363 1620 DEBUGFUNC("ixgbe_write_eewr_generic");
1364 1621
1365 1622 hw->eeprom.ops.init_params(hw);
1366 1623
1367 1624 if (words == 0) {
1368 1625 status = IXGBE_ERR_INVALID_ARGUMENT;
1626 + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1369 1627 goto out;
1370 1628 }
1371 1629
1372 1630 if (offset >= hw->eeprom.word_size) {
1373 1631 status = IXGBE_ERR_EEPROM;
1632 + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1374 1633 goto out;
1375 1634 }
1376 1635
1377 1636 for (i = 0; i < words; i++) {
1378 1637 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1379 1638 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1380 1639 IXGBE_EEPROM_RW_REG_START;
1381 1640
1382 1641 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1383 1642 if (status != IXGBE_SUCCESS) {
1384 1643 DEBUGOUT("Eeprom write EEWR timed out\n");
1385 1644 goto out;
1386 1645 }
1387 1646
1388 1647 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1389 1648
1390 1649 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1391 1650 if (status != IXGBE_SUCCESS) {
1392 1651 DEBUGOUT("Eeprom write EEWR timed out\n");
1393 1652 goto out;
1394 1653 }
1395 1654 }
1396 1655
1397 1656 out:
1398 1657 return status;
1399 1658 }
1400 1659
1401 1660 /**
1402 1661 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1403 1662 * @hw: pointer to hardware structure
1404 1663 * @offset: offset of word in the EEPROM to write
1405 1664 * @data: word write to the EEPROM
1406 1665 *
1407 1666 * Write a 16 bit word to the EEPROM using the EEWR register.
1408 1667 **/
1409 1668 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1410 1669 {
1411 1670 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1412 1671 }
1413 1672
1414 1673 /**
1415 1674 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1416 1675 * @hw: pointer to hardware structure
1417 1676 * @ee_reg: EEPROM flag for polling
1418 1677 *
1419 1678 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1420 1679 * read or write is done respectively.
1421 1680 **/
1422 1681 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1423 1682 {
1424 1683 u32 i;
1425 1684 u32 reg;
1426 1685 s32 status = IXGBE_ERR_EEPROM;
1427 1686
1428 1687 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1429 1688
1430 1689 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1431 1690 if (ee_reg == IXGBE_NVM_POLL_READ)
↓ open down ↓ |
48 lines elided |
↑ open up ↑ |
1432 1691 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1433 1692 else
1434 1693 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1435 1694
1436 1695 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1437 1696 status = IXGBE_SUCCESS;
1438 1697 break;
1439 1698 }
1440 1699 usec_delay(5);
1441 1700 }
1701 +
1702 + if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1703 + ERROR_REPORT1(IXGBE_ERROR_POLLING,
1704 + "EEPROM read/write done polling timed out");
1705 +
1442 1706 return status;
1443 1707 }
1444 1708
/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the SW/FW semaphore cannot
 * be taken, or IXGBE_ERR_EEPROM if hardware never grants EEPROM access.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW semaphore protecting EEPROM access */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		/* EEC register offset differs per MAC type (X550 support) */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll until hardware grants access (GNT) or we give up */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK so the SPI bus starts quiescent */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1499 1763
/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method.
 * Two semaphores are taken in order: the SMBI bit (arbitrates between device
 * drivers) and then the SWESMBI bit (arbitrates between software and
 * firmware).  Returns IXGBE_SUCCESS or IXGBE_ERR_EEPROM on timeout.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware can clear it to deny access,
			 * hence the read-back check.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1589 1854
1590 1855 /**
1591 1856 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1592 1857 * @hw: pointer to hardware structure
1593 1858 *
1594 1859 * This function clears hardware semaphore bits.
1595 1860 **/
1596 1861 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1597 1862 {
1598 1863 u32 swsm;
1599 1864
1600 1865 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1601 1866
1602 1867 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1603 1868
1604 1869 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1605 1870 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1606 1871 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1607 1872 IXGBE_WRITE_FLUSH(hw);
1608 1873 }
1609 1874
1610 1875 /**
1611 1876 * ixgbe_ready_eeprom - Polls for EEPROM ready
1612 1877 * @hw: pointer to hardware structure
1613 1878 **/
1614 1879 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1615 1880 {
1616 1881 s32 status = IXGBE_SUCCESS;
1617 1882 u16 i;
1618 1883 u8 spi_stat_reg;
1619 1884
1620 1885 DEBUGFUNC("ixgbe_ready_eeprom");
1621 1886
1622 1887 /*
1623 1888 * Read "Status Register" repeatedly until the LSB is cleared. The
1624 1889 * EEPROM will signal that the command has been completed by clearing
1625 1890 * bit 0 of the internal status register. If it's not cleared within
1626 1891 * 5 milliseconds, then error out.
1627 1892 */
1628 1893 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1629 1894 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1630 1895 IXGBE_EEPROM_OPCODE_BITS);
1631 1896 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1632 1897 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1633 1898 break;
1634 1899
1635 1900 usec_delay(5);
1636 1901 ixgbe_standby_eeprom(hw);
1637 1902 };
1638 1903
1639 1904 /*
1640 1905 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1641 1906 * devices (and only 0-5mSec on 5V devices)
1642 1907 */
1643 1908 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1644 1909 DEBUGOUT("SPI EEPROM Status error\n");
1645 1910 status = IXGBE_ERR_EEPROM;
1646 1911 }
1647 1912
1648 1913 return status;
1649 1914 }
1650 1915
↓ open down ↓ |
56 lines elided |
↑ open up ↑ |
/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 *
 * Pulses the SPI chip-select line high then low to terminate any
 * in-progress command, leaving the EEPROM ready for the next opcode.
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_standby_eeprom");

	/* EEC register offset differs per MAC type (X550 support) */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1673 1938
1674 1939 /**
1675 1940 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1676 1941 * @hw: pointer to hardware structure
1677 1942 * @data: data to send to the EEPROM
1678 1943 * @count: number of bits to shift out
1679 1944 **/
1680 1945 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1681 1946 u16 count)
1682 1947 {
1683 1948 u32 eec;
1684 1949 u32 mask;
1685 1950 u32 i;
1686 1951
1687 1952 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1688 1953
1689 - eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1954 + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1690 1955
1691 1956 /*
1692 1957 * Mask is used to shift "count" bits of "data" out to the EEPROM
1693 1958 * one bit at a time. Determine the starting bit based on count
1694 1959 */
1695 1960 mask = 0x01 << (count - 1);
1696 1961
1697 1962 for (i = 0; i < count; i++) {
1698 1963 /*
1699 1964 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1700 1965 * "1", and then raising and then lowering the clock (the SK
1701 1966 * bit controls the clock input to the EEPROM). A "0" is
1702 1967 * shifted out to the EEPROM by setting "DI" to "0" and then
1703 1968 * raising and then lowering the clock.
1704 1969 */
1705 1970 if (data & mask)
1706 1971 eec |= IXGBE_EEC_DI;
1707 1972 else
1708 1973 eec &= ~IXGBE_EEC_DI;
1709 1974
1710 - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1975 + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1711 1976 IXGBE_WRITE_FLUSH(hw);
1712 1977
1713 1978 usec_delay(1);
1714 1979
1715 1980 ixgbe_raise_eeprom_clk(hw, &eec);
1716 1981 ixgbe_lower_eeprom_clk(hw, &eec);
1717 1982
1718 1983 /*
1719 1984 * Shift mask to signify next bit of data to shift in to the
1720 1985 * EEPROM
1721 1986 */
1722 1987 mask = mask >> 1;
1723 1988 };
1724 1989
1725 1990 /* We leave the "DI" bit set to "0" when we leave this routine. */
1726 1991 eec &= ~IXGBE_EEC_DI;
1727 - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1992 + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1728 1993 IXGBE_WRITE_FLUSH(hw);
1729 1994 }
1730 1995
/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in (at most 16)
 *
 * Returns the bits read, MSB first, packed into the low @count bits of
 * the return value.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* Make room for the next bit, then clock it in via DO */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1769 2034
/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value (updated in place with SK set)
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1788 2053
/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value (updated in place with SK cleared)
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1807 2072
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the SPI EEPROM, drops the hardware access request and releases
 * the SW/FW semaphore taken by ixgbe_acquire_eeprom().
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
1837 2102
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3F plus the data behind every section pointer except
 * the firmware pointer, then subtracts from IXGBE_EEPROM_SUM.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* First word of the section holds its length */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	/* Positive s32 return carries the u16 checksum */
	return (s32)checksum;
}
1883 2161
/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 *
 * Returns IXGBE_SUCCESS, a read/calc error code, or
 * IXGBE_ERR_EEPROM_CHECKSUM on mismatch.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns a negative error or the u16 checksum */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}
1929 2213
/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recomputes the checksum and writes it to the IXGBE_EEPROM_CHECKSUM word.
 * Returns IXGBE_SUCCESS or a read/calc/write error code.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns a negative error or the u16 checksum */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}
1958 2245
1959 2246 /**
1960 2247 * ixgbe_validate_mac_addr - Validate MAC address
1961 2248 * @mac_addr: pointer to MAC address.
1962 2249 *
1963 2250 * Tests a MAC address to ensure it is a valid Individual Address
1964 2251 **/
1965 2252 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1966 2253 {
1967 2254 s32 status = IXGBE_SUCCESS;
1968 2255
1969 2256 DEBUGFUNC("ixgbe_validate_mac_addr");
1970 2257
1971 2258 /* Make sure it is not a multicast address */
1972 2259 if (IXGBE_IS_MULTICAST(mac_addr)) {
1973 2260 DEBUGOUT("MAC address is multicast\n");
1974 2261 status = IXGBE_ERR_INVALID_MAC_ADDR;
1975 2262 /* Not a broadcast address */
1976 2263 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1977 2264 DEBUGOUT("MAC address is broadcast\n");
1978 2265 status = IXGBE_ERR_INVALID_MAC_ADDR;
1979 2266 /* Reject the zero address */
1980 2267 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1981 2268 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1982 2269 DEBUGOUT("MAC address is all zeros\n");
1983 2270 status = IXGBE_ERR_INVALID_MAC_ADDR;
1984 2271 }
1985 2272 return status;
1986 2273 }
1987 2274
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 * Returns IXGBE_SUCCESS or IXGBE_ERR_INVALID_ARGUMENT for a bad index.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
2040 2328
/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 * Returns IXGBE_SUCCESS or IXGBE_ERR_INVALID_ARGUMENT for a bad index.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}
2077 2366
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* UTA init return value is intentionally ignored here */
	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2144 2434
2145 2435 /**
2146 2436 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2147 2437 * @hw: pointer to hardware structure
2148 2438 * @addr: new address
2149 2439 *
2150 2440 * Adds it to unused receive address register or goes into promiscuous mode.
2151 2441 **/
2152 2442 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2153 2443 {
2154 2444 u32 rar_entries = hw->mac.num_rar_entries;
2155 2445 u32 rar;
2156 2446
2157 2447 DEBUGFUNC("ixgbe_add_uc_addr");
2158 2448
2159 2449 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2160 2450 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2161 2451
2162 2452 /*
2163 2453 * Place this address in the RAR if there is room,
2164 2454 * else put the controller into promiscuous mode
2165 2455 */
2166 2456 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2167 2457 rar = hw->addr_ctrl.rar_used_count;
2168 2458 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2169 2459 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2170 2460 hw->addr_ctrl.rar_used_count++;
2171 2461 } else {
2172 2462 hw->addr_ctrl.overflow_promisc++;
2173 2463 }
2174 2464
2175 2465 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2176 2466 }
2177 2467
/**
 *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 *  @hw: pointer to hardware structure
 *  @addr_list: the list of new addresses
 *  @addr_count: number of addresses
 *  @next: iterator function to walk the address list
 *
 *  The given list replaces any existing list. Clears the secondary addrs from
 *  receive address registers. Uses unused receive address registers for the
 *  first secondary addresses, and falls back to promiscuous mode as needed.
 *
 *  Drivers using secondary unicast addresses must set user_set_promisc when
 *  manually putting the device into promiscuous mode.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	/* NOTE(review): assumes rar_used_count >= 1 (RAR[0] holds the
	 * primary MAC); a count of 0 would underflow this u32 - confirm
	 * callers always run after ixgbe_init_rx_addrs_generic. */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses; ixgbe_add_uc_addr bumps overflow_promisc
	 * once the RAR table fills up. */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	/* Toggle FCTRL.UPE only on an overflow-state transition, and never
	 * override promiscuous mode the user enabled explicitly. */
	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2247 2537
2248 2538 /**
2249 2539 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2250 2540 * @hw: pointer to hardware structure
2251 2541 * @mc_addr: the multicast address
2252 2542 *
2253 2543 * Extracts the 12 bits, from a multicast address, to determine which
2254 2544 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2255 2545 * incoming rx multicast addresses, to determine the bit-vector to check in
2256 2546 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2257 2547 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2258 2548 * to mc_filter_type.
2259 2549 **/
2260 2550 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2261 2551 {
2262 2552 u32 vector = 0;
2263 2553
2264 2554 DEBUGFUNC("ixgbe_mta_vector");
2265 2555
2266 2556 switch (hw->mac.mc_filter_type) {
2267 2557 case 0: /* use bits [47:36] of the address */
2268 2558 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2269 2559 break;
2270 2560 case 1: /* use bits [46:35] of the address */
2271 2561 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2272 2562 break;
2273 2563 case 2: /* use bits [45:34] of the address */
2274 2564 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2275 2565 break;
2276 2566 case 3: /* use bits [43:32] of the address */
2277 2567 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2278 2568 break;
2279 2569 default: /* Invalid mc_filter_type */
2280 2570 DEBUGOUT("MC filter type param set incorrectly\n");
2281 2571 ASSERT(0);
2282 2572 break;
2283 2573 }
2284 2574
2285 2575 /* vector can only be 12-bits or boundary will be exceeded */
2286 2576 vector &= 0xFFF;
2287 2577 return vector;
2288 2578 }
2289 2579
2290 2580 /**
2291 2581 * ixgbe_set_mta - Set bit-vector in multicast table
2292 2582 * @hw: pointer to hardware structure
2293 2583 * @hash_value: Multicast address hash value
2294 2584 *
2295 2585 * Sets the bit-vector in the multicast table.
2296 2586 **/
2297 2587 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2298 2588 {
2299 2589 u32 vector;
2300 2590 u32 vector_bit;
2301 2591 u32 vector_reg;
2302 2592
2303 2593 DEBUGFUNC("ixgbe_set_mta");
2304 2594
2305 2595 hw->addr_ctrl.mta_in_use++;
2306 2596
2307 2597 vector = ixgbe_mta_vector(hw, mc_addr);
2308 2598 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2309 2599
2310 2600 /*
2311 2601 * The MTA is a register array of 128 32-bit registers. It is treated
2312 2602 * like an array of 4096 bits. We want to set bit
2313 2603 * BitArray[vector_value]. So we figure out what register the bit is
2314 2604 * in, read it, OR in the new bit, then write back the new value. The
2315 2605 * register is determined by the upper 7 bits of the vector value and
2316 2606 * the bit within that register are determined by the lower 5 bits of
2317 2607 * the value.
2318 2608 */
2319 2609 vector_reg = (vector >> 5) & 0x7F;
2320 2610 vector_bit = vector & 0x1F;
2321 2611 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2322 2612 }
2323 2613
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *  @clear: flag, when set clears the table beforehand
 *
 *  When the clear flag is set, the given list replaces any existing list.
 *  Hashes the given addresses into the multicast table.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow: hash every address into the shadow copy first,
	 * then flush the whole table to hardware in one pass below. */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Only enable the multicast filter engine when at least one hash
	 * entry is populated. */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2376 2665
2377 2666 /**
2378 2667 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2379 2668 * @hw: pointer to hardware structure
2380 2669 *
2381 2670 * Enables multicast address in RAR and the use of the multicast hash table.
2382 2671 **/
2383 2672 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2384 2673 {
2385 2674 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2386 2675
2387 2676 DEBUGFUNC("ixgbe_enable_mc_generic");
2388 2677
2389 2678 if (a->mta_in_use > 0)
2390 2679 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2391 2680 hw->mac.mc_filter_type);
2392 2681
2393 2682 return IXGBE_SUCCESS;
2394 2683 }
2395 2684
2396 2685 /**
2397 2686 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2398 2687 * @hw: pointer to hardware structure
2399 2688 *
2400 2689 * Disables multicast address in RAR and the use of the multicast hash table.
2401 2690 **/
2402 2691 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2403 2692 {
2404 2693 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2405 2694
2406 2695 DEBUGFUNC("ixgbe_disable_mc_generic");
2407 2696
2408 2697 if (a->mta_in_use > 0)
2409 2698 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2410 2699
2411 2700 return IXGBE_SUCCESS;
2412 2701 }
2413 2702
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 *
 *  Returns IXGBE_SUCCESS on success, IXGBE_ERR_INVALID_LINK_SETTINGS for a
 *  zero pause time or bad water marks, or IXGBE_ERR_CONFIG for an unknown
 *  fc.current_mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			"Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* unreachable after goto; kept to match upstream */
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; shift by 10
			 * converts to the byte-granular register encoding. */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2543 2835
2544 2836 /**
2545 2837 * ixgbe_negotiate_fc - Negotiate flow control
2546 2838 * @hw: pointer to hardware structure
2547 2839 * @adv_reg: flow control advertised settings
2548 2840 * @lp_reg: link partner's flow control settings
2549 2841 * @adv_sym: symmetric pause bit in advertisement
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
2550 2842 * @adv_asm: asymmetric pause bit in advertisement
2551 2843 * @lp_sym: symmetric pause bit in link partner advertisement
2552 2844 * @lp_asm: asymmetric pause bit in link partner advertisement
2553 2845 *
2554 2846 * Find the intersection between advertised settings and link partner's
2555 2847 * advertised settings
2556 2848 **/
2557 2849 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2558 2850 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2559 2851 {
2560 - if ((!(adv_reg)) || (!(lp_reg)))
2852 + if ((!(adv_reg)) || (!(lp_reg))) {
2853 + ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2854 + "Local or link partner's advertised flow control "
2855 + "settings are NULL. Local: %x, link partner: %x\n",
2856 + adv_reg, lp_reg);
2561 2857 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2858 + }
2562 2859
2563 2860 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2564 2861 /*
2565 2862 * Now we need to check if the user selected Rx ONLY
2566 2863 * of pause frames. In this case, we had to advertise
2567 2864 * FULL flow control because we could not advertise RX
2568 2865 * ONLY. Hence, we must now check to see if we need to
2569 2866 * turn OFF the TRANSMISSION of PAUSE frames.
2570 2867 */
2571 2868 if (hw->fc.requested_mode == ixgbe_fc_full) {
2572 2869 hw->fc.current_mode = ixgbe_fc_full;
2573 2870 DEBUGOUT("Flow Control = FULL.\n");
2574 2871 } else {
2575 2872 hw->fc.current_mode = ixgbe_fc_rx_pause;
2576 2873 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2577 2874 }
2578 2875 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2579 2876 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2580 2877 hw->fc.current_mode = ixgbe_fc_tx_pause;
2581 2878 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2582 2879 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2583 2880 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2584 2881 hw->fc.current_mode = ixgbe_fc_rx_pause;
2585 2882 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2586 2883 } else {
2587 2884 hw->fc.current_mode = ixgbe_fc_none;
2588 2885 DEBUGOUT("Flow Control = NONE.\n");
2589 2886 }
2590 2887 return IXGBE_SUCCESS;
2591 2888 }
2592 2889
2593 2890 /**
2594 2891 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2595 2892 * @hw: pointer to hardware structure
2596 2893 *
2597 2894 * Enable flow control according on 1 gig fiber.
2598 2895 **/
2599 2896 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2600 2897 {
2601 2898 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
2602 2899 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2603 2900
2604 2901 /*
2605 2902 * On multispeed fiber at 1g, bail out if
2606 2903 * - link is up but AN did not complete, or if
2607 2904 * - link is up and AN completed but timed out
2608 2905 */
2609 2906
2610 2907 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2611 2908 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2612 - (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2909 + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2910 + DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2613 2911 goto out;
2912 + }
2614 2913
2615 2914 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2616 2915 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2617 2916
2618 2917 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2619 2918 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2620 2919 IXGBE_PCS1GANA_ASM_PAUSE,
2621 2920 IXGBE_PCS1GANA_SYM_PAUSE,
2622 2921 IXGBE_PCS1GANA_ASM_PAUSE);
2623 2922
2624 2923 out:
2625 2924 return ret_val;
2626 2925 }
2627 2926
2628 2927 /**
2629 2928 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2630 2929 * @hw: pointer to hardware structure
2631 2930 *
2632 2931 * Enable flow control according to IEEE clause 37.
2633 2932 **/
2634 2933 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
2635 2934 {
2636 2935 u32 links2, anlp1_reg, autoc_reg, links;
2637 2936 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2638 2937
2639 2938 /*
2640 2939 * On backplane, bail out if
2641 2940 * - backplane autoneg was not completed, or if
2642 2941 * - we are 82599 and link partner is not AN enabled
2643 2942 */
2644 2943 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2645 - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2944 + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2945 + DEBUGOUT("Auto-Negotiation did not complete\n");
2646 2946 goto out;
2947 + }
2647 2948
2648 2949 if (hw->mac.type == ixgbe_mac_82599EB) {
2649 2950 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2650 - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2951 + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2952 + DEBUGOUT("Link partner is not AN enabled\n");
2651 2953 goto out;
2954 + }
2652 2955 }
2653 2956 /*
2654 2957 * Read the 10g AN autoc and LP ability registers and resolve
2655 2958 * local flow control settings accordingly
2656 2959 */
2657 2960 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2658 2961 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2659 2962
2660 2963 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2661 2964 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2662 2965 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2663 2966
2664 2967 out:
2665 2968 return ret_val;
2666 2969 }
2667 2970
2668 2971 /**
2669 2972 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2670 2973 * @hw: pointer to hardware structure
2671 2974 *
2672 2975 * Enable flow control according to IEEE clause 37.
2673 2976 **/
2674 2977 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2675 2978 {
2676 2979 u16 technology_ability_reg = 0;
2677 2980 u16 lp_technology_ability_reg = 0;
2678 2981
2679 2982 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2680 2983 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2681 2984 &technology_ability_reg);
2682 2985 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2683 2986 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2684 2987 &lp_technology_ability_reg);
2685 2988
2686 2989 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2687 2990 (u32)lp_technology_ability_reg,
2688 2991 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2689 2992 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2690 2993 }
2691 2994
2692 2995 /**
2693 2996 * ixgbe_fc_autoneg - Configure flow control
2694 2997 * @hw: pointer to hardware structure
2695 2998 *
2696 2999 * Compares our advertised flow control capabilities to those advertised by
2697 3000 * our link partner, and determines the proper flow control mode to use.
2698 3001 **/
2699 3002 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2700 3003 {
2701 3004 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2702 3005 ixgbe_link_speed speed;
↓ open down ↓ |
41 lines elided |
↑ open up ↑ |
2703 3006 bool link_up;
2704 3007
2705 3008 DEBUGFUNC("ixgbe_fc_autoneg");
2706 3009
2707 3010 /*
2708 3011 * AN should have completed when the cable was plugged in.
2709 3012 * Look for reasons to bail out. Bail out if:
2710 3013 * - FC autoneg is disabled, or if
2711 3014 * - link is not up.
2712 3015 */
2713 - if (hw->fc.disable_fc_autoneg)
3016 + if (hw->fc.disable_fc_autoneg) {
3017 + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3018 + "Flow control autoneg is disabled");
2714 3019 goto out;
3020 + }
2715 3021
2716 3022 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2717 - if (!link_up)
3023 + if (!link_up) {
3024 + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2718 3025 goto out;
3026 + }
2719 3027
2720 3028 switch (hw->phy.media_type) {
2721 3029 /* Autoneg flow control on fiber adapters */
3030 + case ixgbe_media_type_fiber_fixed:
3031 + case ixgbe_media_type_fiber_qsfp:
2722 3032 case ixgbe_media_type_fiber:
2723 3033 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2724 3034 ret_val = ixgbe_fc_autoneg_fiber(hw);
2725 3035 break;
2726 3036
2727 3037 /* Autoneg flow control on backplane adapters */
2728 3038 case ixgbe_media_type_backplane:
2729 3039 ret_val = ixgbe_fc_autoneg_backplane(hw);
2730 3040 break;
2731 3041
2732 3042 /* Autoneg flow control on copper adapters */
2733 3043 case ixgbe_media_type_copper:
2734 - if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
3044 + if (ixgbe_device_supports_autoneg_fc(hw))
2735 3045 ret_val = ixgbe_fc_autoneg_copper(hw);
2736 3046 break;
2737 3047
2738 3048 default:
2739 3049 break;
2740 3050 }
2741 3051
2742 3052 out:
2743 3053 if (ret_val == IXGBE_SUCCESS) {
2744 3054 hw->fc.fc_was_autonegged = TRUE;
2745 3055 } else {
2746 3056 hw->fc.fc_was_autonegged = FALSE;
2747 3057 hw->fc.current_mode = hw->fc.requested_mode;
2748 3058 }
2749 3059 }
2750 3060
3061 +/*
3062 + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3063 + * @hw: pointer to hardware structure
3064 + *
3065 + * System-wide timeout range is encoded in PCIe Device Control2 register.
3066 + *
3067 + * Add 10% to specified maximum and return the number of times to poll for
3068 + * completion timeout, in units of 100 microsec. Never return less than
3069 + * 800 = 80 millisec.
3070 + */
3071 +static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3072 +{
3073 + s16 devctl2;
3074 + u32 pollcnt;
3075 +
3076 + devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3077 + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3078 +
3079 + switch (devctl2) {
3080 + case IXGBE_PCIDEVCTRL2_65_130ms:
3081 + pollcnt = 1300; /* 130 millisec */
3082 + break;
3083 + case IXGBE_PCIDEVCTRL2_260_520ms:
3084 + pollcnt = 5200; /* 520 millisec */
3085 + break;
3086 + case IXGBE_PCIDEVCTRL2_1_2s:
3087 + pollcnt = 20000; /* 2 sec */
3088 + break;
3089 + case IXGBE_PCIDEVCTRL2_4_8s:
3090 + pollcnt = 80000; /* 8 sec */
3091 + break;
3092 + case IXGBE_PCIDEVCTRL2_17_34s:
3093 + pollcnt = 34000; /* 34 sec */
3094 + break;
3095 + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3096 + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3097 + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3098 + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3099 + default:
3100 + pollcnt = 800; /* 80 millisec minimum */
3101 + break;
3102 + }
3103 +
3104 + /* add 10% to spec maximum */
3105 + return (pollcnt * 11) / 10;
3106 +}
3107 +
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 *  is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	/* IXGBE_REMOVED guards against a surprise-removed device: reads
	 * from a removed device return all-ones, so stop polling early. */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer skip the PCIe pending-transaction poll below */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Bail out cleanly if the device was surprise-removed while
		 * polling; the all-ones read would otherwise look "pending" */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2809 3175
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 *
 *  Returns IXGBE_SUCCESS when the SW bit(s) in GSSR have been set, or
 *  IXGBE_ERR_SWFW_SYNC if the resource could not be acquired within the
 *  retry budget (in which case a stale lock is force-released for the
 *  caller's next attempt).
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	/* FW ownership bits sit 5 positions above the SW bits in GSSR */
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Free: claim the SW bit(s) while still holding the
			 * NVM semaphore, then drop the semaphore. */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
2859 3222
2860 3223 /**
2861 3224 * ixgbe_release_swfw_sync - Release SWFW semaphore
2862 3225 * @hw: pointer to hardware structure
2863 3226 * @mask: Mask to specify which semaphore to release
2864 3227 *
2865 3228 * Releases the SWFW semaphore through the GSSR register for the specified
2866 3229 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2867 3230 **/
2868 -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3231 +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2869 3232 {
2870 3233 u32 gssr;
2871 3234 u32 swmask = mask;
2872 3235
2873 3236 DEBUGFUNC("ixgbe_release_swfw_sync");
2874 3237
2875 - (void) ixgbe_get_eeprom_semaphore(hw);
3238 + ixgbe_get_eeprom_semaphore(hw);
2876 3239
2877 3240 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2878 3241 gssr &= ~swmask;
2879 3242 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2880 3243
2881 3244 ixgbe_release_eeprom_semaphore(hw);
2882 3245 }
2883 3246
2884 3247 /**
2885 3248 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2886 3249 * @hw: pointer to hardware structure
2887 3250 *
2888 3251 * Stops the receive data path and waits for the HW to internally empty
2889 3252 * the Rx security block
2890 3253 **/
2891 3254 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2892 3255 {
2893 3256 #define IXGBE_MAX_SECRX_POLL 40
2894 3257
2895 3258 int i;
2896 3259 int secrxreg;
2897 3260
2898 3261 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2899 3262
2900 3263
2901 3264 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2902 3265 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2903 3266 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2904 3267 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2905 3268 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2906 3269 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2907 3270 break;
2908 3271 else
2909 3272 /* Use interrupt-safe sleep just in case */
2910 3273 usec_delay(1000);
2911 3274 }
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
2912 3275
2913 3276 /* For informational purposes only */
2914 3277 if (i >= IXGBE_MAX_SECRX_POLL)
2915 3278 DEBUGOUT("Rx unit being enabled before security "
2916 3279 "path fully disabled. Continuing with init.\n");
2917 3280
2918 3281 return IXGBE_SUCCESS;
2919 3282 }
2920 3283
2921 3284 /**
3285 + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3286 + * @hw: pointer to hardware structure
3287 + * @reg_val: Value we read from AUTOC
3288 + *
3289 + * The default case requires no protection so just do the register read.
3290 + */
3291 +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3292 +{
3293 + *locked = FALSE;
3294 + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3295 + return IXGBE_SUCCESS;
3296 +}
3297 +
3298 +/**
3299 + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3300 + * @hw: pointer to hardware structure
3301 + * @reg_val: value to write to AUTOC
3302 + * @locked: bool to indicate whether the SW/FW lock was already taken by
3303 + * previous read.
3304 + *
3305 + * The default case requires no protection so just do the register write.
3306 + */
3307 +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3308 +{
3309 + UNREFERENCED_1PARAMETER(locked);
3310 +
3311 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3312 + return IXGBE_SUCCESS;
3313 +}
3314 +
3315 +/**
2922 3316 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2923 3317 * @hw: pointer to hardware structure
2924 3318 *
2925 3319 * Enables the receive data path.
2926 3320 **/
2927 3321 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2928 3322 {
2929 3323 int secrxreg;
2930 3324
2931 3325 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2932 3326
2933 3327 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2934 3328 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2935 3329 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2936 3330 IXGBE_WRITE_FLUSH(hw);
2937 3331
2938 3332 return IXGBE_SUCCESS;
2939 3333 }
2940 3334
2941 3335 /**
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
2942 3336 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2943 3337 * @hw: pointer to hardware structure
2944 3338 * @regval: register value to write to RXCTRL
2945 3339 *
2946 3340 * Enables the Rx DMA unit
2947 3341 **/
2948 3342 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2949 3343 {
2950 3344 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2951 3345
2952 - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3346 + if (regval & IXGBE_RXCTRL_RXEN)
3347 + ixgbe_enable_rx(hw);
3348 + else
3349 + ixgbe_disable_rx(hw);
2953 3350
2954 3351 return IXGBE_SUCCESS;
2955 3352 }
2956 3353
2957 3354 /**
2958 3355 * ixgbe_blink_led_start_generic - Blink LED based on index.
2959 3356 * @hw: pointer to hardware structure
2960 3357 * @index: led number to blink
2961 3358 **/
2962 3359 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2963 3360 {
2964 3361 ixgbe_link_speed speed = 0;
2965 3362 bool link_up = 0;
2966 - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3363 + u32 autoc_reg = 0;
2967 3364 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3365 + s32 ret_val = IXGBE_SUCCESS;
3366 + bool locked = FALSE;
2968 3367
2969 3368 DEBUGFUNC("ixgbe_blink_led_start_generic");
2970 3369
2971 3370 /*
2972 3371 * Link must be up to auto-blink the LEDs;
2973 3372 * Force it if link is down.
2974 3373 */
2975 3374 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2976 3375
2977 3376 if (!link_up) {
3377 + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3378 + if (ret_val != IXGBE_SUCCESS)
3379 + goto out;
3380 +
2978 3381 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2979 3382 autoc_reg |= IXGBE_AUTOC_FLU;
2980 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3383 +
3384 + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3385 + if (ret_val != IXGBE_SUCCESS)
3386 + goto out;
3387 +
2981 3388 IXGBE_WRITE_FLUSH(hw);
2982 3389 msec_delay(10);
2983 3390 }
2984 3391
2985 3392 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2986 3393 led_reg |= IXGBE_LED_BLINK(index);
2987 3394 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2988 3395 IXGBE_WRITE_FLUSH(hw);
2989 3396
2990 - return IXGBE_SUCCESS;
3397 +out:
3398 + return ret_val;
2991 3399 }
2992 3400
2993 3401 /**
2994 3402 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2995 3403 * @hw: pointer to hardware structure
2996 3404 * @index: led number to stop blinking
2997 3405 **/
2998 3406 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2999 3407 {
3000 - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3408 + u32 autoc_reg = 0;
3001 3409 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3410 + s32 ret_val = IXGBE_SUCCESS;
3411 + bool locked = FALSE;
3002 3412
3003 3413 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3004 3414
3415 + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3416 + if (ret_val != IXGBE_SUCCESS)
3417 + goto out;
3005 3418
3006 3419 autoc_reg &= ~IXGBE_AUTOC_FLU;
3007 3420 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3008 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3009 3421
3422 + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3423 + if (ret_val != IXGBE_SUCCESS)
3424 + goto out;
3425 +
3010 3426 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3011 3427 led_reg &= ~IXGBE_LED_BLINK(index);
3012 3428 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3013 3429 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3014 3430 IXGBE_WRITE_FLUSH(hw);
3015 3431
3016 - return IXGBE_SUCCESS;
3432 +out:
3433 + return ret_val;
3017 3434 }
3018 3435
3019 3436 /**
3020 3437 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3021 3438 * @hw: pointer to hardware structure
3022 3439 * @san_mac_offset: SAN MAC address offset
3023 3440 *
3024 3441 * This function will read the EEPROM location for the SAN MAC address
3025 3442 * pointer, and returns the value at that location. This is used in both
3026 3443 * get and set mac_addr routines.
3027 3444 **/
3028 3445 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3029 3446 u16 *san_mac_offset)
3030 3447 {
3448 + s32 ret_val;
3449 +
3031 3450 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3032 3451
3033 3452 /*
3034 3453 * First read the EEPROM pointer to see if the MAC addresses are
3035 3454 * available.
3036 3455 */
3037 - hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3456 + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3457 + san_mac_offset);
3458 + if (ret_val) {
3459 + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3460 + "eeprom at offset %d failed",
3461 + IXGBE_SAN_MAC_ADDR_PTR);
3462 + }
3038 3463
3039 - return IXGBE_SUCCESS;
3464 + return ret_val;
3040 3465 }
3041 3466
3042 3467 /**
3043 3468 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3044 3469 * @hw: pointer to hardware structure
3045 3470 * @san_mac_addr: SAN MAC address
3046 3471 *
3047 3472 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3048 3473 * per-port, so set_lan_id() must be called before reading the addresses.
3049 3474 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3050 3475 * upon for non-SFP connections, so we must call it here.
3051 3476 **/
3052 3477 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3053 3478 {
3054 3479 u16 san_mac_data, san_mac_offset;
3055 3480 u8 i;
3481 + s32 ret_val;
3056 3482
3057 3483 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3058 3484
3059 3485 /*
3060 3486 * First read the EEPROM pointer to see if the MAC addresses are
3061 3487 * available. If they're not, no point in calling set_lan_id() here.
3062 3488 */
3063 - (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3064 -
3065 - if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3066 - /*
3067 - * No addresses available in this EEPROM. It's not an
3068 - * error though, so just wipe the local address and return.
3069 - */
3070 - for (i = 0; i < 6; i++)
3071 - san_mac_addr[i] = 0xFF;
3072 -
3489 + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3490 + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3073 3491 goto san_mac_addr_out;
3074 - }
3075 3492
3076 3493 /* make sure we know which port we need to program */
3077 3494 hw->mac.ops.set_lan_id(hw);
3078 3495 /* apply the port offset to the address offset */
3079 3496 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3080 3497 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3081 3498 for (i = 0; i < 3; i++) {
3082 - hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3499 + ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3500 + &san_mac_data);
3501 + if (ret_val) {
3502 + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3503 + "eeprom read at offset %d failed",
3504 + san_mac_offset);
3505 + goto san_mac_addr_out;
3506 + }
3083 3507 san_mac_addr[i * 2] = (u8)(san_mac_data);
3084 3508 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3085 3509 san_mac_offset++;
3086 3510 }
3511 + return IXGBE_SUCCESS;
3087 3512
3088 3513 san_mac_addr_out:
3514 + /*
3515 + * No addresses available in this EEPROM. It's not an
3516 + * error though, so just wipe the local address and return.
3517 + */
3518 + for (i = 0; i < 6; i++)
3519 + san_mac_addr[i] = 0xFF;
3089 3520 return IXGBE_SUCCESS;
3090 3521 }
3091 3522
3092 3523 /**
3093 3524 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3094 3525 * @hw: pointer to hardware structure
3095 3526 * @san_mac_addr: SAN MAC address
3096 3527 *
3097 3528 * Write a SAN MAC address to the EEPROM.
3098 3529 **/
3099 3530 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3100 3531 {
3101 - s32 status = IXGBE_SUCCESS;
3532 + s32 ret_val;
3102 3533 u16 san_mac_data, san_mac_offset;
3103 3534 u8 i;
3104 3535
3105 3536 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3106 3537
3107 3538 /* Look for SAN mac address pointer. If not defined, return */
3108 - (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3539 + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3540 + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3541 + return IXGBE_ERR_NO_SAN_ADDR_PTR;
3109 3542
3110 - if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3111 - status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3112 - goto san_mac_addr_out;
3113 - }
3114 -
3115 3543 /* Make sure we know which port we need to write */
3116 3544 hw->mac.ops.set_lan_id(hw);
3117 3545 /* Apply the port offset to the address offset */
3118 3546 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3119 3547 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3120 3548
3121 3549 for (i = 0; i < 3; i++) {
3122 3550 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3123 3551 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3124 3552 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3125 3553 san_mac_offset++;
3126 3554 }
3127 3555
3128 -san_mac_addr_out:
3129 - return status;
3556 + return IXGBE_SUCCESS;
3130 3557 }
3131 3558
3132 3559 /**
3133 3560 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3134 3561 * @hw: pointer to hardware structure
3135 3562 *
3136 3563 * Read PCIe configuration space, and get the MSI-X vector count from
3137 3564 * the capabilities table.
3138 3565 **/
3139 3566 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3140 3567 {
3141 3568 u16 msix_count = 1;
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3142 3569 u16 max_msix_count;
3143 3570 u16 pcie_offset;
3144 3571
3145 3572 switch (hw->mac.type) {
3146 3573 case ixgbe_mac_82598EB:
3147 3574 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3148 3575 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3149 3576 break;
3150 3577 case ixgbe_mac_82599EB:
3151 3578 case ixgbe_mac_X540:
3579 + case ixgbe_mac_X550:
3580 + case ixgbe_mac_X550EM_x:
3152 3581 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3153 3582 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3154 3583 break;
3155 3584 default:
3156 3585 return msix_count;
3157 3586 }
3158 3587
3159 3588 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3160 3589 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3590 + if (IXGBE_REMOVED(hw->hw_addr))
3591 + msix_count = 0;
3161 3592 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3162 3593
3163 3594 /* MSI-X count is zero-based in HW */
3164 3595 msix_count++;
3165 3596
3166 3597 if (msix_count > max_msix_count)
3167 3598 msix_count = max_msix_count;
3168 3599
3169 3600 return msix_count;
3170 3601 }
3171 3602
3172 3603 /**
3173 3604 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3174 3605 * @hw: pointer to hardware structure
3175 3606 * @addr: Address to put into receive address register
3176 3607 * @vmdq: VMDq pool to assign
3177 3608 *
3178 3609 * Puts an ethernet address into a receive address register, or
3179 3610 * finds the rar that it is already in; adds to the pool list
3180 3611 **/
3181 3612 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3182 3613 {
3183 3614 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3184 3615 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3185 3616 u32 rar;
3186 3617 u32 rar_low, rar_high;
3187 3618 u32 addr_low, addr_high;
3188 3619
3189 3620 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3190 3621
3191 3622 /* swap bytes for HW little endian */
3192 3623 addr_low = addr[0] | (addr[1] << 8)
3193 3624 | (addr[2] << 16)
3194 3625 | (addr[3] << 24);
3195 3626 addr_high = addr[4] | (addr[5] << 8);
3196 3627
3197 3628 /*
3198 3629 * Either find the mac_id in rar or find the first empty space.
3199 3630 * rar_highwater points to just after the highest currently used
3200 3631 * rar in order to shorten the search. It grows when we add a new
3201 3632 * rar to the top.
3202 3633 */
3203 3634 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3204 3635 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3205 3636
3206 3637 if (((IXGBE_RAH_AV & rar_high) == 0)
3207 3638 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
3208 3639 first_empty_rar = rar;
3209 3640 } else if ((rar_high & 0xFFFF) == addr_high) {
3210 3641 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3211 3642 if (rar_low == addr_low)
3212 3643 break; /* found it already in the rars */
3213 3644 }
3214 3645 }
3215 3646
3216 3647 if (rar < hw->mac.rar_highwater) {
3217 3648 /* already there so just add to the pool bits */
3218 - (void) ixgbe_set_vmdq(hw, rar, vmdq);
3649 + ixgbe_set_vmdq(hw, rar, vmdq);
3219 3650 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3220 3651 /* stick it into first empty RAR slot we found */
3221 3652 rar = first_empty_rar;
3222 - (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3653 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3223 3654 } else if (rar == hw->mac.rar_highwater) {
3224 3655 /* add it to the top of the list and inc the highwater mark */
3225 - (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3656 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3226 3657 hw->mac.rar_highwater++;
3227 3658 } else if (rar >= hw->mac.num_rar_entries) {
3228 3659 return IXGBE_ERR_INVALID_MAC_ADDR;
3229 3660 }
3230 3661
3231 3662 /*
3232 3663 * If we found rar[0], make sure the default pool bit (we use pool 0)
3233 3664 * remains cleared to be sure default pool packets will get delivered
3234 3665 */
3235 3666 if (rar == 0)
3236 - (void) ixgbe_clear_vmdq(hw, rar, 0);
3667 + ixgbe_clear_vmdq(hw, rar, 0);
3237 3668
3238 3669 return rar;
3239 3670 }
3240 3671
3241 3672 /**
3242 3673 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3243 3674 * @hw: pointer to hardware struct
3244 3675 * @rar: receive address register index to disassociate
3245 3676 * @vmdq: VMDq pool index to remove from the rar
3246 3677 **/
3247 3678 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3248 3679 {
3249 3680 u32 mpsar_lo, mpsar_hi;
3250 3681 u32 rar_entries = hw->mac.num_rar_entries;
3251 3682
3252 3683 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3253 3684
3254 3685 /* Make sure we are using a valid rar index range */
3255 3686 if (rar >= rar_entries) {
3256 - DEBUGOUT1("RAR index %d is out of range.\n", rar);
3687 + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3688 + "RAR index %d is out of range.\n", rar);
3257 3689 return IXGBE_ERR_INVALID_ARGUMENT;
3258 3690 }
3259 3691
3260 3692 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3261 3693 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3262 3694
3695 + if (IXGBE_REMOVED(hw->hw_addr))
3696 + goto done;
3697 +
3263 3698 if (!mpsar_lo && !mpsar_hi)
3264 3699 goto done;
3265 3700
3266 3701 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3267 3702 if (mpsar_lo) {
3268 3703 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3269 3704 mpsar_lo = 0;
3270 3705 }
3271 3706 if (mpsar_hi) {
3272 3707 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3273 3708 mpsar_hi = 0;
3274 3709 }
3275 3710 } else if (vmdq < 32) {
3276 3711 mpsar_lo &= ~(1 << vmdq);
3277 3712 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3278 3713 } else {
3279 3714 mpsar_hi &= ~(1 << (vmdq - 32));
3280 3715 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3281 3716 }
3282 3717
3283 3718 /* was that the last pool using this rar? */
3284 3719 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3285 3720 hw->mac.ops.clear_rar(hw, rar);
3286 3721 done:
3287 3722 return IXGBE_SUCCESS;
3288 3723 }
3289 3724
3290 3725 /**
3291 3726 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3292 3727 * @hw: pointer to hardware struct
3293 3728 * @rar: receive address register index to associate with a VMDq index
3294 3729 * @vmdq: VMDq pool index
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
3295 3730 **/
3296 3731 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3297 3732 {
3298 3733 u32 mpsar;
3299 3734 u32 rar_entries = hw->mac.num_rar_entries;
3300 3735
3301 3736 DEBUGFUNC("ixgbe_set_vmdq_generic");
3302 3737
3303 3738 /* Make sure we are using a valid rar index range */
3304 3739 if (rar >= rar_entries) {
3305 - DEBUGOUT1("RAR index %d is out of range.\n", rar);
3740 + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3741 + "RAR index %d is out of range.\n", rar);
3306 3742 return IXGBE_ERR_INVALID_ARGUMENT;
3307 3743 }
3308 3744
3309 3745 if (vmdq < 32) {
3310 3746 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3311 3747 mpsar |= 1 << vmdq;
3312 3748 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3313 3749 } else {
3314 3750 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3315 3751 mpsar |= 1 << (vmdq - 32);
3316 3752 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3317 3753 }
3318 3754 return IXGBE_SUCCESS;
3319 3755 }
3320 3756
3321 3757 /**
3322 3758 * This function should only be involved in the IOV mode.
3323 3759 * In IOV mode, Default pool is next pool after the number of
3324 3760 * VFs advertised and not 0.
3325 3761 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3326 3762 *
3327 3763 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3328 3764 * @hw: pointer to hardware struct
3329 3765 * @vmdq: VMDq pool index
3330 3766 **/
3331 3767 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3332 3768 {
3333 3769 u32 rar = hw->mac.san_mac_rar_index;
3334 3770
3335 3771 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3336 3772
3337 3773 if (vmdq < 32) {
3338 3774 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3339 3775 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3340 3776 } else {
3341 3777 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3342 3778 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3343 3779 }
3344 3780
3345 3781 return IXGBE_SUCCESS;
3346 3782 }
3347 3783
3348 3784 /**
3349 3785 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3350 3786 * @hw: pointer to hardware structure
3351 3787 **/
3352 3788 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3353 3789 {
3354 3790 int i;
3355 3791
3356 3792 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3357 3793 DEBUGOUT(" Clearing UTA\n");
3358 3794
3359 3795 for (i = 0; i < 128; i++)
3360 3796 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3361 3797
3362 3798 return IXGBE_SUCCESS;
3363 3799 }
3364 3800
3365 3801 /**
3366 3802 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3367 3803 * @hw: pointer to hardware structure
3368 3804 * @vlan: VLAN id to write to VLAN filter
3369 3805 *
3370 3806 * return the VLVF index where this VLAN id should be placed
3371 3807 *
3372 3808 **/
3373 3809 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3374 3810 {
3375 3811 u32 bits = 0;
3376 3812 u32 first_empty_slot = 0;
3377 3813 s32 regindex;
3378 3814
3379 3815 /* short cut the special case */
3380 3816 if (vlan == 0)
3381 3817 return 0;
3382 3818
3383 3819 /*
3384 3820 * Search for the vlan id in the VLVF entries. Save off the first empty
3385 3821 * slot found along the way
3386 3822 */
3387 3823 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3388 3824 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3389 3825 if (!bits && !(first_empty_slot))
3390 3826 first_empty_slot = regindex;
3391 3827 else if ((bits & 0x0FFF) == vlan)
3392 3828 break;
3393 3829 }
↓ open down ↓ |
78 lines elided |
↑ open up ↑ |
3394 3830
3395 3831 /*
3396 3832 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3397 3833 * in the VLVF. Else use the first empty VLVF register for this
3398 3834 * vlan id.
3399 3835 */
3400 3836 if (regindex >= IXGBE_VLVF_ENTRIES) {
3401 3837 if (first_empty_slot)
3402 3838 regindex = first_empty_slot;
3403 3839 else {
3404 - DEBUGOUT("No space in VLVF.\n");
3840 + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3841 + "No space in VLVF.\n");
3405 3842 regindex = IXGBE_ERR_NO_SPACE;
3406 3843 }
3407 3844 }
3408 3845
3409 3846 return regindex;
3410 3847 }
3411 3848
3412 3849 /**
3413 3850 * ixgbe_set_vfta_generic - Set VLAN filter table
3414 3851 * @hw: pointer to hardware structure
3415 3852 * @vlan: VLAN id to write to VLAN filter
3416 3853 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3417 3854 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3418 3855 *
3419 3856 * Turn on/off specified VLAN in the VLAN filter table.
3420 3857 **/
3421 3858 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3422 3859 bool vlan_on)
3423 3860 {
3424 3861 s32 regindex;
3425 3862 u32 bitindex;
3426 3863 u32 vfta;
3427 3864 u32 targetbit;
3428 3865 s32 ret_val = IXGBE_SUCCESS;
3429 3866 bool vfta_changed = FALSE;
3430 3867
3431 3868 DEBUGFUNC("ixgbe_set_vfta_generic");
3432 3869
3433 3870 if (vlan > 4095)
3434 3871 return IXGBE_ERR_PARAM;
3435 3872
3436 3873 /*
3437 3874 * this is a 2 part operation - first the VFTA, then the
3438 3875 * VLVF and VLVFB if VT Mode is set
3439 3876 * We don't write the VFTA until we know the VLVF part succeeded.
3440 3877 */
3441 3878
3442 3879 /* Part 1
3443 3880 * The VFTA is a bitstring made up of 128 32-bit registers
3444 3881 * that enable the particular VLAN id, much like the MTA:
3445 3882 * bits[11-5]: which register
3446 3883 * bits[4-0]: which bit in the register
3447 3884 */
3448 3885 regindex = (vlan >> 5) & 0x7F;
3449 3886 bitindex = vlan & 0x1F;
3450 3887 targetbit = (1 << bitindex);
3451 3888 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3452 3889
3453 3890 if (vlan_on) {
3454 3891 if (!(vfta & targetbit)) {
3455 3892 vfta |= targetbit;
3456 3893 vfta_changed = TRUE;
3457 3894 }
3458 3895 } else {
3459 3896 if ((vfta & targetbit)) {
3460 3897 vfta &= ~targetbit;
3461 3898 vfta_changed = TRUE;
3462 3899 }
3463 3900 }
3464 3901
3465 3902 /* Part 2
3466 3903 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3467 3904 */
3468 3905 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3469 3906 &vfta_changed);
3470 3907 if (ret_val != IXGBE_SUCCESS)
3471 3908 return ret_val;
3472 3909
3473 3910 if (vfta_changed)
3474 3911 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3475 3912
3476 3913 return IXGBE_SUCCESS;
3477 3914 }
3478 3915
3479 3916 /**
3480 3917 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3481 3918 * @hw: pointer to hardware structure
3482 3919 * @vlan: VLAN id to write to VLAN filter
3483 3920 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3484 3921 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3485 3922 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3486 3923 * should be changed
3487 3924 *
3488 3925 * Turn on/off specified bit in VLVF table.
3489 3926 **/
3490 3927 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3491 3928 bool vlan_on, bool *vfta_changed)
3492 3929 {
3493 3930 u32 vt;
3494 3931
3495 3932 DEBUGFUNC("ixgbe_set_vlvf_generic");
3496 3933
3497 3934 if (vlan > 4095)
3498 3935 return IXGBE_ERR_PARAM;
3499 3936
3500 3937 /* If VT Mode is set
3501 3938 * Either vlan_on
3502 3939 * make sure the vlan is in VLVF
3503 3940 * set the vind bit in the matching VLVFB
3504 3941 * Or !vlan_on
3505 3942 * clear the pool bit and possibly the vind
3506 3943 */
3507 3944 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3508 3945 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3509 3946 s32 vlvf_index;
3510 3947 u32 bits;
3511 3948
3512 3949 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3513 3950 if (vlvf_index < 0)
3514 3951 return vlvf_index;
3515 3952
3516 3953 if (vlan_on) {
3517 3954 /* set the pool bit */
3518 3955 if (vind < 32) {
3519 3956 bits = IXGBE_READ_REG(hw,
3520 3957 IXGBE_VLVFB(vlvf_index * 2));
3521 3958 bits |= (1 << vind);
3522 3959 IXGBE_WRITE_REG(hw,
3523 3960 IXGBE_VLVFB(vlvf_index * 2),
3524 3961 bits);
3525 3962 } else {
3526 3963 bits = IXGBE_READ_REG(hw,
3527 3964 IXGBE_VLVFB((vlvf_index * 2) + 1));
3528 3965 bits |= (1 << (vind - 32));
3529 3966 IXGBE_WRITE_REG(hw,
3530 3967 IXGBE_VLVFB((vlvf_index * 2) + 1),
3531 3968 bits);
3532 3969 }
3533 3970 } else {
3534 3971 /* clear the pool bit */
3535 3972 if (vind < 32) {
3536 3973 bits = IXGBE_READ_REG(hw,
3537 3974 IXGBE_VLVFB(vlvf_index * 2));
3538 3975 bits &= ~(1 << vind);
3539 3976 IXGBE_WRITE_REG(hw,
3540 3977 IXGBE_VLVFB(vlvf_index * 2),
3541 3978 bits);
3542 3979 bits |= IXGBE_READ_REG(hw,
3543 3980 IXGBE_VLVFB((vlvf_index * 2) + 1));
3544 3981 } else {
3545 3982 bits = IXGBE_READ_REG(hw,
3546 3983 IXGBE_VLVFB((vlvf_index * 2) + 1));
3547 3984 bits &= ~(1 << (vind - 32));
3548 3985 IXGBE_WRITE_REG(hw,
3549 3986 IXGBE_VLVFB((vlvf_index * 2) + 1),
3550 3987 bits);
3551 3988 bits |= IXGBE_READ_REG(hw,
3552 3989 IXGBE_VLVFB(vlvf_index * 2));
3553 3990 }
3554 3991 }
3555 3992
3556 3993 /*
3557 3994 * If there are still bits set in the VLVFB registers
3558 3995 * for the VLAN ID indicated we need to see if the
3559 3996 * caller is requesting that we clear the VFTA entry bit.
3560 3997 * If the caller has requested that we clear the VFTA
3561 3998 * entry bit but there are still pools/VFs using this VLAN
3562 3999 * ID entry then ignore the request. We're not worried
3563 4000 * about the case where we're turning the VFTA VLAN ID
3564 4001 * entry bit on, only when requested to turn it off as
3565 4002 * there may be multiple pools and/or VFs using the
3566 4003 * VLAN ID entry. In that case we cannot clear the
3567 4004 * VFTA bit until all pools/VFs using that VLAN ID have also
3568 4005 * been cleared. This will be indicated by "bits" being
3569 4006 * zero.
3570 4007 */
3571 4008 if (bits) {
3572 4009 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3573 4010 (IXGBE_VLVF_VIEN | vlan));
3574 4011 if ((!vlan_on) && (vfta_changed != NULL)) {
3575 4012 /* someone wants to clear the vfta entry
3576 4013 * but some pools/VFs are still using it.
3577 4014 * Ignore it. */
3578 4015 *vfta_changed = FALSE;
3579 4016 }
3580 4017 } else
3581 4018 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3582 4019 }
3583 4020
3584 4021 return IXGBE_SUCCESS;
3585 4022 }
3586 4023
3587 4024 /**
3588 4025 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3589 4026 * @hw: pointer to hardware structure
3590 4027 *
3591 4028 * Clears the VLAN filter table, and the VMDq index associated with the filter
3592 4029 **/
3593 4030 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3594 4031 {
3595 4032 u32 offset;
3596 4033
3597 4034 DEBUGFUNC("ixgbe_clear_vfta_generic");
3598 4035
3599 4036 for (offset = 0; offset < hw->mac.vft_size; offset++)
3600 4037 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3601 4038
3602 4039 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3603 4040 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3604 4041 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3605 4042 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3606 4043 }
3607 4044
3608 4045 return IXGBE_SUCCESS;
3609 4046 }
3610 4047
3611 4048 /**
3612 4049 * ixgbe_check_mac_link_generic - Determine link and speed status
3613 4050 * @hw: pointer to hardware structure
3614 4051 * @speed: pointer to link speed
3615 4052 * @link_up: TRUE when link is up
3616 4053 * @link_up_wait_to_complete: bool used to wait for link up or not
3617 4054 *
3618 4055 * Reads the links register to determine if link is up and the current speed
3619 4056 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* Read twice: the first read clears latched state, the second is live */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/*
		 * Poll for link-up, 100ms per iteration, up to the
		 * MAC-specific max_link_up_time bound.
		 */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/*
	 * Decode the speed field.  On X550 and newer MACs the NON_STD bit
	 * reinterprets 10G as 2.5G and 100M as 5G (NBASE-T rates).
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
3670 4117
/**
 *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 *  the EEPROM
 *  @hw: pointer to hardware structure
 *  @wwnn_prefix: the alternative WWNN prefix
 *  @wwpn_prefix: the alternative WWPN prefix
 *
 *  This function will read the EEPROM from the alternative SAN MAC address
 *  block to check the support for the alternative WWNN/WWPN prefix support.
 *
 *  Always returns IXGBE_SUCCESS; on any EEPROM read failure the error is
 *  logged and the prefixes are left at the 0xFFFF "unset" sentinel.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* 0 or 0xFFFF means the alternative block is absent */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/*
	 * get the corresponding prefix for WWNN/WWPN.  Note the asymmetry
	 * (kept from the upstream shared code): a failed WWNN read is only
	 * logged and the WWPN read is still attempted, while a failed WWPN
	 * read jumps straight to the error label.
	 */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
3717 4175
3718 4176 /**
3719 4177 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3720 4178 * @hw: pointer to hardware structure
3721 4179 * @bs: the fcoe boot status
3722 4180 *
3723 4181 * This function will read the FCOE boot status from the iSCSI FCOE block
3724 4182 **/
3725 4183 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3726 4184 {
3727 4185 u16 offset, caps, flags;
3728 4186 s32 status;
3729 4187
3730 4188 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3731 4189
3732 4190 /* clear output first */
3733 4191 *bs = ixgbe_fcoe_bootstatus_unavailable;
3734 4192
3735 4193 /* check if FCOE IBA block is present */
3736 4194 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3737 4195 status = hw->eeprom.ops.read(hw, offset, &caps);
3738 4196 if (status != IXGBE_SUCCESS)
3739 4197 goto out;
3740 4198
3741 4199 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3742 4200 goto out;
3743 4201
3744 4202 /* check if iSCSI FCOE block is populated */
3745 4203 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3746 4204 if (status != IXGBE_SUCCESS)
3747 4205 goto out;
3748 4206
3749 4207 if ((offset == 0) || (offset == 0xFFFF))
3750 4208 goto out;
3751 4209
3752 4210 /* read fcoe flags in iSCSI FCOE block */
3753 4211 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3754 4212 status = hw->eeprom.ops.read(hw, offset, &flags);
3755 4213 if (status != IXGBE_SUCCESS)
3756 4214 goto out;
3757 4215
3758 4216 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3759 4217 *bs = ixgbe_fcoe_bootstatus_enabled;
3760 4218 else
3761 4219 *bs = ixgbe_fcoe_bootstatus_disabled;
3762 4220
3763 4221 out:
3764 4222 return status;
3765 4223 }
3766 4224
/**
 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 *  @hw: pointer to hardware structure
 *  @enable: enable or disable switch for anti-spoofing
 *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 *  Writes the whole PFVFSPOOF register array: all VF pools below the PF
 *  get the requested anti-spoof setting, the PF's own bits and every pool
 *  above it are left disabled.  No-op on 82598, which lacks the register.
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* register index holding the PF bit */
	int pf_target_shift = pf % 8;	/* bit position within that register */
	u32 pfvfspoof = 0;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Do not set the bits assigned to the PF.
	 * Masking with (1 << shift) - 1 keeps only the pools below the PF.
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3808 4266
3809 4267 /**
3810 4268 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3811 4269 * @hw: pointer to hardware structure
3812 4270 * @enable: enable or disable switch for VLAN anti-spoofing
3813 - * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4271 + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3814 4272 *
3815 4273 **/
3816 4274 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3817 4275 {
3818 4276 int vf_target_reg = vf >> 3;
3819 4277 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3820 4278 u32 pfvfspoof;
3821 4279
3822 4280 if (hw->mac.type == ixgbe_mac_82598EB)
3823 4281 return;
3824 4282
3825 4283 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3826 4284 if (enable)
3827 4285 pfvfspoof |= (1 << vf_target_shift);
3828 4286 else
3829 4287 pfvfspoof &= ~(1 << vf_target_shift);
3830 4288 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3831 4289 }
3832 4290
/**
 *  ixgbe_get_device_caps_generic - Get additional device capabilities
 *  @hw: pointer to hardware structure
 *  @device_caps: the EEPROM word with the extra device capabilities
 *
 *  This function will read the EEPROM location for the device capabilities,
 *  and return the word through device_caps.  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/*
	 * NOTE(review): the EEPROM read status is deliberately ignored
	 * (matches the upstream shared code); on a failed read *device_caps
	 * may be stale — callers must not treat the word as validated.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3849 4307
3850 4308 /**
3851 4309 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3852 4310 * @hw: pointer to hardware structure
3853 4311 *
3854 4312 **/
3855 4313 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3856 4314 {
3857 4315 u32 regval;
3858 4316 u32 i;
3859 4317
3860 4318 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3861 4319
3862 4320 /* Enable relaxed ordering */
3863 4321 for (i = 0; i < hw->mac.max_tx_queues; i++) {
3864 4322 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3865 4323 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3866 4324 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3867 4325 }
3868 4326
3869 4327 for (i = 0; i < hw->mac.max_rx_queues; i++) {
3870 4328 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3871 4329 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3872 4330 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3873 4331 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3874 4332 }
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
3875 4333
3876 4334 }
3877 4335
3878 4336 /**
3879 4337 * ixgbe_calculate_checksum - Calculate checksum for buffer
3880 4338 * @buffer: pointer to EEPROM
3881 4339 * @length: size of EEPROM to calculate a checksum for
3882 4340 * Calculates the checksum for some buffer on a specified length. The
3883 4341 * checksum calculated is returned.
3884 4342 **/
3885 -static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4343 +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3886 4344 {
3887 4345 u32 i;
3888 4346 u8 sum = 0;
3889 4347
3890 4348 DEBUGFUNC("ixgbe_calculate_checksum");
3891 4349
3892 4350 if (!buffer)
3893 4351 return 0;
3894 4352
3895 4353 for (i = 0; i < length; i++)
3896 4354 sum += buffer[i];
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
3897 4355
3898 4356 return (u8) (0 - sum);
3899 4357 }
3900 4358
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hicr, i, bi, fwsts;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 buf_len;
	u16 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}
	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1ms granularity) until FW clears the command bit. */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/*
	 * Check command completion.  timeout == 0 means "wait forever"
	 * semantics are not used here: with timeout 0 the loop never runs,
	 * so only the status-valid bit is checked.
	 */
	if ((timeout != 0 && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	if (!return_data)
		return 0;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		return 0;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/*
	 * Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the <= bound reads one dword past dword_len-1,
	 * matching the upstream shared code — confirm against the FW
	 * interface spec before "fixing".
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	return 0;
}
3998 4467
3999 4468 /**
4000 4469 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4001 4470 * @hw: pointer to the HW structure
4002 4471 * @maj: driver version major number
4003 4472 * @min: driver version minor number
4004 4473 * @build: driver version build number
4005 4474 * @sub: driver version sub build number
4006 4475 *
4007 4476 * Sends driver version number to firmware through the manageability
4008 4477 * block. On success return IXGBE_SUCCESS
4009 4478 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4010 4479 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4011 4480 **/
4012 4481 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4013 4482 u8 build, u8 sub)
4014 4483 {
4015 4484 struct ixgbe_hic_drv_info fw_cmd;
4016 4485 int i;
4017 4486 s32 ret_val = IXGBE_SUCCESS;
4018 4487
4019 4488 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4020 4489
4021 4490 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4022 4491 != IXGBE_SUCCESS) {
4023 4492 ret_val = IXGBE_ERR_SWFW_SYNC;
4024 4493 goto out;
4025 4494 }
4026 4495
4027 4496 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4028 4497 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4029 4498 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4030 4499 fw_cmd.port_num = (u8)hw->bus.func;
4031 4500 fw_cmd.ver_maj = maj;
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
4032 4501 fw_cmd.ver_min = min;
4033 4502 fw_cmd.ver_build = build;
4034 4503 fw_cmd.ver_sub = sub;
4035 4504 fw_cmd.hdr.checksum = 0;
4036 4505 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4037 4506 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4038 4507 fw_cmd.pad = 0;
4039 4508 fw_cmd.pad2 = 0;
4040 4509
4041 4510 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4042 - /* LINTED */
4043 4511 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4044 - sizeof(fw_cmd));
4512 + sizeof(fw_cmd),
4513 + IXGBE_HI_COMMAND_TIMEOUT,
4514 + TRUE);
4045 4515 if (ret_val != IXGBE_SUCCESS)
4046 4516 continue;
4047 4517
4048 4518 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4049 4519 FW_CEM_RESP_STATUS_SUCCESS)
4050 4520 ret_val = IXGBE_SUCCESS;
4051 4521 else
4052 4522 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4053 4523
4054 4524 break;
4055 4525 }
4056 4526
4057 4527 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4058 4528 out:
4059 4529 return ret_val;
4060 4530 }
4061 4531
/**
 *  ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: reserve n KB of headroom
 *  @strategy: packet buffer allocation strategy
 *
 *  Splits the Rx packet buffer space across num_pb buffers (weighted or
 *  equal), distributes the Tx packet buffer equally, and zeroes any
 *  remaining unused traffic-class buffers.
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
		/* FALLTHRU */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4121 4590
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
/**
 *  ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 *  @hw: pointer to the hardware structure
 *
 *  The 82599 and x540 MACs can experience issues if TX work is still pending
 *  when a reset occurs. This function prevents this by flushing the PCIe
 *  buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll the PCIe Device Status register's
	 * Transaction Pending bit; give up after the timeout computed by
	 * ixgbe_pcie_timeout_poll(), or immediately if the device appears
	 * to have been removed from the bus.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4162 4651
4652 +
4653 +/**
4654 + * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4655 + * @hw: pointer to hardware structure
4656 + * @map: pointer to u8 arr for returning map
4657 + *
4658 + * Read the rtrup2tc HW register and resolve its content into map
4659 + **/
4660 +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4661 +{
4662 + u32 reg, i;
4663 +
4664 + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4665 + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4666 + map[i] = IXGBE_RTRUP2TC_UP_MASK &
4667 + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4668 + return;
4669 +}
4670 +
/**
 *  ixgbe_disable_rx_generic - Disable the RX data path
 *  @hw: pointer to hardware structure
 *
 *  Clears RXCTRL.RXEN if it is currently set.  On MACs other than 82598,
 *  the PFDTXGSWC.VT_LBEN (VT loopback) bit is cleared first and its prior
 *  state is recorded in hw->mac.set_lben so ixgbe_enable_rx_generic() can
 *  restore it.
 **/
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		/* 82598 has no PFDTXGSWC register */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				/* remember to re-enable loopback on Rx enable */
				hw->mac.set_lben = TRUE;
			} else {
				hw->mac.set_lben = FALSE;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}
4692 +
/**
 *  ixgbe_enable_rx_generic - Enable the RX data path
 *  @hw: pointer to hardware structure
 *
 *  Sets RXCTRL.RXEN.  On MACs other than 82598, restores the
 *  PFDTXGSWC.VT_LBEN (VT loopback) bit if ixgbe_disable_rx_generic()
 *  previously cleared it (tracked via hw->mac.set_lben).
 **/
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = FALSE;
		}
	}
}
4710 +
4711 +/**
4712 + * ixgbe_mng_present - returns TRUE when management capability is present
4713 + * @hw: pointer to hardware structure
4714 + */
4715 +bool ixgbe_mng_present(struct ixgbe_hw *hw)
4716 +{
4717 + u32 fwsm;
4718 +
4719 + if (hw->mac.type < ixgbe_mac_82599EB)
4720 + return FALSE;
4721 +
4722 + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4723 + fwsm &= IXGBE_FWSM_MODE_MASK;
4724 + return fwsm == IXGBE_FWSM_FW_MODE_PT;
4725 +}
4726 +
4727 +/**
4728 + * ixgbe_mng_enabled - Is the manageability engine enabled?
4729 + * @hw: pointer to hardware structure
4730 + *
4731 + * Returns TRUE if the manageability engine is enabled.
4732 + **/
4733 +bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
4734 +{
4735 + u32 fwsm, manc, factps;
4736 +
4737 + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4738 + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
4739 + return FALSE;
4740 +
4741 + manc = IXGBE_READ_REG(hw, IXGBE_MANC);
4742 + if (!(manc & IXGBE_MANC_RCV_TCO_EN))
4743 + return FALSE;
4744 +
4745 + if (hw->mac.type <= ixgbe_mac_X540) {
4746 + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
4747 + if (factps & IXGBE_FACTPS_MNGCG)
4748 + return FALSE;
4749 + }
4750 +
4751 + return TRUE;
4752 +}
4753 +
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the MAC and/or PHY register and restarts link.
 *  Tries each requested-and-supported speed in priority order (10G, then
 *  1G); if none links, recurses once with only the highest speed tried.
 *  Also updates hw->phy.autoneg_advertised to the requested speed mask.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
							       highest_link_speed,
							       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
4916 +
4917 +/**
4918 + * ixgbe_set_soft_rate_select_speed - Set module link speed
4919 + * @hw: pointer to hardware structure
4920 + * @speed: link speed to set
4921 + *
4922 + * Set module link speed via the soft rate select.
4923 + */
4924 +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
4925 + ixgbe_link_speed speed)
4926 +{
4927 + s32 status;
4928 + u8 rs, eeprom_data;
4929 +
4930 + switch (speed) {
4931 + case IXGBE_LINK_SPEED_10GB_FULL:
4932 + /* one bit mask same as setting on */
4933 + rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
4934 + break;
4935 + case IXGBE_LINK_SPEED_1GB_FULL:
4936 + rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
4937 + break;
4938 + default:
4939 + DEBUGOUT("Invalid fixed module speed\n");
4940 + return;
4941 + }
4942 +
4943 + /* Set RS0 */
4944 + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4945 + IXGBE_I2C_EEPROM_DEV_ADDR2,
4946 + &eeprom_data);
4947 + if (status) {
4948 + DEBUGOUT("Failed to read Rx Rate Select RS0\n");
4949 + goto out;
4950 + }
4951 +
4952 + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4953 +
4954 + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4955 + IXGBE_I2C_EEPROM_DEV_ADDR2,
4956 + eeprom_data);
4957 + if (status) {
4958 + DEBUGOUT("Failed to write Rx Rate Select RS0\n");
4959 + goto out;
4960 + }
4961 +
4962 + /* Set RS1 */
4963 + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4964 + IXGBE_I2C_EEPROM_DEV_ADDR2,
4965 + &eeprom_data);
4966 + if (status) {
4967 + DEBUGOUT("Failed to read Rx Rate Select RS1\n");
4968 + goto out;
4969 + }
4970 +
4971 + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4972 +
4973 + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4974 + IXGBE_I2C_EEPROM_DEV_ADDR2,
4975 + eeprom_data);
4976 + if (status) {
4977 + DEBUGOUT("Failed to write Rx Rate Select RS1\n");
4978 + goto out;
4979 + }
4980 +out:
4981 + return;
4982 +}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX