Print this page
4431 igb support for I354
4616 igb has uninitialized kstats
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/e1000api/e1000_i210.c
+++ new/usr/src/uts/common/io/e1000api/e1000_i210.c
1 1 /******************************************************************************
2 2
3 3 Copyright (c) 2001-2013, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD$*/
34 34
35 35 #include "e1000_api.h"
36 36
37 37
38 38 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
39 39 static void e1000_release_nvm_i210(struct e1000_hw *hw);
40 40 static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
41 41 static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
42 42 u16 *data);
43 43 static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
44 44 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
45 45 static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
46 46 u16 *data);
47 47
48 48 /**
49 49 * e1000_acquire_nvm_i210 - Request for access to EEPROM
50 50 * @hw: pointer to the HW structure
51 51 *
52 52 * Acquire the necessary semaphores for exclusive access to the EEPROM.
53 53 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
54 54 * Return successful if access grant bit set, else clear the request for
55 55 * EEPROM access and return -E1000_ERR_NVM (-1).
56 56 **/
57 57 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
58 58 {
59 59 s32 ret_val;
60 60
61 61 DEBUGFUNC("e1000_acquire_nvm_i210");
62 62
63 63 ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
64 64
65 65 return ret_val;
66 66 }
67 67
68 68 /**
69 69 * e1000_release_nvm_i210 - Release exclusive access to EEPROM
70 70 * @hw: pointer to the HW structure
71 71 *
72 72 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
73 73 * then release the semaphores acquired.
74 74 **/
75 75 static void e1000_release_nvm_i210(struct e1000_hw *hw)
76 76 {
77 77 DEBUGFUNC("e1000_release_nvm_i210");
78 78
79 79 e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
80 80 }
81 81
/**
 * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 * Returns E1000_SUCCESS with the mask's bit set in SW_FW_SYNC, or
 * -E1000_ERR_SWFW_SYNC if the resource stays busy past the timeout.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware owns the upper 16 bits */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* The HW semaphore serializes access to SW_FW_SYNC itself. */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource, then drop the HW semaphore taken above. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
133 133
/**
 * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/*
	 * Spin until the HW semaphore is ours; we must hold it to modify
	 * SW_FW_SYNC.  NOTE(review): this loop has no timeout by design —
	 * release must eventually succeed or the resource leaks.
	 */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	/* Clear our ownership bit(s), then drop the HW semaphore. */
	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}
157 157
/**
 * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM.
 * Two-stage protocol: first wait for the SW bit (SMBI) to read clear,
 * then latch the FW bit (SWESMBI).  Returns E1000_SUCCESS when both
 * are held, -E1000_ERR_NVM on timeout.
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;	/* scale wait with NVM size */
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore: poll until SMBI reads back clear. */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/*
		 * In rare circumstances, the driver may not have released the
		 * SW semaphore. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			/* One-shot recovery: only attempted the first time. */
			hw->dev_spec._82575.clear_semaphore_once = FALSE;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
227 227
228 228 /**
229 229 * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
230 230 * @hw: pointer to the HW structure
231 231 * @offset: offset of word in the Shadow Ram to read
232 232 * @words: number of words to read
233 233 * @data: word read from the Shadow Ram
234 234 *
235 235 * Reads a 16 bit word from the Shadow Ram using the EERD register.
236 236 * Uses necessary synchronization semaphores.
237 237 **/
238 238 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
239 239 u16 *data)
240 240 {
241 241 s32 status = E1000_SUCCESS;
242 242 u16 i, count;
243 243
244 244 DEBUGFUNC("e1000_read_nvm_srrd_i210");
245 245
246 246 /* We cannot hold synchronization semaphores for too long,
247 247 * because of forceful takeover procedure. However it is more efficient
248 248 * to read in bursts than synchronizing access for each word. */
249 249 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
250 250 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
251 251 E1000_EERD_EEWR_MAX_COUNT : (words - i);
252 252 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
253 253 status = e1000_read_nvm_eerd(hw, offset, count,
254 254 data + i);
255 255 hw->nvm.ops.release(hw);
256 256 } else {
257 257 status = E1000_ERR_SWFW_SYNC;
258 258 }
259 259
260 260 if (status != E1000_SUCCESS)
261 261 break;
262 262 }
263 263
264 264 return status;
265 265 }
266 266
267 267 /**
268 268 * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
269 269 * @hw: pointer to the HW structure
270 270 * @offset: offset within the Shadow RAM to be written to
271 271 * @words: number of words to write
272 272 * @data: 16 bit word(s) to be written to the Shadow RAM
273 273 *
274 274 * Writes data to Shadow RAM at offset using EEWR register.
275 275 *
276 276 * If e1000_update_nvm_checksum is not called after this function , the
277 277 * data will not be committed to FLASH and also Shadow RAM will most likely
278 278 * contain an invalid checksum.
279 279 *
280 280 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
281 281 * partially written.
282 282 **/
283 283 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
284 284 u16 *data)
285 285 {
286 286 s32 status = E1000_SUCCESS;
287 287 u16 i, count;
288 288
289 289 DEBUGFUNC("e1000_write_nvm_srwr_i210");
290 290
291 291 /* We cannot hold synchronization semaphores for too long,
292 292 * because of forceful takeover procedure. However it is more efficient
293 293 * to write in bursts than synchronizing access for each word. */
294 294 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
295 295 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
296 296 E1000_EERD_EEWR_MAX_COUNT : (words - i);
297 297 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
298 298 status = e1000_write_nvm_srwr(hw, offset, count,
299 299 data + i);
300 300 hw->nvm.ops.release(hw);
301 301 } else {
302 302 status = E1000_ERR_SWFW_SYNC;
303 303 }
304 304
305 305 if (status != E1000_SUCCESS)
306 306 break;
307 307 }
308 308
309 309 return status;
310 310 }
311 311
/**
 * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 * Caller must already hold the NVM synchronization semaphore.
 *
 * If e1000_update_nvm_checksum is not called after this function , the
 * Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
	u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;	/* polls of ~5us each per word written */
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Pack address, data, and the START command into SRWR. */
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
		    (data[i] << E1000_NVM_RW_REG_DATA) |
		    E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		/* Poll for the DONE bit before issuing the next word. */
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
370 370
/**
 * e1000_read_nvm_i211 - Read NVM wrapper function for I211
 * @hw: pointer to the HW structure
 * @offset: the word address (aka eeprom offset) to read
 * @words: number of words to read (only NVM_MAC_ADDR reads more than one)
 * @data: pointer to the data read
 *
 * Wrapper function to return data formerly found in the NVM.
 * The I211 has no external NVM; values come from the on-die iNVM (OTP),
 * hardware registers, or compile-time defaults.
 **/
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset,
	u16 words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_nvm_i211");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/*
		 * NOTE(review): error codes are negative, so OR-ing them
		 * only yields a nonzero "failed" indicator, not a specific
		 * code — matches upstream behavior.
		 */
		ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		/* Fall back to the documented default when not in OTP. */
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	/* IDs live in PCI config space mirrors, not in the iNVM. */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
449 449
/**
 * e1000_read_invm_i211 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.  Walks the iNVM record stream, skipping over autoload
 * structures until a word-autoload record matching @address is found or
 * the uninitialized region begins.
 **/
s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_i211");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* Uninitialized space marks the end of the record stream. */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Multi-dword structures: advance past their payload. */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
493 493
/**
 * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	/* Saved copy of the semaphore-taking read op while we swap it out. */
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}
530 530
531 531
/**
 * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word preceding the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Must release before the early exit. */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Word that makes the whole image sum to NVM_SUM (0xBABA). */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
		    &checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
593 593
594 594 /**
595 595 * e1000_update_flash_i210 - Commit EEPROM to the flash
596 596 * @hw: pointer to the HW structure
597 597 *
598 598 **/
599 599 s32 e1000_update_flash_i210(struct e1000_hw *hw)
600 600 {
601 601 s32 ret_val = E1000_SUCCESS;
602 602 u32 flup;
603 603
604 604 DEBUGFUNC("e1000_update_flash_i210");
605 605
606 606 ret_val = e1000_pool_flash_update_done_i210(hw);
607 607 if (ret_val == -E1000_ERR_NVM) {
608 608 DEBUGOUT("Flash update time out\n");
609 609 goto out;
610 610 }
611 611
612 612 flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
613 613 E1000_WRITE_REG(hw, E1000_EECD, flup);
614 614
615 615 ret_val = e1000_pool_flash_update_done_i210(hw);
616 616 if (ret_val == E1000_SUCCESS)
617 617 DEBUGOUT("Flash update complete\n");
618 618 else
619 619 DEBUGOUT("Flash update time out\n");
620 620
621 621 out:
622 622 return ret_val;
623 623 }
624 624
625 625 /**
626 626 * e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
627 627 * @hw: pointer to the HW structure
628 628 *
629 629 **/
630 630 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
631 631 {
632 632 s32 ret_val = -E1000_ERR_NVM;
633 633 u32 i, reg;
634 634
635 635 DEBUGFUNC("e1000_pool_flash_update_done_i210");
636 636
637 637 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
638 638 reg = E1000_READ_REG(hw, E1000_EECD);
639 639 if (reg & E1000_EECD_FLUDONE_I210) {
640 640 ret_val = E1000_SUCCESS;
641 641 break;
642 642 }
643 643 usec_delay(5);
644 644 }
645 645
646 646 return ret_val;
647 647 }
648 648
649 649 /**
650 650 * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
651 651 * @hw: pointer to the HW structure
652 652 *
653 653 * Initialize the i210 NVM parameters and function pointers.
654 654 **/
655 655 static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
656 656 {
657 657 s32 ret_val = E1000_SUCCESS;
658 658 struct e1000_nvm_info *nvm = &hw->nvm;
659 659
660 660 DEBUGFUNC("e1000_init_nvm_params_i210");
661 661
662 662 ret_val = e1000_init_nvm_params_82575(hw);
663 663
664 664 nvm->ops.acquire = e1000_acquire_nvm_i210;
665 665 nvm->ops.release = e1000_release_nvm_i210;
666 666 nvm->ops.read = e1000_read_nvm_srrd_i210;
667 667 nvm->ops.write = e1000_write_nvm_srwr_i210;
668 668 nvm->ops.valid_led_default = e1000_valid_led_default_i210;
669 669 nvm->ops.validate = e1000_validate_nvm_checksum_i210;
670 670 nvm->ops.update = e1000_update_nvm_checksum_i210;
671 671
672 672 return ret_val;
673 673 }
674 674
675 675 /**
676 676 * e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers
677 677 * @hw: pointer to the HW structure
678 678 *
679 679 * Initialize the NVM parameters and function pointers for i211.
680 680 **/
681 681 static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw)
682 682 {
683 683 struct e1000_nvm_info *nvm = &hw->nvm;
684 684
685 685 DEBUGFUNC("e1000_init_nvm_params_i211");
686 686
687 687 nvm->ops.acquire = e1000_acquire_nvm_i210;
688 688 nvm->ops.release = e1000_release_nvm_i210;
689 689 nvm->ops.read = e1000_read_nvm_i211;
690 690 nvm->ops.valid_led_default = e1000_valid_led_default_i210;
691 691 nvm->ops.write = e1000_null_write_nvm;
692 692 nvm->ops.validate = e1000_null_ops_generic;
693 693 nvm->ops.update = e1000_null_ops_generic;
694 694
695 695 return E1000_SUCCESS;
696 696 }
697 697
698 698 /**
699 699 * e1000_init_function_pointers_i210 - Init func ptrs.
700 700 * @hw: pointer to the HW structure
701 701 *
702 702 * Called to initialize all function pointers and parameters.
703 703 **/
704 704 void e1000_init_function_pointers_i210(struct e1000_hw *hw)
705 705 {
706 706 e1000_init_function_pointers_82575(hw);
707 707
708 708 switch (hw->mac.type) {
709 709 case e1000_i210:
710 710 hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
711 711 break;
712 712 case e1000_i211:
713 713 hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
714 714 break;
715 715 default:
716 716 break;
717 717 }
718 718 return;
719 719 }
720 720
721 721 /**
722 722 * e1000_valid_led_default_i210 - Verify a valid default LED config
723 723 * @hw: pointer to the HW structure
724 724 * @data: pointer to the NVM (EEPROM)
725 725 *
726 726 * Read the EEPROM for the current default LED configuration. If the
727 727 * LED configuration is not valid, set to a valid LED configuration.
728 728 **/
729 729 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
730 730 {
731 731 s32 ret_val;
732 732
733 733 DEBUGFUNC("e1000_valid_led_default_i210");
734 734
735 735 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
736 736 if (ret_val) {
737 737 DEBUGOUT("NVM Read Error\n");
738 738 goto out;
739 739 }
740 740
741 741 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
742 742 switch (hw->phy.media_type) {
743 743 case e1000_media_type_internal_serdes:
744 744 *data = ID_LED_DEFAULT_I210_SERDES;
↓ open down ↓ |
744 lines elided |
↑ open up ↑ |
745 745 break;
746 746 case e1000_media_type_copper:
747 747 default:
748 748 *data = ID_LED_DEFAULT_I210;
749 749 break;
750 750 }
751 751 }
752 752 out:
753 753 return ret_val;
754 754 }
755 +
/**
 * __e1000_access_xmdio_reg - Read/write XMDIO register
 * @hw: pointer to the HW structure
 * @address: XMDIO address to program
 * @dev_addr: device address to program
 * @data: pointer to value to read/write from/to the XMDIO address
 * @read: boolean flag to indicate read or write
 *
 * Performs an indirect clause-45 style access through the MMD
 * access-control (MMDAC) and address/data (MMDAAD) registers:
 * select the device, latch the register address, switch MMDAC to
 * data mode, then transfer the value.
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
	u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("__e1000_access_xmdio_reg");

	/* Select the MMD device to talk to. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	/* Latch the target register address. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	/* Switch MMDAC to data mode so MMDAAD carries the value. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
		dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
798 +
799 +/**
800 + * e1000_read_xmdio_reg - Read XMDIO register
801 + * @hw: pointer to the HW structure
802 + * @addr: XMDIO address to program
803 + * @dev_addr: device address to program
804 + * @data: value to be read from the EMI address
805 + **/
806 +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
807 +{
808 + DEBUGFUNC("e1000_read_xmdio_reg");
809 +
810 + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE);
811 +}
812 +
813 +/**
814 + * e1000_write_xmdio_reg - Write XMDIO register
815 + * @hw: pointer to the HW structure
816 + * @addr: XMDIO address to program
817 + * @dev_addr: device address to program
818 + * @data: value to be written to the XMDIO address
819 + **/
820 +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
821 +{
822 + DEBUGFUNC("e1000_read_xmdio_reg");
823 +
824 + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE);
825 +}
826 +
827 +
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX