3492 some e1000g devices don't support 15 unicast addresses
--- old/usr/src/uts/common/io/e1000g/e1000g_main.c
+++ new/usr/src/uts/common/io/e1000g/e1000g_main.c
1 1 /*
2 2 * This file is provided under a CDDLv1 license. When using or
3 3 * redistributing this file, you may do so under this license.
4 4 * In redistributing this file this license must be included
5 5 * and no other modification of this header file is permitted.
6 6 *
7 7 * CDDL LICENSE SUMMARY
8 8 *
9 9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 10 *
11 11 * The contents of this file are subject to the terms of Version
12 12 * 1.0 of the Common Development and Distribution License (the "License").
13 13 *
14 14 * You should have received a copy of the License with this software.
15 15 * You can obtain a copy of the License at
16 16 * http://www.opensolaris.org/os/licensing.
17 17 * See the License for the specific language governing permissions
18 18 * and limitations under the License.
19 19 */
20 20
21 21 /*
22 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 27 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * **********************************************************************
32 32 * *
33 33 * Module Name: *
34 34 * e1000g_main.c *
35 35 * *
36 36 * Abstract: *
37 37 * This file contains the interface routines for the Solaris OS. *
38 38 * It has all DDI entry point routines and GLD entry point routines. *
39 39 * *
40 40 * This file also contains routines that take care of initialization, *
41 41 * the uninit routine, and the interrupt routine. *
42 42 * *
43 43 * **********************************************************************
44 44 */
45 45
46 46 #include <sys/dlpi.h>
47 47 #include <sys/mac.h>
48 48 #include "e1000g_sw.h"
49 49 #include "e1000g_debug.h"
50 50
51 51 static char ident[] = "Intel PRO/1000 Ethernet";
52 52 /* LINTED E_STATIC_UNUSED */
53 53 static char e1000g_version[] = "Driver Ver. 5.3.24";
54 54
55 55 /*
56 56 * Prototypes for DDI entry points
57 57 */
58 58 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
59 59 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
60 60 static int e1000g_quiesce(dev_info_t *);
61 61
62 62 /*
63 63 * init and intr routine prototypes
64 64 */
65 65 static int e1000g_resume(dev_info_t *);
66 66 static int e1000g_suspend(dev_info_t *);
67 67 static uint_t e1000g_intr_pciexpress(caddr_t);
68 68 static uint_t e1000g_intr(caddr_t);
69 69 static void e1000g_intr_work(struct e1000g *, uint32_t);
70 70 #pragma inline(e1000g_intr_work)
71 71 static int e1000g_init(struct e1000g *);
72 72 static int e1000g_start(struct e1000g *, boolean_t);
73 73 static void e1000g_stop(struct e1000g *, boolean_t);
74 74 static int e1000g_m_start(void *);
75 75 static void e1000g_m_stop(void *);
76 76 static int e1000g_m_promisc(void *, boolean_t);
77 77 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
78 78 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
79 79 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
80 80 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
81 81 uint_t, const void *);
82 82 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
83 83 uint_t, void *);
84 84 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
85 85 mac_prop_info_handle_t);
86 86 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
87 87 const void *);
88 88 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
89 89 static void e1000g_init_locks(struct e1000g *);
90 90 static void e1000g_destroy_locks(struct e1000g *);
91 91 static int e1000g_identify_hardware(struct e1000g *);
92 92 static int e1000g_regs_map(struct e1000g *);
93 93 static int e1000g_set_driver_params(struct e1000g *);
94 94 static void e1000g_set_bufsize(struct e1000g *);
95 95 static int e1000g_register_mac(struct e1000g *);
96 96 static boolean_t e1000g_rx_drain(struct e1000g *);
97 97 static boolean_t e1000g_tx_drain(struct e1000g *);
98 98 static void e1000g_init_unicst(struct e1000g *);
99 99 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
100 100 static int e1000g_alloc_rx_data(struct e1000g *);
101 101 static void e1000g_release_multicast(struct e1000g *);
102 102 static void e1000g_pch_limits(struct e1000g *);
103 103 static uint32_t e1000g_mtu2maxframe(uint32_t);
104 104
105 105 /*
106 106 * Local routines
107 107 */
108 108 static boolean_t e1000g_reset_adapter(struct e1000g *);
109 109 static void e1000g_tx_clean(struct e1000g *);
110 110 static void e1000g_rx_clean(struct e1000g *);
111 111 static void e1000g_link_timer(void *);
112 112 static void e1000g_local_timer(void *);
113 113 static boolean_t e1000g_link_check(struct e1000g *);
114 114 static boolean_t e1000g_stall_check(struct e1000g *);
115 115 static void e1000g_smartspeed(struct e1000g *);
116 116 static void e1000g_get_conf(struct e1000g *);
117 117 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
118 118 int *);
119 119 static void enable_watchdog_timer(struct e1000g *);
120 120 static void disable_watchdog_timer(struct e1000g *);
121 121 static void start_watchdog_timer(struct e1000g *);
122 122 static void restart_watchdog_timer(struct e1000g *);
123 123 static void stop_watchdog_timer(struct e1000g *);
124 124 static void stop_link_timer(struct e1000g *);
125 125 static void stop_82547_timer(e1000g_tx_ring_t *);
126 126 static void e1000g_force_speed_duplex(struct e1000g *);
127 127 static void e1000g_setup_max_mtu(struct e1000g *);
128 128 static void e1000g_get_max_frame_size(struct e1000g *);
129 129 static boolean_t is_valid_mac_addr(uint8_t *);
130 130 static void e1000g_unattach(dev_info_t *, struct e1000g *);
131 131 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
132 132 #ifdef E1000G_DEBUG
133 133 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
134 134 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
135 135 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
136 136 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
137 137 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
138 138 struct iocblk *, mblk_t *);
139 139 #endif
140 140 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
141 141 struct iocblk *, mblk_t *);
142 142 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
143 143 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
144 144 static void e1000g_set_internal_loopback(struct e1000g *);
145 145 static void e1000g_set_external_loopback_1000(struct e1000g *);
146 146 static void e1000g_set_external_loopback_100(struct e1000g *);
147 147 static void e1000g_set_external_loopback_10(struct e1000g *);
148 148 static int e1000g_add_intrs(struct e1000g *);
149 149 static int e1000g_intr_add(struct e1000g *, int);
150 150 static int e1000g_rem_intrs(struct e1000g *);
151 151 static int e1000g_enable_intrs(struct e1000g *);
152 152 static int e1000g_disable_intrs(struct e1000g *);
153 153 static boolean_t e1000g_link_up(struct e1000g *);
154 154 #ifdef __sparc
155 155 static boolean_t e1000g_find_mac_address(struct e1000g *);
156 156 #endif
157 157 static void e1000g_get_phy_state(struct e1000g *);
158 158 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
159 159 const void *impl_data);
160 160 static void e1000g_fm_init(struct e1000g *Adapter);
161 161 static void e1000g_fm_fini(struct e1000g *Adapter);
162 162 static void e1000g_param_sync(struct e1000g *);
163 163 static void e1000g_get_driver_control(struct e1000_hw *);
164 164 static void e1000g_release_driver_control(struct e1000_hw *);
165 165 static void e1000g_restore_promisc(struct e1000g *Adapter);
166 166
167 167 char *e1000g_priv_props[] = {
168 168 "_tx_bcopy_threshold",
169 169 "_tx_interrupt_enable",
170 170 "_tx_intr_delay",
171 171 "_tx_intr_abs_delay",
172 172 "_rx_bcopy_threshold",
173 173 "_max_num_rcv_packets",
174 174 "_rx_intr_delay",
175 175 "_rx_intr_abs_delay",
176 176 "_intr_throttling_rate",
177 177 "_intr_adaptive",
178 178 "_adv_pause_cap",
179 179 "_adv_asym_pause_cap",
180 180 NULL
181 181 };
182 182
183 183 static struct cb_ops cb_ws_ops = {
184 184 nulldev, /* cb_open */
185 185 nulldev, /* cb_close */
186 186 nodev, /* cb_strategy */
187 187 nodev, /* cb_print */
188 188 nodev, /* cb_dump */
189 189 nodev, /* cb_read */
190 190 nodev, /* cb_write */
191 191 nodev, /* cb_ioctl */
192 192 nodev, /* cb_devmap */
193 193 nodev, /* cb_mmap */
194 194 nodev, /* cb_segmap */
195 195 nochpoll, /* cb_chpoll */
196 196 ddi_prop_op, /* cb_prop_op */
197 197 NULL, /* cb_stream */
198 198 D_MP | D_HOTPLUG, /* cb_flag */
199 199 CB_REV, /* cb_rev */
200 200 nodev, /* cb_aread */
201 201 nodev /* cb_awrite */
202 202 };
203 203
204 204 static struct dev_ops ws_ops = {
205 205 DEVO_REV, /* devo_rev */
206 206 0, /* devo_refcnt */
207 207 NULL, /* devo_getinfo */
208 208 nulldev, /* devo_identify */
209 209 nulldev, /* devo_probe */
210 210 e1000g_attach, /* devo_attach */
211 211 e1000g_detach, /* devo_detach */
212 212 nodev, /* devo_reset */
213 213 &cb_ws_ops, /* devo_cb_ops */
214 214 NULL, /* devo_bus_ops */
215 215 ddi_power, /* devo_power */
216 216 e1000g_quiesce /* devo_quiesce */
217 217 };
218 218
219 219 static struct modldrv modldrv = {
220 220 &mod_driverops, /* Type of module. This one is a driver */
221 221 ident, /* Description string */
222 222 &ws_ops, /* driver ops */
223 223 };
224 224
225 225 static struct modlinkage modlinkage = {
226 226 MODREV_1, &modldrv, NULL
227 227 };
228 228
229 229 /* Access attributes for register mapping */
230 230 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
231 231 DDI_DEVICE_ATTR_V1,
232 232 DDI_STRUCTURE_LE_ACC,
233 233 DDI_STRICTORDER_ACC,
234 234 DDI_FLAGERR_ACC
235 235 };
236 236
237 237 #define E1000G_M_CALLBACK_FLAGS \
238 238 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
239 239
240 240 static mac_callbacks_t e1000g_m_callbacks = {
241 241 E1000G_M_CALLBACK_FLAGS,
242 242 e1000g_m_stat,
243 243 e1000g_m_start,
244 244 e1000g_m_stop,
245 245 e1000g_m_promisc,
246 246 e1000g_m_multicst,
247 247 NULL,
248 248 e1000g_m_tx,
249 249 NULL,
250 250 e1000g_m_ioctl,
251 251 e1000g_m_getcapab,
252 252 NULL,
253 253 NULL,
254 254 e1000g_m_setprop,
255 255 e1000g_m_getprop,
256 256 e1000g_m_propinfo
257 257 };
258 258
259 259 /*
260 260 * Global variables
261 261 */
262 262 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
263 263 uint32_t e1000g_mblks_pending = 0;
264 264 /*
265 265 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
266 266 * Here we maintain a private dev_info list if e1000g_force_detach is
267 267 * enabled. If we force the driver to detach while there are still some
268 268 * rx buffers retained in the upper layer, we have to keep a copy of the
269 269 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
270 270 * structure will be freed after the driver is detached. However, when we
271 271 * finally free those rx buffers released by the upper layer, we need to
272 272 * refer to the dev_info to free the dma buffers. So we save a copy of
273 273 * the dev_info for this purpose. On x86 platform, we assume this copy
274 274 * of dev_info is always valid, but on SPARC platform, it could be invalid
275 275 * after the system board level DR operation. For this reason, the global
276 276 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
277 277 */
278 278 #ifdef __sparc
279 279 boolean_t e1000g_force_detach = B_FALSE;
280 280 #else
281 281 boolean_t e1000g_force_detach = B_TRUE;
282 282 #endif
283 283 private_devi_list_t *e1000g_private_devi_list = NULL;
284 284
285 285 /*
286 286 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
287 287 * the private dev_info list, and to serialize the processing of rx buffer
288 288 * freeing and rx buffer recycling.
289 289 */
290 290 kmutex_t e1000g_rx_detach_lock;
291 291 /*
292 292 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
293 293 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
294 294 * If there are many e1000g instances, the system may run out of DVMA
295 295 * resources during the initialization of the instances, in which case the flag will
296 296 * be changed to "USE_DMA". Because different e1000g instances are initialized
297 297 * in parallel, we need to use this lock to protect the flag.
298 298 */
299 299 krwlock_t e1000g_dma_type_lock;
300 300
301 301 /*
302 302 * The 82546 chipset is a dual-port device; both ports share one eeprom.
303 303 * Based on the information from Intel, the 82546 chipset has some hardware
304 304 * problem. When one port is being reset and the other port is trying to
305 305 * access the eeprom, it could cause a system hang or panic. To work around this
306 306 * hardware problem, we use a global mutex to prevent such operations from
307 307 * happening simultaneously on different instances. This workaround is applied
308 308 * to all the devices supported by this driver.
309 309 */
310 310 kmutex_t e1000g_nvm_lock;
311 311
312 312 /*
313 313 * Loadable module configuration entry points for the driver
314 314 */
315 315
316 316 /*
317 317 * _init - module initialization
318 318 */
319 319 int
320 320 _init(void)
321 321 {
322 322 int status;
323 323
324 324 mac_init_ops(&ws_ops, WSNAME);
325 325 status = mod_install(&modlinkage);
326 326 if (status != DDI_SUCCESS)
327 327 mac_fini_ops(&ws_ops);
328 328 else {
329 329 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
330 330 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
331 331 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
332 332 }
333 333
334 334 return (status);
335 335 }
336 336
337 337 /*
338 338 * _fini - module finalization
339 339 */
340 340 int
341 341 _fini(void)
342 342 {
343 343 int status;
344 344
345 345 if (e1000g_mblks_pending != 0)
346 346 return (EBUSY);
347 347
348 348 status = mod_remove(&modlinkage);
349 349 if (status == DDI_SUCCESS) {
350 350 mac_fini_ops(&ws_ops);
351 351
352 352 if (e1000g_force_detach) {
353 353 private_devi_list_t *devi_node;
354 354
355 355 mutex_enter(&e1000g_rx_detach_lock);
356 356 while (e1000g_private_devi_list != NULL) {
357 357 devi_node = e1000g_private_devi_list;
358 358 e1000g_private_devi_list =
359 359 e1000g_private_devi_list->next;
360 360
361 361 kmem_free(devi_node->priv_dip,
362 362 sizeof (struct dev_info));
363 363 kmem_free(devi_node,
364 364 sizeof (private_devi_list_t));
365 365 }
366 366 mutex_exit(&e1000g_rx_detach_lock);
367 367 }
368 368
369 369 mutex_destroy(&e1000g_rx_detach_lock);
370 370 rw_destroy(&e1000g_dma_type_lock);
371 371 mutex_destroy(&e1000g_nvm_lock);
372 372 }
373 373
374 374 return (status);
375 375 }
376 376
377 377 /*
378 378 * _info - module information
379 379 */
380 380 int
381 381 _info(struct modinfo *modinfop)
382 382 {
383 383 return (mod_info(&modlinkage, modinfop));
384 384 }
385 385
386 386 /*
387 387 * e1000g_attach - driver attach
388 388 *
389 389 * This function is the device-specific initialization entry
390 390 * point. This entry point is required and must be written.
391 391 * The DDI_ATTACH command must be provided in the attach entry
392 392 * point. When attach() is called with cmd set to DDI_ATTACH,
393 393 * all normal kernel services (such as kmem_alloc(9F)) are
394 394 * available for use by the driver.
395 395 *
396 396 * The attach() function will be called once for each instance
397 397 * of the device on the system with cmd set to DDI_ATTACH.
398 398 * Until attach() succeeds, the only driver entry points which
399 399 * may be called are open(9E) and getinfo(9E).
400 400 */
401 401 static int
402 402 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
403 403 {
404 404 struct e1000g *Adapter;
405 405 struct e1000_hw *hw;
406 406 struct e1000g_osdep *osdep;
407 407 int instance;
408 408
409 409 switch (cmd) {
410 410 default:
411 411 e1000g_log(NULL, CE_WARN,
412 412 "Unsupported command send to e1000g_attach... ");
413 413 return (DDI_FAILURE);
414 414
415 415 case DDI_RESUME:
416 416 return (e1000g_resume(devinfo));
417 417
418 418 case DDI_ATTACH:
419 419 break;
420 420 }
421 421
422 422 /*
423 423 * get device instance number
424 424 */
425 425 instance = ddi_get_instance(devinfo);
426 426
427 427 /*
428 428 * Allocate soft data structure
429 429 */
430 430 Adapter =
431 431 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
432 432
433 433 Adapter->dip = devinfo;
434 434 Adapter->instance = instance;
435 435 Adapter->tx_ring->adapter = Adapter;
436 436 Adapter->rx_ring->adapter = Adapter;
437 437
438 438 hw = &Adapter->shared;
439 439 osdep = &Adapter->osdep;
440 440 hw->back = osdep;
441 441 osdep->adapter = Adapter;
442 442
443 443 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
444 444
445 445 /*
446 446 * Initialize for fma support
447 447 */
448 448 (void) e1000g_get_prop(Adapter, "fm-capable",
449 449 0, 0x0f,
450 450 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
451 451 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
452 452 &Adapter->fm_capabilities);
453 453 e1000g_fm_init(Adapter);
454 454 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
455 455
456 456 /*
457 457 * PCI Configure
458 458 */
459 459 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
460 460 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
461 461 goto attach_fail;
462 462 }
463 463 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
464 464
465 465 /*
466 466 * Setup hardware
467 467 */
468 468 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
469 469 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
470 470 goto attach_fail;
471 471 }
472 472
473 473 /*
474 474 * Map in the device registers.
475 475 */
476 476 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
477 477 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
478 478 goto attach_fail;
479 479 }
480 480 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
481 481
482 482 /*
483 483 * Initialize driver parameters
484 484 */
485 485 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
486 486 goto attach_fail;
487 487 }
488 488 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
489 489
490 490 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
491 491 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
492 492 goto attach_fail;
493 493 }
494 494
495 495 /*
496 496 * Initialize interrupts
497 497 */
498 498 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
499 499 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
500 500 goto attach_fail;
501 501 }
502 502 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
503 503
504 504 /*
505 505 * Initialize mutexes for this device.
506 506 * Do this before enabling the interrupt handler and
507 507 * registering the softint, to avoid the condition where the
508 508 * interrupt handler can try to use an uninitialized mutex.
509 509 */
510 510 e1000g_init_locks(Adapter);
511 511 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
512 512
513 513 /*
514 514 * Initialize Driver Counters
515 515 */
516 516 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
517 517 e1000g_log(Adapter, CE_WARN, "Init stats failed");
518 518 goto attach_fail;
519 519 }
520 520 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
521 521
522 522 /*
523 523 * Initialize chip hardware and software structures
524 524 */
525 525 rw_enter(&Adapter->chip_lock, RW_WRITER);
526 526 if (e1000g_init(Adapter) != DDI_SUCCESS) {
527 527 rw_exit(&Adapter->chip_lock);
528 528 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
529 529 goto attach_fail;
530 530 }
531 531 rw_exit(&Adapter->chip_lock);
532 532 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
533 533
534 534 /*
535 535 * Register the driver to the MAC
536 536 */
537 537 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
538 538 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
539 539 goto attach_fail;
540 540 }
541 541 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
542 542
543 543 /*
544 544 * Now that mutex locks are initialized, and the chip is also
545 545 * initialized, enable interrupts.
546 546 */
547 547 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
548 548 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
549 549 goto attach_fail;
550 550 }
551 551 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
552 552
553 553 /*
554 554 * If e1000g_force_detach is enabled, create a new entry in the
555 555 * global private dip list; it maintains the priv_dip for DR
556 556 * support after the driver is detached.
557 557 */
558 558 if (e1000g_force_detach) {
559 559 private_devi_list_t *devi_node;
560 560
561 561 Adapter->priv_dip =
562 562 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
563 563 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
564 564 sizeof (struct dev_info));
565 565
566 566 devi_node =
567 567 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
568 568
569 569 mutex_enter(&e1000g_rx_detach_lock);
570 570 devi_node->priv_dip = Adapter->priv_dip;
571 571 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
572 572 devi_node->pending_rx_count = 0;
573 573
574 574 Adapter->priv_devi_node = devi_node;
575 575
576 576 if (e1000g_private_devi_list == NULL) {
577 577 devi_node->prev = NULL;
578 578 devi_node->next = NULL;
579 579 e1000g_private_devi_list = devi_node;
580 580 } else {
581 581 devi_node->prev = NULL;
582 582 devi_node->next = e1000g_private_devi_list;
583 583 e1000g_private_devi_list->prev = devi_node;
584 584 e1000g_private_devi_list = devi_node;
585 585 }
586 586 mutex_exit(&e1000g_rx_detach_lock);
587 587 }
588 588
589 589 Adapter->e1000g_state = E1000G_INITIALIZED;
590 590 return (DDI_SUCCESS);
591 591
592 592 attach_fail:
593 593 e1000g_unattach(devinfo, Adapter);
594 594 return (DDI_FAILURE);
595 595 }
596 596
597 597 static int
598 598 e1000g_register_mac(struct e1000g *Adapter)
599 599 {
600 600 struct e1000_hw *hw = &Adapter->shared;
601 601 mac_register_t *mac;
602 602 int err;
603 603
604 604 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
605 605 return (DDI_FAILURE);
606 606
607 607 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
608 608 mac->m_driver = Adapter;
609 609 mac->m_dip = Adapter->dip;
610 610 mac->m_src_addr = hw->mac.addr;
611 611 mac->m_callbacks = &e1000g_m_callbacks;
612 612 mac->m_min_sdu = 0;
613 613 mac->m_max_sdu = Adapter->default_mtu;
614 614 mac->m_margin = VLAN_TAGSZ;
615 615 mac->m_priv_props = e1000g_priv_props;
616 616 mac->m_v12n = MAC_VIRT_LEVEL1;
617 617
618 618 err = mac_register(mac, &Adapter->mh);
619 619 mac_free(mac);
620 620
621 621 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
622 622 }
623 623
624 624 static int
625 625 e1000g_identify_hardware(struct e1000g *Adapter)
626 626 {
627 627 struct e1000_hw *hw = &Adapter->shared;
628 628 struct e1000g_osdep *osdep = &Adapter->osdep;
629 629
630 630 /* Get the device id */
631 631 hw->vendor_id =
632 632 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
633 633 hw->device_id =
634 634 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
635 635 hw->revision_id =
636 636 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
637 637 hw->subsystem_device_id =
638 638 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
639 639 hw->subsystem_vendor_id =
640 640 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
641 641
642 642 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
643 643 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
644 644 "MAC type could not be set properly.");
645 645 return (DDI_FAILURE);
646 646 }
647 647
648 648 return (DDI_SUCCESS);
649 649 }
650 650
651 651 static int
652 652 e1000g_regs_map(struct e1000g *Adapter)
653 653 {
654 654 dev_info_t *devinfo = Adapter->dip;
655 655 struct e1000_hw *hw = &Adapter->shared;
656 656 struct e1000g_osdep *osdep = &Adapter->osdep;
657 657 off_t mem_size;
658 658 bar_info_t bar_info;
659 659 int offset, rnumber;
660 660
661 661 rnumber = ADAPTER_REG_SET;
662 662 /* Get size of adapter register memory */
663 663 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
664 664 DDI_SUCCESS) {
665 665 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
666 666 "ddi_dev_regsize for registers failed");
667 667 return (DDI_FAILURE);
668 668 }
669 669
670 670 /* Map adapter register memory */
671 671 if ((ddi_regs_map_setup(devinfo, rnumber,
672 672 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
673 673 &osdep->reg_handle)) != DDI_SUCCESS) {
674 674 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
675 675 "ddi_regs_map_setup for registers failed");
676 676 goto regs_map_fail;
677 677 }
678 678
679 679 /* ICH needs to map flash memory */
680 680 switch (hw->mac.type) {
681 681 case e1000_ich8lan:
682 682 case e1000_ich9lan:
683 683 case e1000_ich10lan:
684 684 case e1000_pchlan:
685 685 case e1000_pch2lan:
686 686 rnumber = ICH_FLASH_REG_SET;
687 687
688 688 /* get flash size */
689 689 if (ddi_dev_regsize(devinfo, rnumber,
690 690 &mem_size) != DDI_SUCCESS) {
691 691 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
692 692 "ddi_dev_regsize for ICH flash failed");
693 693 goto regs_map_fail;
694 694 }
695 695
696 696 /* map flash in */
697 697 if (ddi_regs_map_setup(devinfo, rnumber,
698 698 (caddr_t *)&hw->flash_address, 0,
699 699 mem_size, &e1000g_regs_acc_attr,
700 700 &osdep->ich_flash_handle) != DDI_SUCCESS) {
701 701 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
702 702 "ddi_regs_map_setup for ICH flash failed");
703 703 goto regs_map_fail;
704 704 }
705 705 break;
706 706 default:
707 707 break;
708 708 }
709 709
710 710 /* map io space */
711 711 switch (hw->mac.type) {
712 712 case e1000_82544:
713 713 case e1000_82540:
714 714 case e1000_82545:
715 715 case e1000_82546:
716 716 case e1000_82541:
717 717 case e1000_82541_rev_2:
718 718 /* find the IO bar */
719 719 rnumber = -1;
720 720 for (offset = PCI_CONF_BASE1;
721 721 offset <= PCI_CONF_BASE5; offset += 4) {
722 722 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
723 723 != DDI_SUCCESS)
724 724 continue;
725 725 if (bar_info.type == E1000G_BAR_IO) {
726 726 rnumber = bar_info.rnumber;
727 727 break;
728 728 }
729 729 }
730 730
731 731 if (rnumber < 0) {
732 732 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
733 733 "No io space is found");
734 734 goto regs_map_fail;
735 735 }
736 736
737 737 /* get io space size */
738 738 if (ddi_dev_regsize(devinfo, rnumber,
739 739 &mem_size) != DDI_SUCCESS) {
740 740 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
741 741 "ddi_dev_regsize for io space failed");
742 742 goto regs_map_fail;
743 743 }
744 744
745 745 /* map io space */
746 746 if ((ddi_regs_map_setup(devinfo, rnumber,
747 747 (caddr_t *)&hw->io_base, 0, mem_size,
748 748 &e1000g_regs_acc_attr,
749 749 &osdep->io_reg_handle)) != DDI_SUCCESS) {
750 750 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
751 751 "ddi_regs_map_setup for io space failed");
752 752 goto regs_map_fail;
753 753 }
754 754 break;
755 755 default:
756 756 hw->io_base = 0;
757 757 break;
758 758 }
759 759
760 760 return (DDI_SUCCESS);
761 761
762 762 regs_map_fail:
763 763 if (osdep->reg_handle != NULL)
764 764 ddi_regs_map_free(&osdep->reg_handle);
765 765 if (osdep->ich_flash_handle != NULL)
766 766 ddi_regs_map_free(&osdep->ich_flash_handle);
767 767 return (DDI_FAILURE);
768 768 }
769 769
770 770 static int
771 771 e1000g_set_driver_params(struct e1000g *Adapter)
772 772 {
773 773 struct e1000_hw *hw;
774 774
775 775 hw = &Adapter->shared;
776 776
777 777 /* Set MAC type and initialize hardware functions */
778 778 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
779 779 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
780 780 "Could not setup hardware functions");
781 781 return (DDI_FAILURE);
782 782 }
783 783
784 784 /* Get bus information */
785 785 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
786 786 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
787 787 "Could not get bus information");
788 788 return (DDI_FAILURE);
789 789 }
790 790
791 791 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
792 792
793 793 hw->mac.autoneg_failed = B_TRUE;
794 794
795 795 /* Set the autoneg_wait_to_complete flag to B_FALSE */
796 796 hw->phy.autoneg_wait_to_complete = B_FALSE;
797 797
798 798 /* Adaptive IFS related changes */
799 799 hw->mac.adaptive_ifs = B_TRUE;
800 800
801 801 /* Enable phy init script for IGP phy of 82541/82547 */
802 802 if ((hw->mac.type == e1000_82547) ||
803 803 (hw->mac.type == e1000_82541) ||
804 804 (hw->mac.type == e1000_82547_rev_2) ||
805 805 (hw->mac.type == e1000_82541_rev_2))
806 806 e1000_init_script_state_82541(hw, B_TRUE);
807 807
808 808 /* Enable the TTL workaround for 82541/82547 */
809 809 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
810 810
811 811 #ifdef __sparc
812 812 Adapter->strip_crc = B_TRUE;
813 813 #else
814 814 Adapter->strip_crc = B_FALSE;
815 815 #endif
816 816
817 817 /* setup the maximum MTU size of the chip */
818 818 e1000g_setup_max_mtu(Adapter);
819 819
820 820 /* Get speed/duplex settings in conf file */
821 821 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
822 822 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
823 823 e1000g_force_speed_duplex(Adapter);
824 824
825 825 /* Get Jumbo Frames settings in conf file */
826 826 e1000g_get_max_frame_size(Adapter);
827 827
828 828 /* Get conf file properties */
829 829 e1000g_get_conf(Adapter);
830 830
831 831 /* enforce PCH limits */
832 832 e1000g_pch_limits(Adapter);
833 833
834 834 /* Set Rx/Tx buffer size */
835 835 e1000g_set_bufsize(Adapter);
836 836
837 837 /* Master Latency Timer */
838 838 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
839 839
840 840 /* copper options */
841 841 if (hw->phy.media_type == e1000_media_type_copper) {
842 842 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
843 843 hw->phy.disable_polarity_correction = B_FALSE;
844 844 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
845 845 }
846 846
847 847 /* The initial link state should be "unknown" */
848 848 Adapter->link_state = LINK_STATE_UNKNOWN;
849 849
850 850 /* Initialize rx parameters */
851 851 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
852 852 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
853 853
854 854 /* Initialize tx parameters */
855 855 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
856 856 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
857 857 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
858 858 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
859 859
860 860 /* Initialize rx parameters */
861 861 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
862 862
863 863 return (DDI_SUCCESS);
864 864 }
865 865
866 866 static void
867 867 e1000g_setup_max_mtu(struct e1000g *Adapter)
868 868 {
869 869 struct e1000_mac_info *mac = &Adapter->shared.mac;
870 870 struct e1000_phy_info *phy = &Adapter->shared.phy;
871 871
872 872 switch (mac->type) {
873 873 /* types that do not support jumbo frames */
874 874 case e1000_ich8lan:
875 875 case e1000_82573:
876 876 case e1000_82583:
877 877 Adapter->max_mtu = ETHERMTU;
878 878 break;
879 879 /* ich9 supports jumbo frames except on one phy type */
880 880 case e1000_ich9lan:
881 881 if (phy->type == e1000_phy_ife)
882 882 Adapter->max_mtu = ETHERMTU;
883 883 else
884 884 Adapter->max_mtu = MAXIMUM_MTU_9K;
885 885 break;
886 886 /* pch can do jumbo frames up to 4K */
887 887 case e1000_pchlan:
888 888 Adapter->max_mtu = MAXIMUM_MTU_4K;
889 889 break;
890 890 /* pch2 can do jumbo frames up to 9K */
891 891 case e1000_pch2lan:
892 892 Adapter->max_mtu = MAXIMUM_MTU_9K;
893 893 break;
894 894 /* types with a special limit */
895 895 case e1000_82571:
896 896 case e1000_82572:
897 897 case e1000_82574:
898 898 case e1000_80003es2lan:
899 899 case e1000_ich10lan:
900 900 if (e1000g_jumbo_mtu >= ETHERMTU &&
901 901 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
902 902 Adapter->max_mtu = e1000g_jumbo_mtu;
903 903 } else {
904 904 Adapter->max_mtu = MAXIMUM_MTU_9K;
905 905 }
906 906 break;
907 907 /* default limit is 16K */
908 908 default:
909 909 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
910 910 sizeof (struct ether_vlan_header) - ETHERFCSL;
911 911 break;
912 912 }
913 913 }
914 914
915 915 static void
916 916 e1000g_set_bufsize(struct e1000g *Adapter)
917 917 {
918 918 struct e1000_mac_info *mac = &Adapter->shared.mac;
919 919 uint64_t rx_size;
920 920 uint64_t tx_size;
921 921
922 922 dev_info_t *devinfo = Adapter->dip;
923 923 #ifdef __sparc
924 924 ulong_t iommu_pagesize;
925 925 #endif
926 926 /* Get the system page size */
927 927 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
928 928
929 929 #ifdef __sparc
930 930 iommu_pagesize = dvma_pagesize(devinfo);
931 931 if (iommu_pagesize != 0) {
932 932 if (Adapter->sys_page_sz == iommu_pagesize) {
933 933 if (iommu_pagesize > 0x4000)
934 934 Adapter->sys_page_sz = 0x4000;
935 935 } else {
936 936 if (Adapter->sys_page_sz > iommu_pagesize)
937 937 Adapter->sys_page_sz = iommu_pagesize;
938 938 }
939 939 }
940 940 if (Adapter->lso_enable) {
941 941 Adapter->dvma_page_num = E1000_LSO_MAXLEN /
942 942 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
943 943 } else {
944 944 Adapter->dvma_page_num = Adapter->max_frame_size /
945 945 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
946 946 }
947 947 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
948 948 #endif
949 949
950 950 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
951 951
952 952 if (Adapter->mem_workaround_82546 &&
953 953 ((mac->type == e1000_82545) ||
954 954 (mac->type == e1000_82546) ||
955 955 (mac->type == e1000_82546_rev_3))) {
956 956 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
957 957 } else {
958 958 rx_size = Adapter->max_frame_size;
959 959 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
960 960 (rx_size <= FRAME_SIZE_UPTO_4K))
961 961 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
962 962 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
963 963 (rx_size <= FRAME_SIZE_UPTO_8K))
964 964 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
965 965 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
966 966 (rx_size <= FRAME_SIZE_UPTO_16K))
967 967 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
968 968 else
969 969 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
970 970 }
971 971 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
972 972
973 973 tx_size = Adapter->max_frame_size;
974 974 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
975 975 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
976 976 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
977 977 (tx_size <= FRAME_SIZE_UPTO_8K))
978 978 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
979 979 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
980 980 (tx_size <= FRAME_SIZE_UPTO_16K))
981 981 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
982 982 else
983 983 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
984 984
985 985 /*
986 986 * For Wiseman adapters we have a requirement that receive buffers
987 987 * be aligned on a 256-byte boundary. Since Livengood does not
988 988 * require this, and forcing it for all hardware would have
989 989 * performance implications, the alignment is applied only to
990 990 * Wiseman and only when jumbo frames are enabled; the rest of
991 991 * the time it is okay to have normal frames. However, there is
992 992 * a potential risk of losing data if a buffer is not aligned,
993 993 * so all Wiseman boards get 256-byte-aligned
994 994 * buffers.
995 995 */
996 996 if (mac->type < e1000_82543)
997 997 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
998 998 else
999 999 Adapter->rx_buf_align = 1;
1000 1000 }
1001 1001
1002 1002 /*
1003 1003 * e1000g_detach - driver detach
1004 1004 *
1005 1005 * The detach() function is the complement of the attach routine.
1006 1006 * If cmd is set to DDI_DETACH, detach() is used to remove the
1007 1007 * state associated with a given instance of a device node
1008 1008 * prior to the removal of that instance from the system.
1009 1009 *
1010 1010 * The detach() function will be called once for each instance
1011 1011 * of the device for which there has been a successful attach()
1012 1012 * once there are no longer any opens on the device.
1013 1013 *
1014 1014 * Interrupt routines are disabled, and all memory allocated by
1015 1015 * this driver is freed.
1016 1016 */
1017 1017 static int
1018 1018 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1019 1019 {
1020 1020 struct e1000g *Adapter;
1021 1021 boolean_t rx_drain;
1022 1022
1023 1023 switch (cmd) {
1024 1024 default:
1025 1025 return (DDI_FAILURE);
1026 1026
1027 1027 case DDI_SUSPEND:
1028 1028 return (e1000g_suspend(devinfo));
1029 1029
1030 1030 case DDI_DETACH:
1031 1031 break;
1032 1032 }
1033 1033
1034 1034 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1035 1035 if (Adapter == NULL)
1036 1036 return (DDI_FAILURE);
1037 1037
1038 1038 rx_drain = e1000g_rx_drain(Adapter);
1039 1039 if (!rx_drain && !e1000g_force_detach)
1040 1040 return (DDI_FAILURE);
1041 1041
1042 1042 if (mac_unregister(Adapter->mh) != 0) {
1043 1043 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1044 1044 return (DDI_FAILURE);
1045 1045 }
1046 1046 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1047 1047
1048 1048 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1049 1049
1050 1050 if (!e1000g_force_detach && !rx_drain)
1051 1051 return (DDI_FAILURE);
1052 1052
1053 1053 e1000g_unattach(devinfo, Adapter);
1054 1054
1055 1055 return (DDI_SUCCESS);
1056 1056 }
1057 1057
1058 1058 /*
1059 1059 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1060 1060 */
1061 1061 void
1062 1062 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1063 1063 {
1064 1064 ASSERT(e1000g_private_devi_list != NULL);
1065 1065 ASSERT(devi_node != NULL);
1066 1066
1067 1067 if (devi_node->prev != NULL)
1068 1068 devi_node->prev->next = devi_node->next;
1069 1069 if (devi_node->next != NULL)
1070 1070 devi_node->next->prev = devi_node->prev;
1071 1071 if (devi_node == e1000g_private_devi_list)
1072 1072 e1000g_private_devi_list = devi_node->next;
1073 1073
1074 1074 kmem_free(devi_node->priv_dip,
1075 1075 sizeof (struct dev_info));
1076 1076 kmem_free(devi_node,
1077 1077 sizeof (private_devi_list_t));
1078 1078 }
1079 1079
1080 1080 static void
1081 1081 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1082 1082 {
1083 1083 private_devi_list_t *devi_node;
1084 1084 int result;
1085 1085
1086 1086 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1087 1087 (void) e1000g_disable_intrs(Adapter);
1088 1088 }
1089 1089
1090 1090 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1091 1091 (void) mac_unregister(Adapter->mh);
1092 1092 }
1093 1093
1094 1094 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1095 1095 (void) e1000g_rem_intrs(Adapter);
1096 1096 }
1097 1097
1098 1098 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1099 1099 (void) ddi_prop_remove_all(devinfo);
1100 1100 }
1101 1101
1102 1102 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1103 1103 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1104 1104 }
1105 1105
1106 1106 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1107 1107 stop_link_timer(Adapter);
1108 1108
1109 1109 mutex_enter(&e1000g_nvm_lock);
1110 1110 result = e1000_reset_hw(&Adapter->shared);
1111 1111 mutex_exit(&e1000g_nvm_lock);
1112 1112
1113 1113 if (result != E1000_SUCCESS) {
1114 1114 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1115 1115 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1116 1116 }
1117 1117 }
1118 1118
1119 1119 e1000g_release_multicast(Adapter);
1120 1120
1121 1121 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1122 1122 if (Adapter->osdep.reg_handle != NULL)
1123 1123 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1124 1124 if (Adapter->osdep.ich_flash_handle != NULL)
1125 1125 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1126 1126 if (Adapter->osdep.io_reg_handle != NULL)
1127 1127 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1128 1128 }
1129 1129
1130 1130 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1131 1131 if (Adapter->osdep.cfg_handle != NULL)
1132 1132 pci_config_teardown(&Adapter->osdep.cfg_handle);
1133 1133 }
1134 1134
1135 1135 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1136 1136 e1000g_destroy_locks(Adapter);
1137 1137 }
1138 1138
1139 1139 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1140 1140 e1000g_fm_fini(Adapter);
1141 1141 }
1142 1142
1143 1143 mutex_enter(&e1000g_rx_detach_lock);
1144 1144 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1145 1145 devi_node = Adapter->priv_devi_node;
1146 1146 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1147 1147
1148 1148 if (devi_node->pending_rx_count == 0) {
1149 1149 e1000g_free_priv_devi_node(devi_node);
1150 1150 }
1151 1151 }
1152 1152 mutex_exit(&e1000g_rx_detach_lock);
1153 1153
1154 1154 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1155 1155
1156 1156 /*
1157 1157 * Another hotplug spec requirement:
1158 1158 * run ddi_set_driver_private(devinfo, NULL);
1159 1159 */
1160 1160 ddi_set_driver_private(devinfo, NULL);
1161 1161 }
1162 1162
1163 1163 /*
1164 1164 * Get the BAR type and rnumber for a given PCI BAR offset
1165 1165 */
1166 1166 static int
1167 1167 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1168 1168 {
1169 1169 pci_regspec_t *regs;
1170 1170 uint_t regs_length;
1171 1171 int type, rnumber, rcount;
1172 1172
1173 1173 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1174 1174 (bar_offset <= PCI_CONF_BASE5));
1175 1175
1176 1176 /*
1177 1177 * Get the DDI "reg" property
1178 1178 */
1179 1179 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1180 1180 DDI_PROP_DONTPASS, "reg", (int **)&regs,
1181 1181 &regs_length) != DDI_PROP_SUCCESS) {
1182 1182 return (DDI_FAILURE);
1183 1183 }
1184 1184
1185 1185 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1186 1186 /*
1187 1187 * Check the BAR offset
1188 1188 */
1189 1189 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1190 1190 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1191 1191 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1192 1192 break;
1193 1193 }
1194 1194 }
1195 1195
1196 1196 ddi_prop_free(regs);
1197 1197
1198 1198 if (rnumber >= rcount)
1199 1199 return (DDI_FAILURE);
1200 1200
1201 1201 switch (type) {
1202 1202 case PCI_ADDR_CONFIG:
1203 1203 bar_info->type = E1000G_BAR_CONFIG;
1204 1204 break;
1205 1205 case PCI_ADDR_IO:
1206 1206 bar_info->type = E1000G_BAR_IO;
1207 1207 break;
1208 1208 case PCI_ADDR_MEM32:
1209 1209 bar_info->type = E1000G_BAR_MEM32;
1210 1210 break;
1211 1211 case PCI_ADDR_MEM64:
1212 1212 bar_info->type = E1000G_BAR_MEM64;
1213 1213 break;
1214 1214 default:
1215 1215 return (DDI_FAILURE);
1216 1216 }
1217 1217 bar_info->rnumber = rnumber;
1218 1218 return (DDI_SUCCESS);
1219 1219 }
1220 1220
1221 1221 static void
1222 1222 e1000g_init_locks(struct e1000g *Adapter)
1223 1223 {
1224 1224 e1000g_tx_ring_t *tx_ring;
1225 1225 e1000g_rx_ring_t *rx_ring;
1226 1226
1227 1227 rw_init(&Adapter->chip_lock, NULL,
1228 1228 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1229 1229 mutex_init(&Adapter->link_lock, NULL,
1230 1230 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1231 1231 mutex_init(&Adapter->watchdog_lock, NULL,
1232 1232 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1233 1233
1234 1234 tx_ring = Adapter->tx_ring;
1235 1235
1236 1236 mutex_init(&tx_ring->tx_lock, NULL,
1237 1237 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1238 1238 mutex_init(&tx_ring->usedlist_lock, NULL,
1239 1239 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1240 1240 mutex_init(&tx_ring->freelist_lock, NULL,
1241 1241 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1242 1242
1243 1243 rx_ring = Adapter->rx_ring;
1244 1244
1245 1245 mutex_init(&rx_ring->rx_lock, NULL,
1246 1246 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1247 1247 }
1248 1248
1249 1249 static void
1250 1250 e1000g_destroy_locks(struct e1000g *Adapter)
1251 1251 {
1252 1252 e1000g_tx_ring_t *tx_ring;
1253 1253 e1000g_rx_ring_t *rx_ring;
1254 1254
1255 1255 tx_ring = Adapter->tx_ring;
1256 1256 mutex_destroy(&tx_ring->tx_lock);
1257 1257 mutex_destroy(&tx_ring->usedlist_lock);
1258 1258 mutex_destroy(&tx_ring->freelist_lock);
1259 1259
1260 1260 rx_ring = Adapter->rx_ring;
1261 1261 mutex_destroy(&rx_ring->rx_lock);
1262 1262
1263 1263 mutex_destroy(&Adapter->link_lock);
1264 1264 mutex_destroy(&Adapter->watchdog_lock);
1265 1265 rw_destroy(&Adapter->chip_lock);
1266 1266
1267 1267 /* destroy mutex initialized in shared code */
1268 1268 e1000_destroy_hw_mutex(&Adapter->shared);
1269 1269 }
1270 1270
1271 1271 static int
1272 1272 e1000g_resume(dev_info_t *devinfo)
1273 1273 {
1274 1274 struct e1000g *Adapter;
1275 1275
1276 1276 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1277 1277 if (Adapter == NULL)
1278 1278 e1000g_log(Adapter, CE_PANIC,
1279 1279 "Instance pointer is null\n");
1280 1280
1281 1281 if (Adapter->dip != devinfo)
1282 1282 e1000g_log(Adapter, CE_PANIC,
1283 1283 "Devinfo is not the same as saved devinfo\n");
1284 1284
1285 1285 rw_enter(&Adapter->chip_lock, RW_WRITER);
1286 1286
1287 1287 if (Adapter->e1000g_state & E1000G_STARTED) {
1288 1288 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1289 1289 rw_exit(&Adapter->chip_lock);
1290 1290 /*
1291 1291 * We note the failure, but return success, as the
1292 1292 * system is still usable without this controller.
1293 1293 */
1294 1294 e1000g_log(Adapter, CE_WARN,
1295 1295 "e1000g_resume: failed to restart controller\n");
1296 1296 return (DDI_SUCCESS);
1297 1297 }
1298 1298 /* Enable and start the watchdog timer */
1299 1299 enable_watchdog_timer(Adapter);
1300 1300 }
1301 1301
1302 1302 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1303 1303
1304 1304 rw_exit(&Adapter->chip_lock);
1305 1305
1306 1306 return (DDI_SUCCESS);
1307 1307 }
1308 1308
1309 1309 static int
1310 1310 e1000g_suspend(dev_info_t *devinfo)
1311 1311 {
1312 1312 struct e1000g *Adapter;
1313 1313
1314 1314 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1315 1315 if (Adapter == NULL)
1316 1316 return (DDI_FAILURE);
1317 1317
1318 1318 rw_enter(&Adapter->chip_lock, RW_WRITER);
1319 1319
1320 1320 Adapter->e1000g_state |= E1000G_SUSPENDED;
1321 1321
1322 1322 /* if the port isn't plumbed, we can simply return */
1323 1323 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1324 1324 rw_exit(&Adapter->chip_lock);
1325 1325 return (DDI_SUCCESS);
1326 1326 }
1327 1327
1328 1328 e1000g_stop(Adapter, B_FALSE);
1329 1329
1330 1330 rw_exit(&Adapter->chip_lock);
1331 1331
1332 1332 /* Disable and stop all the timers */
1333 1333 disable_watchdog_timer(Adapter);
1334 1334 stop_link_timer(Adapter);
1335 1335 stop_82547_timer(Adapter->tx_ring);
1336 1336
1337 1337 return (DDI_SUCCESS);
1338 1338 }
1339 1339
1340 1340 static int
1341 1341 e1000g_init(struct e1000g *Adapter)
1342 1342 {
1343 1343 uint32_t pba;
1344 1344 uint32_t high_water;
1345 1345 struct e1000_hw *hw;
1346 1346 clock_t link_timeout;
1347 1347 int result;
1348 1348
1349 1349 hw = &Adapter->shared;
1350 1350
1351 1351 /*
1352 1352 * reset to put the hardware in a known state
1353 1353 * before we try to do anything with the eeprom
1354 1354 */
1355 1355 mutex_enter(&e1000g_nvm_lock);
1356 1356 result = e1000_reset_hw(hw);
1357 1357 mutex_exit(&e1000g_nvm_lock);
1358 1358
1359 1359 if (result != E1000_SUCCESS) {
1360 1360 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1361 1361 goto init_fail;
1362 1362 }
1363 1363
1364 1364 mutex_enter(&e1000g_nvm_lock);
1365 1365 result = e1000_validate_nvm_checksum(hw);
1366 1366 if (result < E1000_SUCCESS) {
1367 1367 /*
1368 1368 * Some PCI-E parts fail the first check due to
1369 1369 * the link being in a sleep state. Call it again;
1370 1370 * if it fails a second time, it's a real issue.
1371 1371 */
1372 1372 result = e1000_validate_nvm_checksum(hw);
1373 1373 }
1374 1374 mutex_exit(&e1000g_nvm_lock);
1375 1375
1376 1376 if (result < E1000_SUCCESS) {
1377 1377 e1000g_log(Adapter, CE_WARN,
1378 1378 "Invalid NVM checksum. Please contact "
1379 1379 "the vendor to update the NVM.");
1380 1380 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1381 1381 goto init_fail;
1382 1382 }
1383 1383
1384 1384 result = 0;
1385 1385 #ifdef __sparc
1386 1386 /*
1387 1387 * First, we try to get the local Ethernet address from OBP. If
1388 1388 * that fails, we get it from the EEPROM of the NIC card.
1389 1389 */
1390 1390 result = e1000g_find_mac_address(Adapter);
1391 1391 #endif
1392 1392 /* Get the local ethernet address. */
1393 1393 if (!result) {
1394 1394 mutex_enter(&e1000g_nvm_lock);
1395 1395 result = e1000_read_mac_addr(hw);
1396 1396 mutex_exit(&e1000g_nvm_lock);
1397 1397 }
1398 1398
1399 1399 if (result < E1000_SUCCESS) {
1400 1400 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1401 1401 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1402 1402 goto init_fail;
1403 1403 }
1404 1404
1405 1405 /* check for valid mac address */
1406 1406 if (!is_valid_mac_addr(hw->mac.addr)) {
1407 1407 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1408 1408 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1409 1409 goto init_fail;
1410 1410 }
1411 1411
1412 1412 /* Set LAA state for 82571 chipset */
1413 1413 e1000_set_laa_state_82571(hw, B_TRUE);
1414 1414
1415 1415 /* Master Latency Timer implementation */
1416 1416 if (Adapter->master_latency_timer) {
1417 1417 pci_config_put8(Adapter->osdep.cfg_handle,
1418 1418 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1419 1419 }
1420 1420
1421 1421 if (hw->mac.type < e1000_82547) {
1422 1422 /*
1423 1423 * Total FIFO is 64K
1424 1424 */
1425 1425 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1426 1426 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1427 1427 else
1428 1428 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1429 1429 } else if ((hw->mac.type == e1000_82571) ||
1430 1430 (hw->mac.type == e1000_82572) ||
1431 1431 (hw->mac.type == e1000_80003es2lan)) {
1432 1432 /*
1433 1433 * Total FIFO is 48K
1434 1434 */
1435 1435 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1436 1436 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1437 1437 else
1438 1438 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1439 1439 } else if (hw->mac.type == e1000_82573) {
1440 1440 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1441 1441 } else if (hw->mac.type == e1000_82574) {
1442 1442 /* Keep adapter default: 20K for Rx, 20K for Tx */
1443 1443 pba = E1000_READ_REG(hw, E1000_PBA);
1444 1444 } else if (hw->mac.type == e1000_ich8lan) {
1445 1445 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1446 1446 } else if (hw->mac.type == e1000_ich9lan) {
1447 1447 pba = E1000_PBA_10K;
1448 1448 } else if (hw->mac.type == e1000_ich10lan) {
1449 1449 pba = E1000_PBA_10K;
1450 1450 } else if (hw->mac.type == e1000_pchlan) {
1451 1451 pba = E1000_PBA_26K;
1452 1452 } else if (hw->mac.type == e1000_pch2lan) {
1453 1453 pba = E1000_PBA_26K;
1454 1454 } else {
1455 1455 /*
1456 1456 * Total FIFO is 40K
1457 1457 */
1458 1458 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1459 1459 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1460 1460 else
1461 1461 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1462 1462 }
1463 1463 E1000_WRITE_REG(hw, E1000_PBA, pba);
1464 1464
1465 1465 /*
1466 1466 * These parameters set thresholds for the adapter's generation (Tx)
1467 1467 * and response (Rx) to Ethernet PAUSE frames. These are just threshold
1468 1468 * settings. Flow control is enabled or disabled in the configuration
1469 1469 * file.
1470 1470 * High-water mark is set down from the top of the rx fifo (not
1471 1471 * sensitive to max_frame_size) and low-water is set just below
1472 1472 * high-water mark.
1473 1473 * The high water mark must be low enough to fit one full frame above
1474 1474 * it in the rx FIFO. Should be the lower of:
1475 1475 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1476 1476 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1477 1477 * Rx FIFO size minus one full frame.
1478 1478 */
1479 1479 high_water = min(((pba << 10) * 9 / 10),
1480 1480 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1481 1481 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1482 1482 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1483 1483 ((pba << 10) - Adapter->max_frame_size)));
1484 1484
1485 1485 hw->fc.high_water = high_water & 0xFFF8;
1486 1486 hw->fc.low_water = hw->fc.high_water - 8;
1487 1487
1488 1488 if (hw->mac.type == e1000_80003es2lan)
1489 1489 hw->fc.pause_time = 0xFFFF;
1490 1490 else
1491 1491 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1492 1492 hw->fc.send_xon = B_TRUE;
1493 1493
1494 1494 /*
1495 1495 * Reset the adapter hardware the second time.
1496 1496 */
1497 1497 mutex_enter(&e1000g_nvm_lock);
1498 1498 result = e1000_reset_hw(hw);
1499 1499 mutex_exit(&e1000g_nvm_lock);
1500 1500
1501 1501 if (result != E1000_SUCCESS) {
1502 1502 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1503 1503 goto init_fail;
1504 1504 }
1505 1505
1506 1506 /* disable wakeup control by default */
1507 1507 if (hw->mac.type >= e1000_82544)
1508 1508 E1000_WRITE_REG(hw, E1000_WUC, 0);
1509 1509
1510 1510 /*
1511 1511 * MWI should be disabled on 82546.
1512 1512 */
1513 1513 if (hw->mac.type == e1000_82546)
1514 1514 e1000_pci_clear_mwi(hw);
1515 1515 else
1516 1516 e1000_pci_set_mwi(hw);
1517 1517
1518 1518 /*
1519 1519 * Configure/Initialize hardware
1520 1520 */
1521 1521 mutex_enter(&e1000g_nvm_lock);
1522 1522 result = e1000_init_hw(hw);
1523 1523 mutex_exit(&e1000g_nvm_lock);
1524 1524
1525 1525 if (result < E1000_SUCCESS) {
1526 1526 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1527 1527 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1528 1528 goto init_fail;
1529 1529 }
1530 1530
1531 1531 /*
1532 1532 * Restore LED settings to the default from EEPROM
1533 1533 * to meet the standard for Sun platforms.
1534 1534 */
1535 1535 (void) e1000_cleanup_led(hw);
1536 1536
1537 1537 /* Disable Smart Power Down */
1538 1538 phy_spd_state(hw, B_FALSE);
1539 1539
1540 1540 /* Make sure driver has control */
1541 1541 e1000g_get_driver_control(hw);
1542 1542
1543 1543 /*
1544 1544 * Initialize unicast addresses.
1545 1545 */
1546 1546 e1000g_init_unicst(Adapter);
1547 1547
1548 1548 /*
1549 1549 * Set up and initialize the mctable structures. After this routine
1550 1550 * completes, the multicast table will be set.
1551 1551 */
1552 1552 e1000_update_mc_addr_list(hw,
1553 1553 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1554 1554 msec_delay(5);
1555 1555
1556 1556 /*
1557 1557 * Implement Adaptive IFS
1558 1558 */
1559 1559 e1000_reset_adaptive(hw);
1560 1560
1561 1561 /* Setup Interrupt Throttling Register */
1562 1562 if (hw->mac.type >= e1000_82540) {
1563 1563 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1564 1564 } else
1565 1565 Adapter->intr_adaptive = B_FALSE;
1566 1566
1567 1567 /* Start the timer for link setup */
1568 1568 if (hw->mac.autoneg)
1569 1569 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1570 1570 else
1571 1571 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1572 1572
1573 1573 mutex_enter(&Adapter->link_lock);
1574 1574 if (hw->phy.autoneg_wait_to_complete) {
1575 1575 Adapter->link_complete = B_TRUE;
1576 1576 } else {
1577 1577 Adapter->link_complete = B_FALSE;
1578 1578 Adapter->link_tid = timeout(e1000g_link_timer,
1579 1579 (void *)Adapter, link_timeout);
1580 1580 }
1581 1581 mutex_exit(&Adapter->link_lock);
1582 1582
1583 1583 /* Save the state of the phy */
1584 1584 e1000g_get_phy_state(Adapter);
1585 1585
1586 1586 e1000g_param_sync(Adapter);
1587 1587
1588 1588 Adapter->init_count++;
1589 1589
1590 1590 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1591 1591 goto init_fail;
1592 1592 }
1593 1593 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1594 1594 goto init_fail;
1595 1595 }
1596 1596
1597 1597 Adapter->poll_mode = e1000g_poll_mode;
1598 1598
1599 1599 return (DDI_SUCCESS);
1600 1600
1601 1601 init_fail:
1602 1602 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1603 1603 return (DDI_FAILURE);
1604 1604 }
1605 1605
1606 1606 static int
1607 1607 e1000g_alloc_rx_data(struct e1000g *Adapter)
1608 1608 {
1609 1609 e1000g_rx_ring_t *rx_ring;
1610 1610 e1000g_rx_data_t *rx_data;
1611 1611
1612 1612 rx_ring = Adapter->rx_ring;
1613 1613
1614 1614 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1615 1615
1616 1616 if (rx_data == NULL)
1617 1617 return (DDI_FAILURE);
1618 1618
1619 1619 rx_data->priv_devi_node = Adapter->priv_devi_node;
1620 1620 rx_data->rx_ring = rx_ring;
1621 1621
1622 1622 mutex_init(&rx_data->freelist_lock, NULL,
1623 1623 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1624 1624 mutex_init(&rx_data->recycle_lock, NULL,
1625 1625 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1626 1626
1627 1627 rx_ring->rx_data = rx_data;
1628 1628
1629 1629 return (DDI_SUCCESS);
1630 1630 }
1631 1631
1632 1632 void
1633 1633 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1634 1634 {
1635 1635 rx_sw_packet_t *packet, *next_packet;
1636 1636
1637 1637 if (rx_data == NULL)
1638 1638 return;
1639 1639
1640 1640 packet = rx_data->packet_area;
1641 1641 while (packet != NULL) {
1642 1642 next_packet = packet->next;
1643 1643 e1000g_free_rx_sw_packet(packet, B_TRUE);
1644 1644 packet = next_packet;
1645 1645 }
1646 1646 rx_data->packet_area = NULL;
1647 1647 }
1648 1648
1649 1649 void
1650 1650 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1651 1651 {
1652 1652 if (rx_data == NULL)
1653 1653 return;
1654 1654
1655 1655 mutex_destroy(&rx_data->freelist_lock);
1656 1656 mutex_destroy(&rx_data->recycle_lock);
1657 1657
1658 1658 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1659 1659 }
1660 1660
1661 1661 /*
1662 1662 * Check if the link is up
1663 1663 */
1664 1664 static boolean_t
1665 1665 e1000g_link_up(struct e1000g *Adapter)
1666 1666 {
1667 1667 struct e1000_hw *hw = &Adapter->shared;
1668 1668 boolean_t link_up = B_FALSE;
1669 1669
1670 1670 /*
1671 1671 	 * get_link_status is set in the interrupt handler on a
1672 1672 	 * link-status-change or rx sequence error interrupt. For copper
1673 1673 	 * adapters it remains set until e1000_check_for_link() has
1674 1674 	 * established link.
1675 1675 */
1676 1676 switch (hw->phy.media_type) {
1677 1677 case e1000_media_type_copper:
1678 1678 if (hw->mac.get_link_status) {
1679 1679 (void) e1000_check_for_link(hw);
1680 1680 if ((E1000_READ_REG(hw, E1000_STATUS) &
1681 1681 E1000_STATUS_LU)) {
1682 1682 link_up = B_TRUE;
1683 1683 } else {
1684 1684 link_up = !hw->mac.get_link_status;
1685 1685 }
1686 1686 } else {
1687 1687 link_up = B_TRUE;
1688 1688 }
1689 1689 break;
1690 1690 case e1000_media_type_fiber:
1691 1691 (void) e1000_check_for_link(hw);
1692 1692 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1693 1693 E1000_STATUS_LU);
1694 1694 break;
1695 1695 case e1000_media_type_internal_serdes:
1696 1696 (void) e1000_check_for_link(hw);
1697 1697 link_up = hw->mac.serdes_has_link;
1698 1698 break;
1699 1699 }
1700 1700
1701 1701 return (link_up);
1702 1702 }
1703 1703
1704 1704 static void
1705 1705 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1706 1706 {
1707 1707 struct iocblk *iocp;
1708 1708 struct e1000g *e1000gp;
1709 1709 enum ioc_reply status;
1710 1710
1711 1711 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1712 1712 iocp->ioc_error = 0;
1713 1713 e1000gp = (struct e1000g *)arg;
1714 1714
1715 1715 ASSERT(e1000gp);
1716 1716 if (e1000gp == NULL) {
1717 1717 miocnak(q, mp, 0, EINVAL);
1718 1718 return;
1719 1719 }
1720 1720
1721 1721 rw_enter(&e1000gp->chip_lock, RW_READER);
1722 1722 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1723 1723 rw_exit(&e1000gp->chip_lock);
1724 1724 miocnak(q, mp, 0, EINVAL);
1725 1725 return;
1726 1726 }
1727 1727 rw_exit(&e1000gp->chip_lock);
1728 1728
1729 1729 switch (iocp->ioc_cmd) {
1730 1730
1731 1731 case LB_GET_INFO_SIZE:
1732 1732 case LB_GET_INFO:
1733 1733 case LB_GET_MODE:
1734 1734 case LB_SET_MODE:
1735 1735 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1736 1736 break;
1737 1737
1738 1738
1739 1739 #ifdef E1000G_DEBUG
1740 1740 case E1000G_IOC_REG_PEEK:
1741 1741 case E1000G_IOC_REG_POKE:
1742 1742 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1743 1743 break;
1744 1744 case E1000G_IOC_CHIP_RESET:
1745 1745 e1000gp->reset_count++;
1746 1746 if (e1000g_reset_adapter(e1000gp))
1747 1747 status = IOC_ACK;
1748 1748 else
1749 1749 status = IOC_INVAL;
1750 1750 break;
1751 1751 #endif
1752 1752 default:
1753 1753 status = IOC_INVAL;
1754 1754 break;
1755 1755 }
1756 1756
1757 1757 /*
1758 1758 * Decide how to reply
1759 1759 */
1760 1760 switch (status) {
1761 1761 default:
1762 1762 case IOC_INVAL:
1763 1763 /*
1764 1764 * Error, reply with a NAK and EINVAL or the specified error
1765 1765 */
1766 1766 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1767 1767 EINVAL : iocp->ioc_error);
1768 1768 break;
1769 1769
1770 1770 case IOC_DONE:
1771 1771 /*
1772 1772 * OK, reply already sent
1773 1773 */
1774 1774 break;
1775 1775
1776 1776 case IOC_ACK:
1777 1777 /*
1778 1778 * OK, reply with an ACK
1779 1779 */
1780 1780 miocack(q, mp, 0, 0);
1781 1781 break;
1782 1782
1783 1783 case IOC_REPLY:
1784 1784 /*
1785 1785 * OK, send prepared reply as ACK or NAK
1786 1786 */
1787 1787 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1788 1788 M_IOCACK : M_IOCNAK;
1789 1789 qreply(q, mp);
1790 1790 break;
1791 1791 }
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1796 1796 * capable of supporting only one interrupt and we shouldn't disable
1797 1797 * the physical interrupt. In this case we let the interrupt come and
1798 1798 * we queue the packets in the rx ring itself in case we are in polling
1799 1799 * mode (better latency but slightly lower performance and a very
1800 1800  * high interrupt count in mpstat, which is harmless).
1801 1801 *
1802 1802 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1803 1803 * which can be disabled in poll mode. This gives better overall
1804 1804 * throughput (compared to the mode above), shows very low interrupt
1805 1805 * count but has slightly higher latency since we pick the packets when
1806 1806 * the poll thread does polling.
1807 1807 *
1808 1808 * Currently, this flag should be enabled only while doing performance
1809 1809  * measurement or when it can be guaranteed that the entire NIC going
1810 1810  * into poll mode will not harm any traffic such as cluster heartbeats.
1811 1811 */
1812 1812 int e1000g_poll_mode = 0;
1813 1813
1814 1814 /*
1815 1815  * Called from the upper layers when the driver is in polling mode to
1816 1816 * pick up any queued packets. Care should be taken to not block
1817 1817 * this thread.
1818 1818 */
1819 1819 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1820 1820 {
1821 1821 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1822 1822 mblk_t *mp = NULL;
1823 1823 mblk_t *tail;
1824 1824 struct e1000g *adapter;
1825 1825
1826 1826 adapter = rx_ring->adapter;
1827 1827
1828 1828 rw_enter(&adapter->chip_lock, RW_READER);
1829 1829
1830 1830 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1831 1831 rw_exit(&adapter->chip_lock);
1832 1832 return (NULL);
1833 1833 }
1834 1834
1835 1835 mutex_enter(&rx_ring->rx_lock);
1836 1836 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1837 1837 mutex_exit(&rx_ring->rx_lock);
1838 1838 rw_exit(&adapter->chip_lock);
1839 1839 return (mp);
1840 1840 }
1841 1841
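The block comment above e1000g_poll_mode and the one above e1000g_poll_ring() together describe a simple hand-off: when a ring is in poll mode the ISR leaves received packets where they are and the MAC poll thread collects them. The stand-alone sketch below is a user-space model of that hand-off only; the structure, field names and packet counts are illustrative assumptions, not driver code.

    #include <stdio.h>
    #include <stdbool.h>

    struct model_ring {
    	bool poll_flag;	/* set by the ring-level interrupt-disable entry point */
    	int queued;	/* packets sitting in the rx descriptor ring */
    };

    /* roughly what the E1000_ICR_RXT0 branch of the ISR reduces to */
    static int
    isr_rx(struct model_ring *r)
    {
    	int n;

    	if (r->poll_flag)
    		return (0);	/* leave everything for the poll thread */
    	n = r->queued;
    	r->queued = 0;
    	return (n);		/* would be handed up via mac_rx_ring() */
    }

    /* roughly what e1000g_poll_ring() reduces to */
    static int
    poll_rx(struct model_ring *r, int budget)
    {
    	int n = (r->queued < budget) ? r->queued : budget;

    	r->queued -= n;
    	return (n);
    }

    int
    main(void)
    {
    	struct model_ring r = { .poll_flag = true, .queued = 8 };

    	printf("isr picked up %d, poll picked up %d\n",
    	    isr_rx(&r), poll_rx(&r, 4));
    	return (0);
    }

In the real driver the budget is a byte count (bytes_to_pickup) and the hand-off is protected by rx_lock and chip_lock, which the model omits.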
1842 1842 static int
1843 1843 e1000g_m_start(void *arg)
1844 1844 {
1845 1845 struct e1000g *Adapter = (struct e1000g *)arg;
1846 1846
1847 1847 rw_enter(&Adapter->chip_lock, RW_WRITER);
1848 1848
1849 1849 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1850 1850 rw_exit(&Adapter->chip_lock);
1851 1851 return (ECANCELED);
1852 1852 }
1853 1853
1854 1854 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1855 1855 rw_exit(&Adapter->chip_lock);
1856 1856 return (ENOTACTIVE);
1857 1857 }
1858 1858
1859 1859 Adapter->e1000g_state |= E1000G_STARTED;
1860 1860
1861 1861 rw_exit(&Adapter->chip_lock);
1862 1862
1863 1863 /* Enable and start the watchdog timer */
1864 1864 enable_watchdog_timer(Adapter);
1865 1865
1866 1866 return (0);
1867 1867 }
1868 1868
1869 1869 static int
1870 1870 e1000g_start(struct e1000g *Adapter, boolean_t global)
1871 1871 {
1872 1872 e1000g_rx_data_t *rx_data;
1873 1873
1874 1874 if (global) {
1875 1875 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1876 1876 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1877 1877 goto start_fail;
1878 1878 }
1879 1879
1880 1880 /* Allocate dma resources for descriptors and buffers */
1881 1881 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1882 1882 e1000g_log(Adapter, CE_WARN,
1883 1883 "Alloc DMA resources failed");
1884 1884 goto start_fail;
1885 1885 }
1886 1886 Adapter->rx_buffer_setup = B_FALSE;
1887 1887 }
1888 1888
1889 1889 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1890 1890 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1891 1891 e1000g_log(Adapter, CE_WARN,
1892 1892 "Adapter initialization failed");
1893 1893 goto start_fail;
1894 1894 }
1895 1895 }
1896 1896
1897 1897 /* Setup and initialize the transmit structures */
1898 1898 e1000g_tx_setup(Adapter);
1899 1899 msec_delay(5);
1900 1900
1901 1901 /* Setup and initialize the receive structures */
1902 1902 e1000g_rx_setup(Adapter);
1903 1903 msec_delay(5);
1904 1904
1905 1905 /* Restore the e1000g promiscuous mode */
1906 1906 e1000g_restore_promisc(Adapter);
1907 1907
1908 1908 e1000g_mask_interrupt(Adapter);
1909 1909
1910 1910 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1911 1911
1912 1912 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1913 1913 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1914 1914 goto start_fail;
1915 1915 }
1916 1916
1917 1917 return (DDI_SUCCESS);
1918 1918
1919 1919 start_fail:
1920 1920 rx_data = Adapter->rx_ring->rx_data;
1921 1921
1922 1922 if (global) {
1923 1923 e1000g_release_dma_resources(Adapter);
1924 1924 e1000g_free_rx_pending_buffers(rx_data);
1925 1925 e1000g_free_rx_data(rx_data);
1926 1926 }
1927 1927
1928 1928 mutex_enter(&e1000g_nvm_lock);
1929 1929 (void) e1000_reset_hw(&Adapter->shared);
1930 1930 mutex_exit(&e1000g_nvm_lock);
1931 1931
1932 1932 return (DDI_FAILURE);
1933 1933 }
1934 1934
1935 1935 static void
1936 1936 e1000g_m_stop(void *arg)
1937 1937 {
1938 1938 struct e1000g *Adapter = (struct e1000g *)arg;
1939 1939
1940 1940 /* Drain tx sessions */
1941 1941 (void) e1000g_tx_drain(Adapter);
1942 1942
1943 1943 rw_enter(&Adapter->chip_lock, RW_WRITER);
1944 1944
1945 1945 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1946 1946 rw_exit(&Adapter->chip_lock);
1947 1947 return;
1948 1948 }
1949 1949 Adapter->e1000g_state &= ~E1000G_STARTED;
1950 1950 e1000g_stop(Adapter, B_TRUE);
1951 1951
1952 1952 rw_exit(&Adapter->chip_lock);
1953 1953
1954 1954 /* Disable and stop all the timers */
1955 1955 disable_watchdog_timer(Adapter);
1956 1956 stop_link_timer(Adapter);
1957 1957 stop_82547_timer(Adapter->tx_ring);
1958 1958 }
1959 1959
1960 1960 static void
1961 1961 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1962 1962 {
1963 1963 private_devi_list_t *devi_node;
1964 1964 e1000g_rx_data_t *rx_data;
1965 1965 int result;
1966 1966
1967 1967 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1968 1968
1969 1969 /* Stop the chip and release pending resources */
1970 1970
1971 1971 /* Tell firmware driver is no longer in control */
1972 1972 e1000g_release_driver_control(&Adapter->shared);
1973 1973
1974 1974 e1000g_clear_all_interrupts(Adapter);
1975 1975
1976 1976 mutex_enter(&e1000g_nvm_lock);
1977 1977 result = e1000_reset_hw(&Adapter->shared);
1978 1978 mutex_exit(&e1000g_nvm_lock);
1979 1979
1980 1980 if (result != E1000_SUCCESS) {
1981 1981 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1982 1982 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1983 1983 }
1984 1984
1985 1985 mutex_enter(&Adapter->link_lock);
1986 1986 Adapter->link_complete = B_FALSE;
1987 1987 mutex_exit(&Adapter->link_lock);
1988 1988
1989 1989 /* Release resources still held by the TX descriptors */
1990 1990 e1000g_tx_clean(Adapter);
1991 1991
1992 1992 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1993 1993 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1994 1994
1995 1995 /* Clean the pending rx jumbo packet fragment */
1996 1996 e1000g_rx_clean(Adapter);
1997 1997
1998 1998 if (global) {
1999 1999 e1000g_release_dma_resources(Adapter);
2000 2000
2001 2001 mutex_enter(&e1000g_rx_detach_lock);
2002 2002 rx_data = Adapter->rx_ring->rx_data;
2003 2003 rx_data->flag |= E1000G_RX_STOPPED;
2004 2004
2005 2005 if (rx_data->pending_count == 0) {
2006 2006 e1000g_free_rx_pending_buffers(rx_data);
2007 2007 e1000g_free_rx_data(rx_data);
2008 2008 } else {
2009 2009 devi_node = rx_data->priv_devi_node;
2010 2010 if (devi_node != NULL)
2011 2011 atomic_inc_32(&devi_node->pending_rx_count);
2012 2012 else
2013 2013 atomic_inc_32(&Adapter->pending_rx_count);
2014 2014 }
2015 2015 mutex_exit(&e1000g_rx_detach_lock);
2016 2016 }
2017 2017
2018 2018 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2019 2019 Adapter->link_state = LINK_STATE_UNKNOWN;
2020 2020 if (!Adapter->reset_flag)
2021 2021 mac_link_update(Adapter->mh, Adapter->link_state);
2022 2022 }
2023 2023 }
2024 2024
2025 2025 static void
2026 2026 e1000g_rx_clean(struct e1000g *Adapter)
2027 2027 {
2028 2028 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2029 2029
2030 2030 if (rx_data == NULL)
2031 2031 return;
2032 2032
2033 2033 if (rx_data->rx_mblk != NULL) {
2034 2034 freemsg(rx_data->rx_mblk);
2035 2035 rx_data->rx_mblk = NULL;
2036 2036 rx_data->rx_mblk_tail = NULL;
2037 2037 rx_data->rx_mblk_len = 0;
2038 2038 }
2039 2039 }
2040 2040
2041 2041 static void
2042 2042 e1000g_tx_clean(struct e1000g *Adapter)
2043 2043 {
2044 2044 e1000g_tx_ring_t *tx_ring;
2045 2045 p_tx_sw_packet_t packet;
2046 2046 mblk_t *mp;
2047 2047 mblk_t *nmp;
2048 2048 uint32_t packet_count;
2049 2049
2050 2050 tx_ring = Adapter->tx_ring;
2051 2051
2052 2052 /*
2053 2053 	 * Here we don't need to protect the lists with
2054 2054 	 * the usedlist_lock and freelist_lock, because they
2055 2055 	 * are already protected by the chip_lock.
2056 2056 */
2057 2057 mp = NULL;
2058 2058 nmp = NULL;
2059 2059 packet_count = 0;
2060 2060 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2061 2061 while (packet != NULL) {
2062 2062 if (packet->mp != NULL) {
2063 2063 /* Assemble the message chain */
2064 2064 if (mp == NULL) {
2065 2065 mp = packet->mp;
2066 2066 nmp = packet->mp;
2067 2067 } else {
2068 2068 nmp->b_next = packet->mp;
2069 2069 nmp = packet->mp;
2070 2070 }
2071 2071 /* Disconnect the message from the sw packet */
2072 2072 packet->mp = NULL;
2073 2073 }
2074 2074
2075 2075 e1000g_free_tx_swpkt(packet);
2076 2076 packet_count++;
2077 2077
2078 2078 packet = (p_tx_sw_packet_t)
2079 2079 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2080 2080 }
2081 2081
2082 2082 if (mp != NULL)
2083 2083 freemsgchain(mp);
2084 2084
2085 2085 if (packet_count > 0) {
2086 2086 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2087 2087 QUEUE_INIT_LIST(&tx_ring->used_list);
2088 2088
2089 2089 /* Setup TX descriptor pointers */
2090 2090 tx_ring->tbd_next = tx_ring->tbd_first;
2091 2091 tx_ring->tbd_oldest = tx_ring->tbd_first;
2092 2092
2093 2093 /* Setup our HW Tx Head & Tail descriptor pointers */
2094 2094 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2095 2095 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2096 2096 }
2097 2097 }
2098 2098
2099 2099 static boolean_t
2100 2100 e1000g_tx_drain(struct e1000g *Adapter)
2101 2101 {
2102 2102 int i;
2103 2103 boolean_t done;
2104 2104 e1000g_tx_ring_t *tx_ring;
2105 2105
2106 2106 tx_ring = Adapter->tx_ring;
2107 2107
2108 2108 	/* Allow up to TX_DRAIN_TIME for pending xmits to complete. */
2109 2109 for (i = 0; i < TX_DRAIN_TIME; i++) {
2110 2110 mutex_enter(&tx_ring->usedlist_lock);
2111 2111 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2112 2112 mutex_exit(&tx_ring->usedlist_lock);
2113 2113
2114 2114 if (done)
2115 2115 break;
2116 2116
2117 2117 msec_delay(1);
2118 2118 }
2119 2119
2120 2120 return (done);
2121 2121 }
2122 2122
2123 2123 static boolean_t
2124 2124 e1000g_rx_drain(struct e1000g *Adapter)
2125 2125 {
2126 2126 int i;
2127 2127 boolean_t done;
2128 2128
2129 2129 /*
2130 2130 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2131 2131 */
2132 2132 for (i = 0; i < RX_DRAIN_TIME; i++) {
2133 2133 done = (Adapter->pending_rx_count == 0);
2134 2134
2135 2135 if (done)
2136 2136 break;
2137 2137
2138 2138 msec_delay(1);
2139 2139 }
2140 2140
2141 2141 return (done);
2142 2142 }
2143 2143
2144 2144 static boolean_t
2145 2145 e1000g_reset_adapter(struct e1000g *Adapter)
2146 2146 {
2147 2147 /* Disable and stop all the timers */
2148 2148 disable_watchdog_timer(Adapter);
2149 2149 stop_link_timer(Adapter);
2150 2150 stop_82547_timer(Adapter->tx_ring);
2151 2151
2152 2152 rw_enter(&Adapter->chip_lock, RW_WRITER);
2153 2153
2154 2154 if (Adapter->stall_flag) {
2155 2155 Adapter->stall_flag = B_FALSE;
2156 2156 Adapter->reset_flag = B_TRUE;
2157 2157 }
2158 2158
2159 2159 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2160 2160 rw_exit(&Adapter->chip_lock);
2161 2161 return (B_TRUE);
2162 2162 }
2163 2163
2164 2164 e1000g_stop(Adapter, B_FALSE);
2165 2165
2166 2166 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2167 2167 rw_exit(&Adapter->chip_lock);
2168 2168 e1000g_log(Adapter, CE_WARN, "Reset failed");
2169 2169 return (B_FALSE);
2170 2170 }
2171 2171
2172 2172 rw_exit(&Adapter->chip_lock);
2173 2173
2174 2174 /* Enable and start the watchdog timer */
2175 2175 enable_watchdog_timer(Adapter);
2176 2176
2177 2177 return (B_TRUE);
2178 2178 }
2179 2179
2180 2180 boolean_t
2181 2181 e1000g_global_reset(struct e1000g *Adapter)
2182 2182 {
2183 2183 /* Disable and stop all the timers */
2184 2184 disable_watchdog_timer(Adapter);
2185 2185 stop_link_timer(Adapter);
2186 2186 stop_82547_timer(Adapter->tx_ring);
2187 2187
2188 2188 rw_enter(&Adapter->chip_lock, RW_WRITER);
2189 2189
2190 2190 e1000g_stop(Adapter, B_TRUE);
2191 2191
2192 2192 Adapter->init_count = 0;
2193 2193
2194 2194 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2195 2195 rw_exit(&Adapter->chip_lock);
2196 2196 e1000g_log(Adapter, CE_WARN, "Reset failed");
2197 2197 return (B_FALSE);
2198 2198 }
2199 2199
2200 2200 rw_exit(&Adapter->chip_lock);
2201 2201
2202 2202 /* Enable and start the watchdog timer */
2203 2203 enable_watchdog_timer(Adapter);
2204 2204
2205 2205 return (B_TRUE);
2206 2206 }
2207 2207
2208 2208 /*
2209 2209 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2210 2210 *
2211 2211 * This interrupt service routine is for PCI-Express adapters.
2212 2212  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2213 2213  * bit is set.
2214 2214 */
2215 2215 static uint_t
2216 2216 e1000g_intr_pciexpress(caddr_t arg)
2217 2217 {
2218 2218 struct e1000g *Adapter;
2219 2219 uint32_t icr;
2220 2220
2221 2221 Adapter = (struct e1000g *)(uintptr_t)arg;
2222 2222 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2223 2223
2224 2224 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2225 2225 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2226 2226 return (DDI_INTR_CLAIMED);
2227 2227 }
2228 2228
2229 2229 if (icr & E1000_ICR_INT_ASSERTED) {
2230 2230 /*
2231 2231 * E1000_ICR_INT_ASSERTED bit was set:
2232 2232 * Read(Clear) the ICR, claim this interrupt,
2233 2233 * look for work to do.
2234 2234 */
2235 2235 e1000g_intr_work(Adapter, icr);
2236 2236 return (DDI_INTR_CLAIMED);
2237 2237 } else {
2238 2238 /*
2239 2239 * E1000_ICR_INT_ASSERTED bit was not set:
2240 2240 * Don't claim this interrupt, return immediately.
2241 2241 */
2242 2242 return (DDI_INTR_UNCLAIMED);
2243 2243 }
2244 2244 }
2245 2245
2246 2246 /*
2247 2247 * e1000g_intr - ISR for PCI/PCI-X chipsets
2248 2248 *
2249 2249 * This interrupt service routine is for PCI/PCI-X adapters.
2250 2250  * We check the ICR contents whether or not the E1000_ICR_INT_ASSERTED
2251 2251  * bit is set.
2252 2252 */
2253 2253 static uint_t
2254 2254 e1000g_intr(caddr_t arg)
2255 2255 {
2256 2256 struct e1000g *Adapter;
2257 2257 uint32_t icr;
2258 2258
2259 2259 Adapter = (struct e1000g *)(uintptr_t)arg;
2260 2260 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2261 2261
2262 2262 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2263 2263 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2264 2264 return (DDI_INTR_CLAIMED);
2265 2265 }
2266 2266
2267 2267 if (icr) {
2268 2268 /*
2269 2269 * Any bit was set in ICR:
2270 2270 * Read(Clear) the ICR, claim this interrupt,
2271 2271 * look for work to do.
2272 2272 */
2273 2273 e1000g_intr_work(Adapter, icr);
2274 2274 return (DDI_INTR_CLAIMED);
2275 2275 } else {
2276 2276 /*
2277 2277 * No bit was set in ICR:
2278 2278 * Don't claim this interrupt, return immediately.
2279 2279 */
2280 2280 return (DDI_INTR_UNCLAIMED);
2281 2281 }
2282 2282 }
2283 2283
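The two ISR comments above boil down to different claim rules for the ICR. A minimal user-space sketch of just that decision follows; the bit values are local defines believed to match the e1000 shared code, and the sample ICR value is an illustrative assumption.

    #include <stdio.h>
    #include <stdint.h>

    #define ICR_INT_ASSERTED	0x80000000u	/* local copy of E1000_ICR_INT_ASSERTED */
    #define ICR_RXT0		0x00000080u	/* local copy of E1000_ICR_RXT0 */

    /* PCI Express rule: the ICR is meaningful only if INT_ASSERTED is set */
    static int
    claim_pciexpress(uint32_t icr)
    {
    	return ((icr & ICR_INT_ASSERTED) != 0);
    }

    /* PCI / PCI-X rule: any non-zero ICR means the interrupt is ours */
    static int
    claim_pci(uint32_t icr)
    {
    	return (icr != 0);
    }

    int
    main(void)
    {
    	uint32_t icr = ICR_RXT0;	/* Rx timer bit only, INT_ASSERTED clear */

    	printf("pcie claims: %d, pci/pci-x claims: %d\n",
    	    claim_pciexpress(icr), claim_pci(icr));
    	return (0);
    }

Either ISR still claims the interrupt when the register access handle has gone bad, so the FM service impact can be reported.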
2284 2284 /*
2285 2285 * e1000g_intr_work - actual processing of ISR
2286 2286 *
2287 2287 * Read(clear) the ICR contents and call appropriate interrupt
2288 2288 * processing routines.
2289 2289 */
2290 2290 static void
2291 2291 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2292 2292 {
2293 2293 struct e1000_hw *hw;
2294 2294 hw = &Adapter->shared;
2295 2295 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2296 2296
2297 2297 Adapter->rx_pkt_cnt = 0;
2298 2298 Adapter->tx_pkt_cnt = 0;
2299 2299
2300 2300 rw_enter(&Adapter->chip_lock, RW_READER);
2301 2301
2302 2302 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2303 2303 rw_exit(&Adapter->chip_lock);
2304 2304 return;
2305 2305 }
2306 2306 /*
2307 2307 * Here we need to check the "e1000g_state" flag within the chip_lock to
2308 2308 * ensure the receive routine will not execute when the adapter is
2309 2309 * being reset.
2310 2310 */
2311 2311 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2312 2312 rw_exit(&Adapter->chip_lock);
2313 2313 return;
2314 2314 }
2315 2315
2316 2316 if (icr & E1000_ICR_RXT0) {
2317 2317 mblk_t *mp = NULL;
2318 2318 mblk_t *tail = NULL;
2319 2319 e1000g_rx_ring_t *rx_ring;
2320 2320
2321 2321 rx_ring = Adapter->rx_ring;
2322 2322 mutex_enter(&rx_ring->rx_lock);
2323 2323 /*
2324 2324 		 * Sometimes with legacy interrupts, it is possible that
2325 2325 		 * a single interrupt covers both Rx and Tx. In that
2326 2326 		 * case, if the poll flag is set, we shouldn't really
2327 2327 		 * be doing Rx processing here.
2328 2328 */
2329 2329 if (!rx_ring->poll_flag)
2330 2330 mp = e1000g_receive(rx_ring, &tail,
2331 2331 E1000G_CHAIN_NO_LIMIT);
2332 2332 mutex_exit(&rx_ring->rx_lock);
2333 2333 rw_exit(&Adapter->chip_lock);
2334 2334 if (mp != NULL)
2335 2335 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2336 2336 mp, rx_ring->ring_gen_num);
2337 2337 } else
2338 2338 rw_exit(&Adapter->chip_lock);
2339 2339
2340 2340 if (icr & E1000_ICR_TXDW) {
2341 2341 if (!Adapter->tx_intr_enable)
2342 2342 e1000g_clear_tx_interrupt(Adapter);
2343 2343
2344 2344 /* Recycle the tx descriptors */
2345 2345 rw_enter(&Adapter->chip_lock, RW_READER);
2346 2346 (void) e1000g_recycle(tx_ring);
2347 2347 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2348 2348 rw_exit(&Adapter->chip_lock);
2349 2349
2350 2350 if (tx_ring->resched_needed &&
2351 2351 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2352 2352 tx_ring->resched_needed = B_FALSE;
2353 2353 mac_tx_update(Adapter->mh);
2354 2354 E1000G_STAT(tx_ring->stat_reschedule);
2355 2355 }
2356 2356 }
2357 2357
2358 2358 /*
2359 2359 	 * The receive sequence error (RXSEQ) and link status change (LSC)
2360 2360 	 * interrupts are checked to detect that the cable has been pulled
2361 2361 	 * out. For the Wiseman 2.0 silicon, a receive sequence error
2362 2362 	 * interrupt is an indication that the cable is not connected.
2363 2363 */
2364 2364 if ((icr & E1000_ICR_RXSEQ) ||
2365 2365 (icr & E1000_ICR_LSC) ||
2366 2366 (icr & E1000_ICR_GPI_EN1)) {
2367 2367 boolean_t link_changed;
2368 2368 timeout_id_t tid = 0;
2369 2369
2370 2370 stop_watchdog_timer(Adapter);
2371 2371
2372 2372 rw_enter(&Adapter->chip_lock, RW_WRITER);
2373 2373
2374 2374 /*
2375 2375 * Because we got a link-status-change interrupt, force
2376 2376 * e1000_check_for_link() to look at phy
2377 2377 */
2378 2378 Adapter->shared.mac.get_link_status = B_TRUE;
2379 2379
2380 2380 /* e1000g_link_check takes care of link status change */
2381 2381 link_changed = e1000g_link_check(Adapter);
2382 2382
2383 2383 /* Get new phy state */
2384 2384 e1000g_get_phy_state(Adapter);
2385 2385
2386 2386 /*
2387 2387 		 * If the link timer has not timed out, we won't notify
2388 2388 		 * the upper layer of any link state until the link is up.
2389 2389 */
2390 2390 if (link_changed && !Adapter->link_complete) {
2391 2391 if (Adapter->link_state == LINK_STATE_UP) {
2392 2392 mutex_enter(&Adapter->link_lock);
2393 2393 Adapter->link_complete = B_TRUE;
2394 2394 tid = Adapter->link_tid;
2395 2395 Adapter->link_tid = 0;
2396 2396 mutex_exit(&Adapter->link_lock);
2397 2397 } else {
2398 2398 link_changed = B_FALSE;
2399 2399 }
2400 2400 }
2401 2401 rw_exit(&Adapter->chip_lock);
2402 2402
2403 2403 if (link_changed) {
2404 2404 if (tid != 0)
2405 2405 (void) untimeout(tid);
2406 2406
2407 2407 /*
2408 2408 * Workaround for esb2. Data stuck in fifo on a link
2409 2409 * down event. Stop receiver here and reset in watchdog.
2410 2410 */
2411 2411 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2412 2412 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2413 2413 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2414 2414 E1000_WRITE_REG(hw, E1000_RCTL,
2415 2415 rctl & ~E1000_RCTL_EN);
2416 2416 e1000g_log(Adapter, CE_WARN,
2417 2417 "ESB2 receiver disabled");
2418 2418 Adapter->esb2_workaround = B_TRUE;
2419 2419 }
2420 2420 if (!Adapter->reset_flag)
2421 2421 mac_link_update(Adapter->mh,
2422 2422 Adapter->link_state);
2423 2423 if (Adapter->link_state == LINK_STATE_UP)
2424 2424 Adapter->reset_flag = B_FALSE;
2425 2425 }
2426 2426
2427 2427 start_watchdog_timer(Adapter);
2428 2428 }
2429 2429 }
2430 2430
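The link-status branch above interacts with the link timer armed in e1000g_init(): an early link-up cancels the pending timeout before the upper layer is told about the new state. The fragment below is a user-space model of only that handshake, with the timeout id reduced to a plain integer; everything in it is illustrative rather than driver code.

    #include <stdio.h>
    #include <stdbool.h>

    struct model {
    	bool link_complete;	/* link has settled; notifications allowed */
    	int link_tid;		/* non-zero: a link timer is still pending */
    };

    /* the decision made under chip_lock in the LSC/RXSEQ branch above */
    static bool
    link_change(struct model *m, bool link_up)
    {
    	bool notify = true;

    	if (!m->link_complete) {
    		if (link_up) {
    			m->link_complete = true;
    			m->link_tid = 0;	/* stands in for untimeout(tid) */
    		} else {
    			notify = false;		/* let the timer decide later */
    		}
    	}
    	return (notify);
    }

    int
    main(void)
    {
    	struct model m = { .link_complete = false, .link_tid = 42 };

    	printf("notify on early link-up: %d (pending tid now %d)\n",
    	    link_change(&m, true), m.link_tid);
    	return (0);
    }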
2431 2431 static void
2432 2432 e1000g_init_unicst(struct e1000g *Adapter)
2433 2433 {
2434 2434 struct e1000_hw *hw;
2435 2435 int slot;
2436 2436
2437 2437 hw = &Adapter->shared;
2438 2438
2439 2439 if (Adapter->init_count == 0) {
2440 2440 /* Initialize the multiple unicast addresses */
2441 - Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2441 + Adapter->unicst_total = min(hw->mac.rar_entry_count,
2442 + MAX_NUM_UNICAST_ADDRESSES);
2442 2443
2443 2444 		/* Workaround for an erratum of the 82571 chipset */
2444 2445 if ((hw->mac.type == e1000_82571) &&
2445 2446 (e1000_get_laa_state_82571(hw) == B_TRUE))
2446 2447 Adapter->unicst_total--;
2447 2448
2448 2449 /* VMware doesn't support multiple mac addresses properly */
2449 2450 if (hw->subsystem_vendor_id == 0x15ad)
2450 2451 Adapter->unicst_total = 1;
2451 2452
2452 2453 Adapter->unicst_avail = Adapter->unicst_total;
2453 2454
2454 2455 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2455 2456 /* Clear both the flag and MAC address */
2456 2457 Adapter->unicst_addr[slot].reg.high = 0;
2457 2458 Adapter->unicst_addr[slot].reg.low = 0;
2458 2459 }
2459 2460 } else {
2460 2461 		/* Workaround for an erratum of the 82571 chipset */
2461 2462 if ((hw->mac.type == e1000_82571) &&
2462 2463 (e1000_get_laa_state_82571(hw) == B_TRUE))
2463 2464 e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2464 2465
2465 2466 /* Re-configure the RAR registers */
2466 2467 for (slot = 0; slot < Adapter->unicst_total; slot++)
2467 2468 if (Adapter->unicst_addr[slot].mac.set == 1)
2468 2469 e1000_rar_set(hw,
2469 2470 Adapter->unicst_addr[slot].mac.addr, slot);
2470 2471 }
2471 2472
2472 2473 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2473 2474 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2474 2475 }
2475 2476
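The min() change above is the substance of this fix: unicst_total used to be the compile-time MAX_NUM_UNICAST_ADDRESSES even on parts whose hardware exposes fewer receive address registers. The stand-alone sketch below only illustrates the clamp; the table size and the per-device RAR counts are made-up example values, not taken from the e1000 headers.

    #include <stdio.h>

    #define MAX_NUM_UNICAST_ADDRESSES	16	/* assumed table size, for illustration */

    /* the same clamp as the new min() call above */
    static int
    clamp_unicst_total(int rar_entry_count)
    {
    	return ((rar_entry_count < MAX_NUM_UNICAST_ADDRESSES) ?
    	    rar_entry_count : MAX_NUM_UNICAST_ADDRESSES);
    }

    int
    main(void)
    {
    	int counts[] = { 16, 15, 7 };	/* hypothetical per-device RAR counts */
    	unsigned int i;

    	for (i = 0; i < sizeof (counts) / sizeof (counts[0]); i++) {
    		printf("rar_entry_count=%2d -> unicst_total=%2d\n",
    		    counts[i], clamp_unicst_total(counts[i]));
    	}
    	return (0);
    }

With the clamp in place, the 82571 LAA workaround and the VMware special case in the function above only ever shrink the total further, so the driver never advertises more address slots than the hardware can filter.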
2476 2477 static int
2477 2478 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2478 2479 int slot)
2479 2480 {
2480 2481 struct e1000_hw *hw;
2481 2482
2482 2483 hw = &Adapter->shared;
2483 2484
2484 2485 /*
2485 2486 	 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2486 2487 * that requires the receiver to be in reset when any of the
2487 2488 * receive address registers (RAR regs) are accessed. The first
2488 2489 * rev of Wiseman silicon also requires MWI to be disabled when
2489 2490 * a global reset or a receive reset is issued. So before we
2490 2491 * initialize the RARs, we check the rev of the Wiseman controller
2491 2492 * and work around any necessary HW errata.
2492 2493 */
2493 2494 if ((hw->mac.type == e1000_82542) &&
2494 2495 (hw->revision_id == E1000_REVISION_2)) {
2495 2496 e1000_pci_clear_mwi(hw);
2496 2497 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2497 2498 msec_delay(5);
2498 2499 }
2499 2500 if (mac_addr == NULL) {
2500 2501 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2501 2502 E1000_WRITE_FLUSH(hw);
2502 2503 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2503 2504 E1000_WRITE_FLUSH(hw);
2504 2505 /* Clear both the flag and MAC address */
2505 2506 Adapter->unicst_addr[slot].reg.high = 0;
2506 2507 Adapter->unicst_addr[slot].reg.low = 0;
2507 2508 } else {
2508 2509 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2509 2510 ETHERADDRL);
2510 2511 e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2511 2512 Adapter->unicst_addr[slot].mac.set = 1;
2512 2513 }
2513 2514
2514 2515 	/* Workaround for an erratum of the 82571 chipset */
2515 2516 if (slot == 0) {
2516 2517 if ((hw->mac.type == e1000_82571) &&
2517 2518 (e1000_get_laa_state_82571(hw) == B_TRUE))
2518 2519 if (mac_addr == NULL) {
2519 2520 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2520 2521 slot << 1, 0);
2521 2522 E1000_WRITE_FLUSH(hw);
2522 2523 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2523 2524 (slot << 1) + 1, 0);
2524 2525 E1000_WRITE_FLUSH(hw);
2525 2526 } else {
2526 2527 e1000_rar_set(hw, (uint8_t *)mac_addr,
2527 2528 LAST_RAR_ENTRY);
2528 2529 }
2529 2530 }
2530 2531
2531 2532 /*
2532 2533 * If we are using Wiseman rev 2.0 silicon, we will have previously
2533 2534 	 * put the receiver in reset, and disabled MWI, to work around some
2534 2535 	 * HW errata. Now we should take the receiver out of reset, and
2535 2536 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2536 2537 */
2537 2538 if ((hw->mac.type == e1000_82542) &&
2538 2539 (hw->revision_id == E1000_REVISION_2)) {
2539 2540 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2540 2541 msec_delay(1);
2541 2542 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2542 2543 e1000_pci_set_mwi(hw);
2543 2544 e1000g_rx_setup(Adapter);
2544 2545 }
2545 2546
2546 2547 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2547 2548 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2548 2549 return (EIO);
2549 2550 }
2550 2551
2551 2552 return (0);
2552 2553 }
2553 2554
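The two errata comments in e1000g_unicst_set() describe a bracketing pattern: on 82542 rev 2.0 the RARs may only be touched while the receiver is held in reset and MWI is cleared, and both have to be undone afterwards. The sketch below shows just that ordering with the register writes stubbed out as prints; the RCTL_RST value is a placeholder and the saved-MWI flag is an assumption.

    #include <stdio.h>

    #define RCTL_RST	0x04000000u	/* placeholder for E1000_RCTL_RST */

    static void write_rctl(unsigned int v)	{ printf("RCTL <- 0x%08x\n", v); }
    static void set_mwi(int on)		{ printf("MWI %s\n", on ? "set" : "cleared"); }
    static void program_rar(int slot)	{ printf("RAR[%d] programmed\n", slot); }

    int
    main(void)
    {
    	int mwi_was_on = 1;	/* assumed: remembered from the PCI command word */

    	set_mwi(0);		/* erratum: MWI must be off around a receive reset */
    	write_rctl(RCTL_RST);	/* hold the receiver in reset */
    	program_rar(0);		/* RAR access is safe only while in reset */
    	write_rctl(0);		/* release the reset */
    	if (mwi_was_on)
    		set_mwi(1);	/* restore MWI only if it was enabled before */
    	return (0);
    }

The real function then calls e1000g_rx_setup() to reprogram the receive unit it just reset.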
2554 2555 static int
2555 2556 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2556 2557 {
2557 2558 struct e1000_hw *hw = &Adapter->shared;
2558 2559 struct ether_addr *newtable;
2559 2560 size_t new_len;
2560 2561 size_t old_len;
2561 2562 int res = 0;
2562 2563
2563 2564 if ((multiaddr[0] & 01) == 0) {
2564 2565 res = EINVAL;
2565 2566 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2566 2567 goto done;
2567 2568 }
2568 2569
2569 2570 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2570 2571 res = ENOENT;
2571 2572 e1000g_log(Adapter, CE_WARN,
2572 2573 "Adapter requested more than %d mcast addresses",
2573 2574 Adapter->mcast_max_num);
2574 2575 goto done;
2575 2576 }
2576 2577
2577 2578
2578 2579 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2579 2580 old_len = Adapter->mcast_alloc_count *
2580 2581 sizeof (struct ether_addr);
2581 2582 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2582 2583 sizeof (struct ether_addr);
2583 2584
2584 2585 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2585 2586 if (newtable == NULL) {
2586 2587 res = ENOMEM;
2587 2588 e1000g_log(Adapter, CE_WARN,
2588 2589 "Not enough memory to alloc mcast table");
2589 2590 goto done;
2590 2591 }
2591 2592
2592 2593 if (Adapter->mcast_table != NULL) {
2593 2594 bcopy(Adapter->mcast_table, newtable, old_len);
2594 2595 kmem_free(Adapter->mcast_table, old_len);
2595 2596 }
2596 2597 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2597 2598 Adapter->mcast_table = newtable;
2598 2599 }
2599 2600
2600 2601 bcopy(multiaddr,
2601 2602 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2602 2603 Adapter->mcast_count++;
2603 2604
2604 2605 /*
2605 2606 * Update the MC table in the hardware
2606 2607 */
2607 2608 e1000g_clear_interrupt(Adapter);
2608 2609
2609 2610 e1000_update_mc_addr_list(hw,
2610 2611 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2611 2612
2612 2613 e1000g_mask_interrupt(Adapter);
2613 2614
2614 2615 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2615 2616 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2616 2617 res = EIO;
2617 2618 }
2618 2619
2619 2620 done:
2620 2621 return (res);
2621 2622 }
2622 2623
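multicst_add() above grows the multicast table in fixed MCAST_ALLOC_SIZE chunks instead of one entry at a time, copying the old table into the new allocation. The user-space analogue below uses realloc() where the driver uses kmem_alloc()/bcopy()/kmem_free(), and the chunk size is an illustrative value rather than the driver's.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MCAST_ALLOC_SIZE	256	/* illustrative chunk size */

    struct mctab {
    	unsigned char (*addrs)[6];
    	unsigned int count;
    	unsigned int alloc;
    };

    static int
    mctab_add(struct mctab *t, const unsigned char addr[6])
    {
    	void *n;

    	if (t->count == t->alloc) {
    		/* grow by a whole chunk, as the driver does */
    		n = realloc(t->addrs,
    		    (t->alloc + MCAST_ALLOC_SIZE) * sizeof (t->addrs[0]));
    		if (n == NULL)
    			return (-1);
    		t->addrs = n;
    		t->alloc += MCAST_ALLOC_SIZE;
    	}
    	memcpy(t->addrs[t->count++], addr, 6);
    	return (0);
    }

    int
    main(void)
    {
    	struct mctab t = { NULL, 0, 0 };
    	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    	int i;

    	for (i = 0; i < 300; i++)
    		(void) mctab_add(&t, mc);
    	printf("count=%u alloc=%u\n", t.count, t.alloc);
    	free(t.addrs);
    	return (0);
    }

multicst_remove() applies the mirror-image policy, shrinking by one chunk only once a full chunk of slack has accumulated.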
2623 2624 static int
2624 2625 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2625 2626 {
2626 2627 struct e1000_hw *hw = &Adapter->shared;
2627 2628 struct ether_addr *newtable;
2628 2629 size_t new_len;
2629 2630 size_t old_len;
2630 2631 unsigned i;
2631 2632
2632 2633 for (i = 0; i < Adapter->mcast_count; i++) {
2633 2634 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2634 2635 ETHERADDRL) == 0) {
2635 2636 for (i++; i < Adapter->mcast_count; i++) {
2636 2637 Adapter->mcast_table[i - 1] =
2637 2638 Adapter->mcast_table[i];
2638 2639 }
2639 2640 Adapter->mcast_count--;
2640 2641 break;
2641 2642 }
2642 2643 }
2643 2644
2644 2645 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2645 2646 MCAST_ALLOC_SIZE) {
2646 2647 old_len = Adapter->mcast_alloc_count *
2647 2648 sizeof (struct ether_addr);
2648 2649 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2649 2650 sizeof (struct ether_addr);
2650 2651
2651 2652 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2652 2653 if (newtable != NULL) {
2653 2654 bcopy(Adapter->mcast_table, newtable, new_len);
2654 2655 kmem_free(Adapter->mcast_table, old_len);
2655 2656
2656 2657 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2657 2658 Adapter->mcast_table = newtable;
2658 2659 }
2659 2660 }
2660 2661
2661 2662 /*
2662 2663 * Update the MC table in the hardware
2663 2664 */
2664 2665 e1000g_clear_interrupt(Adapter);
2665 2666
2666 2667 e1000_update_mc_addr_list(hw,
2667 2668 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2668 2669
2669 2670 e1000g_mask_interrupt(Adapter);
2670 2671
2671 2672 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2672 2673 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2673 2674 return (EIO);
2674 2675 }
2675 2676
2676 2677 return (0);
2677 2678 }
2678 2679
2679 2680 static void
2680 2681 e1000g_release_multicast(struct e1000g *Adapter)
2681 2682 {
2682 2683 if (Adapter->mcast_table != NULL) {
2683 2684 kmem_free(Adapter->mcast_table,
2684 2685 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2685 2686 Adapter->mcast_table = NULL;
2686 2687 }
2687 2688 }
2688 2689
2689 2690 int
2690 2691 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2691 2692 {
2692 2693 struct e1000g *Adapter = (struct e1000g *)arg;
2693 2694 int result;
2694 2695
2695 2696 rw_enter(&Adapter->chip_lock, RW_WRITER);
2696 2697
2697 2698 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2698 2699 result = ECANCELED;
2699 2700 goto done;
2700 2701 }
2701 2702
2702 2703 result = (add) ? multicst_add(Adapter, addr)
2703 2704 : multicst_remove(Adapter, addr);
2704 2705
2705 2706 done:
2706 2707 rw_exit(&Adapter->chip_lock);
2707 2708 return (result);
2708 2709
2709 2710 }
2710 2711
2711 2712 int
2712 2713 e1000g_m_promisc(void *arg, boolean_t on)
2713 2714 {
2714 2715 struct e1000g *Adapter = (struct e1000g *)arg;
2715 2716 uint32_t rctl;
2716 2717
2717 2718 rw_enter(&Adapter->chip_lock, RW_WRITER);
2718 2719
2719 2720 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2720 2721 rw_exit(&Adapter->chip_lock);
2721 2722 return (ECANCELED);
2722 2723 }
2723 2724
2724 2725 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2725 2726
2726 2727 if (on)
2727 2728 rctl |=
2728 2729 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2729 2730 else
2730 2731 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2731 2732
2732 2733 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2733 2734
2734 2735 Adapter->e1000g_promisc = on;
2735 2736
2736 2737 rw_exit(&Adapter->chip_lock);
2737 2738
2738 2739 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2739 2740 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2740 2741 return (EIO);
2741 2742 }
2742 2743
2743 2744 return (0);
2744 2745 }
2745 2746
2746 2747 /*
2747 2748 * Entry points to enable and disable interrupts at the granularity of
2748 2749 * a group.
2749 2750 * Turns the poll_mode for the whole adapter on and off to enable or
2750 2751 * override the ring level polling control over the hardware interrupts.
2751 2752 */
2752 2753 static int
2753 2754 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2754 2755 {
2755 2756 struct e1000g *adapter = (struct e1000g *)arg;
2756 2757 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2757 2758
2758 2759 /*
2759 2760 	 * Later interrupts at the granularity of this ring will
2760 2761 * invoke mac_rx() with NULL, indicating the need for another
2761 2762 * software classification.
2762 2763 * We have a single ring usable per adapter now, so we only need to
2763 2764 * reset the rx handle for that one.
2764 2765 * When more RX rings can be used, we should update each one of them.
2765 2766 */
2766 2767 mutex_enter(&rx_ring->rx_lock);
2767 2768 rx_ring->mrh = NULL;
2768 2769 adapter->poll_mode = B_FALSE;
2769 2770 mutex_exit(&rx_ring->rx_lock);
2770 2771 return (0);
2771 2772 }
2772 2773
2773 2774 static int
2774 2775 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2775 2776 {
2776 2777 struct e1000g *adapter = (struct e1000g *)arg;
2777 2778 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2778 2779
2779 2780 mutex_enter(&rx_ring->rx_lock);
2780 2781
2781 2782 /*
2782 2783 	 * Later interrupts at the granularity of this ring will
2783 2784 	 * invoke mac_rx() with the handle for this ring.
2784 2785 */
2785 2786 adapter->poll_mode = B_TRUE;
2786 2787 rx_ring->mrh = rx_ring->mrh_init;
2787 2788 mutex_exit(&rx_ring->rx_lock);
2788 2789 return (0);
2789 2790 }
2790 2791
2791 2792 /*
2792 2793 * Entry points to enable and disable interrupts at the granularity of
2793 2794 * a ring.
2794 2795  * The adapter's poll_mode controls whether we actually proceed with
2795 2796  * hardware interrupt toggling.
2796 2797 */
2797 2798 static int
2798 2799 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2799 2800 {
2800 2801 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2801 2802 struct e1000g *adapter = rx_ring->adapter;
2802 2803 struct e1000_hw *hw = &adapter->shared;
2803 2804 uint32_t intr_mask;
2804 2805
2805 2806 rw_enter(&adapter->chip_lock, RW_READER);
2806 2807
2807 2808 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2808 2809 rw_exit(&adapter->chip_lock);
2809 2810 return (0);
2810 2811 }
2811 2812
2812 2813 mutex_enter(&rx_ring->rx_lock);
2813 2814 rx_ring->poll_flag = 0;
2814 2815 mutex_exit(&rx_ring->rx_lock);
2815 2816
2816 2817 /* Rx interrupt enabling for MSI and legacy */
2817 2818 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2818 2819 intr_mask |= E1000_IMS_RXT0;
2819 2820 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2820 2821 E1000_WRITE_FLUSH(hw);
2821 2822
2822 2823 /* Trigger a Rx interrupt to check Rx ring */
2823 2824 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2824 2825 E1000_WRITE_FLUSH(hw);
2825 2826
2826 2827 rw_exit(&adapter->chip_lock);
2827 2828 return (0);
2828 2829 }
2829 2830
2830 2831 static int
2831 2832 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2832 2833 {
2833 2834 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2834 2835 struct e1000g *adapter = rx_ring->adapter;
2835 2836 struct e1000_hw *hw = &adapter->shared;
2836 2837
2837 2838 rw_enter(&adapter->chip_lock, RW_READER);
2838 2839
2839 2840 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2840 2841 rw_exit(&adapter->chip_lock);
2841 2842 return (0);
2842 2843 }
2843 2844 mutex_enter(&rx_ring->rx_lock);
2844 2845 rx_ring->poll_flag = 1;
2845 2846 mutex_exit(&rx_ring->rx_lock);
2846 2847
2847 2848 /* Rx interrupt disabling for MSI and legacy */
2848 2849 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2849 2850 E1000_WRITE_FLUSH(hw);
2850 2851
2851 2852 rw_exit(&adapter->chip_lock);
2852 2853 return (0);
2853 2854 }
2854 2855
2855 2856 /*
2856 2857 * e1000g_unicst_find - Find the slot for the specified unicast address
2857 2858 */
2858 2859 static int
2859 2860 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2860 2861 {
2861 2862 int slot;
2862 2863
2863 2864 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2864 2865 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2865 2866 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2866 2867 mac_addr, ETHERADDRL) == 0))
2867 2868 return (slot);
2868 2869 }
2869 2870
2870 2871 return (-1);
2871 2872 }
2872 2873
2873 2874 /*
2874 2875 * Entry points to add and remove a MAC address to a ring group.
2875 2876 * The caller takes care of adding and removing the MAC addresses
2876 2877 * to the filter via these two routines.
2877 2878 */
2878 2879
2879 2880 static int
2880 2881 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2881 2882 {
2882 2883 struct e1000g *Adapter = (struct e1000g *)arg;
2883 2884 int slot, err;
2884 2885
2885 2886 rw_enter(&Adapter->chip_lock, RW_WRITER);
2886 2887
2887 2888 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2888 2889 rw_exit(&Adapter->chip_lock);
2889 2890 return (ECANCELED);
2890 2891 }
2891 2892
2892 2893 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2893 2894 /* The same address is already in slot */
2894 2895 rw_exit(&Adapter->chip_lock);
2895 2896 return (0);
2896 2897 }
2897 2898
2898 2899 if (Adapter->unicst_avail == 0) {
2899 2900 /* no slots available */
2900 2901 rw_exit(&Adapter->chip_lock);
2901 2902 return (ENOSPC);
2902 2903 }
2903 2904
2904 2905 /* Search for a free slot */
2905 2906 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2906 2907 if (Adapter->unicst_addr[slot].mac.set == 0)
2907 2908 break;
2908 2909 }
2909 2910 ASSERT(slot < Adapter->unicst_total);
2910 2911
2911 2912 err = e1000g_unicst_set(Adapter, mac_addr, slot);
2912 2913 if (err == 0)
2913 2914 Adapter->unicst_avail--;
2914 2915
2915 2916 rw_exit(&Adapter->chip_lock);
2916 2917
2917 2918 return (err);
2918 2919 }
2919 2920
2920 2921 static int
2921 2922 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2922 2923 {
2923 2924 struct e1000g *Adapter = (struct e1000g *)arg;
2924 2925 int slot, err;
2925 2926
2926 2927 rw_enter(&Adapter->chip_lock, RW_WRITER);
2927 2928
2928 2929 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2929 2930 rw_exit(&Adapter->chip_lock);
2930 2931 return (ECANCELED);
2931 2932 }
2932 2933
2933 2934 slot = e1000g_unicst_find(Adapter, mac_addr);
2934 2935 if (slot == -1) {
2935 2936 rw_exit(&Adapter->chip_lock);
2936 2937 return (EINVAL);
2937 2938 }
2938 2939
2939 2940 ASSERT(Adapter->unicst_addr[slot].mac.set);
2940 2941
2941 2942 /* Clear this slot */
2942 2943 err = e1000g_unicst_set(Adapter, NULL, slot);
2943 2944 if (err == 0)
2944 2945 Adapter->unicst_avail++;
2945 2946
2946 2947 rw_exit(&Adapter->chip_lock);
2947 2948
2948 2949 return (err);
2949 2950 }
2950 2951
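With unicst_total now clamped to the hardware RAR count, e1000g_addmac() simply runs out of slots earlier on small parts and returns ENOSPC to the MAC layer. The model below reproduces only that slot accounting; the slot count is a hypothetical value and the duplicate-address and suspend checks of the real entry points are left out.

    #include <stdio.h>
    #include <errno.h>

    #define SLOTS	7	/* hypothetical rar_entry_count for a small part */

    static int slot_set[SLOTS];
    static int avail = SLOTS;

    static int
    addmac(void)
    {
    	int slot;

    	if (avail == 0)
    		return (ENOSPC);	/* no free receive address registers */
    	for (slot = 0; slot < SLOTS; slot++) {
    		if (!slot_set[slot]) {
    			slot_set[slot] = 1;
    			avail--;
    			return (0);
    		}
    	}
    	return (ENOSPC);	/* unreachable while avail stays consistent */
    }

    int
    main(void)
    {
    	int err, added = 0;

    	while ((err = addmac()) == 0)
    		added++;
    	printf("added %d addresses before ENOSPC (err %d)\n", added, err);
    	return (0);
    }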
2951 2952 static int
2952 2953 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2953 2954 {
2954 2955 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2955 2956
2956 2957 mutex_enter(&rx_ring->rx_lock);
2957 2958 rx_ring->ring_gen_num = mr_gen_num;
2958 2959 mutex_exit(&rx_ring->rx_lock);
2959 2960 return (0);
2960 2961 }
2961 2962
2962 2963 /*
2963 2964  * Callback function for the MAC layer to register all rings.
2964 2965 *
2965 2966 * The hardware supports a single group with currently only one ring
2966 2967 * available.
2967 2968 * Though not offering virtualization ability per se, exposing the
2968 2969 * group/ring still enables the polling and interrupt toggling.
2969 2970 */
2970 2971 /* ARGSUSED */
2971 2972 void
2972 2973 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2973 2974 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2974 2975 {
2975 2976 struct e1000g *Adapter = (struct e1000g *)arg;
2976 2977 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2977 2978 mac_intr_t *mintr;
2978 2979
2979 2980 /*
2980 2981 * We advertised only RX group/rings, so the MAC framework shouldn't
2981 2982 	 * ask for anything else.
2982 2983 */
2983 2984 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2984 2985
2985 2986 rx_ring->mrh = rx_ring->mrh_init = rh;
2986 2987 infop->mri_driver = (mac_ring_driver_t)rx_ring;
2987 2988 infop->mri_start = e1000g_ring_start;
2988 2989 infop->mri_stop = NULL;
2989 2990 infop->mri_poll = e1000g_poll_ring;
2990 2991 infop->mri_stat = e1000g_rx_ring_stat;
2991 2992
2992 2993 /* Ring level interrupts */
2993 2994 mintr = &infop->mri_intr;
2994 2995 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2995 2996 mintr->mi_enable = e1000g_rx_ring_intr_enable;
2996 2997 mintr->mi_disable = e1000g_rx_ring_intr_disable;
2997 2998 if (Adapter->msi_enable)
2998 2999 mintr->mi_ddi_handle = Adapter->htable[0];
2999 3000 }
3000 3001
3001 3002 /* ARGSUSED */
3002 3003 static void
3003 3004 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3004 3005 mac_group_info_t *infop, mac_group_handle_t gh)
3005 3006 {
3006 3007 struct e1000g *Adapter = (struct e1000g *)arg;
3007 3008 mac_intr_t *mintr;
3008 3009
3009 3010 /*
3010 3011 * We advertised a single RX ring. Getting a request for anything else
3011 3012 * signifies a bug in the MAC framework.
3012 3013 */
3013 3014 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3014 3015
3015 3016 Adapter->rx_group = gh;
3016 3017
3017 3018 infop->mgi_driver = (mac_group_driver_t)Adapter;
3018 3019 infop->mgi_start = NULL;
3019 3020 infop->mgi_stop = NULL;
3020 3021 infop->mgi_addmac = e1000g_addmac;
3021 3022 infop->mgi_remmac = e1000g_remmac;
3022 3023 infop->mgi_count = 1;
3023 3024
3024 3025 /* Group level interrupts */
3025 3026 mintr = &infop->mgi_intr;
3026 3027 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3027 3028 mintr->mi_enable = e1000g_rx_group_intr_enable;
3028 3029 mintr->mi_disable = e1000g_rx_group_intr_disable;
3029 3030 }
3030 3031
3031 3032 static boolean_t
3032 3033 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3033 3034 {
3034 3035 struct e1000g *Adapter = (struct e1000g *)arg;
3035 3036
3036 3037 switch (cap) {
3037 3038 case MAC_CAPAB_HCKSUM: {
3038 3039 uint32_t *txflags = cap_data;
3039 3040
3040 3041 if (Adapter->tx_hcksum_enable)
3041 3042 *txflags = HCKSUM_IPHDRCKSUM |
3042 3043 HCKSUM_INET_PARTIAL;
3043 3044 else
3044 3045 return (B_FALSE);
3045 3046 break;
3046 3047 }
3047 3048
3048 3049 case MAC_CAPAB_LSO: {
3049 3050 mac_capab_lso_t *cap_lso = cap_data;
3050 3051
3051 3052 if (Adapter->lso_enable) {
3052 3053 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3053 3054 cap_lso->lso_basic_tcp_ipv4.lso_max =
3054 3055 E1000_LSO_MAXLEN;
3055 3056 } else
3056 3057 return (B_FALSE);
3057 3058 break;
3058 3059 }
3059 3060 case MAC_CAPAB_RINGS: {
3060 3061 mac_capab_rings_t *cap_rings = cap_data;
3061 3062
3062 3063 /* No TX rings exposed yet */
3063 3064 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3064 3065 return (B_FALSE);
3065 3066
3066 3067 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3067 3068 cap_rings->mr_rnum = 1;
3068 3069 cap_rings->mr_gnum = 1;
3069 3070 cap_rings->mr_rget = e1000g_fill_ring;
3070 3071 cap_rings->mr_gget = e1000g_fill_group;
3071 3072 break;
3072 3073 }
3073 3074 default:
3074 3075 return (B_FALSE);
3075 3076 }
3076 3077 return (B_TRUE);
3077 3078 }
3078 3079
3079 3080 static boolean_t
3080 3081 e1000g_param_locked(mac_prop_id_t pr_num)
3081 3082 {
3082 3083 /*
3083 3084 * All en_* parameters are locked (read-only) while
3084 3085 * the device is in any sort of loopback mode ...
3085 3086 */
3086 3087 switch (pr_num) {
3087 3088 case MAC_PROP_EN_1000FDX_CAP:
3088 3089 case MAC_PROP_EN_1000HDX_CAP:
3089 3090 case MAC_PROP_EN_100FDX_CAP:
3090 3091 case MAC_PROP_EN_100HDX_CAP:
3091 3092 case MAC_PROP_EN_10FDX_CAP:
3092 3093 case MAC_PROP_EN_10HDX_CAP:
3093 3094 case MAC_PROP_AUTONEG:
3094 3095 case MAC_PROP_FLOWCTRL:
3095 3096 return (B_TRUE);
3096 3097 }
3097 3098 return (B_FALSE);
3098 3099 }
3099 3100
3100 3101 /*
3101 3102 * callback function for set/get of properties
3102 3103 */
3103 3104 static int
3104 3105 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3105 3106 uint_t pr_valsize, const void *pr_val)
3106 3107 {
3107 3108 struct e1000g *Adapter = arg;
3108 3109 struct e1000_hw *hw = &Adapter->shared;
3109 3110 struct e1000_fc_info *fc = &Adapter->shared.fc;
3110 3111 int err = 0;
3111 3112 link_flowctrl_t flowctrl;
3112 3113 uint32_t cur_mtu, new_mtu;
3113 3114
3114 3115 rw_enter(&Adapter->chip_lock, RW_WRITER);
3115 3116
3116 3117 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3117 3118 rw_exit(&Adapter->chip_lock);
3118 3119 return (ECANCELED);
3119 3120 }
3120 3121
3121 3122 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3122 3123 e1000g_param_locked(pr_num)) {
3123 3124 /*
3124 3125 * All en_* parameters are locked (read-only)
3125 3126 * while the device is in any sort of loopback mode.
3126 3127 */
3127 3128 rw_exit(&Adapter->chip_lock);
3128 3129 return (EBUSY);
3129 3130 }
3130 3131
3131 3132 switch (pr_num) {
3132 3133 case MAC_PROP_EN_1000FDX_CAP:
3133 3134 if (hw->phy.media_type != e1000_media_type_copper) {
3134 3135 err = ENOTSUP;
3135 3136 break;
3136 3137 }
3137 3138 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3138 3139 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3139 3140 goto reset;
3140 3141 case MAC_PROP_EN_100FDX_CAP:
3141 3142 if (hw->phy.media_type != e1000_media_type_copper) {
3142 3143 err = ENOTSUP;
3143 3144 break;
3144 3145 }
3145 3146 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3146 3147 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3147 3148 goto reset;
3148 3149 case MAC_PROP_EN_100HDX_CAP:
3149 3150 if (hw->phy.media_type != e1000_media_type_copper) {
3150 3151 err = ENOTSUP;
3151 3152 break;
3152 3153 }
3153 3154 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3154 3155 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3155 3156 goto reset;
3156 3157 case MAC_PROP_EN_10FDX_CAP:
3157 3158 if (hw->phy.media_type != e1000_media_type_copper) {
3158 3159 err = ENOTSUP;
3159 3160 break;
3160 3161 }
3161 3162 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3162 3163 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3163 3164 goto reset;
3164 3165 case MAC_PROP_EN_10HDX_CAP:
3165 3166 if (hw->phy.media_type != e1000_media_type_copper) {
3166 3167 err = ENOTSUP;
3167 3168 break;
3168 3169 }
3169 3170 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3170 3171 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3171 3172 goto reset;
3172 3173 case MAC_PROP_AUTONEG:
3173 3174 if (hw->phy.media_type != e1000_media_type_copper) {
3174 3175 err = ENOTSUP;
3175 3176 break;
3176 3177 }
3177 3178 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3178 3179 goto reset;
3179 3180 case MAC_PROP_FLOWCTRL:
3180 3181 fc->send_xon = B_TRUE;
3181 3182 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3182 3183
3183 3184 switch (flowctrl) {
3184 3185 default:
3185 3186 err = EINVAL;
3186 3187 break;
3187 3188 case LINK_FLOWCTRL_NONE:
3188 3189 fc->requested_mode = e1000_fc_none;
3189 3190 break;
3190 3191 case LINK_FLOWCTRL_RX:
3191 3192 fc->requested_mode = e1000_fc_rx_pause;
3192 3193 break;
3193 3194 case LINK_FLOWCTRL_TX:
3194 3195 fc->requested_mode = e1000_fc_tx_pause;
3195 3196 break;
3196 3197 case LINK_FLOWCTRL_BI:
3197 3198 fc->requested_mode = e1000_fc_full;
3198 3199 break;
3199 3200 }
3200 3201 reset:
3201 3202 if (err == 0) {
3202 3203 /* check PCH limits & reset the link */
3203 3204 e1000g_pch_limits(Adapter);
3204 3205 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3205 3206 err = EINVAL;
3206 3207 }
3207 3208 break;
3208 3209 case MAC_PROP_ADV_1000FDX_CAP:
3209 3210 case MAC_PROP_ADV_1000HDX_CAP:
3210 3211 case MAC_PROP_ADV_100FDX_CAP:
3211 3212 case MAC_PROP_ADV_100HDX_CAP:
3212 3213 case MAC_PROP_ADV_10FDX_CAP:
3213 3214 case MAC_PROP_ADV_10HDX_CAP:
3214 3215 case MAC_PROP_EN_1000HDX_CAP:
3215 3216 case MAC_PROP_STATUS:
3216 3217 case MAC_PROP_SPEED:
3217 3218 case MAC_PROP_DUPLEX:
3218 3219 err = ENOTSUP; /* read-only prop. Can't set this. */
3219 3220 break;
3220 3221 case MAC_PROP_MTU:
3221 3222 /* adapter must be stopped for an MTU change */
3222 3223 if (Adapter->e1000g_state & E1000G_STARTED) {
3223 3224 err = EBUSY;
3224 3225 break;
3225 3226 }
3226 3227
3227 3228 cur_mtu = Adapter->default_mtu;
3228 3229
3229 3230 /* get new requested MTU */
3230 3231 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3231 3232 if (new_mtu == cur_mtu) {
3232 3233 err = 0;
3233 3234 break;
3234 3235 }
3235 3236
3236 3237 if ((new_mtu < DEFAULT_MTU) ||
3237 3238 (new_mtu > Adapter->max_mtu)) {
3238 3239 err = EINVAL;
3239 3240 break;
3240 3241 }
3241 3242
3242 3243 /* inform MAC framework of new MTU */
3243 3244 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3244 3245
3245 3246 if (err == 0) {
3246 3247 Adapter->default_mtu = new_mtu;
3247 3248 Adapter->max_frame_size =
3248 3249 e1000g_mtu2maxframe(new_mtu);
3249 3250
3250 3251 /*
3251 3252 * check PCH limits & set buffer sizes to
3252 3253 * match new MTU
3253 3254 */
3254 3255 e1000g_pch_limits(Adapter);
3255 3256 e1000g_set_bufsize(Adapter);
3256 3257
3257 3258 /*
3258 3259 * decrease the number of descriptors and free
3259 3260 * packets for jumbo frames to reduce tx/rx
3260 3261 * resource consumption
3261 3262 */
3262 3263 if (Adapter->max_frame_size >=
3263 3264 (FRAME_SIZE_UPTO_4K)) {
3264 3265 if (Adapter->tx_desc_num_flag == 0)
3265 3266 Adapter->tx_desc_num =
3266 3267 DEFAULT_JUMBO_NUM_TX_DESC;
3267 3268
3268 3269 if (Adapter->rx_desc_num_flag == 0)
3269 3270 Adapter->rx_desc_num =
3270 3271 DEFAULT_JUMBO_NUM_RX_DESC;
3271 3272
3272 3273 if (Adapter->tx_buf_num_flag == 0)
3273 3274 Adapter->tx_freelist_num =
3274 3275 DEFAULT_JUMBO_NUM_TX_BUF;
3275 3276
3276 3277 if (Adapter->rx_buf_num_flag == 0)
3277 3278 Adapter->rx_freelist_limit =
3278 3279 DEFAULT_JUMBO_NUM_RX_BUF;
3279 3280 } else {
3280 3281 if (Adapter->tx_desc_num_flag == 0)
3281 3282 Adapter->tx_desc_num =
3282 3283 DEFAULT_NUM_TX_DESCRIPTOR;
3283 3284
3284 3285 if (Adapter->rx_desc_num_flag == 0)
3285 3286 Adapter->rx_desc_num =
3286 3287 DEFAULT_NUM_RX_DESCRIPTOR;
3287 3288
3288 3289 if (Adapter->tx_buf_num_flag == 0)
3289 3290 Adapter->tx_freelist_num =
3290 3291 DEFAULT_NUM_TX_FREELIST;
3291 3292
3292 3293 if (Adapter->rx_buf_num_flag == 0)
3293 3294 Adapter->rx_freelist_limit =
3294 3295 DEFAULT_NUM_RX_FREELIST;
3295 3296 }
3296 3297 }
3297 3298 break;
3298 3299 case MAC_PROP_PRIVATE:
3299 3300 err = e1000g_set_priv_prop(Adapter, pr_name,
3300 3301 pr_valsize, pr_val);
3301 3302 break;
3302 3303 default:
3303 3304 err = ENOTSUP;
3304 3305 break;
3305 3306 }
3306 3307 rw_exit(&Adapter->chip_lock);
3307 3308 return (err);
3308 3309 }
3309 3310
3310 3311 static int
3311 3312 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3312 3313 uint_t pr_valsize, void *pr_val)
3313 3314 {
3314 3315 struct e1000g *Adapter = arg;
3315 3316 struct e1000_fc_info *fc = &Adapter->shared.fc;
3316 3317 int err = 0;
3317 3318 link_flowctrl_t flowctrl;
3318 3319 uint64_t tmp = 0;
3319 3320
3320 3321 switch (pr_num) {
3321 3322 case MAC_PROP_DUPLEX:
3322 3323 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3323 3324 bcopy(&Adapter->link_duplex, pr_val,
3324 3325 sizeof (link_duplex_t));
3325 3326 break;
3326 3327 case MAC_PROP_SPEED:
3327 3328 ASSERT(pr_valsize >= sizeof (uint64_t));
3328 3329 tmp = Adapter->link_speed * 1000000ull;
3329 3330 bcopy(&tmp, pr_val, sizeof (tmp));
3330 3331 break;
3331 3332 case MAC_PROP_AUTONEG:
3332 3333 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3333 3334 break;
3334 3335 case MAC_PROP_FLOWCTRL:
3335 3336 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3336 3337 switch (fc->current_mode) {
3337 3338 case e1000_fc_none:
3338 3339 flowctrl = LINK_FLOWCTRL_NONE;
3339 3340 break;
3340 3341 case e1000_fc_rx_pause:
3341 3342 flowctrl = LINK_FLOWCTRL_RX;
3342 3343 break;
3343 3344 case e1000_fc_tx_pause:
3344 3345 flowctrl = LINK_FLOWCTRL_TX;
3345 3346 break;
3346 3347 case e1000_fc_full:
3347 3348 flowctrl = LINK_FLOWCTRL_BI;
3348 3349 break;
3349 3350 }
3350 3351 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3351 3352 break;
3352 3353 case MAC_PROP_ADV_1000FDX_CAP:
3353 3354 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3354 3355 break;
3355 3356 case MAC_PROP_EN_1000FDX_CAP:
3356 3357 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3357 3358 break;
3358 3359 case MAC_PROP_ADV_1000HDX_CAP:
3359 3360 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3360 3361 break;
3361 3362 case MAC_PROP_EN_1000HDX_CAP:
3362 3363 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3363 3364 break;
3364 3365 case MAC_PROP_ADV_100FDX_CAP:
3365 3366 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3366 3367 break;
3367 3368 case MAC_PROP_EN_100FDX_CAP:
3368 3369 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3369 3370 break;
3370 3371 case MAC_PROP_ADV_100HDX_CAP:
3371 3372 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3372 3373 break;
3373 3374 case MAC_PROP_EN_100HDX_CAP:
3374 3375 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3375 3376 break;
3376 3377 case MAC_PROP_ADV_10FDX_CAP:
3377 3378 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3378 3379 break;
3379 3380 case MAC_PROP_EN_10FDX_CAP:
3380 3381 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3381 3382 break;
3382 3383 case MAC_PROP_ADV_10HDX_CAP:
3383 3384 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3384 3385 break;
3385 3386 case MAC_PROP_EN_10HDX_CAP:
3386 3387 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3387 3388 break;
3388 3389 case MAC_PROP_ADV_100T4_CAP:
3389 3390 case MAC_PROP_EN_100T4_CAP:
3390 3391 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3391 3392 break;
3392 3393 case MAC_PROP_PRIVATE:
3393 3394 err = e1000g_get_priv_prop(Adapter, pr_name,
3394 3395 pr_valsize, pr_val);
3395 3396 break;
3396 3397 default:
3397 3398 err = ENOTSUP;
3398 3399 break;
3399 3400 }
3400 3401
3401 3402 return (err);
3402 3403 }
3403 3404
3404 3405 static void
3405 3406 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3406 3407 mac_prop_info_handle_t prh)
3407 3408 {
3408 3409 struct e1000g *Adapter = arg;
3409 3410 struct e1000_hw *hw = &Adapter->shared;
3410 3411
3411 3412 switch (pr_num) {
3412 3413 case MAC_PROP_DUPLEX:
3413 3414 case MAC_PROP_SPEED:
3414 3415 case MAC_PROP_ADV_1000FDX_CAP:
3415 3416 case MAC_PROP_ADV_1000HDX_CAP:
3416 3417 case MAC_PROP_ADV_100FDX_CAP:
3417 3418 case MAC_PROP_ADV_100HDX_CAP:
3418 3419 case MAC_PROP_ADV_10FDX_CAP:
3419 3420 case MAC_PROP_ADV_10HDX_CAP:
3420 3421 case MAC_PROP_ADV_100T4_CAP:
3421 3422 case MAC_PROP_EN_100T4_CAP:
3422 3423 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3423 3424 break;
3424 3425
3425 3426 case MAC_PROP_EN_1000FDX_CAP:
3426 3427 if (hw->phy.media_type != e1000_media_type_copper) {
3427 3428 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3428 3429 } else {
3429 3430 mac_prop_info_set_default_uint8(prh,
3430 3431 ((Adapter->phy_ext_status &
3431 3432 IEEE_ESR_1000T_FD_CAPS) ||
3432 3433 (Adapter->phy_ext_status &
3433 3434 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3434 3435 }
3435 3436 break;
3436 3437
3437 3438 case MAC_PROP_EN_100FDX_CAP:
3438 3439 if (hw->phy.media_type != e1000_media_type_copper) {
3439 3440 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3440 3441 } else {
3441 3442 mac_prop_info_set_default_uint8(prh,
3442 3443 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3443 3444 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3444 3445 ? 1 : 0);
3445 3446 }
3446 3447 break;
3447 3448
3448 3449 case MAC_PROP_EN_100HDX_CAP:
3449 3450 if (hw->phy.media_type != e1000_media_type_copper) {
3450 3451 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3451 3452 } else {
3452 3453 mac_prop_info_set_default_uint8(prh,
3453 3454 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3454 3455 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3455 3456 ? 1 : 0);
3456 3457 }
3457 3458 break;
3458 3459
3459 3460 case MAC_PROP_EN_10FDX_CAP:
3460 3461 if (hw->phy.media_type != e1000_media_type_copper) {
3461 3462 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3462 3463 } else {
3463 3464 mac_prop_info_set_default_uint8(prh,
3464 3465 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3465 3466 }
3466 3467 break;
3467 3468
3468 3469 case MAC_PROP_EN_10HDX_CAP:
3469 3470 if (hw->phy.media_type != e1000_media_type_copper) {
3470 3471 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3471 3472 } else {
3472 3473 mac_prop_info_set_default_uint8(prh,
3473 3474 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3474 3475 }
3475 3476 break;
3476 3477
3477 3478 case MAC_PROP_EN_1000HDX_CAP:
3478 3479 if (hw->phy.media_type != e1000_media_type_copper)
3479 3480 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3480 3481 break;
3481 3482
3482 3483 case MAC_PROP_AUTONEG:
3483 3484 if (hw->phy.media_type != e1000_media_type_copper) {
3484 3485 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3485 3486 } else {
3486 3487 mac_prop_info_set_default_uint8(prh,
3487 3488 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3488 3489 ? 1 : 0);
3489 3490 }
3490 3491 break;
3491 3492
3492 3493 case MAC_PROP_FLOWCTRL:
3493 3494 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3494 3495 break;
3495 3496
3496 3497 case MAC_PROP_MTU: {
3497 3498 struct e1000_mac_info *mac = &Adapter->shared.mac;
3498 3499 struct e1000_phy_info *phy = &Adapter->shared.phy;
3499 3500 uint32_t max;
3500 3501
3501 3502 /* some MAC types do not support jumbo frames */
3502 3503 if ((mac->type == e1000_ich8lan) ||
3503 3504 ((mac->type == e1000_ich9lan) && (phy->type ==
3504 3505 e1000_phy_ife))) {
3505 3506 max = DEFAULT_MTU;
3506 3507 } else {
3507 3508 max = Adapter->max_mtu;
3508 3509 }
3509 3510
3510 3511 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3511 3512 break;
3512 3513 }
3513 3514 case MAC_PROP_PRIVATE: {
3514 3515 char valstr[64];
3515 3516 int value;
3516 3517
3517 3518 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3518 3519 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3519 3520 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3520 3521 return;
3521 3522 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3522 3523 value = DEFAULT_TX_BCOPY_THRESHOLD;
3523 3524 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3524 3525 value = DEFAULT_TX_INTR_ENABLE;
3525 3526 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3526 3527 value = DEFAULT_TX_INTR_DELAY;
3527 3528 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3528 3529 value = DEFAULT_TX_INTR_ABS_DELAY;
3529 3530 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3530 3531 value = DEFAULT_RX_BCOPY_THRESHOLD;
3531 3532 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3532 3533 value = DEFAULT_RX_LIMIT_ON_INTR;
3533 3534 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3534 3535 value = DEFAULT_RX_INTR_DELAY;
3535 3536 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3536 3537 value = DEFAULT_RX_INTR_ABS_DELAY;
3537 3538 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3538 3539 value = DEFAULT_INTR_THROTTLING;
3539 3540 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3540 3541 value = 1;
3541 3542 } else {
3542 3543 return;
3543 3544 }
3544 3545
3545 3546 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3546 3547 mac_prop_info_set_default_str(prh, valstr);
3547 3548 break;
3548 3549 }
3549 3550 }
3550 3551 }
3551 3552
3552 3553 /* ARGSUSED2 */
3553 3554 static int
3554 3555 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3555 3556 uint_t pr_valsize, const void *pr_val)
3556 3557 {
3557 3558 int err = 0;
3558 3559 long result;
3559 3560 struct e1000_hw *hw = &Adapter->shared;
3560 3561
3561 3562 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3562 3563 if (pr_val == NULL) {
3563 3564 err = EINVAL;
3564 3565 return (err);
3565 3566 }
3566 3567 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3567 3568 if (result < MIN_TX_BCOPY_THRESHOLD ||
3568 3569 result > MAX_TX_BCOPY_THRESHOLD)
3569 3570 err = EINVAL;
3570 3571 else {
3571 3572 Adapter->tx_bcopy_thresh = (uint32_t)result;
3572 3573 }
3573 3574 return (err);
3574 3575 }
3575 3576 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3576 3577 if (pr_val == NULL) {
3577 3578 err = EINVAL;
3578 3579 return (err);
3579 3580 }
3580 3581 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3581 3582 if (result < 0 || result > 1)
3582 3583 err = EINVAL;
3583 3584 else {
3584 3585 Adapter->tx_intr_enable = (result == 1) ?
3585 3586 B_TRUE: B_FALSE;
3586 3587 if (Adapter->tx_intr_enable)
3587 3588 e1000g_mask_tx_interrupt(Adapter);
3588 3589 else
3589 3590 e1000g_clear_tx_interrupt(Adapter);
3590 3591 if (e1000g_check_acc_handle(
3591 3592 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3592 3593 ddi_fm_service_impact(Adapter->dip,
3593 3594 DDI_SERVICE_DEGRADED);
3594 3595 err = EIO;
3595 3596 }
3596 3597 }
3597 3598 return (err);
3598 3599 }
3599 3600 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3600 3601 if (pr_val == NULL) {
3601 3602 err = EINVAL;
3602 3603 return (err);
3603 3604 }
3604 3605 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3605 3606 if (result < MIN_TX_INTR_DELAY ||
3606 3607 result > MAX_TX_INTR_DELAY)
3607 3608 err = EINVAL;
3608 3609 else {
3609 3610 Adapter->tx_intr_delay = (uint32_t)result;
3610 3611 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3611 3612 if (e1000g_check_acc_handle(
3612 3613 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3613 3614 ddi_fm_service_impact(Adapter->dip,
3614 3615 DDI_SERVICE_DEGRADED);
3615 3616 err = EIO;
3616 3617 }
3617 3618 }
3618 3619 return (err);
3619 3620 }
3620 3621 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3621 3622 if (pr_val == NULL) {
3622 3623 err = EINVAL;
3623 3624 return (err);
3624 3625 }
3625 3626 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3626 3627 if (result < MIN_TX_INTR_ABS_DELAY ||
3627 3628 result > MAX_TX_INTR_ABS_DELAY)
3628 3629 err = EINVAL;
3629 3630 else {
3630 3631 Adapter->tx_intr_abs_delay = (uint32_t)result;
3631 3632 E1000_WRITE_REG(hw, E1000_TADV,
3632 3633 Adapter->tx_intr_abs_delay);
3633 3634 if (e1000g_check_acc_handle(
3634 3635 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3635 3636 ddi_fm_service_impact(Adapter->dip,
3636 3637 DDI_SERVICE_DEGRADED);
3637 3638 err = EIO;
3638 3639 }
3639 3640 }
3640 3641 return (err);
3641 3642 }
3642 3643 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3643 3644 if (pr_val == NULL) {
3644 3645 err = EINVAL;
3645 3646 return (err);
3646 3647 }
3647 3648 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3648 3649 if (result < MIN_RX_BCOPY_THRESHOLD ||
3649 3650 result > MAX_RX_BCOPY_THRESHOLD)
3650 3651 err = EINVAL;
3651 3652 else
3652 3653 Adapter->rx_bcopy_thresh = (uint32_t)result;
3653 3654 return (err);
3654 3655 }
3655 3656 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3656 3657 if (pr_val == NULL) {
3657 3658 err = EINVAL;
3658 3659 return (err);
3659 3660 }
3660 3661 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3661 3662 if (result < MIN_RX_LIMIT_ON_INTR ||
3662 3663 result > MAX_RX_LIMIT_ON_INTR)
3663 3664 err = EINVAL;
3664 3665 else
3665 3666 Adapter->rx_limit_onintr = (uint32_t)result;
3666 3667 return (err);
3667 3668 }
3668 3669 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3669 3670 if (pr_val == NULL) {
3670 3671 err = EINVAL;
3671 3672 return (err);
3672 3673 }
3673 3674 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3674 3675 if (result < MIN_RX_INTR_DELAY ||
3675 3676 result > MAX_RX_INTR_DELAY)
3676 3677 err = EINVAL;
3677 3678 else {
3678 3679 Adapter->rx_intr_delay = (uint32_t)result;
3679 3680 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3680 3681 if (e1000g_check_acc_handle(
3681 3682 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3682 3683 ddi_fm_service_impact(Adapter->dip,
3683 3684 DDI_SERVICE_DEGRADED);
3684 3685 err = EIO;
3685 3686 }
3686 3687 }
3687 3688 return (err);
3688 3689 }
3689 3690 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3690 3691 if (pr_val == NULL) {
3691 3692 err = EINVAL;
3692 3693 return (err);
3693 3694 }
3694 3695 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3695 3696 if (result < MIN_RX_INTR_ABS_DELAY ||
3696 3697 result > MAX_RX_INTR_ABS_DELAY)
3697 3698 err = EINVAL;
3698 3699 else {
3699 3700 Adapter->rx_intr_abs_delay = (uint32_t)result;
3700 3701 E1000_WRITE_REG(hw, E1000_RADV,
3701 3702 Adapter->rx_intr_abs_delay);
3702 3703 if (e1000g_check_acc_handle(
3703 3704 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3704 3705 ddi_fm_service_impact(Adapter->dip,
3705 3706 DDI_SERVICE_DEGRADED);
3706 3707 err = EIO;
3707 3708 }
3708 3709 }
3709 3710 return (err);
3710 3711 }
3711 3712 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3712 3713 if (pr_val == NULL) {
3713 3714 err = EINVAL;
3714 3715 return (err);
3715 3716 }
3716 3717 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3717 3718 if (result < MIN_INTR_THROTTLING ||
3718 3719 result > MAX_INTR_THROTTLING)
3719 3720 err = EINVAL;
3720 3721 else {
3721 3722 if (hw->mac.type >= e1000_82540) {
3722 3723 Adapter->intr_throttling_rate =
3723 3724 (uint32_t)result;
3724 3725 E1000_WRITE_REG(hw, E1000_ITR,
3725 3726 Adapter->intr_throttling_rate);
3726 3727 if (e1000g_check_acc_handle(
3727 3728 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3728 3729 ddi_fm_service_impact(Adapter->dip,
3729 3730 DDI_SERVICE_DEGRADED);
3730 3731 err = EIO;
3731 3732 }
3732 3733 } else
3733 3734 err = EINVAL;
3734 3735 }
3735 3736 return (err);
3736 3737 }
3737 3738 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3738 3739 if (pr_val == NULL) {
3739 3740 err = EINVAL;
3740 3741 return (err);
3741 3742 }
3742 3743 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3743 3744 if (result < 0 || result > 1)
3744 3745 err = EINVAL;
3745 3746 else {
3746 3747 if (hw->mac.type >= e1000_82540) {
3747 3748 Adapter->intr_adaptive = (result == 1) ?
3748 3749 B_TRUE : B_FALSE;
3749 3750 } else {
3750 3751 err = EINVAL;
3751 3752 }
3752 3753 }
3753 3754 return (err);
3754 3755 }
3755 3756 return (ENOTSUP);
3756 3757 }
3757 3758
3758 3759 static int
3759 3760 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3760 3761 uint_t pr_valsize, void *pr_val)
3761 3762 {
3762 3763 int err = ENOTSUP;
3763 3764 int value;
3764 3765
3765 3766 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3766 3767 value = Adapter->param_adv_pause;
3767 3768 err = 0;
3768 3769 goto done;
3769 3770 }
3770 3771 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3771 3772 value = Adapter->param_adv_asym_pause;
3772 3773 err = 0;
3773 3774 goto done;
3774 3775 }
3775 3776 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3776 3777 value = Adapter->tx_bcopy_thresh;
3777 3778 err = 0;
3778 3779 goto done;
3779 3780 }
3780 3781 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3781 3782 value = Adapter->tx_intr_enable;
3782 3783 err = 0;
3783 3784 goto done;
3784 3785 }
3785 3786 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3786 3787 value = Adapter->tx_intr_delay;
3787 3788 err = 0;
3788 3789 goto done;
3789 3790 }
3790 3791 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3791 3792 value = Adapter->tx_intr_abs_delay;
3792 3793 err = 0;
3793 3794 goto done;
3794 3795 }
3795 3796 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3796 3797 value = Adapter->rx_bcopy_thresh;
3797 3798 err = 0;
3798 3799 goto done;
3799 3800 }
3800 3801 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3801 3802 value = Adapter->rx_limit_onintr;
3802 3803 err = 0;
3803 3804 goto done;
3804 3805 }
3805 3806 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3806 3807 value = Adapter->rx_intr_delay;
3807 3808 err = 0;
3808 3809 goto done;
3809 3810 }
3810 3811 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3811 3812 value = Adapter->rx_intr_abs_delay;
3812 3813 err = 0;
3813 3814 goto done;
3814 3815 }
3815 3816 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3816 3817 value = Adapter->intr_throttling_rate;
3817 3818 err = 0;
3818 3819 goto done;
3819 3820 }
3820 3821 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3821 3822 value = Adapter->intr_adaptive;
3822 3823 err = 0;
3823 3824 goto done;
3824 3825 }
3825 3826 done:
3826 3827 if (err == 0) {
3827 3828 (void) snprintf(pr_val, pr_valsize, "%d", value);
3828 3829 }
3829 3830 return (err);
3830 3831 }
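
/*
 * Illustrative note (hypothetical example values, standard dladm syntax): the
 * private properties handled by e1000g_set_priv_prop() and
 * e1000g_get_priv_prop() are the underscore-prefixed names above and are
 * reachable from userland like any other link property, e.g.
 *
 *	# dladm show-linkprop -p _tx_intr_delay e1000g0
 *	# dladm set-linkprop -p _tx_intr_delay=64 e1000g0
 *
 * Values travel as strings, which is why the setters parse with ddi_strtol()
 * and the getters format with snprintf().
 */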
3831 3832
3832 3833 /*
3833 3834 * e1000g_get_conf - get configurations set in e1000g.conf
3834 3835 * This routine gets user-configured values out of the configuration
3835 3836 * file e1000g.conf.
3836 3837 *
3837 3838 * For each configurable value, there is a minimum, a maximum, and a
3838 3839 * default.
3839 3840  * If the user does not configure a value, use the default.
3840 3841  * If the user configures below the minimum, use the minimum.
3841 3842  * If the user configures above the maximum, use the maximum.
3842 3843 */
3843 3844 static void
3844 3845 e1000g_get_conf(struct e1000g *Adapter)
3845 3846 {
3846 3847 struct e1000_hw *hw = &Adapter->shared;
3847 3848 boolean_t tbi_compatibility = B_FALSE;
3848 3849 boolean_t is_jumbo = B_FALSE;
3849 3850 int propval;
3850 3851 /*
3851 3852 * decrease the number of descriptors and free packets
3852 3853 * for jumbo frames to reduce tx/rx resource consumption
3853 3854 */
3854 3855 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
3855 3856 is_jumbo = B_TRUE;
3856 3857 }
3857 3858
3858 3859 /*
3859 3860 * get each configurable property from e1000g.conf
3860 3861 */
3861 3862
3862 3863 /*
3863 3864 * NumTxDescriptors
3864 3865 */
3865 3866 Adapter->tx_desc_num_flag =
3866 3867 e1000g_get_prop(Adapter, "NumTxDescriptors",
3867 3868 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3868 3869 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
3869 3870 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
3870 3871 Adapter->tx_desc_num = propval;
3871 3872
3872 3873 /*
3873 3874 * NumRxDescriptors
3874 3875 */
3875 3876 Adapter->rx_desc_num_flag =
3876 3877 e1000g_get_prop(Adapter, "NumRxDescriptors",
3877 3878 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3878 3879 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
3879 3880 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
3880 3881 Adapter->rx_desc_num = propval;
3881 3882
3882 3883 /*
3883 3884 * NumRxFreeList
3884 3885 */
3885 3886 Adapter->rx_buf_num_flag =
3886 3887 e1000g_get_prop(Adapter, "NumRxFreeList",
3887 3888 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3888 3889 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
3889 3890 : DEFAULT_NUM_RX_FREELIST, &propval);
3890 3891 Adapter->rx_freelist_limit = propval;
3891 3892
3892 3893 /*
3893 3894 * NumTxPacketList
3894 3895 */
3895 3896 Adapter->tx_buf_num_flag =
3896 3897 e1000g_get_prop(Adapter, "NumTxPacketList",
3897 3898 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3898 3899 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
3899 3900 : DEFAULT_NUM_TX_FREELIST, &propval);
3900 3901 Adapter->tx_freelist_num = propval;
3901 3902
3902 3903 /*
3903 3904 * FlowControl
3904 3905 */
3905 3906 hw->fc.send_xon = B_TRUE;
3906 3907 (void) e1000g_get_prop(Adapter, "FlowControl",
3907 3908 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
3908 3909 hw->fc.requested_mode = propval;
3909 3910 /* 4 is the setting that says "let the eeprom decide" */
3910 3911 if (hw->fc.requested_mode == 4)
3911 3912 hw->fc.requested_mode = e1000_fc_default;
3912 3913
3913 3914 /*
3914 3915 * Max Num Receive Packets on Interrupt
3915 3916 */
3916 3917 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3917 3918 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3918 3919 DEFAULT_RX_LIMIT_ON_INTR, &propval);
3919 3920 Adapter->rx_limit_onintr = propval;
3920 3921
3921 3922 /*
3922 3923 * PHY master slave setting
3923 3924 */
3924 3925 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
3925 3926 e1000_ms_hw_default, e1000_ms_auto,
3926 3927 e1000_ms_hw_default, &propval);
3927 3928 hw->phy.ms_type = propval;
3928 3929
3929 3930 /*
3930 3931 * Parameter which controls TBI mode workaround, which is only
3931 3932 * needed on certain switches such as Cisco 6500/Foundry
3932 3933 */
3933 3934 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3934 3935 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
3935 3936 tbi_compatibility = (propval == 1);
3936 3937 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3937 3938
3938 3939 /*
3939 3940 * MSI Enable
3940 3941 */
3941 3942 (void) e1000g_get_prop(Adapter, "MSIEnable",
3942 3943 0, 1, DEFAULT_MSI_ENABLE, &propval);
3943 3944 Adapter->msi_enable = (propval == 1);
3944 3945
3945 3946 /*
3946 3947 * Interrupt Throttling Rate
3947 3948 */
3948 3949 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
3949 3950 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3950 3951 DEFAULT_INTR_THROTTLING, &propval);
3951 3952 Adapter->intr_throttling_rate = propval;
3952 3953
3953 3954 /*
3954 3955 * Adaptive Interrupt Blanking Enable/Disable
3955 3956 * It is enabled by default
3956 3957 */
3957 3958 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
3958 3959 &propval);
3959 3960 Adapter->intr_adaptive = (propval == 1);
3960 3961
3961 3962 /*
3962 3963 * Hardware checksum enable/disable parameter
3963 3964 */
3964 3965 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
3965 3966 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
3966 3967 Adapter->tx_hcksum_enable = (propval == 1);
3967 3968 /*
3968 3969 * Checksum on/off selection via global parameters.
3969 3970 *
3970 3971 * If the chip is flagged as not capable of (correctly)
3971 3972 * handling checksumming, we don't enable it on either
3972 3973 * Rx or Tx side. Otherwise, we take this chip's settings
3973 3974 * from the patchable global defaults.
3974 3975 *
3975 3976 * We advertise our capabilities only if TX offload is
3976 3977 * enabled. On receive, the stack will accept checksummed
3977 3978 * packets anyway, even if we haven't said we can deliver
3978 3979 * them.
3979 3980 */
3980 3981 switch (hw->mac.type) {
3981 3982 case e1000_82540:
3982 3983 case e1000_82544:
3983 3984 case e1000_82545:
3984 3985 case e1000_82545_rev_3:
3985 3986 case e1000_82546:
3986 3987 case e1000_82546_rev_3:
3987 3988 case e1000_82571:
3988 3989 case e1000_82572:
3989 3990 case e1000_82573:
3990 3991 case e1000_80003es2lan:
3991 3992 break;
3992 3993 /*
3993 3994 * For the following Intel PRO/1000 chipsets, we have not
3994 3995 * tested the hardware checksum offload capability, so we
3995 3996 * disable the capability for them.
3996 3997 * e1000_82542,
3997 3998 * e1000_82543,
3998 3999 * e1000_82541,
3999 4000 * e1000_82541_rev_2,
4000 4001 * e1000_82547,
4001 4002 * e1000_82547_rev_2,
4002 4003 */
4003 4004 default:
4004 4005 Adapter->tx_hcksum_enable = B_FALSE;
4005 4006 }
4006 4007
4007 4008 /*
4008 4009 	 * Large Send Offloading (LSO) Enable/Disable
4009 4010 * If the tx hardware checksum is not enabled, LSO should be
4010 4011 * disabled.
4011 4012 */
4012 4013 (void) e1000g_get_prop(Adapter, "lso_enable",
4013 4014 0, 1, DEFAULT_LSO_ENABLE, &propval);
4014 4015 Adapter->lso_enable = (propval == 1);
4015 4016
4016 4017 switch (hw->mac.type) {
4017 4018 case e1000_82546:
4018 4019 case e1000_82546_rev_3:
4019 4020 if (Adapter->lso_enable)
4020 4021 Adapter->lso_premature_issue = B_TRUE;
4021 4022 /* FALLTHRU */
4022 4023 case e1000_82571:
4023 4024 case e1000_82572:
4024 4025 case e1000_82573:
4025 4026 case e1000_80003es2lan:
4026 4027 break;
4027 4028 default:
4028 4029 Adapter->lso_enable = B_FALSE;
4029 4030 }
4030 4031
4031 4032 if (!Adapter->tx_hcksum_enable) {
4032 4033 Adapter->lso_premature_issue = B_FALSE;
4033 4034 Adapter->lso_enable = B_FALSE;
4034 4035 }
4035 4036
4036 4037 /*
4037 4038 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
4038 4039 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4039 4040 	 * will not cross a 64k boundary.
4040 4041 */
4041 4042 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4042 4043 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4043 4044 Adapter->mem_workaround_82546 = (propval == 1);
4044 4045
4045 4046 /*
4046 4047 * Max number of multicast addresses
4047 4048 */
4048 4049 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4049 4050 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4050 4051 &propval);
4051 4052 Adapter->mcast_max_num = propval;
4052 4053 }
4053 4054
4054 4055 /*
4055 4056 * e1000g_get_prop - routine to read properties
4056 4057 *
4057 4058  * Get a user-configured property value out of the configuration
4058 4059 * file e1000g.conf.
4059 4060 *
4060 4061 * Caller provides name of the property, a default value, a minimum
4061 4062 * value, a maximum value and a pointer to the returned property
4062 4063 * value.
4063 4064 *
4064 4065 * Return B_TRUE if the configured value of the property is not a default
4065 4066 * value, otherwise return B_FALSE.
4066 4067 */
4067 4068 static boolean_t
4068 4069 e1000g_get_prop(struct e1000g *Adapter, /* pointer to per-adapter structure */
4069 4070 char *propname, /* name of the property */
4070 4071 int minval, /* minimum acceptable value */
4071 4072     int maxval, /* maximum acceptable value */
4072 4073 int defval, /* default value */
4073 4074     int *propvalue) /* property value returned to caller */
4074 4075 {
4075 4076 int propval; /* value returned for requested property */
4076 4077 	int *props; /* pointer to array of properties returned */
4077 4078 	uint_t nprops; /* number of property values returned */
4078 4079 boolean_t ret = B_TRUE;
4079 4080
4080 4081 /*
4081 4082 * get the array of properties from the config file
4082 4083 */
4083 4084 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4084 4085 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4085 4086 /* got some properties, test if we got enough */
4086 4087 if (Adapter->instance < nprops) {
4087 4088 propval = props[Adapter->instance];
4088 4089 } else {
4089 4090 /* not enough properties configured */
4090 4091 propval = defval;
4091 4092 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4092 4093 "Not Enough %s values found in e1000g.conf"
4093 4094 " - set to %d\n",
4094 4095 propname, propval);
4095 4096 ret = B_FALSE;
4096 4097 }
4097 4098
4098 4099 /* free memory allocated for properties */
4099 4100 ddi_prop_free(props);
4100 4101
4101 4102 } else {
4102 4103 propval = defval;
4103 4104 ret = B_FALSE;
4104 4105 }
4105 4106
4106 4107 /*
4107 4108 * enforce limits
4108 4109 */
4109 4110 if (propval > maxval) {
4110 4111 propval = maxval;
4111 4112 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4112 4113 "Too High %s value in e1000g.conf - set to %d\n",
4113 4114 propname, propval);
4114 4115 }
4115 4116
4116 4117 if (propval < minval) {
4117 4118 propval = minval;
4118 4119 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4119 4120 "Too Low %s value in e1000g.conf - set to %d\n",
4120 4121 propname, propval);
4121 4122 }
4122 4123
4123 4124 *propvalue = propval;
4124 4125 return (ret);
4125 4126 }
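
/*
 * Illustrative note (hypothetical entry): ddi_prop_lookup_int_array() returns
 * one integer per configured instance, so an e1000g.conf line such as
 *
 *	NumTxDescriptors=2048,2048;
 *
 * would apply 2048 to instances 0 and 1, while instance 2 and above would fall
 * back to the caller-supplied default (and this routine would return B_FALSE
 * for them).
 */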
4126 4127
4127 4128 static boolean_t
4128 4129 e1000g_link_check(struct e1000g *Adapter)
4129 4130 {
4130 4131 uint16_t speed, duplex, phydata;
4131 4132 boolean_t link_changed = B_FALSE;
4132 4133 struct e1000_hw *hw;
4133 4134 uint32_t reg_tarc;
4134 4135
4135 4136 hw = &Adapter->shared;
4136 4137
4137 4138 if (e1000g_link_up(Adapter)) {
4138 4139 /*
4139 4140 		 * The link is up; check whether it was marked as down earlier
4140 4141 */
4141 4142 if (Adapter->link_state != LINK_STATE_UP) {
4142 4143 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4143 4144 Adapter->link_speed = speed;
4144 4145 Adapter->link_duplex = duplex;
4145 4146 Adapter->link_state = LINK_STATE_UP;
4146 4147 link_changed = B_TRUE;
4147 4148
4148 4149 if (Adapter->link_speed == SPEED_1000)
4149 4150 Adapter->stall_threshold = TX_STALL_TIME_2S;
4150 4151 else
4151 4152 Adapter->stall_threshold = TX_STALL_TIME_8S;
4152 4153
4153 4154 Adapter->tx_link_down_timeout = 0;
4154 4155
4155 4156 if ((hw->mac.type == e1000_82571) ||
4156 4157 (hw->mac.type == e1000_82572)) {
4157 4158 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4158 4159 if (speed == SPEED_1000)
4159 4160 reg_tarc |= (1 << 21);
4160 4161 else
4161 4162 reg_tarc &= ~(1 << 21);
4162 4163 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4163 4164 }
4164 4165 }
4165 4166 Adapter->smartspeed = 0;
4166 4167 } else {
4167 4168 if (Adapter->link_state != LINK_STATE_DOWN) {
4168 4169 Adapter->link_speed = 0;
4169 4170 Adapter->link_duplex = 0;
4170 4171 Adapter->link_state = LINK_STATE_DOWN;
4171 4172 link_changed = B_TRUE;
4172 4173
4173 4174 /*
4174 4175 			 * SmartSpeed workaround for Tabor/TanaX: when the
4175 4176 			 * driver loses link, disable auto master/slave
4176 4177 			 * resolution.
4177 4178 */
4178 4179 if (hw->phy.type == e1000_phy_igp) {
4179 4180 (void) e1000_read_phy_reg(hw,
4180 4181 PHY_1000T_CTRL, &phydata);
4181 4182 phydata |= CR_1000T_MS_ENABLE;
4182 4183 (void) e1000_write_phy_reg(hw,
4183 4184 PHY_1000T_CTRL, phydata);
4184 4185 }
4185 4186 } else {
4186 4187 e1000g_smartspeed(Adapter);
4187 4188 }
4188 4189
4189 4190 if (Adapter->e1000g_state & E1000G_STARTED) {
4190 4191 if (Adapter->tx_link_down_timeout <
4191 4192 MAX_TX_LINK_DOWN_TIMEOUT) {
4192 4193 Adapter->tx_link_down_timeout++;
4193 4194 } else if (Adapter->tx_link_down_timeout ==
4194 4195 MAX_TX_LINK_DOWN_TIMEOUT) {
4195 4196 e1000g_tx_clean(Adapter);
4196 4197 Adapter->tx_link_down_timeout++;
4197 4198 }
4198 4199 }
4199 4200 }
4200 4201
4201 4202 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4202 4203 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4203 4204
4204 4205 return (link_changed);
4205 4206 }
4206 4207
4207 4208 /*
4208 4209 * e1000g_reset_link - Using the link properties to setup the link
4209 4210 */
4210 4211 int
4211 4212 e1000g_reset_link(struct e1000g *Adapter)
4212 4213 {
4213 4214 struct e1000_mac_info *mac;
4214 4215 struct e1000_phy_info *phy;
4215 4216 struct e1000_hw *hw;
4216 4217 boolean_t invalid;
4217 4218
4218 4219 mac = &Adapter->shared.mac;
4219 4220 phy = &Adapter->shared.phy;
4220 4221 hw = &Adapter->shared;
4221 4222 invalid = B_FALSE;
4222 4223
4223 4224 if (hw->phy.media_type != e1000_media_type_copper)
4224 4225 goto out;
4225 4226
4226 4227 if (Adapter->param_adv_autoneg == 1) {
4227 4228 mac->autoneg = B_TRUE;
4228 4229 phy->autoneg_advertised = 0;
4229 4230
4230 4231 /*
4231 4232 * 1000hdx is not supported for autonegotiation
4232 4233 */
4233 4234 if (Adapter->param_adv_1000fdx == 1)
4234 4235 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4235 4236
4236 4237 if (Adapter->param_adv_100fdx == 1)
4237 4238 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4238 4239
4239 4240 if (Adapter->param_adv_100hdx == 1)
4240 4241 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4241 4242
4242 4243 if (Adapter->param_adv_10fdx == 1)
4243 4244 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4244 4245
4245 4246 if (Adapter->param_adv_10hdx == 1)
4246 4247 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4247 4248
4248 4249 if (phy->autoneg_advertised == 0)
4249 4250 invalid = B_TRUE;
4250 4251 } else {
4251 4252 mac->autoneg = B_FALSE;
4252 4253
4253 4254 /*
4254 4255 * For Intel copper cards, 1000fdx and 1000hdx are not
4255 4256 * supported for forced link
4256 4257 */
4257 4258 if (Adapter->param_adv_100fdx == 1)
4258 4259 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4259 4260 else if (Adapter->param_adv_100hdx == 1)
4260 4261 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4261 4262 else if (Adapter->param_adv_10fdx == 1)
4262 4263 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4263 4264 else if (Adapter->param_adv_10hdx == 1)
4264 4265 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4265 4266 else
4266 4267 invalid = B_TRUE;
4267 4268
4268 4269 }
4269 4270
4270 4271 if (invalid) {
4271 4272 e1000g_log(Adapter, CE_WARN,
4272 4273 "Invalid link settings. Setup link to "
4273 4274 "support autonegotiation with all link capabilities.");
4274 4275 mac->autoneg = B_TRUE;
4275 4276 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4276 4277 }
4277 4278
4278 4279 out:
4279 4280 return (e1000_setup_link(&Adapter->shared));
4280 4281 }
4281 4282
4282 4283 static void
4283 4284 e1000g_timer_tx_resched(struct e1000g *Adapter)
4284 4285 {
4285 4286 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4286 4287
4287 4288 rw_enter(&Adapter->chip_lock, RW_READER);
4288 4289
4289 4290 if (tx_ring->resched_needed &&
4290 4291 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4291 4292 drv_usectohz(1000000)) &&
4292 4293 (Adapter->e1000g_state & E1000G_STARTED) &&
4293 4294 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4294 4295 tx_ring->resched_needed = B_FALSE;
4295 4296 mac_tx_update(Adapter->mh);
4296 4297 E1000G_STAT(tx_ring->stat_reschedule);
4297 4298 E1000G_STAT(tx_ring->stat_timer_reschedule);
4298 4299 }
4299 4300
4300 4301 rw_exit(&Adapter->chip_lock);
4301 4302 }
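
/*
 * Note: drv_usectohz(1000000) is one second expressed in clock ticks, so a
 * blocked transmit path is only rescheduled from the watchdog when it has been
 * pending for more than roughly one second and enough transmit descriptors
 * (at least DEFAULT_TX_NO_RESOURCE) have been reclaimed.
 */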
4302 4303
4303 4304 static void
4304 4305 e1000g_local_timer(void *ws)
4305 4306 {
4306 4307 struct e1000g *Adapter = (struct e1000g *)ws;
4307 4308 struct e1000_hw *hw;
4308 4309 e1000g_ether_addr_t ether_addr;
4309 4310 boolean_t link_changed;
4310 4311
4311 4312 hw = &Adapter->shared;
4312 4313
4313 4314 if (Adapter->e1000g_state & E1000G_ERROR) {
4314 4315 rw_enter(&Adapter->chip_lock, RW_WRITER);
4315 4316 Adapter->e1000g_state &= ~E1000G_ERROR;
4316 4317 rw_exit(&Adapter->chip_lock);
4317 4318
4318 4319 Adapter->reset_count++;
4319 4320 if (e1000g_global_reset(Adapter)) {
4320 4321 ddi_fm_service_impact(Adapter->dip,
4321 4322 DDI_SERVICE_RESTORED);
4322 4323 e1000g_timer_tx_resched(Adapter);
4323 4324 } else
4324 4325 ddi_fm_service_impact(Adapter->dip,
4325 4326 DDI_SERVICE_LOST);
4326 4327 return;
4327 4328 }
4328 4329
4329 4330 if (e1000g_stall_check(Adapter)) {
4330 4331 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4331 4332 "Tx stall detected. Activate automatic recovery.\n");
4332 4333 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4333 4334 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4334 4335 Adapter->reset_count++;
4335 4336 if (e1000g_reset_adapter(Adapter)) {
4336 4337 ddi_fm_service_impact(Adapter->dip,
4337 4338 DDI_SERVICE_RESTORED);
4338 4339 e1000g_timer_tx_resched(Adapter);
4339 4340 }
4340 4341 return;
4341 4342 }
4342 4343
4343 4344 link_changed = B_FALSE;
4344 4345 rw_enter(&Adapter->chip_lock, RW_READER);
4345 4346 if (Adapter->link_complete)
4346 4347 link_changed = e1000g_link_check(Adapter);
4347 4348 rw_exit(&Adapter->chip_lock);
4348 4349
4349 4350 if (link_changed) {
4350 4351 if (!Adapter->reset_flag &&
4351 4352 (Adapter->e1000g_state & E1000G_STARTED) &&
4352 4353 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4353 4354 mac_link_update(Adapter->mh, Adapter->link_state);
4354 4355 if (Adapter->link_state == LINK_STATE_UP)
4355 4356 Adapter->reset_flag = B_FALSE;
4356 4357 }
4357 4358 /*
4358 4359 * Workaround for esb2. Data stuck in fifo on a link
4359 4360 * down event. Reset the adapter to recover it.
4360 4361 */
4361 4362 if (Adapter->esb2_workaround) {
4362 4363 Adapter->esb2_workaround = B_FALSE;
4363 4364 (void) e1000g_reset_adapter(Adapter);
4364 4365 return;
4365 4366 }
4366 4367
4367 4368 /*
4368 4369 * With 82571 controllers, any locally administered address will
4369 4370 * be overwritten when there is a reset on the other port.
4370 4371 * Detect this circumstance and correct it.
4371 4372 */
4372 4373 if ((hw->mac.type == e1000_82571) &&
4373 4374 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4374 4375 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4375 4376 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4376 4377
4377 4378 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4378 4379 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4379 4380
4380 4381 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4381 4382 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4382 4383 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4383 4384 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4384 4385 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4385 4386 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4386 4387 e1000_rar_set(hw, hw->mac.addr, 0);
4387 4388 }
4388 4389 }
4389 4390
4390 4391 /*
4391 4392 * Long TTL workaround for 82541/82547
4392 4393 */
4393 4394 (void) e1000_igp_ttl_workaround_82547(hw);
4394 4395
4395 4396 /*
4396 4397 	 * Check for Adaptive IFS settings. If there are lots of collisions,
4397 4398 	 * change the value in steps.
4398 4399 	 * These properties should only be set for 10/100.
4399 4400 */
4400 4401 if ((hw->phy.media_type == e1000_media_type_copper) &&
4401 4402 ((Adapter->link_speed == SPEED_100) ||
4402 4403 (Adapter->link_speed == SPEED_10))) {
4403 4404 e1000_update_adaptive(hw);
4404 4405 }
4405 4406 /*
4406 4407 * Set Timer Interrupts
4407 4408 */
4408 4409 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4409 4410
4410 4411 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4411 4412 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4412 4413 else
4413 4414 e1000g_timer_tx_resched(Adapter);
4414 4415
4415 4416 restart_watchdog_timer(Adapter);
4416 4417 }
4417 4418
4418 4419 /*
4419 4420 * The function e1000g_link_timer() is called when the timer for link setup
4420 4421  * expires, which indicates the completion of the link setup. The link
4421 4422 * state will not be updated until the link setup is completed. And the
4422 4423 * link state will not be sent to the upper layer through mac_link_update()
4423 4424 * in this function. It will be updated in the local timer routine or the
4424 4425 * interrupt service routine after the interface is started (plumbed).
4425 4426 */
4426 4427 static void
4427 4428 e1000g_link_timer(void *arg)
4428 4429 {
4429 4430 struct e1000g *Adapter = (struct e1000g *)arg;
4430 4431
4431 4432 mutex_enter(&Adapter->link_lock);
4432 4433 Adapter->link_complete = B_TRUE;
4433 4434 Adapter->link_tid = 0;
4434 4435 mutex_exit(&Adapter->link_lock);
4435 4436 }
4436 4437
4437 4438 /*
4438 4439 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4439 4440 *
4440 4441  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4441 4442  * and also for 1000 Mbps speeds from the e1000g.conf file.
4442 4443 */
4443 4444 static void
4444 4445 e1000g_force_speed_duplex(struct e1000g *Adapter)
4445 4446 {
4446 4447 int forced;
4447 4448 int propval;
4448 4449 struct e1000_mac_info *mac = &Adapter->shared.mac;
4449 4450 struct e1000_phy_info *phy = &Adapter->shared.phy;
4450 4451
4451 4452 /*
4452 4453 * get value out of config file
4453 4454 */
4454 4455 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4455 4456 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4456 4457
4457 4458 switch (forced) {
4458 4459 case GDIAG_10_HALF:
4459 4460 /*
4460 4461 * Disable Auto Negotiation
4461 4462 */
4462 4463 mac->autoneg = B_FALSE;
4463 4464 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4464 4465 break;
4465 4466 case GDIAG_10_FULL:
4466 4467 /*
4467 4468 * Disable Auto Negotiation
4468 4469 */
4469 4470 mac->autoneg = B_FALSE;
4470 4471 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4471 4472 break;
4472 4473 case GDIAG_100_HALF:
4473 4474 /*
4474 4475 * Disable Auto Negotiation
4475 4476 */
4476 4477 mac->autoneg = B_FALSE;
4477 4478 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4478 4479 break;
4479 4480 case GDIAG_100_FULL:
4480 4481 /*
4481 4482 * Disable Auto Negotiation
4482 4483 */
4483 4484 mac->autoneg = B_FALSE;
4484 4485 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4485 4486 break;
4486 4487 case GDIAG_1000_FULL:
4487 4488 /*
4488 4489 * The gigabit spec requires autonegotiation. Therefore,
4489 4490 * when the user wants to force the speed to 1000Mbps, we
4490 4491 		 * enable AutoNeg, but only allow the hardware to advertise
4491 4492 * 1000Mbps. This is different from 10/100 operation, where
4492 4493 * we are allowed to link without any negotiation.
4493 4494 */
4494 4495 mac->autoneg = B_TRUE;
4495 4496 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4496 4497 break;
4497 4498 default: /* obey the setting of AutoNegAdvertised */
4498 4499 mac->autoneg = B_TRUE;
4499 4500 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4500 4501 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4501 4502 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4502 4503 phy->autoneg_advertised = (uint16_t)propval;
4503 4504 break;
4504 4505 } /* switch */
4505 4506 }
4506 4507
4507 4508 /*
4508 4509 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4509 4510 *
4510 4511 * This function reads MaxFrameSize from e1000g.conf
4511 4512 */
4512 4513 static void
4513 4514 e1000g_get_max_frame_size(struct e1000g *Adapter)
4514 4515 {
4515 4516 int max_frame;
4516 4517
4517 4518 /*
4518 4519 * get value out of config file
4519 4520 */
4520 4521 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4521 4522 &max_frame);
4522 4523
4523 4524 switch (max_frame) {
4524 4525 case 0:
4525 4526 Adapter->default_mtu = ETHERMTU;
4526 4527 break;
4527 4528 case 1:
4528 4529 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4529 4530 sizeof (struct ether_vlan_header) - ETHERFCSL;
4530 4531 break;
4531 4532 case 2:
4532 4533 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4533 4534 sizeof (struct ether_vlan_header) - ETHERFCSL;
4534 4535 break;
4535 4536 case 3:
4536 4537 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4537 4538 sizeof (struct ether_vlan_header) - ETHERFCSL;
4538 4539 break;
4539 4540 default:
4540 4541 Adapter->default_mtu = ETHERMTU;
4541 4542 break;
4542 4543 } /* switch */
4543 4544
4544 4545 /*
4545 4546 	 * If the user-configured MTU is larger than the device's maximum MTU,
4546 4547 	 * the MTU is set to the device's maximum value.
4547 4548 */
4548 4549 if (Adapter->default_mtu > Adapter->max_mtu)
4549 4550 Adapter->default_mtu = Adapter->max_mtu;
4550 4551
4551 4552 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4552 4553 }
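
/*
 * Illustrative note (hypothetical entry): an e1000g.conf line such as
 *
 *	MaxFrameSize=2;
 *
 * selects a default MTU of FRAME_SIZE_UPTO_8K minus the VLAN header and FCS
 * overhead for instance 0; the result is then capped by the device-specific
 * max_mtu as enforced above.
 */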
4553 4554
4554 4555 /*
4555 4556 * e1000g_pch_limits - Apply limits of the PCH silicon type
4556 4557 *
4557 4558 * At any frame size larger than the ethernet default,
4558 4559 * prevent linking at 10/100 speeds.
4559 4560 */
4560 4561 static void
4561 4562 e1000g_pch_limits(struct e1000g *Adapter)
4562 4563 {
4563 4564 struct e1000_hw *hw = &Adapter->shared;
4564 4565
4565 4566 /* only applies to PCH silicon type */
4566 4567 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4567 4568 return;
4568 4569
4569 4570 /* only applies to frames larger than ethernet default */
4570 4571 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4571 4572 hw->mac.autoneg = B_TRUE;
4572 4573 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4573 4574
4574 4575 Adapter->param_adv_autoneg = 1;
4575 4576 Adapter->param_adv_1000fdx = 1;
4576 4577
4577 4578 Adapter->param_adv_100fdx = 0;
4578 4579 Adapter->param_adv_100hdx = 0;
4579 4580 Adapter->param_adv_10fdx = 0;
4580 4581 Adapter->param_adv_10hdx = 0;
4581 4582
4582 4583 e1000g_param_sync(Adapter);
4583 4584 }
4584 4585 }
4585 4586
4586 4587 /*
4587 4588 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4588 4589 */
4589 4590 static uint32_t
4590 4591 e1000g_mtu2maxframe(uint32_t mtu)
4591 4592 {
4592 4593 uint32_t maxframe;
4593 4594
4594 4595 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4595 4596
4596 4597 return (maxframe);
4597 4598 }
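
/*
 * Worked example, assuming the usual sizes on this platform
 * (sizeof (struct ether_vlan_header) == 18 and ETHERFCSL == 4): the standard
 * MTU of 1500 maps to a maximum frame size of 1500 + 18 + 4 = 1522 bytes.
 */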
4598 4599
4599 4600 static void
4600 4601 arm_watchdog_timer(struct e1000g *Adapter)
4601 4602 {
4602 4603 Adapter->watchdog_tid =
4603 4604 timeout(e1000g_local_timer,
4604 4605 (void *)Adapter, 1 * drv_usectohz(1000000));
4605 4606 }
4606 4607 #pragma inline(arm_watchdog_timer)
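
/*
 * Note: the timeout interval is 1 * drv_usectohz(1000000), i.e. one second,
 * so e1000g_local_timer() runs roughly once per second for as long as the
 * watchdog keeps re-arming itself via restart_watchdog_timer().
 */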
4607 4608
4608 4609 static void
4609 4610 enable_watchdog_timer(struct e1000g *Adapter)
4610 4611 {
4611 4612 mutex_enter(&Adapter->watchdog_lock);
4612 4613
4613 4614 if (!Adapter->watchdog_timer_enabled) {
4614 4615 Adapter->watchdog_timer_enabled = B_TRUE;
4615 4616 Adapter->watchdog_timer_started = B_TRUE;
4616 4617 arm_watchdog_timer(Adapter);
4617 4618 }
4618 4619
4619 4620 mutex_exit(&Adapter->watchdog_lock);
4620 4621 }
4621 4622
4622 4623 static void
4623 4624 disable_watchdog_timer(struct e1000g *Adapter)
4624 4625 {
4625 4626 timeout_id_t tid;
4626 4627
4627 4628 mutex_enter(&Adapter->watchdog_lock);
4628 4629
4629 4630 Adapter->watchdog_timer_enabled = B_FALSE;
4630 4631 Adapter->watchdog_timer_started = B_FALSE;
4631 4632 tid = Adapter->watchdog_tid;
4632 4633 Adapter->watchdog_tid = 0;
4633 4634
4634 4635 mutex_exit(&Adapter->watchdog_lock);
4635 4636
4636 4637 if (tid != 0)
4637 4638 (void) untimeout(tid);
4638 4639 }
4639 4640
4640 4641 static void
4641 4642 start_watchdog_timer(struct e1000g *Adapter)
4642 4643 {
4643 4644 mutex_enter(&Adapter->watchdog_lock);
4644 4645
4645 4646 if (Adapter->watchdog_timer_enabled) {
4646 4647 if (!Adapter->watchdog_timer_started) {
4647 4648 Adapter->watchdog_timer_started = B_TRUE;
4648 4649 arm_watchdog_timer(Adapter);
4649 4650 }
4650 4651 }
4651 4652
4652 4653 mutex_exit(&Adapter->watchdog_lock);
4653 4654 }
4654 4655
4655 4656 static void
4656 4657 restart_watchdog_timer(struct e1000g *Adapter)
4657 4658 {
4658 4659 mutex_enter(&Adapter->watchdog_lock);
4659 4660
4660 4661 if (Adapter->watchdog_timer_started)
4661 4662 arm_watchdog_timer(Adapter);
4662 4663
4663 4664 mutex_exit(&Adapter->watchdog_lock);
4664 4665 }
4665 4666
4666 4667 static void
4667 4668 stop_watchdog_timer(struct e1000g *Adapter)
4668 4669 {
4669 4670 timeout_id_t tid;
4670 4671
4671 4672 mutex_enter(&Adapter->watchdog_lock);
4672 4673
4673 4674 Adapter->watchdog_timer_started = B_FALSE;
4674 4675 tid = Adapter->watchdog_tid;
4675 4676 Adapter->watchdog_tid = 0;
4676 4677
4677 4678 mutex_exit(&Adapter->watchdog_lock);
4678 4679
4679 4680 if (tid != 0)
4680 4681 (void) untimeout(tid);
4681 4682 }
4682 4683
4683 4684 static void
4684 4685 stop_link_timer(struct e1000g *Adapter)
4685 4686 {
4686 4687 timeout_id_t tid;
4687 4688
4688 4689 /* Disable the link timer */
4689 4690 mutex_enter(&Adapter->link_lock);
4690 4691
4691 4692 tid = Adapter->link_tid;
4692 4693 Adapter->link_tid = 0;
4693 4694
4694 4695 mutex_exit(&Adapter->link_lock);
4695 4696
4696 4697 if (tid != 0)
4697 4698 (void) untimeout(tid);
4698 4699 }
4699 4700
4700 4701 static void
4701 4702 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4702 4703 {
4703 4704 timeout_id_t tid;
4704 4705
4705 4706 /* Disable the tx timer for 82547 chipset */
4706 4707 mutex_enter(&tx_ring->tx_lock);
4707 4708
4708 4709 tx_ring->timer_enable_82547 = B_FALSE;
4709 4710 tid = tx_ring->timer_id_82547;
4710 4711 tx_ring->timer_id_82547 = 0;
4711 4712
4712 4713 mutex_exit(&tx_ring->tx_lock);
4713 4714
4714 4715 if (tid != 0)
4715 4716 (void) untimeout(tid);
4716 4717 }
4717 4718
4718 4719 void
4719 4720 e1000g_clear_interrupt(struct e1000g *Adapter)
4720 4721 {
4721 4722 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4722 4723 0xffffffff & ~E1000_IMS_RXSEQ);
4723 4724 }
4724 4725
4725 4726 void
4726 4727 e1000g_mask_interrupt(struct e1000g *Adapter)
4727 4728 {
4728 4729 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4729 4730 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4730 4731
4731 4732 if (Adapter->tx_intr_enable)
4732 4733 e1000g_mask_tx_interrupt(Adapter);
4733 4734 }
4734 4735
4735 4736 /*
4736 4737  * This routine is called by e1000g_quiesce() and therefore must not block.
4737 4738 */
4738 4739 void
4739 4740 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4740 4741 {
4741 4742 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4742 4743 }
4743 4744
4744 4745 void
4745 4746 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4746 4747 {
4747 4748 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4748 4749 }
4749 4750
4750 4751 void
4751 4752 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4752 4753 {
4753 4754 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4754 4755 }
4755 4756
4756 4757 static void
4757 4758 e1000g_smartspeed(struct e1000g *Adapter)
4758 4759 {
4759 4760 struct e1000_hw *hw = &Adapter->shared;
4760 4761 uint16_t phy_status;
4761 4762 uint16_t phy_ctrl;
4762 4763
4763 4764 /*
4764 4765 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4765 4766 * advertising 1000Full, we don't even use the workaround
4766 4767 */
4767 4768 if ((hw->phy.type != e1000_phy_igp) ||
4768 4769 !hw->mac.autoneg ||
4769 4770 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4770 4771 return;
4771 4772
4772 4773 /*
4773 4774 * True if this is the first call of this function or after every
4774 4775 * 30 seconds of not having link
4775 4776 */
4776 4777 if (Adapter->smartspeed == 0) {
4777 4778 /*
4778 4779 * If Master/Slave config fault is asserted twice, we
4779 4780 * assume back-to-back
4780 4781 */
4781 4782 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4782 4783 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4783 4784 return;
4784 4785
4785 4786 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4786 4787 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4787 4788 return;
4788 4789 /*
4789 4790 		 * We're assuming back-to-back because our status register
4790 4791 		 * insists there's a fault in the master/slave
4791 4792 		 * relationship that was "negotiated".
4792 4793 */
4793 4794 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4794 4795 /*
4795 4796 * Is the phy configured for manual configuration of
4796 4797 * master/slave?
4797 4798 */
4798 4799 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4799 4800 /*
4800 4801 * Yes. Then disable manual configuration (enable
4801 4802 * auto configuration) of master/slave
4802 4803 */
4803 4804 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4804 4805 (void) e1000_write_phy_reg(hw,
4805 4806 PHY_1000T_CTRL, phy_ctrl);
4806 4807 /*
4807 4808 * Effectively starting the clock
4808 4809 */
4809 4810 Adapter->smartspeed++;
4810 4811 /*
4811 4812 * Restart autonegotiation
4812 4813 */
4813 4814 if (!e1000_phy_setup_autoneg(hw) &&
4814 4815 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4815 4816 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4816 4817 MII_CR_RESTART_AUTO_NEG);
4817 4818 (void) e1000_write_phy_reg(hw,
4818 4819 PHY_CONTROL, phy_ctrl);
4819 4820 }
4820 4821 }
4821 4822 return;
4822 4823 /*
4823 4824 * Has 6 seconds transpired still without link? Remember,
4824 4825 * you should reset the smartspeed counter once you obtain
4825 4826 * link
4826 4827 */
4827 4828 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4828 4829 /*
4829 4830 * Yes. Remember, we did at the start determine that
4830 4831 * there's a master/slave configuration fault, so we're
4831 4832 * still assuming there's someone on the other end, but we
4832 4833 * just haven't yet been able to talk to it. We then
4833 4834 * re-enable auto configuration of master/slave to see if
4834 4835 * we're running 2/3 pair cables.
4835 4836 */
4836 4837 /*
4837 4838 * If still no link, perhaps using 2/3 pair cable
4838 4839 */
4839 4840 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4840 4841 phy_ctrl |= CR_1000T_MS_ENABLE;
4841 4842 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4842 4843 /*
4843 4844 * Restart autoneg with phy enabled for manual
4844 4845 * configuration of master/slave
4845 4846 */
4846 4847 if (!e1000_phy_setup_autoneg(hw) &&
4847 4848 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4848 4849 phy_ctrl |=
4849 4850 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4850 4851 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4851 4852 }
4852 4853 /*
4853 4854 * Hopefully, there are no more faults and we've obtained
4854 4855 * link as a result.
4855 4856 */
4856 4857 }
4857 4858 /*
4858 4859 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4859 4860 * seconds)
4860 4861 */
4861 4862 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4862 4863 Adapter->smartspeed = 0;
4863 4864 }
4864 4865
4865 4866 static boolean_t
4866 4867 is_valid_mac_addr(uint8_t *mac_addr)
4867 4868 {
4868 4869 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4869 4870 const uint8_t addr_test2[6] =
4870 4871 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4871 4872
4872 4873 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4873 4874 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4874 4875 return (B_FALSE);
4875 4876
4876 4877 return (B_TRUE);
4877 4878 }
4878 4879
4879 4880 /*
4880 4881 * e1000g_stall_check - check for tx stall
4881 4882 *
4882 4883 * This function checks if the adapter is stalled (in transmit).
4883 4884 *
4884 4885 * It is called each time the watchdog timeout is invoked.
4885 4886 * If the transmit descriptor reclaim continuously fails,
4886 4887 * the watchdog value will increment by 1. If the watchdog
4887 4888 * value exceeds the threshold, the adapter is assumed to
4888 4889  * have stalled and needs to be reset.
4889 4890 */
4890 4891 static boolean_t
4891 4892 e1000g_stall_check(struct e1000g *Adapter)
4892 4893 {
4893 4894 e1000g_tx_ring_t *tx_ring;
4894 4895
4895 4896 tx_ring = Adapter->tx_ring;
4896 4897
4897 4898 if (Adapter->link_state != LINK_STATE_UP)
4898 4899 return (B_FALSE);
4899 4900
4900 4901 (void) e1000g_recycle(tx_ring);
4901 4902
4902 4903 if (Adapter->stall_flag)
4903 4904 return (B_TRUE);
4904 4905
4905 4906 return (B_FALSE);
4906 4907 }
4907 4908
4908 4909 #ifdef E1000G_DEBUG
4909 4910 static enum ioc_reply
4910 4911 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4911 4912 {
4912 4913 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4913 4914 e1000g_peekpoke_t *ppd;
4914 4915 uint64_t mem_va;
4915 4916 uint64_t maxoff;
4916 4917 boolean_t peek;
4917 4918
4918 4919 switch (iocp->ioc_cmd) {
4919 4920
4920 4921 case E1000G_IOC_REG_PEEK:
4921 4922 peek = B_TRUE;
4922 4923 break;
4923 4924
4924 4925 case E1000G_IOC_REG_POKE:
4925 4926 peek = B_FALSE;
4926 4927 break;
4927 4928
4928 4929 	default:
4929 4930 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4930 4931 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4931 4932 iocp->ioc_cmd);
4932 4933 return (IOC_INVAL);
4933 4934 }
4934 4935
4935 4936 /*
4936 4937 * Validate format of ioctl
4937 4938 */
4938 4939 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4939 4940 return (IOC_INVAL);
4940 4941 if (mp->b_cont == NULL)
4941 4942 return (IOC_INVAL);
4942 4943
4943 4944 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4944 4945
4945 4946 /*
4946 4947 * Validate request parameters
4947 4948 */
4948 4949 switch (ppd->pp_acc_space) {
4949 4950
4950 4951 default:
4951 4952 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4952 4953 "e1000g_diag_ioctl: invalid access space 0x%X\n",
4953 4954 ppd->pp_acc_space);
4954 4955 return (IOC_INVAL);
4955 4956
4956 4957 case E1000G_PP_SPACE_REG:
4957 4958 /*
4958 4959 * Memory-mapped I/O space
4959 4960 */
4960 4961 ASSERT(ppd->pp_acc_size == 4);
4961 4962 if (ppd->pp_acc_size != 4)
4962 4963 return (IOC_INVAL);
4963 4964
4964 4965 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4965 4966 return (IOC_INVAL);
4966 4967
4967 4968 mem_va = 0;
4968 4969 maxoff = 0x10000;
4969 4970 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4970 4971 break;
4971 4972
4972 4973 case E1000G_PP_SPACE_E1000G:
4973 4974 /*
4974 4975 * E1000g data structure!
4975 4976 */
4976 4977 mem_va = (uintptr_t)e1000gp;
4977 4978 maxoff = sizeof (struct e1000g);
4978 4979 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4979 4980 break;
4980 4981
4981 4982 }
4982 4983
4983 4984 if (ppd->pp_acc_offset >= maxoff)
4984 4985 return (IOC_INVAL);
4985 4986
4986 4987 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4987 4988 return (IOC_INVAL);
4988 4989
4989 4990 /*
4990 4991 * All OK - go!
4991 4992 */
4992 4993 ppd->pp_acc_offset += mem_va;
4993 4994 (*ppfn)(e1000gp, ppd);
4994 4995 return (peek ? IOC_REPLY : IOC_ACK);
4995 4996 }
4996 4997
4997 4998 static void
4998 4999 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4999 5000 {
5000 5001 ddi_acc_handle_t handle;
5001 5002 uint32_t *regaddr;
5002 5003
5003 5004 handle = e1000gp->osdep.reg_handle;
5004 5005 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5005 5006 (uintptr_t)ppd->pp_acc_offset);
5006 5007
5007 5008 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5008 5009 }
5009 5010
5010 5011 static void
5011 5012 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5012 5013 {
5013 5014 ddi_acc_handle_t handle;
5014 5015 uint32_t *regaddr;
5015 5016 uint32_t value;
5016 5017
5017 5018 handle = e1000gp->osdep.reg_handle;
5018 5019 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5019 5020 (uintptr_t)ppd->pp_acc_offset);
5020 5021 value = (uint32_t)ppd->pp_acc_data;
5021 5022
5022 5023 ddi_put32(handle, regaddr, value);
5023 5024 }
5024 5025
5025 5026 static void
5026 5027 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5027 5028 {
5028 5029 uint64_t value;
5029 5030 void *vaddr;
5030 5031
5031 5032 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5032 5033
5033 5034 switch (ppd->pp_acc_size) {
5034 5035 case 1:
5035 5036 value = *(uint8_t *)vaddr;
5036 5037 break;
5037 5038
5038 5039 case 2:
5039 5040 value = *(uint16_t *)vaddr;
5040 5041 break;
5041 5042
5042 5043 case 4:
5043 5044 value = *(uint32_t *)vaddr;
5044 5045 break;
5045 5046
5046 5047 case 8:
5047 5048 value = *(uint64_t *)vaddr;
5048 5049 break;
5049 5050 }
5050 5051
5051 5052 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5052 5053 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5053 5054 (void *)e1000gp, (void *)ppd, value, vaddr);
5054 5055
5055 5056 ppd->pp_acc_data = value;
5056 5057 }
5057 5058
5058 5059 static void
5059 5060 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5060 5061 {
5061 5062 uint64_t value;
5062 5063 void *vaddr;
5063 5064
5064 5065 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5065 5066 value = ppd->pp_acc_data;
5066 5067
5067 5068 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5068 5069 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5069 5070 (void *)e1000gp, (void *)ppd, value, vaddr);
5070 5071
5071 5072 switch (ppd->pp_acc_size) {
5072 5073 case 1:
5073 5074 *(uint8_t *)vaddr = (uint8_t)value;
5074 5075 break;
5075 5076
5076 5077 case 2:
5077 5078 *(uint16_t *)vaddr = (uint16_t)value;
5078 5079 break;
5079 5080
5080 5081 case 4:
5081 5082 *(uint32_t *)vaddr = (uint32_t)value;
5082 5083 break;
5083 5084
5084 5085 case 8:
5085 5086 *(uint64_t *)vaddr = (uint64_t)value;
5086 5087 break;
5087 5088 }
5088 5089 }
5089 5090 #endif
5090 5091
5091 5092 /*
5092 5093 * Loopback Support
5093 5094 */
5094 5095 static lb_property_t lb_normal =
5095 5096 { normal, "normal", E1000G_LB_NONE };
5096 5097 static lb_property_t lb_external1000 =
5097 5098 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5098 5099 static lb_property_t lb_external100 =
5099 5100 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5100 5101 static lb_property_t lb_external10 =
5101 5102 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5102 5103 static lb_property_t lb_phy =
5103 5104 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5104 5105
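/*
 * Loopback ioctl handler.  LB_GET_INFO_SIZE reports how many bytes a
 * subsequent LB_GET_INFO will return (an array of the lb_property_t
 * entries supported by this adapter/PHY combination), while LB_GET_MODE
 * and LB_SET_MODE read and change the current loopback mode.  Callers
 * should issue LB_GET_INFO_SIZE first, since LB_GET_INFO checks
 * ioc_count against that exact size.
 */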
5105 5106 static enum ioc_reply
5106 5107 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5107 5108 {
5108 5109 lb_info_sz_t *lbsp;
5109 5110 lb_property_t *lbpp;
5110 5111 struct e1000_hw *hw;
5111 5112 uint32_t *lbmp;
5112 5113 uint32_t size;
5113 5114 uint32_t value;
5114 5115
5115 5116 hw = &Adapter->shared;
5116 5117
5117 5118 if (mp->b_cont == NULL)
5118 5119 return (IOC_INVAL);
5119 5120
5120 5121 if (!e1000g_check_loopback_support(hw)) {
5121 5122 e1000g_log(NULL, CE_WARN,
5122 5123 "Loopback is not supported on e1000g%d", Adapter->instance);
5123 5124 return (IOC_INVAL);
5124 5125 }
5125 5126
5126 5127 switch (iocp->ioc_cmd) {
5127 5128 default:
5128 5129 return (IOC_INVAL);
5129 5130
5130 5131 case LB_GET_INFO_SIZE:
5131 5132 size = sizeof (lb_info_sz_t);
5132 5133 if (iocp->ioc_count != size)
5133 5134 return (IOC_INVAL);
5134 5135
5135 5136 rw_enter(&Adapter->chip_lock, RW_WRITER);
5136 5137 e1000g_get_phy_state(Adapter);
5137 5138
5138 5139 /*
5139 5140 		/*
5140 5141 		 * Workaround for hardware faults: to get a stable PHY
5141 5142 		 * state, we wait for a fixed interval and then read the
5142 5143 		 * state again.  The delay is an empirical value based on
5143 5144 */
5144 5145 msec_delay(100);
5145 5146 e1000g_get_phy_state(Adapter);
5146 5147 rw_exit(&Adapter->chip_lock);
5147 5148
5148 5149 value = sizeof (lb_normal);
5149 5150 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5150 5151 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5151 5152 (hw->phy.media_type == e1000_media_type_fiber) ||
5152 5153 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5153 5154 value += sizeof (lb_phy);
5154 5155 switch (hw->mac.type) {
5155 5156 case e1000_82571:
5156 5157 case e1000_82572:
5157 5158 case e1000_80003es2lan:
5158 5159 value += sizeof (lb_external1000);
5159 5160 break;
5160 5161 }
5161 5162 }
5162 5163 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5163 5164 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5164 5165 value += sizeof (lb_external100);
5165 5166 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5166 5167 value += sizeof (lb_external10);
5167 5168
5168 5169 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5169 5170 *lbsp = value;
5170 5171 break;
5171 5172
5172 5173 case LB_GET_INFO:
5173 5174 value = sizeof (lb_normal);
5174 5175 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5175 5176 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5176 5177 (hw->phy.media_type == e1000_media_type_fiber) ||
5177 5178 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5178 5179 value += sizeof (lb_phy);
5179 5180 switch (hw->mac.type) {
5180 5181 case e1000_82571:
5181 5182 case e1000_82572:
5182 5183 case e1000_80003es2lan:
5183 5184 value += sizeof (lb_external1000);
5184 5185 break;
5185 5186 }
5186 5187 }
5187 5188 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5188 5189 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5189 5190 value += sizeof (lb_external100);
5190 5191 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5191 5192 value += sizeof (lb_external10);
5192 5193
5193 5194 size = value;
5194 5195 if (iocp->ioc_count != size)
5195 5196 return (IOC_INVAL);
5196 5197
5197 5198 value = 0;
5198 5199 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5199 5200 lbpp[value++] = lb_normal;
5200 5201 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5201 5202 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5202 5203 (hw->phy.media_type == e1000_media_type_fiber) ||
5203 5204 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5204 5205 lbpp[value++] = lb_phy;
5205 5206 switch (hw->mac.type) {
5206 5207 case e1000_82571:
5207 5208 case e1000_82572:
5208 5209 case e1000_80003es2lan:
5209 5210 lbpp[value++] = lb_external1000;
5210 5211 break;
5211 5212 }
5212 5213 }
5213 5214 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5214 5215 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5215 5216 lbpp[value++] = lb_external100;
5216 5217 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5217 5218 lbpp[value++] = lb_external10;
5218 5219 break;
5219 5220
5220 5221 case LB_GET_MODE:
5221 5222 size = sizeof (uint32_t);
5222 5223 if (iocp->ioc_count != size)
5223 5224 return (IOC_INVAL);
5224 5225
5225 5226 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5226 5227 *lbmp = Adapter->loopback_mode;
5227 5228 break;
5228 5229
5229 5230 case LB_SET_MODE:
5230 5231 size = 0;
5231 5232 if (iocp->ioc_count != sizeof (uint32_t))
5232 5233 return (IOC_INVAL);
5233 5234
5234 5235 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5235 5236 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5236 5237 return (IOC_INVAL);
5237 5238 break;
5238 5239 }
5239 5240
5240 5241 iocp->ioc_count = size;
5241 5242 iocp->ioc_error = 0;
5242 5243
5243 5244 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5244 5245 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5245 5246 return (IOC_INVAL);
5246 5247 }
5247 5248
5248 5249 return (IOC_REPLY);
5249 5250 }
5250 5251
5251 5252 static boolean_t
5252 5253 e1000g_check_loopback_support(struct e1000_hw *hw)
5253 5254 {
5254 5255 switch (hw->mac.type) {
5255 5256 case e1000_82540:
5256 5257 case e1000_82545:
5257 5258 case e1000_82545_rev_3:
5258 5259 case e1000_82546:
5259 5260 case e1000_82546_rev_3:
5260 5261 case e1000_82541:
5261 5262 case e1000_82541_rev_2:
5262 5263 case e1000_82547:
5263 5264 case e1000_82547_rev_2:
5264 5265 case e1000_82571:
5265 5266 case e1000_82572:
5266 5267 case e1000_82573:
5267 5268 case e1000_82574:
5268 5269 case e1000_80003es2lan:
5269 5270 case e1000_ich9lan:
5270 5271 case e1000_ich10lan:
5271 5272 return (B_TRUE);
5272 5273 }
5273 5274 return (B_FALSE);
5274 5275 }
5275 5276
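/*
 * Switch the adapter into the requested loopback mode.  E1000G_LB_NONE
 * simply resets the chip.  For the other modes, if the link does not
 * come up after the mode has been programmed, the link is reset and the
 * attempt retried once; a second failure falls back to E1000G_LB_NONE,
 * resets the chip and returns B_FALSE.
 */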
5276 5277 static boolean_t
5277 5278 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5278 5279 {
5279 5280 struct e1000_hw *hw;
5280 5281 int i, times;
5281 5282 boolean_t link_up;
5282 5283
5283 5284 if (mode == Adapter->loopback_mode)
5284 5285 return (B_TRUE);
5285 5286
5286 5287 hw = &Adapter->shared;
5287 5288 times = 0;
5288 5289
5289 5290 Adapter->loopback_mode = mode;
5290 5291
5291 5292 if (mode == E1000G_LB_NONE) {
5292 5293 /* Reset the chip */
5293 5294 hw->phy.autoneg_wait_to_complete = B_TRUE;
5294 5295 (void) e1000g_reset_adapter(Adapter);
5295 5296 hw->phy.autoneg_wait_to_complete = B_FALSE;
5296 5297 return (B_TRUE);
5297 5298 }
5298 5299
5299 5300 again:
5300 5301
5301 5302 rw_enter(&Adapter->chip_lock, RW_WRITER);
5302 5303
5303 5304 switch (mode) {
5304 5305 default:
5305 5306 rw_exit(&Adapter->chip_lock);
5306 5307 return (B_FALSE);
5307 5308
5308 5309 case E1000G_LB_EXTERNAL_1000:
5309 5310 e1000g_set_external_loopback_1000(Adapter);
5310 5311 break;
5311 5312
5312 5313 case E1000G_LB_EXTERNAL_100:
5313 5314 e1000g_set_external_loopback_100(Adapter);
5314 5315 break;
5315 5316
5316 5317 case E1000G_LB_EXTERNAL_10:
5317 5318 e1000g_set_external_loopback_10(Adapter);
5318 5319 break;
5319 5320
5320 5321 case E1000G_LB_INTERNAL_PHY:
5321 5322 e1000g_set_internal_loopback(Adapter);
5322 5323 break;
5323 5324 }
5324 5325
5325 5326 times++;
5326 5327
5327 5328 rw_exit(&Adapter->chip_lock);
5328 5329
5329 5330 /* Wait for link up */
5330 5331 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5331 5332 msec_delay(100);
5332 5333
5333 5334 rw_enter(&Adapter->chip_lock, RW_WRITER);
5334 5335
5335 5336 link_up = e1000g_link_up(Adapter);
5336 5337
5337 5338 rw_exit(&Adapter->chip_lock);
5338 5339
5339 5340 if (!link_up) {
5340 5341 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5341 5342 "Failed to get the link up");
5342 5343 if (times < 2) {
5343 5344 /* Reset the link */
5344 5345 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5345 5346 "Reset the link ...");
5346 5347 (void) e1000g_reset_adapter(Adapter);
5347 5348 goto again;
5348 5349 }
5349 5350
5350 5351 /*
5351 5352 		 * Reset the driver to loopback-none when setting the
5352 5353 		 * loopback mode has failed for the second time.
5353 5354 */
5354 5355 Adapter->loopback_mode = E1000G_LB_NONE;
5355 5356
5356 5357 /* Reset the chip */
5357 5358 hw->phy.autoneg_wait_to_complete = B_TRUE;
5358 5359 (void) e1000g_reset_adapter(Adapter);
5359 5360 hw->phy.autoneg_wait_to_complete = B_FALSE;
5360 5361
5361 5362 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5362 5363 "Set loopback mode failed, reset to loopback none");
5363 5364
5364 5365 return (B_FALSE);
5365 5366 }
5366 5367
5367 5368 return (B_TRUE);
5368 5369 }
5369 5370
5370 5371 /*
5371 5372  * The following loopback settings are from Intel's technical
5372 5373  * document "How To Loopback".  All the register settings and
5373 5374  * time delay values are taken directly from that document;
5374 5375  * no further explanation is available.
5375 5376 */
5376 5377 static void
5377 5378 e1000g_set_internal_loopback(struct e1000g *Adapter)
5378 5379 {
5379 5380 struct e1000_hw *hw;
5380 5381 uint32_t ctrl;
5381 5382 uint32_t status;
5382 5383 uint16_t phy_ctrl;
5383 5384 uint16_t phy_reg;
5384 5385 uint32_t txcw;
5385 5386
5386 5387 hw = &Adapter->shared;
5387 5388
5388 5389 /* Disable Smart Power Down */
5389 5390 phy_spd_state(hw, B_FALSE);
5390 5391
5391 5392 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5392 5393 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5393 5394 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5394 5395
5395 5396 switch (hw->mac.type) {
5396 5397 case e1000_82540:
5397 5398 case e1000_82545:
5398 5399 case e1000_82545_rev_3:
5399 5400 case e1000_82546:
5400 5401 case e1000_82546_rev_3:
5401 5402 case e1000_82573:
5402 5403 /* Auto-MDI/MDIX off */
5403 5404 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5404 5405 /* Reset PHY to update Auto-MDI/MDIX */
5405 5406 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5406 5407 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5407 5408 /* Reset PHY to auto-neg off and force 1000 */
5408 5409 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5409 5410 phy_ctrl | MII_CR_RESET);
5410 5411 /*
5411 5412 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5412 5413 * See comments above e1000g_set_internal_loopback() for the
5413 5414 * background.
5414 5415 */
5415 5416 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5416 5417 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5417 5418 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5418 5419 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5419 5420 break;
5420 5421 case e1000_80003es2lan:
5421 5422 /* Force Link Up */
5422 5423 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5423 5424 0x1CC);
5424 5425 /* Sets PCS loopback at 1Gbs */
5425 5426 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5426 5427 0x1046);
5427 5428 break;
5428 5429 }
5429 5430
5430 5431 /*
5431 5432 * The following registers should be set for e1000_phy_bm phy type.
5432 5433 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5433 5434 * For others, we do not need to set these registers.
5434 5435 */
5435 5436 if (hw->phy.type == e1000_phy_bm) {
5436 5437 /* Set Default MAC Interface speed to 1GB */
5437 5438 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5438 5439 phy_reg &= ~0x0007;
5439 5440 phy_reg |= 0x006;
5440 5441 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5441 5442 /* Assert SW reset for above settings to take effect */
5442 5443 (void) e1000_phy_commit(hw);
5443 5444 msec_delay(1);
5444 5445 /* Force Full Duplex */
5445 5446 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5446 5447 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5447 5448 phy_reg | 0x000C);
5448 5449 /* Set Link Up (in force link) */
5449 5450 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5450 5451 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5451 5452 phy_reg | 0x0040);
5452 5453 /* Force Link */
5453 5454 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5454 5455 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5455 5456 phy_reg | 0x0040);
5456 5457 /* Set Early Link Enable */
5457 5458 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5458 5459 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5459 5460 phy_reg | 0x0400);
5460 5461 }
5461 5462
5462 5463 /* Set loopback */
5463 5464 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5464 5465
5465 5466 msec_delay(250);
5466 5467
5467 5468 /* Now set up the MAC to the same speed/duplex as the PHY. */
5468 5469 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5469 5470 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5470 5471 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5471 5472 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5472 5473 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5473 5474 E1000_CTRL_FD); /* Force Duplex to FULL */
5474 5475
5475 5476 switch (hw->mac.type) {
5476 5477 case e1000_82540:
5477 5478 case e1000_82545:
5478 5479 case e1000_82545_rev_3:
5479 5480 case e1000_82546:
5480 5481 case e1000_82546_rev_3:
5481 5482 /*
5482 5483 		 * For some SerDes devices we need to commit the writes
5483 5484 		 * now so that the link status is updated.
5484 5485 */
5485 5486 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5486 5487 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5487 5488 msec_delay(100);
5488 5489 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5489 5490 }
5490 5491
5491 5492 if (hw->phy.media_type == e1000_media_type_copper) {
5492 5493 /* Invert Loss of Signal */
5493 5494 ctrl |= E1000_CTRL_ILOS;
5494 5495 } else {
5495 5496 /* Set ILOS on fiber nic if half duplex is detected */
5496 5497 status = E1000_READ_REG(hw, E1000_STATUS);
5497 5498 if ((status & E1000_STATUS_FD) == 0)
5498 5499 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5499 5500 }
5500 5501 break;
5501 5502
5502 5503 case e1000_82571:
5503 5504 case e1000_82572:
5504 5505 /*
5505 5506 * The fiber/SerDes versions of this adapter do not contain an
5506 5507 * accessible PHY. Therefore, loopback beyond MAC must be done
5507 5508 * using SerDes analog loopback.
5508 5509 */
5509 5510 if (hw->phy.media_type != e1000_media_type_copper) {
5510 5511 /* Disable autoneg by setting bit 31 of TXCW to zero */
5511 5512 txcw = E1000_READ_REG(hw, E1000_TXCW);
5512 5513 txcw &= ~((uint32_t)1 << 31);
5513 5514 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5514 5515
5515 5516 /*
5516 5517 * Write 0x410 to Serdes Control register
5517 5518 * to enable Serdes analog loopback
5518 5519 */
5519 5520 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5520 5521 msec_delay(10);
5521 5522 }
5522 5523
5523 5524 status = E1000_READ_REG(hw, E1000_STATUS);
5524 5525 /* Set ILOS on fiber nic if half duplex is detected */
5525 5526 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5526 5527 ((status & E1000_STATUS_FD) == 0 ||
5527 5528 (status & E1000_STATUS_LU) == 0))
5528 5529 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5529 5530 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5530 5531 ctrl |= E1000_CTRL_SLU;
5531 5532 break;
5532 5533
5533 5534 case e1000_82573:
5534 5535 ctrl |= E1000_CTRL_ILOS;
5535 5536 break;
5536 5537 case e1000_ich9lan:
5537 5538 case e1000_ich10lan:
5538 5539 ctrl |= E1000_CTRL_SLU;
5539 5540 break;
5540 5541 }
5541 5542 if (hw->phy.type == e1000_phy_bm)
5542 5543 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5543 5544
5544 5545 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5545 5546 }
5546 5547
5547 5548 static void
5548 5549 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5549 5550 {
5550 5551 struct e1000_hw *hw;
5551 5552 uint32_t rctl;
5552 5553 uint32_t ctrl_ext;
5553 5554 uint32_t ctrl;
5554 5555 uint32_t status;
5555 5556 uint32_t txcw;
5556 5557 uint16_t phydata;
5557 5558
5558 5559 hw = &Adapter->shared;
5559 5560
5560 5561 /* Disable Smart Power Down */
5561 5562 phy_spd_state(hw, B_FALSE);
5562 5563
5563 5564 switch (hw->mac.type) {
5564 5565 case e1000_82571:
5565 5566 case e1000_82572:
5566 5567 switch (hw->phy.media_type) {
5567 5568 case e1000_media_type_copper:
5568 5569 /* Force link up (Must be done before the PHY writes) */
5569 5570 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5570 5571 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5571 5572 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5572 5573
5573 5574 rctl = E1000_READ_REG(hw, E1000_RCTL);
5574 5575 rctl |= (E1000_RCTL_EN |
5575 5576 E1000_RCTL_SBP |
5576 5577 E1000_RCTL_UPE |
5577 5578 E1000_RCTL_MPE |
5578 5579 E1000_RCTL_LPE |
5579 5580 E1000_RCTL_BAM); /* 0x803E */
5580 5581 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5581 5582
5582 5583 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5583 5584 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5584 5585 E1000_CTRL_EXT_SDP6_DATA |
5585 5586 E1000_CTRL_EXT_SDP3_DATA |
5586 5587 E1000_CTRL_EXT_SDP4_DIR |
5587 5588 E1000_CTRL_EXT_SDP6_DIR |
5588 5589 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5589 5590 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5590 5591
5591 5592 /*
5592 5593 			 * This sequence tunes the PHY's SDP and contains no
5593 5594 			 * customer-settable values.  For background, see the
5594 5595 			 * comment above e1000g_set_internal_loopback().
5595 5596 */
5596 5597 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5597 5598 msec_delay(10);
5598 5599 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5599 5600 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5600 5601 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5601 5602 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5602 5603 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5603 5604 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5604 5605
5605 5606 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5606 5607 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5607 5608 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5608 5609 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5609 5610 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5610 5611
5611 5612 msec_delay(50);
5612 5613 break;
5613 5614 case e1000_media_type_fiber:
5614 5615 case e1000_media_type_internal_serdes:
5615 5616 status = E1000_READ_REG(hw, E1000_STATUS);
5616 5617 if (((status & E1000_STATUS_LU) == 0) ||
5617 5618 (hw->phy.media_type ==
5618 5619 e1000_media_type_internal_serdes)) {
5619 5620 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5620 5621 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5621 5622 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5622 5623 }
5623 5624
5624 5625 /* Disable autoneg by setting bit 31 of TXCW to zero */
5625 5626 txcw = E1000_READ_REG(hw, E1000_TXCW);
5626 5627 txcw &= ~((uint32_t)1 << 31);
5627 5628 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5628 5629
5629 5630 /*
5630 5631 * Write 0x410 to Serdes Control register
5631 5632 * to enable Serdes analog loopback
5632 5633 */
5633 5634 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5634 5635 msec_delay(10);
5635 5636 break;
5636 5637 default:
5637 5638 break;
5638 5639 }
5639 5640 break;
5640 5641 case e1000_82574:
5641 5642 case e1000_80003es2lan:
5642 5643 case e1000_ich9lan:
5643 5644 case e1000_ich10lan:
5644 5645 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5645 5646 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5646 5647 phydata | (1 << 5));
5647 5648 Adapter->param_adv_autoneg = 1;
5648 5649 Adapter->param_adv_1000fdx = 1;
5649 5650 (void) e1000g_reset_link(Adapter);
5650 5651 break;
5651 5652 }
5652 5653 }
5653 5654
5654 5655 static void
5655 5656 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5656 5657 {
5657 5658 struct e1000_hw *hw;
5658 5659 uint32_t ctrl;
5659 5660 uint16_t phy_ctrl;
5660 5661
5661 5662 hw = &Adapter->shared;
5662 5663
5663 5664 /* Disable Smart Power Down */
5664 5665 phy_spd_state(hw, B_FALSE);
5665 5666
5666 5667 phy_ctrl = (MII_CR_FULL_DUPLEX |
5667 5668 MII_CR_SPEED_100);
5668 5669
5669 5670 /* Force 100/FD, reset PHY */
5670 5671 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5671 5672 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5672 5673 msec_delay(10);
5673 5674
5674 5675 /* Force 100/FD */
5675 5676 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5676 5677 phy_ctrl); /* 0x2100 */
5677 5678 msec_delay(10);
5678 5679
5679 5680 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5680 5681 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5681 5682 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5682 5683 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5683 5684 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5684 5685 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5685 5686 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5686 5687 E1000_CTRL_FD); /* Force Duplex to FULL */
5687 5688
5688 5689 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5689 5690 }
5690 5691
5691 5692 static void
5692 5693 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5693 5694 {
5694 5695 struct e1000_hw *hw;
5695 5696 uint32_t ctrl;
5696 5697 uint16_t phy_ctrl;
5697 5698
5698 5699 hw = &Adapter->shared;
5699 5700
5700 5701 /* Disable Smart Power Down */
5701 5702 phy_spd_state(hw, B_FALSE);
5702 5703
5703 5704 phy_ctrl = (MII_CR_FULL_DUPLEX |
5704 5705 MII_CR_SPEED_10);
5705 5706
5706 5707 /* Force 10/FD, reset PHY */
5707 5708 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5708 5709 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5709 5710 msec_delay(10);
5710 5711
5711 5712 /* Force 10/FD */
5712 5713 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5713 5714 phy_ctrl); /* 0x0100 */
5714 5715 msec_delay(10);
5715 5716
5716 5717 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5717 5718 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5718 5719 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5719 5720 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5720 5721 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5721 5722 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5722 5723 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5723 5724 E1000_CTRL_FD); /* Force Duplex to FULL */
5724 5725
5725 5726 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5726 5727 }
5727 5728
5728 5729 #ifdef __sparc
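/*
 * Derive the MAC address from firmware properties on SPARC.  The
 * properties are consulted in increasing order of precedence:
 * "local-mac-address", then the system address when "local-mac-address?"
 * is set to "false", and finally "mac-address" (created when the system
 * was netbooted from this interface); each later match overrides the
 * earlier ones.
 */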
5729 5730 static boolean_t
5730 5731 e1000g_find_mac_address(struct e1000g *Adapter)
5731 5732 {
5732 5733 struct e1000_hw *hw = &Adapter->shared;
5733 5734 uchar_t *bytes;
5734 5735 struct ether_addr sysaddr;
5735 5736 uint_t nelts;
5736 5737 int err;
5737 5738 boolean_t found = B_FALSE;
5738 5739
5739 5740 /*
5740 5741 * The "vendor's factory-set address" may already have
5741 5742 * been extracted from the chip, but if the property
5742 5743 * "local-mac-address" is set we use that instead.
5743 5744 *
5744 5745 * We check whether it looks like an array of 6
5745 5746 * bytes (which it should, if OBP set it). If we can't
5746 5747 * make sense of it this way, we'll ignore it.
5747 5748 */
5748 5749 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5749 5750 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5750 5751 if (err == DDI_PROP_SUCCESS) {
5751 5752 if (nelts == ETHERADDRL) {
5752 5753 while (nelts--)
5753 5754 hw->mac.addr[nelts] = bytes[nelts];
5754 5755 found = B_TRUE;
5755 5756 }
5756 5757 ddi_prop_free(bytes);
5757 5758 }
5758 5759
5759 5760 /*
5760 5761 * Look up the OBP property "local-mac-address?". If the user has set
5761 5762 * 'local-mac-address? = false', use "the system address" instead.
5762 5763 */
5763 5764 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5764 5765 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5765 5766 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5766 5767 if (localetheraddr(NULL, &sysaddr) != 0) {
5767 5768 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5768 5769 found = B_TRUE;
5769 5770 }
5770 5771 }
5771 5772 ddi_prop_free(bytes);
5772 5773 }
5773 5774
5774 5775 /*
5775 5776 * Finally(!), if there's a valid "mac-address" property (created
5776 5777 * if we netbooted from this interface), we must use this instead
5777 5778 * of any of the above to ensure that the NFS/install server doesn't
5778 5779 * get confused by the address changing as Solaris takes over!
5779 5780 */
5780 5781 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5781 5782 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5782 5783 if (err == DDI_PROP_SUCCESS) {
5783 5784 if (nelts == ETHERADDRL) {
5784 5785 while (nelts--)
5785 5786 hw->mac.addr[nelts] = bytes[nelts];
5786 5787 found = B_TRUE;
5787 5788 }
5788 5789 ddi_prop_free(bytes);
5789 5790 }
5790 5791
5791 5792 if (found) {
5792 5793 bcopy(hw->mac.addr, hw->mac.perm_addr,
5793 5794 ETHERADDRL);
5794 5795 }
5795 5796
5796 5797 return (found);
5797 5798 }
5798 5799 #endif
5799 5800
5800 5801 static int
5801 5802 e1000g_add_intrs(struct e1000g *Adapter)
5802 5803 {
5803 5804 dev_info_t *devinfo;
5804 5805 int intr_types;
5805 5806 int rc;
5806 5807
5807 5808 devinfo = Adapter->dip;
5808 5809
5809 5810 /* Get supported interrupt types */
5810 5811 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5811 5812
5812 5813 if (rc != DDI_SUCCESS) {
5813 5814 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5814 5815 "Get supported interrupt types failed: %d\n", rc);
5815 5816 return (DDI_FAILURE);
5816 5817 }
5817 5818
5818 5819 /*
5819 5820 * Based on Intel Technical Advisory document (TA-160), there are some
5820 5821 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
5821 5822 	 * that they support MSI, but in fact have problems.
5822 5823 	 * So we only enable MSI for PCI-E NICs and disable MSI for older
5823 5824 	 * PCI/PCI-X NICs.
5824 5825 */
5825 5826 if (Adapter->shared.mac.type < e1000_82571)
5826 5827 Adapter->msi_enable = B_FALSE;
5827 5828
5828 5829 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5829 5830 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5830 5831
5831 5832 if (rc != DDI_SUCCESS) {
5832 5833 /* EMPTY */
5833 5834 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5834 5835 "Add MSI failed, trying Legacy interrupts\n");
5835 5836 } else {
5836 5837 Adapter->intr_type = DDI_INTR_TYPE_MSI;
5837 5838 }
5838 5839 }
5839 5840
5840 5841 if ((Adapter->intr_type == 0) &&
5841 5842 (intr_types & DDI_INTR_TYPE_FIXED)) {
5842 5843 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5843 5844
5844 5845 if (rc != DDI_SUCCESS) {
5845 5846 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5846 5847 "Add Legacy interrupts failed\n");
5847 5848 return (DDI_FAILURE);
5848 5849 }
5849 5850
5850 5851 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5851 5852 }
5852 5853
5853 5854 if (Adapter->intr_type == 0) {
5854 5855 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5855 5856 "No interrupts registered\n");
5856 5857 return (DDI_FAILURE);
5857 5858 }
5858 5859
5859 5860 return (DDI_SUCCESS);
5860 5861 }
5861 5862
5862 5863 /*
5863 5864 * e1000g_intr_add() handles MSI/Legacy interrupts
5864 5865 */
5865 5866 static int
5866 5867 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5867 5868 {
5868 5869 dev_info_t *devinfo;
5869 5870 int count, avail, actual;
5870 5871 int x, y, rc, inum = 0;
5871 5872 int flag;
5872 5873 ddi_intr_handler_t *intr_handler;
5873 5874
5874 5875 devinfo = Adapter->dip;
5875 5876
5876 5877 /* get number of interrupts */
5877 5878 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5878 5879 if ((rc != DDI_SUCCESS) || (count == 0)) {
5879 5880 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5880 5881 "Get interrupt number failed. Return: %d, count: %d\n",
5881 5882 rc, count);
5882 5883 return (DDI_FAILURE);
5883 5884 }
5884 5885
5885 5886 /* get number of available interrupts */
5886 5887 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5887 5888 if ((rc != DDI_SUCCESS) || (avail == 0)) {
5888 5889 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5889 5890 "Get interrupt available number failed. "
5890 5891 "Return: %d, available: %d\n", rc, avail);
5891 5892 return (DDI_FAILURE);
5892 5893 }
5893 5894
5894 5895 if (avail < count) {
5895 5896 /* EMPTY */
5896 5897 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5897 5898 "Interrupts count: %d, available: %d\n",
5898 5899 count, avail);
5899 5900 }
5900 5901
5901 5902 /* Allocate an array of interrupt handles */
5902 5903 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5903 5904 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5904 5905
5905 5906 /* Set NORMAL behavior for both MSI and FIXED interrupt */
5906 5907 flag = DDI_INTR_ALLOC_NORMAL;
5907 5908
5908 5909 /* call ddi_intr_alloc() */
5909 5910 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5910 5911 count, &actual, flag);
5911 5912
5912 5913 if ((rc != DDI_SUCCESS) || (actual == 0)) {
5913 5914 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5914 5915 "Allocate interrupts failed: %d\n", rc);
5915 5916
5916 5917 kmem_free(Adapter->htable, Adapter->intr_size);
5917 5918 return (DDI_FAILURE);
5918 5919 }
5919 5920
5920 5921 if (actual < count) {
5921 5922 /* EMPTY */
5922 5923 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5923 5924 "Interrupts requested: %d, received: %d\n",
5924 5925 count, actual);
5925 5926 }
5926 5927
5927 5928 Adapter->intr_cnt = actual;
5928 5929
5929 5930 /* Get priority for first msi, assume remaining are all the same */
5930 5931 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5931 5932
5932 5933 if (rc != DDI_SUCCESS) {
5933 5934 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5934 5935 "Get interrupt priority failed: %d\n", rc);
5935 5936
5936 5937 /* Free already allocated intr */
5937 5938 for (y = 0; y < actual; y++)
5938 5939 (void) ddi_intr_free(Adapter->htable[y]);
5939 5940
5940 5941 kmem_free(Adapter->htable, Adapter->intr_size);
5941 5942 return (DDI_FAILURE);
5942 5943 }
5943 5944
5944 5945 /*
5945 5946 * In Legacy Interrupt mode, for PCI-Express adapters, we should
5946 5947 * use the interrupt service routine e1000g_intr_pciexpress()
5947 5948 	 * to avoid interrupt stealing when sharing an interrupt with other
5948 5949 * devices.
5949 5950 */
5950 5951 if (Adapter->shared.mac.type < e1000_82571)
5951 5952 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5952 5953 else
5953 5954 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5954 5955
5955 5956 /* Call ddi_intr_add_handler() */
5956 5957 for (x = 0; x < actual; x++) {
5957 5958 rc = ddi_intr_add_handler(Adapter->htable[x],
5958 5959 intr_handler, (caddr_t)Adapter, NULL);
5959 5960
5960 5961 if (rc != DDI_SUCCESS) {
5961 5962 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5962 5963 "Add interrupt handler failed: %d\n", rc);
5963 5964
5964 5965 /* Remove already added handler */
5965 5966 for (y = 0; y < x; y++)
5966 5967 (void) ddi_intr_remove_handler(
5967 5968 Adapter->htable[y]);
5968 5969
5969 5970 /* Free already allocated intr */
5970 5971 for (y = 0; y < actual; y++)
5971 5972 (void) ddi_intr_free(Adapter->htable[y]);
5972 5973
5973 5974 kmem_free(Adapter->htable, Adapter->intr_size);
5974 5975 return (DDI_FAILURE);
5975 5976 }
5976 5977 }
5977 5978
5978 5979 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5979 5980
5980 5981 if (rc != DDI_SUCCESS) {
5981 5982 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5982 5983 "Get interrupt cap failed: %d\n", rc);
5983 5984
5984 5985 /* Free already allocated intr */
5985 5986 for (y = 0; y < actual; y++) {
5986 5987 (void) ddi_intr_remove_handler(Adapter->htable[y]);
5987 5988 (void) ddi_intr_free(Adapter->htable[y]);
5988 5989 }
5989 5990
5990 5991 kmem_free(Adapter->htable, Adapter->intr_size);
5991 5992 return (DDI_FAILURE);
5992 5993 }
5993 5994
5994 5995 return (DDI_SUCCESS);
5995 5996 }
5996 5997
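/*
 * Tear down every interrupt handler and handle added by
 * e1000g_intr_add(), then free the handle array.
 */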
5997 5998 static int
5998 5999 e1000g_rem_intrs(struct e1000g *Adapter)
5999 6000 {
6000 6001 int x;
6001 6002 int rc;
6002 6003
6003 6004 for (x = 0; x < Adapter->intr_cnt; x++) {
6004 6005 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6005 6006 if (rc != DDI_SUCCESS) {
6006 6007 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6007 6008 "Remove intr handler failed: %d\n", rc);
6008 6009 return (DDI_FAILURE);
6009 6010 }
6010 6011
6011 6012 rc = ddi_intr_free(Adapter->htable[x]);
6012 6013 if (rc != DDI_SUCCESS) {
6013 6014 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6014 6015 "Free intr failed: %d\n", rc);
6015 6016 return (DDI_FAILURE);
6016 6017 }
6017 6018 }
6018 6019
6019 6020 kmem_free(Adapter->htable, Adapter->intr_size);
6020 6021
6021 6022 return (DDI_SUCCESS);
6022 6023 }
6023 6024
6024 6025 static int
6025 6026 e1000g_enable_intrs(struct e1000g *Adapter)
6026 6027 {
6027 6028 int x;
6028 6029 int rc;
6029 6030
6030 6031 /* Enable interrupts */
6031 6032 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6032 6033 /* Call ddi_intr_block_enable() for MSI */
6033 6034 rc = ddi_intr_block_enable(Adapter->htable,
6034 6035 Adapter->intr_cnt);
6035 6036 if (rc != DDI_SUCCESS) {
6036 6037 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6037 6038 "Enable block intr failed: %d\n", rc);
6038 6039 return (DDI_FAILURE);
6039 6040 }
6040 6041 } else {
6041 6042 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
6042 6043 for (x = 0; x < Adapter->intr_cnt; x++) {
6043 6044 rc = ddi_intr_enable(Adapter->htable[x]);
6044 6045 if (rc != DDI_SUCCESS) {
6045 6046 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6046 6047 "Enable intr failed: %d\n", rc);
6047 6048 return (DDI_FAILURE);
6048 6049 }
6049 6050 }
6050 6051 }
6051 6052
6052 6053 return (DDI_SUCCESS);
6053 6054 }
6054 6055
6055 6056 static int
6056 6057 e1000g_disable_intrs(struct e1000g *Adapter)
6057 6058 {
6058 6059 int x;
6059 6060 int rc;
6060 6061
6061 6062 /* Disable all interrupts */
6062 6063 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6063 6064 rc = ddi_intr_block_disable(Adapter->htable,
6064 6065 Adapter->intr_cnt);
6065 6066 if (rc != DDI_SUCCESS) {
6066 6067 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6067 6068 "Disable block intr failed: %d\n", rc);
6068 6069 return (DDI_FAILURE);
6069 6070 }
6070 6071 } else {
6071 6072 for (x = 0; x < Adapter->intr_cnt; x++) {
6072 6073 rc = ddi_intr_disable(Adapter->htable[x]);
6073 6074 if (rc != DDI_SUCCESS) {
6074 6075 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6075 6076 "Disable intr failed: %d\n", rc);
6076 6077 return (DDI_FAILURE);
6077 6078 }
6078 6079 }
6079 6080 }
6080 6081
6081 6082 return (DDI_SUCCESS);
6082 6083 }
6083 6084
6084 6085 /*
6085 6086  * e1000g_get_phy_state - read the PHY registers and save their state in
6086 6087  * the adapter
6086 6087 */
6087 6088 static void
6088 6089 e1000g_get_phy_state(struct e1000g *Adapter)
6089 6090 {
6090 6091 struct e1000_hw *hw = &Adapter->shared;
6091 6092
6092 6093 if (hw->phy.media_type == e1000_media_type_copper) {
6093 6094 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6094 6095 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6095 6096 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6096 6097 &Adapter->phy_an_adv);
6097 6098 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6098 6099 &Adapter->phy_an_exp);
6099 6100 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6100 6101 &Adapter->phy_ext_status);
6101 6102 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6102 6103 &Adapter->phy_1000t_ctrl);
6103 6104 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6104 6105 &Adapter->phy_1000t_status);
6105 6106 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6106 6107 &Adapter->phy_lp_able);
6107 6108
6108 6109 Adapter->param_autoneg_cap =
6109 6110 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6110 6111 Adapter->param_pause_cap =
6111 6112 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6112 6113 Adapter->param_asym_pause_cap =
6113 6114 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6114 6115 Adapter->param_1000fdx_cap =
6115 6116 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6116 6117 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6117 6118 Adapter->param_1000hdx_cap =
6118 6119 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6119 6120 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6120 6121 Adapter->param_100t4_cap =
6121 6122 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6122 6123 Adapter->param_100fdx_cap =
6123 6124 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6124 6125 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6125 6126 Adapter->param_100hdx_cap =
6126 6127 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6127 6128 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6128 6129 Adapter->param_10fdx_cap =
6129 6130 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6130 6131 Adapter->param_10hdx_cap =
6131 6132 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6132 6133
6133 6134 Adapter->param_adv_autoneg = hw->mac.autoneg;
6134 6135 Adapter->param_adv_pause =
6135 6136 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6136 6137 Adapter->param_adv_asym_pause =
6137 6138 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6138 6139 Adapter->param_adv_1000hdx =
6139 6140 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6140 6141 Adapter->param_adv_100t4 =
6141 6142 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6142 6143 if (Adapter->param_adv_autoneg == 1) {
6143 6144 Adapter->param_adv_1000fdx =
6144 6145 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6145 6146 ? 1 : 0;
6146 6147 Adapter->param_adv_100fdx =
6147 6148 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6148 6149 ? 1 : 0;
6149 6150 Adapter->param_adv_100hdx =
6150 6151 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6151 6152 ? 1 : 0;
6152 6153 Adapter->param_adv_10fdx =
6153 6154 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6154 6155 Adapter->param_adv_10hdx =
6155 6156 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6156 6157 }
6157 6158
6158 6159 Adapter->param_lp_autoneg =
6159 6160 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6160 6161 Adapter->param_lp_pause =
6161 6162 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6162 6163 Adapter->param_lp_asym_pause =
6163 6164 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6164 6165 Adapter->param_lp_1000fdx =
6165 6166 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6166 6167 Adapter->param_lp_1000hdx =
6167 6168 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6168 6169 Adapter->param_lp_100t4 =
6169 6170 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6170 6171 Adapter->param_lp_100fdx =
6171 6172 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6172 6173 Adapter->param_lp_100hdx =
6173 6174 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6174 6175 Adapter->param_lp_10fdx =
6175 6176 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6176 6177 Adapter->param_lp_10hdx =
6177 6178 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6178 6179 } else {
6179 6180 /*
6180 6181 		 * A 1 Gig fiber adapter offers only 1 Gig full duplex, so it
6181 6182 		 * can only work with a 1 Gig full-duplex link partner.
6182 6183 */
6183 6184 Adapter->param_autoneg_cap = 0;
6184 6185 Adapter->param_pause_cap = 1;
6185 6186 Adapter->param_asym_pause_cap = 1;
6186 6187 Adapter->param_1000fdx_cap = 1;
6187 6188 Adapter->param_1000hdx_cap = 0;
6188 6189 Adapter->param_100t4_cap = 0;
6189 6190 Adapter->param_100fdx_cap = 0;
6190 6191 Adapter->param_100hdx_cap = 0;
6191 6192 Adapter->param_10fdx_cap = 0;
6192 6193 Adapter->param_10hdx_cap = 0;
6193 6194
6194 6195 Adapter->param_adv_autoneg = 0;
6195 6196 Adapter->param_adv_pause = 1;
6196 6197 Adapter->param_adv_asym_pause = 1;
6197 6198 Adapter->param_adv_1000fdx = 1;
6198 6199 Adapter->param_adv_1000hdx = 0;
6199 6200 Adapter->param_adv_100t4 = 0;
6200 6201 Adapter->param_adv_100fdx = 0;
6201 6202 Adapter->param_adv_100hdx = 0;
6202 6203 Adapter->param_adv_10fdx = 0;
6203 6204 Adapter->param_adv_10hdx = 0;
6204 6205
6205 6206 Adapter->param_lp_autoneg = 0;
6206 6207 Adapter->param_lp_pause = 0;
6207 6208 Adapter->param_lp_asym_pause = 0;
6208 6209 Adapter->param_lp_1000fdx = 0;
6209 6210 Adapter->param_lp_1000hdx = 0;
6210 6211 Adapter->param_lp_100t4 = 0;
6211 6212 Adapter->param_lp_100fdx = 0;
6212 6213 Adapter->param_lp_100hdx = 0;
6213 6214 Adapter->param_lp_10fdx = 0;
6214 6215 Adapter->param_lp_10hdx = 0;
6215 6216 }
6216 6217 }
6217 6218
6218 6219 /*
6219 6220 * FMA support
6220 6221 */
6221 6222
6222 6223 int
6223 6224 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6224 6225 {
6225 6226 ddi_fm_error_t de;
6226 6227
6227 6228 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6228 6229 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6229 6230 return (de.fme_status);
6230 6231 }
6231 6232
6232 6233 int
6233 6234 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6234 6235 {
6235 6236 ddi_fm_error_t de;
6236 6237
6237 6238 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6238 6239 return (de.fme_status);
6239 6240 }
6240 6241
6241 6242 /*
6242 6243 * The IO fault service error handling callback function
6243 6244 */
6244 6245 /* ARGSUSED2 */
6245 6246 static int
6246 6247 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6247 6248 {
6248 6249 /*
6249 6250 	 * As the driver can always deal with an error in any DMA or
6250 6251 	 * access handle, we can just return the fme_status value.
6251 6252 */
6252 6253 pci_ereport_post(dip, err, NULL);
6253 6254 return (err->fme_status);
6254 6255 }
6255 6256
6256 6257 static void
6257 6258 e1000g_fm_init(struct e1000g *Adapter)
6258 6259 {
6259 6260 ddi_iblock_cookie_t iblk;
6260 6261 int fma_dma_flag;
6261 6262
6262 6263 /* Only register with IO Fault Services if we have some capability */
6263 6264 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6264 6265 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6265 6266 } else {
6266 6267 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6267 6268 }
6268 6269
6269 6270 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6270 6271 fma_dma_flag = 1;
6271 6272 } else {
6272 6273 fma_dma_flag = 0;
6273 6274 }
6274 6275
6275 6276 (void) e1000g_set_fma_flags(fma_dma_flag);
6276 6277
6277 6278 if (Adapter->fm_capabilities) {
6278 6279
6279 6280 /* Register capabilities with IO Fault Services */
6280 6281 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6281 6282
6282 6283 /*
6283 6284 * Initialize pci ereport capabilities if ereport capable
6284 6285 */
6285 6286 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6286 6287 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6287 6288 pci_ereport_setup(Adapter->dip);
6288 6289
6289 6290 /*
6290 6291 * Register error callback if error callback capable
6291 6292 */
6292 6293 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6293 6294 ddi_fm_handler_register(Adapter->dip,
6294 6295 e1000g_fm_error_cb, (void*) Adapter);
6295 6296 }
6296 6297 }
6297 6298
6298 6299 static void
6299 6300 e1000g_fm_fini(struct e1000g *Adapter)
6300 6301 {
6301 6302 /* Only unregister FMA capabilities if we registered some */
6302 6303 if (Adapter->fm_capabilities) {
6303 6304
6304 6305 /*
6305 6306 * Release any resources allocated by pci_ereport_setup()
6306 6307 */
6307 6308 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6308 6309 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6309 6310 pci_ereport_teardown(Adapter->dip);
6310 6311
6311 6312 /*
6312 6313 * Un-register error callback if error callback capable
6313 6314 */
6314 6315 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6315 6316 ddi_fm_handler_unregister(Adapter->dip);
6316 6317
6317 6318 /* Unregister from IO Fault Services */
6318 6319 mutex_enter(&e1000g_rx_detach_lock);
6319 6320 ddi_fm_fini(Adapter->dip);
6320 6321 if (Adapter->priv_dip != NULL) {
6321 6322 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6322 6323 }
6323 6324 mutex_exit(&e1000g_rx_detach_lock);
6324 6325 }
6325 6326 }
6326 6327
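/*
 * Post a device ereport of class DDI_FM_DEVICE.<detail> for this
 * instance, provided it was registered as ereport-capable in
 * e1000g_fm_init().
 */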
6327 6328 void
6328 6329 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6329 6330 {
6330 6331 uint64_t ena;
6331 6332 char buf[FM_MAX_CLASS];
6332 6333
6333 6334 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6334 6335 ena = fm_ena_generate(0, FM_ENA_FMT1);
6335 6336 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6336 6337 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6337 6338 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6338 6339 }
6339 6340 }
6340 6341
6341 6342 /*
6342 6343 * quiesce(9E) entry point.
6343 6344 *
6344 6345 * This function is called when the system is single-threaded at high
6345 6346  * PIL with preemption disabled.  Therefore, this function must not
6346 6347  * block.
6347 6348 *
6348 6349 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6349 6350 * DDI_FAILURE indicates an error condition and should almost never happen.
6350 6351 */
6351 6352 static int
6352 6353 e1000g_quiesce(dev_info_t *devinfo)
6353 6354 {
6354 6355 struct e1000g *Adapter;
6355 6356
6356 6357 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6357 6358
6358 6359 if (Adapter == NULL)
6359 6360 return (DDI_FAILURE);
6360 6361
6361 6362 e1000g_clear_all_interrupts(Adapter);
6362 6363
6363 6364 (void) e1000_reset_hw(&Adapter->shared);
6364 6365
6365 6366 /* Setup our HW Tx Head & Tail descriptor pointers */
6366 6367 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6367 6368 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6368 6369
6369 6370 /* Setup our HW Rx Head & Tail descriptor pointers */
6370 6371 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6371 6372 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6372 6373
6373 6374 return (DDI_SUCCESS);
6374 6375 }
6375 6376
6376 6377 /*
6377 6378 * synchronize the adv* and en* parameters.
6378 6379 *
6379 6380 * See comments in <sys/dld.h> for details of the *_en_*
6380 6381 * parameters. The usage of ndd for setting adv parameters will
6381 6382 * synchronize all the en parameters with the e1000g parameters,
6382 6383 * implicitly disabling any settings made via dladm.
6383 6384 */
6384 6385 static void
6385 6386 e1000g_param_sync(struct e1000g *Adapter)
6386 6387 {
6387 6388 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6388 6389 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6389 6390 Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6390 6391 Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6391 6392 Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6392 6393 Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6393 6394 }
6394 6395
6395 6396 /*
6396 6397 * e1000g_get_driver_control - tell manageability firmware that the driver
6397 6398 * has control.
6398 6399 */
6399 6400 static void
6400 6401 e1000g_get_driver_control(struct e1000_hw *hw)
6401 6402 {
6402 6403 uint32_t ctrl_ext;
6403 6404 uint32_t swsm;
6404 6405
6405 6406 /* tell manageability firmware the driver has taken over */
6406 6407 switch (hw->mac.type) {
6407 6408 case e1000_82573:
6408 6409 swsm = E1000_READ_REG(hw, E1000_SWSM);
6409 6410 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6410 6411 break;
6411 6412 case e1000_82571:
6412 6413 case e1000_82572:
6413 6414 case e1000_82574:
6414 6415 case e1000_80003es2lan:
6415 6416 case e1000_ich8lan:
6416 6417 case e1000_ich9lan:
6417 6418 case e1000_ich10lan:
6418 6419 case e1000_pchlan:
6419 6420 case e1000_pch2lan:
6420 6421 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6421 6422 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6422 6423 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6423 6424 break;
6424 6425 default:
6425 6426 /* no manageability firmware: do nothing */
6426 6427 break;
6427 6428 }
6428 6429 }
6429 6430
6430 6431 /*
6431 6432 * e1000g_release_driver_control - tell manageability firmware that the driver
6432 6433 * has released control.
6433 6434 */
6434 6435 static void
6435 6436 e1000g_release_driver_control(struct e1000_hw *hw)
6436 6437 {
6437 6438 uint32_t ctrl_ext;
6438 6439 uint32_t swsm;
6439 6440
6440 6441 /* tell manageability firmware the driver has released control */
6441 6442 switch (hw->mac.type) {
6442 6443 case e1000_82573:
6443 6444 swsm = E1000_READ_REG(hw, E1000_SWSM);
6444 6445 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6445 6446 break;
6446 6447 case e1000_82571:
6447 6448 case e1000_82572:
6448 6449 case e1000_82574:
6449 6450 case e1000_80003es2lan:
6450 6451 case e1000_ich8lan:
6451 6452 case e1000_ich9lan:
6452 6453 case e1000_ich10lan:
6453 6454 case e1000_pchlan:
6454 6455 case e1000_pch2lan:
6455 6456 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6456 6457 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6457 6458 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6458 6459 break;
6459 6460 default:
6460 6461 /* no manageability firmware: do nothing */
6461 6462 break;
6462 6463 }
6463 6464 }
6464 6465
6465 6466 /*
6466 6467 * Restore e1000g promiscuous mode.
6467 6468 */
6468 6469 static void
6469 6470 e1000g_restore_promisc(struct e1000g *Adapter)
6470 6471 {
6471 6472 if (Adapter->e1000g_promisc) {
6472 6473 uint32_t rctl;
6473 6474
6474 6475 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6475 6476 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6476 6477 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6477 6478 }
6478 6479 }
4027 lines elided