Print this page
6064 ixgbe needs X550 support
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 + * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
32 33 */
33 34
34 35 #include "ixgbe_sw.h"
35 36
36 37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38 +/* LINTED E_STATIC_UNUSED */
37 39 static char ixgbe_version[] = "ixgbe 1.1.7";
38 40
39 41 /*
40 42  * Local function prototypes
41 43 */
42 44 static int ixgbe_register_mac(ixgbe_t *);
43 45 static int ixgbe_identify_hardware(ixgbe_t *);
44 46 static int ixgbe_regs_map(ixgbe_t *);
45 47 static void ixgbe_init_properties(ixgbe_t *);
46 48 static int ixgbe_init_driver_settings(ixgbe_t *);
47 49 static void ixgbe_init_locks(ixgbe_t *);
48 50 static void ixgbe_destroy_locks(ixgbe_t *);
49 51 static int ixgbe_init(ixgbe_t *);
50 52 static int ixgbe_chip_start(ixgbe_t *);
51 53 static void ixgbe_chip_stop(ixgbe_t *);
52 54 static int ixgbe_reset(ixgbe_t *);
53 55 static void ixgbe_tx_clean(ixgbe_t *);
54 56 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 57 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 58 static int ixgbe_alloc_rings(ixgbe_t *);
57 59 static void ixgbe_free_rings(ixgbe_t *);
58 60 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 61 static void ixgbe_free_rx_data(ixgbe_t *);
60 62 static void ixgbe_setup_rings(ixgbe_t *);
61 63 static void ixgbe_setup_rx(ixgbe_t *);
62 64 static void ixgbe_setup_tx(ixgbe_t *);
63 65 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 66 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 67 static void ixgbe_setup_rss(ixgbe_t *);
66 68 static void ixgbe_setup_vmdq(ixgbe_t *);
67 69 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 70 static void ixgbe_init_unicst(ixgbe_t *);
69 71 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
70 72 static void ixgbe_setup_multicst(ixgbe_t *);
71 73 static void ixgbe_get_hw_state(ixgbe_t *);
72 74 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
73 75 static void ixgbe_get_conf(ixgbe_t *);
74 76 static void ixgbe_init_params(ixgbe_t *);
75 77 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
76 78 static void ixgbe_driver_link_check(ixgbe_t *);
77 79 static void ixgbe_sfp_check(void *);
78 80 static void ixgbe_overtemp_check(void *);
79 81 static void ixgbe_link_timer(void *);
80 82 static void ixgbe_local_timer(void *);
81 83 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
82 84 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
83 85 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
84 86 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
85 87 static boolean_t is_valid_mac_addr(uint8_t *);
86 88 static boolean_t ixgbe_stall_check(ixgbe_t *);
87 89 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
88 90 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
89 91 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
90 92 static int ixgbe_alloc_intrs(ixgbe_t *);
91 93 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
92 94 static int ixgbe_add_intr_handlers(ixgbe_t *);
93 95 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
94 96 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
95 97 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
96 98 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
97 99 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
98 100 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
99 101 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
100 102 static void ixgbe_setup_adapter_vector(ixgbe_t *);
101 103 static void ixgbe_rem_intr_handlers(ixgbe_t *);
102 104 static void ixgbe_rem_intrs(ixgbe_t *);
103 105 static int ixgbe_enable_intrs(ixgbe_t *);
104 106 static int ixgbe_disable_intrs(ixgbe_t *);
105 107 static uint_t ixgbe_intr_legacy(void *, void *);
106 108 static uint_t ixgbe_intr_msi(void *, void *);
107 109 static uint_t ixgbe_intr_msix(void *, void *);
108 110 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
109 111 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
110 112 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
111 113 static void ixgbe_get_driver_control(struct ixgbe_hw *);
112 114 static int ixgbe_addmac(void *, const uint8_t *);
113 115 static int ixgbe_remmac(void *, const uint8_t *);
114 116 static void ixgbe_release_driver_control(struct ixgbe_hw *);
115 117
116 118 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
117 119 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
118 120 static int ixgbe_resume(dev_info_t *);
119 121 static int ixgbe_suspend(dev_info_t *);
120 122 static int ixgbe_quiesce(dev_info_t *);
121 123 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
122 124 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
123 125 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
124 126 static int ixgbe_intr_cb_register(ixgbe_t *);
125 127 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
126 128
127 129 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
128 130 const void *impl_data);
129 131 static void ixgbe_fm_init(ixgbe_t *);
130 132 static void ixgbe_fm_fini(ixgbe_t *);
131 133
132 134 char *ixgbe_priv_props[] = {
133 135 "_tx_copy_thresh",
134 136 "_tx_recycle_thresh",
135 137 "_tx_overload_thresh",
136 138 "_tx_resched_thresh",
137 139 "_rx_copy_thresh",
138 140 "_rx_limit_per_intr",
139 141 "_intr_throttling",
140 142 "_adv_pause_cap",
141 143 "_adv_asym_pause_cap",
142 144 NULL
143 145 };
144 146
145 147 #define IXGBE_MAX_PRIV_PROPS \
146 148 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
147 149
148 150 static struct cb_ops ixgbe_cb_ops = {
149 151 nulldev, /* cb_open */
150 152 nulldev, /* cb_close */
151 153 nodev, /* cb_strategy */
152 154 nodev, /* cb_print */
153 155 nodev, /* cb_dump */
154 156 nodev, /* cb_read */
155 157 nodev, /* cb_write */
156 158 nodev, /* cb_ioctl */
157 159 nodev, /* cb_devmap */
158 160 nodev, /* cb_mmap */
159 161 nodev, /* cb_segmap */
160 162 nochpoll, /* cb_chpoll */
161 163 ddi_prop_op, /* cb_prop_op */
162 164 NULL, /* cb_stream */
163 165 D_MP | D_HOTPLUG, /* cb_flag */
164 166 CB_REV, /* cb_rev */
165 167 nodev, /* cb_aread */
166 168 nodev /* cb_awrite */
167 169 };
168 170
169 171 static struct dev_ops ixgbe_dev_ops = {
170 172 DEVO_REV, /* devo_rev */
171 173 0, /* devo_refcnt */
172 174 NULL, /* devo_getinfo */
173 175 nulldev, /* devo_identify */
174 176 nulldev, /* devo_probe */
175 177 ixgbe_attach, /* devo_attach */
176 178 ixgbe_detach, /* devo_detach */
177 179 nodev, /* devo_reset */
178 180 &ixgbe_cb_ops, /* devo_cb_ops */
179 181 NULL, /* devo_bus_ops */
180 182 ddi_power, /* devo_power */
181 183 ixgbe_quiesce, /* devo_quiesce */
182 184 };
183 185
184 186 static struct modldrv ixgbe_modldrv = {
185 187 &mod_driverops, /* Type of module. This one is a driver */
186 188 	ixgbe_ident, /* Description string */
187 189 &ixgbe_dev_ops /* driver ops */
188 190 };
189 191
190 192 static struct modlinkage ixgbe_modlinkage = {
191 193 MODREV_1, &ixgbe_modldrv, NULL
192 194 };
193 195
194 196 /*
195 197 * Access attributes for register mapping
196 198 */
197 199 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
198 200 DDI_DEVICE_ATTR_V1,
199 201 DDI_STRUCTURE_LE_ACC,
200 202 DDI_STRICTORDER_ACC,
201 203 DDI_FLAGERR_ACC
202 204 };
203 205
204 206 /*
205 207 * Loopback property
206 208 */
207 209 static lb_property_t lb_normal = {
208 210 normal, "normal", IXGBE_LB_NONE
209 211 };
210 212
211 213 static lb_property_t lb_mac = {
212 214 internal, "MAC", IXGBE_LB_INTERNAL_MAC
213 215 };
214 216
215 217 static lb_property_t lb_external = {
216 218 external, "External", IXGBE_LB_EXTERNAL
217 219 };
218 220
219 221 #define IXGBE_M_CALLBACK_FLAGS \
220 222 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
221 223
222 224 static mac_callbacks_t ixgbe_m_callbacks = {
223 225 IXGBE_M_CALLBACK_FLAGS,
224 226 ixgbe_m_stat,
225 227 ixgbe_m_start,
226 228 ixgbe_m_stop,
227 229 ixgbe_m_promisc,
228 230 ixgbe_m_multicst,
229 231 NULL,
230 232 NULL,
231 233 NULL,
232 234 ixgbe_m_ioctl,
233 235 ixgbe_m_getcapab,
234 236 NULL,
235 237 NULL,
236 238 ixgbe_m_setprop,
237 239 ixgbe_m_getprop,
238 240 ixgbe_m_propinfo
239 241 };
240 242
241 243 /*
242 244 * Initialize capabilities of each supported adapter type
243 245 */
244 246 static adapter_info_t ixgbe_82598eb_cap = {
245 247 64, /* maximum number of rx queues */
246 248 1, /* minimum number of rx queues */
247 249 64, /* default number of rx queues */
248 250 16, /* maximum number of rx groups */
249 251 1, /* minimum number of rx groups */
250 252 1, /* default number of rx groups */
251 253 32, /* maximum number of tx queues */
252 254 1, /* minimum number of tx queues */
253 255 8, /* default number of tx queues */
254 256 16366, /* maximum MTU size */
255 257 0xFFFF, /* maximum interrupt throttle rate */
256 258 0, /* minimum interrupt throttle rate */
257 259 200, /* default interrupt throttle rate */
258 260 18, /* maximum total msix vectors */
259 261 16, /* maximum number of ring vectors */
260 262 2, /* maximum number of other vectors */
261 263 IXGBE_EICR_LSC, /* "other" interrupt types handled */
262 264 0, /* "other" interrupt types enable mask */
263 265 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
264 266 | IXGBE_FLAG_RSS_CAPABLE
265 267 | IXGBE_FLAG_VMDQ_CAPABLE)
266 268 };
267 269
268 270 static adapter_info_t ixgbe_82599eb_cap = {
269 271 128, /* maximum number of rx queues */
270 272 1, /* minimum number of rx queues */
271 273 128, /* default number of rx queues */
272 274 64, /* maximum number of rx groups */
273 275 1, /* minimum number of rx groups */
274 276 1, /* default number of rx groups */
275 277 128, /* maximum number of tx queues */
276 278 1, /* minimum number of tx queues */
277 279 8, /* default number of tx queues */
278 280 15500, /* maximum MTU size */
279 281 0xFF8, /* maximum interrupt throttle rate */
280 282 0, /* minimum interrupt throttle rate */
281 283 200, /* default interrupt throttle rate */
282 284 64, /* maximum total msix vectors */
283 285 16, /* maximum number of ring vectors */
284 286 2, /* maximum number of other vectors */
285 287 (IXGBE_EICR_LSC
286 288 | IXGBE_EICR_GPI_SDP1
287 289 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
288 290
289 291 (IXGBE_SDP1_GPIEN
290 292 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
291 293
292 294 (IXGBE_FLAG_DCA_CAPABLE
293 295 | IXGBE_FLAG_RSS_CAPABLE
294 296 | IXGBE_FLAG_VMDQ_CAPABLE
295 297 | IXGBE_FLAG_RSC_CAPABLE
296 298 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
297 299 };
298 300
299 301 static adapter_info_t ixgbe_X540_cap = {
300 302 128, /* maximum number of rx queues */
301 303 1, /* minimum number of rx queues */
302 304 128, /* default number of rx queues */
303 305 64, /* maximum number of rx groups */
304 306 1, /* minimum number of rx groups */
305 307 1, /* default number of rx groups */
306 308 128, /* maximum number of tx queues */
↓ open down ↓ |
260 lines elided |
↑ open up ↑ |
307 309 1, /* minimum number of tx queues */
308 310 8, /* default number of tx queues */
309 311 15500, /* maximum MTU size */
310 312 0xFF8, /* maximum interrupt throttle rate */
311 313 0, /* minimum interrupt throttle rate */
312 314 200, /* default interrupt throttle rate */
313 315 64, /* maximum total msix vectors */
314 316 16, /* maximum number of ring vectors */
315 317 2, /* maximum number of other vectors */
316 318 (IXGBE_EICR_LSC
317 - | IXGBE_EICR_GPI_SDP1
318 - | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
319 + | IXGBE_EICR_GPI_SDP1_X540
320 + | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */
319 321
320 - (IXGBE_SDP1_GPIEN
321 - | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
322 + (IXGBE_SDP1_GPIEN_X540
323 + | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */
322 324
323 325 (IXGBE_FLAG_DCA_CAPABLE
324 326 | IXGBE_FLAG_RSS_CAPABLE
325 327 | IXGBE_FLAG_VMDQ_CAPABLE
326 328 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
327 329 };
328 330
331 +static adapter_info_t ixgbe_X550_cap = {
332 + 128, /* maximum number of rx queues */
333 + 1, /* minimum number of rx queues */
334 + 128, /* default number of rx queues */
335 + 64, /* maximum number of rx groups */
336 + 1, /* minimum number of rx groups */
337 + 1, /* default number of rx groups */
338 + 128, /* maximum number of tx queues */
339 + 1, /* minimum number of tx queues */
340 + 8, /* default number of tx queues */
341 + 15500, /* maximum MTU size */
342 + 0xFF8, /* maximum interrupt throttle rate */
343 + 0, /* minimum interrupt throttle rate */
344 + 200, /* default interrupt throttle rate */
345 + 64, /* maximum total msix vectors */
346 + 16, /* maximum number of ring vectors */
347 + 2, /* maximum number of other vectors */
348 + (IXGBE_EICR_LSC
349 + | IXGBE_SDP1_GPIEN_X550
350 + | IXGBE_SDP2_GPIEN_X550), /* "other" interrupt types handled */
351 +
352 + (IXGBE_SDP1_GPIEN_X550
353 + | IXGBE_SDP2_GPIEN_X550), /* "other" interrupt types enable mask */
354 +
355 + (IXGBE_FLAG_RSS_CAPABLE
356 + | IXGBE_FLAG_VMDQ_CAPABLE
357 + | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
358 +};
359 +
329 360 /*
330 361 * Module Initialization Functions.
331 362 */
332 363
333 364 int
334 365 _init(void)
335 366 {
336 367 int status;
337 368
338 369 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
339 370
340 371 status = mod_install(&ixgbe_modlinkage);
341 372
342 373 if (status != DDI_SUCCESS) {
343 374 mac_fini_ops(&ixgbe_dev_ops);
344 375 }
345 376
346 377 return (status);
347 378 }
348 379
349 380 int
350 381 _fini(void)
351 382 {
352 383 int status;
353 384
354 385 status = mod_remove(&ixgbe_modlinkage);
355 386
356 387 if (status == DDI_SUCCESS) {
357 388 mac_fini_ops(&ixgbe_dev_ops);
358 389 }
359 390
360 391 return (status);
361 392 }
362 393
363 394 int
364 395 _info(struct modinfo *modinfop)
365 396 {
366 397 int status;
367 398
368 399 status = mod_info(&ixgbe_modlinkage, modinfop);
369 400
370 401 return (status);
371 402 }
372 403
373 404 /*
374 405 * ixgbe_attach - Driver attach.
375 406 *
376 407 * This function is the device specific initialization entry
377 408 * point. This entry point is required and must be written.
378 409 * The DDI_ATTACH command must be provided in the attach entry
379 410 * point. When attach() is called with cmd set to DDI_ATTACH,
380 411 * all normal kernel services (such as kmem_alloc(9F)) are
381 412 * available for use by the driver.
382 413 *
383 414 * The attach() function will be called once for each instance
384 415 * of the device on the system with cmd set to DDI_ATTACH.
385 416 * Until attach() succeeds, the only driver entry points which
386 417 * may be called are open(9E) and getinfo(9E).
387 418 */
388 419 static int
389 420 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
390 421 {
391 422 ixgbe_t *ixgbe;
392 423 struct ixgbe_osdep *osdep;
393 424 struct ixgbe_hw *hw;
394 425 int instance;
395 426 char taskqname[32];
396 427
397 428 /*
398 429 * Check the command and perform corresponding operations
399 430 */
400 431 switch (cmd) {
401 432 default:
402 433 return (DDI_FAILURE);
403 434
404 435 case DDI_RESUME:
405 436 return (ixgbe_resume(devinfo));
406 437
407 438 case DDI_ATTACH:
408 439 break;
409 440 }
410 441
411 442 /* Get the device instance */
412 443 instance = ddi_get_instance(devinfo);
413 444
414 445 /* Allocate memory for the instance data structure */
415 446 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
416 447
417 448 ixgbe->dip = devinfo;
418 449 ixgbe->instance = instance;
419 450
420 451 hw = &ixgbe->hw;
421 452 osdep = &ixgbe->osdep;
422 453 hw->back = osdep;
423 454 osdep->ixgbe = ixgbe;
424 455
425 456 /* Attach the instance pointer to the dev_info data structure */
426 457 ddi_set_driver_private(devinfo, ixgbe);
427 458
428 459 /*
429 460 * Initialize for fma support
430 461 */
431 462 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
432 463 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
433 464 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
434 465 ixgbe_fm_init(ixgbe);
435 466 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
436 467
437 468 /*
438 469 * Map PCI config space registers
439 470 */
440 471 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
441 472 ixgbe_error(ixgbe, "Failed to map PCI configurations");
442 473 goto attach_fail;
443 474 }
444 475 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
445 476
446 477 /*
447 478 * Identify the chipset family
448 479 */
449 480 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
450 481 ixgbe_error(ixgbe, "Failed to identify hardware");
451 482 goto attach_fail;
452 483 }
453 484
454 485 /*
455 486 * Map device registers
456 487 */
457 488 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
458 489 ixgbe_error(ixgbe, "Failed to map device registers");
459 490 goto attach_fail;
460 491 }
461 492 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
462 493
463 494 /*
464 495 * Initialize driver parameters
465 496 */
466 497 ixgbe_init_properties(ixgbe);
467 498 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
468 499
469 500 /*
470 501 * Register interrupt callback
471 502 */
472 503 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
473 504 ixgbe_error(ixgbe, "Failed to register interrupt callback");
474 505 goto attach_fail;
475 506 }
476 507
477 508 /*
478 509 * Allocate interrupts
479 510 */
480 511 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
481 512 ixgbe_error(ixgbe, "Failed to allocate interrupts");
482 513 goto attach_fail;
483 514 }
484 515 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
485 516
486 517 /*
487 518 * Allocate rx/tx rings based on the ring numbers.
488 519 * The actual numbers of rx/tx rings are decided by the number of
489 520 * allocated interrupt vectors, so we should allocate the rings after
490 521 * interrupts are allocated.
491 522 */
492 523 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
493 524 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
494 525 goto attach_fail;
495 526 }
496 527 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
497 528
498 529 /*
499 530 * Map rings to interrupt vectors
500 531 */
501 532 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
502 533 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
503 534 goto attach_fail;
504 535 }
505 536
506 537 /*
507 538 * Add interrupt handlers
508 539 */
509 540 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
510 541 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
511 542 goto attach_fail;
512 543 }
513 544 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
514 545
515 546 /*
516 547 * Create a taskq for sfp-change
517 548 */
518 549 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
519 550 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
520 551 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
521 552 ixgbe_error(ixgbe, "sfp_taskq create failed");
522 553 goto attach_fail;
523 554 }
524 555 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
525 556
526 557 /*
527 558 * Create a taskq for over-temp
528 559 */
529 560 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
530 561 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
531 562 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
532 563 ixgbe_error(ixgbe, "overtemp_taskq create failed");
533 564 goto attach_fail;
534 565 }
535 566 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
536 567
537 568 /*
538 569 * Initialize driver parameters
539 570 */
540 571 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
541 572 ixgbe_error(ixgbe, "Failed to initialize driver settings");
542 573 goto attach_fail;
543 574 }
544 575
545 576 /*
546 577 * Initialize mutexes for this device.
547 578 * Do this before enabling the interrupt handler and
548 579 * register the softint to avoid the condition where
549 580 * interrupt handler can try using uninitialized mutex.
550 581 */
551 582 ixgbe_init_locks(ixgbe);
552 583 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
553 584
554 585 /*
555 586 * Initialize chipset hardware
556 587 */
557 588 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
558 589 ixgbe_error(ixgbe, "Failed to initialize adapter");
559 590 goto attach_fail;
560 591 }
561 592 ixgbe->link_check_complete = B_FALSE;
562 593 ixgbe->link_check_hrtime = gethrtime() +
563 594 (IXGBE_LINK_UP_TIME * 100000000ULL);
564 595 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
565 596
566 597 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
567 598 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
568 599 goto attach_fail;
569 600 }
570 601
571 602 /*
572 603 * Initialize statistics
573 604 */
574 605 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
575 606 ixgbe_error(ixgbe, "Failed to initialize statistics");
576 607 goto attach_fail;
577 608 }
578 609 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
579 610
580 611 /*
581 612 * Register the driver to the MAC
582 613 */
583 614 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
584 615 ixgbe_error(ixgbe, "Failed to register MAC");
585 616 goto attach_fail;
586 617 }
587 618 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
588 619 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
589 620
590 621 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
591 622 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
592 623 if (ixgbe->periodic_id == 0) {
593 624 ixgbe_error(ixgbe, "Failed to add the link check timer");
594 625 goto attach_fail;
595 626 }
596 627 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
597 628
↓ open down ↓ |
259 lines elided |
↑ open up ↑ |
598 629 /*
599 630 * Now that mutex locks are initialized, and the chip is also
600 631 * initialized, enable interrupts.
601 632 */
602 633 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
603 634 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
604 635 goto attach_fail;
605 636 }
606 637 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
607 638
608 - ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
639 + ixgbe_log(ixgbe, "%s", ixgbe_ident);
609 640 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
610 641
611 642 return (DDI_SUCCESS);
612 643
613 644 attach_fail:
614 645 ixgbe_unconfigure(devinfo, ixgbe);
615 646 return (DDI_FAILURE);
616 647 }
617 648
618 649 /*
619 650 * ixgbe_detach - Driver detach.
620 651 *
621 652 * The detach() function is the complement of the attach routine.
622 653 * If cmd is set to DDI_DETACH, detach() is used to remove the
623 654 * state associated with a given instance of a device node
624 655 * prior to the removal of that instance from the system.
625 656 *
626 657 * The detach() function will be called once for each instance
627 658 * of the device for which there has been a successful attach()
628 659 * once there are no longer any opens on the device.
629 660 *
630 661  * Interrupt routines are disabled. All memory allocated by this
631 662  * driver is freed.
632 663 */
633 664 static int
634 665 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
635 666 {
636 667 ixgbe_t *ixgbe;
637 668
638 669 /*
639 670 * Check detach command
640 671 */
641 672 switch (cmd) {
642 673 default:
643 674 return (DDI_FAILURE);
644 675
645 676 case DDI_SUSPEND:
646 677 return (ixgbe_suspend(devinfo));
647 678
648 679 case DDI_DETACH:
649 680 break;
650 681 }
651 682
652 683 /*
653 684 * Get the pointer to the driver private data structure
654 685 */
655 686 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
656 687 if (ixgbe == NULL)
657 688 return (DDI_FAILURE);
658 689
659 690 /*
660 691 * If the device is still running, it needs to be stopped first.
661 692 * This check is necessary because under some specific circumstances,
662 693 * the detach routine can be called without stopping the interface
663 694 * first.
664 695 */
665 696 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
666 697 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
667 698 mutex_enter(&ixgbe->gen_lock);
668 699 ixgbe_stop(ixgbe, B_TRUE);
669 700 mutex_exit(&ixgbe->gen_lock);
670 701 /* Disable and stop the watchdog timer */
671 702 ixgbe_disable_watchdog_timer(ixgbe);
672 703 }
673 704
674 705 /*
675 706 * Check if there are still rx buffers held by the upper layer.
676 707 * If so, fail the detach.
677 708 */
678 709 if (!ixgbe_rx_drain(ixgbe))
679 710 return (DDI_FAILURE);
680 711
681 712 /*
682 713 * Do the remaining unconfigure routines
683 714 */
684 715 ixgbe_unconfigure(devinfo, ixgbe);
685 716
686 717 return (DDI_SUCCESS);
687 718 }
688 719
689 720 /*
690 721 * quiesce(9E) entry point.
691 722 *
692 723 * This function is called when the system is single-threaded at high
693 724 * PIL with preemption disabled. Therefore, this function must not be
694 725 * blocked.
695 726 *
696 727 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
697 728 * DDI_FAILURE indicates an error condition and should almost never happen.
698 729 */
699 730 static int
700 731 ixgbe_quiesce(dev_info_t *devinfo)
701 732 {
702 733 ixgbe_t *ixgbe;
703 734 struct ixgbe_hw *hw;
704 735
705 736 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
706 737
707 738 if (ixgbe == NULL)
708 739 return (DDI_FAILURE);
709 740
710 741 hw = &ixgbe->hw;
711 742
712 743 /*
713 744 * Disable the adapter interrupts
714 745 */
715 746 ixgbe_disable_adapter_interrupts(ixgbe);
716 747
717 748 /*
718 749 * Tell firmware driver is no longer in control
719 750 */
720 751 ixgbe_release_driver_control(hw);
721 752
722 753 /*
723 754 * Reset the chipset
724 755 */
725 756 (void) ixgbe_reset_hw(hw);
726 757
727 758 /*
728 759 * Reset PHY
729 760 */
730 761 (void) ixgbe_reset_phy(hw);
731 762
732 763 return (DDI_SUCCESS);
733 764 }
734 765
735 766 static void
736 767 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
737 768 {
738 769 /*
739 770 * Disable interrupt
740 771 */
741 772 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
742 773 (void) ixgbe_disable_intrs(ixgbe);
743 774 }
744 775
745 776 /*
746 777 * remove the link check timer
747 778 */
748 779 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
749 780 if (ixgbe->periodic_id != NULL) {
750 781 ddi_periodic_delete(ixgbe->periodic_id);
751 782 ixgbe->periodic_id = NULL;
752 783 }
753 784 }
754 785
755 786 /*
756 787 * Unregister MAC
757 788 */
758 789 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
759 790 (void) mac_unregister(ixgbe->mac_hdl);
760 791 }
761 792
762 793 /*
763 794 * Free statistics
764 795 */
765 796 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
766 797 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
767 798 }
768 799
769 800 /*
770 801 * Remove interrupt handlers
771 802 */
772 803 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
773 804 ixgbe_rem_intr_handlers(ixgbe);
774 805 }
775 806
776 807 /*
777 808 * Remove taskq for sfp-status-change
778 809 */
779 810 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
780 811 ddi_taskq_destroy(ixgbe->sfp_taskq);
781 812 }
782 813
783 814 /*
784 815 * Remove taskq for over-temp
785 816 */
786 817 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
787 818 ddi_taskq_destroy(ixgbe->overtemp_taskq);
788 819 }
789 820
790 821 /*
791 822 * Remove interrupts
792 823 */
793 824 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
794 825 ixgbe_rem_intrs(ixgbe);
795 826 }
796 827
797 828 /*
798 829 * Unregister interrupt callback handler
799 830 */
800 831 (void) ddi_cb_unregister(ixgbe->cb_hdl);
801 832
802 833 /*
803 834 * Remove driver properties
804 835 */
805 836 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
806 837 (void) ddi_prop_remove_all(devinfo);
807 838 }
808 839
809 840 /*
810 841 * Stop the chipset
811 842 */
812 843 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
813 844 mutex_enter(&ixgbe->gen_lock);
814 845 ixgbe_chip_stop(ixgbe);
815 846 mutex_exit(&ixgbe->gen_lock);
816 847 }
817 848
818 849 /*
819 850 * Free register handle
820 851 */
821 852 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
822 853 if (ixgbe->osdep.reg_handle != NULL)
823 854 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
824 855 }
825 856
826 857 /*
827 858 * Free PCI config handle
828 859 */
829 860 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
830 861 if (ixgbe->osdep.cfg_handle != NULL)
831 862 pci_config_teardown(&ixgbe->osdep.cfg_handle);
832 863 }
833 864
834 865 /*
835 866 * Free locks
836 867 */
837 868 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
838 869 ixgbe_destroy_locks(ixgbe);
839 870 }
840 871
841 872 /*
842 873 * Free the rx/tx rings
843 874 */
844 875 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
845 876 ixgbe_free_rings(ixgbe);
846 877 }
847 878
848 879 /*
849 880 * Unregister FMA capabilities
850 881 */
851 882 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
852 883 ixgbe_fm_fini(ixgbe);
853 884 }
854 885
855 886 /*
856 887 * Free the driver data structure
857 888 */
858 889 kmem_free(ixgbe, sizeof (ixgbe_t));
859 890
860 891 ddi_set_driver_private(devinfo, NULL);
861 892 }
862 893
863 894 /*
864 895 * ixgbe_register_mac - Register the driver and its function pointers with
865 896 * the GLD interface.
866 897 */
867 898 static int
868 899 ixgbe_register_mac(ixgbe_t *ixgbe)
869 900 {
870 901 struct ixgbe_hw *hw = &ixgbe->hw;
871 902 mac_register_t *mac;
872 903 int status;
873 904
874 905 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
875 906 return (IXGBE_FAILURE);
876 907
877 908 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
878 909 mac->m_driver = ixgbe;
879 910 mac->m_dip = ixgbe->dip;
880 911 mac->m_src_addr = hw->mac.addr;
881 912 mac->m_callbacks = &ixgbe_m_callbacks;
882 913 mac->m_min_sdu = 0;
883 914 mac->m_max_sdu = ixgbe->default_mtu;
884 915 mac->m_margin = VLAN_TAGSZ;
885 916 mac->m_priv_props = ixgbe_priv_props;
886 917 mac->m_v12n = MAC_VIRT_LEVEL1;
887 918
888 919 status = mac_register(mac, &ixgbe->mac_hdl);
889 920
890 921 mac_free(mac);
891 922
892 923 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
893 924 }
894 925
895 926 /*
896 927 * ixgbe_identify_hardware - Identify the type of the chipset.
897 928 */
898 929 static int
899 930 ixgbe_identify_hardware(ixgbe_t *ixgbe)
900 931 {
901 932 struct ixgbe_hw *hw = &ixgbe->hw;
902 933 struct ixgbe_osdep *osdep = &ixgbe->osdep;
903 934
904 935 /*
905 936 * Get the device id
906 937 */
907 938 hw->vendor_id =
908 939 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
909 940 hw->device_id =
910 941 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
911 942 hw->revision_id =
912 943 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
913 944 hw->subsystem_device_id =
914 945 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
915 946 hw->subsystem_vendor_id =
916 947 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
917 948
918 949 /*
919 950 * Set the mac type of the adapter based on the device id
920 951 */
921 952 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
922 953 return (IXGBE_FAILURE);
923 954 }
924 955
925 956 /*
926 957 * Install adapter capabilities
927 958 */
928 959 switch (hw->mac.type) {
929 960 case ixgbe_mac_82598EB:
930 961 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
931 962 ixgbe->capab = &ixgbe_82598eb_cap;
932 963
933 964 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
934 965 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
935 966 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
936 967 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
937 968 }
938 969 break;
939 970
940 971 case ixgbe_mac_82599EB:
941 972 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
942 973 ixgbe->capab = &ixgbe_82599eb_cap;
943 974
944 975 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
945 976 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
946 977 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
947 978 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
948 979 }
949 980 break;
↓ open down ↓ |
331 lines elided |
↑ open up ↑ |
950 981
951 982 case ixgbe_mac_X540:
952 983 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
953 984 ixgbe->capab = &ixgbe_X540_cap;
954 985 /*
955 986 * For now, X540 is all set in its capab structure.
956 987 * As other X540 variants show up, things can change here.
957 988 */
958 989 break;
959 990
991 + case ixgbe_mac_X550:
992 + case ixgbe_mac_X550EM_x:
993 + IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
994 + ixgbe->capab = &ixgbe_X550_cap;
995 +
996 + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
997 + ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;
998 +
999 + break;
1000 +
960 1001 default:
961 1002 IXGBE_DEBUGLOG_1(ixgbe,
962 1003 "adapter not supported in ixgbe_identify_hardware(): %d\n",
963 1004 hw->mac.type);
964 1005 return (IXGBE_FAILURE);
965 1006 }
966 1007
967 1008 return (IXGBE_SUCCESS);
968 1009 }
969 1010
970 1011 /*
971 1012 * ixgbe_regs_map - Map the device registers.
972 1013 *
973 1014 */
974 1015 static int
975 1016 ixgbe_regs_map(ixgbe_t *ixgbe)
976 1017 {
977 1018 dev_info_t *devinfo = ixgbe->dip;
978 1019 struct ixgbe_hw *hw = &ixgbe->hw;
979 1020 struct ixgbe_osdep *osdep = &ixgbe->osdep;
980 1021 off_t mem_size;
981 1022
982 1023 /*
983 1024 * First get the size of device registers to be mapped.
984 1025 */
985 1026 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
986 1027 != DDI_SUCCESS) {
987 1028 return (IXGBE_FAILURE);
988 1029 }
989 1030
990 1031 /*
991 1032 * Call ddi_regs_map_setup() to map registers
992 1033 */
993 1034 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
994 1035 (caddr_t *)&hw->hw_addr, 0,
995 1036 mem_size, &ixgbe_regs_acc_attr,
996 1037 &osdep->reg_handle)) != DDI_SUCCESS) {
997 1038 return (IXGBE_FAILURE);
998 1039 }
999 1040
1000 1041 return (IXGBE_SUCCESS);
1001 1042 }
1002 1043
1003 1044 /*
1004 1045 * ixgbe_init_properties - Initialize driver properties.
1005 1046 */
1006 1047 static void
1007 1048 ixgbe_init_properties(ixgbe_t *ixgbe)
1008 1049 {
1009 1050 /*
1010 1051 * Get conf file properties, including link settings
1011 1052 * jumbo frames, ring number, descriptor number, etc.
1012 1053 */
1013 1054 ixgbe_get_conf(ixgbe);
1014 1055
1015 1056 ixgbe_init_params(ixgbe);
1016 1057 }
1017 1058
1018 1059 /*
1019 1060 * ixgbe_init_driver_settings - Initialize driver settings.
1020 1061 *
1021 1062 * The settings include hardware function pointers, bus information,
1022 1063 * rx/tx rings settings, link state, and any other parameters that
1023 1064 * need to be setup during driver initialization.
1024 1065 */
1025 1066 static int
1026 1067 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
1027 1068 {
1028 1069 struct ixgbe_hw *hw = &ixgbe->hw;
1029 1070 dev_info_t *devinfo = ixgbe->dip;
1030 1071 ixgbe_rx_ring_t *rx_ring;
1031 1072 ixgbe_rx_group_t *rx_group;
1032 1073 ixgbe_tx_ring_t *tx_ring;
1033 1074 uint32_t rx_size;
1034 1075 uint32_t tx_size;
1035 1076 uint32_t ring_per_group;
1036 1077 int i;
1037 1078
1038 1079 /*
1039 1080 * Initialize chipset specific hardware function pointers
1040 1081 */
1041 1082 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
1042 1083 return (IXGBE_FAILURE);
1043 1084 }
1044 1085
1045 1086 /*
1046 1087 * Get the system page size
1047 1088 */
1048 1089 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
1049 1090
1050 1091 /*
1051 1092 * Set rx buffer size
1052 1093 *
1053 1094 * The IP header alignment room is counted in the calculation.
1054 1095 * The rx buffer size is in unit of 1K that is required by the
1055 1096 * chipset hardware.
1056 1097 */
1057 1098 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
1058 1099 ixgbe->rx_buf_size = ((rx_size >> 10) +
1059 1100 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1060 1101
1061 1102 /*
1062 1103 * Set tx buffer size
1063 1104 */
1064 1105 tx_size = ixgbe->max_frame_size;
1065 1106 ixgbe->tx_buf_size = ((tx_size >> 10) +
1066 1107 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1067 1108
1068 1109 /*
1069 1110 * Initialize rx/tx rings/groups parameters
1070 1111 */
1071 1112 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1072 1113 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1073 1114 rx_ring = &ixgbe->rx_rings[i];
1074 1115 rx_ring->index = i;
1075 1116 rx_ring->ixgbe = ixgbe;
1076 1117 rx_ring->group_index = i / ring_per_group;
1077 1118 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1078 1119 }
1079 1120
1080 1121 for (i = 0; i < ixgbe->num_rx_groups; i++) {
1081 1122 rx_group = &ixgbe->rx_groups[i];
1082 1123 rx_group->index = i;
1083 1124 rx_group->ixgbe = ixgbe;
1084 1125 }
1085 1126
1086 1127 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1087 1128 tx_ring = &ixgbe->tx_rings[i];
1088 1129 tx_ring->index = i;
1089 1130 tx_ring->ixgbe = ixgbe;
1090 1131 if (ixgbe->tx_head_wb_enable)
1091 1132 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1092 1133 else
1093 1134 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1094 1135
1095 1136 tx_ring->ring_size = ixgbe->tx_ring_size;
1096 1137 tx_ring->free_list_size = ixgbe->tx_ring_size +
1097 1138 (ixgbe->tx_ring_size >> 1);
1098 1139 }
1099 1140
1100 1141 /*
1101 1142 * Initialize values of interrupt throttling rate
1102 1143 */
1103 1144 for (i = 1; i < MAX_INTR_VECTOR; i++)
1104 1145 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1105 1146
1106 1147 /*
1107 1148 * The initial link state should be "unknown"
1108 1149 */
1109 1150 ixgbe->link_state = LINK_STATE_UNKNOWN;
1110 1151
1111 1152 return (IXGBE_SUCCESS);
1112 1153 }
1113 1154
1114 1155 /*
1115 1156 * ixgbe_init_locks - Initialize locks.
1116 1157 */
1117 1158 static void
1118 1159 ixgbe_init_locks(ixgbe_t *ixgbe)
1119 1160 {
1120 1161 ixgbe_rx_ring_t *rx_ring;
1121 1162 ixgbe_tx_ring_t *tx_ring;
1122 1163 int i;
1123 1164
1124 1165 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1125 1166 rx_ring = &ixgbe->rx_rings[i];
1126 1167 mutex_init(&rx_ring->rx_lock, NULL,
1127 1168 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1128 1169 }
1129 1170
1130 1171 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1131 1172 tx_ring = &ixgbe->tx_rings[i];
1132 1173 mutex_init(&tx_ring->tx_lock, NULL,
1133 1174 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1134 1175 mutex_init(&tx_ring->recycle_lock, NULL,
1135 1176 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1136 1177 mutex_init(&tx_ring->tcb_head_lock, NULL,
1137 1178 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1138 1179 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1139 1180 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1140 1181 }
1141 1182
1142 1183 mutex_init(&ixgbe->gen_lock, NULL,
1143 1184 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1144 1185
1145 1186 mutex_init(&ixgbe->watchdog_lock, NULL,
1146 1187 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1147 1188 }
1148 1189
1149 1190 /*
1150 1191 * ixgbe_destroy_locks - Destroy locks.
1151 1192 */
1152 1193 static void
1153 1194 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1154 1195 {
1155 1196 ixgbe_rx_ring_t *rx_ring;
1156 1197 ixgbe_tx_ring_t *tx_ring;
1157 1198 int i;
1158 1199
1159 1200 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1160 1201 rx_ring = &ixgbe->rx_rings[i];
1161 1202 mutex_destroy(&rx_ring->rx_lock);
1162 1203 }
1163 1204
1164 1205 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1165 1206 tx_ring = &ixgbe->tx_rings[i];
1166 1207 mutex_destroy(&tx_ring->tx_lock);
1167 1208 mutex_destroy(&tx_ring->recycle_lock);
1168 1209 mutex_destroy(&tx_ring->tcb_head_lock);
1169 1210 mutex_destroy(&tx_ring->tcb_tail_lock);
1170 1211 }
1171 1212
1172 1213 mutex_destroy(&ixgbe->gen_lock);
1173 1214 mutex_destroy(&ixgbe->watchdog_lock);
1174 1215 }
1175 1216
1176 1217 static int
1177 1218 ixgbe_resume(dev_info_t *devinfo)
1178 1219 {
1179 1220 ixgbe_t *ixgbe;
1180 1221 int i;
1181 1222
1182 1223 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1183 1224 if (ixgbe == NULL)
1184 1225 return (DDI_FAILURE);
1185 1226
1186 1227 mutex_enter(&ixgbe->gen_lock);
1187 1228
1188 1229 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1189 1230 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1190 1231 mutex_exit(&ixgbe->gen_lock);
1191 1232 return (DDI_FAILURE);
1192 1233 }
1193 1234
1194 1235 /*
1195 1236 * Enable and start the watchdog timer
1196 1237 */
1197 1238 ixgbe_enable_watchdog_timer(ixgbe);
1198 1239 }
1199 1240
1200 1241 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1201 1242
1202 1243 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1203 1244 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1204 1245 mac_tx_ring_update(ixgbe->mac_hdl,
1205 1246 ixgbe->tx_rings[i].ring_handle);
1206 1247 }
1207 1248 }
1208 1249
1209 1250 mutex_exit(&ixgbe->gen_lock);
1210 1251
1211 1252 return (DDI_SUCCESS);
1212 1253 }
1213 1254
1214 1255 static int
1215 1256 ixgbe_suspend(dev_info_t *devinfo)
1216 1257 {
1217 1258 ixgbe_t *ixgbe;
1218 1259
1219 1260 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1220 1261 if (ixgbe == NULL)
1221 1262 return (DDI_FAILURE);
1222 1263
1223 1264 mutex_enter(&ixgbe->gen_lock);
1224 1265
1225 1266 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1226 1267 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1227 1268 mutex_exit(&ixgbe->gen_lock);
1228 1269 return (DDI_SUCCESS);
1229 1270 }
1230 1271 ixgbe_stop(ixgbe, B_FALSE);
1231 1272
1232 1273 mutex_exit(&ixgbe->gen_lock);
1233 1274
1234 1275 /*
1235 1276 * Disable and stop the watchdog timer
1236 1277 */
1237 1278 ixgbe_disable_watchdog_timer(ixgbe);
1238 1279
1239 1280 return (DDI_SUCCESS);
1240 1281 }
1241 1282
1242 1283 /*
1243 1284 * ixgbe_init - Initialize the device.
1244 1285 */
1245 1286 static int
1246 1287 ixgbe_init(ixgbe_t *ixgbe)
1247 1288 {
1248 1289 struct ixgbe_hw *hw = &ixgbe->hw;
1249 1290 u8 pbanum[IXGBE_PBANUM_LENGTH];
1250 1291
1251 1292 mutex_enter(&ixgbe->gen_lock);
1252 1293
1253 1294 /*
1254 1295 * Reset chipset to put the hardware in a known state
1255 1296 * before we try to do anything with the eeprom.
1256 1297 */
1257 1298 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1258 1299 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1259 1300 goto init_fail;
1260 1301 }
1261 1302
1262 1303 /*
1263 1304 * Need to init eeprom before validating the checksum.
1264 1305 */
1265 1306 if (ixgbe_init_eeprom_params(hw) < 0) {
1266 1307 ixgbe_error(ixgbe,
1267 1308 "Unable to intitialize the eeprom interface.");
1268 1309 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1269 1310 goto init_fail;
1270 1311 }
1271 1312
1272 1313 /*
1273 1314 * NVM validation
1274 1315 */
1275 1316 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1276 1317 /*
1277 1318 * Some PCI-E parts fail the first check due to
1278 1319 * the link being in sleep state. Call it again,
1279 1320 * if it fails a second time it's a real issue.
1280 1321 */
1281 1322 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1282 1323 ixgbe_error(ixgbe,
1283 1324 "Invalid NVM checksum. Please contact "
1284 1325 "the vendor to update the NVM.");
1285 1326 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1286 1327 goto init_fail;
1287 1328 }
1288 1329 }
1289 1330
1290 1331 /*
1291 1332 * Setup default flow control thresholds - enable/disable
1292 1333 * & flow control type is controlled by ixgbe.conf
1293 1334 */
1294 1335 hw->fc.high_water[0] = DEFAULT_FCRTH;
1295 1336 hw->fc.low_water[0] = DEFAULT_FCRTL;
1296 1337 hw->fc.pause_time = DEFAULT_FCPAUSE;
1297 1338 hw->fc.send_xon = B_TRUE;
1298 1339
1299 1340 /*
1300 1341 * Initialize link settings
1301 1342 */
1302 1343 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1303 1344
1304 1345 /*
1305 1346 * Initialize the chipset hardware
1306 1347 */
1307 1348 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1308 1349 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1309 1350 goto init_fail;
1310 1351 }
1311 1352
1312 1353 /*
1313 1354 * Read identifying information and place in devinfo.
1314 1355 */
1315 1356 pbanum[0] = '\0';
1316 1357 (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
1317 1358 if (*pbanum != '\0') {
1318 1359 (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
1319 1360 "printed-board-assembly", (char *)pbanum);
1320 1361 }
1321 1362
1322 1363 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1323 1364 goto init_fail;
1324 1365 }
1325 1366
1326 1367 mutex_exit(&ixgbe->gen_lock);
1327 1368 return (IXGBE_SUCCESS);
1328 1369
1329 1370 init_fail:
1330 1371 /*
1331 1372 * Reset PHY
1332 1373 */
1333 1374 (void) ixgbe_reset_phy(hw);
1334 1375
1335 1376 mutex_exit(&ixgbe->gen_lock);
1336 1377 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1337 1378 return (IXGBE_FAILURE);
1338 1379 }
1339 1380
1340 1381 /*
1341 1382 * ixgbe_chip_start - Initialize and start the chipset hardware.
1342 1383 */
1343 1384 static int
1344 1385 ixgbe_chip_start(ixgbe_t *ixgbe)
1345 1386 {
1346 1387 struct ixgbe_hw *hw = &ixgbe->hw;
1347 1388 int ret_val, i;
1348 1389
1349 1390 ASSERT(mutex_owned(&ixgbe->gen_lock));
1350 1391
1351 1392 /*
1352 1393 * Get the mac address
1353 1394 * This function should handle SPARC case correctly.
1354 1395 */
1355 1396 if (!ixgbe_find_mac_address(ixgbe)) {
1356 1397 ixgbe_error(ixgbe, "Failed to get the mac address");
1357 1398 return (IXGBE_FAILURE);
1358 1399 }
1359 1400
1360 1401 /*
1361 1402 * Validate the mac address
1362 1403 */
1363 1404 (void) ixgbe_init_rx_addrs(hw);
1364 1405 if (!is_valid_mac_addr(hw->mac.addr)) {
1365 1406 ixgbe_error(ixgbe, "Invalid mac address");
1366 1407 return (IXGBE_FAILURE);
1367 1408 }
1368 1409
1369 1410 /*
1370 1411 * Configure/Initialize hardware
1371 1412 */
1372 1413 ret_val = ixgbe_init_hw(hw);
1373 1414 if (ret_val != IXGBE_SUCCESS) {
1374 1415 if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1375 1416 ixgbe_error(ixgbe,
1376 1417 "This 82599 device is pre-release and contains"
1377 1418 " outdated firmware, please contact your hardware"
1378 1419 " vendor for a replacement.");
1379 1420 } else {
1380 1421 ixgbe_error(ixgbe, "Failed to initialize hardware");
1381 1422 return (IXGBE_FAILURE);
1382 1423 }
1383 1424 }
1384 1425
1385 1426 /*
1386 1427 * Re-enable relaxed ordering for performance. It is disabled
1387 1428 * by default in the hardware init.
1388 1429 */
1389 1430 if (ixgbe->relax_order_enable == B_TRUE)
1390 1431 ixgbe_enable_relaxed_ordering(hw);
1391 1432
1392 1433 /*
1393 1434 * Setup adapter interrupt vectors
1394 1435 */
1395 1436 ixgbe_setup_adapter_vector(ixgbe);
1396 1437
1397 1438 /*
1398 1439 * Initialize unicast addresses.
1399 1440 */
1400 1441 ixgbe_init_unicst(ixgbe);
1401 1442
1402 1443 /*
1403 1444 * Setup and initialize the mctable structures.
1404 1445 */
1405 1446 ixgbe_setup_multicst(ixgbe);
1406 1447
1407 1448 /*
1408 1449 * Set interrupt throttling rate
1409 1450 */
1410 1451 for (i = 0; i < ixgbe->intr_cnt; i++) {
1411 1452 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1412 1453 }
1413 1454
1414 1455 /*
1415 1456 * Save the state of the phy
1416 1457 */
1417 1458 ixgbe_get_hw_state(ixgbe);
1418 1459
1419 1460 /*
1420 1461 * Make sure driver has control
1421 1462 */
1422 1463 ixgbe_get_driver_control(hw);
1423 1464
1424 1465 return (IXGBE_SUCCESS);
1425 1466 }
1426 1467
1427 1468 /*
1428 1469 * ixgbe_chip_stop - Stop the chipset hardware
1429 1470 */
1430 1471 static void
1431 1472 ixgbe_chip_stop(ixgbe_t *ixgbe)
1432 1473 {
1433 1474 struct ixgbe_hw *hw = &ixgbe->hw;
1434 1475
1435 1476 ASSERT(mutex_owned(&ixgbe->gen_lock));
1436 1477
1437 1478 /*
1438 1479 * Tell firmware driver is no longer in control
1439 1480 */
1440 1481 ixgbe_release_driver_control(hw);
1441 1482
1442 1483 /*
1443 1484 * Reset the chipset
1444 1485 */
1445 1486 (void) ixgbe_reset_hw(hw);
1446 1487
1447 1488 /*
1448 1489 * Reset PHY
1449 1490 */
1450 1491 (void) ixgbe_reset_phy(hw);
1451 1492 }
1452 1493
1453 1494 /*
1454 1495 * ixgbe_reset - Reset the chipset and re-start the driver.
1455 1496 *
1456 1497 * It involves stopping and re-starting the chipset,
1457 1498 * and re-configuring the rx/tx rings.
1458 1499 */
1459 1500 static int
1460 1501 ixgbe_reset(ixgbe_t *ixgbe)
1461 1502 {
1462 1503 int i;
1463 1504
1464 1505 /*
1465 1506 * Disable and stop the watchdog timer
1466 1507 */
1467 1508 ixgbe_disable_watchdog_timer(ixgbe);
1468 1509
1469 1510 mutex_enter(&ixgbe->gen_lock);
1470 1511
1471 1512 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1472 1513 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1473 1514
1474 1515 ixgbe_stop(ixgbe, B_FALSE);
1475 1516
1476 1517 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1477 1518 mutex_exit(&ixgbe->gen_lock);
1478 1519 return (IXGBE_FAILURE);
1479 1520 }
1480 1521
1481 1522 /*
1482 1523 * After resetting, need to recheck the link status.
1483 1524 */
1484 1525 ixgbe->link_check_complete = B_FALSE;
1485 1526 ixgbe->link_check_hrtime = gethrtime() +
1486 1527 (IXGBE_LINK_UP_TIME * 100000000ULL);
1487 1528
1488 1529 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1489 1530
1490 1531 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1491 1532 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1492 1533 mac_tx_ring_update(ixgbe->mac_hdl,
1493 1534 ixgbe->tx_rings[i].ring_handle);
1494 1535 }
1495 1536 }
1496 1537
1497 1538 mutex_exit(&ixgbe->gen_lock);
1498 1539
1499 1540 /*
1500 1541 * Enable and start the watchdog timer
1501 1542 */
1502 1543 ixgbe_enable_watchdog_timer(ixgbe);
1503 1544
1504 1545 return (IXGBE_SUCCESS);
1505 1546 }
1506 1547
1507 1548 /*
1508 1549 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1509 1550 */
1510 1551 static void
1511 1552 ixgbe_tx_clean(ixgbe_t *ixgbe)
1512 1553 {
1513 1554 ixgbe_tx_ring_t *tx_ring;
1514 1555 tx_control_block_t *tcb;
1515 1556 link_list_t pending_list;
1516 1557 uint32_t desc_num;
1517 1558 int i, j;
1518 1559
1519 1560 LINK_LIST_INIT(&pending_list);
1520 1561
1521 1562 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1522 1563 tx_ring = &ixgbe->tx_rings[i];
1523 1564
1524 1565 mutex_enter(&tx_ring->recycle_lock);
1525 1566
1526 1567 /*
1527 1568 * Clean the pending tx data - the pending packets in the
1528 1569 * work_list that have no chances to be transmitted again.
1529 1570 *
1530 1571 * We must ensure the chipset is stopped or the link is down
1531 1572 * before cleaning the transmit packets.
1532 1573 */
1533 1574 desc_num = 0;
1534 1575 for (j = 0; j < tx_ring->ring_size; j++) {
1535 1576 tcb = tx_ring->work_list[j];
1536 1577 if (tcb != NULL) {
1537 1578 desc_num += tcb->desc_num;
1538 1579
1539 1580 tx_ring->work_list[j] = NULL;
1540 1581
1541 1582 ixgbe_free_tcb(tcb);
1542 1583
1543 1584 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1544 1585 }
1545 1586 }
1546 1587
1547 1588 if (desc_num > 0) {
1548 1589 atomic_add_32(&tx_ring->tbd_free, desc_num);
1549 1590 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1550 1591
1551 1592 /*
1552 1593 * Reset the head and tail pointers of the tbd ring;
1553 1594 * Reset the writeback head if it's enable.
1554 1595 */
1555 1596 tx_ring->tbd_head = 0;
1556 1597 tx_ring->tbd_tail = 0;
1557 1598 if (ixgbe->tx_head_wb_enable)
1558 1599 *tx_ring->tbd_head_wb = 0;
1559 1600
1560 1601 IXGBE_WRITE_REG(&ixgbe->hw,
1561 1602 IXGBE_TDH(tx_ring->index), 0);
1562 1603 IXGBE_WRITE_REG(&ixgbe->hw,
1563 1604 IXGBE_TDT(tx_ring->index), 0);
1564 1605 }
1565 1606
1566 1607 mutex_exit(&tx_ring->recycle_lock);
1567 1608
1568 1609 /*
1569 1610 * Add the tx control blocks in the pending list to
1570 1611 * the free list.
1571 1612 */
1572 1613 ixgbe_put_free_list(tx_ring, &pending_list);
1573 1614 }
1574 1615 }
1575 1616
1576 1617 /*
1577 1618 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1578 1619 * transmitted.
1579 1620 */
1580 1621 static boolean_t
1581 1622 ixgbe_tx_drain(ixgbe_t *ixgbe)
1582 1623 {
1583 1624 ixgbe_tx_ring_t *tx_ring;
1584 1625 boolean_t done;
1585 1626 int i, j;
1586 1627
1587 1628 /*
1588 1629 * Wait for a specific time to allow pending tx packets
1589 1630 * to be transmitted.
1590 1631 *
1591 1632 * Check the counter tbd_free to see if transmission is done.
1592 1633 * No lock protection is needed here.
1593 1634 *
1594 1635 * Return B_TRUE if all pending packets have been transmitted;
1595 1636 * Otherwise return B_FALSE;
1596 1637 */
1597 1638 for (i = 0; i < TX_DRAIN_TIME; i++) {
1598 1639
1599 1640 done = B_TRUE;
1600 1641 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1601 1642 tx_ring = &ixgbe->tx_rings[j];
1602 1643 done = done &&
1603 1644 (tx_ring->tbd_free == tx_ring->ring_size);
1604 1645 }
1605 1646
1606 1647 if (done)
1607 1648 break;
1608 1649
1609 1650 msec_delay(1);
1610 1651 }
1611 1652
1612 1653 return (done);
1613 1654 }
1614 1655
1615 1656 /*
1616 1657 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1617 1658 */
1618 1659 static boolean_t
1619 1660 ixgbe_rx_drain(ixgbe_t *ixgbe)
1620 1661 {
1621 1662 boolean_t done = B_TRUE;
1622 1663 int i;
1623 1664
1624 1665 /*
1625 1666 * Polling the rx free list to check if those rx buffers held by
1626 1667 * the upper layer are released.
1627 1668 *
1628 1669 * Check the counter rcb_free to see if all pending buffers are
1629 1670 * released. No lock protection is needed here.
1630 1671 *
1631 1672 * Return B_TRUE if all pending buffers have been released;
1632 1673 * Otherwise return B_FALSE;
1633 1674 */
1634 1675 for (i = 0; i < RX_DRAIN_TIME; i++) {
1635 1676 done = (ixgbe->rcb_pending == 0);
1636 1677
1637 1678 if (done)
1638 1679 break;
1639 1680
1640 1681 msec_delay(1);
1641 1682 }
1642 1683
1643 1684 return (done);
1644 1685 }
1645 1686
1646 1687 /*
1647 1688 * ixgbe_start - Start the driver/chipset.
1648 1689 */
1649 1690 int
1650 1691 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1651 1692 {
1652 1693 int i;
1653 1694
1654 1695 ASSERT(mutex_owned(&ixgbe->gen_lock));
1655 1696
1656 1697 if (alloc_buffer) {
1657 1698 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1658 1699 ixgbe_error(ixgbe,
1659 1700 "Failed to allocate software receive rings");
1660 1701 return (IXGBE_FAILURE);
1661 1702 }
1662 1703
1663 1704 /* Allocate buffers for all the rx/tx rings */
1664 1705 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1665 1706 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1666 1707 return (IXGBE_FAILURE);
1667 1708 }
1668 1709
1669 1710 ixgbe->tx_ring_init = B_TRUE;
1670 1711 } else {
1671 1712 ixgbe->tx_ring_init = B_FALSE;
1672 1713 }
1673 1714
1674 1715 for (i = 0; i < ixgbe->num_rx_rings; i++)
1675 1716 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1676 1717 for (i = 0; i < ixgbe->num_tx_rings; i++)
1677 1718 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1678 1719
1679 1720 /*
1680 1721 * Start the chipset hardware
1681 1722 */
1682 1723 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1683 1724 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1684 1725 goto start_failure;
1685 1726 }
1686 1727
1687 1728 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1688 1729 goto start_failure;
1689 1730 }
1690 1731
1691 1732 /*
1692 1733 * Setup the rx/tx rings
1693 1734 */
1694 1735 ixgbe_setup_rings(ixgbe);
1695 1736
1696 1737 /*
1697 1738 * ixgbe_start() will be called when resetting, however if reset
1698 1739 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1699 1740 * before enabling the interrupts.
1700 1741 */
1701 1742 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1702 1743 | IXGBE_STALL| IXGBE_OVERTEMP));
1703 1744
1704 1745 /*
1705 1746 * Enable adapter interrupts
1706 1747 * The interrupts must be enabled after the driver state is START
1707 1748 */
1708 1749 ixgbe_enable_adapter_interrupts(ixgbe);
1709 1750
1710 1751 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1711 1752 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1712 1753 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1713 1754 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1714 1755
1715 1756 return (IXGBE_SUCCESS);
1716 1757
1717 1758 start_failure:
1718 1759 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1719 1760 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1720 1761 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1721 1762 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1722 1763
1723 1764 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1724 1765
1725 1766 return (IXGBE_FAILURE);
1726 1767 }
1727 1768
1728 1769 /*
1729 1770 * ixgbe_stop - Stop the driver/chipset.
1730 1771 */
1731 1772 void
1732 1773 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1733 1774 {
1734 1775 int i;
1735 1776
1736 1777 ASSERT(mutex_owned(&ixgbe->gen_lock));
1737 1778
1738 1779 /*
1739 1780 * Disable the adapter interrupts
1740 1781 */
1741 1782 ixgbe_disable_adapter_interrupts(ixgbe);
1742 1783
1743 1784 /*
1744 1785 * Drain the pending tx packets
1745 1786 */
1746 1787 (void) ixgbe_tx_drain(ixgbe);
1747 1788
1748 1789 for (i = 0; i < ixgbe->num_rx_rings; i++)
1749 1790 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1750 1791 for (i = 0; i < ixgbe->num_tx_rings; i++)
1751 1792 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1752 1793
1753 1794 /*
1754 1795 * Stop the chipset hardware
1755 1796 */
1756 1797 ixgbe_chip_stop(ixgbe);
1757 1798
1758 1799 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1759 1800 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1760 1801 }
1761 1802
1762 1803 /*
1763 1804 * Clean the pending tx data/resources
1764 1805 */
1765 1806 ixgbe_tx_clean(ixgbe);
1766 1807
1767 1808 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1768 1809 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1769 1810 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1770 1811 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1771 1812
1772 1813 if (ixgbe->link_state == LINK_STATE_UP) {
1773 1814 ixgbe->link_state = LINK_STATE_UNKNOWN;
1774 1815 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1775 1816 }
1776 1817
1777 1818 if (free_buffer) {
1778 1819 /*
1779 1820 * Release the DMA/memory resources of rx/tx rings
1780 1821 */
1781 1822 ixgbe_free_dma(ixgbe);
1782 1823 ixgbe_free_rx_data(ixgbe);
1783 1824 }
1784 1825 }
1785 1826
1786 1827 /*
1787 1828 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1788 1829 */
1789 1830 /* ARGSUSED */
1790 1831 static int
1791 1832 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1792 1833 void *arg1, void *arg2)
1793 1834 {
1794 1835 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1795 1836
1796 1837 switch (cbaction) {
1797 1838 /* IRM callback */
1798 1839 int count;
1799 1840 case DDI_CB_INTR_ADD:
1800 1841 case DDI_CB_INTR_REMOVE:
1801 1842 count = (int)(uintptr_t)cbarg;
1802 1843 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1803 1844 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1804 1845 int, ixgbe->intr_cnt);
1805 1846 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1806 1847 DDI_SUCCESS) {
1807 1848 ixgbe_error(ixgbe,
1808 1849 "IRM CB: Failed to adjust interrupts");
1809 1850 goto cb_fail;
1810 1851 }
1811 1852 break;
1812 1853 default:
1813 1854 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1814 1855 cbaction);
1815 1856 return (DDI_ENOTSUP);
1816 1857 }
1817 1858 return (DDI_SUCCESS);
1818 1859 cb_fail:
1819 1860 return (DDI_FAILURE);
1820 1861 }
1821 1862
1822 1863 /*
1823 1864 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1824 1865 */
1825 1866 static int
1826 1867 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1827 1868 {
1828 1869 int i, rc, actual;
1829 1870
1830 1871 if (count == 0)
1831 1872 return (DDI_SUCCESS);
1832 1873
1833 1874 if ((cbaction == DDI_CB_INTR_ADD &&
1834 1875 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1835 1876 (cbaction == DDI_CB_INTR_REMOVE &&
1836 1877 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1837 1878 return (DDI_FAILURE);
1838 1879
1839 1880 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1840 1881 return (DDI_FAILURE);
1841 1882 }
1842 1883
1843 1884 for (i = 0; i < ixgbe->num_rx_rings; i++)
1844 1885 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1845 1886 for (i = 0; i < ixgbe->num_tx_rings; i++)
1846 1887 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1847 1888
1848 1889 mutex_enter(&ixgbe->gen_lock);
1849 1890 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1850 1891 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1851 1892 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1852 1893 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1853 1894
1854 1895 ixgbe_stop(ixgbe, B_FALSE);
1855 1896 /*
1856 1897 * Disable interrupts
1857 1898 */
1858 1899 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1859 1900 rc = ixgbe_disable_intrs(ixgbe);
1860 1901 ASSERT(rc == IXGBE_SUCCESS);
1861 1902 }
1862 1903 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1863 1904
1864 1905 /*
1865 1906 * Remove interrupt handlers
1866 1907 */
1867 1908 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1868 1909 ixgbe_rem_intr_handlers(ixgbe);
1869 1910 }
1870 1911 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1871 1912
1872 1913 /*
1873 1914 * Clear vect_map
1874 1915 */
1875 1916 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1876 1917 switch (cbaction) {
1877 1918 case DDI_CB_INTR_ADD:
1878 1919 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1879 1920 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1880 1921 DDI_INTR_ALLOC_NORMAL);
1881 1922 if (rc != DDI_SUCCESS || actual != count) {
1882 1923 ixgbe_log(ixgbe, "Adjust interrupts failed."
1883 1924 "return: %d, irm cb size: %d, actual: %d",
1884 1925 rc, count, actual);
1885 1926 goto intr_adjust_fail;
1886 1927 }
1887 1928 ixgbe->intr_cnt += count;
1888 1929 break;
1889 1930
1890 1931 case DDI_CB_INTR_REMOVE:
1891 1932 for (i = ixgbe->intr_cnt - count;
1892 1933 i < ixgbe->intr_cnt; i ++) {
1893 1934 rc = ddi_intr_free(ixgbe->htable[i]);
1894 1935 ixgbe->htable[i] = NULL;
1895 1936 if (rc != DDI_SUCCESS) {
1896 1937 ixgbe_log(ixgbe, "Adjust interrupts failed."
1897 1938 "return: %d, irm cb size: %d, actual: %d",
1898 1939 rc, count, actual);
1899 1940 goto intr_adjust_fail;
1900 1941 }
1901 1942 }
1902 1943 ixgbe->intr_cnt -= count;
1903 1944 break;
1904 1945 }
1905 1946
1906 1947 /*
1907 1948 * Get priority for first vector, assume remaining are all the same
1908 1949 */
1909 1950 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1910 1951 if (rc != DDI_SUCCESS) {
1911 1952 ixgbe_log(ixgbe,
1912 1953 "Get interrupt priority failed: %d", rc);
1913 1954 goto intr_adjust_fail;
1914 1955 }
1915 1956 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1916 1957 if (rc != DDI_SUCCESS) {
1917 1958 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1918 1959 goto intr_adjust_fail;
1919 1960 }
1920 1961 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1921 1962
1922 1963 /*
1923 1964 * Map rings to interrupt vectors
1924 1965 */
1925 1966 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1926 1967 ixgbe_error(ixgbe,
1927 1968 "IRM CB: Failed to map interrupts to vectors");
1928 1969 goto intr_adjust_fail;
1929 1970 }
1930 1971
1931 1972 /*
1932 1973 * Add interrupt handlers
1933 1974 */
1934 1975 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1935 1976 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1936 1977 goto intr_adjust_fail;
1937 1978 }
1938 1979 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1939 1980
1940 1981 /*
1941 1982 * Now that mutex locks are initialized, and the chip is also
1942 1983 * initialized, enable interrupts.
1943 1984 */
1944 1985 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1945 1986 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1946 1987 goto intr_adjust_fail;
1947 1988 }
1948 1989 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1949 1990 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1950 1991 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1951 1992 goto intr_adjust_fail;
1952 1993 }
1953 1994 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1954 1995 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1955 1996 ixgbe->ixgbe_state |= IXGBE_STARTED;
1956 1997 mutex_exit(&ixgbe->gen_lock);
1957 1998
1958 1999 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1959 2000 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1960 2001 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1961 2002 }
1962 2003 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1963 2004 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1964 2005 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1965 2006 }
1966 2007
1967 2008 /* Wakeup all Tx rings */
1968 2009 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1969 2010 mac_tx_ring_update(ixgbe->mac_hdl,
1970 2011 ixgbe->tx_rings[i].ring_handle);
1971 2012 }
1972 2013
1973 2014 IXGBE_DEBUGLOG_3(ixgbe,
1974 2015 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1975 2016 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1976 2017 return (DDI_SUCCESS);
1977 2018
1978 2019 intr_adjust_fail:
1979 2020 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1980 2021 mutex_exit(&ixgbe->gen_lock);
1981 2022 return (DDI_FAILURE);
1982 2023 }
1983 2024
1984 2025 /*
1985 2026 * ixgbe_intr_cb_register - Register interrupt callback function.
1986 2027 */
1987 2028 static int
1988 2029 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1989 2030 {
1990 2031 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1991 2032 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1992 2033 return (IXGBE_FAILURE);
1993 2034 }
1994 2035 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1995 2036 return (IXGBE_SUCCESS);
1996 2037 }
1997 2038
1998 2039 /*
1999 2040 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2000 2041 */
2001 2042 static int
2002 2043 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2003 2044 {
2004 2045 /*
2005 2046 * Allocate memory space for rx rings
2006 2047 */
2007 2048 ixgbe->rx_rings = kmem_zalloc(
2008 2049 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2009 2050 KM_NOSLEEP);
2010 2051
2011 2052 if (ixgbe->rx_rings == NULL) {
2012 2053 return (IXGBE_FAILURE);
2013 2054 }
2014 2055
2015 2056 /*
2016 2057 * Allocate memory space for tx rings
2017 2058 */
2018 2059 ixgbe->tx_rings = kmem_zalloc(
2019 2060 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2020 2061 KM_NOSLEEP);
2021 2062
2022 2063 if (ixgbe->tx_rings == NULL) {
2023 2064 kmem_free(ixgbe->rx_rings,
2024 2065 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2025 2066 ixgbe->rx_rings = NULL;
2026 2067 return (IXGBE_FAILURE);
2027 2068 }
2028 2069
2029 2070 /*
2030 2071 * Allocate memory space for rx ring groups
2031 2072 */
2032 2073 ixgbe->rx_groups = kmem_zalloc(
2033 2074 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2034 2075 KM_NOSLEEP);
2035 2076
2036 2077 if (ixgbe->rx_groups == NULL) {
2037 2078 kmem_free(ixgbe->rx_rings,
2038 2079 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2039 2080 kmem_free(ixgbe->tx_rings,
2040 2081 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2041 2082 ixgbe->rx_rings = NULL;
2042 2083 ixgbe->tx_rings = NULL;
2043 2084 return (IXGBE_FAILURE);
2044 2085 }
2045 2086
2046 2087 return (IXGBE_SUCCESS);
2047 2088 }
2048 2089
2049 2090 /*
2050 2091 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2051 2092 */
2052 2093 static void
2053 2094 ixgbe_free_rings(ixgbe_t *ixgbe)
2054 2095 {
2055 2096 if (ixgbe->rx_rings != NULL) {
2056 2097 kmem_free(ixgbe->rx_rings,
2057 2098 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2058 2099 ixgbe->rx_rings = NULL;
2059 2100 }
2060 2101
2061 2102 if (ixgbe->tx_rings != NULL) {
2062 2103 kmem_free(ixgbe->tx_rings,
2063 2104 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2064 2105 ixgbe->tx_rings = NULL;
2065 2106 }
2066 2107
2067 2108 if (ixgbe->rx_groups != NULL) {
2068 2109 kmem_free(ixgbe->rx_groups,
2069 2110 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2070 2111 ixgbe->rx_groups = NULL;
2071 2112 }
2072 2113 }
2073 2114
2074 2115 static int
2075 2116 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2076 2117 {
2077 2118 ixgbe_rx_ring_t *rx_ring;
2078 2119 int i;
2079 2120
2080 2121 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2081 2122 rx_ring = &ixgbe->rx_rings[i];
2082 2123 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2083 2124 goto alloc_rx_rings_failure;
2084 2125 }
2085 2126 return (IXGBE_SUCCESS);
2086 2127
2087 2128 alloc_rx_rings_failure:
2088 2129 ixgbe_free_rx_data(ixgbe);
2089 2130 return (IXGBE_FAILURE);
2090 2131 }
2091 2132
/*
 * ixgbe_free_rx_data - Release the receive data of all rx rings.
 *
 * Each ring's rx_data is marked IXGBE_RX_STOPPED; it is freed here only
 * when rcb_pending is zero (presumably no rx buffers are still held
 * outside the driver — TODO confirm against the rcb recycle path).
 * Otherwise rx_data is left in place, flagged, for later cleanup.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		/* rx_pending_lock guards rx_data and rcb_pending */
		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			/* Free immediately only if nothing is outstanding */
			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
2117 2158
/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 *
 * Thin dispatcher: all real work happens in ixgbe_setup_rx() and
 * ixgbe_setup_tx().
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}
2135 2176
/*
 * ixgbe_setup_rx_ring - Program one rx ring's hardware registers.
 *
 * Seeds the descriptor ring with the DMA addresses of the ring's
 * pre-allocated rx buffers, then programs length, base address,
 * head/tail, RXDCTL, RDRXCTL (82599/X540/X550 family) and SRRCTL.
 * Caller must hold both rx_ring->rx_lock and ixgbe->gen_lock.
 */
static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc	*rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Point every descriptor at its control block's DMA buffer;
	 * hdr_addr is cleared (header split is not used here).
	 */
	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_data->work_list[i];
		rbd = &rx_data->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
	    rx_data->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);

	rx_data->rbd_next = 0;
	rx_data->lro_first = 0;

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */

	/* Not a valid value for 82599, X540 or X550 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg_val |= 0x0020;	/* pthresh */
	}
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);

	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	reg_val |= IXGBE_SRRCTL_DROP_EN;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
}
2216 2259
/*
 * ixgbe_setup_rx - Global receive-side initialization.
 *
 * Programs PSRTYPE, FCTRL, rx checksum offload, the VMDq/RSS
 * classification mode, RXCTRL, per-ring registers, per-ring statistics
 * mapping, max frame size / jumbo enable, and (optionally) RSC (LRO).
 * Note the ordering constraints called out in the inline comments:
 * RXCTRL must be written after FCTRL, and the per-ring setup must
 * happen after RXCTRL.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	uint32_t ring_mapping;
	uint32_t i, index;
	uint32_t psrtype_rss_bit;

	/* PSRTYPE must be configured for 82599 */
	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
		reg_val |= IXGBE_PSRTYPE_L2HDR;
		/* 0x80000000 — unnamed high bit; meaning per hw spec */
		reg_val |= 0x80000000;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
	} else {
		/* RSS bit position depends on the number of rx groups */
		if (ixgbe->num_rx_groups > 32) {
			psrtype_rss_bit = 0x20000000;
		} else {
			psrtype_rss_bit = 0x40000000;
		}
		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
			reg_val |= IXGBE_PSRTYPE_L2HDR;
			reg_val |= psrtype_rss_bit;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
		}
	}

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host. Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup VMDq and RSS for multiple receive queues
	 */
	switch (ixgbe->classify_mode) {
	case IXGBE_CLASSIFY_RSS:
		/*
		 * One group, only RSS is needed when more than
		 * one ring enabled.
		 */
		ixgbe_setup_rss(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ:
		/*
		 * Multiple groups, each group has one ring,
		 * only VMDq is needed.
		 */
		ixgbe_setup_vmdq(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ_RSS:
		/*
		 * Multiple groups and multiple rings, both
		 * VMDq and RSS are needed.
		 */
		ixgbe_setup_vmdq_rss(ixgbe);
		break;

	default:
		break;
	}

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		index = ixgbe->rx_rings[i].hw_index;
		/* Four 8-bit map fields per RQSMR register */
		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
	}

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Setup RSC for multiple receive queues.
	 */
	if (ixgbe->lro_enable) {
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			/*
			 * Make sure rx_buf_size * MAXDESC not greater
			 * than 65535.
			 * Intel recommends 4 for MAXDESC field value.
			 */
			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
			reg_val |= IXGBE_RSCCTL_RSCEN;
			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
			else
				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
		}

		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);

		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= IXGBE_RDRXCTL_RSCACKC;
		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;

		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}
}
2377 2420
/*
 * ixgbe_setup_tx_ring - Program one tx ring's hardware registers and
 * reset its software state.
 *
 * Programs length, base address and head/tail registers, optionally
 * enables head write-back (the write-back word lives in the extra
 * descriptor slot past the ring tail), and resets the tbd/tcb software
 * indices. Caller must hold tx_ring->tx_lock and ixgbe->gen_lock.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */

		/* DCA_TXCTRL register layout differs on 82598 vs. later */
		reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		if (hw->mac.type == ixgbe_mac_82598EB) {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
		} else {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
		}
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/* tcb state is only reset on a full (re)init of the tx rings */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
2469 2512
/*
 * ixgbe_setup_tx - Global transmit-side initialization.
 *
 * Sets up each tx ring, programs the per-ring statistics mapping
 * (TQSMR on 82598, TQSM on 82599/X540/X550 family), enables CRC
 * append / padding, enables transmit DMA on 82599 and later, and
 * finally enables the tx queues. Ordering matters: queue enable must
 * come after DMATXCTL.TE is set (see inline comments).
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Four 8-bit map fields are packed per register; flush the
	 * accumulated value every fourth ring.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
				break;

			default:
				break;
			}

			ring_mapping = 0;
		}
	}
	/* Flush the final, partially-filled mapping register */
	if (i & 0x3) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
			break;

		default:
			break;
		}
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599, X540 and X550 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);

		/* Disable arbiter to set MTQC */
		reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		reg_val |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		reg_val &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2563 2612
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 *
 * Fills the 128-entry redirection table (RETA) with a repeating pattern
 * over ring_per_group rings, seeds the RSS hash key with random data,
 * enables RSS hashing for the listed packet types in MRQC, and disables
 * packet checksum (mutually exclusive with RSS, see below).
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;
	uint32_t ring_per_group;

	/*
	 * Fill out redirection table; four 4-bit entries are packed per
	 * byte, one RETA register is written every fourth iteration.
	 */
	reta = 0;
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ring_per_group) |
		    ((i % ring_per_group) << 4);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
2623 2672
/*
 * ixgbe_setup_vmdq - Setup MAC classification feature
 *
 * Enables VMDq based on packet destination MAC address. The register
 * programming differs per MAC generation: 82598 uses VMD_CTL; the
 * 82599/X540/X550 family uses MRQC plus VT_CTL/VFRE and clears the
 * MPSAR pool-select entries.
 */
static void
ixgbe_setup_vmdq(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t vmdctl, i, vtctl;

	/*
	 * Setup the VMDq Control register, enable VMDq based on
	 * packet destination MAC address:
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * VMDq Enable = 1;
		 * VMDq Filter = 0; MAC filtering
		 * Default VMDq output index = 0;
		 */
		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/*
		 * Enable VMDq-only.
		 */
		vmdctl = IXGBE_MRQC_VMDQEN;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);

		/* Clear pool selects for every receive-address entry */
		for (i = 0; i < hw->mac.num_rar_entries; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
		}

		/*
		 * Enable Virtualization and Replication.
		 */
		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

		/*
		 * Enable receiving packets to all VFs
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
		break;

	default:
		break;
	}
}
2678 2729
/*
 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
 *
 * Combines the RETA/hash-key programming of ixgbe_setup_rss() with the
 * VMDq enablement of ixgbe_setup_vmdq(); the MRQC mode selected depends
 * on MAC generation and on the number of rx groups (32 vs 64 pools).
 */
static void
ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;
	uint32_t ring_per_group;
	uint32_t vmdctl, vtctl;

	/*
	 * Fill out redirection table; four 4-bit entries per byte,
	 * one RETA register written every fourth iteration.
	 */
	reta = 0;
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ring_per_group) |
		    ((i % ring_per_group) << 4);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable and setup RSS and VMDq
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * Enable RSS & Setup RSS Hash functions
		 */
		mrqc = IXGBE_MRQC_RSSEN |
		    IXGBE_MRQC_RSS_FIELD_IPV4 |
		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
		    IXGBE_MRQC_RSS_FIELD_IPV6 |
		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/*
		 * Enable and Setup VMDq
		 * VMDq Filter = 0; MAC filtering
		 * Default VMDq output index = 0;
		 */
		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/*
		 * Enable RSS & Setup RSS Hash functions
		 */
		mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
		    IXGBE_MRQC_RSS_FIELD_IPV6 |
		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;

		/*
		 * Enable VMDq+RSS.
		 */
		if (ixgbe->num_rx_groups > 32)  {
			mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
		} else {
			mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Clear pool selects for every receive-address entry */
		for (i = 0; i < hw->mac.num_rar_entries; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
		}
		break;

	default:
		break;

	}

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		/*
		 * Enable Virtualization and Replication.
		 */
		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

		/*
		 * Enable receiving packets to all VFs
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
	}
}
2804 2859
/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 *
 * First-time init clears all RAR slots and the software table;
 * a reset path replays the software table back into the RARs.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint8_t *mac_addr;
	int slot;
	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized at the first time,
	 *    Clear all the multiple unicast addresses.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses
		 */
		ixgbe->unicst_total = hw->mac.num_rar_entries;
		ixgbe->unicst_avail = ixgbe->unicst_total;
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			bzero(mac_addr, ETHERADDRL);
			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
			ixgbe->unicst_addr[slot].mac.set = 0;
		}
		ixgbe->unicst_init = B_TRUE;
	} else {
		/* Re-configure the RAR registers */
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			if (ixgbe->unicst_addr[slot].mac.set == 1) {
				/* Active slot: restore address + group */
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    ixgbe->unicst_addr[slot].mac.group_index,
				    IXGBE_RAH_AV);
			} else {
				/* Unused slot: clear it in hardware too */
				bzero(mac_addr, ETHERADDRL);
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    NULL, NULL);
			}
		}
	}
}
2853 2908
2854 2909 /*
2855 2910 * ixgbe_unicst_find - Find the slot for the specified unicast address
2856 2911 */
2857 2912 int
2858 2913 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2859 2914 {
2860 2915 int slot;
2861 2916
2862 2917 ASSERT(mutex_owned(&ixgbe->gen_lock));
2863 2918
2864 2919 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2865 2920 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2866 2921 mac_addr, ETHERADDRL) == 0)
2867 2922 return (slot);
2868 2923 }
2869 2924
2870 2925 return (-1);
2871 2926 }
2872 2927
2873 2928 /*
2873 2928  * ixgbe_multicst_add - Add a multicast address.
2875 2930 */
2876 2931 int
2877 2932 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2878 2933 {
2879 2934 ASSERT(mutex_owned(&ixgbe->gen_lock));
2880 2935
2881 2936 if ((multiaddr[0] & 01) == 0) {
2882 2937 return (EINVAL);
2883 2938 }
2884 2939
2885 2940 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2886 2941 return (ENOENT);
2887 2942 }
2888 2943
2889 2944 bcopy(multiaddr,
2890 2945 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2891 2946 ixgbe->mcast_count++;
2892 2947
2893 2948 /*
2894 2949 * Update the multicast table in the hardware
2895 2950 */
2896 2951 ixgbe_setup_multicst(ixgbe);
2897 2952
2898 2953 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2899 2954 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2900 2955 return (EIO);
2901 2956 }
2902 2957
2903 2958 return (0);
2904 2959 }
2905 2960
2906 2961 /*
2907 2962  * ixgbe_multicst_remove - Remove a multicast address.
2908 2963 */
2909 2964 int
2910 2965 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2911 2966 {
2912 2967 int i;
2913 2968
2914 2969 ASSERT(mutex_owned(&ixgbe->gen_lock));
2915 2970
2916 2971 for (i = 0; i < ixgbe->mcast_count; i++) {
2917 2972 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2918 2973 ETHERADDRL) == 0) {
2919 2974 for (i++; i < ixgbe->mcast_count; i++) {
2920 2975 ixgbe->mcast_table[i - 1] =
2921 2976 ixgbe->mcast_table[i];
2922 2977 }
2923 2978 ixgbe->mcast_count--;
2924 2979 break;
2925 2980 }
2926 2981 }
2927 2982
2928 2983 /*
2929 2984 * Update the multicast table in the hardware
2930 2985 */
2931 2986 ixgbe_setup_multicst(ixgbe);
2932 2987
2933 2988 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2934 2989 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2935 2990 return (EIO);
2936 2991 }
2937 2992
2938 2993 return (0);
2939 2994 }
2940 2995
2941 2996 /*
2942 2997  * ixgbe_setup_multicst - Setup multicast data structures.
2943 2998 *
2944 2999  * This routine initializes all of the multicast related structures
2945 3000  * and saves them in the hardware registers.
2946 3001 */
2947 3002 static void
2948 3003 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2949 3004 {
2950 3005 uint8_t *mc_addr_list;
2951 3006 uint32_t mc_addr_count;
2952 3007 struct ixgbe_hw *hw = &ixgbe->hw;
2953 3008
2954 3009 ASSERT(mutex_owned(&ixgbe->gen_lock));
2955 3010
2956 3011 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2957 3012
2958 3013 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2959 3014 mc_addr_count = ixgbe->mcast_count;
2960 3015
2961 3016 /*
2962 3017 * Update the multicast addresses to the MTA registers
2963 3018 */
2964 3019 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2965 3020 ixgbe_mc_table_itr, TRUE);
2966 3021 }
2967 3022
2968 3023 /*
2969 3024 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2970 3025 *
2971 3026 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2972 3027 * Different chipsets may have different allowed configuration of vmdq and rss.
2973 3028 */
2974 3029 static void
2975 3030 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2976 3031 {
2977 3032 struct ixgbe_hw *hw = &ixgbe->hw;
2978 3033 uint32_t ring_per_group;
2979 3034
2980 3035 switch (hw->mac.type) {
2981 3036 case ixgbe_mac_82598EB:
2982 3037 /*
2983 3038 * 82598 supports the following combination:
2984 3039 * vmdq no. x rss no.
2985 3040 * [5..16] x 1
2986 3041 * [1..4] x [1..16]
2987 3042 * However 8 rss queue per pool (vmdq) is sufficient for
2988 3043 * most cases.
2989 3044 */
2990 3045 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2991 3046 if (ixgbe->num_rx_groups > 4) {
↓ open down ↓ |
191 lines elided |
↑ open up ↑ |
2992 3047 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2993 3048 } else {
2994 3049 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2995 3050 min(8, ring_per_group);
2996 3051 }
2997 3052
2998 3053 break;
2999 3054
3000 3055 case ixgbe_mac_82599EB:
3001 3056 case ixgbe_mac_X540:
3057 + case ixgbe_mac_X550:
3058 + case ixgbe_mac_X550EM_x:
3002 3059 /*
3003 3060 * 82599 supports the following combination:
3004 3061 * vmdq no. x rss no.
3005 3062 * [33..64] x [1..2]
3006 3063 * [2..32] x [1..4]
3007 3064 * 1 x [1..16]
3008 3065 * However 8 rss queue per pool (vmdq) is sufficient for
3009 3066 * most cases.
3010 3067 *
3011 - * For now, treat X540 like the 82599.
3068 + * For now, treat X540 and X550 like the 82599.
3012 3069 */
3013 3070 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3014 3071 if (ixgbe->num_rx_groups == 1) {
3015 3072 ixgbe->num_rx_rings = min(8, ring_per_group);
3016 3073 } else if (ixgbe->num_rx_groups <= 32) {
3017 3074 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3018 3075 min(4, ring_per_group);
3019 3076 } else if (ixgbe->num_rx_groups <= 64) {
3020 3077 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3021 3078 min(2, ring_per_group);
3022 3079 }
3023 3080 break;
3024 3081
3025 3082 default:
3026 3083 break;
3027 3084 }
3028 3085
3029 3086 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3030 3087
3031 3088 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3032 3089 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3033 3090 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3034 3091 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3035 3092 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3036 3093 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3037 3094 } else {
3038 3095 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3039 3096 }
3040 3097
3041 3098 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3042 3099 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3043 3100 }
3044 3101
3045 3102 /*
3046 3103 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3047 3104 *
3048 3105 * This routine gets user-configured values out of the configuration
3049 3106 * file ixgbe.conf.
3050 3107 *
3051 3108 * For each configurable value, there is a minimum, a maximum, and a
3052 3109 * default.
3053 3110 * If user does not configure a value, use the default.
3054 3111  * If user configures below the minimum, use the minimum.
3055 3112  * If user configures above the maximum, use the maximum.
3056 3113 */
3057 3114 static void
3058 3115 ixgbe_get_conf(ixgbe_t *ixgbe)
3059 3116 {
3060 3117 struct ixgbe_hw *hw = &ixgbe->hw;
3061 3118 uint32_t flow_control;
3062 3119
3063 3120 /*
3064 3121 * ixgbe driver supports the following user configurations:
3065 3122 *
3066 3123 * Jumbo frame configuration:
3067 3124 * default_mtu
3068 3125 *
3069 3126 * Ethernet flow control configuration:
3070 3127 * flow_control
3071 3128 *
3072 3129 * Multiple rings configurations:
3073 3130 * tx_queue_number
3074 3131 * tx_ring_size
3075 3132 * rx_queue_number
3076 3133 * rx_ring_size
3077 3134 *
3078 3135 * Call ixgbe_get_prop() to get the value for a specific
3079 3136 * configuration parameter.
3080 3137 */
3081 3138
3082 3139 /*
3083 3140 * Jumbo frame configuration - max_frame_size controls host buffer
3084 3141 * allocation, so includes MTU, ethernet header, vlan tag and
3085 3142 * frame check sequence.
3086 3143 */
3087 3144 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3088 3145 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3089 3146
3090 3147 ixgbe->max_frame_size = ixgbe->default_mtu +
3091 3148 sizeof (struct ether_vlan_header) + ETHERFCSL;
3092 3149
3093 3150 /*
3094 3151 * Ethernet flow control configuration
3095 3152 */
3096 3153 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3097 3154 ixgbe_fc_none, 3, ixgbe_fc_none);
3098 3155 if (flow_control == 3)
3099 3156 flow_control = ixgbe_fc_default;
3100 3157
3101 3158 /*
3102 3159 * fc.requested mode is what the user requests. After autoneg,
3103 3160 * fc.current_mode will be the flow_control mode that was negotiated.
3104 3161 */
3105 3162 hw->fc.requested_mode = flow_control;
3106 3163
3107 3164 /*
3108 3165 * Multiple rings configurations
3109 3166 */
3110 3167 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3111 3168 ixgbe->capab->min_tx_que_num,
3112 3169 ixgbe->capab->max_tx_que_num,
3113 3170 ixgbe->capab->def_tx_que_num);
3114 3171 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3115 3172 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3116 3173
3117 3174 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3118 3175 ixgbe->capab->min_rx_que_num,
3119 3176 ixgbe->capab->max_rx_que_num,
3120 3177 ixgbe->capab->def_rx_que_num);
3121 3178 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3122 3179 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3123 3180
3124 3181 /*
3125 3182 * Multiple groups configuration
3126 3183 */
3127 3184 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3128 3185 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3129 3186 ixgbe->capab->def_rx_grp_num);
3130 3187
3131 3188 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3132 3189 0, 1, DEFAULT_MR_ENABLE);
3133 3190
3134 3191 if (ixgbe->mr_enable == B_FALSE) {
3135 3192 ixgbe->num_tx_rings = 1;
3136 3193 ixgbe->num_rx_rings = 1;
3137 3194 ixgbe->num_rx_groups = 1;
3138 3195 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3139 3196 } else {
3140 3197 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3141 3198 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3142 3199 /*
3143 3200 * The combination of num_rx_rings and num_rx_groups
3144 3201 * may be not supported by h/w. We need to adjust
3145 3202 * them to appropriate values.
3146 3203 */
3147 3204 ixgbe_setup_vmdq_rss_conf(ixgbe);
3148 3205 }
3149 3206
3150 3207 /*
3151 3208 * Tunable used to force an interrupt type. The only use is
3152 3209 * for testing of the lesser interrupt types.
3153 3210 * 0 = don't force interrupt type
3154 3211 * 1 = force interrupt type MSI-X
3155 3212 * 2 = force interrupt type MSI
3156 3213 * 3 = force interrupt type Legacy
3157 3214 */
3158 3215 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3159 3216 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3160 3217
3161 3218 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3162 3219 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3163 3220 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
↓ open down ↓ |
142 lines elided |
↑ open up ↑ |
3164 3221 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3165 3222 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3166 3223 0, 1, DEFAULT_LSO_ENABLE);
3167 3224 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3168 3225 0, 1, DEFAULT_LRO_ENABLE);
3169 3226 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3170 3227 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3171 3228 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3172 3229 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3173 3230
3174 - /* Head Write Back not recommended for 82599 and X540 */
3231 + /* Head Write Back not recommended for 82599, X540 and X550 */
3175 3232 if (hw->mac.type == ixgbe_mac_82599EB ||
3176 - hw->mac.type == ixgbe_mac_X540) {
3233 + hw->mac.type == ixgbe_mac_X540 ||
3234 + hw->mac.type == ixgbe_mac_X550 ||
3235 + hw->mac.type == ixgbe_mac_X550EM_x) {
3177 3236 ixgbe->tx_head_wb_enable = B_FALSE;
3178 3237 }
3179 3238
3180 3239 /*
3181 3240 * ixgbe LSO needs the tx h/w checksum support.
3182 3241 * LSO will be disabled if tx h/w checksum is not
3183 3242 * enabled.
3184 3243 */
3185 3244 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3186 3245 ixgbe->lso_enable = B_FALSE;
3187 3246 }
3188 3247
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3189 3248 /*
3190 3249 * ixgbe LRO needs the rx h/w checksum support.
3191 3250 * LRO will be disabled if rx h/w checksum is not
3192 3251 * enabled.
3193 3252 */
3194 3253 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3195 3254 ixgbe->lro_enable = B_FALSE;
3196 3255 }
3197 3256
3198 3257 /*
3199 - * ixgbe LRO only been supported by 82599 and X540 now
3258 + * ixgbe LRO only supported by 82599, X540 and X550
3200 3259 */
3201 3260 if (hw->mac.type == ixgbe_mac_82598EB) {
3202 3261 ixgbe->lro_enable = B_FALSE;
3203 3262 }
3204 3263 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3205 3264 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3206 3265 DEFAULT_TX_COPY_THRESHOLD);
3207 3266 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3208 3267 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3209 3268 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3210 3269 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3211 3270 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3212 3271 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3213 3272 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3214 3273 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3215 3274 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3216 3275
3217 3276 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3218 3277 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
3219 3278 DEFAULT_RX_COPY_THRESHOLD);
3220 3279 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3221 3280 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3222 3281 DEFAULT_RX_LIMIT_PER_INTR);
3223 3282
3224 3283 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3225 3284 ixgbe->capab->min_intr_throttle,
3226 3285 ixgbe->capab->max_intr_throttle,
3227 3286 ixgbe->capab->def_intr_throttle);
3228 3287 /*
3229 - * 82599 and X540 require the interrupt throttling rate is
3230 - * a multiple of 8. This is enforced by the register
3231 - * definiton.
3288 + * 82599, X540 and X550 require the interrupt throttling rate to be
3289 + * a multiple of 8. This is enforced by the register definition.
3232 3290 */
3233 - if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
3291 + if (hw->mac.type == ixgbe_mac_82599EB ||
3292 + hw->mac.type == ixgbe_mac_X540 ||
3293 + hw->mac.type == ixgbe_mac_X550 ||
3294 + hw->mac.type == ixgbe_mac_X550EM_x)
3234 3295 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3235 3296
3236 3297 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3237 3298 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3238 3299 }
3239 3300
3240 3301 static void
3241 3302 ixgbe_init_params(ixgbe_t *ixgbe)
3242 3303 {
3243 3304 ixgbe->param_en_10000fdx_cap = 1;
3244 3305 ixgbe->param_en_1000fdx_cap = 1;
3245 3306 ixgbe->param_en_100fdx_cap = 1;
3246 3307 ixgbe->param_adv_10000fdx_cap = 1;
3247 3308 ixgbe->param_adv_1000fdx_cap = 1;
3248 3309 ixgbe->param_adv_100fdx_cap = 1;
3249 3310
3250 3311 ixgbe->param_pause_cap = 1;
3251 3312 ixgbe->param_asym_pause_cap = 1;
3252 3313 ixgbe->param_rem_fault = 0;
3253 3314
3254 3315 ixgbe->param_adv_autoneg_cap = 1;
3255 3316 ixgbe->param_adv_pause_cap = 1;
3256 3317 ixgbe->param_adv_asym_pause_cap = 1;
3257 3318 ixgbe->param_adv_rem_fault = 0;
3258 3319
3259 3320 ixgbe->param_lp_10000fdx_cap = 0;
3260 3321 ixgbe->param_lp_1000fdx_cap = 0;
3261 3322 ixgbe->param_lp_100fdx_cap = 0;
3262 3323 ixgbe->param_lp_autoneg_cap = 0;
3263 3324 ixgbe->param_lp_pause_cap = 0;
3264 3325 ixgbe->param_lp_asym_pause_cap = 0;
3265 3326 ixgbe->param_lp_rem_fault = 0;
3266 3327 }
3267 3328
3268 3329 /*
3269 3330 * ixgbe_get_prop - Get a property value out of the configuration file
3270 3331 * ixgbe.conf.
3271 3332 *
3272 3333 * Caller provides the name of the property, a default value, a minimum
3273 3334 * value, and a maximum value.
3274 3335 *
3275 3336 * Return configured value of the property, with default, minimum and
3276 3337 * maximum properly applied.
3277 3338 */
3278 3339 static int
3279 3340 ixgbe_get_prop(ixgbe_t *ixgbe,
3280 3341 char *propname, /* name of the property */
3281 3342 int minval, /* minimum acceptable value */
3282 3343     int maxval,   /* maximum acceptable value */
3283 3344 int defval) /* default value */
3284 3345 {
3285 3346 int value;
3286 3347
3287 3348 /*
3288 3349 * Call ddi_prop_get_int() to read the conf settings
3289 3350 */
3290 3351 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3291 3352 DDI_PROP_DONTPASS, propname, defval);
3292 3353 if (value > maxval)
3293 3354 value = maxval;
3294 3355
3295 3356 if (value < minval)
3296 3357 value = minval;
3297 3358
3298 3359 return (value);
3299 3360 }
3300 3361
3301 3362 /*
3302 3363 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3303 3364 */
3304 3365 int
3305 3366 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3306 3367 {
3307 3368 u32 autoneg_advertised = 0;
3308 3369
3309 3370 /*
3310 3371 * No half duplex support with 10Gb parts
3311 3372 */
3312 3373 if (ixgbe->param_adv_10000fdx_cap == 1)
3313 3374 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3314 3375
3315 3376 if (ixgbe->param_adv_1000fdx_cap == 1)
3316 3377 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3317 3378
3318 3379 if (ixgbe->param_adv_100fdx_cap == 1)
3319 3380 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3320 3381
3321 3382 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
↓ open down ↓ |
78 lines elided |
↑ open up ↑ |
3322 3383 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3323 3384 "to autonegotiation with full link capabilities.");
3324 3385
3325 3386 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3326 3387 IXGBE_LINK_SPEED_1GB_FULL |
3327 3388 IXGBE_LINK_SPEED_100_FULL;
3328 3389 }
3329 3390
3330 3391 if (setup_hw) {
3331 3392 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3332 - ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3393 + ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3333 3394 ixgbe_notice(ixgbe, "Setup link failed on this "
3334 3395 "device.");
3335 3396 return (IXGBE_FAILURE);
3336 3397 }
3337 3398 }
3338 3399
3339 3400 return (IXGBE_SUCCESS);
3340 3401 }
3341 3402
3342 3403 /*
3343 3404 * ixgbe_driver_link_check - Link status processing.
3344 3405 *
3345 3406 * This function can be called in both kernel context and interrupt context
3346 3407 */
3347 3408 static void
3348 3409 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3349 3410 {
3350 3411 struct ixgbe_hw *hw = &ixgbe->hw;
3351 3412 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3352 3413 boolean_t link_up = B_FALSE;
3353 3414 boolean_t link_changed = B_FALSE;
3354 3415
3355 3416 ASSERT(mutex_owned(&ixgbe->gen_lock));
3356 3417
3357 3418 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3358 3419 if (link_up) {
3359 3420 ixgbe->link_check_complete = B_TRUE;
3360 3421
3361 3422 /* Link is up, enable flow control settings */
3362 3423 (void) ixgbe_fc_enable(hw);
3363 3424
3364 3425 /*
3365 3426 * The Link is up, check whether it was marked as down earlier
3366 3427 */
3367 3428 if (ixgbe->link_state != LINK_STATE_UP) {
3368 3429 switch (speed) {
3369 3430 case IXGBE_LINK_SPEED_10GB_FULL:
3370 3431 ixgbe->link_speed = SPEED_10GB;
3371 3432 break;
3372 3433 case IXGBE_LINK_SPEED_1GB_FULL:
3373 3434 ixgbe->link_speed = SPEED_1GB;
3374 3435 break;
3375 3436 case IXGBE_LINK_SPEED_100_FULL:
3376 3437 ixgbe->link_speed = SPEED_100;
3377 3438 }
3378 3439 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3379 3440 ixgbe->link_state = LINK_STATE_UP;
3380 3441 link_changed = B_TRUE;
3381 3442 }
3382 3443 } else {
3383 3444 if (ixgbe->link_check_complete == B_TRUE ||
3384 3445 (ixgbe->link_check_complete == B_FALSE &&
3385 3446 gethrtime() >= ixgbe->link_check_hrtime)) {
3386 3447 /*
3387 3448 * The link is really down
3388 3449 */
3389 3450 ixgbe->link_check_complete = B_TRUE;
3390 3451
3391 3452 if (ixgbe->link_state != LINK_STATE_DOWN) {
3392 3453 ixgbe->link_speed = 0;
3393 3454 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3394 3455 ixgbe->link_state = LINK_STATE_DOWN;
3395 3456 link_changed = B_TRUE;
3396 3457 }
3397 3458 }
3398 3459 }
3399 3460
3400 3461 /*
3401 3462 * If we are in an interrupt context, need to re-enable the
3402 3463 * interrupt, which was automasked
3403 3464 */
3404 3465 if (servicing_interrupt() != 0) {
3405 3466 ixgbe->eims |= IXGBE_EICR_LSC;
3406 3467 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3407 3468 }
3408 3469
3409 3470 if (link_changed) {
3410 3471 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3411 3472 }
3412 3473 }
3413 3474
3414 3475 /*
↓ open down ↓ |
72 lines elided |
↑ open up ↑ |
3415 3476 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3416 3477 */
3417 3478 static void
3418 3479 ixgbe_sfp_check(void *arg)
3419 3480 {
3420 3481 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3421 3482 uint32_t eicr = ixgbe->eicr;
3422 3483 struct ixgbe_hw *hw = &ixgbe->hw;
3423 3484
3424 3485 mutex_enter(&ixgbe->gen_lock);
3425 - if (eicr & IXGBE_EICR_GPI_SDP1) {
3486 + if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3426 3487 /* clear the interrupt */
3427 - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3488 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3428 3489
3429 3490 /* if link up, do multispeed fiber setup */
3430 3491 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3431 - B_TRUE, B_TRUE);
3492 + B_TRUE);
3432 3493 ixgbe_driver_link_check(ixgbe);
3433 3494 ixgbe_get_hw_state(ixgbe);
3434 - } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3495 + } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3435 3496 /* clear the interrupt */
3436 - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3497 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3437 3498
3438 3499 /* if link up, do sfp module setup */
3439 3500 (void) hw->mac.ops.setup_sfp(hw);
3440 3501
3441 3502 /* do multispeed fiber setup */
3442 3503 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3443 - B_TRUE, B_TRUE);
3504 + B_TRUE);
3444 3505 ixgbe_driver_link_check(ixgbe);
3445 3506 ixgbe_get_hw_state(ixgbe);
3446 3507 }
3447 3508 mutex_exit(&ixgbe->gen_lock);
3448 3509
3449 3510 /*
3450 3511 * We need to fully re-check the link later.
3451 3512 */
3452 3513 ixgbe->link_check_complete = B_FALSE;
3453 3514 ixgbe->link_check_hrtime = gethrtime() +
3454 3515 (IXGBE_LINK_UP_TIME * 100000000ULL);
3455 3516 }
3456 3517
3457 3518 /*
3458 3519 * ixgbe_overtemp_check - overtemp module processing done in taskq
3459 3520 *
3460 3521 * This routine will only be called on adapters with temperature sensor.
3461 3522 * The indication of over-temperature can be either SDP0 interrupt or the link
3462 3523 * status change interrupt.
3463 3524 */
3464 3525 static void
3465 3526 ixgbe_overtemp_check(void *arg)
3466 3527 {
3467 3528 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3468 3529 struct ixgbe_hw *hw = &ixgbe->hw;
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
3469 3530 uint32_t eicr = ixgbe->eicr;
3470 3531 ixgbe_link_speed speed;
3471 3532 boolean_t link_up;
3472 3533
3473 3534 mutex_enter(&ixgbe->gen_lock);
3474 3535
3475 3536 /* make sure we know current state of link */
3476 3537 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3477 3538
3478 3539 /* check over-temp condition */
3479 - if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3540 + if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3480 3541 (eicr & IXGBE_EICR_LSC)) {
3481 3542 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3482 3543 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3483 3544
3484 3545 /*
3485 3546 * Disable the adapter interrupts
3486 3547 */
3487 3548 ixgbe_disable_adapter_interrupts(ixgbe);
3488 3549
3489 3550 /*
3490 3551 * Disable Rx/Tx units
3491 3552 */
3492 3553 (void) ixgbe_stop_adapter(hw);
3493 3554
3494 3555 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3495 3556 ixgbe_error(ixgbe,
3496 3557 "Problem: Network adapter has been stopped "
3497 3558 "because it has overheated");
3498 3559 ixgbe_error(ixgbe,
3499 3560 "Action: Restart the computer. "
3500 3561 "If the problem persists, power off the system "
3501 3562 "and replace the adapter");
3502 3563 }
3503 3564 }
3504 3565
3505 3566 /* write to clear the interrupt */
3506 3567 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3507 3568
3508 3569 mutex_exit(&ixgbe->gen_lock);
3509 3570 }
3510 3571
3511 3572 /*
3512 3573 * ixgbe_link_timer - timer for link status detection
3513 3574 */
3514 3575 static void
3515 3576 ixgbe_link_timer(void *arg)
3516 3577 {
3517 3578 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3518 3579
3519 3580 mutex_enter(&ixgbe->gen_lock);
3520 3581 ixgbe_driver_link_check(ixgbe);
3521 3582 mutex_exit(&ixgbe->gen_lock);
3522 3583 }
3523 3584
3524 3585 /*
3525 3586 * ixgbe_local_timer - Driver watchdog function.
3526 3587 *
3527 3588 * This function will handle the transmit stall check and other routines.
3528 3589 */
3529 3590 static void
3530 3591 ixgbe_local_timer(void *arg)
3531 3592 {
3532 3593 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3533 3594
3534 3595 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3535 3596 goto out;
3536 3597
3537 3598 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3538 3599 ixgbe->reset_count++;
3539 3600 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3540 3601 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3541 3602 goto out;
3542 3603 }
3543 3604
3544 3605 if (ixgbe_stall_check(ixgbe)) {
3545 3606 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3546 3607 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3547 3608
3548 3609 ixgbe->reset_count++;
3549 3610 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3550 3611 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3551 3612 }
3552 3613
3553 3614 out:
3554 3615 ixgbe_restart_watchdog_timer(ixgbe);
3555 3616 }
3556 3617
3557 3618 /*
3558 3619 * ixgbe_stall_check - Check for transmit stall.
3559 3620 *
3560 3621 * This function checks if the adapter is stalled (in transmit).
3561 3622 *
3562 3623 * It is called each time the watchdog timeout is invoked.
3563 3624 * If the transmit descriptor reclaim continuously fails,
3564 3625 * the watchdog value will increment by 1. If the watchdog
3565 3626 * value exceeds the threshold, the ixgbe is assumed to
3566 3627 * have stalled and need to be reset.
3567 3628 */
3568 3629 static boolean_t
3569 3630 ixgbe_stall_check(ixgbe_t *ixgbe)
3570 3631 {
3571 3632 ixgbe_tx_ring_t *tx_ring;
3572 3633 boolean_t result;
3573 3634 int i;
3574 3635
3575 3636 if (ixgbe->link_state != LINK_STATE_UP)
3576 3637 return (B_FALSE);
3577 3638
3578 3639 /*
3579 3640 * If any tx ring is stalled, we'll reset the chipset
3580 3641 */
3581 3642 result = B_FALSE;
3582 3643 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3583 3644 tx_ring = &ixgbe->tx_rings[i];
3584 3645 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3585 3646 tx_ring->tx_recycle(tx_ring);
3586 3647 }
3587 3648
3588 3649 if (tx_ring->recycle_fail > 0)
3589 3650 tx_ring->stall_watchdog++;
3590 3651 else
3591 3652 tx_ring->stall_watchdog = 0;
3592 3653
3593 3654 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3594 3655 result = B_TRUE;
3595 3656 break;
3596 3657 }
3597 3658 }
3598 3659
3599 3660 if (result) {
3600 3661 tx_ring->stall_watchdog = 0;
3601 3662 tx_ring->recycle_fail = 0;
3602 3663 }
3603 3664
3604 3665 return (result);
3605 3666 }
3606 3667
3607 3668
3608 3669 /*
3609 3670 * is_valid_mac_addr - Check if the mac address is valid.
3610 3671 */
3611 3672 static boolean_t
3612 3673 is_valid_mac_addr(uint8_t *mac_addr)
3613 3674 {
3614 3675 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3615 3676 const uint8_t addr_test2[6] =
3616 3677 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3617 3678
3618 3679 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3619 3680 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3620 3681 return (B_FALSE);
3621 3682
3622 3683 return (B_TRUE);
3623 3684 }
3624 3685
3625 3686 static boolean_t
3626 3687 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3627 3688 {
3628 3689 #ifdef __sparc
3629 3690 struct ixgbe_hw *hw = &ixgbe->hw;
3630 3691 uchar_t *bytes;
3631 3692 struct ether_addr sysaddr;
3632 3693 uint_t nelts;
3633 3694 int err;
3634 3695 boolean_t found = B_FALSE;
3635 3696
3636 3697 /*
3637 3698 * The "vendor's factory-set address" may already have
3638 3699 * been extracted from the chip, but if the property
3639 3700 * "local-mac-address" is set we use that instead.
3640 3701 *
3641 3702 * We check whether it looks like an array of 6
3642 3703 * bytes (which it should, if OBP set it). If we can't
3643 3704 * make sense of it this way, we'll ignore it.
3644 3705 */
3645 3706 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3646 3707 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3647 3708 if (err == DDI_PROP_SUCCESS) {
3648 3709 if (nelts == ETHERADDRL) {
3649 3710 while (nelts--)
3650 3711 hw->mac.addr[nelts] = bytes[nelts];
3651 3712 found = B_TRUE;
3652 3713 }
3653 3714 ddi_prop_free(bytes);
3654 3715 }
3655 3716
3656 3717 /*
3657 3718 * Look up the OBP property "local-mac-address?". If the user has set
3658 3719 * 'local-mac-address? = false', use "the system address" instead.
3659 3720 */
3660 3721 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3661 3722 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3662 3723 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3663 3724 if (localetheraddr(NULL, &sysaddr) != 0) {
3664 3725 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
↓ open down ↓ |
175 lines elided |
↑ open up ↑ |
3665 3726 found = B_TRUE;
3666 3727 }
3667 3728 }
3668 3729 ddi_prop_free(bytes);
3669 3730 }
3670 3731
3671 3732 /*
3672 3733 * Finally(!), if there's a valid "mac-address" property (created
3673 3734 * if we netbooted from this interface), we must use this instead
3674 3735 * of any of the above to ensure that the NFS/install server doesn't
3675 - * get confused by the address changing as Solaris takes over!
3736 + * get confused by the address changing as illumos takes over!
3676 3737 */
3677 3738 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3678 3739 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3679 3740 if (err == DDI_PROP_SUCCESS) {
3680 3741 if (nelts == ETHERADDRL) {
3681 3742 while (nelts--)
3682 3743 hw->mac.addr[nelts] = bytes[nelts];
3683 3744 found = B_TRUE;
3684 3745 }
3685 3746 ddi_prop_free(bytes);
3686 3747 }
3687 3748
3688 3749 if (found) {
3689 3750 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3690 3751 return (B_TRUE);
3691 3752 }
3692 3753 #else
3693 3754 _NOTE(ARGUNUSED(ixgbe));
3694 3755 #endif
3695 3756
3696 3757 return (B_TRUE);
3697 3758 }
3698 3759
3699 3760 #pragma inline(ixgbe_arm_watchdog_timer)
3700 3761 static void
3701 3762 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3702 3763 {
3703 3764 /*
3704 3765 * Fire a watchdog timer
3705 3766 */
3706 3767 ixgbe->watchdog_tid =
3707 3768 timeout(ixgbe_local_timer,
3708 3769 (void *)ixgbe, 1 * drv_usectohz(1000000));
3709 3770
3710 3771 }
3711 3772
3712 3773 /*
3713 3774 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3714 3775 */
3715 3776 void
3716 3777 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3717 3778 {
3718 3779 mutex_enter(&ixgbe->watchdog_lock);
3719 3780
3720 3781 if (!ixgbe->watchdog_enable) {
3721 3782 ixgbe->watchdog_enable = B_TRUE;
3722 3783 ixgbe->watchdog_start = B_TRUE;
3723 3784 ixgbe_arm_watchdog_timer(ixgbe);
3724 3785 }
3725 3786
3726 3787 mutex_exit(&ixgbe->watchdog_lock);
3727 3788 }
3728 3789
3729 3790 /*
3730 3791 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3731 3792 */
3732 3793 void
3733 3794 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3734 3795 {
3735 3796 timeout_id_t tid;
3736 3797
3737 3798 mutex_enter(&ixgbe->watchdog_lock);
3738 3799
3739 3800 ixgbe->watchdog_enable = B_FALSE;
3740 3801 ixgbe->watchdog_start = B_FALSE;
3741 3802 tid = ixgbe->watchdog_tid;
3742 3803 ixgbe->watchdog_tid = 0;
3743 3804
3744 3805 mutex_exit(&ixgbe->watchdog_lock);
3745 3806
3746 3807 if (tid != 0)
3747 3808 (void) untimeout(tid);
3748 3809 }
3749 3810
3750 3811 /*
3751 3812 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3752 3813 */
3753 3814 void
3754 3815 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3755 3816 {
3756 3817 mutex_enter(&ixgbe->watchdog_lock);
3757 3818
3758 3819 if (ixgbe->watchdog_enable) {
3759 3820 if (!ixgbe->watchdog_start) {
3760 3821 ixgbe->watchdog_start = B_TRUE;
3761 3822 ixgbe_arm_watchdog_timer(ixgbe);
3762 3823 }
3763 3824 }
3764 3825
3765 3826 mutex_exit(&ixgbe->watchdog_lock);
3766 3827 }
3767 3828
3768 3829 /*
3769 3830 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3770 3831 */
3771 3832 static void
3772 3833 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3773 3834 {
3774 3835 mutex_enter(&ixgbe->watchdog_lock);
3775 3836
3776 3837 if (ixgbe->watchdog_start)
3777 3838 ixgbe_arm_watchdog_timer(ixgbe);
3778 3839
3779 3840 mutex_exit(&ixgbe->watchdog_lock);
3780 3841 }
3781 3842
3782 3843 /*
3783 3844 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3784 3845 */
3785 3846 void
3786 3847 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3787 3848 {
3788 3849 timeout_id_t tid;
3789 3850
3790 3851 mutex_enter(&ixgbe->watchdog_lock);
3791 3852
3792 3853 ixgbe->watchdog_start = B_FALSE;
3793 3854 tid = ixgbe->watchdog_tid;
3794 3855 ixgbe->watchdog_tid = 0;
3795 3856
3796 3857 mutex_exit(&ixgbe->watchdog_lock);
3797 3858
3798 3859 if (tid != 0)
3799 3860 (void) untimeout(tid);
3800 3861 }
3801 3862
/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 *
 * Writes the Extended Interrupt Mask Clear register to mask every
 * cause bit, and for MSI-X additionally clears the auto-clear
 * register so no cause is auto-acknowledged while masked.
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	/* flush posted writes so masking takes effect before returning */
	IXGBE_WRITE_FLUSH(hw);
}
3824 3885
/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
 *
 * Computes the interrupt-mask (EIMS), auto-clear (EIAC), auto-mask
 * (EIAM) and general-purpose (GPIE) register values appropriate for
 * the current interrupt type and MAC generation, then writes them
 * all and flushes.  The cached ixgbe->eims is also what the ISRs
 * later write back to re-enable automasked causes.
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, X540 and X550, extended interrupt
		 * automask enable only in MSI or MSI-X mode
		 * (82598EB gets EIAME unconditionally).
		 */
		if ((hw->mac.type == ixgbe_mac_82598EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}

	/* Enable specific "other" interrupt types */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		gpie |= ixgbe->capab->other_gpie;
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		gpie |= ixgbe->capab->other_gpie;

		/* Enable RSC Delay 8us when LRO enabled */
		if (ixgbe->lro_enable) {
			gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
		}
		break;

	default:
		break;
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}
3901 3964
/*
 * ixgbe_loopback_ioctl - Loopback support.
 *
 * Dispatches the LB_* ioctl commands.  Each command validates that
 * the caller-supplied buffer (mp->b_cont) is exactly the expected
 * size before reading or writing it; on success, iocp->ioc_count is
 * set to the number of bytes being returned.
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	/* no payload block means a malformed request */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* report the total size of the supported-modes table */
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		/* copy out the three supported loopback property records */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		lbpp[value++] = lb_external;
		break;

	case LB_GET_MODE:
		/* return the currently configured loopback mode */
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		/* apply a new loopback mode; no data is returned */
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	/* FMA: verify register accesses above did not fault */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
3981 4044
3982 4045 /*
3983 4046 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3984 4047 */
3985 4048 static boolean_t
3986 4049 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3987 4050 {
3988 4051 if (mode == ixgbe->loopback_mode)
3989 4052 return (B_TRUE);
3990 4053
3991 4054 ixgbe->loopback_mode = mode;
3992 4055
3993 4056 if (mode == IXGBE_LB_NONE) {
3994 4057 /*
3995 4058 * Reset the chip
3996 4059 */
3997 4060 (void) ixgbe_reset(ixgbe);
3998 4061 return (B_TRUE);
3999 4062 }
4000 4063
4001 4064 mutex_enter(&ixgbe->gen_lock);
4002 4065
4003 4066 switch (mode) {
4004 4067 default:
4005 4068 mutex_exit(&ixgbe->gen_lock);
4006 4069 return (B_FALSE);
4007 4070
4008 4071 case IXGBE_LB_EXTERNAL:
4009 4072 break;
4010 4073
4011 4074 case IXGBE_LB_INTERNAL_MAC:
4012 4075 ixgbe_set_internal_mac_loopback(ixgbe);
4013 4076 break;
4014 4077 }
4015 4078
4016 4079 mutex_exit(&ixgbe->gen_lock);
4017 4080
4018 4081 return (B_TRUE);
4019 4082 }
4020 4083
/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 *
 * Enables the MAC loopback bit in HLREG0 and clears the link-mode
 * select field in AUTOC, then applies per-generation steps to keep
 * looped-back traffic off the wire.
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: power down each Atlas analog Tx lane group */
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/*
		 * Newer MACs: force link up (FLU) at 10G KX4 and
		 * re-run link setup at 10G full duplex.
		 */
		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
		reg |= (IXGBE_AUTOC_FLU |
		    IXGBE_AUTOC_10G_KX4);
		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
		    B_FALSE);
		break;

	default:
		break;
	}
}
4089 4154
4090 4155 #pragma inline(ixgbe_intr_rx_work)
4091 4156 /*
4092 4157 * ixgbe_intr_rx_work - RX processing of ISR.
4093 4158 */
4094 4159 static void
4095 4160 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4096 4161 {
4097 4162 mblk_t *mp;
4098 4163
4099 4164 mutex_enter(&rx_ring->rx_lock);
4100 4165
4101 4166 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4102 4167 mutex_exit(&rx_ring->rx_lock);
4103 4168
4104 4169 if (mp != NULL)
4105 4170 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4106 4171 rx_ring->ring_gen_num);
4107 4172 }
4108 4173
4109 4174 #pragma inline(ixgbe_intr_tx_work)
4110 4175 /*
4111 4176 * ixgbe_intr_tx_work - TX processing of ISR.
4112 4177 */
4113 4178 static void
4114 4179 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4115 4180 {
4116 4181 ixgbe_t *ixgbe = tx_ring->ixgbe;
4117 4182
4118 4183 /*
4119 4184 * Recycle the tx descriptors
4120 4185 */
4121 4186 tx_ring->tx_recycle(tx_ring);
4122 4187
4123 4188 /*
4124 4189 * Schedule the re-transmit
4125 4190 */
4126 4191 if (tx_ring->reschedule &&
4127 4192 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4128 4193 tx_ring->reschedule = B_FALSE;
4129 4194 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4130 4195 tx_ring->ring_handle);
4131 4196 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
4132 4197 }
4133 4198 }
4134 4199
#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 *
 * Called with ixgbe->gen_lock held; 'eicr' is the snapshot of the
 * Extended Interrupt Cause Register taken by the calling ISR.
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}

	/*
	 * check for fan failure on adapters with fans
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
		/*
		 * NOTE(review): the OVERTEMP state flag is reused here
		 * for the fan-failure case — confirm that is intended.
		 */
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(&ixgbe->hw);

		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped "
		    "because the fan has stopped.\n");
		ixgbe_error(ixgbe,
		    "Action: Replace the adapter.\n");

		/* re-enable the interrupt, which was automasked */
		ixgbe->eims |= IXGBE_EICR_GPI_SDP1_BY_MAC(hw);
	}

	/*
	 * Do SFP check for adapters with hot-plug capability.
	 * The check runs in taskq context because it may sleep.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
	    (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}

	/*
	 * Do over-temperature check for adapters with temp sensor;
	 * also dispatched to taskq context.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || (eicr & IXGBE_EICR_LSC))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
		    ixgbe_overtemp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for overtemp check");
		}
	}
}
4208 4276
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 *
 * With a shared (fixed) interrupt there is a single vector, so
 * exactly one rx and one tx ring are serviced.  RX delivery and TX
 * rescheduling are deferred until gen_lock has been dropped.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	/* a suspended device cannot be the interrupt source */
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* FMA: the EICR read may have faulted */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			/* mask the queue cause while we process it */
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}
4337 4407
/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 *
 * MSI mode has a single vector, so one rx ring and one tx ring are
 * serviced; "other" causes are handled under gen_lock.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	/* reading EICR also clears the asserted cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* FMA: the EICR read may have faulted */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			/* newer MACs: mask "other" causes via EIMC */
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
4406 4478
/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 *
 * Each vector carries bitmaps (rx_map/tx_map/other_map) describing
 * which rings and causes it services; every set bit is processed.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}


	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		/* FMA: the EICR read may have faulted */
		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}
4485 4559
4486 4560 /*
4487 4561 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4488 4562 *
4489 4563 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
4490 4564 * if not successful, try Legacy.
4491 4565 * ixgbe->intr_force can be used to force sequence to start with
4492 4566 * any of the 3 types.
4493 4567 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4494 4568 */
4495 4569 static int
4496 4570 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4497 4571 {
4498 4572 dev_info_t *devinfo;
4499 4573 int intr_types;
4500 4574 int rc;
4501 4575
4502 4576 devinfo = ixgbe->dip;
4503 4577
4504 4578 /*
4505 4579 * Get supported interrupt types
4506 4580 */
4507 4581 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4508 4582
4509 4583 if (rc != DDI_SUCCESS) {
4510 4584 ixgbe_log(ixgbe,
4511 4585 "Get supported interrupt types failed: %d", rc);
4512 4586 return (IXGBE_FAILURE);
4513 4587 }
4514 4588 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4515 4589
4516 4590 ixgbe->intr_type = 0;
4517 4591
4518 4592 /*
4519 4593 * Install MSI-X interrupts
4520 4594 */
4521 4595 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4522 4596 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4523 4597 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4524 4598 if (rc == IXGBE_SUCCESS)
4525 4599 return (IXGBE_SUCCESS);
4526 4600
4527 4601 ixgbe_log(ixgbe,
4528 4602 "Allocate MSI-X failed, trying MSI interrupts...");
4529 4603 }
4530 4604
4531 4605 /*
4532 4606 * MSI-X not used, force rings and groups to 1
4533 4607 */
4534 4608 ixgbe->num_rx_rings = 1;
4535 4609 ixgbe->num_rx_groups = 1;
4536 4610 ixgbe->num_tx_rings = 1;
4537 4611 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4538 4612 ixgbe_log(ixgbe,
4539 4613 "MSI-X not used, force rings and groups number to 1");
4540 4614
4541 4615 /*
4542 4616 * Install MSI interrupts
4543 4617 */
4544 4618 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4545 4619 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4546 4620 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4547 4621 if (rc == IXGBE_SUCCESS)
4548 4622 return (IXGBE_SUCCESS);
4549 4623
4550 4624 ixgbe_log(ixgbe,
4551 4625 "Allocate MSI failed, trying Legacy interrupts...");
4552 4626 }
4553 4627
4554 4628 /*
4555 4629 * Install legacy interrupts
4556 4630 */
4557 4631 if (intr_types & DDI_INTR_TYPE_FIXED) {
4558 4632 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4559 4633 if (rc == IXGBE_SUCCESS)
4560 4634 return (IXGBE_SUCCESS);
4561 4635
4562 4636 ixgbe_log(ixgbe,
4563 4637 "Allocate Legacy interrupts failed");
4564 4638 }
4565 4639
4566 4640 /*
4567 4641 * If none of the 3 types succeeded, return failure
4568 4642 */
4569 4643 return (IXGBE_FAILURE);
4570 4644 }
4571 4645
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed. For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 *
 * On any failure after allocation has begun, control jumps to
 * alloc_handle_fail, which releases everything via ixgbe_rem_intrs().
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	/* decide how many vectors to request for the given type */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	/* the framework may grant fewer vectors than requested */
	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * rss number per group should not exceed the rx interrupt number,
	 * else need to adjust rx ring number.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* release any handles allocated so far */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
4713 4787
4714 4788 /*
4715 4789 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4716 4790 *
4717 4791 * Before adding the interrupt handlers, the interrupt vectors have
4718 4792 * been allocated, and the rx/tx rings have also been allocated.
4719 4793 */
4720 4794 static int
4721 4795 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4722 4796 {
4723 4797 int vector = 0;
4724 4798 int rc;
4725 4799
4726 4800 switch (ixgbe->intr_type) {
4727 4801 case DDI_INTR_TYPE_MSIX:
4728 4802 /*
4729 4803 * Add interrupt handler for all vectors
4730 4804 */
4731 4805 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4732 4806 /*
4733 4807 * install pointer to vect_map[vector]
4734 4808 */
4735 4809 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4736 4810 (ddi_intr_handler_t *)ixgbe_intr_msix,
4737 4811 (void *)&ixgbe->vect_map[vector], NULL);
4738 4812
4739 4813 if (rc != DDI_SUCCESS) {
4740 4814 ixgbe_log(ixgbe,
4741 4815 "Add interrupt handler failed. "
4742 4816 "return: %d, vector: %d", rc, vector);
4743 4817 for (vector--; vector >= 0; vector--) {
4744 4818 (void) ddi_intr_remove_handler(
4745 4819 ixgbe->htable[vector]);
4746 4820 }
4747 4821 return (IXGBE_FAILURE);
4748 4822 }
4749 4823 }
4750 4824
4751 4825 break;
4752 4826
4753 4827 case DDI_INTR_TYPE_MSI:
4754 4828 /*
4755 4829 * Add interrupt handlers for the only vector
4756 4830 */
4757 4831 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4758 4832 (ddi_intr_handler_t *)ixgbe_intr_msi,
4759 4833 (void *)ixgbe, NULL);
4760 4834
4761 4835 if (rc != DDI_SUCCESS) {
4762 4836 ixgbe_log(ixgbe,
4763 4837 "Add MSI interrupt handler failed: %d", rc);
4764 4838 return (IXGBE_FAILURE);
4765 4839 }
4766 4840
4767 4841 break;
4768 4842
4769 4843 case DDI_INTR_TYPE_FIXED:
4770 4844 /*
4771 4845 * Add interrupt handlers for the only vector
4772 4846 */
4773 4847 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4774 4848 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4775 4849 (void *)ixgbe, NULL);
4776 4850
4777 4851 if (rc != DDI_SUCCESS) {
4778 4852 ixgbe_log(ixgbe,
4779 4853 "Add legacy interrupt handler failed: %d", rc);
4780 4854 return (IXGBE_FAILURE);
4781 4855 }
4782 4856
4783 4857 break;
4784 4858
4785 4859 default:
4786 4860 return (IXGBE_FAILURE);
4787 4861 }
4788 4862
4789 4863 return (IXGBE_SUCCESS);
4790 4864 }
4791 4865
4792 4866 #pragma inline(ixgbe_map_rxring_to_vector)
4793 4867 /*
4794 4868 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4795 4869 */
4796 4870 static void
4797 4871 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4798 4872 {
4799 4873 /*
4800 4874 * Set bit in map
4801 4875 */
4802 4876 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4803 4877
4804 4878 /*
4805 4879 * Count bits set
4806 4880 */
4807 4881 ixgbe->vect_map[v_idx].rxr_cnt++;
4808 4882
4809 4883 /*
4810 4884 * Remember bit position
4811 4885 */
4812 4886 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4813 4887 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4814 4888 }
4815 4889
4816 4890 #pragma inline(ixgbe_map_txring_to_vector)
4817 4891 /*
4818 4892 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4819 4893 */
4820 4894 static void
4821 4895 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4822 4896 {
4823 4897 /*
4824 4898 * Set bit in map
4825 4899 */
4826 4900 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4827 4901
4828 4902 /*
4829 4903 * Count bits set
4830 4904 */
4831 4905 ixgbe->vect_map[v_idx].txr_cnt++;
4832 4906
4833 4907 /*
4834 4908 * Remember bit position
4835 4909 */
4836 4910 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4837 4911 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4838 4912 }
4839 4913
4840 4914 /*
4841 4915 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4842 4916 * allocation register (IVAR).
4843 4917 * cause:
4844 4918 * -1 : other cause
4845 4919 * 0 : rx
4846 4920 * 1 : tx
4847 4921 */
4848 4922 static void
4849 4923 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4850 4924 int8_t cause)
4851 4925 {
4852 4926 struct ixgbe_hw *hw = &ixgbe->hw;
4853 4927 u32 ivar, index;
4854 4928
4855 4929 switch (hw->mac.type) {
4856 4930 case ixgbe_mac_82598EB:
4857 4931 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4858 4932 if (cause == -1) {
4859 4933 cause = 0;
↓ open down ↓ |
381 lines elided |
↑ open up ↑ |
4860 4934 }
4861 4935 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4862 4936 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4863 4937 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4864 4938 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4865 4939 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4866 4940 break;
4867 4941
4868 4942 case ixgbe_mac_82599EB:
4869 4943 case ixgbe_mac_X540:
4944 + case ixgbe_mac_X550:
4945 + case ixgbe_mac_X550EM_x:
4870 4946 if (cause == -1) {
4871 4947 /* other causes */
4872 4948 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4873 4949 index = (intr_alloc_entry & 1) * 8;
4874 4950 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4875 4951 ivar &= ~(0xFF << index);
4876 4952 ivar |= (msix_vector << index);
4877 4953 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4878 4954 } else {
4879 4955 /* tx or rx causes */
4880 4956 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4881 4957 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4882 4958 ivar = IXGBE_READ_REG(hw,
4883 4959 IXGBE_IVAR(intr_alloc_entry >> 1));
4884 4960 ivar &= ~(0xFF << index);
4885 4961 ivar |= (msix_vector << index);
4886 4962 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4887 4963 ivar);
4888 4964 }
4889 4965 break;
4890 4966
4891 4967 default:
4892 4968 break;
4893 4969 }
4894 4970 }
4895 4971
4896 4972 /*
4897 4973 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4898 4974 * given interrupt vector allocation register (IVAR).
4899 4975 * cause:
4900 4976 * -1 : other cause
4901 4977 * 0 : rx
4902 4978 * 1 : tx
4903 4979 */
4904 4980 static void
4905 4981 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4906 4982 {
4907 4983 struct ixgbe_hw *hw = &ixgbe->hw;
4908 4984 u32 ivar, index;
4909 4985
4910 4986 switch (hw->mac.type) {
4911 4987 case ixgbe_mac_82598EB:
4912 4988 if (cause == -1) {
4913 4989 cause = 0;
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
4914 4990 }
4915 4991 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4916 4992 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4917 4993 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4918 4994 (intr_alloc_entry & 0x3)));
4919 4995 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4920 4996 break;
4921 4997
4922 4998 case ixgbe_mac_82599EB:
4923 4999 case ixgbe_mac_X540:
5000 + case ixgbe_mac_X550:
5001 + case ixgbe_mac_X550EM_x:
4924 5002 if (cause == -1) {
4925 5003 /* other causes */
4926 5004 index = (intr_alloc_entry & 1) * 8;
4927 5005 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4928 5006 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4929 5007 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4930 5008 } else {
4931 5009 /* tx or rx causes */
4932 5010 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4933 5011 ivar = IXGBE_READ_REG(hw,
4934 5012 IXGBE_IVAR(intr_alloc_entry >> 1));
4935 5013 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4936 5014 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4937 5015 ivar);
4938 5016 }
4939 5017 break;
4940 5018
4941 5019 default:
4942 5020 break;
4943 5021 }
4944 5022 }
4945 5023
4946 5024 /*
4947 5025 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4948 5026 * given interrupt vector allocation register (IVAR).
4949 5027 * cause:
4950 5028 * -1 : other cause
4951 5029 * 0 : rx
4952 5030 * 1 : tx
4953 5031 */
4954 5032 static void
4955 5033 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4956 5034 {
4957 5035 struct ixgbe_hw *hw = &ixgbe->hw;
4958 5036 u32 ivar, index;
4959 5037
4960 5038 switch (hw->mac.type) {
4961 5039 case ixgbe_mac_82598EB:
4962 5040 if (cause == -1) {
4963 5041 cause = 0;
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
4964 5042 }
4965 5043 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4966 5044 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4967 5045 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4968 5046 (intr_alloc_entry & 0x3)));
4969 5047 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4970 5048 break;
4971 5049
4972 5050 case ixgbe_mac_82599EB:
4973 5051 case ixgbe_mac_X540:
5052 + case ixgbe_mac_X550:
5053 + case ixgbe_mac_X550EM_x:
4974 5054 if (cause == -1) {
4975 5055 /* other causes */
4976 5056 index = (intr_alloc_entry & 1) * 8;
4977 5057 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4978 5058 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4979 5059 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4980 5060 } else {
4981 5061 /* tx or rx causes */
4982 5062 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4983 5063 ivar = IXGBE_READ_REG(hw,
4984 5064 IXGBE_IVAR(intr_alloc_entry >> 1));
4985 5065 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4986 5066 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4987 5067 ivar);
4988 5068 }
4989 5069 break;
4990 5070
4991 5071 default:
4992 5072 break;
4993 5073 }
4994 5074 }
4995 5075
4996 5076 /*
4997 5077 * Convert the rx ring index driver maintained to the rx ring index
4998 5078 * in h/w.
4999 5079 */
5000 5080 static uint32_t
5001 5081 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5002 5082 {
5003 5083
5004 5084 struct ixgbe_hw *hw = &ixgbe->hw;
5005 5085 uint32_t rx_ring_per_group, hw_rx_index;
5006 5086
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
5007 5087 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5008 5088 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5009 5089 return (sw_rx_index);
5010 5090 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5011 5091 switch (hw->mac.type) {
5012 5092 case ixgbe_mac_82598EB:
5013 5093 return (sw_rx_index);
5014 5094
5015 5095 case ixgbe_mac_82599EB:
5016 5096 case ixgbe_mac_X540:
5097 + case ixgbe_mac_X550:
5098 + case ixgbe_mac_X550EM_x:
5017 5099 return (sw_rx_index * 2);
5018 5100
5019 5101 default:
5020 5102 break;
5021 5103 }
5022 5104 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5023 5105 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5024 5106
5025 5107 switch (hw->mac.type) {
5026 5108 case ixgbe_mac_82598EB:
5027 5109 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5028 5110 16 + (sw_rx_index % rx_ring_per_group);
5029 5111 return (hw_rx_index);
5030 5112
5031 5113 case ixgbe_mac_82599EB:
5032 5114 case ixgbe_mac_X540:
5115 + case ixgbe_mac_X550:
5116 + case ixgbe_mac_X550EM_x:
5033 5117 if (ixgbe->num_rx_groups > 32) {
5034 5118 hw_rx_index = (sw_rx_index /
5035 5119 rx_ring_per_group) * 2 +
5036 5120 (sw_rx_index % rx_ring_per_group);
5037 5121 } else {
5038 5122 hw_rx_index = (sw_rx_index /
5039 5123 rx_ring_per_group) * 4 +
5040 5124 (sw_rx_index % rx_ring_per_group);
5041 5125 }
5042 5126 return (hw_rx_index);
5043 5127
5044 5128 default:
5045 5129 break;
5046 5130 }
5047 5131 }
5048 5132
5049 5133 /*
5050 5134 * Should never reach. Just to make compiler happy.
5051 5135 */
5052 5136 return (sw_rx_index);
5053 5137 }
5054 5138
5055 5139 /*
5056 5140 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5057 5141 *
5058 5142 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
5059 5143 * to vector[0 - (intr_cnt -1)].
5060 5144 */
5061 5145 static int
5062 5146 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5063 5147 {
5064 5148 int i, vector = 0;
5065 5149
5066 5150 /* initialize vector map */
5067 5151 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5068 5152 for (i = 0; i < ixgbe->intr_cnt; i++) {
5069 5153 ixgbe->vect_map[i].ixgbe = ixgbe;
5070 5154 }
5071 5155
5072 5156 /*
5073 5157 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5074 5158 * tx rings[0] on RTxQ[1].
5075 5159 */
5076 5160 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5077 5161 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5078 5162 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5079 5163 return (IXGBE_SUCCESS);
5080 5164 }
5081 5165
5082 5166 /*
5083 5167 * Interrupts/vectors mapping for MSI-X
5084 5168 */
5085 5169
5086 5170 /*
5087 5171 * Map other interrupt to vector 0,
5088 5172 * Set bit in map and count the bits set.
5089 5173 */
5090 5174 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5091 5175 ixgbe->vect_map[vector].other_cnt++;
5092 5176
5093 5177 /*
5094 5178 * Map rx ring interrupts to vectors
5095 5179 */
5096 5180 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5097 5181 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5098 5182 vector = (vector +1) % ixgbe->intr_cnt;
5099 5183 }
5100 5184
5101 5185 /*
5102 5186 * Map tx ring interrupts to vectors
5103 5187 */
5104 5188 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5105 5189 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5106 5190 vector = (vector +1) % ixgbe->intr_cnt;
5107 5191 }
5108 5192
5109 5193 return (IXGBE_SUCCESS);
5110 5194 }
5111 5195
5112 5196 /*
5113 5197 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5114 5198 *
5115 5199 * This relies on ring/vector mapping already set up in the
5116 5200 * vect_map[] structures
5117 5201 */
5118 5202 static void
5119 5203 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5120 5204 {
5121 5205 struct ixgbe_hw *hw = &ixgbe->hw;
5122 5206 ixgbe_intr_vector_t *vect; /* vector bitmap */
5123 5207 int r_idx; /* ring index */
5124 5208 int v_idx; /* vector index */
5125 5209 uint32_t hw_index;
5126 5210
5127 5211 /*
↓ open down ↓ |
85 lines elided |
↑ open up ↑ |
5128 5212 * Clear any previous entries
5129 5213 */
5130 5214 switch (hw->mac.type) {
5131 5215 case ixgbe_mac_82598EB:
5132 5216 for (v_idx = 0; v_idx < 25; v_idx++)
5133 5217 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5134 5218 break;
5135 5219
5136 5220 case ixgbe_mac_82599EB:
5137 5221 case ixgbe_mac_X540:
5222 + case ixgbe_mac_X550:
5223 + case ixgbe_mac_X550EM_x:
5138 5224 for (v_idx = 0; v_idx < 64; v_idx++)
5139 5225 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5140 5226 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5141 5227 break;
5142 5228
5143 5229 default:
5144 5230 break;
5145 5231 }
5146 5232
5147 5233 /*
5148 5234 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5149 5235 * tx rings[0] will use RTxQ[1].
5150 5236 */
5151 5237 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5152 5238 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5153 5239 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5154 5240 return;
5155 5241 }
5156 5242
5157 5243 /*
5158 5244 * For MSI-X interrupt, "Other" is always on vector[0].
5159 5245 */
5160 5246 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5161 5247
5162 5248 /*
5163 5249 * For each interrupt vector, populate the IVAR table
5164 5250 */
5165 5251 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5166 5252 vect = &ixgbe->vect_map[v_idx];
5167 5253
5168 5254 /*
5169 5255 * For each rx ring bit set
5170 5256 */
5171 5257 r_idx = bt_getlowbit(vect->rx_map, 0,
5172 5258 (ixgbe->num_rx_rings - 1));
5173 5259
5174 5260 while (r_idx >= 0) {
5175 5261 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5176 5262 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5177 5263 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5178 5264 (ixgbe->num_rx_rings - 1));
5179 5265 }
5180 5266
5181 5267 /*
5182 5268 * For each tx ring bit set
5183 5269 */
5184 5270 r_idx = bt_getlowbit(vect->tx_map, 0,
5185 5271 (ixgbe->num_tx_rings - 1));
5186 5272
5187 5273 while (r_idx >= 0) {
5188 5274 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5189 5275 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5190 5276 (ixgbe->num_tx_rings - 1));
5191 5277 }
5192 5278 }
5193 5279 }
5194 5280
5195 5281 /*
5196 5282 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5197 5283 */
5198 5284 static void
5199 5285 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5200 5286 {
5201 5287 int i;
5202 5288 int rc;
5203 5289
5204 5290 for (i = 0; i < ixgbe->intr_cnt; i++) {
5205 5291 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5206 5292 if (rc != DDI_SUCCESS) {
5207 5293 IXGBE_DEBUGLOG_1(ixgbe,
5208 5294 "Remove intr handler failed: %d", rc);
5209 5295 }
5210 5296 }
5211 5297 }
5212 5298
5213 5299 /*
5214 5300 * ixgbe_rem_intrs - Remove the allocated interrupts.
5215 5301 */
5216 5302 static void
5217 5303 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5218 5304 {
5219 5305 int i;
5220 5306 int rc;
5221 5307
5222 5308 for (i = 0; i < ixgbe->intr_cnt; i++) {
5223 5309 rc = ddi_intr_free(ixgbe->htable[i]);
5224 5310 if (rc != DDI_SUCCESS) {
5225 5311 IXGBE_DEBUGLOG_1(ixgbe,
5226 5312 "Free intr failed: %d", rc);
5227 5313 }
5228 5314 }
5229 5315
5230 5316 kmem_free(ixgbe->htable, ixgbe->intr_size);
5231 5317 ixgbe->htable = NULL;
5232 5318 }
5233 5319
5234 5320 /*
5235 5321 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5236 5322 */
5237 5323 static int
5238 5324 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5239 5325 {
5240 5326 int i;
5241 5327 int rc;
5242 5328
5243 5329 /*
5244 5330 * Enable interrupts
5245 5331 */
5246 5332 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5247 5333 /*
5248 5334 * Call ddi_intr_block_enable() for MSI
5249 5335 */
5250 5336 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5251 5337 if (rc != DDI_SUCCESS) {
5252 5338 ixgbe_log(ixgbe,
5253 5339 "Enable block intr failed: %d", rc);
5254 5340 return (IXGBE_FAILURE);
5255 5341 }
5256 5342 } else {
5257 5343 /*
5258 5344 * Call ddi_intr_enable() for Legacy/MSI non block enable
5259 5345 */
5260 5346 for (i = 0; i < ixgbe->intr_cnt; i++) {
5261 5347 rc = ddi_intr_enable(ixgbe->htable[i]);
5262 5348 if (rc != DDI_SUCCESS) {
5263 5349 ixgbe_log(ixgbe,
5264 5350 "Enable intr failed: %d", rc);
5265 5351 return (IXGBE_FAILURE);
5266 5352 }
5267 5353 }
5268 5354 }
5269 5355
5270 5356 return (IXGBE_SUCCESS);
5271 5357 }
5272 5358
5273 5359 /*
5274 5360 * ixgbe_disable_intrs - Disable all the interrupts.
5275 5361 */
5276 5362 static int
5277 5363 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5278 5364 {
5279 5365 int i;
5280 5366 int rc;
5281 5367
5282 5368 /*
5283 5369 * Disable all interrupts
5284 5370 */
5285 5371 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5286 5372 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5287 5373 if (rc != DDI_SUCCESS) {
5288 5374 ixgbe_log(ixgbe,
5289 5375 "Disable block intr failed: %d", rc);
5290 5376 return (IXGBE_FAILURE);
5291 5377 }
5292 5378 } else {
5293 5379 for (i = 0; i < ixgbe->intr_cnt; i++) {
5294 5380 rc = ddi_intr_disable(ixgbe->htable[i]);
5295 5381 if (rc != DDI_SUCCESS) {
5296 5382 ixgbe_log(ixgbe,
5297 5383 "Disable intr failed: %d", rc);
5298 5384 return (IXGBE_FAILURE);
5299 5385 }
5300 5386 }
5301 5387 }
5302 5388
5303 5389 return (IXGBE_SUCCESS);
5304 5390 }
5305 5391
5306 5392 /*
5307 5393 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5308 5394 */
5309 5395 static void
5310 5396 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5311 5397 {
5312 5398 struct ixgbe_hw *hw = &ixgbe->hw;
5313 5399 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5314 5400 boolean_t link_up = B_FALSE;
5315 5401 uint32_t pcs1g_anlp = 0;
5316 5402 uint32_t pcs1g_ana = 0;
5317 5403 boolean_t autoneg = B_FALSE;
5318 5404
5319 5405 ASSERT(mutex_owned(&ixgbe->gen_lock));
5320 5406 ixgbe->param_lp_1000fdx_cap = 0;
5321 5407 ixgbe->param_lp_100fdx_cap = 0;
5322 5408
5323 5409 /* check for link, don't wait */
5324 5410 (void) ixgbe_check_link(hw, &speed, &link_up, false);
5325 5411 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5326 5412
5327 5413 if (link_up) {
5328 5414 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5329 5415
5330 5416 ixgbe->param_lp_1000fdx_cap =
5331 5417 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5332 5418 ixgbe->param_lp_100fdx_cap =
5333 5419 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5334 5420 }
5335 5421
5336 5422 (void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5337 5423
5338 5424 ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5339 5425 (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5340 5426 ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5341 5427 (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5342 5428 }
5343 5429
5344 5430 /*
5345 5431 * ixgbe_get_driver_control - Notify that driver is in control of device.
5346 5432 */
5347 5433 static void
5348 5434 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5349 5435 {
5350 5436 uint32_t ctrl_ext;
5351 5437
5352 5438 /*
5353 5439 * Notify firmware that driver is in control of device
5354 5440 */
5355 5441 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5356 5442 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5357 5443 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5358 5444 }
5359 5445
5360 5446 /*
5361 5447 * ixgbe_release_driver_control - Notify that driver is no longer in control
5362 5448 * of device.
5363 5449 */
5364 5450 static void
5365 5451 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5366 5452 {
5367 5453 uint32_t ctrl_ext;
5368 5454
5369 5455 /*
5370 5456 * Notify firmware that driver is no longer in control of device
5371 5457 */
5372 5458 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5373 5459 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5374 5460 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5375 5461 }
5376 5462
5377 5463 /*
5378 5464 * ixgbe_atomic_reserve - Atomic decrease operation.
5379 5465 */
5380 5466 int
5381 5467 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5382 5468 {
5383 5469 uint32_t oldval;
5384 5470 uint32_t newval;
5385 5471
5386 5472 /*
5387 5473 * ATOMICALLY
5388 5474 */
5389 5475 do {
5390 5476 oldval = *count_p;
5391 5477 if (oldval < n)
5392 5478 return (-1);
5393 5479 newval = oldval - n;
5394 5480 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5395 5481
5396 5482 return (newval);
5397 5483 }
5398 5484
5399 5485 /*
5400 5486 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5401 5487 */
5402 5488 static uint8_t *
5403 5489 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5404 5490 {
5405 5491 uint8_t *addr = *upd_ptr;
5406 5492 uint8_t *new_ptr;
5407 5493
5408 5494 _NOTE(ARGUNUSED(hw));
5409 5495 _NOTE(ARGUNUSED(vmdq));
5410 5496
5411 5497 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5412 5498 *upd_ptr = new_ptr;
5413 5499 return (addr);
5414 5500 }
5415 5501
5416 5502 /*
5417 5503 * FMA support
5418 5504 */
5419 5505 int
5420 5506 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5421 5507 {
5422 5508 ddi_fm_error_t de;
5423 5509
5424 5510 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5425 5511 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5426 5512 return (de.fme_status);
5427 5513 }
5428 5514
5429 5515 int
5430 5516 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5431 5517 {
5432 5518 ddi_fm_error_t de;
5433 5519
5434 5520 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5435 5521 return (de.fme_status);
5436 5522 }
5437 5523
5438 5524 /*
5439 5525 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5440 5526 */
5441 5527 static int
5442 5528 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5443 5529 {
5444 5530 _NOTE(ARGUNUSED(impl_data));
5445 5531 /*
5446 5532 * as the driver can always deal with an error in any dma or
5447 5533 * access handle, we can just return the fme_status value.
5448 5534 */
5449 5535 pci_ereport_post(dip, err, NULL);
5450 5536 return (err->fme_status);
5451 5537 }
5452 5538
5453 5539 static void
5454 5540 ixgbe_fm_init(ixgbe_t *ixgbe)
5455 5541 {
5456 5542 ddi_iblock_cookie_t iblk;
5457 5543 int fma_dma_flag;
5458 5544
5459 5545 /*
5460 5546 * Only register with IO Fault Services if we have some capability
5461 5547 */
5462 5548 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5463 5549 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5464 5550 } else {
5465 5551 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5466 5552 }
5467 5553
5468 5554 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5469 5555 fma_dma_flag = 1;
5470 5556 } else {
5471 5557 fma_dma_flag = 0;
5472 5558 }
5473 5559
5474 5560 ixgbe_set_fma_flags(fma_dma_flag);
5475 5561
5476 5562 if (ixgbe->fm_capabilities) {
5477 5563
5478 5564 /*
5479 5565 * Register capabilities with IO Fault Services
5480 5566 */
5481 5567 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5482 5568
5483 5569 /*
5484 5570 * Initialize pci ereport capabilities if ereport capable
5485 5571 */
5486 5572 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5487 5573 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5488 5574 pci_ereport_setup(ixgbe->dip);
5489 5575
5490 5576 /*
5491 5577 * Register error callback if error callback capable
5492 5578 */
5493 5579 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5494 5580 ddi_fm_handler_register(ixgbe->dip,
5495 5581 ixgbe_fm_error_cb, (void*) ixgbe);
5496 5582 }
5497 5583 }
5498 5584
5499 5585 static void
5500 5586 ixgbe_fm_fini(ixgbe_t *ixgbe)
5501 5587 {
5502 5588 /*
5503 5589 * Only unregister FMA capabilities if they are registered
5504 5590 */
5505 5591 if (ixgbe->fm_capabilities) {
5506 5592
5507 5593 /*
5508 5594 * Release any resources allocated by pci_ereport_setup()
5509 5595 */
5510 5596 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5511 5597 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5512 5598 pci_ereport_teardown(ixgbe->dip);
5513 5599
5514 5600 /*
5515 5601 * Un-register error callback if error callback capable
5516 5602 */
5517 5603 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5518 5604 ddi_fm_handler_unregister(ixgbe->dip);
5519 5605
5520 5606 /*
5521 5607 * Unregister from IO Fault Service
5522 5608 */
5523 5609 ddi_fm_fini(ixgbe->dip);
5524 5610 }
5525 5611 }
5526 5612
5527 5613 void
5528 5614 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5529 5615 {
5530 5616 uint64_t ena;
5531 5617 char buf[FM_MAX_CLASS];
5532 5618
5533 5619 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5534 5620 ena = fm_ena_generate(0, FM_ENA_FMT1);
5535 5621 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5536 5622 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5537 5623 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5538 5624 }
5539 5625 }
5540 5626
5541 5627 static int
5542 5628 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5543 5629 {
5544 5630 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5545 5631
5546 5632 mutex_enter(&rx_ring->rx_lock);
5547 5633 rx_ring->ring_gen_num = mr_gen_num;
5548 5634 mutex_exit(&rx_ring->rx_lock);
5549 5635 return (0);
5550 5636 }
5551 5637
5552 5638 /*
5553 5639 * Get the global ring index by a ring index within a group.
5554 5640 */
5555 5641 static int
5556 5642 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5557 5643 {
5558 5644 ixgbe_rx_ring_t *rx_ring;
5559 5645 int i;
5560 5646
5561 5647 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5562 5648 rx_ring = &ixgbe->rx_rings[i];
5563 5649 if (rx_ring->group_index == gindex)
5564 5650 rindex--;
5565 5651 if (rindex < 0)
5566 5652 return (i);
5567 5653 }
5568 5654
5569 5655 return (-1);
5570 5656 }
5571 5657
5572 5658 /*
5573 5659 * Callback function for MAC layer to register all rings.
5574 5660 */
5575 5661 /* ARGSUSED */
5576 5662 void
5577 5663 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5578 5664 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5579 5665 {
5580 5666 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5581 5667 mac_intr_t *mintr = &infop->mri_intr;
5582 5668
5583 5669 switch (rtype) {
5584 5670 case MAC_RING_TYPE_RX: {
5585 5671 /*
5586 5672 * 'index' is the ring index within the group.
5587 5673 * Need to get the global ring index by searching in groups.
5588 5674 */
5589 5675 int global_ring_index = ixgbe_get_rx_ring_index(
5590 5676 ixgbe, group_index, ring_index);
5591 5677
5592 5678 ASSERT(global_ring_index >= 0);
5593 5679
5594 5680 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5595 5681 rx_ring->ring_handle = rh;
5596 5682
5597 5683 infop->mri_driver = (mac_ring_driver_t)rx_ring;
5598 5684 infop->mri_start = ixgbe_ring_start;
5599 5685 infop->mri_stop = NULL;
5600 5686 infop->mri_poll = ixgbe_ring_rx_poll;
5601 5687 infop->mri_stat = ixgbe_rx_ring_stat;
5602 5688
5603 5689 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5604 5690 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5605 5691 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5606 5692 if (ixgbe->intr_type &
5607 5693 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5608 5694 mintr->mi_ddi_handle =
5609 5695 ixgbe->htable[rx_ring->intr_vector];
5610 5696 }
5611 5697
5612 5698 break;
5613 5699 }
5614 5700 case MAC_RING_TYPE_TX: {
5615 5701 ASSERT(group_index == -1);
5616 5702 ASSERT(ring_index < ixgbe->num_tx_rings);
5617 5703
5618 5704 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5619 5705 tx_ring->ring_handle = rh;
5620 5706
5621 5707 infop->mri_driver = (mac_ring_driver_t)tx_ring;
5622 5708 infop->mri_start = NULL;
5623 5709 infop->mri_stop = NULL;
5624 5710 infop->mri_tx = ixgbe_ring_tx;
5625 5711 infop->mri_stat = ixgbe_tx_ring_stat;
5626 5712 if (ixgbe->intr_type &
5627 5713 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5628 5714 mintr->mi_ddi_handle =
5629 5715 ixgbe->htable[tx_ring->intr_vector];
5630 5716 }
5631 5717 break;
5632 5718 }
5633 5719 default:
5634 5720 break;
5635 5721 }
5636 5722 }
5637 5723
5638 5724 /*
5639 5725 * Callback function for MAC layer to register all groups.
5640 5726 */
5641 5727 void
5642 5728 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5643 5729 mac_group_info_t *infop, mac_group_handle_t gh)
5644 5730 {
5645 5731 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5646 5732
5647 5733 switch (rtype) {
5648 5734 case MAC_RING_TYPE_RX: {
5649 5735 ixgbe_rx_group_t *rx_group;
5650 5736
5651 5737 rx_group = &ixgbe->rx_groups[index];
5652 5738 rx_group->group_handle = gh;
5653 5739
5654 5740 infop->mgi_driver = (mac_group_driver_t)rx_group;
5655 5741 infop->mgi_start = NULL;
5656 5742 infop->mgi_stop = NULL;
5657 5743 infop->mgi_addmac = ixgbe_addmac;
5658 5744 infop->mgi_remmac = ixgbe_remmac;
5659 5745 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5660 5746
5661 5747 break;
5662 5748 }
5663 5749 case MAC_RING_TYPE_TX:
5664 5750 break;
5665 5751 default:
5666 5752 break;
5667 5753 }
5668 5754 }
5669 5755
5670 5756 /*
5671 5757 * Enable interrupt on the specified rx ring.
5672 5758 */
5673 5759 int
5674 5760 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5675 5761 {
5676 5762 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5677 5763 ixgbe_t *ixgbe = rx_ring->ixgbe;
5678 5764 int r_idx = rx_ring->index;
5679 5765 int hw_r_idx = rx_ring->hw_index;
5680 5766 int v_idx = rx_ring->intr_vector;
5681 5767
5682 5768 mutex_enter(&ixgbe->gen_lock);
5683 5769 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5684 5770 mutex_exit(&ixgbe->gen_lock);
5685 5771 /*
5686 5772 * Simply return 0.
5687 5773 * Interrupts are being adjusted. ixgbe_intr_adjust()
5688 5774 * will eventually re-enable the interrupt when it's
5689 5775 * done with the adjustment.
5690 5776 */
5691 5777 return (0);
5692 5778 }
5693 5779
5694 5780 /*
5695 5781 * To enable interrupt by setting the VAL bit of given interrupt
5696 5782 * vector allocation register (IVAR).
5697 5783 */
5698 5784 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5699 5785
5700 5786 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5701 5787
5702 5788 /*
5703 5789 * Trigger a Rx interrupt on this ring
5704 5790 */
5705 5791 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5706 5792 IXGBE_WRITE_FLUSH(&ixgbe->hw);
5707 5793
5708 5794 mutex_exit(&ixgbe->gen_lock);
5709 5795
5710 5796 return (0);
5711 5797 }
5712 5798
5713 5799 /*
5714 5800 * Disable interrupt on the specified rx ring.
5715 5801 */
5716 5802 int
5717 5803 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5718 5804 {
5719 5805 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5720 5806 ixgbe_t *ixgbe = rx_ring->ixgbe;
5721 5807 int r_idx = rx_ring->index;
5722 5808 int hw_r_idx = rx_ring->hw_index;
5723 5809 int v_idx = rx_ring->intr_vector;
5724 5810
5725 5811 mutex_enter(&ixgbe->gen_lock);
5726 5812 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5727 5813 mutex_exit(&ixgbe->gen_lock);
5728 5814 /*
5729 5815 * Simply return 0.
5730 5816 * In the rare case where an interrupt is being
5731 5817 * disabled while interrupts are being adjusted,
5732 5818 * we don't fail the operation. No interrupts will
5733 5819 * be generated while they are adjusted, and
5734 5820 * ixgbe_intr_adjust() will cause the interrupts
5735 5821 * to be re-enabled once it completes. Note that
5736 5822 * in this case, packets may be delivered to the
5737 5823 * stack via interrupts before xgbe_rx_ring_intr_enable()
5738 5824 * is called again. This is acceptable since interrupt
5739 5825 * adjustment is infrequent, and the stack will be
5740 5826 * able to handle these packets.
5741 5827 */
5742 5828 return (0);
5743 5829 }
5744 5830
5745 5831 /*
5746 5832 * To disable interrupt by clearing the VAL bit of given interrupt
5747 5833 * vector allocation register (IVAR).
5748 5834 */
5749 5835 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5750 5836
5751 5837 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5752 5838
5753 5839 mutex_exit(&ixgbe->gen_lock);
5754 5840
5755 5841 return (0);
5756 5842 }
5757 5843
5758 5844 /*
5759 5845 * Add a mac address.
5760 5846 */
5761 5847 static int
5762 5848 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5763 5849 {
5764 5850 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5765 5851 ixgbe_t *ixgbe = rx_group->ixgbe;
5766 5852 struct ixgbe_hw *hw = &ixgbe->hw;
5767 5853 int slot, i;
5768 5854
5769 5855 mutex_enter(&ixgbe->gen_lock);
5770 5856
5771 5857 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5772 5858 mutex_exit(&ixgbe->gen_lock);
5773 5859 return (ECANCELED);
5774 5860 }
5775 5861
5776 5862 if (ixgbe->unicst_avail == 0) {
5777 5863 /* no slots available */
5778 5864 mutex_exit(&ixgbe->gen_lock);
5779 5865 return (ENOSPC);
5780 5866 }
5781 5867
5782 5868 /*
5783 5869 * The first ixgbe->num_rx_groups slots are reserved for each respective
5784 5870 * group. The rest slots are shared by all groups. While adding a
5785 5871 * MAC address, reserved slots are firstly checked then the shared
5786 5872 * slots are searched.
5787 5873 */
5788 5874 slot = -1;
5789 5875 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5790 5876 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5791 5877 if (ixgbe->unicst_addr[i].mac.set == 0) {
5792 5878 slot = i;
5793 5879 break;
5794 5880 }
5795 5881 }
5796 5882 } else {
5797 5883 slot = rx_group->index;
5798 5884 }
5799 5885
5800 5886 if (slot == -1) {
5801 5887 /* no slots available */
5802 5888 mutex_exit(&ixgbe->gen_lock);
5803 5889 return (ENOSPC);
5804 5890 }
5805 5891
5806 5892 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5807 5893 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5808 5894 rx_group->index, IXGBE_RAH_AV);
5809 5895 ixgbe->unicst_addr[slot].mac.set = 1;
5810 5896 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5811 5897 ixgbe->unicst_avail--;
5812 5898
5813 5899 mutex_exit(&ixgbe->gen_lock);
5814 5900
5815 5901 return (0);
5816 5902 }
5817 5903
5818 5904 /*
5819 5905 * Remove a mac address.
5820 5906 */
5821 5907 static int
5822 5908 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5823 5909 {
5824 5910 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5825 5911 ixgbe_t *ixgbe = rx_group->ixgbe;
5826 5912 struct ixgbe_hw *hw = &ixgbe->hw;
5827 5913 int slot;
5828 5914
5829 5915 mutex_enter(&ixgbe->gen_lock);
5830 5916
5831 5917 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5832 5918 mutex_exit(&ixgbe->gen_lock);
5833 5919 return (ECANCELED);
5834 5920 }
5835 5921
5836 5922 slot = ixgbe_unicst_find(ixgbe, mac_addr);
5837 5923 if (slot == -1) {
5838 5924 mutex_exit(&ixgbe->gen_lock);
5839 5925 return (EINVAL);
5840 5926 }
5841 5927
5842 5928 if (ixgbe->unicst_addr[slot].mac.set == 0) {
5843 5929 mutex_exit(&ixgbe->gen_lock);
5844 5930 return (EINVAL);
5845 5931 }
5846 5932
5847 5933 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5848 5934 (void) ixgbe_clear_rar(hw, slot);
5849 5935 ixgbe->unicst_addr[slot].mac.set = 0;
5850 5936 ixgbe->unicst_avail++;
5851 5937
5852 5938 mutex_exit(&ixgbe->gen_lock);
5853 5939
5854 5940 return (0);
5855 5941 }
↓ open down ↓ |
708 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX