7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/nxge/nxge_main.c
+++ new/usr/src/uts/common/io/nxge/nxge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
24 24 */
25 25
26 26 /*
27 27  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
28 28 */
29 29 #include <sys/nxge/nxge_impl.h>
30 30 #include <sys/nxge/nxge_hio.h>
31 31 #include <sys/nxge/nxge_rxdma.h>
32 32 #include <sys/pcie.h>
33 33
34 34 uint32_t nxge_use_partition = 0; /* debug partition flag */
35 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */
36 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */
37 37 /*
38 38 * PSARC/2007/453 MSI-X interrupt limit override
39 39 */
40 40 uint32_t nxge_msi_enable = 2;
41 41
42 42 /*
43 43  * Software workaround for a Neptune (PCI-E)
44 44  * hardware interrupt bug: the hardware
45 45  * may generate spurious interrupts after the
46 46  * device interrupt handler has been removed. If this flag
47 47  * is enabled, the driver will reset the
48 48  * hardware when devices are being detached.
49 49 */
50 50 uint32_t nxge_peu_reset_enable = 0;
51 51
52 52 /*
53 53 * Software workaround for the hardware
54 54 * checksum bugs that affect packet transmission
55 55 * and receive:
56 56 *
57 57 * Usage of nxge_cksum_offload:
58 58 *
59 59 * (1) nxge_cksum_offload = 0 (default):
60 60 * - transmits packets:
61 61 * TCP: uses the hardware checksum feature.
62 62 * UDP: driver will compute the software checksum
63 63 * based on the partial checksum computed
64 64 * by the IP layer.
65 65 * - receives packets
66 66 * TCP: marks packets checksum flags based on hardware result.
67 67 * UDP: will not mark checksum flags.
68 68 *
69 69 * (2) nxge_cksum_offload = 1:
70 70 * - transmit packets:
71 71 * TCP/UDP: uses the hardware checksum feature.
72 72 * - receives packets
73 73 * TCP/UDP: marks packet checksum flags based on hardware result.
74 74 *
75 75 * (3) nxge_cksum_offload = 2:
76 76 * - The driver will not register its checksum capability.
77 77 * Checksum for both TCP and UDP will be computed
78 78 * by the stack.
79 79 * - The software LSO is not allowed in this case.
80 80 *
81 81 * (4) nxge_cksum_offload > 2:
82 82  *	- Will be treated as if it were set to 2
83 83 * (stack will compute the checksum).
84 84 *
85 85 * (5) If the hardware bug is fixed, this workaround
86 86 * needs to be updated accordingly to reflect
87 87 * the new hardware revision.
88 88 */
89 89 uint32_t nxge_cksum_offload = 0;
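
/*
 * A minimal sketch (hypothetical helper, not part of this change) of
 * rule (4) above: any setting greater than 2 behaves as if it were 2.
 */
static uint32_t
nxge_cksum_offload_mode_sketch(void)
{
	/* values 0, 1 and 2 are meaningful; anything above clamps to 2 */
	return (nxge_cksum_offload > 2 ? 2 : nxge_cksum_offload);
}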
90 90
91 91 /*
92 92 * Globals: tunable parameters (/etc/system or adb)
93 93 *
94 94 */
95 95 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
96 96 uint32_t nxge_rbr_spare_size = 0;
97 97 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
98 98 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
99 99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
100 100 boolean_t nxge_no_msg = B_TRUE; /* control message display */
101 101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */
102 102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
103 103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
104 104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
105 105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
106 106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
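
/*
 * Example only (the values are illustrative, not recommendations):
 * the globals above are typically tuned from /etc/system, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_mtu = 9216
 *	set nxge:nxge_no_msg = 0
 */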
107 107
108 108 /* MAX LSO size */
109 109 #define NXGE_LSO_MAXLEN 65535
110 110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
111 111
112 112
113 113 /*
114 114 * Add tunable to reduce the amount of time spent in the
115 115 * ISR doing Rx Processing.
116 116 */
117 117 uint32_t nxge_max_rx_pkts = 1024;
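
/*
 * Hypothetical sketch of how such a budget bounds ISR work; the two
 * helpers below are illustrative only and are not driver APIs.
 */
extern boolean_t rx_ring_has_work(void *);	/* hypothetical */
extern void rx_ring_process_one_pkt(void *);	/* hypothetical */

static void
nxge_rx_budget_sketch(void *ring)
{
	uint32_t pkts = 0;

	while (pkts < nxge_max_rx_pkts && rx_ring_has_work(ring)) {
		rx_ring_process_one_pkt(ring);
		pkts++;
	}
	/* any remaining work is deferred to the next interrupt */
}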
118 118
119 119 /*
120 120 * Tunables to manage the receive buffer blocks.
121 121 *
122 122 * nxge_rx_threshold_hi: copy all buffers.
123 123 * nxge_rx_bcopy_size_type: receive buffer block size type.
124 124 * nxge_rx_threshold_lo: copy only up to tunable block size type.
125 125 */
126 126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
127 127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
128 128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
129 129
130 130 /* Use kmem_alloc() to allocate data buffers. */
131 131 #if defined(__sparc)
132 132 uint32_t nxge_use_kmem_alloc = 1;
133 133 #elif defined(__i386)
134 134 uint32_t nxge_use_kmem_alloc = 0;
135 135 #else
136 136 uint32_t nxge_use_kmem_alloc = 1;
137 137 #endif
138 138
139 139 rtrace_t npi_rtracebuf;
140 140
141 141 /*
142 142 * The hardware sometimes fails to allow enough time for the link partner
143 143 * to send an acknowledgement for packets that the hardware sent to it. The
144 144  * hardware resends the packets earlier than it should in those instances.
145 145  * This behavior caused some switches to acknowledge the wrong packets
146 146  * and triggered a fatal error.
147 147 * This software workaround is to set the replay timer to a value
148 148 * suggested by the hardware team.
149 149 *
150 150 * PCI config space replay timer register:
151 151 * The following replay timeout value is 0xc
152 152  * for bits 14:18.
153 153 */
154 154 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8
155 155 #define PCI_REPLAY_TIMEOUT_SHIFT 14
156 156
157 157 uint32_t nxge_set_replay_timer = 1;
158 158 uint32_t nxge_replay_timeout = 0xc;
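
/*
 * A minimal sketch (not the actual nxge_set_pci_replay_timeout()
 * body) of the read-modify-write this workaround implies, assuming
 * the timeout occupies the 5-bit field at bits 18:14 of the config
 * word at offset 0xb8:
 */
static void
nxge_replay_timeout_sketch(ddi_acc_handle_t cfg_handle)
{
	uint32_t val;

	val = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);	/* clear bits 18:14 */
	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
}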
159 159
160 160 /*
161 161  * The transmit serialization sometimes sleeps longer
162 162  * than it should before calling the driver transmit
163 163  * function.
164 164  * The performance group suggests that a time wait tunable
165 165  * can be used to set the maximum wait time when needed;
166 166  * the default is set to 1 tick.
167 167 */
168 168 uint32_t nxge_tx_serial_maxsleep = 1;
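
/*
 * Hypothetical sketch of a wait bounded by this tunable (the real
 * serializer lives elsewhere in the driver); the caller is assumed
 * to hold *mp:
 */
static void
nxge_tx_serial_wait_sketch(kcondvar_t *cvp, kmutex_t *mp)
{
	/* sleep at most nxge_tx_serial_maxsleep ticks */
	(void) cv_timedwait(cvp, mp,
	    ddi_get_lbolt() + (clock_t)nxge_tx_serial_maxsleep);
}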
169 169
170 170 #if defined(sun4v)
171 171 /*
172 172 * Hypervisor N2/NIU services information.
173 173 */
174 174 /*
175 175 * The following is the default API supported:
176 176 * major 1 and minor 1.
177 177 *
178 178 * Please update the MAX_NIU_MAJORS,
179 179 * MAX_NIU_MINORS, and minor number supported
180 180  * when newer Hypervisor API interfaces
181 181 * are added. Also, please update nxge_hsvc_register()
182 182 * if needed.
183 183 */
184 184 static hsvc_info_t niu_hsvc = {
185 185 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
186 186 NIU_MINOR_VER, "nxge"
187 187 };
188 188
189 189 static int nxge_hsvc_register(p_nxge_t);
190 190 #endif
191 191
192 192 /*
193 193 * Function Prototypes
194 194 */
195 195 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
196 196 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
197 197 static void nxge_unattach(p_nxge_t);
198 198 static int nxge_quiesce(dev_info_t *);
199 199
200 200 #if NXGE_PROPERTY
201 201 static void nxge_remove_hard_properties(p_nxge_t);
202 202 #endif
203 203
204 204 /*
205 205 * These two functions are required by nxge_hio.c
206 206 */
207 207 extern int nxge_m_mmac_remove(void *arg, int slot);
208 208 extern void nxge_grp_cleanup(p_nxge_t nxge);
209 209
210 210 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
211 211
212 212 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
213 213 static void nxge_destroy_mutexes(p_nxge_t);
214 214
215 215 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
216 216 static void nxge_unmap_regs(p_nxge_t nxgep);
217 217 #ifdef NXGE_DEBUG
218 218 static void nxge_test_map_regs(p_nxge_t nxgep);
219 219 #endif
220 220
221 221 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
222 222 static void nxge_remove_intrs(p_nxge_t nxgep);
223 223
224 224 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
225 225 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
226 226 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
227 227 static void nxge_intrs_enable(p_nxge_t nxgep);
228 228 static void nxge_intrs_disable(p_nxge_t nxgep);
229 229
230 230 static void nxge_suspend(p_nxge_t);
231 231 static nxge_status_t nxge_resume(p_nxge_t);
232 232
233 233 static nxge_status_t nxge_setup_dev(p_nxge_t);
234 234 static void nxge_destroy_dev(p_nxge_t);
235 235
236 236 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
237 237 static void nxge_free_mem_pool(p_nxge_t);
238 238
239 239 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
240 240 static void nxge_free_rx_mem_pool(p_nxge_t);
241 241
242 242 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
243 243 static void nxge_free_tx_mem_pool(p_nxge_t);
244 244
245 245 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
246 246 struct ddi_dma_attr *,
247 247 size_t, ddi_device_acc_attr_t *, uint_t,
248 248 p_nxge_dma_common_t);
249 249
250 250 static void nxge_dma_mem_free(p_nxge_dma_common_t);
251 251 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
252 252
253 253 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
254 254 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
255 255 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
256 256
257 257 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
258 258 p_nxge_dma_common_t *, size_t);
259 259 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
260 260
261 261 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
262 262 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
263 263 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
264 264
265 265 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
266 266 p_nxge_dma_common_t *,
267 267 size_t);
268 268 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
269 269
270 270 static int nxge_init_common_dev(p_nxge_t);
271 271 static void nxge_uninit_common_dev(p_nxge_t);
272 272 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
273 273 char *, caddr_t);
274 274 #if defined(sun4v)
275 275 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
276 276 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
277 277 #endif
278 278
279 279 /*
280 280 * The next declarations are for the GLDv3 interface.
281 281 */
282 282 static int nxge_m_start(void *);
283 283 static void nxge_m_stop(void *);
284 284 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
285 285 static int nxge_m_promisc(void *, boolean_t);
286 286 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
287 287 nxge_status_t nxge_mac_register(p_nxge_t);
288 288 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
289 289 int slot, int rdctbl, boolean_t usetbl);
290 290 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
291 291 boolean_t factory);
292 292
293 293 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
294 294 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
295 295 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
296 296 uint_t, const void *);
297 297 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
298 298 uint_t, void *);
299 299 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
300 300 mac_prop_info_handle_t);
301 301 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
302 302 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
303 303 const void *);
304 304 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
305 305 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
306 306 mac_ring_info_t *, mac_ring_handle_t);
307 307 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
308 308 mac_ring_type_t);
309 309 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
310 310 mac_ring_type_t);
311 311
312 312 static void nxge_niu_peu_reset(p_nxge_t nxgep);
313 313 static void nxge_set_pci_replay_timeout(nxge_t *);
314 314
315 315 char *nxge_priv_props[] = {
316 316 "_adv_10gfdx_cap",
317 317 "_adv_pause_cap",
318 318 "_function_number",
319 319 "_fw_version",
320 320 "_port_mode",
321 321 "_hot_swap_phy",
322 322 "_rxdma_intr_time",
323 323 "_rxdma_intr_pkts",
324 324 "_class_opt_ipv4_tcp",
325 325 "_class_opt_ipv4_udp",
326 326 "_class_opt_ipv4_ah",
327 327 "_class_opt_ipv4_sctp",
328 328 "_class_opt_ipv6_tcp",
329 329 "_class_opt_ipv6_udp",
330 330 "_class_opt_ipv6_ah",
331 331 "_class_opt_ipv6_sctp",
332 332 "_soft_lso_enable",
333 333 NULL
334 334 };
335 335
336 336 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL
337 337 #define MAX_DUMP_SZ 256
338 338
339 339 #define NXGE_M_CALLBACK_FLAGS \
340 340 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
341 341
342 342 mac_callbacks_t nxge_m_callbacks = {
343 343 NXGE_M_CALLBACK_FLAGS,
344 344 nxge_m_stat,
345 345 nxge_m_start,
346 346 nxge_m_stop,
347 347 nxge_m_promisc,
348 348 nxge_m_multicst,
349 349 NULL,
350 350 NULL,
351 351 NULL,
352 352 nxge_m_ioctl,
353 353 nxge_m_getcapab,
354 354 NULL,
355 355 NULL,
356 356 nxge_m_setprop,
357 357 nxge_m_getprop,
358 358 nxge_m_propinfo
359 359 };
360 360
361 361 void
362 362 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
363 363
364 364 /* PSARC/2007/453 MSI-X interrupt limit override. */
365 365 #define NXGE_MSIX_REQUEST_10G 8
366 366 #define NXGE_MSIX_REQUEST_1G 2
367 367 static int nxge_create_msi_property(p_nxge_t);
368 368 /*
369 369  * For applications that care about latency,
370 370  * PAE and customers requested that the
371 371  * driver provide tunables that
372 372  * allow the user to request a higher number of
373 373  * interrupts to spread the interrupts among
374 374  * multiple channels. The DDI framework limits
375 375  * the maximum number of MSI-X resources to allocate
376 376  * to 8 (ddi_msix_alloc_limit). If more than 8
377 377  * is requested, ddi_msix_alloc_limit must be set accordingly.
378 378  * The default number of MSI interrupts is
379 379  * 8 for a 10G link and 2 for a 1G link.
380 380 */
381 381 #define NXGE_MSIX_MAX_ALLOWED 32
382 382 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
383 383 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
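
/*
 * Example only (values are illustrative): raising the interrupt
 * count beyond 8 requires bumping ddi_msix_alloc_limit as well,
 * e.g. in /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */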
384 384
385 385 /*
386 386 * These global variables control the message
387 387 * output.
388 388 */
389 389 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
390 390 uint64_t nxge_debug_level;
391 391
392 392 /*
393 393 * This list contains the instance structures for the Neptune
394 394 * devices present in the system. The lock exists to guarantee
395 395 * mutually exclusive access to the list.
396 396 */
397 397 void *nxge_list = NULL;
398 398 void *nxge_hw_list = NULL;
399 399 nxge_os_mutex_t nxge_common_lock;
400 400 nxge_os_mutex_t nxgedebuglock;
401 401
402 402 extern uint64_t npi_debug_level;
403 403
404 404 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
405 405 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
406 406 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
407 407 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
408 408 extern void nxge_fm_init(p_nxge_t,
409 409 ddi_device_acc_attr_t *,
410 410 ddi_dma_attr_t *);
411 411 extern void nxge_fm_fini(p_nxge_t);
412 412 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
413 413
414 414 /*
415 415 * Count used to maintain the number of buffers being used
416 416 * by Neptune instances and loaned up to the upper layers.
417 417 */
418 418 uint32_t nxge_mblks_pending = 0;
419 419
420 420 /*
421 421 * Device register access attributes for PIO.
422 422 */
423 423 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
424 424 DDI_DEVICE_ATTR_V1,
425 425 DDI_STRUCTURE_LE_ACC,
426 426 DDI_STRICTORDER_ACC,
427 427 DDI_DEFAULT_ACC
428 428 };
429 429
430 430 /*
431 431 * Device descriptor access attributes for DMA.
432 432 */
433 433 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
434 434 DDI_DEVICE_ATTR_V0,
435 435 DDI_STRUCTURE_LE_ACC,
436 436 DDI_STRICTORDER_ACC
437 437 };
438 438
439 439 /*
440 440 * Device buffer access attributes for DMA.
441 441 */
442 442 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
443 443 DDI_DEVICE_ATTR_V0,
444 444 DDI_STRUCTURE_BE_ACC,
445 445 DDI_STRICTORDER_ACC
446 446 };
447 447
448 448 ddi_dma_attr_t nxge_desc_dma_attr = {
449 449 DMA_ATTR_V0, /* version number. */
450 450 0, /* low address */
451 451 0xffffffffffffffff, /* high address */
452 452 0xffffffffffffffff, /* address counter max */
453 453 #ifndef NIU_PA_WORKAROUND
454 454 0x100000, /* alignment */
455 455 #else
456 456 0x2000,
457 457 #endif
458 458 0xfc00fc, /* dlim_burstsizes */
459 459 0x1, /* minimum transfer size */
460 460 0xffffffffffffffff, /* maximum transfer size */
461 461 0xffffffffffffffff, /* maximum segment size */
462 462 1, /* scatter/gather list length */
463 463 (unsigned int) 1, /* granularity */
464 464 0 /* attribute flags */
465 465 };
466 466
467 467 ddi_dma_attr_t nxge_tx_dma_attr = {
468 468 DMA_ATTR_V0, /* version number. */
469 469 0, /* low address */
470 470 0xffffffffffffffff, /* high address */
471 471 0xffffffffffffffff, /* address counter max */
472 472 #if defined(_BIG_ENDIAN)
473 473 0x2000, /* alignment */
474 474 #else
475 475 0x1000, /* alignment */
476 476 #endif
477 477 0xfc00fc, /* dlim_burstsizes */
478 478 0x1, /* minimum transfer size */
479 479 0xffffffffffffffff, /* maximum transfer size */
480 480 0xffffffffffffffff, /* maximum segment size */
481 481 5, /* scatter/gather list length */
482 482 (unsigned int) 1, /* granularity */
483 483 0 /* attribute flags */
484 484 };
485 485
486 486 ddi_dma_attr_t nxge_rx_dma_attr = {
487 487 DMA_ATTR_V0, /* version number. */
488 488 0, /* low address */
489 489 0xffffffffffffffff, /* high address */
490 490 0xffffffffffffffff, /* address counter max */
491 491 0x2000, /* alignment */
492 492 0xfc00fc, /* dlim_burstsizes */
493 493 0x1, /* minimum transfer size */
494 494 0xffffffffffffffff, /* maximum transfer size */
495 495 0xffffffffffffffff, /* maximum segment size */
496 496 1, /* scatter/gather list length */
497 497 (unsigned int) 1, /* granularity */
498 498 DDI_DMA_RELAXED_ORDERING /* attribute flags */
499 499 };
500 500
501 501 ddi_dma_lim_t nxge_dma_limits = {
502 502 (uint_t)0, /* dlim_addr_lo */
503 503 (uint_t)0xffffffff, /* dlim_addr_hi */
504 504 (uint_t)0xffffffff, /* dlim_cntr_max */
505 505 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
506 506 0x1, /* dlim_minxfer */
507 507 1024 /* dlim_speed */
508 508 };
509 509
510 510 dma_method_t nxge_force_dma = DVMA;
511 511
512 512 /*
513 513 * dma chunk sizes.
514 514 *
515 515 * Try to allocate the largest possible size
516 516  * so that fewer dma chunks need to be managed.
517 517 */
518 518 #ifdef NIU_PA_WORKAROUND
519 519 size_t alloc_sizes [] = {0x2000};
520 520 #else
521 521 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
522 522 0x10000, 0x20000, 0x40000, 0x80000,
523 523 0x100000, 0x200000, 0x400000, 0x800000,
524 524 0x1000000, 0x2000000, 0x4000000};
525 525 #endif
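
/*
 * Hypothetical sketch of the largest-first policy described above:
 * walk alloc_sizes[] from the end and take the biggest chunk size
 * that still fits the remaining allocation.
 */
static size_t
nxge_pick_chunk_size_sketch(size_t remaining)
{
	int i;

	for (i = (sizeof (alloc_sizes) / sizeof (alloc_sizes[0])) - 1;
	    i > 0; i--) {
		if (alloc_sizes[i] <= remaining)
			break;
	}
	return (alloc_sizes[i]);
}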
526 526
527 527 /*
528 528 * Translate "dev_t" to a pointer to the associated "dev_info_t".
529 529 */
530 530
531 531 extern void nxge_get_environs(nxge_t *);
532 532
533 533 static int
534 534 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
535 535 {
536 536 p_nxge_t nxgep = NULL;
537 537 int instance;
538 538 int status = DDI_SUCCESS;
539 539 uint8_t portn;
540 540 nxge_mmac_t *mmac_info;
541 541
542 542 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
543 543
544 544 /*
545 545 * Get the device instance since we'll need to setup
546 546 * or retrieve a soft state for this instance.
547 547 */
548 548 instance = ddi_get_instance(dip);
549 549
550 550 switch (cmd) {
551 551 case DDI_ATTACH:
552 552 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
553 553 break;
554 554
555 555 case DDI_RESUME:
556 556 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
557 557 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
558 558 if (nxgep == NULL) {
559 559 status = DDI_FAILURE;
560 560 break;
561 561 }
562 562 if (nxgep->dip != dip) {
563 563 status = DDI_FAILURE;
564 564 break;
565 565 }
566 566 if (nxgep->suspended == DDI_PM_SUSPEND) {
567 567 status = ddi_dev_is_needed(nxgep->dip, 0, 1);
568 568 } else {
569 569 status = nxge_resume(nxgep);
570 570 }
571 571 goto nxge_attach_exit;
572 572
573 573 case DDI_PM_RESUME:
574 574 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
575 575 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
576 576 if (nxgep == NULL) {
577 577 status = DDI_FAILURE;
578 578 break;
579 579 }
580 580 if (nxgep->dip != dip) {
581 581 status = DDI_FAILURE;
582 582 break;
583 583 }
584 584 status = nxge_resume(nxgep);
585 585 goto nxge_attach_exit;
586 586
587 587 default:
588 588 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
589 589 status = DDI_FAILURE;
590 590 goto nxge_attach_exit;
591 591 }
592 592
593 593
594 594 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
595 595 status = DDI_FAILURE;
596 596 goto nxge_attach_exit;
597 597 }
598 598
599 599 nxgep = ddi_get_soft_state(nxge_list, instance);
600 600 if (nxgep == NULL) {
601 601 status = NXGE_ERROR;
602 602 goto nxge_attach_fail2;
603 603 }
604 604
605 605 nxgep->nxge_magic = NXGE_MAGIC;
606 606
607 607 nxgep->drv_state = 0;
608 608 nxgep->dip = dip;
609 609 nxgep->instance = instance;
610 610 nxgep->p_dip = ddi_get_parent(dip);
611 611 nxgep->nxge_debug_level = nxge_debug_level;
612 612 npi_debug_level = nxge_debug_level;
613 613
614 614 /* Are we a guest running in a Hybrid I/O environment? */
615 615 nxge_get_environs(nxgep);
616 616
617 617 status = nxge_map_regs(nxgep);
618 618
619 619 if (status != NXGE_OK) {
620 620 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
621 621 goto nxge_attach_fail3;
622 622 }
623 623
624 624 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);
625 625
626 626 /* Create & initialize the per-Neptune data structure */
627 627 /* (even if we're a guest). */
628 628 status = nxge_init_common_dev(nxgep);
629 629 if (status != NXGE_OK) {
630 630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
631 631 "nxge_init_common_dev failed"));
632 632 goto nxge_attach_fail4;
633 633 }
634 634
635 635 /*
636 636 * Software workaround: set the replay timer.
637 637 */
638 638 if (nxgep->niu_type != N2_NIU) {
639 639 nxge_set_pci_replay_timeout(nxgep);
640 640 }
641 641
642 642 #if defined(sun4v)
643 643 /* This is required by nxge_hio_init(), which follows. */
644 644 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
645 645 goto nxge_attach_fail4;
646 646 #endif
647 647
648 648 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
649 649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
650 650 "nxge_hio_init failed"));
651 651 goto nxge_attach_fail4;
652 652 }
653 653
654 654 if (nxgep->niu_type == NEPTUNE_2_10GF) {
655 655 if (nxgep->function_num > 1) {
656 656 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
657 657 " function %d. Only functions 0 and 1 are "
658 658 "supported for this card.", nxgep->function_num));
659 659 status = NXGE_ERROR;
660 660 goto nxge_attach_fail4;
661 661 }
662 662 }
663 663
664 664 if (isLDOMguest(nxgep)) {
665 665 /*
666 666 * Use the function number here.
667 667 */
668 668 nxgep->mac.portnum = nxgep->function_num;
669 669 nxgep->mac.porttype = PORT_TYPE_LOGICAL;
670 670
671 671 /* XXX We'll set the MAC address counts to 1 for now. */
672 672 mmac_info = &nxgep->nxge_mmac_info;
673 673 mmac_info->num_mmac = 1;
674 674 mmac_info->naddrfree = 1;
675 675 } else {
676 676 portn = NXGE_GET_PORT_NUM(nxgep->function_num);
677 677 nxgep->mac.portnum = portn;
678 678 if ((portn == 0) || (portn == 1))
679 679 nxgep->mac.porttype = PORT_TYPE_XMAC;
680 680 else
681 681 nxgep->mac.porttype = PORT_TYPE_BMAC;
682 682 /*
683 683 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
684 684		 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
685 685		 * The two types of MACs have different characteristics.
686 686 */
687 687 mmac_info = &nxgep->nxge_mmac_info;
688 688 if (nxgep->function_num < 2) {
689 689 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
690 690 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
691 691 } else {
692 692 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
693 693 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
694 694 }
695 695 }
696 696 /*
697 697	 * Set up the ndd parameters for this instance.
698 698 */
699 699 nxge_init_param(nxgep);
700 700
701 701 /*
702 702 * Setup Register Tracing Buffer.
703 703 */
704 704 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
705 705
706 706 /* init stats ptr */
707 707 nxge_init_statsp(nxgep);
708 708
709 709 /*
710 710 * Copy the vpd info from eeprom to a local data
711 711 * structure, and then check its validity.
712 712 */
713 713 if (!isLDOMguest(nxgep)) {
714 714 int *regp;
715 715 uint_t reglen;
716 716 int rv;
717 717
718 718 nxge_vpd_info_get(nxgep);
719 719
720 720 /* Find the NIU config handle. */
721 721 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
722 722 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
723 723		    "reg", &regp, &reglen);
724 724
725 725 if (rv != DDI_PROP_SUCCESS) {
726 726 goto nxge_attach_fail5;
727 727 }
728 728 /*
729 729		 * The address_hi, that is the first int in the reg
730 730		 * property, contains the config handle, but we need to remove
731 731		 * bits 28-31, which are OBP-specific info.
732 732 */
733 733 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
734 734 ddi_prop_free(regp);
735 735 }
736 736
737 737 /*
738 738 * Set the defaults for the MTU size.
739 739 */
740 740 nxge_hw_id_init(nxgep);
741 741
742 742 if (isLDOMguest(nxgep)) {
743 743 uchar_t *prop_val;
744 744 uint_t prop_len;
745 745 uint32_t max_frame_size;
746 746
747 747 extern void nxge_get_logical_props(p_nxge_t);
748 748
749 749 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
750 750 nxgep->mac.portmode = PORT_LOGICAL;
751 751 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
752 752 "phy-type", "virtual transceiver");
753 753
754 754 nxgep->nports = 1;
755 755 nxgep->board_ver = 0; /* XXX What? */
756 756
757 757 /*
758 758 * local-mac-address property gives us info on which
759 759 * specific MAC address the Hybrid resource is associated
760 760 * with.
761 761 */
762 762 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
763 763 "local-mac-address", &prop_val,
764 764 &prop_len) != DDI_PROP_SUCCESS) {
765 765 goto nxge_attach_fail5;
766 766 }
767 767 if (prop_len != ETHERADDRL) {
768 768 ddi_prop_free(prop_val);
769 769 goto nxge_attach_fail5;
770 770 }
771 771 ether_copy(prop_val, nxgep->hio_mac_addr);
772 772 ddi_prop_free(prop_val);
773 773 nxge_get_logical_props(nxgep);
774 774
775 775 /*
776 776 * Enable Jumbo property based on the "max-frame-size"
777 777 * property value.
778 778 */
779 779 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
780 780 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
781 781 "max-frame-size", NXGE_MTU_DEFAULT_MAX);
782 782 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
783 783 (max_frame_size <= TX_JUMBO_MTU)) {
784 784 nxgep->mac.is_jumbo = B_TRUE;
785 785 nxgep->mac.maxframesize = (uint16_t)max_frame_size;
786 786 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
787 787 NXGE_EHEADER_VLAN_CRC;
788 788 }
789 789 } else {
790 790 status = nxge_xcvr_find(nxgep);
791 791
792 792 if (status != NXGE_OK) {
793 793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
794 794 " Couldn't determine card type"
795 795 " .... exit "));
796 796 goto nxge_attach_fail5;
797 797 }
798 798
799 799 status = nxge_get_config_properties(nxgep);
800 800
801 801 if (status != NXGE_OK) {
802 802 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
803 803 "get_hw create failed"));
804 804 goto nxge_attach_fail;
805 805 }
806 806 }
807 807
808 808 /*
809 809 * Setup the Kstats for the driver.
810 810 */
811 811 nxge_setup_kstats(nxgep);
812 812
813 813 if (!isLDOMguest(nxgep))
814 814 nxge_setup_param(nxgep);
815 815
816 816 status = nxge_setup_system_dma_pages(nxgep);
817 817 if (status != NXGE_OK) {
818 818 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
819 819 goto nxge_attach_fail;
820 820 }
821 821
822 822
823 823 if (!isLDOMguest(nxgep))
824 824 nxge_hw_init_niu_common(nxgep);
825 825
826 826 status = nxge_setup_mutexes(nxgep);
827 827 if (status != NXGE_OK) {
828 828 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
829 829 goto nxge_attach_fail;
830 830 }
831 831
832 832 #if defined(sun4v)
833 833 if (isLDOMguest(nxgep)) {
834 834 /* Find our VR & channel sets. */
835 835 status = nxge_hio_vr_add(nxgep);
836 836 if (status != DDI_SUCCESS) {
837 837 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
838 838 "nxge_hio_vr_add failed"));
839 839 (void) hsvc_unregister(&nxgep->niu_hsvc);
840 840 nxgep->niu_hsvc_available = B_FALSE;
841 841 goto nxge_attach_fail;
842 842 }
843 843 goto nxge_attach_exit;
844 844 }
845 845 #endif
846 846
847 847 status = nxge_setup_dev(nxgep);
848 848 if (status != DDI_SUCCESS) {
849 849 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
850 850 goto nxge_attach_fail;
851 851 }
852 852
853 853 status = nxge_add_intrs(nxgep);
854 854 if (status != DDI_SUCCESS) {
855 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
856 856 goto nxge_attach_fail;
857 857 }
858 858
859 859 /* If a guest, register with vio_net instead. */
860 860 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
861 861 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
862 862 "unable to register to mac layer (%d)", status));
863 863 goto nxge_attach_fail;
864 864 }
865 865
866 866 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
867 867
868 868 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
869 869 "registered to mac (instance %d)", instance));
870 870
871 871 /* nxge_link_monitor calls xcvr.check_link recursively */
872 872 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
873 873
874 874 goto nxge_attach_exit;
875 875
876 876 nxge_attach_fail:
877 877 nxge_unattach(nxgep);
878 878 goto nxge_attach_fail1;
879 879
880 880 nxge_attach_fail5:
881 881 /*
882 882 * Tear down the ndd parameters setup.
883 883 */
884 884 nxge_destroy_param(nxgep);
885 885
886 886 /*
887 887 * Tear down the kstat setup.
888 888 */
889 889 nxge_destroy_kstats(nxgep);
890 890
891 891 nxge_attach_fail4:
892 892 if (nxgep->nxge_hw_p) {
893 893 nxge_uninit_common_dev(nxgep);
894 894 nxgep->nxge_hw_p = NULL;
895 895 }
896 896
897 897 nxge_attach_fail3:
898 898 /*
899 899 * Unmap the register setup.
900 900 */
901 901 nxge_unmap_regs(nxgep);
902 902
903 903 nxge_fm_fini(nxgep);
904 904
905 905 nxge_attach_fail2:
906 906 ddi_soft_state_free(nxge_list, nxgep->instance);
907 907
908 908 nxge_attach_fail1:
909 909 if (status != NXGE_OK)
910 910 status = (NXGE_ERROR | NXGE_DDI_FAILED);
911 911 nxgep = NULL;
912 912
913 913 nxge_attach_exit:
914 914 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
915 915 status));
916 916
917 917 return (status);
918 918 }
919 919
920 920 static int
921 921 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
922 922 {
923 923 int status = DDI_SUCCESS;
924 924 int instance;
925 925 p_nxge_t nxgep = NULL;
926 926
927 927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
928 928 instance = ddi_get_instance(dip);
929 929 nxgep = ddi_get_soft_state(nxge_list, instance);
930 930 if (nxgep == NULL) {
931 931 status = DDI_FAILURE;
932 932 goto nxge_detach_exit;
933 933 }
934 934
935 935 switch (cmd) {
936 936 case DDI_DETACH:
937 937 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
938 938 break;
939 939
940 940 case DDI_PM_SUSPEND:
941 941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
942 942 nxgep->suspended = DDI_PM_SUSPEND;
943 943 nxge_suspend(nxgep);
944 944 break;
945 945
946 946 case DDI_SUSPEND:
947 947 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
948 948 if (nxgep->suspended != DDI_PM_SUSPEND) {
949 949 nxgep->suspended = DDI_SUSPEND;
950 950 nxge_suspend(nxgep);
951 951 }
952 952 break;
953 953
954 954 default:
955 955 status = DDI_FAILURE;
956 956 }
957 957
958 958 if (cmd != DDI_DETACH)
959 959 goto nxge_detach_exit;
960 960
961 961 /*
962 962 * Stop the xcvr polling.
963 963 */
964 964 nxgep->suspended = cmd;
965 965
966 966 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
967 967
968 968 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
969 969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
970 970 "<== nxge_detach status = 0x%08X", status));
971 971 return (DDI_FAILURE);
972 972 }
973 973
974 974 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
975 975 "<== nxge_detach (mac_unregister) status = 0x%08X", status));
976 976
977 977 nxge_unattach(nxgep);
978 978 nxgep = NULL;
979 979
980 980 nxge_detach_exit:
981 981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
982 982 status));
983 983
984 984 return (status);
985 985 }
986 986
987 987 static void
988 988 nxge_unattach(p_nxge_t nxgep)
989 989 {
990 990 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
991 991
992 992 if (nxgep == NULL || nxgep->dev_regs == NULL) {
993 993 return;
994 994 }
995 995
996 996 nxgep->nxge_magic = 0;
997 997
998 998 if (nxgep->nxge_timerid) {
999 999 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1000 1000 nxgep->nxge_timerid = 0;
1001 1001 }
1002 1002
1003 1003 /*
1004 1004 * If this flag is set, it will affect the Neptune
1005 1005 * only.
1006 1006 */
1007 1007 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1008 1008 nxge_niu_peu_reset(nxgep);
1009 1009 }
1010 1010
1011 1011 #if defined(sun4v)
1012 1012 if (isLDOMguest(nxgep)) {
1013 1013 (void) nxge_hio_vr_release(nxgep);
1014 1014 }
1015 1015 #endif
1016 1016
1017 1017 if (nxgep->nxge_hw_p) {
1018 1018 nxge_uninit_common_dev(nxgep);
1019 1019 nxgep->nxge_hw_p = NULL;
1020 1020 }
1021 1021
1022 1022 #if defined(sun4v)
1023 1023 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1024 1024 (void) hsvc_unregister(&nxgep->niu_hsvc);
1025 1025 nxgep->niu_hsvc_available = B_FALSE;
1026 1026 }
1027 1027 #endif
1028 1028 /*
1029 1029 * Stop any further interrupts.
1030 1030 */
1031 1031 nxge_remove_intrs(nxgep);
1032 1032
1033 1033 /*
1034 1034 * Stop the device and free resources.
1035 1035 */
1036 1036 if (!isLDOMguest(nxgep)) {
1037 1037 nxge_destroy_dev(nxgep);
1038 1038 }
1039 1039
1040 1040 /*
1041 1041 * Tear down the ndd parameters setup.
1042 1042 */
1043 1043 nxge_destroy_param(nxgep);
1044 1044
1045 1045 /*
1046 1046 * Tear down the kstat setup.
1047 1047 */
1048 1048 nxge_destroy_kstats(nxgep);
1049 1049
1050 1050 /*
1051 1051 * Free any memory allocated for PHY properties
1052 1052 */
1053 1053 if (nxgep->phy_prop.cnt > 0) {
1054 1054 KMEM_FREE(nxgep->phy_prop.arr,
1055 1055 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
1056 1056 nxgep->phy_prop.cnt = 0;
1057 1057 }
1058 1058
1059 1059 /*
1060 1060 * Destroy all mutexes.
1061 1061 */
1062 1062 nxge_destroy_mutexes(nxgep);
1063 1063
1064 1064 /*
1065 1065 * Remove the list of ndd parameters which
1066 1066 * were setup during attach.
1067 1067 */
1068 1068 if (nxgep->dip) {
1069 1069 NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1070 1070 " nxge_unattach: remove all properties"));
1071 1071
1072 1072 (void) ddi_prop_remove_all(nxgep->dip);
1073 1073 }
1074 1074
1075 1075 #if NXGE_PROPERTY
1076 1076 nxge_remove_hard_properties(nxgep);
1077 1077 #endif
1078 1078
1079 1079 /*
1080 1080 * Unmap the register setup.
1081 1081 */
1082 1082 nxge_unmap_regs(nxgep);
1083 1083
1084 1084 nxge_fm_fini(nxgep);
1085 1085
1086 1086 ddi_soft_state_free(nxge_list, nxgep->instance);
1087 1087
1088 1088 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1089 1089 }
1090 1090
1091 1091 #if defined(sun4v)
1092 1092 int
1093 1093 nxge_hsvc_register(nxge_t *nxgep)
1094 1094 {
1095 1095 nxge_status_t status;
1096 1096 int i, j;
1097 1097
1098 1098 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
1099 1099 if (nxgep->niu_type != N2_NIU) {
1100 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
1101 1101 return (DDI_SUCCESS);
1102 1102 }
1103 1103
1104 1104 /*
1105 1105 * Currently, the NIU Hypervisor API supports two major versions:
1106 1106 * version 1 and 2.
1107 1107 * If Hypervisor introduces a higher major or minor version,
1108 1108 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
1109 1109 */
1110 1110 nxgep->niu_hsvc_available = B_FALSE;
1111 1111 bcopy(&niu_hsvc, &nxgep->niu_hsvc,
1112 1112 sizeof (hsvc_info_t));
1113 1113
1114 1114 for (i = NIU_MAJOR_HI; i > 0; i--) {
1115 1115 nxgep->niu_hsvc.hsvc_major = i;
1116 1116 for (j = NIU_MINOR_HI; j >= 0; j--) {
1117 1117 nxgep->niu_hsvc.hsvc_minor = j;
1118 1118 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1119 1119 "nxge_hsvc_register: %s: negotiating "
1120 1120 "hypervisor services revision %d "
1121 1121 "group: 0x%lx major: 0x%lx "
1122 1122 "minor: 0x%lx",
1123 1123 nxgep->niu_hsvc.hsvc_modname,
1124 1124 nxgep->niu_hsvc.hsvc_rev,
1125 1125 nxgep->niu_hsvc.hsvc_group,
1126 1126 nxgep->niu_hsvc.hsvc_major,
1127 1127 nxgep->niu_hsvc.hsvc_minor,
1128 1128 nxgep->niu_min_ver));
1129 1129
1130 1130 if ((status = hsvc_register(&nxgep->niu_hsvc,
1131 1131 &nxgep->niu_min_ver)) == 0) {
1132 1132 /* Use the supported minor */
1133 1133 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
1134 1134 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1135 1135 "nxge_hsvc_register: %s: negotiated "
1136 1136 "hypervisor services revision %d "
1137 1137 "group: 0x%lx major: 0x%lx "
1138 1138 "minor: 0x%lx (niu_min_ver 0x%lx)",
1139 1139 nxgep->niu_hsvc.hsvc_modname,
1140 1140 nxgep->niu_hsvc.hsvc_rev,
1141 1141 nxgep->niu_hsvc.hsvc_group,
1142 1142 nxgep->niu_hsvc.hsvc_major,
1143 1143 nxgep->niu_hsvc.hsvc_minor,
1144 1144 nxgep->niu_min_ver));
1145 1145
1146 1146 nxgep->niu_hsvc_available = B_TRUE;
1147 1147 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1148 1148 "<== nxge_hsvc_register: "
1149 1149 "NIU Hypervisor service enabled"));
1150 1150 return (DDI_SUCCESS);
1151 1151 }
1152 1152
1153 1153 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1154 1154			    "nxge_hsvc_register: %s: negotiation failed - "
1155 1155			    "trying a lower major number "
1156 1156 "hypervisor services revision %d "
1157 1157 "group: 0x%lx major: 0x%lx minor: 0x%lx "
1158 1158 "errno: %d",
1159 1159 nxgep->niu_hsvc.hsvc_modname,
1160 1160 nxgep->niu_hsvc.hsvc_rev,
1161 1161 nxgep->niu_hsvc.hsvc_group,
1162 1162 nxgep->niu_hsvc.hsvc_major,
1163 1163 nxgep->niu_hsvc.hsvc_minor, status));
1164 1164 }
1165 1165 }
1166 1166
1167 1167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1168 1168 "nxge_hsvc_register: %s: cannot negotiate "
1169 1169 "hypervisor services revision %d group: 0x%lx "
1170 1170 "major: 0x%lx minor: 0x%lx errno: %d",
1171 1171 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1172 1172 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1173 1173 niu_hsvc.hsvc_minor, status));
1174 1174
1175 1175 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1176 1176 "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
1177 1177
1178 1178 return (DDI_FAILURE);
1179 1179 }
1180 1180 #endif
1181 1181
1182 1182 static char n2_siu_name[] = "niu";
1183 1183
1184 1184 static nxge_status_t
1185 1185 nxge_map_regs(p_nxge_t nxgep)
1186 1186 {
1187 1187 int ddi_status = DDI_SUCCESS;
1188 1188 p_dev_regs_t dev_regs;
1189 1189 char buf[MAXPATHLEN + 1];
1190 1190 char *devname;
1191 1191 #ifdef NXGE_DEBUG
1192 1192 char *sysname;
1193 1193 #endif
1194 1194 off_t regsize;
1195 1195 nxge_status_t status = NXGE_OK;
1196 1196 #if !defined(_BIG_ENDIAN)
1197 1197 off_t pci_offset;
1198 1198 uint16_t pcie_devctl;
1199 1199 #endif
1200 1200
1201 1201 if (isLDOMguest(nxgep)) {
1202 1202 return (nxge_guest_regs_map(nxgep));
1203 1203 }
1204 1204
1205 1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1206 1206 nxgep->dev_regs = NULL;
1207 1207 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1208 1208 dev_regs->nxge_regh = NULL;
1209 1209 dev_regs->nxge_pciregh = NULL;
1210 1210 dev_regs->nxge_msix_regh = NULL;
1211 1211 dev_regs->nxge_vir_regh = NULL;
1212 1212 dev_regs->nxge_vir2_regh = NULL;
1213 1213 nxgep->niu_type = NIU_TYPE_NONE;
1214 1214
1215 1215 devname = ddi_pathname(nxgep->dip, buf);
1216 1216 ASSERT(strlen(devname) > 0);
1217 1217 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1218 1218 "nxge_map_regs: pathname devname %s", devname));
1219 1219
1220 1220 /*
1221 1221 * The driver is running on a N2-NIU system if devname is something
1222 1222 * like "/niu@80/network@0"
1223 1223 */
1224 1224 if (strstr(devname, n2_siu_name)) {
1225 1225 /* N2/NIU */
1226 1226 nxgep->niu_type = N2_NIU;
1227 1227 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1228 1228 "nxge_map_regs: N2/NIU devname %s", devname));
1229 1229 /*
1230 1230 * Get function number:
1231 1231 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
1232 1232 */
1233 1233 nxgep->function_num =
1234 1234 (devname[strlen(devname) -1] == '1' ? 1 : 0);
1235 1235 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1236 1236 "nxge_map_regs: N2/NIU function number %d",
1237 1237 nxgep->function_num));
1238 1238 } else {
1239 1239 int *prop_val;
1240 1240 uint_t prop_len;
1241 1241 uint8_t func_num;
1242 1242
1243 1243 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1244 1244 0, "reg",
1245 1245 &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1246 1246 NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1247 1247 "Reg property not found"));
1248 1248 ddi_status = DDI_FAILURE;
1249 1249 goto nxge_map_regs_fail0;
1250 1250
1251 1251 } else {
1252 1252 func_num = (prop_val[0] >> 8) & 0x7;
1253 1253 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1254 1254 "Reg property found: fun # %d",
1255 1255 func_num));
1256 1256 nxgep->function_num = func_num;
1257 1257 if (isLDOMguest(nxgep)) {
1258 1258 nxgep->function_num /= 2;
1259 1259 return (NXGE_OK);
1260 1260 }
1261 1261 ddi_prop_free(prop_val);
1262 1262 }
1263 1263 }
1264 1264
1265 1265 switch (nxgep->niu_type) {
1266 1266 default:
1267 1267		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1268 1268 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1269 1269 "nxge_map_regs: pci config size 0x%x", regsize));
1270 1270
1271 1271 ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1272 1272 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1273 1273 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1274 1274 if (ddi_status != DDI_SUCCESS) {
1275 1275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1276 1276 "ddi_map_regs, nxge bus config regs failed"));
1277 1277 goto nxge_map_regs_fail0;
1278 1278 }
1279 1279 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1280 1280 "nxge_map_reg: PCI config addr 0x%0llx "
1281 1281 " handle 0x%0llx", dev_regs->nxge_pciregp,
1282 1282 dev_regs->nxge_pciregh));
1283 1283 /*
1284 1284 * IMP IMP
1285 1285		 * workaround for a bit-swapping bug in HW
1286 1286		 * which ends up in no-snoop = yes,
1287 1287		 * resulting in DMA not being synched properly
1288 1288 */
1289 1289 #if !defined(_BIG_ENDIAN)
1290 1290 /* workarounds for x86 systems */
1291 1291 pci_offset = 0x80 + PCIE_DEVCTL;
1292 1292 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
1293 1293 pci_offset);
1294 1294 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
1295 1295 pcie_devctl |= PCIE_DEVCTL_RO_EN;
1296 1296 pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1297 1297 pcie_devctl);
1298 1298 #endif
1299 1299
1300 1300		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1301 1301 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1302 1302 "nxge_map_regs: pio size 0x%x", regsize));
1303 1303 /* set up the device mapped register */
1304 1304 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1305 1305 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1306 1306 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1307 1307 if (ddi_status != DDI_SUCCESS) {
1308 1308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1309 1309 "ddi_map_regs for Neptune global reg failed"));
1310 1310 goto nxge_map_regs_fail1;
1311 1311 }
1312 1312
1313 1313 /* set up the msi/msi-x mapped register */
1314 1314		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1315 1315 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1316 1316 "nxge_map_regs: msix size 0x%x", regsize));
1317 1317 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1318 1318 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1319 1319 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1320 1320 if (ddi_status != DDI_SUCCESS) {
1321 1321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1322 1322 "ddi_map_regs for msi reg failed"));
1323 1323 goto nxge_map_regs_fail2;
1324 1324 }
1325 1325
1326 1326 /* set up the vio region mapped register */
1327 1327		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1328 1328 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1329 1329 "nxge_map_regs: vio size 0x%x", regsize));
1330 1330 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1331 1331 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1332 1332 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1333 1333
1334 1334 if (ddi_status != DDI_SUCCESS) {
1335 1335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1336 1336 "ddi_map_regs for nxge vio reg failed"));
1337 1337 goto nxge_map_regs_fail3;
1338 1338 }
1339 1339 nxgep->dev_regs = dev_regs;
1340 1340
1341 1341 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1342 1342 NPI_PCI_ADD_HANDLE_SET(nxgep,
1343 1343 (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1344 1344 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1345 1345 NPI_MSI_ADD_HANDLE_SET(nxgep,
1346 1346 (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1347 1347
1348 1348 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1349 1349 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1350 1350
1351 1351 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1352 1352 NPI_REG_ADD_HANDLE_SET(nxgep,
1353 1353 (npi_reg_ptr_t)dev_regs->nxge_regp);
1354 1354
1355 1355 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1356 1356 NPI_VREG_ADD_HANDLE_SET(nxgep,
1357 1357 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1358 1358
1359 1359 break;
1360 1360
1361 1361 case N2_NIU:
1362 1362 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1363 1363 /*
1364 1364 * Set up the device mapped register (FWARC 2006/556)
1365 1365 * (changed back to 1: reg starts at 1!)
1366 1366 */
1367 1367		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1368 1368 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1369 1369 "nxge_map_regs: dev size 0x%x", regsize));
1370 1370 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1371 1371 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1372 1372 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1373 1373
1374 1374 if (ddi_status != DDI_SUCCESS) {
1375 1375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1376 1376 "ddi_map_regs for N2/NIU, global reg failed "));
1377 1377 goto nxge_map_regs_fail1;
1378 1378 }
1379 1379
1380 1380 /* set up the first vio region mapped register */
1381 1381		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1382 1382 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1383 1383 "nxge_map_regs: vio (1) size 0x%x", regsize));
1384 1384 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1385 1385 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1386 1386 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1387 1387
1388 1388 if (ddi_status != DDI_SUCCESS) {
1389 1389 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1390 1390 "ddi_map_regs for nxge vio reg failed"));
1391 1391 goto nxge_map_regs_fail2;
1392 1392 }
1393 1393 /* set up the second vio region mapped register */
1394 1394		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1395 1395 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1396 1396 "nxge_map_regs: vio (3) size 0x%x", regsize));
1397 1397 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1398 1398 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1399 1399 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1400 1400
1401 1401 if (ddi_status != DDI_SUCCESS) {
1402 1402 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1403 1403 "ddi_map_regs for nxge vio2 reg failed"));
1404 1404 goto nxge_map_regs_fail3;
1405 1405 }
1406 1406 nxgep->dev_regs = dev_regs;
1407 1407
1408 1408 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1409 1409 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1410 1410
1411 1411 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1412 1412 NPI_REG_ADD_HANDLE_SET(nxgep,
1413 1413 (npi_reg_ptr_t)dev_regs->nxge_regp);
1414 1414
1415 1415 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1416 1416 NPI_VREG_ADD_HANDLE_SET(nxgep,
1417 1417 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1418 1418
1419 1419 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1420 1420 NPI_V2REG_ADD_HANDLE_SET(nxgep,
1421 1421 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1422 1422
1423 1423 break;
1424 1424 }
1425 1425
1426 1426 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1427 1427 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1428 1428
1429 1429 goto nxge_map_regs_exit;
1430 1430 nxge_map_regs_fail3:
1431 1431 if (dev_regs->nxge_msix_regh) {
1432 1432 ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1433 1433 }
1434 1434 if (dev_regs->nxge_vir_regh) {
1435 1435		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1436 1436 }
1437 1437 nxge_map_regs_fail2:
1438 1438 if (dev_regs->nxge_regh) {
1439 1439 ddi_regs_map_free(&dev_regs->nxge_regh);
1440 1440 }
1441 1441 nxge_map_regs_fail1:
1442 1442 if (dev_regs->nxge_pciregh) {
1443 1443 ddi_regs_map_free(&dev_regs->nxge_pciregh);
1444 1444 }
1445 1445 nxge_map_regs_fail0:
1446 1446 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1447 1447 kmem_free(dev_regs, sizeof (dev_regs_t));
1448 1448
1449 1449 nxge_map_regs_exit:
1450 1450 if (ddi_status != DDI_SUCCESS)
1451 1451 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1452 1452 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1453 1453 return (status);
1454 1454 }
1455 1455
1456 1456 static void
1457 1457 nxge_unmap_regs(p_nxge_t nxgep)
1458 1458 {
1459 1459 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1460 1460
1461 1461 if (isLDOMguest(nxgep)) {
1462 1462 nxge_guest_regs_map_free(nxgep);
1463 1463 return;
1464 1464 }
1465 1465
1466 1466 if (nxgep->dev_regs) {
1467 1467 if (nxgep->dev_regs->nxge_pciregh) {
1468 1468 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1469 1469 "==> nxge_unmap_regs: bus"));
1470 1470 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1471 1471 nxgep->dev_regs->nxge_pciregh = NULL;
1472 1472 }
1473 1473 if (nxgep->dev_regs->nxge_regh) {
1474 1474 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1475 1475 "==> nxge_unmap_regs: device registers"));
1476 1476 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1477 1477 nxgep->dev_regs->nxge_regh = NULL;
1478 1478 }
1479 1479 if (nxgep->dev_regs->nxge_msix_regh) {
1480 1480 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1481 1481 "==> nxge_unmap_regs: device interrupts"));
1482 1482 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1483 1483 nxgep->dev_regs->nxge_msix_regh = NULL;
1484 1484 }
1485 1485 if (nxgep->dev_regs->nxge_vir_regh) {
1486 1486 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1487 1487 "==> nxge_unmap_regs: vio region"));
1488 1488 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1489 1489 nxgep->dev_regs->nxge_vir_regh = NULL;
1490 1490 }
1491 1491 if (nxgep->dev_regs->nxge_vir2_regh) {
1492 1492 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1493 1493 "==> nxge_unmap_regs: vio2 region"));
1494 1494 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1495 1495 nxgep->dev_regs->nxge_vir2_regh = NULL;
1496 1496 }
1497 1497
1498 1498 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1499 1499 nxgep->dev_regs = NULL;
1500 1500 }
1501 1501
1502 1502 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1503 1503 }
1504 1504
1505 1505 static nxge_status_t
1506 1506 nxge_setup_mutexes(p_nxge_t nxgep)
1507 1507 {
1508 1508 int ddi_status = DDI_SUCCESS;
1509 1509 nxge_status_t status = NXGE_OK;
1510 1510 nxge_classify_t *classify_ptr;
1511 1511 int partition;
1512 1512
1513 1513 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1514 1514
1515 1515 /*
1516 1516 * Get the interrupt cookie so the mutexes can be
1517 1517	 * initialized.
1518 1518 */
1519 1519 if (isLDOMguest(nxgep)) {
1520 1520 nxgep->interrupt_cookie = 0;
1521 1521 } else {
1522 1522 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1523 1523 &nxgep->interrupt_cookie);
1524 1524
1525 1525 if (ddi_status != DDI_SUCCESS) {
1526 1526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1527 1527 "<== nxge_setup_mutexes: failed 0x%x",
1528 1528 ddi_status));
1529 1529 goto nxge_setup_mutexes_exit;
1530 1530 }
1531 1531 }
1532 1532
1533 1533 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1534 1534 MUTEX_INIT(&nxgep->poll_lock, NULL,
1535 1535 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1536 1536
1537 1537 /*
1538 1538 * Initialize mutexes for this device.
1539 1539 */
1540 1540 MUTEX_INIT(nxgep->genlock, NULL,
1541 1541 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1542 1542 MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1543 1543 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1544 1544 MUTEX_INIT(&nxgep->mif_lock, NULL,
1545 1545 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1546 1546 MUTEX_INIT(&nxgep->group_lock, NULL,
1547 1547 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1548 1548 RW_INIT(&nxgep->filter_lock, NULL,
1549 1549 RW_DRIVER, (void *)nxgep->interrupt_cookie);
1550 1550
1551 1551 classify_ptr = &nxgep->classifier;
1552 1552 /*
1553 1553	 * FFLP mutexes are never used in interrupt context,
1554 1554	 * as an FFLP operation can take a very long time to
1555 1555	 * complete and hence is not suitable to invoke from interrupt
1556 1556	 * handlers.
1557 1557 */
1558 1558 MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1559 1559 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1560 1560 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1561 1561 MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1562 1562 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1563 1563 for (partition = 0; partition < MAX_PARTITION; partition++) {
1564 1564 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1565 1565 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1566 1566 }
1567 1567 }
1568 1568
1569 1569 nxge_setup_mutexes_exit:
1570 1570 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1571 1571 "<== nxge_setup_mutexes status = %x", status));
1572 1572
1573 1573 if (ddi_status != DDI_SUCCESS)
1574 1574 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1575 1575
1576 1576 return (status);
1577 1577 }
1578 1578
1579 1579 static void
1580 1580 nxge_destroy_mutexes(p_nxge_t nxgep)
1581 1581 {
1582 1582 int partition;
1583 1583 nxge_classify_t *classify_ptr;
1584 1584
1585 1585 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1586 1586 RW_DESTROY(&nxgep->filter_lock);
1587 1587 MUTEX_DESTROY(&nxgep->group_lock);
1588 1588 MUTEX_DESTROY(&nxgep->mif_lock);
1589 1589 MUTEX_DESTROY(&nxgep->ouraddr_lock);
1590 1590 MUTEX_DESTROY(nxgep->genlock);
1591 1591
1592 1592 classify_ptr = &nxgep->classifier;
1593 1593 MUTEX_DESTROY(&classify_ptr->tcam_lock);
1594 1594
1595 1595 /* Destroy all polling resources. */
1596 1596 MUTEX_DESTROY(&nxgep->poll_lock);
1597 1597 cv_destroy(&nxgep->poll_cv);
1598 1598
1599 1599 /* free data structures, based on HW type */
1600 1600 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1601 1601 MUTEX_DESTROY(&classify_ptr->fcram_lock);
1602 1602 for (partition = 0; partition < MAX_PARTITION; partition++) {
1603 1603 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1604 1604 }
1605 1605 }
1606 1606
1607 1607 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1608 1608 }
1609 1609
1610 1610 nxge_status_t
1611 1611 nxge_init(p_nxge_t nxgep)
1612 1612 {
1613 1613 nxge_status_t status = NXGE_OK;
1614 1614
1615 1615 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1616 1616
1617 1617 if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1618 1618 return (status);
1619 1619 }
1620 1620
1621 1621 /*
1622 1622 * Allocate system memory for the receive/transmit buffer blocks
1623 1623 * and receive/transmit descriptor rings.
1624 1624 */
1625 1625 status = nxge_alloc_mem_pool(nxgep);
1626 1626 if (status != NXGE_OK) {
1627 1627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1628 1628 goto nxge_init_fail1;
1629 1629 }
1630 1630
1631 1631 if (!isLDOMguest(nxgep)) {
1632 1632 /*
1633 1633 * Initialize and enable the TXC registers.
1634 1634 * (Globally enable the Tx controller,
1635 1635 * enable the port, configure the dma channel bitmap,
1636 1636 * configure the max burst size).
1637 1637 */
1638 1638 status = nxge_txc_init(nxgep);
1639 1639 if (status != NXGE_OK) {
1640 1640 NXGE_ERROR_MSG((nxgep,
1641 1641 NXGE_ERR_CTL, "init txc failed\n"));
1642 1642 goto nxge_init_fail2;
1643 1643 }
1644 1644 }
1645 1645
1646 1646 /*
1647 1647 * Initialize and enable TXDMA channels.
1648 1648 */
1649 1649 status = nxge_init_txdma_channels(nxgep);
1650 1650 if (status != NXGE_OK) {
1651 1651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1652 1652 goto nxge_init_fail3;
1653 1653 }
1654 1654
1655 1655 /*
1656 1656 * Initialize and enable RXDMA channels.
1657 1657 */
1658 1658 status = nxge_init_rxdma_channels(nxgep);
1659 1659 if (status != NXGE_OK) {
1660 1660 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1661 1661 goto nxge_init_fail4;
1662 1662 }
1663 1663
1664 1664 /*
1665 1665 * The guest domain is now done.
1666 1666 */
1667 1667 if (isLDOMguest(nxgep)) {
1668 1668 nxgep->drv_state |= STATE_HW_INITIALIZED;
1669 1669 goto nxge_init_exit;
1670 1670 }
1671 1671
1672 1672 /*
1673 1673 * Initialize TCAM and FCRAM (Neptune).
1674 1674 */
1675 1675 status = nxge_classify_init(nxgep);
1676 1676 if (status != NXGE_OK) {
1677 1677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1678 1678 goto nxge_init_fail5;
1679 1679 }
1680 1680
1681 1681 /*
1682 1682 * Initialize ZCP
1683 1683 */
1684 1684 status = nxge_zcp_init(nxgep);
1685 1685 if (status != NXGE_OK) {
1686 1686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1687 1687 goto nxge_init_fail5;
1688 1688 }
1689 1689
1690 1690 /*
1691 1691 * Initialize IPP.
1692 1692 */
1693 1693 status = nxge_ipp_init(nxgep);
1694 1694 if (status != NXGE_OK) {
1695 1695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1696 1696 goto nxge_init_fail5;
1697 1697 }
1698 1698
1699 1699 /*
1700 1700 * Initialize the MAC block.
1701 1701 */
1702 1702 status = nxge_mac_init(nxgep);
1703 1703 if (status != NXGE_OK) {
1704 1704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1705 1705 goto nxge_init_fail5;
1706 1706 }
1707 1707
1708 1708 /*
1709 1709 * Enable the interrupts for DDI.
1710 1710 */
1711 1711 nxge_intrs_enable(nxgep);
1712 1712
1713 1713 nxgep->drv_state |= STATE_HW_INITIALIZED;
1714 1714
1715 1715 goto nxge_init_exit;
1716 1716
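/*
 * The error labels below unwind in reverse order of initialization:
 * a failure at any step frees exactly the resources acquired by the
 * steps that already succeeded.
 */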
1717 1717 nxge_init_fail5:
1718 1718 nxge_uninit_rxdma_channels(nxgep);
1719 1719 nxge_init_fail4:
1720 1720 nxge_uninit_txdma_channels(nxgep);
1721 1721 nxge_init_fail3:
1722 1722 if (!isLDOMguest(nxgep)) {
1723 1723 (void) nxge_txc_uninit(nxgep);
1724 1724 }
1725 1725 nxge_init_fail2:
1726 1726 nxge_free_mem_pool(nxgep);
1727 1727 nxge_init_fail1:
1728 1728 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1729 1729 "<== nxge_init status (failed) = 0x%08x", status));
1730 1730 return (status);
1731 1731
1732 1732 nxge_init_exit:
1733 1733 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1734 1734 status));
1735 1735 return (status);
1736 1736 }
1737 1737
1738 1738
1739 1739 timeout_id_t
1740 1740 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1741 1741 {
1742 1742 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1743 1743 return (timeout(func, (caddr_t)nxgep,
1744 1744 drv_usectohz(1000 * msec)));
1745 1745 }
1746 1746 return (NULL);
1747 1747 }
1748 1748
1749 1749 /*ARGSUSED*/
1750 1750 void
1751 1751 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1752 1752 {
1753 1753 if (timerid) {
1754 1754 (void) untimeout(timerid);
1755 1755 }
1756 1756 }
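/*
 * Typical pairing of the timer helpers (a sketch only; the callback
 * name is illustrative, not part of this file):
 *
 *	timerid = nxge_start_timer(nxgep, some_callback, msec);
 *	...
 *	nxge_stop_timer(nxgep, timerid);
 *
 * nxge_start_timer() returns NULL while the device is suspended, and
 * nxge_stop_timer() tolerates a NULL id, so callers need not
 * special-case a timer that was never started.
 */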
1757 1757
1758 1758 void
1759 1759 nxge_uninit(p_nxge_t nxgep)
1760 1760 {
1761 1761 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1762 1762
1763 1763 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1764 1764 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1765 1765 "==> nxge_uninit: not initialized"));
1766 1766 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1767 1767 "<== nxge_uninit"));
1768 1768 return;
1769 1769 }
1770 1770
1771 1771 if (!isLDOMguest(nxgep)) {
1772 1772 /*
1773 1773 * Reset the receive MAC side.
1774 1774 */
1775 1775 (void) nxge_rx_mac_disable(nxgep);
1776 1776
1777 1777 /*
1778 1778 * Drain the IPP.
1779 1779 */
1780 1780 (void) nxge_ipp_drain(nxgep);
1781 1781 }
1782 1782
1783 1783 /* stop timer */
1784 1784 if (nxgep->nxge_timerid) {
1785 1785 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1786 1786 nxgep->nxge_timerid = 0;
1787 1787 }
1788 1788
1789 1789 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1790 1790 (void) nxge_intr_hw_disable(nxgep);
1791 1791
1792 1792
1793 1793 /* Disable and soft reset the IPP */
1794 1794 if (!isLDOMguest(nxgep))
1795 1795 (void) nxge_ipp_disable(nxgep);
1796 1796
1797 1797 /* Free classification resources */
1798 1798 (void) nxge_classify_uninit(nxgep);
1799 1799
1800 1800 /*
1801 1801 * Reset the transmit/receive DMA side.
1802 1802 */
1803 1803 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1804 1804 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1805 1805
1806 1806 nxge_uninit_txdma_channels(nxgep);
1807 1807 nxge_uninit_rxdma_channels(nxgep);
1808 1808
1809 1809 /*
1810 1810 * Reset the transmit MAC side.
1811 1811 */
1812 1812 (void) nxge_tx_mac_disable(nxgep);
1813 1813
1814 1814 nxge_free_mem_pool(nxgep);
1815 1815
1816 1816 /*
1817 1817 * Start the link monitor only if the reset flag is not set.
1818 1818 * If the reset flag is set, the link monitor
1819 1819 * is not started, in order to stop further bus
1820 1820 * activity coming from this interface.
1821 1821 * The driver will restart the monitor function
1822 1822 * if the interface is initialized again later.
1823 1823 */
1824 1824 if (!nxge_peu_reset_enable) {
1825 1825 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1826 1826 }
1827 1827
1828 1828 nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1829 1829
1830 1830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1831 1831 "nxge_mblks_pending %d", nxge_mblks_pending));
1832 1832 }
1833 1833
1834 1834 void
1835 1835 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1836 1836 {
1837 1837 uint64_t reg;
1838 1838 uint64_t regdata;
1839 1839 int i, retry;
1840 1840
1841 1841 bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1842 1842 regdata = 0;
1843 1843 retry = 1;
1844 1844
1845 1845 for (i = 0; i < retry; i++) {
1846 1846 NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1847 1847 }
1848 1848 bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1849 1849 }
1850 1850
1851 1851 void
1852 1852 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1853 1853 {
1854 1854 uint64_t reg;
1855 1855 uint64_t buf[2];
1856 1856
1857 1857 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1858 1858 reg = buf[0];
1859 1859
1860 1860 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1861 1861 }
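/*
 * nxge_get64() and nxge_put64() implement a simple register
 * peek/poke protocol over message blocks (mblks): the first 8 bytes
 * of the message carry the register offset; nxge_get64() overwrites
 * them with the value read, while nxge_put64() takes a second 8-byte
 * word holding the value to write. The bcopy() calls avoid alignment
 * assumptions on the mblk data.
 */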
1862 1862
1863 1863 /*ARGSUSED*/
1864 1864 /*VARARGS*/
1865 1865 void
1866 1866 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1867 1867 {
1868 1868 char msg_buffer[1048];
1869 1869 char prefix_buffer[32];
1870 1870 int instance;
1871 1871 uint64_t debug_level;
1872 1872 int cmn_level = CE_CONT;
1873 1873 va_list ap;
1874 1874
1875 1875 /* In case a developer has changed nxge_debug_level. */
1876 1876 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level)
1877 1877 nxgep->nxge_debug_level = nxge_debug_level;
1880 1880
1881 1881 debug_level = (nxgep == NULL) ? nxge_debug_level :
1882 1882 nxgep->nxge_debug_level;
1883 1883
1884 1884 if ((level & debug_level) ||
1885 1885 (level == NXGE_NOTE) ||
1886 1886 (level == NXGE_ERR_CTL)) {
1887 1887 /* do the msg processing */
1888 1888 MUTEX_ENTER(&nxgedebuglock);
1889 1889
1890 1890 if ((level & NXGE_NOTE)) {
1891 1891 cmn_level = CE_NOTE;
1892 1892 }
1893 1893
1894 1894 if (level & NXGE_ERR_CTL) {
1895 1895 cmn_level = CE_WARN;
1896 1896 }
1897 1897
1898 1898 va_start(ap, fmt);
1899 1899 (void) vsprintf(msg_buffer, fmt, ap);
1900 1900 va_end(ap);
1901 1901 if (nxgep == NULL) {
1902 1902 instance = -1;
1903 1903 (void) sprintf(prefix_buffer, "%s :", "nxge");
1904 1904 } else {
1905 1905 instance = nxgep->instance;
1906 1906 (void) sprintf(prefix_buffer,
1907 1907 "%s%d :", "nxge", instance);
1908 1908 }
1909 1909
1910 1910 MUTEX_EXIT(&nxgedebuglock);
1911 1911 cmn_err(cmn_level, "!%s %s\n",
1912 1912 prefix_buffer, msg_buffer);
1913 1913
1914 1914 }
1915 1915 }
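/*
 * nxge_debug_msg() is reached via the NXGE_DEBUG_MSG() and
 * NXGE_ERROR_MSG() macros used throughout this file. A message is
 * emitted when its level matches the per-instance (or global) debug
 * mask, or unconditionally for NXGE_NOTE (CE_NOTE) and NXGE_ERR_CTL
 * (CE_WARN).
 */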
1916 1916
1917 1917 char *
1918 1918 nxge_dump_packet(char *addr, int size)
1919 1919 {
1920 1920 uchar_t *ap = (uchar_t *)addr;
1921 1921 int i;
1922 1922 static char etherbuf[1024];
1923 1923 char *cp = etherbuf;
1924 1924 char digits[] = "0123456789abcdef";
1925 1925
1926 1926 if (!size)
1927 1927 size = 60;
1928 1928
1929 1929 if (size > MAX_DUMP_SZ) {
1930 1930 /* Dump the leading bytes */
1931 1931 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1932 1932 if (*ap > 0x0f)
1933 1933 *cp++ = digits[*ap >> 4];
1934 1934 *cp++ = digits[*ap++ & 0xf];
1935 1935 *cp++ = ':';
1936 1936 }
1937 1937 for (i = 0; i < 20; i++)
1938 1938 *cp++ = '.';
1939 1939 /* Dump the last MAX_DUMP_SZ/2 bytes */
1940 1940 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1941 1941 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1942 1942 if (*ap > 0x0f)
1943 1943 *cp++ = digits[*ap >> 4];
1944 1944 *cp++ = digits[*ap++ & 0xf];
1945 1945 *cp++ = ':';
1946 1946 }
1947 1947 } else {
1948 1948 for (i = 0; i < size; i++) {
1949 1949 if (*ap > 0x0f)
1950 1950 *cp++ = digits[*ap >> 4];
1951 1951 *cp++ = digits[*ap++ & 0xf];
1952 1952 *cp++ = ':';
1953 1953 }
1954 1954 }
1955 1955 *--cp = 0;
1956 1956 return (etherbuf);
1957 1957 }
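/*
 * Note: nxge_dump_packet() formats into a static buffer and so is
 * not reentrant; concurrent callers would corrupt each other's
 * output. For packets larger than MAX_DUMP_SZ, only the first and
 * last MAX_DUMP_SZ/2 bytes are shown, separated by a run of dots.
 */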
1958 1958
1959 1959 #ifdef NXGE_DEBUG
1960 1960 static void
1961 1961 nxge_test_map_regs(p_nxge_t nxgep)
1962 1962 {
1963 1963 ddi_acc_handle_t cfg_handle;
1964 1964 p_pci_cfg_t cfg_ptr;
1965 1965 ddi_acc_handle_t dev_handle;
1966 1966 char *dev_ptr;
1967 1967 ddi_acc_handle_t pci_config_handle;
1968 1968 uint32_t regval;
1969 1969 int i;
1970 1970
1971 1971 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1972 1972
1973 1973 dev_handle = nxgep->dev_regs->nxge_regh;
1974 1974 dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1975 1975
1976 1976 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1977 1977 cfg_handle = nxgep->dev_regs->nxge_pciregh;
1978 1978 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1979 1979
1980 1980 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1981 1981 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1982 1982 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1983 1983 "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1984 1984 &cfg_ptr->vendorid));
1985 1985 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1986 1986 "\tvendorid 0x%x devid 0x%x",
1987 1987 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1988 1988 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
1989 1989 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1990 1990 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1991 1991 "bar1c 0x%x",
1992 1992 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
1993 1993 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1994 1994 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1995 1995 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1996 1996 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1997 1997 "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1998 1998 "base 28 0x%x bar2c 0x%x\n",
1999 1999 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
2000 2000 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
2001 2001 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
2002 2002 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
2003 2003 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2004 2004 "\nNeptune PCI BAR: base30 0x%x\n",
2005 2005 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
2006 2006
2007 2007 cfg_handle = nxgep->dev_regs->nxge_pciregh;
2008 2008 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
2009 2009 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2010 2010 "first 0x%llx second 0x%llx third 0x%llx "
2011 2011 "last 0x%llx ",
2012 2012 NXGE_PIO_READ64(dev_handle,
2013 2013 (uint64_t *)(dev_ptr + 0), 0),
2014 2014 NXGE_PIO_READ64(dev_handle,
2015 2015 (uint64_t *)(dev_ptr + 8), 0),
2016 2016 NXGE_PIO_READ64(dev_handle,
2017 2017 (uint64_t *)(dev_ptr + 16), 0),
2018 2018 NXGE_PIO_READ64(dev_handle,
2019 2019 (uint64_t *)(dev_ptr + 24), 0)));
2020 2020 }
2021 2021 }
2022 2022
2023 2023 #endif
2024 2024
2025 2025 static void
2026 2026 nxge_suspend(p_nxge_t nxgep)
2027 2027 {
2028 2028 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
2029 2029
2030 2030 nxge_intrs_disable(nxgep);
2031 2031 nxge_destroy_dev(nxgep);
2032 2032
2033 2033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
2034 2034 }
2035 2035
2036 2036 static nxge_status_t
2037 2037 nxge_resume(p_nxge_t nxgep)
2038 2038 {
2039 2039 nxge_status_t status = NXGE_OK;
2040 2040
2041 2041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
2042 2042
2043 2043 nxgep->suspended = DDI_RESUME;
2044 2044 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
2045 2045 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
2046 2046 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
2047 2047 (void) nxge_rx_mac_enable(nxgep);
2048 2048 (void) nxge_tx_mac_enable(nxgep);
2049 2049 nxge_intrs_enable(nxgep);
2050 2050 nxgep->suspended = 0;
2051 2051
2052 2052 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2053 2053 "<== nxge_resume status = 0x%x", status));
2054 2054 return (status);
2055 2055 }
2056 2056
2057 2057 static nxge_status_t
2058 2058 nxge_setup_dev(p_nxge_t nxgep)
2059 2059 {
2060 2060 nxge_status_t status = NXGE_OK;
2061 2061
2062 2062 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
2063 2063 nxgep->mac.portnum));
2064 2064
2065 2065 status = nxge_link_init(nxgep);
2066 2066
2067 2067 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
2068 2068 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2069 2069 "port%d Bad register acc handle", nxgep->mac.portnum));
2070 2070 status = NXGE_ERROR;
2071 2071 }
2072 2072
2073 2073 if (status != NXGE_OK) {
2074 2074 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2075 2075 " nxge_setup_dev status "
2076 2076 "(xcvr init 0x%08x)", status));
2077 2077 goto nxge_setup_dev_exit;
2078 2078 }
2079 2079
2080 2080 nxge_setup_dev_exit:
2081 2081 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2082 2082 "<== nxge_setup_dev port %d status = 0x%08x",
2083 2083 nxgep->mac.portnum, status));
2084 2084
2085 2085 return (status);
2086 2086 }
2087 2087
2088 2088 static void
2089 2089 nxge_destroy_dev(p_nxge_t nxgep)
2090 2090 {
2091 2091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2092 2092
2093 2093 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2094 2094
2095 2095 (void) nxge_hw_stop(nxgep);
2096 2096
2097 2097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2098 2098 }
2099 2099
2100 2100 static nxge_status_t
2101 2101 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2102 2102 {
2103 2103 int ddi_status = DDI_SUCCESS;
2104 2104 uint_t count;
2105 2105 ddi_dma_cookie_t cookie;
2106 2106 uint_t iommu_pagesize;
2107 2107 nxge_status_t status = NXGE_OK;
2108 2108
2109 2109 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2110 2110 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2111 2111 if (nxgep->niu_type != N2_NIU) {
2112 2112 iommu_pagesize = dvma_pagesize(nxgep->dip);
2113 2113 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2114 2114 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2115 2115 " default_block_size %d iommu_pagesize %d",
2116 2116 nxgep->sys_page_sz,
2117 2117 ddi_ptob(nxgep->dip, (ulong_t)1),
2118 2118 nxgep->rx_default_block_size,
2119 2119 iommu_pagesize));
2120 2120
2121 2121 if (iommu_pagesize != 0) {
2122 2122 if (nxgep->sys_page_sz == iommu_pagesize) {
2123 2123 if (iommu_pagesize > 0x4000)
2124 2124 nxgep->sys_page_sz = 0x4000;
2125 2125 } else {
2126 2126 if (nxgep->sys_page_sz > iommu_pagesize)
2127 2127 nxgep->sys_page_sz = iommu_pagesize;
2128 2128 }
2129 2129 }
2130 2130 }
2131 2131 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2132 2132 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2133 2133 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2134 2134 "default_block_size %d page mask %d",
2135 2135 nxgep->sys_page_sz,
2136 2136 ddi_ptob(nxgep->dip, (ulong_t)1),
2137 2137 nxgep->rx_default_block_size,
2138 2138 nxgep->sys_page_mask));
2139 2139
2140 2140
2141 2141 switch (nxgep->sys_page_sz) {
2142 2142 default:
2143 2143 nxgep->sys_page_sz = 0x1000;
2144 2144 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2145 2145 nxgep->rx_default_block_size = 0x1000;
2146 2146 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2147 2147 break;
2148 2148 case 0x1000:
2149 2149 nxgep->rx_default_block_size = 0x1000;
2150 2150 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2151 2151 break;
2152 2152 case 0x2000:
2153 2153 nxgep->rx_default_block_size = 0x2000;
2154 2154 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2155 2155 break;
2156 2156 case 0x4000:
2157 2157 nxgep->rx_default_block_size = 0x4000;
2158 2158 nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2159 2159 break;
2160 2160 case 0x8000:
2161 2161 nxgep->rx_default_block_size = 0x8000;
2162 2162 nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2163 2163 break;
2164 2164 }
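/*
 * Example of the settings above: a 0x2000 (8K) system page size
 * gives sys_page_mask = ~(0x2000 - 1), which clears the low 13 bits
 * of an address, and selects an 8K default block size with
 * RBR_BKSIZE_8K.
 */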
2165 2165
2166 2166 #ifndef USE_RX_BIG_BUF
2167 2167 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
2168 2168 #else
2169 2169 nxgep->rx_default_block_size = 0x2000;
2170 2170 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2171 2171 #endif
2172 2172 /*
2173 2173 * Get the system DMA burst size.
2174 2174 */
2175 2175 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2176 2176 DDI_DMA_DONTWAIT, 0,
2177 2177 &nxgep->dmasparehandle);
2178 2178 if (ddi_status != DDI_SUCCESS) {
2179 2179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2180 2180 "ddi_dma_alloc_handle: failed "
2181 2181 " status 0x%x", ddi_status));
2182 2182 goto nxge_get_soft_properties_exit;
2183 2183 }
2184 2184
2185 2185 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2186 2186 (caddr_t)nxgep->dmasparehandle,
2187 2187 sizeof (nxgep->dmasparehandle),
2188 2188 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2189 2189 DDI_DMA_DONTWAIT, 0,
2190 2190 &cookie, &count);
2191 2191 if (ddi_status != DDI_DMA_MAPPED) {
2192 2192 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2193 2193 "Binding spare handle to find system"
2194 2194 " burstsize failed."));
2195 2195 ddi_status = DDI_FAILURE;
2196 2196 goto nxge_get_soft_properties_fail1;
2197 2197 }
2198 2198
2199 2199 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2200 2200 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2201 2201
2202 2202 nxge_get_soft_properties_fail1:
2203 2203 ddi_dma_free_handle(&nxgep->dmasparehandle);
2204 2204
2205 2205 nxge_get_soft_properties_exit:
2206 2206
2207 2207 if (ddi_status != DDI_SUCCESS)
2208 2208 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2209 2209
2210 2210 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2211 2211 "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2212 2212 return (status);
2213 2213 }
2214 2214
2215 2215 static nxge_status_t
2216 2216 nxge_alloc_mem_pool(p_nxge_t nxgep)
2217 2217 {
2218 2218 nxge_status_t status = NXGE_OK;
2219 2219
2220 2220 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2221 2221
2222 2222 status = nxge_alloc_rx_mem_pool(nxgep);
2223 2223 if (status != NXGE_OK) {
2224 2224 return (NXGE_ERROR);
2225 2225 }
2226 2226
2227 2227 status = nxge_alloc_tx_mem_pool(nxgep);
2228 2228 if (status != NXGE_OK) {
2229 2229 nxge_free_rx_mem_pool(nxgep);
2230 2230 return (NXGE_ERROR);
2231 2231 }
2232 2232
2233 2233 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2234 2234 return (NXGE_OK);
2235 2235 }
2236 2236
2237 2237 static void
2238 2238 nxge_free_mem_pool(p_nxge_t nxgep)
2239 2239 {
2240 2240 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2241 2241
2242 2242 nxge_free_rx_mem_pool(nxgep);
2243 2243 nxge_free_tx_mem_pool(nxgep);
2244 2244
2245 2245 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2246 2246 }
2247 2247
2248 2248 nxge_status_t
2249 2249 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2250 2250 {
2251 2251 uint32_t rdc_max;
2252 2252 p_nxge_dma_pt_cfg_t p_all_cfgp;
2253 2253 p_nxge_hw_pt_cfg_t p_cfgp;
2254 2254 p_nxge_dma_pool_t dma_poolp;
2255 2255 p_nxge_dma_common_t *dma_buf_p;
2256 2256 p_nxge_dma_pool_t dma_cntl_poolp;
2257 2257 p_nxge_dma_common_t *dma_cntl_p;
2258 2258 uint32_t *num_chunks; /* per dma */
2259 2259 nxge_status_t status = NXGE_OK;
2260 2260
2261 2261 uint32_t nxge_port_rbr_size;
2262 2262 uint32_t nxge_port_rbr_spare_size;
2263 2263 uint32_t nxge_port_rcr_size;
2264 2264 uint32_t rx_cntl_alloc_size;
2265 2265
2266 2266 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2267 2267
2268 2268 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2269 2269 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2270 2270 rdc_max = NXGE_MAX_RDCS;
2271 2271
2272 2272 /*
2273 2273 * Allocate memory for the common DMA data structures.
2274 2274 */
2275 2275 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2276 2276 KM_SLEEP);
2277 2277 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2278 2278 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2279 2279
2280 2280 dma_cntl_poolp = (p_nxge_dma_pool_t)
2281 2281 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2282 2282 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2283 2283 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2284 2284
2285 2285 num_chunks = (uint32_t *)KMEM_ZALLOC(
2286 2286 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2287 2287
2288 2288 /*
2289 2289 * Assume that each DMA channel will be configured with
2290 2290 * the default block size.
2291 2291 * RBR block counts are rounded up to a multiple of the batch count (16).
2292 2292 */
2293 2293 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2294 2294 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2295 2295
2296 2296 if (!nxge_port_rbr_size) {
2297 2297 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2298 2298 }
2299 2299 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2300 2300 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2301 2301 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2302 2302 }
2303 2303
2304 2304 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2305 2305 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2306 2306
2307 2307 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2308 2308 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2309 2309 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2310 2310 }
2311 2311 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2312 2312 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2313 2313 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2314 2314 "set to default %d",
2315 2315 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2316 2316 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2317 2317 }
2318 2318 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2319 2319 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2320 2320 "nxge_alloc_rx_mem_pool: RCR too high %d, "
2321 2321 "set to default %d",
2322 2322 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2323 2323 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2324 2324 }
2325 2325
2326 2326 /*
2327 2327 * N2/NIU has limitations on the descriptor sizes: contiguous
2328 2328 * memory allocation on data buffers is limited to 4M
2329 2329 * (contig_mem_alloc), and control buffers must be little
2330 2330 * endian (must use the ddi/dki mem alloc function).
2331 2331 */
2332 2332 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2333 2333 if (nxgep->niu_type == N2_NIU) {
2334 2334 nxge_port_rbr_spare_size = 0;
2335 2335 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2336 2336 (!ISP2(nxge_port_rbr_size))) {
2337 2337 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2338 2338 }
2339 2339 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2340 2340 (!ISP2(nxge_port_rcr_size))) {
2341 2341 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2342 2342 }
2343 2343 }
2344 2344 #endif
2345 2345
2346 2346 /*
2347 2347 * Addresses of receive block ring, receive completion ring and the
2348 2348 * mailbox must all be cache-aligned (64 bytes).
2349 2349 */
2350 2350 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2351 2351 rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2352 2352 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2353 2353 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
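/*
 * Worked example (descriptor sizes here are assumed for
 * illustration; the real values come from the sizeof operators
 * above): with a 4096-entry RBR, no spare, an 8192-entry RCR, and
 * 8-byte descriptors, the control area is 4096*8 + 8192*8 bytes
 * plus the mailbox, i.e. 96K plus sizeof (rxdma_mailbox_t).
 */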
2354 2354
2355 2355 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2356 2356 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2357 2357 "nxge_port_rcr_size = %d "
2358 2358 "rx_cntl_alloc_size = %d",
2359 2359 nxge_port_rbr_size, nxge_port_rbr_spare_size,
2360 2360 nxge_port_rcr_size,
2361 2361 rx_cntl_alloc_size));
2362 2362
2363 2363 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2364 2364 if (nxgep->niu_type == N2_NIU) {
2365 2365 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2366 2366 (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2367 2367
2368 2368 if (!ISP2(rx_buf_alloc_size)) {
2369 2369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2370 2370 "==> nxge_alloc_rx_mem_pool: "
2371 2371 " must be power of 2"));
2372 2372 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2373 2373 goto nxge_alloc_rx_mem_pool_exit;
2374 2374 }
2375 2375
2376 2376 if (rx_buf_alloc_size > (1 << 22)) {
2377 2377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2378 2378 "==> nxge_alloc_rx_mem_pool: "
2379 2379 " limit size to 4M"));
2380 2380 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2381 2381 goto nxge_alloc_rx_mem_pool_exit;
2382 2382 }
2383 2383
2384 2384 if (rx_cntl_alloc_size < 0x2000) {
2385 2385 rx_cntl_alloc_size = 0x2000;
2386 2386 }
2387 2387 }
2388 2388 #endif
2389 2389 nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2390 2390 nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2391 2391 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2392 2392 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2393 2393
2394 2394 dma_poolp->ndmas = p_cfgp->max_rdcs;
2395 2395 dma_poolp->num_chunks = num_chunks;
2396 2396 dma_poolp->buf_allocated = B_TRUE;
2397 2397 nxgep->rx_buf_pool_p = dma_poolp;
2398 2398 dma_poolp->dma_buf_pool_p = dma_buf_p;
2399 2399
2400 2400 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2401 2401 dma_cntl_poolp->buf_allocated = B_TRUE;
2402 2402 nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2403 2403 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2404 2404
2405 2405 /* Allocate the receive rings, too. */
2406 2406 nxgep->rx_rbr_rings =
2407 2407 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2408 2408 nxgep->rx_rbr_rings->rbr_rings =
2409 2409 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2410 2410 nxgep->rx_rcr_rings =
2411 2411 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2412 2412 nxgep->rx_rcr_rings->rcr_rings =
2413 2413 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2414 2414 nxgep->rx_mbox_areas_p =
2415 2415 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2416 2416 nxgep->rx_mbox_areas_p->rxmbox_areas =
2417 2417 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2418 2418
2419 2419 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2420 2420 p_cfgp->max_rdcs;
2421 2421
2422 2422 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2423 2423 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2424 2424
2425 2425 nxge_alloc_rx_mem_pool_exit:
2426 2426 return (status);
2427 2427 }
2428 2428
2429 2429 /*
2430 2430 * nxge_alloc_rxb
2431 2431 *
2432 2432 * Allocate buffers for an RDC.
2433 2433 *
2434 2434 * Arguments:
2435 2435 * nxgep
2436 2436 * channel The channel to map into our kernel space.
2437 2437 *
2438 2438 * Notes:
2439 2439 *
2440 2440 * NPI function calls:
2441 2441 *
2442 2442 * NXGE function calls:
2443 2443 *
2444 2444 * Registers accessed:
2445 2445 *
2446 2446 * Context:
2447 2447 *
2448 2448 * Taking apart:
2449 2449 *
2450 2450 * Open questions:
2451 2451 *
2452 2452 */
2453 2453 nxge_status_t
2454 2454 nxge_alloc_rxb(
2455 2455 p_nxge_t nxgep,
2456 2456 int channel)
2457 2457 {
2458 2458 size_t rx_buf_alloc_size;
2459 2459 nxge_status_t status = NXGE_OK;
2460 2460
2461 2461 nxge_dma_common_t **data;
2462 2462 nxge_dma_common_t **control;
2463 2463 uint32_t *num_chunks;
2464 2464
2465 2465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2466 2466
2467 2467 /*
2468 2468 * Allocate memory for the receive buffers and descriptor rings.
2469 2469 * Replace these allocation functions with the interface functions
2470 2470 * provided by the partition manager if/when they are available.
2471 2471 */
2472 2472
2473 2473 /*
2474 2474 * Allocate memory for the receive buffer blocks.
2475 2475 */
2476 2476 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2477 2477 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2478 2478
2479 2479 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2480 2480 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2481 2481
2482 2482 if ((status = nxge_alloc_rx_buf_dma(
2483 2483 nxgep, channel, data, rx_buf_alloc_size,
2484 2484 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2485 2485 return (status);
2486 2486 }
2487 2487
2488 2488 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2489 2489 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2490 2490
2491 2491 /*
2492 2492 * Allocate memory for descriptor rings and mailbox.
2493 2493 */
2494 2494 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2495 2495
2496 2496 if ((status = nxge_alloc_rx_cntl_dma(
2497 2497 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2498 2498 != NXGE_OK) {
2499 2499 nxge_free_rx_cntl_dma(nxgep, *control);
2500 2500 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2501 2501 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2502 2502 return (status);
2503 2503 }
2504 2504
2505 2505 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2506 2506 "<== nxge_alloc_rxb: status 0x%08x", status));
2507 2507
2508 2508 return (status);
2509 2509 }
2510 2510
2511 2511 void
2512 2512 nxge_free_rxb(
2513 2513 p_nxge_t nxgep,
2514 2514 int channel)
2515 2515 {
2516 2516 nxge_dma_common_t *data;
2517 2517 nxge_dma_common_t *control;
2518 2518 uint32_t num_chunks;
2519 2519
2520 2520 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2521 2521
2522 2522 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2523 2523 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2524 2524 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2525 2525
2526 2526 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2527 2527 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2528 2528
2529 2529 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2530 2530 nxge_free_rx_cntl_dma(nxgep, control);
2531 2531
2532 2532 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2533 2533
2534 2534 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2535 2535 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2536 2536
2537 2537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2538 2538 }
2539 2539
2540 2540 static void
2541 2541 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2542 2542 {
2543 2543 int rdc_max = NXGE_MAX_RDCS;
2544 2544
2545 2545 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2546 2546
2547 2547 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2548 2548 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2549 2549 "<== nxge_free_rx_mem_pool "
2550 2550 "(null rx buf pool or buf not allocated)"));
2551 2551 return;
2552 2552 }
2553 2553 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2554 2554 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2555 2555 "<== nxge_free_rx_mem_pool "
2556 2556 "(null rx cntl buf pool or cntl buf not allocated)"));
2557 2557 return;
2558 2558 }
2559 2559
2560 2560 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2561 2561 sizeof (p_nxge_dma_common_t) * rdc_max);
2562 2562 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2563 2563
2564 2564 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2565 2565 sizeof (uint32_t) * rdc_max);
2566 2566 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2567 2567 sizeof (p_nxge_dma_common_t) * rdc_max);
2568 2568 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2569 2569
2570 2570 nxgep->rx_buf_pool_p = 0;
2571 2571 nxgep->rx_cntl_pool_p = 0;
2572 2572
2573 2573 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2574 2574 sizeof (p_rx_rbr_ring_t) * rdc_max);
2575 2575 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2576 2576 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2577 2577 sizeof (p_rx_rcr_ring_t) * rdc_max);
2578 2578 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2579 2579 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2580 2580 sizeof (p_rx_mbox_t) * rdc_max);
2581 2581 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2582 2582
2583 2583 nxgep->rx_rbr_rings = 0;
2584 2584 nxgep->rx_rcr_rings = 0;
2585 2585 nxgep->rx_mbox_areas_p = 0;
2586 2586
2587 2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2588 2588 }
2589 2589
2590 2590
2591 2591 static nxge_status_t
2592 2592 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2593 2593 p_nxge_dma_common_t *dmap,
2594 2594 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2595 2595 {
2596 2596 p_nxge_dma_common_t rx_dmap;
2597 2597 nxge_status_t status = NXGE_OK;
2598 2598 size_t total_alloc_size;
2599 2599 size_t allocated = 0;
2600 2600 int i, size_index, array_size;
2601 2601 boolean_t use_kmem_alloc = B_FALSE;
2602 2602
2603 2603 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2604 2604
2605 2605 rx_dmap = (p_nxge_dma_common_t)
2606 2606 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2607 2607 KM_SLEEP);
2608 2608
2609 2609 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2610 2610 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2611 2611 dma_channel, alloc_size, block_size, dmap));
2612 2612
2613 2613 total_alloc_size = alloc_size;
2614 2614
2615 2615 #if defined(RX_USE_RECLAIM_POST)
2616 2616 total_alloc_size = alloc_size + alloc_size/4;
2617 2617 #endif
2618 2618
2619 2619 i = 0;
2620 2620 size_index = 0;
2621 2621 array_size = sizeof (alloc_sizes)/sizeof (size_t);
2622 2622 while ((size_index < array_size) &&
2623 2623 (alloc_sizes[size_index] < alloc_size))
2624 2624 size_index++;
2625 2625 if (size_index >= array_size) {
2626 2626 size_index = array_size - 1;
2627 2627 }
2628 2628
2629 2629 /* For Neptune, use kmem_alloc if the kmem flag is set. */
2630 2630 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2631 2631 use_kmem_alloc = B_TRUE;
2632 2632 #if defined(__i386) || defined(__amd64)
2633 2633 size_index = 0;
2634 2634 #endif
2635 2635 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2636 2636 "==> nxge_alloc_rx_buf_dma: "
2637 2637 "Neptune use kmem_alloc() - size_index %d",
2638 2638 size_index));
2639 2639 }
2640 2640
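/*
 * Allocation strategy: start with the smallest entry in
 * alloc_sizes[] that can satisfy the request in one chunk (or the
 * largest entry, if none can), and on each allocation failure fall
 * back to the next smaller size, chunk by chunk, until either
 * total_alloc_size bytes have been allocated or the NXGE_DMA_BLOCK
 * chunk limit is reached.
 */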
2641 2641 while ((allocated < total_alloc_size) &&
2642 2642 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2643 2643 rx_dmap[i].dma_chunk_index = i;
2644 2644 rx_dmap[i].block_size = block_size;
2645 2645 rx_dmap[i].alength = alloc_sizes[size_index];
2646 2646 rx_dmap[i].orig_alength = rx_dmap[i].alength;
2647 2647 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2648 2648 rx_dmap[i].dma_channel = dma_channel;
2649 2649 rx_dmap[i].contig_alloc_type = B_FALSE;
2650 2650 rx_dmap[i].kmem_alloc_type = B_FALSE;
2651 2651 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2652 2652
2653 2653 /*
2654 2654 * N2/NIU: data buffers must be contiguous as the driver
2655 2655 * needs to call Hypervisor api to set up
2656 2656 * logical pages.
2657 2657 */
2658 2658 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2659 2659 rx_dmap[i].contig_alloc_type = B_TRUE;
2660 2660 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2661 2661 } else if (use_kmem_alloc) {
2662 2662 /* For Neptune, use kmem_alloc */
2663 2663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2664 2664 "==> nxge_alloc_rx_buf_dma: "
2665 2665 "Neptune use kmem_alloc()"));
2666 2666 rx_dmap[i].kmem_alloc_type = B_TRUE;
2667 2667 rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2668 2668 }
2669 2669
2670 2670 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2671 2671 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2672 2672 "i %d nblocks %d alength %d",
2673 2673 dma_channel, i, &rx_dmap[i], block_size,
2674 2674 i, rx_dmap[i].nblocks,
2675 2675 rx_dmap[i].alength));
2676 2676 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2677 2677 &nxge_rx_dma_attr,
2678 2678 rx_dmap[i].alength,
2679 2679 &nxge_dev_buf_dma_acc_attr,
2680 2680 DDI_DMA_READ | DDI_DMA_STREAMING,
2681 2681 (p_nxge_dma_common_t)(&rx_dmap[i]));
2682 2682 if (status != NXGE_OK) {
2683 2683 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2684 2684 "nxge_alloc_rx_buf_dma: Alloc Failed: "
2685 2685 "dma %d size_index %d size requested %d",
2686 2686 dma_channel,
2687 2687 size_index,
2688 2688 rx_dmap[i].alength));
2689 2689 size_index--;
2690 2690 } else {
2691 2691 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2692 2692 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2693 2693 " nxge_alloc_rx_buf_dma DONE alloc mem: "
2694 2694 "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2695 2695 "buf_alloc_state %d alloc_type %d",
2696 2696 dma_channel,
2697 2697 &rx_dmap[i],
2698 2698 rx_dmap[i].kaddrp,
2699 2699 rx_dmap[i].alength,
2700 2700 rx_dmap[i].buf_alloc_state,
2701 2701 rx_dmap[i].buf_alloc_type));
2702 2702 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2703 2703 " alloc_rx_buf_dma allocated rdc %d "
2704 2704 "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2705 2705 dma_channel, i, rx_dmap[i].alength,
2706 2706 rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2707 2707 rx_dmap[i].kaddrp));
2708 2708 i++;
2709 2709 allocated += alloc_sizes[size_index];
2710 2710 }
2711 2711 }
2712 2712
2713 2713 if (allocated < total_alloc_size) {
2714 2714 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2715 2715 "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2716 2716 "allocated 0x%x requested 0x%x",
2717 2717 dma_channel,
2718 2718 allocated, total_alloc_size));
2719 2719 status = NXGE_ERROR;
2720 2720 goto nxge_alloc_rx_mem_fail1;
2721 2721 }
2722 2722
2723 2723 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2724 2724 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2725 2725 "allocated 0x%x requested 0x%x",
2726 2726 dma_channel,
2727 2727 allocated, total_alloc_size));
2728 2728
2729 2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2730 2730 " alloc_rx_buf_dma rdc %d allocated %d chunks",
2731 2731 dma_channel, i));
2732 2732 *num_chunks = i;
2733 2733 *dmap = rx_dmap;
2734 2734
2735 2735 goto nxge_alloc_rx_mem_exit;
2736 2736
2737 2737 nxge_alloc_rx_mem_fail1:
2738 2738 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2739 2739
2740 2740 nxge_alloc_rx_mem_exit:
2741 2741 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2742 2742 "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2743 2743
2744 2744 return (status);
2745 2745 }
2746 2746
2747 2747 /*ARGSUSED*/
2748 2748 static void
2749 2749 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2750 2750 uint32_t num_chunks)
2751 2751 {
2752 2752 int i;
2753 2753
2754 2754 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2755 2755 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2756 2756
2757 2757 if (dmap == 0)
2758 2758 return;
2759 2759
2760 2760 for (i = 0; i < num_chunks; i++) {
2761 2761 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2762 2762 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2763 2763 i, dmap));
2764 2764 nxge_dma_free_rx_data_buf(dmap++);
2765 2765 }
2766 2766
2767 2767 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
2768 2768 }
2769 2769
2770 2770 /*ARGSUSED*/
2771 2771 static nxge_status_t
2772 2772 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2773 2773 p_nxge_dma_common_t *dmap, size_t size)
2774 2774 {
2775 2775 p_nxge_dma_common_t rx_dmap;
2776 2776 nxge_status_t status = NXGE_OK;
2777 2777
2778 2778 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2779 2779
2780 2780 rx_dmap = (p_nxge_dma_common_t)
2781 2781 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2782 2782
2783 2783 rx_dmap->contig_alloc_type = B_FALSE;
2784 2784 rx_dmap->kmem_alloc_type = B_FALSE;
2785 2785
2786 2786 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2787 2787 &nxge_desc_dma_attr,
2788 2788 size,
2789 2789 &nxge_dev_desc_dma_acc_attr,
2790 2790 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2791 2791 rx_dmap);
2792 2792 if (status != NXGE_OK) {
2793 2793 goto nxge_alloc_rx_cntl_dma_fail1;
2794 2794 }
2795 2795
2796 2796 *dmap = rx_dmap;
2797 2797 goto nxge_alloc_rx_cntl_dma_exit;
2798 2798
2799 2799 nxge_alloc_rx_cntl_dma_fail1:
2800 2800 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2801 2801
2802 2802 nxge_alloc_rx_cntl_dma_exit:
2803 2803 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2804 2804 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2805 2805
2806 2806 return (status);
2807 2807 }
2808 2808
2809 2809 /*ARGSUSED*/
2810 2810 static void
2811 2811 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2812 2812 {
2813 2813 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2814 2814
2815 2815 if (dmap == 0)
2816 2816 return;
2817 2817
2818 2818 nxge_dma_mem_free(dmap);
2819 2819
2820 2820 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2821 2821 }
2822 2822
2823 2823 typedef struct {
2824 2824 size_t tx_size;
2825 2825 size_t cr_size;
2826 2826 size_t threshhold;
2827 2827 } nxge_tdc_sizes_t;
2828 2828
2829 2829 static
2830 2830 nxge_status_t
2831 2831 nxge_tdc_sizes(
2832 2832 nxge_t *nxgep,
2833 2833 nxge_tdc_sizes_t *sizes)
2834 2834 {
2835 2835 uint32_t threshhold; /* The bcopy() threshold */
2836 2836 size_t tx_size; /* Transmit buffer size */
2837 2837 size_t cr_size; /* Completion ring size */
2838 2838
2839 2839 /*
2840 2840 * Assume that each DMA channel will be configured with the
2841 2841 * default transmit buffer size for copying transmit data.
2842 2842 * (If a packet is bigger than this, it will not be copied.)
2843 2843 */
2844 2844 if (nxgep->niu_type == N2_NIU) {
2845 2845 threshhold = TX_BCOPY_SIZE;
2846 2846 } else {
2847 2847 threshhold = nxge_bcopy_thresh;
2848 2848 }
2849 2849 tx_size = nxge_tx_ring_size * threshhold;
2850 2850
2851 2851 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2852 2852 cr_size += sizeof (txdma_mailbox_t);
2853 2853
2854 2854 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2855 2855 if (nxgep->niu_type == N2_NIU) {
2856 2856 if (!ISP2(tx_size)) {
2857 2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2858 2858 "==> nxge_tdc_sizes: Tx size"
2859 2859 " must be power of 2"));
2860 2860 return (NXGE_ERROR);
2861 2861 }
2862 2862
2863 2863 if (tx_size > (1 << 22)) {
2864 2864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2865 2865 "==> nxge_tdc_sizes: Tx size"
2866 2866 " limited to 4M"));
2867 2867 return (NXGE_ERROR);
2868 2868 }
2869 2869
2870 2870 if (cr_size < 0x2000)
2871 2871 cr_size = 0x2000;
2872 2872 }
2873 2873 #endif
2874 2874
2875 2875 sizes->threshhold = threshhold;
2876 2876 sizes->tx_size = tx_size;
2877 2877 sizes->cr_size = cr_size;
2878 2878
2879 2879 return (NXGE_OK);
2880 2880 }
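/*
 * Sizing sketch (the numbers are illustrative, not mandated by this
 * code): with nxge_tx_ring_size of 1024 and a 2K bcopy threshold,
 * the transmit buffer pool is 1024 * 2048 = 2M, and the completion
 * ring holds 1024 descriptors plus one mailbox.
 */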
2881 2881 /*
2882 2882 * nxge_alloc_txb
2883 2883 *
2884 2884 * Allocate buffers for a TDC.
2885 2885 *
2886 2886 * Arguments:
2887 2887 * nxgep
2888 2888 * channel The channel to map into our kernel space.
2889 2889 *
2890 2890 * Notes:
2891 2891 *
2892 2892 * NPI function calls:
2893 2893 *
2894 2894 * NXGE function calls:
2895 2895 *
2896 2896 * Registers accessed:
2897 2897 *
2898 2898 * Context:
2899 2899 *
2900 2900 * Taking apart:
2901 2901 *
2902 2902 * Open questions:
2903 2903 *
2904 2904 */
2905 2905 nxge_status_t
2906 2906 nxge_alloc_txb(
2907 2907 p_nxge_t nxgep,
2908 2908 int channel)
2909 2909 {
2910 2910 nxge_dma_common_t **dma_buf_p;
2911 2911 nxge_dma_common_t **dma_cntl_p;
2912 2912 uint32_t *num_chunks;
2913 2913 nxge_status_t status = NXGE_OK;
2914 2914
2915 2915 nxge_tdc_sizes_t sizes;
2916 2916
2917 2917 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2918 2918
2919 2919 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2920 2920 return (NXGE_ERROR);
2921 2921
2922 2922 /*
2923 2923 * Allocate memory for transmit buffers and descriptor rings.
2924 2924 * Replace these allocation functions with the interface functions
2925 2925 * provided by the partition manager Real Soon Now.
2926 2926 */
2927 2927 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2928 2928 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2929 2929
2930 2930 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2931 2931
2932 2932 /*
2933 2933 * Allocate memory for the transmit buffer pool.
2934 2934 */
2939 2939 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2940 2940 "sizes: tx: %ld, cr:%ld, th:%ld",
2941 2941 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2942 2942
2943 2943 *num_chunks = 0;
2944 2944 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2945 2945 sizes.tx_size, sizes.threshhold, num_chunks);
2946 2946 if (status != NXGE_OK) {
2947 2947 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2948 2948 return (status);
2949 2949 }
2950 2950
2951 2951 /*
2952 2952 * Allocate memory for descriptor rings and mailbox.
2953 2953 */
2954 2954 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2955 2955 sizes.cr_size);
2956 2956 if (status != NXGE_OK) {
2957 2957 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2958 2958 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2959 2959 return (status);
2960 2960 }
2961 2961
2962 2962 return (NXGE_OK);
2963 2963 }
2964 2964
2965 2965 void
2966 2966 nxge_free_txb(
2967 2967 p_nxge_t nxgep,
2968 2968 int channel)
2969 2969 {
2970 2970 nxge_dma_common_t *data;
2971 2971 nxge_dma_common_t *control;
2972 2972 uint32_t num_chunks;
2973 2973
2974 2974 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2975 2975
2976 2976 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2977 2977 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2978 2978 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2979 2979
2980 2980 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2981 2981 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2982 2982
2983 2983 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2984 2984 nxge_free_tx_cntl_dma(nxgep, control);
2985 2985
2986 2986 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2987 2987
2988 2988 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2989 2989 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2990 2990
2991 2991 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2992 2992 }
2993 2993
2994 2994 /*
2995 2995 * nxge_alloc_tx_mem_pool
2996 2996 *
2997 2997 * This function allocates all of the per-port TDC control data structures.
2998 2998 * The per-channel (TDC) data structures are allocated when needed.
2999 2999 *
3000 3000 * Arguments:
3001 3001 * nxgep
3002 3002 *
3003 3003 * Notes:
3004 3004 *
3005 3005 * Context:
3006 3006 * Any domain
3007 3007 */
3008 3008 nxge_status_t
3009 3009 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
3010 3010 {
3011 3011 nxge_hw_pt_cfg_t *p_cfgp;
3012 3012 nxge_dma_pool_t *dma_poolp;
3013 3013 nxge_dma_common_t **dma_buf_p;
3014 3014 nxge_dma_pool_t *dma_cntl_poolp;
3015 3015 nxge_dma_common_t **dma_cntl_p;
3016 3016 uint32_t *num_chunks; /* per dma */
3017 3017 int tdc_max;
3018 3018
3019 3019 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
3020 3020
3021 3021 p_cfgp = &nxgep->pt_config.hw_config;
3022 3022 tdc_max = NXGE_MAX_TDCS;
3023 3023
3024 3024 /*
3025 3025 * Allocate memory for each transmit DMA channel.
3026 3026 */
3027 3027 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
3028 3028 KM_SLEEP);
3029 3029 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3030 3030 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3031 3031
3032 3032 dma_cntl_poolp = (p_nxge_dma_pool_t)
3033 3033 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
3034 3034 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3035 3035 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3036 3036
3037 3037 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
3038 3038 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3039 3039 "nxge_alloc_tx_mem_pool: TDC too high %d, "
3040 3040 "set to default %d",
3041 3041 nxge_tx_ring_size, TDC_DEFAULT_MAX));
3042 3042 nxge_tx_ring_size = TDC_DEFAULT_MAX;
3043 3043 }
3044 3044
3045 3045 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3046 3046 /*
3047 3047 * N2/NIU has limitations on the descriptor sizes: contiguous
3048 3048 * memory allocation on data buffers is limited to 4M
3049 3049 * (contig_mem_alloc), and control buffers must be little
3050 3050 * endian (must use the ddi/dki mem alloc function). The
3051 3051 * transmit ring is limited to 8K (including the mailbox).
3052 3052 */
3053 3053 if (nxgep->niu_type == N2_NIU) {
3054 3054 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
3055 3055 (!ISP2(nxge_tx_ring_size))) {
3056 3056 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
3057 3057 }
3058 3058 }
3059 3059 #endif
3060 3060
3061 3061 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
3062 3062
3063 3063 num_chunks = (uint32_t *)KMEM_ZALLOC(
3064 3064 sizeof (uint32_t) * tdc_max, KM_SLEEP);
3065 3065
3066 3066 dma_poolp->ndmas = p_cfgp->tdc.owned;
3067 3067 dma_poolp->num_chunks = num_chunks;
3068 3068 dma_poolp->dma_buf_pool_p = dma_buf_p;
3069 3069 nxgep->tx_buf_pool_p = dma_poolp;
3070 3070
3071 3071 dma_poolp->buf_allocated = B_TRUE;
3072 3072
3073 3073 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3074 3074 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3075 3075 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3076 3076
3077 3077 dma_cntl_poolp->buf_allocated = B_TRUE;
3078 3078
3079 3079 nxgep->tx_rings =
3080 3080 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3081 3081 nxgep->tx_rings->rings =
3082 3082 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3083 3083 nxgep->tx_mbox_areas_p =
3084 3084 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3085 3085 nxgep->tx_mbox_areas_p->txmbox_areas_p =
3086 3086 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3087 3087
3088 3088 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3089 3089
3090 3090 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3091 3091 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
3092 3092 tdc_max, dma_poolp->ndmas));
3093 3093
3094 3094 return (NXGE_OK);
3095 3095 }
3096 3096
3097 3097 nxge_status_t
3098 3098 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3099 3099 p_nxge_dma_common_t *dmap, size_t alloc_size,
3100 3100 size_t block_size, uint32_t *num_chunks)
3101 3101 {
3102 3102 p_nxge_dma_common_t tx_dmap;
3103 3103 nxge_status_t status = NXGE_OK;
3104 3104 size_t total_alloc_size;
3105 3105 size_t allocated = 0;
3106 3106 int i, size_index, array_size;
3107 3107
3108 3108 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3109 3109
3110 3110 tx_dmap = (p_nxge_dma_common_t)
3111 3111 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3112 3112 KM_SLEEP);
3113 3113
3114 3114 total_alloc_size = alloc_size;
3115 3115 i = 0;
3116 3116 size_index = 0;
3117 3117 array_size = sizeof (alloc_sizes) / sizeof (size_t);
3118 3118 while ((size_index < array_size) &&
3119 3119 (alloc_sizes[size_index] < alloc_size))
3120 3120 size_index++;
3121 3121 if (size_index >= array_size) {
3122 3122 size_index = array_size - 1;
3123 3123 }
3124 3124
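/*
 * Same chunked fallback strategy as nxge_alloc_rx_buf_dma() above:
 * step down through alloc_sizes[] on failure until the request is
 * satisfied or NXGE_DMA_BLOCK chunks have been used.
 */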
3125 3125 while ((allocated < total_alloc_size) &&
3126 3126 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3127 3127
3128 3128 tx_dmap[i].dma_chunk_index = i;
3129 3129 tx_dmap[i].block_size = block_size;
3130 3130 tx_dmap[i].alength = alloc_sizes[size_index];
3131 3131 tx_dmap[i].orig_alength = tx_dmap[i].alength;
3132 3132 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3133 3133 tx_dmap[i].dma_channel = dma_channel;
3134 3134 tx_dmap[i].contig_alloc_type = B_FALSE;
3135 3135 tx_dmap[i].kmem_alloc_type = B_FALSE;
3136 3136
3137 3137 /*
3138 3138 * N2/NIU: data buffers must be contiguous as the driver
3139 3139 * needs to call Hypervisor api to set up
3140 3140 * logical pages.
3141 3141 */
3142 3142 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3143 3143 tx_dmap[i].contig_alloc_type = B_TRUE;
3144 3144 }
3145 3145
3146 3146 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3147 3147 &nxge_tx_dma_attr,
3148 3148 tx_dmap[i].alength,
3149 3149 &nxge_dev_buf_dma_acc_attr,
3150 3150 DDI_DMA_WRITE | DDI_DMA_STREAMING,
3151 3151 (p_nxge_dma_common_t)(&tx_dmap[i]));
3152 3152 if (status != NXGE_OK) {
3153 3153 size_index--;
3154 3154 } else {
3155 3155 i++;
3156 3156 allocated += alloc_sizes[size_index];
3157 3157 }
3158 3158 }
3159 3159
3160 3160 if (allocated < total_alloc_size) {
3161 3161 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3162 3162 "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
3163 3163 "allocated 0x%x requested 0x%x",
3164 3164 dma_channel,
3165 3165 allocated, total_alloc_size));
3166 3166 status = NXGE_ERROR;
3167 3167 goto nxge_alloc_tx_mem_fail1;
3168 3168 }
3169 3169
3170 3170 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3171 3171 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3172 3172 "allocated 0x%x requested 0x%x",
3173 3173 dma_channel,
3174 3174 allocated, total_alloc_size));
3175 3175
3176 3176 *num_chunks = i;
3177 3177 *dmap = tx_dmap;
3178 3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3179 3179 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3180 3180 *dmap, i));
3181 3181 goto nxge_alloc_tx_mem_exit;
3182 3182
3183 3183 nxge_alloc_tx_mem_fail1:
3184 3184 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3185 3185
3186 3186 nxge_alloc_tx_mem_exit:
3187 3187 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3188 3188 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3189 3189
3190 3190 return (status);
3191 3191 }
3192 3192
3193 3193 /*ARGSUSED*/
3194 3194 static void
3195 3195 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3196 3196 uint32_t num_chunks)
3197 3197 {
3198 3198 int i;
3199 3199
3200 3200 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3201 3201
3202 3202 if (dmap == 0)
3203 3203 return;
3204 3204
3205 3205 for (i = 0; i < num_chunks; i++) {
3206 3206 nxge_dma_mem_free(dmap++);
3207 3207 }
3208 3208
3209 3209 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3210 3210 }
3211 3211
3212 3212 /*ARGSUSED*/
3213 3213 nxge_status_t
3214 3214 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3215 3215 p_nxge_dma_common_t *dmap, size_t size)
3216 3216 {
3217 3217 p_nxge_dma_common_t tx_dmap;
3218 3218 nxge_status_t status = NXGE_OK;
3219 3219
3220 3220 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3221 3221 tx_dmap = (p_nxge_dma_common_t)
3222 3222 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3223 3223
3224 3224 tx_dmap->contig_alloc_type = B_FALSE;
3225 3225 tx_dmap->kmem_alloc_type = B_FALSE;
3226 3226
3227 3227 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3228 3228 &nxge_desc_dma_attr,
3229 3229 size,
3230 3230 &nxge_dev_desc_dma_acc_attr,
3231 3231 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3232 3232 tx_dmap);
3233 3233 if (status != NXGE_OK) {
3234 3234 goto nxge_alloc_tx_cntl_dma_fail1;
3235 3235 }
3236 3236
3237 3237 *dmap = tx_dmap;
3238 3238 goto nxge_alloc_tx_cntl_dma_exit;
3239 3239
3240 3240 nxge_alloc_tx_cntl_dma_fail1:
3241 3241 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3242 3242
3243 3243 nxge_alloc_tx_cntl_dma_exit:
3244 3244 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3245 3245 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3246 3246
3247 3247 return (status);
3248 3248 }
3249 3249
3250 3250 /*ARGSUSED*/
3251 3251 static void
3252 3252 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3253 3253 {
3254 3254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3255 3255
3256 3256 if (dmap == 0)
3257 3257 return;
3258 3258
3259 3259 nxge_dma_mem_free(dmap);
3260 3260
3261 3261 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3262 3262 }
3263 3263
3264 3264 /*
3265 3265 * nxge_free_tx_mem_pool
3266 3266 *
3267 3267 * This function frees all of the per-port TDC control data structures.
3268 3268 * The per-channel (TDC) data structures are freed when the channel
3269 3269 * is stopped.
3270 3270 *
3271 3271 * Arguments:
3272 3272 * nxgep
3273 3273 *
3274 3274 * Notes:
3275 3275 *
3276 3276 * Context:
3277 3277 * Any domain
3278 3278 */
3279 3279 static void
3280 3280 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3281 3281 {
3282 3282 int tdc_max = NXGE_MAX_TDCS;
3283 3283
3284 3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3285 3285
3286 3286 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3287 3287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3288 3288 "<== nxge_free_tx_mem_pool "
3289 3289 "(null tx buf pool or buf not allocated)"));
3290 3290 return;
3291 3291 }
3292 3292 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3293 3293 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3294 3294 "<== nxge_free_tx_mem_pool "
3295 3295 "(null tx cntl buf pool or cntl buf not allocated)"));
3296 3296 return;
3297 3297 }
3298 3298
3299 3299 /* 1. Free the mailboxes. */
3300 3300 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3301 3301 sizeof (p_tx_mbox_t) * tdc_max);
3302 3302 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3303 3303
3304 3304 nxgep->tx_mbox_areas_p = 0;
3305 3305
3306 3306 /* 2. Free the transmit ring arrays. */
3307 3307 KMEM_FREE(nxgep->tx_rings->rings,
3308 3308 sizeof (p_tx_ring_t) * tdc_max);
3309 3309 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3310 3310
3311 3311 nxgep->tx_rings = 0;
3312 3312
3313 3313 /* 3. Free the completion ring data structures. */
3314 3314 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3315 3315 sizeof (p_nxge_dma_common_t) * tdc_max);
3316 3316 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3317 3317
3318 3318 nxgep->tx_cntl_pool_p = 0;
3319 3319
3320 3320 /* 4. Free the data ring data structures. */
3321 3321 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3322 3322 sizeof (uint32_t) * tdc_max);
3323 3323 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3324 3324 sizeof (p_nxge_dma_common_t) * tdc_max);
3325 3325 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3326 3326
3327 3327 nxgep->tx_buf_pool_p = 0;
3328 3328
3329 3329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3330 3330 }
3331 3331
3332 3332 /*ARGSUSED*/
3333 3333 static nxge_status_t
3334 3334 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3335 3335 struct ddi_dma_attr *dma_attrp,
3336 3336 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3337 3337 p_nxge_dma_common_t dma_p)
3338 3338 {
3339 3339 caddr_t kaddrp;
3340 3340 int ddi_status = DDI_SUCCESS;
3341 3341 boolean_t contig_alloc_type;
3342 3342 boolean_t kmem_alloc_type;
3343 3343
3344 3344 contig_alloc_type = dma_p->contig_alloc_type;
3345 3345
3346 3346 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3347 3347 /*
3348 3348 * contig_alloc_type for contiguous memory only allowed
3349 3349 * for N2/NIU.
3350 3350 */
3351 3351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3352 3352 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3353 3353 dma_p->contig_alloc_type));
3354 3354 return (NXGE_ERROR | NXGE_DDI_FAILED);
3355 3355 }
3356 3356
3357 3357 dma_p->dma_handle = NULL;
3358 3358 dma_p->acc_handle = NULL;
3359 3359 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3360 3360 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3361 3361 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3362 3362 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3363 3363 if (ddi_status != DDI_SUCCESS) {
3364 3364 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3365 3365 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3366 3366 return (NXGE_ERROR | NXGE_DDI_FAILED);
3367 3367 }
3368 3368
3369 3369 kmem_alloc_type = dma_p->kmem_alloc_type;
3370 3370
3371 3371 switch (contig_alloc_type) {
3372 3372 case B_FALSE:
3373 3373 switch (kmem_alloc_type) {
3374 3374 case B_FALSE:
3375 3375 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3376 3376 length,
3377 3377 acc_attr_p,
3378 3378 xfer_flags,
3379 3379 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3380 3380 &dma_p->acc_handle);
3381 3381 if (ddi_status != DDI_SUCCESS) {
3382 3382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3383 3383 "nxge_dma_mem_alloc: "
3384 3384 "ddi_dma_mem_alloc failed"));
3385 3385 ddi_dma_free_handle(&dma_p->dma_handle);
3386 3386 dma_p->dma_handle = NULL;
3387 3387 return (NXGE_ERROR | NXGE_DDI_FAILED);
3388 3388 }
3389 3389 if (dma_p->alength < length) {
3390 3390 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3391 3391 				    "nxge_dma_mem_alloc: ddi_dma_mem_alloc "
3392 3392 				    "alength < length."));
3393 3393 ddi_dma_mem_free(&dma_p->acc_handle);
3394 3394 ddi_dma_free_handle(&dma_p->dma_handle);
3395 3395 dma_p->acc_handle = NULL;
3396 3396 dma_p->dma_handle = NULL;
3397 3397 return (NXGE_ERROR);
3398 3398 }
3399 3399
3400 3400 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3401 3401 NULL,
3402 3402 kaddrp, dma_p->alength, xfer_flags,
3403 3403 DDI_DMA_DONTWAIT,
3404 3404 0, &dma_p->dma_cookie, &dma_p->ncookies);
3405 3405 if (ddi_status != DDI_DMA_MAPPED) {
3406 3406 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3407 3407 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3408 3408 "failed "
3409 3409 				    "(status 0x%x ncookies %d.)", ddi_status,
3410 3410 dma_p->ncookies));
3411 3411 if (dma_p->acc_handle) {
3412 3412 ddi_dma_mem_free(&dma_p->acc_handle);
3413 3413 dma_p->acc_handle = NULL;
3414 3414 }
3415 3415 ddi_dma_free_handle(&dma_p->dma_handle);
3416 3416 dma_p->dma_handle = NULL;
3417 3417 return (NXGE_ERROR | NXGE_DDI_FAILED);
3418 3418 }
3419 3419
3420 3420 if (dma_p->ncookies != 1) {
3421 3421 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3422 3422 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3423 3423 				    "> 1 cookie "
3424 3424 				    "(status 0x%x ncookies %d.)", ddi_status,
3425 3425 dma_p->ncookies));
3426 3426 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3427 3427 if (dma_p->acc_handle) {
3428 3428 ddi_dma_mem_free(&dma_p->acc_handle);
3429 3429 dma_p->acc_handle = NULL;
3430 3430 }
3431 3431 ddi_dma_free_handle(&dma_p->dma_handle);
3432 3432 dma_p->dma_handle = NULL;
3433 3433 dma_p->acc_handle = NULL;
3434 3434 return (NXGE_ERROR);
3435 3435 }
3436 3436 break;
3437 3437
3438 3438 case B_TRUE:
3439 3439 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3440 3440 if (kaddrp == NULL) {
3441 3441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3442 3442 				    "nxge_dma_mem_alloc: "
3443 3443 				    "kmem_alloc failed"));
3444 3444 return (NXGE_ERROR);
3445 3445 }
3446 3446
3447 3447 dma_p->alength = length;
3448 3448 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3449 3449 NULL, kaddrp, dma_p->alength, xfer_flags,
3450 3450 DDI_DMA_DONTWAIT, 0,
3451 3451 &dma_p->dma_cookie, &dma_p->ncookies);
3452 3452 if (ddi_status != DDI_DMA_MAPPED) {
3453 3453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3454 3454 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3455 3455 "(kmem_alloc) failed kaddrp $%p length %d "
3456 3456 				    "(status 0x%x (%d) ncookies %d.)",
3457 3457 kaddrp, length,
3458 3458 ddi_status, ddi_status, dma_p->ncookies));
3459 3459 KMEM_FREE(kaddrp, length);
3460 3460 dma_p->acc_handle = NULL;
3461 3461 ddi_dma_free_handle(&dma_p->dma_handle);
3462 3462 dma_p->dma_handle = NULL;
3463 3463 dma_p->kaddrp = NULL;
3464 3464 return (NXGE_ERROR | NXGE_DDI_FAILED);
3465 3465 }
3466 3466
3467 3467 if (dma_p->ncookies != 1) {
3468 3468 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3469 3469 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3470 3470 				    "(kmem_alloc) > 1 cookie "
3471 3471 				    "(status 0x%x ncookies %d.)", ddi_status,
3472 3472 dma_p->ncookies));
3473 3473 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3474 3474 KMEM_FREE(kaddrp, length);
3475 3475 ddi_dma_free_handle(&dma_p->dma_handle);
3476 3476 dma_p->dma_handle = NULL;
3477 3477 dma_p->acc_handle = NULL;
3478 3478 dma_p->kaddrp = NULL;
3479 3479 return (NXGE_ERROR);
3480 3480 }
3481 3481
3482 3482 dma_p->kaddrp = kaddrp;
3483 3483
3484 3484 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3485 3485 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3486 3486 "kaddr $%p alength %d",
3487 3487 dma_p,
3488 3488 kaddrp,
3489 3489 dma_p->alength));
3490 3490 break;
3491 3491 }
3492 3492 break;
3493 3493
3494 3494 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3495 3495 case B_TRUE:
3496 3496 kaddrp = (caddr_t)contig_mem_alloc(length);
3497 3497 if (kaddrp == NULL) {
3498 3498 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3499 3499 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3500 3500 ddi_dma_free_handle(&dma_p->dma_handle);
3501 3501 return (NXGE_ERROR | NXGE_DDI_FAILED);
3502 3502 }
3503 3503
3504 3504 dma_p->alength = length;
3505 3505 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3506 3506 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3507 3507 &dma_p->dma_cookie, &dma_p->ncookies);
3508 3508 if (ddi_status != DDI_DMA_MAPPED) {
3509 3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3510 3510 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
3511 3511 "(status 0x%x ncookies %d.)", ddi_status,
3512 3512 dma_p->ncookies));
3513 3513
3514 3514 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3515 3515 "==> nxge_dma_mem_alloc: (not mapped)"
3516 3516 "length %lu (0x%x) "
3517 3517 "free contig kaddrp $%p "
3518 3518 "va_to_pa $%p",
3519 3519 length, length,
3520 3520 kaddrp,
3521 3521 va_to_pa(kaddrp)));
3522 3522
3523 3523
3524 3524 contig_mem_free((void *)kaddrp, length);
3525 3525 ddi_dma_free_handle(&dma_p->dma_handle);
3526 3526
3527 3527 dma_p->dma_handle = NULL;
3528 3528 dma_p->acc_handle = NULL;
3529 3529 			dma_p->alength = 0;
3530 3530 dma_p->kaddrp = NULL;
3531 3531
3532 3532 return (NXGE_ERROR | NXGE_DDI_FAILED);
3533 3533 }
3534 3534
3535 3535 		if (dma_p->ncookies != 1 ||
3536 3536 		    (dma_p->dma_cookie.dmac_laddress == 0)) {
3537 3537 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3538 3538 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
3539 3539 			    "cookie or "
3540 3540 			    "dmac_laddress is NULL $%p size %d "
3541 3541 			    "(status 0x%x ncookies %d.)",
3542 3542 			    dma_p->dma_cookie.dmac_laddress,
3543 3543 			    dma_p->dma_cookie.dmac_size,
3544 3544 			    ddi_status,
3545 3545 			    dma_p->ncookies));
3546 3546
3547 3547 contig_mem_free((void *)kaddrp, length);
3548 3548 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3549 3549 ddi_dma_free_handle(&dma_p->dma_handle);
3550 3550
3551 3551 dma_p->alength = 0;
3552 3552 dma_p->dma_handle = NULL;
3553 3553 dma_p->acc_handle = NULL;
3554 3554 dma_p->kaddrp = NULL;
3555 3555
3556 3556 return (NXGE_ERROR | NXGE_DDI_FAILED);
3557 3557 }
3558 3558 break;
3559 3559
3560 3560 #else
3561 3561 case B_TRUE:
3562 3562 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3563 3563 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3564 3564 return (NXGE_ERROR | NXGE_DDI_FAILED);
3565 3565 #endif
3566 3566 }
3567 3567
3568 3568 dma_p->kaddrp = kaddrp;
3569 3569 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3570 3570 dma_p->alength - RXBUF_64B_ALIGNED;
3571 3571 #if defined(__i386)
3572 3572 dma_p->ioaddr_pp =
3573 3573 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3574 3574 #else
3575 3575 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3576 3576 #endif
3577 3577 dma_p->last_ioaddr_pp =
3578 3578 #if defined(__i386)
3579 3579 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3580 3580 #else
3581 3581 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3582 3582 #endif
3583 3583 dma_p->alength - RXBUF_64B_ALIGNED;
3584 3584
3585 3585 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3586 3586
3587 3587 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3588 3588 dma_p->orig_ioaddr_pp =
3589 3589 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3590 3590 dma_p->orig_alength = length;
3591 3591 dma_p->orig_kaddrp = kaddrp;
3592 3592 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3593 3593 #endif
3594 3594
3595 3595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3596 3596 "dma buffer allocated: dma_p $%p "
3597 3597 	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
3598 3598 "dma_p->ioaddr_p $%p "
3599 3599 "dma_p->orig_ioaddr_p $%p "
3600 3600 "orig_vatopa $%p "
3601 3601 "alength %d (0x%x) "
3602 3602 "kaddrp $%p "
3603 3603 "length %d (0x%x)",
3604 3604 dma_p,
3605 3605 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3606 3606 dma_p->ioaddr_pp,
3607 3607 dma_p->orig_ioaddr_pp,
3608 3608 dma_p->orig_vatopa,
3609 3609 dma_p->alength, dma_p->alength,
3610 3610 kaddrp,
3611 3611 length, length));
3612 3612
3613 3613 return (NXGE_OK);
3614 3614 }
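/*
 * A hedged usage sketch: the pair of boolean_t flags on nxge_dma_common_t
 * selects the path taken by nxge_dma_mem_alloc() above -- (B_FALSE,
 * B_FALSE) goes through ddi_dma_mem_alloc(), (B_FALSE, B_TRUE) through
 * KMEM_ALLOC(), and (B_TRUE, ignored) through contig_mem_alloc(), which
 * is allowed on N2/NIU only.  The helper below is a minimal caller
 * modeled on nxge_alloc_tx_cntl_dma() earlier in this file; its name is
 * hypothetical and it is not part of the driver.
 */
static nxge_status_t
example_alloc_desc_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, size_t size)
{
	/* Plain DDI path: no contiguous and no kmem pre-allocation. */
	dmap->contig_alloc_type = B_FALSE;
	dmap->kmem_alloc_type = B_FALSE;

	/* Same attributes and flags as the TX control-area path above. */
	return (nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr, size, &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, dmap));
}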
3615 3615
3616 3616 static void
3617 3617 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3618 3618 {
3619 3619 if (dma_p->dma_handle != NULL) {
3620 3620 if (dma_p->ncookies) {
3621 3621 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3622 3622 dma_p->ncookies = 0;
3623 3623 }
3624 3624 ddi_dma_free_handle(&dma_p->dma_handle);
3625 3625 dma_p->dma_handle = NULL;
3626 3626 }
3627 3627
3628 3628 if (dma_p->acc_handle != NULL) {
3629 3629 ddi_dma_mem_free(&dma_p->acc_handle);
3630 3630 dma_p->acc_handle = NULL;
3631 3631 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3632 3632 }
3633 3633
3634 3634 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3635 3635 if (dma_p->contig_alloc_type &&
3636 3636 dma_p->orig_kaddrp && dma_p->orig_alength) {
3637 3637 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3638 3638 		    "kaddrp $%p (orig_kaddrp $%p) "
3639 3639 		    "mem type %d "
3640 3640 "orig_alength %d "
3641 3641 "alength 0x%x (%d)",
3642 3642 dma_p->kaddrp,
3643 3643 dma_p->orig_kaddrp,
3644 3644 dma_p->contig_alloc_type,
3645 3645 dma_p->orig_alength,
3646 3646 dma_p->alength, dma_p->alength));
3647 3647
3648 3648 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3649 3649 		dma_p->orig_alength = 0;
3650 3650 dma_p->orig_kaddrp = NULL;
3651 3651 dma_p->contig_alloc_type = B_FALSE;
3652 3652 }
3653 3653 #endif
3654 3654 dma_p->kaddrp = NULL;
3655 3655 	dma_p->alength = 0;
3656 3656 }
3657 3657
3658 3658 static void
3659 3659 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3660 3660 {
3661 3661 uint64_t kaddr;
3662 3662 uint32_t buf_size;
3663 3663
3664 3664 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3665 3665
3666 3666 if (dma_p->dma_handle != NULL) {
3667 3667 if (dma_p->ncookies) {
3668 3668 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3669 3669 dma_p->ncookies = 0;
3670 3670 }
3671 3671 ddi_dma_free_handle(&dma_p->dma_handle);
3672 3672 dma_p->dma_handle = NULL;
3673 3673 }
3674 3674
3675 3675 if (dma_p->acc_handle != NULL) {
3676 3676 ddi_dma_mem_free(&dma_p->acc_handle);
3677 3677 dma_p->acc_handle = NULL;
3678 3678 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3679 3679 }
3680 3680
3681 3681 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3682 3682 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3683 3683 dma_p,
3684 3684 dma_p->buf_alloc_state));
3685 3685
3686 3686 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3687 3687 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3688 3688 "<== nxge_dma_free_rx_data_buf: "
3689 3689 "outstanding data buffers"));
3690 3690 return;
3691 3691 }
3692 3692
3693 3693 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3694 3694 if (dma_p->contig_alloc_type &&
3695 3695 dma_p->orig_kaddrp && dma_p->orig_alength) {
3696 3696 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3697 3697 		    "kaddrp $%p (orig_kaddrp $%p) "
3698 3698 		    "mem type %d "
3699 3699 "orig_alength %d "
3700 3700 "alength 0x%x (%d)",
3701 3701 dma_p->kaddrp,
3702 3702 dma_p->orig_kaddrp,
3703 3703 dma_p->contig_alloc_type,
3704 3704 dma_p->orig_alength,
3705 3705 dma_p->alength, dma_p->alength));
3706 3706
3707 3707 kaddr = (uint64_t)dma_p->orig_kaddrp;
3708 3708 buf_size = dma_p->orig_alength;
3709 3709 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3710 3710 		dma_p->orig_alength = 0;
3711 3711 dma_p->orig_kaddrp = NULL;
3712 3712 dma_p->contig_alloc_type = B_FALSE;
3713 3713 dma_p->kaddrp = NULL;
3714 3714 		dma_p->alength = 0;
3715 3715 return;
3716 3716 }
3717 3717 #endif
3718 3718
3719 3719 if (dma_p->kmem_alloc_type) {
3720 3720 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3721 3721 "nxge_dma_free_rx_data_buf: free kmem "
3722 3722 "kaddrp $%p (orig_kaddrp $%p)"
3723 3723 "alloc type %d "
3724 3724 "orig_alength %d "
3725 3725 "alength 0x%x (%d)",
3726 3726 dma_p->kaddrp,
3727 3727 dma_p->orig_kaddrp,
3728 3728 dma_p->kmem_alloc_type,
3729 3729 dma_p->orig_alength,
3730 3730 dma_p->alength, dma_p->alength));
3731 3731 #if defined(__i386)
3732 3732 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3733 3733 #else
3734 3734 kaddr = (uint64_t)dma_p->kaddrp;
3735 3735 #endif
3736 3736 buf_size = dma_p->orig_alength;
3737 3737 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3738 3738 "nxge_dma_free_rx_data_buf: free dmap $%p "
3739 3739 "kaddr $%p buf_size %d",
3740 3740 dma_p,
3741 3741 kaddr, buf_size));
3742 3742 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3743 3743 dma_p->alength = 0;
3744 3744 dma_p->orig_alength = 0;
3745 3745 dma_p->kaddrp = NULL;
3746 3746 dma_p->kmem_alloc_type = B_FALSE;
3747 3747 }
3748 3748
3749 3749 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3750 3750 }
3751 3751
3752 3752 /*
3753 3753 * nxge_m_start() -- start transmitting and receiving.
3754 3754 *
3755 3755 * This function is called by the MAC layer when the first
3756 3756  * stream is opened, to prepare the hardware for transmitting and
3757 3757  * receiving packets.  (A registration sketch follows this function.)
3758 3758 */
3759 3759 static int
3760 3760 nxge_m_start(void *arg)
3761 3761 {
3762 3762 p_nxge_t nxgep = (p_nxge_t)arg;
3763 3763
3764 3764 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3765 3765
3766 3766 /*
3767 3767 * Are we already started?
3768 3768 */
3769 3769 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3770 3770 return (0);
3771 3771 }
3772 3772
3773 3773 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3774 3774 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3775 3775 }
3776 3776
3777 3777 /*
3778 3778 * Make sure RX MAC is disabled while we initialize.
3779 3779 */
3780 3780 if (!isLDOMguest(nxgep)) {
3781 3781 (void) nxge_rx_mac_disable(nxgep);
3782 3782 }
3783 3783
3784 3784 /*
3785 3785 * Grab the global lock.
3786 3786 */
3787 3787 MUTEX_ENTER(nxgep->genlock);
3788 3788
3789 3789 /*
3790 3790 * Initialize the driver and hardware.
3791 3791 */
3792 3792 if (nxge_init(nxgep) != NXGE_OK) {
3793 3793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3794 3794 "<== nxge_m_start: initialization failed"));
3795 3795 MUTEX_EXIT(nxgep->genlock);
3796 3796 return (EIO);
3797 3797 }
3798 3798
3799 3799 /*
3800 3800 * Start timer to check the system error and tx hangs
3801 3801 */
3802 3802 if (!isLDOMguest(nxgep))
3803 3803 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3804 3804 nxge_check_hw_state, NXGE_CHECK_TIMER);
3805 3805 #if defined(sun4v)
3806 3806 else
3807 3807 nxge_hio_start_timer(nxgep);
3808 3808 #endif
3809 3809
3810 3810 nxgep->link_notify = B_TRUE;
3811 3811 nxgep->link_check_count = 0;
3812 3812 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3813 3813
3814 3814 /*
3815 3815 	 * Let the global lock go, since we are initialized.
3816 3816 */
3817 3817 MUTEX_EXIT(nxgep->genlock);
3818 3818
3819 3819 /*
3820 3820 * Let the MAC start receiving packets, now that
3821 3821 * we are initialized.
3822 3822 */
3823 3823 if (!isLDOMguest(nxgep)) {
3824 3824 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3825 3825 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3826 3826 "<== nxge_m_start: enable of RX mac failed"));
3827 3827 return (EIO);
3828 3828 }
3829 3829
3830 3830 /*
3831 3831 * Enable hardware interrupts.
3832 3832 */
3833 3833 nxge_intr_hw_enable(nxgep);
3834 3834 }
3835 3835 #if defined(sun4v)
3836 3836 else {
3837 3837 /*
3838 3838 * In guest domain we enable RDCs and their interrupts as
3839 3839 * the last step.
3840 3840 */
3841 3841 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
3842 3842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3843 3843 "<== nxge_m_start: enable of RDCs failed"));
3844 3844 return (EIO);
3845 3845 }
3846 3846
3847 3847 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
3848 3848 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3849 3849 "<== nxge_m_start: intrs enable for RDCs failed"));
3850 3850 return (EIO);
3851 3851 }
3852 3852 }
3853 3853 #endif
3854 3854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3855 3855 return (0);
3856 3856 }
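/*
 * A hedged context sketch: nxge_m_start() and nxge_m_stop() are GLDv3
 * entry points that the MAC layer reaches through the mac_callbacks_t
 * passed to mac_register() at attach time.  The partial initializer
 * below (name hypothetical, flags zeroed, remaining members elided)
 * only illustrates where these two functions plug in; the driver's
 * real callback table is defined elsewhere in this file.
 */
static mac_callbacks_t example_m_callbacks = {
	0,			/* mc_callbacks capability flags */
	NULL,			/* mc_getstat */
	nxge_m_start,		/* mc_start: first stream opened */
	nxge_m_stop,		/* mc_stop: last stream closed */
	nxge_m_promisc,		/* mc_setpromisc */
	nxge_m_multicst		/* mc_multicst; later members elided */
};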
3857 3857
3858 3858 static boolean_t
3859 3859 nxge_check_groups_stopped(p_nxge_t nxgep)
3860 3860 {
3861 3861 int i;
3862 3862
3863 3863 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3864 3864 if (nxgep->rx_hio_groups[i].started)
3865 3865 return (B_FALSE);
3866 3866 }
3867 3867
3868 3868 return (B_TRUE);
3869 3869 }
3870 3870
3871 3871 /*
3872 3872 * nxge_m_stop(): stop transmitting and receiving.
3873 3873 */
3874 3874 static void
3875 3875 nxge_m_stop(void *arg)
3876 3876 {
3877 3877 p_nxge_t nxgep = (p_nxge_t)arg;
3878 3878 boolean_t groups_stopped;
3879 3879
3880 3880 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3881 3881
3882 3882 /*
3883 3883 * Are the groups stopped?
3884 3884 */
3885 3885 groups_stopped = nxge_check_groups_stopped(nxgep);
3886 3886 ASSERT(groups_stopped == B_TRUE);
3887 3887 if (!groups_stopped) {
3888 3888 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
3889 3889 nxgep->instance);
3890 3890 return;
3891 3891 }
3892 3892
3893 3893 if (!isLDOMguest(nxgep)) {
3894 3894 /*
3895 3895 * Disable the RX mac.
3896 3896 */
3897 3897 (void) nxge_rx_mac_disable(nxgep);
3898 3898
3899 3899 /*
3900 3900 * Wait for the IPP to drain.
3901 3901 */
3902 3902 (void) nxge_ipp_drain(nxgep);
3903 3903
3904 3904 /*
3905 3905 * Disable hardware interrupts.
3906 3906 */
3907 3907 nxge_intr_hw_disable(nxgep);
3908 3908 }
3909 3909 #if defined(sun4v)
3910 3910 else {
3911 3911 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
3912 3912 }
3913 3913 #endif
3914 3914
3915 3915 /*
3916 3916 * Grab the global lock.
3917 3917 */
3918 3918 MUTEX_ENTER(nxgep->genlock);
3919 3919
3920 3920 nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3921 3921 if (nxgep->nxge_timerid) {
3922 3922 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3923 3923 nxgep->nxge_timerid = 0;
3924 3924 }
3925 3925
3926 3926 /*
3927 3927 * Clean up.
3928 3928 */
3929 3929 nxge_uninit(nxgep);
3930 3930
3931 3931 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3932 3932
3933 3933 /*
3934 3934 * Let go of the global lock.
3935 3935 */
3936 3936 MUTEX_EXIT(nxgep->genlock);
3937 3937 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3938 3938 }
3939 3939
3940 3940 static int
3941 3941 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3942 3942 {
3943 3943 p_nxge_t nxgep = (p_nxge_t)arg;
3944 3944 struct ether_addr addrp;
3945 3945
3946 3946 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3947 3947 "==> nxge_m_multicst: add %d", add));
3948 3948
3949 3949 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3950 3950 if (add) {
3951 3951 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3952 3952 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3953 3953 "<== nxge_m_multicst: add multicast failed"));
3954 3954 return (EINVAL);
3955 3955 }
3956 3956 } else {
3957 3957 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3958 3958 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3959 3959 "<== nxge_m_multicst: del multicast failed"));
3960 3960 return (EINVAL);
3961 3961 }
3962 3962 }
3963 3963
3964 3964 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3965 3965
3966 3966 return (0);
3967 3967 }
3968 3968
3969 3969 static int
3970 3970 nxge_m_promisc(void *arg, boolean_t on)
3971 3971 {
3972 3972 p_nxge_t nxgep = (p_nxge_t)arg;
3973 3973
3974 3974 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3975 3975 "==> nxge_m_promisc: on %d", on));
3976 3976
3977 3977 if (nxge_set_promisc(nxgep, on)) {
3978 3978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3979 3979 "<== nxge_m_promisc: set promisc failed"));
3980 3980 return (EINVAL);
3981 3981 }
3982 3982
3983 3983 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3984 3984 "<== nxge_m_promisc: on %d", on));
3985 3985
3986 3986 return (0);
3987 3987 }
3988 3988
3989 3989 static void
3990 3990 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3991 3991 {
3992 3992 p_nxge_t nxgep = (p_nxge_t)arg;
3993 3993 struct iocblk *iocp;
3994 3994 boolean_t need_privilege;
3995 3995 int err;
3996 3996 int cmd;
3997 3997
3998 3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3999 3999
4000 4000 iocp = (struct iocblk *)mp->b_rptr;
4001 4001 iocp->ioc_error = 0;
4002 4002 need_privilege = B_TRUE;
4003 4003 cmd = iocp->ioc_cmd;
4004 4004 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
4005 4005 switch (cmd) {
4006 4006 default:
4007 4007 miocnak(wq, mp, 0, EINVAL);
4008 4008 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
4009 4009 return;
4010 4010
4011 4011 case LB_GET_INFO_SIZE:
4012 4012 case LB_GET_INFO:
4013 4013 case LB_GET_MODE:
4014 4014 need_privilege = B_FALSE;
4015 4015 break;
4016 4016 case LB_SET_MODE:
4017 4017 break;
4018 4018
4019 4019
4020 4020 case NXGE_GET_MII:
4021 4021 case NXGE_PUT_MII:
4022 4022 case NXGE_GET64:
4023 4023 case NXGE_PUT64:
4024 4024 case NXGE_GET_TX_RING_SZ:
4025 4025 case NXGE_GET_TX_DESC:
4026 4026 case NXGE_TX_SIDE_RESET:
4027 4027 case NXGE_RX_SIDE_RESET:
4028 4028 case NXGE_GLOBAL_RESET:
4029 4029 case NXGE_RESET_MAC:
4030 4030 case NXGE_TX_REGS_DUMP:
4031 4031 case NXGE_RX_REGS_DUMP:
4032 4032 case NXGE_INT_REGS_DUMP:
4033 4033 case NXGE_VIR_INT_REGS_DUMP:
4034 4034 case NXGE_PUT_TCAM:
4035 4035 case NXGE_GET_TCAM:
4036 4036 case NXGE_RTRACE:
4037 4037 case NXGE_RDUMP:
4038 4038 case NXGE_RX_CLASS:
4039 4039 case NXGE_RX_HASH:
4040 4040
4041 4041 need_privilege = B_FALSE;
4042 4042 break;
4043 4043 case NXGE_INJECT_ERR:
4044 4044 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
4045 4045 nxge_err_inject(nxgep, wq, mp);
4046 4046 break;
4047 4047 }
4048 4048
4049 4049 if (need_privilege) {
4050 4050 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
4051 4051 if (err != 0) {
4052 4052 miocnak(wq, mp, 0, err);
4053 4053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4054 4054 "<== nxge_m_ioctl: no priv"));
4055 4055 return;
4056 4056 }
4057 4057 }
4058 4058
4059 4059 switch (cmd) {
4060 4060
4061 4061 case LB_GET_MODE:
4062 4062 case LB_SET_MODE:
4063 4063 case LB_GET_INFO_SIZE:
4064 4064 case LB_GET_INFO:
4065 4065 nxge_loopback_ioctl(nxgep, wq, mp, iocp);
4066 4066 break;
4067 4067
4068 4068 case NXGE_GET_MII:
4069 4069 case NXGE_PUT_MII:
4070 4070 case NXGE_PUT_TCAM:
4071 4071 case NXGE_GET_TCAM:
4072 4072 case NXGE_GET64:
4073 4073 case NXGE_PUT64:
4074 4074 case NXGE_GET_TX_RING_SZ:
4075 4075 case NXGE_GET_TX_DESC:
4076 4076 case NXGE_TX_SIDE_RESET:
4077 4077 case NXGE_RX_SIDE_RESET:
4078 4078 case NXGE_GLOBAL_RESET:
4079 4079 case NXGE_RESET_MAC:
4080 4080 case NXGE_TX_REGS_DUMP:
4081 4081 case NXGE_RX_REGS_DUMP:
4082 4082 case NXGE_INT_REGS_DUMP:
4083 4083 case NXGE_VIR_INT_REGS_DUMP:
4084 4084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4085 4085 "==> nxge_m_ioctl: cmd 0x%x", cmd));
4086 4086 nxge_hw_ioctl(nxgep, wq, mp, iocp);
4087 4087 break;
4088 4088 case NXGE_RX_CLASS:
4089 4089 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4090 4090 miocnak(wq, mp, 0, EINVAL);
4091 4091 else
4092 4092 miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4093 4093 break;
4094 4094 case NXGE_RX_HASH:
4095 4095
4096 4096 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4097 4097 miocnak(wq, mp, 0, EINVAL);
4098 4098 else
4099 4099 miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4100 4100 break;
4101 4101 }
4102 4102
4103 4103 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4104 4104 }
4105 4105
4106 4106 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4107 4107
4108 4108 void
4109 4109 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4110 4110 {
4111 4111 p_nxge_mmac_stats_t mmac_stats;
4112 4112 int i;
4113 4113 nxge_mmac_t *mmac_info;
4114 4114
4115 4115 mmac_info = &nxgep->nxge_mmac_info;
4116 4116
4117 4117 mmac_stats = &nxgep->statsp->mmac_stats;
4118 4118 mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4119 4119 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4120 4120
4121 4121 for (i = 0; i < ETHERADDRL; i++) {
4122 4122 if (factory) {
4123 4123 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4124 4124 = mmac_info->factory_mac_pool[slot][
4125 4125 (ETHERADDRL-1) - i];
4126 4126 } else {
4127 4127 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4128 4128 = mmac_info->mac_pool[slot].addr[
4129 4129 (ETHERADDRL - 1) - i];
4130 4130 }
4131 4131 }
4132 4132 }
4133 4133
4134 4134 /*
4135 4135 * nxge_altmac_set() -- Set an alternate MAC address
4136 4136 */
4137 4137 static int
4138 4138 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4139 4139 int rdctbl, boolean_t usetbl)
4140 4140 {
4141 4141 uint8_t addrn;
4142 4142 uint8_t portn;
4143 4143 npi_mac_addr_t altmac;
4144 4144 hostinfo_t mac_rdc;
4145 4145 p_nxge_class_pt_cfg_t clscfgp;
4146 4146
4147 4147
4148 4148 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4149 4149 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4150 4150 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4151 4151
4152 4152 portn = nxgep->mac.portnum;
4153 4153 addrn = (uint8_t)slot - 1;
4154 4154
4155 4155 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4156 4156 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4157 4157 return (EIO);
4158 4158
4159 4159 /*
4160 4160 * Set the rdc table number for the host info entry
4161 4161 * for this mac address slot.
4162 4162 */
4163 4163 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4164 4164 mac_rdc.value = 0;
4165 4165 if (usetbl)
4166 4166 mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4167 4167 else
4168 4168 mac_rdc.bits.w0.rdc_tbl_num =
4169 4169 clscfgp->mac_host_info[addrn].rdctbl;
4170 4170 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4171 4171
4172 4172 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4173 4173 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4174 4174 return (EIO);
4175 4175 }
4176 4176
4177 4177 /*
4178 4178 * Enable comparison with the alternate MAC address.
4179 4179 * While the first alternate addr is enabled by bit 1 of register
4180 4180 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4181 4181 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4182 4182 * accordingly before calling npi_mac_altaddr_entry.
4183 4183 */
4184 4184 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4185 4185 addrn = (uint8_t)slot - 1;
4186 4186 else
4187 4187 addrn = (uint8_t)slot;
4188 4188
4189 4189 if (npi_mac_altaddr_enable(nxgep->npi_handle,
4190 4190 nxgep->function_num, addrn) != NPI_SUCCESS) {
4191 4191 return (EIO);
4192 4192 }
4193 4193
4194 4194 return (0);
4195 4195 }
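/*
 * A hedged worked example of the packing above: for the (hypothetical)
 * address 00:14:4f:a8:12:34, maddr[] = { 0x00, 0x14, 0x4f, 0xa8, 0x12,
 * 0x34 }, so
 *
 *	altmac.w2 = (0x00 << 8) | 0x14 = 0x0014
 *	altmac.w1 = (0x4f << 8) | 0xa8 = 0x4fa8
 *	altmac.w0 = (0x12 << 8) | 0x34 = 0x1234
 *
 * For the comparison-enable step, slot 1 maps to addrn 0 on the XMAC
 * ports (XMAC_PORT_0/XMAC_PORT_1) but stays addrn 1 on the BMAC ports,
 * per the BMAC_ALTAD_CMPEN vs. XMAC_ADDR_CMPEN bit numbering described
 * above.
 */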
4196 4196
4197 4197 /*
4198 4198  * nxge_m_mmac_add_g() - find an unused address slot, set the address
4199 4199 * value to the one specified, enable the port to start filtering on
4200 4200 * the new MAC address. Returns 0 on success.
4201 4201 */
4202 4202 int
4203 4203 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4204 4204 boolean_t usetbl)
4205 4205 {
4206 4206 p_nxge_t nxgep = arg;
4207 4207 int slot;
4208 4208 nxge_mmac_t *mmac_info;
4209 4209 int err;
4210 4210 nxge_status_t status;
4211 4211
4212 4212 mutex_enter(nxgep->genlock);
4213 4213
4214 4214 /*
4215 4215 	 * Make sure that nxge is initialized in case _start()
4216 4216 	 * has not been called yet.
4217 4217 */
4218 4218 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4219 4219 status = nxge_init(nxgep);
4220 4220 if (status != NXGE_OK) {
4221 4221 mutex_exit(nxgep->genlock);
4222 4222 return (ENXIO);
4223 4223 }
4224 4224 }
4225 4225
4226 4226 mmac_info = &nxgep->nxge_mmac_info;
4227 4227 if (mmac_info->naddrfree == 0) {
4228 4228 mutex_exit(nxgep->genlock);
4229 4229 return (ENOSPC);
4230 4230 }
4231 4231
4232 4232 /*
4233 4233 * Search for the first available slot. Because naddrfree
4234 4234 * is not zero, we are guaranteed to find one.
4235 4235 * Each of the first two ports of Neptune has 16 alternate
4236 4236 * MAC slots but only the first 7 (of 15) slots have assigned factory
4237 4237 * MAC addresses. We first search among the slots without bundled
4238 4238 * factory MACs. If we fail to find one in that range, then we
4239 4239 * search the slots with bundled factory MACs. A factory MAC
4240 4240 * will be wasted while the slot is used with a user MAC address.
4241 4241 * But the slot could be used by factory MAC again after calling
4242 4242 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4243 4243 */
4244 4244 for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4245 4245 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4246 4246 break;
4247 4247 }
4248 4248
4249 4249 ASSERT(slot <= mmac_info->num_mmac);
4250 4250
4251 4251 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4252 4252 usetbl)) != 0) {
4253 4253 mutex_exit(nxgep->genlock);
4254 4254 return (err);
4255 4255 }
4256 4256
4257 4257 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4258 4258 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4259 4259 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4260 4260 mmac_info->naddrfree--;
4261 4261 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4262 4262
4263 4263 mutex_exit(nxgep->genlock);
4264 4264 return (0);
4265 4265 }
4266 4266
4267 4267 /*
4268 4268 * Remove the specified mac address and update the HW not to filter
4269 4269 * the mac address anymore.
4270 4270 */
4271 4271 int
4272 4272 nxge_m_mmac_remove(void *arg, int slot)
4273 4273 {
4274 4274 p_nxge_t nxgep = arg;
4275 4275 nxge_mmac_t *mmac_info;
4276 4276 uint8_t addrn;
4277 4277 uint8_t portn;
4278 4278 int err = 0;
4279 4279 nxge_status_t status;
4280 4280
4281 4281 mutex_enter(nxgep->genlock);
4282 4282
4283 4283 /*
4284 4284 	 * Make sure that nxge is initialized in case _start()
4285 4285 	 * has not been called yet.
4286 4286 */
4287 4287 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4288 4288 status = nxge_init(nxgep);
4289 4289 if (status != NXGE_OK) {
4290 4290 mutex_exit(nxgep->genlock);
4291 4291 return (ENXIO);
4292 4292 }
4293 4293 }
4294 4294
4295 4295 mmac_info = &nxgep->nxge_mmac_info;
4296 4296 if (slot < 1 || slot > mmac_info->num_mmac) {
4297 4297 mutex_exit(nxgep->genlock);
4298 4298 return (EINVAL);
4299 4299 }
4300 4300
4301 4301 portn = nxgep->mac.portnum;
4302 4302 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4303 4303 addrn = (uint8_t)slot - 1;
4304 4304 else
4305 4305 addrn = (uint8_t)slot;
4306 4306
4307 4307 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4308 4308 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4309 4309 == NPI_SUCCESS) {
4310 4310 mmac_info->naddrfree++;
4311 4311 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4312 4312 /*
4313 4313 			 * Regardless of whether the MAC we just stopped
4314 4314 			 * filtering is a user addr or a factory addr, we must set
4315 4315 * the MMAC_VENDOR_ADDR flag if this slot has an
4316 4316 * associated factory MAC to indicate that a factory
4317 4317 * MAC is available.
4318 4318 */
4319 4319 if (slot <= mmac_info->num_factory_mmac) {
4320 4320 mmac_info->mac_pool[slot].flags
4321 4321 |= MMAC_VENDOR_ADDR;
4322 4322 }
4323 4323 /*
4324 4324 * Clear mac_pool[slot].addr so that kstat shows 0
4325 4325 * alternate MAC address if the slot is not used.
4326 4326 * (But nxge_m_mmac_get returns the factory MAC even
4327 4327 * when the slot is not used!)
4328 4328 */
4329 4329 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4330 4330 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4331 4331 } else {
4332 4332 err = EIO;
4333 4333 }
4334 4334 } else {
4335 4335 err = EINVAL;
4336 4336 }
4337 4337
4338 4338 mutex_exit(nxgep->genlock);
4339 4339 return (err);
4340 4340 }
4341 4341
4342 4342 /*
4343 4343 * The callback to query all the factory addresses. naddr must be the same as
4344 4344 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4345 4345 * mcm_addr is the space allocated for keep all the addresses, whose size is
4346 4346 * naddr * MAXMACADDRLEN.
4347 4347 */
4348 4348 static void
4349 4349 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
4350 4350 {
4351 4351 nxge_t *nxgep = arg;
4352 4352 nxge_mmac_t *mmac_info;
4353 4353 int i;
4354 4354
4355 4355 mutex_enter(nxgep->genlock);
4356 4356
4357 4357 mmac_info = &nxgep->nxge_mmac_info;
4358 4358 ASSERT(naddr == mmac_info->num_factory_mmac);
4359 4359
4360 4360 for (i = 0; i < naddr; i++) {
4361 4361 bcopy(mmac_info->factory_mac_pool[i + 1],
4362 4362 addr + i * MAXMACADDRLEN, ETHERADDRL);
4363 4363 }
4364 4364
4365 4365 mutex_exit(nxgep->genlock);
4366 4366 }
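/*
 * A hedged usage sketch of the contract described above: the consumer
 * sizes the buffer from mcm_naddr before invoking mcm_getaddr.  The
 * function name is hypothetical, and the capability-query step that
 * fills in the mac_capab_multifactaddr_t is assumed rather than shown.
 */
static void
example_query_factory_addrs(mac_capab_multifactaddr_t *capp, void *drv_arg)
{
	size_t len = capp->mcm_naddr * MAXMACADDRLEN;
	uint8_t *buf;

	/* One MAXMACADDRLEN-sized cell per factory address. */
	buf = KMEM_ZALLOC(len, KM_SLEEP);

	/* Each cell receives ETHERADDRL significant bytes. */
	capp->mcm_getaddr(drv_arg, capp->mcm_naddr, buf);

	KMEM_FREE(buf, len);
}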
4367 4367
4368 4368
4369 4369 static boolean_t
4370 4370 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4371 4371 {
4372 4372 nxge_t *nxgep = arg;
4373 4373 uint32_t *txflags = cap_data;
4374 4374
4375 4375 switch (cap) {
4376 4376 case MAC_CAPAB_HCKSUM:
4377 4377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4378 4378 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4379 4379 if (nxge_cksum_offload <= 1) {
4380 4380 *txflags = HCKSUM_INET_PARTIAL;
4381 4381 }
4382 4382 break;
4383 4383
4384 4384 case MAC_CAPAB_MULTIFACTADDR: {
4385 4385 mac_capab_multifactaddr_t *mfacp = cap_data;
4386 4386
4387 4387 if (!isLDOMguest(nxgep)) {
4388 4388 mutex_enter(nxgep->genlock);
4389 4389 mfacp->mcm_naddr =
4390 4390 nxgep->nxge_mmac_info.num_factory_mmac;
4391 4391 mfacp->mcm_getaddr = nxge_m_getfactaddr;
4392 4392 mutex_exit(nxgep->genlock);
4393 4393 }
4394 4394 break;
4395 4395 }
4396 4396
4397 4397 case MAC_CAPAB_LSO: {
4398 4398 mac_capab_lso_t *cap_lso = cap_data;
4399 4399
4400 4400 if (nxgep->soft_lso_enable) {
4401 4401 if (nxge_cksum_offload <= 1) {
4402 4402 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4403 4403 if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4404 4404 nxge_lso_max = NXGE_LSO_MAXLEN;
4405 4405 }
4406 4406 cap_lso->lso_basic_tcp_ipv4.lso_max =
4407 4407 nxge_lso_max;
4408 4408 }
4409 4409 break;
4410 4410 } else {
4411 4411 return (B_FALSE);
4412 4412 }
4413 4413 }
4414 4414
4415 4415 case MAC_CAPAB_RINGS: {
4416 4416 mac_capab_rings_t *cap_rings = cap_data;
4417 4417 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
4418 4418
4419 4419 mutex_enter(nxgep->genlock);
4420 4420 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
4421 4421 if (isLDOMguest(nxgep)) {
4422 4422 cap_rings->mr_group_type =
4423 4423 MAC_GROUP_TYPE_STATIC;
4424 4424 cap_rings->mr_rnum =
4425 4425 NXGE_HIO_SHARE_MAX_CHANNELS;
4426 4426 cap_rings->mr_rget = nxge_fill_ring;
4427 4427 cap_rings->mr_gnum = 1;
4428 4428 cap_rings->mr_gget = nxge_hio_group_get;
4429 4429 cap_rings->mr_gaddring = NULL;
4430 4430 cap_rings->mr_gremring = NULL;
4431 4431 } else {
4432 4432 /*
4433 4433 * Service Domain.
4434 4434 */
4435 4435 cap_rings->mr_group_type =
4436 4436 MAC_GROUP_TYPE_DYNAMIC;
4437 4437 cap_rings->mr_rnum = p_cfgp->max_rdcs;
4438 4438 cap_rings->mr_rget = nxge_fill_ring;
4439 4439 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
4440 4440 cap_rings->mr_gget = nxge_hio_group_get;
4441 4441 cap_rings->mr_gaddring = nxge_group_add_ring;
4442 4442 cap_rings->mr_gremring = nxge_group_rem_ring;
4443 4443 }
4444 4444
4445 4445 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4446 4446 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
4447 4447 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4448 4448 } else {
4449 4449 /*
4450 4450 * TX Rings.
4451 4451 */
4452 4452 if (isLDOMguest(nxgep)) {
4453 4453 cap_rings->mr_group_type =
4454 4454 MAC_GROUP_TYPE_STATIC;
4455 4455 cap_rings->mr_rnum =
4456 4456 NXGE_HIO_SHARE_MAX_CHANNELS;
4457 4457 cap_rings->mr_rget = nxge_fill_ring;
4458 4458 cap_rings->mr_gnum = 0;
4459 4459 cap_rings->mr_gget = NULL;
4460 4460 cap_rings->mr_gaddring = NULL;
4461 4461 cap_rings->mr_gremring = NULL;
4462 4462 } else {
4463 4463 /*
4464 4464 * Service Domain.
4465 4465 */
4466 4466 cap_rings->mr_group_type =
4467 4467 MAC_GROUP_TYPE_DYNAMIC;
4468 4468 cap_rings->mr_rnum = p_cfgp->tdc.count;
4469 4469 cap_rings->mr_rget = nxge_fill_ring;
4470 4470
4471 4471 /*
4472 4472 * Share capable.
4473 4473 *
4474 4474 * Do not report the default group: hence -1
4475 4475 */
4476 4476 cap_rings->mr_gnum =
4477 4477 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
4478 4478 cap_rings->mr_gget = nxge_hio_group_get;
4479 4479 cap_rings->mr_gaddring = nxge_group_add_ring;
4480 4480 cap_rings->mr_gremring = nxge_group_rem_ring;
4481 4481 }
4482 4482
4483 4483 NXGE_DEBUG_MSG((nxgep, TX_CTL,
4484 4484 "==> nxge_m_getcapab: tx rings # of rings %d",
4485 4485 p_cfgp->tdc.count));
4486 4486 }
4487 4487 mutex_exit(nxgep->genlock);
4488 4488 break;
4489 4489 }
4490 4490
4491 4491 #if defined(sun4v)
4492 4492 case MAC_CAPAB_SHARES: {
4493 4493 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4494 4494
4495 4495 /*
4496 4496 * Only the service domain driver responds to
4497 4497 * this capability request.
4498 4498 */
4499 4499 mutex_enter(nxgep->genlock);
4500 4500 if (isLDOMservice(nxgep)) {
4501 4501 mshares->ms_snum = 3;
4502 4502 mshares->ms_handle = (void *)nxgep;
4503 4503 mshares->ms_salloc = nxge_hio_share_alloc;
4504 4504 mshares->ms_sfree = nxge_hio_share_free;
4505 4505 mshares->ms_sadd = nxge_hio_share_add_group;
4506 4506 mshares->ms_sremove = nxge_hio_share_rem_group;
4507 4507 mshares->ms_squery = nxge_hio_share_query;
4508 4508 mshares->ms_sbind = nxge_hio_share_bind;
4509 4509 mshares->ms_sunbind = nxge_hio_share_unbind;
4510 4510 mutex_exit(nxgep->genlock);
4511 4511 } else {
4512 4512 mutex_exit(nxgep->genlock);
4513 4513 return (B_FALSE);
4514 4514 }
4515 4515 break;
4516 4516 }
4517 4517 #endif
4518 4518 default:
4519 4519 return (B_FALSE);
4520 4520 }
4521 4521 return (B_TRUE);
4522 4522 }
4523 4523
4524 4524 static boolean_t
4525 4525 nxge_param_locked(mac_prop_id_t pr_num)
4526 4526 {
4527 4527 /*
4528 4528 * All adv_* parameters are locked (read-only) while
4529 4529 * the device is in any sort of loopback mode ...
4530 4530 */
4531 4531 switch (pr_num) {
4532 4532 case MAC_PROP_ADV_1000FDX_CAP:
4533 4533 case MAC_PROP_EN_1000FDX_CAP:
4534 4534 case MAC_PROP_ADV_1000HDX_CAP:
4535 4535 case MAC_PROP_EN_1000HDX_CAP:
4536 4536 case MAC_PROP_ADV_100FDX_CAP:
4537 4537 case MAC_PROP_EN_100FDX_CAP:
4538 4538 case MAC_PROP_ADV_100HDX_CAP:
4539 4539 case MAC_PROP_EN_100HDX_CAP:
4540 4540 case MAC_PROP_ADV_10FDX_CAP:
4541 4541 case MAC_PROP_EN_10FDX_CAP:
4542 4542 case MAC_PROP_ADV_10HDX_CAP:
4543 4543 case MAC_PROP_EN_10HDX_CAP:
4544 4544 case MAC_PROP_AUTONEG:
4545 4545 case MAC_PROP_FLOWCTRL:
4546 4546 return (B_TRUE);
4547 4547 }
4548 4548 return (B_FALSE);
4549 4549 }
4550 4550
4551 4551 /*
4552 4552 * callback functions for set/get of properties
4553 4553 */
4554 4554 static int
4555 4555 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4556 4556 uint_t pr_valsize, const void *pr_val)
4557 4557 {
4558 4558 nxge_t *nxgep = barg;
4559 4559 p_nxge_param_t param_arr = nxgep->param_arr;
4560 4560 p_nxge_stats_t statsp = nxgep->statsp;
4561 4561 int err = 0;
4562 4562
4563 4563 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4564 4564
4565 4565 mutex_enter(nxgep->genlock);
4566 4566 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4567 4567 nxge_param_locked(pr_num)) {
4568 4568 /*
4569 4569 * All adv_* parameters are locked (read-only)
4570 4570 * while the device is in any sort of loopback mode.
4571 4571 */
4572 4572 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4573 4573 "==> nxge_m_setprop: loopback mode: read only"));
4574 4574 mutex_exit(nxgep->genlock);
4575 4575 return (EBUSY);
4576 4576 }
4577 4577
4578 4578 switch (pr_num) {
4579 4579 case MAC_PROP_EN_1000FDX_CAP:
4580 4580 nxgep->param_en_1000fdx =
4581 4581 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
4582 4582 goto reprogram;
4583 4583
4584 4584 case MAC_PROP_EN_100FDX_CAP:
4585 4585 nxgep->param_en_100fdx =
4586 4586 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
4587 4587 goto reprogram;
4588 4588
4589 4589 case MAC_PROP_EN_10FDX_CAP:
4590 4590 nxgep->param_en_10fdx =
4591 4591 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
4592 4592 goto reprogram;
4593 4593
4594 4594 case MAC_PROP_AUTONEG:
4595 4595 param_arr[param_autoneg].value = *(uint8_t *)pr_val;
4596 4596 goto reprogram;
4597 4597
4598 4598 case MAC_PROP_MTU: {
4599 4599 uint32_t cur_mtu, new_mtu, old_framesize;
4600 4600
4601 4601 cur_mtu = nxgep->mac.default_mtu;
4602 4602 ASSERT(pr_valsize >= sizeof (new_mtu));
4603 4603 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4604 4604
4605 4605 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4606 4606 "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4607 4607 new_mtu, nxgep->mac.is_jumbo));
4608 4608
4609 4609 if (new_mtu == cur_mtu) {
4610 4610 err = 0;
4611 4611 break;
4612 4612 }
4613 4613
4614 4614 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4615 4615 err = EBUSY;
4616 4616 break;
4617 4617 }
4618 4618
4619 4619 if ((new_mtu < NXGE_DEFAULT_MTU) ||
4620 4620 (new_mtu > NXGE_MAXIMUM_MTU)) {
4621 4621 err = EINVAL;
4622 4622 break;
4623 4623 }
4624 4624
4625 4625 old_framesize = (uint32_t)nxgep->mac.maxframesize;
4626 4626 nxgep->mac.maxframesize = (uint16_t)
4627 4627 (new_mtu + NXGE_EHEADER_VLAN_CRC);
4628 4628 if (nxge_mac_set_framesize(nxgep)) {
4629 4629 nxgep->mac.maxframesize =
4630 4630 (uint16_t)old_framesize;
4631 4631 err = EINVAL;
4632 4632 break;
4633 4633 }
4634 4634
4635 4635 nxgep->mac.default_mtu = new_mtu;
4636 4636 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);
4637 4637
4638 4638 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4639 4639 "==> nxge_m_setprop: set MTU: %d maxframe %d",
4640 4640 new_mtu, nxgep->mac.maxframesize));
4641 4641 break;
4642 4642 }
4643 4643
4644 4644 case MAC_PROP_FLOWCTRL: {
4645 4645 link_flowctrl_t fl;
4646 4646
4647 4647 ASSERT(pr_valsize >= sizeof (fl));
4648 4648 bcopy(pr_val, &fl, sizeof (fl));
4649 4649
4650 4650 switch (fl) {
4651 4651 case LINK_FLOWCTRL_NONE:
4652 4652 param_arr[param_anar_pause].value = 0;
4653 4653 break;
4654 4654
4655 4655 case LINK_FLOWCTRL_RX:
4656 4656 param_arr[param_anar_pause].value = 1;
4657 4657 break;
4658 4658
4659 4659 case LINK_FLOWCTRL_TX:
4660 4660 case LINK_FLOWCTRL_BI:
4661 4661 err = EINVAL;
4662 4662 break;
4663 4663 default:
4664 4664 err = EINVAL;
4665 4665 break;
4666 4666 }
4667 4667 reprogram:
4668 4668 if ((err == 0) && !isLDOMguest(nxgep)) {
4669 4669 if (!nxge_param_link_update(nxgep)) {
4670 4670 err = EINVAL;
4671 4671 }
4672 4672 } else {
4673 4673 err = EINVAL;
4674 4674 }
4675 4675 break;
4676 4676 }
4677 4677
4678 4678 case MAC_PROP_PRIVATE:
4679 4679 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4680 4680 "==> nxge_m_setprop: private property"));
4681 4681 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
4682 4682 break;
4683 4683
4684 4684 default:
4685 4685 err = ENOTSUP;
4686 4686 break;
4687 4687 }
4688 4688
4689 4689 mutex_exit(nxgep->genlock);
4690 4690
4691 4691 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4692 4692 "<== nxge_m_setprop (return %d)", err));
4693 4693 return (err);
4694 4694 }
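/*
 * A hedged administrative example of the MAC_PROP_MTU path above:
 * setting the MTU through dladm(1M) lands here with pr_num ==
 * MAC_PROP_MTU.  EBUSY is returned while the MAC is started, and values
 * outside [NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU] return EINVAL.  Assuming
 * an instance named nxge0:
 *
 *	# dladm set-linkprop -p mtu=9000 nxge0
 */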
4695 4695
4696 4696 static int
4697 4697 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4698 4698 uint_t pr_valsize, void *pr_val)
4699 4699 {
4700 4700 nxge_t *nxgep = barg;
4701 4701 p_nxge_param_t param_arr = nxgep->param_arr;
4702 4702 p_nxge_stats_t statsp = nxgep->statsp;
4703 4703
4704 4704 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4705 4705 "==> nxge_m_getprop: pr_num %d", pr_num));
4706 4706
4707 4707 switch (pr_num) {
4708 4708 case MAC_PROP_DUPLEX:
4709 4709 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4710 4710 break;
4711 4711
4712 4712 case MAC_PROP_SPEED: {
4713 4713 uint64_t val = statsp->mac_stats.link_speed * 1000000ull;
4714 4714
4715 4715 ASSERT(pr_valsize >= sizeof (val));
4716 4716 bcopy(&val, pr_val, sizeof (val));
4717 4717 break;
4718 4718 }
4719 4719
4720 4720 case MAC_PROP_STATUS: {
4721 4721 link_state_t state = statsp->mac_stats.link_up ?
4722 4722 LINK_STATE_UP : LINK_STATE_DOWN;
4723 4723
4724 4724 ASSERT(pr_valsize >= sizeof (state));
4725 4725 bcopy(&state, pr_val, sizeof (state));
4726 4726 break;
4727 4727 }
4728 4728
4729 4729 case MAC_PROP_AUTONEG:
4730 4730 *(uint8_t *)pr_val = param_arr[param_autoneg].value;
4731 4731 break;
4732 4732
4733 4733 case MAC_PROP_FLOWCTRL: {
4734 4734 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
4735 4735 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;
4736 4736
4737 4737 ASSERT(pr_valsize >= sizeof (fl));
4738 4738 bcopy(&fl, pr_val, sizeof (fl));
4739 4739 break;
4740 4740 }
4741 4741
4742 4742 case MAC_PROP_ADV_1000FDX_CAP:
4743 4743 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
4744 4744 break;
4745 4745
4746 4746 case MAC_PROP_EN_1000FDX_CAP:
4747 4747 *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4748 4748 break;
4749 4749
4750 4750 case MAC_PROP_ADV_100FDX_CAP:
4751 4751 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
4752 4752 break;
4753 4753
4754 4754 case MAC_PROP_EN_100FDX_CAP:
4755 4755 *(uint8_t *)pr_val = nxgep->param_en_100fdx;
4756 4756 break;
4757 4757
4758 4758 case MAC_PROP_ADV_10FDX_CAP:
4759 4759 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
4760 4760 break;
4761 4761
4762 4762 case MAC_PROP_EN_10FDX_CAP:
4763 4763 *(uint8_t *)pr_val = nxgep->param_en_10fdx;
4764 4764 break;
4765 4765
4766 4766 case MAC_PROP_PRIVATE:
4767 4767 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4768 4768 pr_val));
4769 4769
4770 4770 default:
4771 4771 return (ENOTSUP);
4772 4772 }
4773 4773
4774 4774 return (0);
4775 4775 }
4776 4776
4777 4777 static void
4778 4778 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4779 4779 mac_prop_info_handle_t prh)
4780 4780 {
4781 4781 nxge_t *nxgep = barg;
4782 4782 p_nxge_stats_t statsp = nxgep->statsp;
4783 4783
4784 4784 /*
4785 4785 * By default permissions are read/write unless specified
4786 4786 * otherwise by the driver.
4787 4787 */
4788 4788
4789 4789 switch (pr_num) {
4790 4790 case MAC_PROP_DUPLEX:
4791 4791 case MAC_PROP_SPEED:
4792 4792 case MAC_PROP_STATUS:
4793 4793 case MAC_PROP_EN_1000HDX_CAP:
4794 4794 case MAC_PROP_EN_100HDX_CAP:
4795 4795 case MAC_PROP_EN_10HDX_CAP:
4796 4796 case MAC_PROP_ADV_1000FDX_CAP:
4797 4797 case MAC_PROP_ADV_1000HDX_CAP:
4798 4798 case MAC_PROP_ADV_100FDX_CAP:
4799 4799 case MAC_PROP_ADV_100HDX_CAP:
4800 4800 case MAC_PROP_ADV_10FDX_CAP:
4801 4801 case MAC_PROP_ADV_10HDX_CAP:
4802 4802 /*
4803 4803 * Note that read-only properties don't need to
4804 4804 * provide default values since they cannot be
4805 4805 * changed by the administrator.
4806 4806 */
4807 4807 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4808 4808 break;
4809 4809
4810 4810 case MAC_PROP_EN_1000FDX_CAP:
4811 4811 case MAC_PROP_EN_100FDX_CAP:
4812 4812 case MAC_PROP_EN_10FDX_CAP:
4813 4813 mac_prop_info_set_default_uint8(prh, 1);
4814 4814 break;
4815 4815
4816 4816 case MAC_PROP_AUTONEG:
4817 4817 mac_prop_info_set_default_uint8(prh, 1);
4818 4818 break;
4819 4819
4820 4820 case MAC_PROP_FLOWCTRL:
4821 4821 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
4822 4822 break;
4823 4823
4824 4824 case MAC_PROP_MTU:
4825 4825 mac_prop_info_set_range_uint32(prh,
4826 4826 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
4827 4827 break;
4828 4828
4829 4829 case MAC_PROP_PRIVATE:
4830 4830 nxge_priv_propinfo(pr_name, prh);
4831 4831 break;
4832 4832 }
4833 4833
4834 4834 mutex_enter(nxgep->genlock);
4835 4835 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4836 4836 nxge_param_locked(pr_num)) {
4837 4837 /*
4838 4838 * Some properties are locked (read-only) while the
4839 4839 * device is in any sort of loopback mode.
4840 4840 */
4841 4841 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4842 4842 }
4843 4843 mutex_exit(nxgep->genlock);
4844 4844 }
4845 4845
4846 4846 static void
4847 4847 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4848 4848 {
4849 4849 char valstr[64];
4850 4850
4851 4851 bzero(valstr, sizeof (valstr));
4852 4852
4853 4853 if (strcmp(pr_name, "_function_number") == 0 ||
4854 4854 strcmp(pr_name, "_fw_version") == 0 ||
4855 4855 strcmp(pr_name, "_port_mode") == 0 ||
4856 4856 strcmp(pr_name, "_hot_swap_phy") == 0) {
4857 4857 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4858 4858
4859 4859 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4860 4860 (void) snprintf(valstr, sizeof (valstr),
4861 4861 "%d", RXDMA_RCR_TO_DEFAULT);
4862 4862
4863 4863 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4864 4864 (void) snprintf(valstr, sizeof (valstr),
4865 4865 "%d", RXDMA_RCR_PTHRES_DEFAULT);
4866 4866
4867 4867 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4868 4868 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4869 4869 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4870 4870 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4871 4871 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4872 4872 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4873 4873 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4874 4874 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4875 4875 (void) snprintf(valstr, sizeof (valstr), "%x",
4876 4876 NXGE_CLASS_FLOW_GEN_SERVER);
4877 4877
4878 4878 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4879 4879 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
4880 4880
4881 4881 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4882 4882 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4883 4883
4884 4884 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4885 4885 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4886 4886 }
4887 4887
4888 4888 if (strlen(valstr) > 0)
4889 4889 mac_prop_info_set_default_str(prh, valstr);
4890 4890 }
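/*
 * A hedged usage example: the private properties enumerated above are
 * also reachable through dladm(1M).  Assuming an instance named nxge0:
 *
 *	# dladm show-linkprop -p _soft_lso_enable nxge0
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 */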
4891 4891
4892 4892 /* ARGSUSED */
4893 4893 static int
4894 4894 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4895 4895 const void *pr_val)
4896 4896 {
4897 4897 p_nxge_param_t param_arr = nxgep->param_arr;
4898 4898 int err = 0;
4899 4899 long result;
4900 4900
4901 4901 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4902 4902 "==> nxge_set_priv_prop: name %s", pr_name));
4903 4903
4904 4904 /* Blanking */
4905 4905 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4906 4906 err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4907 4907 (char *)pr_val,
4908 4908 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4909 4909 if (err) {
4910 4910 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4911 4911 "<== nxge_set_priv_prop: "
4912 4912 "unable to set (%s)", pr_name));
4913 4913 err = EINVAL;
4914 4914 } else {
4915 4915 err = 0;
4916 4916 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4917 4917 "<== nxge_set_priv_prop: "
4918 4918 "set (%s)", pr_name));
4919 4919 }
4920 4920
4921 4921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4922 4922 		    "<== nxge_set_priv_prop: name %s (value %s)",
4923 4923 		    pr_name, (char *)pr_val));
4924 4924
4925 4925 return (err);
4926 4926 }
4927 4927
4928 4928 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4929 4929 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4930 4930 (char *)pr_val,
4931 4931 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4932 4932 if (err) {
4933 4933 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4934 4934 "<== nxge_set_priv_prop: "
4935 4935 "unable to set (%s)", pr_name));
4936 4936 err = EINVAL;
4937 4937 } else {
4938 4938 err = 0;
4939 4939 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4940 4940 "<== nxge_set_priv_prop: "
4941 4941 "set (%s)", pr_name));
4942 4942 }
4943 4943
4944 4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4945 4945 		    "<== nxge_set_priv_prop: name %s (value %s)",
4946 4946 		    pr_name, (char *)pr_val));
4947 4947
4948 4948 return (err);
4949 4949 }
4950 4950
4951 4951 /* Classification */
4952 4952 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4953 4953 if (pr_val == NULL) {
4954 4954 err = EINVAL;
4955 4955 return (err);
4956 4956 }
4957 4957 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4958 4958
4959 4959 err = nxge_param_set_ip_opt(nxgep, NULL,
4960 4960 NULL, (char *)pr_val,
4961 4961 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4962 4962
4963 4963 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4964 4964 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4965 4965 pr_name, result));
4966 4966
4967 4967 return (err);
4968 4968 }
4969 4969
4970 4970 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4971 4971 if (pr_val == NULL) {
4972 4972 err = EINVAL;
4973 4973 return (err);
4974 4974 }
4975 4975 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4976 4976
4977 4977 err = nxge_param_set_ip_opt(nxgep, NULL,
4978 4978 NULL, (char *)pr_val,
4979 4979 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4980 4980
4981 4981 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4982 4982 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4983 4983 pr_name, result));
4984 4984
4985 4985 return (err);
4986 4986 }
4987 4987 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4988 4988 if (pr_val == NULL) {
4989 4989 err = EINVAL;
4990 4990 return (err);
4991 4991 }
4992 4992 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4993 4993
4994 4994 err = nxge_param_set_ip_opt(nxgep, NULL,
4995 4995 NULL, (char *)pr_val,
4996 4996 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4997 4997
4998 4998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4999 4999 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5000 5000 pr_name, result));
5001 5001
5002 5002 return (err);
5003 5003 }
5004 5004 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5005 5005 if (pr_val == NULL) {
5006 5006 err = EINVAL;
5007 5007 return (err);
5008 5008 }
5009 5009 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5010 5010
5011 5011 err = nxge_param_set_ip_opt(nxgep, NULL,
5012 5012 NULL, (char *)pr_val,
5013 5013 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5014 5014
5015 5015 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5016 5016 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5017 5017 pr_name, result));
5018 5018
5019 5019 return (err);
5020 5020 }
5021 5021
5022 5022 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5023 5023 if (pr_val == NULL) {
5024 5024 err = EINVAL;
5025 5025 return (err);
5026 5026 }
5027 5027 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5028 5028
5029 5029 err = nxge_param_set_ip_opt(nxgep, NULL,
5030 5030 NULL, (char *)pr_val,
5031 5031 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5032 5032
5033 5033 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5034 5034 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5035 5035 pr_name, result));
5036 5036
5037 5037 return (err);
5038 5038 }
5039 5039
5040 5040 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5041 5041 if (pr_val == NULL) {
5042 5042 err = EINVAL;
5043 5043 return (err);
5044 5044 }
5045 5045 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5046 5046
5047 5047 err = nxge_param_set_ip_opt(nxgep, NULL,
5048 5048 NULL, (char *)pr_val,
5049 5049 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5050 5050
5051 5051 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5052 5052 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5053 5053 pr_name, result));
5054 5054
5055 5055 return (err);
5056 5056 }
5057 5057 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5058 5058 if (pr_val == NULL) {
5059 5059 err = EINVAL;
5060 5060 return (err);
5061 5061 }
5062 5062 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5063 5063
5064 5064 err = nxge_param_set_ip_opt(nxgep, NULL,
5065 5065 NULL, (char *)pr_val,
5066 5066 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5067 5067
5068 5068 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5069 5069 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5070 5070 pr_name, result));
5071 5071
5072 5072 return (err);
5073 5073 }
5074 5074 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5075 5075 if (pr_val == NULL) {
5076 5076 err = EINVAL;
5077 5077 return (err);
5078 5078 }
5079 5079 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5080 5080
5081 5081 err = nxge_param_set_ip_opt(nxgep, NULL,
5082 5082 NULL, (char *)pr_val,
5083 5083 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5084 5084
5085 5085 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5086 5086 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5087 5087 pr_name, result));
5088 5088
5089 5089 return (err);
5090 5090 }
5091 5091
5092 5092 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5093 5093 if (pr_val == NULL) {
5094 5094 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5095 5095 "==> nxge_set_priv_prop: name %s (null)", pr_name));
5096 5096 err = EINVAL;
5097 5097 return (err);
5098 5098 }
5099 5099
5100 5100 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5101 5101 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5102 5102 "<== nxge_set_priv_prop: name %s "
5103 5103 "(lso %d pr_val %s value %d)",
5104 5104 pr_name, nxgep->soft_lso_enable, pr_val, result));
5105 5105
5106 5106 if (result > 1 || result < 0) {
5107 5107 err = EINVAL;
5108 5108 } else {
5109 5109 if (nxgep->soft_lso_enable == (uint32_t)result) {
5110 5110 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5111 5111 "no change (%d %d)",
5112 5112 nxgep->soft_lso_enable, result));
5113 5113 return (0);
5114 5114 }
5115 5115 }
5116 5116
5117 5117 nxgep->soft_lso_enable = (int)result;
5118 5118
5119 5119 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5120 5120 "<== nxge_set_priv_prop: name %s (value %d)",
5121 5121 pr_name, result));
5122 5122
5123 5123 return (err);
5124 5124 }
5125 5125 /*
5126 5126 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5127 5127 * following code to be executed.
5128 5128 */
5129 5129 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5130 5130 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5131 5131 		    (caddr_t)&param_arr[param_anar_10gfdx]);
5132 5132 return (err);
5133 5133 }
5134 5134 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5135 5135 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5136 5136 		    (caddr_t)&param_arr[param_anar_pause]);
5137 5137 return (err);
5138 5138 }
5139 5139
5140 5140 return (ENOTSUP);
5141 5141 }
5142 5142
5143 5143 static int
5144 5144 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5145 5145 void *pr_val)
5146 5146 {
5147 5147 p_nxge_param_t param_arr = nxgep->param_arr;
5148 5148 char valstr[MAXNAMELEN];
5149 5149 int err = ENOTSUP;
5150 5150 uint_t strsize;
5151 5151
5152 5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5153 5153 "==> nxge_get_priv_prop: property %s", pr_name));
5154 5154
5155 5155 /* function number */
5156 5156 if (strcmp(pr_name, "_function_number") == 0) {
5157 5157 (void) snprintf(valstr, sizeof (valstr), "%d",
5158 5158 nxgep->function_num);
5159 5159 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5160 5160 "==> nxge_get_priv_prop: name %s "
5161 5161 "(value %d valstr %s)",
5162 5162 pr_name, nxgep->function_num, valstr));
5163 5163
5164 5164 err = 0;
5165 5165 goto done;
5166 5166 }
5167 5167
5168 5168 /* Neptune firmware version */
5169 5169 if (strcmp(pr_name, "_fw_version") == 0) {
5170 5170 (void) snprintf(valstr, sizeof (valstr), "%s",
5171 5171 nxgep->vpd_info.ver);
5172 5172 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5173 5173 "==> nxge_get_priv_prop: name %s "
5174 5174 		    "(value %s valstr %s)",
5175 5175 pr_name, nxgep->vpd_info.ver, valstr));
5176 5176
5177 5177 err = 0;
5178 5178 goto done;
5179 5179 }
5180 5180
5181 5181 /* port PHY mode */
5182 5182 if (strcmp(pr_name, "_port_mode") == 0) {
5183 5183 switch (nxgep->mac.portmode) {
5184 5184 case PORT_1G_COPPER:
5185 5185 (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5186 5186 nxgep->hot_swappable_phy ?
5187 5187 			    "[hot swappable]" : "");
5188 5188 break;
5189 5189 case PORT_1G_FIBER:
5190 5190 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5191 5191 nxgep->hot_swappable_phy ?
5192 5192 "[hot swappable]" : "");
5193 5193 break;
5194 5194 case PORT_10G_COPPER:
5195 5195 (void) snprintf(valstr, sizeof (valstr),
5196 5196 "10G copper %s",
5197 5197 nxgep->hot_swappable_phy ?
5198 5198 "[hot swappable]" : "");
5199 5199 break;
5200 5200 case PORT_10G_FIBER:
5201 5201 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5202 5202 nxgep->hot_swappable_phy ?
5203 5203 "[hot swappable]" : "");
5204 5204 break;
5205 5205 case PORT_10G_SERDES:
5206 5206 (void) snprintf(valstr, sizeof (valstr),
5207 5207 "10G serdes %s", nxgep->hot_swappable_phy ?
5208 5208 "[hot swappable]" : "");
5209 5209 break;
5210 5210 case PORT_1G_SERDES:
5211 5211 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5212 5212 nxgep->hot_swappable_phy ?
5213 5213 "[hot swappable]" : "");
5214 5214 break;
5215 5215 case PORT_1G_TN1010:
5216 5216 (void) snprintf(valstr, sizeof (valstr),
5217 5217 "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5218 5218 "[hot swappable]" : "");
5219 5219 break;
5220 5220 case PORT_10G_TN1010:
5221 5221 (void) snprintf(valstr, sizeof (valstr),
5222 5222 "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5223 5223 "[hot swappable]" : "");
5224 5224 break;
5225 5225 case PORT_1G_RGMII_FIBER:
5226 5226 (void) snprintf(valstr, sizeof (valstr),
5227 5227 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5228 5228 "[hot swappable]" : "");
5229 5229 break;
5230 5230 case PORT_HSP_MODE:
5231 5231 (void) snprintf(valstr, sizeof (valstr),
5232 5232 "phy not present[hot swappable]");
5233 5233 break;
5234 5234 default:
5235 5235 (void) snprintf(valstr, sizeof (valstr), "unknown %s",
5236 5236 nxgep->hot_swappable_phy ?
5237 5237 "[hot swappable]" : "");
5238 5238 break;
5239 5239 }
5240 5240
5241 5241 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5242 5242 "==> nxge_get_priv_prop: name %s (value %s)",
5243 5243 pr_name, valstr));
5244 5244
5245 5245 err = 0;
5246 5246 goto done;
5247 5247 }
5248 5248
5249 5249 /* Hot swappable PHY */
5250 5250 if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5251 5251 (void) snprintf(valstr, sizeof (valstr), "%s",
5252 5252 nxgep->hot_swappable_phy ?
5253 5253 "yes" : "no");
5254 5254
5255 5255 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5256 5256 "==> nxge_get_priv_prop: name %s "
5257 5257 "(value %d valstr %s)",
5258 5258 pr_name, nxgep->hot_swappable_phy, valstr));
5259 5259
5260 5260 err = 0;
5261 5261 goto done;
5262 5262 }
5263 5263
5264 5264
5265 5265 /* Receive Interrupt Blanking Parameters */
5266 5266 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5267 5267 err = 0;
5268 5268 (void) snprintf(valstr, sizeof (valstr), "%d",
5269 5269 nxgep->intr_timeout);
5270 5270 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5271 5271 "==> nxge_get_priv_prop: name %s (value %d)",
5272 5272 pr_name,
5273 5273 (uint32_t)nxgep->intr_timeout));
5274 5274 goto done;
5275 5275 }
5276 5276
5277 5277 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5278 5278 err = 0;
5279 5279 (void) snprintf(valstr, sizeof (valstr), "%d",
5280 5280 nxgep->intr_threshold);
5281 5281 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5282 5282 "==> nxge_get_priv_prop: name %s (value %d)",
5283 5283 pr_name, (uint32_t)nxgep->intr_threshold));
5284 5284
5285 5285 goto done;
5286 5286 }
5287 5287
5288 5288 /* Classification and Load Distribution Configuration */
5289 5289 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5290 5290 err = nxge_dld_get_ip_opt(nxgep,
5291 5291 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5292 5292
5293 5293 (void) snprintf(valstr, sizeof (valstr), "%x",
5294 5294 (int)param_arr[param_class_opt_ipv4_tcp].value);
5295 5295
5296 5296 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5297 5297 "==> nxge_get_priv_prop: %s", valstr));
5298 5298 goto done;
5299 5299 }
5300 5300
5301 5301 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5302 5302 err = nxge_dld_get_ip_opt(nxgep,
5303 5303 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5304 5304
5305 5305 (void) snprintf(valstr, sizeof (valstr), "%x",
5306 5306 (int)param_arr[param_class_opt_ipv4_udp].value);
5307 5307
5308 5308 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5309 5309 "==> nxge_get_priv_prop: %s", valstr));
5310 5310 goto done;
5311 5311 }
5312 5312 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5313 5313 err = nxge_dld_get_ip_opt(nxgep,
5314 5314 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5315 5315
5316 5316 (void) snprintf(valstr, sizeof (valstr), "%x",
5317 5317 (int)param_arr[param_class_opt_ipv4_ah].value);
5318 5318
5319 5319 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5320 5320 "==> nxge_get_priv_prop: %s", valstr));
5321 5321 goto done;
5322 5322 }
5323 5323
5324 5324 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5325 5325 err = nxge_dld_get_ip_opt(nxgep,
5326 5326 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5327 5327
5328 5328 (void) snprintf(valstr, sizeof (valstr), "%x",
5329 5329 (int)param_arr[param_class_opt_ipv4_sctp].value);
5330 5330
5331 5331 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5332 5332 "==> nxge_get_priv_prop: %s", valstr));
5333 5333 goto done;
5334 5334 }
5335 5335
5336 5336 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5337 5337 err = nxge_dld_get_ip_opt(nxgep,
5338 5338 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5339 5339
5340 5340 (void) snprintf(valstr, sizeof (valstr), "%x",
5341 5341 (int)param_arr[param_class_opt_ipv6_tcp].value);
5342 5342
5343 5343 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5344 5344 "==> nxge_get_priv_prop: %s", valstr));
5345 5345 goto done;
5346 5346 }
5347 5347
5348 5348 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5349 5349 err = nxge_dld_get_ip_opt(nxgep,
5350 5350 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5351 5351
5352 5352 (void) snprintf(valstr, sizeof (valstr), "%x",
5353 5353 (int)param_arr[param_class_opt_ipv6_udp].value);
5354 5354
5355 5355 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5356 5356 "==> nxge_get_priv_prop: %s", valstr));
5357 5357 goto done;
5358 5358 }
5359 5359
5360 5360 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5361 5361 err = nxge_dld_get_ip_opt(nxgep,
5362 5362 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5363 5363
5364 5364 (void) snprintf(valstr, sizeof (valstr), "%x",
5365 5365 (int)param_arr[param_class_opt_ipv6_ah].value);
5366 5366
5367 5367 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5368 5368 "==> nxge_get_priv_prop: %s", valstr));
5369 5369 goto done;
5370 5370 }
5371 5371
5372 5372 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5373 5373 err = nxge_dld_get_ip_opt(nxgep,
5374 5374 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5375 5375
5376 5376 (void) snprintf(valstr, sizeof (valstr), "%x",
5377 5377 (int)param_arr[param_class_opt_ipv6_sctp].value);
5378 5378
5379 5379 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5380 5380 "==> nxge_get_priv_prop: %s", valstr));
5381 5381 goto done;
5382 5382 }
5383 5383
5384 5384 /* Software LSO */
5385 5385 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5386 5386 (void) snprintf(valstr, sizeof (valstr),
5387 5387 "%d", nxgep->soft_lso_enable);
5388 5388 err = 0;
5389 5389 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5390 5390 "==> nxge_get_priv_prop: name %s (value %d)",
5391 5391 pr_name, nxgep->soft_lso_enable));
5392 5392
5393 5393 goto done;
5394 5394 }
5395 5395 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5396 5396 err = 0;
5397 5397 if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5398 5398 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5399 5399 goto done;
5400 5400 } else {
5401 5401 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5402 5402 goto done;
5403 5403 }
5404 5404 }
5405 5405 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5406 5406 err = 0;
5407 5407 if (nxgep->param_arr[param_anar_pause].value != 0) {
5408 5408 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5409 5409 goto done;
5410 5410 } else {
5411 5411 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5412 5412 goto done;
5413 5413 }
5414 5414 }
5415 5415
5416 5416 done:
5417 5417 if (err == 0) {
5418 5418 strsize = (uint_t)strlen(valstr);
5419 5419 if (pr_valsize < strsize) {
5420 5420 err = ENOBUFS;
5421 5421 } else {
5422 5422 (void) strlcpy(pr_val, valstr, pr_valsize);
5423 5423 }
5424 5424 }
5425 5425
5426 5426 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5427 5427 "<== nxge_get_priv_prop: return %d", err));
5428 5428 return (err);
5429 5429 }
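/*
 * Usage sketch (illustrative, not part of this change): these private
 * properties surface through dladm(1M), so on a live system one would
 * expect something like
 *
 *	# dladm show-linkprop -p _function_number nxge0
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *
 * The leading underscore marks a driver-private property.
 */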
5430 5430
5431 5431 /*
5432 5432 * Module loading and removing entry points.
5433 5433 */
5434 5434
5435 5435 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5436 5436 nodev, NULL, D_MP, NULL, nxge_quiesce);
5437 5437
5438 5438 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet"
5439 5439
5440 5440 /*
5441 5441 * Module linkage information for the kernel.
5442 5442 */
5443 5443 static struct modldrv nxge_modldrv = {
5444 5444 &mod_driverops,
5445 5445 NXGE_DESC_VER,
5446 5446 &nxge_dev_ops
5447 5447 };
5448 5448
5449 5449 static struct modlinkage modlinkage = {
5450 - MODREV_1, (void *) &nxge_modldrv, NULL
5450 + MODREV_1, { (void *) &nxge_modldrv, NULL }
5451 5451 };
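/*
 * Why the inner braces above matter for 7127: struct modlinkage ends in
 * an array of linkage pointers, so a fully-braced initializer needs a
 * second brace level or gcc's -Wmissing-braces fires.  A minimal sketch
 * of the shape (the real definition lives in <sys/modctl.h>; the array
 * size shown is illustrative):
 *
 *	struct modlinkage_like {
 *		int	ml_rev;			-- MODREV_1
 *		void	*ml_linkage[7];		-- NULL-terminated list
 *	};
 *
 *	static struct modlinkage_like ml = {
 *		MODREV_1, { (void *)&nxge_modldrv, NULL }
 *	};
 */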
5452 5452
5453 5453 int
5454 5454 _init(void)
5455 5455 {
5456 5456 int status;
5457 5457
5458 5458 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
5459 5459
5460 5460 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5461 5461
5462 5462 mac_init_ops(&nxge_dev_ops, "nxge");
5463 5463
5464 5464 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5465 5465 if (status != 0) {
5466 5466 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5467 5467 "failed to init device soft state"));
5468 5468 goto _init_exit;
5469 5469 }
5470 5470
5471 5471 status = mod_install(&modlinkage);
5472 5472 if (status != 0) {
5473 5473 ddi_soft_state_fini(&nxge_list);
5474 5474 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5475 5475 goto _init_exit;
5476 5476 }
5477 5477
5478 5478 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5479 5479
5480 5480 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5481 5481 return (status);
5482 5482
5483 5483 _init_exit:
5484 5484 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5485 5485 MUTEX_DESTROY(&nxgedebuglock);
5486 5486 return (status);
5487 5487 }
5488 5488
5489 5489 int
5490 5490 _fini(void)
5491 5491 {
5492 5492 int status;
5493 5493
5494 5494 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5495 5495 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5496 5496
5497 5497 if (nxge_mblks_pending)
5498 5498 return (EBUSY);
5499 5499
5500 5500 status = mod_remove(&modlinkage);
5501 5501 if (status != DDI_SUCCESS) {
5502 5502 NXGE_DEBUG_MSG((NULL, MOD_CTL,
5503 5503 "Module removal failed 0x%08x",
5504 5504 status));
5505 5505 goto _fini_exit;
5506 5506 }
5507 5507
5508 5508 mac_fini_ops(&nxge_dev_ops);
5509 5509
5510 5510 ddi_soft_state_fini(&nxge_list);
5511 5511
5512 5512 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5513 5513
5514 5514 MUTEX_DESTROY(&nxge_common_lock);
5515 5515 MUTEX_DESTROY(&nxgedebuglock);
5516 5516 return (status);
5517 5517
5518 5518 _fini_exit:
5519 5519 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5520 5520 return (status);
5521 5521 }
5522 5522
5523 5523 int
5524 5524 _info(struct modinfo *modinfop)
5525 5525 {
5526 5526 int status;
5527 5527
5528 5528 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5529 5529 status = mod_info(&modlinkage, modinfop);
5530 5530 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5531 5531
5532 5532 return (status);
5533 5533 }
5534 5534
5535 5535 /*ARGSUSED*/
5536 5536 static int
5537 5537 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5538 5538 {
5539 5539 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5540 5540 p_nxge_t nxgep = rhp->nxgep;
5541 5541 uint32_t channel;
5542 5542 p_tx_ring_t ring;
5543 5543
5544 5544 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5545 5545 ring = nxgep->tx_rings->rings[channel];
5546 5546
5547 5547 MUTEX_ENTER(&ring->lock);
5548 5548 ASSERT(ring->tx_ring_handle == NULL);
5549 5549 ring->tx_ring_handle = rhp->ring_handle;
5550 5550 MUTEX_EXIT(&ring->lock);
5551 5551
5552 5552 return (0);
5553 5553 }
5554 5554
5555 5555 static void
5556 5556 nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5557 5557 {
5558 5558 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5559 5559 p_nxge_t nxgep = rhp->nxgep;
5560 5560 uint32_t channel;
5561 5561 p_tx_ring_t ring;
5562 5562
5563 5563 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5564 5564 ring = nxgep->tx_rings->rings[channel];
5565 5565
5566 5566 MUTEX_ENTER(&ring->lock);
5567 5567 ASSERT(ring->tx_ring_handle != NULL);
5568 5568 ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5569 5569 MUTEX_EXIT(&ring->lock);
5570 5570 }
5571 5571
5572 5572 int
5573 5573 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5574 5574 {
5575 5575 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5576 5576 p_nxge_t nxgep = rhp->nxgep;
5577 5577 uint32_t channel;
5578 5578 p_rx_rcr_ring_t ring;
5579 5579 int i;
5580 5580
5581 5581 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5582 5582 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5583 5583
5584 5584 MUTEX_ENTER(&ring->lock);
5585 5585
5586 5586 if (ring->started) {
5587 5587 ASSERT(ring->started == B_FALSE);
5588 5588 MUTEX_EXIT(&ring->lock);
5589 5589 return (0);
5590 5590 }
5591 5591
5592 5592 /* set rcr_ring */
5593 5593 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5594 5594 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5595 5595 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5596 5596 ring->ldvp = &nxgep->ldgvp->ldvp[i];
5597 5597 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5598 5598 }
5599 5599 }
5600 5600
5601 5601 ring->rcr_mac_handle = rhp->ring_handle;
5602 5602 ring->rcr_gen_num = mr_gen_num;
5603 5603 ring->started = B_TRUE;
5604 5604 rhp->ring_gen_num = mr_gen_num;
5605 5605 MUTEX_EXIT(&ring->lock);
5606 5606
5607 5607 return (0);
5608 5608 }
5609 5609
5610 5610 static void
5611 5611 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5612 5612 {
5613 5613 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5614 5614 p_nxge_t nxgep = rhp->nxgep;
5615 5615 uint32_t channel;
5616 5616 p_rx_rcr_ring_t ring;
5617 5617
5618 5618 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5619 5619 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5620 5620
5621 5621 MUTEX_ENTER(&ring->lock);
5622 5622 ASSERT(ring->started == B_TRUE);
5623 5623 ring->rcr_mac_handle = NULL;
5624 5624 ring->ldvp = NULL;
5625 5625 ring->ldgp = NULL;
5626 5626 ring->started = B_FALSE;
5627 5627 MUTEX_EXIT(&ring->lock);
5628 5628 }
5629 5629
5630 5630 static int
5631 5631 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5632 5632 {
5633 5633 int i;
5634 5634
5635 5635 #if defined(sun4v)
5636 5636 if (isLDOMguest(nxgep)) {
5637 5637 return (nxge_hio_get_dc_htable_idx(nxgep,
5638 5638 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5639 5639 channel));
5640 5640 }
5641 5641 #endif
5642 5642
5643 5643 ASSERT(nxgep->ldgvp != NULL);
5644 5644
5645 5645 switch (type) {
5646 5646 case MAC_RING_TYPE_TX:
5647 5647 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5648 5648 if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5649 5649 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5650 5650 return ((int)
5651 5651 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5652 5652 }
5653 5653 }
5654 5654 break;
5655 5655
5656 5656 case MAC_RING_TYPE_RX:
5657 5657 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5658 5658 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5659 5659 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5660 5660 return ((int)
5661 5661 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5662 5662 }
5663 5663 }
5664 5664 }
5665 5665
5666 5666 return (-1);
5667 5667 }
5668 5668
5669 5669 /*
5670 5670  * Callback function for MAC layer to register all rings.
5671 5671 */
5672 5672 static void
5673 5673 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5674 5674 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5675 5675 {
5676 5676 p_nxge_t nxgep = (p_nxge_t)arg;
5677 5677 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5678 5678 p_nxge_intr_t intrp;
5679 5679 uint32_t channel;
5680 5680 int htable_idx;
5681 5681 p_nxge_ring_handle_t rhandlep;
5682 5682
5683 5683 ASSERT(nxgep != NULL);
5684 5684 ASSERT(p_cfgp != NULL);
5685 5685 ASSERT(infop != NULL);
5686 5686
5687 5687 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5688 5688 "==> nxge_fill_ring 0x%x index %d", rtype, index));
5689 5689
5690 5690
5691 5691 switch (rtype) {
5692 5692 case MAC_RING_TYPE_TX: {
5693 5693 mac_intr_t *mintr = &infop->mri_intr;
5694 5694
5695 5695 NXGE_DEBUG_MSG((nxgep, TX_CTL,
5696 5696 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5697 5697 rtype, index, p_cfgp->tdc.count));
5698 5698
5699 5699 ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5700 5700 rhandlep = &nxgep->tx_ring_handles[index];
5701 5701 rhandlep->nxgep = nxgep;
5702 5702 rhandlep->index = index;
5703 5703 rhandlep->ring_handle = rh;
5704 5704
5705 5705 channel = nxgep->pt_config.hw_config.tdc.start + index;
5706 5706 rhandlep->channel = channel;
5707 5707 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5708 5708 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5709 5709 channel);
5710 5710 if (htable_idx >= 0)
5711 5711 mintr->mi_ddi_handle = intrp->htable[htable_idx];
5712 5712 else
5713 5713 mintr->mi_ddi_handle = NULL;
5714 5714
5715 5715 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5716 5716 infop->mri_start = nxge_tx_ring_start;
5717 5717 infop->mri_stop = nxge_tx_ring_stop;
5718 5718 infop->mri_tx = nxge_tx_ring_send;
5719 5719 infop->mri_stat = nxge_tx_ring_stat;
5720 5720 infop->mri_flags = MAC_RING_TX_SERIALIZE;
5721 5721 break;
5722 5722 }
5723 5723
5724 5724 case MAC_RING_TYPE_RX: {
5725 5725 mac_intr_t nxge_mac_intr;
5726 5726 int nxge_rindex;
5727 5727 p_nxge_intr_t intrp;
5728 5728
5729 5729 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5730 5730
5731 5731 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5732 5732 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5733 5733 rtype, index, p_cfgp->max_rdcs));
5734 5734
5735 5735 /*
5736 5736 * 'index' is the ring index within the group.
5737 5737 * Find the ring index in the nxge instance.
5738 5738 */
5739 5739 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5740 5740 channel = nxgep->pt_config.hw_config.start_rdc + index;
5742 5742
5743 5743 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5744 5744 rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5745 5745 rhandlep->nxgep = nxgep;
5746 5746 rhandlep->index = nxge_rindex;
5747 5747 rhandlep->ring_handle = rh;
5748 5748 rhandlep->channel = channel;
5749 5749
5750 5750 /*
5751 5751 		 * Entry points to enable interrupts (disable polling)
5752 5752 		 * and disable interrupts (enable polling).
5753 5753 */
5754 5754 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5755 5755 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5756 5756 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5757 5757 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5758 5758
5759 5759 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5760 5760 channel);
5761 5761 if (htable_idx >= 0)
5762 5762 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5763 5763 else
5764 5764 nxge_mac_intr.mi_ddi_handle = NULL;
5765 5765
5766 5766 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5767 5767 infop->mri_start = nxge_rx_ring_start;
5768 5768 infop->mri_stop = nxge_rx_ring_stop;
5769 5769 infop->mri_intr = nxge_mac_intr;
5770 5770 infop->mri_poll = nxge_rx_poll;
5771 5771 infop->mri_stat = nxge_rx_ring_stat;
5772 5772 infop->mri_flags = MAC_RING_RX_ENQUEUE;
5773 5773 break;
5774 5774 }
5775 5775
5776 5776 default:
5777 5777 break;
5778 5778 }
5779 5779
5780 5780 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5781 5781 }
5782 5782
5783 5783 static void
5784 5784 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5785 5785 mac_ring_type_t type)
5786 5786 {
5787 5787 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5788 5788 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5789 5789 nxge_t *nxge;
5790 5790 nxge_grp_t *grp;
5791 5791 nxge_rdc_grp_t *rdc_grp;
5792 5792 	uint16_t channel;	/* device-wide ring id */
5793 5793 int dev_gindex;
5794 5794 int rv;
5795 5795
5796 5796 nxge = rgroup->nxgep;
5797 5797
5798 5798 switch (type) {
5799 5799 case MAC_RING_TYPE_TX:
5800 5800 /*
5801 5801 * nxge_grp_dc_add takes a channel number which is a
5802 5802 		 * device-wide ring ID.
5803 5803 */
5804 5804 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
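		/*
		 * e.g. with tdc.start == 4 and rhandle->index == 2 the
		 * device-wide channel is 6 (values are hypothetical).
		 */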
5805 5805
5806 5806 /*
5807 5807 * Remove the ring from the default group
5808 5808 */
5809 5809 if (rgroup->gindex != 0) {
5810 5810 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5811 5811 }
5812 5812
5813 5813 /*
5814 5814 * nxge->tx_set.group[] is an array of groups indexed by
5815 5815 * a "port" group ID.
5816 5816 */
5817 5817 grp = nxge->tx_set.group[rgroup->gindex];
5818 5818 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5819 5819 if (rv != 0) {
5820 5820 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5821 5821 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5822 5822 }
5823 5823 break;
5824 5824
5825 5825 case MAC_RING_TYPE_RX:
5826 5826 /*
5827 5827 * nxge->rx_set.group[] is an array of groups indexed by
5828 5828 * a "port" group ID.
5829 5829 */
5830 5830 grp = nxge->rx_set.group[rgroup->gindex];
5831 5831
5832 5832 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5833 5833 rgroup->gindex;
5834 5834 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5835 5835
5836 5836 /*
5837 5837 * nxge_grp_dc_add takes a channel number which is a
5838 5838 		 * device-wide ring ID.
5839 5839 */
5840 5840 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5841 5841 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5842 5842 if (rv != 0) {
5843 5843 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5844 5844 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5845 5845 }
5846 5846
5847 5847 rdc_grp->map |= (1 << channel);
5848 5848 rdc_grp->max_rdcs++;
5849 5849
5850 5850 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5851 5851 break;
5852 5852 }
5853 5853 }
5854 5854
5855 5855 static void
5856 5856 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5857 5857 mac_ring_type_t type)
5858 5858 {
5859 5859 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5860 5860 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5861 5861 nxge_t *nxge;
5862 5862 	uint16_t channel;	/* device-wide ring id */
5863 5863 nxge_rdc_grp_t *rdc_grp;
5864 5864 int dev_gindex;
5865 5865
5866 5866 nxge = rgroup->nxgep;
5867 5867
5868 5868 switch (type) {
5869 5869 case MAC_RING_TYPE_TX:
5870 5870 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
5871 5871 rgroup->gindex;
5872 5872 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5873 5873 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5874 5874
5875 5875 /*
5876 5876 * Add the ring back to the default group
5877 5877 */
5878 5878 if (rgroup->gindex != 0) {
5879 5879 nxge_grp_t *grp;
5880 5880 grp = nxge->tx_set.group[0];
5881 5881 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5882 5882 }
5883 5883 break;
5884 5884
5885 5885 case MAC_RING_TYPE_RX:
5886 5886 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5887 5887 rgroup->gindex;
5888 5888 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5889 5889 channel = rdc_grp->start_rdc + rhandle->index;
5890 5890 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
5891 5891
5892 5892 rdc_grp->map &= ~(1 << channel);
5893 5893 rdc_grp->max_rdcs--;
5894 5894
5895 5895 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5896 5896 break;
5897 5897 }
5898 5898 }
5899 5899
5900 5900
5901 5901 /*ARGSUSED*/
5902 5902 static nxge_status_t
5903 5903 nxge_add_intrs(p_nxge_t nxgep)
5904 5904 {
5905 5905
5906 5906 int intr_types;
5907 5907 int type = 0;
5908 5908 int ddi_status = DDI_SUCCESS;
5909 5909 nxge_status_t status = NXGE_OK;
5910 5910
5911 5911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5912 5912
5913 5913 nxgep->nxge_intr_type.intr_registered = B_FALSE;
5914 5914 nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5915 5915 nxgep->nxge_intr_type.msi_intx_cnt = 0;
5916 5916 nxgep->nxge_intr_type.intr_added = 0;
5917 5917 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5918 5918 nxgep->nxge_intr_type.intr_type = 0;
5919 5919
5920 5920 if (nxgep->niu_type == N2_NIU) {
5921 5921 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5922 5922 } else if (nxge_msi_enable) {
5923 5923 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5924 5924 }
5925 5925
5926 5926 /* Get the supported interrupt types */
5927 5927 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5928 5928 != DDI_SUCCESS) {
5929 5929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5930 5930 "ddi_intr_get_supported_types failed: status 0x%08x",
5931 5931 ddi_status));
5932 5932 return (NXGE_ERROR | NXGE_DDI_FAILED);
5933 5933 }
5934 5934 nxgep->nxge_intr_type.intr_types = intr_types;
5935 5935
5936 5936 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5937 5937 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5938 5938
5939 5939 /*
5940 5940 	 * Solaris MSI-X is not supported yet; use MSI for now.
5941 5941 	 * nxge_msi_enable:
5942 5942 	 *	1 - MSI, 2 - MSI-X, others - FIXED
5943 5943 */
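	/*
	 * Tuning sketch (assumed, standard /etc/system syntax): the
	 * preference can be set at boot with e.g.
	 *
	 *	set nxge:nxge_msi_enable = 1
	 *
	 * which the switch below then maps to a DDI interrupt type.
	 */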
5944 5944 switch (nxge_msi_enable) {
5945 5945 default:
5946 5946 type = DDI_INTR_TYPE_FIXED;
5947 5947 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5948 5948 "use fixed (intx emulation) type %08x",
5949 5949 type));
5950 5950 break;
5951 5951
5952 5952 case 2:
5953 5953 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5954 5954 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5955 5955 if (intr_types & DDI_INTR_TYPE_MSIX) {
5956 5956 type = DDI_INTR_TYPE_MSIX;
5957 5957 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5958 5958 "ddi_intr_get_supported_types: MSIX 0x%08x",
5959 5959 type));
5960 5960 } else if (intr_types & DDI_INTR_TYPE_MSI) {
5961 5961 type = DDI_INTR_TYPE_MSI;
5962 5962 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5963 5963 "ddi_intr_get_supported_types: MSI 0x%08x",
5964 5964 type));
5965 5965 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5966 5966 type = DDI_INTR_TYPE_FIXED;
5967 5967 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5968 5968 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5969 5969 type));
5970 5970 }
5971 5971 break;
5972 5972
5973 5973 case 1:
5974 5974 if (intr_types & DDI_INTR_TYPE_MSI) {
5975 5975 type = DDI_INTR_TYPE_MSI;
5976 5976 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5977 5977 "ddi_intr_get_supported_types: MSI 0x%08x",
5978 5978 type));
5979 5979 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
5980 5980 type = DDI_INTR_TYPE_MSIX;
5981 5981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5982 5982 "ddi_intr_get_supported_types: MSIX 0x%08x",
5983 5983 type));
5984 5984 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5985 5985 type = DDI_INTR_TYPE_FIXED;
5986 5986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5987 5987 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5988 5988 type));
5989 5989 }
5990 5990 }
5991 5991
5992 5992 nxgep->nxge_intr_type.intr_type = type;
5993 5993 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5994 5994 type == DDI_INTR_TYPE_FIXED) &&
5995 5995 nxgep->nxge_intr_type.niu_msi_enable) {
5996 5996 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5997 5997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5998 5998 " nxge_add_intrs: "
5999 5999 " nxge_add_intrs_adv failed: status 0x%08x",
6000 6000 status));
6001 6001 return (status);
6002 6002 } else {
6003 6003 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
6004 6004 "interrupts registered : type %d", type));
6005 6005 nxgep->nxge_intr_type.intr_registered = B_TRUE;
6006 6006
6007 6007 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6008 6008 "\nAdded advanced nxge add_intr_adv "
6009 6009 "intr type 0x%x\n", type));
6010 6010
6011 6011 return (status);
6012 6012 }
6013 6013 }
6014 6014
6015 6015 if (!nxgep->nxge_intr_type.intr_registered) {
6016 6016 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
6017 6017 "failed to register interrupts"));
6018 6018 return (NXGE_ERROR | NXGE_DDI_FAILED);
6019 6019 }
6020 6020
6021 6021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
6022 6022 return (status);
6023 6023 }
6024 6024
6025 6025 static nxge_status_t
6026 6026 nxge_add_intrs_adv(p_nxge_t nxgep)
6027 6027 {
6028 6028 int intr_type;
6029 6029 p_nxge_intr_t intrp;
6030 6030
6031 6031 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
6032 6032
6033 6033 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6034 6034 intr_type = intrp->intr_type;
6035 6035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
6036 6036 intr_type));
6037 6037
6038 6038 switch (intr_type) {
6039 6039 case DDI_INTR_TYPE_MSI: /* 0x2 */
6040 6040 case DDI_INTR_TYPE_MSIX: /* 0x4 */
6041 6041 return (nxge_add_intrs_adv_type(nxgep, intr_type));
6042 6042
6043 6043 case DDI_INTR_TYPE_FIXED: /* 0x1 */
6044 6044 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
6045 6045
6046 6046 default:
6047 6047 return (NXGE_ERROR);
6048 6048 }
6049 6049 }
6050 6050
6051 6051
6052 6052 /*ARGSUSED*/
6053 6053 static nxge_status_t
6054 6054 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
6055 6055 {
6056 6056 dev_info_t *dip = nxgep->dip;
6057 6057 p_nxge_ldg_t ldgp;
6058 6058 p_nxge_intr_t intrp;
6059 6059 uint_t *inthandler;
6060 6060 void *arg1, *arg2;
6061 6061 int behavior;
6062 6062 int nintrs, navail, nrequest;
6063 6063 int nactual, nrequired;
6064 6064 int inum = 0;
6065 6065 int x, y;
6066 6066 int ddi_status = DDI_SUCCESS;
6067 6067 nxge_status_t status = NXGE_OK;
6068 6068
6069 6069 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
6070 6070 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6071 6071 intrp->start_inum = 0;
6072 6072
6073 6073 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6074 6074 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6075 6075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6076 6076 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6077 6077 "nintrs: %d", ddi_status, nintrs));
6078 6078 return (NXGE_ERROR | NXGE_DDI_FAILED);
6079 6079 }
6080 6080
6081 6081 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6082 6082 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6083 6083 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6084 6084 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6085 6085 		    "navail: %d", ddi_status, navail));
6086 6086 return (NXGE_ERROR | NXGE_DDI_FAILED);
6087 6087 }
6088 6088
6089 6089 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6090 6090 "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6091 6091 nintrs, navail));
6092 6092
6093 6093 /* PSARC/2007/453 MSI-X interrupt limit override */
6094 6094 if (int_type == DDI_INTR_TYPE_MSIX) {
6095 6095 nrequest = nxge_create_msi_property(nxgep);
6096 6096 if (nrequest < navail) {
6097 6097 navail = nrequest;
6098 6098 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6099 6099 "nxge_add_intrs_adv_type: nintrs %d "
6100 6100 "navail %d (nrequest %d)",
6101 6101 nintrs, navail, nrequest));
6102 6102 }
6103 6103 }
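	/*
	 * nxge_create_msi_property() is what publishes the
	 * "#msix-request" device property that nxge_remove_intrs()
	 * deletes again below, capping the MSI-X allocation per
	 * PSARC/2007/453.
	 */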
6104 6104
6105 6105 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
6106 6106 /* MSI must be power of 2 */
6107 6107 if ((navail & 16) == 16) {
6108 6108 navail = 16;
6109 6109 } else if ((navail & 8) == 8) {
6110 6110 navail = 8;
6111 6111 } else if ((navail & 4) == 4) {
6112 6112 navail = 4;
6113 6113 } else if ((navail & 2) == 2) {
6114 6114 navail = 2;
6115 6115 } else {
6116 6116 navail = 1;
6117 6117 }
6118 6118 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6119 6119 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
6120 6120 "navail %d", nintrs, navail));
6121 6121 }
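	/*
	 * For navail < 32 the cascade above rounds navail down to the
	 * largest power of two.  A general form of the same idea, shown
	 * only as a sketch, would use ddi_fls(9F):
	 *
	 *	navail = 1 << (ddi_fls(navail) - 1);
	 */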
6122 6122
6123 6123 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6124 6124 DDI_INTR_ALLOC_NORMAL);
6125 6125 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6126 6126 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6127 6127 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6128 6128 navail, &nactual, behavior);
6129 6129 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6130 6130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6131 6131 " ddi_intr_alloc() failed: %d",
6132 6132 ddi_status));
6133 6133 kmem_free(intrp->htable, intrp->intr_size);
6134 6134 return (NXGE_ERROR | NXGE_DDI_FAILED);
6135 6135 }
6136 6136
6137 6137 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6138 6138 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6139 6139 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6140 6140 " ddi_intr_get_pri() failed: %d",
6141 6141 ddi_status));
6142 6142 /* Free already allocated interrupts */
6143 6143 for (y = 0; y < nactual; y++) {
6144 6144 (void) ddi_intr_free(intrp->htable[y]);
6145 6145 }
6146 6146
6147 6147 kmem_free(intrp->htable, intrp->intr_size);
6148 6148 return (NXGE_ERROR | NXGE_DDI_FAILED);
6149 6149 }
6150 6150
6151 6151 nrequired = 0;
6152 6152 switch (nxgep->niu_type) {
6153 6153 default:
6154 6154 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6155 6155 break;
6156 6156
6157 6157 case N2_NIU:
6158 6158 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6159 6159 break;
6160 6160 }
6161 6161
6162 6162 if (status != NXGE_OK) {
6163 6163 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6164 6164 "nxge_add_intrs_adv_typ:nxge_ldgv_init "
6165 6165 "failed: 0x%x", status));
6166 6166 /* Free already allocated interrupts */
6167 6167 for (y = 0; y < nactual; y++) {
6168 6168 (void) ddi_intr_free(intrp->htable[y]);
6169 6169 }
6170 6170
6171 6171 kmem_free(intrp->htable, intrp->intr_size);
6172 6172 return (status);
6173 6173 }
6174 6174
6175 6175 ldgp = nxgep->ldgvp->ldgp;
6176 6176 for (x = 0; x < nrequired; x++, ldgp++) {
6177 6177 ldgp->vector = (uint8_t)x;
6178 6178 ldgp->intdata = SID_DATA(ldgp->func, x);
6179 6179 arg1 = ldgp->ldvp;
6180 6180 arg2 = nxgep;
6181 6181 if (ldgp->nldvs == 1) {
6182 6182 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6183 6183 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6184 6184 "nxge_add_intrs_adv_type: "
6185 6185 "arg1 0x%x arg2 0x%x: "
6186 6186 "1-1 int handler (entry %d intdata 0x%x)\n",
6187 6187 arg1, arg2,
6188 6188 x, ldgp->intdata));
6189 6189 } else if (ldgp->nldvs > 1) {
6190 6190 inthandler = (uint_t *)ldgp->sys_intr_handler;
6191 6191 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6192 6192 "nxge_add_intrs_adv_type: "
6193 6193 "arg1 0x%x arg2 0x%x: "
6194 6194 "nldevs %d int handler "
6195 6195 "(entry %d intdata 0x%x)\n",
6196 6196 arg1, arg2,
6197 6197 ldgp->nldvs, x, ldgp->intdata));
6198 6198 }
6199 6199
6200 6200 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6201 6201 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
6202 6202 "htable 0x%llx", x, intrp->htable[x]));
6203 6203
6204 6204 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6205 6205 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6206 6206 != DDI_SUCCESS) {
6207 6207 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6208 6208 "==> nxge_add_intrs_adv_type: failed #%d "
6209 6209 "status 0x%x", x, ddi_status));
6210 6210 for (y = 0; y < intrp->intr_added; y++) {
6211 6211 (void) ddi_intr_remove_handler(
6212 6212 intrp->htable[y]);
6213 6213 }
6214 6214 /* Free already allocated intr */
6215 6215 for (y = 0; y < nactual; y++) {
6216 6216 (void) ddi_intr_free(intrp->htable[y]);
6217 6217 }
6218 6218 kmem_free(intrp->htable, intrp->intr_size);
6219 6219
6220 6220 (void) nxge_ldgv_uninit(nxgep);
6221 6221
6222 6222 return (NXGE_ERROR | NXGE_DDI_FAILED);
6223 6223 }
6224 6224
6225 6225 ldgp->htable_idx = x;
6226 6226 intrp->intr_added++;
6227 6227 }
6228 6228
6229 6229 intrp->msi_intx_cnt = nactual;
6230 6230
6231 6231 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6232 6232 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6233 6233 navail, nactual,
6234 6234 intrp->msi_intx_cnt,
6235 6235 intrp->intr_added));
6236 6236
6237 6237 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6238 6238
6239 6239 (void) nxge_intr_ldgv_init(nxgep);
6240 6240
6241 6241 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6242 6242
6243 6243 return (status);
6244 6244 }
6245 6245
6246 6246 /*ARGSUSED*/
6247 6247 static nxge_status_t
6248 6248 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6249 6249 {
6250 6250 dev_info_t *dip = nxgep->dip;
6251 6251 p_nxge_ldg_t ldgp;
6252 6252 p_nxge_intr_t intrp;
6253 6253 uint_t *inthandler;
6254 6254 void *arg1, *arg2;
6255 6255 int behavior;
6256 6256 int nintrs, navail;
6257 6257 int nactual, nrequired;
6258 6258 int inum = 0;
6259 6259 int x, y;
6260 6260 int ddi_status = DDI_SUCCESS;
6261 6261 nxge_status_t status = NXGE_OK;
6262 6262
6263 6263 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6264 6264 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6265 6265 intrp->start_inum = 0;
6266 6266
6267 6267 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6268 6268 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6269 6269 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6270 6270 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6271 6271 		    "nintrs: %d", ddi_status, nintrs));
6272 6272 return (NXGE_ERROR | NXGE_DDI_FAILED);
6273 6273 }
6274 6274
6275 6275 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6276 6276 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6277 6277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6278 6278 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6279 6279 		    "navail: %d", ddi_status, navail));
6280 6280 return (NXGE_ERROR | NXGE_DDI_FAILED);
6281 6281 }
6282 6282
6283 6283 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6284 6284 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6285 6285 nintrs, navail));
6286 6286
6287 6287 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6288 6288 DDI_INTR_ALLOC_NORMAL);
6289 6289 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6290 6290 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6291 6291 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6292 6292 navail, &nactual, behavior);
6293 6293 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6294 6294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6295 6295 " ddi_intr_alloc() failed: %d",
6296 6296 ddi_status));
6297 6297 kmem_free(intrp->htable, intrp->intr_size);
6298 6298 return (NXGE_ERROR | NXGE_DDI_FAILED);
6299 6299 }
6300 6300
6301 6301 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6302 6302 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6303 6303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6304 6304 " ddi_intr_get_pri() failed: %d",
6305 6305 ddi_status));
6306 6306 /* Free already allocated interrupts */
6307 6307 for (y = 0; y < nactual; y++) {
6308 6308 (void) ddi_intr_free(intrp->htable[y]);
6309 6309 }
6310 6310
6311 6311 kmem_free(intrp->htable, intrp->intr_size);
6312 6312 return (NXGE_ERROR | NXGE_DDI_FAILED);
6313 6313 }
6314 6314
6315 6315 nrequired = 0;
6316 6316 switch (nxgep->niu_type) {
6317 6317 default:
6318 6318 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6319 6319 break;
6320 6320
6321 6321 case N2_NIU:
6322 6322 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6323 6323 break;
6324 6324 }
6325 6325
6326 6326 if (status != NXGE_OK) {
6327 6327 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6328 6328 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6329 6329 "failed: 0x%x", status));
6330 6330 /* Free already allocated interrupts */
6331 6331 for (y = 0; y < nactual; y++) {
6332 6332 (void) ddi_intr_free(intrp->htable[y]);
6333 6333 }
6334 6334
6335 6335 kmem_free(intrp->htable, intrp->intr_size);
6336 6336 return (status);
6337 6337 }
6338 6338
6339 6339 ldgp = nxgep->ldgvp->ldgp;
6340 6340 for (x = 0; x < nrequired; x++, ldgp++) {
6341 6341 ldgp->vector = (uint8_t)x;
6342 6342 if (nxgep->niu_type != N2_NIU) {
6343 6343 ldgp->intdata = SID_DATA(ldgp->func, x);
6344 6344 }
6345 6345
6346 6346 arg1 = ldgp->ldvp;
6347 6347 arg2 = nxgep;
6348 6348 if (ldgp->nldvs == 1) {
6349 6349 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6350 6350 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6351 6351 "nxge_add_intrs_adv_type_fix: "
6352 6352 "1-1 int handler(%d) ldg %d ldv %d "
6353 6353 "arg1 $%p arg2 $%p\n",
6354 6354 x, ldgp->ldg, ldgp->ldvp->ldv,
6355 6355 arg1, arg2));
6356 6356 } else if (ldgp->nldvs > 1) {
6357 6357 inthandler = (uint_t *)ldgp->sys_intr_handler;
6358 6358 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6359 6359 "nxge_add_intrs_adv_type_fix: "
6360 6360 			    "shared ldv %d int handler(%d) ldv %d ldg %d "
6361 6361 "arg1 0x%016llx arg2 0x%016llx\n",
6362 6362 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6363 6363 arg1, arg2));
6364 6364 }
6365 6365
6366 6366 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6367 6367 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6368 6368 != DDI_SUCCESS) {
6369 6369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6370 6370 "==> nxge_add_intrs_adv_type_fix: failed #%d "
6371 6371 "status 0x%x", x, ddi_status));
6372 6372 for (y = 0; y < intrp->intr_added; y++) {
6373 6373 (void) ddi_intr_remove_handler(
6374 6374 intrp->htable[y]);
6375 6375 }
6376 6376 for (y = 0; y < nactual; y++) {
6377 6377 (void) ddi_intr_free(intrp->htable[y]);
6378 6378 }
6379 6379 /* Free already allocated intr */
6380 6380 kmem_free(intrp->htable, intrp->intr_size);
6381 6381
6382 6382 (void) nxge_ldgv_uninit(nxgep);
6383 6383
6384 6384 return (NXGE_ERROR | NXGE_DDI_FAILED);
6385 6385 }
6386 6386
6387 6387 ldgp->htable_idx = x;
6388 6388 intrp->intr_added++;
6389 6389 }
6390 6390
6391 6391 intrp->msi_intx_cnt = nactual;
6392 6392
6393 6393 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6394 6394
6395 6395 status = nxge_intr_ldgv_init(nxgep);
6396 6396 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6397 6397
6398 6398 return (status);
6399 6399 }
6400 6400
6401 6401 static void
6402 6402 nxge_remove_intrs(p_nxge_t nxgep)
6403 6403 {
6404 6404 int i, inum;
6405 6405 p_nxge_intr_t intrp;
6406 6406
6407 6407 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6408 6408 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6409 6409 if (!intrp->intr_registered) {
6410 6410 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6411 6411 "<== nxge_remove_intrs: interrupts not registered"));
6412 6412 return;
6413 6413 }
6414 6414
6415 6415 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6416 6416
6417 6417 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6418 6418 (void) ddi_intr_block_disable(intrp->htable,
6419 6419 intrp->intr_added);
6420 6420 } else {
6421 6421 for (i = 0; i < intrp->intr_added; i++) {
6422 6422 (void) ddi_intr_disable(intrp->htable[i]);
6423 6423 }
6424 6424 }
6425 6425
6426 6426 for (inum = 0; inum < intrp->intr_added; inum++) {
6427 6427 if (intrp->htable[inum]) {
6428 6428 (void) ddi_intr_remove_handler(intrp->htable[inum]);
6429 6429 }
6430 6430 }
6431 6431
6432 6432 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6433 6433 if (intrp->htable[inum]) {
6434 6434 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6435 6435 "nxge_remove_intrs: ddi_intr_free inum %d "
6436 6436 "msi_intx_cnt %d intr_added %d",
6437 6437 inum,
6438 6438 intrp->msi_intx_cnt,
6439 6439 intrp->intr_added));
6440 6440
6441 6441 (void) ddi_intr_free(intrp->htable[inum]);
6442 6442 }
6443 6443 }
6444 6444
6445 6445 kmem_free(intrp->htable, intrp->intr_size);
6446 6446 intrp->intr_registered = B_FALSE;
6447 6447 intrp->intr_enabled = B_FALSE;
6448 6448 intrp->msi_intx_cnt = 0;
6449 6449 intrp->intr_added = 0;
6450 6450
6451 6451 (void) nxge_ldgv_uninit(nxgep);
6452 6452
6453 6453 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6454 6454 "#msix-request");
6455 6455
6456 6456 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6457 6457 }
6458 6458
6459 6459 /*ARGSUSED*/
6460 6460 static void
6461 6461 nxge_intrs_enable(p_nxge_t nxgep)
6462 6462 {
6463 6463 p_nxge_intr_t intrp;
6464 6464 int i;
6465 6465 int status;
6466 6466
6467 6467 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6468 6468
6469 6469 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6470 6470
6471 6471 if (!intrp->intr_registered) {
6472 6472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6473 6473 "interrupts are not registered"));
6474 6474 return;
6475 6475 }
6476 6476
6477 6477 if (intrp->intr_enabled) {
6478 6478 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6479 6479 "<== nxge_intrs_enable: already enabled"));
6480 6480 return;
6481 6481 }
6482 6482
6483 6483 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6484 6484 status = ddi_intr_block_enable(intrp->htable,
6485 6485 intrp->intr_added);
6486 6486 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6487 6487 "block enable - status 0x%x total inums #%d\n",
6488 6488 status, intrp->intr_added));
6489 6489 } else {
6490 6490 for (i = 0; i < intrp->intr_added; i++) {
6491 6491 status = ddi_intr_enable(intrp->htable[i]);
6492 6492 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6493 6493 "ddi_intr_enable:enable - status 0x%x "
6494 6494 "total inums %d enable inum #%d\n",
6495 6495 status, intrp->intr_added, i));
6496 6496 if (status == DDI_SUCCESS) {
6497 6497 intrp->intr_enabled = B_TRUE;
6498 6498 }
6499 6499 }
6500 6500 }
6501 6501
6502 6502 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6503 6503 }
6504 6504
6505 6505 /*ARGSUSED*/
6506 6506 static void
6507 6507 nxge_intrs_disable(p_nxge_t nxgep)
6508 6508 {
6509 6509 p_nxge_intr_t intrp;
6510 6510 int i;
6511 6511
6512 6512 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6513 6513
6514 6514 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6515 6515
6516 6516 if (!intrp->intr_registered) {
6517 6517 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6518 6518 "interrupts are not registered"));
6519 6519 return;
6520 6520 }
6521 6521
6522 6522 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6523 6523 (void) ddi_intr_block_disable(intrp->htable,
6524 6524 intrp->intr_added);
6525 6525 } else {
6526 6526 for (i = 0; i < intrp->intr_added; i++) {
6527 6527 (void) ddi_intr_disable(intrp->htable[i]);
6528 6528 }
6529 6529 }
6530 6530
6531 6531 intrp->intr_enabled = B_FALSE;
6532 6532 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6533 6533 }
6534 6534
6535 6535 nxge_status_t
6536 6536 nxge_mac_register(p_nxge_t nxgep)
6537 6537 {
6538 6538 mac_register_t *macp;
6539 6539 int status;
6540 6540
6541 6541 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6542 6542
6543 6543 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6544 6544 return (NXGE_ERROR);
6545 6545
6546 6546 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6547 6547 macp->m_driver = nxgep;
6548 6548 macp->m_dip = nxgep->dip;
6549 6549 if (!isLDOMguest(nxgep)) {
6550 6550 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6551 6551 } else {
6552 6552 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6553 6553 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6554 6554 		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
6555 6555 }
6556 6556 macp->m_callbacks = &nxge_m_callbacks;
6557 6557 macp->m_min_sdu = 0;
6558 6558 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6559 6559 NXGE_EHEADER_VLAN_CRC;
6560 6560 macp->m_max_sdu = nxgep->mac.default_mtu;
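	/*
	 * e.g. a 1522-byte maxframesize with a 22-byte
	 * NXGE_EHEADER_VLAN_CRC (14 Ethernet + 4 VLAN + 4 FCS, assumed)
	 * yields the standard 1500-byte MTU.
	 */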
6561 6561 macp->m_margin = VLAN_TAGSZ;
6562 6562 macp->m_priv_props = nxge_priv_props;
6563 6563 if (isLDOMguest(nxgep))
6564 6564 macp->m_v12n = MAC_VIRT_LEVEL1;
6565 6565 else
6566 6566 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;
6567 6567
6568 6568 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6569 6569 "==> nxge_mac_register: instance %d "
6570 6570 "max_sdu %d margin %d maxframe %d (header %d)",
6571 6571 nxgep->instance,
6572 6572 macp->m_max_sdu, macp->m_margin,
6573 6573 nxgep->mac.maxframesize,
6574 6574 NXGE_EHEADER_VLAN_CRC));
6575 6575
6576 6576 status = mac_register(macp, &nxgep->mach);
6577 6577 if (isLDOMguest(nxgep)) {
6578 6578 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
6579 6579 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
6580 6580 }
6581 6581 mac_free(macp);
6582 6582
6583 6583 if (status != 0) {
6584 6584 cmn_err(CE_WARN,
6585 6585 "!nxge_mac_register failed (status %d instance %d)",
6586 6586 status, nxgep->instance);
6587 6587 return (NXGE_ERROR);
6588 6588 }
6589 6589
6590 6590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6591 6591 "(instance %d)", nxgep->instance));
6592 6592
6593 6593 return (NXGE_OK);
6594 6594 }
6595 6595
6596 6596 void
6597 6597 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6598 6598 {
6599 6599 ssize_t size;
6600 6600 mblk_t *nmp;
6601 6601 uint8_t blk_id;
6602 6602 uint8_t chan;
6603 6603 uint32_t err_id;
6604 6604 err_inject_t *eip;
6605 6605
6606 6606 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6607 6607
6608 6608 size = 1024;
6609 6609 nmp = mp->b_cont;
6610 6610 eip = (err_inject_t *)nmp->b_rptr;
6611 6611 blk_id = eip->blk_id;
6612 6612 err_id = eip->err_id;
6613 6613 chan = eip->chan;
6614 6614 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6615 6615 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6616 6616 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6617 6617 switch (blk_id) {
6618 6618 case MAC_BLK_ID:
6619 6619 break;
6620 6620 case TXMAC_BLK_ID:
6621 6621 break;
6622 6622 case RXMAC_BLK_ID:
6623 6623 break;
6624 6624 case MIF_BLK_ID:
6625 6625 break;
6626 6626 case IPP_BLK_ID:
6627 6627 nxge_ipp_inject_err(nxgep, err_id);
6628 6628 break;
6629 6629 case TXC_BLK_ID:
6630 6630 nxge_txc_inject_err(nxgep, err_id);
6631 6631 break;
6632 6632 case TXDMA_BLK_ID:
6633 6633 nxge_txdma_inject_err(nxgep, err_id, chan);
6634 6634 break;
6635 6635 case RXDMA_BLK_ID:
6636 6636 nxge_rxdma_inject_err(nxgep, err_id, chan);
6637 6637 break;
6638 6638 case ZCP_BLK_ID:
6639 6639 nxge_zcp_inject_err(nxgep, err_id);
6640 6640 break;
6641 6641 case ESPC_BLK_ID:
6642 6642 break;
6643 6643 case FFLP_BLK_ID:
6644 6644 break;
6645 6645 case PHY_BLK_ID:
6646 6646 break;
6647 6647 case ETHER_SERDES_BLK_ID:
6648 6648 break;
6649 6649 case PCIE_SERDES_BLK_ID:
6650 6650 break;
6651 6651 case VIR_BLK_ID:
6652 6652 break;
6653 6653 }
6654 6654
6655 6655 nmp->b_wptr = nmp->b_rptr + size;
6656 6656 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6657 6657
6658 6658 miocack(wq, mp, (int)size, 0);
6659 6659 }
6660 6660
6661 6661 static int
6662 6662 nxge_init_common_dev(p_nxge_t nxgep)
6663 6663 {
6664 6664 p_nxge_hw_list_t hw_p;
6665 6665 dev_info_t *p_dip;
6666 6666
6667 6667 ASSERT(nxgep != NULL);
6668 6668
6669 6669 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6670 6670
6671 6671 p_dip = nxgep->p_dip;
6672 6672 MUTEX_ENTER(&nxge_common_lock);
6673 6673 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6674 6674 "==> nxge_init_common_dev:func # %d",
6675 6675 nxgep->function_num));
6676 6676 /*
6677 6677 	 * Loop through the existing per-Neptune hardware list.
6678 6678 */
6679 6679 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6680 6680 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6681 6681 "==> nxge_init_common_device:func # %d "
6682 6682 "hw_p $%p parent dip $%p",
6683 6683 nxgep->function_num,
6684 6684 hw_p,
6685 6685 p_dip));
6686 6686 if (hw_p->parent_devp == p_dip) {
6687 6687 nxgep->nxge_hw_p = hw_p;
6688 6688 hw_p->ndevs++;
6689 6689 hw_p->nxge_p[nxgep->function_num] = nxgep;
6690 6690 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6691 6691 "==> nxge_init_common_device:func # %d "
6692 6692 "hw_p $%p parent dip $%p "
6693 6693 "ndevs %d (found)",
6694 6694 nxgep->function_num,
6695 6695 hw_p,
6696 6696 p_dip,
6697 6697 hw_p->ndevs));
6698 6698 break;
6699 6699 }
6700 6700 }
6701 6701
6702 6702 if (hw_p == NULL) {
6703 6703
6704 6704 char **prop_val;
6705 6705 uint_t prop_len;
6706 6706 int i;
6707 6707
6708 6708 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6709 6709 "==> nxge_init_common_device:func # %d "
6710 6710 "parent dip $%p (new)",
6711 6711 nxgep->function_num,
6712 6712 p_dip));
6713 6713 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6714 6714 hw_p->parent_devp = p_dip;
6715 6715 hw_p->magic = NXGE_NEPTUNE_MAGIC;
6716 6716 nxgep->nxge_hw_p = hw_p;
6717 6717 hw_p->ndevs++;
6718 6718 hw_p->nxge_p[nxgep->function_num] = nxgep;
6719 6719 hw_p->next = nxge_hw_list;
6720 6720 if (nxgep->niu_type == N2_NIU) {
6721 6721 hw_p->niu_type = N2_NIU;
6722 6722 hw_p->platform_type = P_NEPTUNE_NIU;
6723 6723 hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
6724 6724 } else {
6725 6725 hw_p->niu_type = NIU_TYPE_NONE;
6726 6726 hw_p->platform_type = P_NEPTUNE_NONE;
6727 6727 hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
6728 6728 }
6729 6729
6730 6730 hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
6731 6731 hw_p->tcam_size, KM_SLEEP);
6732 6732
6733 6733 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6734 6734 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6735 6735 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6736 6736 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6737 6737
6738 6738 nxge_hw_list = hw_p;
6739 6739
6740 6740 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
6741 6741 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
6742 6742 for (i = 0; i < prop_len; i++) {
6743 6743 if ((strcmp((caddr_t)prop_val[i],
6744 6744 NXGE_ROCK_COMPATIBLE) == 0)) {
6745 6745 hw_p->platform_type = P_NEPTUNE_ROCK;
6746 6746 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6747 6747 "ROCK hw_p->platform_type %d",
6748 6748 hw_p->platform_type));
6749 6749 break;
6750 6750 }
6751 6751 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6752 6752 "nxge_init_common_dev: read compatible"
6753 6753 " property[%d] val[%s]",
6754 6754 i, (caddr_t)prop_val[i]));
6755 6755 }
6756 6756 
6757 6757 			ddi_prop_free(prop_val);
6758 6758 		}
6759 6759
6760 6760 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6761 6761 }
6762 6762
6763 6763 MUTEX_EXIT(&nxge_common_lock);
6764 6764
6765 6765 nxgep->platform_type = hw_p->platform_type;
6766 6766 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
6767 6767 nxgep->platform_type));
6768 6768 if (nxgep->niu_type != N2_NIU) {
6769 6769 nxgep->niu_type = hw_p->niu_type;
6770 6770 }
6771 6771
6772 6772 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6773 6773 "==> nxge_init_common_device (nxge_hw_list) $%p",
6774 6774 nxge_hw_list));
6775 6775 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6776 6776
6777 6777 return (NXGE_OK);
6778 6778 }
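
nxge_init_common_dev() above implements a standard multi-function device pattern: state shared by sibling ports on one chip, keyed by the parent devinfo node and reference-counted through ndevs. A stripped-down sketch of the lookup-or-create step, kernel context assumed and all names illustrative rather than the driver's:

	typedef struct shared_hw {
		struct shared_hw	*next;
		dev_info_t		*parent;	/* lookup key: parent dip */
		int			ndevs;		/* ports attached to this chip */
	} shared_hw_t;

	static shared_hw_t *hw_list;	/* guarded by a global lock */

	static shared_hw_t *
	shared_hw_hold(dev_info_t *parent)
	{
		shared_hw_t *p;

		/* caller holds the list lock */
		for (p = hw_list; p != NULL; p = p->next)
			if (p->parent == parent)
				break;
		if (p == NULL) {		/* first port: create, push on head */
			p = kmem_zalloc(sizeof (*p), KM_SLEEP);
			p->parent = parent;
			p->next = hw_list;
			hw_list = p;
		}
		p->ndevs++;
		return (p);
	}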
6779 6779
6780 6780 static void
6781 6781 nxge_uninit_common_dev(p_nxge_t nxgep)
6782 6782 {
6783 6783 p_nxge_hw_list_t hw_p, h_hw_p;
6784 6784 p_nxge_dma_pt_cfg_t p_dma_cfgp;
6785 6785 p_nxge_hw_pt_cfg_t p_cfgp;
6786 6786 dev_info_t *p_dip;
6787 6787
6788 6788 ASSERT(nxgep != NULL);
6789 6789
6790 6790 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6791 6791 if (nxgep->nxge_hw_p == NULL) {
6792 6792 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6793 6793 "<== nxge_uninit_common_device (no common)"));
6794 6794 return;
6795 6795 }
6796 6796
6797 6797 MUTEX_ENTER(&nxge_common_lock);
6798 6798 h_hw_p = nxge_hw_list;
6799 6799 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6800 6800 p_dip = hw_p->parent_devp;
6801 6801 if (nxgep->nxge_hw_p == hw_p &&
6802 6802 p_dip == nxgep->p_dip &&
6803 6803 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6804 6804 hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6805 6805
6806 6806 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6807 6807 "==> nxge_uninit_common_device:func # %d "
6808 6808 "hw_p $%p parent dip $%p "
6809 6809 "ndevs %d (found)",
6810 6810 nxgep->function_num,
6811 6811 hw_p,
6812 6812 p_dip,
6813 6813 hw_p->ndevs));
6814 6814
6815 6815 /*
6816 6816 			 * Release the RDC table, a shared resource
6817 6817 * of the nxge hardware. The RDC table was
6818 6818 * assigned to this instance of nxge in
6819 6819 * nxge_use_cfg_dma_config().
6820 6820 */
6821 6821 if (!isLDOMguest(nxgep)) {
6822 6822 p_dma_cfgp =
6823 6823 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6824 6824 p_cfgp =
6825 6825 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6826 6826 (void) nxge_fzc_rdc_tbl_unbind(nxgep,
6827 6827 p_cfgp->def_mac_rxdma_grpid);
6828 6828
6829 6829 /* Cleanup any outstanding groups. */
6830 6830 nxge_grp_cleanup(nxgep);
6831 6831 }
6832 6832
6833 6833 if (hw_p->ndevs) {
6834 6834 hw_p->ndevs--;
6835 6835 }
6836 6836 hw_p->nxge_p[nxgep->function_num] = NULL;
6837 6837 if (!hw_p->ndevs) {
6838 6838 KMEM_FREE(hw_p->tcam,
6839 6839 sizeof (tcam_flow_spec_t) *
6840 6840 hw_p->tcam_size);
6841 6841 MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6842 6842 MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6843 6843 MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6844 6844 MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6845 6845 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6846 6846 "==> nxge_uninit_common_device: "
6847 6847 "func # %d "
6848 6848 "hw_p $%p parent dip $%p "
6849 6849 "ndevs %d (last)",
6850 6850 nxgep->function_num,
6851 6851 hw_p,
6852 6852 p_dip,
6853 6853 hw_p->ndevs));
6854 6854
6855 6855 nxge_hio_uninit(nxgep);
6856 6856
6857 6857 if (hw_p == nxge_hw_list) {
6858 6858 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6859 6859 "==> nxge_uninit_common_device:"
6860 6860 "remove head func # %d "
6861 6861 "hw_p $%p parent dip $%p "
6862 6862 "ndevs %d (head)",
6863 6863 nxgep->function_num,
6864 6864 hw_p,
6865 6865 p_dip,
6866 6866 hw_p->ndevs));
6867 6867 nxge_hw_list = hw_p->next;
6868 6868 } else {
6869 6869 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6870 6870 "==> nxge_uninit_common_device:"
6871 6871 "remove middle func # %d "
6872 6872 "hw_p $%p parent dip $%p "
6873 6873 "ndevs %d (middle)",
6874 6874 nxgep->function_num,
6875 6875 hw_p,
6876 6876 p_dip,
6877 6877 hw_p->ndevs));
6878 6878 h_hw_p->next = hw_p->next;
6879 6879 }
6880 6880
6881 6881 nxgep->nxge_hw_p = NULL;
6882 6882 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6883 6883 }
6884 6884 break;
6885 6885 } else {
6886 6886 h_hw_p = hw_p;
6887 6887 }
6888 6888 }
6889 6889
6890 6890 MUTEX_EXIT(&nxge_common_lock);
6891 6891 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6892 6892 "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6893 6893 nxge_hw_list));
6894 6894
6895 6895 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
6896 6896 }
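
The teardown above is the matching release: h_hw_p trails hw_p during the walk so the entry can be unlinked whether it sits at the head or in the middle of the list, and the shared state is freed only when the last port detaches (ndevs reaching zero). The same unlink reduced to its skeleton, continuing the sketch after nxge_init_common_dev():

	static void
	shared_hw_rele(shared_hw_t *node)
	{
		shared_hw_t *p, *prev = hw_list;	/* prev trails p, like h_hw_p */

		/* caller holds the list lock */
		for (p = hw_list; p != NULL; prev = p, p = p->next) {
			if (p != node)
				continue;
			if (--p->ndevs == 0) {
				if (p == hw_list)
					hw_list = p->next;	/* remove head */
				else
					prev->next = p->next;	/* remove middle */
				kmem_free(p, sizeof (*p));
			}
			break;
		}
	}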
6897 6897
6898 6898 /*
6899 6899 * Determines the number of ports from the niu_type or the platform type.
6900 6900  * Returns the number of ports, or zero on failure.
6901 6901 */
6902 6902
6903 6903 int
6904 6904 nxge_get_nports(p_nxge_t nxgep)
6905 6905 {
6906 6906 int nports = 0;
6907 6907
6908 6908 switch (nxgep->niu_type) {
6909 6909 case N2_NIU:
6910 6910 case NEPTUNE_2_10GF:
6911 6911 nports = 2;
6912 6912 break;
6913 6913 case NEPTUNE_4_1GC:
6914 6914 case NEPTUNE_2_10GF_2_1GC:
6915 6915 case NEPTUNE_1_10GF_3_1GC:
6916 6916 case NEPTUNE_1_1GC_1_10GF_2_1GC:
6917 6917 case NEPTUNE_2_10GF_2_1GRF:
6918 6918 nports = 4;
6919 6919 break;
6920 6920 default:
6921 6921 switch (nxgep->platform_type) {
6922 6922 case P_NEPTUNE_NIU:
6923 6923 case P_NEPTUNE_ATLAS_2PORT:
6924 6924 nports = 2;
6925 6925 break;
6926 6926 case P_NEPTUNE_ATLAS_4PORT:
6927 6927 case P_NEPTUNE_MARAMBA_P0:
6928 6928 case P_NEPTUNE_MARAMBA_P1:
6929 6929 case P_NEPTUNE_ROCK:
6930 6930 case P_NEPTUNE_ALONSO:
6931 6931 nports = 4;
6932 6932 break;
6933 6933 default:
6934 6934 break;
6935 6935 }
6936 6936 break;
6937 6937 }
6938 6938
6939 6939 return (nports);
6940 6940 }
6941 6941
6942 6942 /*
6943 6943 * The following two functions are to support
6944 6944 * PSARC/2007/453 MSI-X interrupt limit override.
6945 6945 */
6946 6946 static int
6947 6947 nxge_create_msi_property(p_nxge_t nxgep)
6948 6948 {
6949 6949 int nmsi;
6950 6950 extern int ncpus;
6951 6951
6952 6952 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6953 6953
6954 6954 switch (nxgep->mac.portmode) {
6955 6955 case PORT_10G_COPPER:
6956 6956 case PORT_10G_FIBER:
6957 6957 case PORT_10G_TN1010:
6958 6958 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6959 6959 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6960 6960 /*
6961 6961 		 * At most 8 MSI-X vectors are requested by default.
6962 6962 		 * If fewer than 8 CPUs are present, the request is
6963 6963 		 * capped at the number of CPUs.
6964 6964 */
6965 6965 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6966 6966 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
6967 6967 nxge_msix_10g_intrs));
6968 6968 if ((nxge_msix_10g_intrs == 0) ||
6969 6969 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
6970 6970 nmsi = NXGE_MSIX_REQUEST_10G;
6971 6971 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6972 6972 "==>nxge_create_msi_property (10G): reset to 8"));
6973 6973 } else {
6974 6974 nmsi = nxge_msix_10g_intrs;
6975 6975 }
6976 6976
6977 6977 /*
6978 6978 		 * If the number of interrupts requested is 8
6979 6979 		 * (the default), the request is still capped at
6980 6980 		 * the number of CPUs.
6981 6981 */
6982 6982 if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
6983 6983 (ncpus < nmsi)) {
6984 6984 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6985 6985 			    "==>nxge_create_msi_property (10G): reset to ncpus %d", ncpus));
6986 6986 nmsi = ncpus;
6987 6987 }
6988 6988 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6989 6989 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6990 6990 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6991 6991 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6992 6992 break;
6993 6993
6994 6994 default:
6995 6995 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6996 6996 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6997 6997 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6998 6998 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
6999 6999 nxge_msix_1g_intrs));
7000 7000 if ((nxge_msix_1g_intrs == 0) ||
7001 7001 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
7002 7002 nmsi = NXGE_MSIX_REQUEST_1G;
7003 7003 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7004 7004 "==>nxge_create_msi_property (1G): reset to 2"));
7005 7005 } else {
7006 7006 nmsi = nxge_msix_1g_intrs;
7007 7007 }
7008 7008 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7009 7009 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
7010 7010 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
7011 7011 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
7012 7012 break;
7013 7013 }
7014 7014
7015 7015 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
7016 7016 return (nmsi);
7017 7017 }
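
The sizing logic above reduces to a clamp: take the tunable when it lies in (0, NXGE_MSIX_MAX_ALLOWED], otherwise fall back to the port default, and additionally cap the default 10G request at ncpus. A condensed sketch; the helper name is illustrative, and the defaults of 8 (10G) and 2 (1G) come from the debug messages above:

	static int
	msix_request(uint32_t tunable, int dflt, int ncpus, boolean_t is_10g)
	{
		int nmsi;

		if (tunable == 0 || tunable > NXGE_MSIX_MAX_ALLOWED)
			nmsi = dflt;		/* out of range: use default */
		else
			nmsi = (int)tunable;

		/* only the default 10G request is capped at the CPU count */
		if (is_10g && nmsi == NXGE_MSIX_REQUEST_10G && ncpus < nmsi)
			nmsi = ncpus;
		return (nmsi);
	}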
7018 7018
7019 7019 /*
7020 7020  * The following is a software workaround for a Neptune hardware
7021 7021  * interrupt bug: the hardware may generate spurious interrupts after
7022 7022  * an interrupt handler has been removed.
7023 7023 */
7024 7024 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98
7025 7025 #define NXGE_PIM_RESET (1ULL << 29)
7026 7026 #define NXGE_GLU_RESET (1ULL << 30)
7027 7027 #define NXGE_NIU_RESET (1ULL << 31)
7028 7028 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \
7029 7029 NXGE_GLU_RESET | \
7030 7030 NXGE_NIU_RESET)
7031 7031
7032 7032 #define NXGE_WAIT_QUITE_TIME 200000
7033 7033 #define NXGE_WAIT_QUITE_RETRY 40
7034 7034 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */
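
For scale: NXGE_PCI_RESET_ALL sets bits 29 through 31 of the port-logic register, i.e. 0xE0000000. NXGE_PCI_RESET_WAIT of 1000000 being one second shows the delays are in microseconds, so the quiesce loop in nxge_niu_peu_reset() below waits at most NXGE_WAIT_QUITE_RETRY * NXGE_WAIT_QUITE_TIME = 40 * 200 ms = 8 seconds for a sibling port's timers to drain.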
7035 7035
7036 7036 static void
7037 7037 nxge_niu_peu_reset(p_nxge_t nxgep)
7038 7038 {
7039 7039 uint32_t rvalue;
7040 7040 p_nxge_hw_list_t hw_p;
7041 7041 p_nxge_t fnxgep;
7042 7042 int i, j;
7043 7043
7044 7044 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
7045 7045 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
7046 7046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7047 7047 "==> nxge_niu_peu_reset: NULL hardware pointer"));
7048 7048 return;
7049 7049 }
7050 7050
7051 7051 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7052 7052 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
7053 7053 hw_p->flags, nxgep->nxge_link_poll_timerid,
7054 7054 nxgep->nxge_timerid));
7055 7055
7056 7056 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
7057 7057 /*
7058 7058 	 * Make sure the other instances on the same hardware have
7059 7059 	 * stopped issuing PIOs and are in a quiescent state.
7060 7060 */
7061 7061 for (i = 0; i < NXGE_MAX_PORTS; i++) {
7062 7062 fnxgep = hw_p->nxge_p[i];
7063 7063 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7064 7064 "==> nxge_niu_peu_reset: checking entry %d "
7065 7065 "nxgep $%p", i, fnxgep));
7066 7066 #ifdef NXGE_DEBUG
7067 7067 if (fnxgep) {
7068 7068 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7069 7069 "==> nxge_niu_peu_reset: entry %d (function %d) "
7070 7070 "link timer id %d hw timer id %d",
7071 7071 i, fnxgep->function_num,
7072 7072 fnxgep->nxge_link_poll_timerid,
7073 7073 fnxgep->nxge_timerid));
7074 7074 }
7075 7075 #endif
7076 7076 if (fnxgep && fnxgep != nxgep &&
7077 7077 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
7078 7078 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7079 7079 "==> nxge_niu_peu_reset: checking $%p "
7080 7080 "(function %d) timer ids",
7081 7081 fnxgep, fnxgep->function_num));
7082 7082 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
7083 7083 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7084 7084 "==> nxge_niu_peu_reset: waiting"));
7085 7085 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7086 7086 if (!fnxgep->nxge_timerid &&
7087 7087 !fnxgep->nxge_link_poll_timerid) {
7088 7088 break;
7089 7089 }
7090 7090 }
7091 7091 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7092 7092 if (fnxgep->nxge_timerid ||
7093 7093 fnxgep->nxge_link_poll_timerid) {
7094 7094 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7095 7095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7096 7096 "<== nxge_niu_peu_reset: cannot reset "
7097 7097 "hardware (devices are still in use)"));
7098 7098 return;
7099 7099 }
7100 7100 }
7101 7101 }
7102 7102
7103 7103 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
7104 7104 hw_p->flags |= COMMON_RESET_NIU_PCI;
7105 7105 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
7106 7106 NXGE_PCI_PORT_LOGIC_OFFSET);
7107 7107 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7108 7108 "nxge_niu_peu_reset: read offset 0x%x (%d) "
7109 7109 "(data 0x%x)",
7110 7110 NXGE_PCI_PORT_LOGIC_OFFSET,
7111 7111 NXGE_PCI_PORT_LOGIC_OFFSET,
7112 7112 rvalue));
7113 7113
7114 7114 rvalue |= NXGE_PCI_RESET_ALL;
7115 7115 pci_config_put32(nxgep->dev_regs->nxge_pciregh,
7116 7116 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
7117 7117 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7118 7118 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
7119 7119 rvalue));
7120 7120
7121 7121 NXGE_DELAY(NXGE_PCI_RESET_WAIT);
7122 7122 }
7123 7123
7124 7124 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7125 7125 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
7126 7126 }
7127 7127
7128 7128 static void
7129 7129 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
7130 7130 {
7131 7131 p_dev_regs_t dev_regs;
7132 7132 uint32_t value;
7133 7133
7134 7134 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
7135 7135
7136 7136 if (!nxge_set_replay_timer) {
7137 7137 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7138 7138 "==> nxge_set_pci_replay_timeout: will not change "
7139 7139 "the timeout"));
7140 7140 return;
7141 7141 }
7142 7142
7143 7143 dev_regs = nxgep->dev_regs;
7144 7144 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7145 7145 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
7146 7146 dev_regs, dev_regs->nxge_pciregh));
7147 7147
7148 7148 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
7149 7149 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7150 7150 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
7151 7151 "no PCI handle",
7152 7152 dev_regs));
7153 7153 return;
7154 7154 }
7155 7155 value = (pci_config_get32(dev_regs->nxge_pciregh,
7156 7156 PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
7157 7157 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
7158 7158
7159 7159 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7160 7160 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
7161 7161 "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
7162 7162 pci_config_get32(dev_regs->nxge_pciregh,
7163 7163 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
7164 7164 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
7165 7165
7166 7166 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
7167 7167 value);
7168 7168
7169 7169 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7170 7170 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
7171 7171 pci_config_get32(dev_regs->nxge_pciregh,
7172 7172 PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
7173 7173
7174 7174 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
7175 7175 }
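
Note that the read-modify-write above ORs the new timeout into the register without first clearing the old field, so any bits already set in that field survive and the result is (old | new) rather than a clean replacement. A field-replacing variant would mask first; PCI_REPLAY_TIMEOUT_MASK here is hypothetical, since the driver defines only the offset and the shift:

	/* hypothetical field-replacing variant */
	value = pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET);
	value &= ~PCI_REPLAY_TIMEOUT_MASK;	/* clear the old field first */
	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	pci_config_put32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);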
7176 7176
7177 7177 /*
7178 7178 * quiesce(9E) entry point.
7179 7179 *
7180 7180 * This function is called when the system is single-threaded at high
7181 7181  * PIL with preemption disabled. Therefore, this function must not
7182 7182  * block.
7183 7183 *
7184 7184 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7185 7185 * DDI_FAILURE indicates an error condition and should almost never happen.
7186 7186 */
7187 7187 static int
7188 7188 nxge_quiesce(dev_info_t *dip)
7189 7189 {
7190 7190 int instance = ddi_get_instance(dip);
7191 7191 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
7192 7192
7193 7193 if (nxgep == NULL)
7194 7194 return (DDI_FAILURE);
7195 7195
7196 7196 /* Turn off debugging */
7197 7197 nxge_debug_level = NO_DEBUG;
7198 7198 nxgep->nxge_debug_level = NO_DEBUG;
7199 7199 npi_debug_level = NO_DEBUG;
7200 7200
7201 7201 /*
7202 7202 	 * Stop the link monitor only when linkchkmode is interrupt based.
7203 7203 */
7204 7204 if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
7205 7205 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
7206 7206 }
7207 7207
7208 7208 (void) nxge_intr_hw_disable(nxgep);
7209 7209
7210 7210 /*
7211 7211 * Reset the receive MAC side.
7212 7212 */
7213 7213 (void) nxge_rx_mac_disable(nxgep);
7214 7214
7215 7215 /* Disable and soft reset the IPP */
7216 7216 if (!isLDOMguest(nxgep))
7217 7217 (void) nxge_ipp_disable(nxgep);
7218 7218
7219 7219 /*
7220 7220 * Reset the transmit/receive DMA side.
7221 7221 */
7222 7222 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
7223 7223 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
7224 7224
7225 7225 /*
7226 7226 * Reset the transmit MAC side.
7227 7227 */
7228 7228 (void) nxge_tx_mac_disable(nxgep);
7229 7229
7230 7230 return (DDI_SUCCESS);
7231 7231 }
(1771 lines elided)