7127 remove -Wno-missing-braces from Makefile.uts
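With -Wno-missing-braces removed from Makefile.uts, gcc's -Wmissing-braces warning is active for uts code, so nested aggregate initializers in files such as hxge_main.c below have to be written with full braces. A minimal illustrative sketch of the pattern the warning flags (hypothetical structs, not taken from this file):

	struct point { int x, y; };
	struct rect  { struct point ul, lr; };

	/* Warns under -Wmissing-braces: the inner structs lack their own braces. */
	struct rect r1 = { 0, 0, 640, 480 };

	/* Fully braced form; compiles cleanly with the warning enabled. */
	struct rect r2 = { { 0, 0 }, { 640, 480 } };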
--- old/usr/src/uts/common/io/hxge/hxge_main.c
+++ new/usr/src/uts/common/io/hxge/hxge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2012 Milan Jurik. All rights reserved.
25 25 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
 29 29  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
30 30 */
31 31 #include <hxge_impl.h>
32 32 #include <hxge_pfc.h>
33 33
34 34 /*
35 35 * PSARC/2007/453 MSI-X interrupt limit override
36 36 * (This PSARC case is limited to MSI-X vectors
37 37 * and SPARC platforms only).
38 38 */
39 39 uint32_t hxge_msi_enable = 2;
40 40
41 41 /*
42 42 * Globals: tunable parameters (/etc/system or adb)
43 43 *
44 44 */
45 45 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
46 46 uint32_t hxge_rbr_spare_size = 0;
47 47 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
48 48 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
49 49 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
50 50 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
51 51 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
52 52 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
53 53
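/*
 * Illustrative example (values are arbitrary, not recommendations): the
 * tunables above are typically overridden from /etc/system, e.g.
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_jumbo_frame_size = 9216
 *
 * or patched on a live system with adb/mdb as noted in the comment above.
 */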
54 54 static hxge_os_mutex_t hxgedebuglock;
55 55 static int hxge_debug_init = 0;
56 56
57 57 /*
58 58 * Debugging flags:
59 59 * hxge_no_tx_lb : transmit load balancing
60 60 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
61 61 * 1 - From the Stack
62 62 * 2 - Destination IP Address
63 63 */
64 64 uint32_t hxge_no_tx_lb = 0;
65 65 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
66 66
67 67 /*
68 68 * Tunables to manage the receive buffer blocks.
69 69 *
70 70 * hxge_rx_threshold_hi: copy all buffers.
71 71 * hxge_rx_bcopy_size_type: receive buffer block size type.
72 72 * hxge_rx_threshold_lo: copy only up to tunable block size type.
73 73 */
74 74 #if defined(__sparc)
75 75 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
76 76 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
77 77 #else
78 78 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
79 79 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
80 80 #endif
81 81 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
82 82
83 83 rtrace_t hpi_rtracebuf;
84 84
85 85 /*
86 86 * Function Prototypes
87 87 */
88 88 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
89 89 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
90 90 static void hxge_unattach(p_hxge_t);
91 91
92 92 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
93 93
94 94 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
95 95 static void hxge_destroy_mutexes(p_hxge_t);
96 96
97 97 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
98 98 static void hxge_unmap_regs(p_hxge_t hxgep);
99 99
100 100 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
101 101 static void hxge_remove_intrs(p_hxge_t hxgep);
102 102 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
103 103 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
104 104 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
105 105 static void hxge_intrs_enable(p_hxge_t hxgep);
106 106 static void hxge_intrs_disable(p_hxge_t hxgep);
107 107 static void hxge_suspend(p_hxge_t);
108 108 static hxge_status_t hxge_resume(p_hxge_t);
109 109 static hxge_status_t hxge_setup_dev(p_hxge_t);
110 110 static void hxge_destroy_dev(p_hxge_t);
111 111 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
112 112 static void hxge_free_mem_pool(p_hxge_t);
113 113 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
114 114 static void hxge_free_rx_mem_pool(p_hxge_t);
115 115 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
116 116 static void hxge_free_tx_mem_pool(p_hxge_t);
117 117 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
118 118 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
119 119 p_hxge_dma_common_t);
120 120 static void hxge_dma_mem_free(p_hxge_dma_common_t);
121 121 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
122 122 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
123 123 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
124 124 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
125 125 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
126 126 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
127 127 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
128 128 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
129 129 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
130 130 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
131 131 p_hxge_dma_common_t *, size_t);
132 132 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
133 133 static int hxge_init_common_dev(p_hxge_t);
134 134 static void hxge_uninit_common_dev(p_hxge_t);
135 135
136 136 /*
137 137 * The next declarations are for the GLDv3 interface.
138 138 */
139 139 static int hxge_m_start(void *);
140 140 static void hxge_m_stop(void *);
141 141 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
142 142 static int hxge_m_promisc(void *, boolean_t);
143 143 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
144 144 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
145 145
146 146 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
147 147 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
148 148 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149 149 uint_t pr_valsize, const void *pr_val);
150 150 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
151 151 uint_t pr_valsize, void *pr_val);
152 152 static void hxge_m_propinfo(void *barg, const char *pr_name,
153 153 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
154 154 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
155 155 uint_t pr_valsize, const void *pr_val);
156 156 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
157 157 uint_t pr_valsize, void *pr_val);
158 158 static void hxge_link_poll(void *arg);
159 159 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
160 160 static void hxge_msix_init(p_hxge_t hxgep);
161 161
162 162 char *hxge_priv_props[] = {
163 163 "_rxdma_intr_time",
164 164 "_rxdma_intr_pkts",
165 165 "_class_opt_ipv4_tcp",
166 166 "_class_opt_ipv4_udp",
167 167 "_class_opt_ipv4_ah",
168 168 "_class_opt_ipv4_sctp",
169 169 "_class_opt_ipv6_tcp",
170 170 "_class_opt_ipv6_udp",
171 171 "_class_opt_ipv6_ah",
172 172 "_class_opt_ipv6_sctp",
173 173 NULL
174 174 };
175 175
176 176 #define HXGE_MAX_PRIV_PROPS \
177 177 (sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
178 178
179 179 #define HXGE_MAGIC 0x4E584745UL
180 180 #define MAX_DUMP_SZ 256
181 181
182 182 #define HXGE_M_CALLBACK_FLAGS \
183 183 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
184 184
185 185 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
186 186
187 187 static mac_callbacks_t hxge_m_callbacks = {
188 188 HXGE_M_CALLBACK_FLAGS,
189 189 hxge_m_stat,
190 190 hxge_m_start,
191 191 hxge_m_stop,
192 192 hxge_m_promisc,
193 193 hxge_m_multicst,
194 194 NULL,
195 195 NULL,
196 196 NULL,
197 197 hxge_m_ioctl,
198 198 hxge_m_getcapab,
199 199 NULL,
200 200 NULL,
201 201 hxge_m_setprop,
202 202 hxge_m_getprop,
203 203 hxge_m_propinfo
204 204 };
205 205
206 206 /* PSARC/2007/453 MSI-X interrupt limit override. */
207 207 #define HXGE_MSIX_REQUEST_10G 8
208 208 static int hxge_create_msi_property(p_hxge_t);
209 209
210 210 /* Enable debug messages as necessary. */
211 211 uint64_t hxge_debug_level = 0;
212 212
213 213 /*
214 214 * This list contains the instance structures for the Hydra
215 215 * devices present in the system. The lock exists to guarantee
216 216 * mutually exclusive access to the list.
217 217 */
218 218 void *hxge_list = NULL;
219 219 void *hxge_hw_list = NULL;
220 220 hxge_os_mutex_t hxge_common_lock;
221 221
222 222 extern uint64_t hpi_debug_level;
223 223
224 224 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
225 225 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
226 226 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
227 227 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
228 228 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
229 229 extern void hxge_fm_fini(p_hxge_t hxgep);
230 230
231 231 /*
232 232 * Count used to maintain the number of buffers being used
233 233 * by Hydra instances and loaned up to the upper layers.
234 234 */
235 235 uint32_t hxge_mblks_pending = 0;
236 236
237 237 /*
238 238 * Device register access attributes for PIO.
239 239 */
240 240 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
241 241 DDI_DEVICE_ATTR_V0,
242 242 DDI_STRUCTURE_LE_ACC,
243 243 DDI_STRICTORDER_ACC,
244 244 };
245 245
246 246 /*
247 247 * Device descriptor access attributes for DMA.
248 248 */
249 249 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
250 250 DDI_DEVICE_ATTR_V0,
251 251 DDI_STRUCTURE_LE_ACC,
252 252 DDI_STRICTORDER_ACC
253 253 };
254 254
255 255 /*
256 256 * Device buffer access attributes for DMA.
257 257 */
258 258 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
259 259 DDI_DEVICE_ATTR_V0,
260 260 DDI_STRUCTURE_BE_ACC,
261 261 DDI_STRICTORDER_ACC
262 262 };
263 263
264 264 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
265 265 DMA_ATTR_V0, /* version number. */
266 266 0, /* low address */
267 267 0xffffffffffffffff, /* high address */
268 268 0xffffffffffffffff, /* address counter max */
269 269 0x80000, /* alignment */
270 270 0xfc00fc, /* dlim_burstsizes */
271 271 0x1, /* minimum transfer size */
272 272 0xffffffffffffffff, /* maximum transfer size */
273 273 0xffffffffffffffff, /* maximum segment size */
274 274 1, /* scatter/gather list length */
275 275 (unsigned int)1, /* granularity */
276 276 0 /* attribute flags */
277 277 };
278 278
279 279 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
280 280 DMA_ATTR_V0, /* version number. */
281 281 0, /* low address */
282 282 0xffffffffffffffff, /* high address */
283 283 0xffffffffffffffff, /* address counter max */
284 284 0x100000, /* alignment */
285 285 0xfc00fc, /* dlim_burstsizes */
286 286 0x1, /* minimum transfer size */
287 287 0xffffffffffffffff, /* maximum transfer size */
288 288 0xffffffffffffffff, /* maximum segment size */
289 289 1, /* scatter/gather list length */
290 290 (unsigned int)1, /* granularity */
291 291 0 /* attribute flags */
292 292 };
293 293
294 294 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
295 295 DMA_ATTR_V0, /* version number. */
296 296 0, /* low address */
297 297 0xffffffffffffffff, /* high address */
298 298 0xffffffffffffffff, /* address counter max */
299 299 0x40000, /* alignment */
300 300 0xfc00fc, /* dlim_burstsizes */
301 301 0x1, /* minimum transfer size */
302 302 0xffffffffffffffff, /* maximum transfer size */
303 303 0xffffffffffffffff, /* maximum segment size */
304 304 1, /* scatter/gather list length */
305 305 (unsigned int)1, /* granularity */
306 306 0 /* attribute flags */
307 307 };
308 308
309 309 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
310 310 DMA_ATTR_V0, /* version number. */
311 311 0, /* low address */
312 312 0xffffffffffffffff, /* high address */
313 313 0xffffffffffffffff, /* address counter max */
314 314 #if defined(_BIG_ENDIAN)
315 315 0x2000, /* alignment */
316 316 #else
317 317 0x1000, /* alignment */
318 318 #endif
319 319 0xfc00fc, /* dlim_burstsizes */
320 320 0x1, /* minimum transfer size */
321 321 0xffffffffffffffff, /* maximum transfer size */
322 322 0xffffffffffffffff, /* maximum segment size */
323 323 5, /* scatter/gather list length */
324 324 (unsigned int)1, /* granularity */
325 325 0 /* attribute flags */
326 326 };
327 327
328 328 ddi_dma_attr_t hxge_tx_dma_attr = {
329 329 DMA_ATTR_V0, /* version number. */
330 330 0, /* low address */
331 331 0xffffffffffffffff, /* high address */
332 332 0xffffffffffffffff, /* address counter max */
333 333 #if defined(_BIG_ENDIAN)
334 334 0x2000, /* alignment */
335 335 #else
336 336 0x1000, /* alignment */
337 337 #endif
338 338 0xfc00fc, /* dlim_burstsizes */
339 339 0x1, /* minimum transfer size */
340 340 0xffffffffffffffff, /* maximum transfer size */
341 341 0xffffffffffffffff, /* maximum segment size */
342 342 5, /* scatter/gather list length */
343 343 (unsigned int)1, /* granularity */
344 344 0 /* attribute flags */
345 345 };
346 346
347 347 ddi_dma_attr_t hxge_rx_dma_attr = {
348 348 DMA_ATTR_V0, /* version number. */
349 349 0, /* low address */
350 350 0xffffffffffffffff, /* high address */
351 351 0xffffffffffffffff, /* address counter max */
352 352 0x10000, /* alignment */
353 353 0xfc00fc, /* dlim_burstsizes */
354 354 0x1, /* minimum transfer size */
355 355 0xffffffffffffffff, /* maximum transfer size */
356 356 0xffffffffffffffff, /* maximum segment size */
357 357 1, /* scatter/gather list length */
358 358 (unsigned int)1, /* granularity */
359 359 DDI_DMA_RELAXED_ORDERING /* attribute flags */
360 360 };
361 361
362 362 ddi_dma_lim_t hxge_dma_limits = {
363 363 (uint_t)0, /* dlim_addr_lo */
364 364 (uint_t)0xffffffff, /* dlim_addr_hi */
365 365 (uint_t)0xffffffff, /* dlim_cntr_max */
366 366 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
367 367 0x1, /* dlim_minxfer */
368 368 1024 /* dlim_speed */
369 369 };
370 370
371 371 dma_method_t hxge_force_dma = DVMA;
372 372
373 373 /*
374 374 * dma chunk sizes.
375 375 *
376 376 * Try to allocate the largest possible size
 377 377  * so that fewer DMA chunks need to be managed.
378 378 */
379 379 size_t alloc_sizes[] = {
380 380 0x1000, 0x2000, 0x4000, 0x8000,
381 381 0x10000, 0x20000, 0x40000, 0x80000,
382 382 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
383 383 };
384 384
385 385 /*
386 386 * Translate "dev_t" to a pointer to the associated "dev_info_t".
387 387 */
388 388 static int
389 389 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
390 390 {
391 391 p_hxge_t hxgep = NULL;
392 392 int instance;
393 393 int status = DDI_SUCCESS;
394 394 int i;
395 395
396 396 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
397 397
398 398 /*
399 399 * Get the device instance since we'll need to setup or retrieve a soft
400 400 * state for this instance.
401 401 */
402 402 instance = ddi_get_instance(dip);
403 403
404 404 switch (cmd) {
405 405 case DDI_ATTACH:
406 406 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
407 407 break;
408 408
409 409 case DDI_RESUME:
410 410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
411 411 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
412 412 if (hxgep == NULL) {
413 413 status = DDI_FAILURE;
414 414 break;
415 415 }
416 416 if (hxgep->dip != dip) {
417 417 status = DDI_FAILURE;
418 418 break;
419 419 }
420 420 if (hxgep->suspended == DDI_PM_SUSPEND) {
421 421 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
422 422 } else {
423 423 (void) hxge_resume(hxgep);
424 424 }
425 425 goto hxge_attach_exit;
426 426
427 427 case DDI_PM_RESUME:
428 428 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
429 429 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
430 430 if (hxgep == NULL) {
431 431 status = DDI_FAILURE;
432 432 break;
433 433 }
434 434 if (hxgep->dip != dip) {
435 435 status = DDI_FAILURE;
436 436 break;
437 437 }
438 438 (void) hxge_resume(hxgep);
439 439 goto hxge_attach_exit;
440 440
441 441 default:
442 442 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
443 443 status = DDI_FAILURE;
444 444 goto hxge_attach_exit;
445 445 }
446 446
447 447 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
448 448 status = DDI_FAILURE;
449 449 HXGE_ERROR_MSG((hxgep, DDI_CTL,
450 450 "ddi_soft_state_zalloc failed"));
451 451 goto hxge_attach_exit;
452 452 }
453 453
454 454 hxgep = ddi_get_soft_state(hxge_list, instance);
455 455 if (hxgep == NULL) {
456 456 status = HXGE_ERROR;
457 457 HXGE_ERROR_MSG((hxgep, DDI_CTL,
458 458 "ddi_get_soft_state failed"));
459 459 goto hxge_attach_fail2;
460 460 }
461 461
462 462 hxgep->drv_state = 0;
463 463 hxgep->dip = dip;
464 464 hxgep->instance = instance;
465 465 hxgep->p_dip = ddi_get_parent(dip);
466 466 hxgep->hxge_debug_level = hxge_debug_level;
467 467 hpi_debug_level = hxge_debug_level;
468 468
469 469 /*
 470 470  * Initialize MMAC structure.
471 471 */
472 472 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
473 473 hxgep->mmac.available = hxgep->mmac.total;
474 474 for (i = 0; i < hxgep->mmac.total; i++) {
475 475 hxgep->mmac.addrs[i].set = B_FALSE;
476 476 hxgep->mmac.addrs[i].primary = B_FALSE;
477 477 }
478 478
479 479 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
480 480 &hxge_rx_dma_attr);
481 481
482 482 status = hxge_map_regs(hxgep);
483 483 if (status != HXGE_OK) {
484 484 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
485 485 goto hxge_attach_fail3;
486 486 }
487 487
488 488 status = hxge_init_common_dev(hxgep);
489 489 if (status != HXGE_OK) {
490 490 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
491 491 "hxge_init_common_dev failed"));
492 492 goto hxge_attach_fail4;
493 493 }
494 494
495 495 /*
496 496 * Setup the Ndd parameters for this instance.
497 497 */
498 498 hxge_init_param(hxgep);
499 499
500 500 /*
501 501 * Setup Register Tracing Buffer.
502 502 */
503 503 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
504 504
505 505 /* init stats ptr */
506 506 hxge_init_statsp(hxgep);
507 507
508 508 status = hxge_setup_mutexes(hxgep);
509 509 if (status != HXGE_OK) {
510 510 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
511 511 goto hxge_attach_fail;
512 512 }
513 513
514 514 /* Scrub the MSI-X memory */
515 515 hxge_msix_init(hxgep);
516 516
517 517 status = hxge_get_config_properties(hxgep);
518 518 if (status != HXGE_OK) {
519 519 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
520 520 goto hxge_attach_fail;
521 521 }
522 522
523 523 /*
524 524 * Setup the Kstats for the driver.
525 525 */
526 526 hxge_setup_kstats(hxgep);
527 527 hxge_setup_param(hxgep);
528 528
529 529 status = hxge_setup_system_dma_pages(hxgep);
530 530 if (status != HXGE_OK) {
531 531 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
532 532 goto hxge_attach_fail;
533 533 }
534 534
535 535 hxge_hw_id_init(hxgep);
536 536 hxge_hw_init_niu_common(hxgep);
537 537
538 538 status = hxge_setup_dev(hxgep);
539 539 if (status != DDI_SUCCESS) {
540 540 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
541 541 goto hxge_attach_fail;
542 542 }
543 543
544 544 status = hxge_add_intrs(hxgep);
545 545 if (status != DDI_SUCCESS) {
546 546 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
547 547 goto hxge_attach_fail;
548 548 }
549 549
550 550 /*
551 551 * Enable interrupts.
552 552 */
553 553 hxge_intrs_enable(hxgep);
554 554
555 555 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
556 556 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
557 557 "unable to register to mac layer (%d)", status));
558 558 goto hxge_attach_fail;
559 559 }
560 560 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
561 561
562 562 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
563 563 instance));
564 564
565 565 goto hxge_attach_exit;
566 566
567 567 hxge_attach_fail:
568 568 hxge_unattach(hxgep);
569 569 goto hxge_attach_fail1;
570 570
571 571 hxge_attach_fail5:
572 572 /*
573 573 * Tear down the ndd parameters setup.
574 574 */
575 575 hxge_destroy_param(hxgep);
576 576
577 577 /*
578 578 * Tear down the kstat setup.
579 579 */
580 580 hxge_destroy_kstats(hxgep);
581 581
582 582 hxge_attach_fail4:
583 583 if (hxgep->hxge_hw_p) {
584 584 hxge_uninit_common_dev(hxgep);
585 585 hxgep->hxge_hw_p = NULL;
586 586 }
587 587 hxge_attach_fail3:
588 588 /*
589 589 * Unmap the register setup.
590 590 */
591 591 hxge_unmap_regs(hxgep);
592 592
593 593 hxge_fm_fini(hxgep);
594 594
595 595 hxge_attach_fail2:
596 596 ddi_soft_state_free(hxge_list, hxgep->instance);
597 597
598 598 hxge_attach_fail1:
599 599 if (status != HXGE_OK)
600 600 status = (HXGE_ERROR | HXGE_DDI_FAILED);
601 601 hxgep = NULL;
602 602
603 603 hxge_attach_exit:
604 604 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
605 605 status));
606 606
607 607 return (status);
608 608 }
609 609
610 610 static int
611 611 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
612 612 {
613 613 int status = DDI_SUCCESS;
614 614 int instance;
615 615 p_hxge_t hxgep = NULL;
616 616
617 617 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
618 618 instance = ddi_get_instance(dip);
619 619 hxgep = ddi_get_soft_state(hxge_list, instance);
620 620 if (hxgep == NULL) {
621 621 status = DDI_FAILURE;
622 622 goto hxge_detach_exit;
623 623 }
624 624
625 625 switch (cmd) {
626 626 case DDI_DETACH:
627 627 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
628 628 break;
629 629
630 630 case DDI_PM_SUSPEND:
631 631 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
632 632 hxgep->suspended = DDI_PM_SUSPEND;
633 633 hxge_suspend(hxgep);
634 634 break;
635 635
636 636 case DDI_SUSPEND:
637 637 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
638 638 if (hxgep->suspended != DDI_PM_SUSPEND) {
639 639 hxgep->suspended = DDI_SUSPEND;
640 640 hxge_suspend(hxgep);
641 641 }
642 642 break;
643 643
644 644 default:
645 645 status = DDI_FAILURE;
646 646 break;
647 647 }
648 648
649 649 if (cmd != DDI_DETACH)
650 650 goto hxge_detach_exit;
651 651
652 652 /*
653 653 * Stop the xcvr polling.
654 654 */
655 655 hxgep->suspended = cmd;
656 656
657 657 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
658 658 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
659 659 "<== hxge_detach status = 0x%08X", status));
660 660 return (DDI_FAILURE);
661 661 }
662 662 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
663 663 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
664 664
665 665 hxge_unattach(hxgep);
666 666 hxgep = NULL;
667 667
668 668 hxge_detach_exit:
669 669 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
670 670 status));
671 671
672 672 return (status);
673 673 }
674 674
675 675 static void
676 676 hxge_unattach(p_hxge_t hxgep)
677 677 {
678 678 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
679 679
680 680 if (hxgep == NULL || hxgep->dev_regs == NULL) {
681 681 return;
682 682 }
683 683
684 684 if (hxgep->hxge_hw_p) {
685 685 hxge_uninit_common_dev(hxgep);
686 686 hxgep->hxge_hw_p = NULL;
687 687 }
688 688
689 689 if (hxgep->hxge_timerid) {
690 690 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
691 691 hxgep->hxge_timerid = 0;
692 692 }
693 693
694 694 /* Stop interrupts. */
695 695 hxge_intrs_disable(hxgep);
696 696
697 697 /* Stop any further interrupts. */
698 698 hxge_remove_intrs(hxgep);
699 699
700 700 /* Stop the device and free resources. */
701 701 hxge_destroy_dev(hxgep);
702 702
703 703 /* Tear down the ndd parameters setup. */
704 704 hxge_destroy_param(hxgep);
705 705
706 706 /* Tear down the kstat setup. */
707 707 hxge_destroy_kstats(hxgep);
708 708
709 709 /*
710 710 * Remove the list of ndd parameters which were setup during attach.
711 711 */
712 712 if (hxgep->dip) {
713 713 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
714 714 " hxge_unattach: remove all properties"));
715 715 (void) ddi_prop_remove_all(hxgep->dip);
716 716 }
717 717
718 718 /*
719 719 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
720 720 * previous state before unmapping the registers.
721 721 */
722 722 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
723 723 HXGE_DELAY(1000);
724 724
725 725 /*
726 726 * Unmap the register setup.
727 727 */
728 728 hxge_unmap_regs(hxgep);
729 729
730 730 hxge_fm_fini(hxgep);
731 731
732 732 /* Destroy all mutexes. */
733 733 hxge_destroy_mutexes(hxgep);
734 734
735 735 /*
736 736 * Free the soft state data structures allocated with this instance.
737 737 */
738 738 ddi_soft_state_free(hxge_list, hxgep->instance);
739 739
740 740 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
741 741 }
742 742
743 743 static hxge_status_t
744 744 hxge_map_regs(p_hxge_t hxgep)
745 745 {
746 746 int ddi_status = DDI_SUCCESS;
747 747 p_dev_regs_t dev_regs;
748 748
749 749 #ifdef HXGE_DEBUG
750 750 char *sysname;
751 751 #endif
752 752
753 753 off_t regsize;
754 754 hxge_status_t status = HXGE_OK;
755 755 int nregs;
756 756
757 757 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
758 758
759 759 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
760 760 return (HXGE_ERROR);
761 761
762 762 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
763 763
764 764 hxgep->dev_regs = NULL;
765 765 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
766 766 dev_regs->hxge_regh = NULL;
767 767 dev_regs->hxge_pciregh = NULL;
768 768 dev_regs->hxge_msix_regh = NULL;
769 769
 770 770  (void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
771 771 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
772 772 "hxge_map_regs: pci config size 0x%x", regsize));
773 773
774 774 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
775 775 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
776 776 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
777 777 if (ddi_status != DDI_SUCCESS) {
778 778 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
779 779 "ddi_map_regs, hxge bus config regs failed"));
780 780 goto hxge_map_regs_fail0;
781 781 }
782 782
783 783 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
784 784 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
785 785 dev_regs->hxge_pciregp,
786 786 dev_regs->hxge_pciregh));
787 787
 788 788  (void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
789 789 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
790 790 "hxge_map_regs: pio size 0x%x", regsize));
791 791
792 792 /* set up the device mapped register */
793 793 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
794 794 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
795 795 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
796 796
797 797 if (ddi_status != DDI_SUCCESS) {
798 798 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
799 799 "ddi_map_regs for Hydra global reg failed"));
800 800 goto hxge_map_regs_fail1;
801 801 }
802 802
803 803 /* set up the msi/msi-x mapped register */
 804 804  (void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
805 805 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
806 806 "hxge_map_regs: msix size 0x%x", regsize));
807 807
808 808 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
809 809 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
810 810 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
811 811
812 812 if (ddi_status != DDI_SUCCESS) {
813 813 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
814 814 "ddi_map_regs for msi reg failed"));
815 815 goto hxge_map_regs_fail2;
816 816 }
817 817
818 818 hxgep->dev_regs = dev_regs;
819 819
820 820 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
821 821 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
822 822 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
823 823 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
824 824
825 825 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
826 826 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
827 827
828 828 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
829 829 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
830 830
831 831 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
832 832 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
833 833
834 834 goto hxge_map_regs_exit;
835 835
836 836 hxge_map_regs_fail3:
837 837 if (dev_regs->hxge_msix_regh) {
838 838 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
839 839 }
840 840
841 841 hxge_map_regs_fail2:
842 842 if (dev_regs->hxge_regh) {
843 843 ddi_regs_map_free(&dev_regs->hxge_regh);
844 844 }
845 845
846 846 hxge_map_regs_fail1:
847 847 if (dev_regs->hxge_pciregh) {
848 848 ddi_regs_map_free(&dev_regs->hxge_pciregh);
849 849 }
850 850
851 851 hxge_map_regs_fail0:
852 852 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
853 853 kmem_free(dev_regs, sizeof (dev_regs_t));
854 854
855 855 hxge_map_regs_exit:
856 856 if (ddi_status != DDI_SUCCESS)
857 857 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
858 858 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
859 859 return (status);
860 860 }
861 861
862 862 static void
863 863 hxge_unmap_regs(p_hxge_t hxgep)
864 864 {
865 865 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
866 866 if (hxgep->dev_regs) {
867 867 if (hxgep->dev_regs->hxge_pciregh) {
868 868 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
869 869 "==> hxge_unmap_regs: bus"));
870 870 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
871 871 hxgep->dev_regs->hxge_pciregh = NULL;
872 872 }
873 873
874 874 if (hxgep->dev_regs->hxge_regh) {
875 875 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
876 876 "==> hxge_unmap_regs: device registers"));
877 877 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
878 878 hxgep->dev_regs->hxge_regh = NULL;
879 879 }
880 880
881 881 if (hxgep->dev_regs->hxge_msix_regh) {
882 882 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
883 883 "==> hxge_unmap_regs: device interrupts"));
884 884 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
885 885 hxgep->dev_regs->hxge_msix_regh = NULL;
886 886 }
887 887 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
888 888 hxgep->dev_regs = NULL;
889 889 }
890 890 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
891 891 }
892 892
893 893 static hxge_status_t
894 894 hxge_setup_mutexes(p_hxge_t hxgep)
895 895 {
896 896 int ddi_status = DDI_SUCCESS;
897 897 hxge_status_t status = HXGE_OK;
898 898
899 899 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
900 900
901 901 /*
 902 902  * Get the interrupt cookie so the mutexes can be initialized.
903 903 */
904 904 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
905 905 &hxgep->interrupt_cookie);
906 906
907 907 if (ddi_status != DDI_SUCCESS) {
908 908 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
909 909 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
910 910 goto hxge_setup_mutexes_exit;
911 911 }
912 912
913 913 /*
 914 914  * Initialize mutexes for this device.
915 915 */
916 916 MUTEX_INIT(hxgep->genlock, NULL,
917 917 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
918 918 MUTEX_INIT(&hxgep->vmac_lock, NULL,
919 919 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 920 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
921 921 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
922 922 RW_INIT(&hxgep->filter_lock, NULL,
923 923 RW_DRIVER, (void *) hxgep->interrupt_cookie);
924 924 MUTEX_INIT(&hxgep->pio_lock, NULL,
925 925 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
926 926 MUTEX_INIT(&hxgep->timeout.lock, NULL,
927 927 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
928 928
929 929 hxge_setup_mutexes_exit:
930 930 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
931 931 "<== hxge_setup_mutexes status = %x", status));
932 932
933 933 if (ddi_status != DDI_SUCCESS)
934 934 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
935 935
936 936 return (status);
937 937 }
938 938
939 939 static void
940 940 hxge_destroy_mutexes(p_hxge_t hxgep)
941 941 {
942 942 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
943 943 RW_DESTROY(&hxgep->filter_lock);
944 944 MUTEX_DESTROY(&hxgep->vmac_lock);
945 945 MUTEX_DESTROY(&hxgep->ouraddr_lock);
946 946 MUTEX_DESTROY(hxgep->genlock);
947 947 MUTEX_DESTROY(&hxgep->pio_lock);
948 948 MUTEX_DESTROY(&hxgep->timeout.lock);
949 949
950 950 if (hxge_debug_init == 1) {
951 951 MUTEX_DESTROY(&hxgedebuglock);
952 952 hxge_debug_init = 0;
953 953 }
954 954
955 955 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
956 956 }
957 957
958 958 hxge_status_t
959 959 hxge_init(p_hxge_t hxgep)
960 960 {
961 961 hxge_status_t status = HXGE_OK;
962 962
963 963 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
964 964
965 965 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
966 966 return (status);
967 967 }
968 968
969 969 /*
970 970 * Allocate system memory for the receive/transmit buffer blocks and
971 971 * receive/transmit descriptor rings.
972 972 */
973 973 status = hxge_alloc_mem_pool(hxgep);
974 974 if (status != HXGE_OK) {
975 975 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
976 976 goto hxge_init_fail1;
977 977 }
978 978
979 979 /*
980 980 * Initialize and enable TXDMA channels.
981 981 */
982 982 status = hxge_init_txdma_channels(hxgep);
983 983 if (status != HXGE_OK) {
984 984 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
985 985 goto hxge_init_fail3;
986 986 }
987 987
988 988 /*
989 989 * Initialize and enable RXDMA channels.
990 990 */
991 991 status = hxge_init_rxdma_channels(hxgep);
992 992 if (status != HXGE_OK) {
993 993 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
994 994 goto hxge_init_fail4;
995 995 }
996 996
997 997 /*
998 998 * Initialize TCAM
999 999 */
1000 1000 status = hxge_classify_init(hxgep);
1001 1001 if (status != HXGE_OK) {
1002 1002 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1003 1003 goto hxge_init_fail5;
1004 1004 }
1005 1005
1006 1006 /*
1007 1007 * Initialize the VMAC block.
1008 1008 */
1009 1009 status = hxge_vmac_init(hxgep);
1010 1010 if (status != HXGE_OK) {
1011 1011 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1012 1012 goto hxge_init_fail5;
1013 1013 }
1014 1014
1015 1015 /* Bringup - this may be unnecessary when PXE and FCODE available */
1016 1016 status = hxge_pfc_set_default_mac_addr(hxgep);
1017 1017 if (status != HXGE_OK) {
1018 1018 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1019 1019 "Default Address Failure\n"));
1020 1020 goto hxge_init_fail5;
1021 1021 }
1022 1022
1023 1023 /*
1024 1024 * Enable hardware interrupts.
1025 1025 */
1026 1026 hxge_intr_hw_enable(hxgep);
1027 1027 hxgep->drv_state |= STATE_HW_INITIALIZED;
1028 1028
1029 1029 goto hxge_init_exit;
1030 1030
1031 1031 hxge_init_fail5:
1032 1032 hxge_uninit_rxdma_channels(hxgep);
1033 1033 hxge_init_fail4:
1034 1034 hxge_uninit_txdma_channels(hxgep);
1035 1035 hxge_init_fail3:
1036 1036 hxge_free_mem_pool(hxgep);
1037 1037 hxge_init_fail1:
1038 1038 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1039 1039 "<== hxge_init status (failed) = 0x%08x", status));
1040 1040 return (status);
1041 1041
1042 1042 hxge_init_exit:
1043 1043
1044 1044 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1045 1045 status));
1046 1046
1047 1047 return (status);
1048 1048 }
1049 1049
1050 1050 timeout_id_t
1051 1051 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1052 1052 {
1053 1053 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1054 1054 return (timeout(func, (caddr_t)hxgep,
1055 1055 drv_usectohz(1000 * msec)));
1056 1056 }
1057 1057 return (NULL);
1058 1058 }
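/*
 * Illustrative usage sketch (my_func is a placeholder, not a driver
 * routine): schedule a callback roughly one second out and cancel it
 * later. drv_usectohz(1000 * msec) converts milliseconds to clock ticks;
 * if the instance is suspended, no timeout is scheduled and NULL is
 * returned.
 *
 *	timeout_id_t tid = hxge_start_timer(hxgep, my_func, 1000);
 *	...
 *	hxge_stop_timer(hxgep, tid);
 */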
1059 1059
1060 1060 /*ARGSUSED*/
1061 1061 void
1062 1062 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1063 1063 {
1064 1064 if (timerid) {
1065 1065 (void) untimeout(timerid);
1066 1066 }
1067 1067 }
1068 1068
1069 1069 void
1070 1070 hxge_uninit(p_hxge_t hxgep)
1071 1071 {
1072 1072 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1073 1073
1074 1074 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1075 1075 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1076 1076 "==> hxge_uninit: not initialized"));
1077 1077 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1078 1078 return;
1079 1079 }
1080 1080
1081 1081 /* Stop timer */
1082 1082 if (hxgep->hxge_timerid) {
1083 1083 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1084 1084 hxgep->hxge_timerid = 0;
1085 1085 }
1086 1086
1087 1087 (void) hxge_intr_hw_disable(hxgep);
1088 1088
1089 1089 /* Reset the receive VMAC side. */
1090 1090 (void) hxge_rx_vmac_disable(hxgep);
1091 1091
1092 1092 /* Free classification resources */
1093 1093 (void) hxge_classify_uninit(hxgep);
1094 1094
1095 1095 /* Reset the transmit/receive DMA side. */
1096 1096 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1097 1097 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1098 1098
1099 1099 hxge_uninit_txdma_channels(hxgep);
1100 1100 hxge_uninit_rxdma_channels(hxgep);
1101 1101
1102 1102 /* Reset the transmit VMAC side. */
1103 1103 (void) hxge_tx_vmac_disable(hxgep);
1104 1104
1105 1105 hxge_free_mem_pool(hxgep);
1106 1106
1107 1107 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1108 1108
1109 1109 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1110 1110 }
1111 1111
1112 1112 /*ARGSUSED*/
1113 1113 /*VARARGS*/
1114 1114 void
1115 1115 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1116 1116 {
1117 1117 char msg_buffer[1048];
1118 1118 char prefix_buffer[32];
1119 1119 int instance;
1120 1120 uint64_t debug_level;
1121 1121 int cmn_level = CE_CONT;
1122 1122 va_list ap;
1123 1123
1124 1124 debug_level = (hxgep == NULL) ? hxge_debug_level :
1125 1125 hxgep->hxge_debug_level;
1126 1126
1127 1127 if ((level & debug_level) || (level == HXGE_NOTE) ||
1128 1128 (level == HXGE_ERR_CTL)) {
1129 1129 /* do the msg processing */
1130 1130 if (hxge_debug_init == 0) {
1131 1131 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1132 1132 hxge_debug_init = 1;
1133 1133 }
1134 1134
1135 1135 MUTEX_ENTER(&hxgedebuglock);
1136 1136
1137 1137 if ((level & HXGE_NOTE)) {
1138 1138 cmn_level = CE_NOTE;
1139 1139 }
1140 1140
1141 1141 if (level & HXGE_ERR_CTL) {
1142 1142 cmn_level = CE_WARN;
1143 1143 }
1144 1144
1145 1145 va_start(ap, fmt);
1146 1146 (void) vsprintf(msg_buffer, fmt, ap);
1147 1147 va_end(ap);
1148 1148
1149 1149 if (hxgep == NULL) {
1150 1150 instance = -1;
1151 1151 (void) sprintf(prefix_buffer, "%s :", "hxge");
1152 1152 } else {
1153 1153 instance = hxgep->instance;
1154 1154 (void) sprintf(prefix_buffer,
1155 1155 "%s%d :", "hxge", instance);
1156 1156 }
1157 1157
1158 1158 MUTEX_EXIT(&hxgedebuglock);
1159 1159 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1160 1160 }
1161 1161 }
1162 1162
1163 1163 char *
1164 1164 hxge_dump_packet(char *addr, int size)
1165 1165 {
1166 1166 uchar_t *ap = (uchar_t *)addr;
1167 1167 int i;
1168 1168 static char etherbuf[1024];
1169 1169 char *cp = etherbuf;
1170 1170 char digits[] = "0123456789abcdef";
1171 1171
1172 1172 if (!size)
1173 1173 size = 60;
1174 1174
1175 1175 if (size > MAX_DUMP_SZ) {
1176 1176 /* Dump the leading bytes */
1177 1177 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1178 1178 if (*ap > 0x0f)
1179 1179 *cp++ = digits[*ap >> 4];
1180 1180 *cp++ = digits[*ap++ & 0xf];
1181 1181 *cp++ = ':';
1182 1182 }
1183 1183 for (i = 0; i < 20; i++)
1184 1184 *cp++ = '.';
1185 1185 /* Dump the last MAX_DUMP_SZ/2 bytes */
1186 1186 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1187 1187 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1188 1188 if (*ap > 0x0f)
1189 1189 *cp++ = digits[*ap >> 4];
1190 1190 *cp++ = digits[*ap++ & 0xf];
1191 1191 *cp++ = ':';
1192 1192 }
1193 1193 } else {
1194 1194 for (i = 0; i < size; i++) {
1195 1195 if (*ap > 0x0f)
1196 1196 *cp++ = digits[*ap >> 4];
1197 1197 *cp++ = digits[*ap++ & 0xf];
1198 1198 *cp++ = ':';
1199 1199 }
1200 1200 }
1201 1201 *--cp = 0;
1202 1202 return (etherbuf);
1203 1203 }
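/*
 * Illustrative usage sketch (pkt_buf stands for any buffer pointer):
 *
 *	HXGE_DEBUG_MSG((hxgep, RX_CTL,
 *	    "pkt: %s", hxge_dump_packet((char *)pkt_buf, 60)));
 *
 * The string is built in a single static buffer, so the routine is not
 * reentrant and the result should be consumed immediately.
 */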
1204 1204
1205 1205 static void
1206 1206 hxge_suspend(p_hxge_t hxgep)
1207 1207 {
1208 1208 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1209 1209
1210 1210 /*
1211 1211 * Stop the link status timer before hxge_intrs_disable() to avoid
 1212 1212  * accessing the MSIX table simultaneously. Note that the timer
1213 1213 * routine polls for MSIX parity errors.
1214 1214 */
1215 1215 MUTEX_ENTER(&hxgep->timeout.lock);
1216 1216 if (hxgep->timeout.id)
1217 1217 (void) untimeout(hxgep->timeout.id);
1218 1218 MUTEX_EXIT(&hxgep->timeout.lock);
1219 1219
1220 1220 hxge_intrs_disable(hxgep);
1221 1221 hxge_destroy_dev(hxgep);
1222 1222
1223 1223 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1224 1224 }
1225 1225
1226 1226 static hxge_status_t
1227 1227 hxge_resume(p_hxge_t hxgep)
1228 1228 {
1229 1229 hxge_status_t status = HXGE_OK;
1230 1230
1231 1231 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1232 1232 hxgep->suspended = DDI_RESUME;
1233 1233
1234 1234 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1235 1235 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1236 1236
1237 1237 (void) hxge_rx_vmac_enable(hxgep);
1238 1238 (void) hxge_tx_vmac_enable(hxgep);
1239 1239
1240 1240 hxge_intrs_enable(hxgep);
1241 1241
1242 1242 hxgep->suspended = 0;
1243 1243
1244 1244 /*
1245 1245 * Resume the link status timer after hxge_intrs_enable to avoid
1246 1246 * accessing MSIX table simultaneously.
1247 1247 */
1248 1248 MUTEX_ENTER(&hxgep->timeout.lock);
1249 1249 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1250 1250 hxgep->timeout.ticks);
1251 1251 MUTEX_EXIT(&hxgep->timeout.lock);
1252 1252
1253 1253 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1254 1254 "<== hxge_resume status = 0x%x", status));
1255 1255
1256 1256 return (status);
1257 1257 }
1258 1258
1259 1259 static hxge_status_t
1260 1260 hxge_setup_dev(p_hxge_t hxgep)
1261 1261 {
1262 1262 hxge_status_t status = HXGE_OK;
1263 1263
1264 1264 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1265 1265
1266 1266 status = hxge_link_init(hxgep);
1267 1267 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1268 1268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1269 1269 "Bad register acc handle"));
1270 1270 status = HXGE_ERROR;
1271 1271 }
1272 1272
1273 1273 if (status != HXGE_OK) {
1274 1274 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1275 1275 " hxge_setup_dev status (link init 0x%08x)", status));
1276 1276 goto hxge_setup_dev_exit;
1277 1277 }
1278 1278
1279 1279 hxge_setup_dev_exit:
1280 1280 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1281 1281 "<== hxge_setup_dev status = 0x%08x", status));
1282 1282
1283 1283 return (status);
1284 1284 }
1285 1285
1286 1286 static void
1287 1287 hxge_destroy_dev(p_hxge_t hxgep)
1288 1288 {
1289 1289 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1290 1290
1291 1291 (void) hxge_hw_stop(hxgep);
1292 1292
1293 1293 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1294 1294 }
1295 1295
1296 1296 static hxge_status_t
1297 1297 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1298 1298 {
1299 1299 int ddi_status = DDI_SUCCESS;
1300 1300 uint_t count;
1301 1301 ddi_dma_cookie_t cookie;
1302 1302 uint_t iommu_pagesize;
1303 1303 hxge_status_t status = HXGE_OK;
1304 1304
1305 1305 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1306 1306
1307 1307 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1308 1308 iommu_pagesize = dvma_pagesize(hxgep->dip);
1309 1309
1310 1310 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1311 1311 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1312 1312 " default_block_size %d iommu_pagesize %d",
1313 1313 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1314 1314 hxgep->rx_default_block_size, iommu_pagesize));
1315 1315
1316 1316 if (iommu_pagesize != 0) {
1317 1317 if (hxgep->sys_page_sz == iommu_pagesize) {
 1318 1318  /* Hydra supports up to 8K pages */
1319 1319 if (iommu_pagesize > 0x2000)
1320 1320 hxgep->sys_page_sz = 0x2000;
1321 1321 } else {
1322 1322 if (hxgep->sys_page_sz > iommu_pagesize)
1323 1323 hxgep->sys_page_sz = iommu_pagesize;
1324 1324 }
1325 1325 }
1326 1326
1327 1327 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1328 1328
1329 1329 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1330 1330 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1331 1331 "default_block_size %d page mask %d",
1332 1332 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1333 1333 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1334 1334
1335 1335 switch (hxgep->sys_page_sz) {
1336 1336 default:
1337 1337 hxgep->sys_page_sz = 0x1000;
1338 1338 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1339 1339 hxgep->rx_default_block_size = 0x1000;
1340 1340 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1341 1341 break;
1342 1342 case 0x1000:
1343 1343 hxgep->rx_default_block_size = 0x1000;
1344 1344 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1345 1345 break;
1346 1346 case 0x2000:
1347 1347 hxgep->rx_default_block_size = 0x2000;
1348 1348 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1349 1349 break;
1350 1350 }
1351 1351
1352 1352 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1353 1353 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1354 1354
1355 1355 /*
1356 1356 * Get the system DMA burst size.
1357 1357 */
1358 1358 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1359 1359 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1360 1360 if (ddi_status != DDI_SUCCESS) {
1361 1361 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1362 1362 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1363 1363 goto hxge_get_soft_properties_exit;
1364 1364 }
1365 1365
1366 1366 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1367 1367 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1368 1368 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1369 1369 &cookie, &count);
1370 1370 if (ddi_status != DDI_DMA_MAPPED) {
1371 1371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1372 1372 "Binding spare handle to find system burstsize failed."));
1373 1373 ddi_status = DDI_FAILURE;
1374 1374 goto hxge_get_soft_properties_fail1;
1375 1375 }
1376 1376
1377 1377 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1378 1378 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1379 1379
1380 1380 hxge_get_soft_properties_fail1:
1381 1381 ddi_dma_free_handle(&hxgep->dmasparehandle);
1382 1382
1383 1383 hxge_get_soft_properties_exit:
1384 1384
1385 1385 if (ddi_status != DDI_SUCCESS)
1386 1386 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1387 1387
1388 1388 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1389 1389 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1390 1390
1391 1391 return (status);
1392 1392 }
1393 1393
1394 1394 static hxge_status_t
1395 1395 hxge_alloc_mem_pool(p_hxge_t hxgep)
1396 1396 {
1397 1397 hxge_status_t status = HXGE_OK;
1398 1398
1399 1399 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1400 1400
1401 1401 status = hxge_alloc_rx_mem_pool(hxgep);
1402 1402 if (status != HXGE_OK) {
1403 1403 return (HXGE_ERROR);
1404 1404 }
1405 1405
1406 1406 status = hxge_alloc_tx_mem_pool(hxgep);
1407 1407 if (status != HXGE_OK) {
1408 1408 hxge_free_rx_mem_pool(hxgep);
1409 1409 return (HXGE_ERROR);
1410 1410 }
1411 1411
1412 1412 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1413 1413 return (HXGE_OK);
1414 1414 }
1415 1415
1416 1416 static void
1417 1417 hxge_free_mem_pool(p_hxge_t hxgep)
1418 1418 {
1419 1419 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1420 1420
1421 1421 hxge_free_rx_mem_pool(hxgep);
1422 1422 hxge_free_tx_mem_pool(hxgep);
1423 1423
1424 1424 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1425 1425 }
1426 1426
1427 1427 static hxge_status_t
1428 1428 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1429 1429 {
1430 1430 int i, j;
1431 1431 uint32_t ndmas, st_rdc;
1432 1432 p_hxge_dma_pt_cfg_t p_all_cfgp;
1433 1433 p_hxge_hw_pt_cfg_t p_cfgp;
1434 1434 p_hxge_dma_pool_t dma_poolp;
1435 1435 p_hxge_dma_common_t *dma_buf_p;
1436 1436 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1437 1437 p_hxge_dma_common_t *dma_rbr_cntl_p;
1438 1438 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1439 1439 p_hxge_dma_common_t *dma_rcr_cntl_p;
1440 1440 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1441 1441 p_hxge_dma_common_t *dma_mbox_cntl_p;
1442 1442 size_t rx_buf_alloc_size;
1443 1443 size_t rx_rbr_cntl_alloc_size;
1444 1444 size_t rx_rcr_cntl_alloc_size;
1445 1445 size_t rx_mbox_cntl_alloc_size;
1446 1446 uint32_t *num_chunks; /* per dma */
1447 1447 hxge_status_t status = HXGE_OK;
1448 1448
1449 1449 uint32_t hxge_port_rbr_size;
1450 1450 uint32_t hxge_port_rbr_spare_size;
1451 1451 uint32_t hxge_port_rcr_size;
1452 1452
1453 1453 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1454 1454
1455 1455 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1456 1456 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1457 1457 st_rdc = p_cfgp->start_rdc;
1458 1458 ndmas = p_cfgp->max_rdcs;
1459 1459
1460 1460 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1461 1461 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1462 1462
1463 1463 /*
1464 1464 * Allocate memory for each receive DMA channel.
1465 1465 */
1466 1466 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1467 1467 KM_SLEEP);
1468 1468 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1469 1469 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1470 1470
1471 1471 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1472 1472 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1473 1473 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1474 1474 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1475 1475 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1476 1476 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1477 1477 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1478 1478 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1479 1479 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1480 1480 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1481 1481 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1482 1482 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1483 1483
1484 1484 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1485 1485 KM_SLEEP);
1486 1486
1487 1487 /*
1488 1488 * Assume that each DMA channel will be configured with default block
1489 1489 * size. rbr block counts are mod of batch count (16).
 1490 1490  * size. The rbr block count must be a multiple of the batch count (16).
1491 1491 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1492 1492 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1493 1493
1494 1494 if (!hxge_port_rbr_size) {
1495 1495 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1496 1496 }
1497 1497
1498 1498 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1499 1499 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1500 1500 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1501 1501 }
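	/*
	 * Worked example of the rounding above, with the batch count of 16
	 * noted in the comment before this block: a configured rbr size of
	 * 1000 is not a multiple of 16, so it becomes
	 * 16 * (1000 / 16 + 1) = 16 * 63 = 1008.
	 */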
1502 1502
1503 1503 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1504 1504 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1505 1505
1506 1506 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1507 1507 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1508 1508 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1509 1509 }
1510 1510
1511 1511 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1512 1512 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1513 1513
1514 1514 /*
1515 1515 * Addresses of receive block ring, receive completion ring and the
1516 1516 * mailbox must be all cache-aligned (64 bytes).
1517 1517 */
1518 1518 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1519 1519 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1520 1520 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1521 1521 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1522 1522
1523 1523 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1524 1524 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1525 1525 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1526 1526 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1527 1527 hxge_port_rcr_size, rx_cntl_alloc_size));
1528 1528
1529 1529 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1530 1530 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1531 1531
1532 1532 /*
1533 1533 * Allocate memory for receive buffers and descriptor rings. Replace
1534 1534 * allocation functions with interface functions provided by the
1535 1535 * partition manager when it is available.
1536 1536 */
1537 1537 /*
1538 1538 * Allocate memory for the receive buffer blocks.
1539 1539 */
1540 1540 for (i = 0; i < ndmas; i++) {
1541 1541 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1542 1542 " hxge_alloc_rx_mem_pool to alloc mem: "
1543 1543 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1544 1544 i, dma_buf_p[i], &dma_buf_p[i]));
1545 1545
1546 1546 num_chunks[i] = 0;
1547 1547
1548 1548 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1549 1549 rx_buf_alloc_size, hxgep->rx_default_block_size,
1550 1550 &num_chunks[i]);
1551 1551 if (status != HXGE_OK) {
1552 1552 break;
1553 1553 }
1554 1554
1555 1555 st_rdc++;
1556 1556 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1557 1557 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1558 1558 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1559 1559 dma_buf_p[i], &dma_buf_p[i]));
1560 1560 }
1561 1561
1562 1562 if (i < ndmas) {
1563 1563 goto hxge_alloc_rx_mem_fail1;
1564 1564 }
1565 1565
1566 1566 /*
1567 1567 * Allocate memory for descriptor rings and mailbox.
1568 1568 */
1569 1569 st_rdc = p_cfgp->start_rdc;
1570 1570 for (j = 0; j < ndmas; j++) {
1571 1571 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1572 1572 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1573 1573 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1574 1574 break;
1575 1575 }
1576 1576
1577 1577 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1578 1578 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1579 1579 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1580 1580 break;
1581 1581 }
1582 1582
1583 1583 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1584 1584 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1585 1585 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1586 1586 break;
1587 1587 }
1588 1588 st_rdc++;
1589 1589 }
1590 1590
1591 1591 if (j < ndmas) {
1592 1592 goto hxge_alloc_rx_mem_fail2;
1593 1593 }
1594 1594
1595 1595 dma_poolp->ndmas = ndmas;
1596 1596 dma_poolp->num_chunks = num_chunks;
1597 1597 dma_poolp->buf_allocated = B_TRUE;
1598 1598 hxgep->rx_buf_pool_p = dma_poolp;
1599 1599 dma_poolp->dma_buf_pool_p = dma_buf_p;
1600 1600
1601 1601 dma_rbr_cntl_poolp->ndmas = ndmas;
1602 1602 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1603 1603 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1604 1604 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1605 1605
1606 1606 dma_rcr_cntl_poolp->ndmas = ndmas;
1607 1607 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1608 1608 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1609 1609 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1610 1610
1611 1611 dma_mbox_cntl_poolp->ndmas = ndmas;
1612 1612 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1613 1613 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1614 1614 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1615 1615
1616 1616 goto hxge_alloc_rx_mem_pool_exit;
1617 1617
1618 1618 hxge_alloc_rx_mem_fail2:
1619 1619 /* Free control buffers */
1620 1620 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1621 1621 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1622 1622 for (; j >= 0; j--) {
1623 1623 hxge_free_rx_cntl_dma(hxgep,
1624 1624 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1625 1625 hxge_free_rx_cntl_dma(hxgep,
1626 1626 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1627 1627 hxge_free_rx_cntl_dma(hxgep,
1628 1628 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1629 1629 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1630 1630 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1631 1631 }
1632 1632 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1633 1633 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1634 1634
1635 1635 hxge_alloc_rx_mem_fail1:
1636 1636 /* Free data buffers */
1637 1637 i--;
1638 1638 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1639 1639 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1640 1640 for (; i >= 0; i--) {
1641 1641 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1642 1642 num_chunks[i]);
1643 1643 }
1644 1644 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1645 1645 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1646 1646
1647 1647 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1648 1648 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1649 1649 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1650 1650 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1651 1651 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1652 1652 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1653 1653 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1654 1654 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1655 1655 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1656 1656
1657 1657 hxge_alloc_rx_mem_pool_exit:
1658 1658 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1659 1659 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1660 1660
1661 1661 return (status);
1662 1662 }
1663 1663
1664 1664 static void
1665 1665 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1666 1666 {
1667 1667 uint32_t i, ndmas;
1668 1668 p_hxge_dma_pool_t dma_poolp;
1669 1669 p_hxge_dma_common_t *dma_buf_p;
1670 1670 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1671 1671 p_hxge_dma_common_t *dma_rbr_cntl_p;
1672 1672 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1673 1673 p_hxge_dma_common_t *dma_rcr_cntl_p;
1674 1674 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1675 1675 p_hxge_dma_common_t *dma_mbox_cntl_p;
1676 1676 uint32_t *num_chunks;
1677 1677
1678 1678 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1679 1679
1680 1680 dma_poolp = hxgep->rx_buf_pool_p;
1681 1681 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1682 1682 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1683 1683 		    "(null rx buf pool or buf not allocated)"));
1684 1684 return;
1685 1685 }
1686 1686
1687 1687 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1688 1688 if (dma_rbr_cntl_poolp == NULL ||
1689 1689 (!dma_rbr_cntl_poolp->buf_allocated)) {
1690 1690 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1691 1691 "<== hxge_free_rx_mem_pool "
1692 1692 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1693 1693 return;
1694 1694 }
1695 1695
1696 1696 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1697 1697 if (dma_rcr_cntl_poolp == NULL ||
1698 1698 (!dma_rcr_cntl_poolp->buf_allocated)) {
1699 1699 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1700 1700 "<== hxge_free_rx_mem_pool "
1701 1701 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1702 1702 return;
1703 1703 }
1704 1704
1705 1705 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1706 1706 if (dma_mbox_cntl_poolp == NULL ||
1707 1707 (!dma_mbox_cntl_poolp->buf_allocated)) {
1708 1708 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1709 1709 "<== hxge_free_rx_mem_pool "
1710 1710 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1711 1711 return;
1712 1712 }
1713 1713
1714 1714 dma_buf_p = dma_poolp->dma_buf_pool_p;
1715 1715 num_chunks = dma_poolp->num_chunks;
1716 1716
1717 1717 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1718 1718 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1719 1719 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1720 1720 ndmas = dma_rbr_cntl_poolp->ndmas;
1721 1721
1722 1722 for (i = 0; i < ndmas; i++) {
1723 1723 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1724 1724 }
1725 1725
1726 1726 for (i = 0; i < ndmas; i++) {
1727 1727 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1728 1728 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1729 1729 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1730 1730 }
1731 1731
1732 1732 for (i = 0; i < ndmas; i++) {
1733 1733 KMEM_FREE(dma_buf_p[i],
1734 1734 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1735 1735 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1736 1736 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1737 1737 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1738 1738 }
1739 1739
1740 1740 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1741 1741 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1742 1742 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1743 1743 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1744 1744 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1745 1745 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1746 1746 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1747 1747 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1748 1748 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1749 1749
1750 1750 hxgep->rx_buf_pool_p = NULL;
1751 1751 hxgep->rx_rbr_cntl_pool_p = NULL;
1752 1752 hxgep->rx_rcr_cntl_pool_p = NULL;
1753 1753 hxgep->rx_mbox_cntl_pool_p = NULL;
1754 1754
1755 1755 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1756 1756 }
1757 1757
1758 1758 static hxge_status_t
1759 1759 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1760 1760 p_hxge_dma_common_t *dmap,
1761 1761 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1762 1762 {
1763 1763 p_hxge_dma_common_t rx_dmap;
1764 1764 hxge_status_t status = HXGE_OK;
1765 1765 size_t total_alloc_size;
1766 1766 size_t allocated = 0;
1767 1767 int i, size_index, array_size;
1768 1768
1769 1769 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1770 1770
1771 1771 rx_dmap = (p_hxge_dma_common_t)
1772 1772 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1773 1773
1774 1774 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1775 1775 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1776 1776 dma_channel, alloc_size, block_size, dmap));
1777 1777
1778 1778 total_alloc_size = alloc_size;
1779 1779
1780 1780 i = 0;
1781 1781 size_index = 0;
1782 1782 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1783 1783 while ((size_index < array_size) &&
1784 1784 (alloc_sizes[size_index] < alloc_size))
1785 1785 size_index++;
1786 1786 if (size_index >= array_size) {
1787 1787 size_index = array_size - 1;
1788 1788 }
1789 1789
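	/*
	 * Carve the channel's buffer pool out of up to HXGE_DMA_BLOCK
	 * chunks.  Start with the smallest entry in the alloc_sizes table
	 * that covers the request; whenever an allocation fails, fall back
	 * to the next smaller size.  The loop ends when the request is
	 * satisfied, the sizes are exhausted, or all chunk slots are used.
	 */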
1790 1790 while ((allocated < total_alloc_size) &&
1791 1791 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1792 1792 rx_dmap[i].dma_chunk_index = i;
1793 1793 rx_dmap[i].block_size = block_size;
1794 1794 rx_dmap[i].alength = alloc_sizes[size_index];
1795 1795 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1796 1796 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1797 1797 rx_dmap[i].dma_channel = dma_channel;
1798 1798 rx_dmap[i].contig_alloc_type = B_FALSE;
1799 1799
1800 1800 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1801 1801 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1802 1802 "i %d nblocks %d alength %d",
1803 1803 dma_channel, i, &rx_dmap[i], block_size,
1804 1804 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1805 1805 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1806 1806 &hxge_rx_dma_attr, rx_dmap[i].alength,
1807 1807 &hxge_dev_buf_dma_acc_attr,
1808 1808 DDI_DMA_READ | DDI_DMA_STREAMING,
1809 1809 (p_hxge_dma_common_t)(&rx_dmap[i]));
1810 1810 if (status != HXGE_OK) {
1811 1811 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1812 1812 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1813 1813 " for size: %d", alloc_sizes[size_index]));
1814 1814 size_index--;
1815 1815 } else {
1816 1816 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1817 1817 " alloc_rx_buf_dma allocated rdc %d "
1818 1818 "chunk %d size %x dvma %x bufp %llx ",
1819 1819 dma_channel, i, rx_dmap[i].alength,
1820 1820 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1821 1821 i++;
1822 1822 allocated += alloc_sizes[size_index];
1823 1823 }
1824 1824 }
1825 1825
1826 1826 if (allocated < total_alloc_size) {
1827 1827 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1828 1828 " hxge_alloc_rx_buf_dma failed due to"
1829 1829 " allocated(%d) < required(%d)",
1830 1830 allocated, total_alloc_size));
1831 1831 goto hxge_alloc_rx_mem_fail1;
1832 1832 }
1833 1833
1834 1834 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1835 1835 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1836 1836
1837 1837 *num_chunks = i;
1838 1838 *dmap = rx_dmap;
1839 1839
1840 1840 goto hxge_alloc_rx_mem_exit;
1841 1841
1842 1842 hxge_alloc_rx_mem_fail1:
1843 1843 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1844 1844
1845 1845 hxge_alloc_rx_mem_exit:
1846 1846 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1847 1847 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1848 1848
1849 1849 return (status);
1850 1850 }
1851 1851
1852 1852 /*ARGSUSED*/
1853 1853 static void
1854 1854 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1855 1855 uint32_t num_chunks)
1856 1856 {
1857 1857 int i;
1858 1858
1859 1859 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1860 1860 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1861 1861
1862 1862 for (i = 0; i < num_chunks; i++) {
1863 1863 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1864 1864 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1865 1865 hxge_dma_mem_free(dmap++);
1866 1866 }
1867 1867
1868 1868 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1869 1869 }
1870 1870
1871 1871 /*ARGSUSED*/
1872 1872 static hxge_status_t
1873 1873 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1874 1874 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1875 1875 {
1876 1876 p_hxge_dma_common_t rx_dmap;
1877 1877 hxge_status_t status = HXGE_OK;
1878 1878
1879 1879 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1880 1880
1881 1881 rx_dmap = (p_hxge_dma_common_t)
1882 1882 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1883 1883
1884 1884 rx_dmap->contig_alloc_type = B_FALSE;
1885 1885
1886 1886 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1887 1887 attr, size, &hxge_dev_desc_dma_acc_attr,
1888 1888 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1889 1889 if (status != HXGE_OK) {
1890 1890 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1891 1891 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1892 1892 " for size: %d", size));
1893 1893 goto hxge_alloc_rx_cntl_dma_fail1;
1894 1894 }
1895 1895
1896 1896 *dmap = rx_dmap;
1897 1897
1898 1898 goto hxge_alloc_rx_cntl_dma_exit;
1899 1899
1900 1900 hxge_alloc_rx_cntl_dma_fail1:
1901 1901 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1902 1902
1903 1903 hxge_alloc_rx_cntl_dma_exit:
1904 1904 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1905 1905 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1906 1906
1907 1907 return (status);
1908 1908 }
1909 1909
1910 1910 /*ARGSUSED*/
1911 1911 static void
1912 1912 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1913 1913 {
1914 1914 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1915 1915
1916 1916 hxge_dma_mem_free(dmap);
1917 1917
1918 1918 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1919 1919 }
1920 1920
1921 1921 static hxge_status_t
1922 1922 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1923 1923 {
1924 1924 hxge_status_t status = HXGE_OK;
1925 1925 int i, j;
1926 1926 uint32_t ndmas, st_tdc;
1927 1927 p_hxge_dma_pt_cfg_t p_all_cfgp;
1928 1928 p_hxge_hw_pt_cfg_t p_cfgp;
1929 1929 p_hxge_dma_pool_t dma_poolp;
1930 1930 p_hxge_dma_common_t *dma_buf_p;
1931 1931 p_hxge_dma_pool_t dma_cntl_poolp;
1932 1932 p_hxge_dma_common_t *dma_cntl_p;
1933 1933 size_t tx_buf_alloc_size;
1934 1934 size_t tx_cntl_alloc_size;
1935 1935 uint32_t *num_chunks; /* per dma */
1936 1936
1937 1937 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1938 1938
1939 1939 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1940 1940 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1941 1941 st_tdc = p_cfgp->start_tdc;
1942 1942 ndmas = p_cfgp->max_tdcs;
1943 1943
1944 1944 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1945 1945 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1946 1946 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1947 1947 /*
1948 1948 * Allocate memory for each transmit DMA channel.
1949 1949 */
1950 1950 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1951 1951 KM_SLEEP);
1952 1952 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1953 1953 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1954 1954
1955 1955 dma_cntl_poolp = (p_hxge_dma_pool_t)
1956 1956 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1957 1957 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1958 1958 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1959 1959
1960 1960 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1961 1961
1962 1962 /*
1963 1963 * Assume that each DMA channel will be configured with default
1964 1964 	 * transmit buffer size for copying transmit data. (For packet payload
1965 1965 * over this limit, packets will not be copied.)
1966 1966 */
1967 1967 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1968 1968
1969 1969 /*
1970 1970 * Addresses of transmit descriptor ring and the mailbox must be all
1971 1971 * cache-aligned (64 bytes).
1972 1972 */
1973 1973 tx_cntl_alloc_size = hxge_tx_ring_size;
1974 1974 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1975 1975 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1976 1976
1977 1977 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1978 1978 KM_SLEEP);
1979 1979
1980 1980 /*
1981 1981 * Allocate memory for transmit buffers and descriptor rings. Replace
1982 1982 * allocation functions with interface functions provided by the
1983 1983 * partition manager when it is available.
1984 1984 *
1985 1985 * Allocate memory for the transmit buffer pool.
1986 1986 */
1987 1987 for (i = 0; i < ndmas; i++) {
1988 1988 num_chunks[i] = 0;
1989 1989 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1990 1990 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1991 1991 if (status != HXGE_OK) {
1992 1992 break;
1993 1993 }
1994 1994 st_tdc++;
1995 1995 }
1996 1996
1997 1997 if (i < ndmas) {
1998 1998 goto hxge_alloc_tx_mem_pool_fail1;
1999 1999 }
2000 2000
2001 2001 st_tdc = p_cfgp->start_tdc;
2002 2002
2003 2003 /*
2004 2004 * Allocate memory for descriptor rings and mailbox.
2005 2005 */
2006 2006 for (j = 0; j < ndmas; j++) {
2007 2007 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2008 2008 tx_cntl_alloc_size);
2009 2009 if (status != HXGE_OK) {
2010 2010 break;
2011 2011 }
2012 2012 st_tdc++;
2013 2013 }
2014 2014
2015 2015 if (j < ndmas) {
2016 2016 goto hxge_alloc_tx_mem_pool_fail2;
2017 2017 }
2018 2018
2019 2019 dma_poolp->ndmas = ndmas;
2020 2020 dma_poolp->num_chunks = num_chunks;
2021 2021 dma_poolp->buf_allocated = B_TRUE;
2022 2022 dma_poolp->dma_buf_pool_p = dma_buf_p;
2023 2023 hxgep->tx_buf_pool_p = dma_poolp;
2024 2024
2025 2025 dma_cntl_poolp->ndmas = ndmas;
2026 2026 dma_cntl_poolp->buf_allocated = B_TRUE;
2027 2027 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2028 2028 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2029 2029
2030 2030 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2031 2031 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2032 2032 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2033 2033
2034 2034 goto hxge_alloc_tx_mem_pool_exit;
2035 2035
2036 2036 hxge_alloc_tx_mem_pool_fail2:
2037 2037 /* Free control buffers */
2038 2038 j--;
2039 2039 for (; j >= 0; j--) {
2040 2040 hxge_free_tx_cntl_dma(hxgep,
2041 2041 (p_hxge_dma_common_t)dma_cntl_p[j]);
2042 2042 }
2043 2043
2044 2044 hxge_alloc_tx_mem_pool_fail1:
2045 2045 /* Free data buffers */
2046 2046 i--;
2047 2047 for (; i >= 0; i--) {
2048 2048 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2049 2049 num_chunks[i]);
2050 2050 }
2051 2051
2052 2052 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2053 2053 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2054 2054 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2055 2055 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2056 2056 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2057 2057
2058 2058 hxge_alloc_tx_mem_pool_exit:
2059 2059 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2060 2060 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2061 2061
2062 2062 return (status);
2063 2063 }
2064 2064
2065 2065 static hxge_status_t
2066 2066 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2067 2067 p_hxge_dma_common_t *dmap, size_t alloc_size,
2068 2068 size_t block_size, uint32_t *num_chunks)
2069 2069 {
2070 2070 p_hxge_dma_common_t tx_dmap;
2071 2071 hxge_status_t status = HXGE_OK;
2072 2072 size_t total_alloc_size;
2073 2073 size_t allocated = 0;
2074 2074 int i, size_index, array_size;
2075 2075
2076 2076 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2077 2077
2078 2078 tx_dmap = (p_hxge_dma_common_t)
2079 2079 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2080 2080
2081 2081 total_alloc_size = alloc_size;
2082 2082 i = 0;
2083 2083 size_index = 0;
2084 2084 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2085 2085 while ((size_index < array_size) &&
2086 2086 (alloc_sizes[size_index] < alloc_size))
2087 2087 size_index++;
2088 2088 if (size_index >= array_size) {
2089 2089 size_index = array_size - 1;
2090 2090 }
2091 2091
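	/*
	 * Same chunked allocation strategy as the receive side: step down
	 * through the alloc_sizes table on failure until the transmit
	 * buffer request is satisfied or the chunk slots run out.
	 */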
2092 2092 while ((allocated < total_alloc_size) &&
2093 2093 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2094 2094 tx_dmap[i].dma_chunk_index = i;
2095 2095 tx_dmap[i].block_size = block_size;
2096 2096 tx_dmap[i].alength = alloc_sizes[size_index];
2097 2097 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2098 2098 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2099 2099 tx_dmap[i].dma_channel = dma_channel;
2100 2100 tx_dmap[i].contig_alloc_type = B_FALSE;
2101 2101
2102 2102 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2103 2103 &hxge_tx_dma_attr, tx_dmap[i].alength,
2104 2104 &hxge_dev_buf_dma_acc_attr,
2105 2105 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2106 2106 (p_hxge_dma_common_t)(&tx_dmap[i]));
2107 2107 if (status != HXGE_OK) {
2108 2108 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2109 2109 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2110 2110 " for size: %d", alloc_sizes[size_index]));
2111 2111 size_index--;
2112 2112 } else {
2113 2113 i++;
2114 2114 allocated += alloc_sizes[size_index];
2115 2115 }
2116 2116 }
2117 2117
2118 2118 if (allocated < total_alloc_size) {
2119 2119 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2120 2120 " hxge_alloc_tx_buf_dma: failed due to"
2121 2121 " allocated(%d) < required(%d)",
2122 2122 allocated, total_alloc_size));
2123 2123 goto hxge_alloc_tx_mem_fail1;
2124 2124 }
2125 2125
2126 2126 *num_chunks = i;
2127 2127 *dmap = tx_dmap;
2128 2128 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2129 2129 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2130 2130 *dmap, i));
2131 2131 goto hxge_alloc_tx_mem_exit;
2132 2132
2133 2133 hxge_alloc_tx_mem_fail1:
2134 2134 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2135 2135
2136 2136 hxge_alloc_tx_mem_exit:
2137 2137 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2138 2138 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2139 2139
2140 2140 return (status);
2141 2141 }
2142 2142
2143 2143 /*ARGSUSED*/
2144 2144 static void
2145 2145 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2146 2146 uint32_t num_chunks)
2147 2147 {
2148 2148 int i;
2149 2149
2150 2150 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2151 2151
2152 2152 for (i = 0; i < num_chunks; i++) {
2153 2153 hxge_dma_mem_free(dmap++);
2154 2154 }
2155 2155
2156 2156 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2157 2157 }
2158 2158
2159 2159 /*ARGSUSED*/
2160 2160 static hxge_status_t
2161 2161 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2162 2162 p_hxge_dma_common_t *dmap, size_t size)
2163 2163 {
2164 2164 p_hxge_dma_common_t tx_dmap;
2165 2165 hxge_status_t status = HXGE_OK;
2166 2166
2167 2167 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2168 2168
2169 2169 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2170 2170 KM_SLEEP);
2171 2171
2172 2172 tx_dmap->contig_alloc_type = B_FALSE;
2173 2173
2174 2174 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2175 2175 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2176 2176 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2177 2177 if (status != HXGE_OK) {
2178 2178 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2179 2179 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2180 2180 " for size: %d", size));
2181 2181 goto hxge_alloc_tx_cntl_dma_fail1;
2182 2182 }
2183 2183
2184 2184 *dmap = tx_dmap;
2185 2185
2186 2186 goto hxge_alloc_tx_cntl_dma_exit;
2187 2187
2188 2188 hxge_alloc_tx_cntl_dma_fail1:
2189 2189 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2190 2190
2191 2191 hxge_alloc_tx_cntl_dma_exit:
2192 2192 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2193 2193 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2194 2194
2195 2195 return (status);
2196 2196 }
2197 2197
2198 2198 /*ARGSUSED*/
2199 2199 static void
2200 2200 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2201 2201 {
2202 2202 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2203 2203
2204 2204 hxge_dma_mem_free(dmap);
2205 2205
2206 2206 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2207 2207 }
2208 2208
2209 2209 static void
2210 2210 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2211 2211 {
2212 2212 uint32_t i, ndmas;
2213 2213 p_hxge_dma_pool_t dma_poolp;
2214 2214 p_hxge_dma_common_t *dma_buf_p;
2215 2215 p_hxge_dma_pool_t dma_cntl_poolp;
2216 2216 p_hxge_dma_common_t *dma_cntl_p;
2217 2217 uint32_t *num_chunks;
2218 2218
2219 2219 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2220 2220
2221 2221 dma_poolp = hxgep->tx_buf_pool_p;
2222 2222 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2223 2223 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2224 2224 "<== hxge_free_tx_mem_pool "
2225 2225 		    "(null tx buf pool or buf not allocated)"));
2226 2226 return;
2227 2227 }
2228 2228
2229 2229 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2230 2230 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2231 2231 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2232 2232 "<== hxge_free_tx_mem_pool "
2233 2233 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2234 2234 return;
2235 2235 }
2236 2236
2237 2237 dma_buf_p = dma_poolp->dma_buf_pool_p;
2238 2238 num_chunks = dma_poolp->num_chunks;
2239 2239
2240 2240 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2241 2241 ndmas = dma_cntl_poolp->ndmas;
2242 2242
2243 2243 for (i = 0; i < ndmas; i++) {
2244 2244 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2245 2245 }
2246 2246
2247 2247 for (i = 0; i < ndmas; i++) {
2248 2248 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2249 2249 }
2250 2250
2251 2251 for (i = 0; i < ndmas; i++) {
2252 2252 KMEM_FREE(dma_buf_p[i],
2253 2253 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2254 2254 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2255 2255 }
2256 2256
2257 2257 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2258 2258 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2259 2259 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2260 2260 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2261 2261 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2262 2262
2263 2263 hxgep->tx_buf_pool_p = NULL;
2264 2264 hxgep->tx_cntl_pool_p = NULL;
2265 2265
2266 2266 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2267 2267 }
2268 2268
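/*
 * hxge_dma_mem_alloc() -- allocate and bind one DMA area.
 *
 * Allocates a DMA handle, allocates the backing memory, and binds it for
 * DMA, unwinding on any failure.  The binding must resolve to exactly one
 * cookie; a fragmented (multi-cookie) mapping is treated as a failure.
 */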
2269 2269 /*ARGSUSED*/
2270 2270 static hxge_status_t
2271 2271 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2272 2272 struct ddi_dma_attr *dma_attrp,
2273 2273 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2274 2274 p_hxge_dma_common_t dma_p)
2275 2275 {
2276 2276 caddr_t kaddrp;
2277 2277 int ddi_status = DDI_SUCCESS;
2278 2278
2279 2279 dma_p->dma_handle = NULL;
2280 2280 dma_p->acc_handle = NULL;
2281 2281 dma_p->kaddrp = NULL;
2282 2282
2283 2283 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2284 2284 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2285 2285 if (ddi_status != DDI_SUCCESS) {
2286 2286 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2287 2287 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2288 2288 return (HXGE_ERROR | HXGE_DDI_FAILED);
2289 2289 }
2290 2290
2291 2291 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2292 2292 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2293 2293 &dma_p->acc_handle);
2294 2294 if (ddi_status != DDI_SUCCESS) {
2295 2295 /* The caller will decide whether it is fatal */
2296 2296 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2297 2297 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2298 2298 ddi_dma_free_handle(&dma_p->dma_handle);
2299 2299 dma_p->dma_handle = NULL;
2300 2300 return (HXGE_ERROR | HXGE_DDI_FAILED);
2301 2301 }
2302 2302
2303 2303 if (dma_p->alength < length) {
2304 2304 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2305 2305 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2306 2306 ddi_dma_mem_free(&dma_p->acc_handle);
2307 2307 ddi_dma_free_handle(&dma_p->dma_handle);
2308 2308 dma_p->acc_handle = NULL;
2309 2309 dma_p->dma_handle = NULL;
2310 2310 return (HXGE_ERROR);
2311 2311 }
2312 2312
2313 2313 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2314 2314 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2315 2315 &dma_p->dma_cookie, &dma_p->ncookies);
2316 2316 if (ddi_status != DDI_DMA_MAPPED) {
2317 2317 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2318 2318 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2319 2319 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2320 2320 if (dma_p->acc_handle) {
2321 2321 ddi_dma_mem_free(&dma_p->acc_handle);
2322 2322 dma_p->acc_handle = NULL;
2323 2323 }
2324 2324 ddi_dma_free_handle(&dma_p->dma_handle);
2325 2325 dma_p->dma_handle = NULL;
2326 2326 return (HXGE_ERROR | HXGE_DDI_FAILED);
2327 2327 }
2328 2328
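	/* More than one cookie means the area is not contiguous; unwind. */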
2329 2329 if (dma_p->ncookies != 1) {
2330 2330 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2331 2331 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2332 2332 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2333 2333 if (dma_p->acc_handle) {
2334 2334 ddi_dma_mem_free(&dma_p->acc_handle);
2335 2335 dma_p->acc_handle = NULL;
2336 2336 }
2337 2337 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2338 2338 ddi_dma_free_handle(&dma_p->dma_handle);
2339 2339 dma_p->dma_handle = NULL;
2340 2340 return (HXGE_ERROR);
2341 2341 }
2342 2342
2343 2343 dma_p->kaddrp = kaddrp;
2344 2344 #if defined(__i386)
2345 2345 dma_p->ioaddr_pp =
2346 2346 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2347 2347 #else
2348 2348 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2349 2349 #endif
2350 2350
2351 2351 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2352 2352
2353 2353 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2354 2354 "dma buffer allocated: dma_p $%p "
2355 2355 	    "return dmac_laddress from cookie $%p dmac_size %d "
2356 2356 "dma_p->ioaddr_p $%p "
2357 2357 "dma_p->orig_ioaddr_p $%p "
2358 2358 "orig_vatopa $%p "
2359 2359 "alength %d (0x%x) "
2360 2360 "kaddrp $%p "
2361 2361 "length %d (0x%x)",
2362 2362 dma_p,
2363 2363 dma_p->dma_cookie.dmac_laddress,
2364 2364 dma_p->dma_cookie.dmac_size,
2365 2365 dma_p->ioaddr_pp,
2366 2366 dma_p->orig_ioaddr_pp,
2367 2367 dma_p->orig_vatopa,
2368 2368 dma_p->alength, dma_p->alength,
2369 2369 kaddrp,
2370 2370 length, length));
2371 2371
2372 2372 return (HXGE_OK);
2373 2373 }
2374 2374
2375 2375 static void
2376 2376 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2377 2377 {
2378 2378 if (dma_p == NULL)
2379 2379 return;
2380 2380
2381 2381 if (dma_p->dma_handle != NULL) {
2382 2382 if (dma_p->ncookies) {
2383 2383 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2384 2384 dma_p->ncookies = 0;
2385 2385 }
2386 2386 ddi_dma_free_handle(&dma_p->dma_handle);
2387 2387 dma_p->dma_handle = NULL;
2388 2388 }
2389 2389
2390 2390 if (dma_p->acc_handle != NULL) {
2391 2391 ddi_dma_mem_free(&dma_p->acc_handle);
2392 2392 dma_p->acc_handle = NULL;
2393 2393 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2394 2394 }
2395 2395
2396 2396 dma_p->kaddrp = NULL;
2397 2397 	dma_p->alength = 0;
2398 2398 }
2399 2399
2400 2400 /*
2401 2401 * hxge_m_start() -- start transmitting and receiving.
2402 2402 *
2403 2403  * This function is called by the MAC layer when the first
2404 2404  * stream is opened to prepare the hardware for sending
2405 2405  * and receiving packets.
2406 2406 */
2407 2407 static int
2408 2408 hxge_m_start(void *arg)
2409 2409 {
2410 2410 p_hxge_t hxgep = (p_hxge_t)arg;
2411 2411
2412 2412 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2413 2413
2414 2414 MUTEX_ENTER(hxgep->genlock);
2415 2415
2416 2416 if (hxge_init(hxgep) != DDI_SUCCESS) {
2417 2417 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2418 2418 "<== hxge_m_start: initialization failed"));
2419 2419 MUTEX_EXIT(hxgep->genlock);
2420 2420 return (EIO);
2421 2421 }
2422 2422
2423 2423 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2424 2424 /*
2425 2425 * Start timer to check the system error and tx hangs
2426 2426 */
2427 2427 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2428 2428 hxge_check_hw_state, HXGE_CHECK_TIMER);
2429 2429
2430 2430 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2431 2431
2432 2432 hxgep->timeout.link_status = 0;
2433 2433 hxgep->timeout.report_link_status = B_TRUE;
2434 2434 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2435 2435
2436 2436 /* Start the link status timer to check the link status */
2437 2437 MUTEX_ENTER(&hxgep->timeout.lock);
2438 2438 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2439 2439 hxgep->timeout.ticks);
2440 2440 MUTEX_EXIT(&hxgep->timeout.lock);
2441 2441 }
2442 2442
2443 2443 MUTEX_EXIT(hxgep->genlock);
2444 2444
2445 2445 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2446 2446
2447 2447 return (0);
2448 2448 }
2449 2449
2450 2450 /*
2451 2451 * hxge_m_stop(): stop transmitting and receiving.
2452 2452 */
2453 2453 static void
2454 2454 hxge_m_stop(void *arg)
2455 2455 {
2456 2456 p_hxge_t hxgep = (p_hxge_t)arg;
2457 2457
2458 2458 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2459 2459
2460 2460 if (hxgep->hxge_timerid) {
2461 2461 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2462 2462 hxgep->hxge_timerid = 0;
2463 2463 }
2464 2464
2465 2465 /* Stop the link status timer before unregistering */
2466 2466 MUTEX_ENTER(&hxgep->timeout.lock);
2467 2467 if (hxgep->timeout.id) {
2468 2468 (void) untimeout(hxgep->timeout.id);
2469 2469 hxgep->timeout.id = 0;
2470 2470 }
2471 2471 hxge_link_update(hxgep, LINK_STATE_DOWN);
2472 2472 MUTEX_EXIT(&hxgep->timeout.lock);
2473 2473
2474 2474 MUTEX_ENTER(hxgep->genlock);
2475 2475
2476 2476 hxge_uninit(hxgep);
2477 2477
2478 2478 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2479 2479
2480 2480 MUTEX_EXIT(hxgep->genlock);
2481 2481
2482 2482 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2483 2483 }
2484 2484
2485 2485 static int
2486 2486 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2487 2487 {
2488 2488 p_hxge_t hxgep = (p_hxge_t)arg;
2489 2489 struct ether_addr addrp;
2490 2490
2491 2491 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2492 2492
2493 2493 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2494 2494
2495 2495 if (add) {
2496 2496 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2497 2497 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2498 2498 "<== hxge_m_multicst: add multicast failed"));
2499 2499 return (EINVAL);
2500 2500 }
2501 2501 } else {
2502 2502 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2503 2503 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2504 2504 "<== hxge_m_multicst: del multicast failed"));
2505 2505 return (EINVAL);
2506 2506 }
2507 2507 }
2508 2508
2509 2509 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2510 2510
2511 2511 return (0);
2512 2512 }
2513 2513
2514 2514 static int
2515 2515 hxge_m_promisc(void *arg, boolean_t on)
2516 2516 {
2517 2517 p_hxge_t hxgep = (p_hxge_t)arg;
2518 2518
2519 2519 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2520 2520
2521 2521 if (hxge_set_promisc(hxgep, on)) {
2522 2522 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2523 2523 "<== hxge_m_promisc: set promisc failed"));
2524 2524 return (EINVAL);
2525 2525 }
2526 2526
2527 2527 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2528 2528
2529 2529 return (0);
2530 2530 }
2531 2531
2532 2532 static void
2533 2533 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2534 2534 {
2535 2535 p_hxge_t hxgep = (p_hxge_t)arg;
2536 2536 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2537 2537 boolean_t need_privilege;
2538 2538 int err;
2539 2539 int cmd;
2540 2540
2541 2541 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2542 2542
2543 2543 iocp = (struct iocblk *)mp->b_rptr;
2544 2544 iocp->ioc_error = 0;
2545 2545 need_privilege = B_TRUE;
2546 2546 cmd = iocp->ioc_cmd;
2547 2547
2548 2548 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2549 2549 switch (cmd) {
2550 2550 default:
2551 2551 miocnak(wq, mp, 0, EINVAL);
2552 2552 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2553 2553 return;
2554 2554
2555 2555 case LB_GET_INFO_SIZE:
2556 2556 case LB_GET_INFO:
2557 2557 case LB_GET_MODE:
2558 2558 need_privilege = B_FALSE;
2559 2559 break;
2560 2560
2561 2561 case LB_SET_MODE:
2562 2562 break;
2563 2563
2564 2564 case ND_GET:
2565 2565 need_privilege = B_FALSE;
2566 2566 break;
2567 2567 case ND_SET:
2568 2568 break;
2569 2569
2570 2570 case HXGE_GET_TX_RING_SZ:
2571 2571 case HXGE_GET_TX_DESC:
2572 2572 case HXGE_TX_SIDE_RESET:
2573 2573 case HXGE_RX_SIDE_RESET:
2574 2574 case HXGE_GLOBAL_RESET:
2575 2575 case HXGE_RESET_MAC:
2576 2576 case HXGE_PUT_TCAM:
2577 2577 case HXGE_GET_TCAM:
2578 2578 case HXGE_RTRACE:
2579 2579
2580 2580 need_privilege = B_FALSE;
2581 2581 break;
2582 2582 }
2583 2583
2584 2584 if (need_privilege) {
2585 2585 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2586 2586 if (err != 0) {
2587 2587 miocnak(wq, mp, 0, err);
2588 2588 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2589 2589 "<== hxge_m_ioctl: no priv"));
2590 2590 return;
2591 2591 }
2592 2592 }
2593 2593
2594 2594 switch (cmd) {
2595 2595 case ND_GET:
2596 2596 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
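		/* FALLTHROUGH */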
2597 2597 case ND_SET:
2598 2598 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2599 2599 hxge_param_ioctl(hxgep, wq, mp, iocp);
2600 2600 break;
2601 2601
2602 2602 case LB_GET_MODE:
2603 2603 case LB_SET_MODE:
2604 2604 case LB_GET_INFO_SIZE:
2605 2605 case LB_GET_INFO:
2606 2606 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2607 2607 break;
2608 2608
2609 2609 case HXGE_PUT_TCAM:
2610 2610 case HXGE_GET_TCAM:
2611 2611 case HXGE_GET_TX_RING_SZ:
2612 2612 case HXGE_GET_TX_DESC:
2613 2613 case HXGE_TX_SIDE_RESET:
2614 2614 case HXGE_RX_SIDE_RESET:
2615 2615 case HXGE_GLOBAL_RESET:
2616 2616 case HXGE_RESET_MAC:
2617 2617 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2618 2618 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2619 2619 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2620 2620 break;
2621 2621 }
2622 2622
2623 2623 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2624 2624 }
2625 2625
2626 2626 /*ARGSUSED*/
2627 2627 static int
2628 2628 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2629 2629 {
2630 2630 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2631 2631 p_hxge_t hxgep;
2632 2632 p_tx_ring_t ring;
2633 2633
2634 2634 ASSERT(rhp != NULL);
2635 2635 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2636 2636
2637 2637 hxgep = rhp->hxgep;
2638 2638
2639 2639 /*
2640 2640 * Get the ring pointer.
2641 2641 */
2642 2642 ring = hxgep->tx_rings->rings[rhp->index];
2643 2643
2644 2644 /*
2645 2645 * Fill in the handle for the transmit.
2646 2646 */
2647 2647 MUTEX_ENTER(&ring->lock);
2648 2648 rhp->started = B_TRUE;
2649 2649 ring->ring_handle = rhp->ring_handle;
2650 2650 MUTEX_EXIT(&ring->lock);
2651 2651
2652 2652 return (0);
2653 2653 }
2654 2654
2655 2655 static void
2656 2656 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2657 2657 {
2658 2658 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2659 2659 p_hxge_t hxgep;
2660 2660 p_tx_ring_t ring;
2661 2661
2662 2662 ASSERT(rhp != NULL);
2663 2663 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2664 2664
2665 2665 hxgep = rhp->hxgep;
2666 2666 ring = hxgep->tx_rings->rings[rhp->index];
2667 2667
2668 2668 MUTEX_ENTER(&ring->lock);
2669 2669 ring->ring_handle = (mac_ring_handle_t)NULL;
2670 2670 rhp->started = B_FALSE;
2671 2671 MUTEX_EXIT(&ring->lock);
2672 2672 }
2673 2673
2674 2674 static int
2675 2675 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2676 2676 {
2677 2677 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2678 2678 p_hxge_t hxgep;
2679 2679 p_rx_rcr_ring_t ring;
2680 2680 int i;
2681 2681
2682 2682 ASSERT(rhp != NULL);
2683 2683 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2684 2684
2685 2685 hxgep = rhp->hxgep;
2686 2686
2687 2687 /*
2688 2688 * Get pointer to ring.
2689 2689 */
2690 2690 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2691 2691
2692 2692 MUTEX_ENTER(&ring->lock);
2693 2693
2694 2694 if (rhp->started) {
2695 2695 MUTEX_EXIT(&ring->lock);
2696 2696 return (0);
2697 2697 }
2698 2698
2699 2699 /*
2700 2700 * Set the ldvp and ldgp pointers to enable/disable
2701 2701 * polling.
2702 2702 */
2703 2703 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2704 2704 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2705 2705 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2706 2706 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2707 2707 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2708 2708 break;
2709 2709 }
2710 2710 }
2711 2711
2712 2712 rhp->started = B_TRUE;
2713 2713 ring->rcr_mac_handle = rhp->ring_handle;
2714 2714 ring->rcr_gen_num = mr_gen_num;
2715 2715 MUTEX_EXIT(&ring->lock);
2716 2716
2717 2717 return (0);
2718 2718 }
2719 2719
2720 2720 static void
2721 2721 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2722 2722 {
2723 2723 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2724 2724 p_hxge_t hxgep;
2725 2725 p_rx_rcr_ring_t ring;
2726 2726
2727 2727 ASSERT(rhp != NULL);
2728 2728 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2729 2729
2730 2730 hxgep = rhp->hxgep;
2731 2731 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2732 2732
2733 2733 MUTEX_ENTER(&ring->lock);
2734 2734 	rhp->started = B_FALSE;
2735 2735 ring->rcr_mac_handle = NULL;
2736 2736 ring->ldvp = NULL;
2737 2737 ring->ldgp = NULL;
2738 2738 MUTEX_EXIT(&ring->lock);
2739 2739 }
2740 2740
2741 2741 static int
2742 2742 hxge_rx_group_start(mac_group_driver_t gdriver)
2743 2743 {
2744 2744 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2745 2745
2746 2746 ASSERT(group->hxgep != NULL);
2747 2747 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2748 2748
2749 2749 MUTEX_ENTER(group->hxgep->genlock);
2750 2750 group->started = B_TRUE;
2751 2751 MUTEX_EXIT(group->hxgep->genlock);
2752 2752
2753 2753 return (0);
2754 2754 }
2755 2755
2756 2756 static void
2757 2757 hxge_rx_group_stop(mac_group_driver_t gdriver)
2758 2758 {
2759 2759 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2760 2760
2761 2761 ASSERT(group->hxgep != NULL);
2762 2762 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2763 2763 ASSERT(group->started == B_TRUE);
2764 2764
2765 2765 MUTEX_ENTER(group->hxgep->genlock);
2766 2766 group->started = B_FALSE;
2767 2767 MUTEX_EXIT(group->hxgep->genlock);
2768 2768 }
2769 2769
2770 2770 static int
2771 2771 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2772 2772 {
2773 2773 int i;
2774 2774
2775 2775 /*
2776 2776 * Find an open slot.
2777 2777 */
2778 2778 for (i = 0; i < hxgep->mmac.total; i++) {
2779 2779 if (!hxgep->mmac.addrs[i].set) {
2780 2780 *slot = i;
2781 2781 return (0);
2782 2782 }
2783 2783 }
2784 2784
2785 2785 return (ENXIO);
2786 2786 }
2787 2787
2788 2788 static int
2789 2789 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2790 2790 {
2791 2791 struct ether_addr eaddr;
2792 2792 hxge_status_t status = HXGE_OK;
2793 2793
2794 2794 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2795 2795
2796 2796 /*
2797 2797 * Set new interface local address and re-init device.
2798 2798 * This is destructive to any other streams attached
2799 2799 * to this device.
2800 2800 */
2801 2801 RW_ENTER_WRITER(&hxgep->filter_lock);
2802 2802 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2803 2803 RW_EXIT(&hxgep->filter_lock);
2804 2804 if (status != HXGE_OK)
2805 2805 return (status);
2806 2806
2807 2807 hxgep->mmac.addrs[slot].set = B_TRUE;
2808 2808 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2809 2809 hxgep->mmac.available--;
2810 2810 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2811 2811 hxgep->mmac.addrs[slot].primary = B_TRUE;
2812 2812
2813 2813 return (0);
2814 2814 }
2815 2815
2816 2816 static int
2817 2817 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2818 2818 {
2819 2819 int i, result;
2820 2820
2821 2821 for (i = 0; i < hxgep->mmac.total; i++) {
2822 2822 if (hxgep->mmac.addrs[i].set) {
2823 2823 result = memcmp(hxgep->mmac.addrs[i].addr,
2824 2824 addr, ETHERADDRL);
2825 2825 if (result == 0) {
2826 2826 *slot = i;
2827 2827 return (0);
2828 2828 }
2829 2829 }
2830 2830 }
2831 2831
2832 2832 return (EINVAL);
2833 2833 }
2834 2834
2835 2835 static int
2836 2836 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2837 2837 {
2838 2838 hxge_status_t status;
2839 2839 int i;
2840 2840
2841 2841 status = hxge_pfc_clear_mac_address(hxgep, slot);
2842 2842 if (status != HXGE_OK)
2843 2843 return (status);
2844 2844
2845 2845 for (i = 0; i < ETHERADDRL; i++)
2846 2846 hxgep->mmac.addrs[slot].addr[i] = 0;
2847 2847
2848 2848 hxgep->mmac.addrs[slot].set = B_FALSE;
2849 2849 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2850 2850 hxgep->mmac.addrs[slot].primary = B_FALSE;
2851 2851 hxgep->mmac.available++;
2852 2852
2853 2853 return (0);
2854 2854 }
2855 2855
2856 2856 static int
2857 2857 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2858 2858 {
2859 2859 hxge_ring_group_t *group = arg;
2860 2860 p_hxge_t hxgep = group->hxgep;
2861 2861 int slot = 0;
2862 2862
2863 2863 ASSERT(group->type == MAC_RING_TYPE_RX);
2864 2864
2865 2865 MUTEX_ENTER(hxgep->genlock);
2866 2866
2867 2867 /*
2868 2868 * Find a slot for the address.
2869 2869 */
2870 2870 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2871 2871 MUTEX_EXIT(hxgep->genlock);
2872 2872 return (ENOSPC);
2873 2873 }
2874 2874
2875 2875 /*
2876 2876 * Program the MAC address.
2877 2877 */
2878 2878 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2879 2879 MUTEX_EXIT(hxgep->genlock);
2880 2880 return (ENOSPC);
2881 2881 }
2882 2882
2883 2883 MUTEX_EXIT(hxgep->genlock);
2884 2884 return (0);
2885 2885 }
2886 2886
2887 2887 static int
2888 2888 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2889 2889 {
2890 2890 hxge_ring_group_t *group = arg;
2891 2891 p_hxge_t hxgep = group->hxgep;
2892 2892 int rv, slot;
2893 2893
2894 2894 ASSERT(group->type == MAC_RING_TYPE_RX);
2895 2895
2896 2896 MUTEX_ENTER(hxgep->genlock);
2897 2897
2898 2898 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2899 2899 MUTEX_EXIT(hxgep->genlock);
2900 2900 return (rv);
2901 2901 }
2902 2902
2903 2903 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2904 2904 MUTEX_EXIT(hxgep->genlock);
2905 2905 return (rv);
2906 2906 }
2907 2907
2908 2908 MUTEX_EXIT(hxgep->genlock);
2909 2909 return (0);
2910 2910 }
2911 2911
2912 2912 static void
2913 2913 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2914 2914 mac_group_info_t *infop, mac_group_handle_t gh)
2915 2915 {
2916 2916 p_hxge_t hxgep = arg;
2917 2917 hxge_ring_group_t *group;
2918 2918
2919 2919 ASSERT(type == MAC_RING_TYPE_RX);
2920 2920
2921 2921 switch (type) {
2922 2922 case MAC_RING_TYPE_RX:
2923 2923 group = &hxgep->rx_groups[groupid];
2924 2924 group->hxgep = hxgep;
2925 2925 group->ghandle = gh;
2926 2926 group->index = groupid;
2927 2927 group->type = type;
2928 2928
2929 2929 infop->mgi_driver = (mac_group_driver_t)group;
2930 2930 infop->mgi_start = hxge_rx_group_start;
2931 2931 infop->mgi_stop = hxge_rx_group_stop;
2932 2932 infop->mgi_addmac = hxge_rx_group_add_mac;
2933 2933 infop->mgi_remmac = hxge_rx_group_rem_mac;
2934 2934 infop->mgi_count = HXGE_MAX_RDCS;
2935 2935 break;
2936 2936
2937 2937 case MAC_RING_TYPE_TX:
2938 2938 default:
2939 2939 break;
2940 2940 }
2941 2941 }
2942 2942
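/*
 * Map an RX or TX DMA channel to the index of its entry in the interrupt
 * handle table, so the MAC layer can associate the ring with its DDI
 * interrupt handle.  Returns -1 if the channel has no logical device.
 */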
2943 2943 static int
2944 2944 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2945 2945 {
2946 2946 int i;
2947 2947
2948 2948 ASSERT(hxgep->ldgvp != NULL);
2949 2949
2950 2950 switch (type) {
2951 2951 case MAC_RING_TYPE_RX:
2952 2952 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2953 2953 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2954 2954 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2955 2955 return ((int)
2956 2956 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2957 2957 }
2958 2958 }
2959 2959 break;
2960 2960
2961 2961 case MAC_RING_TYPE_TX:
2962 2962 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2963 2963 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2964 2964 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2965 2965 return ((int)
2966 2966 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2967 2967 }
2968 2968 }
2969 2969 break;
2970 2970
2971 2971 default:
2972 2972 break;
2973 2973 }
2974 2974
2975 2975 return (-1);
2976 2976 }
2977 2977
2978 2978 /*
2979 2979 * Callback function for the GLDv3 layer to register all rings.
2980 2980 */
2981 2981 /*ARGSUSED*/
2982 2982 static void
2983 2983 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2984 2984 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2985 2985 {
2986 2986 p_hxge_t hxgep = arg;
2987 2987
2988 2988 ASSERT(hxgep != NULL);
2989 2989 ASSERT(infop != NULL);
2990 2990
2991 2991 switch (type) {
2992 2992 case MAC_RING_TYPE_TX: {
2993 2993 p_hxge_ring_handle_t rhp;
2994 2994 mac_intr_t *mintr = &infop->mri_intr;
2995 2995 p_hxge_intr_t intrp;
2996 2996 int htable_idx;
2997 2997
2998 2998 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2999 2999 rhp = &hxgep->tx_ring_handles[index];
3000 3000 rhp->hxgep = hxgep;
3001 3001 rhp->index = index;
3002 3002 rhp->ring_handle = rh;
3003 3003 infop->mri_driver = (mac_ring_driver_t)rhp;
3004 3004 infop->mri_start = hxge_tx_ring_start;
3005 3005 infop->mri_stop = hxge_tx_ring_stop;
3006 3006 infop->mri_tx = hxge_tx_ring_send;
3007 3007 infop->mri_stat = hxge_tx_ring_stat;
3008 3008
3009 3009 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3010 3010 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3011 3011 if (htable_idx >= 0)
3012 3012 mintr->mi_ddi_handle = intrp->htable[htable_idx];
3013 3013 else
3014 3014 mintr->mi_ddi_handle = NULL;
3015 3015 break;
3016 3016 }
3017 3017
3018 3018 case MAC_RING_TYPE_RX: {
3019 3019 p_hxge_ring_handle_t rhp;
3020 3020 mac_intr_t hxge_mac_intr;
3021 3021 p_hxge_intr_t intrp;
3022 3022 int htable_idx;
3023 3023
3024 3024 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3025 3025 rhp = &hxgep->rx_ring_handles[index];
3026 3026 rhp->hxgep = hxgep;
3027 3027 rhp->index = index;
3028 3028 rhp->ring_handle = rh;
3029 3029
3030 3030 /*
3031 3031 * Entrypoint to enable interrupt (disable poll) and
3032 3032 * disable interrupt (enable poll).
3033 3033 */
3034 3034 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3035 3035 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3036 3036 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
3037 3037
3038 3038 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3039 3039 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3040 3040 if (htable_idx >= 0)
3041 3041 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3042 3042 else
3043 3043 hxge_mac_intr.mi_ddi_handle = NULL;
3044 3044
3045 3045 infop->mri_driver = (mac_ring_driver_t)rhp;
3046 3046 infop->mri_start = hxge_rx_ring_start;
3047 3047 infop->mri_stop = hxge_rx_ring_stop;
3048 3048 infop->mri_intr = hxge_mac_intr;
3049 3049 infop->mri_poll = hxge_rx_poll;
3050 3050 infop->mri_stat = hxge_rx_ring_stat;
3051 3051 break;
3052 3052 }
3053 3053
3054 3054 default:
3055 3055 break;
3056 3056 }
3057 3057 }
3058 3058
3059 3059 /*ARGSUSED*/
3060 3060 boolean_t
3061 3061 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3062 3062 {
3063 3063 p_hxge_t hxgep = arg;
3064 3064
3065 3065 switch (cap) {
3066 3066 case MAC_CAPAB_HCKSUM: {
3067 3067 uint32_t *txflags = cap_data;
3068 3068
3069 3069 *txflags = HCKSUM_INET_PARTIAL;
3070 3070 break;
3071 3071 }
3072 3072
3073 3073 case MAC_CAPAB_RINGS: {
3074 3074 mac_capab_rings_t *cap_rings = cap_data;
3075 3075
3076 3076 MUTEX_ENTER(hxgep->genlock);
3077 3077 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3078 3078 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3079 3079 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3080 3080 cap_rings->mr_rget = hxge_fill_ring;
3081 3081 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3082 3082 cap_rings->mr_gget = hxge_group_get;
3083 3083 cap_rings->mr_gaddring = NULL;
3084 3084 cap_rings->mr_gremring = NULL;
3085 3085 } else {
3086 3086 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3087 3087 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3088 3088 cap_rings->mr_rget = hxge_fill_ring;
3089 3089 cap_rings->mr_gnum = 0;
3090 3090 cap_rings->mr_gget = NULL;
3091 3091 cap_rings->mr_gaddring = NULL;
3092 3092 cap_rings->mr_gremring = NULL;
3093 3093 }
3094 3094 MUTEX_EXIT(hxgep->genlock);
3095 3095 break;
3096 3096 }
3097 3097
3098 3098 default:
3099 3099 return (B_FALSE);
3100 3100 }
3101 3101 return (B_TRUE);
3102 3102 }
3103 3103
3104 3104 static boolean_t
3105 3105 hxge_param_locked(mac_prop_id_t pr_num)
3106 3106 {
3107 3107 /*
3108 3108 * All adv_* parameters are locked (read-only) while
3109 3109 * the device is in any sort of loopback mode ...
3110 3110 */
3111 3111 switch (pr_num) {
3112 3112 case MAC_PROP_ADV_1000FDX_CAP:
3113 3113 case MAC_PROP_EN_1000FDX_CAP:
3114 3114 case MAC_PROP_ADV_1000HDX_CAP:
3115 3115 case MAC_PROP_EN_1000HDX_CAP:
3116 3116 case MAC_PROP_ADV_100FDX_CAP:
3117 3117 case MAC_PROP_EN_100FDX_CAP:
3118 3118 case MAC_PROP_ADV_100HDX_CAP:
3119 3119 case MAC_PROP_EN_100HDX_CAP:
3120 3120 case MAC_PROP_ADV_10FDX_CAP:
3121 3121 case MAC_PROP_EN_10FDX_CAP:
3122 3122 case MAC_PROP_ADV_10HDX_CAP:
3123 3123 case MAC_PROP_EN_10HDX_CAP:
3124 3124 case MAC_PROP_AUTONEG:
3125 3125 case MAC_PROP_FLOWCTRL:
3126 3126 return (B_TRUE);
3127 3127 }
3128 3128 return (B_FALSE);
3129 3129 }
3130 3130
3131 3131 /*
3132 3132 * callback functions for set/get of properties
3133 3133 */
3134 3134 static int
3135 3135 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3136 3136 uint_t pr_valsize, const void *pr_val)
3137 3137 {
3138 3138 hxge_t *hxgep = barg;
3139 3139 p_hxge_stats_t statsp;
3140 3140 int err = 0;
3141 3141 uint32_t new_mtu, old_framesize, new_framesize;
3142 3142
3143 3143 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3144 3144
3145 3145 statsp = hxgep->statsp;
3146 3146 MUTEX_ENTER(hxgep->genlock);
3147 3147 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3148 3148 hxge_param_locked(pr_num)) {
3149 3149 /*
3150 3150 * All adv_* parameters are locked (read-only)
3151 3151 * while the device is in any sort of loopback mode.
3152 3152 */
3153 3153 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3154 3154 "==> hxge_m_setprop: loopback mode: read only"));
3155 3155 MUTEX_EXIT(hxgep->genlock);
3156 3156 return (EBUSY);
3157 3157 }
3158 3158
3159 3159 switch (pr_num) {
3160 3160 /*
3161 3161 	 * These properties either do not exist or are read-only
3162 3162 */
3163 3163 case MAC_PROP_EN_1000FDX_CAP:
3164 3164 case MAC_PROP_EN_100FDX_CAP:
3165 3165 case MAC_PROP_EN_10FDX_CAP:
3166 3166 case MAC_PROP_EN_1000HDX_CAP:
3167 3167 case MAC_PROP_EN_100HDX_CAP:
3168 3168 case MAC_PROP_EN_10HDX_CAP:
3169 3169 case MAC_PROP_ADV_1000FDX_CAP:
3170 3170 case MAC_PROP_ADV_1000HDX_CAP:
3171 3171 case MAC_PROP_ADV_100FDX_CAP:
3172 3172 case MAC_PROP_ADV_100HDX_CAP:
3173 3173 case MAC_PROP_ADV_10FDX_CAP:
3174 3174 case MAC_PROP_ADV_10HDX_CAP:
3175 3175 case MAC_PROP_STATUS:
3176 3176 case MAC_PROP_SPEED:
3177 3177 case MAC_PROP_DUPLEX:
3178 3178 case MAC_PROP_AUTONEG:
3179 3179 /*
3180 3180 * Flow control is handled in the shared domain and
3181 3181 * it is readonly here.
3182 3182 		 * it is read-only here.
3183 3183 case MAC_PROP_FLOWCTRL:
3184 3184 err = EINVAL;
3185 3185 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3186 3186 "==> hxge_m_setprop: read only property %d",
3187 3187 pr_num));
3188 3188 break;
3189 3189
3190 3190 case MAC_PROP_MTU:
3191 3191 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3192 3192 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3193 3193 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3194 3194
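		/*
		 * Convert the requested MTU to the full frame size used by
		 * the VMAC (new_mtu + MTU_TO_FRAME_SIZE) before range
		 * checking and applying it.
		 */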
3195 3195 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3196 3196 if (new_framesize == hxgep->vmac.maxframesize) {
3197 3197 err = 0;
3198 3198 break;
3199 3199 }
3200 3200
3201 3201 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3202 3202 err = EBUSY;
3203 3203 break;
3204 3204 }
3205 3205
3206 3206 if (new_framesize < MIN_FRAME_SIZE ||
3207 3207 new_framesize > MAX_FRAME_SIZE) {
3208 3208 err = EINVAL;
3209 3209 break;
3210 3210 }
3211 3211
3212 3212 old_framesize = hxgep->vmac.maxframesize;
3213 3213 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3214 3214
3215 3215 if (hxge_vmac_set_framesize(hxgep)) {
3216 3216 hxgep->vmac.maxframesize =
3217 3217 (uint16_t)old_framesize;
3218 3218 err = EINVAL;
3219 3219 break;
3220 3220 }
3221 3221
3222 3222 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3223 3223 if (err) {
3224 3224 hxgep->vmac.maxframesize =
3225 3225 (uint16_t)old_framesize;
3226 3226 (void) hxge_vmac_set_framesize(hxgep);
3227 3227 }
3228 3228
3229 3229 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3230 3230 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3231 3231 new_mtu, hxgep->vmac.maxframesize));
3232 3232 break;
3233 3233
3234 3234 case MAC_PROP_PRIVATE:
3235 3235 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3236 3236 "==> hxge_m_setprop: private property"));
3237 3237 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3238 3238 pr_val);
3239 3239 break;
3240 3240
3241 3241 default:
3242 3242 err = ENOTSUP;
3243 3243 break;
3244 3244 }
3245 3245
3246 3246 MUTEX_EXIT(hxgep->genlock);
3247 3247
3248 3248 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3249 3249 "<== hxge_m_setprop (return %d)", err));
3250 3250
3251 3251 return (err);
3252 3252 }
3253 3253
3254 3254 static int
3255 3255 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3256 3256 uint_t pr_valsize, void *pr_val)
3257 3257 {
3258 3258 hxge_t *hxgep = barg;
3259 3259 p_hxge_stats_t statsp = hxgep->statsp;
3260 3260 int err = 0;
3261 3261 link_flowctrl_t fl;
3262 3262 uint64_t tmp = 0;
3263 3263 link_state_t ls;
3264 3264
3265 3265 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3266 3266 "==> hxge_m_getprop: pr_num %d", pr_num));
3267 3267
3268 3268 switch (pr_num) {
3269 3269 case MAC_PROP_DUPLEX:
3270 3270 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3271 3271 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3272 3272 "==> hxge_m_getprop: duplex mode %d",
3273 3273 *(uint8_t *)pr_val));
3274 3274 break;
3275 3275
3276 3276 case MAC_PROP_SPEED:
3277 3277 ASSERT(pr_valsize >= sizeof (uint64_t));
3278 3278 tmp = statsp->mac_stats.link_speed * 1000000ull;
3279 3279 bcopy(&tmp, pr_val, sizeof (tmp));
3280 3280 break;
3281 3281
3282 3282 case MAC_PROP_STATUS:
3283 3283 ASSERT(pr_valsize >= sizeof (link_state_t));
3284 3284 if (!statsp->mac_stats.link_up)
3285 3285 ls = LINK_STATE_DOWN;
3286 3286 else
3287 3287 ls = LINK_STATE_UP;
3288 3288 bcopy(&ls, pr_val, sizeof (ls));
3289 3289 break;
3290 3290
3291 3291 case MAC_PROP_FLOWCTRL:
3292 3292 /*
3293 3293 * Flow control is supported by the shared domain and
3294 3294 * it is currently transmit only
3295 3295 */
3296 3296 		ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3297 3297 fl = LINK_FLOWCTRL_TX;
3298 3298 bcopy(&fl, pr_val, sizeof (fl));
3299 3299 break;
3300 3300 case MAC_PROP_AUTONEG:
3301 3301 /* 10G link only and it is not negotiable */
3302 3302 *(uint8_t *)pr_val = 0;
3303 3303 break;
3304 3304 case MAC_PROP_ADV_1000FDX_CAP:
3305 3305 case MAC_PROP_ADV_100FDX_CAP:
3306 3306 case MAC_PROP_ADV_10FDX_CAP:
3307 3307 case MAC_PROP_ADV_1000HDX_CAP:
3308 3308 case MAC_PROP_ADV_100HDX_CAP:
3309 3309 case MAC_PROP_ADV_10HDX_CAP:
3310 3310 case MAC_PROP_EN_1000FDX_CAP:
3311 3311 case MAC_PROP_EN_100FDX_CAP:
3312 3312 case MAC_PROP_EN_10FDX_CAP:
3313 3313 case MAC_PROP_EN_1000HDX_CAP:
3314 3314 case MAC_PROP_EN_100HDX_CAP:
3315 3315 case MAC_PROP_EN_10HDX_CAP:
3316 3316 err = ENOTSUP;
3317 3317 break;
3318 3318
3319 3319 case MAC_PROP_PRIVATE:
3320 3320 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3321 3321 pr_val);
3322 3322 break;
3323 3323
3324 3324 default:
3325 3325 err = ENOTSUP;
3326 3326 break;
3327 3327 }
3328 3328
3329 3329 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3330 3330
3331 3331 return (err);
3332 3332 }
3333 3333
3334 3334 static void
3335 3335 hxge_m_propinfo(void *arg, const char *pr_name,
3336 3336 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3337 3337 {
3338 3338 _NOTE(ARGUNUSED(arg));
3339 3339 switch (pr_num) {
3340 3340 case MAC_PROP_DUPLEX:
3341 3341 case MAC_PROP_SPEED:
3342 3342 case MAC_PROP_STATUS:
3343 3343 case MAC_PROP_AUTONEG:
3344 3344 case MAC_PROP_FLOWCTRL:
3345 3345 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3346 3346 break;
3347 3347
3348 3348 case MAC_PROP_MTU:
3349 3349 mac_prop_info_set_range_uint32(prh,
3350 3350 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3351 3351 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3352 3352 break;
3353 3353
3354 3354 case MAC_PROP_PRIVATE: {
3355 3355 char valstr[MAXNAMELEN];
3356 3356
3357 3357 bzero(valstr, sizeof (valstr));
3358 3358
3359 3359 /* Receive Interrupt Blanking Parameters */
3360 3360 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3361 3361 (void) snprintf(valstr, sizeof (valstr), "%d",
3362 3362 RXDMA_RCR_TO_DEFAULT);
3363 3363 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3364 3364 (void) snprintf(valstr, sizeof (valstr), "%d",
3365 3365 RXDMA_RCR_PTHRES_DEFAULT);
3366 3366
3367 3367 /* Classification and Load Distribution Configuration */
3368 3368 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3369 3369 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3370 3370 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3371 3371 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3372 3372 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3373 3373 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3374 3374 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3375 3375 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3376 3376 (void) snprintf(valstr, sizeof (valstr), "%d",
3377 3377 HXGE_CLASS_TCAM_LOOKUP);
3378 3378 }
3379 3379
3380 3380 if (strlen(valstr) > 0)
3381 3381 mac_prop_info_set_default_str(prh, valstr);
3382 3382 break;
3383 3383 }
3384 3384 }
3385 3385 }
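The per-property default strings set above are what dladm reports for the driver's private properties. As a usage example (the link name hxge0 is illustrative, not taken from this file):

	dladm show-linkprop -p _rxdma_intr_time hxge0

This would show the decimal default set here next to the current value, which hxge_get_priv_prop() below formats as a "0x..." string.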
3386 3386
3387 3387
3388 3388 /* ARGSUSED */
3389 3389 static int
3390 3390 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3391 3391 const void *pr_val)
3392 3392 {
3393 3393 p_hxge_param_t param_arr = hxgep->param_arr;
3394 3394 int err = 0;
3395 3395
3396 3396 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3397 3397 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3398 3398
3399 3399 if (pr_val == NULL) {
3400 3400 return (EINVAL);
3401 3401 }
3402 3402
3403 3403 /* Blanking */
3404 3404 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3405 3405 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3406 3406 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3407 3407 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3408 3408 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3409 3409 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3410 3410
3411 3411 /* Classification */
3412 3412 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3413 3413 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3414 3414 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3415 3415 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3416 3416 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3417 3417 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3418 3418 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3419 3419 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3420 3420 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3421 3421 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3422 3422 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3423 3423 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3424 3424 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3425 3425 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3426 3426 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3427 3427 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3428 3428 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3429 3429 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3430 3430 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3431 3431 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3432 3432 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3433 3433 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3434 3434 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3435 3435 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3436 3436 } else {
3437 3437 err = ENOTSUP;
3438 3438 }
3439 3439
3440 3440 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3441 3441 "<== hxge_set_priv_prop: err %d", err));
3442 3442
3443 3443 return (err);
3444 3444 }
3445 3445
3446 3446 static int
3447 3447 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3448 3448 void *pr_val)
3449 3449 {
3450 3450 p_hxge_param_t param_arr = hxgep->param_arr;
3451 3451 char valstr[MAXNAMELEN];
3452 3452 int err = 0;
3453 3453 uint_t strsize;
3454 3454 int value = 0;
3455 3455
3456 3456 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3457 3457 "==> hxge_get_priv_prop: property %s", pr_name));
3458 3458
3459 3459 /* Receive Interrupt Blanking Parameters */
3460 3460 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3461 3461 value = hxgep->intr_timeout;
3462 3462 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3463 3463 value = hxgep->intr_threshold;
3464 3464
3465 3465 /* Classification and Load Distribution Configuration */
3466 3466 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3467 3467 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3468 3468 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3469 3469
3470 3470 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3471 3471 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3472 3472 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3473 3473 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3474 3474
3475 3475 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3476 3476 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3477 3477 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3478 3478 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3479 3479
3480 3480 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3481 3481 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3482 3482 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3483 3483 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3484 3484
3485 3485 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3486 3486 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3487 3487 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3488 3488 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3489 3489
3490 3490 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3491 3491 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3492 3492 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3493 3493 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3494 3494
3495 3495 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3496 3496 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3497 3497 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3498 3498 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3499 3499
3500 3500 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3501 3501 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3502 3502 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3503 3503 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3504 3504
3505 3505 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3506 3506 } else {
3507 3507 err = ENOTSUP;
3508 3508 }
3509 3509
3510 3510 if (err == 0) {
3511 3511 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3512 3512
3513 3513 strsize = (uint_t)strlen(valstr);
3514 3514 if (pr_valsize < strsize) {
3515 3515 err = ENOBUFS;
3516 3516 } else {
3517 3517 (void) strlcpy(pr_val, valstr, pr_valsize);
3518 3518 }
3519 3519 }
3520 3520
3521 3521 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3522 3522 "<== hxge_get_priv_prop: return %d", err));
3523 3523
3524 3524 return (err);
3525 3525 }
3526 3526 /*
3527 3527 * Module loading and removing entry points.
3528 3528 */
3529 3529 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3530 3530 nodev, NULL, D_MP, NULL, NULL);
3531 3531
3532 3532 extern struct mod_ops mod_driverops;
3533 3533
3534 3534 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3535 3535
3536 3536 /*
3537 3537 * Module linkage information for the kernel.
3538 3538 */
3539 3539 static struct modldrv hxge_modldrv = {
3540 3540 &mod_driverops,
3541 3541 HXGE_DESC_VER,
3542 3542 &hxge_dev_ops
3543 3543 };
3544 3544
3545 3545 static struct modlinkage modlinkage = {
3546 - MODREV_1, (void *) &hxge_modldrv, NULL
3546 + MODREV_1, { (void *) &hxge_modldrv, NULL }
3547 3547 };
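This brace change is the substance of the diff in this file: the second member of struct modlinkage (ml_linkage) is an array, so the old flat initializer trips gcc's -Wmissing-braces once that warning is no longer suppressed in Makefile.uts, while the inner braces silence it without changing the initialized values. A minimal sketch of the pattern, using a simplified stand-in rather than the real <sys/modctl.h> definition:

	/* Simplified stand-in; the real struct modlinkage has more slots. */
	struct ml_like {
		int	rev;
		void	*linkage[2];	/* array member, like ml_linkage[] */
	};

	static int dummy;

	/* Flat initializer: valid C, but warns under -Wmissing-braces. */
	static struct ml_like warns = { 1, (void *)&dummy, NULL };

	/* Array elements wrapped in their own braces: no warning. */
	static struct ml_like clean = { 1, { (void *)&dummy, NULL } };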
3548 3548
3549 3549 int
3550 3550 _init(void)
3551 3551 {
3552 3552 int status;
3553 3553
3554 3554 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3555 3555 mac_init_ops(&hxge_dev_ops, "hxge");
3556 3556 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3557 3557 if (status != 0) {
3558 3558 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3559 3559 "failed to init device soft state"));
3560 3560 mac_fini_ops(&hxge_dev_ops);
3561 3561 goto _init_exit;
3562 3562 }
3563 3563
3564 3564 status = mod_install(&modlinkage);
3565 3565 if (status != 0) {
3566 3566 ddi_soft_state_fini(&hxge_list);
3567 3567 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3568 3568 goto _init_exit;
3569 3569 }
3570 3570
3571 3571 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3572 3572
3573 3573 _init_exit:
3574 3574 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3575 3575
3576 3576 return (status);
3577 3577 }
3578 3578
3579 3579 int
3580 3580 _fini(void)
3581 3581 {
3582 3582 int status;
3583 3583
3584 3584 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3585 3585
3586 3586 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3587 3587
3588 3588 if (hxge_mblks_pending)
3589 3589 return (EBUSY);
3590 3590
3591 3591 status = mod_remove(&modlinkage);
3592 3592 if (status != DDI_SUCCESS) {
3593 3593 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3594 3594 "Module removal failed 0x%08x", status));
3595 3595 goto _fini_exit;
3596 3596 }
3597 3597
3598 3598 mac_fini_ops(&hxge_dev_ops);
3599 3599
3600 3600 ddi_soft_state_fini(&hxge_list);
3601 3601
3602 3602 MUTEX_DESTROY(&hxge_common_lock);
3603 3603
3604 3604 _fini_exit:
3605 3605 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3606 3606
3607 3607 return (status);
3608 3608 }
3609 3609
3610 3610 int
3611 3611 _info(struct modinfo *modinfop)
3612 3612 {
3613 3613 int status;
3614 3614
3615 3615 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3616 3616 status = mod_info(&modlinkage, modinfop);
3617 3617 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3618 3618
3619 3619 return (status);
3620 3620 }
3621 3621
3622 3622 /*ARGSUSED*/
3623 3623 static hxge_status_t
3624 3624 hxge_add_intrs(p_hxge_t hxgep)
3625 3625 {
3626 3626 int intr_types;
3627 3627 int type = 0;
3628 3628 int ddi_status = DDI_SUCCESS;
3629 3629 hxge_status_t status = HXGE_OK;
3630 3630
3631 3631 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3632 3632
3633 3633 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3634 3634 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3635 3635 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3636 3636 hxgep->hxge_intr_type.intr_added = 0;
3637 3637 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3638 3638 hxgep->hxge_intr_type.intr_type = 0;
3639 3639
3640 3640 if (hxge_msi_enable) {
3641 3641 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3642 3642 }
3643 3643
3644 3644 /* Get the supported interrupt types */
3645 3645 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3646 3646 != DDI_SUCCESS) {
3647 3647 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3648 3648 "ddi_intr_get_supported_types failed: status 0x%08x",
3649 3649 ddi_status));
3650 3650 return (HXGE_ERROR | HXGE_DDI_FAILED);
3651 3651 }
3652 3652
3653 3653 hxgep->hxge_intr_type.intr_types = intr_types;
3654 3654
3655 3655 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3656 3656 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3657 3657
3658 3658 /*
3659 3659 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable:
3660 3660 * (1): 1 - MSI
3661 3661 * (2): 2 - MSI-X
3662 3662 * others - FIXED
3663 3663 */
3664 3664 switch (hxge_msi_enable) {
3665 3665 default:
3666 3666 type = DDI_INTR_TYPE_FIXED;
3667 3667 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3668 3668 "use fixed (intx emulation) type %08x", type));
3669 3669 break;
3670 3670
3671 3671 case 2:
3672 3672 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3673 3673 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3674 3674 if (intr_types & DDI_INTR_TYPE_MSIX) {
3675 3675 type = DDI_INTR_TYPE_MSIX;
3676 3676 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3677 3677 "==> hxge_add_intrs: "
3678 3678 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3679 3679 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3680 3680 type = DDI_INTR_TYPE_MSI;
3681 3681 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3682 3682 "==> hxge_add_intrs: "
3683 3683 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3684 3684 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3685 3685 type = DDI_INTR_TYPE_FIXED;
3686 3686 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3687 3687 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3688 3688 }
3689 3689 break;
3690 3690
3691 3691 case 1:
3692 3692 if (intr_types & DDI_INTR_TYPE_MSI) {
3693 3693 type = DDI_INTR_TYPE_MSI;
3694 3694 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3695 3695 "==> hxge_add_intrs: "
3696 3696 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3697 3697 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3698 3698 type = DDI_INTR_TYPE_MSIX;
3699 3699 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3700 3700 "==> hxge_add_intrs: "
3701 3701 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3702 3702 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3703 3703 type = DDI_INTR_TYPE_FIXED;
3704 3704 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3705 3705 "==> hxge_add_intrs: "
3706 3706 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3707 3707 }
3708 3708 }
3709 3709
3710 3710 hxgep->hxge_intr_type.intr_type = type;
3711 3711 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3712 3712 type == DDI_INTR_TYPE_FIXED) &&
3713 3713 hxgep->hxge_intr_type.niu_msi_enable) {
3714 3714 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3715 3715 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3716 3716 " hxge_add_intrs: "
3717 3717 " hxge_add_intrs_adv failed: status 0x%08x",
3718 3718 status));
3719 3719 return (status);
3720 3720 } else {
3721 3721 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3722 3722 "interrupts registered : type %d", type));
3723 3723 hxgep->hxge_intr_type.intr_registered = B_TRUE;
3724 3724
3725 3725 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3726 3726 "\nAdded advanced hxge add_intr_adv "
3727 3727 "intr type 0x%x\n", type));
3728 3728
3729 3729 return (status);
3730 3730 }
3731 3731 }
3732 3732
3733 3733 if (!hxgep->hxge_intr_type.intr_registered) {
3734 3734 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3735 3735 "==> hxge_add_intrs: failed to register interrupts"));
3736 3736 return (HXGE_ERROR | HXGE_DDI_FAILED);
3737 3737 }
3738 3738
3739 3739 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3740 3740
3741 3741 return (status);
3742 3742 }
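Which branch of the switch above runs is controlled by the hxge_msi_enable tunable described in the comment: 2 asks for MSI-X first, 1 asks for MSI first, and any other value falls back to fixed (INTx) interrupts, constrained in every case by what ddi_intr_get_supported_types() reports. To force a mode while debugging, a plausible /etc/system entry (the value 1 is only an example) would be:

	set hxge:hxge_msi_enable = 1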
3743 3743
3744 3744 /*ARGSUSED*/
3745 3745 static hxge_status_t
3746 3746 hxge_add_intrs_adv(p_hxge_t hxgep)
3747 3747 {
3748 3748 int intr_type;
3749 3749 p_hxge_intr_t intrp;
3750 3750 hxge_status_t status;
3751 3751
3752 3752 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3753 3753
3754 3754 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3755 3755 intr_type = intrp->intr_type;
3756 3756
3757 3757 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3758 3758 intr_type));
3759 3759
3760 3760 switch (intr_type) {
3761 3761 case DDI_INTR_TYPE_MSI: /* 0x2 */
3762 3762 case DDI_INTR_TYPE_MSIX: /* 0x4 */
3763 3763 status = hxge_add_intrs_adv_type(hxgep, intr_type);
3764 3764 break;
3765 3765
3766 3766 case DDI_INTR_TYPE_FIXED: /* 0x1 */
3767 3767 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3768 3768 break;
3769 3769
3770 3770 default:
3771 3771 status = HXGE_ERROR;
3772 3772 break;
3773 3773 }
3774 3774
3775 3775 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3776 3776
3777 3777 return (status);
3778 3778 }
3779 3779
3780 3780 /*ARGSUSED*/
3781 3781 static hxge_status_t
3782 3782 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3783 3783 {
3784 3784 dev_info_t *dip = hxgep->dip;
3785 3785 p_hxge_ldg_t ldgp;
3786 3786 p_hxge_intr_t intrp;
3787 3787 uint_t *inthandler;
3788 3788 void *arg1, *arg2;
3789 3789 int behavior;
3790 3790 int nintrs, navail;
3791 3791 int nactual, nrequired, nrequest;
3792 3792 int inum = 0;
3793 3793 int loop = 0;
3794 3794 int x, y;
3795 3795 int ddi_status = DDI_SUCCESS;
3796 3796 hxge_status_t status = HXGE_OK;
3797 3797
3798 3798 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3799 3799
3800 3800 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3801 3801
3802 3802 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3803 3803 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3804 3804 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3805 3805 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3806 3806 "nintrs: %d", ddi_status, nintrs));
3807 3807 return (HXGE_ERROR | HXGE_DDI_FAILED);
3808 3808 }
3809 3809
3810 3810 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3811 3811 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3812 3812 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3813 3813 "ddi_intr_get_navail() failed, status: 0x%x%, "
3814 3814 "nintrs: %d", ddi_status, navail));
3815 3815 return (HXGE_ERROR | HXGE_DDI_FAILED);
3816 3816 }
3817 3817
3818 3818 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3819 3819 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3820 3820 int_type, nintrs, navail));
3821 3821
3822 3822 /* PSARC/2007/453 MSI-X interrupt limit override */
3823 3823 if (int_type == DDI_INTR_TYPE_MSIX) {
3824 3824 nrequest = hxge_create_msi_property(hxgep);
3825 3825 if (nrequest < navail) {
3826 3826 navail = nrequest;
3827 3827 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3828 3828 "hxge_add_intrs_adv_type: nintrs %d "
3829 3829 "navail %d (nrequest %d)",
3830 3830 nintrs, navail, nrequest));
3831 3831 }
3832 3832 }
3833 3833
3834 3834 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3835 3835 /* MSI must be power of 2 */
3836 3836 if ((navail & 16) == 16) {
3837 3837 navail = 16;
3838 3838 } else if ((navail & 8) == 8) {
3839 3839 navail = 8;
3840 3840 } else if ((navail & 4) == 4) {
3841 3841 navail = 4;
3842 3842 } else if ((navail & 2) == 2) {
3843 3843 navail = 2;
3844 3844 } else {
3845 3845 navail = 1;
3846 3846 }
3847 3847 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3848 3848 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3849 3849 "navail %d", nintrs, navail));
3850 3850 }
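The bit tests above round navail down to the largest power of two, capped at 16, because PCI MSI only supports vector counts that are powers of two. A functionally equivalent helper (hypothetical name, not part of the driver) that matches the checks above for the small counts seen here, assuming navail >= 1:

	/* Round navail down to the largest power of two, capped at 16. */
	static int
	hxge_round_navail_pow2(int navail)
	{
		int	p;

		for (p = 16; p > 1; p >>= 1) {
			if (navail & p)
				break;
		}
		return (p);
	}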
3851 3851
3852 3852 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3853 3853 "requesting: intr type %d nintrs %d, navail %d",
3854 3854 int_type, nintrs, navail));
3855 3855
3856 3856 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3857 3857 DDI_INTR_ALLOC_NORMAL);
3858 3858 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3859 3859 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3860 3860
3861 3861 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3862 3862 navail, &nactual, behavior);
3863 3863 if (ddi_status != DDI_SUCCESS || nactual == 0) {
3864 3864 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3865 3865 " ddi_intr_alloc() failed: %d", ddi_status));
3866 3866 kmem_free(intrp->htable, intrp->intr_size);
3867 3867 return (HXGE_ERROR | HXGE_DDI_FAILED);
3868 3868 }
3869 3869
3870 3870 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3871 3871 "ddi_intr_alloc() returned: navail %d nactual %d",
3872 3872 navail, nactual));
3873 3873
3874 3874 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3875 3875 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3876 3876 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3877 3877 " ddi_intr_get_pri() failed: %d", ddi_status));
3878 3878 /* Free already allocated interrupts */
3879 3879 for (y = 0; y < nactual; y++) {
3880 3880 (void) ddi_intr_free(intrp->htable[y]);
3881 3881 }
3882 3882
3883 3883 kmem_free(intrp->htable, intrp->intr_size);
3884 3884 return (HXGE_ERROR | HXGE_DDI_FAILED);
3885 3885 }
3886 3886
3887 3887 nrequired = 0;
3888 3888 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3889 3889 if (status != HXGE_OK) {
3890 3890 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3891 3891 "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3892 3892 "failed: 0x%x", status));
3893 3893 /* Free already allocated interrupts */
3894 3894 for (y = 0; y < nactual; y++) {
3895 3895 (void) ddi_intr_free(intrp->htable[y]);
3896 3896 }
3897 3897
3898 3898 kmem_free(intrp->htable, intrp->intr_size);
3899 3899 return (status);
3900 3900 }
3901 3901
3902 3902 ldgp = hxgep->ldgvp->ldgp;
3903 3903 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3904 3904 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3905 3905
3906 3906 if (nactual < nrequired)
3907 3907 loop = nactual;
3908 3908 else
3909 3909 loop = nrequired;
3910 3910
3911 3911 for (x = 0; x < loop; x++, ldgp++) {
3912 3912 ldgp->vector = (uint8_t)x;
3913 3913 arg1 = ldgp->ldvp;
3914 3914 arg2 = hxgep;
3915 3915 if (ldgp->nldvs == 1) {
3916 3916 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3917 3917 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3918 3918 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3919 3919 "1-1 int handler (entry %d)\n",
3920 3920 arg1, arg2, x));
3921 3921 } else if (ldgp->nldvs > 1) {
3922 3922 inthandler = (uint_t *)ldgp->sys_intr_handler;
3923 3923 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3924 3924 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3925 3925 "nldevs %d int handler (entry %d)\n",
3926 3926 arg1, arg2, ldgp->nldvs, x));
3927 3927 }
3928 3928 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3929 3929 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3930 3930 "htable 0x%llx", x, intrp->htable[x]));
3931 3931
3932 3932 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3933 3933 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3934 3934 DDI_SUCCESS) {
3935 3935 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3936 3936 "==> hxge_add_intrs_adv_type: failed #%d "
3937 3937 "status 0x%x", x, ddi_status));
3938 3938 for (y = 0; y < intrp->intr_added; y++) {
3939 3939 (void) ddi_intr_remove_handler(
3940 3940 intrp->htable[y]);
3941 3941 }
3942 3942
3943 3943 /* Free already allocated intr */
3944 3944 for (y = 0; y < nactual; y++) {
3945 3945 (void) ddi_intr_free(intrp->htable[y]);
3946 3946 }
3947 3947 kmem_free(intrp->htable, intrp->intr_size);
3948 3948
3949 3949 (void) hxge_ldgv_uninit(hxgep);
3950 3950
3951 3951 return (HXGE_ERROR | HXGE_DDI_FAILED);
3952 3952 }
3953 3953
3954 3954 ldgp->htable_idx = x;
3955 3955 intrp->intr_added++;
3956 3956 }
3957 3957 intrp->msi_intx_cnt = nactual;
3958 3958
3959 3959 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3960 3960 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3961 3961 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3962 3962
3963 3963 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3964 3964 (void) hxge_intr_ldgv_init(hxgep);
3965 3965
3966 3966 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3967 3967
3968 3968 return (status);
3969 3969 }
3970 3970
3971 3971 /*ARGSUSED*/
3972 3972 static hxge_status_t
3973 3973 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3974 3974 {
3975 3975 dev_info_t *dip = hxgep->dip;
3976 3976 p_hxge_ldg_t ldgp;
3977 3977 p_hxge_intr_t intrp;
3978 3978 uint_t *inthandler;
3979 3979 void *arg1, *arg2;
3980 3980 int behavior;
3981 3981 int nintrs, navail;
3982 3982 int nactual, nrequired;
3983 3983 int inum = 0;
3984 3984 int x, y;
3985 3985 int ddi_status = DDI_SUCCESS;
3986 3986 hxge_status_t status = HXGE_OK;
3987 3987
3988 3988 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3989 3989 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3990 3990
3991 3991 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3992 3992 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3993 3993 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3994 3994 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3995 3995 "nintrs: %d", status, nintrs));
3996 3996 return (HXGE_ERROR | HXGE_DDI_FAILED);
3997 3997 }
3998 3998
3999 3999 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4000 4000 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4001 4001 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4002 4002 "ddi_intr_get_navail() failed, status: 0x%x%, "
4003 4003 "nintrs: %d", ddi_status, navail));
4004 4004 return (HXGE_ERROR | HXGE_DDI_FAILED);
4005 4005 }
4006 4006
4007 4007 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4008 4008 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
4009 4009 nintrs, navail));
4010 4010
4011 4011 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4012 4012 DDI_INTR_ALLOC_NORMAL);
4013 4013 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4014 4014 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4015 4015 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4016 4016 navail, &nactual, behavior);
4017 4017 if (ddi_status != DDI_SUCCESS || nactual == 0) {
4018 4018 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4019 4019 " ddi_intr_alloc() failed: %d", ddi_status));
4020 4020 kmem_free(intrp->htable, intrp->intr_size);
4021 4021 return (HXGE_ERROR | HXGE_DDI_FAILED);
4022 4022 }
4023 4023
4024 4024 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4025 4025 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4026 4026 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4027 4027 " ddi_intr_get_pri() failed: %d", ddi_status));
4028 4028 /* Free already allocated interrupts */
4029 4029 for (y = 0; y < nactual; y++) {
4030 4030 (void) ddi_intr_free(intrp->htable[y]);
4031 4031 }
4032 4032
4033 4033 kmem_free(intrp->htable, intrp->intr_size);
4034 4034 return (HXGE_ERROR | HXGE_DDI_FAILED);
4035 4035 }
4036 4036
4037 4037 nrequired = 0;
4038 4038 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4039 4039 if (status != HXGE_OK) {
4040 4040 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4041 4041 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4042 4042 "failed: 0x%x", status));
4043 4043 /* Free already allocated interrupts */
4044 4044 for (y = 0; y < nactual; y++) {
4045 4045 (void) ddi_intr_free(intrp->htable[y]);
4046 4046 }
4047 4047
4048 4048 kmem_free(intrp->htable, intrp->intr_size);
4049 4049 return (status);
4050 4050 }
4051 4051
4052 4052 ldgp = hxgep->ldgvp->ldgp;
4053 4053 for (x = 0; x < nrequired; x++, ldgp++) {
4054 4054 ldgp->vector = (uint8_t)x;
4055 4055 arg1 = ldgp->ldvp;
4056 4056 arg2 = hxgep;
4057 4057 if (ldgp->nldvs == 1) {
4058 4058 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4059 4059 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4060 4060 "hxge_add_intrs_adv_type_fix: "
4061 4061 "1-1 int handler(%d) ldg %d ldv %d "
4062 4062 "arg1 $%p arg2 $%p\n",
4063 4063 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4064 4064 } else if (ldgp->nldvs > 1) {
4065 4065 inthandler = (uint_t *)ldgp->sys_intr_handler;
4066 4066 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4067 4067 "hxge_add_intrs_adv_type_fix: "
4068 4068 "shared ldv %d int handler(%d) ldv %d ldg %d"
4069 4069 "arg1 0x%016llx arg2 0x%016llx\n",
4070 4070 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4071 4071 arg1, arg2));
4072 4072 }
4073 4073
4074 4074 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4075 4075 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4076 4076 DDI_SUCCESS) {
4077 4077 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4078 4078 "==> hxge_add_intrs_adv_type_fix: failed #%d "
4079 4079 "status 0x%x", x, ddi_status));
4080 4080 for (y = 0; y < intrp->intr_added; y++) {
4081 4081 (void) ddi_intr_remove_handler(
4082 4082 intrp->htable[y]);
4083 4083 }
4084 4084 for (y = 0; y < nactual; y++) {
4085 4085 (void) ddi_intr_free(intrp->htable[y]);
4086 4086 }
4087 4087 /* Free already allocated intr */
4088 4088 kmem_free(intrp->htable, intrp->intr_size);
4089 4089
4090 4090 (void) hxge_ldgv_uninit(hxgep);
4091 4091
4092 4092 return (HXGE_ERROR | HXGE_DDI_FAILED);
4093 4093 }
4094 4094 intrp->intr_added++;
4095 4095 }
4096 4096
4097 4097 intrp->msi_intx_cnt = nactual;
4098 4098
4099 4099 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4100 4100
4101 4101 status = hxge_intr_ldgv_init(hxgep);
4102 4102
4103 4103 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4104 4104
4105 4105 return (status);
4106 4106 }
4107 4107
4108 4108 /*ARGSUSED*/
4109 4109 static void
4110 4110 hxge_remove_intrs(p_hxge_t hxgep)
4111 4111 {
4112 4112 int i, inum;
4113 4113 p_hxge_intr_t intrp;
4114 4114
4115 4115 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4116 4116 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4117 4117 if (!intrp->intr_registered) {
4118 4118 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4119 4119 "<== hxge_remove_intrs: interrupts not registered"));
4120 4120 return;
4121 4121 }
4122 4122
4123 4123 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4124 4124
4125 4125 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4126 4126 (void) ddi_intr_block_disable(intrp->htable,
4127 4127 intrp->intr_added);
4128 4128 } else {
4129 4129 for (i = 0; i < intrp->intr_added; i++) {
4130 4130 (void) ddi_intr_disable(intrp->htable[i]);
4131 4131 }
4132 4132 }
4133 4133
4134 4134 for (inum = 0; inum < intrp->intr_added; inum++) {
4135 4135 if (intrp->htable[inum]) {
4136 4136 (void) ddi_intr_remove_handler(intrp->htable[inum]);
4137 4137 }
4138 4138 }
4139 4139
4140 4140 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4141 4141 if (intrp->htable[inum]) {
4142 4142 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4143 4143 "hxge_remove_intrs: ddi_intr_free inum %d "
4144 4144 "msi_intx_cnt %d intr_added %d",
4145 4145 inum, intrp->msi_intx_cnt, intrp->intr_added));
4146 4146
4147 4147 (void) ddi_intr_free(intrp->htable[inum]);
4148 4148 }
4149 4149 }
4150 4150
4151 4151 kmem_free(intrp->htable, intrp->intr_size);
4152 4152 intrp->intr_registered = B_FALSE;
4153 4153 intrp->intr_enabled = B_FALSE;
4154 4154 intrp->msi_intx_cnt = 0;
4155 4155 intrp->intr_added = 0;
4156 4156
4157 4157 (void) hxge_ldgv_uninit(hxgep);
4158 4158
4159 4159 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4160 4160 }
4161 4161
4162 4162 /*ARGSUSED*/
4163 4163 static void
4164 4164 hxge_intrs_enable(p_hxge_t hxgep)
4165 4165 {
4166 4166 p_hxge_intr_t intrp;
4167 4167 int i;
4168 4168 int status;
4169 4169
4170 4170 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4171 4171
4172 4172 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4173 4173
4174 4174 if (!intrp->intr_registered) {
4175 4175 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4176 4176 "interrupts are not registered"));
4177 4177 return;
4178 4178 }
4179 4179
4180 4180 if (intrp->intr_enabled) {
4181 4181 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4182 4182 "<== hxge_intrs_enable: already enabled"));
4183 4183 return;
4184 4184 }
4185 4185
4186 4186 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4187 4187 status = ddi_intr_block_enable(intrp->htable,
4188 4188 intrp->intr_added);
4189 4189 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4190 4190 "block enable - status 0x%x total inums #%d\n",
4191 4191 status, intrp->intr_added));
4192 4192 } else {
4193 4193 for (i = 0; i < intrp->intr_added; i++) {
4194 4194 status = ddi_intr_enable(intrp->htable[i]);
4195 4195 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4196 4196 "ddi_intr_enable:enable - status 0x%x "
4197 4197 "total inums %d enable inum #%d\n",
4198 4198 status, intrp->intr_added, i));
4199 4199 if (status == DDI_SUCCESS) {
4200 4200 intrp->intr_enabled = B_TRUE;
4201 4201 }
4202 4202 }
4203 4203 }
4204 4204
4205 4205 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4206 4206 }
4207 4207
4208 4208 /*ARGSUSED*/
4209 4209 static void
4210 4210 hxge_intrs_disable(p_hxge_t hxgep)
4211 4211 {
4212 4212 p_hxge_intr_t intrp;
4213 4213 int i;
4214 4214
4215 4215 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4216 4216
4217 4217 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4218 4218
4219 4219 if (!intrp->intr_registered) {
4220 4220 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4221 4221 "interrupts are not registered"));
4222 4222 return;
4223 4223 }
4224 4224
4225 4225 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4226 4226 (void) ddi_intr_block_disable(intrp->htable,
4227 4227 intrp->intr_added);
4228 4228 } else {
4229 4229 for (i = 0; i < intrp->intr_added; i++) {
4230 4230 (void) ddi_intr_disable(intrp->htable[i]);
4231 4231 }
4232 4232 }
4233 4233
4234 4234 intrp->intr_enabled = B_FALSE;
4235 4235 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4236 4236 }
4237 4237
4238 4238 static hxge_status_t
4239 4239 hxge_mac_register(p_hxge_t hxgep)
4240 4240 {
4241 4241 mac_register_t *macp;
4242 4242 int status;
4243 4243
4244 4244 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4245 4245
4246 4246 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4247 4247 return (HXGE_ERROR);
4248 4248
4249 4249 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4250 4250 macp->m_driver = hxgep;
4251 4251 macp->m_dip = hxgep->dip;
4252 4252 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4253 4253 macp->m_callbacks = &hxge_m_callbacks;
4254 4254 macp->m_min_sdu = 0;
4255 4255 macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4256 4256 macp->m_margin = VLAN_TAGSZ;
4257 4257 macp->m_priv_props = hxge_priv_props;
4258 4258 macp->m_v12n = MAC_VIRT_LEVEL1;
4259 4259
4260 4260 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4261 4261 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4262 4262 macp->m_src_addr[0],
4263 4263 macp->m_src_addr[1],
4264 4264 macp->m_src_addr[2],
4265 4265 macp->m_src_addr[3],
4266 4266 macp->m_src_addr[4],
4267 4267 macp->m_src_addr[5]));
4268 4268
4269 4269 status = mac_register(macp, &hxgep->mach);
4270 4270 mac_free(macp);
4271 4271
4272 4272 if (status != 0) {
4273 4273 cmn_err(CE_WARN,
4274 4274 "hxge_mac_register failed (status %d instance %d)",
4275 4275 status, hxgep->instance);
4276 4276 return (HXGE_ERROR);
4277 4277 }
4278 4278
4279 4279 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4280 4280 "(instance %d)", hxgep->instance));
4281 4281
4282 4282 return (HXGE_OK);
4283 4283 }
4284 4284
4285 4285 static int
4286 4286 hxge_init_common_dev(p_hxge_t hxgep)
4287 4287 {
4288 4288 p_hxge_hw_list_t hw_p;
4289 4289 dev_info_t *p_dip;
4290 4290
4291 4291 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4292 4292
4293 4293 p_dip = hxgep->p_dip;
4294 4294 MUTEX_ENTER(&hxge_common_lock);
4295 4295
4296 4296 /*
4297 4297 * Loop through existing per Hydra hardware list.
4298 4298 */
4299 4299 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4300 4300 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4301 4301 "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4302 4302 hw_p, p_dip));
4303 4303 if (hw_p->parent_devp == p_dip) {
4304 4304 hxgep->hxge_hw_p = hw_p;
4305 4305 hw_p->ndevs++;
4306 4306 hw_p->hxge_p = hxgep;
4307 4307 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4308 4308 "==> hxge_init_common_device: "
4309 4309 "hw_p $%p parent dip $%p ndevs %d (found)",
4310 4310 hw_p, p_dip, hw_p->ndevs));
4311 4311 break;
4312 4312 }
4313 4313 }
4314 4314
4315 4315 if (hw_p == NULL) {
4316 4316 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4317 4317 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4318 4318 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4319 4319 hw_p->parent_devp = p_dip;
4320 4320 hw_p->magic = HXGE_MAGIC;
4321 4321 hxgep->hxge_hw_p = hw_p;
4322 4322 hw_p->ndevs++;
4323 4323 hw_p->hxge_p = hxgep;
4324 4324 hw_p->next = hxge_hw_list;
4325 4325
4326 4326 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4327 4327 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4328 4328 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4329 4329
4330 4330 hxge_hw_list = hw_p;
4331 4331 }
4332 4332 MUTEX_EXIT(&hxge_common_lock);
4333 4333 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4334 4334 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4335 4335 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4336 4336
4337 4337 return (HXGE_OK);
4338 4338 }
4339 4339
4340 4340 static void
4341 4341 hxge_uninit_common_dev(p_hxge_t hxgep)
4342 4342 {
4343 4343 p_hxge_hw_list_t hw_p, h_hw_p;
4344 4344 dev_info_t *p_dip;
4345 4345
4346 4346 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4347 4347 if (hxgep->hxge_hw_p == NULL) {
4348 4348 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4349 4349 "<== hxge_uninit_common_dev (no common)"));
4350 4350 return;
4351 4351 }
4352 4352
4353 4353 MUTEX_ENTER(&hxge_common_lock);
4354 4354 h_hw_p = hxge_hw_list;
4355 4355 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4356 4356 p_dip = hw_p->parent_devp;
4357 4357 if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4358 4358 hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4359 4359 hw_p->magic == HXGE_MAGIC) {
4360 4360 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4361 4361 "==> hxge_uninit_common_dev: "
4362 4362 "hw_p $%p parent dip $%p ndevs %d (found)",
4363 4363 hw_p, p_dip, hw_p->ndevs));
4364 4364
4365 4365 hxgep->hxge_hw_p = NULL;
4366 4366 if (hw_p->ndevs) {
4367 4367 hw_p->ndevs--;
4368 4368 }
4369 4369 hw_p->hxge_p = NULL;
4370 4370 if (!hw_p->ndevs) {
4371 4371 MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4372 4372 MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4373 4373 MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4374 4374 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4375 4375 "==> hxge_uninit_common_dev: "
4376 4376 "hw_p $%p parent dip $%p ndevs %d (last)",
4377 4377 hw_p, p_dip, hw_p->ndevs));
4378 4378
4379 4379 if (hw_p == hxge_hw_list) {
4380 4380 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4381 4381 "==> hxge_uninit_common_dev:"
4382 4382 "remove head "
4383 4383 "hw_p $%p parent dip $%p "
4384 4384 "ndevs %d (head)",
4385 4385 hw_p, p_dip, hw_p->ndevs));
4386 4386 hxge_hw_list = hw_p->next;
4387 4387 } else {
4388 4388 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4389 4389 "==> hxge_uninit_common_dev:"
4390 4390 "remove middle "
4391 4391 "hw_p $%p parent dip $%p "
4392 4392 "ndevs %d (middle)",
4393 4393 hw_p, p_dip, hw_p->ndevs));
4394 4394 h_hw_p->next = hw_p->next;
4395 4395 }
4396 4396
4397 4397 KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4398 4398 }
4399 4399 break;
4400 4400 } else {
4401 4401 h_hw_p = hw_p;
4402 4402 }
4403 4403 }
4404 4404
4405 4405 MUTEX_EXIT(&hxge_common_lock);
4406 4406 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4407 4407 "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4408 4408
4409 4409 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
4410 4410 }
4411 4411
4412 4412 #define HXGE_MSIX_ENTRIES 32
4413 4413 #define HXGE_MSIX_WAIT_COUNT 10
4414 4414 #define HXGE_MSIX_PARITY_CHECK_COUNT 30
4415 4415
4416 4416 static void
4417 4417 hxge_link_poll(void *arg)
4418 4418 {
4419 4419 p_hxge_t hxgep = (p_hxge_t)arg;
4420 4420 hpi_handle_t handle;
4421 4421 cip_link_stat_t link_stat;
4422 4422 hxge_timeout *to = &hxgep->timeout;
4423 4423
4424 4424 handle = HXGE_DEV_HPI_HANDLE(hxgep);
4425 4425 HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4426 4426
4427 4427 if (to->report_link_status ||
4428 4428 (to->link_status != link_stat.bits.xpcs0_link_up)) {
4429 4429 to->link_status = link_stat.bits.xpcs0_link_up;
4430 4430 to->report_link_status = B_FALSE;
4431 4431
4432 4432 if (link_stat.bits.xpcs0_link_up) {
4433 4433 hxge_link_update(hxgep, LINK_STATE_UP);
4434 4434 } else {
4435 4435 hxge_link_update(hxgep, LINK_STATE_DOWN);
4436 4436 }
4437 4437 }
4438 4438
4439 4439 /* Restart the link status timer to check the link status */
4440 4440 MUTEX_ENTER(&to->lock);
4441 4441 to->id = timeout(hxge_link_poll, arg, to->ticks);
4442 4442 MUTEX_EXIT(&to->lock);
4443 4443 }
4444 4444
4445 4445 static void
4446 4446 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4447 4447 {
4448 4448 p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;
4449 4449
4450 4450 mac_link_update(hxgep->mach, state);
4451 4451 if (state == LINK_STATE_UP) {
4452 4452 statsp->mac_stats.link_speed = 10000;
4453 4453 statsp->mac_stats.link_duplex = 2;
4454 4454 statsp->mac_stats.link_up = 1;
4455 4455 } else {
4456 4456 statsp->mac_stats.link_speed = 0;
4457 4457 statsp->mac_stats.link_duplex = 0;
4458 4458 statsp->mac_stats.link_up = 0;
4459 4459 }
4460 4460 }
4461 4461
4462 4462 static void
4463 4463 hxge_msix_init(p_hxge_t hxgep)
4464 4464 {
4465 4465 uint32_t data0;
4466 4466 uint32_t data1;
4467 4467 uint32_t data2;
4468 4468 int i;
4469 4469 uint32_t msix_entry0;
4470 4470 uint32_t msix_entry1;
4471 4471 uint32_t msix_entry2;
4472 4472 uint32_t msix_entry3;
4473 4473
4474 4474 /* Change to use MSIx bar instead of indirect access */
4475 4475 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4476 4476 data0 = 0xffffffff - i;
4477 4477 data1 = 0xffffffff - i - 1;
4478 4478 data2 = 0xffffffff - i - 2;
4479 4479
4480 4480 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4481 4481 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4482 4482 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4483 4483 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
4484 4484 }
4485 4485
4486 4486 /* Initialize ram data out buffer. */
4487 4487 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4488 4488 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4489 4489 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4490 4490 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4491 4491 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4492 4492 }
4493 4493 }
4494 4494
4495 4495 /*
4496 4496 * The following function is to support
4497 4497 * PSARC/2007/453 MSI-X interrupt limit override.
4498 4498 */
4499 4499 static int
4500 4500 hxge_create_msi_property(p_hxge_t hxgep)
4501 4501 {
4502 4502 int nmsi;
4503 4503 extern int ncpus;
4504 4504
4505 4505 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4506 4506
4507 4507 (void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4508 4508 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4509 4509 /*
4510 4510 * The maximum MSI-X requested will be 8.
4511 4511 	 * If the # of CPUs is less than 8, we will request
4512 4512 * # MSI-X based on the # of CPUs.
4513 4513 */
4514 4514 if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4515 4515 nmsi = HXGE_MSIX_REQUEST_10G;
4516 4516 } else {
4517 4517 nmsi = ncpus;
4518 4518 }
4519 4519
4520 4520 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4521 4521 "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4522 4522 ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4523 4523 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4524 4524
4525 4525 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4526 4526 return (nmsi);
4527 4527 }