7127 remove -Wno-missing-braces from Makefile.uts
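For context: -Wmissing-braces warns when a nested aggregate initializer omits its inner braces, so dropping -Wno-missing-braces from Makefile.uts means initializers like the ones in this file must be fully braced to build cleanly. A minimal sketch of the pattern the warning flags (the struct names below are illustrative, not taken from the driver):

    struct point { int x, y; };
    struct line  { struct point a, b; };

    /* Warns under -Wmissing-braces: inner braces omitted */
    struct line l1 = { 0, 0, 1, 1 };

    /* Accepted: each sub-aggregate is braced explicitly */
    struct line l2 = { { 0, 0 }, { 1, 1 } };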
--- old/usr/src/uts/common/io/nge/nge_main.c
+++ new/usr/src/uts/common/io/nge/nge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 #include "nge.h"
29 29
30 30 /*
31 31 * Describes the chip's DMA engine
32 32 */
33 33
34 34 static ddi_dma_attr_t hot_dma_attr = {
35 35 DMA_ATTR_V0, /* dma_attr version */
36 36 0x0000000000000000ull, /* dma_attr_addr_lo */
37 37 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */
38 38 0x000000007FFFFFFFull, /* dma_attr_count_max */
39 39 0x0000000000000010ull, /* dma_attr_align */
40 40 0x00000FFF, /* dma_attr_burstsizes */
41 41 0x00000001, /* dma_attr_minxfer */
42 42 0x000000000000FFFFull, /* dma_attr_maxxfer */
43 43 0x000000FFFFFFFFFFull, /* dma_attr_seg */
44 44 1, /* dma_attr_sgllen */
45 45 0x00000001, /* dma_attr_granular */
46 46 0
47 47 };
48 48
49 49 static ddi_dma_attr_t hot_tx_dma_attr = {
50 50 DMA_ATTR_V0, /* dma_attr version */
51 51 0x0000000000000000ull, /* dma_attr_addr_lo */
52 52 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */
53 53 0x0000000000003FFFull, /* dma_attr_count_max */
54 54 0x0000000000000010ull, /* dma_attr_align */
55 55 0x00000FFF, /* dma_attr_burstsizes */
56 56 0x00000001, /* dma_attr_minxfer */
57 57 0x0000000000003FFFull, /* dma_attr_maxxfer */
58 58 0x000000FFFFFFFFFFull, /* dma_attr_seg */
59 59 NGE_MAX_COOKIES, /* dma_attr_sgllen */
60 60 1, /* dma_attr_granular */
61 61 0
62 62 };
63 63
64 64 static ddi_dma_attr_t sum_dma_attr = {
65 65 DMA_ATTR_V0, /* dma_attr version */
66 66 0x0000000000000000ull, /* dma_attr_addr_lo */
67 67 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */
68 68 0x000000007FFFFFFFull, /* dma_attr_count_max */
69 69 0x0000000000000010ull, /* dma_attr_align */
70 70 0x00000FFF, /* dma_attr_burstsizes */
71 71 0x00000001, /* dma_attr_minxfer */
72 72 0x000000000000FFFFull, /* dma_attr_maxxfer */
73 73 0x00000000FFFFFFFFull, /* dma_attr_seg */
74 74 1, /* dma_attr_sgllen */
75 75 0x00000001, /* dma_attr_granular */
76 76 0
77 77 };
78 78
79 79 static ddi_dma_attr_t sum_tx_dma_attr = {
80 80 DMA_ATTR_V0, /* dma_attr version */
81 81 0x0000000000000000ull, /* dma_attr_addr_lo */
82 82 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */
83 83 0x0000000000003FFFull, /* dma_attr_count_max */
84 84 0x0000000000000010ull, /* dma_attr_align */
85 85 0x00000FFF, /* dma_attr_burstsizes */
86 86 0x00000001, /* dma_attr_minxfer */
87 87 0x0000000000003FFFull, /* dma_attr_maxxfer */
88 88 0x00000000FFFFFFFFull, /* dma_attr_seg */
89 89 NGE_MAX_COOKIES, /* dma_attr_sgllen */
90 90 1, /* dma_attr_granular */
91 91 0
92 92 };
93 93
94 94 /*
95 95 * DMA access attributes for data.
96 96 */
97 97 ddi_device_acc_attr_t nge_data_accattr = {
98 98 DDI_DEVICE_ATTR_V0,
99 99 DDI_STRUCTURE_LE_ACC,
100 100 DDI_STRICTORDER_ACC,
101 101 DDI_DEFAULT_ACC
102 102 };
103 103
104 104 /*
105 105 * DMA access attributes for descriptors.
106 106 */
107 107 static ddi_device_acc_attr_t nge_desc_accattr = {
108 108 DDI_DEVICE_ATTR_V0,
109 109 DDI_STRUCTURE_LE_ACC,
110 110 DDI_STRICTORDER_ACC,
111 111 DDI_DEFAULT_ACC
112 112 };
113 113
114 114 /*
115 115 * PIO access attributes for registers
116 116 */
117 117 static ddi_device_acc_attr_t nge_reg_accattr = {
118 118 DDI_DEVICE_ATTR_V0,
119 119 DDI_STRUCTURE_LE_ACC,
120 120 DDI_STRICTORDER_ACC,
121 121 DDI_DEFAULT_ACC
122 122 };
123 123
124 124 /*
125 125 * NIC DESC MODE 2
126 126 */
127 127
128 128 static const nge_desc_attr_t nge_sum_desc = {
129 129
130 130 sizeof (sum_rx_bd),
131 131 sizeof (sum_tx_bd),
132 132 &sum_dma_attr,
133 133 &sum_tx_dma_attr,
134 134 nge_sum_rxd_fill,
135 135 nge_sum_rxd_check,
136 136 nge_sum_txd_fill,
137 137 nge_sum_txd_check,
138 138 };
139 139
140 140 /*
141 141 * NIC DESC MODE 3
142 142 */
143 143
144 144 static const nge_desc_attr_t nge_hot_desc = {
145 145
146 146 sizeof (hot_rx_bd),
147 147 sizeof (hot_tx_bd),
148 148 &hot_dma_attr,
149 149 &hot_tx_dma_attr,
150 150 nge_hot_rxd_fill,
151 151 nge_hot_rxd_check,
152 152 nge_hot_txd_fill,
153 153 nge_hot_txd_check,
154 154 };
155 155
156 156 static char nge_ident[] = "nVidia 1Gb Ethernet";
157 157 static char clsize_propname[] = "cache-line-size";
158 158 static char latency_propname[] = "latency-timer";
159 159 static char debug_propname[] = "nge-debug-flags";
160 160 static char intr_moderation[] = "intr-moderation";
161 161 static char rx_data_hw[] = "rx-data-hw";
162 162 static char rx_prd_lw[] = "rx-prd-lw";
163 163 static char rx_prd_hw[] = "rx-prd-hw";
164 164 static char sw_intr_intv[] = "sw-intr-intvl";
165 165 static char nge_desc_mode[] = "desc-mode";
166 166 static char default_mtu[] = "default_mtu";
167 167 static char low_memory_mode[] = "minimal-memory-usage";
168 168 extern kmutex_t nge_log_mutex[1];
169 169
170 170 static int nge_m_start(void *);
171 171 static void nge_m_stop(void *);
172 172 static int nge_m_promisc(void *, boolean_t);
173 173 static int nge_m_multicst(void *, boolean_t, const uint8_t *);
174 174 static int nge_m_unicst(void *, const uint8_t *);
175 175 static void nge_m_ioctl(void *, queue_t *, mblk_t *);
176 176 static boolean_t nge_m_getcapab(void *, mac_capab_t, void *);
177 177 static int nge_m_setprop(void *, const char *, mac_prop_id_t,
178 178 uint_t, const void *);
179 179 static int nge_m_getprop(void *, const char *, mac_prop_id_t,
180 180 uint_t, void *);
181 181 static void nge_m_propinfo(void *, const char *, mac_prop_id_t,
182 182 mac_prop_info_handle_t);
183 183 static int nge_set_priv_prop(nge_t *, const char *, uint_t,
184 184 const void *);
185 185 static int nge_get_priv_prop(nge_t *, const char *, uint_t,
186 186 void *);
187 187
188 188 #define NGE_M_CALLBACK_FLAGS\
189 189 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | \
190 190 MC_PROPINFO)
191 191
192 192 static mac_callbacks_t nge_m_callbacks = {
193 193 NGE_M_CALLBACK_FLAGS,
194 194 nge_m_stat,
195 195 nge_m_start,
196 196 nge_m_stop,
197 197 nge_m_promisc,
198 198 nge_m_multicst,
199 199 nge_m_unicst,
200 200 nge_m_tx,
201 201 NULL,
202 202 nge_m_ioctl,
203 203 nge_m_getcapab,
204 204 NULL,
205 205 NULL,
206 206 nge_m_setprop,
207 207 nge_m_getprop,
208 208 nge_m_propinfo
209 209 };
210 210
211 211 char *nge_priv_props[] = {
212 212 "_tx_bcopy_threshold",
213 213 "_rx_bcopy_threshold",
214 214 "_recv_max_packet",
215 215 "_poll_quiet_time",
216 216 "_poll_busy_time",
217 217 "_rx_intr_hwater",
218 218 "_rx_intr_lwater",
219 219 NULL
220 220 };
221 221
222 222 static int nge_add_intrs(nge_t *, int);
223 223 static void nge_rem_intrs(nge_t *);
224 224 static int nge_register_intrs_and_init_locks(nge_t *);
225 225
226 226 /*
227 227 * NGE MSI tunable:
228 228 */
229 229 boolean_t nge_enable_msi = B_FALSE;
230 230
231 231 static enum ioc_reply
232 232 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
233 233 {
234 234 /*
235 235 * If the mode isn't being changed, there's nothing to do ...
236 236 */
237 237 if (mode == ngep->param_loop_mode)
238 238 return (IOC_ACK);
239 239
240 240 /*
241 241 * Validate the requested mode and prepare a suitable message
242 242 * to explain the link down/up cycle that the change will
243 243 * probably induce ...
244 244 */
245 245 switch (mode) {
246 246 default:
247 247 return (IOC_INVAL);
248 248
249 249 case NGE_LOOP_NONE:
250 250 case NGE_LOOP_EXTERNAL_100:
251 251 case NGE_LOOP_EXTERNAL_10:
252 252 case NGE_LOOP_INTERNAL_PHY:
253 253 break;
254 254 }
255 255
256 256 /*
257 257 * All OK; tell the caller to reprogram
258 258 * the PHY and/or MAC for the new mode ...
259 259 */
260 260 ngep->param_loop_mode = mode;
261 261 return (IOC_RESTART_ACK);
262 262 }
263 263
264 264 #undef NGE_DBG
265 265 #define NGE_DBG NGE_DBG_INIT
266 266
267 267 /*
268 268 * Utility routine to carve a slice off a chunk of allocated memory,
269 269 * updating the chunk descriptor accordingly. The size of the slice
270 270 * is given by the product of the <qty> and <size> parameters.
271 271 */
272 272 void
273 273 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
274 274 uint32_t qty, uint32_t size)
275 275 {
276 276 size_t totsize;
277 277
278 278 totsize = qty*size;
279 279 ASSERT(size > 0);
280 280 ASSERT(totsize <= chunk->alength);
281 281
282 282 *slice = *chunk;
283 283 slice->nslots = qty;
284 284 slice->size = size;
285 285 slice->alength = totsize;
286 286
287 287 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
288 288 chunk->alength -= totsize;
289 289 chunk->offset += totsize;
290 290 chunk->cookie.dmac_laddress += totsize;
291 291 chunk->cookie.dmac_size -= totsize;
292 292 }
293 293
294 294 /*
295 295 * Allocate an area of memory and a DMA handle for accessing it
296 296 */
297 297 int
298 298 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
299 299 uint_t dma_flags, dma_area_t *dma_p)
300 300 {
301 301 int err;
302 302 caddr_t va;
303 303
304 304 NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
305 305 (void *)ngep, memsize, attr_p, dma_flags, dma_p));
306 306 /*
307 307 * Allocate handle
308 308 */
309 309 err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
310 310 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
311 311 if (err != DDI_SUCCESS)
312 312 goto fail;
313 313
314 314 /*
315 315 * Allocate memory
316 316 */
317 317 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
318 318 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
319 319 DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
320 320 if (err != DDI_SUCCESS)
321 321 goto fail;
322 322
323 323 /*
324 324 * Bind the two together
325 325 */
326 326 dma_p->mem_va = va;
327 327 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
328 328 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
329 329 &dma_p->cookie, &dma_p->ncookies);
330 330
331 331 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
332 332 goto fail;
333 333
334 334 dma_p->nslots = ~0U;
335 335 dma_p->size = ~0U;
336 336 dma_p->offset = 0;
337 337
338 338 return (DDI_SUCCESS);
339 339
340 340 fail:
341 341 nge_free_dma_mem(dma_p);
342 342 NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));
343 343
344 344 return (DDI_FAILURE);
345 345 }
346 346
347 347 /*
348 348 * Free one allocated area of DMAable memory
349 349 */
350 350 void
351 351 nge_free_dma_mem(dma_area_t *dma_p)
352 352 {
353 353 if (dma_p->dma_hdl != NULL) {
354 354 if (dma_p->ncookies) {
355 355 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
356 356 dma_p->ncookies = 0;
357 357 }
358 358 }
359 359 if (dma_p->acc_hdl != NULL) {
360 360 ddi_dma_mem_free(&dma_p->acc_hdl);
361 361 dma_p->acc_hdl = NULL;
362 362 }
363 363 if (dma_p->dma_hdl != NULL) {
364 364 ddi_dma_free_handle(&dma_p->dma_hdl);
365 365 dma_p->dma_hdl = NULL;
366 366 }
367 367 }
368 368
369 369 #define ALLOC_TX_BUF 0x1
370 370 #define ALLOC_TX_DESC 0x2
371 371 #define ALLOC_RX_DESC 0x4
372 372
373 373 int
374 374 nge_alloc_bufs(nge_t *ngep)
375 375 {
376 376 int err;
377 377 int split;
378 378 int progress;
379 379 size_t txbuffsize;
380 380 size_t rxdescsize;
381 381 size_t txdescsize;
382 382
383 383 txbuffsize = ngep->tx_desc * ngep->buf_size;
384 384 rxdescsize = ngep->rx_desc;
385 385 txdescsize = ngep->tx_desc;
386 386 rxdescsize *= ngep->desc_attr.rxd_size;
387 387 txdescsize *= ngep->desc_attr.txd_size;
388 388 progress = 0;
389 389
390 390 NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
391 391 /*
392 392 * Allocate memory & handles for TX buffers
393 393 */
394 394 ASSERT((txbuffsize % ngep->nge_split) == 0);
395 395 for (split = 0; split < ngep->nge_split; ++split) {
396 396 err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
397 397 &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
398 398 &ngep->send->buf[split]);
399 399 if (err != DDI_SUCCESS)
400 400 goto fail;
401 401 }
402 402
403 403 progress |= ALLOC_TX_BUF;
404 404
405 405 /*
406 406 * Allocate memory & handles for receive return rings and
407 407 * buffer (producer) descriptor rings
408 408 */
409 409 err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
410 410 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
411 411 if (err != DDI_SUCCESS)
412 412 goto fail;
413 413 progress |= ALLOC_RX_DESC;
414 414
415 415 /*
416 416 * Allocate memory & handles for TX descriptor rings,
417 417 */
418 418 err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
419 419 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
420 420 if (err != DDI_SUCCESS)
421 421 goto fail;
422 422 return (DDI_SUCCESS);
423 423
424 424 fail:
425 425 if (progress & ALLOC_RX_DESC)
426 426 nge_free_dma_mem(&ngep->recv->desc);
427 427 if (progress & ALLOC_TX_BUF) {
428 428 for (split = 0; split < ngep->nge_split; ++split)
429 429 nge_free_dma_mem(&ngep->send->buf[split]);
430 430 }
431 431
432 432 return (DDI_FAILURE);
433 433 }
434 434
435 435 /*
436 436 * This routine frees the transmit and receive buffers and descriptors.
437 437 * Make sure the chip is stopped before calling it!
438 438 */
439 439 void
440 440 nge_free_bufs(nge_t *ngep)
441 441 {
442 442 int split;
443 443
444 444 NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
445 445
446 446 nge_free_dma_mem(&ngep->recv->desc);
447 447 nge_free_dma_mem(&ngep->send->desc);
448 448
449 449 for (split = 0; split < ngep->nge_split; ++split)
450 450 nge_free_dma_mem(&ngep->send->buf[split]);
451 451 }
452 452
453 453 /*
454 454 * Clean up initialisation done above before the memory is freed
455 455 */
456 456 static void
457 457 nge_fini_send_ring(nge_t *ngep)
458 458 {
459 459 uint32_t slot;
460 460 size_t dmah_num;
461 461 send_ring_t *srp;
462 462 sw_tx_sbd_t *ssbdp;
463 463
464 464 srp = ngep->send;
465 465 ssbdp = srp->sw_sbds;
466 466
467 467 NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
468 468
469 469 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
470 470
471 471 for (slot = 0; slot < dmah_num; ++slot) {
472 472 if (srp->dmahndl[slot].hndl) {
473 473 (void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
474 474 ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
475 475 srp->dmahndl[slot].hndl = NULL;
476 476 srp->dmahndl[slot].next = NULL;
477 477 }
478 478 }
479 479
480 480 srp->dmah_free.head = NULL;
481 481 srp->dmah_free.tail = NULL;
482 482
483 483 kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
484 484
485 485 }
486 486
487 487 /*
488 488 * Initialise the specified Send Ring, using the information in the
489 489 * <dma_area> descriptors that it contains to set up all the other
490 490 * fields. This routine should be called only once for each ring.
491 491 */
492 492 static int
493 493 nge_init_send_ring(nge_t *ngep)
494 494 {
495 495 size_t dmah_num;
496 496 uint32_t nslots;
497 497 uint32_t err;
498 498 uint32_t slot;
499 499 uint32_t split;
500 500 send_ring_t *srp;
501 501 sw_tx_sbd_t *ssbdp;
502 502 dma_area_t desc;
503 503 dma_area_t pbuf;
504 504
505 505 srp = ngep->send;
506 506 srp->desc.nslots = ngep->tx_desc;
507 507 nslots = srp->desc.nslots;
508 508
509 509 NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
510 510 /*
511 511 * Other one-off initialisation of per-ring data
512 512 */
513 513 srp->ngep = ngep;
514 514
515 515 /*
516 516 * Allocate the array of s/w Send Buffer Descriptors
517 517 */
518 518 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
519 519 srp->sw_sbds = ssbdp;
520 520
521 521 /*
522 522 * Now initialise each array element once and for all
523 523 */
524 524 desc = srp->desc;
525 525 for (split = 0; split < ngep->nge_split; ++split) {
526 526 pbuf = srp->buf[split];
527 527 for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
528 528 nge_slice_chunk(&ssbdp->desc, &desc, 1,
529 529 ngep->desc_attr.txd_size);
530 530 nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
531 531 ngep->buf_size);
532 532 }
533 533 ASSERT(pbuf.alength == 0);
534 534 }
535 535 ASSERT(desc.alength == 0);
536 536
537 537 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
538 538
539 539 /* preallocate dma handles for tx buffer */
540 540 for (slot = 0; slot < dmah_num; ++slot) {
541 541
542 542 err = ddi_dma_alloc_handle(ngep->devinfo,
543 543 ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
544 544 NULL, &srp->dmahndl[slot].hndl);
545 545
546 546 if (err != DDI_SUCCESS) {
547 547 nge_fini_send_ring(ngep);
548 548 nge_error(ngep,
549 549 "nge_init_send_ring: alloc dma handle fails");
550 550 return (DDI_FAILURE);
551 551 }
552 552 srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
553 553 }
554 554
555 555 srp->dmah_free.head = srp->dmahndl;
556 556 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
557 557 srp->dmah_free.tail->next = NULL;
558 558
559 559 return (DDI_SUCCESS);
560 560 }
561 561
562 562 /*
563 563 * Initialize the tx recycle pointer and tx sending pointer of tx ring
564 564 * and set the type of tx's data descriptor by default.
565 565 */
566 566 static void
567 567 nge_reinit_send_ring(nge_t *ngep)
568 568 {
569 569 size_t dmah_num;
570 570 uint32_t slot;
571 571 send_ring_t *srp;
572 572 sw_tx_sbd_t *ssbdp;
573 573
574 574 srp = ngep->send;
575 575
576 576 /*
577 577 * Reinitialise control variables ...
578 578 */
579 579
580 580 srp->tx_hwmark = NGE_DESC_MIN;
581 581 srp->tx_lwmark = NGE_DESC_MIN;
582 582
583 583 srp->tx_next = 0;
584 584 srp->tx_free = srp->desc.nslots;
585 585 srp->tc_next = 0;
586 586
587 587 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
588 588
589 589 for (slot = 0; slot - dmah_num != 0; ++slot)
590 590 srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
591 591
592 592 srp->dmah_free.head = srp->dmahndl;
593 593 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
594 594 srp->dmah_free.tail->next = NULL;
595 595
596 596 /*
597 597 * Zero and sync all the h/w Send Buffer Descriptors
598 598 */
599 599 for (slot = 0; slot < srp->desc.nslots; ++slot) {
600 600 ssbdp = &srp->sw_sbds[slot];
601 601 ssbdp->flags = HOST_OWN;
602 602 }
603 603
604 604 DMA_ZERO(srp->desc);
605 605 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
606 606 }
607 607
608 608 /*
609 609 * Initialize the slot number of rx's ring
610 610 */
611 611 static void
612 612 nge_init_recv_ring(nge_t *ngep)
613 613 {
614 614 recv_ring_t *rrp;
615 615
616 616 rrp = ngep->recv;
617 617 rrp->desc.nslots = ngep->rx_desc;
618 618 rrp->ngep = ngep;
619 619 }
620 620
621 621 /*
622 622 * Initialize the rx recycle pointer and rx sending pointer of rx ring
623 623 */
624 624 static void
625 625 nge_reinit_recv_ring(nge_t *ngep)
626 626 {
627 627 recv_ring_t *rrp;
628 628
629 629 rrp = ngep->recv;
630 630
631 631 /*
632 632 * Reinitialise control variables ...
633 633 */
634 634 rrp->prod_index = 0;
635 635 /*
636 636 * Zero and sync all the h/w Receive Buffer Descriptors
637 637 */
638 638 DMA_ZERO(rrp->desc);
639 639 DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
640 640 }
641 641
642 642 /*
643 643 * Clean up initialisation done above before the memory is freed
644 644 */
645 645 static void
646 646 nge_fini_buff_ring(nge_t *ngep)
647 647 {
648 648 uint32_t i;
649 649 buff_ring_t *brp;
650 650 dma_area_t *bufp;
651 651 sw_rx_sbd_t *bsbdp;
652 652
653 653 brp = ngep->buff;
654 654 bsbdp = brp->sw_rbds;
655 655
656 656 NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
657 657
658 658 mutex_enter(brp->recycle_lock);
659 659 brp->buf_sign++;
660 660 mutex_exit(brp->recycle_lock);
661 661 for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
662 662 if (bsbdp->bufp) {
663 663 if (bsbdp->bufp->mp)
664 664 freemsg(bsbdp->bufp->mp);
665 665 nge_free_dma_mem(bsbdp->bufp);
666 666 kmem_free(bsbdp->bufp, sizeof (dma_area_t));
667 667 bsbdp->bufp = NULL;
668 668 }
669 669 }
670 670 while (brp->free_list != NULL) {
671 671 bufp = brp->free_list;
672 672 brp->free_list = bufp->next;
673 673 bufp->next = NULL;
674 674 if (bufp->mp)
675 675 freemsg(bufp->mp);
676 676 nge_free_dma_mem(bufp);
677 677 kmem_free(bufp, sizeof (dma_area_t));
678 678 }
679 679 while (brp->recycle_list != NULL) {
680 680 bufp = brp->recycle_list;
681 681 brp->recycle_list = bufp->next;
682 682 bufp->next = NULL;
683 683 if (bufp->mp)
684 684 freemsg(bufp->mp);
685 685 nge_free_dma_mem(bufp);
686 686 kmem_free(bufp, sizeof (dma_area_t));
687 687 }
688 688
689 689
690 690 kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
691 691 brp->sw_rbds = NULL;
692 692 }
693 693
694 694 /*
695 695 * Initialize the Rx's data ring and free ring
696 696 */
697 697 static int
698 698 nge_init_buff_ring(nge_t *ngep)
699 699 {
700 700 uint32_t err;
701 701 uint32_t slot;
702 702 uint32_t nslots_buff;
703 703 uint32_t nslots_recv;
704 704 buff_ring_t *brp;
705 705 recv_ring_t *rrp;
706 706 dma_area_t desc;
707 707 dma_area_t *bufp;
708 708 sw_rx_sbd_t *bsbdp;
709 709
710 710 rrp = ngep->recv;
711 711 brp = ngep->buff;
712 712 brp->nslots = ngep->rx_buf;
713 713 brp->rx_bcopy = B_FALSE;
714 714 nslots_recv = rrp->desc.nslots;
715 715 nslots_buff = brp->nslots;
716 716 brp->ngep = ngep;
717 717
718 718 NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
719 719
720 720 /*
721 721 * Allocate the array of s/w Recv Buffer Descriptors
722 722 */
723 723 bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
724 724 brp->sw_rbds = bsbdp;
725 725 brp->free_list = NULL;
726 726 brp->recycle_list = NULL;
727 727 for (slot = 0; slot < nslots_buff; ++slot) {
728 728 bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
729 729 err = nge_alloc_dma_mem(ngep, (ngep->buf_size
730 730 + NGE_HEADROOM),
731 731 &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
732 732 if (err != DDI_SUCCESS) {
733 733 kmem_free(bufp, sizeof (dma_area_t));
734 734 return (DDI_FAILURE);
735 735 }
736 736
737 737 bufp->alength -= NGE_HEADROOM;
738 738 bufp->offset += NGE_HEADROOM;
739 739 bufp->private = (caddr_t)ngep;
740 740 bufp->rx_recycle.free_func = nge_recv_recycle;
741 741 bufp->rx_recycle.free_arg = (caddr_t)bufp;
742 742 bufp->signature = brp->buf_sign;
743 743 bufp->rx_delivered = B_FALSE;
744 744 bufp->mp = desballoc(DMA_VPTR(*bufp),
745 745 ngep->buf_size + NGE_HEADROOM,
746 746 0, &bufp->rx_recycle);
747 747
748 748 if (bufp->mp == NULL) {
749 749 return (DDI_FAILURE);
750 750 }
751 751 bufp->next = brp->free_list;
752 752 brp->free_list = bufp;
753 753 }
754 754
755 755 /*
756 756 * Now initialise each array element once and for all
757 757 */
758 758 desc = rrp->desc;
759 759 for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
760 760 nge_slice_chunk(&bsbdp->desc, &desc, 1,
761 761 ngep->desc_attr.rxd_size);
762 762 bufp = brp->free_list;
763 763 brp->free_list = bufp->next;
764 764 bsbdp->bufp = bufp;
765 765 bsbdp->flags = CONTROLER_OWN;
766 766 bufp->next = NULL;
767 767 }
768 768
769 769 ASSERT(desc.alength == 0);
770 770 return (DDI_SUCCESS);
771 771 }
772 772
773 773 /*
774 774 * Fill the host address of data in the rx descriptors
775 775 * and initialize the free pointers of the rx free ring
776 776 */
777 777 static int
778 778 nge_reinit_buff_ring(nge_t *ngep)
779 779 {
780 780 uint32_t slot;
781 781 uint32_t nslots_recv;
782 782 buff_ring_t *brp;
783 783 recv_ring_t *rrp;
784 784 sw_rx_sbd_t *bsbdp;
785 785 void *hw_bd_p;
786 786
787 787 brp = ngep->buff;
788 788 rrp = ngep->recv;
789 789 bsbdp = brp->sw_rbds;
790 790 nslots_recv = rrp->desc.nslots;
791 791 for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
792 792 hw_bd_p = DMA_VPTR(bsbdp->desc);
793 793 /*
794 794 * There is a scenario: when the traffic of small tcp
795 795 * packets is heavy, suspending the tcp traffic will
796 796 * cause the preallocated buffers for rx not to be
797 797 * released in time by tcp traffic and cause rx's buffer
798 798 * pointers not to be refilled in time.
799 799 *
800 800 * At this point, if we reinitialize the driver, the bufp
801 801 * pointer for rx's traffic will be NULL.
802 802 * So the result of the reinitialization fails.
803 803 */
804 804 if (bsbdp->bufp == NULL)
805 805 return (DDI_FAILURE);
806 806
807 807 ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
808 808 bsbdp->bufp->alength);
809 809 }
810 810 return (DDI_SUCCESS);
811 811 }
812 812
813 813 static void
814 814 nge_init_ring_param_lock(nge_t *ngep)
815 815 {
816 816 buff_ring_t *brp;
817 817 send_ring_t *srp;
818 818
819 819 srp = ngep->send;
820 820 brp = ngep->buff;
821 821
822 822 /* Init the locks for send ring */
823 823 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
824 824 DDI_INTR_PRI(ngep->intr_pri));
825 825 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
826 826 DDI_INTR_PRI(ngep->intr_pri));
827 827 mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
828 828 DDI_INTR_PRI(ngep->intr_pri));
829 829
830 830 /* Init parameters of buffer ring */
831 831 brp->free_list = NULL;
832 832 brp->recycle_list = NULL;
833 833 brp->rx_hold = 0;
834 834 brp->buf_sign = 0;
835 835
836 836 /* Init recycle list lock */
837 837 mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
838 838 DDI_INTR_PRI(ngep->intr_pri));
839 839 }
840 840
841 841 int
842 842 nge_init_rings(nge_t *ngep)
843 843 {
844 844 uint32_t err;
845 845
846 846 err = nge_init_send_ring(ngep);
847 847 if (err != DDI_SUCCESS) {
848 848 return (err);
849 849 }
850 850 nge_init_recv_ring(ngep);
851 851
852 852 err = nge_init_buff_ring(ngep);
853 853 if (err != DDI_SUCCESS) {
854 854 nge_fini_send_ring(ngep);
855 855 return (DDI_FAILURE);
856 856 }
857 857
858 858 return (err);
859 859 }
860 860
861 861 static int
862 862 nge_reinit_ring(nge_t *ngep)
863 863 {
864 864 int err;
865 865
866 866 nge_reinit_recv_ring(ngep);
867 867 nge_reinit_send_ring(ngep);
868 868 err = nge_reinit_buff_ring(ngep);
869 869 return (err);
870 870 }
871 871
872 872
873 873 void
874 874 nge_fini_rings(nge_t *ngep)
875 875 {
876 876 /*
877 877 * For the receive ring, nothing needs to be finished.
878 878 * So only finish the buffer ring and send ring here.
879 879 */
880 880 nge_fini_buff_ring(ngep);
881 881 nge_fini_send_ring(ngep);
882 882 }
883 883
884 884 /*
885 885 * Loopback ioctl code
886 886 */
887 887
888 888 static lb_property_t loopmodes[] = {
889 889 { normal, "normal", NGE_LOOP_NONE },
890 890 { external, "100Mbps", NGE_LOOP_EXTERNAL_100 },
891 891 { external, "10Mbps", NGE_LOOP_EXTERNAL_10 },
892 892 { internal, "PHY", NGE_LOOP_INTERNAL_PHY },
893 893 };
894 894
895 895 enum ioc_reply
896 896 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
897 897 {
898 898 int cmd;
899 899 uint32_t *lbmp;
900 900 lb_info_sz_t *lbsp;
901 901 lb_property_t *lbpp;
902 902
903 903 /*
904 904 * Validate format of ioctl
905 905 */
906 906 if (mp->b_cont == NULL)
907 907 return (IOC_INVAL);
908 908
909 909 cmd = iocp->ioc_cmd;
910 910
911 911 switch (cmd) {
912 912 default:
913 913 return (IOC_INVAL);
914 914
915 915 case LB_GET_INFO_SIZE:
916 916 if (iocp->ioc_count != sizeof (lb_info_sz_t))
917 917 return (IOC_INVAL);
918 918 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
919 919 *lbsp = sizeof (loopmodes);
920 920 return (IOC_REPLY);
921 921
922 922 case LB_GET_INFO:
923 923 if (iocp->ioc_count != sizeof (loopmodes))
924 924 return (IOC_INVAL);
925 925 lbpp = (lb_property_t *)mp->b_cont->b_rptr;
926 926 bcopy(loopmodes, lbpp, sizeof (loopmodes));
927 927 return (IOC_REPLY);
928 928
929 929 case LB_GET_MODE:
930 930 if (iocp->ioc_count != sizeof (uint32_t))
931 931 return (IOC_INVAL);
932 932 lbmp = (uint32_t *)mp->b_cont->b_rptr;
933 933 *lbmp = ngep->param_loop_mode;
934 934 return (IOC_REPLY);
935 935
936 936 case LB_SET_MODE:
937 937 if (iocp->ioc_count != sizeof (uint32_t))
938 938 return (IOC_INVAL);
939 939 lbmp = (uint32_t *)mp->b_cont->b_rptr;
940 940 return (nge_set_loop_mode(ngep, *lbmp));
941 941 }
942 942 }
943 943
944 944 #undef NGE_DBG
945 945 #define NGE_DBG NGE_DBG_NEMO
946 946
947 947
948 948 static void
949 949 nge_check_desc_prop(nge_t *ngep)
950 950 {
951 951 if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
952 952 ngep->desc_mode = DESC_HOT;
953 953
954 954 if (ngep->desc_mode == DESC_OFFLOAD) {
955 955
956 956 ngep->desc_attr = nge_sum_desc;
957 957
958 958 } else if (ngep->desc_mode == DESC_HOT) {
959 959
960 960 ngep->desc_attr = nge_hot_desc;
961 961 }
962 962 }
963 963
964 964 /*
965 965 * nge_get_props -- get the parameters to tune the driver
966 966 */
967 967 static void
968 968 nge_get_props(nge_t *ngep)
969 969 {
970 970 chip_info_t *infop;
971 971 dev_info_t *devinfo;
972 972 nge_dev_spec_param_t *dev_param_p;
973 973
974 974 devinfo = ngep->devinfo;
975 975 infop = (chip_info_t *)&ngep->chipinfo;
976 976 dev_param_p = &ngep->dev_spec_param;
977 977
978 978 infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
979 979 DDI_PROP_DONTPASS, clsize_propname, 32);
980 980
981 981 infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
982 982 DDI_PROP_DONTPASS, latency_propname, 64);
983 983 ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
984 984 DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
985 985 ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
986 986 DDI_PROP_DONTPASS, rx_data_hw, 0x20);
987 987 ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
988 988 DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
989 989 ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
990 990 DDI_PROP_DONTPASS, rx_prd_hw, 0xc);
991 991
992 992 ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
993 993 DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
994 994 ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
995 995 DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
996 996 ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
997 997 DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
998 998 ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
999 999 DDI_PROP_DONTPASS, low_memory_mode, 0);
1000 1000
1001 1001 if (dev_param_p->jumbo) {
1002 1002 ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
1003 1003 DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
1004 1004 } else
1005 1005 ngep->default_mtu = ETHERMTU;
1006 1006 if (dev_param_p->tx_pause_frame)
1007 1007 ngep->param_link_tx_pause = B_TRUE;
1008 1008 else
1009 1009 ngep->param_link_tx_pause = B_FALSE;
1010 1010
1011 1011 if (dev_param_p->rx_pause_frame)
1012 1012 ngep->param_link_rx_pause = B_TRUE;
1013 1013 else
1014 1014 ngep->param_link_rx_pause = B_FALSE;
1015 1015
1016 1016 if (ngep->default_mtu > ETHERMTU &&
1017 1017 ngep->default_mtu <= NGE_MTU_2500) {
1018 1018 ngep->buf_size = NGE_JB2500_BUFSZ;
1019 1019 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
1020 1020 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
1021 1021 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
1022 1022 ngep->nge_split = NGE_SPLIT_256;
1023 1023 } else if (ngep->default_mtu > NGE_MTU_2500 &&
1024 1024 ngep->default_mtu <= NGE_MTU_4500) {
1025 1025 ngep->buf_size = NGE_JB4500_BUFSZ;
1026 1026 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
1027 1027 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
1028 1028 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
1029 1029 ngep->nge_split = NGE_SPLIT_256;
1030 1030 } else if (ngep->default_mtu > NGE_MTU_4500 &&
1031 1031 ngep->default_mtu <= NGE_MAX_MTU) {
1032 1032 ngep->buf_size = NGE_JB9000_BUFSZ;
1033 1033 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1034 1034 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1035 1035 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1036 1036 ngep->nge_split = NGE_SPLIT_256;
1037 1037 } else if (ngep->default_mtu > NGE_MAX_MTU) {
1038 1038 ngep->default_mtu = NGE_MAX_MTU;
1039 1039 ngep->buf_size = NGE_JB9000_BUFSZ;
1040 1040 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1041 1041 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1042 1042 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1043 1043 ngep->nge_split = NGE_SPLIT_256;
1044 1044 } else if (ngep->lowmem_mode != 0) {
1045 1045 ngep->default_mtu = ETHERMTU;
1046 1046 ngep->buf_size = NGE_STD_BUFSZ;
1047 1047 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
1048 1048 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
1049 1049 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
1050 1050 ngep->nge_split = NGE_SPLIT_32;
1051 1051 } else {
1052 1052 ngep->default_mtu = ETHERMTU;
1053 1053 ngep->buf_size = NGE_STD_BUFSZ;
1054 1054 ngep->tx_desc = dev_param_p->tx_desc_num;
1055 1055 ngep->rx_desc = dev_param_p->rx_desc_num;
1056 1056 ngep->rx_buf = dev_param_p->rx_desc_num * 2;
1057 1057 ngep->nge_split = dev_param_p->nge_split;
1058 1058 }
1059 1059
1060 1060 nge_check_desc_prop(ngep);
1061 1061 }
1062 1062
1063 1063
1064 1064 static int
1065 1065 nge_reset_dev(nge_t *ngep)
1066 1066 {
1067 1067 int err;
1068 1068 nge_mul_addr1 maddr1;
1069 1069 nge_sw_statistics_t *sw_stp;
1070 1070 sw_stp = &ngep->statistics.sw_statistics;
1071 1071 send_ring_t *srp = ngep->send;
1072 1072
1073 1073 ASSERT(mutex_owned(ngep->genlock));
1074 1074 mutex_enter(srp->tc_lock);
1075 1075 mutex_enter(srp->tx_lock);
1076 1076
1077 1077 nge_tx_recycle_all(ngep);
1078 1078 err = nge_reinit_ring(ngep);
1079 1079 if (err == DDI_FAILURE) {
1080 1080 mutex_exit(srp->tx_lock);
1081 1081 mutex_exit(srp->tc_lock);
1082 1082 return (err);
1083 1083 }
1084 1084 err = nge_chip_reset(ngep);
1085 1085 /*
1086 1086 * Clear the Multicast mac address table
1087 1087 */
1088 1088 nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
1089 1089 maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
1090 1090 maddr1.addr_bits.addr = 0;
1091 1091 nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);
1092 1092
1093 1093 mutex_exit(srp->tx_lock);
1094 1094 mutex_exit(srp->tc_lock);
1095 1095 if (err == DDI_FAILURE)
1096 1096 return (err);
1097 1097 ngep->watchdog = 0;
1098 1098 ngep->resched_needed = B_FALSE;
1099 1099 ngep->promisc = B_FALSE;
1100 1100 ngep->param_loop_mode = NGE_LOOP_NONE;
1101 1101 ngep->factotum_flag = 0;
1102 1102 ngep->resched_needed = 0;
1103 1103 ngep->nge_mac_state = NGE_MAC_RESET;
1104 1104 ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
1105 1105 ngep->max_sdu += VTAG_SIZE;
1106 1106 ngep->rx_def = 0x16;
1107 1107
1108 1108 /* Clear the software statistics */
1109 1109 sw_stp->recv_count = 0;
1110 1110 sw_stp->xmit_count = 0;
1111 1111 sw_stp->rbytes = 0;
1112 1112 sw_stp->obytes = 0;
1113 1113
1114 1114 return (DDI_SUCCESS);
1115 1115 }
1116 1116
1117 1117 static void
1118 1118 nge_m_stop(void *arg)
1119 1119 {
1120 1120 nge_t *ngep = arg; /* private device info */
1121 1121 int err;
1122 1122
1123 1123 NGE_TRACE(("nge_m_stop($%p)", arg));
1124 1124
1125 1125 /*
1126 1126 * Just stop processing, then record new MAC state
1127 1127 */
1128 1128 mutex_enter(ngep->genlock);
1129 1129 /* If suspended, the adapter is already stopped, just return. */
1130 1130 if (ngep->suspended) {
1131 1131 ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
1132 1132 mutex_exit(ngep->genlock);
1133 1133 return;
1134 1134 }
1135 1135 rw_enter(ngep->rwlock, RW_WRITER);
1136 1136
1137 1137 err = nge_chip_stop(ngep, B_FALSE);
1138 1138 if (err == DDI_FAILURE)
1139 1139 err = nge_chip_reset(ngep);
1140 1140 if (err == DDI_FAILURE)
1141 1141 nge_problem(ngep, "nge_m_stop: stop chip failed");
1142 1142 ngep->nge_mac_state = NGE_MAC_STOPPED;
1143 1143
1144 1144 /* Recycle all the TX BD */
1145 1145 nge_tx_recycle_all(ngep);
1146 1146 nge_fini_rings(ngep);
1147 1147 nge_free_bufs(ngep);
1148 1148
1149 1149 NGE_DEBUG(("nge_m_stop($%p) done", arg));
1150 1150
1151 1151 rw_exit(ngep->rwlock);
1152 1152 mutex_exit(ngep->genlock);
1153 1153 }
1154 1154
1155 1155 static int
1156 1156 nge_m_start(void *arg)
1157 1157 {
1158 1158 int err;
1159 1159 nge_t *ngep = arg;
1160 1160
1161 1161 NGE_TRACE(("nge_m_start($%p)", arg));
1162 1162
1163 1163 /*
1164 1164 * Start processing and record new MAC state
1165 1165 */
1166 1166 mutex_enter(ngep->genlock);
1167 1167 /*
1168 1168 * If suspended, don't start, as the resume processing
1169 1169 * will recall this function with the suspended flag off.
1170 1170 */
1171 1171 if (ngep->suspended) {
1172 1172 mutex_exit(ngep->genlock);
1173 1173 return (EIO);
1174 1174 }
1175 1175 rw_enter(ngep->rwlock, RW_WRITER);
1176 1176 err = nge_alloc_bufs(ngep);
1177 1177 if (err != DDI_SUCCESS) {
1178 1178 nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
1179 1179 goto finish;
1180 1180 }
1181 1181 err = nge_init_rings(ngep);
1182 1182 if (err != DDI_SUCCESS) {
1183 1183 nge_free_bufs(ngep);
1184 1184 nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
1185 1185 goto finish;
1186 1186 }
1187 1187 err = nge_restart(ngep);
1188 1188
1189 1189 NGE_DEBUG(("nge_m_start($%p) done", arg));
1190 1190 finish:
1191 1191 rw_exit(ngep->rwlock);
1192 1192 mutex_exit(ngep->genlock);
1193 1193
1194 1194 return (err == DDI_SUCCESS ? 0 : EIO);
1195 1195 }
1196 1196
1197 1197 static int
1198 1198 nge_m_unicst(void *arg, const uint8_t *macaddr)
1199 1199 {
1200 1200 nge_t *ngep = arg;
1201 1201
1202 1202 NGE_TRACE(("nge_m_unicst($%p)", arg));
1203 1203 /*
1204 1204 * Remember the new current address in the driver state
1205 1205 * Sync the chip's idea of the address too ...
1206 1206 */
1207 1207 mutex_enter(ngep->genlock);
1208 1208
1209 1209 ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
1210 1210 ngep->cur_uni_addr.set = 1;
1211 1211
1212 1212 /*
1213 1213 * If we are suspended, we want to quit now, and not update
1214 1214 * the chip. Doing so might put it in a bad state, but the
1215 1215 * resume will get the unicast address installed.
1216 1216 */
1217 1217 if (ngep->suspended) {
1218 1218 mutex_exit(ngep->genlock);
1219 1219 return (DDI_SUCCESS);
1220 1220 }
1221 1221 nge_chip_sync(ngep);
1222 1222
1223 1223 NGE_DEBUG(("nge_m_unicst($%p) done", arg));
1224 1224 mutex_exit(ngep->genlock);
1225 1225
1226 1226 return (0);
1227 1227 }
1228 1228
1229 1229 static int
1230 1230 nge_m_promisc(void *arg, boolean_t on)
1231 1231 {
1232 1232 nge_t *ngep = arg;
1233 1233
1234 1234 NGE_TRACE(("nge_m_promisc($%p)", arg));
1235 1235
1236 1236 /*
1237 1237 * Store specified mode and pass to chip layer to update h/w
1238 1238 */
1239 1239 mutex_enter(ngep->genlock);
1240 1240 /*
1241 1241 * If suspended, there is no need to do anything, even
1242 1242 * recording the promiscuous mode is not necessary, as
1243 1243 * it won't be properly set on resume. Just return failure.
1244 1244 */
1245 1245 if (ngep->suspended) {
1246 1246 mutex_exit(ngep->genlock);
1247 1247 return (DDI_FAILURE);
1248 1248 }
1249 1249 if (ngep->promisc == on) {
1250 1250 mutex_exit(ngep->genlock);
1251 1251 NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1252 1252 return (0);
1253 1253 }
1254 1254 ngep->promisc = on;
1255 1255 ngep->record_promisc = ngep->promisc;
1256 1256 nge_chip_sync(ngep);
1257 1257 NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1258 1258 mutex_exit(ngep->genlock);
1259 1259
1260 1260 return (0);
1261 1261 }
1262 1262
1263 1263 static void nge_mulparam(nge_t *ngep)
1264 1264 {
1265 1265 uint8_t number;
1266 1266 ether_addr_t pand;
1267 1267 ether_addr_t por;
1268 1268 mul_item *plist;
1269 1269
1270 1270 for (number = 0; number < ETHERADDRL; number++) {
1271 1271 pand[number] = 0x00;
1272 1272 por[number] = 0x00;
1273 1273 }
1274 1274 for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1275 1275 for (number = 0; number < ETHERADDRL; number++) {
1276 1276 pand[number] &= plist->mul_addr[number];
1277 1277 por[number] |= plist->mul_addr[number];
1278 1278 }
1279 1279 }
1280 1280 for (number = 0; number < ETHERADDRL; number++) {
1281 1281 ngep->cur_mul_addr.addr[number]
1282 1282 = pand[number] & por[number];
1283 1283 ngep->cur_mul_mask.addr[number]
1284 1284 = pand [number] | (~por[number]);
1285 1285 }
1286 1286 }
1287 1287 static int
1288 1288 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1289 1289 {
1290 1290 boolean_t update;
1291 1291 boolean_t b_eq;
1292 1292 nge_t *ngep = arg;
1293 1293 mul_item *plist;
1294 1294 mul_item *plist_prev;
1295 1295 mul_item *pitem;
1296 1296
1297 1297 NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
1298 1298 (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1299 1299
1300 1300 update = B_FALSE;
1301 1301 plist = plist_prev = NULL;
1302 1302 mutex_enter(ngep->genlock);
1303 1303 if (add) {
1304 1304 if (ngep->pcur_mulist != NULL) {
1305 1305 for (plist = ngep->pcur_mulist; plist != NULL;
1306 1306 plist = plist->next) {
1307 1307 b_eq = ether_eq(plist->mul_addr, mca);
1308 1308 if (b_eq) {
1309 1309 plist->ref_cnt++;
1310 1310 break;
1311 1311 }
1312 1312 plist_prev = plist;
1313 1313 }
1314 1314 }
1315 1315
1316 1316 if (plist == NULL) {
1317 1317 pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
1318 1318 ether_copy(mca, pitem->mul_addr);
1319 1319 pitem ->ref_cnt++;
1320 1320 pitem ->next = NULL;
1321 1321 if (plist_prev == NULL)
1322 1322 ngep->pcur_mulist = pitem;
1323 1323 else
1324 1324 plist_prev->next = pitem;
1325 1325 update = B_TRUE;
1326 1326 }
1327 1327 } else {
1328 1328 if (ngep->pcur_mulist != NULL) {
1329 1329 for (plist = ngep->pcur_mulist; plist != NULL;
1330 1330 plist = plist->next) {
1331 1331 b_eq = ether_eq(plist->mul_addr, mca);
1332 1332 if (b_eq) {
1333 1333 update = B_TRUE;
1334 1334 break;
1335 1335 }
1336 1336 plist_prev = plist;
1337 1337 }
1338 1338
1339 1339 if (update) {
1340 1340 if ((plist_prev == NULL) &&
1341 1341 (plist->next == NULL))
1342 1342 ngep->pcur_mulist = NULL;
1343 1343 else if ((plist_prev == NULL) &&
1344 1344 (plist->next != NULL))
1345 1345 ngep->pcur_mulist = plist->next;
1346 1346 else
1347 1347 plist_prev->next = plist->next;
1348 1348 kmem_free(plist, sizeof (mul_item));
1349 1349 }
1350 1350 }
1351 1351 }
1352 1352
1353 1353 if (update && !ngep->suspended) {
1354 1354 nge_mulparam(ngep);
1355 1355 nge_chip_sync(ngep);
1356 1356 }
1357 1357 NGE_DEBUG(("nge_m_multicst($%p) done", arg));
1358 1358 mutex_exit(ngep->genlock);
1359 1359
1360 1360 return (0);
1361 1361 }
1362 1362
1363 1363 static void
1364 1364 nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1365 1365 {
1366 1366 int err;
1367 1367 int cmd;
1368 1368 nge_t *ngep = arg;
1369 1369 struct iocblk *iocp;
1370 1370 enum ioc_reply status;
1371 1371 boolean_t need_privilege;
1372 1372
1373 1373 /*
1374 1374 * If suspended, we might actually be able to do some of
1375 1375 * these ioctls, but it is harder to make sure they occur
1376 1376 * without actually putting the hardware in an undesirable
1377 1377 * state. So just NAK it.
1378 1378 */
1379 1379 mutex_enter(ngep->genlock);
1380 1380 if (ngep->suspended) {
1381 1381 miocnak(wq, mp, 0, EINVAL);
1382 1382 mutex_exit(ngep->genlock);
1383 1383 return;
1384 1384 }
1385 1385 mutex_exit(ngep->genlock);
1386 1386
1387 1387 /*
1388 1388 * Validate the command before bothering with the mutex ...
1389 1389 */
1390 1390 iocp = (struct iocblk *)mp->b_rptr;
1391 1391 iocp->ioc_error = 0;
1392 1392 need_privilege = B_TRUE;
1393 1393 cmd = iocp->ioc_cmd;
1394 1394
1395 1395 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
1396 1396 switch (cmd) {
1397 1397 default:
1398 1398 NGE_LDB(NGE_DBG_BADIOC,
1399 1399 ("nge_m_ioctl: unknown cmd 0x%x", cmd));
1400 1400
1401 1401 miocnak(wq, mp, 0, EINVAL);
1402 1402 return;
1403 1403
1404 1404 case NGE_MII_READ:
1405 1405 case NGE_MII_WRITE:
1406 1406 case NGE_SEE_READ:
1407 1407 case NGE_SEE_WRITE:
1408 1408 case NGE_DIAG:
1409 1409 case NGE_PEEK:
1410 1410 case NGE_POKE:
1411 1411 case NGE_PHY_RESET:
1412 1412 case NGE_SOFT_RESET:
1413 1413 case NGE_HARD_RESET:
1414 1414 break;
1415 1415
1416 1416 case LB_GET_INFO_SIZE:
1417 1417 case LB_GET_INFO:
1418 1418 case LB_GET_MODE:
1419 1419 need_privilege = B_FALSE;
1420 1420 break;
1421 1421 case LB_SET_MODE:
1422 1422 break;
1423 1423 }
1424 1424
1425 1425 if (need_privilege) {
1426 1426 /*
1427 1427 * Check for specific net_config privilege.
1428 1428 */
1429 1429 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1430 1430 if (err != 0) {
1431 1431 NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
1432 1432 cmd, err));
1433 1433 miocnak(wq, mp, 0, err);
1434 1434 return;
1435 1435 }
1436 1436 }
1437 1437
1438 1438 mutex_enter(ngep->genlock);
1439 1439
1440 1440 switch (cmd) {
1441 1441 default:
1442 1442 _NOTE(NOTREACHED)
1443 1443 status = IOC_INVAL;
1444 1444 break;
1445 1445
1446 1446 case NGE_MII_READ:
1447 1447 case NGE_MII_WRITE:
1448 1448 case NGE_SEE_READ:
1449 1449 case NGE_SEE_WRITE:
1450 1450 case NGE_DIAG:
1451 1451 case NGE_PEEK:
1452 1452 case NGE_POKE:
1453 1453 case NGE_PHY_RESET:
1454 1454 case NGE_SOFT_RESET:
1455 1455 case NGE_HARD_RESET:
1456 1456 status = nge_chip_ioctl(ngep, mp, iocp);
1457 1457 break;
1458 1458
1459 1459 case LB_GET_INFO_SIZE:
1460 1460 case LB_GET_INFO:
1461 1461 case LB_GET_MODE:
1462 1462 case LB_SET_MODE:
1463 1463 status = nge_loop_ioctl(ngep, mp, iocp);
1464 1464 break;
1465 1465
1466 1466 }
1467 1467
1468 1468 /*
1469 1469 * Do we need to reprogram the PHY and/or the MAC?
1470 1470 * Do it now, while we still have the mutex.
1471 1471 *
1472 1472 * Note: update the PHY first, 'cos it controls the
1473 1473 * speed/duplex parameters that the MAC code uses.
1474 1474 */
1475 1475
1476 1476 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));
1477 1477
1478 1478 switch (status) {
1479 1479 case IOC_RESTART_REPLY:
1480 1480 case IOC_RESTART_ACK:
1481 1481 (*ngep->physops->phys_update)(ngep);
1482 1482 nge_chip_sync(ngep);
1483 1483 break;
1484 1484
1485 1485 default:
1486 1486 break;
1487 1487 }
1488 1488
1489 1489 mutex_exit(ngep->genlock);
1490 1490
1491 1491 /*
1492 1492 * Finally, decide how to reply
1493 1493 */
1494 1494 switch (status) {
1495 1495
1496 1496 default:
1497 1497 case IOC_INVAL:
1498 1498 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
1499 1499 EINVAL : iocp->ioc_error);
1500 1500 break;
1501 1501
1502 1502 case IOC_DONE:
1503 1503 break;
1504 1504
1505 1505 case IOC_RESTART_ACK:
1506 1506 case IOC_ACK:
1507 1507 miocack(wq, mp, 0, 0);
1508 1508 break;
1509 1509
1510 1510 case IOC_RESTART_REPLY:
1511 1511 case IOC_REPLY:
1512 1512 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1513 1513 M_IOCACK : M_IOCNAK;
1514 1514 qreply(wq, mp);
1515 1515 break;
1516 1516 }
1517 1517 }
1518 1518
1519 1519 static boolean_t
1520 1520 nge_param_locked(mac_prop_id_t pr_num)
1521 1521 {
1522 1522 /*
1523 1523 * All adv_* parameters are locked (read-only) while
1524 1524 * the device is in any sort of loopback mode ...
1525 1525 */
1526 1526 switch (pr_num) {
1527 1527 case MAC_PROP_ADV_1000FDX_CAP:
1528 1528 case MAC_PROP_EN_1000FDX_CAP:
1529 1529 case MAC_PROP_ADV_1000HDX_CAP:
1530 1530 case MAC_PROP_EN_1000HDX_CAP:
1531 1531 case MAC_PROP_ADV_100FDX_CAP:
1532 1532 case MAC_PROP_EN_100FDX_CAP:
1533 1533 case MAC_PROP_ADV_100HDX_CAP:
1534 1534 case MAC_PROP_EN_100HDX_CAP:
1535 1535 case MAC_PROP_ADV_10FDX_CAP:
1536 1536 case MAC_PROP_EN_10FDX_CAP:
1537 1537 case MAC_PROP_ADV_10HDX_CAP:
1538 1538 case MAC_PROP_EN_10HDX_CAP:
1539 1539 case MAC_PROP_AUTONEG:
1540 1540 case MAC_PROP_FLOWCTRL:
1541 1541 return (B_TRUE);
1542 1542 }
1543 1543 return (B_FALSE);
1544 1544 }
1545 1545
1546 1546 /*
1547 1547 * callback functions for set/get of properties
1548 1548 */
1549 1549 static int
1550 1550 nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1551 1551 uint_t pr_valsize, const void *pr_val)
1552 1552 {
1553 1553 nge_t *ngep = barg;
1554 1554 int err = 0;
1555 1555 uint32_t cur_mtu, new_mtu;
1556 1556 link_flowctrl_t fl;
1557 1557
1558 1558 mutex_enter(ngep->genlock);
1559 1559 if (ngep->param_loop_mode != NGE_LOOP_NONE &&
1560 1560 nge_param_locked(pr_num)) {
1561 1561 /*
1562 1562 * All adv_* parameters are locked (read-only)
1563 1563 * while the device is in any sort of loopback mode.
1564 1564 */
1565 1565 mutex_exit(ngep->genlock);
1566 1566 return (EBUSY);
1567 1567 }
1568 1568 switch (pr_num) {
1569 1569 case MAC_PROP_EN_1000FDX_CAP:
1570 1570 ngep->param_en_1000fdx = *(uint8_t *)pr_val;
1571 1571 ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
1572 1572 goto reprogram;
1573 1573 case MAC_PROP_EN_100FDX_CAP:
1574 1574 ngep->param_en_100fdx = *(uint8_t *)pr_val;
1575 1575 ngep->param_adv_100fdx = *(uint8_t *)pr_val;
1576 1576 goto reprogram;
1577 1577 case MAC_PROP_EN_100HDX_CAP:
1578 1578 ngep->param_en_100hdx = *(uint8_t *)pr_val;
1579 1579 ngep->param_adv_100hdx = *(uint8_t *)pr_val;
1580 1580 goto reprogram;
1581 1581 case MAC_PROP_EN_10FDX_CAP:
1582 1582 ngep->param_en_10fdx = *(uint8_t *)pr_val;
1583 1583 ngep->param_adv_10fdx = *(uint8_t *)pr_val;
1584 1584 goto reprogram;
1585 1585 case MAC_PROP_EN_10HDX_CAP:
1586 1586 ngep->param_en_10hdx = *(uint8_t *)pr_val;
1587 1587 ngep->param_adv_10hdx = *(uint8_t *)pr_val;
1588 1588 reprogram:
1589 1589 (*ngep->physops->phys_update)(ngep);
1590 1590 nge_chip_sync(ngep);
1591 1591 break;
1592 1592
1593 1593 case MAC_PROP_ADV_1000FDX_CAP:
1594 1594 case MAC_PROP_ADV_1000HDX_CAP:
1595 1595 case MAC_PROP_ADV_100FDX_CAP:
1596 1596 case MAC_PROP_ADV_100HDX_CAP:
1597 1597 case MAC_PROP_ADV_10FDX_CAP:
1598 1598 case MAC_PROP_ADV_10HDX_CAP:
1599 1599 case MAC_PROP_STATUS:
1600 1600 case MAC_PROP_SPEED:
1601 1601 case MAC_PROP_DUPLEX:
1602 1602 case MAC_PROP_EN_1000HDX_CAP:
1603 1603 err = ENOTSUP; /* read-only prop. Can't set this */
1604 1604 break;
1605 1605 case MAC_PROP_AUTONEG:
1606 1606 ngep->param_adv_autoneg = *(uint8_t *)pr_val;
1607 1607 (*ngep->physops->phys_update)(ngep);
1608 1608 nge_chip_sync(ngep);
1609 1609 break;
1610 1610 case MAC_PROP_MTU:
1611 1611 cur_mtu = ngep->default_mtu;
1612 1612 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
1613 1613 if (new_mtu == cur_mtu) {
1614 1614 err = 0;
1615 1615 break;
1616 1616 }
1617 1617 if (new_mtu < ETHERMTU ||
1618 1618 new_mtu > NGE_MAX_MTU) {
1619 1619 err = EINVAL;
1620 1620 break;
1621 1621 }
1622 1622 if ((new_mtu > ETHERMTU) &&
1623 1623 (!ngep->dev_spec_param.jumbo)) {
1624 1624 err = EINVAL;
1625 1625 break;
1626 1626 }
1627 1627 if (ngep->nge_mac_state == NGE_MAC_STARTED) {
1628 1628 err = EBUSY;
1629 1629 break;
1630 1630 }
1631 1631
1632 1632 ngep->default_mtu = new_mtu;
1633 1633 if (ngep->default_mtu > ETHERMTU &&
1634 1634 ngep->default_mtu <= NGE_MTU_2500) {
1635 1635 ngep->buf_size = NGE_JB2500_BUFSZ;
1636 1636 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
1637 1637 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
1638 1638 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
1639 1639 ngep->nge_split = NGE_SPLIT_256;
1640 1640 } else if (ngep->default_mtu > NGE_MTU_2500 &&
1641 1641 ngep->default_mtu <= NGE_MTU_4500) {
1642 1642 ngep->buf_size = NGE_JB4500_BUFSZ;
1643 1643 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
1644 1644 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
1645 1645 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
1646 1646 ngep->nge_split = NGE_SPLIT_256;
1647 1647 } else if (ngep->default_mtu > NGE_MTU_4500 &&
1648 1648 ngep->default_mtu <= NGE_MAX_MTU) {
1649 1649 ngep->buf_size = NGE_JB9000_BUFSZ;
1650 1650 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1651 1651 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1652 1652 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1653 1653 ngep->nge_split = NGE_SPLIT_256;
1654 1654 } else if (ngep->default_mtu > NGE_MAX_MTU) {
1655 1655 ngep->default_mtu = NGE_MAX_MTU;
1656 1656 ngep->buf_size = NGE_JB9000_BUFSZ;
1657 1657 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1658 1658 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1659 1659 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1660 1660 ngep->nge_split = NGE_SPLIT_256;
1661 1661 } else if (ngep->lowmem_mode != 0) {
1662 1662 ngep->default_mtu = ETHERMTU;
1663 1663 ngep->buf_size = NGE_STD_BUFSZ;
1664 1664 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
1665 1665 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
1666 1666 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
1667 1667 ngep->nge_split = NGE_SPLIT_32;
1668 1668 } else {
1669 1669 ngep->default_mtu = ETHERMTU;
1670 1670 ngep->buf_size = NGE_STD_BUFSZ;
1671 1671 ngep->tx_desc =
1672 1672 ngep->dev_spec_param.tx_desc_num;
1673 1673 ngep->rx_desc =
1674 1674 ngep->dev_spec_param.rx_desc_num;
1675 1675 ngep->rx_buf =
1676 1676 ngep->dev_spec_param.rx_desc_num * 2;
1677 1677 ngep->nge_split =
1678 1678 ngep->dev_spec_param.nge_split;
1679 1679 }
1680 1680
1681 1681 err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);
1682 1682
1683 1683 break;
1684 1684 case MAC_PROP_FLOWCTRL:
1685 1685 bcopy(pr_val, &fl, sizeof (fl));
1686 1686 switch (fl) {
1687 1687 default:
1688 1688 err = ENOTSUP;
1689 1689 break;
1690 1690 case LINK_FLOWCTRL_NONE:
1691 1691 ngep->param_adv_pause = 0;
1692 1692 ngep->param_adv_asym_pause = 0;
1693 1693
1694 1694 ngep->param_link_rx_pause = B_FALSE;
1695 1695 ngep->param_link_tx_pause = B_FALSE;
1696 1696 break;
1697 1697 case LINK_FLOWCTRL_RX:
1698 1698 if (!((ngep->param_lp_pause == 0) &&
1699 1699 (ngep->param_lp_asym_pause == 1))) {
1700 1700 err = EINVAL;
1701 1701 break;
1702 1702 }
1703 1703 ngep->param_adv_pause = 1;
1704 1704 ngep->param_adv_asym_pause = 1;
1705 1705
1706 1706 ngep->param_link_rx_pause = B_TRUE;
1707 1707 ngep->param_link_tx_pause = B_FALSE;
1708 1708 break;
1709 1709 case LINK_FLOWCTRL_TX:
1710 1710 if (!((ngep->param_lp_pause == 1) &&
1711 1711 (ngep->param_lp_asym_pause == 1))) {
1712 1712 err = EINVAL;
1713 1713 break;
1714 1714 }
1715 1715 ngep->param_adv_pause = 0;
1716 1716 ngep->param_adv_asym_pause = 1;
1717 1717
1718 1718 ngep->param_link_rx_pause = B_FALSE;
1719 1719 ngep->param_link_tx_pause = B_TRUE;
1720 1720 break;
1721 1721 case LINK_FLOWCTRL_BI:
1722 1722 if (ngep->param_lp_pause != 1) {
1723 1723 err = EINVAL;
1724 1724 break;
1725 1725 }
1726 1726 ngep->param_adv_pause = 1;
1727 1727
1728 1728 ngep->param_link_rx_pause = B_TRUE;
1729 1729 ngep->param_link_tx_pause = B_TRUE;
1730 1730 break;
1731 1731 }
1732 1732
1733 1733 if (err == 0) {
1734 1734 (*ngep->physops->phys_update)(ngep);
1735 1735 nge_chip_sync(ngep);
1736 1736 }
1737 1737
1738 1738 break;
1739 1739 case MAC_PROP_PRIVATE:
1740 1740 err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
1741 1741 pr_val);
1742 1742 if (err == 0) {
1743 1743 (*ngep->physops->phys_update)(ngep);
1744 1744 nge_chip_sync(ngep);
1745 1745 }
1746 1746 break;
1747 1747 default:
1748 1748 err = ENOTSUP;
1749 1749 }
1750 1750 mutex_exit(ngep->genlock);
1751 1751 return (err);
1752 1752 }
1753 1753
1754 1754 static int
1755 1755 nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1756 1756 uint_t pr_valsize, void *pr_val)
1757 1757 {
1758 1758 nge_t *ngep = barg;
1759 1759 int err = 0;
1760 1760 link_flowctrl_t fl;
1761 1761 uint64_t speed;
1762 1762
1763 1763 switch (pr_num) {
1764 1764 case MAC_PROP_DUPLEX:
1765 1765 ASSERT(pr_valsize >= sizeof (link_duplex_t));
1766 1766 bcopy(&ngep->param_link_duplex, pr_val,
1767 1767 sizeof (link_duplex_t));
1768 1768 break;
1769 1769 case MAC_PROP_SPEED:
1770 1770 ASSERT(pr_valsize >= sizeof (uint64_t));
1771 1771 speed = ngep->param_link_speed * 1000000ull;
1772 1772 bcopy(&speed, pr_val, sizeof (speed));
1773 1773 break;
1774 1774 case MAC_PROP_AUTONEG:
1775 1775 *(uint8_t *)pr_val = ngep->param_adv_autoneg;
1776 1776 break;
1777 1777 case MAC_PROP_FLOWCTRL:
1778 1778 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
1779 1779 if (ngep->param_link_rx_pause &&
1780 1780 !ngep->param_link_tx_pause)
1781 1781 fl = LINK_FLOWCTRL_RX;
1782 1782
1783 1783 if (!ngep->param_link_rx_pause &&
1784 1784 !ngep->param_link_tx_pause)
1785 1785 fl = LINK_FLOWCTRL_NONE;
1786 1786
1787 1787 if (!ngep->param_link_rx_pause &&
1788 1788 ngep->param_link_tx_pause)
1789 1789 fl = LINK_FLOWCTRL_TX;
1790 1790
1791 1791 if (ngep->param_link_rx_pause &&
1792 1792 ngep->param_link_tx_pause)
1793 1793 fl = LINK_FLOWCTRL_BI;
1794 1794 bcopy(&fl, pr_val, sizeof (fl));
1795 1795 break;
1796 1796 case MAC_PROP_ADV_1000FDX_CAP:
1797 1797 *(uint8_t *)pr_val = ngep->param_adv_1000fdx;
1798 1798 break;
1799 1799 case MAC_PROP_EN_1000FDX_CAP:
1800 1800 *(uint8_t *)pr_val = ngep->param_en_1000fdx;
1801 1801 break;
1802 1802 case MAC_PROP_ADV_1000HDX_CAP:
1803 1803 *(uint8_t *)pr_val = ngep->param_adv_1000hdx;
1804 1804 break;
1805 1805 case MAC_PROP_EN_1000HDX_CAP:
1806 1806 *(uint8_t *)pr_val = ngep->param_en_1000hdx;
1807 1807 break;
1808 1808 case MAC_PROP_ADV_100FDX_CAP:
1809 1809 *(uint8_t *)pr_val = ngep->param_adv_100fdx;
1810 1810 break;
1811 1811 case MAC_PROP_EN_100FDX_CAP:
1812 1812 *(uint8_t *)pr_val = ngep->param_en_100fdx;
1813 1813 break;
1814 1814 case MAC_PROP_ADV_100HDX_CAP:
1815 1815 *(uint8_t *)pr_val = ngep->param_adv_100hdx;
1816 1816 break;
1817 1817 case MAC_PROP_EN_100HDX_CAP:
1818 1818 *(uint8_t *)pr_val = ngep->param_en_100hdx;
1819 1819 break;
1820 1820 case MAC_PROP_ADV_10FDX_CAP:
1821 1821 *(uint8_t *)pr_val = ngep->param_adv_10fdx;
1822 1822 break;
1823 1823 case MAC_PROP_EN_10FDX_CAP:
1824 1824 *(uint8_t *)pr_val = ngep->param_en_10fdx;
1825 1825 break;
1826 1826 case MAC_PROP_ADV_10HDX_CAP:
1827 1827 *(uint8_t *)pr_val = ngep->param_adv_10hdx;
1828 1828 break;
1829 1829 case MAC_PROP_EN_10HDX_CAP:
1830 1830 *(uint8_t *)pr_val = ngep->param_en_10hdx;
1831 1831 break;
1832 1832 case MAC_PROP_ADV_100T4_CAP:
1833 1833 case MAC_PROP_EN_100T4_CAP:
1834 1834 *(uint8_t *)pr_val = 0;
1835 1835 break;
1836 1836 case MAC_PROP_PRIVATE:
1837 1837 err = nge_get_priv_prop(ngep, pr_name,
1838 1838 pr_valsize, pr_val);
1839 1839 break;
1840 1840 default:
1841 1841 err = ENOTSUP;
1842 1842 }
1843 1843 return (err);
1844 1844 }
1845 1845
1846 1846 static void
1847 1847 nge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1848 1848 mac_prop_info_handle_t prh)
1849 1849 {
1850 1850 nge_t *ngep = barg;
1851 1851
1852 1852 switch (pr_num) {
1853 1853 case MAC_PROP_DUPLEX:
1854 1854 case MAC_PROP_SPEED:
1855 1855 case MAC_PROP_ADV_1000FDX_CAP:
1856 1856 case MAC_PROP_ADV_1000HDX_CAP:
1857 1857 case MAC_PROP_ADV_100FDX_CAP:
1858 1858 case MAC_PROP_EN_1000HDX_CAP:
1859 1859 case MAC_PROP_ADV_100HDX_CAP:
1860 1860 case MAC_PROP_ADV_10FDX_CAP:
1861 1861 case MAC_PROP_ADV_10HDX_CAP:
1862 1862 case MAC_PROP_ADV_100T4_CAP:
1863 1863 case MAC_PROP_EN_100T4_CAP:
1864 1864 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1865 1865 break;
1866 1866
1867 1867 case MAC_PROP_EN_1000FDX_CAP:
1868 1868 case MAC_PROP_EN_100FDX_CAP:
1869 1869 case MAC_PROP_EN_100HDX_CAP:
1870 1870 case MAC_PROP_EN_10FDX_CAP:
1871 1871 case MAC_PROP_EN_10HDX_CAP:
1872 1872 mac_prop_info_set_default_uint8(prh, 1);
1873 1873 break;
1874 1874
1875 1875 case MAC_PROP_AUTONEG:
1876 1876 mac_prop_info_set_default_uint8(prh, 1);
1877 1877 break;
1878 1878
1879 1879 case MAC_PROP_FLOWCTRL:
1880 1880 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1881 1881 break;
1882 1882
1883 1883 case MAC_PROP_MTU:
1884 1884 mac_prop_info_set_range_uint32(prh, ETHERMTU,
1885 1885 ngep->dev_spec_param.jumbo ? NGE_MAX_MTU : ETHERMTU);
1886 1886 break;
1887 1887
1888 1888 case MAC_PROP_PRIVATE: {
1889 1889 char valstr[64];
1890 1890 int value;
1891 1891
1892 1892 bzero(valstr, sizeof (valstr));
1893 1893 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
1894 1894 value = NGE_TX_COPY_SIZE;
1895 1895 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
1896 1896 value = NGE_RX_COPY_SIZE;
1897 1897 } else if (strcmp(pr_name, "_recv_max_packet") == 0) {
1898 1898 value = 128;
1899 1899 } else if (strcmp(pr_name, "_poll_quiet_time") == 0) {
1900 1900 value = NGE_POLL_QUIET_TIME;
1901 1901 } else if (strcmp(pr_name, "_poll_busy_time") == 0) {
1902 1902 value = NGE_POLL_BUSY_TIME;
1903 1903 } else if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
1904 1904 value = 1;
1905 1905 } else if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
1906 1906 value = 8;
1907 1907 } else {
1908 1908 return;
1909 1909 }
1910 1910
1911 1911 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1912 1912 }
1913 1913 }
1914 1914
1915 1915 }
1916 1916
1917 1917 /* ARGSUSED */
1918 1918 static int
1919 1919 nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
1920 1920 const void *pr_val)
1921 1921 {
1922 1922 int err = 0;
1923 1923 long result;
1924 1924
1925 1925 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
1926 1926 if (pr_val == NULL) {
1927 1927 err = EINVAL;
1928 1928 return (err);
1929 1929 }
1930 1930 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1931 1931 if (result < 0 || result > NGE_MAX_SDU) {
1932 1932 err = EINVAL;
1933 1933 } else {
1934 1934 ngep->param_txbcopy_threshold = (uint32_t)result;
1935 1935 goto reprogram;
1936 1936 }
1937 1937 return (err);
1938 1938 }
1939 1939 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
1940 1940 if (pr_val == NULL) {
1941 1941 err = EINVAL;
1942 1942 return (err);
1943 1943 }
1944 1944 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1945 1945 if (result < 0 || result > NGE_MAX_SDU) {
1946 1946 err = EINVAL;
1947 1947 } else {
1948 1948 ngep->param_rxbcopy_threshold = (uint32_t)result;
1949 1949 goto reprogram;
1950 1950 }
1951 1951 return (err);
1952 1952 }
1953 1953 if (strcmp(pr_name, "_recv_max_packet") == 0) {
1954 1954 if (pr_val == NULL) {
1955 1955 err = EINVAL;
1956 1956 return (err);
1957 1957 }
1958 1958 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1959 1959 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
1960 1960 err = EINVAL;
1961 1961 } else {
1962 1962 ngep->param_recv_max_packet = (uint32_t)result;
1963 1963 goto reprogram;
1964 1964 }
1965 1965 return (err);
1966 1966 }
1967 1967 if (strcmp(pr_name, "_poll_quiet_time") == 0) {
1968 1968 if (pr_val == NULL) {
1969 1969 err = EINVAL;
1970 1970 return (err);
1971 1971 }
1972 1972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1973 1973 if (result < 0 || result > 10000) {
1974 1974 err = EINVAL;
1975 1975 } else {
1976 1976 ngep->param_poll_quiet_time = (uint32_t)result;
1977 1977 goto reprogram;
1978 1978 }
1979 1979 return (err);
1980 1980 }
1981 1981 if (strcmp(pr_name, "_poll_busy_time") == 0) {
1982 1982 if (pr_val == NULL) {
1983 1983 err = EINVAL;
1984 1984 return (err);
1985 1985 }
1986 1986 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1987 1987 if (result < 0 || result > 10000) {
1988 1988 err = EINVAL;
1989 1989 } else {
1990 1990 ngep->param_poll_busy_time = (uint32_t)result;
1991 1991 goto reprogram;
1992 1992 }
1993 1993 return (err);
1994 1994 }
1995 1995 if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
1996 1996 if (pr_val == NULL) {
1997 1997 err = EINVAL;
1998 1998 return (err);
1999 1999 }
2000 2000 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2001 2001 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2002 2002 err = EINVAL;
2003 2003 } else {
2004 2004 ngep->param_rx_intr_hwater = (uint32_t)result;
2005 2005 goto reprogram;
2006 2006 }
2007 2007 return (err);
2008 2008 }
2009 2009 if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2010 2010 if (pr_val == NULL) {
2011 2011 err = EINVAL;
2012 2012 return (err);
2013 2013 }
2014 2014 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2015 2015 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2016 2016 err = EINVAL;
2017 2017 } else {
2018 2018 ngep->param_rx_intr_lwater = (uint32_t)result;
2019 2019 goto reprogram;
2020 2020 }
2021 2021 return (err);
2022 2022 }
2023 2023 err = ENOTSUP;
2024 2024 return (err);
2025 2025
2026 2026 reprogram:
2027 2027 if (err == 0) {
2028 2028 (*ngep->physops->phys_update)(ngep);
2029 2029 nge_chip_sync(ngep);
2030 2030 }
2031 2031
2032 2032 return (err);
2033 2033 }
2034 2034
2035 2035 static int
2036 2036 nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
2037 2037 void *pr_val)
2038 2038 {
2039 2039 int err = ENOTSUP;
2040 2040 int value;
2041 2041
2042 2042 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
2043 2043 value = ngep->param_txbcopy_threshold;
2044 2044 err = 0;
2045 2045 goto done;
2046 2046 }
2047 2047 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
2048 2048 value = ngep->param_rxbcopy_threshold;
2049 2049 err = 0;
2050 2050 goto done;
2051 2051 }
2052 2052 if (strcmp(pr_name, "_recv_max_packet") == 0) {
2053 2053 value = ngep->param_recv_max_packet;
2054 2054 err = 0;
2055 2055 goto done;
2056 2056 }
2057 2057 if (strcmp(pr_name, "_poll_quiet_time") == 0) {
2058 2058 value = ngep->param_poll_quiet_time;
2059 2059 err = 0;
2060 2060 goto done;
2061 2061 }
2062 2062 if (strcmp(pr_name, "_poll_busy_time") == 0) {
2063 2063 value = ngep->param_poll_busy_time;
2064 2064 err = 0;
2065 2065 goto done;
2066 2066 }
2067 2067 if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
2068 2068 value = ngep->param_rx_intr_hwater;
2069 2069 err = 0;
2070 2070 goto done;
2071 2071 }
2072 2072 if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2073 2073 value = ngep->param_rx_intr_lwater;
2074 2074 err = 0;
2075 2075 goto done;
2076 2076 }
2077 2077
2078 2078 done:
2079 2079 if (err == 0) {
2080 2080 (void) snprintf(pr_val, pr_valsize, "%d", value);
2081 2081 }
2082 2082 return (err);
2083 2083 }
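
Every private property above follows the same round trip: nge_set_priv_prop() parses the string with ddi_strtol() and range-checks the result before storing it, and nge_get_priv_prop() formats the stored value back into a string with snprintf(). The following is a compressed, userland sketch of that round trip only; strtol() stands in for ddi_strtol(), and the bounds used in main() are illustrative values, not the driver's NGE_* limits.

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the set path: parse the string and clamp to [min, max]. */
static int
priv_prop_set(const char *val, long min, long max, long *storep)
{
	long result = strtol(val, NULL, 0);	/* kernel code uses ddi_strtol() */

	if (result < min || result > max)
		return (-1);			/* the driver returns EINVAL */
	*storep = result;
	return (0);
}

/* Sketch of the get path: format the stored value back into a string. */
static void
priv_prop_get(long stored, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%ld", stored);
}

int
main(void)
{
	long stored = 0;
	char buf[16];

	/* 0..10240 is an illustrative bound, not NGE_MAX_SDU. */
	if (priv_prop_set("1024", 0, 10240, &stored) == 0) {
		priv_prop_get(stored, buf, sizeof (buf));
		(void) printf("round trip: %s\n", buf);
	}
	return (0);
}
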
2084 2084
2085 2085 /* ARGSUSED */
2086 2086 static boolean_t
2087 2087 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2088 2088 {
2089 2089 nge_t *ngep = arg;
2090 2090 nge_dev_spec_param_t *dev_param_p;
2091 2091
2092 2092 dev_param_p = &ngep->dev_spec_param;
2093 2093
2094 2094 switch (cap) {
2095 2095 case MAC_CAPAB_HCKSUM: {
2096 2096 uint32_t *hcksum_txflags = cap_data;
2097 2097
2098 2098 if (dev_param_p->tx_hw_checksum) {
2099 2099 *hcksum_txflags = dev_param_p->tx_hw_checksum;
2100 2100 } else
2101 2101 return (B_FALSE);
2102 2102 break;
2103 2103 }
2104 2104 default:
2105 2105 return (B_FALSE);
2106 2106 }
2107 2107 return (B_TRUE);
2108 2108 }
2109 2109
2110 2110 #undef NGE_DBG
2111 2111 #define NGE_DBG NGE_DBG_INIT /* debug flag for this code */
2112 2112 int
2113 2113 nge_restart(nge_t *ngep)
2114 2114 {
2115 2115 int err = 0;
2116 2116 err = nge_reset_dev(ngep);
2117 2117 /* write back the promisc setting */
2118 2118 ngep->promisc = ngep->record_promisc;
2119 2119 nge_chip_sync(ngep);
2120 2120 if (!err)
2121 2121 err = nge_chip_start(ngep);
2122 2122
2123 2123 if (err) {
2124 2124 ngep->nge_mac_state = NGE_MAC_STOPPED;
2125 2125 return (DDI_FAILURE);
2126 2126 } else {
2127 2127 ngep->nge_mac_state = NGE_MAC_STARTED;
2128 2128 return (DDI_SUCCESS);
2129 2129 }
2130 2130 }
2131 2131
2132 2132 void
2133 2133 nge_wake_factotum(nge_t *ngep)
2134 2134 {
2135 2135 mutex_enter(ngep->softlock);
2136 2136 if (ngep->factotum_flag == 0) {
2137 2137 ngep->factotum_flag = 1;
2138 2138 (void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
2139 2139 }
2140 2140 mutex_exit(ngep->softlock);
2141 2141 }
2142 2142
2143 2143 void
2144 2144 nge_interrupt_optimize(nge_t *ngep)
2145 2145 {
2146 2146 uint32_t tx_pkts;
2147 2147 tx_pkts = ngep->statistics.sw_statistics.xmit_count - ngep->tpkts_last;
2148 2148 ngep->tpkts_last = ngep->statistics.sw_statistics.xmit_count;
2149 2149 if ((tx_pkts > NGE_POLL_TUNE) &&
2150 2150 (tx_pkts <= NGE_POLL_MAX))
2151 2151 ngep->tfint_threshold = (tx_pkts / NGE_POLL_ENTER);
2152 2152 else
2153 2153 ngep->tfint_threshold = NGE_TFINT_DEFAULT;
2154 2154 }
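
nge_interrupt_optimize() above rescales the transmit-completion interrupt threshold from the number of packets sent during the last cyclic period. Below is a minimal, self-contained sketch of that policy; the constant values are assumptions chosen only for illustration and do not reflect the driver's actual NGE_POLL_TUNE, NGE_POLL_MAX, NGE_POLL_ENTER, or NGE_TFINT_DEFAULT definitions in nge.h.

#include <stdint.h>
#include <stdio.h>

#define	POLL_TUNE	128	/* assumed: minimum tx packets per period */
#define	POLL_MAX	4096	/* assumed: maximum tx packets per period */
#define	POLL_ENTER	8	/* assumed: divisor for the threshold */
#define	TFINT_DEFAULT	32	/* assumed: fallback threshold */

/*
 * Compute the next threshold from the cumulative transmit count and
 * the count remembered at the previous cyclic tick.
 */
static uint32_t
tfint_threshold(uint32_t xmit_count, uint32_t *tpkts_last)
{
	uint32_t tx_pkts = xmit_count - *tpkts_last;

	*tpkts_last = xmit_count;
	if (tx_pkts > POLL_TUNE && tx_pkts <= POLL_MAX)
		return (tx_pkts / POLL_ENTER);
	return (TFINT_DEFAULT);
}

int
main(void)
{
	uint32_t last = 0;

	/* 1000 packets since the last tick: threshold becomes 1000 / 8 = 125. */
	(void) printf("%u\n", tfint_threshold(1000, &last));
	/* Only 10 packets in the next period: falls back to the default, 32. */
	(void) printf("%u\n", tfint_threshold(1010, &last));
	return (0);
}
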
2155 2155
2156 2156 /*
2157 2157 * High-level cyclic handler
2158 2158 *
2159 2159 * This routine schedules a (low-level) softint callback to the
2160 2160 * factotum.
2161 2161 */
2162 2162
2163 2163 static void
2164 2164 nge_chip_cyclic(void *arg)
2165 2165 {
2166 2166 nge_t *ngep;
2167 2167
2168 2168 ngep = (nge_t *)arg;
2169 2169
2170 2170 switch (ngep->nge_chip_state) {
2171 2171 default:
2172 2172 return;
2173 2173
2174 2174 case NGE_CHIP_RUNNING:
2175 2175 nge_interrupt_optimize(ngep);
2176 2176 break;
2177 2177
2178 2178 case NGE_CHIP_FAULT:
2179 2179 case NGE_CHIP_ERROR:
2180 2180 break;
2181 2181 }
2182 2182
2183 2183 nge_wake_factotum(ngep);
2184 2184 }
2185 2185
2186 2186 /*
2187 2187  * Get/release the SMU semaphore.
2188 2188  * For SMU-enabled chipsets:
2189 2189  * when the nge driver attaches, it must acquire the semaphore
2190 2190  * before PHY initialization and before accessing MAC registers;
2191 2191  * when the driver detaches, it must release the
2192 2192  * semaphore.
2193 2193 */
2194 2194
2195 2195 static int
2196 2196 nge_smu_sema(nge_t *ngep, boolean_t acquire)
2197 2197 {
2198 2198 nge_tx_en tx_en;
2199 2199 uint32_t tries;
2200 2200
2201 2201 if (acquire) {
2202 2202 for (tries = 0; tries < 5; tries++) {
2203 2203 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2204 2204 if (tx_en.bits.smu2mac == NGE_SMU_FREE)
2205 2205 break;
2206 2206 delay(drv_usectohz(1000000));
2207 2207 }
2208 2208 if (tx_en.bits.smu2mac != NGE_SMU_FREE)
2209 2209 return (DDI_FAILURE);
2210 2210 for (tries = 0; tries < 5; tries++) {
2211 2211 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2212 2212 tx_en.bits.mac2smu = NGE_SMU_GET;
2213 2213 nge_reg_put32(ngep, NGE_TX_EN, tx_en.val);
2214 2214 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2215 2215
2216 2216 if (tx_en.bits.mac2smu == NGE_SMU_GET &&
2217 2217 tx_en.bits.smu2mac == NGE_SMU_FREE)
2218 2218 return (DDI_SUCCESS);
2219 2219 drv_usecwait(10);
2220 2220 }
2221 2221 return (DDI_FAILURE);
2222 2222 } else
2223 2223 nge_reg_put32(ngep, NGE_TX_EN, 0x0);
2224 2224
2225 2225 return (DDI_SUCCESS);
2226 2226
2227 2227 }
2228 2228 static void
2229 2229 nge_unattach(nge_t *ngep)
2230 2230 {
2231 2231 send_ring_t *srp;
2232 2232 buff_ring_t *brp;
2233 2233
2234 2234 srp = ngep->send;
2235 2235 brp = ngep->buff;
2236 2236 NGE_TRACE(("nge_unattach($%p)", (void *)ngep));
2237 2237
2238 2238 /*
2239 2239 * Flag that no more activity may be initiated
2240 2240 */
2241 2241 ngep->progress &= ~PROGRESS_READY;
2242 2242 ngep->nge_mac_state = NGE_MAC_UNATTACH;
2243 2243
2244 2244 /*
2245 2245 * Quiesce the PHY and MAC (leave it reset but still powered).
2246 2246 * Clean up and free all NGE data structures
2247 2247 */
2248 2248 if (ngep->periodic_id != NULL) {
2249 2249 ddi_periodic_delete(ngep->periodic_id);
2250 2250 ngep->periodic_id = NULL;
2251 2251 }
2252 2252
2253 2253 if (ngep->progress & PROGRESS_KSTATS)
2254 2254 nge_fini_kstats(ngep);
2255 2255
2256 2256 if (ngep->progress & PROGRESS_HWINT) {
2257 2257 mutex_enter(ngep->genlock);
2258 2258 nge_restore_mac_addr(ngep);
2259 2259 (void) nge_chip_stop(ngep, B_FALSE);
2260 2260 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2261 2261 ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2262 2262 (void) nge_smu_sema(ngep, B_FALSE);
2263 2263 }
2264 2264 mutex_exit(ngep->genlock);
2265 2265 }
2266 2266
2267 2267 if (ngep->progress & PROGRESS_SWINT)
2268 2268 nge_rem_intrs(ngep);
2269 2269
2270 2270 if (ngep->progress & PROGRESS_FACTOTUM)
2271 2271 (void) ddi_intr_remove_softint(ngep->factotum_hdl);
2272 2272
2273 2273 if (ngep->progress & PROGRESS_RESCHED)
2274 2274 (void) ddi_intr_remove_softint(ngep->resched_hdl);
2275 2275
2276 2276 if (ngep->progress & PROGRESS_INTR) {
2277 2277 mutex_destroy(srp->tx_lock);
2278 2278 mutex_destroy(srp->tc_lock);
2279 2279 mutex_destroy(&srp->dmah_lock);
2280 2280 mutex_destroy(brp->recycle_lock);
2281 2281
2282 2282 mutex_destroy(ngep->genlock);
2283 2283 mutex_destroy(ngep->softlock);
2284 2284 rw_destroy(ngep->rwlock);
2285 2285 }
2286 2286
2287 2287 if (ngep->progress & PROGRESS_REGS)
2288 2288 ddi_regs_map_free(&ngep->io_handle);
2289 2289
2290 2290 if (ngep->progress & PROGRESS_CFG)
2291 2291 pci_config_teardown(&ngep->cfg_handle);
2292 2292
2293 2293 ddi_remove_minor_node(ngep->devinfo, NULL);
2294 2294
2295 2295 kmem_free(ngep, sizeof (*ngep));
2296 2296 }
2297 2297
2298 2298 static int
2299 2299 nge_resume(dev_info_t *devinfo)
2300 2300 {
2301 2301 nge_t *ngep;
2302 2302 chip_info_t *infop;
2303 2303 int err;
2304 2304
2305 2305 ASSERT(devinfo != NULL);
2306 2306
2307 2307 ngep = ddi_get_driver_private(devinfo);
2308 2308 err = 0;
2309 2309
2310 2310 /*
2311 2311 	 * If there are state inconsistencies, this is bad. Returning
2312 2312 * DDI_FAILURE here will eventually cause the machine to panic,
2313 2313 * so it is best done here so that there is a possibility of
2314 2314 * debugging the problem.
2315 2315 */
2316 2316 if (ngep == NULL)
2317 2317 cmn_err(CE_PANIC,
2318 2318 "nge: ngep returned from ddi_get_driver_private was NULL");
2319 2319 infop = (chip_info_t *)&ngep->chipinfo;
2320 2320
2321 2321 if (ngep->devinfo != devinfo)
2322 2322 cmn_err(CE_PANIC,
2323 2323 "nge: passed devinfo not the same as saved devinfo");
2324 2324
2325 2325 mutex_enter(ngep->genlock);
2326 2326 rw_enter(ngep->rwlock, RW_WRITER);
2327 2327
2328 2328 /*
2329 2329 * Fetch the config space. Even though we have most of it cached,
2330 2330 * some values *might* change across a suspend/resume.
2331 2331 */
2332 2332 nge_chip_cfg_init(ngep, infop, B_FALSE);
2333 2333
2334 2334 /*
2335 2335 	 * This branch is taken in only one case: the port hasn't
2336 2336 	 * been plumbed.
2337 2337 */
2338 2338 if (ngep->suspended == B_FALSE) {
2339 2339 rw_exit(ngep->rwlock);
2340 2340 mutex_exit(ngep->genlock);
2341 2341 return (DDI_SUCCESS);
2342 2342 }
2343 2343
2344 2344 nge_tx_recycle_all(ngep);
2345 2345 err = nge_reinit_ring(ngep);
2346 2346 if (!err) {
2347 2347 err = nge_chip_reset(ngep);
2348 2348 if (!err)
2349 2349 err = nge_chip_start(ngep);
2350 2350 }
2351 2351
2352 2352 if (err) {
2353 2353 /*
2354 2354 * We note the failure, but return success, as the
2355 2355 * system is still usable without this controller.
2356 2356 */
2357 2357 cmn_err(CE_WARN, "nge: resume: failed to restart controller");
2358 2358 } else {
2359 2359 ngep->nge_mac_state = NGE_MAC_STARTED;
2360 2360 }
2361 2361 ngep->suspended = B_FALSE;
2362 2362
2363 2363 rw_exit(ngep->rwlock);
2364 2364 mutex_exit(ngep->genlock);
2365 2365
2366 2366 return (DDI_SUCCESS);
2367 2367 }
2368 2368
2369 2369 /*
2370 2370 * attach(9E) -- Attach a device to the system
2371 2371 *
2372 2372 * Called once for each board successfully probed.
2373 2373 */
2374 2374 static int
2375 2375 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2376 2376 {
2377 2377 int err;
2378 2378 int i;
2379 2379 int instance;
2380 2380 caddr_t regs;
2381 2381 nge_t *ngep;
2382 2382 chip_info_t *infop;
2383 2383 mac_register_t *macp;
2384 2384
2385 2385 switch (cmd) {
2386 2386 default:
2387 2387 return (DDI_FAILURE);
2388 2388
2389 2389 case DDI_RESUME:
2390 2390 return (nge_resume(devinfo));
2391 2391
2392 2392 case DDI_ATTACH:
2393 2393 break;
2394 2394 }
2395 2395
2396 2396 ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
2397 2397 instance = ddi_get_instance(devinfo);
2398 2398 ddi_set_driver_private(devinfo, ngep);
2399 2399 ngep->devinfo = devinfo;
2400 2400
2401 2401 (void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
2402 2402 NGE_DRIVER_NAME, instance);
2403 2403 err = pci_config_setup(devinfo, &ngep->cfg_handle);
2404 2404 if (err != DDI_SUCCESS) {
2405 2405 nge_problem(ngep, "nge_attach: pci_config_setup() failed");
2406 2406 goto attach_fail;
2407 2407 }
2408 2408 /*
2409 2409 * param_txbcopy_threshold and param_rxbcopy_threshold are tx/rx bcopy
2410 2410 * thresholds. Bounds: min 0, max NGE_MAX_SDU
2411 2411 */
2412 2412 ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
2413 2413 ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;
2414 2414
2415 2415 /*
2416 2416 	 * param_recv_max_packet is the maximum packets received per interrupt.
2417 2417 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024
2418 2418 */
2419 2419 ngep->param_recv_max_packet = 128;
2420 2420
2421 2421 /*
2422 2422 	 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy
2423 2423 	 * times used to switch between per-packet and polling interrupts.
2424 2424 * Bounds: min 0, max 10000
2425 2425 */
2426 2426 ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
2427 2427 ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;
2428 2428 ngep->tfint_threshold = NGE_TFINT_DEFAULT;
2429 2429 ngep->poll = B_FALSE;
2430 2430 ngep->ch_intr_mode = B_FALSE;
2431 2431
2432 2432 /*
2433 2433 	 * param_rx_intr_hwater/param_rx_intr_lwater: packets received
2434 2434 * to trigger the poll_quiet_time/poll_busy_time counter.
2435 2435 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024.
2436 2436 */
2437 2437 ngep->param_rx_intr_hwater = 1;
2438 2438 ngep->param_rx_intr_lwater = 8;
2439 2439
2440 2440
2441 2441 infop = (chip_info_t *)&ngep->chipinfo;
2442 2442 nge_chip_cfg_init(ngep, infop, B_FALSE);
2443 2443 nge_init_dev_spec_param(ngep);
2444 2444 nge_get_props(ngep);
2445 2445 ngep->progress |= PROGRESS_CFG;
2446 2446
2447 2447 err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
2448 2448 	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
2449 2449 if (err != DDI_SUCCESS) {
2450 2450 nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
2451 2451 goto attach_fail;
2452 2452 }
2453 2453 ngep->io_regs = regs;
2454 2454 ngep->progress |= PROGRESS_REGS;
2455 2455
2456 2456 err = nge_register_intrs_and_init_locks(ngep);
2457 2457 if (err != DDI_SUCCESS) {
2458 2458 nge_problem(ngep, "nge_attach:"
2459 2459 " register intrs and init locks failed");
2460 2460 goto attach_fail;
2461 2461 }
2462 2462 nge_init_ring_param_lock(ngep);
2463 2463 ngep->progress |= PROGRESS_INTR;
2464 2464
2465 2465 mutex_enter(ngep->genlock);
2466 2466
2467 2467 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2468 2468 ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2469 2469 err = nge_smu_sema(ngep, B_TRUE);
2470 2470 if (err != DDI_SUCCESS) {
2471 2471 nge_problem(ngep, "nge_attach: nge_smu_sema() failed");
2472 2472 goto attach_fail;
2473 2473 }
2474 2474 }
2475 2475 /*
2476 2476 * Initialise link state variables
2477 2477 * Stop, reset & reinitialise the chip.
2478 2478 * Initialise the (internal) PHY.
2479 2479 */
2480 2480 nge_phys_init(ngep);
2481 2481 ngep->nge_chip_state = NGE_CHIP_INITIAL;
2482 2482 err = nge_chip_reset(ngep);
2483 2483 if (err != DDI_SUCCESS) {
2484 2484 nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
2485 2485 mutex_exit(ngep->genlock);
2486 2486 goto attach_fail;
2487 2487 }
2488 2488 nge_chip_sync(ngep);
2489 2489
2490 2490 /*
2491 2491 * Now that mutex locks are initialized, enable interrupts.
2492 2492 */
2493 2493 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2494 2494 /* Call ddi_intr_block_enable() for MSI interrupts */
2495 2495 (void) ddi_intr_block_enable(ngep->htable,
2496 2496 ngep->intr_actual_cnt);
2497 2497 } else {
2498 2498 /* Call ddi_intr_enable for MSI or FIXED interrupts */
2499 2499 for (i = 0; i < ngep->intr_actual_cnt; i++) {
2500 2500 (void) ddi_intr_enable(ngep->htable[i]);
2501 2501 }
2502 2502 }
2503 2503
2504 2504 ngep->link_state = LINK_STATE_UNKNOWN;
2505 2505 ngep->progress |= PROGRESS_HWINT;
2506 2506
2507 2507 /*
2508 2508 * Register NDD-tweakable parameters
2509 2509 */
2510 2510 if (nge_nd_init(ngep)) {
2511 2511 nge_problem(ngep, "nge_attach: nge_nd_init() failed");
2512 2512 mutex_exit(ngep->genlock);
2513 2513 goto attach_fail;
2514 2514 }
2515 2515 ngep->progress |= PROGRESS_NDD;
2516 2516
2517 2517 /*
2518 2518 * Create & initialise named kstats
2519 2519 */
2520 2520 nge_init_kstats(ngep, instance);
2521 2521 ngep->progress |= PROGRESS_KSTATS;
2522 2522
2523 2523 mutex_exit(ngep->genlock);
2524 2524
2525 2525 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2526 2526 goto attach_fail;
2527 2527 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2528 2528 macp->m_driver = ngep;
2529 2529 macp->m_dip = devinfo;
2530 2530 macp->m_src_addr = infop->vendor_addr.addr;
2531 2531 macp->m_callbacks = &nge_m_callbacks;
2532 2532 macp->m_min_sdu = 0;
2533 2533 macp->m_max_sdu = ngep->default_mtu;
2534 2534 macp->m_margin = VTAG_SIZE;
2535 2535 macp->m_priv_props = nge_priv_props;
2536 2536 /*
2537 2537 * Finally, we're ready to register ourselves with the mac
2538 2538 * interface; if this succeeds, we're all ready to start()
2539 2539 */
2540 2540 err = mac_register(macp, &ngep->mh);
2541 2541 mac_free(macp);
2542 2542 if (err != 0)
2543 2543 goto attach_fail;
2544 2544
2545 2545 /*
2546 2546 	 * Register a periodic handler.
2547 2547 * nge_chip_cyclic() is invoked in kernel context.
2548 2548 */
2549 2549 ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
2550 2550 NGE_CYCLIC_PERIOD, DDI_IPL_0);
2551 2551
2552 2552 ngep->progress |= PROGRESS_READY;
2553 2553 return (DDI_SUCCESS);
2554 2554
2555 2555 attach_fail:
2556 2556 nge_unattach(ngep);
2557 2557 return (DDI_FAILURE);
2558 2558 }
2559 2559
2560 2560 static int
2561 2561 nge_suspend(nge_t *ngep)
2562 2562 {
2563 2563 mutex_enter(ngep->genlock);
2564 2564 rw_enter(ngep->rwlock, RW_WRITER);
2565 2565
2566 2566 /* if the port hasn't been plumbed, just return */
2567 2567 if (ngep->nge_mac_state != NGE_MAC_STARTED) {
2568 2568 rw_exit(ngep->rwlock);
2569 2569 mutex_exit(ngep->genlock);
2570 2570 return (DDI_SUCCESS);
2571 2571 }
2572 2572 ngep->suspended = B_TRUE;
2573 2573 (void) nge_chip_stop(ngep, B_FALSE);
2574 2574 ngep->nge_mac_state = NGE_MAC_STOPPED;
2575 2575
2576 2576 rw_exit(ngep->rwlock);
2577 2577 mutex_exit(ngep->genlock);
2578 2578 return (DDI_SUCCESS);
2579 2579 }
2580 2580
2581 2581 /*
2582 2582 * detach(9E) -- Detach a device from the system
2583 2583 */
2584 2584 static int
2585 2585 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2586 2586 {
2587 2587 int i;
2588 2588 nge_t *ngep;
2589 2589 mul_item *p, *nextp;
2590 2590 buff_ring_t *brp;
2591 2591
2592 2592 NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));
2593 2593
2594 2594 ngep = ddi_get_driver_private(devinfo);
2595 2595 brp = ngep->buff;
2596 2596
2597 2597 switch (cmd) {
2598 2598 default:
2599 2599 return (DDI_FAILURE);
2600 2600
2601 2601 case DDI_SUSPEND:
2602 2602 /*
2603 2603 * Stop the NIC
2604 2604 * Note: This driver doesn't currently support WOL, but
2605 2605 * should it in the future, it is important to
2606 2606 * make sure the PHY remains powered so that the
2607 2607 		 *	wakeup packet can actually be received.
2608 2608 */
2609 2609 return (nge_suspend(ngep));
2610 2610
2611 2611 case DDI_DETACH:
2612 2612 break;
2613 2613 }
2614 2614
2615 2615 	/* Wait for all buffers posted to the upper layer to be released */
2616 2616 for (i = 0; i < 1000; i++) {
2617 2617 if (brp->rx_hold == 0)
2618 2618 break;
2619 2619 drv_usecwait(1000);
2620 2620 }
2621 2621
2622 2622 	/* If any buffer is still posted, refuse to detach */
2623 2623 if (brp->rx_hold != 0)
2624 2624 return (DDI_FAILURE);
2625 2625
2626 2626 /*
2627 2627 * Unregister from the GLD subsystem. This can fail, in
2628 2628 * particular if there are DLPI style-2 streams still open -
2629 2629 * in which case we just return failure without shutting
2630 2630 * down chip operations.
2631 2631 */
2632 2632 if (mac_unregister(ngep->mh) != DDI_SUCCESS)
2633 2633 return (DDI_FAILURE);
2634 2634
2635 2635 /*
2636 2636 	 * Recycle the multicast table. mac_unregister() is called first
2637 2637 	 * so that the multicast table remains usable even if
2638 2638 * mac_unregister() fails.
2639 2639 */
2640 2640 for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
2641 2641 nextp = p->next;
2642 2642 kmem_free(p, sizeof (mul_item));
2643 2643 }
2644 2644 ngep->pcur_mulist = NULL;
2645 2645
2646 2646 /*
2647 2647 * All activity stopped, so we can clean up & exit
2648 2648 */
2649 2649 nge_unattach(ngep);
2650 2650 return (DDI_SUCCESS);
2651 2651 }
2652 2652
2653 2653 /*
2654 2654 * quiesce(9E) entry point.
2655 2655 *
2656 2656 * This function is called when the system is single-threaded at high
2657 2657 * PIL with preemption disabled. Therefore, this function must not be
2658 2658 * blocked.
2659 2659 *
2660 2660 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2661 2661 * DDI_FAILURE indicates an error condition and should almost never happen.
2662 2662 */
2663 2663 static int
2664 2664 nge_quiesce(dev_info_t *devinfo)
2665 2665 {
2666 2666 nge_t *ngep;
2667 2667
2668 2668 ngep = ddi_get_driver_private(devinfo);
2669 2669
2670 2670 if (ngep == NULL)
2671 2671 return (DDI_FAILURE);
2672 2672
2673 2673 /*
2674 2674 * Turn off debug tracing
2675 2675 */
2676 2676 nge_debug = 0;
2677 2677 ngep->debug = 0;
2678 2678
2679 2679 nge_restore_mac_addr(ngep);
2680 2680 (void) nge_chip_stop(ngep, B_FALSE);
2681 2681
2682 2682 return (DDI_SUCCESS);
2683 2683 }
2684 2684
2685 2685
2686 2686
2687 2687 /*
2688 2688 * ========== Module Loading Data & Entry Points ==========
2689 2689 */
2690 2690
2691 2691 DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
2692 2692 NULL, NULL, D_MP, NULL, nge_quiesce);
2693 2693
2694 2694
2695 2695 static struct modldrv nge_modldrv = {
2696 2696 &mod_driverops, /* Type of module. This one is a driver */
2697 2697 nge_ident, /* short description */
2698 2698 &nge_dev_ops /* driver specific ops */
2699 2699 };
2700 2700
2701 2701 static struct modlinkage modlinkage = {
2702 - MODREV_1, (void *)&nge_modldrv, NULL
2702 + MODREV_1, { (void *)&nge_modldrv, NULL }
2703 2703 };
2704 2704
2705 2705
2706 2706 int
2707 2707 _info(struct modinfo *modinfop)
2708 2708 {
2709 2709 return (mod_info(&modlinkage, modinfop));
2710 2710 }
2711 2711
2712 2712 int
2713 2713 _init(void)
2714 2714 {
2715 2715 int status;
2716 2716
2717 2717 mac_init_ops(&nge_dev_ops, "nge");
2718 2718 status = mod_install(&modlinkage);
2719 2719 if (status != DDI_SUCCESS)
2720 2720 mac_fini_ops(&nge_dev_ops);
2721 2721 else
2722 2722 mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
2723 2723
2724 2724 return (status);
2725 2725 }
2726 2726
2727 2727 int
2728 2728 _fini(void)
2729 2729 {
2730 2730 int status;
2731 2731
2732 2732 status = mod_remove(&modlinkage);
2733 2733 if (status == DDI_SUCCESS) {
2734 2734 mac_fini_ops(&nge_dev_ops);
2735 2735 mutex_destroy(nge_log_mutex);
2736 2736 }
2737 2737
2738 2738 return (status);
2739 2739 }
2740 2740
2741 2741 /*
2742 2742 * ============ Init MSI/Fixed/SoftInterrupt routines ==============
2743 2743 */
2744 2744
2745 2745 /*
2746 2746 * Register interrupts and initialize each mutex and condition variables
2747 2747 */
2748 2748
2749 2749 static int
2750 2750 nge_register_intrs_and_init_locks(nge_t *ngep)
2751 2751 {
2752 2752 int err;
2753 2753 int intr_types;
2754 2754 uint_t soft_prip;
2755 2755 nge_msi_mask msi_mask;
2756 2756 nge_msi_map0_vec map0_vec;
2757 2757 nge_msi_map1_vec map1_vec;
2758 2758
2759 2759 /*
2760 2760 * Add the softint handlers:
2761 2761 *
2762 2762 * Both of these handlers are used to avoid restrictions on the
2763 2763 * context and/or mutexes required for some operations. In
2764 2764 * particular, the hardware interrupt handler and its subfunctions
2765 2765 * can detect a number of conditions that we don't want to handle
2766 2766 * in that context or with that set of mutexes held. So, these
2767 2767 * softints are triggered instead:
2768 2768 *
2769 2769 	 * the <resched> softint is triggered if we have previously
2770 2770 * had to refuse to send a packet because of resource shortage
2771 2771 * (we've run out of transmit buffers), but the send completion
2772 2772 * interrupt handler has now detected that more buffers have
2773 2773 * become available. Its only purpose is to call gld_sched()
2774 2774 * to retry the pending transmits (we're not allowed to hold
2775 2775 * driver-defined mutexes across gld_sched()).
2776 2776 *
2777 2777 * the <factotum> is triggered if the h/w interrupt handler
2778 2778 * sees the <link state changed> or <error> bits in the status
2779 2779 * block. It's also triggered periodically to poll the link
2780 2780 * state, just in case we aren't getting link status change
2781 2781 * interrupts ...
2782 2782 */
2783 2783 err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
2784 2784 DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
2785 2785 if (err != DDI_SUCCESS) {
2786 2786 nge_problem(ngep,
2787 2787 "nge_attach: add nge_reschedule softintr failed");
2788 2788
2789 2789 return (DDI_FAILURE);
2790 2790 }
2791 2791 ngep->progress |= PROGRESS_RESCHED;
2792 2792 err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
2793 2793 DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
2794 2794 if (err != DDI_SUCCESS) {
2795 2795 nge_problem(ngep,
2796 2796 "nge_attach: add nge_chip_factotum softintr failed!");
2797 2797
2798 2798 return (DDI_FAILURE);
2799 2799 }
2800 2800 if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
2801 2801 != DDI_SUCCESS) {
2802 2802 nge_problem(ngep, "nge_attach: get softintr priority failed\n");
2803 2803
2804 2804 return (DDI_FAILURE);
2805 2805 }
2806 2806 ngep->soft_pri = soft_prip;
2807 2807
2808 2808 ngep->progress |= PROGRESS_FACTOTUM;
2809 2809 /* Get supported interrupt types */
2810 2810 if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
2811 2811 != DDI_SUCCESS) {
2812 2812 nge_error(ngep, "ddi_intr_get_supported_types failed\n");
2813 2813
2814 2814 return (DDI_FAILURE);
2815 2815 }
2816 2816
2817 2817 NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
2818 2818 intr_types));
2819 2819
2820 2820 if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {
2821 2821
2822 2822 /* MSI Configurations for mcp55 chipset */
2823 2823 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2824 2824 ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2825 2825
2826 2826
2827 2827 /* Enable the 8 vectors */
2828 2828 msi_mask.msi_mask_val =
2829 2829 nge_reg_get32(ngep, NGE_MSI_MASK);
2830 2830 msi_mask.msi_msk_bits.vec0 = NGE_SET;
2831 2831 msi_mask.msi_msk_bits.vec1 = NGE_SET;
2832 2832 msi_mask.msi_msk_bits.vec2 = NGE_SET;
2833 2833 msi_mask.msi_msk_bits.vec3 = NGE_SET;
2834 2834 msi_mask.msi_msk_bits.vec4 = NGE_SET;
2835 2835 msi_mask.msi_msk_bits.vec5 = NGE_SET;
2836 2836 msi_mask.msi_msk_bits.vec6 = NGE_SET;
2837 2837 msi_mask.msi_msk_bits.vec7 = NGE_SET;
2838 2838 nge_reg_put32(ngep, NGE_MSI_MASK,
2839 2839 msi_mask.msi_mask_val);
2840 2840
2841 2841 /*
2842 2842 			/*
2843 2843 			 * Remap the MSI MAP0 and MAP1 registers. The MCP55
2844 2844 			 * maps all interrupts to vector 0 by default, so
2845 2845 			 * software needs to remap them here.
2846 2846 			 * This mapping is the same as on the CK804.
2846 2846 			 */
2847 2847 map0_vec.msi_map0_val =
2848 2848 nge_reg_get32(ngep, NGE_MSI_MAP0);
2849 2849 map1_vec.msi_map1_val =
2850 2850 nge_reg_get32(ngep, NGE_MSI_MAP1);
2851 2851 map0_vec.vecs_bits.reint_vec = 0;
2852 2852 map0_vec.vecs_bits.rcint_vec = 0;
2853 2853 map0_vec.vecs_bits.miss_vec = 3;
2854 2854 map0_vec.vecs_bits.teint_vec = 5;
2855 2855 map0_vec.vecs_bits.tcint_vec = 5;
2856 2856 map0_vec.vecs_bits.stint_vec = 2;
2857 2857 map0_vec.vecs_bits.mint_vec = 6;
2858 2858 map0_vec.vecs_bits.rfint_vec = 0;
2859 2859 map1_vec.vecs_bits.tfint_vec = 5;
2860 2860 map1_vec.vecs_bits.feint_vec = 6;
2861 2861 map1_vec.vecs_bits.resv8_11 = 3;
2862 2862 map1_vec.vecs_bits.resv12_15 = 1;
2863 2863 map1_vec.vecs_bits.resv16_19 = 0;
2864 2864 map1_vec.vecs_bits.resv20_23 = 7;
2865 2865 map1_vec.vecs_bits.resv24_31 = 0xff;
2866 2866 nge_reg_put32(ngep, NGE_MSI_MAP0,
2867 2867 map0_vec.msi_map0_val);
2868 2868 nge_reg_put32(ngep, NGE_MSI_MAP1,
2869 2869 map1_vec.msi_map1_val);
2870 2870 }
2871 2871 if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
2872 2872 NGE_DEBUG(("MSI registration failed, "
2873 2873 "trying FIXED interrupt type\n"));
2874 2874 } else {
2875 2875 nge_log(ngep, "Using MSI interrupt type\n");
2876 2876
2877 2877 ngep->intr_type = DDI_INTR_TYPE_MSI;
2878 2878 ngep->progress |= PROGRESS_SWINT;
2879 2879 }
2880 2880 }
2881 2881
2882 2882 if (!(ngep->progress & PROGRESS_SWINT) &&
2883 2883 (intr_types & DDI_INTR_TYPE_FIXED)) {
2884 2884 if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
2885 2885 nge_error(ngep, "FIXED interrupt "
2886 2886 "registration failed\n");
2887 2887
2888 2888 return (DDI_FAILURE);
2889 2889 }
2890 2890
2891 2891 nge_log(ngep, "Using FIXED interrupt type\n");
2892 2892
2893 2893 ngep->intr_type = DDI_INTR_TYPE_FIXED;
2894 2894 ngep->progress |= PROGRESS_SWINT;
2895 2895 }
2896 2896
2897 2897
2898 2898 if (!(ngep->progress & PROGRESS_SWINT)) {
2899 2899 nge_error(ngep, "No interrupts registered\n");
2900 2900
2901 2901 return (DDI_FAILURE);
2902 2902 }
2903 2903 mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
2904 2904 DDI_INTR_PRI(ngep->intr_pri));
2905 2905 mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
2906 2906 DDI_INTR_PRI(ngep->soft_pri));
2907 2907 rw_init(ngep->rwlock, NULL, RW_DRIVER,
2908 2908 DDI_INTR_PRI(ngep->intr_pri));
2909 2909
2910 2910 return (DDI_SUCCESS);
2911 2911 }
2912 2912
2913 2913 /*
2914 2914 * nge_add_intrs:
2915 2915 *
2916 2916 * Register FIXED or MSI interrupts.
2917 2917 */
2918 2918 static int
2919 2919 nge_add_intrs(nge_t *ngep, int intr_type)
2920 2920 {
2921 2921 dev_info_t *dip = ngep->devinfo;
2922 2922 int avail, actual, intr_size, count = 0;
2923 2923 int i, flag, ret;
2924 2924
2925 2925 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2926 2926
2927 2927 /* Get number of interrupts */
2928 2928 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2929 2929 if ((ret != DDI_SUCCESS) || (count == 0)) {
2930 2930 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2931 2931 "count: %d", ret, count);
2932 2932
2933 2933 return (DDI_FAILURE);
2934 2934 }
2935 2935
2936 2936 /* Get number of available interrupts */
2937 2937 ret = ddi_intr_get_navail(dip, intr_type, &avail);
2938 2938 if ((ret != DDI_SUCCESS) || (avail == 0)) {
2939 2939 nge_error(ngep, "ddi_intr_get_navail() failure, "
2940 2940 "ret: %d, avail: %d\n", ret, avail);
2941 2941
2942 2942 return (DDI_FAILURE);
2943 2943 }
2944 2944
2945 2945 if (avail < count) {
2946 2946 		NGE_DEBUG(("nintrs() returned %d, navail returned %d\n",
2947 2947 count, avail));
2948 2948 }
2949 2949 flag = DDI_INTR_ALLOC_NORMAL;
2950 2950
2951 2951 /* Allocate an array of interrupt handles */
2952 2952 intr_size = count * sizeof (ddi_intr_handle_t);
2953 2953 ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2954 2954
2955 2955 /* Call ddi_intr_alloc() */
2956 2956 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2957 2957 count, &actual, flag);
2958 2958
2959 2959 if ((ret != DDI_SUCCESS) || (actual == 0)) {
2960 2960 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
2961 2961
2962 2962 kmem_free(ngep->htable, intr_size);
2963 2963 return (DDI_FAILURE);
2964 2964 }
2965 2965
2966 2966 if (actual < count) {
2967 2967 NGE_DEBUG(("Requested: %d, Received: %d\n",
2968 2968 count, actual));
2969 2969 }
2970 2970
2971 2971 ngep->intr_actual_cnt = actual;
2972 2972 ngep->intr_req_cnt = count;
2973 2973
2974 2974 /*
2975 2975 * Get priority for first msi, assume remaining are all the same
2976 2976 */
2977 2977 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
2978 2978 DDI_SUCCESS) {
2979 2979 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
2980 2980
2981 2981 /* Free already allocated intr */
2982 2982 for (i = 0; i < actual; i++) {
2983 2983 (void) ddi_intr_free(ngep->htable[i]);
2984 2984 }
2985 2985
2986 2986 kmem_free(ngep->htable, intr_size);
2987 2987
2988 2988 return (DDI_FAILURE);
2989 2989 }
2990 2990 /* Test for high level mutex */
2991 2991 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
2992 2992 nge_error(ngep, "nge_add_intrs:"
2993 2993 "Hi level interrupt not supported");
2994 2994
2995 2995 for (i = 0; i < actual; i++)
2996 2996 (void) ddi_intr_free(ngep->htable[i]);
2997 2997
2998 2998 kmem_free(ngep->htable, intr_size);
2999 2999
3000 3000 return (DDI_FAILURE);
3001 3001 }
3002 3002
3003 3003
3004 3004 /* Call ddi_intr_add_handler() */
3005 3005 for (i = 0; i < actual; i++) {
3006 3006 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
3007 3007 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
3008 3008 nge_error(ngep, "ddi_intr_add_handler() "
3009 3009 "failed %d\n", ret);
3010 3010
3011 3011 /* Free already allocated intr */
3012 3012 for (i = 0; i < actual; i++) {
3013 3013 (void) ddi_intr_free(ngep->htable[i]);
3014 3014 }
3015 3015
3016 3016 kmem_free(ngep->htable, intr_size);
3017 3017
3018 3018 return (DDI_FAILURE);
3019 3019 }
3020 3020 }
3021 3021
3022 3022 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
3023 3023 != DDI_SUCCESS) {
3024 3024 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
3025 3025
3026 3026 for (i = 0; i < actual; i++) {
3027 3027 (void) ddi_intr_remove_handler(ngep->htable[i]);
3028 3028 (void) ddi_intr_free(ngep->htable[i]);
3029 3029 }
3030 3030
3031 3031 kmem_free(ngep->htable, intr_size);
3032 3032
3033 3033 return (DDI_FAILURE);
3034 3034 }
3035 3035
3036 3036 return (DDI_SUCCESS);
3037 3037 }
3038 3038
3039 3039 /*
3040 3040 * nge_rem_intrs:
3041 3041 *
3042 3042 * Unregister FIXED or MSI interrupts
3043 3043 */
3044 3044 static void
3045 3045 nge_rem_intrs(nge_t *ngep)
3046 3046 {
3047 3047 int i;
3048 3048
3049 3049 NGE_DEBUG(("nge_rem_intrs\n"));
3050 3050
3051 3051 /* Disable all interrupts */
3052 3052 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
3053 3053 /* Call ddi_intr_block_disable() */
3054 3054 (void) ddi_intr_block_disable(ngep->htable,
3055 3055 ngep->intr_actual_cnt);
3056 3056 } else {
3057 3057 for (i = 0; i < ngep->intr_actual_cnt; i++) {
3058 3058 (void) ddi_intr_disable(ngep->htable[i]);
3059 3059 }
3060 3060 }
3061 3061
3062 3062 /* Call ddi_intr_remove_handler() */
3063 3063 for (i = 0; i < ngep->intr_actual_cnt; i++) {
3064 3064 (void) ddi_intr_remove_handler(ngep->htable[i]);
3065 3065 (void) ddi_intr_free(ngep->htable[i]);
3066 3066 }
3067 3067
3068 3068 kmem_free(ngep->htable,
3069 3069 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
3070 3070 }