7127 remove -Wno-missing-braces from Makefile.uts
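The only change to this file is at line 1979: with -Wno-missing-braces removed
from Makefile.uts, the build now enforces -Wmissing-braces, and the struct
modlinkage initializer needs an inner brace level because ml_linkage is an
array member. Below is a minimal sketch of the warning and the fix; the struct
shape mirrors struct modlinkage in sys/modctl.h, but modlinkage_like,
dummy_driver, and the array size are illustrative stand-ins, not the real
definitions:

    /*
     * Shape of struct modlinkage (cf. sys/modctl.h); the array
     * size here is illustrative only.
     */
    struct modlinkage_like {
            int     ml_rev;         /* interface revision */
            void    *ml_linkage[4]; /* array member */
    };

    static int dummy_driver;        /* stand-in for rge_modldrv */

    /*
     * Old form: legal C (brace elision), but -Wmissing-braces warns
     * because the array member's initializer has no braces of its own.
     */
    static struct modlinkage_like before = {
            1, (void *)&dummy_driver, NULL
    };

    /*
     * New form: clean under -Wmissing-braces; the inner braces wrap
     * the ml_linkage[] initializer without changing the object layout.
     */
    static struct modlinkage_like after = {
            1, { (void *)&dummy_driver, NULL }
    };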
--- old/usr/src/uts/common/io/rge/rge_main.c
+++ new/usr/src/uts/common/io/rge/rge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include "rge.h"
27 27
28 28 /*
29 29 * This is the string displayed by modinfo, etc.
30 30 * Make sure you keep the version ID up to date!
31 31 */
32 32 static char rge_ident[] = "Realtek 1Gb Ethernet";
33 33
34 34 /*
35 35 * Used for buffers allocated by ddi_dma_mem_alloc()
36 36 */
37 37 static ddi_dma_attr_t dma_attr_buf = {
38 38 DMA_ATTR_V0, /* dma_attr version */
39 39 (uint32_t)0, /* dma_attr_addr_lo */
40 40 (uint32_t)0xFFFFFFFF, /* dma_attr_addr_hi */
41 41 (uint32_t)0xFFFFFFFF, /* dma_attr_count_max */
42 42 (uint32_t)16, /* dma_attr_align */
43 43 0xFFFFFFFF, /* dma_attr_burstsizes */
44 44 1, /* dma_attr_minxfer */
45 45 (uint32_t)0xFFFFFFFF, /* dma_attr_maxxfer */
46 46 (uint32_t)0xFFFFFFFF, /* dma_attr_seg */
47 47 1, /* dma_attr_sgllen */
48 48 1, /* dma_attr_granular */
49 49 0, /* dma_attr_flags */
50 50 };
51 51
52 52 /*
53 53 * Used for BDs allocated by ddi_dma_mem_alloc()
54 54 */
55 55 static ddi_dma_attr_t dma_attr_desc = {
56 56 DMA_ATTR_V0, /* dma_attr version */
57 57 (uint32_t)0, /* dma_attr_addr_lo */
58 58 (uint32_t)0xFFFFFFFF, /* dma_attr_addr_hi */
59 59 (uint32_t)0xFFFFFFFF, /* dma_attr_count_max */
60 60 (uint32_t)256, /* dma_attr_align */
61 61 0xFFFFFFFF, /* dma_attr_burstsizes */
62 62 1, /* dma_attr_minxfer */
63 63 (uint32_t)0xFFFFFFFF, /* dma_attr_maxxfer */
64 64 (uint32_t)0xFFFFFFFF, /* dma_attr_seg */
65 65 1, /* dma_attr_sgllen */
66 66 1, /* dma_attr_granular */
67 67 0, /* dma_attr_flags */
68 68 };
69 69
70 70 /*
71 71 * PIO access attributes for registers
72 72 */
73 73 static ddi_device_acc_attr_t rge_reg_accattr = {
74 74 DDI_DEVICE_ATTR_V0,
75 75 DDI_STRUCTURE_LE_ACC,
76 76 DDI_STRICTORDER_ACC,
77 77 DDI_DEFAULT_ACC
78 78 };
79 79
80 80 /*
81 81 * DMA access attributes for descriptors
82 82 */
83 83 static ddi_device_acc_attr_t rge_desc_accattr = {
84 84 DDI_DEVICE_ATTR_V0,
85 85 DDI_NEVERSWAP_ACC,
86 86 DDI_STRICTORDER_ACC,
87 87 DDI_DEFAULT_ACC
88 88 };
89 89
90 90 /*
91 91 * DMA access attributes for data
92 92 */
93 93 static ddi_device_acc_attr_t rge_buf_accattr = {
94 94 DDI_DEVICE_ATTR_V0,
95 95 DDI_NEVERSWAP_ACC,
96 96 DDI_STRICTORDER_ACC,
97 97 DDI_DEFAULT_ACC
98 98 };
99 99
100 100 /*
101 101 * Property names
102 102 */
103 103 static char debug_propname[] = "rge_debug_flags";
104 104 static char mtu_propname[] = "default_mtu";
105 105 static char msi_propname[] = "msi_enable";
106 106
107 107 static int rge_m_start(void *);
108 108 static void rge_m_stop(void *);
109 109 static int rge_m_promisc(void *, boolean_t);
110 110 static int rge_m_multicst(void *, boolean_t, const uint8_t *);
111 111 static int rge_m_unicst(void *, const uint8_t *);
112 112 static void rge_m_ioctl(void *, queue_t *, mblk_t *);
113 113 static boolean_t rge_m_getcapab(void *, mac_capab_t, void *);
114 114
115 115 #define RGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
116 116
117 117 static mac_callbacks_t rge_m_callbacks = {
118 118 RGE_M_CALLBACK_FLAGS,
119 119 rge_m_stat,
120 120 rge_m_start,
121 121 rge_m_stop,
122 122 rge_m_promisc,
123 123 rge_m_multicst,
124 124 rge_m_unicst,
125 125 rge_m_tx,
126 126 NULL,
127 127 rge_m_ioctl,
128 128 rge_m_getcapab
129 129 };
130 130
131 131 /*
132 132 * Allocate an area of memory and a DMA handle for accessing it
133 133 */
134 134 static int
135 135 rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
136 136 ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
137 137 {
138 138 caddr_t vaddr;
139 139 int err;
140 140
141 141 /*
142 142 * Allocate handle
143 143 */
144 144 err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
145 145 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
146 146 if (err != DDI_SUCCESS) {
147 147 dma_p->dma_hdl = NULL;
148 148 return (DDI_FAILURE);
149 149 }
150 150
151 151 /*
152 152 * Allocate memory
153 153 */
154 154 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
155 155 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
156 156 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
157 157 if (err != DDI_SUCCESS) {
158 158 ddi_dma_free_handle(&dma_p->dma_hdl);
159 159 dma_p->dma_hdl = NULL;
160 160 dma_p->acc_hdl = NULL;
161 161 return (DDI_FAILURE);
162 162 }
163 163
164 164 /*
165 165 * Bind the two together
166 166 */
167 167 dma_p->mem_va = vaddr;
168 168 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
169 169 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
170 170 &dma_p->cookie, &dma_p->ncookies);
171 171 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
172 172 ddi_dma_mem_free(&dma_p->acc_hdl);
173 173 ddi_dma_free_handle(&dma_p->dma_hdl);
174 174 dma_p->acc_hdl = NULL;
175 175 dma_p->dma_hdl = NULL;
176 176 return (DDI_FAILURE);
177 177 }
178 178
179 179 dma_p->nslots = ~0U;
180 180 dma_p->size = ~0U;
181 181 dma_p->token = ~0U;
182 182 dma_p->offset = 0;
183 183 return (DDI_SUCCESS);
184 184 }
185 185
186 186 /*
187 187 * Free one allocated area of DMAable memory
188 188 */
189 189 static void
190 190 rge_free_dma_mem(dma_area_t *dma_p)
191 191 {
192 192 if (dma_p->dma_hdl != NULL) {
193 193 if (dma_p->ncookies) {
194 194 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
195 195 dma_p->ncookies = 0;
196 196 }
197 197 ddi_dma_free_handle(&dma_p->dma_hdl);
198 198 dma_p->dma_hdl = NULL;
199 199 }
200 200
201 201 if (dma_p->acc_hdl != NULL) {
202 202 ddi_dma_mem_free(&dma_p->acc_hdl);
203 203 dma_p->acc_hdl = NULL;
204 204 }
205 205 }
206 206
207 207 /*
208 208 * Utility routine to carve a slice off a chunk of allocated memory,
209 209 * updating the chunk descriptor accordingly. The size of the slice
210 210 * is given by the product of the <qty> and <size> parameters.
211 211 */
212 212 static void
213 213 rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
214 214 uint32_t qty, uint32_t size)
215 215 {
216 216 static uint32_t sequence = 0xbcd5704a;
217 217 size_t totsize;
218 218
219 219 totsize = qty*size;
220 220 ASSERT(totsize <= chunk->alength);
221 221
222 222 *slice = *chunk;
223 223 slice->nslots = qty;
224 224 slice->size = size;
225 225 slice->alength = totsize;
226 226 slice->token = ++sequence;
227 227
228 228 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
229 229 chunk->alength -= totsize;
230 230 chunk->offset += totsize;
231 231 chunk->cookie.dmac_laddress += totsize;
232 232 chunk->cookie.dmac_size -= totsize;
233 233 }
234 234
235 235 static int
236 236 rge_alloc_bufs(rge_t *rgep)
237 237 {
238 238 size_t txdescsize;
239 239 size_t rxdescsize;
240 240 int err;
241 241
242 242 /*
243 243 * Allocate memory & handle for packet statistics
244 244 */
245 245 err = rge_alloc_dma_mem(rgep,
246 246 RGE_STATS_DUMP_SIZE,
247 247 &dma_attr_desc,
248 248 &rge_desc_accattr,
249 249 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
250 250 &rgep->dma_area_stats);
251 251 if (err != DDI_SUCCESS)
252 252 return (DDI_FAILURE);
253 253 rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);
254 254
255 255 /*
256 256 * Allocate memory & handle for Tx descriptor ring
257 257 */
258 258 txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
259 259 err = rge_alloc_dma_mem(rgep,
260 260 txdescsize,
261 261 &dma_attr_desc,
262 262 &rge_desc_accattr,
263 263 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
264 264 &rgep->dma_area_txdesc);
265 265 if (err != DDI_SUCCESS)
266 266 return (DDI_FAILURE);
267 267
268 268 /*
269 269 * Allocate memory & handle for Rx descriptor ring
270 270 */
271 271 rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
272 272 err = rge_alloc_dma_mem(rgep,
273 273 rxdescsize,
274 274 &dma_attr_desc,
275 275 &rge_desc_accattr,
276 276 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
277 277 &rgep->dma_area_rxdesc);
278 278 if (err != DDI_SUCCESS)
279 279 return (DDI_FAILURE);
280 280
281 281 return (DDI_SUCCESS);
282 282 }
283 283
284 284 /*
285 285 * rge_free_bufs() -- free descriptors/buffers allocated for this
286 286 * device instance.
287 287 */
288 288 static void
289 289 rge_free_bufs(rge_t *rgep)
290 290 {
291 291 rge_free_dma_mem(&rgep->dma_area_stats);
292 292 rge_free_dma_mem(&rgep->dma_area_txdesc);
293 293 rge_free_dma_mem(&rgep->dma_area_rxdesc);
294 294 }
295 295
296 296 /*
297 297 * ========== Transmit and receive ring reinitialisation ==========
298 298 */
299 299
300 300 /*
301 301 * These <reinit> routines each reset the rx/tx rings to an initial
302 302 * state, assuming that the corresponding <init> routine has already
303 303 * been called exactly once.
304 304 */
305 305 static void
306 306 rge_reinit_send_ring(rge_t *rgep)
307 307 {
308 308 sw_sbd_t *ssbdp;
309 309 rge_bd_t *bdp;
310 310 uint32_t slot;
311 311
312 312 /*
313 313 * re-init send ring
314 314 */
315 315 DMA_ZERO(rgep->tx_desc);
316 316 ssbdp = rgep->sw_sbds;
317 317 bdp = rgep->tx_ring;
318 318 for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
319 319 bdp->host_buf_addr =
320 320 RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
321 321 bdp->host_buf_addr_hi =
322 322 RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
323 323 /* last BD in Tx ring */
324 324 if (slot == (RGE_SEND_SLOTS - 1))
325 325 bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
326 326 ssbdp++;
327 327 bdp++;
328 328 }
329 329 DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
330 330 rgep->tx_next = 0;
331 331 rgep->tc_next = 0;
332 332 rgep->tc_tail = 0;
333 333 rgep->tx_flow = 0;
334 334 rgep->tx_free = RGE_SEND_SLOTS;
335 335 }
336 336
337 337 static void
338 338 rge_reinit_recv_ring(rge_t *rgep)
339 339 {
340 340 rge_bd_t *bdp;
341 341 sw_rbd_t *srbdp;
342 342 dma_area_t *pbuf;
343 343 uint32_t slot;
344 344
345 345 /*
346 346 * re-init receive ring
347 347 */
348 348 DMA_ZERO(rgep->rx_desc);
349 349 srbdp = rgep->sw_rbds;
350 350 bdp = rgep->rx_ring;
351 351 for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
352 352 pbuf = &srbdp->rx_buf->pbuf;
353 353 bdp->host_buf_addr =
354 354 RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
355 355 bdp->host_buf_addr_hi =
356 356 RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
357 357 bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
358 358 (rgep->rxbuf_size - rgep->head_room));
359 359 /* last BD in Rx ring */
360 360 if (slot == (RGE_RECV_SLOTS - 1))
361 361 bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
362 362 srbdp++;
363 363 bdp++;
364 364 }
365 365 DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
366 366 rgep->watchdog = 0;
367 367 rgep->rx_next = 0;
368 368 }
369 369
370 370 static void
371 371 rge_reinit_buf_ring(rge_t *rgep)
372 372 {
373 373
374 374 if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
375 375 return;
376 376
377 377 /*
378 378 * If not all the buffers sent upstream have been returned to the
379 379 * driver, use bcopy() only in the rx process.
380 380 */
381 381 if (rgep->rx_free != RGE_BUF_SLOTS)
382 382 rgep->rx_bcopy = B_TRUE;
383 383 }
384 384
385 385 static void
386 386 rge_reinit_rings(rge_t *rgep)
387 387 {
388 388 rge_reinit_send_ring(rgep);
389 389 rge_reinit_recv_ring(rgep);
390 390 rge_reinit_buf_ring(rgep);
391 391 }
392 392
393 393 static void
394 394 rge_fini_send_ring(rge_t *rgep)
395 395 {
396 396 sw_sbd_t *ssbdp;
397 397 uint32_t slot;
398 398
399 399 ssbdp = rgep->sw_sbds;
400 400 for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
401 401 rge_free_dma_mem(&ssbdp->pbuf);
402 402 ssbdp++;
403 403 }
404 404
405 405 kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
406 406 rgep->sw_sbds = NULL;
407 407 }
408 408
409 409 static void
410 410 rge_fini_recv_ring(rge_t *rgep)
411 411 {
412 412 sw_rbd_t *srbdp;
413 413 uint32_t slot;
414 414
415 415 srbdp = rgep->sw_rbds;
416 416 for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
417 417 if (srbdp->rx_buf) {
418 418 if (srbdp->rx_buf->mp != NULL) {
419 419 freemsg(srbdp->rx_buf->mp);
420 420 srbdp->rx_buf->mp = NULL;
421 421 }
422 422 rge_free_dma_mem(&srbdp->rx_buf->pbuf);
423 423 kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
424 424 srbdp->rx_buf = NULL;
425 425 }
426 426 }
427 427
428 428 kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
429 429 rgep->sw_rbds = NULL;
430 430 }
431 431
432 432 static void
433 433 rge_fini_buf_ring(rge_t *rgep)
434 434 {
435 435 sw_rbd_t *srbdp;
436 436 uint32_t slot;
437 437
438 438 if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
439 439 return;
440 440
441 441 ASSERT(rgep->rx_free == RGE_BUF_SLOTS);
442 442
443 443 srbdp = rgep->free_srbds;
444 444 for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
445 445 if (srbdp->rx_buf != NULL) {
446 446 if (srbdp->rx_buf->mp != NULL) {
447 447 freemsg(srbdp->rx_buf->mp);
448 448 srbdp->rx_buf->mp = NULL;
449 449 }
450 450 rge_free_dma_mem(&srbdp->rx_buf->pbuf);
451 451 kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
452 452 srbdp->rx_buf = NULL;
453 453 }
454 454 }
455 455
456 456 kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
457 457 rgep->free_srbds = NULL;
458 458 }
459 459
460 460 static void
461 461 rge_fini_rings(rge_t *rgep)
462 462 {
463 463 rge_fini_send_ring(rgep);
464 464 rge_fini_recv_ring(rgep);
465 465 rge_fini_buf_ring(rgep);
466 466 }
467 467
468 468 static int
469 469 rge_init_send_ring(rge_t *rgep)
470 470 {
471 471 uint32_t slot;
472 472 sw_sbd_t *ssbdp;
473 473 dma_area_t *pbuf;
474 474 dma_area_t desc;
475 475 int err;
476 476
477 477 /*
478 478 * Allocate the array of s/w Tx Buffer Descriptors
479 479 */
480 480 ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
481 481 rgep->sw_sbds = ssbdp;
482 482
483 483 /*
484 484 * Init send ring
485 485 */
486 486 rgep->tx_desc = rgep->dma_area_txdesc;
487 487 DMA_ZERO(rgep->tx_desc);
488 488 rgep->tx_ring = rgep->tx_desc.mem_va;
489 489
490 490 desc = rgep->tx_desc;
491 491 for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
492 492 rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));
493 493
494 494 /*
495 495 * Allocate memory & handle for Tx buffers
496 496 */
497 497 pbuf = &ssbdp->pbuf;
498 498 err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
499 499 &dma_attr_buf, &rge_buf_accattr,
500 500 DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
501 501 if (err != DDI_SUCCESS) {
502 502 rge_error(rgep,
503 503 "rge_init_send_ring: alloc tx buffer failed");
504 504 rge_fini_send_ring(rgep);
505 505 return (DDI_FAILURE);
506 506 }
507 507 ssbdp++;
508 508 }
509 509 ASSERT(desc.alength == 0);
510 510
511 511 DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
512 512 return (DDI_SUCCESS);
513 513 }
514 514
515 515 static int
516 516 rge_init_recv_ring(rge_t *rgep)
517 517 {
518 518 uint32_t slot;
519 519 sw_rbd_t *srbdp;
520 520 dma_buf_t *rx_buf;
521 521 dma_area_t *pbuf;
522 522 int err;
523 523
524 524 /*
525 525 * Allocate the array of s/w Rx Buffer Descriptors
526 526 */
527 527 srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
528 528 rgep->sw_rbds = srbdp;
529 529
530 530 /*
531 531 * Init receive ring
532 532 */
533 533 rgep->rx_next = 0;
534 534 rgep->rx_desc = rgep->dma_area_rxdesc;
535 535 DMA_ZERO(rgep->rx_desc);
536 536 rgep->rx_ring = rgep->rx_desc.mem_va;
537 537
538 538 for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
539 539 srbdp->rx_buf = rx_buf =
540 540 kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);
541 541
542 542 /*
543 543 * Allocate memory & handle for Rx buffers
544 544 */
545 545 pbuf = &rx_buf->pbuf;
546 546 err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
547 547 &dma_attr_buf, &rge_buf_accattr,
548 548 DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
549 549 if (err != DDI_SUCCESS) {
550 550 rge_fini_recv_ring(rgep);
551 551 rge_error(rgep,
552 552 "rge_init_recv_ring: alloc rx buffer failed");
553 553 return (DDI_FAILURE);
554 554 }
555 555
556 556 pbuf->alength -= rgep->head_room;
557 557 pbuf->offset += rgep->head_room;
558 558 if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
559 559 rx_buf->rx_recycle.free_func = rge_rx_recycle;
560 560 rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
561 561 rx_buf->private = (caddr_t)rgep;
562 562 rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
563 563 rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
564 564 if (rx_buf->mp == NULL) {
565 565 rge_fini_recv_ring(rgep);
566 566 rge_problem(rgep,
567 567 "rge_init_recv_ring: desballoc() failed");
568 568 return (DDI_FAILURE);
569 569 }
570 570 }
571 571 srbdp++;
572 572 }
573 573 DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
574 574 return (DDI_SUCCESS);
575 575 }
576 576
577 577 static int
578 578 rge_init_buf_ring(rge_t *rgep)
579 579 {
580 580 uint32_t slot;
581 581 sw_rbd_t *free_srbdp;
582 582 dma_buf_t *rx_buf;
583 583 dma_area_t *pbuf;
584 584 int err;
585 585
586 586 if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
587 587 rgep->rx_bcopy = B_TRUE;
588 588 return (DDI_SUCCESS);
589 589 }
590 590
591 591 /*
592 592 * Allocate the array of s/w free Buffer Descriptors
593 593 */
594 594 free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
595 595 rgep->free_srbds = free_srbdp;
596 596
597 597 /*
598 598 * Init free buffer ring
599 599 */
600 600 rgep->rc_next = 0;
601 601 rgep->rf_next = 0;
602 602 rgep->rx_bcopy = B_FALSE;
603 603 rgep->rx_free = RGE_BUF_SLOTS;
604 604 for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
605 605 free_srbdp->rx_buf = rx_buf =
606 606 kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);
607 607
608 608 /*
609 609 * Allocate memory & handle for free Rx buffers
610 610 */
611 611 pbuf = &rx_buf->pbuf;
612 612 err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
613 613 &dma_attr_buf, &rge_buf_accattr,
614 614 DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
615 615 if (err != DDI_SUCCESS) {
616 616 rge_fini_buf_ring(rgep);
617 617 rge_error(rgep,
618 618 "rge_init_buf_ring: alloc rx free buffer failed");
619 619 return (DDI_FAILURE);
620 620 }
621 621 pbuf->alength -= rgep->head_room;
622 622 pbuf->offset += rgep->head_room;
623 623 rx_buf->rx_recycle.free_func = rge_rx_recycle;
624 624 rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
625 625 rx_buf->private = (caddr_t)rgep;
626 626 rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
627 627 rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
628 628 if (rx_buf->mp == NULL) {
629 629 rge_fini_buf_ring(rgep);
630 630 rge_problem(rgep,
631 631 "rge_init_buf_ring: desballoc() failed");
632 632 return (DDI_FAILURE);
633 633 }
634 634 free_srbdp++;
635 635 }
636 636 return (DDI_SUCCESS);
637 637 }
638 638
639 639 static int
640 640 rge_init_rings(rge_t *rgep)
641 641 {
642 642 int err;
643 643
644 644 err = rge_init_send_ring(rgep);
645 645 if (err != DDI_SUCCESS)
646 646 return (DDI_FAILURE);
647 647
648 648 err = rge_init_recv_ring(rgep);
649 649 if (err != DDI_SUCCESS) {
650 650 rge_fini_send_ring(rgep);
651 651 return (DDI_FAILURE);
652 652 }
653 653
654 654 err = rge_init_buf_ring(rgep);
655 655 if (err != DDI_SUCCESS) {
656 656 rge_fini_send_ring(rgep);
657 657 rge_fini_recv_ring(rgep);
658 658 return (DDI_FAILURE);
659 659 }
660 660
661 661 return (DDI_SUCCESS);
662 662 }
663 663
664 664 /*
665 665 * ========== Internal state management entry points ==========
666 666 */
667 667
668 668 #undef RGE_DBG
669 669 #define RGE_DBG RGE_DBG_NEMO /* debug flag for this code */
670 670
671 671 /*
672 672 * These routines provide all the functionality required by the
673 673 * corresponding MAC layer entry points, but don't update the
674 674 * MAC state so they can be called internally without disturbing
675 675 * our record of what NEMO thinks we should be doing ...
676 676 */
677 677
678 678 /*
679 679 * rge_reset() -- reset h/w & rings to initial state
680 680 */
681 681 static void
682 682 rge_reset(rge_t *rgep)
683 683 {
684 684 ASSERT(mutex_owned(rgep->genlock));
685 685
686 686 /*
687 687 * Grab all the other mutexes in the world (this should
688 688 * ensure no other threads are manipulating driver state)
689 689 */
690 690 mutex_enter(rgep->rx_lock);
691 691 mutex_enter(rgep->rc_lock);
692 692 rw_enter(rgep->errlock, RW_WRITER);
693 693
694 694 (void) rge_chip_reset(rgep);
695 695 rge_reinit_rings(rgep);
696 696 rge_chip_init(rgep);
697 697
698 698 /*
699 699 * Free the world ...
700 700 */
701 701 rw_exit(rgep->errlock);
702 702 mutex_exit(rgep->rc_lock);
703 703 mutex_exit(rgep->rx_lock);
704 704
705 705 rgep->stats.rpackets = 0;
706 706 rgep->stats.rbytes = 0;
707 707 rgep->stats.opackets = 0;
708 708 rgep->stats.obytes = 0;
709 709 rgep->stats.tx_pre_ismax = B_FALSE;
710 710 rgep->stats.tx_cur_ismax = B_FALSE;
711 711
712 712 RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
713 713 }
714 714
715 715 /*
716 716 * rge_stop() -- stop processing, don't reset h/w or rings
717 717 */
718 718 static void
719 719 rge_stop(rge_t *rgep)
720 720 {
721 721 ASSERT(mutex_owned(rgep->genlock));
722 722
723 723 rge_chip_stop(rgep, B_FALSE);
724 724
725 725 RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
726 726 }
727 727
728 728 /*
729 729 * rge_start() -- start transmitting/receiving
730 730 */
731 731 static void
732 732 rge_start(rge_t *rgep)
733 733 {
734 734 ASSERT(mutex_owned(rgep->genlock));
735 735
736 736 /*
737 737 * Start chip processing, including enabling interrupts
738 738 */
739 739 rge_chip_start(rgep);
740 740 rgep->watchdog = 0;
741 741 }
742 742
743 743 /*
744 744 * rge_restart - restart transmitting/receiving after error or suspend
745 745 */
746 746 void
747 747 rge_restart(rge_t *rgep)
748 748 {
749 749 uint32_t i;
750 750
751 751 ASSERT(mutex_owned(rgep->genlock));
752 752 /*
753 753 * Wait for posted buffers to be freed...
754 754 */
755 755 if (!rgep->rx_bcopy) {
756 756 for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
757 757 if (rgep->rx_free == RGE_BUF_SLOTS)
758 758 break;
759 759 drv_usecwait(1000);
760 760 RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
761 761 }
762 762 }
763 763 rge_reset(rgep);
764 764 rgep->stats.chip_reset++;
765 765 if (rgep->rge_mac_state == RGE_MAC_STARTED) {
766 766 rge_start(rgep);
767 767 rgep->resched_needed = B_TRUE;
768 768 (void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
769 769 }
770 770 }
771 771
772 772
773 773 /*
774 774 * ========== Nemo-required management entry points ==========
775 775 */
776 776
777 777 #undef RGE_DBG
778 778 #define RGE_DBG RGE_DBG_NEMO /* debug flag for this code */
779 779
780 780 /*
781 781 * rge_m_stop() -- stop transmitting/receiving
782 782 */
783 783 static void
784 784 rge_m_stop(void *arg)
785 785 {
786 786 rge_t *rgep = arg; /* private device info */
787 787 uint32_t i;
788 788
789 789 /*
790 790 * Just stop processing, then record new MAC state
791 791 */
792 792 mutex_enter(rgep->genlock);
793 793 if (rgep->suspended) {
794 794 ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
795 795 mutex_exit(rgep->genlock);
796 796 return;
797 797 }
798 798 rge_stop(rgep);
799 799 /*
800 800 * Wait for posted buffers to be freed...
801 801 */
802 802 if (!rgep->rx_bcopy) {
803 803 for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
804 804 if (rgep->rx_free == RGE_BUF_SLOTS)
805 805 break;
806 806 drv_usecwait(1000);
807 807 RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
808 808 }
809 809 }
810 810 rgep->rge_mac_state = RGE_MAC_STOPPED;
811 811 RGE_DEBUG(("rge_m_stop($%p) done", arg));
812 812 mutex_exit(rgep->genlock);
813 813 }
814 814
815 815 /*
816 816 * rge_m_start() -- start transmitting/receiving
817 817 */
818 818 static int
819 819 rge_m_start(void *arg)
820 820 {
821 821 rge_t *rgep = arg; /* private device info */
822 822
823 823 mutex_enter(rgep->genlock);
824 824 if (rgep->suspended) {
825 825 mutex_exit(rgep->genlock);
826 826 return (DDI_FAILURE);
827 827 }
828 828 /*
829 829 * Clear hw/sw statistics
830 830 */
831 831 DMA_ZERO(rgep->dma_area_stats);
832 832 bzero(&rgep->stats, sizeof (rge_stats_t));
833 833
834 834 /*
835 835 * Start processing and record new MAC state
836 836 */
837 837 rge_reset(rgep);
838 838 rge_start(rgep);
839 839 rgep->rge_mac_state = RGE_MAC_STARTED;
840 840 RGE_DEBUG(("rge_m_start($%p) done", arg));
841 841
842 842 mutex_exit(rgep->genlock);
843 843
844 844 return (0);
845 845 }
846 846
847 847 /*
848 848 * rge_m_unicst_set() -- set the physical network address
849 849 */
850 850 static int
851 851 rge_m_unicst(void *arg, const uint8_t *macaddr)
852 852 {
853 853 rge_t *rgep = arg; /* private device info */
854 854
855 855 /*
856 856 * Remember the new current address in the driver state
857 857 * Sync the chip's idea of the address too ...
858 858 */
859 859 mutex_enter(rgep->genlock);
860 860 bcopy(macaddr, rgep->netaddr, ETHERADDRL);
861 861
862 862 if (rgep->suspended) {
863 863 mutex_exit(rgep->genlock);
864 864 return (DDI_SUCCESS);
865 865 }
866 866
867 867 rge_chip_sync(rgep, RGE_SET_MAC);
868 868 mutex_exit(rgep->genlock);
869 869
870 870 return (0);
871 871 }
872 872
873 873 /*
874 874 * Compute the index of the required bit in the multicast hash map.
875 875 * This must mirror the way the hardware actually does it!
876 876 */
877 877 static uint32_t
878 878 rge_hash_index(const uint8_t *mca)
879 879 {
880 880 uint32_t crc = (uint32_t)RGE_HASH_CRC;
881 881 uint32_t const POLY = RGE_HASH_POLY;
882 882 uint32_t msb;
883 883 int bytes;
884 884 uchar_t currentbyte;
885 885 uint32_t index;
886 886 int bit;
887 887
888 888 for (bytes = 0; bytes < ETHERADDRL; bytes++) {
889 889 currentbyte = mca[bytes];
890 890 for (bit = 0; bit < 8; bit++) {
891 891 msb = crc >> 31;
892 892 crc <<= 1;
893 893 if (msb ^ (currentbyte & 1))
894 894 crc ^= POLY;
895 895 currentbyte >>= 1;
896 896 }
897 897 }
898 898 index = crc >> 26;
899 899 /* the index value is between 0 and 63 (0x3f) */
900 900
901 901 return (index);
902 902 }
903 903
904 904 /*
905 905 * rge_m_multicst_add() -- enable/disable a multicast address
906 906 */
907 907 static int
908 908 rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
909 909 {
910 910 rge_t *rgep = arg; /* private device info */
911 911 struct ether_addr *addr;
912 912 uint32_t index;
913 913 uint32_t reg;
914 914 uint8_t *hashp;
915 915
916 916 mutex_enter(rgep->genlock);
917 917 hashp = rgep->mcast_hash;
918 918 addr = (struct ether_addr *)mca;
919 919 /*
920 920 * Calculate the Multicast address hash index value
921 921 * Normally, the position of MAR0-MAR7 is
922 922 * MAR0: offset 0x08, ..., MAR7: offset 0x0F.
923 923 *
924 924 * For pcie chipset, the position of MAR0-MAR7 is
925 925 * different from others:
926 926 * MAR0: offset 0x0F, ..., MAR7: offset 0x08.
927 927 */
928 928 index = rge_hash_index(addr->ether_addr_octet);
929 929 if (rgep->chipid.is_pcie)
930 930 reg = (~(index / RGE_MCAST_NUM)) & 0x7;
931 931 else
932 932 reg = index / RGE_MCAST_NUM;
933 933
934 934 if (add) {
935 935 if (rgep->mcast_refs[index]++) {
936 936 mutex_exit(rgep->genlock);
937 937 return (0);
938 938 }
939 939 hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
940 940 } else {
941 941 if (--rgep->mcast_refs[index]) {
942 942 mutex_exit(rgep->genlock);
943 943 return (0);
944 944 }
945 945 hashp[reg] &= ~ (1 << (index % RGE_MCAST_NUM));
946 946 }
947 947
948 948 if (rgep->suspended) {
949 949 mutex_exit(rgep->genlock);
950 950 return (DDI_SUCCESS);
951 951 }
952 952
953 953 /*
954 954 * Set multicast register
955 955 */
956 956 rge_chip_sync(rgep, RGE_SET_MUL);
957 957
958 958 mutex_exit(rgep->genlock);
959 959 return (0);
960 960 }
961 961
962 962 /*
963 963 * rge_m_promisc() -- set or reset promiscuous mode on the board
964 964 *
965 965 * Program the hardware to enable/disable promiscuous and/or
966 966 * receive-all-multicast modes.
967 967 */
968 968 static int
969 969 rge_m_promisc(void *arg, boolean_t on)
970 970 {
971 971 rge_t *rgep = arg;
972 972
973 973 /*
974 974 * Store MAC layer specified mode and pass to chip layer to update h/w
975 975 */
976 976 mutex_enter(rgep->genlock);
977 977
978 978 if (rgep->promisc == on) {
979 979 mutex_exit(rgep->genlock);
980 980 return (0);
981 981 }
982 982 rgep->promisc = on;
983 983
984 984 if (rgep->suspended) {
985 985 mutex_exit(rgep->genlock);
986 986 return (DDI_SUCCESS);
987 987 }
988 988
989 989 rge_chip_sync(rgep, RGE_SET_PROMISC);
990 990 RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
991 991 mutex_exit(rgep->genlock);
992 992 return (0);
993 993 }
994 994
995 995 /*
996 996 * Loopback ioctl code
997 997 */
998 998
999 999 static lb_property_t loopmodes[] = {
1000 1000 { normal, "normal", RGE_LOOP_NONE },
1001 1001 { internal, "PHY", RGE_LOOP_INTERNAL_PHY },
1002 1002 { internal, "MAC", RGE_LOOP_INTERNAL_MAC }
1003 1003 };
1004 1004
1005 1005 static enum ioc_reply
1006 1006 rge_set_loop_mode(rge_t *rgep, uint32_t mode)
1007 1007 {
1008 1008 /*
1009 1009 * If the mode isn't being changed, there's nothing to do ...
1010 1010 */
1011 1011 if (mode == rgep->param_loop_mode)
1012 1012 return (IOC_ACK);
1013 1013
1014 1014 /*
1015 1015 * Validate the requested mode and prepare a suitable message
1016 1016 * to explain the link down/up cycle that the change will
1017 1017 * probably induce ...
1018 1018 */
1019 1019 switch (mode) {
1020 1020 default:
1021 1021 return (IOC_INVAL);
1022 1022
1023 1023 case RGE_LOOP_NONE:
1024 1024 case RGE_LOOP_INTERNAL_PHY:
1025 1025 case RGE_LOOP_INTERNAL_MAC:
1026 1026 break;
1027 1027 }
1028 1028
1029 1029 /*
1030 1030 * All OK; tell the caller to reprogram
1031 1031 * the PHY and/or MAC for the new mode ...
1032 1032 */
1033 1033 rgep->param_loop_mode = mode;
1034 1034 return (IOC_RESTART_ACK);
1035 1035 }
1036 1036
1037 1037 static enum ioc_reply
1038 1038 rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
1039 1039 {
1040 1040 lb_info_sz_t *lbsp;
1041 1041 lb_property_t *lbpp;
1042 1042 uint32_t *lbmp;
1043 1043 int cmd;
1044 1044
1045 1045 _NOTE(ARGUNUSED(wq))
1046 1046
1047 1047 /*
1048 1048 * Validate format of ioctl
1049 1049 */
1050 1050 if (mp->b_cont == NULL)
1051 1051 return (IOC_INVAL);
1052 1052
1053 1053 cmd = iocp->ioc_cmd;
1054 1054 switch (cmd) {
1055 1055 default:
1056 1056 /* NOTREACHED */
1057 1057 rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
1058 1058 return (IOC_INVAL);
1059 1059
1060 1060 case LB_GET_INFO_SIZE:
1061 1061 if (iocp->ioc_count != sizeof (lb_info_sz_t))
1062 1062 return (IOC_INVAL);
1063 1063 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
1064 1064 *lbsp = sizeof (loopmodes);
1065 1065 return (IOC_REPLY);
1066 1066
1067 1067 case LB_GET_INFO:
1068 1068 if (iocp->ioc_count != sizeof (loopmodes))
1069 1069 return (IOC_INVAL);
1070 1070 lbpp = (lb_property_t *)mp->b_cont->b_rptr;
1071 1071 bcopy(loopmodes, lbpp, sizeof (loopmodes));
1072 1072 return (IOC_REPLY);
1073 1073
1074 1074 case LB_GET_MODE:
1075 1075 if (iocp->ioc_count != sizeof (uint32_t))
1076 1076 return (IOC_INVAL);
1077 1077 lbmp = (uint32_t *)mp->b_cont->b_rptr;
1078 1078 *lbmp = rgep->param_loop_mode;
1079 1079 return (IOC_REPLY);
1080 1080
1081 1081 case LB_SET_MODE:
1082 1082 if (iocp->ioc_count != sizeof (uint32_t))
1083 1083 return (IOC_INVAL);
1084 1084 lbmp = (uint32_t *)mp->b_cont->b_rptr;
1085 1085 return (rge_set_loop_mode(rgep, *lbmp));
1086 1086 }
1087 1087 }
1088 1088
1089 1089 /*
1090 1090 * Specific rge IOCTLs, the MAC layer handles the generic ones.
1091 1091 */
1092 1092 static void
1093 1093 rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1094 1094 {
1095 1095 rge_t *rgep = arg;
1096 1096 struct iocblk *iocp;
1097 1097 enum ioc_reply status;
1098 1098 boolean_t need_privilege;
1099 1099 int err;
1100 1100 int cmd;
1101 1101
1102 1102 /*
1103 1103 * If suspended, we might actually be able to do some of
1104 1104 * these ioctls, but it is harder to make sure they occur
1105 1105 * without actually putting the hardware in an undesirable
1106 1106 * state. So just NAK it.
1107 1107 */
1108 1108 mutex_enter(rgep->genlock);
1109 1109 if (rgep->suspended) {
1110 1110 miocnak(wq, mp, 0, EINVAL);
1111 1111 mutex_exit(rgep->genlock);
1112 1112 return;
1113 1113 }
1114 1114 mutex_exit(rgep->genlock);
1115 1115
1116 1116 /*
1117 1117 * Validate the command before bothering with the mutex ...
1118 1118 */
1119 1119 iocp = (struct iocblk *)mp->b_rptr;
1120 1120 iocp->ioc_error = 0;
1121 1121 need_privilege = B_TRUE;
1122 1122 cmd = iocp->ioc_cmd;
1123 1123 switch (cmd) {
1124 1124 default:
1125 1125 miocnak(wq, mp, 0, EINVAL);
1126 1126 return;
1127 1127
1128 1128 case RGE_MII_READ:
1129 1129 case RGE_MII_WRITE:
1130 1130 case RGE_DIAG:
1131 1131 case RGE_PEEK:
1132 1132 case RGE_POKE:
1133 1133 case RGE_PHY_RESET:
1134 1134 case RGE_SOFT_RESET:
1135 1135 case RGE_HARD_RESET:
1136 1136 break;
1137 1137
1138 1138 case LB_GET_INFO_SIZE:
1139 1139 case LB_GET_INFO:
1140 1140 case LB_GET_MODE:
1141 1141 need_privilege = B_FALSE;
1142 1142 /* FALLTHRU */
1143 1143 case LB_SET_MODE:
1144 1144 break;
1145 1145
1146 1146 case ND_GET:
1147 1147 need_privilege = B_FALSE;
1148 1148 /* FALLTHRU */
1149 1149 case ND_SET:
1150 1150 break;
1151 1151 }
1152 1152
1153 1153 if (need_privilege) {
1154 1154 /*
1155 1155 * Check for specific net_config privilege
1156 1156 */
1157 1157 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1158 1158 if (err != 0) {
1159 1159 miocnak(wq, mp, 0, err);
1160 1160 return;
1161 1161 }
1162 1162 }
1163 1163
1164 1164 mutex_enter(rgep->genlock);
1165 1165
1166 1166 switch (cmd) {
1167 1167 default:
1168 1168 _NOTE(NOTREACHED)
1169 1169 status = IOC_INVAL;
1170 1170 break;
1171 1171
1172 1172 case RGE_MII_READ:
1173 1173 case RGE_MII_WRITE:
1174 1174 case RGE_DIAG:
1175 1175 case RGE_PEEK:
1176 1176 case RGE_POKE:
1177 1177 case RGE_PHY_RESET:
1178 1178 case RGE_SOFT_RESET:
1179 1179 case RGE_HARD_RESET:
1180 1180 status = rge_chip_ioctl(rgep, wq, mp, iocp);
1181 1181 break;
1182 1182
1183 1183 case LB_GET_INFO_SIZE:
1184 1184 case LB_GET_INFO:
1185 1185 case LB_GET_MODE:
1186 1186 case LB_SET_MODE:
1187 1187 status = rge_loop_ioctl(rgep, wq, mp, iocp);
1188 1188 break;
1189 1189
1190 1190 case ND_GET:
1191 1191 case ND_SET:
1192 1192 status = rge_nd_ioctl(rgep, wq, mp, iocp);
1193 1193 break;
1194 1194 }
1195 1195
1196 1196 /*
1197 1197 * Do we need to reprogram the PHY and/or the MAC?
1198 1198 * Do it now, while we still have the mutex.
1199 1199 *
1200 1200 * Note: update the PHY first, 'cos it controls the
1201 1201 * speed/duplex parameters that the MAC code uses.
1202 1202 */
1203 1203 switch (status) {
1204 1204 case IOC_RESTART_REPLY:
1205 1205 case IOC_RESTART_ACK:
1206 1206 rge_phy_update(rgep);
1207 1207 break;
1208 1208 }
1209 1209
1210 1210 mutex_exit(rgep->genlock);
1211 1211
1212 1212 /*
1213 1213 * Finally, decide how to reply
1214 1214 */
1215 1215 switch (status) {
1216 1216 default:
1217 1217 case IOC_INVAL:
1218 1218 /*
1219 1219 * Error, reply with a NAK and EINVAL or the specified error
1220 1220 */
1221 1221 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
1222 1222 EINVAL : iocp->ioc_error);
1223 1223 break;
1224 1224
1225 1225 case IOC_DONE:
1226 1226 /*
1227 1227 * OK, reply already sent
1228 1228 */
1229 1229 break;
1230 1230
1231 1231 case IOC_RESTART_ACK:
1232 1232 case IOC_ACK:
1233 1233 /*
1234 1234 * OK, reply with an ACK
1235 1235 */
1236 1236 miocack(wq, mp, 0, 0);
1237 1237 break;
1238 1238
1239 1239 case IOC_RESTART_REPLY:
1240 1240 case IOC_REPLY:
1241 1241 /*
1242 1242 * OK, send prepared reply as ACK or NAK
1243 1243 */
1244 1244 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1245 1245 M_IOCACK : M_IOCNAK;
1246 1246 qreply(wq, mp);
1247 1247 break;
1248 1248 }
1249 1249 }
1250 1250
1251 1251 /* ARGSUSED */
1252 1252 static boolean_t
1253 1253 rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1254 1254 {
1255 1255 rge_t *rgep = arg;
1256 1256
1257 1257 switch (cap) {
1258 1258 case MAC_CAPAB_HCKSUM: {
1259 1259 uint32_t *hcksum_txflags = cap_data;
1260 1260 switch (rgep->chipid.mac_ver) {
1261 1261 case MAC_VER_8169:
1262 1262 case MAC_VER_8169S_D:
1263 1263 case MAC_VER_8169S_E:
1264 1264 case MAC_VER_8169SB:
1265 1265 case MAC_VER_8169SC:
1266 1266 case MAC_VER_8168:
1267 1267 case MAC_VER_8168B_B:
1268 1268 case MAC_VER_8168B_C:
1269 1269 case MAC_VER_8101E:
1270 1270 *hcksum_txflags = HCKSUM_INET_FULL_V4 |
1271 1271 HCKSUM_IPHDRCKSUM;
1272 1272 break;
1273 1273 case MAC_VER_8168C:
1274 1274 case MAC_VER_8101E_B:
1275 1275 case MAC_VER_8101E_C:
1276 1276 default:
1277 1277 *hcksum_txflags = 0;
1278 1278 break;
1279 1279 }
1280 1280 break;
1281 1281 }
1282 1282 default:
1283 1283 return (B_FALSE);
1284 1284 }
1285 1285 return (B_TRUE);
1286 1286 }
1287 1287
1288 1288 /*
1289 1289 * ============ Init MSI/Fixed Interrupt routines ==============
1290 1290 */
1291 1291
1292 1292 /*
1293 1293 * rge_add_intrs:
1294 1294 *
1295 1295 * Register FIXED or MSI interrupts.
1296 1296 */
1297 1297 static int
1298 1298 rge_add_intrs(rge_t *rgep, int intr_type)
1299 1299 {
1300 1300 dev_info_t *dip = rgep->devinfo;
1301 1301 int avail;
1302 1302 int actual;
1303 1303 int intr_size;
1304 1304 int count;
1305 1305 int i, j;
1306 1306 int ret;
1307 1307
1308 1308 /* Get number of interrupts */
1309 1309 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1310 1310 if ((ret != DDI_SUCCESS) || (count == 0)) {
1311 1311 rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
1312 1312 "count: %d", ret, count);
1313 1313 return (DDI_FAILURE);
1314 1314 }
1315 1315
1316 1316 /* Get number of available interrupts */
1317 1317 ret = ddi_intr_get_navail(dip, intr_type, &avail);
1318 1318 if ((ret != DDI_SUCCESS) || (avail == 0)) {
1319 1319 rge_error(rgep, "ddi_intr_get_navail() failure, "
1320 1320 "ret: %d, avail: %d\n", ret, avail);
1321 1321 return (DDI_FAILURE);
1322 1322 }
1323 1323
1324 1324 /* Allocate an array of interrupt handles */
1325 1325 intr_size = count * sizeof (ddi_intr_handle_t);
1326 1326 rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
1327 1327 rgep->intr_rqst = count;
1328 1328
1329 1329 /* Call ddi_intr_alloc() */
1330 1330 ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
1331 1331 count, &actual, DDI_INTR_ALLOC_NORMAL);
1332 1332 if (ret != DDI_SUCCESS || actual == 0) {
1333 1333 rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
1334 1334 kmem_free(rgep->htable, intr_size);
1335 1335 return (DDI_FAILURE);
1336 1336 }
1337 1337 if (actual < count) {
1338 1338 rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
1339 1339 count, actual);
1340 1340 }
1341 1341 rgep->intr_cnt = actual;
1342 1342
1343 1343 /*
1344 1344 * Get priority for first msi, assume remaining are all the same
1345 1345 */
1346 1346 if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
1347 1347 DDI_SUCCESS) {
1348 1348 rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
1349 1349 /* Free already allocated intr */
1350 1350 for (i = 0; i < actual; i++) {
1351 1351 (void) ddi_intr_free(rgep->htable[i]);
1352 1352 }
1353 1353 kmem_free(rgep->htable, intr_size);
1354 1354 return (DDI_FAILURE);
1355 1355 }
1356 1356
1357 1357 /* Test for high level mutex */
1358 1358 if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
1359 1359 rge_error(rgep, "rge_add_intrs: "
1360 1360 "Hi level interrupt not supported");
1361 1361 for (i = 0; i < actual; i++)
1362 1362 (void) ddi_intr_free(rgep->htable[i]);
1363 1363 kmem_free(rgep->htable, intr_size);
1364 1364 return (DDI_FAILURE);
1365 1365 }
1366 1366
1367 1367 /* Call ddi_intr_add_handler() */
1368 1368 for (i = 0; i < actual; i++) {
1369 1369 if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
1370 1370 (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
1371 1371 rge_error(rgep, "ddi_intr_add_handler() "
1372 1372 "failed %d\n", ret);
1373 1373 /* Remove already added intr */
1374 1374 for (j = 0; j < i; j++)
1375 1375 (void) ddi_intr_remove_handler(rgep->htable[j]);
1376 1376 /* Free already allocated intr */
1377 1377 for (i = 0; i < actual; i++) {
1378 1378 (void) ddi_intr_free(rgep->htable[i]);
1379 1379 }
1380 1380 kmem_free(rgep->htable, intr_size);
1381 1381 return (DDI_FAILURE);
1382 1382 }
1383 1383 }
1384 1384
1385 1385 if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
1386 1386 != DDI_SUCCESS) {
1387 1387 rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
1388 1388 for (i = 0; i < actual; i++) {
1389 1389 (void) ddi_intr_remove_handler(rgep->htable[i]);
1390 1390 (void) ddi_intr_free(rgep->htable[i]);
1391 1391 }
1392 1392 kmem_free(rgep->htable, intr_size);
1393 1393 return (DDI_FAILURE);
1394 1394 }
1395 1395
1396 1396 return (DDI_SUCCESS);
1397 1397 }
1398 1398
1399 1399 /*
1400 1400 * rge_rem_intrs:
1401 1401 *
1402 1402 * Unregister FIXED or MSI interrupts
1403 1403 */
1404 1404 static void
1405 1405 rge_rem_intrs(rge_t *rgep)
1406 1406 {
1407 1407 int i;
1408 1408
1409 1409 /* Disable all interrupts */
1410 1410 if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
1411 1411 /* Call ddi_intr_block_disable() */
1412 1412 (void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
1413 1413 } else {
1414 1414 for (i = 0; i < rgep->intr_cnt; i++) {
1415 1415 (void) ddi_intr_disable(rgep->htable[i]);
1416 1416 }
1417 1417 }
1418 1418
1419 1419 /* Call ddi_intr_remove_handler() */
1420 1420 for (i = 0; i < rgep->intr_cnt; i++) {
1421 1421 (void) ddi_intr_remove_handler(rgep->htable[i]);
1422 1422 (void) ddi_intr_free(rgep->htable[i]);
1423 1423 }
1424 1424
1425 1425 kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
1426 1426 }
1427 1427
1428 1428 /*
1429 1429 * ========== Per-instance setup/teardown code ==========
1430 1430 */
1431 1431
1432 1432 #undef RGE_DBG
1433 1433 #define RGE_DBG RGE_DBG_INIT /* debug flag for this code */
1434 1434
1435 1435 static void
1436 1436 rge_unattach(rge_t *rgep)
1437 1437 {
1438 1438 /*
1439 1439 * Flag that no more activity may be initiated
1440 1440 */
1441 1441 rgep->progress &= ~PROGRESS_READY;
1442 1442 rgep->rge_mac_state = RGE_MAC_UNATTACH;
1443 1443
1444 1444 /*
1445 1445 * Quiesce the PHY and MAC (leave it reset but still powered).
1446 1446 * Clean up and free all RGE data structures
1447 1447 */
1448 1448 if (rgep->periodic_id != NULL) {
1449 1449 ddi_periodic_delete(rgep->periodic_id);
1450 1450 rgep->periodic_id = NULL;
1451 1451 }
1452 1452
1453 1453 if (rgep->progress & PROGRESS_KSTATS)
1454 1454 rge_fini_kstats(rgep);
1455 1455
1456 1456 if (rgep->progress & PROGRESS_PHY)
1457 1457 (void) rge_phy_reset(rgep);
1458 1458
1459 1459 if (rgep->progress & PROGRESS_INIT) {
1460 1460 mutex_enter(rgep->genlock);
1461 1461 (void) rge_chip_reset(rgep);
1462 1462 mutex_exit(rgep->genlock);
1463 1463 rge_fini_rings(rgep);
1464 1464 }
1465 1465
1466 1466 if (rgep->progress & PROGRESS_INTR) {
1467 1467 rge_rem_intrs(rgep);
1468 1468 mutex_destroy(rgep->rc_lock);
1469 1469 mutex_destroy(rgep->rx_lock);
1470 1470 mutex_destroy(rgep->tc_lock);
1471 1471 mutex_destroy(rgep->tx_lock);
1472 1472 rw_destroy(rgep->errlock);
1473 1473 mutex_destroy(rgep->genlock);
1474 1474 }
1475 1475
1476 1476 if (rgep->progress & PROGRESS_FACTOTUM)
1477 1477 (void) ddi_intr_remove_softint(rgep->factotum_hdl);
1478 1478
1479 1479 if (rgep->progress & PROGRESS_RESCHED)
1480 1480 (void) ddi_intr_remove_softint(rgep->resched_hdl);
1481 1481
1482 1482 if (rgep->progress & PROGRESS_NDD)
1483 1483 rge_nd_cleanup(rgep);
1484 1484
1485 1485 rge_free_bufs(rgep);
1486 1486
1487 1487 if (rgep->progress & PROGRESS_REGS)
1488 1488 ddi_regs_map_free(&rgep->io_handle);
1489 1489
1490 1490 if (rgep->progress & PROGRESS_CFG)
1491 1491 pci_config_teardown(&rgep->cfg_handle);
1492 1492
1493 1493 ddi_remove_minor_node(rgep->devinfo, NULL);
1494 1494 kmem_free(rgep, sizeof (*rgep));
1495 1495 }
1496 1496
1497 1497 static int
1498 1498 rge_resume(dev_info_t *devinfo)
1499 1499 {
1500 1500 rge_t *rgep; /* Our private data */
1501 1501 chip_id_t *cidp;
1502 1502 chip_id_t chipid;
1503 1503
1504 1504 rgep = ddi_get_driver_private(devinfo);
1505 1505
1506 1506 /*
1507 1507 * If there are state inconsistencies, this is bad. Returning
1508 1508 * DDI_FAILURE here would eventually cause the machine to panic
1509 1509 * anyway, so it is best to panic here, where there is a
1510 1510 * possibility of debugging the problem.
1511 1511 */
1512 1512 if (rgep == NULL)
1513 1513 cmn_err(CE_PANIC,
1514 1514 "rge: ngep returned from ddi_get_driver_private was NULL");
1515 1515
1516 1516 /*
1517 1517 * Refuse to resume if the data structures aren't consistent
1518 1518 */
1519 1519 if (rgep->devinfo != devinfo)
1520 1520 cmn_err(CE_PANIC,
1521 1521 "rge: passed devinfo not the same as saved devinfo");
1522 1522
1523 1523 /*
1524 1524 * Read chip ID & set up config space command register(s)
1525 1525 * Refuse to resume if the chip has changed its identity!
1526 1526 */
1527 1527 cidp = &rgep->chipid;
1528 1528 rge_chip_cfg_init(rgep, &chipid);
1529 1529 if (chipid.vendor != cidp->vendor)
1530 1530 return (DDI_FAILURE);
1531 1531 if (chipid.device != cidp->device)
1532 1532 return (DDI_FAILURE);
1533 1533 if (chipid.revision != cidp->revision)
1534 1534 return (DDI_FAILURE);
1535 1535
1536 1536 mutex_enter(rgep->genlock);
1537 1537
1538 1538 /*
1539 1539 * This conditional branch can be executed in only one case: the
1540 1540 * port hasn't been plumbed.
1541 1541 */
1542 1542 if (rgep->suspended == B_FALSE) {
1543 1543 mutex_exit(rgep->genlock);
1544 1544 return (DDI_SUCCESS);
1545 1545 }
1546 1546 rgep->rge_mac_state = RGE_MAC_STARTED;
1547 1547 /*
1548 1548 * All OK, reinitialise h/w & kick off NEMO scheduling
1549 1549 */
1550 1550 rge_restart(rgep);
1551 1551 rgep->suspended = B_FALSE;
1552 1552
1553 1553 mutex_exit(rgep->genlock);
1554 1554
1555 1555 return (DDI_SUCCESS);
1556 1556 }
1557 1557
1558 1558
1559 1559 /*
1560 1560 * attach(9E) -- Attach a device to the system
1561 1561 *
1562 1562 * Called once for each board successfully probed.
1563 1563 */
1564 1564 static int
1565 1565 rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1566 1566 {
1567 1567 rge_t *rgep; /* Our private data */
1568 1568 mac_register_t *macp;
1569 1569 chip_id_t *cidp;
1570 1570 int intr_types;
1571 1571 caddr_t regs;
1572 1572 int instance;
1573 1573 int i;
1574 1574 int err;
1575 1575
1576 1576 /*
1577 1577 * we don't support high level interrupts in the driver
1578 1578 */
1579 1579 if (ddi_intr_hilevel(devinfo, 0) != 0) {
1580 1580 cmn_err(CE_WARN,
1581 1581 "rge_attach -- unsupported high level interrupt");
1582 1582 return (DDI_FAILURE);
1583 1583 }
1584 1584
1585 1585 instance = ddi_get_instance(devinfo);
1586 1586 RGE_GTRACE(("rge_attach($%p, %d) instance %d",
1587 1587 (void *)devinfo, cmd, instance));
1588 1588 RGE_BRKPT(NULL, "rge_attach");
1589 1589
1590 1590 switch (cmd) {
1591 1591 default:
1592 1592 return (DDI_FAILURE);
1593 1593
1594 1594 case DDI_RESUME:
1595 1595 return (rge_resume(devinfo));
1596 1596
1597 1597 case DDI_ATTACH:
1598 1598 break;
1599 1599 }
1600 1600
1601 1601 rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
1602 1602 ddi_set_driver_private(devinfo, rgep);
1603 1603 rgep->devinfo = devinfo;
1604 1604
1605 1605 /*
1606 1606 * Initialize more fields in RGE private data
1607 1607 */
1608 1608 rgep->rge_mac_state = RGE_MAC_ATTACH;
1609 1609 rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
1610 1610 DDI_PROP_DONTPASS, debug_propname, rge_debug);
1611 1611 rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
1612 1612 DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
1613 1613 rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
1614 1614 DDI_PROP_DONTPASS, msi_propname, B_TRUE);
1615 1615 (void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
1616 1616 RGE_DRIVER_NAME, instance);
1617 1617
1618 1618 /*
1619 1619 * Map config space registers
1620 1620 * Read chip ID & set up config space command register(s)
1621 1621 *
1622 1622 * Note: this leaves the chip accessible by Memory Space
1623 1623 * accesses, but with interrupts and Bus Mastering off.
1624 1624 * This should ensure that nothing untoward will happen
1625 1625 * if it has been left active by the (net-)bootloader.
1626 1626 * We'll re-enable Bus Mastering once we've reset the chip,
1627 1627 * and allow interrupts only when everything else is set up.
1628 1628 */
1629 1629 err = pci_config_setup(devinfo, &rgep->cfg_handle);
1630 1630 if (err != DDI_SUCCESS) {
1631 1631 rge_problem(rgep, "pci_config_setup() failed");
1632 1632 goto attach_fail;
1633 1633 }
1634 1634 rgep->progress |= PROGRESS_CFG;
1635 1635 cidp = &rgep->chipid;
1636 1636 bzero(cidp, sizeof (*cidp));
1637 1637 rge_chip_cfg_init(rgep, cidp);
1638 1638
1639 1639 /*
1640 1640 * Map operating registers
1641 1641 */
1642 1642 err = ddi_regs_map_setup(devinfo, 2, ®s,
1643 1643 0, 0, &rge_reg_accattr, &rgep->io_handle);
1644 1644
1645 1645 /*
1646 1646 * MMIO mapping will fail if the assigned address is above 4G;
1647 1647 * in that case fall back to the I/O map
1648 1648 */
1649 1649 if (err != DDI_SUCCESS) {
1650 1650 err = ddi_regs_map_setup(devinfo, 1, ®s,
1651 1651 0, 0, &rge_reg_accattr, &rgep->io_handle);
1652 1652 }
1653 1653 if (err != DDI_SUCCESS) {
1654 1654 rge_problem(rgep, "ddi_regs_map_setup() failed");
1655 1655 goto attach_fail;
1656 1656 }
1657 1657 rgep->io_regs = regs;
1658 1658 rgep->progress |= PROGRESS_REGS;
1659 1659
1660 1660 /*
1661 1661 * Characterise the device, so we know its requirements.
1662 1662 * Then allocate the appropriate TX and RX descriptors & buffers.
1663 1663 */
1664 1664 rge_chip_ident(rgep);
1665 1665 err = rge_alloc_bufs(rgep);
1666 1666 if (err != DDI_SUCCESS) {
1667 1667 rge_problem(rgep, "DMA buffer allocation failed");
1668 1668 goto attach_fail;
1669 1669 }
1670 1670
1671 1671 /*
1672 1672 * Register NDD-tweakable parameters
1673 1673 */
1674 1674 if (rge_nd_init(rgep)) {
1675 1675 rge_problem(rgep, "rge_nd_init() failed");
1676 1676 goto attach_fail;
1677 1677 }
1678 1678 rgep->progress |= PROGRESS_NDD;
1679 1679
1680 1680 /*
1681 1681 * Add the softint handlers:
1682 1682 *
1683 1683 * Both of these handlers are used to avoid restrictions on the
1684 1684 * context and/or mutexes required for some operations. In
1685 1685 * particular, the hardware interrupt handler and its subfunctions
1686 1686 * can detect a number of conditions that we don't want to handle
1687 1687 * in that context or with that set of mutexes held. So, these
1688 1688 * softints are triggered instead:
1689 1689 *
1690 1690 * the <resched> softint is triggered if we have previously
1691 1691 * had to refuse to send a packet because of resource shortage
1692 1692 * (we've run out of transmit buffers), but the send completion
1693 1693 * interrupt handler has now detected that more buffers have
1694 1694 * become available.
1695 1695 *
1696 1696 * the <factotum> is triggered if the h/w interrupt handler
1697 1697 * sees the <link state changed> or <error> bits in the status
1698 1698 * block. It's also triggered periodically to poll the link
1699 1699 * state, just in case we aren't getting link status change
1700 1700 * interrupts ...
1701 1701 */
1702 1702 err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
1703 1703 DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
1704 1704 if (err != DDI_SUCCESS) {
1705 1705 rge_problem(rgep, "ddi_intr_add_softint() failed");
1706 1706 goto attach_fail;
1707 1707 }
1708 1708 rgep->progress |= PROGRESS_RESCHED;
1709 1709 err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
1710 1710 DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
1711 1711 if (err != DDI_SUCCESS) {
1712 1712 rge_problem(rgep, "ddi_intr_add_softint() failed");
1713 1713 goto attach_fail;
1714 1714 }
1715 1715 rgep->progress |= PROGRESS_FACTOTUM;
1716 1716
1717 1717 /*
1718 1718 * Get supported interrupt types
1719 1719 */
1720 1720 if (ddi_intr_get_supported_types(devinfo, &intr_types)
1721 1721 != DDI_SUCCESS) {
1722 1722 rge_error(rgep, "ddi_intr_get_supported_types failed\n");
1723 1723 goto attach_fail;
1724 1724 }
1725 1725
1726 1726 /*
1727 1727 * Add the h/w interrupt handler and initialise mutexes
1728 1728 * The RTL8101E is observed to have an MSI invalidation issue
1729 1729 * after S/R, so the FIXED interrupt type is used instead.
1730 1730 */
1731 1731 if (rgep->chipid.mac_ver == MAC_VER_8101E)
1732 1732 rgep->msi_enable = B_FALSE;
1733 1733 if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
1734 1734 if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
1735 1735 rge_error(rgep, "MSI registration failed, "
1736 1736 "trying FIXED interrupt type\n");
1737 1737 } else {
1738 1738 rge_log(rgep, "Using MSI interrupt type\n");
1739 1739 rgep->intr_type = DDI_INTR_TYPE_MSI;
1740 1740 rgep->progress |= PROGRESS_INTR;
1741 1741 }
1742 1742 }
1743 1743 if (!(rgep->progress & PROGRESS_INTR) &&
1744 1744 (intr_types & DDI_INTR_TYPE_FIXED)) {
1745 1745 if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
1746 1746 rge_error(rgep, "FIXED interrupt "
1747 1747 "registration failed\n");
1748 1748 goto attach_fail;
1749 1749 }
1750 1750 rge_log(rgep, "Using FIXED interrupt type\n");
1751 1751 rgep->intr_type = DDI_INTR_TYPE_FIXED;
1752 1752 rgep->progress |= PROGRESS_INTR;
1753 1753 }
1754 1754 if (!(rgep->progress & PROGRESS_INTR)) {
1755 1755 rge_error(rgep, "No interrupts registered\n");
1756 1756 goto attach_fail;
1757 1757 }
1758 1758 mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
1759 1759 DDI_INTR_PRI(rgep->intr_pri));
1760 1760 rw_init(rgep->errlock, NULL, RW_DRIVER,
1761 1761 DDI_INTR_PRI(rgep->intr_pri));
1762 1762 mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
1763 1763 DDI_INTR_PRI(rgep->intr_pri));
1764 1764 mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
1765 1765 DDI_INTR_PRI(rgep->intr_pri));
1766 1766 mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
1767 1767 DDI_INTR_PRI(rgep->intr_pri));
1768 1768 mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
1769 1769 DDI_INTR_PRI(rgep->intr_pri));
1770 1770
1771 1771 /*
1772 1772 * Initialize rings
1773 1773 */
1774 1774 err = rge_init_rings(rgep);
1775 1775 if (err != DDI_SUCCESS) {
1776 1776 rge_problem(rgep, "rge_init_rings() failed");
1777 1777 goto attach_fail;
1778 1778 }
1779 1779 rgep->progress |= PROGRESS_INIT;
1780 1780
1781 1781 /*
1782 1782 * Now that mutex locks are initialized, enable interrupts.
1783 1783 */
1784 1784 if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
1785 1785 /* Call ddi_intr_block_enable() for MSI interrupts */
1786 1786 (void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
1787 1787 } else {
1788 1788 /* Call ddi_intr_enable for MSI or FIXED interrupts */
1789 1789 for (i = 0; i < rgep->intr_cnt; i++) {
1790 1790 (void) ddi_intr_enable(rgep->htable[i]);
1791 1791 }
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * Initialise link state variables
1796 1796 * Stop, reset & reinitialise the chip.
1797 1797 * Initialise the (internal) PHY.
1798 1798 */
1799 1799 rgep->param_link_up = LINK_STATE_UNKNOWN;
1800 1800
1801 1801 /*
1802 1802 * Reset chip & rings to initial state; also reset address
1803 1803 * filtering, promiscuity, loopback mode.
1804 1804 */
1805 1805 mutex_enter(rgep->genlock);
1806 1806 (void) rge_chip_reset(rgep);
1807 1807 rge_chip_sync(rgep, RGE_GET_MAC);
1808 1808 bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
1809 1809 bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
1810 1810 rgep->promisc = B_FALSE;
1811 1811 rgep->param_loop_mode = RGE_LOOP_NONE;
1812 1812 mutex_exit(rgep->genlock);
1813 1813 rge_phy_init(rgep);
1814 1814 rgep->progress |= PROGRESS_PHY;
1815 1815
1816 1816 /*
1817 1817 * Create & initialise named kstats
1818 1818 */
1819 1819 rge_init_kstats(rgep, instance);
1820 1820 rgep->progress |= PROGRESS_KSTATS;
1821 1821
1822 1822 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
1823 1823 goto attach_fail;
1824 1824 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1825 1825 macp->m_driver = rgep;
1826 1826 macp->m_dip = devinfo;
1827 1827 macp->m_src_addr = rgep->netaddr;
1828 1828 macp->m_callbacks = &rge_m_callbacks;
1829 1829 macp->m_min_sdu = 0;
1830 1830 macp->m_max_sdu = rgep->default_mtu;
1831 1831 macp->m_margin = VLAN_TAGSZ;
1832 1832
1833 1833 /*
1834 1834 * Finally, we're ready to register ourselves with the MAC layer
1835 1835 * interface; if this succeeds, we're all ready to start()
1836 1836 */
1837 1837 err = mac_register(macp, &rgep->mh);
1838 1838 mac_free(macp);
1839 1839 if (err != 0)
1840 1840 goto attach_fail;
1841 1841
1842 1842 /*
1843 1843 * Register a periodic handler.
1844 1844 * rge_chip_cyclic() is invoked in kernel context.
1845 1845 */
1846 1846 rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
1847 1847 RGE_CYCLIC_PERIOD, DDI_IPL_0);
1848 1848
1849 1849 rgep->progress |= PROGRESS_READY;
1850 1850 return (DDI_SUCCESS);
1851 1851
1852 1852 attach_fail:
1853 1853 rge_unattach(rgep);
1854 1854 return (DDI_FAILURE);
1855 1855 }
1856 1856
1857 1857 /*
1858 1858 * rge_suspend() -- suspend transmit/receive for powerdown
1859 1859 */
1860 1860 static int
1861 1861 rge_suspend(rge_t *rgep)
1862 1862 {
1863 1863 /*
1864 1864 * Stop processing and idle (powerdown) the PHY ...
1865 1865 */
1866 1866 mutex_enter(rgep->genlock);
1867 1867 rw_enter(rgep->errlock, RW_WRITER);
1868 1868
1869 1869 if (rgep->rge_mac_state != RGE_MAC_STARTED) {
1870 1870 rw_exit(rgep->errlock);
1871 1871 mutex_exit(rgep->genlock);
1872 1872 return (DDI_SUCCESS);
1873 1873 }
1874 1874
1875 1875 rgep->suspended = B_TRUE;
1876 1876 rge_stop(rgep);
1877 1877 rgep->rge_mac_state = RGE_MAC_STOPPED;
1878 1878
1879 1879 rw_exit(rgep->errlock);
1880 1880 mutex_exit(rgep->genlock);
1881 1881
1882 1882 return (DDI_SUCCESS);
1883 1883 }
1884 1884
1885 1885 /*
1886 1886 * quiesce(9E) entry point.
1887 1887 *
1888 1888 * This function is called when the system is single-threaded at high
1889 1889 * PIL with preemption disabled. Therefore, this function must not be
1890 1890 * blocked.
1891 1891 *
1892 1892 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1893 1893 * DDI_FAILURE indicates an error condition and should almost never happen.
1894 1894 */
1895 1895 static int
1896 1896 rge_quiesce(dev_info_t *devinfo)
1897 1897 {
1898 1898 rge_t *rgep = ddi_get_driver_private(devinfo);
1899 1899
1900 1900 if (rgep == NULL)
1901 1901 return (DDI_FAILURE);
1902 1902
1903 1903 /*
1904 1904 * Turn off debugging
1905 1905 */
1906 1906 rge_debug = 0;
1907 1907 rgep->debug = 0;
1908 1908
1909 1909 /* Stop the chip */
1910 1910 rge_chip_stop(rgep, B_FALSE);
1911 1911
1912 1912 return (DDI_SUCCESS);
1913 1913 }
1914 1914
1915 1915 /*
1916 1916 * detach(9E) -- Detach a device from the system
1917 1917 */
1918 1918 static int
1919 1919 rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1920 1920 {
1921 1921 rge_t *rgep;
1922 1922
1923 1923 RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));
1924 1924
1925 1925 rgep = ddi_get_driver_private(devinfo);
1926 1926
1927 1927 switch (cmd) {
1928 1928 default:
1929 1929 return (DDI_FAILURE);
1930 1930
1931 1931 case DDI_SUSPEND:
1932 1932 return (rge_suspend(rgep));
1933 1933
1934 1934 case DDI_DETACH:
1935 1935 break;
1936 1936 }
1937 1937
1938 1938 /*
1939 1939 * If there are any posted buffers, the driver should refuse to be
1940 1940 * detached. The upper layer needs to be notified to release them.
1941 1941 */
1942 1942 if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
1943 1943 rgep->rx_free != RGE_BUF_SLOTS)
1944 1944 return (DDI_FAILURE);
1945 1945
1946 1946 /*
1947 1947 * Unregister from the MAC layer subsystem. This can fail, in
1948 1948 * particular if there are DLPI style-2 streams still open -
1949 1949 * in which case we just return failure without shutting
1950 1950 * down chip operations.
1951 1951 */
1952 1952 if (mac_unregister(rgep->mh) != 0)
1953 1953 return (DDI_FAILURE);
1954 1954
1955 1955 /*
1956 1956 * All activity stopped, so we can clean up & exit
1957 1957 */
1958 1958 rge_unattach(rgep);
1959 1959 return (DDI_SUCCESS);
1960 1960 }
1961 1961
1962 1962
1963 1963 /*
1964 1964 * ========== Module Loading Data & Entry Points ==========
1965 1965 */
1966 1966
1967 1967 #undef RGE_DBG
1968 1968 #define RGE_DBG RGE_DBG_INIT /* debug flag for this code */
1969 1969 DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
1970 1970 nodev, NULL, D_MP, NULL, rge_quiesce);
1971 1971
1972 1972 static struct modldrv rge_modldrv = {
1973 1973 &mod_driverops, /* Type of module. This one is a driver */
1974 1974 rge_ident, /* short description */
1975 1975 &rge_dev_ops /* driver specific ops */
1976 1976 };
1977 1977
1978 1978 static struct modlinkage modlinkage = {
1979 - MODREV_1, (void *)&rge_modldrv, NULL
1979 + MODREV_1, { (void *)&rge_modldrv, NULL }
1980 1980 };
1981 1981
1982 1982
1983 1983 int
1984 1984 _info(struct modinfo *modinfop)
1985 1985 {
1986 1986 return (mod_info(&modlinkage, modinfop));
1987 1987 }
1988 1988
1989 1989 int
1990 1990 _init(void)
1991 1991 {
1992 1992 int status;
1993 1993
1994 1994 mac_init_ops(&rge_dev_ops, "rge");
1995 1995 status = mod_install(&modlinkage);
1996 1996 if (status == DDI_SUCCESS)
1997 1997 mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
1998 1998 else
1999 1999 mac_fini_ops(&rge_dev_ops);
2000 2000
2001 2001 return (status);
2002 2002 }
2003 2003
2004 2004 int
2005 2005 _fini(void)
2006 2006 {
2007 2007 int status;
2008 2008
2009 2009 status = mod_remove(&modlinkage);
2010 2010 if (status == DDI_SUCCESS) {
2011 2011 mac_fini_ops(&rge_dev_ops);
2012 2012 mutex_destroy(rge_log_mutex);
2013 2013 }
2014 2014 return (status);
2015 2015 }