Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/vr/vr.c
+++ new/usr/src/uts/common/io/vr/vr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/types.h>
28 28 #include <sys/stream.h>
29 29 #include <sys/strsun.h>
30 30 #include <sys/stat.h>
31 31 #include <sys/pci.h>
32 32 #include <sys/modctl.h>
33 33 #include <sys/kstat.h>
34 34 #include <sys/ethernet.h>
35 35 #include <sys/devops.h>
36 36 #include <sys/debug.h>
37 37 #include <sys/conf.h>
38 38 #include <sys/mac.h>
39 39 #include <sys/mac_provider.h>
40 40 #include <sys/mac_ether.h>
41 41 #include <sys/sysmacros.h>
42 42 #include <sys/dditypes.h>
43 43 #include <sys/ddi.h>
44 44 #include <sys/sunddi.h>
45 45 #include <sys/miiregs.h>
46 46 #include <sys/byteorder.h>
47 47 #include <sys/note.h>
48 48 #include <sys/vlan.h>
49 49
50 50 #include "vr.h"
51 51 #include "vr_impl.h"
52 52
53 53 /*
54 54 * VR in a nutshell
55 55 * The card uses two rings of data structures to communicate with the host.
56 56 * These are referred to as "descriptor rings" and there is one for transmit
57 57 * (TX) and one for receive (RX).
58 58 *
59 59 * The driver uses a "DMA buffer" data type for mapping to those descriptor
60 60 * rings. This is a structure with handles and a DMA'able buffer attached to it.
61 61 *
62 62 * Receive
63 63 * The receive ring is filled with DMA buffers. Received packets are copied into
64 64 * a newly allocated mblk's and passed upstream.
65 65 *
66 66 * Transmit
67 67 * Each transmit descriptor has a DMA buffer attached to it. The data of TX
68 68 * packets is copied into the DMA buffer which is then enqueued for
69 69 * transmission.
70 70 *
71 71 * Reclaim of transmitted packets is done as a result of a transmit completion
72 72 * interrupt which is generated 3 times per ring at minimum.
73 73 */
74 74
75 75 #if defined(DEBUG)
76 76 uint32_t vrdebug = 1;
77 77 #define VR_DEBUG(args) do { \
78 78 if (vrdebug > 0) \
79 79 (*vr_debug()) args; \
80 80 _NOTE(CONSTANTCONDITION) \
81 81 } while (0)
82 82 static void vr_prt(const char *fmt, ...);
83 83 void (*vr_debug())(const char *fmt, ...);
84 84 #else
85 85 #define VR_DEBUG(args) do ; _NOTE(CONSTANTCONDITION) while (0)
86 86 #endif
87 87
88 88 static char vr_ident[] = "VIA Rhine Ethernet";
89 89
/*
 * Attributes for accessing registers and memory descriptors for this device.
 * Descriptors are little-endian structures; strict ordering is requested.
 */
static ddi_device_acc_attr_t vr_dev_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
98 98
/*
 * Attributes for accessing data (packet payload).
 * Payload is a byte stream, so no byte swapping is applied.
 */
static ddi_device_acc_attr_t vr_data_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
107 107
/*
 * DMA attributes for descriptors for communication with the device
 * This driver assumes that all descriptors of one ring fit in one consecutive
 * memory area of max 4K (256 descriptors) that does not cross a page boundary.
 * Therefore, we request 4K alignment.
 */
static ddi_dma_attr_t vr_dev_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x1000,				/* DMA address alignment */
	0x7F,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};
128 128
/*
 * DMA attributes for the data moved to/from the device
 * Note that the alignment is set to 2K so that a 1500 byte packet never
 * crosses a page boundary and thus that a DMA transfer is not split up in
 * multiple cookies with a 4K/8K pagesize
 */
static ddi_dma_attr_t vr_data_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x800,				/* DMA address alignment */
	0xfff,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};
149 149
/*
 * MAC layer callback table. The first field is a bitmask telling the
 * MAC layer which of the optional entry points below are provided.
 */
static mac_callbacks_t vr_mac_callbacks = {
	MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
	vr_mac_getstat,		/* Get the value of a statistic */
	vr_mac_start,		/* Start the device */
	vr_mac_stop,		/* Stop the device */
	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
	vr_mac_tx_enqueue_list,	/* Transmit a packet */
	NULL,			/* presumably mc_reserved -- confirm */
	NULL,			/* Process an unknown ioctl */
	NULL,			/* Get capability information */
	NULL,			/* Open the device */
	NULL,			/* Close the device */
	vr_mac_setprop,		/* Set properties of the device */
	vr_mac_getprop,		/* Get properties of the device */
	vr_mac_propinfo		/* Get properties attributes */
};
168 168
/*
 * Table with bugs and features for each incarnation of the card.
 * An entry matches when the PCI revision id falls in the inclusive
 * [revmin, revmax] range; vr_bus_config() takes the first match and
 * falls back to entry 0 (a generic Rhine, no bugs, no features) when
 * nothing matches.
 */
static const chip_info_t vr_chip_info [] = {
	{
		0x0, 0x0,
		"VIA Rhine Fast Ethernet",
		(VR_BUG_NO_MEMIO),
		(VR_FEATURE_NONE)
	},
	{
		0x04, 0x21,
		"VIA VT86C100A Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
		    VR_BUG_MIIPOLLSTOP),
		(VR_FEATURE_NONE)
	},
	{
		0x40, 0x41,
		"VIA VT6102-A Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x42, 0x7f,
		"VIA VT6102-C Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x80, 0x82,
		"VIA VT6105-A Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x83, 0x89,
		"VIA VT6105-B Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8a, 0x8b,
		"VIA VT6105-LOM Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8c, 0x8c,
		"VIA VT6107-A0 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8d, 0x8f,
		"VIA VT6107-A1 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_MRDLNMULTIPLE)
	},
	{
		0x90, 0x93,
		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	},
	{
		0x94, 0xff,
		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	}
};
249 249
250 250 /*
251 251 * Function prototypes
252 252 */
253 253 static vr_result_t vr_add_intr(vr_t *vrp);
254 254 static void vr_remove_intr(vr_t *vrp);
255 255 static int32_t vr_cam_index(vr_t *vrp, const uint8_t *maddr);
256 256 static uint32_t ether_crc_be(const uint8_t *address);
257 257 static void vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
258 258 static void vr_log(vr_t *vrp, int level, const char *fmt, ...);
259 259 static int vr_resume(dev_info_t *devinfo);
260 260 static int vr_suspend(dev_info_t *devinfo);
261 261 static vr_result_t vr_bus_config(vr_t *vrp);
262 262 static void vr_bus_unconfig(vr_t *vrp);
263 263 static void vr_reset(vr_t *vrp);
264 264 static int vr_start(vr_t *vrp);
265 265 static int vr_stop(vr_t *vrp);
266 266 static vr_result_t vr_rings_init(vr_t *vrp);
267 267 static void vr_rings_fini(vr_t *vrp);
268 268 static vr_result_t vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
269 269 static void vr_free_ring(vr_ring_t *r, size_t n);
270 270 static vr_result_t vr_rxring_init(vr_t *vrp);
271 271 static void vr_rxring_fini(vr_t *vrp);
272 272 static vr_result_t vr_txring_init(vr_t *vrp);
273 273 static void vr_txring_fini(vr_t *vrp);
274 274 static vr_result_t vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
275 275 uint_t flags);
276 276 static void vr_free_dmabuf(vr_data_dma_t *dmap);
277 277 static void vr_param_init(vr_t *vrp);
278 278 static mblk_t *vr_receive(vr_t *vrp);
279 279 static void vr_tx_reclaim(vr_t *vrp);
280 280 static void vr_periodic(void *p);
281 281 static void vr_error(vr_t *vrp);
282 282 static void vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
283 283 static void vr_phy_write(vr_t *vrp, int offset, uint16_t value);
284 284 static void vr_phy_autopoll_disable(vr_t *vrp);
285 285 static void vr_phy_autopoll_enable(vr_t *vrp);
286 286 static void vr_link_init(vr_t *vrp);
287 287 static void vr_link_state(vr_t *vrp);
288 288 static void vr_kstats_init(vr_t *vrp);
289 289 static int vr_update_kstats(kstat_t *ksp, int access);
290 290 static void vr_remove_kstats(vr_t *vrp);
291 291
/*
 * attach(9E) entry point.
 * DDI_RESUME is delegated to vr_resume(); only DDI_ATTACH is handled
 * here. Resources are acquired in order (soft state, bus access,
 * descriptor rings, kstats, interrupt, mutexes, MAC registration) and
 * released in reverse order through the fail* labels on error.
 */
static int
vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	vr_t		*vrp;
	mac_register_t	*macreg;

	if (cmd == DDI_RESUME)
		return (vr_resume(devinfo));
	else if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Attach: allocate the per-instance soft state and link it to
	 * the devinfo node.
	 */
	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, vrp);
	vrp->devinfo = devinfo;

	/*
	 * Store the name+instance of the module (e.g. "vr0") for logging.
	 */
	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
	    MODULENAME, ddi_get_instance(devinfo));

	/*
	 * Bus initialization.
	 */
	if (vr_bus_config(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_bus_config failed");
		goto fail0;
	}

	/*
	 * Initialize default parameters.
	 */
	vr_param_init(vrp);

	/*
	 * Setup the descriptor rings.
	 */
	if (vr_rings_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_rings_init failed");
		goto fail1;
	}

	/*
	 * Initialize kstats.
	 */
	vr_kstats_init(vrp);

	/*
	 * Add interrupt to the OS.
	 */
	if (vr_add_intr(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
		goto fail3;
	}

	/*
	 * Add mutexes. intrlock is initialized with the interrupt
	 * priority obtained by vr_add_intr() so it can be acquired from
	 * the interrupt handler.
	 */
	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(vrp->intr_pri));
	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Enable interrupt.
	 */
	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
		goto fail5;
	}

	/*
	 * Register with parent, mac.
	 */
	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
		goto fail6;
	}

	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macreg->m_driver = vrp;
	macreg->m_dip = devinfo;
	macreg->m_src_addr = vrp->vendor_ether_addr;
	macreg->m_callbacks = &vr_mac_callbacks;
	macreg->m_min_sdu = 0;
	macreg->m_max_sdu = ETHERMTU;
	macreg->m_margin = VLAN_TAGSZ;

	if (mac_register(macreg, &vrp->machdl) != 0) {
		vr_log(vrp, CE_WARN, "mac_register failed in attach");
		goto fail7;
	}
	mac_free(macreg);
	return (DDI_SUCCESS);

	/*
	 * Unwind labels: each label releases what was acquired after the
	 * label below it and control falls through to release the rest.
	 * Note that fail2 is reached only by falling through from fail3.
	 */
fail7:
	mac_free(macreg);
fail6:
	(void) ddi_intr_disable(vrp->intr_hdl);
fail5:
	mutex_destroy(&vrp->tx.lock);
	mutex_destroy(&vrp->oplock);
	mutex_destroy(&vrp->intrlock);
	vr_remove_intr(vrp);
fail3:
	vr_remove_kstats(vrp);
fail2:
	vr_rings_fini(vrp);
fail1:
	vr_bus_unconfig(vrp);
fail0:
	kmem_free(vrp, sizeof (vr_t));
	return (DDI_FAILURE);
}
409 409
410 410 static int
411 411 vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
412 412 {
413 413 vr_t *vrp;
414 414
415 415 vrp = ddi_get_driver_private(devinfo);
416 416
417 417 if (cmd == DDI_SUSPEND)
418 418 return (vr_suspend(devinfo));
419 419 else if (cmd != DDI_DETACH)
420 420 return (DDI_FAILURE);
421 421
422 422 if (vrp->chip.state == CHIPSTATE_RUNNING)
423 423 return (DDI_FAILURE);
424 424
425 425 /*
426 426 * Try to un-register from the MAC layer.
427 427 */
428 428 if (mac_unregister(vrp->machdl) != 0)
429 429 return (DDI_FAILURE);
430 430
431 431 (void) ddi_intr_disable(vrp->intr_hdl);
432 432 vr_remove_intr(vrp);
433 433 mutex_destroy(&vrp->tx.lock);
434 434 mutex_destroy(&vrp->oplock);
435 435 mutex_destroy(&vrp->intrlock);
436 436 vr_remove_kstats(vrp);
437 437 vr_rings_fini(vrp);
438 438 vr_bus_unconfig(vrp);
439 439 kmem_free(vrp, sizeof (vr_t));
440 440 return (DDI_SUCCESS);
441 441 }
442 442
/*
 * quiesce(9E): silence the card for fast reboot.
 * Only device register writes are performed here; no locks are taken
 * and no memory is allocated.
 */
int
vr_quiesce(dev_info_t *dev_info)
{
	vr_t	*vrp;

	vrp = (vr_t *)ddi_get_driver_private(dev_info);

	/*
	 * Stop interrupts by clearing both interrupt control registers.
	 */
	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Stop DMA.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
	return (DDI_SUCCESS);
}
465 465
/*
 * Add an interrupt for our device to the OS.
 * Allocates a single fixed interrupt, attaches vr_intr() as its handler
 * and stores the interrupt priority in vrp->intr_pri (used later to
 * initialize intrlock). Each failing step undoes the previous ones;
 * returns VR_SUCCESS or VR_FAILURE.
 */
static vr_result_t
vr_add_intr(vr_t *vrp)
{
	int	nintrs;
	int	rc;

	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
	    DDI_INTR_TYPE_FIXED,	/* type */
	    0,				/* number */
	    1,				/* count */
	    &nintrs,			/* actualp */
	    DDI_INTR_ALLOC_STRICT);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
		return (VR_FAILURE);
	}

	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
		/* Undo the allocation above. */
		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
		return (VR_FAILURE);
	}

	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
		/* Undo handler registration and allocation. */
		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");

		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");

		return (VR_FAILURE);
	}
	return (VR_SUCCESS);
}
508 508
509 509 /*
510 510 * Remove our interrupt from the OS.
511 511 */
512 512 static void
513 513 vr_remove_intr(vr_t *vrp)
514 514 {
515 515 if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
516 516 vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
517 517
518 518 if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
519 519 vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
520 520 }
521 521
522 522 /*
523 523 * Resume operation after suspend.
524 524 */
525 525 static int
526 526 vr_resume(dev_info_t *devinfo)
527 527 {
528 528 vr_t *vrp;
529 529
530 530 vrp = (vr_t *)ddi_get_driver_private(devinfo);
531 531 mutex_enter(&vrp->oplock);
532 532 if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
533 533 (void) vr_start(vrp);
534 534 mutex_exit(&vrp->oplock);
535 535 return (DDI_SUCCESS);
536 536 }
537 537
538 538 /*
539 539 * Suspend operation.
540 540 */
541 541 static int
542 542 vr_suspend(dev_info_t *devinfo)
543 543 {
544 544 vr_t *vrp;
545 545
546 546 vrp = (vr_t *)ddi_get_driver_private(devinfo);
547 547 mutex_enter(&vrp->oplock);
548 548 if (vrp->chip.state == CHIPSTATE_RUNNING) {
549 549 (void) vr_stop(vrp);
550 550 vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
551 551 }
552 552 mutex_exit(&vrp->oplock);
553 553 return (DDI_SUCCESS);
554 554 }
555 555
556 556 /*
557 557 * Initial bus- and device configuration during attach(9E).
558 558 */
559 559 static vr_result_t
560 560 vr_bus_config(vr_t *vrp)
561 561 {
562 562 uint32_t addr;
563 563 int n, nsets, rc;
564 564 uint_t elem;
565 565 pci_regspec_t *regs;
566 566
567 567 /*
568 568 * Get the reg property which describes the various access methods.
569 569 */
570 570 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
571 571 0, "reg", (int **)®s, &elem) != DDI_PROP_SUCCESS) {
572 572 vr_log(vrp, CE_WARN, "Can't get reg property");
573 573 return (VR_FAILURE);
574 574 }
575 575 nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);
576 576
577 577 /*
578 578 * Setup access to all available sets.
579 579 */
580 580 vrp->nsets = nsets;
581 581 vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
582 582 for (n = 0; n < nsets; n++) {
583 583 rc = ddi_regs_map_setup(vrp->devinfo, n,
584 584 &vrp->regset[n].addr, 0, 0,
585 585 &vr_dev_dma_accattr,
586 586 &vrp->regset[n].hdl);
587 587 if (rc != DDI_SUCCESS) {
588 588 vr_log(vrp, CE_NOTE,
589 589 "Setup of register set %d failed", n);
590 590 while (--n >= 0)
591 591 ddi_regs_map_free(&vrp->regset[n].hdl);
592 592 kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
593 593 ddi_prop_free(regs);
594 594 return (VR_FAILURE);
595 595 }
596 596 bcopy(®s[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
597 597 }
598 598 ddi_prop_free(regs);
599 599
600 600 /*
601 601 * Assign type-named pointers to the register sets.
602 602 */
603 603 for (n = 0; n < nsets; n++) {
604 604 addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
605 605 if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
606 606 vrp->acc_cfg = &vrp->regset[n];
607 607 else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
608 608 vrp->acc_io = &vrp->regset[n];
609 609 else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
610 610 vrp->acc_mem = &vrp->regset[n];
611 611 }
612 612
613 613 /*
614 614 * Assure there is one of each type.
615 615 */
616 616 if (vrp->acc_cfg == NULL ||
617 617 vrp->acc_io == NULL ||
618 618 vrp->acc_mem == NULL) {
619 619 for (n = 0; n < nsets; n++)
620 620 ddi_regs_map_free(&vrp->regset[n].hdl);
621 621 kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
622 622 vr_log(vrp, CE_WARN,
623 623 "Config-, I/O- and memory sets not available");
624 624 return (VR_FAILURE);
625 625 }
626 626
627 627 /*
628 628 * Store vendor/device/revision.
629 629 */
630 630 vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
631 631 vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
632 632 vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);
633 633
634 634 /*
635 635 * Copy the matching chip_info_t structure.
636 636 */
637 637 elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
638 638 for (n = 0; n < elem; n++) {
639 639 if (vrp->chip.revision >= vr_chip_info[n].revmin &&
640 640 vrp->chip.revision <= vr_chip_info[n].revmax) {
641 641 bcopy((void*)&vr_chip_info[n],
642 642 (void*)&vrp->chip.info,
643 643 sizeof (chip_info_t));
644 644 break;
645 645 }
646 646 }
647 647
648 648 /*
649 649 * If we didn't find a chip_info_t for this card, copy the first
650 650 * entry of the info structures. This is a generic Rhine whith no
651 651 * bugs and no features.
652 652 */
653 653 if (vrp->chip.info.name == NULL) {
654 654 bcopy((void*)&vr_chip_info[0],
655 655 (void*) &vrp->chip.info,
656 656 sizeof (chip_info_t));
657 657 }
658 658
659 659 /*
660 660 * Tell what is found.
661 661 */
662 662 vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
663 663 PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
664 664 PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
665 665 PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
666 666 vrp->chip.info.name,
667 667 vrp->chip.revision);
668 668
669 669 /*
670 670 * Assure that the device is prepared for memory space accesses
671 671 * This should be the default as the device advertises memory
672 672 * access in it's BAR's. However, my VT6102 on a EPIA CL board doesn't
673 673 * and thus we explicetely enable it.
674 674 */
675 675 VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
676 676
677 677 /*
678 678 * Setup a handle for regular usage, prefer memory space accesses.
679 679 */
680 680 if (vrp->acc_mem != NULL &&
681 681 (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
682 682 vrp->acc_reg = vrp->acc_mem;
683 683 else
684 684 vrp->acc_reg = vrp->acc_io;
685 685
686 686 /*
687 687 * Store the vendor's MAC address.
688 688 */
689 689 for (n = 0; n < ETHERADDRL; n++) {
690 690 vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
691 691 VR_ETHERADDR + n);
692 692 }
693 693 return (VR_SUCCESS);
694 694 }
695 695
696 696 static void
697 697 vr_bus_unconfig(vr_t *vrp)
698 698 {
699 699 uint_t n;
700 700
701 701 /*
702 702 * Free the register access handles.
703 703 */
704 704 for (n = 0; n < vrp->nsets; n++)
705 705 ddi_regs_map_free(&vrp->regset[n].hdl);
706 706 kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
707 707 }
708 708
/*
 * Initialize parameter structures: the default link configuration, the
 * PHY identity and the autonegotiation advertisement, which is the set
 * of enabled abilities minus whatever the PHY and MAC cannot do.
 */
static void
vr_param_init(vr_t *vrp)
{
	/*
	 * Initialize default link configuration parameters.
	 */
	vrp->param.an_en = VR_LINK_AUTONEG_ON;
	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
	/* Not a PHY ability, but advertised on behalf of MAC */
	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
	vrp->param.mtu = ETHERMTU;

	/*
	 * Store the PHY identity.
	 */
	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);

	/*
	 * Clear incapabilities imposed by PHY in phymask: start with all
	 * enabled abilities and knock out what the MII status register
	 * says the PHY cannot do.
	 */
	vrp->param.an_phymask = vrp->param.anadv_en;
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;

	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;

	/*
	 * Clear incapabilities imposed by MAC in macmask
	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
	 * have the ability to honor incoming pause frames. Only the newer can
	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
	 * Ethernet, we always advertise (symmetric) pause.
	 */
	vrp->param.an_macmask = vrp->param.anadv_en;

	/*
	 * Advertised capabilities is enabled minus incapable.
	 */
	vrp->chip.mii.anadv = vrp->param.anadv_en &
	    (vrp->param.an_phymask & vrp->param.an_macmask);

	/*
	 * Ensure that autoneg of the PHY matches our default.
	 */
	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
		vrp->chip.mii.control = MII_CONTROL_ANE;
	else
		vrp->chip.mii.control =
		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
}
779 779
780 780 /*
781 781 * Setup the descriptor rings.
782 782 */
783 783 static vr_result_t
784 784 vr_rings_init(vr_t *vrp)
785 785 {
786 786
787 787 vrp->rx.ndesc = VR_RX_N_DESC;
788 788 vrp->tx.ndesc = VR_TX_N_DESC;
789 789
790 790 /*
791 791 * Create a ring for receive.
792 792 */
793 793 if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
794 794 return (VR_FAILURE);
795 795
796 796 /*
797 797 * Create a ring for transmit.
798 798 */
799 799 if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
800 800 vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
801 801 return (VR_FAILURE);
802 802 }
803 803
804 804 vrp->rx.ring = vrp->rxring.desc;
805 805 vrp->tx.ring = vrp->txring.desc;
806 806 return (VR_SUCCESS);
807 807 }
808 808
809 809 static void
810 810 vr_rings_fini(vr_t *vrp)
811 811 {
812 812 vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
813 813 vr_free_ring(&vrp->txring, vrp->tx.ndesc);
814 814 }
815 815
/*
 * Allocate a descriptor ring
 * The number of descriptor entries must fit in a single page so that the
 * whole ring fits in one consecutive space.
 * i386:  4K page / 16 byte descriptor = 256 entries
 * sparc: 8K page / 16 byte descriptor = 512 entries
 * Returns VR_SUCCESS, or VR_FAILURE after undoing any partial setup.
 */
static vr_result_t
vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
{
	ddi_dma_cookie_t	desc_dma_cookie;
	uint_t			desc_cookiecnt;
	int			i, rc;
	size_t			rbytes;

	/*
	 * Allocate a DMA handle for the chip descriptors.
	 */
	rc = ddi_dma_alloc_handle(vrp->devinfo,
	    &vr_dev_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ring->handle);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
		return (VR_FAILURE);
	}

	/*
	 * Allocate memory for the chip descriptors.
	 */
	rc = ddi_dma_mem_alloc(ring->handle,
	    n * sizeof (vr_chip_desc_t),
	    &vr_dev_dma_accattr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ring->cdesc,
	    &rbytes,
	    &ring->acchdl);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}

	/*
	 * Map the descriptor memory. A single cookie is required because
	 * the chip expects the ring in one consecutive DMA area.
	 */
	rc = ddi_dma_addr_bind_handle(ring->handle,
	    NULL,
	    (caddr_t)ring->cdesc,
	    rbytes,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &desc_dma_cookie,
	    &desc_cookiecnt);

	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
		ddi_dma_mem_free(&ring->acchdl);
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}
	ring->cdesc_paddr = desc_dma_cookie.dmac_address;

	/*
	 * Allocate memory for the host descriptor ring.
	 */
	ring->desc =
	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);

	/*
	 * Interlink the descriptors and connect host- to chip descriptors.
	 */
	for (i = 0; i < n; i++) {
		/*
		 * Connect the host descriptor to a chip descriptor.
		 */
		ring->desc[i].cdesc = &ring->cdesc[i];

		/*
		 * Store the DMA address and offset in the descriptor
		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
		 */
		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;

		/*
		 * Link the previous descriptor to this one.
		 */
		if (i > 0) {
			/* Host */
			ring->desc[i-1].next = &ring->desc[i];

			/* Chip */
			ddi_put32(ring->acchdl,
			    &ring->cdesc[i-1].next,
			    ring->desc[i].paddr);
		}
	}

	/*
	 * Make rings out of this list by pointing last to first.
	 */
	i = n - 1;
	ring->desc[i].next = &ring->desc[0];
	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
	return (VR_SUCCESS);
}
933 933
934 934 /*
935 935 * Free the memory allocated for a ring.
936 936 */
937 937 static void
938 938 vr_free_ring(vr_ring_t *r, size_t n)
939 939 {
940 940 /*
941 941 * Unmap and free the chip descriptors.
942 942 */
943 943 (void) ddi_dma_unbind_handle(r->handle);
944 944 ddi_dma_mem_free(&r->acchdl);
945 945 ddi_dma_free_handle(&r->handle);
946 946
947 947 /*
948 948 * Free the memory for storing host descriptors
949 949 */
950 950 kmem_free(r->desc, n * sizeof (vr_desc_t));
951 951 }
952 952
/*
 * Initialize the receive ring: attach a streaming-read DMA buffer to
 * every receive descriptor, store the buffer address and length in the
 * chip descriptor, hand ownership to the card and sync the descriptor
 * out to memory. Returns VR_SUCCESS, or VR_FAILURE after freeing the
 * buffers allocated so far.
 */
static vr_result_t
vr_rxring_init(vr_t *vrp)
{
	int		i, rc;
	vr_desc_t	*rp;

	/*
	 * Set the read pointer at the start of the ring.
	 */
	vrp->rx.rp = &vrp->rx.ring[0];

	/*
	 * Assign a DMA buffer to each receive descriptor.
	 */
	for (i = 0; i < vrp->rx.ndesc; i++) {
		rp = &vrp->rx.ring[i];
		rc = vr_alloc_dmabuf(vrp,
		    &vrp->rx.ring[i].dmabuf,
		    DDI_DMA_STREAMING | DDI_DMA_READ);

		if (rc != VR_SUCCESS) {
			/* Undo the buffers allocated so far. */
			while (--i >= 0)
				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
			return (VR_FAILURE);
		}

		/*
		 * Store the address of the dma buffer in the chip descriptor
		 */
		ddi_put32(vrp->rxring.acchdl,
		    &rp->cdesc->data,
		    rp->dmabuf.paddr);

		/*
		 * Put the buffer length in the chip descriptor. Ensure that
		 * length fits in the 11 bits of stat1 (2047/0x7FF)
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));

		/*
		 * Set descriptor ownership to the card
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);

		/*
		 * Sync the descriptor with main memory
		 */
		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
	}
	return (VR_SUCCESS);
}
1009 1009
1010 1010 /*
1011 1011 * Free the DMA buffers assigned to the receive ring.
1012 1012 */
1013 1013 static void
1014 1014 vr_rxring_fini(vr_t *vrp)
1015 1015 {
1016 1016 int i;
1017 1017
1018 1018 for (i = 0; i < vrp->rx.ndesc; i++)
1019 1019 vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1020 1020 }
1021 1021
1022 1022 static vr_result_t
1023 1023 vr_txring_init(vr_t *vrp)
1024 1024 {
1025 1025 vr_desc_t *wp;
1026 1026 int i, rc;
1027 1027
1028 1028 /*
1029 1029 * Set the write- and claim pointer.
1030 1030 */
1031 1031 vrp->tx.wp = &vrp->tx.ring[0];
1032 1032 vrp->tx.cp = &vrp->tx.ring[0];
1033 1033
1034 1034 /*
1035 1035 * (Re)set the TX bookkeeping.
1036 1036 */
1037 1037 vrp->tx.stallticks = 0;
1038 1038 vrp->tx.resched = 0;
1039 1039
1040 1040 /*
1041 1041 * Every transmit decreases nfree. Every reclaim increases nfree.
1042 1042 */
1043 1043 vrp->tx.nfree = vrp->tx.ndesc;
1044 1044
1045 1045 /*
1046 1046 * Attach a DMA buffer to each transmit descriptor.
1047 1047 */
1048 1048 for (i = 0; i < vrp->tx.ndesc; i++) {
1049 1049 rc = vr_alloc_dmabuf(vrp,
1050 1050 &vrp->tx.ring[i].dmabuf,
1051 1051 DDI_DMA_STREAMING | DDI_DMA_WRITE);
1052 1052
1053 1053 if (rc != VR_SUCCESS) {
1054 1054 while (--i >= 0)
1055 1055 vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1056 1056 return (VR_FAILURE);
1057 1057 }
1058 1058 }
1059 1059
1060 1060 /*
1061 1061 * Init & sync the TX descriptors so the device sees a valid ring.
1062 1062 */
1063 1063 for (i = 0; i < vrp->tx.ndesc; i++) {
1064 1064 wp = &vrp->tx.ring[i];
1065 1065 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
1066 1066 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
1067 1067 ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
1068 1068 wp->dmabuf.paddr);
1069 1069 (void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1070 1070 sizeof (vr_chip_desc_t),
1071 1071 DDI_DMA_SYNC_FORDEV);
1072 1072 }
1073 1073 return (VR_SUCCESS);
1074 1074 }
1075 1075
1076 1076 /*
1077 1077 * Free the DMA buffers attached to the TX ring.
1078 1078 */
1079 1079 static void
1080 1080 vr_txring_fini(vr_t *vrp)
1081 1081 {
1082 1082 int i;
1083 1083
1084 1084 /*
1085 1085 * Free the DMA buffers attached to the TX ring
1086 1086 */
1087 1087 for (i = 0; i < vrp->tx.ndesc; i++)
1088 1088 vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1089 1089 }
1090 1090
1091 1091 /*
1092 1092 * Allocate a DMA buffer.
1093 1093 */
1094 1094 static vr_result_t
1095 1095 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1096 1096 {
1097 1097 ddi_dma_cookie_t dma_cookie;
1098 1098 uint_t cookiecnt;
1099 1099 int rc;
1100 1100
1101 1101 /*
1102 1102 * Allocate a DMA handle for the buffer
1103 1103 */
1104 1104 rc = ddi_dma_alloc_handle(vrp->devinfo,
1105 1105 &vr_data_dma_attr,
1106 1106 DDI_DMA_DONTWAIT, NULL,
1107 1107 &dmap->handle);
1108 1108
1109 1109 if (rc != DDI_SUCCESS) {
1110 1110 vr_log(vrp, CE_WARN,
1111 1111 "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1112 1112 return (VR_FAILURE);
1113 1113 }
1114 1114
1115 1115 /*
1116 1116 * Allocate the buffer
1117 1117 * The allocated buffer is aligned on 2K boundary. This ensures that
1118 1118 * a 1500 byte frame never cross a page boundary and thus that the DMA
1119 1119 * mapping can be established in 1 fragment.
1120 1120 */
1121 1121 rc = ddi_dma_mem_alloc(dmap->handle,
1122 1122 VR_DMABUFSZ,
1123 1123 &vr_data_dma_accattr,
1124 1124 DDI_DMA_RDWR | DDI_DMA_STREAMING,
1125 1125 DDI_DMA_DONTWAIT, NULL,
1126 1126 &dmap->buf,
1127 1127 &dmap->bufsz,
1128 1128 &dmap->acchdl);
1129 1129
1130 1130 if (rc != DDI_SUCCESS) {
1131 1131 vr_log(vrp, CE_WARN,
1132 1132 "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1133 1133 ddi_dma_free_handle(&dmap->handle);
1134 1134 return (VR_FAILURE);
1135 1135 }
1136 1136
1137 1137 /*
1138 1138 * Map the memory
1139 1139 */
1140 1140 rc = ddi_dma_addr_bind_handle(dmap->handle,
1141 1141 NULL,
1142 1142 (caddr_t)dmap->buf,
1143 1143 dmap->bufsz,
1144 1144 dmaflags,
1145 1145 DDI_DMA_DONTWAIT,
1146 1146 NULL,
1147 1147 &dma_cookie,
1148 1148 &cookiecnt);
1149 1149
1150 1150 /*
1151 1151 * The cookiecount should never > 1 because we requested 2K alignment
1152 1152 */
1153 1153 if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1154 1154 vr_log(vrp, CE_WARN,
1155 1155 "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1156 1156 "rc = %d, cookiecnt = %d", rc, cookiecnt);
1157 1157 ddi_dma_mem_free(&dmap->acchdl);
1158 1158 ddi_dma_free_handle(&dmap->handle);
1159 1159 return (VR_FAILURE);
1160 1160 }
1161 1161 dmap->paddr = dma_cookie.dmac_address;
1162 1162 return (VR_SUCCESS);
1163 1163 }
1164 1164
/*
 * Destroy a DMA buffer.
 * Order is the strict reverse of vr_alloc_dmabuf(): unbind the
 * mapping, free the DMA memory, then free the handle.
 */
static void
vr_free_dmabuf(vr_data_dma_t *dmap)
{
	(void) ddi_dma_unbind_handle(dmap->handle);
	ddi_dma_mem_free(&dmap->acchdl);
	ddi_dma_free_handle(&dmap->handle);
}
1175 1175
1176 1176 /*
1177 1177 * Interrupt service routine
1178 1178 * When our vector is shared with another device, av_dispatch_autovect calls
1179 1179 * all service routines for the vector until *none* of them return claimed
1180 1180 * That means that, when sharing vectors, this routine is called at least
1181 1181 * twice for each interrupt.
1182 1182 */
1183 1183 uint_t
1184 1184 vr_intr(caddr_t arg1, caddr_t arg2)
1185 1185 {
1186 1186 vr_t *vrp;
1187 1187 uint16_t status;
1188 1188 mblk_t *lp = NULL;
1189 1189 uint32_t tx_resched;
1190 1190 uint32_t link_change;
1191 1191
1192 1192 tx_resched = 0;
1193 1193 link_change = 0;
1194 1194 vrp = (void *)arg1;
1195 1195 _NOTE(ARGUNUSED(arg2))
1196 1196
1197 1197 mutex_enter(&vrp->intrlock);
1198 1198 /*
1199 1199 * If the driver is not in running state it is not our interrupt.
1200 1200 * Shared interrupts can end up here without us being started.
1201 1201 */
1202 1202 if (vrp->chip.state != CHIPSTATE_RUNNING) {
1203 1203 mutex_exit(&vrp->intrlock);
1204 1204 return (DDI_INTR_UNCLAIMED);
1205 1205 }
1206 1206
1207 1207 /*
1208 1208 * Read the status register to see if the interrupt is from our device
1209 1209 * This read also ensures that posted writes are brought to main memory.
1210 1210 */
1211 1211 status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1212 1212 if (status == 0) {
1213 1213 /*
1214 1214 * Status contains no configured interrupts
1215 1215 * The interrupt was not generated by our device.
1216 1216 */
1217 1217 vrp->stats.intr_unclaimed++;
1218 1218 mutex_exit(&vrp->intrlock);
1219 1219 return (DDI_INTR_UNCLAIMED);
1220 1220 }
1221 1221 vrp->stats.intr_claimed++;
1222 1222
1223 1223 /*
1224 1224 * Acknowledge the event(s) that caused interruption.
1225 1225 */
1226 1226 VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1227 1227
1228 1228 /*
1229 1229 * Receive completion.
1230 1230 */
1231 1231 if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1232 1232 /*
1233 1233 * Received some packets.
1234 1234 */
1235 1235 lp = vr_receive(vrp);
1236 1236
1237 1237 /*
1238 1238 * DMA stops after a conflict in the FIFO.
1239 1239 */
1240 1240 if ((status & VR_ISR_RX_ERR_BITS) != 0)
1241 1241 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1242 1242 status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1243 1243 }
1244 1244
1245 1245 /*
1246 1246 * Transmit completion.
1247 1247 */
1248 1248 if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
1249 1249 /*
1250 1250 * Card done with transmitting some packets
1251 1251 * TX_DONE is generated 3 times per ring but it appears
1252 1252 * more often because it is also set when an RX_DONE
1253 1253 * interrupt is generated.
1254 1254 */
1255 1255 mutex_enter(&vrp->tx.lock);
1256 1256 vr_tx_reclaim(vrp);
1257 1257 tx_resched = vrp->tx.resched;
1258 1258 vrp->tx.resched = 0;
1259 1259 mutex_exit(&vrp->tx.lock);
1260 1260 status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1261 1261 }
1262 1262
1263 1263 /*
1264 1264 * Link status change.
1265 1265 */
1266 1266 if ((status & VR_ICR0_LINKSTATUS) != 0) {
1267 1267 /*
1268 1268 * Get new link state and inform the mac layer.
1269 1269 */
1270 1270 mutex_enter(&vrp->oplock);
1271 1271 mutex_enter(&vrp->tx.lock);
1272 1272 vr_link_state(vrp);
1273 1273 mutex_exit(&vrp->tx.lock);
1274 1274 mutex_exit(&vrp->oplock);
1275 1275 status &= ~VR_ICR0_LINKSTATUS;
1276 1276 vrp->stats.linkchanges++;
1277 1277 link_change = 1;
1278 1278 }
1279 1279
1280 1280 /*
1281 1281 * Bus error.
1282 1282 */
1283 1283 if ((status & VR_ISR0_BUSERR) != 0) {
1284 1284 vr_log(vrp, CE_WARN, "bus error occured");
1285 1285 vrp->reset = 1;
1286 1286 status &= ~VR_ISR0_BUSERR;
1287 1287 }
1288 1288
1289 1289 /*
1290 1290 * We must have handled all things here.
1291 1291 */
1292 1292 ASSERT(status == 0);
1293 1293 mutex_exit(&vrp->intrlock);
1294 1294
1295 1295 /*
1296 1296 * Reset the device if requested
1297 1297 * The request can come from the periodic tx check or from the interrupt
1298 1298 * status.
1299 1299 */
1300 1300 if (vrp->reset != 0) {
1301 1301 vr_error(vrp);
1302 1302 vrp->reset = 0;
1303 1303 }
1304 1304
1305 1305 /*
1306 1306 * Pass up the list with received packets.
1307 1307 */
1308 1308 if (lp != NULL)
1309 1309 mac_rx(vrp->machdl, 0, lp);
1310 1310
1311 1311 /*
1312 1312 * Inform the upper layer on the linkstatus if there was a change.
1313 1313 */
1314 1314 if (link_change != 0)
1315 1315 mac_link_update(vrp->machdl,
1316 1316 (link_state_t)vrp->chip.link.state);
1317 1317 /*
1318 1318 * Restart transmissions if we were waiting for tx descriptors.
1319 1319 */
1320 1320 if (tx_resched == 1)
1321 1321 mac_tx_update(vrp->machdl);
1322 1322
1323 1323 /*
1324 1324 * Read something from the card to ensure that all of our configuration
1325 1325 * writes are delivered to the device before the interrupt is ended.
1326 1326 */
1327 1327 (void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1328 1328 return (DDI_INTR_CLAIMED);
1329 1329 }
1330 1330
/*
 * Respond to an unforeseen situation by resetting the card and our
 * bookkeeping.
 * Lock order is intrlock -> oplock -> tx.lock; the same order used
 * elsewhere in the driver, so it must not be changed here.
 */
static void
vr_error(vr_t *vrp)
{
	vr_log(vrp, CE_WARN, "resetting MAC.");
	mutex_enter(&vrp->intrlock);
	mutex_enter(&vrp->oplock);
	mutex_enter(&vrp->tx.lock);
	(void) vr_stop(vrp);
	vr_reset(vrp);
	(void) vr_start(vrp);
	mutex_exit(&vrp->tx.lock);
	mutex_exit(&vrp->oplock);
	mutex_exit(&vrp->intrlock);
	vrp->stats.resets++;
}
1349 1349
/*
 * Collect received packets in a list.
 * Walks the RX ring starting at vrp->rx.rp until a descriptor still
 * owned by the card is found. Good frames are copied into fresh
 * mblks and chained via b_next; every visited descriptor is handed
 * back to the card. Returns the head of the chain, or NULL.
 * Called with intrlock held (from vr_intr).
 */
static mblk_t *
vr_receive(vr_t *vrp)
{
	mblk_t		*lp, *mp, *np;
	vr_desc_t	*rxp;
	vr_data_dma_t	*dmap;
	uint32_t	pklen;
	uint32_t	rxstat0;
	uint32_t	n;

	lp = NULL;
	n = 0;
	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
		/*
		 * Sync the descriptor before looking at it.
		 */
		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);

		/*
		 * Get the status from the descriptor.
		 */
		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);

		/*
		 * We're done if the descriptor is owned by the card.
		 */
		if ((rxstat0 & VR_RDES0_OWN) != 0)
			break;
		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
			/*
			 * Received a good packet
			 * The frame length lives in the upper 16 bits
			 * of stat0 and includes the FCS, which we strip.
			 */
			dmap = &rxp->dmabuf;
			pklen = (rxstat0 >> 16) - ETHERFCSL;

			/*
			 * Sync the data.
			 */
			(void) ddi_dma_sync(dmap->handle, 0,
			    pklen, DDI_DMA_SYNC_FORKERNEL);

			/*
			 * Send a new copied message upstream; the DMA
			 * buffer itself stays attached to the ring.
			 */
			np = allocb(pklen, 0);
			if (np != NULL) {
				bcopy(dmap->buf, np->b_rptr, pklen);
				np->b_wptr = np->b_rptr + pklen;

				vrp->stats.mac_stat_ipackets++;
				vrp->stats.mac_stat_rbytes += pklen;

				if ((rxstat0 & VR_RDES0_BAR) != 0)
					vrp->stats.mac_stat_brdcstrcv++;
				else if ((rxstat0 & VR_RDES0_MAR) != 0)
					vrp->stats.mac_stat_multircv++;

				/*
				 * Link this packet in the list;
				 * mp tracks the current tail.
				 */
				np->b_next = NULL;
				if (lp == NULL)
					lp = mp = np;
				else {
					mp->b_next = np;
					mp = np;
				}
			} else {
				/* Out of mblks; count and drop. */
				vrp->stats.allocbfail++;
				vrp->stats.mac_stat_norcvbuf++;
			}

		} else {
			/*
			 * Received with errors.
			 */
			vrp->stats.mac_stat_ierrors++;
			if ((rxstat0 & VR_RDES0_FAE) != 0)
				vrp->stats.ether_stat_align_errors++;
			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
				vrp->stats.ether_stat_fcs_errors++;
			if ((rxstat0 & VR_RDES0_LONG) != 0)
				vrp->stats.ether_stat_toolong_errors++;
			if ((rxstat0 & VR_RDES0_RUNT) != 0)
				vrp->stats.ether_stat_tooshort_errors++;
			if ((rxstat0 & VR_RDES0_FOV) != 0)
				vrp->stats.mac_stat_overflows++;
		}

		/*
		 * Reset descriptor ownership to the MAC.
		 */
		ddi_put32(vrp->rxring.acchdl,
		    &rxp->cdesc->stat0,
		    VR_RDES0_OWN);
		(void) ddi_dma_sync(vrp->rxring.handle,
		    rxp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORDEV);
	}
	vrp->rx.rp = rxp;

	/*
	 * If we do flowcontrol and if the card can transmit pause frames,
	 * increment the "available receive descriptors" register.
	 */
	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
		/*
		 * Whenever the card moves a fragment to host memory it
		 * decrements the RXBUFCOUNT register. If the value in the
		 * register reaches a low watermark, the card transmits a pause
		 * frame. If the value in this register reaches a high
		 * watermark, the card sends a "cancel pause" frame
		 *
		 * Non-zero values written to this byte register are added
		 * by the chip to the register's contents, so we must write
		 * the number of descriptors free'd.
		 */
		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
	}
	return (lp);
}
1476 1476
1477 1477 /*
1478 1478 * Enqueue a list of packets for transmission
1479 1479 * Return the packets not transmitted.
1480 1480 */
1481 1481 mblk_t *
1482 1482 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1483 1483 {
1484 1484 vr_t *vrp;
1485 1485 mblk_t *nextp;
1486 1486
1487 1487 vrp = (vr_t *)p;
1488 1488 mutex_enter(&vrp->tx.lock);
1489 1489 do {
1490 1490 if (vrp->tx.nfree == 0) {
1491 1491 vrp->stats.ether_stat_defer_xmts++;
1492 1492 vrp->tx.resched = 1;
1493 1493 break;
1494 1494 }
1495 1495 nextp = mp->b_next;
1496 1496 mp->b_next = mp->b_prev = NULL;
1497 1497 vr_tx_enqueue_msg(vrp, mp);
1498 1498 mp = nextp;
1499 1499 vrp->tx.nfree--;
1500 1500 } while (mp != NULL);
1501 1501 mutex_exit(&vrp->tx.lock);
1502 1502
1503 1503 /*
1504 1504 * Tell the chip to poll the TX ring.
1505 1505 */
1506 1506 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1507 1507 return (mp);
1508 1508 }
1509 1509
1510 1510 /*
1511 1511 * Enqueue a message for transmission.
1512 1512 */
1513 1513 static void
1514 1514 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1515 1515 {
1516 1516 vr_desc_t *wp;
1517 1517 vr_data_dma_t *dmap;
1518 1518 uint32_t pklen;
1519 1519 uint32_t nextp;
1520 1520 int padlen;
1521 1521
1522 1522 if ((uchar_t)mp->b_rptr[0] == 0xff &&
1523 1523 (uchar_t)mp->b_rptr[1] == 0xff &&
1524 1524 (uchar_t)mp->b_rptr[2] == 0xff &&
1525 1525 (uchar_t)mp->b_rptr[3] == 0xff &&
1526 1526 (uchar_t)mp->b_rptr[4] == 0xff &&
1527 1527 (uchar_t)mp->b_rptr[5] == 0xff)
1528 1528 vrp->stats.mac_stat_brdcstxmt++;
1529 1529 else if ((uchar_t)mp->b_rptr[0] == 1)
1530 1530 vrp->stats.mac_stat_multixmt++;
1531 1531
1532 1532 pklen = msgsize(mp);
1533 1533 wp = vrp->tx.wp;
1534 1534 dmap = &wp->dmabuf;
1535 1535
1536 1536 /*
1537 1537 * Copy the message into the pre-mapped buffer and free mp
1538 1538 */
1539 1539 mcopymsg(mp, dmap->buf);
1540 1540
1541 1541 /*
1542 1542 * Clean padlen bytes of short packet.
1543 1543 */
1544 1544 padlen = ETHERMIN - pklen;
1545 1545 if (padlen > 0) {
1546 1546 bzero(dmap->buf + pklen, padlen);
1547 1547 pklen += padlen;
1548 1548 }
1549 1549
1550 1550 /*
1551 1551 * Most of the statistics are updated on reclaim, after the actual
1552 1552 * transmit. obytes is maintained here because the length is cleared
1553 1553 * after transmission
1554 1554 */
1555 1555 vrp->stats.mac_stat_obytes += pklen;
1556 1556
1557 1557 /*
1558 1558 * Sync the data so the device sees the new content too.
1559 1559 */
1560 1560 (void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1561 1561
1562 1562 /*
1563 1563 * If we have reached the TX interrupt distance, enable a TX interrupt
1564 1564 * for this packet. The Interrupt Control (IC) bit in the transmit
1565 1565 * descriptor doesn't have any effect on the interrupt generation
1566 1566 * despite the vague statements in the datasheet. Thus, we use the
1567 1567 * more obscure interrupt suppress bit which is probably part of the
1568 1568 * MAC's bookkeeping for TX interrupts and fragmented packets.
1569 1569 */
1570 1570 vrp->tx.intr_distance++;
1571 1571 nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1572 1572 if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1573 1573 /*
1574 1574 * Don't suppress the interrupt for this packet.
1575 1575 */
1576 1576 vrp->tx.intr_distance = 0;
1577 1577 nextp &= (~VR_TDES3_SUPPRESS_INTR);
1578 1578 } else {
1579 1579 /*
1580 1580 * Suppress the interrupt for this packet.
1581 1581 */
1582 1582 nextp |= VR_TDES3_SUPPRESS_INTR;
1583 1583 }
1584 1584
1585 1585 /*
1586 1586 * Write and sync the chip's descriptor
1587 1587 */
1588 1588 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1589 1589 pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1590 1590 ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1591 1591 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1592 1592 (void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1593 1593 sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1594 1594
1595 1595 /*
1596 1596 * The ticks counter is cleared by reclaim when it reclaimed some
1597 1597 * descriptors and incremented by the periodic TX stall check.
1598 1598 */
1599 1599 vrp->tx.stallticks = 1;
1600 1600 vrp->tx.wp = wp->next;
1601 1601 }
1602 1602
/*
 * Free transmitted descriptors.
 * Walks from the claim pointer until a descriptor still owned by the
 * card is found, updating transmit statistics along the way. Must be
 * called with tx.lock held.
 */
static void
vr_tx_reclaim(vr_t *vrp)
{
	vr_desc_t	*cp;
	uint32_t	stat0, stat1, freed, dirty;

	ASSERT(mutex_owned(&vrp->tx.lock));

	freed = 0;
	/* dirty = descriptors currently handed to the card */
	dirty = vrp->tx.ndesc - vrp->tx.nfree;
	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
		/*
		 * Sync & get descriptor status.
		 */
		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORKERNEL);
		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);

		/* Still owned by the card; stop here. */
		if ((stat0 & VR_TDES0_OWN) != 0)
			break;

		/*
		 * Do stats for the first descriptor in a chain.
		 */
		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
		if ((stat1 & VR_TDES1_STP) != 0) {
			if ((stat0 & VR_TDES0_TERR) != 0) {
				vrp->stats.ether_stat_macxmt_errors++;
				if ((stat0 & VR_TDES0_UDF) != 0)
					vrp->stats.mac_stat_underflows++;
				if ((stat0 & VR_TDES0_ABT) != 0)
					vrp->stats.ether_stat_ex_collisions++;
				/*
				 * Abort and FIFO underflow stop the MAC.
				 * Packet queueing must be disabled with HD
				 * links because otherwise the MAC is also lost
				 * after a few of these events.
				 */
				VR_PUT8(vrp->acc_reg, VR_CTRL0,
				    VR_CTRL0_DMA_GO);
			} else
				vrp->stats.mac_stat_opackets++;

			/*
			 * NCR is the collision-count field; exactly one
			 * collision counts as a "first" collision,
			 * anything more as "multi".
			 */
			if ((stat0 & VR_TDES0_COL) != 0) {
				if ((stat0 & VR_TDES0_NCR) == 1) {
					vrp->stats.
					    ether_stat_first_collisions++;
				} else {
					vrp->stats.
					    ether_stat_multi_collisions++;
				}
				vrp->stats.mac_stat_collisions +=
				    (stat0 & VR_TDES0_NCR);
			}

			if ((stat0 & VR_TDES0_CRS) != 0)
				vrp->stats.ether_stat_carrier_errors++;

			if ((stat0 & VR_TDES0_OWC) != 0)
				vrp->stats.ether_stat_tx_late_collisions++;
		}
		freed += 1;
		dirty -= 1;
	}
	vrp->tx.cp = cp;

	if (freed > 0) {
		vrp->tx.nfree += freed;
		/* A successful reclaim resets the stall watchdog. */
		vrp->tx.stallticks = 0;
		vrp->stats.txreclaims += 1;
	} else
		vrp->stats.txreclaim0 += 1;
}
1680 1680
1681 1681 /*
1682 1682 * Check TX health every 2 seconds.
1683 1683 */
1684 1684 static void
1685 1685 vr_periodic(void *p)
1686 1686 {
1687 1687 vr_t *vrp;
1688 1688
1689 1689 vrp = (vr_t *)p;
1690 1690 if (vrp->chip.state == CHIPSTATE_RUNNING &&
1691 1691 vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1692 1692 if (mutex_tryenter(&vrp->intrlock) != 0) {
1693 1693 mutex_enter(&vrp->tx.lock);
1694 1694 if (vrp->tx.resched == 1) {
1695 1695 if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1696 1696 /*
1697 1697 * No succesful reclaim in the last n
1698 1698 * intervals. Reset the MAC.
1699 1699 */
1700 1700 vrp->reset = 1;
1701 1701 vr_log(vrp, CE_WARN,
1702 1702 "TX stalled, resetting MAC");
1703 1703 vrp->stats.txstalls++;
1704 1704 } else {
1705 1705 /*
1706 1706 * Increase until we find that we've
1707 1707 * waited long enough.
1708 1708 */
1709 1709 vrp->tx.stallticks += 1;
1710 1710 }
1711 1711 }
1712 1712 mutex_exit(&vrp->tx.lock);
1713 1713 mutex_exit(&vrp->intrlock);
1714 1714 vrp->stats.txchecks++;
1715 1715 }
1716 1716 }
1717 1717 vrp->stats.cyclics++;
1718 1718 }
1719 1719
/*
 * Bring the device to our desired initial state.
 * The delays below are deliberate; shortening them breaks the reset.
 */
static void
vr_reset(vr_t *vrp)
{
	uint32_t	time;

	/*
	 * Reset the MAC
	 * If we don't wait long enough for the forced reset to complete,
	 * MAC loses sync with PHY. Result link up, no link change interrupt
	 * and no data transfer.
	 */
	time = 0;
	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
	do {
		drv_usecwait(100);
		time += 100;
		/*
		 * After 100ms without completion, escalate to a
		 * forced reset via the MISC1 register.
		 */
		if (time >= 100000) {
			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
			delay(drv_usectohz(200000));
		}
	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
	delay(drv_usectohz(10000));

	/*
	 * Load the PROM contents into the MAC again.
	 */
	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
	delay(drv_usectohz(100000));

	/*
	 * Tell the MAC via IO space that we like to use memory space for
	 * accessing registers.
	 */
	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
}
1758 1758
/*
 * Prepare and enable the card (MAC + PHY + PCI).
 * Allocates the RX/TX rings, programs the chip-specific mode bits,
 * FIFO thresholds, CAM filter and ring base addresses, enables
 * interrupts and DMA, and initializes the link.
 * Returns 0 on success or ENOMEM when ring setup fails.
 * Caller must hold oplock.
 */
static int
vr_start(vr_t *vrp)
{
	uint8_t pci_latency, pci_mode;

	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Allocate DMA buffers for RX.
	 */
	if (vr_rxring_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA buffers for TX.
	 */
	if (vr_txring_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
		vr_rxring_fini(vrp);
		return (ENOMEM);
	}

	/*
	 * Changes of the chip specific registers as done in VIA's fet driver
	 * These bits are not in the datasheet and controlled by vr_chip_info.
	 */
	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
		pci_mode |= VR_MODE2_MODE10T;

	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
		pci_mode |= VR_MODE2_PCEROPT;

	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
		pci_mode |= VR_MODE2_MRDPL;
	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);

	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
		pci_mode |= VR_MODE3_MIION;
	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);

	/*
	 * RX: Accept broadcast packets.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);

	/*
	 * RX: Start DMA when there are 256 bytes in the FIFO.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
	    VR_RXCFG_FIFO_THRESHOLD_256);
	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
	    VR_BCR0_RX_FIFO_THRESHOLD_256);

	/*
	 * TX: Start transmit when there are 256 bytes in the FIFO.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
	    VR_TXCFG_FIFO_THRESHOLD_256);
	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
	    VR_BCR1_TX_FIFO_THRESHOLD_256);

	/*
	 * Burst transfers up to 256 bytes.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);

	/*
	 * Disable TX autopolling as it is bad for RX performance
	 * I assume this is because the RX process finds the bus often occupied
	 * by the polling process.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);

	/*
	 * Honor the PCI latency timer if it is reasonable.
	 */
	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
	if (pci_latency != 0 && pci_latency != 0xFF)
		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
	else
		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);

	/*
	 * Ensure that VLAN filtering is off, because this strips the tag.
	 */
	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
	}

	/*
	 * Clear the CAM filter.
	 */
	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);

		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
		drv_usecwait(2);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
	}

	/*
	 * Give the start addresses of the descriptor rings to the DMA
	 * controller on the MAC.
	 */
	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);

	/*
	 * We don't use the additionally invented interrupt ICR1 register,
	 * so make sure these are disabled.
	 */
	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Enable interrupts.
	 */
	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);

	/*
	 * Enable the DMA controller.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);

	/*
	 * Configure the link. Rely on the link change interrupt for getting
	 * the link state into the driver.
	 */
	vr_link_init(vrp);

	/*
	 * Set the software view on the state to 'running'.
	 */
	vrp->chip.state = CHIPSTATE_RUNNING;
	return (0);
}
1911 1911
/*
 * Stop DMA and interrupts.
 * Interrupts are masked before DMA is stopped so no late interrupt
 * is taken for a chip we consider stopped. Caller must hold oplock.
 * Always returns 0.
 */
static int
vr_stop(vr_t *vrp)
{
	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Stop interrupts.
	 */
	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Stop DMA.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);

	/*
	 * Set the software view on the state to stopped.
	 */
	vrp->chip.state = CHIPSTATE_STOPPED;

	/*
	 * Remove DMA buffers from the rings.
	 */
	vr_rxring_fini(vrp);
	vr_txring_fini(vrp);
	return (0);
}
1943 1943
1944 1944 int
1945 1945 vr_mac_start(void *p)
1946 1946 {
1947 1947 vr_t *vrp;
1948 1948 int rc;
1949 1949
1950 1950 vrp = (vr_t *)p;
1951 1951 mutex_enter(&vrp->oplock);
1952 1952
1953 1953 /*
1954 1954 * Reset the card.
1955 1955 */
1956 1956 vr_reset(vrp);
1957 1957
1958 1958 /*
1959 1959 * Prepare and enable the card.
1960 1960 */
1961 1961 rc = vr_start(vrp);
1962 1962
1963 1963 /*
1964 1964 * Configure a cyclic function to keep the card & driver from diverting.
1965 1965 */
1966 1966 vrp->periodic_id =
1967 1967 ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1968 1968
1969 1969 mutex_exit(&vrp->oplock);
1970 1970 return (rc);
1971 1971 }
1972 1972
/*
 * GLDv3 mc_stop(9E) entry point: stop the device and remove the
 * periodic TX health check. The cyclic is deleted after the chip
 * state is set to stopped, so a concurrently firing vr_periodic()
 * sees CHIPSTATE_STOPPED and does nothing.
 */
void
vr_mac_stop(void *p)
{
	vr_t *vrp = p;

	mutex_enter(&vrp->oplock);
	mutex_enter(&vrp->tx.lock);

	/*
	 * Stop the device.
	 */
	(void) vr_stop(vrp);
	mutex_exit(&vrp->tx.lock);

	/*
	 * Remove the cyclic from the system.
	 */
	ddi_periodic_delete(vrp->periodic_id);
	mutex_exit(&vrp->oplock);
}
1993 1993
1994 1994 /*
1995 1995 * Add or remove a multicast address to/from the filter
1996 1996 *
1997 1997 * From the 21143 manual:
1998 1998 * The 21143 can store 512 bits serving as hash bucket heads, and one physical
1999 1999 * 48-bit Ethernet address. Incoming frames with multicast destination
2000 2000 * addresses are subjected to imperfect filtering. Frames with physical
2001 2001 * destination addresses are checked against the single physical address.
2002 2002 * For any incoming frame with a multicast destination address, the 21143
2003 2003 * applies the standard Ethernet cyclic redundancy check (CRC) function to the
2004 2004 * first 6 bytes containing the destination address, then it uses the most
2005 2005 * significant 9 bits of the result as a bit index into the table. If the
2006 2006 * indexed bit is set, the frame is accepted. If the bit is cleared, the frame
2007 2007 * is rejected. This filtering mode is called imperfect because multicast
2008 2008 * frames not addressed to this station may slip through, but it still
2009 2009 * decreases the number of frames that the host can receive.
2010 2010 * I assume the above is also the way the VIA chips work. There's not a single
2011 2011 * word about the multicast filter in the datasheet.
2012 2012 *
2013 2013 * Another word on the CAM filter on VT6105M controllers:
2014 2014 * The VT6105M has content addressable memory which can be used for perfect
2015 2015 * filtering of 32 multicast addresses and a few VLAN id's
2016 2016 *
2017 2017 * I think it works like this: When the controller receives a multicast
2018 2018 * address, it looks up the address using CAM. When it is found, it takes the
2019 2019 * matching cell address (index) and compares this to the bit position in the
2020 2020 * cam mask. If the bit is set, the packet is passed up. If CAM lookup does not
 * result in a match, the packet is filtered using the hash-based filter;
 * if that matches, the packet is passed up, otherwise it is dropped.
2023 2023 * Also, there's not a single word in the datasheet on how this cam is supposed
2024 2024 * to work ...
2025 2025 */
2026 2026 int
2027 2027 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2028 2028 {
2029 2029 vr_t *vrp;
2030 2030 uint32_t crc_index;
2031 2031 int32_t cam_index;
2032 2032 uint32_t cam_mask;
2033 2033 boolean_t use_hash_filter;
2034 2034 ether_addr_t taddr;
2035 2035 uint32_t a;
2036 2036
2037 2037 vrp = (vr_t *)p;
2038 2038 mutex_enter(&vrp->oplock);
2039 2039 mutex_enter(&vrp->intrlock);
2040 2040 use_hash_filter = B_FALSE;
2041 2041
2042 2042 if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2043 2043 /*
2044 2044 * Program the perfect filter.
2045 2045 */
2046 2046 cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2047 2047 if (add == B_TRUE) {
2048 2048 /*
2049 2049 * Get index of first empty slot.
2050 2050 */
2051 2051 bzero(&taddr, sizeof (taddr));
2052 2052 cam_index = vr_cam_index(vrp, taddr);
2053 2053 if (cam_index != -1) {
2054 2054 /*
2055 2055 * Add address at cam_index.
2056 2056 */
2057 2057 cam_mask |= (1 << cam_index);
2058 2058 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2059 2059 VR_CAM_CTRL_ENABLE);
2060 2060 VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2061 2061 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2062 2062 for (a = 0; a < ETHERADDRL; a++) {
2063 2063 VR_PUT8(vrp->acc_reg,
2064 2064 VR_MCAM0 + a, mca[a]);
2065 2065 }
2066 2066 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2067 2067 VR_CAM_CTRL_WRITE);
2068 2068 drv_usecwait(2);
2069 2069 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2070 2070 VR_CAM_CTRL_DONE);
2071 2071 } else {
2072 2072 /*
2073 2073 * No free CAM slots available
2074 2074 * Add mca to the imperfect filter.
2075 2075 */
2076 2076 use_hash_filter = B_TRUE;
2077 2077 }
2078 2078 } else {
2079 2079 /*
2080 2080 * Find the index of the entry to remove
2081 2081 * If the entry was not found (-1), the addition was
2082 2082 * probably done when the table was full.
2083 2083 */
2084 2084 cam_index = vr_cam_index(vrp, mca);
2085 2085 if (cam_index != -1) {
2086 2086 /*
2087 2087 * Disable the corresponding mask bit.
2088 2088 */
2089 2089 cam_mask &= ~(1 << cam_index);
2090 2090 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2091 2091 VR_CAM_CTRL_ENABLE);
2092 2092 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2093 2093 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2094 2094 VR_CAM_CTRL_DONE);
2095 2095 } else {
2096 2096 /*
2097 2097 * The entry to be removed was not found
2098 2098 * The likely cause is that the CAM was full
2099 2099 * during addition. The entry is added to the
2100 2100 * hash filter in that case and needs to be
2101 2101 * removed there too.
2102 2102 */
2103 2103 use_hash_filter = B_TRUE;
2104 2104 }
2105 2105 }
2106 2106 } else {
2107 2107 /*
2108 2108 * No CAM in the MAC, thus we need the hash filter.
2109 2109 */
2110 2110 use_hash_filter = B_TRUE;
2111 2111 }
2112 2112
2113 2113 if (use_hash_filter == B_TRUE) {
2114 2114 /*
2115 2115 * Get the CRC-32 of the multicast address
2116 2116 * The card uses the "MSB first" direction when calculating the
2117 2117 * the CRC. This is odd because ethernet is "LSB first"
2118 2118 * We have to use that "big endian" approach as well.
2119 2119 */
2120 2120 crc_index = ether_crc_be(mca) >> (32 - 6);
2121 2121 if (add == B_TRUE) {
2122 2122 /*
2123 2123 * Turn bit[crc_index] on.
2124 2124 */
2125 2125 if (crc_index < 32)
2126 2126 vrp->mhash0 |= (1 << crc_index);
2127 2127 else
2128 2128 vrp->mhash1 |= (1 << (crc_index - 32));
2129 2129 } else {
2130 2130 /*
2131 2131 * Turn bit[crc_index] off.
2132 2132 */
2133 2133 if (crc_index < 32)
2134 2134 vrp->mhash0 &= ~(0 << crc_index);
2135 2135 else
2136 2136 vrp->mhash1 &= ~(0 << (crc_index - 32));
2137 2137 }
2138 2138
2139 2139 /*
2140 2140 * When not promiscuous write the filter now. When promiscuous,
2141 2141 * the filter is open and will be written when promiscuous ends.
2142 2142 */
2143 2143 if (vrp->promisc == B_FALSE) {
2144 2144 VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2145 2145 VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2146 2146 }
2147 2147 }
2148 2148
2149 2149 /*
2150 2150 * Enable/disable multicast receivements based on mcount.
2151 2151 */
2152 2152 if (add == B_TRUE)
2153 2153 vrp->mcount++;
2154 2154 else if (vrp->mcount != 0)
2155 2155 vrp->mcount --;
2156 2156 if (vrp->mcount != 0)
2157 2157 VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2158 2158 else
2159 2159 VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2160 2160
2161 2161 mutex_exit(&vrp->intrlock);
2162 2162 mutex_exit(&vrp->oplock);
2163 2163 return (0);
2164 2164 }
2165 2165
2166 2166 /*
2167 2167 * Calculate the CRC32 for 6 bytes of multicast address in MSB(it) first order.
2168 2168 * The MSB first order is a bit odd because Ethernet standard is LSB first
2169 2169 */
2170 2170 static uint32_t
2171 2171 ether_crc_be(const uint8_t *data)
2172 2172 {
2173 2173 uint32_t crc = (uint32_t)0xFFFFFFFFU;
2174 2174 uint32_t carry;
2175 2175 uint32_t bit;
2176 2176 uint32_t length;
2177 2177 uint8_t c;
2178 2178
2179 2179 for (length = 0; length < ETHERADDRL; length++) {
2180 2180 c = data[length];
2181 2181 for (bit = 0; bit < 8; bit++) {
2182 2182 carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2183 2183 crc <<= 1;
2184 2184 c >>= 1;
2185 2185 if (carry)
2186 2186 crc = (crc ^ 0x04C11DB6) | carry;
2187 2187 }
2188 2188 }
2189 2189 return (crc);
2190 2190 }
2191 2191
2192 2192
2193 2193 /*
2194 2194 * Return the CAM index (base 0) of maddr or -1 if maddr is not found
2195 2195 * If maddr is 0, return the index of an empty slot in CAM or -1 when no free
2196 2196 * slots available.
2197 2197 */
static int32_t
vr_cam_index(vr_t *vrp, const uint8_t *maddr)
{
	ether_addr_t	taddr;	/* scratch: all-zero addr / CAM readback */
	int32_t		index;
	uint32_t	mask;
	uint32_t	a;

	bzero(&taddr, sizeof (taddr));

	/*
	 * Read the CAM mask from the controller.
	 * Each bit set in the mask marks an in-use CAM slot.
	 */
	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);

	/*
	 * If maddr is 0, return the first unused slot or -1 for no unused.
	 */
	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
		/*
		 * Look for the first unused position in mask.
		 */
		for (index = 0; index < VR_CAM_SZ; index++) {
			if (((mask >> index) & 1) == 0)
				return (index);
		}
		return (-1);
	} else {
		/*
		 * Look for maddr in CAM.
		 */
		for (index = 0; index < VR_CAM_SZ; index++) {
			/* Look at enabled entries only */
			if (((mask >> index) & 1) == 0)
				continue;

			/*
			 * Read the slot back into taddr via the MCAM
			 * registers, then compare against maddr. The 2us
			 * wait gives the hardware time to complete the read.
			 */
			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
			drv_usecwait(2);
			for (a = 0; a < ETHERADDRL; a++)
				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
				return (index);
		}
	}
	return (-1);
}
2247 2247
2248 2248 /*
2249 2249 * Set promiscuous mode on or off.
2250 2250 */
2251 2251 int
2252 2252 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2253 2253 {
2254 2254 vr_t *vrp;
2255 2255 uint8_t rxcfg;
2256 2256
2257 2257 vrp = (vr_t *)p;
2258 2258
2259 2259 mutex_enter(&vrp->intrlock);
2260 2260 mutex_enter(&vrp->oplock);
2261 2261 mutex_enter(&vrp->tx.lock);
2262 2262
2263 2263 /*
2264 2264 * Get current receive configuration.
2265 2265 */
2266 2266 rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2267 2267 vrp->promisc = promiscflag;
2268 2268
2269 2269 if (promiscflag == B_TRUE) {
2270 2270 /*
2271 2271 * Enable promiscuous mode and open the multicast filter.
2272 2272 */
2273 2273 rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2274 2274 VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2275 2275 VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2276 2276 } else {
2277 2277 /*
2278 2278 * Restore the multicast filter and disable promiscuous mode.
2279 2279 */
2280 2280 VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2281 2281 VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2282 2282 rxcfg &= ~VR_RXCFG_PROMISC;
2283 2283 if (vrp->mcount != 0)
2284 2284 rxcfg |= VR_RXCFG_ACCEPTMULTI;
2285 2285 }
2286 2286 VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2287 2287 mutex_exit(&vrp->tx.lock);
2288 2288 mutex_exit(&vrp->oplock);
2289 2289 mutex_exit(&vrp->intrlock);
2290 2290 return (0);
2291 2291 }
2292 2292
/*
 * GLDv3 statistics entry point: store the value of statistic 'stat' in *val.
 * Returns 0 on success or ENOTSUP for statistics this driver does not
 * maintain. Values are taken from the cached MII registers and the
 * software-maintained counters in vrp->stats.
 */
int
vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
{
	vr_t		*vrp;
	uint64_t	v;

	vrp = (void *) arg;

	switch (stat) {
	default:
		return (ENOTSUP);

	case ETHER_STAT_ADV_CAP_100T4:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		/* Asymmetric pause is never advertised by this driver */
		v = 0;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		v = vrp->stats.ether_stat_align_errors;
		break;

	case ETHER_STAT_CAP_100T4:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
		break;

	case ETHER_STAT_CAP_100HDX:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
		break;

	case ETHER_STAT_CAP_10FDX:
		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
		break;

	case ETHER_STAT_CAP_10HDX:
		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
		break;

	case ETHER_STAT_CAP_PAUSE:
		/* All supported chips can receive pause frames */
		v = 1;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		/*
		 * Number of times carrier was lost or never detected on a
		 * transmission attempt.
		 */
		v = vrp->stats.ether_stat_carrier_errors;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		return (ENOTSUP);

	case ETHER_STAT_DEFER_XMTS:
		/*
		 * Packets without collisions where first transmit attempt was
		 * delayed because the medium was busy.
		 */
		v = vrp->stats.ether_stat_defer_xmts;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		/*
		 * Frames where excess collisions occurred on transmit, causing
		 * transmit failure.
		 */
		v = vrp->stats.ether_stat_ex_collisions;
		break;

	case ETHER_STAT_FCS_ERRORS:
		/*
		 * Packets received with CRC errors.
		 */
		v = vrp->stats.ether_stat_fcs_errors;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		/*
		 * Packets successfully transmitted with exactly one collision.
		 */
		v = vrp->stats.ether_stat_first_collisions;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		v = vrp->chip.link.duplex;
		break;

	case ETHER_STAT_LINK_PAUSE:
		v = vrp->chip.link.flowctrl;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		/* Fast-ethernet hardware: gigabit never available */
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
		break;

	case ETHER_STAT_LP_REMFAULT:
		/*
		 * NOTE(review): this reads the local status register's
		 * remote-fault bit rather than the link partner ability
		 * register (lpable) — confirm this is the intended source.
		 */
		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		/*
		 * Packets received with MAC errors, except align_errors,
		 * fcs_errors, and toolong_errors.
		 */
		v = vrp->stats.ether_stat_macrcv_errors;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		/*
		 * Packets encountering transmit MAC failures, except carrier
		 * and collision failures.
		 */
		v = vrp->stats.ether_stat_macxmt_errors;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		/*
		 * Packets successfully transmitted with multiple collisions.
		 */
		v = vrp->stats.ether_stat_multi_collisions;
		break;

	case ETHER_STAT_SQE_ERRORS:
		/*
		 * Number of times signal quality error was reported
		 * This one is reported by the PHY.
		 */
		return (ENOTSUP);

	case ETHER_STAT_TOOLONG_ERRORS:
		/*
		 * Packets received larger than the maximum permitted length.
		 */
		v = vrp->stats.ether_stat_toolong_errors;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		v = vrp->stats.ether_stat_tooshort_errors;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		/*
		 * Number of times a transmit collision occurred late
		 * (after 512 bit times).
		 */
		v = vrp->stats.ether_stat_tx_late_collisions;
		break;

	case ETHER_STAT_XCVR_ADDR:
		/*
		 * MII address in the 0 to 31 range of the physical layer
		 * device in use for a given Ethernet device.
		 */
		v = vrp->chip.phyaddr;
		break;

	case ETHER_STAT_XCVR_ID:
		/*
		 * MII transceiver manufacturer and device ID.
		 */
		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
		break;

	case ETHER_STAT_XCVR_INUSE:
		v = vrp->chip.link.mau;
		break;

	case MAC_STAT_BRDCSTRCV:
		v = vrp->stats.mac_stat_brdcstrcv;
		break;

	case MAC_STAT_BRDCSTXMT:
		v = vrp->stats.mac_stat_brdcstxmt;
		break;

	case MAC_STAT_MULTIXMT:
		v = vrp->stats.mac_stat_multixmt;
		break;

	case MAC_STAT_COLLISIONS:
		v = vrp->stats.mac_stat_collisions;
		break;

	case MAC_STAT_IERRORS:
		v = vrp->stats.mac_stat_ierrors;
		break;

	case MAC_STAT_IFSPEED:
		/* Link speed in bits per second */
		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
			v = 100 * 1000 * 1000;
		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
			v = 10 * 1000 * 1000;
		else
			v = 0;
		break;

	case MAC_STAT_IPACKETS:
		v = vrp->stats.mac_stat_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		v = vrp->stats.mac_stat_multircv;
		break;

	case MAC_STAT_NORCVBUF:
		/*
		 * Fold the hardware missed-packet tally into the software
		 * counter and reset the (clear-on-write) tally register.
		 */
		vrp->stats.mac_stat_norcvbuf +=
		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
		v = vrp->stats.mac_stat_norcvbuf;
		break;

	case MAC_STAT_NOXMTBUF:
		v = vrp->stats.mac_stat_noxmtbuf;
		break;

	case MAC_STAT_OBYTES:
		v = vrp->stats.mac_stat_obytes;
		break;

	case MAC_STAT_OERRORS:
		/* Aggregate of all transmit-side error counters */
		v = vrp->stats.ether_stat_macxmt_errors +
		    vrp->stats.mac_stat_underflows +
		    vrp->stats.ether_stat_align_errors +
		    vrp->stats.ether_stat_carrier_errors +
		    vrp->stats.ether_stat_fcs_errors;
		break;

	case MAC_STAT_OPACKETS:
		v = vrp->stats.mac_stat_opackets;
		break;

	case MAC_STAT_RBYTES:
		v = vrp->stats.mac_stat_rbytes;
		break;

	case MAC_STAT_UNKNOWNS:
		/*
		 * Isn't this something for the MAC layer to maintain?
		 */
		return (ENOTSUP);

	case MAC_STAT_UNDERFLOWS:
		v = vrp->stats.mac_stat_underflows;
		break;

	case MAC_STAT_OVERFLOWS:
		v = vrp->stats.mac_stat_overflows;
		break;
	}
	*val = v;
	return (0);
}
2636 2636
2637 2637 int
2638 2638 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2639 2639 {
2640 2640 vr_t *vrp;
2641 2641 int i;
2642 2642
2643 2643 vrp = (vr_t *)p;
2644 2644 mutex_enter(&vrp->oplock);
2645 2645 mutex_enter(&vrp->intrlock);
2646 2646
2647 2647 /*
2648 2648 * Set a new station address.
2649 2649 */
2650 2650 for (i = 0; i < ETHERADDRL; i++)
2651 2651 VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2652 2652
2653 2653 mutex_exit(&vrp->intrlock);
2654 2654 mutex_exit(&vrp->oplock);
2655 2655 return (0);
2656 2656 }
2657 2657
2658 2658 /*
2659 2659 * Configure the ethernet link according to param and chip.mii.
2660 2660 */
static void
vr_link_init(vr_t *vrp)
{
	ASSERT(mutex_owned(&vrp->oplock));
	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
		/*
		 * If we do autoneg, ensure restart autoneg is ON.
		 */
		vrp->chip.mii.control |= MII_CONTROL_RSAN;

		/*
		 * The advertisements are prepared by param_init.
		 */
		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
	} else {
		/*
		 * If we don't autoneg, we need speed, duplex and flowcontrol
		 * to configure the link. However, dladm doesn't allow changes
		 * to speed and duplex (readonly). The way this is solved
		 * (ahem) is to select the highest enabled combination
		 * Speed and duplex should be r/w when autoneg is off.
		 */
		if ((vrp->param.anadv_en &
		    MII_ABILITY_100BASE_TX_FD) != 0) {
			vrp->chip.mii.control |= MII_CONTROL_100MB;
			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
		} else if ((vrp->param.anadv_en &
		    MII_ABILITY_100BASE_TX) != 0) {
			vrp->chip.mii.control |= MII_CONTROL_100MB;
			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
		} else if ((vrp->param.anadv_en &
		    MII_ABILITY_10BASE_T_FD) != 0) {
			/* 10 Mbit full duplex */
			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
		} else {
			/* Lowest common denominator: 10 Mbit half duplex */
			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
		}
	}
	/*
	 * Write the control register.
	 */
	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);

	/*
	 * With autoneg off we cannot rely on the link_change interrupt
	 * for getting the status into the driver.
	 */
	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
		vr_link_state(vrp);
		mac_link_update(vrp->machdl,
		    (link_state_t)vrp->chip.link.state);
	}
}
2715 2715
2716 2716 /*
2717 2717 * Get link state in the driver and configure the MAC accordingly.
2718 2718 */
static void
vr_link_state(vr_t *vrp)
{
	uint16_t	mask;

	ASSERT(mutex_owned(&vrp->oplock));

	/* Refresh the cached copies of the relevant MII registers */
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);

	/*
	 * If we did autoneg, deduce the link type/speed by selecting the
	 * highest common denominator.
	 */
	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100T4;
		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_10;
		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_10;
		} else {
			/* No common ability; link parameters unknown */
			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
			vrp->chip.link.mau = VR_MAU_UNKNOWN;
		}

		/*
		 * Did we negotiate pause?
		 * Pause is only meaningful on full-duplex links.
		 */
		if ((mask & MII_ABILITY_PAUSE) != 0 &&
		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
		else
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;

		/*
		 * Did either one detect an AN fault?
		 */
		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
			vr_log(vrp, CE_WARN,
			    "AN remote fault reported by LP.");

		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
	} else {
		/*
		 * We didn't autoneg
		 * The link type is defined by the control register.
		 */
		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.mau = VR_MAU_100X;
		} else {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.mau = VR_MAU_10;
		}

		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
		else {
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			/*
			 * No pause on HDX links.
			 */
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
		}
	}

	/*
	 * Set the duplex mode on the MAC according to that of the PHY.
	 */
	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Enable packet queueing on FDX links.
		 */
		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	} else {
		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Disable packet queueing on HDX links. With queueing enabled,
		 * this MAC gets lost after a TX abort (too many collisions).
		 */
		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	}

	/*
	 * Set pause options on the MAC.
	 */
	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
		/*
		 * All of our MAC's can receive pause frames.
		 */
		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);

		/*
		 * VT6105 and above can transmit pause frames.
		 */
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			/*
			 * Set the number of available receive descriptors
			 * Non-zero values written to this register are added
			 * to the register's contents. Careful: Writing zero
			 * clears the register and thus causes a (long) pause
			 * request.
			 */
			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
			    MIN(vrp->rx.ndesc, 0xFF) -
			    VR_GET8(vrp->acc_reg,
			    VR_FCR0_RXBUFCOUNT));

			/*
			 * Request pause when we have 4 descs left.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);

			/*
			 * Cancel the pause when there are 24 descriptors again.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);

			/*
			 * Request a pause of FFFF bit-times. This long pause
			 * is cancelled when the high watermark is reached.
			 */
			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);

			/*
			 * Enable flow control on the MAC.
			 */
			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
		}
	} else {
		/*
		 * Turn flow control OFF.
		 */
		VR_CLRBIT8(vrp->acc_reg,
		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
			    VR_FCR1_XONXOFF_EN);
		}
	}

	/*
	 * Set link state.
	 */
	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
		vrp->chip.link.state = VR_LINK_STATE_UP;
	else
		vrp->chip.link.state = VR_LINK_STATE_DOWN;
}
2896 2896
2897 2897 /*
2898 2898 * The PHY is automatically polled by the MAC once per 1024 MD clock cycles
2899 2899 * MD is clocked once per 960ns so polling happens about every 1M ns, some
2900 2900 * 1000 times per second
2901 2901 * This polling process is required for the functionality of the link change
2902 2902 * interrupt. Polling process must be disabled in order to access PHY registers
2903 2903 * using MDIO
2904 2904 *
2905 2905 * Turn off PHY polling so that the PHY registers can be accessed.
2906 2906 */
2907 2907 static void
2908 2908 vr_phy_autopoll_disable(vr_t *vrp)
2909 2909 {
2910 2910 uint32_t time;
2911 2911 uint8_t miicmd, miiaddr;
2912 2912
2913 2913 /*
2914 2914 * Special procedure to stop the autopolling.
2915 2915 */
2916 2916 if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
2917 2917 /*
2918 2918 * If polling is enabled.
2919 2919 */
2920 2920 miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
2921 2921 if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
2922 2922 /*
2923 2923 * Wait for the end of a cycle (mdone set).
2924 2924 */
2925 2925 time = 0;
2926 2926 do {
2927 2927 drv_usecwait(10);
2928 2928 if (time >= VR_MMI_WAITMAX) {
2929 2929 vr_log(vrp, CE_WARN,
2930 2930 "Timeout in "
2931 2931 "disable MII polling");
2932 2932 break;
2933 2933 }
2934 2934 time += VR_MMI_WAITINCR;
2935 2935 miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2936 2936 } while ((miiaddr & VR_MIIADDR_MDONE) == 0);
2937 2937 }
2938 2938 /*
2939 2939 * Once paused, we can disable autopolling.
2940 2940 */
2941 2941 VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2942 2942 } else {
2943 2943 /*
2944 2944 * Turn off MII polling.
2945 2945 */
2946 2946 VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2947 2947
2948 2948 /*
2949 2949 * Wait for MIDLE in MII address register.
2950 2950 */
2951 2951 time = 0;
2952 2952 do {
2953 2953 drv_usecwait(VR_MMI_WAITINCR);
2954 2954 if (time >= VR_MMI_WAITMAX) {
2955 2955 vr_log(vrp, CE_WARN,
2956 2956 "Timeout in disable MII polling");
2957 2957 break;
2958 2958 }
2959 2959 time += VR_MMI_WAITINCR;
2960 2960 miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2961 2961 } while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
2962 2962 }
2963 2963 }
2964 2964
2965 2965 /*
2966 2966 * Turn on PHY polling. PHY's registers cannot be accessed.
2967 2967 */
static void
vr_phy_autopoll_enable(vr_t *vrp)
{
	uint32_t	time;

	/*
	 * Clear any pending MII command, select MII_STATUS as the register
	 * to autopoll and start automatic polling.
	 */
	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);

	/*
	 * Wait for the polling process to finish.
	 * Bounded by VR_MMI_WAITMAX; a timeout is logged but not fatal.
	 */
	time = 0;
	do {
		drv_usecwait(VR_MMI_WAITINCR);
		if (time >= VR_MMI_WAITMAX) {
			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
			break;
		}
		time += VR_MMI_WAITINCR;
	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);

	/*
	 * Initiate a polling.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
}
2995 2995
2996 2996 /*
2997 2997 * Read a register from the PHY using MDIO.
2998 2998 */
static void
vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
{
	uint32_t	time;

	/*
	 * MDIO access requires the autopoll engine to be stopped first;
	 * it is re-enabled before returning.
	 */
	vr_phy_autopoll_disable(vrp);

	/*
	 * Write the register number to the lower 5 bits of the MII address
	 * register.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);

	/*
	 * Write a READ command to the MII control register
	 * This bit will be cleared when the read is finished.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);

	/*
	 * Wait until the read is done, bounded by VR_MMI_WAITMAX; on
	 * timeout the stale contents of VR_MIIDATA are still returned.
	 */
	time = 0;
	do {
		drv_usecwait(VR_MMI_WAITINCR);
		if (time >= VR_MMI_WAITMAX) {
			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
			break;
		}
		time += VR_MMI_WAITINCR;
	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);

	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
	vr_phy_autopoll_enable(vrp);
}
3034 3034
3035 3035 /*
3036 3036 * Write to a PHY's register.
3037 3037 */
static void
vr_phy_write(vr_t *vrp, int offset, uint16_t value)
{
	uint32_t	time;

	/*
	 * MDIO access requires the autopoll engine to be stopped first;
	 * it is re-enabled before returning.
	 */
	vr_phy_autopoll_disable(vrp);

	/*
	 * Write the register number to the MII address register.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);

	/*
	 * Write the value to the data register.
	 */
	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);

	/*
	 * Issue the WRITE command to the command register.
	 * This bit will be cleared when the write is finished.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);

	/* Wait for completion, bounded by VR_MMI_WAITMAX microseconds. */
	time = 0;
	do {
		drv_usecwait(VR_MMI_WAITINCR);
		if (time >= VR_MMI_WAITMAX) {
			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
			break;
		}
		time += VR_MMI_WAITINCR;
	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
	vr_phy_autopoll_enable(vrp);
}
3072 3072
3073 3073 /*
3074 3074 * Initialize and install some private kstats.
3075 3075 */
/*
 * Descriptor for one private kstat: its name and KSTAT_DATA_* type.
 */
typedef struct {
	char *name;	/* kstat name as shown by kstat(1M) */
	uchar_t type;	/* KSTAT_DATA_INT32 or KSTAT_DATA_INT64 */
} vr_kstat_t;
3080 3080
/*
 * Table of private driver statistics.
 * The order of the entries here MUST match the order of the assignments
 * in vr_update_kstats(), which fills the named kstats positionally.
 */
static const vr_kstat_t vr_driver_stats [] = {
	{"allocbfail", KSTAT_DATA_INT32},
	{"intr_claimed", KSTAT_DATA_INT64},
	{"intr_unclaimed", KSTAT_DATA_INT64},
	{"linkchanges", KSTAT_DATA_INT64},
	{"txnfree", KSTAT_DATA_INT32},
	{"txstalls", KSTAT_DATA_INT32},
	{"resets", KSTAT_DATA_INT32},
	{"txreclaims", KSTAT_DATA_INT64},
	{"txreclaim0", KSTAT_DATA_INT64},
	{"cyclics", KSTAT_DATA_INT64},
	{"txchecks", KSTAT_DATA_INT64},
};
3094 3094
3095 3095 static void
3096 3096 vr_kstats_init(vr_t *vrp)
3097 3097 {
3098 3098 kstat_t *ksp;
3099 3099 struct kstat_named *knp;
3100 3100 int i;
3101 3101 int nstats;
3102 3102
3103 3103 nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3104 3104
3105 3105 ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3106 3106 "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3107 3107
3108 3108 if (ksp == NULL)
3109 3109 vr_log(vrp, CE_WARN, "kstat_create failed");
3110 3110
3111 3111 ksp->ks_update = vr_update_kstats;
3112 3112 ksp->ks_private = (void*) vrp;
3113 3113 knp = ksp->ks_data;
3114 3114
3115 3115 for (i = 0; i < nstats; i++, knp++) {
3116 3116 kstat_named_init(knp, vr_driver_stats[i].name,
3117 3117 vr_driver_stats[i].type);
3118 3118 }
3119 3119 kstat_install(ksp);
3120 3120 vrp->ksp = ksp;
3121 3121 }
3122 3122
3123 3123 static int
3124 3124 vr_update_kstats(kstat_t *ksp, int access)
3125 3125 {
3126 3126 vr_t *vrp;
3127 3127 struct kstat_named *knp;
3128 3128
3129 3129 vrp = (vr_t *)ksp->ks_private;
3130 3130 knp = ksp->ks_data;
3131 3131
3132 3132 if (access != KSTAT_READ)
3133 3133 return (EACCES);
3134 3134
3135 3135 (knp++)->value.ui32 = vrp->stats.allocbfail;
3136 3136 (knp++)->value.ui64 = vrp->stats.intr_claimed;
3137 3137 (knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3138 3138 (knp++)->value.ui64 = vrp->stats.linkchanges;
3139 3139 (knp++)->value.ui32 = vrp->tx.nfree;
3140 3140 (knp++)->value.ui32 = vrp->stats.txstalls;
3141 3141 (knp++)->value.ui32 = vrp->stats.resets;
3142 3142 (knp++)->value.ui64 = vrp->stats.txreclaims;
3143 3143 (knp++)->value.ui64 = vrp->stats.txreclaim0;
3144 3144 (knp++)->value.ui64 = vrp->stats.cyclics;
3145 3145 (knp++)->value.ui64 = vrp->stats.txchecks;
3146 3146 return (0);
3147 3147 }
3148 3148
3149 3149 /*
3150 3150 * Remove 'private' kstats.
3151 3151 */
3152 3152 static void
3153 3153 vr_remove_kstats(vr_t *vrp)
3154 3154 {
3155 3155 if (vrp->ksp != NULL)
3156 3156 kstat_delete(vrp->ksp);
3157 3157 }
3158 3158
3159 3159 /*
3160 3160 * Get a property of the device/driver
3161 3161 * Remarks:
3162 3162 * - pr_val is always an integer of size pr_valsize
3163 3163 * - ENABLED (EN) is what is configured via dladm
3164 3164 * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3165 3165 * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3166 3166 * flag in pr_flags instead of MAC_PROP_DEFAULT_)
3167 3167 * - perm is the permission printed on ndd -get /.. \?
3168 3168 */
int
vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	vr_t		*vrp;
	uint32_t	err;
	uint64_t	val;

	/* Since we have no private properties */
	_NOTE(ARGUNUSED(pr_name))

	err = 0;
	vrp = (vr_t *)arg;
	switch (pr_num) {
		/* This is a 10/100 device; gigabit caps are always zero. */
		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_EN_1000FDX_CAP:
		case MAC_PROP_EN_1000HDX_CAP:
			val = 0;
			break;

		/* ADV_* caps come from the current autoneg advertisement. */
		case MAC_PROP_ADV_100FDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_TX_FD) != 0;
			break;

		case MAC_PROP_ADV_100HDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_TX) != 0;
			break;

		case MAC_PROP_ADV_100T4_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_T4) != 0;
			break;

		case MAC_PROP_ADV_10FDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_10BASE_T_FD) != 0;
			break;

		case MAC_PROP_ADV_10HDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_10BASE_T) != 0;
			break;

		case MAC_PROP_AUTONEG:
			val = (vrp->chip.mii.control &
			    MII_CONTROL_ANE) != 0;
			break;

		case MAC_PROP_DUPLEX:
			val = vrp->chip.link.duplex;
			break;

		/* EN_* caps come from what the administrator enabled. */
		case MAC_PROP_EN_100FDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_TX_FD) != 0;
			break;

		case MAC_PROP_EN_100HDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_TX) != 0;
			break;

		case MAC_PROP_EN_100T4_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_T4) != 0;
			break;

		case MAC_PROP_EN_10FDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_10BASE_T_FD) != 0;
			break;

		case MAC_PROP_EN_10HDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_10BASE_T) != 0;
			break;

		case MAC_PROP_EN_AUTONEG:
			val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
			break;

		case MAC_PROP_FLOWCTRL:
			val = vrp->chip.link.flowctrl;
			break;

		case MAC_PROP_MTU:
			val = vrp->param.mtu;
			break;

		/* Report speed in bits per second, 0 when link is down. */
		case MAC_PROP_SPEED:
			if (vrp->chip.link.speed ==
			    VR_LINK_SPEED_100MBS)
				val = 100 * 1000 * 1000;
			else if (vrp->chip.link.speed ==
			    VR_LINK_SPEED_10MBS)
				val = 10 * 1000 * 1000;
			else
				val = 0;
			break;

		case MAC_PROP_STATUS:
			val = vrp->chip.link.state;
			break;

		default:
			err = ENOTSUP;
			break;
	}

	/*
	 * Copy the value out in whatever integer width the caller's
	 * buffer has; any other size is rejected with EINVAL.
	 */
	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
		if (pr_valsize == sizeof (uint64_t))
			*(uint64_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint32_t))
			*(uint32_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint16_t))
			*(uint16_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint8_t))
			*(uint8_t *)pr_val = val;
		else
			err = EINVAL;
	}
	return (err);
}
3295 3295
/*
 * Report permission and default value information for MAC properties.
 * For the EN_* capability properties the default is whatever the PHY
 * reports it is capable of (MII status register).
 */
void
vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	vr_t *vrp = (vr_t *)arg;
	uint8_t val, perm;

	/* Since we have no private properties */
	_NOTE(ARGUNUSED(pr_name))

	switch (pr_num) {
		/* Advertised caps and the gigabit caps are read-only. */
		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_EN_1000FDX_CAP:
		case MAC_PROP_EN_1000HDX_CAP:
		case MAC_PROP_ADV_100FDX_CAP:
		case MAC_PROP_ADV_100HDX_CAP:
		case MAC_PROP_ADV_100T4_CAP:
		case MAC_PROP_ADV_10FDX_CAP:
		case MAC_PROP_ADV_10HDX_CAP:
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
			return;

		case MAC_PROP_EN_100FDX_CAP:
			val = (vrp->chip.mii.status &
			    MII_STATUS_100_BASEX_FD) != 0;
			break;

		case MAC_PROP_EN_100HDX_CAP:
			val = (vrp->chip.mii.status &
			    MII_STATUS_100_BASEX) != 0;
			break;

		case MAC_PROP_EN_100T4_CAP:
			val = (vrp->chip.mii.status &
			    MII_STATUS_100_BASE_T4) != 0;
			break;

		case MAC_PROP_EN_10FDX_CAP:
			val = (vrp->chip.mii.status &
			    MII_STATUS_10_FD) != 0;
			break;

		case MAC_PROP_EN_10HDX_CAP:
			val = (vrp->chip.mii.status &
			    MII_STATUS_10) != 0;
			break;

		case MAC_PROP_AUTONEG:
		case MAC_PROP_EN_AUTONEG:
			val = (vrp->chip.mii.status &
			    MII_STATUS_CANAUTONEG) != 0;
			break;

		case MAC_PROP_FLOWCTRL:
			mac_prop_info_set_default_link_flowctrl(prh,
			    LINK_FLOWCTRL_BI);
			return;

		/* MTU is fixed at ETHERMTU (range has a single value). */
		case MAC_PROP_MTU:
			mac_prop_info_set_range_uint32(prh,
			    ETHERMTU, ETHERMTU);
			return;

		case MAC_PROP_DUPLEX:
			/*
			 * Writability depends on autoneg.
			 */
			perm = ((vrp->chip.mii.control &
			    MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
			    MAC_PROP_PERM_READ;
			mac_prop_info_set_perm(prh, perm);

			if (perm == MAC_PROP_PERM_RW) {
				mac_prop_info_set_default_uint8(prh,
				    VR_LINK_DUPLEX_FULL);
			}
			return;

		case MAC_PROP_SPEED:
			/* As with duplex: only writable with autoneg off. */
			perm = ((vrp->chip.mii.control &
			    MII_CONTROL_ANE) == 0) ?
			    MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
			mac_prop_info_set_perm(prh, perm);

			if (perm == MAC_PROP_PERM_RW) {
				mac_prop_info_set_default_uint64(prh,
				    100 * 1000 * 1000);
			}
			return;

		case MAC_PROP_STATUS:
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
			return;

		default:
			return;
	}

	mac_prop_info_set_default_uint8(prh, val);
}
3397 3397
3398 3398 /*
3399 3399 * Set a property of the device.
3400 3400 */
3401 3401 int
3402 3402 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3403 3403 uint_t pr_valsize, const void *pr_val)
3404 3404 {
3405 3405 vr_t *vrp;
3406 3406 uint32_t err;
3407 3407 uint64_t val;
3408 3408
3409 3409 /* Since we have no private properties */
3410 3410 _NOTE(ARGUNUSED(pr_name))
3411 3411
3412 3412 err = 0;
3413 3413 vrp = (vr_t *)arg;
3414 3414 mutex_enter(&vrp->oplock);
3415 3415
3416 3416 /*
3417 3417 * The current set of public property values are passed as integers
3418 3418 * Private properties are passed as strings in pr_val length pr_valsize.
3419 3419 */
3420 3420 if (pr_num != MAC_PROP_PRIVATE) {
3421 3421 if (pr_valsize == sizeof (uint64_t))
3422 3422 val = *(uint64_t *)pr_val;
3423 3423 else if (pr_valsize == sizeof (uint32_t))
3424 3424 val = *(uint32_t *)pr_val;
3425 3425 else if (pr_valsize == sizeof (uint16_t))
3426 3426 val = *(uint32_t *)pr_val;
3427 3427 else if (pr_valsize == sizeof (uint8_t))
3428 3428 val = *(uint8_t *)pr_val;
3429 3429 else {
3430 3430 mutex_exit(&vrp->oplock);
3431 3431 return (EINVAL);
3432 3432 }
3433 3433 }
3434 3434
3435 3435 switch (pr_num) {
3436 3436 case MAC_PROP_DUPLEX:
3437 3437 if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3438 3438 if (val == LINK_DUPLEX_FULL)
3439 3439 vrp->chip.mii.control |=
3440 3440 MII_CONTROL_FDUPLEX;
3441 3441 else if (val == LINK_DUPLEX_HALF)
3442 3442 vrp->chip.mii.control &=
3443 3443 ~MII_CONTROL_FDUPLEX;
3444 3444 else
3445 3445 err = EINVAL;
3446 3446 } else
3447 3447 err = EINVAL;
3448 3448 break;
3449 3449
3450 3450 case MAC_PROP_EN_100FDX_CAP:
3451 3451 if (val == 0)
3452 3452 vrp->param.anadv_en &=
3453 3453 ~MII_ABILITY_100BASE_TX_FD;
3454 3454 else
3455 3455 vrp->param.anadv_en |=
3456 3456 MII_ABILITY_100BASE_TX_FD;
3457 3457 break;
3458 3458
3459 3459 case MAC_PROP_EN_100HDX_CAP:
3460 3460 if (val == 0)
3461 3461 vrp->param.anadv_en &=
3462 3462 ~MII_ABILITY_100BASE_TX;
3463 3463 else
3464 3464 vrp->param.anadv_en |=
3465 3465 MII_ABILITY_100BASE_TX;
3466 3466 break;
3467 3467
3468 3468 case MAC_PROP_EN_100T4_CAP:
3469 3469 if (val == 0)
3470 3470 vrp->param.anadv_en &=
3471 3471 ~MII_ABILITY_100BASE_T4;
3472 3472 else
3473 3473 vrp->param.anadv_en |=
3474 3474 MII_ABILITY_100BASE_T4;
3475 3475 break;
3476 3476
3477 3477 case MAC_PROP_EN_10FDX_CAP:
3478 3478 if (val == 0)
3479 3479 vrp->param.anadv_en &=
3480 3480 ~MII_ABILITY_10BASE_T_FD;
3481 3481 else
3482 3482 vrp->param.anadv_en |=
3483 3483 MII_ABILITY_10BASE_T_FD;
3484 3484 break;
3485 3485
3486 3486 case MAC_PROP_EN_10HDX_CAP:
3487 3487 if (val == 0)
3488 3488 vrp->param.anadv_en &=
3489 3489 ~MII_ABILITY_10BASE_T;
3490 3490 else
3491 3491 vrp->param.anadv_en |=
3492 3492 MII_ABILITY_10BASE_T;
3493 3493 break;
3494 3494
3495 3495 case MAC_PROP_AUTONEG:
3496 3496 case MAC_PROP_EN_AUTONEG:
3497 3497 if (val == 0) {
3498 3498 vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3499 3499 vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3500 3500 } else {
3501 3501 vrp->param.an_en = VR_LINK_AUTONEG_ON;
3502 3502 if ((vrp->chip.mii.status &
3503 3503 MII_STATUS_CANAUTONEG) != 0)
3504 3504 vrp->chip.mii.control |=
3505 3505 MII_CONTROL_ANE;
3506 3506 else
3507 3507 err = EINVAL;
3508 3508 }
3509 3509 break;
3510 3510
3511 3511 case MAC_PROP_FLOWCTRL:
3512 3512 if (val == LINK_FLOWCTRL_NONE)
3513 3513 vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3514 3514 else if (val == LINK_FLOWCTRL_BI)
3515 3515 vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3516 3516 else
3517 3517 err = EINVAL;
3518 3518 break;
3519 3519
3520 3520 case MAC_PROP_MTU:
3521 3521 if (val >= ETHERMIN && val <= ETHERMTU)
3522 3522 vrp->param.mtu = (uint32_t)val;
3523 3523 else
3524 3524 err = EINVAL;
3525 3525 break;
3526 3526
3527 3527 case MAC_PROP_SPEED:
3528 3528 if (val == 10 * 1000 * 1000)
3529 3529 vrp->chip.link.speed =
3530 3530 VR_LINK_SPEED_10MBS;
3531 3531 else if (val == 100 * 1000 * 1000)
3532 3532 vrp->chip.link.speed =
3533 3533 VR_LINK_SPEED_100MBS;
3534 3534 else
3535 3535 err = EINVAL;
3536 3536 break;
3537 3537
3538 3538 default:
3539 3539 err = ENOTSUP;
3540 3540 break;
3541 3541 }
3542 3542 if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3543 3543 vrp->chip.mii.anadv = vrp->param.anadv_en &
3544 3544 (vrp->param.an_phymask & vrp->param.an_macmask);
3545 3545 vr_link_init(vrp);
3546 3546 }
3547 3547 mutex_exit(&vrp->oplock);
3548 3548 return (err);
3549 3549 }
3550 3550
3551 3551
3552 3552 /*
3553 3553 * Logging and debug functions.
3554 3554 */
/*
 * Shared scratch state for message formatting in vr_vprt().
 * The fields are only valid while prtdata.mutex is held; vr_log() and
 * vr_debug() take the mutex before filling them in.
 */
static struct {
	kmutex_t mutex[1];	/* serializes use of the fields below */
	const char *ifname;	/* interface name prefixed to messages */
	const char *fmt;	/* cmn_err() format, e.g. "!%s: %s" */
	int level;		/* cmn_err() severity (CE_*) */
} prtdata;
3561 3561
/*
 * Format the message into a local buffer (silently truncated at 512
 * bytes) and emit it via cmn_err() using the level/format/ifname that
 * the caller stored in prtdata under prtdata.mutex.
 */
static void
vr_vprt(const char *fmt, va_list args)
{
	char	buf[512];

	ASSERT(mutex_owned(prtdata.mutex));
	(void) vsnprintf(buf, sizeof (buf), fmt, args);
	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
}
3571 3571
3572 3572 static void
3573 3573 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3574 3574 {
3575 3575 va_list args;
3576 3576
3577 3577 mutex_enter(prtdata.mutex);
3578 3578 prtdata.ifname = vrp->ifname;
3579 3579 prtdata.fmt = "!%s: %s";
3580 3580 prtdata.level = level;
3581 3581
3582 3582 va_start(args, fmt);
3583 3583 vr_vprt(fmt, args);
3584 3584 va_end(args);
3585 3585
3586 3586 mutex_exit(prtdata.mutex);
3587 3587 }
3588 3588
3589 3589 #if defined(DEBUG)
/*
 * Second half of the vr_debug()/vr_prt() pair: print the message and
 * RELEASE prtdata.mutex, which vr_debug() acquired before returning
 * this function to the caller. The lock is deliberately held across
 * the two calls so prtdata's fields stay consistent.
 */
static void
vr_prt(const char *fmt, ...)
{
	va_list	args;

	ASSERT(mutex_owned(prtdata.mutex));

	va_start(args, fmt);
	vr_vprt(fmt, args);
	va_end(args);

	mutex_exit(prtdata.mutex);
}
3603 3603
/*
 * First half of the debug-print pair: acquire prtdata.mutex, set up
 * the module-level prefix/format/level, and return vr_prt, which the
 * caller invokes with the actual message; vr_prt drops the mutex.
 * Typical use: (*vr_debug())("..."); -- the lock is held in between.
 */
void
(*vr_debug())(const char *fmt, ...)
{
	mutex_enter(prtdata.mutex);
	prtdata.ifname = MODULENAME;
	prtdata.fmt = "^%s: %s\n";
	prtdata.level = CE_CONT;

	return (vr_prt);
}
3614 3614 #endif /* DEBUG */
3615 3615
↓ open down ↓ |
3615 lines elided |
↑ open up ↑ |
/*
 * Character/block driver operations table; getinfo/probe are nulldev,
 * power is not supported (nodev), and vr_quiesce supports fast reboot.
 */
DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
    nodev, NULL, D_MP, NULL, vr_quiesce);
3618 3618
/*
 * Driver module linkage, referenced from the modlinkage below.
 */
static struct modldrv vr_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	vr_ident,		/* short description */
	&vr_dev_ops		/* driver specific ops */
};
3624 3624
3625 3625 static struct modlinkage modlinkage = {
3626 - MODREV_1, (void *)&vr_modldrv, NULL
3626 + MODREV_1, { (void *)&vr_modldrv, NULL }
3627 3627 };
3628 3628
/*
 * Loadable module entry point: report module information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3634 3634
3635 3635 int
3636 3636 _init(void)
3637 3637 {
3638 3638 int status;
3639 3639
3640 3640 mac_init_ops(&vr_dev_ops, MODULENAME);
3641 3641 status = mod_install(&modlinkage);
3642 3642 if (status == DDI_SUCCESS)
3643 3643 mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3644 3644 else
3645 3645 mac_fini_ops(&vr_dev_ops);
3646 3646 return (status);
3647 3647 }
3648 3648
3649 3649 int
3650 3650 _fini(void)
3651 3651 {
3652 3652 int status;
3653 3653
3654 3654 status = mod_remove(&modlinkage);
3655 3655 if (status == 0) {
3656 3656 mac_fini_ops(&vr_dev_ops);
3657 3657 mutex_destroy(prtdata.mutex);
3658 3658 }
3659 3659 return (status);
3660 3660 }
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX