7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/sfe/sfe.c
+++ new/usr/src/uts/common/io/sfe/sfe.c
1 1 /*
2 2 * sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3 3 *
4 4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5 5 *
6 6 * Redistribution and use in source and binary forms, with or without
7 7 * modification, are permitted provided that the following conditions are met:
8 8 *
9 9 * 1. Redistributions of source code must retain the above copyright notice,
10 10 * this list of conditions and the following disclaimer.
11 11 *
12 12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 13 * this list of conditions and the following disclaimer in the documentation
14 14 * and/or other materials provided with the distribution.
15 15 *
16 16 * 3. Neither the name of the author nor the names of its contributors may be
17 17 * used to endorse or promote products derived from this software without
18 18 * specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 31 * DAMAGE.
32 32 */
33 33
34 34 /* Avoid undefined symbols on non-IA (x86) architectures */
35 35 #pragma weak inb
36 36 #pragma weak outb
37 37
38 38 /*
39 39 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
40 40 * Use is subject to license terms.
41 41 */
42 42
43 43 /*
44 44 * System Header files.
45 45 */
46 46 #include <sys/types.h>
47 47 #include <sys/conf.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/kmem.h>
50 50 #include <sys/modctl.h>
51 51 #include <sys/errno.h>
52 52 #include <sys/ddi.h>
53 53 #include <sys/sunddi.h>
54 54 #include <sys/byteorder.h>
55 55 #include <sys/ethernet.h>
56 56 #include <sys/pci.h>
57 57
58 58 #include "sfe_mii.h"
59 59 #include "sfe_util.h"
60 60 #include "sfereg.h"
61 61
62 62 char ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
63 63
64 64 /* Debugging support */
65 65 #ifdef DEBUG_LEVEL
66 66 static int sfe_debug = DEBUG_LEVEL;
67 67 #if DEBUG_LEVEL > 4
68 68 #define CONS "^"
69 69 #else
70 70 #define CONS "!"
71 71 #endif
72 72 #define DPRINTF(n, args) if (sfe_debug > (n)) cmn_err args
73 73 #else
74 74 #define CONS "!"
75 75 #define DPRINTF(n, args)
76 76 #endif
77 77
78 78 /*
79 79 * Useful macros and typedefs
80 80 */
81 81 #define ONESEC (drv_usectohz(1*1000000))
82 82 #define ROUNDUP2(x, a) (((x) + (a) - 1) & ~((a) - 1))
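/* Editorial example: ROUNDUP2(100, 16) == 112; 'a' must be a power of two. */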
83 83
84 84 /*
85 85 * Our configuration
86 86 */
87 87 #define MAXTXFRAGS 1
88 88 #define MAXRXFRAGS 1
89 89
90 90 #ifndef TX_BUF_SIZE
91 91 #define TX_BUF_SIZE 64
92 92 #endif
93 93 #ifndef TX_RING_SIZE
94 94 #if MAXTXFRAGS == 1
95 95 #define TX_RING_SIZE TX_BUF_SIZE
96 96 #else
97 97 #define TX_RING_SIZE (TX_BUF_SIZE * 4)
98 98 #endif
99 99 #endif
100 100
101 101 #ifndef RX_BUF_SIZE
102 102 #define RX_BUF_SIZE 256
103 103 #endif
104 104 #ifndef RX_RING_SIZE
105 105 #define RX_RING_SIZE RX_BUF_SIZE
106 106 #endif
107 107
108 108 #define OUR_INTR_BITS \
109 109 (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR | \
110 110 ISR_TXURN | ISR_TXDESC | ISR_TXERR | \
111 111 ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
112 112
113 113 #define USE_MULTICAST_HASHTBL
114 114
115 115 static int sfe_tx_copy_thresh = 256;
116 116 static int sfe_rx_copy_thresh = 256;
117 117
118 118 /* special PHY registers for SIS900 */
119 119 #define MII_CONFIG1 0x0010
120 120 #define MII_CONFIG2 0x0011
121 121 #define MII_MASK 0x0013
122 122 #define MII_RESV 0x0014
123 123
124 124 #define PHY_MASK 0xfffffff0
125 125 #define PHY_SIS900_INTERNAL 0x001d8000
126 126 #define PHY_ICS1893 0x0015f440
127 127
128 128
129 129 #define SFE_DESC_SIZE 16 /* including padding, rounded up to a power of 2 */
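/*
 * Editorial note: struct sfe_desc (defined in sfereg.h) is used below as
 * three little-endian 32-bit words: d_link (next descriptor), d_cmdsts
 * (command/status) and d_bufptr (buffer address), presumably padded to
 * the 16 bytes of SFE_DESC_SIZE so descriptor addresses stay aligned.
 */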
130 130
131 131 /*
132 132 * Supported chips
133 133 */
134 134 struct chip_info {
135 135 uint16_t venid;
136 136 uint16_t devid;
137 137 char *chip_name;
138 138 int chip_type;
139 139 #define CHIPTYPE_DP83815 0
140 140 #define CHIPTYPE_SIS900 1
141 141 };
142 142
143 143 /*
144 144 * Chip dependent MAC state
145 145 */
146 146 struct sfe_dev {
147 147 /* misc HW information */
148 148 struct chip_info *chip;
149 149 uint32_t our_intr_bits;
150 150 uint32_t isr_pended;
151 151 uint32_t cr;
152 152 uint_t tx_drain_threshold;
153 153 uint_t tx_fill_threshold;
154 154 uint_t rx_drain_threshold;
155 155 uint_t rx_fill_threshold;
156 156 uint8_t revid; /* revision from PCI configuration */
157 157 boolean_t (*get_mac_addr)(struct gem_dev *);
158 158 uint8_t mac_addr[ETHERADDRL];
159 159 uint8_t bridge_revid;
160 160 };
161 161
162 162 /*
163 163 * Hardware information
164 164 */
165 165 struct chip_info sfe_chiptbl[] = {
166 166 { 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
167 167 { 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
168 168 { 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
169 169 };
170 170 #define CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
171 171
172 172 /* ======================================================== */
173 173
174 174 /* mii operations */
175 175 static void sfe_mii_sync_dp83815(struct gem_dev *);
176 176 static void sfe_mii_sync_sis900(struct gem_dev *);
177 177 static uint16_t sfe_mii_read_dp83815(struct gem_dev *, uint_t);
178 178 static uint16_t sfe_mii_read_sis900(struct gem_dev *, uint_t);
179 179 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
180 180 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
181 181 static void sfe_set_eq_sis630(struct gem_dev *dp);
182 182 /* nic operations */
183 183 static int sfe_reset_chip_sis900(struct gem_dev *);
184 184 static int sfe_reset_chip_dp83815(struct gem_dev *);
185 185 static int sfe_init_chip(struct gem_dev *);
186 186 static int sfe_start_chip(struct gem_dev *);
187 187 static int sfe_stop_chip(struct gem_dev *);
188 188 static int sfe_set_media(struct gem_dev *);
189 189 static int sfe_set_rx_filter_dp83815(struct gem_dev *);
190 190 static int sfe_set_rx_filter_sis900(struct gem_dev *);
191 191 static int sfe_get_stats(struct gem_dev *);
192 192 static int sfe_attach_chip(struct gem_dev *);
193 193
194 194 /* descriptor operations */
195 195 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
196 196 ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
197 197 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
198 198 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
199 199 ddi_dma_cookie_t *dmacookie, int frags);
200 200 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
201 201 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
202 202
203 203 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
204 204 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
205 205 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
206 206 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
207 207
208 208 /* interrupt handler */
209 209 static uint_t sfe_interrupt(struct gem_dev *dp);
210 210
211 211 /* ======================================================== */
212 212
213 213 /* mapping attributes */
214 214 /* Data access requirements. */
215 215 static struct ddi_device_acc_attr sfe_dev_attr = {
216 216 DDI_DEVICE_ATTR_V0,
217 217 DDI_STRUCTURE_LE_ACC,
218 218 DDI_STRICTORDER_ACC
219 219 };
220 220
221 221 /* On sparc, buffers should be native-endian for speed */
222 222 static struct ddi_device_acc_attr sfe_buf_attr = {
223 223 DDI_DEVICE_ATTR_V0,
224 224 DDI_NEVERSWAP_ACC, /* native endianness */
225 225 DDI_STRICTORDER_ACC
226 226 };
227 227
228 228 static ddi_dma_attr_t sfe_dma_attr_buf = {
229 229 DMA_ATTR_V0, /* dma_attr_version */
230 230 0, /* dma_attr_addr_lo */
231 231 0xffffffffull, /* dma_attr_addr_hi */
232 232 0x00000fffull, /* dma_attr_count_max */
233 233 0, /* patched later */ /* dma_attr_align */
234 234 0x000003fc, /* dma_attr_burstsizes */
235 235 1, /* dma_attr_minxfer */
236 236 0x00000fffull, /* dma_attr_maxxfer */
237 237 0xffffffffull, /* dma_attr_seg */
238 238 0, /* patched later */ /* dma_attr_sgllen */
239 239 1, /* dma_attr_granular */
240 240 0 /* dma_attr_flags */
241 241 };
242 242
243 243 static ddi_dma_attr_t sfe_dma_attr_desc = {
244 244 DMA_ATTR_V0, /* dma_attr_version */
245 245 16, /* dma_attr_addr_lo */
246 246 0xffffffffull, /* dma_attr_addr_hi */
247 247 0xffffffffull, /* dma_attr_count_max */
248 248 16, /* dma_attr_align */
249 249 0x000003fc, /* dma_attr_burstsizes */
250 250 1, /* dma_attr_minxfer */
251 251 0xffffffffull, /* dma_attr_maxxfer */
252 252 0xffffffffull, /* dma_attr_seg */
253 253 1, /* dma_attr_sgllen */
254 254 1, /* dma_attr_granular */
255 255 0 /* dma_attr_flags */
256 256 };
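/*
 * Editorial note: dma_attr_addr_lo and dma_attr_align of 16 here match
 * SFE_DESC_SIZE; the buffer attributes above leave alignment and sgllen
 * as 0 because, per the inline comments, they are patched later
 * (presumably at attach time, based on MAXTXFRAGS/MAXRXFRAGS).
 */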
257 257
258 258 uint32_t sfe_use_pcimemspace = 0;
259 259
260 260 /* ======================================================== */
261 261 /*
262 262 * HW manipulation routines
263 263 */
264 264 /* ======================================================== */
265 265
266 266 #define SFE_EEPROM_DELAY(dp) \
267 267 { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
268 268 #define EE_CMD_READ 6
269 269 #define EE_CMD_SHIFT 6
270 270
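/*
 * Editorial note: sfe_read_eeprom() below bit-bangs what appears to be a
 * 93C46-style MicroWire serial EEPROM: the 3-bit READ opcode (110b,
 * EE_CMD_READ) plus a 6-bit word offset are clocked out MSB first
 * (9 bits in total, hence i = 8..0), then 16 data bits are clocked back in.
 */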
271 271 static uint16_t
272 272 sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
273 273 {
274 274 int eedi;
275 275 int i;
276 276 uint16_t ret;
277 277
278 278 /* ensure chip select is de-asserted */
279 279 OUTL(dp, EROMAR, 0);
280 280 SFE_EEPROM_DELAY(dp);
281 281 OUTL(dp, EROMAR, EROMAR_EESK);
282 282 SFE_EEPROM_DELAY(dp);
283 283
284 284 /* compose the read command */
285 285 offset |= EE_CMD_READ << EE_CMD_SHIFT;
286 286
287 287 for (i = 8; i >= 0; i--) {
288 288 /* make command */
289 289 eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;
290 290
291 291 /* send 1 bit */
292 292 OUTL(dp, EROMAR, EROMAR_EECS | eedi);
293 293 SFE_EEPROM_DELAY(dp);
294 294 OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
295 295 SFE_EEPROM_DELAY(dp);
296 296 }
297 297
298 298 OUTL(dp, EROMAR, EROMAR_EECS);
299 299
300 300 ret = 0;
301 301 for (i = 0; i < 16; i++) {
302 302 /* Get 1 bit */
303 303 OUTL(dp, EROMAR, EROMAR_EECS);
304 304 SFE_EEPROM_DELAY(dp);
305 305 OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
306 306 SFE_EEPROM_DELAY(dp);
307 307
308 308 ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
309 309 }
310 310
311 311 OUTL(dp, EROMAR, 0);
312 312 SFE_EEPROM_DELAY(dp);
313 313
314 314 return (ret);
315 315 }
316 316 #undef SFE_EEPROM_DELAY
317 317
318 318 static boolean_t
319 319 sfe_get_mac_addr_dp83815(struct gem_dev *dp)
320 320 {
321 321 uint8_t *mac;
322 322 uint_t val;
323 323 int i;
324 324
325 325 #define BITSET(p, ix, v) (p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)
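/* Editorial example: BITSET(mac, 10, 1) sets bit 2 (LSB first) of mac[1]. */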
326 326
327 327 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
328 328
329 329 mac = dp->dev_addr.ether_addr_octet;
330 330
331 331 /* first of all, clear MAC address buffer */
332 332 bzero(mac, ETHERADDRL);
333 333
334 334 /* get bit 0 */
335 335 val = sfe_read_eeprom(dp, 0x6);
336 336 BITSET(mac, 0, val & 1);
337 337
338 338 /* get bits 1 - 16 */
339 339 val = sfe_read_eeprom(dp, 0x7);
340 340 for (i = 0; i < 16; i++) {
341 341 BITSET(mac, 1 + i, val & (1 << (15 - i)));
342 342 }
343 343
344 344 /* get bits 17 - 32 */
345 345 val = sfe_read_eeprom(dp, 0x8);
346 346 for (i = 0; i < 16; i++) {
347 347 BITSET(mac, 17 + i, val & (1 << (15 - i)));
348 348 }
349 349
350 350 /* get bits 33 - 47 */
351 351 val = sfe_read_eeprom(dp, 0x9);
352 352 for (i = 0; i < 15; i++) {
353 353 BITSET(mac, 33 + i, val & (1 << (15 - i)));
354 354 }
355 355
356 356 return (B_TRUE);
357 357 #undef BITSET
358 358 }
359 359
360 360 static boolean_t
361 361 sfe_get_mac_addr_sis900(struct gem_dev *dp)
362 362 {
363 363 uint_t val;
364 364 int i;
365 365 uint8_t *mac;
366 366
367 367 mac = dp->dev_addr.ether_addr_octet;
368 368
369 369 for (i = 0; i < ETHERADDRL/2; i++) {
370 370 val = sfe_read_eeprom(dp, 0x8 + i);
371 371 *mac++ = (uint8_t)val;
372 372 *mac++ = (uint8_t)(val >> 8);
373 373 }
374 374
375 375 return (B_TRUE);
376 376 }
377 377
378 378 static dev_info_t *
379 379 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
380 380 {
381 381 dev_info_t *child_id;
382 382 dev_info_t *ret;
383 383 int vid, did;
384 384
385 385 if (cur_node == NULL) {
386 386 return (NULL);
387 387 }
388 388
389 389 /* check siblings */
390 390 do {
391 391 vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
392 392 DDI_PROP_DONTPASS, "vendor-id", -1);
393 393 did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
394 394 DDI_PROP_DONTPASS, "device-id", -1);
395 395
396 396 if (vid == vendor_id && did == device_id) {
397 397 /* found */
398 398 return (cur_node);
399 399 }
400 400
401 401 /* check children */
402 402 if ((child_id = ddi_get_child(cur_node)) != NULL) {
403 403 if ((ret = sfe_search_pci_dev_subr(child_id,
404 404 vendor_id, device_id)) != NULL) {
405 405 return (ret);
406 406 }
407 407 }
408 408
409 409 } while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
410 410
411 411 /* not found */
412 412 return (NULL);
413 413 }
414 414
415 415 static dev_info_t *
416 416 sfe_search_pci_dev(int vendor_id, int device_id)
417 417 {
418 418 return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
419 419 }
420 420
421 421 static boolean_t
422 422 sfe_get_mac_addr_sis630e(struct gem_dev *dp)
423 423 {
424 424 int i;
425 425 dev_info_t *isa_bridge;
426 426 ddi_acc_handle_t isa_handle;
427 427 int reg;
428 428
429 429 if (inb == NULL || outb == NULL) {
430 430 /* this is not IA architecture */
431 431 return (B_FALSE);
432 432 }
433 433
434 434 if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) {
435 435 cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8",
436 436 dp->name);
437 437 return (B_FALSE);
438 438 }
439 439
440 440 if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) {
441 441 cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed",
442 442 dp->name);
443 443 return (B_FALSE);
444 444 }
445 445
446 446 /* enable access to CMOS RAM */
447 447 reg = pci_config_get8(isa_handle, 0x48);
448 448 pci_config_put8(isa_handle, 0x48, reg | 0x40);
449 449
450 450 for (i = 0; i < ETHERADDRL; i++) {
451 451 outb(0x70, 0x09 + i);
452 452 dp->dev_addr.ether_addr_octet[i] = inb(0x71);
453 453 }
454 454
455 455 /* disable access to CMOS RAM */
456 456 pci_config_put8(isa_handle, 0x48, reg);
457 457 pci_config_teardown(&isa_handle);
458 458
459 459 return (B_TRUE);
460 460 }
461 461
462 462 static boolean_t
463 463 sfe_get_mac_addr_sis635(struct gem_dev *dp)
464 464 {
465 465 int i;
466 466 uint32_t rfcr;
467 467 uint16_t v;
468 468 struct sfe_dev *lp = dp->private;
469 469
470 470 DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
471 471 rfcr = INL(dp, RFCR);
472 472
473 473 OUTL(dp, CR, lp->cr | CR_RELOAD);
474 474 OUTL(dp, CR, lp->cr);
475 475
476 476 /* disable packet filtering before reading filter */
477 477 OUTL(dp, RFCR, rfcr & ~RFCR_RFEN);
478 478
479 479 /* load MAC addr from filter data register */
480 480 for (i = 0; i < ETHERADDRL; i += 2) {
481 481 OUTL(dp, RFCR,
482 482 (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900);
483 483 v = INL(dp, RFDR);
484 484 dp->dev_addr.ether_addr_octet[i] = (uint8_t)v;
485 485 dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8);
486 486 }
487 487
488 488 /* re-enable packet filtering */
489 489 OUTL(dp, RFCR, rfcr | RFCR_RFEN);
490 490
491 491 return (B_TRUE);
492 492 }
493 493
494 494 static boolean_t
495 495 sfe_get_mac_addr_sis962(struct gem_dev *dp)
496 496 {
497 497 boolean_t ret;
498 498 int i;
499 499
500 500 ret = B_FALSE;
501 501
502 502 /* raise the request signal to access the EEPROM */
503 503 OUTL(dp, MEAR, EROMAR_EEREQ);
504 504 for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
505 505 if (i > 200) {
506 506 /* failed to acquire eeprom */
507 507 cmn_err(CE_NOTE,
508 508 CONS "%s: failed to access eeprom", dp->name);
509 509 goto x;
510 510 }
511 511 drv_usecwait(10);
512 512 }
513 513 ret = sfe_get_mac_addr_sis900(dp);
514 514 x:
515 515 /* release EEPROM */
516 516 OUTL(dp, MEAR, EROMAR_EEDONE);
517 517
518 518 return (ret);
519 519 }
520 520
521 521 static int
522 522 sfe_reset_chip_sis900(struct gem_dev *dp)
523 523 {
524 524 int i;
525 525 uint32_t done;
526 526 uint32_t val;
527 527 struct sfe_dev *lp = dp->private;
528 528
529 529 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
530 530
531 531 /* invalidate mac addr cache */
532 532 bzero(lp->mac_addr, sizeof (lp->mac_addr));
533 533
534 534 lp->cr = 0;
535 535
536 536 /* inhibit interrupt */
537 537 OUTL(dp, IMR, 0);
538 538 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
539 539
540 540 OUTLINL(dp, RFCR, 0);
541 541
542 542 OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
543 543 drv_usecwait(10);
544 544
545 545 done = 0;
546 546 for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
547 547 if (i > 1000) {
548 548 cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
549 549 return (GEM_FAILURE);
550 550 }
551 551 done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
552 552 drv_usecwait(10);
553 553 }
554 554
555 555 if (lp->revid == SIS630ET_900_REV) {
556 556 lp->cr |= CR_ACCESSMODE;
557 557 OUTL(dp, CR, lp->cr | INL(dp, CR));
558 558 }
559 559
560 560 /* Configuration register: enable PCI parity */
561 561 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
562 562 dp->name, INL(dp, CFG), CFG_BITS_SIS900));
563 563 val = 0;
564 564 if (lp->revid >= SIS635A_900_REV ||
565 565 lp->revid == SIS900B_900_REV) {
566 566 /* what is this ? */
567 567 val |= CFG_RND_CNT;
568 568 }
569 569 OUTL(dp, CFG, val);
570 570 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
571 571 INL(dp, CFG), CFG_BITS_SIS900));
572 572
573 573 return (GEM_SUCCESS);
574 574 }
575 575
576 576 static int
577 577 sfe_reset_chip_dp83815(struct gem_dev *dp)
578 578 {
579 579 int i;
580 580 uint32_t val;
581 581 struct sfe_dev *lp = dp->private;
582 582
583 583 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
584 584
585 585 /* invalidate mac addr cache */
586 586 bzero(lp->mac_addr, sizeof (lp->mac_addr));
587 587
588 588 lp->cr = 0;
589 589
590 590 /* inhibit interrupts */
591 591 OUTL(dp, IMR, 0);
592 592 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
593 593
594 594 OUTL(dp, RFCR, 0);
595 595
596 596 OUTL(dp, CR, CR_RST);
597 597 drv_usecwait(10);
598 598
599 599 for (i = 0; INL(dp, CR) & CR_RST; i++) {
600 600 if (i > 100) {
601 601 cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
602 602 return (GEM_FAILURE);
603 603 }
604 604 drv_usecwait(10);
605 605 }
606 606 DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));
607 607
608 608 OUTL(dp, CCSR, CCSR_PMESTS);
609 609 OUTL(dp, CCSR, 0);
610 610
611 611 /* Configuration register: enable PCI parity */
612 612 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
613 613 dp->name, INL(dp, CFG), CFG_BITS_DP83815));
614 614 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
615 615 OUTL(dp, CFG, val | CFG_PAUSE_ADV);
616 616 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
617 617 INL(dp, CFG), CFG_BITS_DP83815));
618 618
619 619 return (GEM_SUCCESS);
620 620 }
621 621
622 622 static int
623 623 sfe_init_chip(struct gem_dev *dp)
624 624 {
625 625 /* Configuration register: has been set up in sfe_reset_chip_*() */
626 626
627 627 /* PCI test control register: do nothing */
628 628
629 629 /* Interrupt status register : do nothing */
630 630
631 631 /* Interrupt mask register: clear, but leave lp->our_intr_bits */
632 632 OUTL(dp, IMR, 0);
633 633
634 634 /* Enhanced PHY Access register (sis900): do nothing */
635 635
636 636 /* Transmit Descriptor Pointer register: base addr of TX ring */
637 637 OUTL(dp, TXDP, dp->tx_ring_dma);
638 638
639 639 /* Receive descriptor pointer register: base addr of RX ring */
640 640 OUTL(dp, RXDP, dp->rx_ring_dma);
641 641
642 642 return (GEM_SUCCESS);
643 643 }
644 644
645 645 static uint_t
646 646 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
647 647 {
648 648 return (gem_ether_crc_be(addr, ETHERADDRL));
649 649 }
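/*
 * Editorial note: the big-endian CRC32 of the address, which the gem
 * layer apparently stores in mc_list[].hash, is what the rx filter
 * routines below index their hash tables with; they use its top bits
 * (9 bits for the DP83815, 7 or 8 bits for SiS900 depending on revision).
 */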
650 650
651 651 #ifdef DEBUG_LEVEL
652 652 static void
653 653 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
654 654 {
655 655 int i;
656 656 int j;
657 657 uint16_t ram[0x10];
658 658
659 659 cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
660 660 #define WORDS_PER_LINE 4
661 661 for (i = start; i < end; i += WORDS_PER_LINE*2) {
662 662 for (j = 0; j < WORDS_PER_LINE; j++) {
663 663 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
664 664 ram[j] = INL(dp, RFDR);
665 665 }
666 666
667 667 cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
668 668 i, ram[0], ram[1], ram[2], ram[3]);
669 669 }
670 670
671 671 #undef WORDS_PER_LINE
672 672 }
673 673 #endif
674 674
675 675 static uint_t sfe_rf_perfect_base_dp83815[] = {
676 676 RFADDR_PMATCH0_DP83815,
677 677 RFADDR_PMATCH1_DP83815,
678 678 RFADDR_PMATCH2_DP83815,
679 679 RFADDR_PMATCH3_DP83815,
680 680 };
681 681
682 682 static int
683 683 sfe_set_rx_filter_dp83815(struct gem_dev *dp)
684 684 {
685 685 int i;
686 686 int j;
687 687 uint32_t mode;
688 688 uint8_t *mac = dp->cur_addr.ether_addr_octet;
689 689 uint16_t hash_tbl[32];
690 690 struct sfe_dev *lp = dp->private;
691 691
692 692 DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
693 693 dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));
694 694
695 695 #if DEBUG_LEVEL > 0
696 696 for (i = 0; i < dp->mc_count; i++) {
697 697 cmn_err(CE_CONT,
698 698 "!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
699 699 dp->name, i,
700 700 dp->mc_list[i].addr.ether_addr_octet[0],
701 701 dp->mc_list[i].addr.ether_addr_octet[1],
702 702 dp->mc_list[i].addr.ether_addr_octet[2],
703 703 dp->mc_list[i].addr.ether_addr_octet[3],
704 704 dp->mc_list[i].addr.ether_addr_octet[4],
705 705 dp->mc_list[i].addr.ether_addr_octet[5]);
706 706 }
707 707 #endif
708 708 if ((dp->rxmode & RXMODE_ENABLE) == 0) {
709 709 /* disable rx filter */
710 710 OUTL(dp, RFCR, 0);
711 711 return (GEM_SUCCESS);
712 712 }
713 713
714 714 /*
715 715 * Set Receive filter control register
716 716 */
717 717 if (dp->rxmode & RXMODE_PROMISC) {
718 718 /* all broadcast, all multicast, all physical */
719 719 mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
720 720 } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
721 721 /* all broadcast, all multicast, physical for the chip */
722 722 mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
723 723 } else if (dp->mc_count > 4) {
724 724 /*
725 725 * Use multicast hash table,
726 726 * accept all broadcast and physical for the chip.
727 727 */
728 728 mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;
729 729
730 730 bzero(hash_tbl, sizeof (hash_tbl));
731 731 for (i = 0; i < dp->mc_count; i++) {
732 732 j = dp->mc_list[i].hash >> (32 - 9);
733 733 hash_tbl[j / 16] |= 1 << (j % 16);
734 734 }
735 735 } else {
736 736 /*
737 737 * Use the pattern match filter for multicast addresses,
738 738 * accept all broadcast and physical for the chip.
739 739 */
740 740 /* need to enable corresponding pattern registers */
741 741 mode = RFCR_AAB | RFCR_APM_DP83815 |
742 742 (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
743 743 }
744 744
745 745 #if DEBUG_LEVEL > 1
746 746 cmn_err(CE_CONT,
747 747 "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
748 748 " cache %02x:%02x:%02x:%02x:%02x:%02x",
749 749 dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
750 750 lp->mac_addr[0], lp->mac_addr[1],
751 751 lp->mac_addr[2], lp->mac_addr[3],
752 752 lp->mac_addr[4], lp->mac_addr[5]);
753 753 #endif
754 754 if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
755 755 /*
756 756 * XXX - need to *disable* rx filter to load mac address for
757 757 * the chip. Otherwise, we cannot set up the rx filter correctly.
758 758 */
759 759 /* setup perfect match register for my station address */
760 760 for (i = 0; i < ETHERADDRL; i += 2) {
761 761 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
762 762 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
763 763 }
764 764
765 765 bcopy(mac, lp->mac_addr, ETHERADDRL);
766 766 }
767 767
768 768 #if DEBUG_LEVEL > 3
769 769 /* clear pattern ram */
770 770 for (j = 0x200; j < 0x380; j += 2) {
771 771 OUTL(dp, RFCR, j);
772 772 OUTL(dp, RFDR, 0);
773 773 }
774 774 #endif
775 775 if (mode & RFCR_APAT_DP83815) {
776 776 /* setup multicast address into pattern match registers */
777 777 for (j = 0; j < dp->mc_count; j++) {
778 778 mac = &dp->mc_list[j].addr.ether_addr_octet[0];
779 779 for (i = 0; i < ETHERADDRL; i += 2) {
780 780 OUTL(dp, RFCR,
781 781 sfe_rf_perfect_base_dp83815[j] + i*2);
782 782 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
783 783 }
784 784 }
785 785
786 786 /* setup pattern count registers */
787 787 OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
788 788 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
789 789 OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
790 790 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
791 791 }
792 792
793 793 if (mode & RFCR_MHEN_DP83815) {
794 794 /* Load Multicast hash table */
795 795 for (i = 0; i < 32; i++) {
796 796 /* for DP83815, index is in byte */
797 797 OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
798 798 OUTL(dp, RFDR, hash_tbl[i]);
799 799 }
800 800 }
801 801 #if DEBUG_LEVEL > 2
802 802 sfe_rxfilter_dump(dp, 0, 0x10);
803 803 sfe_rxfilter_dump(dp, 0x200, 0x380);
804 804 #endif
805 805 /* Set rx filter mode and enable rx filter */
806 806 OUTL(dp, RFCR, RFCR_RFEN | mode);
807 807
808 808 return (GEM_SUCCESS);
809 809 }
810 810
811 811 static int
812 812 sfe_set_rx_filter_sis900(struct gem_dev *dp)
813 813 {
814 814 int i;
815 815 uint32_t mode;
816 816 uint16_t hash_tbl[16];
817 817 uint8_t *mac = dp->cur_addr.ether_addr_octet;
818 818 int hash_size;
819 819 int hash_shift;
820 820 struct sfe_dev *lp = dp->private;
821 821
822 822 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
823 823
824 824 if ((dp->rxmode & RXMODE_ENABLE) == 0) {
825 825 /* disable rx filter */
826 826 OUTLINL(dp, RFCR, 0);
827 827 return (GEM_SUCCESS);
828 828 }
829 829
830 830 /*
831 831 * determine the hardware hash table size in words.
832 832 */
833 833 hash_shift = 25;
834 834 if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
835 835 hash_shift = 24;
836 836 }
837 837 hash_size = (1 << (32 - hash_shift)) / 16;
838 838 bzero(hash_tbl, sizeof (hash_tbl));
839 839
840 840 /* Set Receive filter control register */
841 841
842 842 if (dp->rxmode & RXMODE_PROMISC) {
843 843 /* all broadcast, all multicast, all physical */
844 844 mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
845 845 } else if ((dp->rxmode & RXMODE_ALLMULTI) ||
846 846 dp->mc_count > hash_size*16/2) {
847 847 /* all broadcast, all multicast, physical for the chip */
848 848 mode = RFCR_AAB | RFCR_AAM;
849 849 } else {
850 850 /* all broadcast, physical for the chip */
851 851 mode = RFCR_AAB;
852 852 }
853 853
854 854 /* make hash table */
855 855 for (i = 0; i < dp->mc_count; i++) {
856 856 uint_t h;
857 857 h = dp->mc_list[i].hash >> hash_shift;
858 858 hash_tbl[h / 16] |= 1 << (h % 16);
859 859 }
860 860
861 861 if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
862 862 /* Disable Rx filter and load mac address */
863 863 for (i = 0; i < ETHERADDRL/2; i++) {
864 864 /* For sis900, index is in word */
865 865 OUTLINL(dp, RFCR,
866 866 (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
867 867 OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
868 868 }
869 869
870 870 bcopy(mac, lp->mac_addr, ETHERADDRL);
871 871 }
872 872
873 873 /* Load Multicast hash table */
874 874 for (i = 0; i < hash_size; i++) {
875 875 /* For sis900, index is in word */
876 876 OUTLINL(dp, RFCR,
877 877 (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
878 878 OUTLINL(dp, RFDR, hash_tbl[i]);
879 879 }
880 880
881 881 /* Load rx filter mode and enable rx filter */
882 882 OUTLINL(dp, RFCR, RFCR_RFEN | mode);
883 883
884 884 return (GEM_SUCCESS);
885 885 }
886 886
887 887 static int
888 888 sfe_start_chip(struct gem_dev *dp)
889 889 {
890 890 struct sfe_dev *lp = dp->private;
891 891
892 892 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
893 893
894 894 /*
895 895 * set up the interrupt mask, which shouldn't include ISR_TOK
896 896 * to improve performance.
897 897 */
898 898 lp->our_intr_bits = OUR_INTR_BITS;
899 899
900 900 /* enable interrupt */
901 901 if ((dp->misc_flag & GEM_NOINTR) == 0) {
902 902 OUTL(dp, IER, 1);
903 903 OUTL(dp, IMR, lp->our_intr_bits);
904 904 }
905 905
906 906 /* Kick RX */
907 907 OUTL(dp, CR, lp->cr | CR_RXE);
908 908
909 909 return (GEM_SUCCESS);
910 910 }
911 911
912 912 /*
913 913 * Stop nic core gracefully.
914 914 */
915 915 static int
916 916 sfe_stop_chip(struct gem_dev *dp)
917 917 {
918 918 struct sfe_dev *lp = dp->private;
919 919 uint32_t done;
920 920 int i;
921 921 uint32_t val;
922 922
923 923 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
924 924
925 925 /*
926 926 * Although we inhibit interrupts here, we don't clear the soft copy
927 927 * of the interrupt mask, to avoid bogus interrupts.
928 928 */
929 929 OUTL(dp, IMR, 0);
930 930
931 931 /* stop TX and RX immediately */
932 932 OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);
933 933
934 934 done = 0;
935 935 for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
936 936 if (i > 1000) {
937 937 /*
938 938 * As the gem layer will call sfe_reset_chip(),
939 939 * we don't need to reset further.
940 940 */
941 941 cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
942 942 dp->name, __func__);
943 943
944 944 return (GEM_FAILURE);
945 945 }
946 946 val = INL(dp, ISR);
947 947 done |= val & (ISR_RXRCMP | ISR_TXRCMP);
948 948 lp->isr_pended |= val & lp->our_intr_bits;
949 949 drv_usecwait(10);
950 950 }
951 951
952 952 return (GEM_SUCCESS);
953 953 }
954 954
955 955 #ifndef __sparc
956 956 /*
957 957 * Stop nic core gracefully for quiesce
958 958 */
959 959 static int
960 960 sfe_stop_chip_quiesce(struct gem_dev *dp)
961 961 {
962 962 struct sfe_dev *lp = dp->private;
963 963 uint32_t done;
964 964 int i;
965 965 uint32_t val;
966 966
967 967 /*
968 968 * Although we inhibit interrupts here, we don't clear the soft copy
969 969 * of the interrupt mask, to avoid bogus interrupts.
970 970 */
971 971 OUTL(dp, IMR, 0);
972 972
973 973 /* stop TX and RX immediately */
974 974 OUTL(dp, CR, CR_TXR | CR_RXR);
975 975
976 976 done = 0;
977 977 for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
978 978 if (i > 1000) {
979 979 /*
980 980 * As the gem layer will call sfe_reset_chip(),
981 981 * we don't need to reset further.
982 982 */
983 983
984 984 return (DDI_FAILURE);
985 985 }
986 986 val = INL(dp, ISR);
987 987 done |= val & (ISR_RXRCMP | ISR_TXRCMP);
988 988 lp->isr_pended |= val & lp->our_intr_bits;
989 989 drv_usecwait(10);
990 990 }
991 991 return (DDI_SUCCESS);
992 992 }
993 993 #endif
994 994
995 995 /*
996 996 * Setup media mode
997 997 */
998 998 static uint_t
999 999 sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };
1000 1000
1001 1001 static uint_t
1002 1002 sfe_encode_mxdma(uint_t burstsize)
1003 1003 {
1004 1004 int i;
1005 1005
1006 1006 if (burstsize > 256) {
1007 1007 /* choose 512 */
1008 1008 return (0);
1009 1009 }
1010 1010
1011 1011 for (i = 1; i < 8; i++) {
1012 1012 if (burstsize <= sfe_mxdma_value[i]) {
1013 1013 break;
1014 1014 }
1015 1015 }
1016 1016 return (i);
1017 1017 }
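/*
 * Editorial example: sfe_encode_mxdma(100) returns 6 because 100 fits in
 * sfe_mxdma_value[6] == 128; any request above 256 maps to index 0 (512).
 */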
1018 1018
1019 1019 static int
1020 1020 sfe_set_media(struct gem_dev *dp)
1021 1021 {
1022 1022 uint32_t txcfg;
1023 1023 uint32_t rxcfg;
1024 1024 uint32_t pcr;
1025 1025 uint32_t val;
1026 1026 uint32_t txmxdma;
1027 1027 uint32_t rxmxdma;
1028 1028 struct sfe_dev *lp = dp->private;
1029 1029 #ifdef DEBUG_LEVEL
1030 1030 extern int gem_speed_value[];
1031 1031 #endif
1032 1032 DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
1033 1033 dp->name, __func__,
1034 1034 dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));
1035 1035
1036 1036 /* initialize txcfg and rxcfg */
1037 1037 txcfg = TXCFG_ATP;
1038 1038 if (dp->full_duplex) {
1039 1039 txcfg |= (TXCFG_CSI | TXCFG_HBI);
1040 1040 }
1041 1041 rxcfg = RXCFG_AEP | RXCFG_ARP;
1042 1042 if (dp->full_duplex) {
1043 1043 rxcfg |= RXCFG_ATX;
1044 1044 }
1045 1045
1046 1046 /* select txmxdma and rxmxdma, the maximum burst lengths */
1047 1047 if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1048 1048 #ifdef DEBUG_SIS900_EDB
1049 1049 val = CFG_EDB_MASTER;
1050 1050 #else
1051 1051 val = INL(dp, CFG) & CFG_EDB_MASTER;
1052 1052 #endif
1053 1053 if (val) {
1054 1054 /*
1055 1055 * sis900 built-in cores:
1056 1056 * max burst length must be fixed to 64
1057 1057 */
1058 1058 txmxdma = 64;
1059 1059 rxmxdma = 64;
1060 1060 } else {
1061 1061 /*
1062 1062 * sis900 pci chipset:
1063 1063 * the vendor recommended to fix max burst length
1064 1064 * to 512
1065 1065 */
1066 1066 txmxdma = 512;
1067 1067 rxmxdma = 512;
1068 1068 }
1069 1069 } else {
1070 1070 /*
1071 1071 * NS dp83815/816:
1072 1072 * use user defined or default for tx/rx max burst length
1073 1073 */
1074 1074 txmxdma = max(dp->txmaxdma, 256);
1075 1075 rxmxdma = max(dp->rxmaxdma, 256);
1076 1076 }
1077 1077
1078 1078
1079 1079 /* tx high water mark */
1080 1080 lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
1081 1081
1082 1082 /* determine tx_fill_threshold according to the drain threshold */
1083 1083 lp->tx_fill_threshold =
1084 1084 TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;
1085 1085
1086 1086 /* tune txmxdma not to exceed tx_fill_threshold */
1087 1087 for (; ; ) {
1088 1088 /* normalize txmxdma requested */
1089 1089 val = sfe_encode_mxdma(txmxdma);
1090 1090 txmxdma = sfe_mxdma_value[val];
1091 1091
1092 1092 if (txmxdma <= lp->tx_fill_threshold) {
1093 1093 break;
1094 1094 }
1095 1095 /* select new txmxdma */
1096 1096 txmxdma = txmxdma / 2;
1097 1097 }
1098 1098 txcfg |= val << TXCFG_MXDMA_SHIFT;
1099 1099
1100 1100 /* encode rxmxdma, the maximum burst length for rx */
1101 1101 val = sfe_encode_mxdma(rxmxdma);
1102 1102 rxcfg |= val << RXCFG_MXDMA_SHIFT;
1103 1103 rxmxdma = sfe_mxdma_value[val];
1104 1104
1105 1105 /* receive starting threshold - it has only a 5-bit wide field */
1106 1106 val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
1107 1107 lp->rx_drain_threshold =
1108 1108 min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);
1109 1109
1110 1110 DPRINTF(0, (CE_CONT,
1111 1111 "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
1112 1112 " rx: drain:%d mxdma:%d",
1113 1113 dp->name, __func__,
1114 1114 lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
1115 1115 lp->tx_fill_threshold, txmxdma,
1116 1116 lp->rx_drain_threshold, rxmxdma));
1117 1117
1118 1118 ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
1119 1119 ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
1120 1120 ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);
1121 1121
1122 1122 txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
1123 1123 | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
1124 1124 OUTL(dp, TXCFG, txcfg);
1125 1125
1126 1126 rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
1127 1127 if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1128 1128 rxcfg |= RXCFG_ALP_DP83815;
1129 1129 }
1130 1130 OUTL(dp, RXCFG, rxcfg);
1131 1131
1132 1132 DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
1133 1133 dp->name, __func__,
1134 1134 txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));
1135 1135
1136 1136 /* Flow control */
1137 1137 if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1138 1138 pcr = INL(dp, PCR);
1139 1139 switch (dp->flow_control) {
1140 1140 case FLOW_CONTROL_SYMMETRIC:
1141 1141 case FLOW_CONTROL_RX_PAUSE:
1142 1142 OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
1143 1143 break;
1144 1144
1145 1145 default:
1146 1146 OUTL(dp, PCR,
1147 1147 pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
1148 1148 break;
1149 1149 }
1150 1150 DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
1151 1151 INL(dp, PCR), PCR_BITS));
1152 1152
1153 1153 } else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1154 1154 switch (dp->flow_control) {
1155 1155 case FLOW_CONTROL_SYMMETRIC:
1156 1156 case FLOW_CONTROL_RX_PAUSE:
1157 1157 OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
1158 1158 break;
1159 1159 default:
1160 1160 OUTL(dp, FLOWCTL, 0);
1161 1161 break;
1162 1162 }
1163 1163 DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
1164 1164 dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
1165 1165 }
1166 1166 return (GEM_SUCCESS);
1167 1167 }
1168 1168
1169 1169 static int
1170 1170 sfe_get_stats(struct gem_dev *dp)
1171 1171 {
1172 1172 /* do nothing */
1173 1173 return (GEM_SUCCESS);
1174 1174 }
1175 1175
1176 1176 /*
1177 1177 * descriptor manipulations
1178 1178 */
1179 1179 static int
1180 1180 sfe_tx_desc_write(struct gem_dev *dp, int slot,
1181 1181 ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
1182 1182 {
1183 1183 uint32_t mark;
1184 1184 struct sfe_desc *tdp;
1185 1185 ddi_dma_cookie_t *dcp;
1186 1186 uint32_t tmp0;
1187 1187 #if DEBUG_LEVEL > 2
1188 1188 int i;
1189 1189
1190 1190 cmn_err(CE_CONT,
1191 1191 CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
1192 1192 dp->name, ddi_get_lbolt(), __func__,
1193 1193 dp->tx_desc_tail, slot, frags, flags);
1194 1194
1195 1195 for (i = 0; i < frags; i++) {
1196 1196 cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
1197 1197 i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1198 1198 }
1199 1199 #endif
1200 1200 /*
1201 1201 * write tx descriptor in reversed order.
1202 1202 */
1203 1203 #if DEBUG_LEVEL > 3
1204 1204 flags |= GEM_TXFLAG_INTR;
1205 1205 #endif
1206 1206 mark = (flags & GEM_TXFLAG_INTR)
1207 1207 ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;
1208 1208
1209 1209 ASSERT(frags == 1);
1210 1210 dcp = &dmacookie[0];
1211 1211 if (flags & GEM_TXFLAG_HEAD) {
1212 1212 mark &= ~CMDSTS_OWN;
1213 1213 }
1214 1214
1215 1215 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1216 1216 tmp0 = (uint32_t)dcp->dmac_address;
1217 1217 mark |= (uint32_t)dcp->dmac_size;
1218 1218 tdp->d_bufptr = LE_32(tmp0);
1219 1219 tdp->d_cmdsts = LE_32(mark);
1220 1220
1221 1221 return (frags);
1222 1222 }
1223 1223
1224 1224 static void
1225 1225 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
1226 1226 {
1227 1227 uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1228 1228 struct sfe_desc *tdp;
1229 1229 struct sfe_dev *lp = dp->private;
1230 1230
1231 1231 if (nslot > 1) {
1232 1232 gem_tx_desc_dma_sync(dp,
1233 1233 SLOT(start_slot + 1, tx_ring_size),
1234 1234 nslot - 1, DDI_DMA_SYNC_FORDEV);
1235 1235 }
1236 1236
1237 1237 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
1238 1238 tdp->d_cmdsts |= LE_32(CMDSTS_OWN);
1239 1239
1240 1240 gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);
1241 1241
1242 1242 /*
1243 1243 * Activate the Transmit Buffer Manager Fill state machine.
1244 1244 */
1245 1245 if (dp->mac_active) {
1246 1246 OUTL(dp, CR, lp->cr | CR_TXE);
1247 1247 }
1248 1248 }
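/*
 * Editorial note on the handoff above: sfe_tx_desc_write() leaves
 * CMDSTS_OWN clear on the head descriptor of a packet, and sfe_tx_start()
 * sets it only after the rest of the chain has been DMA-synced, so the
 * chip never fetches a half-built descriptor chain.
 */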
1249 1249
1250 1250 static void
1251 1251 sfe_rx_desc_write(struct gem_dev *dp, int slot,
1252 1252 ddi_dma_cookie_t *dmacookie, int frags)
1253 1253 {
1254 1254 struct sfe_desc *rdp;
1255 1255 uint32_t tmp0;
1256 1256 uint32_t tmp1;
1257 1257 #if DEBUG_LEVEL > 2
1258 1258 int i;
1259 1259
1260 1260 ASSERT(frags == 1);
1261 1261
1262 1262 cmn_err(CE_CONT, CONS
1263 1263 "%s: %s seqnum: %d, slot %d, frags: %d",
1264 1264 dp->name, __func__, dp->rx_active_tail, slot, frags);
1265 1265 for (i = 0; i < frags; i++) {
1266 1266 cmn_err(CE_CONT, CONS " frag: %d addr: 0x%llx, len: 0x%lx",
1267 1267 i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1268 1268 }
1269 1269 #endif
1270 1270 /* for the last slot of the packet */
1271 1271 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1272 1272
1273 1273 tmp0 = (uint32_t)dmacookie->dmac_address;
1274 1274 tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
1275 1275 rdp->d_bufptr = LE_32(tmp0);
1276 1276 rdp->d_cmdsts = LE_32(tmp1);
1277 1277 }
1278 1278
1279 1279 static uint_t
1280 1280 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1281 1281 {
1282 1282 uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1283 1283 struct sfe_desc *tdp;
1284 1284 uint32_t status;
1285 1285 int cols;
1286 1286 struct sfe_dev *lp = dp->private;
1287 1287 #ifdef DEBUG_LEVEL
1288 1288 int i;
1289 1289 clock_t delay;
1290 1290 #endif
1291 1291 /* check status of the last descriptor */
1292 1292 tdp = (void *)
1293 1293 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];
1294 1294
1295 1295 /*
1296 1296 * Don't use LE_32() directly to refer to tdp->d_cmdsts.
1297 1297 * It is not atomic for big-endian cpus.
1298 1298 */
1299 1299 status = tdp->d_cmdsts;
1300 1300 status = LE_32(status);
1301 1301
1302 1302 DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1303 1303 dp->name, ddi_get_lbolt(), __func__,
1304 1304 slot, status, TXSTAT_BITS));
1305 1305
1306 1306 if (status & CMDSTS_OWN) {
1307 1307 /*
1308 1308 * not yet transmitted
1309 1309 */
1310 1310 /* workaround for tx hang */
1311 1311 if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
1312 1312 dp->mac_active) {
1313 1313 OUTL(dp, CR, lp->cr | CR_TXE);
1314 1314 }
1315 1315 return (0);
1316 1316 }
1317 1317
1318 1318 if (status & CMDSTS_MORE) {
1319 1319 /* XXX - the hardware problem but don't panic the system */
1320 1320 /* avoid lint bug for %b format string including 32nd bit */
1321 1321 cmn_err(CE_NOTE, CONS
1322 1322 "%s: tx status bits incorrect: slot:%d, status:0x%x",
1323 1323 dp->name, slot, status);
1324 1324 }
1325 1325
1326 1326 #if DEBUG_LEVEL > 3
1327 1327 delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
1328 1328 if (delay >= 50) {
1329 1329 DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
1330 1330 dp->name, delay, slot));
1331 1331 }
1332 1332 #endif
1333 1333
1334 1334 #if DEBUG_LEVEL > 3
1335 1335 for (i = 0; i < ndesc-1; i++) {
1336 1336 uint32_t s;
1337 1337 int n;
1338 1338
1339 1339 n = SLOT(slot + i, tx_ring_size);
1340 1340 s = LE_32(
1341 1341 ((struct sfe_desc *)((void *)
1342 1342 &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
1343 1343
1344 1344 ASSERT(s & CMDSTS_MORE);
1345 1345 ASSERT((s & CMDSTS_OWN) == 0);
1346 1346 }
1347 1347 #endif
1348 1348
1349 1349 /*
1350 1350 * collect statistics
1351 1351 */
1352 1352 if ((status & CMDSTS_OK) == 0) {
1353 1353
1354 1354 /* failed to transmit the packet */
1355 1355
1356 1356 DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
1357 1357 dp->name, status, TXSTAT_BITS));
1358 1358
1359 1359 dp->stats.errxmt++;
1360 1360
1361 1361 if (status & CMDSTS_TFU) {
1362 1362 dp->stats.underflow++;
1363 1363 } else if (status & CMDSTS_CRS) {
1364 1364 dp->stats.nocarrier++;
1365 1365 } else if (status & CMDSTS_OWC) {
1366 1366 dp->stats.xmtlatecoll++;
1367 1367 } else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
1368 1368 dp->stats.excoll++;
1369 1369 dp->stats.collisions += 16;
1370 1370 } else {
1371 1371 dp->stats.xmit_internal_err++;
1372 1372 }
1373 1373 } else if (!dp->full_duplex) {
1374 1374 cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
1375 1375
1376 1376 if (cols > 0) {
1377 1377 if (cols == 1) {
1378 1378 dp->stats.first_coll++;
1379 1379 } else /* (cols > 1) */ {
1380 1380 dp->stats.multi_coll++;
1381 1381 }
1382 1382 dp->stats.collisions += cols;
1383 1383 } else if (status & CMDSTS_TD) {
1384 1384 dp->stats.defer++;
1385 1385 }
1386 1386 }
1387 1387 return (GEM_TX_DONE);
1388 1388 }
1389 1389
1390 1390 static uint64_t
1391 1391 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1392 1392 {
1393 1393 struct sfe_desc *rdp;
1394 1394 uint_t len;
1395 1395 uint_t flag;
1396 1396 uint32_t status;
1397 1397
1398 1398 flag = GEM_RX_DONE;
1399 1399
1400 1400 /* Don't read ISR because we cannot ack only the rx interrupt. */
1401 1401
1402 1402 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1403 1403
1404 1404 /*
1405 1405 * Don't use LE_32() directly to refer to rdp->d_cmdsts.
1406 1406 * It is not atomic for big-endian cpus.
1407 1407 */
1408 1408 status = rdp->d_cmdsts;
1409 1409 status = LE_32(status);
1410 1410
1411 1411 DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1412 1412 dp->name, ddi_get_lbolt(), __func__,
1413 1413 slot, status, RXSTAT_BITS));
1414 1414
1415 1415 if ((status & CMDSTS_OWN) == 0) {
1416 1416 /*
1417 1417 * No more received packets because
1418 1418 * this buffer is owned by NIC.
1419 1419 */
1420 1420 return (0);
1421 1421 }
1422 1422
1423 1423 #define RX_ERR_BITS \
1424 1424 (CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
1425 1425 CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)
1426 1426
1427 1427 if (status & RX_ERR_BITS) {
1428 1428 /*
1429 1429 * Packet with error received
1430 1430 */
1431 1431 DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
1432 1432 "received, buffer status: %b",
1433 1433 dp->name, status, RXSTAT_BITS));
1434 1434
1435 1435 /* collect statistics information */
1436 1436 dp->stats.errrcv++;
1437 1437
1438 1438 if (status & CMDSTS_RXO) {
1439 1439 dp->stats.overflow++;
1440 1440 } else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
1441 1441 dp->stats.frame_too_long++;
1442 1442 } else if (status & CMDSTS_RUNT) {
1443 1443 dp->stats.runt++;
1444 1444 } else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
1445 1445 dp->stats.frame++;
1446 1446 } else if (status & CMDSTS_CRCE) {
1447 1447 dp->stats.crc++;
1448 1448 } else {
1449 1449 dp->stats.rcv_internal_err++;
1450 1450 }
1451 1451
1452 1452 return (flag | GEM_RX_ERR);
1453 1453 }
1454 1454
1455 1455 /*
1456 1456 * this packet was received without errors
1457 1457 */
1458 1458 if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
1459 1459 len -= ETHERFCSL;
1460 1460 }
1461 1461
1462 1462 #if DEBUG_LEVEL > 10
1463 1463 {
1464 1464 int i;
1465 1465 uint8_t *bp = dp->rx_buf_head->rxb_buf;
1466 1466
1467 1467 cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);
1468 1468
1469 1469 for (i = 0; i < 60; i += 10) {
1470 1470 cmn_err(CE_CONT, CONS
1471 1471 "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
1472 1472 bp[0], bp[1], bp[2], bp[3], bp[4],
1473 1473 bp[5], bp[6], bp[7], bp[8], bp[9]);
1474 1474 }
1475 1475 bp += 10;
1476 1476 }
1477 1477 #endif
1478 1478 return (flag | (len & GEM_RX_LEN));
1479 1479 }
1480 1480
1481 1481 static void
1482 1482 sfe_tx_desc_init(struct gem_dev *dp, int slot)
1483 1483 {
1484 1484 uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1485 1485 struct sfe_desc *tdp;
1486 1486 uint32_t here;
1487 1487
1488 1488 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1489 1489
1490 1490 /* don't clear the d_link field, which has a valid pointer */
1491 1491 tdp->d_cmdsts = 0;
1492 1492
1493 1493 /* make a link to this from the previous descriptor */
1494 1494 here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;
1495 1495
1496 1496 tdp = (void *)
1497 1497 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
1498 1498 tdp->d_link = LE_32(here);
1499 1499 }
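/*
 * Editorial note: linking each slot from its predecessor
 * (SLOT(slot - 1, ...)) yields a circular descriptor list once every
 * slot has been initialized; the rx ring below uses the same scheme.
 */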
1500 1500
1501 1501 static void
1502 1502 sfe_rx_desc_init(struct gem_dev *dp, int slot)
1503 1503 {
1504 1504 uint_t rx_ring_size = dp->gc.gc_rx_ring_size;
1505 1505 struct sfe_desc *rdp;
1506 1506 uint32_t here;
1507 1507
1508 1508 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1509 1509
1510 1510 /* don't clear the d_link field, which has a valid pointer */
1511 1511 rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1512 1512
1513 1513 /* make a link to this from the previous descriptor */
1514 1514 here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;
1515 1515
1516 1516 rdp = (void *)
1517 1517 &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
1518 1518 rdp->d_link = LE_32(here);
1519 1519 }
1520 1520
1521 1521 static void
1522 1522 sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1523 1523 {
1524 1524 struct sfe_desc *tdp;
1525 1525
1526 1526 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1527 1527 tdp->d_cmdsts = 0;
1528 1528 }
1529 1529
1530 1530 static void
1531 1531 sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1532 1532 {
1533 1533 struct sfe_desc *rdp;
1534 1534
1535 1535 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1536 1536 rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1537 1537 }
1538 1538
1539 1539 /*
1540 1540 * Device-dependent interrupt handler
1541 1541 */
1542 1542 static uint_t
1543 1543 sfe_interrupt(struct gem_dev *dp)
1544 1544 {
1545 1545 uint_t rx_ring_size = dp->gc.gc_rx_ring_size;
1546 1546 uint32_t isr;
1547 1547 uint32_t isr_bogus;
1548 1548 uint_t flags = 0;
1549 1549 boolean_t need_to_reset = B_FALSE;
1550 1550 struct sfe_dev *lp = dp->private;
1551 1551
1552 1552 /* read reason and clear interrupt */
1553 1553 isr = INL(dp, ISR);
1554 1554
1555 1555 isr_bogus = lp->isr_pended;
1556 1556 lp->isr_pended = 0;
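/*
 * Editorial note: ISR is read-to-clear, so interrupt causes latched into
 * lp->isr_pended by the reset/stop paths (while IMR was masked) are
 * folded in here and then discarded, letting such interrupts still be
 * claimed below.
 */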
1557 1557
1558 1558 if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
1559 1559 /* we are not the interrupt source */
1560 1560 return (DDI_INTR_UNCLAIMED);
1561 1561 }
1562 1562
1563 1563 DPRINTF(3, (CE_CONT,
1564 1564 CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
1565 1565 dp->name, ddi_get_lbolt(), __func__,
1566 1566 isr, INTR_BITS, dp->rx_active_head));
1567 1567
1568 1568 if (!dp->mac_active) {
1569 1569 /* the device is going to stop */
1570 1570 lp->our_intr_bits = 0;
1571 1571 return (DDI_INTR_CLAIMED);
1572 1572 }
1573 1573
1574 1574 isr &= lp->our_intr_bits;
1575 1575
1576 1576 if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
1577 1577 ISR_RXDESC | ISR_RXOK)) {
1578 1578 (void) gem_receive(dp);
1579 1579
1580 1580 if (isr & (ISR_RXSOVR | ISR_RXORN)) {
1581 1581 DPRINTF(0, (CE_CONT,
1582 1582 CONS "%s: rx fifo overrun: isr %b",
1583 1583 dp->name, isr, INTR_BITS));
1584 1584 /* no need restart rx */
1585 1585 dp->stats.overflow++;
1586 1586 }
1587 1587
1588 1588 if (isr & ISR_RXIDLE) {
1589 1589 DPRINTF(0, (CE_CONT,
1590 1590 CONS "%s: rx buffer ran out: isr %b",
1591 1591 dp->name, isr, INTR_BITS));
1592 1592
1593 1593 dp->stats.norcvbuf++;
1594 1594
1595 1595 /*
1596 1596 * Make RXDP point to the head of the
1597 1597 * receive buffer list.
1598 1598 */
1599 1599 OUTL(dp, RXDP, dp->rx_ring_dma +
1600 1600 SFE_DESC_SIZE *
1601 1601 SLOT(dp->rx_active_head, rx_ring_size));
1602 1602
1603 1603 /* Restart the receive engine */
1604 1604 OUTL(dp, CR, lp->cr | CR_RXE);
1605 1605 }
1606 1606 }
1607 1607
1608 1608 if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
1609 1609 ISR_TXIDLE | ISR_TXOK)) {
1610 1610 /* need to reclaim tx buffers */
1611 1611 if (gem_tx_done(dp)) {
1612 1612 flags |= INTR_RESTART_TX;
1613 1613 }
1614 1614 /*
1615 1615 * XXX - tx error statistics will be counted in
1616 1616 * sfe_tx_desc_stat() and no need to restart tx on errors.
1617 1617 */
1618 1618 }
1619 1619
1620 1620 if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
1621 1621 cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
1622 1622 dp->name, isr, INTR_BITS);
1623 1623 need_to_reset = B_TRUE;
1624 1624 }
1625 1625 reset:
1626 1626 if (need_to_reset) {
1627 1627 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1628 1628 flags |= INTR_RESTART_TX;
1629 1629 }
1630 1630
1631 1631 DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
1632 1632 dp->name, __func__, isr, INTR_BITS));
1633 1633
1634 1634 return (DDI_INTR_CLAIMED | flags);
1635 1635 }
1636 1636
1637 1637 /* ======================================================== */
1638 1638 /*
1639 1639 * HW-dependent MII routines
1640 1640 */
1641 1641 /* ======================================================== */
1642 1642
1643 1643 /*
1644 1644 * MII routines for NS DP83815
1645 1645 */
1646 1646 static void
1647 1647 sfe_mii_sync_dp83815(struct gem_dev *dp)
1648 1648 {
1649 1649 /* do nothing */
1650 1650 }
1651 1651
1652 1652 static uint16_t
1653 1653 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1654 1654 {
1655 1655 DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1656 1656 dp->name, __func__, offset));
1657 1657 return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1658 1658 }
1659 1659
1660 1660 static void
1661 1661 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1662 1662 {
1663 1663 DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1664 1664 dp->name, __func__, offset, val));
1665 1665 OUTL(dp, MII_REGS_BASE + offset*4, val);
1666 1666 }
1667 1667
1668 1668 static int
1669 1669 sfe_mii_config_dp83815(struct gem_dev *dp)
1670 1670 {
1671 1671 uint32_t srr;
1672 1672
1673 1673 srr = INL(dp, SRR) & SRR_REV;
1674 1674
1675 1675 DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
1676 1676 dp->name, srr,
1677 1677 INW(dp, 0x00cc), /* PGSEL */
1678 1678 INW(dp, 0x00e4), /* PMDCSR */
1679 1679 INW(dp, 0x00fc), /* TSTDAT */
1680 1680 INW(dp, 0x00f4), /* DSPCFG */
1681 1681 INW(dp, 0x00f8))); /* SDCFG */
1682 1682
1683 1683 if (srr == SRR_REV_DP83815CVNG) {
1684 1684 /*
1685 1685 * The NS datasheet says that DP83815CVNG needs the following
1686 1686 * registers to be patched to optimize its performance.
1687 1687 * A report said that CRC errors on RX disappeared
1688 1688 * with the patch.
1689 1689 */
1690 1690 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */
1691 1691 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */
1692 1692 OUTW(dp, 0x00fc, 0x0000); /* TSTDAT */
1693 1693 OUTW(dp, 0x00f4, 0x5040); /* DSPCFG */
1694 1694 OUTW(dp, 0x00f8, 0x008c); /* SDCFG */
1695 1695 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */
1696 1696
1697 1697 DPRINTF(0, (CE_CONT,
1698 1698 CONS "%s: PHY patched %04x %04x %04x %04x %04x",
1699 1699 dp->name,
1700 1700 INW(dp, 0x00cc), /* PGSEL */
1701 1701 INW(dp, 0x00e4), /* PMDCSR */
1702 1702 INW(dp, 0x00fc), /* TSTDAT */
1703 1703 INW(dp, 0x00f4), /* DSPCFG */
1704 1704 INW(dp, 0x00f8))); /* SDCFG */
1705 1705 } else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
1706 1706 ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
1707 1707 /*
1708 1708 * Additional patches for later chipsets
1709 1709 */
1710 1710 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */
1711 1711 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */
1712 1712 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */
1713 1713
1714 1714 DPRINTF(0, (CE_CONT,
1715 1715 CONS "%s: PHY patched %04x %04x",
1716 1716 dp->name,
1717 1717 INW(dp, 0x00cc), /* PGSEL */
1718 1718 INW(dp, 0x00e4))); /* PMDCSR */
1719 1719 }
1720 1720
1721 1721 return (gem_mii_config_default(dp));
1722 1722 }
1723 1723
1724 1724 static int
1725 1725 sfe_mii_probe_dp83815(struct gem_dev *dp)
1726 1726 {
1727 1727 uint32_t val;
1728 1728
1729 1729 /* try external phy first */
1730 1730 DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
1731 1731 dp->name, __func__));
1732 1732 dp->mii_phy_addr = 0;
1733 1733 dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
1734 1734 dp->gc.gc_mii_read = &sfe_mii_read_sis900;
1735 1735 dp->gc.gc_mii_write = &sfe_mii_write_sis900;
1736 1736
1737 1737 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1738 1738 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1739 1739
1740 1740 if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
1741 1741 return (GEM_SUCCESS);
1742 1742 }
1743 1743
1744 1744 /* switch to internal phy */
1745 1745 DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
1746 1746 dp->name, __func__));
1747 1747 dp->mii_phy_addr = -1;
1748 1748 dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
1749 1749 dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
1750 1750 dp->gc.gc_mii_write = &sfe_mii_write_dp83815;
1751 1751
1752 1752 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1753 1753 OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
1754 1754 drv_usecwait(100); /* keep the RST bit asserted for a while */
1755 1755 OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1756 1756
1757 1757 /* wait for PHY reset */
1758 1758 delay(drv_usectohz(10000));
1759 1759
1760 1760 return (gem_mii_probe_default(dp));
1761 1761 }
1762 1762
1763 1763 static int
1764 1764 sfe_mii_init_dp83815(struct gem_dev *dp)
1765 1765 {
1766 1766 uint32_t val;
1767 1767
1768 1768 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1769 1769
1770 1770 if (dp->mii_phy_addr == -1) {
1771 1771 /* select internal phy */
1772 1772 OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1773 1773 } else {
1774 1774 /* select external phy */
1775 1775 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1776 1776 }
1777 1777
1778 1778 return (GEM_SUCCESS);
1779 1779 }
1780 1780
1781 1781 /*
1782 1782 * MII routines for SiS900
1783 1783 */
1784 1784 #define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
1785 1785 static void
1786 1786 sfe_mii_sync_sis900(struct gem_dev *dp)
1787 1787 {
1788 1788 int i;
1789 1789
1790 1790 /* send 32 ones to make the MII line idle */
1791 1791 for (i = 0; i < 32; i++) {
1792 1792 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
1793 1793 MDIO_DELAY(dp);
1794 1794 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
1795 1795 MDIO_DELAY(dp);
1796 1796 }
1797 1797 }
1798 1798
1799 1799 static int
1800 1800 sfe_mii_config_sis900(struct gem_dev *dp)
1801 1801 {
1802 1802 struct sfe_dev *lp = dp->private;
1803 1803
1804 1804 /* Do chip-dependent setup */
1805 1805 if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
1806 1806 /* workaround for ICS1893 PHY */
1807 1807 gem_mii_write(dp, 0x0018, 0xD200);
1808 1808 }
1809 1809
1810 1810 if (lp->revid == SIS630E_900_REV) {
1811 1811 /*
1812 1812 * SiS 630E has bugs on default values
1813 1813 * of PHY registers
1814 1814 */
1815 1815 gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
1816 1816 gem_mii_write(dp, MII_CONFIG1, 0x0022);
1817 1817 gem_mii_write(dp, MII_CONFIG2, 0xff00);
1818 1818 gem_mii_write(dp, MII_MASK, 0xffc0);
1819 1819 }
1820 1820 sfe_set_eq_sis630(dp);
1821 1821
1822 1822 return (gem_mii_config_default(dp));
1823 1823 }
1824 1824
1825 1825 static uint16_t
1826 1826 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1827 1827 {
1828 1828 uint32_t cmd;
1829 1829 uint16_t ret;
1830 1830 int i;
1831 1831 uint32_t data;
1832 1832
1833 1833 cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1834 1834
1835 1835 for (i = 31; i >= 18; i--) {
1836 1836 data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1837 1837 OUTL(dp, MEAR, data | MEAR_MDDIR);
1838 1838 MDIO_DELAY(dp);
1839 1839 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1840 1840 MDIO_DELAY(dp);
1841 1841 }
1842 1842
1843 1843 /* turnaround cycle */
1844 1844 OUTL(dp, MEAR, 0);
1845 1845 MDIO_DELAY(dp);
1846 1846
1847 1847 /* get response from PHY */
1848 1848 OUTL(dp, MEAR, MEAR_MDC);
1849 1849 MDIO_DELAY(dp);
1850 1850
1851 1851 OUTL(dp, MEAR, 0);
1852 1852 #if DEBUG_LEVEL > 0
1853 1853 (void) INL(dp, MEAR); /* delay */
1854 1854 if (INL(dp, MEAR) & MEAR_MDIO) {
1855 1855 cmn_err(CE_WARN, "%s: PHY@%d did not respond",
1856 1856 dp->name, dp->mii_phy_addr);
1857 1857 }
1858 1858 #else
1859 1859 MDIO_DELAY(dp);
1860 1860 #endif
1861 1861 /* terminate response cycle */
1862 1862 OUTL(dp, MEAR, MEAR_MDC);
1863 1863 MDIO_DELAY(dp);
1864 1864
1865 1865 ret = 0; /* to avoid lint errors */
1866 1866 for (i = 16; i > 0; i--) {
1867 1867 OUTL(dp, MEAR, 0);
1868 1868 (void) INL(dp, MEAR); /* delay */
1869 1869 ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1870 1870 OUTL(dp, MEAR, MEAR_MDC);
1871 1871 MDIO_DELAY(dp);
1872 1872 }
1873 1873
1874 1874 /* send two idle(Z) bits to terminate the read cycle */
1875 1875 for (i = 0; i < 2; i++) {
1876 1876 OUTL(dp, MEAR, 0);
1877 1877 MDIO_DELAY(dp);
1878 1878 OUTL(dp, MEAR, MEAR_MDC);
1879 1879 MDIO_DELAY(dp);
1880 1880 }
1881 1881
1882 1882 return (ret);
1883 1883 }
1884 1884
1885 1885 static void
1886 1886 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
1887 1887 {
1888 1888 uint32_t cmd;
1889 1889 int i;
1890 1890 uint32_t data;
1891 1891
1892 1892 cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);
1893 1893
1894 1894 for (i = 31; i >= 0; i--) {
1895 1895 data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1896 1896 OUTL(dp, MEAR, data | MEAR_MDDIR);
1897 1897 MDIO_DELAY(dp);
1898 1898 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1899 1899 MDIO_DELAY(dp);
1900 1900 }
1901 1901
1902 1902 /* send two idle(Z) bits to terminate the write cycle. */
1903 1903 for (i = 0; i < 2; i++) {
1904 1904 OUTL(dp, MEAR, 0);
1905 1905 MDIO_DELAY(dp);
1906 1906 OUTL(dp, MEAR, MEAR_MDC);
1907 1907 MDIO_DELAY(dp);
1908 1908 }
1909 1909 }
1910 1910 #undef MDIO_DELAY
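
/*
 * For reference: the routines above bit-bang IEEE 802.3 clause-22
 * MDIO frames through MEAR.  A read shifts out bits 31..18 of the
 * command word (ST=01, OP=10, PHYAD, REGAD; the preamble is sent
 * separately by sfe_mii_sync_sis900), floats the bus for the
 * turnaround, and clocks 16 data bits back in; a write shifts out
 * all 32 bits including TA=10 and the data.  A minimal sketch of
 * how such command words are conventionally composed -- the
 * driver's actual MII_READ_CMD/MII_WRITE_CMD encodings live in
 * sfe_mii.h, so treat this as an illustrative assumption:
 *
 *	uint32_t rd = (1U << 30) | (2U << 28) |		ST=01 OP=10
 *	    (phy << 23) | (reg << 18);			PHYAD REGAD
 *	uint32_t wr = (1U << 30) | (1U << 28) |		ST=01 OP=01
 *	    (phy << 23) | (reg << 18) |
 *	    (2U << 16) | val;				TA=10 DATA
 */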
1911 1911
1912 1912 static void
1913 1913 sfe_set_eq_sis630(struct gem_dev *dp)
1914 1914 {
1915 1915 uint16_t reg14h;
1916 1916 uint16_t eq_value;
1917 1917 uint16_t max_value;
1918 1918 uint16_t min_value;
1919 1919 int i;
1920 1920 uint8_t rev;
1921 1921 struct sfe_dev *lp = dp->private;
1922 1922
1923 1923 rev = lp->revid;
1924 1924
1925 1925 if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1926 1926 rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
1927 1927 /* it doesn't have an internal PHY */
1928 1928 return;
1929 1929 }
1930 1930
1931 1931 if (dp->mii_state == MII_STATE_LINKUP) {
1932 1932 reg14h = gem_mii_read(dp, MII_RESV);
1933 1933 gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
1934 1934
1935 1935 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1936 1936 max_value = min_value = eq_value;
1937 1937 for (i = 1; i < 10; i++) {
1938 1938 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1939 1939 max_value = max(eq_value, max_value);
1940 1940 min_value = min(eq_value, min_value);
1941 1941 }
1942 1942
1943 1943 /* rule to determine the equalizer value for the 630E */
1944 1944 if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1945 1945 rev == SIS630ET_900_REV) {
1946 1946 if (max_value < 5) {
1947 1947 eq_value = max_value;
1948 1948 } else if (5 <= max_value && max_value < 15) {
1949 1949 eq_value =
1950 1950 max(max_value + 1,
1951 1951 min_value + 2);
1952 1952 } else if (15 <= max_value) {
1953 1953 eq_value =
1954 1954 max(max_value + 5,
1955 1955 min_value + 6);
1956 1956 }
1957 1957 }
1958 1958 /* rule to determine the equalizer value for 630 B0/B1 */
1959 1959 else
1960 1960 if (rev == SIS630A_900_REV &&
1961 1961 (lp->bridge_revid == SIS630B0 ||
1962 1962 lp->bridge_revid == SIS630B1)) {
1963 1963
1964 1964 if (max_value == 0) {
1965 1965 eq_value = 3;
1966 1966 } else {
1967 1967 eq_value = (max_value + min_value + 1)/2;
1968 1968 }
1969 1969 }
1970 1970 /* write equalizer value and setting */
1971 1971 reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
1972 1972 reg14h |= 0x6000 | (eq_value << 3);
1973 1973 gem_mii_write(dp, MII_RESV, reg14h);
1974 1974 } else {
1975 1975 reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
1976 1976 if (rev == SIS630A_900_REV &&
1977 1977 (lp->bridge_revid == SIS630B0 ||
1978 1978 lp->bridge_revid == SIS630B1)) {
1979 1979
1980 1980 reg14h |= 0x0200;
1981 1981 }
1982 1982 gem_mii_write(dp, MII_RESV, reg14h);
1983 1983 }
1984 1984 }
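
/*
 * A worked example of the 630E equalizer rule above (illustrative
 * sample values, not measured hardware data): if the ten reads of
 * the equalizer field (bits 7..3 of MII_RESV) yield min_value = 4
 * and max_value = 7, the 5 <= max_value < 15 branch applies, so
 * eq_value = max(7 + 1, 4 + 2) = 8, and the register is rewritten
 * as reg14h = (reg14h & ~0x02f8) | 0x6000 | (8 << 3).
 */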
1985 1985
1986 1986 /* ======================================================== */
1987 1987 /*
1988 1988 * OS-dependent (device driver) routines
1989 1989 */
1990 1990 /* ======================================================== */
1991 1991 static void
1992 1992 sfe_chipinfo_init_sis900(struct gem_dev *dp)
1993 1993 {
1994 1994 int rev;
1995 1995 struct sfe_dev *lp = (struct sfe_dev *)dp->private;
1996 1996
1997 1997 rev = lp->revid;
1998 1998
1999 1999 if (rev == SIS630E_900_REV /* 0x81 */) {
2000 2000 /* sis630E */
2001 2001 lp->get_mac_addr = &sfe_get_mac_addr_sis630e;
2002 2002 } else if (rev > 0x81 && rev <= 0x90) {
2003 2003 /* 630S, 630EA1, 630ET, 635A */
2004 2004 lp->get_mac_addr = &sfe_get_mac_addr_sis635;
2005 2005 } else if (rev == SIS962_900_REV /* 0x91 */) {
2006 2006 /* sis962 or later */
2007 2007 lp->get_mac_addr = &sfe_get_mac_addr_sis962;
2008 2008 } else {
2009 2009 /* sis900 */
2010 2010 lp->get_mac_addr = &sfe_get_mac_addr_sis900;
2011 2011 }
2012 2012
2013 2013 lp->bridge_revid = 0;
2014 2014
2015 2015 if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
2016 2016 rev == SIS630A_900_REV || rev == SIS630ET_900_REV) {
2017 2017 /*
2018 2018 * read host bridge revision
2019 2019 */
2020 2020 dev_info_t *bridge;
2021 2021 ddi_acc_handle_t bridge_handle;
2022 2022
2023 2023 if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
2024 2024 cmn_err(CE_WARN,
2025 2025 "%s: cannot find host bridge (pci1039,630)",
2026 2026 dp->name);
2027 2027 return;
2028 2028 }
2029 2029
2030 2030 if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
2031 2031 cmn_err(CE_WARN, "%s: pci_config_setup failed",
2032 2032 dp->name);
2033 2033 return;
2034 2034 }
2035 2035
2036 2036 lp->bridge_revid =
2037 2037 pci_config_get8(bridge_handle, PCI_CONF_REVID);
2038 2038 pci_config_teardown(&bridge_handle);
2039 2039 }
2040 2040 }
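
/*
 * Summary of the revision dispatch above (assuming the revision
 * constants in sfereg.h match the in-line comments):
 *
 *	0x81		SiS630E			sfe_get_mac_addr_sis630e
 *	0x82 - 0x90	630S/630EA1/630ET/635A	sfe_get_mac_addr_sis635
 *	0x91		SiS962 or later		sfe_get_mac_addr_sis962
 *	otherwise	SiS900			sfe_get_mac_addr_sis900
 */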
2041 2041
2042 2042 static int
2043 2043 sfe_attach_chip(struct gem_dev *dp)
2044 2044 {
2045 2045 struct sfe_dev *lp = (struct sfe_dev *)dp->private;
2046 2046
2047 2047 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
2048 2048
2049 2049 /* set up the chip-dependent get_mac_addr function */
2050 2050 if (lp->chip->chip_type == CHIPTYPE_SIS900) {
2051 2051 sfe_chipinfo_init_sis900(dp);
2052 2052 } else {
2053 2053 lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
2054 2054 }
2055 2055
2056 2056 /* read MAC address */
2057 2057 if (!(lp->get_mac_addr)(dp)) {
2058 2058 cmn_err(CE_WARN,
2059 2059 "!%s: %s: failed to get factory mac address;"
2060 2060 " please specify a mac address in sfe.conf",
2061 2061 dp->name, __func__);
2062 2062 return (GEM_FAILURE);
2063 2063 }
2064 2064
2065 2065 if (lp->chip->chip_type == CHIPTYPE_DP83815) {
2066 2066 dp->mii_phy_addr = -1; /* no need to scan PHY */
2067 2067 dp->misc_flag |= GEM_VLAN_SOFT;
2068 2068 dp->txthr += 4; /* VTAG_SIZE */
2069 2069 }
2070 2070 dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);
2071 2071
2072 2072 return (GEM_SUCCESS);
2073 2073 }
2074 2074
2075 2075 static int
2076 2076 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2077 2077 {
2078 2078 int unit;
2079 2079 const char *drv_name;
2080 2080 int i;
2081 2081 ddi_acc_handle_t conf_handle;
2082 2082 uint16_t vid;
2083 2083 uint16_t did;
2084 2084 uint8_t rev;
2085 2085 #ifdef DEBUG_LEVEL
2086 2086 uint32_t iline;
2087 2087 uint8_t latim;
2088 2088 #endif
2089 2089 struct chip_info *p;
2090 2090 struct gem_dev *dp;
2091 2091 struct sfe_dev *lp;
2092 2092 caddr_t base;
2093 2093 ddi_acc_handle_t regs_ha;
2094 2094 struct gem_conf *gcp;
2095 2095
2096 2096 unit = ddi_get_instance(dip);
2097 2097 drv_name = ddi_driver_name(dip);
2098 2098
2099 2099 DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
2100 2100
2101 2101 /*
2102 2102 * Common code after power-up
2103 2103 */
2104 2104 if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
2105 2105 cmn_err(CE_WARN, "%s%d: pci_config_setup failed",
2106 2106 drv_name, unit);
2107 2107 goto err;
2108 2108 }
2109 2109
2110 2110 vid = pci_config_get16(conf_handle, PCI_CONF_VENID);
2111 2111 did = pci_config_get16(conf_handle, PCI_CONF_DEVID);
2112 2112 rev = pci_config_get8(conf_handle, PCI_CONF_REVID);
2113 2113 #ifdef DEBUG_LEVEL
2114 2114 iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
2115 2115 latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
2116 2116 #endif
2117 2117 #ifdef DEBUG_BUILT_IN_SIS900
2118 2118 rev = SIS630E_900_REV;
2119 2119 #endif
2120 2120 for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
2121 2121 if (p->venid == vid && p->devid == did) {
2122 2122 /* found */
2123 2123 goto chip_found;
2124 2124 }
2125 2125 }
2126 2126
2127 2127 /* Not found */
2128 2128 cmn_err(CE_WARN,
2129 2129 "%s%d: sfeattach: wrong PCI venid/devid (0x%x, 0x%x)",
2130 2130 drv_name, unit, vid, did);
2131 2131 pci_config_teardown(&conf_handle);
2132 2132 goto err;
2133 2133
2134 2134 chip_found:
2135 2135 pci_config_put16(conf_handle, PCI_CONF_COMM,
2136 2136 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
2137 2137 pci_config_get16(conf_handle, PCI_CONF_COMM));
2138 2138
2139 2139 /* ensure D0 mode */
2140 2140 (void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
2141 2141
2142 2142 pci_config_teardown(&conf_handle);
2143 2143
2144 2144 switch (cmd) {
2145 2145 case DDI_RESUME:
2146 2146 return (gem_resume(dip));
2147 2147
2148 2148 case DDI_ATTACH:
2149 2149
2150 2150 DPRINTF(0, (CE_CONT,
2151 2151 CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2152 2152 drv_name, unit, iline, latim));
2153 2153
2154 2154 /*
2155 2155 * Map in the device registers.
2156 2156 */
2157 2157 if (gem_pci_regs_map_setup(dip,
2158 2158 (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2159 2159 ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2160 2160 &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
2161 2161 cmn_err(CE_WARN,
2162 2162 "%s%d: ddi_regs_map_setup failed",
2163 2163 drv_name, unit);
2164 2164 goto err;
2165 2165 }
2166 2166
2167 2167 /*
2168 2168 * construct gem configuration
2169 2169 */
2170 2170 gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2171 2171
2172 2172 /* name */
2173 2173 (void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2174 2174
2175 2175 /* consistency on tx and rx */
2176 2176 gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2177 2177 gcp->gc_tx_max_frags = MAXTXFRAGS;
2178 2178 gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2179 2179 gcp->gc_tx_desc_unit_shift = 4; /* 16 byte */
2180 2180 gcp->gc_tx_buf_size = TX_BUF_SIZE;
2181 2181 gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2182 2182 gcp->gc_tx_ring_size = TX_RING_SIZE;
2183 2183 gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2184 2184 gcp->gc_tx_auto_pad = B_TRUE;
2185 2185 gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2186 2186 gcp->gc_tx_desc_write_oo = B_TRUE;
2187 2187
2188 2188 gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2189 2189 gcp->gc_rx_max_frags = MAXRXFRAGS;
2190 2190 gcp->gc_rx_desc_unit_shift = 4;
2191 2191 gcp->gc_rx_ring_size = RX_RING_SIZE;
2192 2192 gcp->gc_rx_buf_max = RX_BUF_SIZE;
2193 2193 gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2194 2194
2195 2195 /* map attributes */
2196 2196 gcp->gc_dev_attr = sfe_dev_attr;
2197 2197 gcp->gc_buf_attr = sfe_buf_attr;
2198 2198 gcp->gc_desc_attr = sfe_buf_attr;
2199 2199
2200 2200 /* dma attributes */
2201 2201 gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2202 2202
2203 2203 gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2204 2204 gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2205 2205 gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2206 2206
2207 2207 gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2208 2208 gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2209 2209 gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2210 2210
2211 2211 /* time out parameters */
2212 2212 gcp->gc_tx_timeout = 3*ONESEC;
2213 2213 gcp->gc_tx_timeout_interval = ONESEC;
2214 2214 if (p->chip_type == CHIPTYPE_DP83815) {
2215 2215 /* workaround for tx hang */
2216 2216 gcp->gc_tx_timeout_interval = ONESEC/20; /* 50 ms */
2217 2217 }
2218 2218
2219 2219 /* MII timeout parameters */
2220 2220 gcp->gc_mii_link_watch_interval = ONESEC;
2221 2221 gcp->gc_mii_an_watch_interval = ONESEC/5;
2222 2222 gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
2223 2223 gcp->gc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
2224 2224 gcp->gc_mii_an_wait = 0;
2225 2225 gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2226 2226
2227 2227 /* setting for general PHY */
2228 2228 gcp->gc_mii_an_delay = 0;
2229 2229 gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2230 2230 gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2231 2231 gcp->gc_mii_dont_reset = B_FALSE;
2232 2232
2233 2233
2234 2234 /* I/O methods */
2235 2235
2236 2236 /* mac operation */
2237 2237 gcp->gc_attach_chip = &sfe_attach_chip;
2238 2238 if (p->chip_type == CHIPTYPE_DP83815) {
2239 2239 gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2240 2240 } else {
2241 2241 gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2242 2242 }
2243 2243 gcp->gc_init_chip = &sfe_init_chip;
2244 2244 gcp->gc_start_chip = &sfe_start_chip;
2245 2245 gcp->gc_stop_chip = &sfe_stop_chip;
2246 2246 #ifdef USE_MULTICAST_HASHTBL
2247 2247 gcp->gc_multicast_hash = &sfe_mcast_hash;
2248 2248 #endif
2249 2249 if (p->chip_type == CHIPTYPE_DP83815) {
2250 2250 gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2251 2251 } else {
2252 2252 gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2253 2253 }
2254 2254 gcp->gc_set_media = &sfe_set_media;
2255 2255 gcp->gc_get_stats = &sfe_get_stats;
2256 2256 gcp->gc_interrupt = &sfe_interrupt;
2257 2257
2258 2258 /* descriptor operation */
2259 2259 gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2260 2260 gcp->gc_tx_start = &sfe_tx_start;
2261 2261 gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2262 2262 gcp->gc_rx_start = NULL;
2263 2263
2264 2264 gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2265 2265 gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2266 2266 gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2267 2267 gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2268 2268 gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2269 2269 gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2270 2270
2271 2271 /* mii operations */
2272 2272 if (p->chip_type == CHIPTYPE_DP83815) {
2273 2273 gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
2274 2274 gcp->gc_mii_init = &sfe_mii_init_dp83815;
2275 2275 gcp->gc_mii_config = &sfe_mii_config_dp83815;
2276 2276 gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2277 2277 gcp->gc_mii_read = &sfe_mii_read_dp83815;
2278 2278 gcp->gc_mii_write = &sfe_mii_write_dp83815;
2279 2279 gcp->gc_mii_tune_phy = NULL;
2280 2280 gcp->gc_flow_control = FLOW_CONTROL_NONE;
2281 2281 } else {
2282 2282 gcp->gc_mii_probe = &gem_mii_probe_default;
2283 2283 gcp->gc_mii_init = NULL;
2284 2284 gcp->gc_mii_config = &sfe_mii_config_sis900;
2285 2285 gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2286 2286 gcp->gc_mii_read = &sfe_mii_read_sis900;
2287 2287 gcp->gc_mii_write = &sfe_mii_write_sis900;
2288 2288 gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2289 2289 gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2290 2290 }
2291 2291
2292 2292 lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2293 2293 lp->chip = p;
2294 2294 lp->revid = rev;
2295 2295 lp->our_intr_bits = 0;
2296 2296 lp->isr_pended = 0;
2297 2297
2298 2298 cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2299 2299 drv_name, unit, p->chip_name, rev);
2300 2300
2301 2301 dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
2302 2302 lp, sizeof (*lp));
2303 2303 kmem_free(gcp, sizeof (*gcp));
2304 2304
2305 2305 if (dp == NULL) {
2306 2306 goto err_freelp;
2307 2307 }
2308 2308
2309 2309 return (DDI_SUCCESS);
2310 2310
2311 2311 err_freelp:
2312 2312 kmem_free(lp, sizeof (struct sfe_dev));
2313 2313 err:
2314 2314 return (DDI_FAILURE);
2315 2315 }
2316 2316 return (DDI_FAILURE);
2317 2317 }
2318 2318
2319 2319 static int
2320 2320 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2321 2321 {
2322 2322 switch (cmd) {
2323 2323 case DDI_SUSPEND:
2324 2324 return (gem_suspend(dip));
2325 2325
2326 2326 case DDI_DETACH:
2327 2327 return (gem_do_detach(dip));
2328 2328 }
2329 2329 return (DDI_FAILURE);
2330 2330 }
2331 2331
2332 2332 /*
2333 2333 * quiesce(9E) entry point.
2334 2334 *
2335 2335 * This function is called when the system is single-threaded at high
2336 2336 * PIL with preemption disabled.  Therefore, this function must not
2337 2337 * block.
2338 2338 *
2339 2339 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2340 2340 * DDI_FAILURE indicates an error condition and should almost never happen.
2341 2341 */
2342 2342 #ifdef __sparc
2343 2343 #define sfe_quiesce ddi_quiesce_not_supported
2344 2344 #else
2345 2345 static int
2346 2346 sfe_quiesce(dev_info_t *dip)
2347 2347 {
2348 2348 struct gem_dev *dp;
2349 2349 int ret = 0;
2350 2350
2351 2351 dp = GEM_GET_DEV(dip);
2352 2352
2353 2353 if (dp == NULL)
2354 2354 return (DDI_FAILURE);
2355 2355
2356 2356 ret = sfe_stop_chip_quiesce(dp);
2357 2357
2358 2358 return (ret);
2359 2359 }
2360 2360 #endif
2361 2361
2362 2362 /* ======================================================== */
2363 2363 /*
2364 2364 * OS-dependent (loadable streams driver) routines
2365 2365 */
2366 2366 /* ======================================================== */
2367 2367 DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
2368 2368 nodev, NULL, D_MP, NULL, sfe_quiesce);
2369 2369
2370 2370 static struct modldrv modldrv = {
2371 2371 &mod_driverops, /* Type of module. This one is a driver */
2372 2372 ident,
2373 2373 &sfe_ops, /* driver ops */
2374 2374 };
2375 2375
2376 2376 static struct modlinkage modlinkage = {
2377 - MODREV_1, &modldrv, NULL
2377 + MODREV_1, { &modldrv, NULL }
2378 2378 };
2379 2379
2380 2380 /* ======================================================== */
2381 2381 /*
2382 2382 * Loadable module support
2383 2383 */
2384 2384 /* ======================================================== */
2385 2385 int
2386 2386 _init(void)
2387 2387 {
2388 2388 int status;
2389 2389
2390 2390 DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
2391 2391 gem_mod_init(&sfe_ops, "sfe");
2392 2392 status = mod_install(&modlinkage);
2393 2393 if (status != DDI_SUCCESS) {
2394 2394 gem_mod_fini(&sfe_ops);
2395 2395 }
2396 2396 return (status);
2397 2397 }
2398 2398
2399 2399 /*
2400 2400 * _fini : loadable module unload entry point
2401 2401 */
2402 2402 int
2403 2403 _fini(void)
2404 2404 {
2405 2405 int status;
2406 2406
2407 2407 DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
2408 2408 status = mod_remove(&modlinkage);
2409 2409 if (status == DDI_SUCCESS) {
2410 2410 gem_mod_fini(&sfe_ops);
2411 2411 }
2412 2412 return (status);
2413 2413 }
2414 2414
2415 2415 int
2416 2416 _info(struct modinfo *modinfop)
2417 2417 {
2418 2418 return (mod_info(&modlinkage, modinfop));
2419 2419 }