2976 remove useless offsetof() macros
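Reviewer note: the change adds an include of <sys/sysmacros.h> and drops the driver-local offsetof() fallback, since that header already supplies an equivalent offsetof() definition on illumos. A minimal sketch of the idea follows; the struct, member names, and variable below are illustrative only and do not come from the driver:

	#include <sys/types.h>
	#include <sys/sysmacros.h>	/* provides offsetof() */

	struct example {		/* hypothetical struct for illustration */
		int	a;
		char	b;
	};

	/* byte offset of member 'b'; no local ((long)&(((t *)0)->m)) macro is needed */
	size_t off_b = offsetof(struct example, b);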
--- old/usr/src/uts/common/io/sfe/sfe_util.c
+++ new/usr/src/uts/common/io/sfe/sfe_util.c
1 1 /*
2 2 * sfe_util.c: general ethernet mac driver framework version 2.6
3 3 *
4 4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5 5 *
6 6 * Redistribution and use in source and binary forms, with or without
7 7 * modification, are permitted provided that the following conditions are met:
8 8 *
9 9 * 1. Redistributions of source code must retain the above copyright notice,
10 10 * this list of conditions and the following disclaimer.
11 11 *
12 12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 13 * this list of conditions and the following disclaimer in the documentation
14 14 * and/or other materials provided with the distribution.
15 15 *
16 16 * 3. Neither the name of the author nor the names of its contributors may be
17 17 * used to endorse or promote products derived from this software without
18 18 * specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 31 * DAMAGE.
32 32 */
33 33
34 34 /*
35 35 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36 36 * Use is subject to license terms.
37 37 */
38 38
39 39 /*
40 40 * System Header files.
41 41 */
42 42 #include <sys/types.h>
43 43 #include <sys/conf.h>
44 44 #include <sys/debug.h>
45 45 #include <sys/kmem.h>
46 46 #include <sys/vtrace.h>
47 47 #include <sys/ethernet.h>
48 48 #include <sys/modctl.h>
49 49 #include <sys/errno.h>
50 50 #include <sys/ddi.h>
51 51 #include <sys/sunddi.h>
52 52 #include <sys/stream.h> /* required for MBLK* */
53 53 #include <sys/strsun.h> /* required for mionack() */
54 54 #include <sys/byteorder.h>
55 +#include <sys/sysmacros.h>
55 56 #include <sys/pci.h>
56 57 #include <inet/common.h>
57 58 #include <inet/led.h>
58 59 #include <inet/mi.h>
59 60 #include <inet/nd.h>
60 61 #include <sys/crc32.h>
61 62
62 63 #include <sys/note.h>
63 64
64 65 #include "sfe_mii.h"
65 66 #include "sfe_util.h"
66 67
67 68
68 69
69 70 extern char ident[];
70 71
71 72 /* Debugging support */
72 73 #ifdef GEM_DEBUG_LEVEL
73 74 static int gem_debug = GEM_DEBUG_LEVEL;
74 75 #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
75 76 #else
76 77 #define DPRINTF(n, args)
77 78 #undef ASSERT
78 79 #define ASSERT(x)
79 80 #endif
80 81
81 82 #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
82 83
83 84 /*
84 85 * Useful macros and typedefs
85 86 */
86 87 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
87 88
88 89 #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
89 90 #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
90 91
91 92 #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
92 93 #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
93 94
94 95
95 96 #ifndef INT32_MAX
96 97 #define INT32_MAX 0x7fffffff
97 98 #endif
98 99
99 100 #define VTAG_OFF (ETHERADDRL*2)
100 101 #ifndef VTAG_SIZE
101 102 #define VTAG_SIZE 4
102 103 #endif
103 104 #ifndef VTAG_TPID
104 105 #define VTAG_TPID 0x8100U
105 106 #endif
106 107
107 108 #define GET_TXBUF(dp, sn) \
108 109 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
109 110
110 -#ifndef offsetof
111 -#define offsetof(t, m) ((long)&(((t *) 0)->m))
112 -#endif
113 111 #define TXFLAG_VTAG(flag) \
114 112 (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
115 113
116 114 #define MAXPKTBUF(dp) \
117 115 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
118 116
119 117 #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
120 118 #define BOOLEAN(x) ((x) != 0)
121 119
122 120 /*
123 121 * Macros to distinguish chip generations.
124 122 */
125 123
126 124 /*
127 125 * Private functions
128 126 */
129 127 static void gem_mii_start(struct gem_dev *);
130 128 static void gem_mii_stop(struct gem_dev *);
131 129
132 130 /* local buffer management */
133 131 static void gem_nd_setup(struct gem_dev *dp);
134 132 static void gem_nd_cleanup(struct gem_dev *dp);
135 133 static int gem_alloc_memory(struct gem_dev *);
136 134 static void gem_free_memory(struct gem_dev *);
137 135 static void gem_init_rx_ring(struct gem_dev *);
138 136 static void gem_init_tx_ring(struct gem_dev *);
139 137 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
140 138
141 139 static void gem_tx_timeout(struct gem_dev *);
142 140 static void gem_mii_link_watcher(struct gem_dev *dp);
143 141 static int gem_mac_init(struct gem_dev *dp);
144 142 static int gem_mac_start(struct gem_dev *dp);
145 143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
146 144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
147 145
148 146 static struct ether_addr gem_etherbroadcastaddr = {
149 147 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150 148 };
151 149
152 150 int gem_speed_value[] = {10, 100, 1000};
153 151
154 152 /* ============================================================== */
155 153 /*
156 154 * Misc runtime routines
157 155 */
158 156 /* ============================================================== */
159 157 /*
160 158 * Ether CRC calculation according to 21143 data sheet
161 159 */
162 160 uint32_t
163 161 gem_ether_crc_le(const uint8_t *addr, int len)
164 162 {
165 163 uint32_t crc;
166 164
167 165 CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
168 166 return (crc);
169 167 }
170 168
171 169 uint32_t
172 170 gem_ether_crc_be(const uint8_t *addr, int len)
173 171 {
174 172 int idx;
175 173 int bit;
176 174 uint_t data;
177 175 uint32_t crc;
178 176 #define CRC32_POLY_BE 0x04c11db7
179 177
180 178 crc = 0xffffffff;
181 179 for (idx = 0; idx < len; idx++) {
182 180 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
183 181 crc = (crc << 1)
184 182 ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
185 183 }
186 184 }
187 185 return (crc);
188 186 #undef CRC32_POLY_BE
189 187 }
190 188
191 189 int
192 190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
193 191 {
194 192 char propname[32];
195 193
196 194 (void) sprintf(propname, prop_template, dp->name);
197 195
198 196 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
199 197 DDI_PROP_DONTPASS, propname, def_val));
200 198 }
201 199
202 200 static int
203 201 gem_population(uint32_t x)
204 202 {
205 203 int i;
206 204 int cnt;
207 205
208 206 cnt = 0;
209 207 for (i = 0; i < 32; i++) {
210 208 if (x & (1 << i)) {
211 209 cnt++;
212 210 }
213 211 }
214 212 return (cnt);
215 213 }
216 214
217 215 #ifdef GEM_DEBUG_LEVEL
218 216 #ifdef GEM_DEBUG_VLAN
219 217 static void
220 218 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
221 219 boolean_t check_cksum)
222 220 {
223 221 char msg[180];
224 222 uint8_t buf[18+20+20];
225 223 uint8_t *p;
226 224 size_t offset;
227 225 uint_t ethertype;
228 226 uint_t proto;
229 227 uint_t ipproto = 0;
230 228 uint_t iplen;
231 229 uint_t iphlen;
232 230 uint_t tcplen;
233 231 uint_t udplen;
234 232 uint_t cksum;
235 233 int rest;
236 234 int len;
237 235 char *bp;
238 236 mblk_t *tp;
239 237 extern uint_t ip_cksum(mblk_t *, int, uint32_t);
240 238
241 239 msg[0] = 0;
242 240 bp = msg;
243 241
244 242 rest = sizeof (buf);
245 243 offset = 0;
246 244 for (tp = mp; tp; tp = tp->b_cont) {
247 245 len = tp->b_wptr - tp->b_rptr;
248 246 len = min(rest, len);
249 247 bcopy(tp->b_rptr, &buf[offset], len);
250 248 rest -= len;
251 249 offset += len;
252 250 if (rest == 0) {
253 251 break;
254 252 }
255 253 }
256 254
257 255 offset = 0;
258 256 p = &buf[offset];
259 257
260 258 /* ethernet address */
261 259 sprintf(bp,
262 260 "ether: %02x:%02x:%02x:%02x:%02x:%02x"
263 261 " -> %02x:%02x:%02x:%02x:%02x:%02x",
264 262 p[6], p[7], p[8], p[9], p[10], p[11],
265 263 p[0], p[1], p[2], p[3], p[4], p[5]);
266 264 bp = &msg[strlen(msg)];
267 265
268 266 /* vlan tag and ethertype */
269 267 ethertype = GET_ETHERTYPE(p);
270 268 if (ethertype == VTAG_TPID) {
271 269 sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
272 270 bp = &msg[strlen(msg)];
273 271
274 272 offset += VTAG_SIZE;
275 273 p = &buf[offset];
276 274 ethertype = GET_ETHERTYPE(p);
277 275 }
278 276 sprintf(bp, " type:%04x", ethertype);
279 277 bp = &msg[strlen(msg)];
280 278
281 279 /* ethernet packet length */
282 280 sprintf(bp, " mblklen:%d", msgdsize(mp));
283 281 bp = &msg[strlen(msg)];
284 282 if (mp->b_cont) {
285 283 sprintf(bp, "(");
286 284 bp = &msg[strlen(msg)];
287 285 for (tp = mp; tp; tp = tp->b_cont) {
288 286 if (tp == mp) {
289 287 sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
290 288 } else {
291 289 sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
292 290 }
293 291 bp = &msg[strlen(msg)];
294 292 }
295 293 sprintf(bp, ")");
296 294 bp = &msg[strlen(msg)];
297 295 }
298 296
299 297 if (ethertype != ETHERTYPE_IP) {
300 298 goto x;
301 299 }
302 300
303 301 /* ip address */
304 302 offset += sizeof (struct ether_header);
305 303 p = &buf[offset];
306 304 ipproto = p[9];
307 305 iplen = GET_NET16(&p[2]);
308 306 sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
309 307 p[12], p[13], p[14], p[15],
310 308 p[16], p[17], p[18], p[19],
311 309 ipproto, iplen);
312 310 bp = (void *)&msg[strlen(msg)];
313 311
314 312 iphlen = (p[0] & 0xf) * 4;
315 313
316 314 /* cksum for pseudo header */
317 315 cksum = *(uint16_t *)&p[12];
318 316 cksum += *(uint16_t *)&p[14];
319 317 cksum += *(uint16_t *)&p[16];
320 318 cksum += *(uint16_t *)&p[18];
321 319 cksum += BE_16(ipproto);
322 320
323 321 /* tcp or udp protocol header */
324 322 offset += iphlen;
325 323 p = &buf[offset];
326 324 if (ipproto == IPPROTO_TCP) {
327 325 tcplen = iplen - iphlen;
328 326 sprintf(bp, ", tcp: len:%d cksum:%x",
329 327 tcplen, GET_NET16(&p[16]));
330 328 bp = (void *)&msg[strlen(msg)];
331 329
332 330 if (check_cksum) {
333 331 cksum += BE_16(tcplen);
334 332 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
335 333 sprintf(bp, " (%s)",
336 334 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
337 335 bp = (void *)&msg[strlen(msg)];
338 336 }
339 337 } else if (ipproto == IPPROTO_UDP) {
340 338 udplen = GET_NET16(&p[4]);
341 339 sprintf(bp, ", udp: len:%d cksum:%x",
342 340 udplen, GET_NET16(&p[6]));
343 341 bp = (void *)&msg[strlen(msg)];
344 342
345 343 if (GET_NET16(&p[6]) && check_cksum) {
346 344 cksum += *(uint16_t *)&p[4];
347 345 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
348 346 sprintf(bp, " (%s)",
349 347 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
350 348 bp = (void *)&msg[strlen(msg)];
351 349 }
352 350 }
353 351 x:
354 352 cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
355 353 }
356 354 #endif /* GEM_DEBUG_VLAN */
357 355 #endif /* GEM_DEBUG_LEVEL */
358 356
359 357 /* ============================================================== */
360 358 /*
361 359 * IO cache flush
362 360 */
363 361 /* ============================================================== */
364 362 __INLINE__ void
365 363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
366 364 {
367 365 int n;
368 366 int m;
369 367 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
370 368
371 369 /* sync active descriptors */
372 370 if (rx_desc_unit_shift < 0 || nslot == 0) {
373 371 /* no rx descriptor ring */
374 372 return;
375 373 }
376 374
377 375 n = dp->gc.gc_rx_ring_size - head;
378 376 if ((m = nslot - n) > 0) {
379 377 (void) ddi_dma_sync(dp->desc_dma_handle,
380 378 (off_t)0,
381 379 (size_t)(m << rx_desc_unit_shift),
382 380 how);
383 381 nslot = n;
384 382 }
385 383
386 384 (void) ddi_dma_sync(dp->desc_dma_handle,
387 385 (off_t)(head << rx_desc_unit_shift),
388 386 (size_t)(nslot << rx_desc_unit_shift),
389 387 how);
390 388 }
391 389
392 390 __INLINE__ void
393 391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
394 392 {
395 393 int n;
396 394 int m;
397 395 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
398 396
399 397 /* sync active descriptors */
400 398 if (tx_desc_unit_shift < 0 || nslot == 0) {
401 399 /* no tx descriptor ring */
402 400 return;
403 401 }
404 402
405 403 n = dp->gc.gc_tx_ring_size - head;
406 404 if ((m = nslot - n) > 0) {
407 405 (void) ddi_dma_sync(dp->desc_dma_handle,
408 406 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
409 407 (size_t)(m << tx_desc_unit_shift),
410 408 how);
411 409 nslot = n;
412 410 }
413 411
414 412 (void) ddi_dma_sync(dp->desc_dma_handle,
415 413 (off_t)((head << tx_desc_unit_shift)
416 414 + (dp->tx_ring_dma - dp->rx_ring_dma)),
417 415 (size_t)(nslot << tx_desc_unit_shift),
418 416 how);
419 417 }
420 418
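Reviewer note: both gem_rx_desc_dma_sync() and gem_tx_desc_dma_sync() above cope with the circular descriptor ring by splitting a wrapped range into at most two ddi_dma_sync() calls. A worked example, with ring and descriptor sizes chosen purely for illustration: for a 256-entry rx ring with 16-byte descriptors (rx_desc_unit_shift = 4), head = 250 and nslot = 10 give n = 256 - 250 = 6 and m = 10 - 6 = 4, so the wrapped slots 0..3 are synced first (offset 0, length 4 << 4 bytes) and slots 250..255 are synced second (offset 250 << 4, length 6 << 4 bytes).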
421 419 static void
422 420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
423 421 {
424 422 gem_rx_desc_dma_sync(dp,
425 423 SLOT(head, dp->gc.gc_rx_ring_size), nslot,
426 424 DDI_DMA_SYNC_FORDEV);
427 425 }
428 426
429 427 /* ============================================================== */
430 428 /*
431 429 * Buffer management
432 430 */
433 431 /* ============================================================== */
434 432 static void
435 433 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
436 434 {
437 435 cmn_err(level,
438 436 "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
439 437 "tx_softq: %d[%d] %d[%d] (+%d), "
440 438 "tx_free: %d[%d] %d[%d] (+%d), "
441 439 "tx_desc: %d[%d] %d[%d] (+%d), "
442 440 "intr: %d[%d] (+%d), ",
443 441 dp->name, title,
444 442 dp->tx_active_head,
445 443 SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
446 444 dp->tx_active_tail,
447 445 SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
448 446 dp->tx_active_tail - dp->tx_active_head,
449 447 dp->tx_softq_head,
450 448 SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
451 449 dp->tx_softq_tail,
452 450 SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
453 451 dp->tx_softq_tail - dp->tx_softq_head,
454 452 dp->tx_free_head,
455 453 SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
456 454 dp->tx_free_tail,
457 455 SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
458 456 dp->tx_free_tail - dp->tx_free_head,
459 457 dp->tx_desc_head,
460 458 SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
461 459 dp->tx_desc_tail,
462 460 SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
463 461 dp->tx_desc_tail - dp->tx_desc_head,
464 462 dp->tx_desc_intr,
465 463 SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
466 464 dp->tx_desc_intr - dp->tx_desc_head);
467 465 }
468 466
469 467 static void
470 468 gem_free_rxbuf(struct rxbuf *rbp)
471 469 {
472 470 struct gem_dev *dp;
473 471
474 472 dp = rbp->rxb_devp;
475 473 ASSERT(mutex_owned(&dp->intrlock));
476 474 rbp->rxb_next = dp->rx_buf_freelist;
477 475 dp->rx_buf_freelist = rbp;
478 476 dp->rx_buf_freecnt++;
479 477 }
480 478
481 479 /*
482 480 * gem_get_rxbuf: supply a receive buffer which has been mapped into
483 481 * DMA space.
484 482 */
485 483 struct rxbuf *
486 484 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
487 485 {
488 486 struct rxbuf *rbp;
489 487 uint_t count = 0;
490 488 int i;
491 489 int err;
492 490
493 491 ASSERT(mutex_owned(&dp->intrlock));
494 492
495 493 DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
496 494 dp->rx_buf_freecnt));
497 495 /*
498 496 * Get rx buffer management structure
499 497 */
500 498 rbp = dp->rx_buf_freelist;
501 499 if (rbp) {
502 500 /* get one from the recycle list */
503 501 ASSERT(dp->rx_buf_freecnt > 0);
504 502
505 503 dp->rx_buf_freelist = rbp->rxb_next;
506 504 dp->rx_buf_freecnt--;
507 505 rbp->rxb_next = NULL;
508 506 return (rbp);
509 507 }
510 508
511 509 /*
512 510 * Allocate a rx buffer management structure
513 511 */
514 512 rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
515 513 if (rbp == NULL) {
516 514 /* no memory */
517 515 return (NULL);
518 516 }
519 517
520 518 /*
521 519 * Prepare a back pointer to the device structure which will be
522 520 * referred to when freeing the buffer later.
523 521 */
524 522 rbp->rxb_devp = dp;
525 523
526 524 /* allocate a dma handle for rx data buffer */
527 525 if ((err = ddi_dma_alloc_handle(dp->dip,
528 526 &dp->gc.gc_dma_attr_rxbuf,
529 527 (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
530 528 NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
531 529
532 530 cmn_err(CE_WARN,
533 531 "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
534 532 dp->name, __func__, err);
535 533
536 534 kmem_free(rbp, sizeof (struct rxbuf));
537 535 return (NULL);
538 536 }
539 537
540 538 /* allocate a bounce buffer for rx */
541 539 if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
542 540 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
543 541 &dp->gc.gc_buf_attr,
544 542 /*
545 543 * if the nic requires a header at the top of receive buffers,
546 544 * it may access the rx buffer randomly.
547 545 */
548 546 (dp->gc.gc_rx_header_len > 0)
549 547 ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
550 548 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
551 549 NULL,
552 550 &rbp->rxb_buf, &rbp->rxb_buf_len,
553 551 &rbp->rxb_bah)) != DDI_SUCCESS) {
554 552
555 553 cmn_err(CE_WARN,
556 554 "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
557 555 dp->name, __func__, err);
558 556
559 557 ddi_dma_free_handle(&rbp->rxb_dh);
560 558 kmem_free(rbp, sizeof (struct rxbuf));
561 559 return (NULL);
562 560 }
563 561
564 562 /* Mapin the bounce buffer into the DMA space */
565 563 if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
566 564 NULL, rbp->rxb_buf, dp->rx_buf_len,
567 565 ((dp->gc.gc_rx_header_len > 0)
568 566 ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
569 567 :(DDI_DMA_READ | DDI_DMA_STREAMING)),
570 568 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
571 569 NULL,
572 570 rbp->rxb_dmacookie,
573 571 &count)) != DDI_DMA_MAPPED) {
574 572
575 573 ASSERT(err != DDI_DMA_INUSE);
576 574 DPRINTF(0, (CE_WARN,
577 575 "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
578 576 dp->name, __func__, err));
579 577
580 578 /*
581 579 * we failed to allocate a dma resource
582 580 * for the rx bounce buffer.
583 581 */
584 582 ddi_dma_mem_free(&rbp->rxb_bah);
585 583 ddi_dma_free_handle(&rbp->rxb_dh);
586 584 kmem_free(rbp, sizeof (struct rxbuf));
587 585 return (NULL);
588 586 }
589 587
590 588 /* collect the rest of the DMA cookies for the mapping */
591 589 for (i = 1; i < count; i++) {
592 590 ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
593 591 }
594 592 rbp->rxb_nfrags = count;
595 593
596 594 /* Now we successfully prepared an rx buffer */
597 595 dp->rx_buf_allocated++;
598 596
599 597 return (rbp);
600 598 }
601 599
602 600 /* ============================================================== */
603 601 /*
604 602 * memory resource management
605 603 */
606 604 /* ============================================================== */
607 605 static int
608 606 gem_alloc_memory(struct gem_dev *dp)
609 607 {
610 608 caddr_t ring;
611 609 caddr_t buf;
612 610 size_t req_size;
613 611 size_t ring_len;
614 612 size_t buf_len;
615 613 ddi_dma_cookie_t ring_cookie;
616 614 ddi_dma_cookie_t buf_cookie;
617 615 uint_t count;
618 616 int i;
619 617 int err;
620 618 struct txbuf *tbp;
621 619 int tx_buf_len;
622 620 ddi_dma_attr_t dma_attr_txbounce;
623 621
624 622 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
625 623
626 624 dp->desc_dma_handle = NULL;
627 625 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
628 626
629 627 if (req_size > 0) {
630 628 /*
631 629 * Alloc RX/TX descriptors and a io area.
632 630 */
633 631 if ((err = ddi_dma_alloc_handle(dp->dip,
634 632 &dp->gc.gc_dma_attr_desc,
635 633 DDI_DMA_SLEEP, NULL,
636 634 &dp->desc_dma_handle)) != DDI_SUCCESS) {
637 635 cmn_err(CE_WARN,
638 636 "!%s: %s: ddi_dma_alloc_handle failed: %d",
639 637 dp->name, __func__, err);
640 638 return (ENOMEM);
641 639 }
642 640
643 641 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
644 642 req_size, &dp->gc.gc_desc_attr,
645 643 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
646 644 &ring, &ring_len,
647 645 &dp->desc_acc_handle)) != DDI_SUCCESS) {
648 646 cmn_err(CE_WARN,
649 647 "!%s: %s: ddi_dma_mem_alloc failed: "
650 648 "ret %d, request size: %d",
651 649 dp->name, __func__, err, (int)req_size);
652 650 ddi_dma_free_handle(&dp->desc_dma_handle);
653 651 return (ENOMEM);
654 652 }
655 653
656 654 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
657 655 NULL, ring, ring_len,
658 656 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
659 657 DDI_DMA_SLEEP, NULL,
660 658 &ring_cookie, &count)) != DDI_SUCCESS) {
661 659 ASSERT(err != DDI_DMA_INUSE);
662 660 cmn_err(CE_WARN,
663 661 "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
664 662 dp->name, __func__, err);
665 663 ddi_dma_mem_free(&dp->desc_acc_handle);
666 664 ddi_dma_free_handle(&dp->desc_dma_handle);
667 665 return (ENOMEM);
668 666 }
669 667 ASSERT(count == 1);
670 668
671 669 /* set base of rx descriptor ring */
672 670 dp->rx_ring = ring;
673 671 dp->rx_ring_dma = ring_cookie.dmac_laddress;
674 672
675 673 /* set base of tx descriptor ring */
676 674 dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
677 675 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
678 676
679 677 /* set base of io area */
680 678 dp->io_area = dp->tx_ring + dp->tx_desc_size;
681 679 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
682 680 }
683 681
684 682 /*
685 683 * Prepare DMA resources for tx packets
686 684 */
687 685 ASSERT(dp->gc.gc_tx_buf_size > 0);
688 686
689 687 /* Special dma attribute for tx bounce buffers */
690 688 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
691 689 dma_attr_txbounce.dma_attr_sgllen = 1;
692 690 dma_attr_txbounce.dma_attr_align =
693 691 max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
694 692
695 693 /* Size for tx bounce buffers must be max tx packet size. */
696 694 tx_buf_len = MAXPKTBUF(dp);
697 695 tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
698 696
699 697 ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
700 698
701 699 for (i = 0, tbp = dp->tx_buf;
702 700 i < dp->gc.gc_tx_buf_size; i++, tbp++) {
703 701
704 702 /* setup bounce buffers for tx packets */
705 703 if ((err = ddi_dma_alloc_handle(dp->dip,
706 704 &dma_attr_txbounce,
707 705 DDI_DMA_SLEEP, NULL,
708 706 &tbp->txb_bdh)) != DDI_SUCCESS) {
709 707
710 708 cmn_err(CE_WARN,
711 709 "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
712 710 " err=%d, i=%d",
713 711 dp->name, __func__, err, i);
714 712 goto err_alloc_dh;
715 713 }
716 714
717 715 if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
718 716 tx_buf_len,
719 717 &dp->gc.gc_buf_attr,
720 718 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
721 719 &buf, &buf_len,
722 720 &tbp->txb_bah)) != DDI_SUCCESS) {
723 721 cmn_err(CE_WARN,
724 722 "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
725 723 "ret %d, request size %d",
726 724 dp->name, __func__, err, tx_buf_len);
727 725 ddi_dma_free_handle(&tbp->txb_bdh);
728 726 goto err_alloc_dh;
729 727 }
730 728
731 729 if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
732 730 NULL, buf, buf_len,
733 731 DDI_DMA_WRITE | DDI_DMA_STREAMING,
734 732 DDI_DMA_SLEEP, NULL,
735 733 &buf_cookie, &count)) != DDI_SUCCESS) {
736 734 ASSERT(err != DDI_DMA_INUSE);
737 735 cmn_err(CE_WARN,
738 736 "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
739 737 dp->name, __func__, err);
740 738 ddi_dma_mem_free(&tbp->txb_bah);
741 739 ddi_dma_free_handle(&tbp->txb_bdh);
742 740 goto err_alloc_dh;
743 741 }
744 742 ASSERT(count == 1);
745 743 tbp->txb_buf = buf;
746 744 tbp->txb_buf_dma = buf_cookie.dmac_laddress;
747 745 }
748 746
749 747 return (0);
750 748
751 749 err_alloc_dh:
752 750 if (dp->gc.gc_tx_buf_size > 0) {
753 751 while (i-- > 0) {
754 752 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
755 753 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
756 754 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
757 755 }
758 756 }
759 757
760 758 if (dp->desc_dma_handle) {
761 759 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
762 760 ddi_dma_mem_free(&dp->desc_acc_handle);
763 761 ddi_dma_free_handle(&dp->desc_dma_handle);
764 762 dp->desc_dma_handle = NULL;
765 763 }
766 764
767 765 return (ENOMEM);
768 766 }
769 767
770 768 static void
771 769 gem_free_memory(struct gem_dev *dp)
772 770 {
773 771 int i;
774 772 struct rxbuf *rbp;
775 773 struct txbuf *tbp;
776 774
777 775 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778 776
779 777 /* Free TX/RX descriptors and tx padding buffer */
780 778 if (dp->desc_dma_handle) {
781 779 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
782 780 ddi_dma_mem_free(&dp->desc_acc_handle);
783 781 ddi_dma_free_handle(&dp->desc_dma_handle);
784 782 dp->desc_dma_handle = NULL;
785 783 }
786 784
787 785 /* Free dma handles for Tx */
788 786 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
789 787 /* Free bounce buffer associated to each txbuf */
790 788 (void) ddi_dma_unbind_handle(tbp->txb_bdh);
791 789 ddi_dma_mem_free(&tbp->txb_bah);
792 790 ddi_dma_free_handle(&tbp->txb_bdh);
793 791 }
794 792
795 793 /* Free rx buffer */
796 794 while ((rbp = dp->rx_buf_freelist) != NULL) {
797 795
798 796 ASSERT(dp->rx_buf_freecnt > 0);
799 797
800 798 dp->rx_buf_freelist = rbp->rxb_next;
801 799 dp->rx_buf_freecnt--;
802 800
803 801 /* release DMA mapping */
804 802 ASSERT(rbp->rxb_dh != NULL);
805 803
806 804 /* free dma handles for rx bounce buffer */
807 805 /* it always has a dma mapping */
808 806 ASSERT(rbp->rxb_nfrags > 0);
809 807 (void) ddi_dma_unbind_handle(rbp->rxb_dh);
810 808
811 809 /* free the associated bounce buffer and dma handle */
812 810 ASSERT(rbp->rxb_bah != NULL);
813 811 ddi_dma_mem_free(&rbp->rxb_bah);
814 812 /* free the associated dma handle */
815 813 ddi_dma_free_handle(&rbp->rxb_dh);
816 814
817 815 /* free the base memory of rx buffer management */
818 816 kmem_free(rbp, sizeof (struct rxbuf));
819 817 }
820 818 }
821 819
822 820 /* ============================================================== */
823 821 /*
824 822 * Rx/Tx descriptor slot management
825 823 */
826 824 /* ============================================================== */
827 825 /*
828 826 * Initialize an empty rx ring.
829 827 */
830 828 static void
831 829 gem_init_rx_ring(struct gem_dev *dp)
832 830 {
833 831 int i;
834 832 int rx_ring_size = dp->gc.gc_rx_ring_size;
835 833
836 834 DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
837 835 dp->name, __func__,
838 836 rx_ring_size, dp->gc.gc_rx_buf_max));
839 837
840 838 /* make a physical chain of rx descriptors */
841 839 for (i = 0; i < rx_ring_size; i++) {
842 840 (*dp->gc.gc_rx_desc_init)(dp, i);
843 841 }
844 842 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
845 843
846 844 dp->rx_active_head = (seqnum_t)0;
847 845 dp->rx_active_tail = (seqnum_t)0;
848 846
849 847 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
850 848 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
851 849 }
852 850
853 851 /*
854 852 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855 853 */
856 854 static void
857 855 gem_prepare_rx_buf(struct gem_dev *dp)
858 856 {
859 857 int i;
860 858 int nrbuf;
861 859 struct rxbuf *rbp;
862 860
863 861 ASSERT(mutex_owned(&dp->intrlock));
864 862
865 863 /* Now we have no active buffers in rx ring */
866 864
867 865 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
868 866 for (i = 0; i < nrbuf; i++) {
869 867 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870 868 break;
871 869 }
872 870 gem_append_rxbuf(dp, rbp);
873 871 }
874 872
875 873 gem_rx_desc_dma_sync(dp,
876 874 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
877 875 }
878 876
879 877 /*
880 878 * Reclaim active rx buffers in rx buffer ring.
881 879 */
882 880 static void
883 881 gem_clean_rx_buf(struct gem_dev *dp)
884 882 {
885 883 int i;
886 884 struct rxbuf *rbp;
887 885 int rx_ring_size = dp->gc.gc_rx_ring_size;
888 886 #ifdef GEM_DEBUG_LEVEL
889 887 int total;
890 888 #endif
891 889 ASSERT(mutex_owned(&dp->intrlock));
892 890
893 891 DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
894 892 dp->name, __func__, dp->rx_buf_freecnt));
895 893 /*
896 894 * clean up HW descriptors
897 895 */
898 896 for (i = 0; i < rx_ring_size; i++) {
899 897 (*dp->gc.gc_rx_desc_clean)(dp, i);
900 898 }
901 899 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
902 900
903 901 #ifdef GEM_DEBUG_LEVEL
904 902 total = 0;
905 903 #endif
906 904 /*
907 905 * Reclaim allocated rx buffers
908 906 */
909 907 while ((rbp = dp->rx_buf_head) != NULL) {
910 908 #ifdef GEM_DEBUG_LEVEL
911 909 total++;
912 910 #endif
913 911 /* remove the first one from rx buffer list */
914 912 dp->rx_buf_head = rbp->rxb_next;
915 913
916 914 /* recycle the rxbuf */
917 915 gem_free_rxbuf(rbp);
918 916 }
919 917 dp->rx_buf_tail = (struct rxbuf *)NULL;
920 918
921 919 DPRINTF(2, (CE_CONT,
922 920 "!%s: %s: %d buffers freeed, total: %d free",
923 921 dp->name, __func__, total, dp->rx_buf_freecnt));
924 922 }
925 923
926 924 /*
927 925 * Initialize an empty transmit buffer/descriptor ring
928 926 */
929 927 static void
930 928 gem_init_tx_ring(struct gem_dev *dp)
931 929 {
932 930 int i;
933 931 int tx_buf_size = dp->gc.gc_tx_buf_size;
934 932 int tx_ring_size = dp->gc.gc_tx_ring_size;
935 933
936 934 DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
937 935 dp->name, __func__,
938 936 dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
939 937
940 938 ASSERT(!dp->mac_active);
941 939
942 940 /* initialize active list and free list */
943 941 dp->tx_slots_base =
944 942 SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
945 943 dp->tx_softq_tail -= dp->tx_softq_head;
946 944 dp->tx_softq_head = (seqnum_t)0;
947 945
948 946 dp->tx_active_head = dp->tx_softq_head;
949 947 dp->tx_active_tail = dp->tx_softq_head;
950 948
951 949 dp->tx_free_head = dp->tx_softq_tail;
952 950 dp->tx_free_tail = dp->gc.gc_tx_buf_limit;
953 951
954 952 dp->tx_desc_head = (seqnum_t)0;
955 953 dp->tx_desc_tail = (seqnum_t)0;
956 954 dp->tx_desc_intr = (seqnum_t)0;
957 955
958 956 for (i = 0; i < tx_ring_size; i++) {
959 957 (*dp->gc.gc_tx_desc_init)(dp, i);
960 958 }
961 959 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
962 960 }
963 961
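Reviewer note: the tx bookkeeping above (tx_active_*, tx_softq_*, tx_free_*, tx_desc_*) is kept as monotonically increasing sequence numbers rather than ring indices; SLOT() and GET_TXBUF() map a sequence number onto a physical slot, and plain subtraction gives element counts without explicit wrap-around logic. A small sketch of the convention, assuming SLOT() reduces a sequence number modulo the ring size as its uses here suggest (the size and values are hypothetical):

	#define RING_SIZE	8			/* illustrative ring size */
	#define SLOT(sn, size)	((sn) % (size))		/* assumed behaviour of the driver's SLOT() */

	static void
	seqnum_example(void)
	{
		int head = 13, tail = 16;		/* seqnum_t positions in the driver */
		int outstanding = tail - head;		/* 3 tx buffers in use */
		int head_slot = SLOT(head, RING_SIZE);	/* maps to physical slot 5 */
	}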
964 962 __INLINE__
965 963 static void
966 964 gem_txbuf_free_dma_resources(struct txbuf *tbp)
967 965 {
968 966 if (tbp->txb_mp) {
969 967 freemsg(tbp->txb_mp);
970 968 tbp->txb_mp = NULL;
971 969 }
972 970 tbp->txb_nfrags = 0;
973 971 tbp->txb_flag = 0;
974 972 }
975 973 #pragma inline(gem_txbuf_free_dma_resources)
976 974
977 975 /*
978 976 * reclaim active tx buffers and reset positions in tx rings.
979 977 */
980 978 static void
981 979 gem_clean_tx_buf(struct gem_dev *dp)
982 980 {
983 981 int i;
984 982 seqnum_t head;
985 983 seqnum_t tail;
986 984 seqnum_t sn;
987 985 struct txbuf *tbp;
988 986 int tx_ring_size = dp->gc.gc_tx_ring_size;
989 987 #ifdef GEM_DEBUG_LEVEL
990 988 int err;
991 989 #endif
992 990
993 991 ASSERT(!dp->mac_active);
994 992 ASSERT(dp->tx_busy == 0);
995 993 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
996 994
997 995 /*
998 996 * clean up all HW descriptors
999 997 */
1000 998 for (i = 0; i < tx_ring_size; i++) {
1001 999 (*dp->gc.gc_tx_desc_clean)(dp, i);
1002 1000 }
1003 1001 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1004 1002
1005 1003 /* dequeue all active and loaded buffers */
1006 1004 head = dp->tx_active_head;
1007 1005 tail = dp->tx_softq_tail;
1008 1006
1009 1007 ASSERT(dp->tx_free_head - head >= 0);
1010 1008 tbp = GET_TXBUF(dp, head);
1011 1009 for (sn = head; sn != tail; sn++) {
1012 1010 gem_txbuf_free_dma_resources(tbp);
1013 1011 ASSERT(tbp->txb_mp == NULL);
1014 1012 dp->stats.errxmt++;
1015 1013 tbp = tbp->txb_next;
1016 1014 }
1017 1015
1018 1016 #ifdef GEM_DEBUG_LEVEL
1019 1017 /* ensure no dma resources for tx are in use now */
1020 1018 err = 0;
1021 1019 while (sn != head + dp->gc.gc_tx_buf_size) {
1022 1020 if (tbp->txb_mp || tbp->txb_nfrags) {
1023 1021 DPRINTF(0, (CE_CONT,
1024 1022 "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1025 1023 dp->name, __func__,
1026 1024 sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1027 1025 tbp->txb_mp, tbp->txb_nfrags));
1028 1026 err = 1;
1029 1027 }
1030 1028 sn++;
1031 1029 tbp = tbp->txb_next;
1032 1030 }
1033 1031
1034 1032 if (err) {
1035 1033 gem_dump_txbuf(dp, CE_WARN,
1036 1034 "gem_clean_tx_buf: tbp->txb_mp != NULL");
1037 1035 }
1038 1036 #endif
1039 1037 /* recycle buffers, now no active tx buffers in the ring */
1040 1038 dp->tx_free_tail += tail - head;
1041 1039 ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1042 1040
1043 1041 /* fix positions in tx buffer rings */
1044 1042 dp->tx_active_head = dp->tx_free_head;
1045 1043 dp->tx_active_tail = dp->tx_free_head;
1046 1044 dp->tx_softq_head = dp->tx_free_head;
1047 1045 dp->tx_softq_tail = dp->tx_free_head;
1048 1046 }
1049 1047
1050 1048 /*
1051 1049 * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052 1050 */
1053 1051 __INLINE__ int
1054 1052 gem_reclaim_txbuf(struct gem_dev *dp)
1055 1053 {
1056 1054 struct txbuf *tbp;
1057 1055 uint_t txstat;
1058 1056 int err = GEM_SUCCESS;
1059 1057 seqnum_t head;
1060 1058 seqnum_t tail;
1061 1059 seqnum_t sn;
1062 1060 seqnum_t desc_head;
1063 1061 int tx_ring_size = dp->gc.gc_tx_ring_size;
1064 1062 uint_t (*tx_desc_stat)(struct gem_dev *dp,
1065 1063 int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1066 1064 clock_t now;
1067 1065
1068 1066 now = ddi_get_lbolt();
1069 1067 if (now == (clock_t)0) {
1070 1068 /* make non-zero timestamp */
1071 1069 now--;
1072 1070 }
1073 1071
1074 1072 mutex_enter(&dp->xmitlock);
1075 1073
1076 1074 head = dp->tx_active_head;
1077 1075 tail = dp->tx_active_tail;
1078 1076
1079 1077 #if GEM_DEBUG_LEVEL > 2
1080 1078 if (head != tail) {
1081 1079 cmn_err(CE_CONT, "!%s: %s: "
1082 1080 "testing active_head:%d[%d], active_tail:%d[%d]",
1083 1081 dp->name, __func__,
1084 1082 head, SLOT(head, dp->gc.gc_tx_buf_size),
1085 1083 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1086 1084 }
1087 1085 #endif
1088 1086 #ifdef DEBUG
1089 1087 if (dp->tx_reclaim_busy == 0) {
1090 1088 /* check tx buffer management consistency */
1091 1089 ASSERT(dp->tx_free_tail - dp->tx_active_head
1092 1090 == dp->gc.gc_tx_buf_limit);
1093 1091 /* EMPTY */
1094 1092 }
1095 1093 #endif
1096 1094 dp->tx_reclaim_busy++;
1097 1095
1098 1096 /* sync all active HW descriptors */
1099 1097 gem_tx_desc_dma_sync(dp,
1100 1098 SLOT(dp->tx_desc_head, tx_ring_size),
1101 1099 dp->tx_desc_tail - dp->tx_desc_head,
1102 1100 DDI_DMA_SYNC_FORKERNEL);
1103 1101
1104 1102 tbp = GET_TXBUF(dp, head);
1105 1103 desc_head = dp->tx_desc_head;
1106 1104 for (sn = head; sn != tail;
1107 1105 dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1108 1106 int ndescs;
1109 1107
1110 1108 ASSERT(tbp->txb_desc == desc_head);
1111 1109
1112 1110 ndescs = tbp->txb_ndescs;
1113 1111 if (ndescs == 0) {
1114 1112 /* skip errored descriptors */
1115 1113 continue;
1116 1114 }
1117 1115 txstat = (*tx_desc_stat)(dp,
1118 1116 SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1119 1117
1120 1118 if (txstat == 0) {
1121 1119 /* not transmitted yet */
1122 1120 break;
1123 1121 }
1124 1122
1125 1123 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1126 1124 dp->tx_blocked = now;
1127 1125 }
1128 1126
1129 1127 ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1130 1128
1131 1129 if (txstat & GEM_TX_ERR) {
1132 1130 err = GEM_FAILURE;
1133 1131 cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1134 1132 dp->name, sn, SLOT(sn, tx_ring_size));
1135 1133 }
1136 1134 #if GEM_DEBUG_LEVEL > 4
1137 1135 if (now - tbp->txb_stime >= 50) {
1138 1136 cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1139 1137 dp->name, (now - tbp->txb_stime)*10);
1140 1138 }
1141 1139 #endif
1142 1140 /* free transmitted descriptors */
1143 1141 desc_head += ndescs;
1144 1142 }
1145 1143
1146 1144 if (dp->tx_desc_head != desc_head) {
1147 1145 /* we have reclaimed one or more tx buffers */
1148 1146 dp->tx_desc_head = desc_head;
1149 1147
1150 1148 /* If we passed the next interrupt position, update it */
1151 1149 if (desc_head - dp->tx_desc_intr > 0) {
1152 1150 dp->tx_desc_intr = desc_head;
1153 1151 }
1154 1152 }
1155 1153 mutex_exit(&dp->xmitlock);
1156 1154
1157 1155 /* free dma mapping resources associated with transmitted tx buffers */
1158 1156 tbp = GET_TXBUF(dp, head);
1159 1157 tail = sn;
1160 1158 #if GEM_DEBUG_LEVEL > 2
1161 1159 if (head != tail) {
1162 1160 cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1163 1161 __func__,
1164 1162 head, SLOT(head, dp->gc.gc_tx_buf_size),
1165 1163 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1166 1164 }
1167 1165 #endif
1168 1166 for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169 1167 gem_txbuf_free_dma_resources(tbp);
1170 1168 }
1171 1169
1172 1170 /* recycle the tx buffers */
1173 1171 mutex_enter(&dp->xmitlock);
1174 1172 if (--dp->tx_reclaim_busy == 0) {
1175 1173 /* we are the last thread who can update free tail */
1176 1174 #if GEM_DEBUG_LEVEL > 4
1177 1175 /* check all resources have been deallocated */
1178 1176 sn = dp->tx_free_tail;
1179 1177 tbp = GET_TXBUF(dp, sn);
1180 1178 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1181 1179 if (tbp->txb_nfrags) {
1182 1180 /* in use */
1183 1181 break;
1184 1182 }
1185 1183 ASSERT(tbp->txb_mp == NULL);
1186 1184 tbp = tbp->txb_next;
1187 1185 sn++;
1188 1186 }
1189 1187 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1190 1188 #endif
1191 1189 dp->tx_free_tail =
1192 1190 dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1193 1191 }
1194 1192 if (!dp->mac_active) {
1195 1193 /* someone may be waiting for me. */
1196 1194 cv_broadcast(&dp->tx_drain_cv);
1197 1195 }
1198 1196 #if GEM_DEBUG_LEVEL > 2
1199 1197 cmn_err(CE_CONT, "!%s: %s: called, "
1200 1198 "free_head:%d free_tail:%d(+%d) added:%d",
1201 1199 dp->name, __func__,
1202 1200 dp->tx_free_head, dp->tx_free_tail,
1203 1201 dp->tx_free_tail - dp->tx_free_head, tail - head);
1204 1202 #endif
1205 1203 mutex_exit(&dp->xmitlock);
1206 1204
1207 1205 return (err);
1208 1206 }
1209 1207 #pragma inline(gem_reclaim_txbuf)
1210 1208
1211 1209
1212 1210 /*
1213 1211 * Make tx descriptors in an out-of-order manner
1214 1212 */
1215 1213 static void
1216 1214 gem_tx_load_descs_oo(struct gem_dev *dp,
1217 1215 seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1218 1216 {
1219 1217 seqnum_t sn;
1220 1218 struct txbuf *tbp;
1221 1219 int tx_ring_size = dp->gc.gc_tx_ring_size;
1222 1220 int (*tx_desc_write)
1223 1221 (struct gem_dev *dp, int slot,
1224 1222 ddi_dma_cookie_t *dmacookie,
1225 1223 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1226 1224 clock_t now = ddi_get_lbolt();
1227 1225
1228 1226 sn = start_slot;
1229 1227 tbp = GET_TXBUF(dp, sn);
1230 1228 do {
1231 1229 #if GEM_DEBUG_LEVEL > 1
1232 1230 if (dp->tx_cnt < 100) {
1233 1231 dp->tx_cnt++;
1234 1232 flags |= GEM_TXFLAG_INTR;
1235 1233 }
1236 1234 #endif
1237 1235 /* write a tx descriptor */
1238 1236 tbp->txb_desc = sn;
1239 1237 tbp->txb_ndescs = (*tx_desc_write)(dp,
1240 1238 SLOT(sn, tx_ring_size),
1241 1239 tbp->txb_dmacookie,
1242 1240 tbp->txb_nfrags, flags | tbp->txb_flag);
1243 1241 tbp->txb_stime = now;
1244 1242 ASSERT(tbp->txb_ndescs == 1);
1245 1243
1246 1244 flags = 0;
1247 1245 sn++;
1248 1246 tbp = tbp->txb_next;
1249 1247 } while (sn != end_slot);
1250 1248 }
1251 1249
1252 1250 __INLINE__
1253 1251 static size_t
1254 1252 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1255 1253 {
1256 1254 size_t min_pkt;
1257 1255 caddr_t bp;
1258 1256 size_t off;
1259 1257 mblk_t *tp;
1260 1258 size_t len;
1261 1259 uint64_t flag;
1262 1260
1263 1261 ASSERT(tbp->txb_mp == NULL);
1264 1262
1265 1263 /* we use bounce buffer for the packet */
1266 1264 min_pkt = ETHERMIN;
1267 1265 bp = tbp->txb_buf;
1268 1266 off = 0;
1269 1267 tp = mp;
1270 1268
1271 1269 flag = tbp->txb_flag;
1272 1270 if (flag & GEM_TXFLAG_SWVTAG) {
1273 1271 /* need to increase min packet size */
1274 1272 min_pkt += VTAG_SIZE;
1275 1273 ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1276 1274 }
1277 1275
1278 1276 /* copy the rest */
1279 1277 for (; tp; tp = tp->b_cont) {
1280 1278 if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1281 1279 bcopy(tp->b_rptr, &bp[off], len);
1282 1280 off += len;
1283 1281 }
1284 1282 }
1285 1283
1286 1284 if (off < min_pkt &&
1287 1285 (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1288 1286 /*
1289 1287 * Extend the packet to minimum packet size explicitly.
1290 1288 * For software vlan packets, we shouldn't use tx autopad
1291 1289 * function because nics may not be aware of vlan.
1292 1290 * we must keep 46 octet of payload even if we use vlan.
1293 1291 */
1294 1292 bzero(&bp[off], min_pkt - off);
1295 1293 off = min_pkt;
1296 1294 }
1297 1295
1298 1296 (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1299 1297
1300 1298 tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1301 1299 tbp->txb_dmacookie[0].dmac_size = off;
1302 1300
1303 1301 DPRINTF(2, (CE_CONT,
1304 1302 "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1305 1303 dp->name, __func__,
1306 1304 tbp->txb_dmacookie[0].dmac_laddress,
1307 1305 tbp->txb_dmacookie[0].dmac_size,
1308 1306 (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1309 1307 min_pkt));
1310 1308
1311 1309 /* save misc info */
1312 1310 tbp->txb_mp = mp;
1313 1311 tbp->txb_nfrags = 1;
1314 1312 #ifdef DEBUG_MULTIFRAGS
1315 1313 if (dp->gc.gc_tx_max_frags >= 3 &&
1316 1314 tbp->txb_dmacookie[0].dmac_size > 16*3) {
1317 1315 tbp->txb_dmacookie[1].dmac_laddress =
1318 1316 tbp->txb_dmacookie[0].dmac_laddress + 16;
1319 1317 tbp->txb_dmacookie[2].dmac_laddress =
1320 1318 tbp->txb_dmacookie[1].dmac_laddress + 16;
1321 1319
1322 1320 tbp->txb_dmacookie[2].dmac_size =
1323 1321 tbp->txb_dmacookie[0].dmac_size - 16*2;
1324 1322 tbp->txb_dmacookie[1].dmac_size = 16;
1325 1323 tbp->txb_dmacookie[0].dmac_size = 16;
1326 1324 tbp->txb_nfrags = 3;
1327 1325 }
1328 1326 #endif
1329 1327 return (off);
1330 1328 }
1331 1329 #pragma inline(gem_setup_txbuf_copy)
1332 1330
1333 1331 __INLINE__
1334 1332 static void
1335 1333 gem_tx_start_unit(struct gem_dev *dp)
1336 1334 {
1337 1335 seqnum_t head;
1338 1336 seqnum_t tail;
1339 1337 struct txbuf *tbp_head;
1340 1338 struct txbuf *tbp_tail;
1341 1339
1342 1340 /* update HW descriptors from soft queue */
1343 1341 ASSERT(mutex_owned(&dp->xmitlock));
1344 1342 ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1345 1343
1346 1344 head = dp->tx_softq_head;
1347 1345 tail = dp->tx_softq_tail;
1348 1346
1349 1347 DPRINTF(1, (CE_CONT,
1350 1348 "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1351 1349 dp->name, __func__, head, tail, tail - head,
1352 1350 dp->tx_desc_head, dp->tx_desc_tail,
1353 1351 dp->tx_desc_tail - dp->tx_desc_head));
1354 1352
1355 1353 ASSERT(tail - head > 0);
1356 1354
1357 1355 dp->tx_desc_tail = tail;
1358 1356
1359 1357 tbp_head = GET_TXBUF(dp, head);
1360 1358 tbp_tail = GET_TXBUF(dp, tail - 1);
1361 1359
1362 1360 ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1363 1361
1364 1362 dp->gc.gc_tx_start(dp,
1365 1363 SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1366 1364 tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1367 1365
1368 1366 /* advance softq head and active tail */
1369 1367 dp->tx_softq_head = dp->tx_active_tail = tail;
1370 1368 }
1371 1369 #pragma inline(gem_tx_start_unit)
1372 1370
1373 1371 #ifdef GEM_DEBUG_LEVEL
1374 1372 static int gem_send_cnt[10];
1375 1373 #endif
1376 1374 #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
1377 1375 #define EHLEN (sizeof (struct ether_header))
1378 1376 /*
1379 1377 * check ether packet type and ip protocol
1380 1378 */
1381 1379 static uint64_t
1382 1380 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1383 1381 {
1384 1382 mblk_t *tp;
1385 1383 ssize_t len;
1386 1384 uint_t vtag;
1387 1385 int off;
1388 1386 uint64_t flag;
1389 1387
1390 1388 flag = 0ULL;
1391 1389
1392 1390 /*
1393 1391 * prepare a contiguous header of the packet for protocol analysis
1394 1392 */
1395 1393 if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1396 1394 /* we use work buffer to copy mblk */
1397 1395 for (tp = mp, off = 0;
1398 1396 tp && (off < PKT_MIN_SIZE);
1399 1397 tp = tp->b_cont, off += len) {
1400 1398 len = (long)tp->b_wptr - (long)tp->b_rptr;
1401 1399 len = min(len, PKT_MIN_SIZE - off);
1402 1400 bcopy(tp->b_rptr, &bp[off], len);
1403 1401 }
1404 1402 } else {
1405 1403 /* we can use mblk without copy */
1406 1404 bp = mp->b_rptr;
1407 1405 }
1408 1406
1409 1407 /* process vlan tag for GLD v3 */
1410 1408 if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1411 1409 if (dp->misc_flag & GEM_VLAN_HARD) {
1412 1410 vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1413 1411 ASSERT(vtag);
1414 1412 flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1415 1413 } else {
1416 1414 flag |= GEM_TXFLAG_SWVTAG;
1417 1415 }
1418 1416 }
1419 1417 return (flag);
1420 1418 }
1421 1419 #undef EHLEN
1422 1420 #undef PKT_MIN_SIZE
1423 1421 /*
1424 1422 * gem_send_common is an exported function because hw depend routines may
1425 1423 * use it for sending control frames like setup frames for 2114x chipset.
1426 1424 */
1427 1425 mblk_t *
1428 1426 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1429 1427 {
1430 1428 int nmblk;
1431 1429 int avail;
1432 1430 mblk_t *tp;
1433 1431 mblk_t *mp;
1434 1432 int i;
1435 1433 struct txbuf *tbp;
1436 1434 seqnum_t head;
1437 1435 uint64_t load_flags;
1438 1436 uint64_t len_total = 0;
1439 1437 uint32_t bcast = 0;
1440 1438 uint32_t mcast = 0;
1441 1439
1442 1440 ASSERT(mp_head != NULL);
1443 1441
1444 1442 mp = mp_head;
1445 1443 nmblk = 1;
1446 1444 while ((mp = mp->b_next) != NULL) {
1447 1445 nmblk++;
1448 1446 }
1449 1447 #ifdef GEM_DEBUG_LEVEL
1450 1448 gem_send_cnt[0]++;
1451 1449 gem_send_cnt[min(nmblk, 9)]++;
1452 1450 #endif
1453 1451 /*
1454 1452 * Acquire resources
1455 1453 */
1456 1454 mutex_enter(&dp->xmitlock);
1457 1455 if (dp->mac_suspended) {
1458 1456 mutex_exit(&dp->xmitlock);
1459 1457 mp = mp_head;
1460 1458 while (mp) {
1461 1459 tp = mp->b_next;
1462 1460 freemsg(mp);
1463 1461 mp = tp;
1464 1462 }
1465 1463 return (NULL);
1466 1464 }
1467 1465
1468 1466 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1469 1467 /* don't send data packets while mac isn't active */
1470 1468 /* XXX - should we discard packets? */
1471 1469 mutex_exit(&dp->xmitlock);
1472 1470 return (mp_head);
1473 1471 }
1474 1472
1475 1473 /* allocate free slots */
1476 1474 head = dp->tx_free_head;
1477 1475 avail = dp->tx_free_tail - head;
1478 1476
1479 1477 DPRINTF(2, (CE_CONT,
1480 1478 "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1481 1479 dp->name, __func__,
1482 1480 dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1483 1481
1484 1482 avail = min(avail, dp->tx_max_packets);
1485 1483
1486 1484 if (nmblk > avail) {
1487 1485 if (avail == 0) {
1488 1486 /* no resources; short cut */
1489 1487 DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1490 1488 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1491 1489 goto done;
1492 1490 }
1493 1491 nmblk = avail;
1494 1492 }
1495 1493
1496 1494 dp->tx_free_head = head + nmblk;
1497 1495 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1498 1496
1499 1497 /* update last interrupt position if tx buffers are exhausted. */
1500 1498 if (nmblk == avail) {
1501 1499 tbp = GET_TXBUF(dp, head + avail - 1);
1502 1500 tbp->txb_flag = GEM_TXFLAG_INTR;
1503 1501 dp->tx_desc_intr = head + avail;
1504 1502 }
1505 1503 mutex_exit(&dp->xmitlock);
1506 1504
1507 1505 tbp = GET_TXBUF(dp, head);
1508 1506
1509 1507 for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1510 1508 uint8_t *bp;
1511 1509 uint64_t txflag;
1512 1510
1513 1511 /* remove one from the mblk list */
1514 1512 ASSERT(mp_head != NULL);
1515 1513 mp = mp_head;
1516 1514 mp_head = mp_head->b_next;
1517 1515 mp->b_next = NULL;
1518 1516
1519 1517 /* statistics for non-unicast packets */
1520 1518 bp = mp->b_rptr;
1521 1519 if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1522 1520 if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1523 1521 ETHERADDRL) == 0) {
1524 1522 bcast++;
1525 1523 } else {
1526 1524 mcast++;
1527 1525 }
1528 1526 }
1529 1527
1530 1528 /* save misc info */
1531 1529 txflag = tbp->txb_flag;
1532 1530 txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1533 1531 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1534 1532 tbp->txb_flag = txflag;
1535 1533
1536 1534 len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1537 1535 }
1538 1536
1539 1537 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1540 1538
1541 1539 /* Append the tbp at the tail of the active tx buffer list */
1542 1540 mutex_enter(&dp->xmitlock);
1543 1541
1544 1542 if ((--dp->tx_busy) == 0) {
1545 1543 /* extend the tail of softq, as new packets have been ready. */
1546 1544 dp->tx_softq_tail = dp->tx_free_head;
1547 1545
1548 1546 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1549 1547 /*
1550 1548 * The device status has changed while we are
1551 1549 * preparing tx buf.
1552 1550 * As we are the last one to make tx non-busy,
1553 1551 * wake up anyone who may be waiting for us.
1554 1552 */
1555 1553 cv_broadcast(&dp->tx_drain_cv);
1556 1554 } else {
1557 1555 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1558 1556 gem_tx_start_unit(dp);
1559 1557 }
1560 1558 }
1561 1559 dp->stats.obytes += len_total;
1562 1560 dp->stats.opackets += nmblk;
1563 1561 dp->stats.obcast += bcast;
1564 1562 dp->stats.omcast += mcast;
1565 1563 done:
1566 1564 mutex_exit(&dp->xmitlock);
1567 1565
1568 1566 return (mp_head);
1569 1567 }
1570 1568
1571 1569 /* ========================================================== */
1572 1570 /*
1573 1571 * error detection and restart routines
1574 1572 */
1575 1573 /* ========================================================== */
1576 1574 int
1577 1575 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1578 1576 {
1579 1577 ASSERT(mutex_owned(&dp->intrlock));
1580 1578
1581 1579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1582 1580 #ifdef GEM_DEBUG_LEVEL
1583 1581 #if GEM_DEBUG_LEVEL > 1
1584 1582 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1585 1583 #endif
1586 1584 #endif
1587 1585
1588 1586 if (dp->mac_suspended) {
1589 1587 /* should we return GEM_FAILURE ? */
1590 1588 return (GEM_FAILURE);
1591 1589 }
1592 1590
1593 1591 /*
1594 1592 * We should avoid calling any routines except xxx_chip_reset
1595 1593 * when we are resuming the system.
1596 1594 */
1597 1595 if (dp->mac_active) {
1598 1596 if (flags & GEM_RESTART_KEEP_BUF) {
1599 1597 /* stop rx gracefully */
1600 1598 dp->rxmode &= ~RXMODE_ENABLE;
1601 1599 (void) (*dp->gc.gc_set_rx_filter)(dp);
1602 1600 }
1603 1601 (void) gem_mac_stop(dp, flags);
1604 1602 }
1605 1603
1606 1604 /* reset the chip. */
1607 1605 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1608 1606 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1609 1607 dp->name, __func__);
1610 1608 goto err;
1611 1609 }
1612 1610
1613 1611 if (gem_mac_init(dp) != GEM_SUCCESS) {
1614 1612 goto err;
1615 1613 }
1616 1614
1617 1615 /* setup media mode if the link has been up */
1618 1616 if (dp->mii_state == MII_STATE_LINKUP) {
1619 1617 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1620 1618 goto err;
1621 1619 }
1622 1620 }
1623 1621
1624 1622 /* setup mac address and enable rx filter */
1625 1623 dp->rxmode |= RXMODE_ENABLE;
1626 1624 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1627 1625 goto err;
1628 1626 }
1629 1627
1630 1628 /*
1631 1629 * XXX - a panic happened because of linkdown.
1632 1630 * We must check mii_state here, because the link can be down just
1633 1631 * before the restart event happens. If the link is down now,
1634 1632 * gem_mac_start() will be called from gem_mii_link_check() when
1635 1633 * the link comes up later.
1636 1634 */
1637 1635 if (dp->mii_state == MII_STATE_LINKUP) {
1638 1636 /* restart the nic */
1639 1637 ASSERT(!dp->mac_active);
1640 1638 (void) gem_mac_start(dp);
1641 1639 }
1642 1640 return (GEM_SUCCESS);
1643 1641 err:
1644 1642 return (GEM_FAILURE);
1645 1643 }
1646 1644
1647 1645
1648 1646 static void
1649 1647 gem_tx_timeout(struct gem_dev *dp)
1650 1648 {
1651 1649 clock_t now;
1652 1650 boolean_t tx_sched;
1653 1651 struct txbuf *tbp;
1654 1652
1655 1653 mutex_enter(&dp->intrlock);
1656 1654
1657 1655 tx_sched = B_FALSE;
1658 1656 now = ddi_get_lbolt();
1659 1657
1660 1658 mutex_enter(&dp->xmitlock);
1661 1659 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1662 1660 mutex_exit(&dp->xmitlock);
1663 1661 goto schedule_next;
1664 1662 }
1665 1663 mutex_exit(&dp->xmitlock);
1666 1664
1667 1665 /* reclaim transmitted buffers to check whether the transmitter hangs or not. */
1668 1666 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1669 1667 /* tx error happened, reset transmitter in the chip */
1670 1668 (void) gem_restart_nic(dp, 0);
1671 1669 tx_sched = B_TRUE;
1672 1670 dp->tx_blocked = (clock_t)0;
1673 1671
1674 1672 goto schedule_next;
1675 1673 }
1676 1674
1677 1675 mutex_enter(&dp->xmitlock);
1678 1676 /* check if the transmitter thread is stuck */
1679 1677 if (dp->tx_active_head == dp->tx_active_tail) {
1680 1678 /* no tx buffer is loaded to the nic */
1681 1679 if (dp->tx_blocked &&
1682 1680 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1683 1681 gem_dump_txbuf(dp, CE_WARN,
1684 1682 "gem_tx_timeout: tx blocked");
1685 1683 tx_sched = B_TRUE;
1686 1684 dp->tx_blocked = (clock_t)0;
1687 1685 }
1688 1686 mutex_exit(&dp->xmitlock);
1689 1687 goto schedule_next;
1690 1688 }
1691 1689
1692 1690 tbp = GET_TXBUF(dp, dp->tx_active_head);
1693 1691 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1694 1692 mutex_exit(&dp->xmitlock);
1695 1693 goto schedule_next;
1696 1694 }
1697 1695 mutex_exit(&dp->xmitlock);
1698 1696
1699 1697 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1700 1698
1701 1699 /* discard untransmitted packet and restart tx. */
1702 1700 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1703 1701 tx_sched = B_TRUE;
1704 1702 dp->tx_blocked = (clock_t)0;
1705 1703
1706 1704 schedule_next:
1707 1705 mutex_exit(&dp->intrlock);
1708 1706
1709 1707 /* restart the downstream if needed */
1710 1708 if (tx_sched) {
1711 1709 mac_tx_update(dp->mh);
1712 1710 }
1713 1711
1714 1712 DPRINTF(4, (CE_CONT,
1715 1713 "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1716 1714 dp->name, BOOLEAN(dp->tx_blocked),
1717 1715 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1718 1716 dp->timeout_id =
1719 1717 timeout((void (*)(void *))gem_tx_timeout,
1720 1718 (void *)dp, dp->gc.gc_tx_timeout_interval);
1721 1719 }
1722 1720
1723 1721 /* ================================================================== */
1724 1722 /*
1725 1723 * Interrupt handler
1726 1724 */
1727 1725 /* ================================================================== */
1728 1726 __INLINE__
1729 1727 static void
1730 1728 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1731 1729 {
1732 1730 struct rxbuf *rbp;
1733 1731 seqnum_t tail;
1734 1732 int rx_ring_size = dp->gc.gc_rx_ring_size;
1735 1733
1736 1734 ASSERT(rbp_head != NULL);
1737 1735 ASSERT(mutex_owned(&dp->intrlock));
1738 1736
1739 1737 DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1740 1738 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1741 1739
1742 1740 /*
1743 1741 * Add new buffers into active rx buffer list
1744 1742 */
1745 1743 if (dp->rx_buf_head == NULL) {
1746 1744 dp->rx_buf_head = rbp_head;
1747 1745 ASSERT(dp->rx_buf_tail == NULL);
1748 1746 } else {
1749 1747 dp->rx_buf_tail->rxb_next = rbp_head;
1750 1748 }
1751 1749
1752 1750 tail = dp->rx_active_tail;
1753 1751 for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1754 1752 /* need to notify the tail for the lower layer */
1755 1753 dp->rx_buf_tail = rbp;
1756 1754
1757 1755 dp->gc.gc_rx_desc_write(dp,
1758 1756 SLOT(tail, rx_ring_size),
1759 1757 rbp->rxb_dmacookie,
1760 1758 rbp->rxb_nfrags);
1761 1759
1762 1760 dp->rx_active_tail = tail = tail + 1;
1763 1761 }
1764 1762 }
1765 1763 #pragma inline(gem_append_rxbuf)
1766 1764
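/*
 * Note on gem_get_packet_default() below: this is the default, copying
 * implementation of the gc_get_packet hook.  It allocates a fresh mblk with
 * VTAG_SIZE bytes of headroom (so a VLAN tag can later be inserted without
 * reallocating), syncs the rx DMA buffer for the kernel, and copies the
 * payload out past any hardware rx header, leaving the original rx buffer
 * free for immediate reuse on the ring.
 */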
1767 1765 mblk_t *
1768 1766 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1769 1767 {
1770 1768 int rx_header_len = dp->gc.gc_rx_header_len;
1771 1769 uint8_t *bp;
1772 1770 mblk_t *mp;
1773 1771
1774 1772 /* allocate a new mblk */
1775 1773 if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1776 1774 ASSERT(mp->b_next == NULL);
1777 1775 ASSERT(mp->b_cont == NULL);
1778 1776
1779 1777 mp->b_rptr += VTAG_SIZE;
1780 1778 bp = mp->b_rptr;
1781 1779 mp->b_wptr = bp + len;
1782 1780
1783 1781 /*
1784 1782 * flush the range of the entire buffer to invalidate
1785 1783  * all of the corresponding dirty entries in the iocache.
1786 1784 */
1787 1785 (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1788 1786 0, DDI_DMA_SYNC_FORKERNEL);
1789 1787
1790 1788 bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1791 1789 }
1792 1790 return (mp);
1793 1791 }
1794 1792
1795 1793 #ifdef GEM_DEBUG_LEVEL
1796 1794 uint_t gem_rx_pkts[17];
1797 1795 #endif
1798 1796
1799 1797
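/*
 * Note on gem_receive() below: it walks the active rx buffer list under
 * intrlock.  For each buffer it syncs the corresponding descriptors in small
 * batches (sized from poll_pkt_delay), asks the chip-specific gc_rx_desc_stat
 * hook whether the slot is done, retrieves the frame via gc_get_packet,
 * applies min/max length checks (allowing VTAG_SIZE extra for tagged frames)
 * and accumulates broadcast/multicast statistics.  Processed buffers are
 * re-queued through gem_append_rxbuf()/gc_rx_start, and the collected mblk
 * chain is handed to mac_rx() with intrlock temporarily dropped.
 */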
1800 1798 int
1801 1799 gem_receive(struct gem_dev *dp)
1802 1800 {
1803 1801 uint64_t len_total = 0;
1804 1802 struct rxbuf *rbp;
1805 1803 mblk_t *mp;
1806 1804 int cnt = 0;
1807 1805 uint64_t rxstat;
1808 1806 struct rxbuf *newbufs;
1809 1807 struct rxbuf **newbufs_tailp;
1810 1808 mblk_t *rx_head;
1811 1809 mblk_t **rx_tailp;
1812 1810 int rx_ring_size = dp->gc.gc_rx_ring_size;
1813 1811 seqnum_t active_head;
1814 1812 uint64_t (*rx_desc_stat)(struct gem_dev *dp,
1815 1813 int slot, int ndesc);
1816 1814 int ethermin = ETHERMIN;
1817 1815 int ethermax = dp->mtu + sizeof (struct ether_header);
1818 1816 int rx_header_len = dp->gc.gc_rx_header_len;
1819 1817
1820 1818 ASSERT(mutex_owned(&dp->intrlock));
1821 1819
1822 1820 DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1823 1821 dp->name, dp->rx_buf_head));
1824 1822
1825 1823 rx_desc_stat = dp->gc.gc_rx_desc_stat;
1826 1824 newbufs_tailp = &newbufs;
1827 1825 rx_tailp = &rx_head;
1828 1826 for (active_head = dp->rx_active_head;
1829 1827 (rbp = dp->rx_buf_head) != NULL; active_head++) {
1830 1828 int len;
1831 1829 if (cnt == 0) {
1832 1830 cnt = max(dp->poll_pkt_delay*2, 10);
1833 1831 cnt = min(cnt,
1834 1832 dp->rx_active_tail - active_head);
1835 1833 gem_rx_desc_dma_sync(dp,
1836 1834 SLOT(active_head, rx_ring_size),
1837 1835 cnt,
1838 1836 DDI_DMA_SYNC_FORKERNEL);
1839 1837 }
1840 1838
1841 1839 if (rx_header_len > 0) {
1842 1840 (void) ddi_dma_sync(rbp->rxb_dh, 0,
1843 1841 rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1844 1842 }
1845 1843
1846 1844 if (((rxstat = (*rx_desc_stat)(dp,
1847 1845 SLOT(active_head, rx_ring_size),
1848 1846 rbp->rxb_nfrags))
1849 1847 & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1850 1848 /* not received yet */
1851 1849 break;
1852 1850 }
1853 1851
1854 1852 /* Remove the head of the rx buffer list */
1855 1853 dp->rx_buf_head = rbp->rxb_next;
1856 1854 cnt--;
1857 1855
1858 1856
1859 1857 if (rxstat & GEM_RX_ERR) {
1860 1858 goto next;
1861 1859 }
1862 1860
1863 1861 len = rxstat & GEM_RX_LEN;
1864 1862 DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1865 1863 dp->name, __func__, rxstat, len));
1866 1864
1867 1865 /*
1868 1866 * Copy the packet
1869 1867 */
1870 1868 if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1871 1869 /* no memory, discard the packet */
1872 1870 dp->stats.norcvbuf++;
1873 1871 goto next;
1874 1872 }
1875 1873
1876 1874 /*
1877 1875 * Process VLAN tag
1878 1876 */
1879 1877 ethermin = ETHERMIN;
1880 1878 ethermax = dp->mtu + sizeof (struct ether_header);
1881 1879 if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1882 1880 ethermax += VTAG_SIZE;
1883 1881 }
1884 1882
1885 1883 /* check packet size */
1886 1884 if (len < ethermin) {
1887 1885 dp->stats.errrcv++;
1888 1886 dp->stats.runt++;
1889 1887 freemsg(mp);
1890 1888 goto next;
1891 1889 }
1892 1890
1893 1891 if (len > ethermax) {
1894 1892 dp->stats.errrcv++;
1895 1893 dp->stats.frame_too_long++;
1896 1894 freemsg(mp);
1897 1895 goto next;
1898 1896 }
1899 1897
1900 1898 len_total += len;
1901 1899
1902 1900 #ifdef GEM_DEBUG_VLAN
1903 1901 if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1904 1902 gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1905 1903 }
1906 1904 #endif
1907 1905 /* append received packet to the temporary rx buffer list */
1908 1906 *rx_tailp = mp;
1909 1907 rx_tailp = &mp->b_next;
1910 1908
1911 1909 if (mp->b_rptr[0] & 1) {
1912 1910 if (bcmp(mp->b_rptr,
1913 1911 gem_etherbroadcastaddr.ether_addr_octet,
1914 1912 ETHERADDRL) == 0) {
1915 1913 dp->stats.rbcast++;
1916 1914 } else {
1917 1915 dp->stats.rmcast++;
1918 1916 }
1919 1917 }
1920 1918 next:
1921 1919 ASSERT(rbp != NULL);
1922 1920
1923 1921 /* append the new one to the temporary new-buffer list */
1924 1922 *newbufs_tailp = rbp;
1925 1923 newbufs_tailp = &rbp->rxb_next;
1926 1924 }
1927 1925
1928 1926 /* advance rx_active_head */
1929 1927 if ((cnt = active_head - dp->rx_active_head) > 0) {
1930 1928 dp->stats.rbytes += len_total;
1931 1929 dp->stats.rpackets += cnt;
1932 1930 }
1933 1931 dp->rx_active_head = active_head;
1934 1932
1935 1933 /* terminate the working list */
1936 1934 *newbufs_tailp = NULL;
1937 1935 *rx_tailp = NULL;
1938 1936
1939 1937 if (dp->rx_buf_head == NULL) {
1940 1938 dp->rx_buf_tail = NULL;
1941 1939 }
1942 1940
1943 1941 DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1944 1942 dp->name, __func__, cnt, rx_head));
1945 1943
1946 1944 if (newbufs) {
1947 1945 /*
1948 1946  * refill the rx list with new buffers
1949 1947 */
1950 1948 seqnum_t head;
1951 1949
1952 1950 /* save current tail */
1953 1951 head = dp->rx_active_tail;
1954 1952 gem_append_rxbuf(dp, newbufs);
1955 1953
1956 1954 /* call the hardware-dependent rx start routine if we have one. */
1957 1955 dp->gc.gc_rx_start(dp,
1958 1956 SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1959 1957 }
1960 1958
1961 1959 if (rx_head) {
1962 1960 /*
1963 1961 * send up received packets
1964 1962 */
1965 1963 mutex_exit(&dp->intrlock);
1966 1964 mac_rx(dp->mh, NULL, rx_head);
1967 1965 mutex_enter(&dp->intrlock);
1968 1966 }
1969 1967
1970 1968 #ifdef GEM_DEBUG_LEVEL
1971 1969 gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1972 1970 #endif
1973 1971 return (cnt);
1974 1972 }
1975 1973
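/*
 * Note on gem_tx_done() below: it is intended to be called from the
 * chip-specific interrupt handler on a tx-complete interrupt (an assumption
 * about the callers outside this section).  It reclaims finished tx buffers,
 * restarts the nic (keeping buffers) when reclaim fails, and returns B_TRUE
 * when the caller should issue mac_tx_update(): namely when tx was blocked
 * and no further tx-done interrupts are outstanding
 * (tx_desc_intr == tx_desc_head).  In that case it also grows tx_max_packets
 * back toward gc_tx_buf_limit.
 */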
1976 1974 boolean_t
1977 1975 gem_tx_done(struct gem_dev *dp)
1978 1976 {
1979 1977 boolean_t tx_sched = B_FALSE;
1980 1978
1981 1979 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1982 1980 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1983 1981 DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1984 1982 dp->name, dp->tx_active_head, dp->tx_active_tail));
1985 1983 tx_sched = B_TRUE;
1986 1984 goto x;
1987 1985 }
1988 1986
1989 1987 mutex_enter(&dp->xmitlock);
1990 1988
1991 1989 /* XXX - we must not have any packets in soft queue */
1992 1990 ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1993 1991 /*
1994 1992  * If we won't have a chance to get more free tx buffers and are blocked,
1995 1993  * it is worth rescheduling the downstream, i.e. the tx side.
1996 1994 */
1997 1995 ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1998 1996 if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1999 1997 /*
2000 1998 * As no further tx-done interrupts are scheduled, this
2001 1999 * is the last chance to kick tx side, which may be
2002 2000 * blocked now, otherwise the tx side never works again.
2003 2001 */
2004 2002 tx_sched = B_TRUE;
2005 2003 dp->tx_blocked = (clock_t)0;
2006 2004 dp->tx_max_packets =
2007 2005 min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2008 2006 }
2009 2007
2010 2008 mutex_exit(&dp->xmitlock);
2011 2009
2012 2010 DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2013 2011 dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2014 2012 x:
2015 2013 return (tx_sched);
2016 2014 }
2017 2015
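/*
 * Note on gem_intr() below: this is the common interrupt wrapper.  It claims
 * the interrupt under intrlock, delegates the real work to the chip-specific
 * gc_interrupt hook, wakes any thread waiting for tx drain when the mac has
 * been stopped, and finally calls mac_tx_update() outside the lock when the
 * hook requests a tx restart via INTR_RESTART_TX.
 */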
2018 2016 static uint_t
2019 2017 gem_intr(struct gem_dev *dp)
2020 2018 {
2021 2019 uint_t ret;
2022 2020
2023 2021 mutex_enter(&dp->intrlock);
2024 2022 if (dp->mac_suspended) {
2025 2023 mutex_exit(&dp->intrlock);
2026 2024 return (DDI_INTR_UNCLAIMED);
2027 2025 }
2028 2026 dp->intr_busy = B_TRUE;
2029 2027
2030 2028 ret = (*dp->gc.gc_interrupt)(dp);
2031 2029
2032 2030 if (ret == DDI_INTR_UNCLAIMED) {
2033 2031 dp->intr_busy = B_FALSE;
2034 2032 mutex_exit(&dp->intrlock);
2035 2033 return (ret);
2036 2034 }
2037 2035
2038 2036 if (!dp->mac_active) {
2039 2037 cv_broadcast(&dp->tx_drain_cv);
2040 2038 }
2041 2039
2042 2040
2043 2041 dp->stats.intr++;
2044 2042 dp->intr_busy = B_FALSE;
2045 2043
2046 2044 mutex_exit(&dp->intrlock);
2047 2045
2048 2046 if (ret & INTR_RESTART_TX) {
2049 2047 DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2050 2048 mac_tx_update(dp->mh);
2051 2049 ret &= ~INTR_RESTART_TX;
2052 2050 }
2053 2051 return (ret);
2054 2052 }
2055 2053
2056 2054 static void
2057 2055 gem_intr_watcher(struct gem_dev *dp)
2058 2056 {
2059 2057 (void) gem_intr(dp);
2060 2058
2061 2059 /* schedule the next call of gem_intr_watcher */
2062 2060 dp->intr_watcher_id =
2063 2061 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2064 2062 }
2065 2063
2066 2064 /* ======================================================================== */
2067 2065 /*
2068 2066 * MII support routines
2069 2067 */
2070 2068 /* ======================================================================== */
2071 2069 static void
2072 2070 gem_choose_forcedmode(struct gem_dev *dp)
2073 2071 {
2074 2072 /* choose media mode */
2075 2073 if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2076 2074 dp->speed = GEM_SPD_1000;
2077 2075 dp->full_duplex = dp->anadv_1000fdx;
2078 2076 } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2079 2077 dp->speed = GEM_SPD_100;
2080 2078 dp->full_duplex = B_TRUE;
2081 2079 } else if (dp->anadv_100hdx) {
2082 2080 dp->speed = GEM_SPD_100;
2083 2081 dp->full_duplex = B_FALSE;
2084 2082 } else {
2085 2083 dp->speed = GEM_SPD_10;
2086 2084 dp->full_duplex = dp->anadv_10fdx;
2087 2085 }
2088 2086 }
2089 2087
2090 2088 uint16_t
2091 2089 gem_mii_read(struct gem_dev *dp, uint_t reg)
2092 2090 {
2093 2091 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2094 2092 (*dp->gc.gc_mii_sync)(dp);
2095 2093 }
2096 2094 return ((*dp->gc.gc_mii_read)(dp, reg));
2097 2095 }
2098 2096
2099 2097 void
2100 2098 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2101 2099 {
2102 2100 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2103 2101 (*dp->gc.gc_mii_sync)(dp);
2104 2102 }
2105 2103 (*dp->gc.gc_mii_write)(dp, reg, val);
2106 2104 }
2107 2105
2108 2106 #define fc_cap_decode(x) \
2109 2107 ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \
2110 2108 (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2111 2109
2112 2110 int
2113 2111 gem_mii_config_default(struct gem_dev *dp)
2114 2112 {
2115 2113 uint16_t mii_stat;
2116 2114 uint16_t val;
2117 2115 static uint16_t fc_cap_encode[4] = {
2118 2116 0, /* none */
2119 2117 MII_ABILITY_PAUSE, /* symmetric */
2120 2118 MII_ABILITY_ASMPAUSE, /* tx */
2121 2119 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
2122 2120 };
2123 2121
2124 2122 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2125 2123
2126 2124 /*
2127 2125 * Configure bits in advertisement register
2128 2126 */
2129 2127 mii_stat = dp->mii_status;
2130 2128
2131 2129 DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2132 2130 dp->name, __func__, mii_stat, MII_STATUS_BITS));
2133 2131
2134 2132 if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2135 2133 /* it's funny */
2136 2134 cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2137 2135 dp->name, mii_stat, MII_STATUS_BITS);
2138 2136 return (GEM_FAILURE);
2139 2137 }
2140 2138
2141 2139 /* Do not change the rest of the ability bits in the advert reg */
2142 2140 val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2143 2141
2144 2142 DPRINTF(0, (CE_CONT,
2145 2143 "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2146 2144 dp->name, __func__,
2147 2145 dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2148 2146 dp->anadv_10fdx, dp->anadv_10hdx));
2149 2147
2150 2148 if (dp->anadv_100t4) {
2151 2149 val |= MII_ABILITY_100BASE_T4;
2152 2150 }
2153 2151 if (dp->anadv_100fdx) {
2154 2152 val |= MII_ABILITY_100BASE_TX_FD;
2155 2153 }
2156 2154 if (dp->anadv_100hdx) {
2157 2155 val |= MII_ABILITY_100BASE_TX;
2158 2156 }
2159 2157 if (dp->anadv_10fdx) {
2160 2158 val |= MII_ABILITY_10BASE_T_FD;
2161 2159 }
2162 2160 if (dp->anadv_10hdx) {
2163 2161 val |= MII_ABILITY_10BASE_T;
2164 2162 }
2165 2163
2166 2164 /* set flow control capability */
2167 2165 val |= fc_cap_encode[dp->anadv_flow_control];
2168 2166
2169 2167 DPRINTF(0, (CE_CONT,
2170 2168 "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2171 2169 dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2172 2170 dp->anadv_flow_control));
2173 2171
2174 2172 gem_mii_write(dp, MII_AN_ADVERT, val);
2175 2173
2176 2174 if (mii_stat & MII_STATUS_XSTATUS) {
2177 2175 /*
2178 2176 * 1000Base-T GMII support
2179 2177 */
2180 2178 if (!dp->anadv_autoneg) {
2181 2179 /* enable manual configuration */
2182 2180 val = MII_1000TC_CFG_EN;
2183 2181 } else {
2184 2182 val = 0;
2185 2183 if (dp->anadv_1000fdx) {
2186 2184 val |= MII_1000TC_ADV_FULL;
2187 2185 }
2188 2186 if (dp->anadv_1000hdx) {
2189 2187 val |= MII_1000TC_ADV_HALF;
2190 2188 }
2191 2189 }
2192 2190 DPRINTF(0, (CE_CONT,
2193 2191 "!%s: %s: setting MII_1000TC reg:%b",
2194 2192 dp->name, __func__, val, MII_1000TC_BITS));
2195 2193
2196 2194 gem_mii_write(dp, MII_1000TC, val);
2197 2195 }
2198 2196
2199 2197 return (GEM_SUCCESS);
2200 2198 }
2201 2199
2202 2200 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2203 2201 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2204 2202
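/*
 * Note on the flow control tables: fc_cap_decode() packs the PAUSE/ASMPAUSE
 * advertisement bits into a 2-bit index (0:none, 1:symmetric, 2:tx-only,
 * 3:rx/symmetric).  gem_fc_result[our cap][link partner cap] below resolves
 * those two indices into the flow control mode actually used on the link;
 * fc_cap_encode[] in gem_mii_config_default() is the inverse mapping used
 * when building our advertisement register.
 */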
2205 2203 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2206 2204 /* none symm tx rx/symm */
2207 2205 /* none */
2208 2206 {FLOW_CONTROL_NONE,
2209 2207 FLOW_CONTROL_NONE,
2210 2208 FLOW_CONTROL_NONE,
2211 2209 FLOW_CONTROL_NONE},
2212 2210 /* sym */
2213 2211 {FLOW_CONTROL_NONE,
2214 2212 FLOW_CONTROL_SYMMETRIC,
2215 2213 FLOW_CONTROL_NONE,
2216 2214 FLOW_CONTROL_SYMMETRIC},
2217 2215 /* tx */
2218 2216 {FLOW_CONTROL_NONE,
2219 2217 FLOW_CONTROL_NONE,
2220 2218 FLOW_CONTROL_NONE,
2221 2219 FLOW_CONTROL_TX_PAUSE},
2222 2220 /* rx/symm */
2223 2221 {FLOW_CONTROL_NONE,
2224 2222 FLOW_CONTROL_SYMMETRIC,
2225 2223 FLOW_CONTROL_RX_PAUSE,
2226 2224 FLOW_CONTROL_SYMMETRIC},
2227 2225 };
2228 2226
2229 2227 static char *gem_fc_type[] = {
2230 2228 "without",
2231 2229 "with symmetric",
2232 2230 "with tx",
2233 2231 "with rx",
2234 2232 };
2235 2233
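/*
 * Note on gem_mii_link_check() below: it implements the link state machine
 *   UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE -> MEDIA_SETUP
 *   -> LINKDOWN <-> LINKUP
 * and is driven periodically from gem_mii_link_watcher().  mii_timer counts
 * down the time spent in the current state and mii_interval selects how soon
 * the watcher runs again.  The function returns B_TRUE when the caller should
 * kick a blocked tx side with mac_tx_update(), and reports link changes to
 * the mac layer via GEM_LINKUP/GEM_LINKDOWN, except that linkdown is
 * suppressed during the initial linkup_delay window (for NWAM).
 */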
2236 2234 boolean_t
2237 2235 gem_mii_link_check(struct gem_dev *dp)
2238 2236 {
2239 2237 uint16_t old_mii_state;
2240 2238 boolean_t tx_sched = B_FALSE;
2241 2239 uint16_t status;
2242 2240 uint16_t advert;
2243 2241 uint16_t lpable;
2244 2242 uint16_t exp;
2245 2243 uint16_t ctl1000;
2246 2244 uint16_t stat1000;
2247 2245 uint16_t val;
2248 2246 clock_t now;
2249 2247 clock_t diff;
2250 2248 int linkdown_action;
2251 2249 boolean_t fix_phy = B_FALSE;
2252 2250
2253 2251 now = ddi_get_lbolt();
2254 2252 old_mii_state = dp->mii_state;
2255 2253
2256 2254 DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2257 2255 dp->name, __func__, now, dp->mii_state));
2258 2256
2259 2257 diff = now - dp->mii_last_check;
2260 2258 dp->mii_last_check = now;
2261 2259
2262 2260 /*
2263 2261 * For NWAM, don't show linkdown state right
2264 2262 * after the system boots
2265 2263 */
2266 2264 if (dp->linkup_delay > 0) {
2267 2265 if (dp->linkup_delay > diff) {
2268 2266 dp->linkup_delay -= diff;
2269 2267 } else {
2270 2268 /* link up timeout */
2271 2269 dp->linkup_delay = -1;
2272 2270 }
2273 2271 }
2274 2272
2275 2273 next_nowait:
2276 2274 switch (dp->mii_state) {
2277 2275 case MII_STATE_UNKNOWN:
2278 2276 /* power-up, DP83840 requires 32 sync bits */
2279 2277 (*dp->gc.gc_mii_sync)(dp);
2280 2278 goto reset_phy;
2281 2279
2282 2280 case MII_STATE_RESETTING:
2283 2281 dp->mii_timer -= diff;
2284 2282 if (dp->mii_timer > 0) {
2285 2283 /* don't read phy registers in resetting */
2286 2284 dp->mii_interval = WATCH_INTERVAL_FAST;
2287 2285 goto next;
2288 2286 }
2289 2287
2290 2288 /* Timer expired, ensure reset bit is not set */
2291 2289
2292 2290 if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2293 2291 /* some phys need sync bits after reset */
2294 2292 (*dp->gc.gc_mii_sync)(dp);
2295 2293 }
2296 2294 val = gem_mii_read(dp, MII_CONTROL);
2297 2295 if (val & MII_CONTROL_RESET) {
2298 2296 cmn_err(CE_NOTE,
2299 2297 "!%s: time:%ld resetting phy not complete."
2300 2298 " mii_control:0x%b",
2301 2299 dp->name, ddi_get_lbolt(),
2302 2300 val, MII_CONTROL_BITS);
2303 2301 }
2304 2302
2305 2303 /* ensure neither isolated nor pwrdown nor auto-nego mode */
2306 2304 /* XXX -- this operation is required for NS DP83840A. */
2307 2305 gem_mii_write(dp, MII_CONTROL, 0);
2308 2306
2309 2307 /* As resetting PHY has completed, configure PHY registers */
2310 2308 if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2311 2309 /* we failed to configure PHY. */
2312 2310 goto reset_phy;
2313 2311 }
2314 2312
2315 2313 /* mii_config may disable autonegotiation */
2316 2314 gem_choose_forcedmode(dp);
2317 2315
2318 2316 dp->mii_lpable = 0;
2319 2317 dp->mii_advert = 0;
2320 2318 dp->mii_exp = 0;
2321 2319 dp->mii_ctl1000 = 0;
2322 2320 dp->mii_stat1000 = 0;
2323 2321 dp->flow_control = FLOW_CONTROL_NONE;
2324 2322
2325 2323 if (!dp->anadv_autoneg) {
2326 2324 /* skip auto-negotiation phase */
2327 2325 dp->mii_state = MII_STATE_MEDIA_SETUP;
2328 2326 dp->mii_timer = 0;
2329 2327 dp->mii_interval = 0;
2330 2328 goto next_nowait;
2331 2329 }
2332 2330
2333 2331 /* Issue auto-negotiation command */
2334 2332 goto autonego;
2335 2333
2336 2334 case MII_STATE_AUTONEGOTIATING:
2337 2335 /*
2338 2336 * Autonegotiation is in progress
2339 2337 */
2340 2338 dp->mii_timer -= diff;
2341 2339 if (dp->mii_timer -
2342 2340 (dp->gc.gc_mii_an_timeout
2343 2341 - dp->gc.gc_mii_an_wait) > 0) {
2344 2342 /*
2345 2343 * wait for a while, typically autonegotiation
2346 2344 * completes in 2.3 - 2.5 sec.
2347 2345 */
2348 2346 dp->mii_interval = WATCH_INTERVAL_FAST;
2349 2347 goto next;
2350 2348 }
2351 2349
2352 2350 /* read PHY status */
2353 2351 status = gem_mii_read(dp, MII_STATUS);
2354 2352 DPRINTF(4, (CE_CONT,
2355 2353 "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2356 2354 dp->name, __func__, dp->mii_state,
2357 2355 status, MII_STATUS_BITS));
2358 2356
2359 2357 if (status & MII_STATUS_REMFAULT) {
2360 2358 /*
2361 2359 * The link partner told us something wrong happened.
2362 2360 * What do we do ?
2363 2361 */
2364 2362 cmn_err(CE_CONT,
2365 2363 "!%s: auto-negotiation failed: remote fault",
2366 2364 dp->name);
2367 2365 goto autonego;
2368 2366 }
2369 2367
2370 2368 if ((status & MII_STATUS_ANDONE) == 0) {
2371 2369 if (dp->mii_timer <= 0) {
2372 2370 /*
2373 2371 * Auto-negotiation timed out,
2374 2372 * try again w/o resetting phy.
2375 2373 */
2376 2374 if (!dp->mii_supress_msg) {
2377 2375 cmn_err(CE_WARN,
2378 2376 "!%s: auto-negotiation failed: timeout",
2379 2377 dp->name);
2380 2378 dp->mii_supress_msg = B_TRUE;
2381 2379 }
2382 2380 goto autonego;
2383 2381 }
2384 2382 /*
2385 2383 * Auto-negotiation is in progress. Wait.
2386 2384 */
2387 2385 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2388 2386 goto next;
2389 2387 }
2390 2388
2391 2389 /*
2392 2390 * Auto-negotiation has completed.
2393 2391 * Assume linkdown and fall through.
2394 2392 */
2395 2393 dp->mii_supress_msg = B_FALSE;
2396 2394 dp->mii_state = MII_STATE_AN_DONE;
2397 2395 DPRINTF(0, (CE_CONT,
2398 2396 "!%s: auto-negotiation completed, MII_STATUS:%b",
2399 2397 dp->name, status, MII_STATUS_BITS));
2400 2398
2401 2399 if (dp->gc.gc_mii_an_delay > 0) {
2402 2400 dp->mii_timer = dp->gc.gc_mii_an_delay;
2403 2401 dp->mii_interval = drv_usectohz(20*1000);
2404 2402 goto next;
2405 2403 }
2406 2404
2407 2405 dp->mii_timer = 0;
2408 2406 diff = 0;
2409 2407 goto next_nowait;
2410 2408
2411 2409 case MII_STATE_AN_DONE:
2412 2410 /*
2413 2411 * Auto-negotiation is done. Now we can set up the media.
2414 2412 */
2415 2413 dp->mii_timer -= diff;
2416 2414 if (dp->mii_timer > 0) {
2417 2415 /* wait for a while */
2418 2416 dp->mii_interval = WATCH_INTERVAL_FAST;
2419 2417 goto next;
2420 2418 }
2421 2419
2422 2420 /*
2423 2421 * set up the result of auto negotiation
2424 2422 */
2425 2423
2426 2424 /*
2427 2425 * Read the registers required to determine the current
2428 2426 * duplex mode and media speed.
2429 2427 */
2430 2428 if (dp->gc.gc_mii_an_delay > 0) {
2431 2429 /*
2432 2430 * As the link watcher context has been suspended,
2433 2431 * 'status' is invalid. We must read the status register here.
2434 2432 */
2435 2433 status = gem_mii_read(dp, MII_STATUS);
2436 2434 }
2437 2435 advert = gem_mii_read(dp, MII_AN_ADVERT);
2438 2436 lpable = gem_mii_read(dp, MII_AN_LPABLE);
2439 2437 exp = gem_mii_read(dp, MII_AN_EXPANSION);
2440 2438 if (exp == 0xffff) {
2441 2439 /* some phys don't have exp register */
2442 2440 exp = 0;
2443 2441 }
2444 2442 ctl1000 = 0;
2445 2443 stat1000 = 0;
2446 2444 if (dp->mii_status & MII_STATUS_XSTATUS) {
2447 2445 ctl1000 = gem_mii_read(dp, MII_1000TC);
2448 2446 stat1000 = gem_mii_read(dp, MII_1000TS);
2449 2447 }
2450 2448 dp->mii_lpable = lpable;
2451 2449 dp->mii_advert = advert;
2452 2450 dp->mii_exp = exp;
2453 2451 dp->mii_ctl1000 = ctl1000;
2454 2452 dp->mii_stat1000 = stat1000;
2455 2453
2456 2454 cmn_err(CE_CONT,
2457 2455 "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2458 2456 dp->name,
2459 2457 advert, MII_ABILITY_BITS,
2460 2458 lpable, MII_ABILITY_BITS,
2461 2459 exp, MII_AN_EXP_BITS);
2462 2460
2463 2461 if (dp->mii_status & MII_STATUS_XSTATUS) {
2464 2462 cmn_err(CE_CONT,
2465 2463 "! MII_1000TC:%b, MII_1000TS:%b",
2466 2464 ctl1000, MII_1000TC_BITS,
2467 2465 stat1000, MII_1000TS_BITS);
2468 2466 }
2469 2467
2470 2468 if (gem_population(lpable) <= 1 &&
2471 2469 (exp & MII_AN_EXP_LPCANAN) == 0) {
2472 2470 if ((advert & MII_ABILITY_TECH) != lpable) {
2473 2471 cmn_err(CE_WARN,
2474 2472 "!%s: but the link partnar doesn't seem"
2475 2473 " to have auto-negotiation capability."
2476 2474 " please check the link configuration.",
2477 2475 dp->name);
2478 2476 }
2479 2477 /*
2480 2478 * it should be the result of parallel detection, which
2481 2479 * cannot detect duplex mode.
2482 2480 */
2483 2481 if (lpable & MII_ABILITY_100BASE_TX) {
2484 2482 /*
2485 2483 * we prefer full duplex mode for 100Mbps
2486 2484 * connection, if we can.
2487 2485 */
2488 2486 lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2489 2487 }
2490 2488
2491 2489 if ((advert & lpable) == 0 &&
2492 2490 lpable & MII_ABILITY_10BASE_T) {
2493 2491 lpable |= advert & MII_ABILITY_10BASE_T_FD;
2494 2492 }
2495 2493 /*
2496 2494 * as the link partner isn't capable of auto-negotiation, use
2497 2495 * fixed mode temporarily.
2498 2496 */
2499 2497 fix_phy = B_TRUE;
2500 2498 } else if (lpable == 0) {
2501 2499 cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2502 2500 goto reset_phy;
2503 2501 }
2504 2502 /*
2505 2503 * configure current link mode according to AN priority.
2506 2504 */
2507 2505 val = advert & lpable;
2508 2506 if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2509 2507 (stat1000 & MII_1000TS_LP_FULL)) {
2510 2508 /* 1000BaseT & full duplex */
2511 2509 dp->speed = GEM_SPD_1000;
2512 2510 dp->full_duplex = B_TRUE;
2513 2511 } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2514 2512 (stat1000 & MII_1000TS_LP_HALF)) {
2515 2513 /* 1000BaseT & half duplex */
2516 2514 dp->speed = GEM_SPD_1000;
2517 2515 dp->full_duplex = B_FALSE;
2518 2516 } else if (val & MII_ABILITY_100BASE_TX_FD) {
2519 2517 /* 100BaseTx & full duplex */
2520 2518 dp->speed = GEM_SPD_100;
2521 2519 dp->full_duplex = B_TRUE;
2522 2520 } else if (val & MII_ABILITY_100BASE_T4) {
2523 2521 /* 100BaseT4 & full duplex */
2524 2522 dp->speed = GEM_SPD_100;
2525 2523 dp->full_duplex = B_TRUE;
2526 2524 } else if (val & MII_ABILITY_100BASE_TX) {
2527 2525 /* 100BaseTx & half duplex */
2528 2526 dp->speed = GEM_SPD_100;
2529 2527 dp->full_duplex = B_FALSE;
2530 2528 } else if (val & MII_ABILITY_10BASE_T_FD) {
2531 2529 /* 10BaseT & full duplex */
2532 2530 dp->speed = GEM_SPD_10;
2533 2531 dp->full_duplex = B_TRUE;
2534 2532 } else if (val & MII_ABILITY_10BASE_T) {
2535 2533 /* 10BaseT & half duplex */
2536 2534 dp->speed = GEM_SPD_10;
2537 2535 dp->full_duplex = B_FALSE;
2538 2536 } else {
2539 2537 /*
2540 2538 * It seems that the link partner doesn't have
2541 2539 * auto-negotiation capability and our PHY
2542 2540 * could not report the correct current mode.
2543 2541 * We guess the current mode from the mii_control register.
2544 2542 */
2545 2543 val = gem_mii_read(dp, MII_CONTROL);
2546 2544
2547 2545 /* select 100m full or 10m half */
2548 2546 dp->speed = (val & MII_CONTROL_100MB) ?
2549 2547 GEM_SPD_100 : GEM_SPD_10;
2550 2548 dp->full_duplex = dp->speed != GEM_SPD_10;
2551 2549 fix_phy = B_TRUE;
2552 2550
2553 2551 cmn_err(CE_NOTE,
2554 2552 "!%s: auto-negotiation done but "
2555 2553 "common ability not found.\n"
2556 2554 "PHY state: control:%b advert:%b lpable:%b\n"
2557 2555 "guessing %d Mbps %s duplex mode",
2558 2556 dp->name,
2559 2557 val, MII_CONTROL_BITS,
2560 2558 advert, MII_ABILITY_BITS,
2561 2559 lpable, MII_ABILITY_BITS,
2562 2560 gem_speed_value[dp->speed],
2563 2561 dp->full_duplex ? "full" : "half");
2564 2562 }
2565 2563
2566 2564 if (dp->full_duplex) {
2567 2565 dp->flow_control =
2568 2566 gem_fc_result[fc_cap_decode(advert)]
2569 2567 [fc_cap_decode(lpable)];
2570 2568 } else {
2571 2569 dp->flow_control = FLOW_CONTROL_NONE;
2572 2570 }
2573 2571 dp->mii_state = MII_STATE_MEDIA_SETUP;
2574 2572 /* FALLTHROUGH */
2575 2573
2576 2574 case MII_STATE_MEDIA_SETUP:
2577 2575 dp->mii_state = MII_STATE_LINKDOWN;
2578 2576 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2579 2577 DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
2580 2578 dp->mii_supress_msg = B_FALSE;
2581 2579
2582 2580 /* use short interval */
2583 2581 dp->mii_interval = WATCH_INTERVAL_FAST;
2584 2582
2585 2583 if ((!dp->anadv_autoneg) ||
2586 2584 dp->gc.gc_mii_an_oneshot || fix_phy) {
2587 2585
2588 2586 /*
2589 2587 * write specified mode to phy.
2590 2588 */
2591 2589 val = gem_mii_read(dp, MII_CONTROL);
2592 2590 val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2593 2591 MII_CONTROL_ANE | MII_CONTROL_RSAN);
2594 2592
2595 2593 if (dp->full_duplex) {
2596 2594 val |= MII_CONTROL_FDUPLEX;
2597 2595 }
2598 2596
2599 2597 switch (dp->speed) {
2600 2598 case GEM_SPD_1000:
2601 2599 val |= MII_CONTROL_1000MB;
2602 2600 break;
2603 2601
2604 2602 case GEM_SPD_100:
2605 2603 val |= MII_CONTROL_100MB;
2606 2604 break;
2607 2605
2608 2606 default:
2609 2607 cmn_err(CE_WARN, "%s: unknown speed:%d",
2610 2608 dp->name, dp->speed);
2611 2609 /* FALLTHROUGH */
2612 2610 case GEM_SPD_10:
2613 2611 /* for GEM_SPD_10, do nothing */
2614 2612 break;
2615 2613 }
2616 2614
2617 2615 if (dp->mii_status & MII_STATUS_XSTATUS) {
2618 2616 gem_mii_write(dp,
2619 2617 MII_1000TC, MII_1000TC_CFG_EN);
2620 2618 }
2621 2619 gem_mii_write(dp, MII_CONTROL, val);
2622 2620 }
2623 2621
2624 2622 if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2625 2623 /* notify the result of auto-negotiation to mac */
2626 2624 (*dp->gc.gc_set_media)(dp);
2627 2625 }
2628 2626
2629 2627 if ((void *)dp->gc.gc_mii_tune_phy) {
2630 2628 /* for built-in sis900 */
2631 2629 /* XXX - this code should be removed. */
2632 2630 (*dp->gc.gc_mii_tune_phy)(dp);
2633 2631 }
2634 2632
2635 2633 goto next_nowait;
2636 2634
2637 2635 case MII_STATE_LINKDOWN:
2638 2636 status = gem_mii_read(dp, MII_STATUS);
2639 2637 if (status & MII_STATUS_LINKUP) {
2640 2638 /*
2641 2639 * Link going up
2642 2640 */
2643 2641 dp->mii_state = MII_STATE_LINKUP;
2644 2642 dp->mii_supress_msg = B_FALSE;
2645 2643
2646 2644 DPRINTF(0, (CE_CONT,
2647 2645 "!%s: link up detected: mii_stat:%b",
2648 2646 dp->name, status, MII_STATUS_BITS));
2649 2647
2650 2648 /*
2651 2649 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2652 2650 * ignored when MII_CONTROL_ANE is set.
2653 2651 */
2654 2652 cmn_err(CE_CONT,
2655 2653 "!%s: Link up: %d Mbps %s duplex %s flow control",
2656 2654 dp->name,
2657 2655 gem_speed_value[dp->speed],
2658 2656 dp->full_duplex ? "full" : "half",
2659 2657 gem_fc_type[dp->flow_control]);
2660 2658
2661 2659 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2662 2660
2663 2661 /* XXX - we need another timer to watch statistics */
2664 2662 if (dp->gc.gc_mii_hw_link_detection &&
2665 2663 dp->nic_state == NIC_STATE_ONLINE) {
2666 2664 dp->mii_interval = 0;
2667 2665 }
2668 2666
2669 2667 if (dp->nic_state == NIC_STATE_ONLINE) {
2670 2668 if (!dp->mac_active) {
2671 2669 (void) gem_mac_start(dp);
2672 2670 }
2673 2671 tx_sched = B_TRUE;
2674 2672 }
2675 2673 goto next;
2676 2674 }
2677 2675
2678 2676 dp->mii_supress_msg = B_TRUE;
2679 2677 if (dp->anadv_autoneg) {
2680 2678 dp->mii_timer -= diff;
2681 2679 if (dp->mii_timer <= 0) {
2682 2680 /*
2683 2681 * link down timer expired.
2684 2682 * need to restart auto-negotiation.
2685 2683 */
2686 2684 linkdown_action =
2687 2685 dp->gc.gc_mii_linkdown_timeout_action;
2688 2686 goto restart_autonego;
2689 2687 }
2690 2688 }
2691 2689 /* don't change mii_state */
2692 2690 break;
2693 2691
2694 2692 case MII_STATE_LINKUP:
2695 2693 status = gem_mii_read(dp, MII_STATUS);
2696 2694 if ((status & MII_STATUS_LINKUP) == 0) {
2697 2695 /*
2698 2696 * Link going down
2699 2697 */
2700 2698 cmn_err(CE_NOTE,
2701 2699 "!%s: link down detected: mii_stat:%b",
2702 2700 dp->name, status, MII_STATUS_BITS);
2703 2701
2704 2702 if (dp->nic_state == NIC_STATE_ONLINE &&
2705 2703 dp->mac_active &&
2706 2704 dp->gc.gc_mii_stop_mac_on_linkdown) {
2707 2705 (void) gem_mac_stop(dp, 0);
2708 2706
2709 2707 if (dp->tx_blocked) {
2710 2708 /* drain tx */
2711 2709 tx_sched = B_TRUE;
2712 2710 }
2713 2711 }
2714 2712
2715 2713 if (dp->anadv_autoneg) {
2716 2714 /* need to restart auto-negotiation */
2717 2715 linkdown_action = dp->gc.gc_mii_linkdown_action;
2718 2716 goto restart_autonego;
2719 2717 }
2720 2718
2721 2719 dp->mii_state = MII_STATE_LINKDOWN;
2722 2720 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2723 2721
2724 2722 if ((void *)dp->gc.gc_mii_tune_phy) {
2725 2723 /* for built-in sis900 */
2726 2724 (*dp->gc.gc_mii_tune_phy)(dp);
2727 2725 }
2728 2726 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2729 2727 goto next;
2730 2728 }
2731 2729
2732 2730 /* don't change mii_state */
2733 2731 if (dp->gc.gc_mii_hw_link_detection &&
2734 2732 dp->nic_state == NIC_STATE_ONLINE) {
2735 2733 dp->mii_interval = 0;
2736 2734 goto next;
2737 2735 }
2738 2736 break;
2739 2737 }
2740 2738 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2741 2739 goto next;
2742 2740
2743 2741 /* Actions on the end of state routine */
2744 2742
2745 2743 restart_autonego:
2746 2744 switch (linkdown_action) {
2747 2745 case MII_ACTION_RESET:
2748 2746 if (!dp->mii_supress_msg) {
2749 2747 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2750 2748 }
2751 2749 dp->mii_supress_msg = B_TRUE;
2752 2750 goto reset_phy;
2753 2751
2754 2752 case MII_ACTION_NONE:
2755 2753 dp->mii_supress_msg = B_TRUE;
2756 2754 if (dp->gc.gc_mii_an_oneshot) {
2757 2755 goto autonego;
2758 2756 }
2759 2757 /* PHY will restart autonego automatically */
2760 2758 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2761 2759 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2762 2760 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2763 2761 goto next;
2764 2762
2765 2763 case MII_ACTION_RSA:
2766 2764 if (!dp->mii_supress_msg) {
2767 2765 cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2768 2766 dp->name);
2769 2767 }
2770 2768 dp->mii_supress_msg = B_TRUE;
2771 2769 goto autonego;
2772 2770
2773 2771 default:
2774 2772 cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
2775 2773 dp->name, dp->gc.gc_mii_linkdown_action);
2776 2774 dp->mii_supress_msg = B_TRUE;
2777 2775 }
2778 2776 /* NOTREACHED */
2779 2777
2780 2778 reset_phy:
2781 2779 if (!dp->mii_supress_msg) {
2782 2780 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2783 2781 }
2784 2782 dp->mii_state = MII_STATE_RESETTING;
2785 2783 dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2786 2784 if (!dp->gc.gc_mii_dont_reset) {
2787 2785 gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2788 2786 }
2789 2787 dp->mii_interval = WATCH_INTERVAL_FAST;
2790 2788 goto next;
2791 2789
2792 2790 autonego:
2793 2791 if (!dp->mii_supress_msg) {
2794 2792 cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2795 2793 }
2796 2794 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2797 2795 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2798 2796
2799 2797 /* start/restart auto nego */
2800 2798 val = gem_mii_read(dp, MII_CONTROL) &
2801 2799 ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2802 2800
2803 2801 gem_mii_write(dp, MII_CONTROL,
2804 2802 val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2805 2803
2806 2804 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2807 2805
2808 2806 next:
2809 2807 if (dp->link_watcher_id == 0 && dp->mii_interval) {
2810 2808 /* we must schedule next mii_watcher */
2811 2809 dp->link_watcher_id =
2812 2810 timeout((void (*)(void *))&gem_mii_link_watcher,
2813 2811 (void *)dp, dp->mii_interval);
2814 2812 }
2815 2813
2816 2814 if (old_mii_state != dp->mii_state) {
2817 2815 /* notify new mii link state */
2818 2816 if (dp->mii_state == MII_STATE_LINKUP) {
2819 2817 dp->linkup_delay = 0;
2820 2818 GEM_LINKUP(dp);
2821 2819 } else if (dp->linkup_delay <= 0) {
2822 2820 GEM_LINKDOWN(dp);
2823 2821 }
2824 2822 } else if (dp->linkup_delay < 0) {
2825 2823 /* first linkup timeout */
2826 2824 dp->linkup_delay = 0;
2827 2825 GEM_LINKDOWN(dp);
2828 2826 }
2829 2827
2830 2828 return (tx_sched);
2831 2829 }
2832 2830
2833 2831 static void
2834 2832 gem_mii_link_watcher(struct gem_dev *dp)
2835 2833 {
2836 2834 boolean_t tx_sched;
2837 2835
2838 2836 mutex_enter(&dp->intrlock);
2839 2837
2840 2838 dp->link_watcher_id = 0;
2841 2839 tx_sched = gem_mii_link_check(dp);
2842 2840 #if GEM_DEBUG_LEVEL > 2
2843 2841 if (dp->link_watcher_id == 0) {
2844 2842 cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2845 2843 }
2846 2844 #endif
2847 2845 mutex_exit(&dp->intrlock);
2848 2846
2849 2847 if (tx_sched) {
2850 2848 /* kick potentially stopped downstream */
2851 2849 mac_tx_update(dp->mh);
2852 2850 }
2853 2851 }
2854 2852
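/*
 * Note on gem_mii_probe_default() below: it locates the PHY.  The configured
 * mii_phy_addr is tried first, then addresses gc_mii_addr_min..31 are
 * scanned; the second scan pass writes 0 to MII_CONTROL before reading
 * MII_STATUS, presumably to wake PHYs that power up isolated or powered
 * down.  Once found, the PHY id is recorded and pause support is probed by
 * temporarily writing PAUSE|ASMPAUSE into MII_AN_ADVERT, reading back which
 * bits stick, and then restoring the original advertisement value.
 */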
2855 2853 int
2856 2854 gem_mii_probe_default(struct gem_dev *dp)
2857 2855 {
2858 2856 int8_t phy;
2859 2857 uint16_t status;
2860 2858 uint16_t adv;
2861 2859 uint16_t adv_org;
2862 2860
2863 2861 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2864 2862
2865 2863 /*
2866 2864 * Scan PHY
2867 2865 */
2868 2866 /* ensure sync bits are sent */
2869 2867 dp->mii_status = 0;
2870 2868
2871 2869 /* Try default phy first */
2872 2870 if (dp->mii_phy_addr) {
2873 2871 status = gem_mii_read(dp, MII_STATUS);
2874 2872 if (status != 0xffff && status != 0) {
2875 2873 gem_mii_write(dp, MII_CONTROL, 0);
2876 2874 goto PHY_found;
2877 2875 }
2878 2876
2879 2877 if (dp->mii_phy_addr < 0) {
2880 2878 cmn_err(CE_NOTE,
2881 2879 "!%s: failed to probe default internal and/or non-MII PHY",
2882 2880 dp->name);
2883 2881 return (GEM_FAILURE);
2884 2882 }
2885 2883
2886 2884 cmn_err(CE_NOTE,
2887 2885 "!%s: failed to probe default MII PHY at %d",
2888 2886 dp->name, dp->mii_phy_addr);
2889 2887 }
2890 2888
2891 2889 /* Try all possible addresses */
2892 2890 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2893 2891 dp->mii_phy_addr = phy;
2894 2892 status = gem_mii_read(dp, MII_STATUS);
2895 2893
2896 2894 if (status != 0xffff && status != 0) {
2897 2895 gem_mii_write(dp, MII_CONTROL, 0);
2898 2896 goto PHY_found;
2899 2897 }
2900 2898 }
2901 2899
2902 2900 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2903 2901 dp->mii_phy_addr = phy;
2904 2902 gem_mii_write(dp, MII_CONTROL, 0);
2905 2903 status = gem_mii_read(dp, MII_STATUS);
2906 2904
2907 2905 if (status != 0xffff && status != 0) {
2908 2906 goto PHY_found;
2909 2907 }
2910 2908 }
2911 2909
2912 2910 cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2913 2911 dp->mii_phy_addr = -1;
2914 2912
2915 2913 return (GEM_FAILURE);
2916 2914
2917 2915 PHY_found:
2918 2916 dp->mii_status = status;
2919 2917 dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2920 2918 gem_mii_read(dp, MII_PHYIDL);
2921 2919
2922 2920 if (dp->mii_phy_addr < 0) {
2923 2921 cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2924 2922 dp->name, dp->mii_phy_id);
2925 2923 } else {
2926 2924 cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2927 2925 dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2928 2926 }
2929 2927
2930 2928 cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2931 2929 dp->name,
2932 2930 gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2933 2931 status, MII_STATUS_BITS,
2934 2932 gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2935 2933 gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2936 2934
2937 2935 dp->mii_xstatus = 0;
2938 2936 if (status & MII_STATUS_XSTATUS) {
2939 2937 dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2940 2938
2941 2939 cmn_err(CE_CONT, "!%s: xstatus:%b",
2942 2940 dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2943 2941 }
2944 2942
2945 2943 /* check if the phy can advertise pause abilities */
2946 2944 adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2947 2945
2948 2946 gem_mii_write(dp, MII_AN_ADVERT,
2949 2947 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);
2950 2948
2951 2949 adv = gem_mii_read(dp, MII_AN_ADVERT);
2952 2950
2953 2951 if ((adv & MII_ABILITY_PAUSE) == 0) {
2954 2952 dp->gc.gc_flow_control &= ~1;
2955 2953 }
2956 2954
2957 2955 if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
2958 2956 dp->gc.gc_flow_control &= ~2;
2959 2957 }
2960 2958
2961 2959 gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2962 2960
2963 2961 return (GEM_SUCCESS);
2964 2962 }
2965 2963
2966 2964 static void
2967 2965 gem_mii_start(struct gem_dev *dp)
2968 2966 {
2969 2967 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970 2968
2971 2969 /* make a first call of check link */
2972 2970 dp->mii_state = MII_STATE_UNKNOWN;
2973 2971 dp->mii_last_check = ddi_get_lbolt();
2974 2972 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2975 2973 (void) gem_mii_link_watcher(dp);
2976 2974 }
2977 2975
2978 2976 static void
2979 2977 gem_mii_stop(struct gem_dev *dp)
2980 2978 {
2981 2979 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2982 2980
2983 2981 /* Ensure timer routine stopped */
2984 2982 mutex_enter(&dp->intrlock);
2985 2983 if (dp->link_watcher_id) {
2986 2984 while (untimeout(dp->link_watcher_id) == -1)
2987 2985 ;
2988 2986 dp->link_watcher_id = 0;
2989 2987 }
2990 2988 mutex_exit(&dp->intrlock);
2991 2989 }
2992 2990
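/*
 * Note on gem_get_mac_addr_conf() below: it reads a "mac-addr" property from
 * the driver .conf file.  The value must be a colon-separated string of six
 * hex octets (exactly ETHERADDRL*3-1 characters), for example:
 *
 *	mac-addr="00:11:22:33:44:55";
 *
 * (the address above is only an illustration, not a real assignment).
 * An all-zero address is rejected.
 */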
2993 2991 boolean_t
2994 2992 gem_get_mac_addr_conf(struct gem_dev *dp)
2995 2993 {
2996 2994 char propname[32];
2997 2995 char *valstr;
2998 2996 uint8_t mac[ETHERADDRL];
2999 2997 char *cp;
3000 2998 int c;
3001 2999 int i;
3002 3000 int j;
3003 3001 uint8_t v;
3004 3002 uint8_t d;
3005 3003 uint8_t ored;
3006 3004
3007 3005 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3008 3006 /*
3009 3007 * Get ethernet address from .conf file
3010 3008 */
3011 3009 (void) sprintf(propname, "mac-addr");
3012 3010 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3013 3011 DDI_PROP_DONTPASS, propname, &valstr)) !=
3014 3012 DDI_PROP_SUCCESS) {
3015 3013 return (B_FALSE);
3016 3014 }
3017 3015
3018 3016 if (strlen(valstr) != ETHERADDRL*3-1) {
3019 3017 goto syntax_err;
3020 3018 }
3021 3019
3022 3020 cp = valstr;
3023 3021 j = 0;
3024 3022 ored = 0;
3025 3023 for (;;) {
3026 3024 v = 0;
3027 3025 for (i = 0; i < 2; i++) {
3028 3026 c = *cp++;
3029 3027
3030 3028 if (c >= 'a' && c <= 'f') {
3031 3029 d = c - 'a' + 10;
3032 3030 } else if (c >= 'A' && c <= 'F') {
3033 3031 d = c - 'A' + 10;
3034 3032 } else if (c >= '0' && c <= '9') {
3035 3033 d = c - '0';
3036 3034 } else {
3037 3035 goto syntax_err;
3038 3036 }
3039 3037 v = (v << 4) | d;
3040 3038 }
3041 3039
3042 3040 mac[j++] = v;
3043 3041 ored |= v;
3044 3042 if (j == ETHERADDRL) {
3045 3043 /* done */
3046 3044 break;
3047 3045 }
3048 3046
3049 3047 c = *cp++;
3050 3048 if (c != ':') {
3051 3049 goto syntax_err;
3052 3050 }
3053 3051 }
3054 3052
3055 3053 if (ored == 0) {
3056 3054 goto err;
3057 3055 }
3058 3056 for (i = 0; i < ETHERADDRL; i++) {
3059 3057 dp->dev_addr.ether_addr_octet[i] = mac[i];
3060 3058 }
3061 3059 ddi_prop_free(valstr);
3062 3060 return (B_TRUE);
3063 3061
3064 3062 syntax_err:
3065 3063 cmn_err(CE_CONT,
3066 3064 "!%s: read mac addr: trying .conf: syntax err %s",
3067 3065 dp->name, valstr);
3068 3066 err:
3069 3067 ddi_prop_free(valstr);
3070 3068
3071 3069 return (B_FALSE);
3072 3070 }
3073 3071
3074 3072
3075 3073 /* ============================================================== */
3076 3074 /*
3077 3075 * internal start/stop interface
3078 3076 */
3079 3077 /* ============================================================== */
3080 3078 static int
3081 3079 gem_mac_set_rx_filter(struct gem_dev *dp)
3082 3080 {
3083 3081 return ((*dp->gc.gc_set_rx_filter)(dp));
3084 3082 }
3085 3083
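/*
 * Note on the cold/warm start split: gem_mac_init() (cold start) rebuilds the
 * tx/rx rings and initializes the chip with mac_active still false, while
 * gem_mac_start() (warm start) is called once the link is up to enable rx/tx
 * and to reload any tx buffers that were queued while the link was down.
 */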
3086 3084 /*
3087 3085 * gem_mac_init: cold start
3088 3086 */
3089 3087 static int
3090 3088 gem_mac_init(struct gem_dev *dp)
3091 3089 {
3092 3090 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3093 3091
3094 3092 if (dp->mac_suspended) {
3095 3093 return (GEM_FAILURE);
3096 3094 }
3097 3095
3098 3096 dp->mac_active = B_FALSE;
3099 3097
3100 3098 gem_init_rx_ring(dp);
3101 3099 gem_init_tx_ring(dp);
3102 3100
3103 3101 /* reset transmitter state */
3104 3102 dp->tx_blocked = (clock_t)0;
3105 3103 dp->tx_busy = 0;
3106 3104 dp->tx_reclaim_busy = 0;
3107 3105 dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3108 3106
3109 3107 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3110 3108 return (GEM_FAILURE);
3111 3109 }
3112 3110
3113 3111 gem_prepare_rx_buf(dp);
3114 3112
3115 3113 return (GEM_SUCCESS);
3116 3114 }
3117 3115 /*
3118 3116 * gem_mac_start: warm start
3119 3117 */
3120 3118 static int
3121 3119 gem_mac_start(struct gem_dev *dp)
3122 3120 {
3123 3121 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3124 3122
3125 3123 ASSERT(mutex_owned(&dp->intrlock));
3126 3124 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3127 3125 ASSERT(dp->mii_state == MII_STATE_LINKUP);
3128 3126
3129 3127 /* enable tx and rx */
3130 3128 mutex_enter(&dp->xmitlock);
3131 3129 if (dp->mac_suspended) {
3132 3130 mutex_exit(&dp->xmitlock);
3133 3131 return (GEM_FAILURE);
3134 3132 }
3135 3133 dp->mac_active = B_TRUE;
3136 3134 mutex_exit(&dp->xmitlock);
3137 3135
3138 3136 /* setup rx buffers */
3139 3137 (*dp->gc.gc_rx_start)(dp,
3140 3138 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3141 3139 dp->rx_active_tail - dp->rx_active_head);
3142 3140
3143 3141 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3144 3142 cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3145 3143 dp->name, __func__);
3146 3144 return (GEM_FAILURE);
3147 3145 }
3148 3146
3149 3147 mutex_enter(&dp->xmitlock);
3150 3148
3151 3149 /* load untransmitted packets into the nic */
3152 3150 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3153 3151 if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3154 3152 gem_tx_load_descs_oo(dp,
3155 3153 dp->tx_softq_head, dp->tx_softq_tail,
3156 3154 GEM_TXFLAG_HEAD);
3157 3155 /* issue preloaded tx buffers */
3158 3156 gem_tx_start_unit(dp);
3159 3157 }
3160 3158
3161 3159 mutex_exit(&dp->xmitlock);
3162 3160
3163 3161 return (GEM_SUCCESS);
3164 3162 }
3165 3163
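/*
 * Note on gem_mac_stop() below: it quiesces the device.  It clears mac_active
 * to block new transmits, waits on tx_drain_cv for threads still inside the
 * transmit path (tx_busy), optionally busy-waits (unless GEM_RESTART_NOWAIT)
 * until the hardware has drained its tx ring, then stops or resets the chip,
 * flushes the rx side, collects final statistics, and either preserves the
 * queued tx buffers (GEM_RESTART_KEEP_BUF) or discards them.
 */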
3166 3164 static int
3167 3165 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3168 3166 {
3169 3167 int i;
3170 3168 int wait_time; /* in uS */
3171 3169 #ifdef GEM_DEBUG_LEVEL
3172 3170 clock_t now;
3173 3171 #endif
3174 3172 int ret = GEM_SUCCESS;
3175 3173
3176 3174 DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3177 3175 dp->name, __func__, dp->rx_buf_freecnt));
3178 3176
3179 3177 ASSERT(mutex_owned(&dp->intrlock));
3180 3178 ASSERT(!mutex_owned(&dp->xmitlock));
3181 3179
3182 3180 /*
3183 3181 * Block transmits
3184 3182 */
3185 3183 mutex_enter(&dp->xmitlock);
3186 3184 if (dp->mac_suspended) {
3187 3185 mutex_exit(&dp->xmitlock);
3188 3186 return (GEM_SUCCESS);
3189 3187 }
3190 3188 dp->mac_active = B_FALSE;
3191 3189
3192 3190 while (dp->tx_busy > 0) {
3193 3191 cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3194 3192 }
3195 3193 mutex_exit(&dp->xmitlock);
3196 3194
3197 3195 if ((flags & GEM_RESTART_NOWAIT) == 0) {
3198 3196 /*
3199 3197 * Wait for all tx buffers to be sent.
3200 3198 */
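/*
 * The drain budget below is a rough estimate: 8 * MAXPKTBUF / speed gives
 * the microseconds needed to transmit one maximum-sized packet (bits divided
 * by Mbps), multiplied by the number of tx buffers still queued and doubled
 * for margin.
 */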
3201 3199 wait_time =
3202 3200 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3203 3201 (dp->tx_active_tail - dp->tx_active_head);
3204 3202
3205 3203 DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3206 3204 dp->name, __func__, wait_time));
3207 3205 i = 0;
3208 3206 #ifdef GEM_DEBUG_LEVEL
3209 3207 now = ddi_get_lbolt();
3210 3208 #endif
3211 3209 while (dp->tx_active_tail != dp->tx_active_head) {
3212 3210 if (i > wait_time) {
3213 3211 /* timeout */
3214 3212 cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3215 3213 dp->name, __func__);
3216 3214 break;
3217 3215 }
3218 3216 (void) gem_reclaim_txbuf(dp);
3219 3217 drv_usecwait(100);
3220 3218 i += 100;
3221 3219 }
3222 3220 DPRINTF(0, (CE_NOTE,
3223 3221 "!%s: %s: the nic have drained in %d uS, real %d mS",
3224 3222 dp->name, __func__, i,
3225 3223 10*((int)(ddi_get_lbolt() - now))));
3226 3224 }
3227 3225
3228 3226 /*
3229 3227 * Now we can stop the nic safely.
3230 3228 */
3231 3229 if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3232 3230 cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3233 3231 dp->name, __func__);
3234 3232 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3235 3233 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3236 3234 dp->name, __func__);
3237 3235 }
3238 3236 }
3239 3237
3240 3238 /*
3241 3239 * Clear all rx buffers
3242 3240 */
3243 3241 if (flags & GEM_RESTART_KEEP_BUF) {
3244 3242 (void) gem_receive(dp);
3245 3243 }
3246 3244 gem_clean_rx_buf(dp);
3247 3245
3248 3246 /*
3249 3247 * Update final statistics
3250 3248 */
3251 3249 (*dp->gc.gc_get_stats)(dp);
3252 3250
3253 3251 /*
3254 3252 * Clear all pending tx packets
3255 3253 */
3256 3254 ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3257 3255 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3258 3256 if (flags & GEM_RESTART_KEEP_BUF) {
3259 3257 /* restore active tx buffers */
3260 3258 dp->tx_active_tail = dp->tx_active_head;
3261 3259 dp->tx_softq_head = dp->tx_active_head;
3262 3260 } else {
3263 3261 gem_clean_tx_buf(dp);
3264 3262 }
3265 3263
3266 3264 return (ret);
3267 3265 }
3268 3266
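/*
 * Note on gem_add_multicast()/gem_remove_multicast() below: two counters are
 * kept, mc_count_req (the number of addresses the mac layer asked for) and
 * mc_count (how many actually fit in the GEM_MAXMC-entry mc_list).  When they
 * differ the list has overflowed and RXMODE_MULTI_OVF is set, which the
 * chip-specific gc_set_rx_filter routine can use to fall back to a less exact
 * filter (e.g. all-multicast); that fallback is an assumption about the hook,
 * the generic code here only maintains the flag.
 */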
3269 3267 static int
3270 3268 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3271 3269 {
3272 3270 int cnt;
3273 3271 int err;
3274 3272
3275 3273 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3276 3274
3277 3275 mutex_enter(&dp->intrlock);
3278 3276 if (dp->mac_suspended) {
3279 3277 mutex_exit(&dp->intrlock);
3280 3278 return (GEM_FAILURE);
3281 3279 }
3282 3280
3283 3281 if (dp->mc_count_req++ < GEM_MAXMC) {
3284 3282 /* append the new address at the end of the mclist */
3285 3283 cnt = dp->mc_count;
3286 3284 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3287 3285 ETHERADDRL);
3288 3286 if (dp->gc.gc_multicast_hash) {
3289 3287 dp->mc_list[cnt].hash =
3290 3288 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3291 3289 }
3292 3290 dp->mc_count = cnt + 1;
3293 3291 }
3294 3292
3295 3293 if (dp->mc_count_req != dp->mc_count) {
3296 3294 /* multicast address list overflow */
3297 3295 dp->rxmode |= RXMODE_MULTI_OVF;
3298 3296 } else {
3299 3297 dp->rxmode &= ~RXMODE_MULTI_OVF;
3300 3298 }
3301 3299
3302 3300 /* tell new multicast list to the hardware */
3303 3301 err = gem_mac_set_rx_filter(dp);
3304 3302
3305 3303 mutex_exit(&dp->intrlock);
3306 3304
3307 3305 return (err);
3308 3306 }
3309 3307
3310 3308 static int
3311 3309 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3312 3310 {
3313 3311 size_t len;
3314 3312 int i;
3315 3313 int cnt;
3316 3314 int err;
3317 3315
3318 3316 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3319 3317
3320 3318 mutex_enter(&dp->intrlock);
3321 3319 if (dp->mac_suspended) {
3322 3320 mutex_exit(&dp->intrlock);
3323 3321 return (GEM_FAILURE);
3324 3322 }
3325 3323
3326 3324 dp->mc_count_req--;
3327 3325 cnt = dp->mc_count;
3328 3326 for (i = 0; i < cnt; i++) {
3329 3327 if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3330 3328 continue;
3331 3329 }
3332 3330 /* shrink the mclist by copying forward */
3333 3331 len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3334 3332 if (len > 0) {
3335 3333 bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3336 3334 }
3337 3335 dp->mc_count--;
3338 3336 break;
3339 3337 }
3340 3338
3341 3339 if (dp->mc_count_req != dp->mc_count) {
3342 3340 /* multicast address list overflow */
3343 3341 dp->rxmode |= RXMODE_MULTI_OVF;
3344 3342 } else {
3345 3343 dp->rxmode &= ~RXMODE_MULTI_OVF;
3346 3344 }
3347 3345 /* In gem v2, don't hold xmitlock on calling set_rx_filter */
3348 3346 err = gem_mac_set_rx_filter(dp);
3349 3347
3350 3348 mutex_exit(&dp->intrlock);
3351 3349
3352 3350 return (err);
3353 3351 }
3354 3352
3355 3353 /* ============================================================== */
3356 3354 /*
3357 3355 * ND interface
3358 3356 */
3359 3357 /* ============================================================== */
3360 3358 enum {
3361 3359 PARAM_AUTONEG_CAP,
3362 3360 PARAM_PAUSE_CAP,
3363 3361 PARAM_ASYM_PAUSE_CAP,
3364 3362 PARAM_1000FDX_CAP,
3365 3363 PARAM_1000HDX_CAP,
3366 3364 PARAM_100T4_CAP,
3367 3365 PARAM_100FDX_CAP,
3368 3366 PARAM_100HDX_CAP,
3369 3367 PARAM_10FDX_CAP,
3370 3368 PARAM_10HDX_CAP,
3371 3369
3372 3370 PARAM_ADV_AUTONEG_CAP,
3373 3371 PARAM_ADV_PAUSE_CAP,
3374 3372 PARAM_ADV_ASYM_PAUSE_CAP,
3375 3373 PARAM_ADV_1000FDX_CAP,
3376 3374 PARAM_ADV_1000HDX_CAP,
3377 3375 PARAM_ADV_100T4_CAP,
3378 3376 PARAM_ADV_100FDX_CAP,
3379 3377 PARAM_ADV_100HDX_CAP,
3380 3378 PARAM_ADV_10FDX_CAP,
3381 3379 PARAM_ADV_10HDX_CAP,
3382 3380
3383 3381 PARAM_LP_AUTONEG_CAP,
3384 3382 PARAM_LP_PAUSE_CAP,
3385 3383 PARAM_LP_ASYM_PAUSE_CAP,
3386 3384 PARAM_LP_1000FDX_CAP,
3387 3385 PARAM_LP_1000HDX_CAP,
3388 3386 PARAM_LP_100T4_CAP,
3389 3387 PARAM_LP_100FDX_CAP,
3390 3388 PARAM_LP_100HDX_CAP,
3391 3389 PARAM_LP_10FDX_CAP,
3392 3390 PARAM_LP_10HDX_CAP,
3393 3391
3394 3392 PARAM_LINK_STATUS,
3395 3393 PARAM_LINK_SPEED,
3396 3394 PARAM_LINK_DUPLEX,
3397 3395
3398 3396 PARAM_LINK_AUTONEG,
3399 3397 PARAM_LINK_RX_PAUSE,
3400 3398 PARAM_LINK_TX_PAUSE,
3401 3399
3402 3400 PARAM_LOOP_MODE,
3403 3401 PARAM_MSI_CNT,
3404 3402
3405 3403 #ifdef DEBUG_RESUME
3406 3404 PARAM_RESUME_TEST,
3407 3405 #endif
3408 3406 PARAM_COUNT
3409 3407 };
3410 3408
3411 3409 enum ioc_reply {
3412 3410 IOC_INVAL = -1, /* bad, NAK with EINVAL */
3413 3411 IOC_DONE, /* OK, reply sent */
3414 3412 IOC_ACK, /* OK, just send ACK */
3415 3413 IOC_REPLY, /* OK, just send reply */
3416 3414 IOC_RESTART_ACK, /* OK, restart & ACK */
3417 3415 IOC_RESTART_REPLY /* OK, restart & reply */
3418 3416 };
3419 3417
3420 3418 struct gem_nd_arg {
3421 3419 struct gem_dev *dp;
3422 3420 int item;
3423 3421 };
3424 3422
3425 3423 static int
3426 3424 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3427 3425 {
3428 3426 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3429 3427 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3430 3428 long val;
3431 3429
3432 3430 DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3433 3431 dp->name, __func__, item));
3434 3432
3435 3433 switch (item) {
3436 3434 case PARAM_AUTONEG_CAP:
3437 3435 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3438 3436 DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3439 3437 break;
3440 3438
3441 3439 case PARAM_PAUSE_CAP:
3442 3440 val = BOOLEAN(dp->gc.gc_flow_control & 1);
3443 3441 break;
3444 3442
3445 3443 case PARAM_ASYM_PAUSE_CAP:
3446 3444 val = BOOLEAN(dp->gc.gc_flow_control & 2);
3447 3445 break;
3448 3446
3449 3447 case PARAM_1000FDX_CAP:
3450 3448 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3451 3449 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3452 3450 break;
3453 3451
3454 3452 case PARAM_1000HDX_CAP:
3455 3453 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3456 3454 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3457 3455 break;
3458 3456
3459 3457 case PARAM_100T4_CAP:
3460 3458 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3461 3459 break;
3462 3460
3463 3461 case PARAM_100FDX_CAP:
3464 3462 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3465 3463 break;
3466 3464
3467 3465 case PARAM_100HDX_CAP:
3468 3466 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3469 3467 break;
3470 3468
3471 3469 case PARAM_10FDX_CAP:
3472 3470 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3473 3471 break;
3474 3472
3475 3473 case PARAM_10HDX_CAP:
3476 3474 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3477 3475 break;
3478 3476
3479 3477 case PARAM_ADV_AUTONEG_CAP:
3480 3478 val = dp->anadv_autoneg;
3481 3479 break;
3482 3480
3483 3481 case PARAM_ADV_PAUSE_CAP:
3484 3482 val = BOOLEAN(dp->anadv_flow_control & 1);
3485 3483 break;
3486 3484
3487 3485 case PARAM_ADV_ASYM_PAUSE_CAP:
3488 3486 val = BOOLEAN(dp->anadv_flow_control & 2);
3489 3487 break;
3490 3488
3491 3489 case PARAM_ADV_1000FDX_CAP:
3492 3490 val = dp->anadv_1000fdx;
3493 3491 break;
3494 3492
3495 3493 case PARAM_ADV_1000HDX_CAP:
3496 3494 val = dp->anadv_1000hdx;
3497 3495 break;
3498 3496
3499 3497 case PARAM_ADV_100T4_CAP:
3500 3498 val = dp->anadv_100t4;
3501 3499 break;
3502 3500
3503 3501 case PARAM_ADV_100FDX_CAP:
3504 3502 val = dp->anadv_100fdx;
3505 3503 break;
3506 3504
3507 3505 case PARAM_ADV_100HDX_CAP:
3508 3506 val = dp->anadv_100hdx;
3509 3507 break;
3510 3508
3511 3509 case PARAM_ADV_10FDX_CAP:
3512 3510 val = dp->anadv_10fdx;
3513 3511 break;
3514 3512
3515 3513 case PARAM_ADV_10HDX_CAP:
3516 3514 val = dp->anadv_10hdx;
3517 3515 break;
3518 3516
3519 3517 case PARAM_LP_AUTONEG_CAP:
3520 3518 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3521 3519 break;
3522 3520
3523 3521 case PARAM_LP_PAUSE_CAP:
3524 3522 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3525 3523 break;
3526 3524
3527 3525 case PARAM_LP_ASYM_PAUSE_CAP:
3528 3526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3529 3527 break;
3530 3528
3531 3529 case PARAM_LP_1000FDX_CAP:
3532 3530 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3533 3531 break;
3534 3532
3535 3533 case PARAM_LP_1000HDX_CAP:
3536 3534 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3537 3535 break;
3538 3536
3539 3537 case PARAM_LP_100T4_CAP:
3540 3538 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3541 3539 break;
3542 3540
3543 3541 case PARAM_LP_100FDX_CAP:
3544 3542 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3545 3543 break;
3546 3544
3547 3545 case PARAM_LP_100HDX_CAP:
3548 3546 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3549 3547 break;
3550 3548
3551 3549 case PARAM_LP_10FDX_CAP:
3552 3550 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3553 3551 break;
3554 3552
3555 3553 case PARAM_LP_10HDX_CAP:
3556 3554 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3557 3555 break;
3558 3556
3559 3557 case PARAM_LINK_STATUS:
3560 3558 val = (dp->mii_state == MII_STATE_LINKUP);
3561 3559 break;
3562 3560
3563 3561 case PARAM_LINK_SPEED:
3564 3562 val = gem_speed_value[dp->speed];
3565 3563 break;
3566 3564
3567 3565 case PARAM_LINK_DUPLEX:
3568 3566 val = 0;
3569 3567 if (dp->mii_state == MII_STATE_LINKUP) {
3570 3568 val = dp->full_duplex ? 2 : 1;
3571 3569 }
3572 3570 break;
3573 3571
3574 3572 case PARAM_LINK_AUTONEG:
3575 3573 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3576 3574 break;
3577 3575
3578 3576 case PARAM_LINK_RX_PAUSE:
3579 3577 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580 3578 (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3581 3579 break;
3582 3580
3583 3581 case PARAM_LINK_TX_PAUSE:
3584 3582 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3585 3583 (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3586 3584 break;
3587 3585
3588 3586 #ifdef DEBUG_RESUME
3589 3587 case PARAM_RESUME_TEST:
3590 3588 val = 0;
3591 3589 break;
3592 3590 #endif
3593 3591 default:
3594 3592 cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3595 3593 dp->name, item);
3596 3594 break;
3597 3595 }
3598 3596
3599 3597 (void) mi_mpprintf(mp, "%ld", val);
3600 3598
3601 3599 return (0);
3602 3600 }
3603 3601
3604 3602 static int
3605 3603 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3606 3604 {
3607 3605 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3608 3606 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3609 3607 long val;
3610 3608 char *end;
3611 3609
3612 3610 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3613 3611 if (ddi_strtol(value, &end, 10, &val)) {
3614 3612 return (EINVAL);
3615 3613 }
3616 3614 if (end == value) {
3617 3615 return (EINVAL);
3618 3616 }
3619 3617
3620 3618 switch (item) {
3621 3619 case PARAM_ADV_AUTONEG_CAP:
3622 3620 if (val != 0 && val != 1) {
3623 3621 goto err;
3624 3622 }
3625 3623 if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3626 3624 goto err;
3627 3625 }
3628 3626 dp->anadv_autoneg = (int)val;
3629 3627 break;
3630 3628
3631 3629 case PARAM_ADV_PAUSE_CAP:
3632 3630 if (val != 0 && val != 1) {
3633 3631 goto err;
3634 3632 }
3635 3633 if (val) {
3636 3634 dp->anadv_flow_control |= 1;
3637 3635 } else {
3638 3636 dp->anadv_flow_control &= ~1;
3639 3637 }
3640 3638 break;
3641 3639
3642 3640 case PARAM_ADV_ASYM_PAUSE_CAP:
3643 3641 if (val != 0 && val != 1) {
3644 3642 goto err;
3645 3643 }
3646 3644 if (val) {
3647 3645 dp->anadv_flow_control |= 2;
3648 3646 } else {
3649 3647 dp->anadv_flow_control &= ~2;
3650 3648 }
3651 3649 break;
3652 3650
3653 3651 case PARAM_ADV_1000FDX_CAP:
3654 3652 if (val != 0 && val != 1) {
3655 3653 goto err;
3656 3654 }
3657 3655 if (val && (dp->mii_xstatus &
3658 3656 (MII_XSTATUS_1000BASET_FD |
3659 3657 MII_XSTATUS_1000BASEX_FD)) == 0) {
3660 3658 goto err;
3661 3659 }
3662 3660 dp->anadv_1000fdx = (int)val;
3663 3661 break;
3664 3662
3665 3663 case PARAM_ADV_1000HDX_CAP:
3666 3664 if (val != 0 && val != 1) {
3667 3665 goto err;
3668 3666 }
3669 3667 if (val && (dp->mii_xstatus &
3670 3668 (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3671 3669 goto err;
3672 3670 }
3673 3671 dp->anadv_1000hdx = (int)val;
3674 3672 break;
3675 3673
3676 3674 case PARAM_ADV_100T4_CAP:
3677 3675 if (val != 0 && val != 1) {
3678 3676 goto err;
3679 3677 }
3680 3678 if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3681 3679 goto err;
3682 3680 }
3683 3681 dp->anadv_100t4 = (int)val;
3684 3682 break;
3685 3683
3686 3684 case PARAM_ADV_100FDX_CAP:
3687 3685 if (val != 0 && val != 1) {
3688 3686 goto err;
3689 3687 }
3690 3688 if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3691 3689 goto err;
3692 3690 }
3693 3691 dp->anadv_100fdx = (int)val;
3694 3692 break;
3695 3693
3696 3694 case PARAM_ADV_100HDX_CAP:
3697 3695 if (val != 0 && val != 1) {
3698 3696 goto err;
3699 3697 }
3700 3698 if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3701 3699 goto err;
3702 3700 }
3703 3701 dp->anadv_100hdx = (int)val;
3704 3702 break;
3705 3703
3706 3704 case PARAM_ADV_10FDX_CAP:
3707 3705 if (val != 0 && val != 1) {
3708 3706 goto err;
3709 3707 }
3710 3708 if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3711 3709 goto err;
3712 3710 }
3713 3711 dp->anadv_10fdx = (int)val;
3714 3712 break;
3715 3713
3716 3714 case PARAM_ADV_10HDX_CAP:
3717 3715 if (val != 0 && val != 1) {
3718 3716 goto err;
3719 3717 }
3720 3718 if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3721 3719 goto err;
3722 3720 }
3723 3721 dp->anadv_10hdx = (int)val;
3724 3722 break;
3725 3723 }
3726 3724
3727 3725 /* sync with PHY */
3728 3726 gem_choose_forcedmode(dp);
3729 3727
3730 3728 dp->mii_state = MII_STATE_UNKNOWN;
3731 3729 if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3732 3730 /* XXX - Can we ignore the return code ? */
3733 3731 (void) gem_mii_link_check(dp);
3734 3732 }
3735 3733
3736 3734 return (0);
3737 3735 err:
3738 3736 return (EINVAL);
3739 3737 }
3740 3738
3741 3739 static void
3742 3740 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3743 3741 {
3744 3742 struct gem_nd_arg *arg;
3745 3743
3746 3744 ASSERT(item >= 0);
3747 3745 ASSERT(item < PARAM_COUNT);
3748 3746
3749 3747 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3750 3748 arg->dp = dp;
3751 3749 arg->item = item;
3752 3750
3753 3751 DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3754 3752 dp->name, __func__, name, item));
3755 3753 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3756 3754 }
3757 3755
3758 3756 static void
3759 3757 gem_nd_setup(struct gem_dev *dp)
3760 3758 {
3761 3759 DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3762 3760 dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3763 3761
3764 3762 ASSERT(dp->nd_arg_p == NULL);
3765 3763
3766 3764 dp->nd_arg_p =
3767 3765 kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3768 3766
3769 3767 #define SETFUNC(x) ((x) ? gem_param_set : NULL)
3770 3768
3771 3769 gem_nd_load(dp, "autoneg_cap",
3772 3770 gem_param_get, NULL, PARAM_AUTONEG_CAP);
3773 3771 gem_nd_load(dp, "pause_cap",
3774 3772 gem_param_get, NULL, PARAM_PAUSE_CAP);
3775 3773 gem_nd_load(dp, "asym_pause_cap",
3776 3774 gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3777 3775 gem_nd_load(dp, "1000fdx_cap",
3778 3776 gem_param_get, NULL, PARAM_1000FDX_CAP);
3779 3777 gem_nd_load(dp, "1000hdx_cap",
3780 3778 gem_param_get, NULL, PARAM_1000HDX_CAP);
3781 3779 gem_nd_load(dp, "100T4_cap",
3782 3780 gem_param_get, NULL, PARAM_100T4_CAP);
3783 3781 gem_nd_load(dp, "100fdx_cap",
3784 3782 gem_param_get, NULL, PARAM_100FDX_CAP);
3785 3783 gem_nd_load(dp, "100hdx_cap",
3786 3784 gem_param_get, NULL, PARAM_100HDX_CAP);
3787 3785 gem_nd_load(dp, "10fdx_cap",
3788 3786 gem_param_get, NULL, PARAM_10FDX_CAP);
3789 3787 gem_nd_load(dp, "10hdx_cap",
3790 3788 gem_param_get, NULL, PARAM_10HDX_CAP);
3791 3789
3792 3790 /* Our advertised capabilities */
3793 3791 gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3794 3792 SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3795 3793 PARAM_ADV_AUTONEG_CAP);
3796 3794 gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3797 3795 SETFUNC(dp->gc.gc_flow_control & 1),
3798 3796 PARAM_ADV_PAUSE_CAP);
3799 3797 gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3800 3798 SETFUNC(dp->gc.gc_flow_control & 2),
3801 3799 PARAM_ADV_ASYM_PAUSE_CAP);
3802 3800 gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3803 3801 SETFUNC(dp->mii_xstatus &
3804 3802 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3805 3803 PARAM_ADV_1000FDX_CAP);
3806 3804 gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3807 3805 SETFUNC(dp->mii_xstatus &
3808 3806 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3809 3807 PARAM_ADV_1000HDX_CAP);
3810 3808 gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3811 3809 SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3812 3810 !dp->mii_advert_ro),
3813 3811 PARAM_ADV_100T4_CAP);
3814 3812 gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3815 3813 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3816 3814 !dp->mii_advert_ro),
3817 3815 PARAM_ADV_100FDX_CAP);
3818 3816 gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3819 3817 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3820 3818 !dp->mii_advert_ro),
3821 3819 PARAM_ADV_100HDX_CAP);
3822 3820 gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3823 3821 SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3824 3822 !dp->mii_advert_ro),
3825 3823 PARAM_ADV_10FDX_CAP);
3826 3824 gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3827 3825 SETFUNC((dp->mii_status & MII_STATUS_10) &&
3828 3826 !dp->mii_advert_ro),
3829 3827 PARAM_ADV_10HDX_CAP);
3830 3828
3831 3829 /* Partner's advertised capabilities */
3832 3830 gem_nd_load(dp, "lp_autoneg_cap",
3833 3831 gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3834 3832 gem_nd_load(dp, "lp_pause_cap",
3835 3833 gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3836 3834 gem_nd_load(dp, "lp_asym_pause_cap",
3837 3835 gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3838 3836 gem_nd_load(dp, "lp_1000fdx_cap",
3839 3837 gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3840 3838 gem_nd_load(dp, "lp_1000hdx_cap",
3841 3839 gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3842 3840 gem_nd_load(dp, "lp_100T4_cap",
3843 3841 gem_param_get, NULL, PARAM_LP_100T4_CAP);
3844 3842 gem_nd_load(dp, "lp_100fdx_cap",
3845 3843 gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3846 3844 gem_nd_load(dp, "lp_100hdx_cap",
3847 3845 gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3848 3846 gem_nd_load(dp, "lp_10fdx_cap",
3849 3847 gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3850 3848 gem_nd_load(dp, "lp_10hdx_cap",
3851 3849 gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3852 3850
3853 3851 /* Current operating modes */
3854 3852 gem_nd_load(dp, "link_status",
3855 3853 gem_param_get, NULL, PARAM_LINK_STATUS);
3856 3854 gem_nd_load(dp, "link_speed",
3857 3855 gem_param_get, NULL, PARAM_LINK_SPEED);
3858 3856 gem_nd_load(dp, "link_duplex",
3859 3857 gem_param_get, NULL, PARAM_LINK_DUPLEX);
3860 3858 gem_nd_load(dp, "link_autoneg",
3861 3859 gem_param_get, NULL, PARAM_LINK_AUTONEG);
3862 3860 gem_nd_load(dp, "link_rx_pause",
3863 3861 gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3864 3862 gem_nd_load(dp, "link_tx_pause",
3865 3863 gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3866 3864 #ifdef DEBUG_RESUME
3867 3865 gem_nd_load(dp, "resume_test",
3868 3866 gem_param_get, NULL, PARAM_RESUME_TEST);
3869 3867 #endif
3870 3868 #undef SETFUNC
3871 3869 }
3872 3870
3873 3871 static
3874 3872 enum ioc_reply
3875 3873 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3876 3874 {
3877 3875 boolean_t ok;
3878 3876
3879 3877 ASSERT(mutex_owned(&dp->intrlock));
3880 3878
3881 3879 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3882 3880
3883 3881 switch (iocp->ioc_cmd) {
3884 3882 case ND_GET:
3885 3883 ok = nd_getset(wq, dp->nd_data_p, mp);
3886 3884 DPRINTF(0, (CE_CONT,
3887 3885 "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3888 3886 return (ok ? IOC_REPLY : IOC_INVAL);
3889 3887
3890 3888 case ND_SET:
3891 3889 ok = nd_getset(wq, dp->nd_data_p, mp);
3892 3890
3893 3891 DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3894 3892 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3895 3893
3896 3894 if (!ok) {
3897 3895 return (IOC_INVAL);
3898 3896 }
3899 3897
3900 3898 if (iocp->ioc_error) {
3901 3899 return (IOC_REPLY);
3902 3900 }
3903 3901
3904 3902 return (IOC_RESTART_REPLY);
3905 3903 }
3906 3904
3907 3905 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3908 3906
3909 3907 return (IOC_INVAL);
3910 3908 }
3911 3909
3912 3910 static void
3913 3911 gem_nd_cleanup(struct gem_dev *dp)
3914 3912 {
3915 3913 ASSERT(dp->nd_data_p != NULL);
3916 3914 ASSERT(dp->nd_arg_p != NULL);
3917 3915
3918 3916 nd_free(&dp->nd_data_p);
3919 3917
3920 3918 kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3921 3919 dp->nd_arg_p = NULL;
3922 3920 }
3923 3921
3924 3922 static void
3925 3923 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3926 3924 {
3927 3925 struct iocblk *iocp;
3928 3926 enum ioc_reply status;
3929 3927 int cmd;
3930 3928
3931 3929 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3932 3930
3933 3931 /*
3934 3932 * Validate the command before bothering with the mutex ...
3935 3933 */
3936 3934 iocp = (void *)mp->b_rptr;
3937 3935 iocp->ioc_error = 0;
3938 3936 cmd = iocp->ioc_cmd;
3939 3937
3940 3938 DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3941 3939
3942 3940 mutex_enter(&dp->intrlock);
3943 3941 mutex_enter(&dp->xmitlock);
3944 3942
3945 3943 switch (cmd) {
3946 3944 default:
3947 3945 _NOTE(NOTREACHED)
3948 3946 status = IOC_INVAL;
3949 3947 break;
3950 3948
3951 3949 case ND_GET:
3952 3950 case ND_SET:
3953 3951 status = gem_nd_ioctl(dp, wq, mp, iocp);
3954 3952 break;
3955 3953 }
3956 3954
3957 3955 mutex_exit(&dp->xmitlock);
3958 3956 mutex_exit(&dp->intrlock);
3959 3957
3960 3958 #ifdef DEBUG_RESUME
3961 3959 if (cmd == ND_GET) {
3962 3960 gem_suspend(dp->dip);
3963 3961 gem_resume(dp->dip);
3964 3962 }
3965 3963 #endif
3966 3964 /*
3967 3965 * Finally, decide how to reply
3968 3966 */
3969 3967 switch (status) {
3970 3968 default:
3971 3969 case IOC_INVAL:
3972 3970 /*
3973 3971 * Error, reply with a NAK and EINVAL or the specified error
3974 3972 */
3975 3973 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3976 3974 EINVAL : iocp->ioc_error);
3977 3975 break;
3978 3976
3979 3977 case IOC_DONE:
3980 3978 /*
3981 3979 * OK, reply already sent
3982 3980 */
3983 3981 break;
3984 3982
3985 3983 case IOC_RESTART_ACK:
3986 3984 case IOC_ACK:
3987 3985 /*
3988 3986 * OK, reply with an ACK
3989 3987 */
3990 3988 miocack(wq, mp, 0, 0);
3991 3989 break;
3992 3990
3993 3991 case IOC_RESTART_REPLY:
3994 3992 case IOC_REPLY:
3995 3993 /*
3996 3994 * OK, send prepared reply as ACK or NAK
3997 3995 */
3998 3996 mp->b_datap->db_type =
3999 3997 iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
4000 3998 qreply(wq, mp);
4001 3999 break;
4002 4000 }
4003 4001 }
4004 4002
4005 4003 #ifndef SYS_MAC_H
4006 4004 #define XCVR_UNDEFINED 0
4007 4005 #define XCVR_NONE 1
4008 4006 #define XCVR_10 2
4009 4007 #define XCVR_100T4 3
4010 4008 #define XCVR_100X 4
4011 4009 #define XCVR_100T2 5
4012 4010 #define XCVR_1000X 6
4013 4011 #define XCVR_1000T 7
4014 4012 #endif
4015 4013 static int
4016 4014 gem_mac_xcvr_inuse(struct gem_dev *dp)
4017 4015 {
4018 4016 int val = XCVR_UNDEFINED;
4019 4017
4020 4018 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4021 4019 if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4022 4020 val = XCVR_100T4;
4023 4021 } else if (dp->mii_status &
4024 4022 (MII_STATUS_100_BASEX_FD |
4025 4023 MII_STATUS_100_BASEX)) {
4026 4024 val = XCVR_100X;
4027 4025 } else if (dp->mii_status &
4028 4026 (MII_STATUS_100_BASE_T2_FD |
4029 4027 MII_STATUS_100_BASE_T2)) {
4030 4028 val = XCVR_100T2;
4031 4029 } else if (dp->mii_status &
4032 4030 (MII_STATUS_10_FD | MII_STATUS_10)) {
4033 4031 val = XCVR_10;
4034 4032 }
4035 4033 } else if (dp->mii_xstatus &
4036 4034 (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4037 4035 val = XCVR_1000T;
4038 4036 } else if (dp->mii_xstatus &
4039 4037 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4040 4038 val = XCVR_1000X;
4041 4039 }
4042 4040
4043 4041 return (val);
4044 4042 }
4045 4043
4046 4044 /* ============================================================== */
4047 4045 /*
4048 4046 * GLDv3 interface
4049 4047 */
4050 4048 /* ============================================================== */
4051 4049 static int gem_m_getstat(void *, uint_t, uint64_t *);
4052 4050 static int gem_m_start(void *);
4053 4051 static void gem_m_stop(void *);
4054 4052 static int gem_m_setpromisc(void *, boolean_t);
4055 4053 static int gem_m_multicst(void *, boolean_t, const uint8_t *);
4056 4054 static int gem_m_unicst(void *, const uint8_t *);
4057 4055 static mblk_t *gem_m_tx(void *, mblk_t *);
4058 4056 static void gem_m_ioctl(void *, queue_t *, mblk_t *);
4059 4057 static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);
4060 4058
4061 4059 #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
4062 4060
4063 4061 static mac_callbacks_t gem_m_callbacks = {
4064 4062 GEM_M_CALLBACK_FLAGS,
4065 4063 gem_m_getstat,
4066 4064 gem_m_start,
4067 4065 gem_m_stop,
4068 4066 gem_m_setpromisc,
4069 4067 gem_m_multicst,
4070 4068 gem_m_unicst,
4071 4069 gem_m_tx,
4072 4070 NULL,
4073 4071 gem_m_ioctl,
4074 4072 gem_m_getcapab,
4075 4073 };
4076 4074
4077 4075 static int
4078 4076 gem_m_start(void *arg)
4079 4077 {
4080 4078 int err = 0;
4081 4079 struct gem_dev *dp = arg;
4082 4080
4083 4081 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4084 4082
4085 4083 mutex_enter(&dp->intrlock);
4086 4084 if (dp->mac_suspended) {
4087 4085 err = EIO;
4088 4086 goto x;
4089 4087 }
4090 4088 if (gem_mac_init(dp) != GEM_SUCCESS) {
4091 4089 err = EIO;
4092 4090 goto x;
4093 4091 }
4094 4092 dp->nic_state = NIC_STATE_INITIALIZED;
4095 4093
4096 4094 /* reset rx filter state */
4097 4095 dp->mc_count = 0;
4098 4096 dp->mc_count_req = 0;
4099 4097
4100 4098 	/* setup media mode if the link has been up */
4101 4099 if (dp->mii_state == MII_STATE_LINKUP) {
4102 4100 (dp->gc.gc_set_media)(dp);
4103 4101 }
4104 4102
4105 4103 /* setup initial rx filter */
4106 4104 bcopy(dp->dev_addr.ether_addr_octet,
4107 4105 dp->cur_addr.ether_addr_octet, ETHERADDRL);
4108 4106 dp->rxmode |= RXMODE_ENABLE;
4109 4107
4110 4108 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4111 4109 err = EIO;
4112 4110 goto x;
4113 4111 }
4114 4112
4115 4113 dp->nic_state = NIC_STATE_ONLINE;
4116 4114 if (dp->mii_state == MII_STATE_LINKUP) {
4117 4115 if (gem_mac_start(dp) != GEM_SUCCESS) {
4118 4116 err = EIO;
4119 4117 goto x;
4120 4118 }
4121 4119 }
4122 4120
4123 4121 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4124 4122 (void *)dp, dp->gc.gc_tx_timeout_interval);
4125 4123 mutex_exit(&dp->intrlock);
4126 4124
4127 4125 return (0);
4128 4126 x:
4129 4127 dp->nic_state = NIC_STATE_STOPPED;
4130 4128 mutex_exit(&dp->intrlock);
4131 4129 return (err);
4132 4130 }
4133 4131
4134 4132 static void
4135 4133 gem_m_stop(void *arg)
4136 4134 {
4137 4135 struct gem_dev *dp = arg;
4138 4136
4139 4137 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4140 4138
4141 4139 /* stop rx */
4142 4140 mutex_enter(&dp->intrlock);
4143 4141 if (dp->mac_suspended) {
4144 4142 mutex_exit(&dp->intrlock);
4145 4143 return;
4146 4144 }
4147 4145 dp->rxmode &= ~RXMODE_ENABLE;
4148 4146 (void) gem_mac_set_rx_filter(dp);
4149 4147 mutex_exit(&dp->intrlock);
4150 4148
4151 4149 /* stop tx timeout watcher */
4152 4150 if (dp->timeout_id) {
4153 4151 while (untimeout(dp->timeout_id) == -1)
4154 4152 ;
4155 4153 dp->timeout_id = 0;
4156 4154 }
4157 4155
4158 4156 /* make the nic state inactive */
4159 4157 mutex_enter(&dp->intrlock);
4160 4158 if (dp->mac_suspended) {
4161 4159 mutex_exit(&dp->intrlock);
4162 4160 return;
4163 4161 }
4164 4162 dp->nic_state = NIC_STATE_STOPPED;
4165 4163
4166 4164 	/* deassert mac_active to block the interrupt handler */
4167 4165 mutex_enter(&dp->xmitlock);
4168 4166 dp->mac_active = B_FALSE;
4169 4167 mutex_exit(&dp->xmitlock);
4170 4168
4171 4169 /* block interrupts */
4172 4170 while (dp->intr_busy) {
4173 4171 cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4174 4172 }
4175 4173 (void) gem_mac_stop(dp, 0);
4176 4174 mutex_exit(&dp->intrlock);
4177 4175 }
4178 4176
4179 4177 static int
4180 4178 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4181 4179 {
4182 4180 int err;
4183 4181 int ret;
4184 4182 struct gem_dev *dp = arg;
4185 4183
4186 4184 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4187 4185
4188 4186 if (add) {
4189 4187 ret = gem_add_multicast(dp, ep);
4190 4188 } else {
4191 4189 ret = gem_remove_multicast(dp, ep);
4192 4190 }
4193 4191
4194 4192 err = 0;
4195 4193 if (ret != GEM_SUCCESS) {
4196 4194 err = EIO;
4197 4195 }
4198 4196
4199 4197 return (err);
4200 4198 }
4201 4199
4202 4200 static int
4203 4201 gem_m_setpromisc(void *arg, boolean_t on)
4204 4202 {
4205 4203 int err = 0; /* no error */
4206 4204 struct gem_dev *dp = arg;
4207 4205
4208 4206 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4209 4207
4210 4208 mutex_enter(&dp->intrlock);
4211 4209 if (dp->mac_suspended) {
4212 4210 mutex_exit(&dp->intrlock);
4213 4211 return (EIO);
4214 4212 }
4215 4213 if (on) {
4216 4214 dp->rxmode |= RXMODE_PROMISC;
4217 4215 } else {
4218 4216 dp->rxmode &= ~RXMODE_PROMISC;
4219 4217 }
4220 4218
4221 4219 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4222 4220 err = EIO;
4223 4221 }
4224 4222 mutex_exit(&dp->intrlock);
4225 4223
4226 4224 return (err);
4227 4225 }
4228 4226
4229 4227 int
4230 4228 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4231 4229 {
4232 4230 struct gem_dev *dp = arg;
4233 4231 struct gem_stats *gstp = &dp->stats;
4234 4232 uint64_t val = 0;
4235 4233
4236 4234 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4237 4235
4238 4236 if (mutex_owned(&dp->intrlock)) {
4239 4237 if (dp->mac_suspended) {
4240 4238 return (EIO);
4241 4239 }
4242 4240 } else {
4243 4241 mutex_enter(&dp->intrlock);
4244 4242 if (dp->mac_suspended) {
4245 4243 mutex_exit(&dp->intrlock);
4246 4244 return (EIO);
4247 4245 }
4248 4246 mutex_exit(&dp->intrlock);
4249 4247 }
4250 4248
4251 4249 if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4252 4250 return (EIO);
4253 4251 }
4254 4252
4255 4253 switch (stat) {
4256 4254 case MAC_STAT_IFSPEED:
4257 4255 val = gem_speed_value[dp->speed] *1000000ull;
4258 4256 break;
4259 4257
4260 4258 case MAC_STAT_MULTIRCV:
4261 4259 val = gstp->rmcast;
4262 4260 break;
4263 4261
4264 4262 case MAC_STAT_BRDCSTRCV:
4265 4263 val = gstp->rbcast;
4266 4264 break;
4267 4265
4268 4266 case MAC_STAT_MULTIXMT:
4269 4267 val = gstp->omcast;
4270 4268 break;
4271 4269
4272 4270 case MAC_STAT_BRDCSTXMT:
4273 4271 val = gstp->obcast;
4274 4272 break;
4275 4273
4276 4274 case MAC_STAT_NORCVBUF:
4277 4275 val = gstp->norcvbuf + gstp->missed;
4278 4276 break;
4279 4277
4280 4278 case MAC_STAT_IERRORS:
4281 4279 val = gstp->errrcv;
4282 4280 break;
4283 4281
4284 4282 case MAC_STAT_NOXMTBUF:
4285 4283 val = gstp->noxmtbuf;
4286 4284 break;
4287 4285
4288 4286 case MAC_STAT_OERRORS:
4289 4287 val = gstp->errxmt;
4290 4288 break;
4291 4289
4292 4290 case MAC_STAT_COLLISIONS:
4293 4291 val = gstp->collisions;
4294 4292 break;
4295 4293
4296 4294 case MAC_STAT_RBYTES:
4297 4295 val = gstp->rbytes;
4298 4296 break;
4299 4297
4300 4298 case MAC_STAT_IPACKETS:
4301 4299 val = gstp->rpackets;
4302 4300 break;
4303 4301
4304 4302 case MAC_STAT_OBYTES:
4305 4303 val = gstp->obytes;
4306 4304 break;
4307 4305
4308 4306 case MAC_STAT_OPACKETS:
4309 4307 val = gstp->opackets;
4310 4308 break;
4311 4309
4312 4310 case MAC_STAT_UNDERFLOWS:
4313 4311 val = gstp->underflow;
4314 4312 break;
4315 4313
4316 4314 case MAC_STAT_OVERFLOWS:
4317 4315 val = gstp->overflow;
4318 4316 break;
4319 4317
4320 4318 case ETHER_STAT_ALIGN_ERRORS:
4321 4319 val = gstp->frame;
4322 4320 break;
4323 4321
4324 4322 case ETHER_STAT_FCS_ERRORS:
4325 4323 val = gstp->crc;
4326 4324 break;
4327 4325
4328 4326 case ETHER_STAT_FIRST_COLLISIONS:
4329 4327 val = gstp->first_coll;
4330 4328 break;
4331 4329
4332 4330 case ETHER_STAT_MULTI_COLLISIONS:
4333 4331 val = gstp->multi_coll;
4334 4332 break;
4335 4333
4336 4334 case ETHER_STAT_SQE_ERRORS:
4337 4335 val = gstp->sqe;
4338 4336 break;
4339 4337
4340 4338 case ETHER_STAT_DEFER_XMTS:
4341 4339 val = gstp->defer;
4342 4340 break;
4343 4341
4344 4342 case ETHER_STAT_TX_LATE_COLLISIONS:
4345 4343 val = gstp->xmtlatecoll;
4346 4344 break;
4347 4345
4348 4346 case ETHER_STAT_EX_COLLISIONS:
4349 4347 val = gstp->excoll;
4350 4348 break;
4351 4349
4352 4350 case ETHER_STAT_MACXMT_ERRORS:
4353 4351 val = gstp->xmit_internal_err;
4354 4352 break;
4355 4353
4356 4354 case ETHER_STAT_CARRIER_ERRORS:
4357 4355 val = gstp->nocarrier;
4358 4356 break;
4359 4357
4360 4358 case ETHER_STAT_TOOLONG_ERRORS:
4361 4359 val = gstp->frame_too_long;
4362 4360 break;
4363 4361
4364 4362 case ETHER_STAT_MACRCV_ERRORS:
4365 4363 val = gstp->rcv_internal_err;
4366 4364 break;
4367 4365
4368 4366 case ETHER_STAT_XCVR_ADDR:
4369 4367 val = dp->mii_phy_addr;
4370 4368 break;
4371 4369
4372 4370 case ETHER_STAT_XCVR_ID:
4373 4371 val = dp->mii_phy_id;
4374 4372 break;
4375 4373
4376 4374 case ETHER_STAT_XCVR_INUSE:
4377 4375 val = gem_mac_xcvr_inuse(dp);
4378 4376 break;
4379 4377
4380 4378 case ETHER_STAT_CAP_1000FDX:
4381 4379 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4382 4380 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4383 4381 break;
4384 4382
4385 4383 case ETHER_STAT_CAP_1000HDX:
4386 4384 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4387 4385 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4388 4386 break;
4389 4387
4390 4388 case ETHER_STAT_CAP_100FDX:
4391 4389 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4392 4390 break;
4393 4391
4394 4392 case ETHER_STAT_CAP_100HDX:
4395 4393 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4396 4394 break;
4397 4395
4398 4396 case ETHER_STAT_CAP_10FDX:
4399 4397 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4400 4398 break;
4401 4399
4402 4400 case ETHER_STAT_CAP_10HDX:
4403 4401 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4404 4402 break;
4405 4403
4406 4404 case ETHER_STAT_CAP_ASMPAUSE:
4407 4405 val = BOOLEAN(dp->gc.gc_flow_control & 2);
4408 4406 break;
4409 4407
4410 4408 case ETHER_STAT_CAP_PAUSE:
4411 4409 val = BOOLEAN(dp->gc.gc_flow_control & 1);
4412 4410 break;
4413 4411
4414 4412 case ETHER_STAT_CAP_AUTONEG:
4415 4413 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4416 4414 break;
4417 4415
4418 4416 case ETHER_STAT_ADV_CAP_1000FDX:
4419 4417 val = dp->anadv_1000fdx;
4420 4418 break;
4421 4419
4422 4420 case ETHER_STAT_ADV_CAP_1000HDX:
4423 4421 val = dp->anadv_1000hdx;
4424 4422 break;
4425 4423
4426 4424 case ETHER_STAT_ADV_CAP_100FDX:
4427 4425 val = dp->anadv_100fdx;
4428 4426 break;
4429 4427
4430 4428 case ETHER_STAT_ADV_CAP_100HDX:
4431 4429 val = dp->anadv_100hdx;
4432 4430 break;
4433 4431
4434 4432 case ETHER_STAT_ADV_CAP_10FDX:
4435 4433 val = dp->anadv_10fdx;
4436 4434 break;
4437 4435
4438 4436 case ETHER_STAT_ADV_CAP_10HDX:
4439 4437 val = dp->anadv_10hdx;
4440 4438 break;
4441 4439
4442 4440 case ETHER_STAT_ADV_CAP_ASMPAUSE:
4443 4441 val = BOOLEAN(dp->anadv_flow_control & 2);
4444 4442 break;
4445 4443
4446 4444 case ETHER_STAT_ADV_CAP_PAUSE:
4447 4445 val = BOOLEAN(dp->anadv_flow_control & 1);
4448 4446 break;
4449 4447
4450 4448 case ETHER_STAT_ADV_CAP_AUTONEG:
4451 4449 val = dp->anadv_autoneg;
4452 4450 break;
4453 4451
4454 4452 case ETHER_STAT_LP_CAP_1000FDX:
4455 4453 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4456 4454 break;
4457 4455
4458 4456 case ETHER_STAT_LP_CAP_1000HDX:
4459 4457 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4460 4458 break;
4461 4459
4462 4460 case ETHER_STAT_LP_CAP_100FDX:
4463 4461 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4464 4462 break;
4465 4463
4466 4464 case ETHER_STAT_LP_CAP_100HDX:
4467 4465 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4468 4466 break;
4469 4467
4470 4468 case ETHER_STAT_LP_CAP_10FDX:
4471 4469 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4472 4470 break;
4473 4471
4474 4472 case ETHER_STAT_LP_CAP_10HDX:
4475 4473 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4476 4474 break;
4477 4475
4478 4476 case ETHER_STAT_LP_CAP_ASMPAUSE:
4479 4477 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4480 4478 break;
4481 4479
4482 4480 case ETHER_STAT_LP_CAP_PAUSE:
4483 4481 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4484 4482 break;
4485 4483
4486 4484 case ETHER_STAT_LP_CAP_AUTONEG:
4487 4485 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4488 4486 break;
4489 4487
4490 4488 case ETHER_STAT_LINK_ASMPAUSE:
4491 4489 val = BOOLEAN(dp->flow_control & 2);
4492 4490 break;
4493 4491
4494 4492 case ETHER_STAT_LINK_PAUSE:
4495 4493 val = BOOLEAN(dp->flow_control & 1);
4496 4494 break;
4497 4495
4498 4496 case ETHER_STAT_LINK_AUTONEG:
4499 4497 val = dp->anadv_autoneg &&
4500 4498 BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4501 4499 break;
4502 4500
4503 4501 case ETHER_STAT_LINK_DUPLEX:
4504 4502 val = (dp->mii_state == MII_STATE_LINKUP) ?
4505 4503 (dp->full_duplex ? 2 : 1) : 0;
4506 4504 break;
4507 4505
4508 4506 case ETHER_STAT_TOOSHORT_ERRORS:
4509 4507 val = gstp->runt;
4510 4508 break;
4511 4509 case ETHER_STAT_LP_REMFAULT:
4512 4510 val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4513 4511 break;
4514 4512
4515 4513 case ETHER_STAT_JABBER_ERRORS:
4516 4514 val = gstp->jabber;
4517 4515 break;
4518 4516
4519 4517 case ETHER_STAT_CAP_100T4:
4520 4518 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4521 4519 break;
4522 4520
4523 4521 case ETHER_STAT_ADV_CAP_100T4:
4524 4522 val = dp->anadv_100t4;
4525 4523 break;
4526 4524
4527 4525 case ETHER_STAT_LP_CAP_100T4:
4528 4526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4529 4527 break;
4530 4528
4531 4529 default:
4532 4530 #if GEM_DEBUG_LEVEL > 2
4533 4531 cmn_err(CE_WARN,
4534 4532 "%s: unrecognized parameter value = %d",
4535 4533 __func__, stat);
4536 4534 #endif
4537 4535 return (ENOTSUP);
4538 4536 }
4539 4537
4540 4538 *valp = val;
4541 4539
4542 4540 return (0);
4543 4541 }
4544 4542
4545 4543 static int
4546 4544 gem_m_unicst(void *arg, const uint8_t *mac)
4547 4545 {
4548 4546 int err = 0;
4549 4547 struct gem_dev *dp = arg;
4550 4548
4551 4549 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4552 4550
4553 4551 mutex_enter(&dp->intrlock);
4554 4552 if (dp->mac_suspended) {
4555 4553 mutex_exit(&dp->intrlock);
4556 4554 return (EIO);
4557 4555 }
4558 4556 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4559 4557 dp->rxmode |= RXMODE_ENABLE;
4560 4558
4561 4559 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4562 4560 err = EIO;
4563 4561 }
4564 4562 mutex_exit(&dp->intrlock);
4565 4563
4566 4564 return (err);
4567 4565 }
4568 4566
4569 4567 /*
4570 4568  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4571 4569 */
4572 4570 static mblk_t *
4573 4571 gem_m_tx(void *arg, mblk_t *mp)
4574 4572 {
4575 4573 uint32_t flags = 0;
4576 4574 struct gem_dev *dp = arg;
4577 4575 mblk_t *tp;
4578 4576
4579 4577 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4580 4578
4581 4579 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4582 4580 if (dp->mii_state != MII_STATE_LINKUP) {
4583 4581 /* Some nics hate to send packets when the link is down. */
4584 4582 while (mp) {
4585 4583 tp = mp->b_next;
4586 4584 mp->b_next = NULL;
4587 4585 freemsg(mp);
4588 4586 mp = tp;
4589 4587 }
4590 4588 return (NULL);
4591 4589 }
4592 4590
4593 4591 return (gem_send_common(dp, mp, flags));
4594 4592 }
4595 4593
4596 4594 static void
4597 4595 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4598 4596 {
4599 4597 DPRINTF(0, (CE_CONT, "!%s: %s: called",
4600 4598 ((struct gem_dev *)arg)->name, __func__));
4601 4599
4602 4600 gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4603 4601 }
4604 4602
4605 4603 /* ARGSUSED */
4606 4604 static boolean_t
4607 4605 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4608 4606 {
4609 4607 return (B_FALSE);
4610 4608 }
4611 4609
4612 4610 static void
4613 4611 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4614 4612 {
4615 4613 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4616 4614 macp->m_driver = dp;
4617 4615 macp->m_dip = dp->dip;
4618 4616 macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4619 4617 macp->m_callbacks = &gem_m_callbacks;
4620 4618 macp->m_min_sdu = 0;
4621 4619 macp->m_max_sdu = dp->mtu;
4622 4620
4623 4621 if (dp->misc_flag & GEM_VLAN) {
4624 4622 macp->m_margin = VTAG_SIZE;
4625 4623 }
4626 4624 }
4627 4625
4628 4626 /* ======================================================================== */
4629 4627 /*
4630 4628  * attach/detach support
4631 4629 */
4632 4630 /* ======================================================================== */
4633 4631 static void
4634 4632 gem_read_conf(struct gem_dev *dp)
4635 4633 {
4636 4634 int val;
4637 4635
4638 4636 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4639 4637
4640 4638 /*
4641 4639 	 * Get media mode information from .conf file
4642 4640 */
4643 4641 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4644 4642 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4645 4643 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4646 4644 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4647 4645 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4648 4646 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4649 4647 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4650 4648 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4651 4649
4652 4650 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4653 4651 DDI_PROP_DONTPASS, "full-duplex"))) {
4654 4652 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4655 4653 dp->anadv_autoneg = B_FALSE;
4656 4654 if (dp->full_duplex) {
4657 4655 dp->anadv_1000hdx = B_FALSE;
4658 4656 dp->anadv_100hdx = B_FALSE;
4659 4657 dp->anadv_10hdx = B_FALSE;
4660 4658 } else {
4661 4659 dp->anadv_1000fdx = B_FALSE;
4662 4660 dp->anadv_100fdx = B_FALSE;
4663 4661 dp->anadv_10fdx = B_FALSE;
4664 4662 }
4665 4663 }
4666 4664
4667 4665 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4668 4666 dp->anadv_autoneg = B_FALSE;
4669 4667 switch (val) {
4670 4668 case 1000:
4671 4669 dp->speed = GEM_SPD_1000;
4672 4670 dp->anadv_100t4 = B_FALSE;
4673 4671 dp->anadv_100fdx = B_FALSE;
4674 4672 dp->anadv_100hdx = B_FALSE;
4675 4673 dp->anadv_10fdx = B_FALSE;
4676 4674 dp->anadv_10hdx = B_FALSE;
4677 4675 break;
4678 4676 case 100:
4679 4677 dp->speed = GEM_SPD_100;
4680 4678 dp->anadv_1000fdx = B_FALSE;
4681 4679 dp->anadv_1000hdx = B_FALSE;
4682 4680 dp->anadv_10fdx = B_FALSE;
4683 4681 dp->anadv_10hdx = B_FALSE;
4684 4682 break;
4685 4683 case 10:
4686 4684 dp->speed = GEM_SPD_10;
4687 4685 dp->anadv_1000fdx = B_FALSE;
4688 4686 dp->anadv_1000hdx = B_FALSE;
4689 4687 dp->anadv_100t4 = B_FALSE;
4690 4688 dp->anadv_100fdx = B_FALSE;
4691 4689 dp->anadv_100hdx = B_FALSE;
4692 4690 break;
4693 4691 default:
4694 4692 cmn_err(CE_WARN,
4695 4693 "!%s: property %s: illegal value:%d",
4696 4694 dp->name, "speed", val);
4697 4695 dp->anadv_autoneg = B_TRUE;
4698 4696 break;
4699 4697 }
4700 4698 }
4701 4699
4702 4700 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4703 4701 if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4704 4702 cmn_err(CE_WARN,
4705 4703 "!%s: property %s: illegal value:%d",
4706 4704 dp->name, "flow-control", val);
4707 4705 } else {
4708 4706 val = min(val, dp->gc.gc_flow_control);
4709 4707 }
4710 4708 dp->anadv_flow_control = val;
4711 4709
4712 4710 if (gem_prop_get_int(dp, "nointr", 0)) {
4713 4711 dp->misc_flag |= GEM_NOINTR;
4714 4712 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4715 4713 }
4716 4714
4717 4715 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4718 4716 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4719 4717 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4720 4718 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4721 4719 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4722 4720 }
4723 4721
4724 4722
4725 4723 /*
4726 4724 * Gem kstat support
4727 4725 */
4728 4726
4729 4727 #define GEM_LOCAL_DATA_SIZE(gc) \
4730 4728 (sizeof (struct gem_dev) + \
4731 4729 sizeof (struct mcast_addr) * GEM_MAXMC + \
4732 4730 sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4733 4731 sizeof (void *) * ((gc)->gc_tx_buf_size))
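
Reviewer note: GEM_LOCAL_DATA_SIZE() sizes a single kmem_zalloc() area that gem_do_attach() below carves into the per-device structures (see the dp->mc_list and dp->tx_buf assignments). A sketch of that layout follows; the purpose given for the trailing pointer array is an assumption, since it is consumed outside this excerpt.

/*
 * Layout of the single allocation sized by GEM_LOCAL_DATA_SIZE(gc):
 *
 *	+--------------------------------+ <- dp (kmem_zalloc'ed base)
 *	| struct gem_dev                 |
 *	+--------------------------------+ <- dp->mc_list = (struct mcast_addr *)&dp[1]
 *	| struct mcast_addr x GEM_MAXMC  |
 *	+--------------------------------+ <- dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC]
 *	| struct txbuf x gc_tx_buf_size  |
 *	+--------------------------------+
 *	| void * x gc_tx_buf_size        |  (assumed: slot pointers used by
 *	+--------------------------------+   the tx ring code elsewhere)
 */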
4734 4732
4735 4733 struct gem_dev *
4736 4734 gem_do_attach(dev_info_t *dip, int port,
4737 4735 struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4738 4736 void *lp, int lmsize)
4739 4737 {
4740 4738 struct gem_dev *dp;
4741 4739 int i;
4742 4740 ddi_iblock_cookie_t c;
4743 4741 mac_register_t *macp = NULL;
4744 4742 int ret;
4745 4743 int unit;
4746 4744 int nports;
4747 4745
4748 4746 unit = ddi_get_instance(dip);
4749 4747 if ((nports = gc->gc_nports) == 0) {
4750 4748 nports = 1;
4751 4749 }
4752 4750 if (nports == 1) {
4753 4751 ddi_set_driver_private(dip, NULL);
4754 4752 }
4755 4753
4756 4754 DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4757 4755 unit));
4758 4756
4759 4757 /*
4760 4758 * Allocate soft data structure
4761 4759 */
4762 4760 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4763 4761
4764 4762 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4765 4763 cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4766 4764 unit, __func__);
4767 4765 return (NULL);
4768 4766 }
4769 4767 /* ddi_set_driver_private(dip, dp); */
4770 4768
4771 4769 /* link to private area */
4772 4770 dp->private = lp;
4773 4771 dp->priv_size = lmsize;
4774 4772 dp->mc_list = (struct mcast_addr *)&dp[1];
4775 4773
4776 4774 dp->dip = dip;
4777 4775 (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4778 4776
4779 4777 /*
4780 4778 * Get iblock cookie
4781 4779 */
4782 4780 if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4783 4781 cmn_err(CE_CONT,
4784 4782 "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4785 4783 dp->name);
4786 4784 goto err_free_private;
4787 4785 }
4788 4786 dp->iblock_cookie = c;
4789 4787
4790 4788 /*
4791 4789 * Initialize mutex's for this device.
4792 4790 */
4793 4791 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4794 4792 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4795 4793 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4796 4794
4797 4795 /*
4798 4796 * configure gem parameter
4799 4797 */
4800 4798 dp->base_addr = base;
4801 4799 dp->regs_handle = *regs_handlep;
4802 4800 dp->gc = *gc;
4803 4801 gc = &dp->gc;
4804 4802 	/* patch to simplify dma resource management */
4805 4803 gc->gc_tx_max_frags = 1;
4806 4804 gc->gc_tx_max_descs_per_pkt = 1;
4807 4805 gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4808 4806 gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4809 4807 gc->gc_tx_desc_write_oo = B_TRUE;
4810 4808
4811 4809 gc->gc_nports = nports; /* fix nports */
4812 4810
4813 4811 	/* fix copy thresholds */
4814 4812 gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4815 4813 gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4816 4814
4817 4815 /* fix rx buffer boundary for iocache line size */
4818 4816 ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4819 4817 ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4820 4818 gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4821 4819 gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4822 4820
4823 4821 /* fix descriptor boundary for cache line size */
4824 4822 gc->gc_dma_attr_desc.dma_attr_align =
4825 4823 max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4826 4824
4827 4825 /* patch get_packet method */
4828 4826 if (gc->gc_get_packet == NULL) {
4829 4827 gc->gc_get_packet = &gem_get_packet_default;
4830 4828 }
4831 4829
4832 4830 /* patch get_rx_start method */
4833 4831 if (gc->gc_rx_start == NULL) {
4834 4832 gc->gc_rx_start = &gem_rx_start_default;
4835 4833 }
4836 4834
4837 4835 /* calculate descriptor area */
4838 4836 if (gc->gc_rx_desc_unit_shift >= 0) {
4839 4837 dp->rx_desc_size =
4840 4838 ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4841 4839 gc->gc_dma_attr_desc.dma_attr_align);
4842 4840 }
4843 4841 if (gc->gc_tx_desc_unit_shift >= 0) {
4844 4842 dp->tx_desc_size =
4845 4843 ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4846 4844 gc->gc_dma_attr_desc.dma_attr_align);
4847 4845 }
4848 4846
4849 4847 dp->mtu = ETHERMTU;
4850 4848 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4851 4849 /* link tx buffers */
4852 4850 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4853 4851 dp->tx_buf[i].txb_next =
4854 4852 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4855 4853 }
4856 4854
4857 4855 dp->rxmode = 0;
4858 4856 dp->speed = GEM_SPD_10; /* default is 10Mbps */
4859 4857 dp->full_duplex = B_FALSE; /* default is half */
4860 4858 dp->flow_control = FLOW_CONTROL_NONE;
4861 4859 	dp->poll_pkt_delay = 8; /* typical rx packet coalescing delay */
4862 4860
4863 4861 /* performance tuning parameters */
4864 4862 dp->txthr = ETHERMAX; /* tx fifo threshold */
4865 4863 dp->txmaxdma = 16*4; /* tx max dma burst size */
4866 4864 dp->rxthr = 128; /* rx fifo threshold */
4867 4865 dp->rxmaxdma = 16*4; /* rx max dma burst size */
4868 4866
4869 4867 /*
4870 4868 * Get media mode information from .conf file
4871 4869 */
4872 4870 gem_read_conf(dp);
4873 4871
4874 4872 	/* rx_buf_len is the required buffer length, excluding alignment padding */
4875 4873 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4876 4874
4877 4875 /*
4878 4876 * Reset the chip
4879 4877 */
4880 4878 mutex_enter(&dp->intrlock);
4881 4879 dp->nic_state = NIC_STATE_STOPPED;
4882 4880 ret = (*dp->gc.gc_reset_chip)(dp);
4883 4881 mutex_exit(&dp->intrlock);
4884 4882 if (ret != GEM_SUCCESS) {
4885 4883 goto err_free_regs;
4886 4884 }
4887 4885
4888 4886 /*
4889 4887 	 * HW dependent parameter initialization
4890 4888 */
4891 4889 mutex_enter(&dp->intrlock);
4892 4890 ret = (*dp->gc.gc_attach_chip)(dp);
4893 4891 mutex_exit(&dp->intrlock);
4894 4892 if (ret != GEM_SUCCESS) {
4895 4893 goto err_free_regs;
4896 4894 }
4897 4895
4898 4896 #ifdef DEBUG_MULTIFRAGS
4899 4897 dp->gc.gc_tx_copy_thresh = dp->mtu;
4900 4898 #endif
4901 4899 /* allocate tx and rx resources */
4902 4900 if (gem_alloc_memory(dp)) {
4903 4901 goto err_free_regs;
4904 4902 }
4905 4903
4906 4904 DPRINTF(0, (CE_CONT,
4907 4905 "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4908 4906 dp->name, (long)dp->base_addr,
4909 4907 dp->dev_addr.ether_addr_octet[0],
4910 4908 dp->dev_addr.ether_addr_octet[1],
4911 4909 dp->dev_addr.ether_addr_octet[2],
4912 4910 dp->dev_addr.ether_addr_octet[3],
4913 4911 dp->dev_addr.ether_addr_octet[4],
4914 4912 dp->dev_addr.ether_addr_octet[5]));
4915 4913
4916 4914 /* copy mac address */
4917 4915 dp->cur_addr = dp->dev_addr;
4918 4916
4919 4917 gem_gld3_init(dp, macp);
4920 4918
4921 4919 /* Probe MII phy (scan phy) */
4922 4920 dp->mii_lpable = 0;
4923 4921 dp->mii_advert = 0;
4924 4922 dp->mii_exp = 0;
4925 4923 dp->mii_ctl1000 = 0;
4926 4924 dp->mii_stat1000 = 0;
4927 4925 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4928 4926 goto err_free_ring;
4929 4927 }
4930 4928
4931 4929 /* mask unsupported abilities */
4932 4930 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4933 4931 dp->anadv_1000fdx &=
4934 4932 BOOLEAN(dp->mii_xstatus &
4935 4933 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4936 4934 dp->anadv_1000hdx &=
4937 4935 BOOLEAN(dp->mii_xstatus &
4938 4936 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4939 4937 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4940 4938 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4941 4939 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4942 4940 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4943 4941 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4944 4942
4945 4943 gem_choose_forcedmode(dp);
4946 4944
4947 4945 /* initialize MII phy if required */
4948 4946 if (dp->gc.gc_mii_init) {
4949 4947 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4950 4948 goto err_free_ring;
4951 4949 }
4952 4950 }
4953 4951
4954 4952 /*
4955 4953 * initialize kstats including mii statistics
4956 4954 */
4957 4955 gem_nd_setup(dp);
4958 4956
4959 4957 /*
4960 4958 * Add interrupt to system.
4961 4959 */
4962 4960 if (ret = mac_register(macp, &dp->mh)) {
4963 4961 cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4964 4962 dp->name, ret);
4965 4963 goto err_release_stats;
4966 4964 }
4967 4965 mac_free(macp);
4968 4966 macp = NULL;
4969 4967
4970 4968 if (dp->misc_flag & GEM_SOFTINTR) {
4971 4969 if (ddi_add_softintr(dip,
4972 4970 DDI_SOFTINT_LOW, &dp->soft_id,
4973 4971 NULL, NULL,
4974 4972 (uint_t (*)(caddr_t))gem_intr,
4975 4973 (caddr_t)dp) != DDI_SUCCESS) {
4976 4974 cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4977 4975 dp->name);
4978 4976 goto err_unregister;
4979 4977 }
4980 4978 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4981 4979 if (ddi_add_intr(dip, 0, NULL, NULL,
4982 4980 (uint_t (*)(caddr_t))gem_intr,
4983 4981 (caddr_t)dp) != DDI_SUCCESS) {
4984 4982 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4985 4983 goto err_unregister;
4986 4984 }
4987 4985 } else {
4988 4986 /*
4989 4987 		 * Don't use interrupts;
4990 4988 		 * schedule the first call of gem_intr_watcher
4991 4989 */
4992 4990 dp->intr_watcher_id =
4993 4991 timeout((void (*)(void *))gem_intr_watcher,
4994 4992 (void *)dp, drv_usectohz(3*1000000));
4995 4993 }
4996 4994
4997 4995 /* link this device to dev_info */
4998 4996 dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4999 4997 dp->port = port;
5000 4998 ddi_set_driver_private(dip, (caddr_t)dp);
5001 4999
5002 5000 /* reset mii phy and start mii link watcher */
5003 5001 gem_mii_start(dp);
5004 5002
5005 5003 DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5006 5004 return (dp);
5007 5005
5008 5006 err_unregister:
5009 5007 (void) mac_unregister(dp->mh);
5010 5008 err_release_stats:
5011 5009 /* release NDD resources */
5012 5010 gem_nd_cleanup(dp);
5013 5011
5014 5012 err_free_ring:
5015 5013 gem_free_memory(dp);
5016 5014 err_free_regs:
5017 5015 ddi_regs_map_free(&dp->regs_handle);
5018 5016 err_free_locks:
5019 5017 mutex_destroy(&dp->xmitlock);
5020 5018 mutex_destroy(&dp->intrlock);
5021 5019 cv_destroy(&dp->tx_drain_cv);
5022 5020 err_free_private:
5023 5021 if (macp) {
5024 5022 mac_free(macp);
5025 5023 }
5026 5024 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5027 5025
5028 5026 return (NULL);
5029 5027 }
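
Reviewer note: for readers new to this framework, here is a minimal sketch of how a chip-specific driver might drive gem_do_attach() from its attach(9E) entry point. It is illustrative only: all mydrv_* names are hypothetical, just a few gem_conf fields are shown, and error unwinding is simplified.

static int
mydrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct gem_conf *gcp;
	struct mydrv_private *lp;	/* hypothetical chip-private state */
	ddi_acc_handle_t regs_ha;
	caddr_t base;

	if (cmd == DDI_RESUME)
		return (gem_resume(dip));
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/* map the device registers; I/O space is assumed here */
	if (gem_pci_regs_map_setup(dip, PCI_ADDR_IO, PCI_REG_ADDR_M,
	    &mydrv_dev_attr, &base, &regs_ha) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* describe the chip to the framework; only a few fields shown */
	gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
	(void) strcpy(gcp->gc_name, "mydrv%d");
	gcp->gc_tx_buf_size = 64;
	gcp->gc_reset_chip = &mydrv_reset_chip;
	gcp->gc_attach_chip = &mydrv_attach_chip;
	/* ... remaining gc_* callbacks, DMA attributes and timeouts ... */

	lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
	if (gem_do_attach(dip, 0, gcp, (void *)base, &regs_ha,
	    lp, sizeof (*lp)) == NULL) {
		/* error unwinding elided in this sketch */
		kmem_free(lp, sizeof (*lp));
		kmem_free(gcp, sizeof (*gcp));
		return (DDI_FAILURE);
	}
	kmem_free(gcp, sizeof (*gcp));	/* gem_do_attach() copied it to dp->gc */
	return (DDI_SUCCESS);
}

Because gem_do_attach() copies the configuration (dp->gc = *gc), the caller may release its gem_conf immediately after a successful attach; teardown then goes through gem_do_detach() as shown below.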
5030 5028
5031 5029 int
5032 5030 gem_do_detach(dev_info_t *dip)
5033 5031 {
5034 5032 struct gem_dev *dp;
5035 5033 struct gem_dev *tmp;
5036 5034 caddr_t private;
5037 5035 int priv_size;
5038 5036 ddi_acc_handle_t rh;
5039 5037
5040 5038 dp = GEM_GET_DEV(dip);
5041 5039 if (dp == NULL) {
5042 5040 return (DDI_SUCCESS);
5043 5041 }
5044 5042
5045 5043 rh = dp->regs_handle;
5046 5044 private = dp->private;
5047 5045 priv_size = dp->priv_size;
5048 5046
5049 5047 while (dp) {
5050 5048 /* unregister with gld v3 */
5051 5049 if (mac_unregister(dp->mh) != 0) {
5052 5050 return (DDI_FAILURE);
5053 5051 }
5054 5052
5055 5053 /* ensure any rx buffers are not used */
5056 5054 if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5057 5055 /* resource is busy */
5058 5056 cmn_err(CE_PANIC,
5059 5057 "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5060 5058 dp->name, __func__,
5061 5059 dp->rx_buf_allocated, dp->rx_buf_freecnt);
5062 5060 /* NOT REACHED */
5063 5061 }
5064 5062
5065 5063 /* stop mii link watcher */
5066 5064 gem_mii_stop(dp);
5067 5065
5068 5066 /* unregister interrupt handler */
5069 5067 if (dp->misc_flag & GEM_SOFTINTR) {
5070 5068 ddi_remove_softintr(dp->soft_id);
5071 5069 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5072 5070 ddi_remove_intr(dip, 0, dp->iblock_cookie);
5073 5071 } else {
5074 5072 /* stop interrupt watcher */
5075 5073 if (dp->intr_watcher_id) {
5076 5074 while (untimeout(dp->intr_watcher_id) == -1)
5077 5075 ;
5078 5076 dp->intr_watcher_id = 0;
5079 5077 }
5080 5078 }
5081 5079
5082 5080 /* release NDD resources */
5083 5081 gem_nd_cleanup(dp);
5084 5082 /* release buffers, descriptors and dma resources */
5085 5083 gem_free_memory(dp);
5086 5084
5087 5085 /* release locks and condition variables */
5088 5086 mutex_destroy(&dp->xmitlock);
5089 5087 mutex_destroy(&dp->intrlock);
5090 5088 cv_destroy(&dp->tx_drain_cv);
5091 5089
5092 5090 /* release basic memory resources */
5093 5091 tmp = dp->next;
5094 5092 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5095 5093 dp = tmp;
5096 5094 }
5097 5095
5098 5096 /* release common private memory for the nic */
5099 5097 kmem_free(private, priv_size);
5100 5098
5101 5099 /* release register mapping resources */
5102 5100 ddi_regs_map_free(&rh);
5103 5101
5104 5102 DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5105 5103 ddi_driver_name(dip), ddi_get_instance(dip)));
5106 5104
5107 5105 return (DDI_SUCCESS);
5108 5106 }
5109 5107
5110 5108 int
5111 5109 gem_suspend(dev_info_t *dip)
5112 5110 {
5113 5111 struct gem_dev *dp;
5114 5112
5115 5113 /*
5116 5114 * stop the device
5117 5115 */
5118 5116 dp = GEM_GET_DEV(dip);
5119 5117 ASSERT(dp);
5120 5118
5121 5119 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5122 5120
5123 5121 for (; dp; dp = dp->next) {
5124 5122
5125 5123 /* stop mii link watcher */
5126 5124 gem_mii_stop(dp);
5127 5125
5128 5126 /* stop interrupt watcher for no-intr mode */
5129 5127 if (dp->misc_flag & GEM_NOINTR) {
5130 5128 if (dp->intr_watcher_id) {
5131 5129 while (untimeout(dp->intr_watcher_id) == -1)
5132 5130 ;
5133 5131 }
5134 5132 dp->intr_watcher_id = 0;
5135 5133 }
5136 5134
5137 5135 /* stop tx timeout watcher */
5138 5136 if (dp->timeout_id) {
5139 5137 while (untimeout(dp->timeout_id) == -1)
5140 5138 ;
5141 5139 dp->timeout_id = 0;
5142 5140 }
5143 5141
5144 5142 /* make the nic state inactive */
5145 5143 mutex_enter(&dp->intrlock);
5146 5144 (void) gem_mac_stop(dp, 0);
5147 5145 ASSERT(!dp->mac_active);
5148 5146
5149 5147 /* no further register access */
5150 5148 dp->mac_suspended = B_TRUE;
5151 5149 mutex_exit(&dp->intrlock);
5152 5150 }
5153 5151
5154 5152 /* XXX - power down the nic */
5155 5153
5156 5154 return (DDI_SUCCESS);
5157 5155 }
5158 5156
5159 5157 int
5160 5158 gem_resume(dev_info_t *dip)
5161 5159 {
5162 5160 struct gem_dev *dp;
5163 5161
5164 5162 /*
5165 5163 * restart the device
5166 5164 */
5167 5165 dp = GEM_GET_DEV(dip);
5168 5166 ASSERT(dp);
5169 5167
5170 5168 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5171 5169
5172 5170 for (; dp; dp = dp->next) {
5173 5171
5174 5172 /*
5175 5173 * Bring up the nic after power up
5176 5174 */
5177 5175
5178 5176 /* gem_xxx.c layer to setup power management state. */
5179 5177 ASSERT(!dp->mac_active);
5180 5178
5181 5179 /* reset the chip, because we are just after power up. */
5182 5180 mutex_enter(&dp->intrlock);
5183 5181
5184 5182 dp->mac_suspended = B_FALSE;
5185 5183 dp->nic_state = NIC_STATE_STOPPED;
5186 5184
5187 5185 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5188 5186 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5189 5187 dp->name, __func__);
5190 5188 mutex_exit(&dp->intrlock);
5191 5189 goto err;
5192 5190 }
5193 5191 mutex_exit(&dp->intrlock);
5194 5192
5195 5193 /* initialize mii phy because we are just after power up */
5196 5194 if (dp->gc.gc_mii_init) {
5197 5195 (void) (*dp->gc.gc_mii_init)(dp);
5198 5196 }
5199 5197
5200 5198 if (dp->misc_flag & GEM_NOINTR) {
5201 5199 /*
5202 5200 * schedule first call of gem_intr_watcher
5203 5201 * instead of interrupts.
5204 5202 */
5205 5203 dp->intr_watcher_id =
5206 5204 timeout((void (*)(void *))gem_intr_watcher,
5207 5205 (void *)dp, drv_usectohz(3*1000000));
5208 5206 }
5209 5207
5210 5208 /* restart mii link watcher */
5211 5209 gem_mii_start(dp);
5212 5210
5213 5211 /* restart mac */
5214 5212 mutex_enter(&dp->intrlock);
5215 5213
5216 5214 if (gem_mac_init(dp) != GEM_SUCCESS) {
5217 5215 mutex_exit(&dp->intrlock);
5218 5216 goto err_reset;
5219 5217 }
5220 5218 dp->nic_state = NIC_STATE_INITIALIZED;
5221 5219
5222 5220 		/* setup media mode if the link has been up */
5223 5221 if (dp->mii_state == MII_STATE_LINKUP) {
5224 5222 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5225 5223 mutex_exit(&dp->intrlock);
5226 5224 goto err_reset;
5227 5225 }
5228 5226 }
5229 5227
5230 5228 /* enable mac address and rx filter */
5231 5229 dp->rxmode |= RXMODE_ENABLE;
5232 5230 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5233 5231 mutex_exit(&dp->intrlock);
5234 5232 goto err_reset;
5235 5233 }
5236 5234 dp->nic_state = NIC_STATE_ONLINE;
5237 5235
5238 5236 /* restart tx timeout watcher */
5239 5237 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5240 5238 (void *)dp,
5241 5239 dp->gc.gc_tx_timeout_interval);
5242 5240
5243 5241 /* now the nic is fully functional */
5244 5242 if (dp->mii_state == MII_STATE_LINKUP) {
5245 5243 if (gem_mac_start(dp) != GEM_SUCCESS) {
5246 5244 mutex_exit(&dp->intrlock);
5247 5245 goto err_reset;
5248 5246 }
5249 5247 }
5250 5248 mutex_exit(&dp->intrlock);
5251 5249 }
5252 5250
5253 5251 return (DDI_SUCCESS);
5254 5252
5255 5253 err_reset:
5256 5254 if (dp->intr_watcher_id) {
5257 5255 while (untimeout(dp->intr_watcher_id) == -1)
5258 5256 ;
5259 5257 dp->intr_watcher_id = 0;
5260 5258 }
5261 5259 mutex_enter(&dp->intrlock);
5262 5260 (*dp->gc.gc_reset_chip)(dp);
5263 5261 dp->nic_state = NIC_STATE_STOPPED;
5264 5262 mutex_exit(&dp->intrlock);
5265 5263
5266 5264 err:
5267 5265 return (DDI_FAILURE);
5268 5266 }
5269 5267
5270 5268 /*
5271 5269 * misc routines for PCI
5272 5270 */
5273 5271 uint8_t
5274 5272 gem_search_pci_cap(dev_info_t *dip,
5275 5273 ddi_acc_handle_t conf_handle, uint8_t target)
5276 5274 {
5277 5275 uint8_t pci_cap_ptr;
5278 5276 uint32_t pci_cap;
5279 5277
5280 5278 	/* walk the PCI capability list looking for the target capability */
5281 5279 pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5282 5280 while (pci_cap_ptr) {
5283 5281 /* read pci capability header */
5284 5282 pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5285 5283 if ((pci_cap & 0xff) == target) {
5286 5284 /* found */
5287 5285 break;
5288 5286 }
5289 5287 /* get next_ptr */
5290 5288 pci_cap_ptr = (pci_cap >> 8) & 0xff;
5291 5289 }
5292 5290 return (pci_cap_ptr);
5293 5291 }
5294 5292
5295 5293 int
5296 5294 gem_pci_set_power_state(dev_info_t *dip,
5297 5295 ddi_acc_handle_t conf_handle, uint_t new_mode)
5298 5296 {
5299 5297 uint8_t pci_cap_ptr;
5300 5298 uint32_t pmcsr;
5301 5299 uint_t unit;
5302 5300 const char *drv_name;
5303 5301
5304 5302 ASSERT(new_mode < 4);
5305 5303
5306 5304 unit = ddi_get_instance(dip);
5307 5305 drv_name = ddi_driver_name(dip);
5308 5306
5309 5307 	/* search for the power management capability */
5310 5308 pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5311 5309
5312 5310 if (pci_cap_ptr == 0) {
5313 5311 cmn_err(CE_CONT,
5314 5312 "!%s%d: doesn't have pci power management capability",
5315 5313 drv_name, unit);
5316 5314 return (DDI_FAILURE);
5317 5315 }
5318 5316
5319 5317 	/* read the power management control/status register (PMCSR) */
5320 5318 pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5321 5319
5322 5320 DPRINTF(0, (CE_CONT,
5323 5321 "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5324 5322 drv_name, unit, pci_cap_ptr, pmcsr));
5325 5323
5326 5324 /*
5327 5325 	 * Is the requested power mode supported?
5328 5326 */
5329 5327 /* not yet */
5330 5328
5331 5329 /*
5332 5330 * move to new mode
5333 5331 */
5334 5332 pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5335 5333 pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5336 5334
5337 5335 return (DDI_SUCCESS);
5338 5336 }
5339 5337
5340 5338 /*
5341 5339  * select a suitable register range, identified by the specified address
5342 5340  * space type or register offset in PCI config space
5343 5341 */
5344 5342 int
5345 5343 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5346 5344 struct ddi_device_acc_attr *attrp,
5347 5345 caddr_t *basep, ddi_acc_handle_t *hp)
5348 5346 {
5349 5347 struct pci_phys_spec *regs;
5350 5348 uint_t len;
5351 5349 uint_t unit;
5352 5350 uint_t n;
5353 5351 uint_t i;
5354 5352 int ret;
5355 5353 const char *drv_name;
5356 5354
5357 5355 unit = ddi_get_instance(dip);
5358 5356 drv_name = ddi_driver_name(dip);
5359 5357
5360 5358 /* Search IO-range or memory-range to be mapped */
5361 5359 regs = NULL;
5362 5360 len = 0;
5363 5361
5364 5362 if ((ret = ddi_prop_lookup_int_array(
5365 5363 DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5366 5364 "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) {
5367 5365 cmn_err(CE_WARN,
5368 5366 "!%s%d: failed to get reg property (ret:%d)",
5369 5367 drv_name, unit, ret);
5370 5368 return (DDI_FAILURE);
5371 5369 }
5372 5370 n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5373 5371
5374 5372 ASSERT(regs != NULL && len > 0);
5375 5373
5376 5374 #if GEM_DEBUG_LEVEL > 0
5377 5375 for (i = 0; i < n; i++) {
5378 5376 cmn_err(CE_CONT,
5379 5377 "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5380 5378 drv_name, unit, i,
5381 5379 regs[i].pci_phys_hi,
5382 5380 regs[i].pci_phys_mid,
5383 5381 regs[i].pci_phys_low,
5384 5382 regs[i].pci_size_hi,
5385 5383 regs[i].pci_size_low);
5386 5384 }
5387 5385 #endif
5388 5386 for (i = 0; i < n; i++) {
5389 5387 if ((regs[i].pci_phys_hi & mask) == which) {
5390 5388 /* it's the requested space */
5391 5389 ddi_prop_free(regs);
5392 5390 goto address_range_found;
5393 5391 }
5394 5392 }
5395 5393 ddi_prop_free(regs);
5396 5394 return (DDI_FAILURE);
5397 5395
5398 5396 address_range_found:
5399 5397 if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5400 5398 != DDI_SUCCESS) {
5401 5399 cmn_err(CE_CONT,
5402 5400 "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5403 5401 drv_name, unit, ret);
5404 5402 }
5405 5403
5406 5404 return (ret);
5407 5405 }
5408 5406
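Reviewer note: the which/mask pair above is matched against pci_phys_hi of each "reg" entry, which follows the IEEE 1275 PCI binding. The summary below is editorial; the PCI_ADDR_* / PCI_REG_ADDR_M names are the usual <sys/pci.h> selectors and are cited here from memory.

/*
 * pci_phys_hi bit layout (IEEE 1275 PCI binding):
 *
 *	npt000ss bbbbbbbb dddddfff rrrrrrrr
 *	|||   |  bus      dev  fn  register offset
 *	|||   +-- ss: 00 config, 01 I/O, 10 mem32, 11 mem64
 *	||+------ t:  address aliased
 *	|+------- p:  prefetchable
 *	+-------- n:  non-relocatable
 *
 * e.g. which = PCI_ADDR_MEM32 with mask = PCI_REG_ADDR_M selects the
 * first 32-bit memory range; which = PCI_ADDR_IO selects I/O space.
 */
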
5409 5407 void
5410 5408 gem_mod_init(struct dev_ops *dop, char *name)
5411 5409 {
5412 5410 mac_init_ops(dop, name);
5413 5411 }
5414 5412
5415 5413 void
5416 5414 gem_mod_fini(struct dev_ops *dop)
5417 5415 {
5418 5416 mac_fini_ops(dop);
5419 5417 }