7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/dmfe/dmfe_main.c
+++ new/usr/src/uts/common/io/dmfe/dmfe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26
27 27 #include <sys/types.h>
28 28 #include <sys/sunddi.h>
29 29 #include <sys/policy.h>
30 30 #include <sys/sdt.h>
31 31 #include "dmfe_impl.h"
32 32
33 33 /*
34 34 * This is the string displayed by modinfo, etc.
35 35 */
36 36 static char dmfe_ident[] = "Davicom DM9102 Ethernet";
37 37
38 38
39 39 /*
40 40 * NOTES:
41 41 *
42 42 * #defines:
43 43 *
44 44 * DMFE_PCI_RNUMBER is the register-set number to use for the operating
45 45 * registers. On an OBP-based machine, regset 0 refers to CONFIG space,
46 46 * regset 1 will be the operating registers in I/O space, and regset 2
47 47 * will be the operating registers in MEMORY space (preferred). If an
48 48 * expansion ROM is fitted, it may appear as a further register set.
49 49 *
50 50 * DMFE_SLOP defines the amount by which the chip may read beyond
51 51 * the end of a buffer or descriptor, apparently 6-8 dwords :(
52 52 * We have to make sure this doesn't cause it to access unallocated
53 53 * or unmapped memory.
54 54 *
55 55 * DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
56 56 * rounded up to a multiple of 4. Here we choose a power of two for
57 57 * speed & simplicity at the cost of a bit more memory.
58 58 *
59 59 * However, the buffer length field in the TX/RX descriptors is only
60 60 * eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
61 61 * per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
62 62 * (2000) bytes each.
63 63 *
64 64 * DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
65 65 * the data buffers. The descriptors are always set up in CONSISTENT
66 66 * mode.
67 67 *
68 68 * DMFE_HEADROOM defines how much space we'll leave in allocated
69 69 * mblks before the first valid data byte. This should be chosen
70 70 * to be 2 modulo 4, so that once the ethernet header (14 bytes)
71 71 * has been stripped off, the packet data will be 4-byte aligned.
72 72 * The remaining space can be used by upstream modules to prepend
73 73 * any headers required.
74 74 *
75 75 * Patchable globals:
76 76 *
77 77 * dmfe_bus_modes: the bus mode bits to be put into CSR0.
78 78 * Setting READ_MULTIPLE in this register seems to cause
79 79 * the chip to generate a READ LINE command with a parity
80 80 * error! Don't do it!
81 81 *
82 82 * dmfe_setup_desc1: the value to be put into descriptor word 1
83 83 * when sending a SETUP packet.
84 84 *
85 85 * Setting TX_LAST_DESC in desc1 in a setup packet seems
86 86 * to make the chip spontaneously reset internally - it
87 87 * attempts to give back the setup packet descriptor by
88 88 * writing to PCI address 00000000 - which may or may not
89 89 * get a MASTER ABORT - after which most of its registers
90 90 * seem to have either default values or garbage!
91 91 *
92 92 * TX_FIRST_DESC doesn't seem to have the same effect but
93 93 * it isn't needed on a setup packet so we'll leave it out
94 94 * too, just in case it has some other weird side-effect.
95 95 *
96 96 * The default hardware packet filtering mode is now
97 97 * HASH_AND_PERFECT (imperfect filtering of multicast
98 98 * packets and perfect filtering of unicast packets).
99 99 * If this is found not to work reliably, setting the
100 100 * TX_FILTER_TYPE1 bit will cause a switchover to using
101 101 * HASH_ONLY mode (imperfect filtering of *all* packets).
102 102 * Software will then perform the additional filtering
103 103 * as required.
104 104 */
105 105
106 106 #define DMFE_PCI_RNUMBER 2
107 107 #define DMFE_SLOP (8*sizeof (uint32_t))
108 108 #define DMFE_BUF_SIZE 2048
109 109 #define DMFE_BUF_SIZE_1 2000
110 110 #define DMFE_DMA_MODE DDI_DMA_STREAMING
111 111 #define DMFE_HEADROOM 34
112 112
113 113 static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
114 114 static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
115 115 TX_FILTER_TYPE0;
116 116
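/*
 * The sizing rules above can be restated as compile-time checks; a
 * minimal sketch, assuming CTASSERT() from <sys/debug.h> is in scope
 * here via dmfe_impl.h:
 *
 *	CTASSERT(DMFE_BUF_SIZE >= ETHERMAX + ETHERFCSL + DMFE_SLOP);
 *	CTASSERT((DMFE_BUF_SIZE % 4) == 0);
 *	CTASSERT(DMFE_BUF_SIZE_1 < (1 << 11));
 *
 * the last reflecting the eleven-bit length field noted above.
 */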
117 117 /*
118 118 * Some tunable parameters ...
119 119 * Number of RX/TX ring entries (128/128)
120 120 * Minimum number of TX ring slots to keep free (1)
121 121 * Low-water mark at which to try to reclaim TX ring slots (1)
122 122 * How often to take a TX-done interrupt (twice per ring cycle)
123 123 * Whether to reclaim TX ring entries on a TX-done interrupt (no)
124 124 */
125 125
126 126 #define DMFE_TX_DESC 128 /* Should be a multiple of 4 <= 256 */
127 127 #define DMFE_RX_DESC 128 /* Should be a multiple of 4 <= 256 */
128 128
129 129 static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
130 130 static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
131 131 static uint32_t dmfe_tx_min_free = 1;
132 132 static uint32_t dmfe_tx_reclaim_level = 1;
133 133 static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
134 134 static boolean_t dmfe_reclaim_on_done = B_FALSE;
135 135
136 136 /*
137 137 * Time-related parameters:
138 138 *
139 139 * We use a cyclic to provide a periodic callback; this is then used
140 140 * to check for TX-stall and poll the link status register.
141 141 *
142 142 * DMFE_TICK is the interval between cyclic callbacks, in microseconds.
143 143 *
144 144 * TX_STALL_TIME_100 is the timeout in microseconds between passing
145 145 * a packet to the chip for transmission and seeing that it's gone,
146 146 * when running at 100Mb/s. If we haven't reclaimed at least one
147 147 * descriptor in this time we assume the transmitter has stalled
148 148 * and reset the chip.
149 149 *
150 150 * TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
151 151 *
152 152 * Patchable globals:
153 153 *
154 154 * dmfe_tick_us: DMFE_TICK
155 155 * dmfe_tx100_stall_us: TX_STALL_TIME_100
156 156 * dmfe_tx10_stall_us: TX_STALL_TIME_10
157 157 *
158 158 * These are then used in _init() to calculate:
159 159 *
160 160 * stall_100_tix[]: number of consecutive cyclic callbacks without a
161 161 * reclaim before the TX process is considered stalled,
162 162 * when running at 100Mb/s. The elements are indexed
163 163 * by transmit-engine-state.
164 164 * stall_10_tix[]: number of consecutive cyclic callbacks without a
165 165 * reclaim before the TX process is considered stalled,
166 166 * when running at 10Mb/s. The elements are indexed
167 167 * by transmit-engine-state.
168 168 */
169 169
170 170 #define DMFE_TICK 25000 /* microseconds */
171 171 #define TX_STALL_TIME_100 50000 /* microseconds */
172 172 #define TX_STALL_TIME_10 200000 /* microseconds */
173 173
174 174 static uint32_t dmfe_tick_us = DMFE_TICK;
175 175 static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
176 176 static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
177 177
178 178 /*
179 179 * Calculated from above in _init()
180 180 */
181 181
182 182 static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
183 183 static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
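/*
 * A sketch of the microseconds-to-ticks conversion done in _init(),
 * rounding up; the per-state scaling applied there is not shown, and
 * this exact form is an assumption:
 *
 *	stall_100_tix[state] =
 *	    (dmfe_tx100_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
 *	stall_10_tix[state] =
 *	    (dmfe_tx10_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
 */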
184 184
185 185 /*
186 186 * Property names
187 187 */
188 188 static char localmac_propname[] = "local-mac-address";
189 189 static char opmode_propname[] = "opmode-reg-value";
190 190
191 191 static int dmfe_m_start(void *);
192 192 static void dmfe_m_stop(void *);
193 193 static int dmfe_m_promisc(void *, boolean_t);
194 194 static int dmfe_m_multicst(void *, boolean_t, const uint8_t *);
195 195 static int dmfe_m_unicst(void *, const uint8_t *);
196 196 static void dmfe_m_ioctl(void *, queue_t *, mblk_t *);
197 197 static mblk_t *dmfe_m_tx(void *, mblk_t *);
198 198 static int dmfe_m_stat(void *, uint_t, uint64_t *);
199 199 static int dmfe_m_getprop(void *, const char *, mac_prop_id_t,
200 200 uint_t, void *);
201 201 static int dmfe_m_setprop(void *, const char *, mac_prop_id_t,
202 202 uint_t, const void *);
203 203 static void dmfe_m_propinfo(void *, const char *, mac_prop_id_t,
204 204 mac_prop_info_handle_t);
205 205
206 206 static mac_callbacks_t dmfe_m_callbacks = {
207 207 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
208 208 dmfe_m_stat,
209 209 dmfe_m_start,
210 210 dmfe_m_stop,
211 211 dmfe_m_promisc,
212 212 dmfe_m_multicst,
213 213 dmfe_m_unicst,
214 214 dmfe_m_tx,
215 215 NULL,
216 216 dmfe_m_ioctl,
217 217 NULL, /* getcapab */
218 218 NULL, /* open */
219 219 NULL, /* close */
220 220 dmfe_m_setprop,
221 221 dmfe_m_getprop,
222 222 dmfe_m_propinfo
223 223 };
224 224
225 225
226 226 /*
227 227 * Describes the chip's DMA engine
228 228 */
229 229 static ddi_dma_attr_t dma_attr = {
230 230 DMA_ATTR_V0, /* dma_attr version */
231 231 0, /* dma_attr_addr_lo */
232 232 (uint32_t)0xFFFFFFFF, /* dma_attr_addr_hi */
233 233 0x0FFFFFF, /* dma_attr_count_max */
234 234 0x20, /* dma_attr_align */
235 235 0x7F, /* dma_attr_burstsizes */
236 236 1, /* dma_attr_minxfer */
237 237 (uint32_t)0xFFFFFFFF, /* dma_attr_maxxfer */
238 238 (uint32_t)0xFFFFFFFF, /* dma_attr_seg */
239 239 1, /* dma_attr_sgllen */
240 240 1, /* dma_attr_granular */
241 241 0 /* dma_attr_flags */
242 242 };
243 243
244 244 /*
245 245 * DMA access attributes for registers and descriptors
246 246 */
247 247 static ddi_device_acc_attr_t dmfe_reg_accattr = {
248 248 DDI_DEVICE_ATTR_V0,
249 249 DDI_STRUCTURE_LE_ACC,
250 250 DDI_STRICTORDER_ACC
251 251 };
252 252
253 253 /*
254 254 * DMA access attributes for data: NOT to be byte swapped.
255 255 */
256 256 static ddi_device_acc_attr_t dmfe_data_accattr = {
257 257 DDI_DEVICE_ATTR_V0,
258 258 DDI_NEVERSWAP_ACC,
259 259 DDI_STRICTORDER_ACC
260 260 };
261 261
262 262 static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
263 263 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
264 264 };
265 265
266 266
267 267 /*
268 268 * ========== Lowest-level chip register & ring access routines ==========
269 269 */
270 270
271 271 /*
272 272 * I/O register get/put routines
273 273 */
274 274 uint32_t
275 275 dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
276 276 {
277 277 uint32_t *addr;
278 278
279 279 addr = (void *)(dmfep->io_reg + offset);
280 280 return (ddi_get32(dmfep->io_handle, addr));
281 281 }
282 282
283 283 void
284 284 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
285 285 {
286 286 uint32_t *addr;
287 287
288 288 addr = (void *)(dmfep->io_reg + offset);
289 289 ddi_put32(dmfep->io_handle, addr, value);
290 290 }
291 291
292 292 /*
293 293 * TX/RX ring get/put routines
294 294 */
295 295 static uint32_t
296 296 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
297 297 {
298 298 uint32_t *addr;
299 299
300 300 addr = (void *)dma_p->mem_va;
301 301 return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
302 302 }
303 303
304 304 static void
305 305 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
306 306 {
307 307 uint32_t *addr;
308 308
309 309 addr = (void *)dma_p->mem_va;
310 310 ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
311 311 }
312 312
313 313 /*
314 314 * Setup buffer get/put routines
315 315 */
316 316 static uint32_t
317 317 dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
318 318 {
319 319 uint32_t *addr;
320 320
321 321 addr = (void *)dma_p->setup_va;
322 322 return (ddi_get32(dma_p->acc_hdl, addr + index));
323 323 }
324 324
325 325 static void
326 326 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
327 327 {
328 328 uint32_t *addr;
329 329
330 330 addr = (void *)dma_p->setup_va;
331 331 ddi_put32(dma_p->acc_hdl, addr + index, value);
332 332 }
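/*
 * Note that the pointer arithmetic in the accessors above is in
 * uint32_t units: <index * DESC_SIZE + offset> selects 32-bit word
 * <offset> of descriptor <index>.  A usage sketch, taking the word
 * offsets (DESC0 etc.) to be defined in dmfe_impl.h:
 *
 *	desc0 = dmfe_ring_get32(&dmfep->rx_desc, index, DESC0);
 *	dmfe_ring_put32(&dmfep->rx_desc, index, DESC0, RX_OWN);
 */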
333 333
334 334
335 335 /*
336 336 * ========== Low-level chip & ring buffer manipulation ==========
337 337 */
338 338
339 339 /*
340 340 * dmfe_set_opmode() -- function to set operating mode
341 341 */
342 342 static void
343 343 dmfe_set_opmode(dmfe_t *dmfep)
344 344 {
345 345 ASSERT(mutex_owned(dmfep->oplock));
346 346
347 347 dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
348 348 drv_usecwait(10);
349 349 }
350 350
351 351 /*
352 352 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
353 353 */
354 354 static void
355 355 dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
356 356 {
357 357 ASSERT(mutex_owned(dmfep->oplock));
358 358
359 359 /*
360 360 * Stop the chip:
361 361 * disable all interrupts
362 362 * stop TX/RX processes
363 363 * clear the status bits for TX/RX stopped
364 364 * If required, reset the chip
365 365 * Record the new state
366 366 */
367 367 dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
368 368 dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
369 369 dmfe_set_opmode(dmfep);
370 370 dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
371 371
372 372 switch (newstate) {
373 373 default:
374 374 ASSERT(!"can't get here");
375 375 return;
376 376
377 377 case CHIP_STOPPED:
378 378 case CHIP_ERROR:
379 379 break;
380 380
381 381 case CHIP_RESET:
382 382 dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
383 383 drv_usecwait(10);
384 384 dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
385 385 drv_usecwait(10);
386 386 dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
387 387 break;
388 388 }
389 389
390 390 dmfep->chip_state = newstate;
391 391 }
392 392
393 393 /*
394 394 * Initialize transmit and receive descriptor rings, and
395 395 * set the chip to point to the first entry in each ring
396 396 */
397 397 static void
398 398 dmfe_init_rings(dmfe_t *dmfep)
399 399 {
400 400 dma_area_t *descp;
401 401 uint32_t pstart;
402 402 uint32_t pnext;
403 403 uint32_t pbuff;
404 404 uint32_t desc1;
405 405 int i;
406 406
407 407 /*
408 408 * You need all the locks in order to rewrite the descriptor rings
409 409 */
410 410 ASSERT(mutex_owned(dmfep->oplock));
411 411 ASSERT(mutex_owned(dmfep->rxlock));
412 412 ASSERT(mutex_owned(dmfep->txlock));
413 413
414 414 /*
415 415 * Program the RX ring entries
416 416 */
417 417 descp = &dmfep->rx_desc;
418 418 pstart = descp->mem_dvma;
419 419 pnext = pstart + sizeof (struct rx_desc_type);
420 420 pbuff = dmfep->rx_buff.mem_dvma;
421 421 desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;
422 422
423 423 for (i = 0; i < dmfep->rx.n_desc; ++i) {
424 424 dmfe_ring_put32(descp, i, RD_NEXT, pnext);
425 425 dmfe_ring_put32(descp, i, BUFFER1, pbuff);
426 426 dmfe_ring_put32(descp, i, DESC1, desc1);
427 427 dmfe_ring_put32(descp, i, DESC0, RX_OWN);
428 428
429 429 pnext += sizeof (struct rx_desc_type);
430 430 pbuff += DMFE_BUF_SIZE;
431 431 }
432 432
433 433 /*
434 434 * Fix up last entry & sync
435 435 */
436 436 dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
437 437 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
438 438 dmfep->rx.next_free = 0;
439 439
440 440 /*
441 441 * Set the base address of the RX descriptor list in CSR3
442 442 */
443 443 dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);
444 444
445 445 /*
446 446 * Program the TX ring entries
447 447 */
448 448 descp = &dmfep->tx_desc;
449 449 pstart = descp->mem_dvma;
450 450 pnext = pstart + sizeof (struct tx_desc_type);
451 451 pbuff = dmfep->tx_buff.mem_dvma;
452 452 desc1 = TX_CHAINING;
453 453
454 454 for (i = 0; i < dmfep->tx.n_desc; ++i) {
455 455 dmfe_ring_put32(descp, i, TD_NEXT, pnext);
456 456 dmfe_ring_put32(descp, i, BUFFER1, pbuff);
457 457 dmfe_ring_put32(descp, i, DESC1, desc1);
458 458 dmfe_ring_put32(descp, i, DESC0, 0);
459 459
460 460 pnext += sizeof (struct tx_desc_type);
461 461 pbuff += DMFE_BUF_SIZE;
462 462 }
463 463
464 464 /*
465 465 * Fix up last entry & sync
466 466 */
467 467 dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
468 468 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
469 469 dmfep->tx.n_free = dmfep->tx.n_desc;
470 470 dmfep->tx.next_free = dmfep->tx.next_busy = 0;
471 471
472 472 /*
473 473 * Set the base address of the TX descriptor list in CSR4
474 474 */
475 475 dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
476 476 }
477 477
478 478 /*
479 479 * dmfe_start_chip() -- start the chip transmitting and/or receiving
480 480 */
481 481 static void
482 482 dmfe_start_chip(dmfe_t *dmfep, int mode)
483 483 {
484 484 ASSERT(mutex_owned(dmfep->oplock));
485 485
486 486 dmfep->opmode |= mode;
487 487 dmfe_set_opmode(dmfep);
488 488
489 489 dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
490 490 /*
491 491 * Enable VLAN length mode (allows packets to be 4 bytes longer).
492 492 */
493 493 dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);
494 494
495 495 /*
496 496 * Clear any pending process-stopped interrupts
497 497 */
498 498 dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
499 499 dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
500 500 mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
501 501 }
502 502
503 503 /*
504 504 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
505 505 *
506 506 * Normal interrupts:
507 507 * We always enable:
508 508 * RX_PKTDONE_INT (packet received)
509 509 * TX_PKTDONE_INT (TX complete)
510 510 * We never enable:
511 511 * TX_ALLDONE_INT (next TX buffer not ready)
512 512 *
513 513 * Abnormal interrupts:
514 514 * We always enable:
515 515 * RX_STOPPED_INT
516 516 * TX_STOPPED_INT
517 517 * SYSTEM_ERR_INT
518 518 * RX_UNAVAIL_INT
519 519 * We never enable:
520 520 * RX_EARLY_INT
521 521 * RX_WATCHDOG_INT
522 522 * TX_JABBER_INT
523 523 * TX_EARLY_INT
524 524 * TX_UNDERFLOW_INT
525 525 * GP_TIMER_INT (not valid in -9 chips)
526 526 * LINK_STATUS_INT (not valid in -9 chips)
527 527 */
528 528 static void
529 529 dmfe_enable_interrupts(dmfe_t *dmfep)
530 530 {
531 531 ASSERT(mutex_owned(dmfep->oplock));
532 532
533 533 /*
534 534 * Put 'the standard set of interrupts' in the interrupt mask register
535 535 */
536 536 dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
537 537 RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;
538 538
539 539 dmfe_chip_put32(dmfep, INT_MASK_REG,
540 540 NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
541 541 dmfep->chip_state = CHIP_RUNNING;
542 542 }
543 543
544 544 /*
545 545 * ========== RX side routines ==========
546 546 */
547 547
548 548 /*
549 549 * Function to update receive statistics on various errors
550 550 */
551 551 static void
552 552 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
553 553 {
554 554 ASSERT(mutex_owned(dmfep->rxlock));
555 555
556 556 /*
557 557 * The error summary bit and the error bits that it summarises
558 558 * are only valid if this is the last fragment. Therefore, a
559 559 * fragment only contributes to the error statistics if both
560 560 * the last-fragment and error summary bits are set.
561 561 */
562 562 if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
563 563 dmfep->rx_stats_ierrors += 1;
564 564
565 565 /*
566 566 * There are some other error bits in the descriptor for
567 567 * which there don't seem to be appropriate MAC statistics,
568 568 * notably RX_COLLISION and perhaps RX_DESC_ERR. The
569 569 * latter may not be possible if it is supposed to indicate
570 570 * that one buffer has been filled with a partial packet
571 571 * and the next buffer required for the rest of the packet
572 572 * was not available, as all our buffers are more than large
573 573 * enough for a whole packet without fragmenting.
574 574 */
575 575
576 576 if (desc0 & RX_OVERFLOW) {
577 577 dmfep->rx_stats_overflow += 1;
578 578
579 579 } else if (desc0 & RX_RUNT_FRAME)
580 580 dmfep->rx_stats_short += 1;
581 581
582 582 if (desc0 & RX_CRC)
583 583 dmfep->rx_stats_fcs += 1;
584 584
585 585 if (desc0 & RX_FRAME2LONG)
586 586 dmfep->rx_stats_toolong += 1;
587 587 }
588 588
589 589 /*
590 590 * A receive watchdog timeout is counted as a MAC-level receive
591 591 * error. Strangely, it doesn't set the packet error summary bit,
592 592 * according to the chip data sheet :-?
593 593 */
594 594 if (desc0 & RX_RCV_WD_TO)
595 595 dmfep->rx_stats_macrcv_errors += 1;
596 596
597 597 if (desc0 & RX_DRIBBLING)
598 598 dmfep->rx_stats_align += 1;
599 599
600 600 if (desc0 & RX_MII_ERR)
601 601 dmfep->rx_stats_macrcv_errors += 1;
602 602 }
603 603
604 604 /*
605 605 * Receive incoming packet(s) and pass them up ...
606 606 */
607 607 static mblk_t *
608 608 dmfe_getp(dmfe_t *dmfep)
609 609 {
610 610 dma_area_t *descp;
611 611 mblk_t **tail;
612 612 mblk_t *head;
613 613 mblk_t *mp;
614 614 char *rxb;
615 615 uchar_t *dp;
616 616 uint32_t desc0;
617 617 uint32_t misses;
618 618 int packet_length;
619 619 int index;
620 620
621 621 mutex_enter(dmfep->rxlock);
622 622
623 623 /*
624 624 * Update the missed frame statistic from the on-chip counter.
625 625 */
626 626 misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
627 627 dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);
628 628
629 629 /*
630 630 * sync (all) receive descriptors before inspecting them
631 631 */
632 632 descp = &dmfep->rx_desc;
633 633 DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
634 634
635 635 /*
636 636 * We should own at least one RX entry, since we've had a
637 637 * receive interrupt, but let's not be dogmatic about it.
638 638 */
639 639 index = dmfep->rx.next_free;
640 640 desc0 = dmfe_ring_get32(descp, index, DESC0);
641 641
642 642 DTRACE_PROBE1(rx__start, uint32_t, desc0);
643 643 for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
644 644 /*
645 645 * Maintain statistics for every descriptor returned
646 646 * to us by the chip ...
647 647 */
648 648 dmfe_update_rx_stats(dmfep, desc0);
649 649
650 650 /*
651 651 * Check that the entry has both "packet start" and
652 652 * "packet end" flags. We really shouldn't get packet
653 653 * fragments, 'cos all the RX buffers are bigger than
654 654 * the largest valid packet. So we'll just drop any
655 655 * fragments we find & skip on to the next entry.
656 656 */
657 657 if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
658 658 DTRACE_PROBE1(rx__frag, uint32_t, desc0);
659 659 goto skip;
660 660 }
661 661
662 662 /*
663 663 * A whole packet in one buffer. We have to check error
664 664 * status and packet length before forwarding it upstream.
665 665 */
666 666 if (desc0 & RX_ERR_SUMMARY) {
667 667 DTRACE_PROBE1(rx__err, uint32_t, desc0);
668 668 goto skip;
669 669 }
670 670
671 671 packet_length = (desc0 >> 16) & 0x3fff;
672 672 if (packet_length > DMFE_MAX_PKT_SIZE) {
673 673 DTRACE_PROBE1(rx__toobig, int, packet_length);
674 674 goto skip;
675 675 } else if (packet_length < ETHERMIN) {
676 676 /*
677 677 * Note that VLAN packet would be even larger,
678 678 * but we don't worry about dropping runt VLAN
679 679 * frames.
680 680 *
681 681 * This check is probably redundant, as well,
682 682 * since the hardware should drop RUNT frames.
683 683 */
684 684 DTRACE_PROBE1(rx__runt, int, packet_length);
685 685 goto skip;
686 686 }
687 687
688 688 /*
689 689 * Sync the data, so we can examine it; then check that
690 690 * the packet is really intended for us (remember that
691 691 * if we're using Imperfect Filtering, then the chip will
692 692 * receive unicast packets sent to stations whose addresses
693 693 * just happen to hash to the same value as our own; we
694 694 * discard these here so they don't get sent upstream ...)
695 695 */
696 696 (void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
697 697 index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
698 698 DDI_DMA_SYNC_FORKERNEL);
699 699 rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
700 700
701 701
702 702 /*
703 703 * We do not bother to check that the packet is really for
704 704 * us, we let the MAC framework make that check instead.
705 705 * This is especially important if we ever want to support
706 706 * multiple MAC addresses.
707 707 */
708 708
709 709 /*
710 710 * Packet looks good; get a buffer to copy it into. We
711 711 * allow some space at the front of the allocated buffer
712 712 * (HEADROOM) in case any upstream modules want to prepend
713 713 * some sort of header. The value has been carefully chosen
714 714 * so that it also has the side-effect of making the packet
715 715 * *contents* 4-byte aligned, as required by NCA!
716 716 */
717 717 mp = allocb(DMFE_HEADROOM + packet_length, 0);
718 718 if (mp == NULL) {
719 719 DTRACE_PROBE(rx__no__buf);
720 720 dmfep->rx_stats_norcvbuf += 1;
721 721 goto skip;
722 722 }
723 723
724 724 /*
725 725 * Account for statistics of good packets.
726 726 */
727 727 dmfep->rx_stats_ipackets += 1;
728 728 dmfep->rx_stats_rbytes += packet_length;
729 729 if (desc0 & RX_MULTI_FRAME) {
730 730 if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
731 731 dmfep->rx_stats_multi += 1;
732 732 } else {
733 733 dmfep->rx_stats_bcast += 1;
734 734 }
735 735 }
736 736
737 737 /*
738 738 * Copy the packet into the STREAMS buffer
739 739 */
740 740 dp = mp->b_rptr += DMFE_HEADROOM;
741 741 mp->b_cont = mp->b_next = NULL;
742 742
743 743 /*
744 744 * Don't worry about stripping the vlan tag, the MAC
745 745 * layer will take care of that for us.
746 746 */
747 747 bcopy(rxb, dp, packet_length);
748 748
749 749 /*
750 750 * Fix up the packet length, and link it to the chain
751 751 */
752 752 mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
753 753 *tail = mp;
754 754 tail = &mp->b_next;
755 755
756 756 skip:
757 757 /*
758 758 * Return ownership of ring entry & advance to next
759 759 */
760 760 dmfe_ring_put32(descp, index, DESC0, RX_OWN);
761 761 index = NEXT(index, dmfep->rx.n_desc);
762 762 desc0 = dmfe_ring_get32(descp, index, DESC0);
763 763 }
764 764
765 765 /*
766 766 * Remember where to start looking next time ...
767 767 */
768 768 dmfep->rx.next_free = index;
769 769
770 770 /*
771 771 * sync the receive descriptors that we've given back
772 772 * (actually, we sync all of them for simplicity), and
773 773 * wake the chip in case it had suspended receive
774 774 */
775 775 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
776 776 dmfe_chip_put32(dmfep, RX_POLL_REG, 0);
777 777
778 778 mutex_exit(dmfep->rxlock);
779 779 return (head);
780 780 }
781 781
782 782 /*
783 783 * ========== Primary TX side routines ==========
784 784 */
785 785
786 786 /*
787 787 * TX ring management:
788 788 *
789 789 * There are <tx.n_desc> entries in the ring, of which those from
790 790 * <tx.next_free> round to but not including <tx.next_busy> must
791 791 * be owned by the CPU. The number of such entries should equal
792 792 * <tx.n_free>; but there may also be some more entries which the
793 793 * chip has given back but which we haven't yet accounted for.
794 794 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
795 795 * as it discovers such entries.
796 796 *
797 797 * Initially, or when the ring is entirely free:
798 798 * C = Owned by CPU
799 799 * D = Owned by Davicom (DMFE) chip
800 800 *
801 801 * tx.next_free tx.n_desc = 16
802 802 * |
803 803 * v
804 804 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
805 805 * | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
806 806 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
807 807 * ^
808 808 * |
809 809 * tx.next_busy tx.n_free = 16
810 810 *
811 811 * On entry to reclaim() during normal use:
812 812 *
813 813 * tx.next_free tx.n_desc = 16
814 814 * |
815 815 * v
816 816 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
817 817 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
818 818 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
819 819 * ^
820 820 * |
821 821 * tx.next_busy tx.n_free = 9
822 822 *
823 823 * On exit from reclaim():
824 824 *
825 825 * tx.next_free tx.n_desc = 16
826 826 * |
827 827 * v
828 828 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
829 829 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
830 830 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
831 831 * ^
832 832 * |
833 833 * tx.next_busy tx.n_free = 13
834 834 *
835 835 * The ring is considered "full" when only one entry is owned by
836 836 * the CPU; thus <tx.n_free> should always be >= 1.
837 837 *
838 838 * tx.next_free tx.n_desc = 16
839 839 * |
840 840 * v
841 841 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
842 842 * | D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
843 843 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
844 844 * ^
845 845 * |
846 846 * tx.next_busy tx.n_free = 1
847 847 */
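/*
 * Restating the key invariant above (a sketch, not code from this
 * file): the slots owned by the CPU run from <tx.next_free> round to,
 * but not including, <tx.next_busy>, so
 *
 *	tx.n_free == (tx.next_busy - tx.next_free + tx.n_desc)
 *						% tx.n_desc
 *
 * reading a result of 0 as "all <tx.n_desc> slots free"; and once the
 * ring is initialized, tx.n_free never drops below 1.
 */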
848 848
849 849 /*
850 850 * Function to update transmit statistics on various errors
851 851 */
852 852 static void
853 853 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
854 854 {
855 855 uint32_t collisions;
856 856 uint32_t errbits;
857 857 uint32_t errsum;
858 858
859 859 ASSERT(mutex_owned(dmfep->txlock));
860 860
861 861 collisions = ((desc0 >> 3) & 0x0f);
862 862 errsum = desc0 & TX_ERR_SUMMARY;
863 863 errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
864 864 TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
865 865 if ((errsum == 0) != (errbits == 0)) {
866 866 dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
867 867 desc0 |= TX_ERR_SUMMARY;
868 868 }
869 869
870 870 if (desc0 & TX_ERR_SUMMARY) {
871 871 dmfep->tx_stats_oerrors += 1;
872 872
873 873 /*
874 874 * If we ever see a transmit jabber timeout, we count it
875 875 * as a MAC-level transmit error; but we probably won't
876 876 * see it as it causes an Abnormal interrupt and we reset
877 877 * the chip in order to recover
878 878 */
879 879 if (desc0 & TX_JABBER_TO) {
880 880 dmfep->tx_stats_macxmt_errors += 1;
881 881 dmfep->tx_stats_jabber += 1;
882 882 }
883 883
884 884 if (desc0 & TX_UNDERFLOW)
885 885 dmfep->tx_stats_underflow += 1;
886 886 else if (desc0 & TX_LATE_COLL)
887 887 dmfep->tx_stats_xmtlatecoll += 1;
888 888
889 889 if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
890 890 dmfep->tx_stats_nocarrier += 1;
891 891
892 892 if (desc0 & TX_EXCESS_COLL) {
893 893 dmfep->tx_stats_excoll += 1;
894 894 collisions = 16;
895 895 }
896 896 } else {
897 897 int bit = index % NBBY;
898 898 int byt = index / NBBY;
899 899
900 900 		if (dmfep->tx_mcast[byt] & (1 << bit)) {
901 901 			dmfep->tx_mcast[byt] &= ~(1 << bit);
902 902 			dmfep->tx_stats_multi += 1;
903 903 
904 904 		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
905 905 			dmfep->tx_bcast[byt] &= ~(1 << bit);
906 906 dmfep->tx_stats_bcast += 1;
907 907 }
908 908
909 909 dmfep->tx_stats_opackets += 1;
910 910 dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
911 911 }
912 912
913 913 if (collisions == 1)
914 914 dmfep->tx_stats_first_coll += 1;
915 915 else if (collisions != 0)
916 916 dmfep->tx_stats_multi_coll += 1;
917 917 dmfep->tx_stats_collisions += collisions;
918 918
919 919 if (desc0 & TX_DEFERRED)
920 920 dmfep->tx_stats_defer += 1;
921 921 }
922 922
923 923 /*
924 924 * Reclaim all the ring entries that the chip has returned to us ...
925 925 *
926 926 * Returns B_FALSE if no entries could be reclaimed. Otherwise, reclaims
927 927 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
928 928 */
929 929 static boolean_t
930 930 dmfe_reclaim_tx_desc(dmfe_t *dmfep)
931 931 {
932 932 dma_area_t *descp;
933 933 uint32_t desc0;
934 934 uint32_t desc1;
935 935 int i;
936 936
937 937 ASSERT(mutex_owned(dmfep->txlock));
938 938
939 939 /*
940 940 * sync transmit descriptor ring before looking at it
941 941 */
942 942 descp = &dmfep->tx_desc;
943 943 DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
944 944
945 945 /*
946 946 * Early exit if there are no descriptors to reclaim, either
947 947 * because they're all reclaimed already, or because the next
948 948 * one is still owned by the chip ...
949 949 */
950 950 i = dmfep->tx.next_busy;
951 951 if (i == dmfep->tx.next_free)
952 952 return (B_FALSE);
953 953 desc0 = dmfe_ring_get32(descp, i, DESC0);
954 954 if (desc0 & TX_OWN)
955 955 return (B_FALSE);
956 956
957 957 /*
958 958 * Reclaim as many descriptors as possible ...
959 959 */
960 960 for (;;) {
961 961 desc1 = dmfe_ring_get32(descp, i, DESC1);
962 962 ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);
963 963
964 964 if ((desc1 & TX_SETUP_PACKET) == 0) {
965 965 /*
966 966 * Regular packet - just update stats
967 967 */
968 968 dmfe_update_tx_stats(dmfep, i, desc0, desc1);
969 969 }
970 970
971 971 /*
972 972 * Update count & index; we're all done if the ring is
973 973 * now fully reclaimed, or the next entry if still owned
974 974 * by the chip ...
975 975 */
976 976 dmfep->tx.n_free += 1;
977 977 i = NEXT(i, dmfep->tx.n_desc);
978 978 if (i == dmfep->tx.next_free)
979 979 break;
980 980 desc0 = dmfe_ring_get32(descp, i, DESC0);
981 981 if (desc0 & TX_OWN)
982 982 break;
983 983 }
984 984
985 985 dmfep->tx.next_busy = i;
986 986 dmfep->tx_pending_tix = 0;
987 987 return (B_TRUE);
988 988 }
989 989
990 990 /*
991 991 * Send the message in the message block chain <mp>.
992 992 *
993 993 * The message is freed if and only if its contents are successfully copied
994 994 * and queued for transmission (so that the return value is B_TRUE).
995 995 * If we can't queue the message, the return value is B_FALSE and
996 996 * the message is *not* freed.
997 997 *
998 998 * This routine handles the special case of <mp> == NULL, which indicates
999 999 * that we want to "send" the special "setup packet" allocated during
1000 1000 * startup. We have to use some different flags in the packet descriptor
1001 1001 * to say its a setup packet (from the global <dmfe_setup_desc1>), and the
1002 1002 * setup packet *isn't* freed after use.
1003 1003 */
1004 1004 static boolean_t
1005 1005 dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
1006 1006 {
1007 1007 dma_area_t *descp;
1008 1008 mblk_t *bp;
1009 1009 char *txb;
1010 1010 uint32_t desc1;
1011 1011 uint32_t index;
1012 1012 size_t totlen;
1013 1013 size_t mblen;
1014 1014 uint32_t paddr;
1015 1015
1016 1016 /*
1017 1017 * If the number of free slots is below the reclaim threshold
1018 1018 * (soft limit), we'll try to reclaim some. If we fail, and
1019 1019 * the number of free slots is also below the minimum required
1020 1020 * (the hard limit, usually 1), then we can't send the packet.
1021 1021 */
1022 1022 mutex_enter(dmfep->txlock);
1023 1023 	if (dmfep->suspended) {
          		mutex_exit(dmfep->txlock);
1024 1024 		return (B_FALSE);
          	}
1025 1025
1026 1026 if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
1027 1027 dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
1028 1028 dmfep->tx.n_free <= dmfe_tx_min_free) {
1029 1029 /*
1030 1030 * Resource shortage - return B_FALSE so the packet
1031 1031 * will be queued for retry after the next TX-done
1032 1032 * interrupt.
1033 1033 */
1034 1034 mutex_exit(dmfep->txlock);
1035 1035 DTRACE_PROBE(tx__no__desc);
1036 1036 return (B_FALSE);
1037 1037 }
1038 1038
1039 1039 /*
1040 1040 * There's a slot available, so claim it by incrementing
1041 1041 * the next-free index and decrementing the free count.
1042 1042 * If the ring is currently empty, we also restart the
1043 1043 * stall-detect timer. The ASSERTions check that our
1044 1044 * invariants still hold:
1045 1045 * the next-free index must not match the next-busy index
1046 1046 * there must still be at least one free entry
1047 1047 * After this, we now have exclusive ownership of the ring
1048 1048 * entry (and matching buffer) indicated by <index>, so we
1049 1049 * don't need to hold the TX lock any longer
1050 1050 */
1051 1051 index = dmfep->tx.next_free;
1052 1052 dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
1053 1053 ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
1054 1054 if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
1055 1055 dmfep->tx_pending_tix = 0;
1056 1056 ASSERT(dmfep->tx.n_free >= 1);
1057 1057 mutex_exit(dmfep->txlock);
1058 1058
1059 1059 /*
1060 1060 * Check the ownership of the ring entry ...
1061 1061 */
1062 1062 descp = &dmfep->tx_desc;
1063 1063 ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);
1064 1064
1065 1065 if (mp == NULL) {
1066 1066 /*
1067 1067 * Indicates we should send a SETUP packet, which we do by
1068 1068 * temporarily switching the BUFFER1 pointer in the ring
1069 1069 * entry. The reclaim routine will restore BUFFER1 to its
1070 1070 * usual value.
1071 1071 *
1072 1072 * Note that as the setup packet is tagged on the end of
1073 1073 * the TX ring, when we sync the descriptor we're also
1074 1074 * implicitly syncing the setup packet - hence, we don't
1075 1075 * need a separate ddi_dma_sync() call here.
1076 1076 */
1077 1077 desc1 = dmfe_setup_desc1;
1078 1078 paddr = descp->setup_dvma;
1079 1079 } else {
1080 1080 /*
1081 1081 * A regular packet; we copy the data into a pre-mapped
1082 1082 * buffer, which avoids the overhead (and complication)
1083 1083 * of mapping/unmapping STREAMS buffers and keeping hold
1084 1084 * of them until the DMA has completed.
1085 1085 *
1086 1086 * Because all buffers are the same size, and larger
1087 1087 * than the longest single valid message, we don't have
1088 1088 * to bother about splitting the message across multiple
1089 1089 * buffers.
1090 1090 */
1091 1091 txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
1092 1092 totlen = 0;
1093 1093 bp = mp;
1094 1094
1095 1095 /*
1096 1096 * Copy all (remaining) mblks in the message ...
1097 1097 */
1098 1098 for (; bp != NULL; bp = bp->b_cont) {
1099 1099 mblen = MBLKL(bp);
1100 1100 if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
1101 1101 bcopy(bp->b_rptr, txb, mblen);
1102 1102 txb += mblen;
1103 1103 }
1104 1104 }
1105 1105
1106 1106 /*
1107 1107 * Is this a multicast or broadcast packet? We do
1108 1108 * this so that we can track statistics accurately
1109 1109 * when we reclaim it.
1110 1110 */
1111 1111 txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
1112 1112 if (txb[0] & 0x1) {
1113 1113 if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
1114 1114 dmfep->tx_bcast[index / NBBY] |=
1115 1115 (1 << (index % NBBY));
1116 1116 } else {
1117 1117 dmfep->tx_mcast[index / NBBY] |=
1118 1118 (1 << (index % NBBY));
1119 1119 }
1120 1120 }
1121 1121
1122 1122 /*
1123 1123 * We've reached the end of the chain; and we should have
1124 1124 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
1125 1125 * buffer. Note that the <size> field in the descriptor is
1126 1126 * only 11 bits, so bigger packets would be a problem!
1127 1127 */
1128 1128 ASSERT(bp == NULL);
1129 1129 ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
1130 1130 totlen &= TX_BUFFER_SIZE1;
1131 1131 desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
1132 1132 paddr = dmfep->tx_buff.mem_dvma + index*DMFE_BUF_SIZE;
1133 1133
1134 1134 (void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
1135 1135 index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
1136 1136 }
1137 1137
1138 1138 /*
1139 1139 * Update ring descriptor entries, sync them, and wake up the
1140 1140 * transmit process
1141 1141 */
1142 1142 if ((index & dmfe_tx_int_factor) == 0)
1143 1143 desc1 |= TX_INT_ON_COMP;
1144 1144 desc1 |= TX_CHAINING;
1145 1145 dmfe_ring_put32(descp, index, BUFFER1, paddr);
1146 1146 dmfe_ring_put32(descp, index, DESC1, desc1);
1147 1147 dmfe_ring_put32(descp, index, DESC0, TX_OWN);
1148 1148 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
1149 1149 dmfe_chip_put32(dmfep, TX_POLL_REG, 0);
1150 1150
1151 1151 /*
1152 1152 * Finally, free the message & return success
1153 1153 */
1154 1154 if (mp)
1155 1155 freemsg(mp);
1156 1156 return (B_TRUE);
1157 1157 }
1158 1158
1159 1159 /*
1160 1160 * dmfe_m_tx() -- send a chain of packets
1161 1161 *
1162 1162 * Called when packet(s) are ready to be transmitted. A pointer to an
1163 1163 * M_DATA message that contains the packet is passed to this routine.
1164 1164 * The complete LLC header is contained in the message's first message
1165 1165 * block, and the remainder of the packet is contained within
1166 1166 * additional M_DATA message blocks linked to the first message block.
1167 1167 *
1168 1168 * Additional messages may be passed by linking with b_next.
1169 1169 */
1170 1170 static mblk_t *
1171 1171 dmfe_m_tx(void *arg, mblk_t *mp)
1172 1172 {
1173 1173 dmfe_t *dmfep = arg; /* private device info */
1174 1174 mblk_t *next;
1175 1175
1176 1176 ASSERT(mp != NULL);
1177 1177 ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);
1178 1178
1179 1179 if (dmfep->chip_state != CHIP_RUNNING)
1180 1180 return (mp);
1181 1181
1182 1182 while (mp != NULL) {
1183 1183 next = mp->b_next;
1184 1184 mp->b_next = NULL;
1185 1185 if (!dmfe_send_msg(dmfep, mp)) {
1186 1186 mp->b_next = next;
1187 1187 break;
1188 1188 }
1189 1189 mp = next;
1190 1190 }
1191 1191
1192 1192 return (mp);
1193 1193 }
1194 1194
1195 1195 /*
1196 1196 * ========== Address-setting routines (TX-side) ==========
1197 1197 */
1198 1198
1199 1199 /*
1200 1200 * Find the index of the relevant bit in the setup packet.
1201 1201 * This must mirror the way the hardware will actually calculate it!
1202 1202 */
1203 1203 static uint32_t
1204 1204 dmfe_hash_index(const uint8_t *address)
1205 1205 {
1206 1206 uint32_t const POLY = HASH_POLY;
1207 1207 uint32_t crc = HASH_CRC;
1208 1208 uint32_t index;
1209 1209 uint32_t msb;
1210 1210 uchar_t currentbyte;
1211 1211 int byteslength;
1212 1212 int shift;
1213 1213 int bit;
1214 1214
1215 1215 for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
1216 1216 currentbyte = address[byteslength];
1217 1217 for (bit = 0; bit < 8; ++bit) {
1218 1218 msb = crc >> 31;
1219 1219 crc <<= 1;
1220 1220 if (msb ^ (currentbyte & 1)) {
1221 1221 crc ^= POLY;
1222 1222 crc |= 0x00000001;
1223 1223 }
1224 1224 currentbyte >>= 1;
1225 1225 }
1226 1226 }
1227 1227
1228 1228 for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
1229 1229 index |= (((crc >> bit) & 1) << shift);
1230 1230
1231 1231 return (index);
1232 1232 }
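/*
 * Usage sketch (this mirrors dmfe_update_mcast() below, and is not
 * additional code in this file): hash the broadcast address and set
 * the corresponding bit in the setup-packet bitmap.
 *
 *	index = dmfe_hash_index(dmfe_broadcast_addr);
 *	dmfe_update_hash(dmfep, index, B_TRUE);
 */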
1233 1233
1234 1234 /*
1235 1235 * Find and set/clear the relevant bit in the setup packet hash table
1236 1236 * This must mirror the way the hardware will actually interpret it!
1237 1237 */
1238 1238 static void
1239 1239 dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
1240 1240 {
1241 1241 dma_area_t *descp;
1242 1242 uint32_t tmp;
1243 1243
1244 1244 ASSERT(mutex_owned(dmfep->oplock));
1245 1245
1246 1246 descp = &dmfep->tx_desc;
1247 1247 tmp = dmfe_setup_get32(descp, index/16);
1248 1248 if (val)
1249 1249 tmp |= 1 << (index%16);
1250 1250 else
1251 1251 tmp &= ~(1 << (index%16));
1252 1252 dmfe_setup_put32(descp, index/16, tmp);
1253 1253 }
1254 1254
1255 1255 /*
1256 1256 * Update the refcount for the bit in the setup packet corresponding
1257 1257 * to the specified address; if it changes between zero & nonzero,
1258 1258 * also update the bitmap itself & return B_TRUE, so that the caller
1259 1259 * knows to re-send the setup packet. Otherwise (only the refcount
1260 1260 * changed), return B_FALSE
1261 1261 */
1262 1262 static boolean_t
1263 1263 dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
1264 1264 {
1265 1265 uint32_t index;
1266 1266 uint8_t *refp;
1267 1267 boolean_t change;
1268 1268
1269 1269 index = dmfe_hash_index(mca);
1270 1270 refp = &dmfep->mcast_refs[index];
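	/*
	 * Post-increment on add, pre-decrement on remove: both the
	 * 0 -> 1 and the 1 -> 0 transition evaluate to 0 here, which
	 * is exactly when the underlying bitmap must change.
	 */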
1271 1271 change = (val ? (*refp)++ : --(*refp)) == 0;
1272 1272
1273 1273 if (change)
1274 1274 dmfe_update_hash(dmfep, index, val);
1275 1275
1276 1276 return (change);
1277 1277 }
1278 1278
1279 1279 /*
1280 1280 * "Transmit" the (possibly updated) magic setup packet
1281 1281 */
1282 1282 static int
1283 1283 dmfe_send_setup(dmfe_t *dmfep)
1284 1284 {
1285 1285 int status;
1286 1286
1287 1287 ASSERT(mutex_owned(dmfep->oplock));
1288 1288
1289 1289 if (dmfep->suspended)
1290 1290 return (0);
1291 1291
1292 1292 /*
1293 1293 * If the chip isn't running, we can't really send the setup frame
1294 1294 * now but it doesn't matter, 'cos it will be sent when the transmit
1295 1295 * process is restarted (see dmfe_start()).
1296 1296 */
1297 1297 if ((dmfep->opmode & START_TRANSMIT) == 0)
1298 1298 return (0);
1299 1299
1300 1300 /*
1301 1301 * "Send" the setup frame. If it fails (e.g. no resources),
1302 1302 * set a flag; then the factotum will retry the "send". Once
1303 1303 * it works, we can clear the flag no matter how many attempts
1304 1304 * had previously failed. We tell the caller that it worked
1305 1305 * whether it did or not; after all, it *will* work eventually.
1306 1306 */
1307 1307 status = dmfe_send_msg(dmfep, NULL);
1308 1308 dmfep->need_setup = status ? B_FALSE : B_TRUE;
1309 1309 return (0);
1310 1310 }
1311 1311
1312 1312 /*
1313 1313 * dmfe_m_unicst() -- set the physical network address
1314 1314 */
1315 1315 static int
1316 1316 dmfe_m_unicst(void *arg, const uint8_t *macaddr)
1317 1317 {
1318 1318 dmfe_t *dmfep = arg;
1319 1319 int status;
1320 1320 int index;
1321 1321
1322 1322 /*
1323 1323 * Update our current address and send out a new setup packet
1324 1324 *
1325 1325 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
1326 1326 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
1327 1327 *
1328 1328 * It is said that there is a bug in the 21140 where it fails to
1329 1329 * receive packets addressed to the specified perfect filter address.
1330 1330 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
1331 1331 * bit should be set in the module variable dmfe_setup_desc1.
1332 1332 *
1333 1333 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
1334 1334 * In this mode, *all* incoming addresses are hashed and looked
1335 1335 * up in the bitmap described by the setup packet. Therefore,
1336 1336 * the bit representing the station address has to be added to
1337 1337 * the table before sending it out. If the address is changed,
1338 1338 * the old entry should be removed before the new entry is made.
1339 1339 *
1340 1340 * NOTE: in this mode, unicast packets that are not intended for
1341 1341 * this station may be received; it is up to software to filter
1342 1342 * them out afterwards!
1343 1343 *
1344 1344 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
1345 1345 * filtering. In this mode, multicast addresses are hashed and
1346 1346 * checked against the bitmap, while unicast addresses are simply
1347 1347 * matched against the one physical address specified in the setup
1348 1348 * packet. This means that we shouldn't receive unicast packets
1349 1349 * that aren't intended for us (but software still has to filter
1350 1350 * multicast packets just the same).
1351 1351 *
1352 1352 * Whichever mode we're using, we have to enter the broadcast
1353 1353 * address into the multicast filter map too, so we do this on
1354 1354 * the first time through after attach or reset.
1355 1355 */
1356 1356 mutex_enter(dmfep->oplock);
1357 1357
1358 1358 if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
1359 1359 (void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
1360 1360 if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
1361 1361 (void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
1362 1362 if (!dmfep->addr_set)
1363 1363 (void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);
1364 1364
1365 1365 /*
1366 1366 * Remember the new current address
1367 1367 */
1368 1368 ethaddr_copy(macaddr, dmfep->curr_addr);
1369 1369 dmfep->addr_set = B_TRUE;
1370 1370
1371 1371 /*
1372 1372 * Install the new physical address into the proper position in
1373 1373 * the setup frame; this is only used if we select hash+perfect
1374 1374 * filtering, but we'll put it in anyway. The ugliness here is
1375 1375 * down to the usual war of the egg :(
1376 1376 */
1377 1377 for (index = 0; index < ETHERADDRL; index += 2)
1378 1378 dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
1379 1379 (macaddr[index+1] << 8) | macaddr[index]);
1380 1380
1381 1381 /*
1382 1382 * Finally, we're ready to "transmit" the setup frame
1383 1383 */
1384 1384 status = dmfe_send_setup(dmfep);
1385 1385 mutex_exit(dmfep->oplock);
1386 1386
1387 1387 return (status);
1388 1388 }
1389 1389
1390 1390 /*
1391 1391 * dmfe_m_multicst() -- enable or disable a multicast address
1392 1392 *
1393 1393 * Program the hardware to enable/disable the multicast address
1394 1394 * in "mca" (enable if add is true, otherwise disable it.)
1395 1395 * We keep a refcount for each bit in the map, so that it still
1396 1396 * works out properly if multiple addresses hash to the same bit.
1397 1397 * dmfe_update_mcast() tells us whether the map actually changed;
1398 1398 * if so, we have to re-"transmit" the magic setup packet.
1399 1399 */
1400 1400 static int
1401 1401 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1402 1402 {
1403 1403 dmfe_t *dmfep = arg; /* private device info */
1404 1404 int status = 0;
1405 1405
1406 1406 mutex_enter(dmfep->oplock);
1407 1407 if (dmfe_update_mcast(dmfep, mca, add))
1408 1408 status = dmfe_send_setup(dmfep);
1409 1409 mutex_exit(dmfep->oplock);
1410 1410
1411 1411 return (status);
1412 1412 }
1413 1413
1414 1414
1415 1415 /*
1416 1416 * ========== Internal state management entry points ==========
1417 1417 */
1418 1418
1419 1419 /*
1420 1420 * These routines provide all the functionality required by the
1421 1421 * corresponding MAC layer entry points, but don't update the MAC layer state
1422 1422 * so they can be called internally without disturbing our record
1423 1423 * of what the MAC layer thinks we should be doing ...
1424 1424 */
1425 1425
1426 1426 /*
1427 1427 * dmfe_stop() -- stop processing, don't reset h/w or rings
1428 1428 */
1429 1429 static void
1430 1430 dmfe_stop(dmfe_t *dmfep)
1431 1431 {
1432 1432 ASSERT(mutex_owned(dmfep->oplock));
1433 1433
1434 1434 dmfe_stop_chip(dmfep, CHIP_STOPPED);
1435 1435 }
1436 1436
1437 1437 /*
1438 1438 * dmfe_reset() -- stop processing, reset h/w & rings to initial state
1439 1439 */
1440 1440 static void
1441 1441 dmfe_reset(dmfe_t *dmfep)
1442 1442 {
1443 1443 ASSERT(mutex_owned(dmfep->oplock));
1444 1444 ASSERT(mutex_owned(dmfep->rxlock));
1445 1445 ASSERT(mutex_owned(dmfep->txlock));
1446 1446
1447 1447 dmfe_stop_chip(dmfep, CHIP_RESET);
1448 1448 dmfe_init_rings(dmfep);
1449 1449 }
1450 1450
1451 1451 /*
1452 1452 * dmfe_start() -- start transmitting/receiving
1453 1453 */
1454 1454 static void
1455 1455 dmfe_start(dmfe_t *dmfep)
1456 1456 {
1457 1457 uint32_t gpsr;
1458 1458
1459 1459 ASSERT(mutex_owned(dmfep->oplock));
1460 1460
1461 1461 ASSERT(dmfep->chip_state == CHIP_RESET ||
1462 1462 dmfep->chip_state == CHIP_STOPPED);
1463 1463
1464 1464 /*
1465 1465 * Make opmode consistent with PHY duplex setting
1466 1466 */
1467 1467 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
1468 1468 if (gpsr & GPS_FULL_DUPLEX)
1469 1469 dmfep->opmode |= FULL_DUPLEX;
1470 1470 else
1471 1471 dmfep->opmode &= ~FULL_DUPLEX;
1472 1472
1473 1473 /*
1474 1474 * Start transmit processing
1475 1475 * Set up the address filters
1476 1476 * Start receive processing
1477 1477 * Enable interrupts
1478 1478 */
1479 1479 dmfe_start_chip(dmfep, START_TRANSMIT);
1480 1480 (void) dmfe_send_setup(dmfep);
1481 1481 drv_usecwait(10);
1482 1482 dmfe_start_chip(dmfep, START_RECEIVE);
1483 1483 dmfe_enable_interrupts(dmfep);
1484 1484 }
1485 1485
1486 1486 /*
1487 1487 * dmfe_restart - restart transmitting/receiving after error or suspend
1488 1488 */
1489 1489 static void
1490 1490 dmfe_restart(dmfe_t *dmfep)
1491 1491 {
1492 1492 ASSERT(mutex_owned(dmfep->oplock));
1493 1493
1494 1494 /*
1495 1495 * You need not only <oplock>, but also <rxlock> AND <txlock>
1496 1496 * in order to reset the rings, but then <txlock> *mustn't*
1497 1497 * be held across the call to dmfe_start()
1498 1498 */
1499 1499 mutex_enter(dmfep->rxlock);
1500 1500 mutex_enter(dmfep->txlock);
1501 1501 dmfe_reset(dmfep);
1502 1502 mutex_exit(dmfep->txlock);
1503 1503 mutex_exit(dmfep->rxlock);
1504 1504 if (dmfep->mac_state == DMFE_MAC_STARTED) {
1505 1505 dmfe_start(dmfep);
1506 1506 }
1507 1507 }
1508 1508
1509 1509
1510 1510 /*
1511 1511 * ========== MAC-required management entry points ==========
1512 1512 */
1513 1513
1514 1514 /*
1515 1515 * dmfe_m_stop() -- stop transmitting/receiving
1516 1516 */
1517 1517 static void
1518 1518 dmfe_m_stop(void *arg)
1519 1519 {
1520 1520 dmfe_t *dmfep = arg; /* private device info */
1521 1521
1522 1522 /*
1523 1523 * Just stop processing, then record new MAC state
1524 1524 */
1525 1525 mii_stop(dmfep->mii);
1526 1526
1527 1527 mutex_enter(dmfep->oplock);
1528 1528 if (!dmfep->suspended)
1529 1529 dmfe_stop(dmfep);
1530 1530 dmfep->mac_state = DMFE_MAC_STOPPED;
1531 1531 mutex_exit(dmfep->oplock);
1532 1532 }
1533 1533
1534 1534 /*
1535 1535 * dmfe_m_start() -- start transmitting/receiving
1536 1536 */
1537 1537 static int
1538 1538 dmfe_m_start(void *arg)
1539 1539 {
1540 1540 dmfe_t *dmfep = arg; /* private device info */
1541 1541
1542 1542 /*
1543 1543 * Start processing and record new MAC state
1544 1544 */
1545 1545 mutex_enter(dmfep->oplock);
1546 1546 if (!dmfep->suspended)
1547 1547 dmfe_start(dmfep);
1548 1548 dmfep->mac_state = DMFE_MAC_STARTED;
1549 1549 mutex_exit(dmfep->oplock);
1550 1550
1551 1551 mii_start(dmfep->mii);
1552 1552
1553 1553 return (0);
1554 1554 }
1555 1555
1556 1556 /*
1557 1557 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
1558 1558 *
1559 1559 * Program the hardware to enable/disable promiscuous and/or
1560 1560 * receive-all-multicast modes. Davicom don't document this
1561 1561 * clearly, but it looks like we can do this on-the-fly (i.e.
1562 1562 * without stopping & restarting the TX/RX processes).
1563 1563 */
1564 1564 static int
1565 1565 dmfe_m_promisc(void *arg, boolean_t on)
1566 1566 {
1567 1567 dmfe_t *dmfep = arg;
1568 1568
1569 1569 mutex_enter(dmfep->oplock);
1570 1570 dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
1571 1571 if (on)
1572 1572 dmfep->opmode |= PROMISC_MODE;
1573 1573 if (!dmfep->suspended)
1574 1574 dmfe_set_opmode(dmfep);
1575 1575 mutex_exit(dmfep->oplock);
1576 1576
1577 1577 return (0);
1578 1578 }
1579 1579
1580 1580 /*
1581 1581 * ========== Factotum, implemented as a softint handler ==========
1582 1582 */
1583 1583
1584 1584 /*
1585 1585 * The factotum is woken up when there's something to do that we'd rather
1586 1586 * not do from inside a (high-level?) hardware interrupt handler. Its
1587 1587 * two main tasks are:
1588 1588 * reset & restart the chip after an error
1589 1589 * update & restart the chip after a link status change
1590 1590 */
1591 1591 static uint_t
1592 1592 dmfe_factotum(caddr_t arg)
1593 1593 {
1594 1594 dmfe_t *dmfep;
1595 1595
1596 1596 dmfep = (void *)arg;
1597 1597 ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
1598 1598
1599 1599 mutex_enter(dmfep->oplock);
1600 1600 if (dmfep->suspended) {
1601 1601 mutex_exit(dmfep->oplock);
1602 1602 return (DDI_INTR_CLAIMED);
1603 1603 }
1604 1604
1605 1605 dmfep->factotum_flag = 0;
1606 1606 DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);
1607 1607
1608 1608 /*
1609 1609 * Check for chip error ...
1610 1610 */
1611 1611 if (dmfep->chip_state == CHIP_ERROR) {
1612 1612 /*
1613 1613 * Error recovery required: reset the chip and the rings,
1614 1614 * then, if it's supposed to be running, kick it off again.
1615 1615 */
1616 1616 DRV_KS_INC(dmfep, KS_RECOVERY);
1617 1617 dmfe_restart(dmfep);
1618 1618 mutex_exit(dmfep->oplock);
1619 1619
1620 1620 mii_reset(dmfep->mii);
1621 1621
1622 1622 } else if (dmfep->need_setup) {
1623 1623 (void) dmfe_send_setup(dmfep);
1624 1624 mutex_exit(dmfep->oplock);
1625 1625 }
1626 1626
1627 1627 return (DDI_INTR_CLAIMED);
1628 1628 }
1629 1629
1630 1630 static void
1631 1631 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
1632 1632 {
1633 1633 _NOTE(ARGUNUSED(why));
1634 1634 ASSERT(mutex_owned(dmfep->oplock));
1635 1635 DRV_KS_INC(dmfep, ks_id);
1636 1636
1637 1637 if (dmfep->factotum_flag++ == 0)
1638 1638 ddi_trigger_softintr(dmfep->factotum_id);
1639 1639 }
1640 1640
1641 1641
1642 1642 /*
1643 1643 * ========== Periodic Tasks (Cyclic handler & friends) ==========
1644 1644 */
1645 1645
1646 1646 /*
1647 1647 * Periodic tick tasks, run from the cyclic handler
1648 1648 *
1649 1649 * Check for TX stall; flag an error and wake the factotum if so.
1650 1650 */
1651 1651 static void
1652 1652 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
1653 1653 {
1654 1654 boolean_t tx_stall;
1655 1655 uint32_t tx_state;
1656 1656 uint32_t limit;
1657 1657
1658 1658 ASSERT(mutex_owned(dmfep->oplock));
1659 1659
1660 1660 /*
1661 1661 * Check for transmit stall ...
1662 1662 *
1663 1663 * IF there's at least one packet in the ring, AND the timeout
1664 1664 * has elapsed, AND we can't reclaim any descriptors, THEN we've
1665 1665 * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
1666 1666 *
1667 1667 * Note that the timeout limit is based on the transmit engine
1668 1668 * state; we allow the transmitter longer to make progress in
1669 1669 * some states than in others, based on observations of this
1670 1670 * chip's actual behaviour in the lab.
1671 1671 *
1672 1672 * By observation, we find that on about 1 in 10000 passes through
1673 1673 * here, the TX lock is already held. In that case, we'll skip
1674 1674 * the check on this pass rather than wait. Most likely, the send
1675 1675 * routine was holding the lock when the interrupt happened, and
1676 1676 * we'll succeed next time through. In the event of a real stall,
1677 1677 * the TX ring will fill up, after which the send routine won't be
1678 1678 * called any more and then we're sure to get in.
1679 1679 */
1680 1680 tx_stall = B_FALSE;
1681 1681 if (mutex_tryenter(dmfep->txlock)) {
1682 1682 if (dmfep->tx.n_free < dmfep->tx.n_desc) {
1683 1683 tx_state = TX_PROCESS_STATE(istat);
1684 1684 if (gpsr & GPS_LINK_100)
1685 1685 limit = stall_100_tix[tx_state];
1686 1686 else
1687 1687 limit = stall_10_tix[tx_state];
1688 1688 if (++dmfep->tx_pending_tix >= limit &&
1689 1689 dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
1690 1690 dmfe_log(dmfep, "TX stall detected "
1691 1691 "after %d ticks in state %d; "
1692 1692 "automatic recovery initiated",
1693 1693 dmfep->tx_pending_tix, tx_state);
1694 1694 tx_stall = B_TRUE;
1695 1695 }
1696 1696 }
1697 1697 mutex_exit(dmfep->txlock);
1698 1698 }
1699 1699
1700 1700 if (tx_stall) {
1701 1701 dmfe_stop_chip(dmfep, CHIP_ERROR);
1702 1702 dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
1703 1703 }
1704 1704 }
1705 1705
1706 1706 /*
1707 1707 * Cyclic callback handler
1708 1708 */
1709 1709 static void
1710 1710 dmfe_cyclic(void *arg)
1711 1711 {
1712 1712 dmfe_t *dmfep = arg; /* private device info */
1713 1713 uint32_t istat;
1714 1714 uint32_t gpsr;
1715 1715
1716 1716 /*
1717 1717 * If the chip's not RUNNING, there's nothing to do.
1718 1718 * If we can't get the mutex straight away, we'll just
1719 1719 	 * skip this pass; we'll be back soon enough anyway.
1720 1720 */
1721 1721 if (mutex_tryenter(dmfep->oplock) == 0)
1722 1722 return;
1723 1723 if ((dmfep->suspended) || (dmfep->chip_state != CHIP_RUNNING)) {
1724 1724 mutex_exit(dmfep->oplock);
1725 1725 return;
1726 1726 }
1727 1727
1728 1728 /*
1729 1729 * Recheck chip state (it might have been stopped since we
1730 1730 * checked above). If still running, call each of the *tick*
1731 1731 * tasks. They will check for link change, TX stall, etc ...
1732 1732 */
1733 1733 if (dmfep->chip_state == CHIP_RUNNING) {
1734 1734 istat = dmfe_chip_get32(dmfep, STATUS_REG);
1735 1735 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
1736 1736 dmfe_tick_stall_check(dmfep, gpsr, istat);
1737 1737 }
1738 1738
1739 1739 DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
1740 1740 mutex_exit(dmfep->oplock);
1741 1741 }
1742 1742
1743 1743 /*
1744 1744 * ========== Hardware interrupt handler ==========
1745 1745 */
1746 1746
1747 1747 /*
1748 1748 * dmfe_interrupt() -- handle chip interrupts
1749 1749 */
1750 1750 static uint_t
1751 1751 dmfe_interrupt(caddr_t arg)
1752 1752 {
1753 1753 dmfe_t *dmfep; /* private device info */
1754 1754 uint32_t interrupts;
1755 1755 uint32_t istat;
1756 1756 	const char *msg = "unknown cause";
1757 1757 mblk_t *mp;
1758 1758 boolean_t warning_msg = B_TRUE;
1759 1759
1760 1760 dmfep = (void *)arg;
1761 1761
1762 1762 mutex_enter(dmfep->oplock);
1763 1763 if (dmfep->suspended) {
1764 1764 mutex_exit(dmfep->oplock);
1765 1765 return (DDI_INTR_UNCLAIMED);
1766 1766 }
1767 1767
1768 1768 /*
1769 1769 * A quick check as to whether the interrupt was from this
1770 1770 * device, before we even finish setting up all our local
1771 1771 * variables. Note that reading the interrupt status register
1772 1772 * doesn't have any unpleasant side effects such as clearing
1773 1773 * the bits read, so it's quite OK to re-read it once we have
1774 1774 * determined that we are going to service this interrupt and
1775 1775 * grabbed the mutexen.
1776 1776 */
1777 1777 istat = dmfe_chip_get32(dmfep, STATUS_REG);
1778 1778 if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) {
1779 1779
1780 1780 mutex_exit(dmfep->oplock);
1781 1781 return (DDI_INTR_UNCLAIMED);
1782 1782 }
1783 1783
1784 1784 DRV_KS_INC(dmfep, KS_INTERRUPT);
1785 1785
1786 1786 /*
1787 1787 * Identify bits that represent enabled interrupts ...
1788 1788 */
1789 1789 istat |= dmfe_chip_get32(dmfep, STATUS_REG);
1790 1790 interrupts = istat & dmfep->imask;
1791 1791 ASSERT(interrupts != 0);
1792 1792
1793 1793 DTRACE_PROBE1(intr, uint32_t, istat);
1794 1794
1795 1795 /*
1796 1796 * Check for any interrupts other than TX/RX done.
1797 1797 * If there are any, they are considered Abnormal
1798 1798 * and will cause the chip to be reset.
1799 1799 */
1800 1800 if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
1801 1801 if (istat & ABNORMAL_SUMMARY_INT) {
1802 1802 /*
1803 1803 * Any Abnormal interrupts will lead to us
1804 1804 * resetting the chip, so we don't bother
1805 1805 * to clear each interrupt individually.
1806 1806 *
1807 1807 * Our main task here is to identify the problem,
1808 1808 * by pointing out the most significant unexpected
1809 1809 * bit. Additional bits may well be consequences
1810 1810 * of the first problem, so we consider the possible
1811 1811 * causes in order of severity.
1812 1812 */
1813 1813 if (interrupts & SYSTEM_ERR_INT) {
1814 1814 switch (istat & SYSTEM_ERR_BITS) {
1815 1815 case SYSTEM_ERR_M_ABORT:
1816 1816 msg = "Bus Master Abort";
1817 1817 break;
1818 1818
1819 1819 case SYSTEM_ERR_T_ABORT:
1820 1820 msg = "Bus Target Abort";
1821 1821 break;
1822 1822
1823 1823 case SYSTEM_ERR_PARITY:
1824 1824 msg = "Parity Error";
1825 1825 break;
1826 1826
1827 1827 default:
1828 1828 msg = "Unknown System Bus Error";
1829 1829 break;
1830 1830 }
1831 1831 } else if (interrupts & RX_STOPPED_INT) {
1832 1832 msg = "RX process stopped";
1833 1833 } else if (interrupts & RX_UNAVAIL_INT) {
1834 1834 msg = "RX buffer unavailable";
1835 1835 warning_msg = B_FALSE;
1836 1836 } else if (interrupts & RX_WATCHDOG_INT) {
1837 1837 msg = "RX watchdog timeout?";
1838 1838 } else if (interrupts & RX_EARLY_INT) {
1839 1839 msg = "RX early interrupt?";
1840 1840 } else if (interrupts & TX_STOPPED_INT) {
1841 1841 msg = "TX process stopped";
1842 1842 } else if (interrupts & TX_JABBER_INT) {
1843 1843 msg = "TX jabber timeout";
1844 1844 } else if (interrupts & TX_UNDERFLOW_INT) {
1845 1845 msg = "TX underflow?";
1846 1846 } else if (interrupts & TX_EARLY_INT) {
1847 1847 msg = "TX early interrupt?";
1848 1848
1849 1849 } else if (interrupts & LINK_STATUS_INT) {
1850 1850 msg = "Link status change?";
1851 1851 } else if (interrupts & GP_TIMER_INT) {
1852 1852 msg = "Timer expired?";
1853 1853 }
1854 1854
1855 1855 if (warning_msg)
1856 1856 dmfe_warning(dmfep, "abnormal interrupt, "
1857 1857 "status 0x%x: %s", istat, msg);
1858 1858
1859 1859 /*
1860 1860 * We don't want to run the entire reinitialisation
1861 1861 * code out of this (high-level?) interrupt, so we
1862 1862 * simply STOP the chip, and wake up the factotum
1863 1863 	 * to reinitialise it ...
1864 1864 */
1865 1865 dmfe_stop_chip(dmfep, CHIP_ERROR);
1866 1866 dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
1867 1867 "interrupt (error)");
1868 1868 } else {
1869 1869 /*
1870 1870 * We shouldn't really get here (it would mean
1871 1871 * there were some unprocessed enabled bits but
1872 1872 * they weren't Abnormal?), but we'll check just
1873 1873 * in case ...
1874 1874 */
1875 1875 DTRACE_PROBE1(intr__unexpected, uint32_t, istat);
1876 1876 }
1877 1877 }
1878 1878
1879 1879 /*
1880 1880 * Acknowledge all the original bits - except in the case of an
1881 1881 * error, when we leave them unacknowledged so that the recovery
1882 1882 * code can see what was going on when the problem occurred ...
1883 1883 */
1884 1884 if (dmfep->chip_state != CHIP_ERROR) {
1885 1885 (void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
1886 1886 		/*
1887 1887 		 * Read-after-write forces completion of the
1888 1888 		 * posted write on the PCI bus.
1889 1889 		 */
1890 1890 (void) dmfe_chip_get32(dmfep, STATUS_REG);
1891 1891 }
1892 1892
1893 1893
1894 1894 /*
1895 1895 * We've finished talking to the chip, so we can drop <oplock>
1896 1896 * before handling the normal interrupts, which only involve
1897 1897 * manipulation of descriptors ...
1898 1898 */
1899 1899 mutex_exit(dmfep->oplock);
1900 1900
1901 1901 if (interrupts & RX_PKTDONE_INT)
1902 1902 if ((mp = dmfe_getp(dmfep)) != NULL)
1903 1903 mac_rx(dmfep->mh, NULL, mp);
1904 1904
1905 1905 if (interrupts & TX_PKTDONE_INT) {
1906 1906 /*
1907 1907 * The only reason for taking this interrupt is to give
1908 1908 * MAC a chance to schedule queued packets after a
1909 1909 * ring-full condition. To minimise the number of
1910 1910 * redundant TX-Done interrupts, we only mark two of the
1911 1911 * ring descriptors as 'interrupt-on-complete' - all the
1912 1912 * others are simply handed back without an interrupt.
1913 1913 */
1914 1914 if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
1915 1915 (void) dmfe_reclaim_tx_desc(dmfep);
1916 1916 mutex_exit(dmfep->txlock);
1917 1917 }
1918 1918 mac_tx_update(dmfep->mh);
1919 1919 }
1920 1920
1921 1921 return (DDI_INTR_CLAIMED);
1922 1922 }
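
The TX-done path above depends on interrupt mitigation: only two descriptors in the ring carry the interrupt-on-complete flag, so draining an entire ring of completed packets costs two interrupts rather than one per packet. A minimal standalone sketch of the idea, using made-up names (N_DESC, TX_IC_BIT) rather than the driver's real descriptor layout:

	#include <stdio.h>
	#include <stdint.h>

	#define	N_DESC		128		/* hypothetical ring size */
	#define	TX_IC_BIT	(1u << 31)	/* hypothetical int-on-complete flag */

	int
	main(void)
	{
		uint32_t desc1[N_DESC];
		int i, intrs = 0;

		/* Flag only every (N_DESC / 2)th descriptor for an interrupt */
		for (i = 0; i < N_DESC; i++)
			desc1[i] = ((i % (N_DESC / 2)) == 0) ? TX_IC_BIT : 0;

		/* Completing the whole ring now raises just two interrupts */
		for (i = 0; i < N_DESC; i++)
			if (desc1[i] & TX_IC_BIT)
				intrs++;

		(void) printf("%d interrupts per %d completions\n", intrs, N_DESC);
		return (0);
	}
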
1923 1923
1924 1924 /*
1925 1925 * ========== Statistics update handler ==========
1926 1926 */
1927 1927
1928 1928 static int
1929 1929 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
1930 1930 {
1931 1931 dmfe_t *dmfep = arg;
1932 1932 int rv = 0;
1933 1933
1934 1934 /* Let MII handle its own stats. */
1935 1935 if (mii_m_getstat(dmfep->mii, stat, val) == 0) {
1936 1936 return (0);
1937 1937 }
1938 1938
1939 1939 mutex_enter(dmfep->oplock);
1940 1940 mutex_enter(dmfep->rxlock);
1941 1941 mutex_enter(dmfep->txlock);
1942 1942
1943 1943 /* make sure we have all the stats collected */
1944 1944 (void) dmfe_reclaim_tx_desc(dmfep);
1945 1945
1946 1946 switch (stat) {
1947 1947
1948 1948 case MAC_STAT_IPACKETS:
1949 1949 *val = dmfep->rx_stats_ipackets;
1950 1950 break;
1951 1951
1952 1952 case MAC_STAT_MULTIRCV:
1953 1953 *val = dmfep->rx_stats_multi;
1954 1954 break;
1955 1955
1956 1956 case MAC_STAT_BRDCSTRCV:
1957 1957 *val = dmfep->rx_stats_bcast;
1958 1958 break;
1959 1959
1960 1960 case MAC_STAT_RBYTES:
1961 1961 *val = dmfep->rx_stats_rbytes;
1962 1962 break;
1963 1963
1964 1964 case MAC_STAT_IERRORS:
1965 1965 *val = dmfep->rx_stats_ierrors;
1966 1966 break;
1967 1967
1968 1968 case MAC_STAT_NORCVBUF:
1969 1969 *val = dmfep->rx_stats_norcvbuf;
1970 1970 break;
1971 1971
1972 1972 case MAC_STAT_COLLISIONS:
1973 1973 *val = dmfep->tx_stats_collisions;
1974 1974 break;
1975 1975
1976 1976 case MAC_STAT_OERRORS:
1977 1977 *val = dmfep->tx_stats_oerrors;
1978 1978 break;
1979 1979
1980 1980 case MAC_STAT_OPACKETS:
1981 1981 *val = dmfep->tx_stats_opackets;
1982 1982 break;
1983 1983
1984 1984 case MAC_STAT_MULTIXMT:
1985 1985 *val = dmfep->tx_stats_multi;
1986 1986 break;
1987 1987
1988 1988 case MAC_STAT_BRDCSTXMT:
1989 1989 *val = dmfep->tx_stats_bcast;
1990 1990 break;
1991 1991
1992 1992 case MAC_STAT_OBYTES:
1993 1993 *val = dmfep->tx_stats_obytes;
1994 1994 break;
1995 1995
1996 1996 case MAC_STAT_OVERFLOWS:
1997 1997 *val = dmfep->rx_stats_overflow;
1998 1998 break;
1999 1999
2000 2000 case MAC_STAT_UNDERFLOWS:
2001 2001 *val = dmfep->tx_stats_underflow;
2002 2002 break;
2003 2003
2004 2004 case ETHER_STAT_ALIGN_ERRORS:
2005 2005 *val = dmfep->rx_stats_align;
2006 2006 break;
2007 2007
2008 2008 case ETHER_STAT_FCS_ERRORS:
2009 2009 *val = dmfep->rx_stats_fcs;
2010 2010 break;
2011 2011
2012 2012 case ETHER_STAT_TOOLONG_ERRORS:
2013 2013 *val = dmfep->rx_stats_toolong;
2014 2014 break;
2015 2015
2016 2016 case ETHER_STAT_TOOSHORT_ERRORS:
2017 2017 *val = dmfep->rx_stats_short;
2018 2018 break;
2019 2019
2020 2020 case ETHER_STAT_MACRCV_ERRORS:
2021 2021 *val = dmfep->rx_stats_macrcv_errors;
2022 2022 break;
2023 2023
2024 2024 case ETHER_STAT_MACXMT_ERRORS:
2025 2025 *val = dmfep->tx_stats_macxmt_errors;
2026 2026 break;
2027 2027
2028 2028 case ETHER_STAT_JABBER_ERRORS:
2029 2029 *val = dmfep->tx_stats_jabber;
2030 2030 break;
2031 2031
2032 2032 case ETHER_STAT_CARRIER_ERRORS:
2033 2033 *val = dmfep->tx_stats_nocarrier;
2034 2034 break;
2035 2035
2036 2036 case ETHER_STAT_TX_LATE_COLLISIONS:
2037 2037 *val = dmfep->tx_stats_xmtlatecoll;
2038 2038 break;
2039 2039
2040 2040 case ETHER_STAT_EX_COLLISIONS:
2041 2041 *val = dmfep->tx_stats_excoll;
2042 2042 break;
2043 2043
2044 2044 case ETHER_STAT_DEFER_XMTS:
2045 2045 *val = dmfep->tx_stats_defer;
2046 2046 break;
2047 2047
2048 2048 case ETHER_STAT_FIRST_COLLISIONS:
2049 2049 *val = dmfep->tx_stats_first_coll;
2050 2050 break;
2051 2051
2052 2052 case ETHER_STAT_MULTI_COLLISIONS:
2053 2053 *val = dmfep->tx_stats_multi_coll;
2054 2054 break;
2055 2055
2056 2056 default:
2057 2057 rv = ENOTSUP;
2058 2058 }
2059 2059
2060 2060 mutex_exit(dmfep->txlock);
2061 2061 mutex_exit(dmfep->rxlock);
2062 2062 mutex_exit(dmfep->oplock);
2063 2063
2064 2064 return (rv);
2065 2065 }
2066 2066
2067 2067 /*
2068 2068 * ========== Ioctl handler & subfunctions ==========
2069 2069 */
2070 2070
2071 2071 static lb_property_t dmfe_loopmodes[] = {
2072 2072 { normal, "normal", 0 },
2073 2073 { internal, "Internal", 1 },
2074 2074 { external, "External", 2 },
2075 2075 };
2076 2076
2077 2077 /*
2078 2078  * Specific dmfe IOCTLs; the mac module handles the generic ones.
2079 2079  * Unfortunately, the DM9102 doesn't seem to work well with MII-based
2080 2080 * loopback, so we have to do something special for it.
2081 2081 */
2082 2082
2083 2083 static void
2084 2084 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2085 2085 {
2086 2086 dmfe_t *dmfep = arg;
2087 2087 struct iocblk *iocp;
2088 2088 int rv = 0;
2089 2089 lb_info_sz_t sz;
2090 2090 int cmd;
2091 2091 uint32_t mode;
2092 2092
2093 2093 iocp = (void *)mp->b_rptr;
2094 2094 cmd = iocp->ioc_cmd;
2095 2095
2096 2096 if (mp->b_cont == NULL) {
2097 2097 /*
2098 2098 * All of these ioctls need data!
2099 2099 */
2100 2100 miocnak(wq, mp, 0, EINVAL);
2101 2101 return;
2102 2102 }
2103 2103
2104 2104 switch (cmd) {
2105 2105 case LB_GET_INFO_SIZE:
2106 2106 if (iocp->ioc_count != sizeof (sz)) {
2107 2107 rv = EINVAL;
2108 2108 } else {
2109 2109 sz = sizeof (dmfe_loopmodes);
2110 2110 bcopy(&sz, mp->b_cont->b_rptr, sizeof (sz));
2111 2111 }
2112 2112 break;
2113 2113
2114 2114 case LB_GET_INFO:
2115 2115 if (iocp->ioc_count != sizeof (dmfe_loopmodes)) {
2116 2116 rv = EINVAL;
2117 2117 } else {
2118 2118 bcopy(dmfe_loopmodes, mp->b_cont->b_rptr,
2119 2119 iocp->ioc_count);
2120 2120 }
2121 2121 break;
2122 2122
2123 2123 case LB_GET_MODE:
2124 2124 if (iocp->ioc_count != sizeof (mode)) {
2125 2125 rv = EINVAL;
2126 2126 } else {
2127 2127 mutex_enter(dmfep->oplock);
2128 2128 switch (dmfep->opmode & LOOPBACK_MODE_MASK) {
2129 2129 case LOOPBACK_OFF:
2130 2130 mode = 0;
2131 2131 break;
2132 2132 case LOOPBACK_INTERNAL:
2133 2133 mode = 1;
2134 2134 break;
2135 2135 default:
2136 2136 mode = 2;
2137 2137 break;
2138 2138 }
2139 2139 mutex_exit(dmfep->oplock);
2140 2140 bcopy(&mode, mp->b_cont->b_rptr, sizeof (mode));
2141 2141 }
2142 2142 break;
2143 2143
2144 2144 case LB_SET_MODE:
2145 2145 rv = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2146 2146 if (rv != 0)
2147 2147 break;
2148 2148 if (iocp->ioc_count != sizeof (mode)) {
2149 2149 rv = EINVAL;
2150 2150 break;
2151 2151 }
2152 2152 bcopy(mp->b_cont->b_rptr, &mode, sizeof (mode));
2153 2153
2154 2154 mutex_enter(dmfep->oplock);
2155 2155 dmfep->opmode &= ~LOOPBACK_MODE_MASK;
2156 2156 switch (mode) {
2157 2157 case 2:
2158 2158 dmfep->opmode |= LOOPBACK_PHY_D;
2159 2159 break;
2160 2160 case 1:
2161 2161 dmfep->opmode |= LOOPBACK_INTERNAL;
2162 2162 break;
2163 2163 default:
2164 2164 break;
2165 2165 }
2166 2166 if (!dmfep->suspended) {
2167 2167 dmfe_restart(dmfep);
2168 2168 }
2169 2169 mutex_exit(dmfep->oplock);
2170 2170 break;
2171 2171
2172 2172 default:
2173 2173 rv = EINVAL;
2174 2174 break;
2175 2175 }
2176 2176
2177 2177 if (rv == 0) {
2178 2178 miocack(wq, mp, iocp->ioc_count, 0);
2179 2179 } else {
2180 2180 miocnak(wq, mp, 0, rv);
2181 2181 }
2182 2182 }
2183 2183
2184 2184 int
2185 2185 dmfe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2186 2186 void *val)
2187 2187 {
2188 2188 dmfe_t *dmfep = arg;
2189 2189
2190 2190 return (mii_m_getprop(dmfep->mii, name, num, sz, val));
2191 2191 }
2192 2192
2193 2193 int
2194 2194 dmfe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2195 2195 const void *val)
2196 2196 {
2197 2197 dmfe_t *dmfep = arg;
2198 2198
2199 2199 return (mii_m_setprop(dmfep->mii, name, num, sz, val));
2200 2200 }
2201 2201
2202 2202 static void
2203 2203 dmfe_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
2204 2204 mac_prop_info_handle_t mph)
2205 2205 {
2206 2206 dmfe_t *dmfep = arg;
2207 2207
2208 2208 mii_m_propinfo(dmfep->mii, name, num, mph);
2209 2209 }
2210 2210
2211 2211 /*
2212 2212 * ========== Per-instance setup/teardown code ==========
2213 2213 */
2214 2214
2215 2215 /*
2216 2216 * Determine local MAC address & broadcast address for this interface
2217 2217 */
2218 2218 static void
2219 2219 dmfe_find_mac_address(dmfe_t *dmfep)
2220 2220 {
2221 2221 uchar_t *prop;
2222 2222 uint_t propsize;
2223 2223 int err;
2224 2224
2225 2225 /*
2226 2226 * We have to find the "vendor's factory-set address". This is
2227 2227 * the value of the property "local-mac-address", as set by OBP
2228 2228 * (or a .conf file!)
2229 2229 *
2230 2230 * If the property is not there, then we try to find the factory
2231 2231 	 * mac address from the device's serial EEPROM.
2232 2232 */
2233 2233 bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
2234 2234 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
2235 2235 DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
2236 2236 if (err == DDI_PROP_SUCCESS) {
2237 2237 if (propsize == ETHERADDRL)
2238 2238 ethaddr_copy(prop, dmfep->curr_addr);
2239 2239 ddi_prop_free(prop);
2240 2240 } else {
2241 2241 /* no property set... check eeprom */
2242 2242 dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
2243 2243 ETHERADDRL);
2244 2244 }
2245 2245 }
2246 2246
2247 2247 static int
2248 2248 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
2249 2249 size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
2250 2250 uint_t dma_flags, dma_area_t *dma_p)
2251 2251 {
2252 2252 ddi_dma_cookie_t dma_cookie;
2253 2253 uint_t ncookies;
2254 2254 int err;
2255 2255
2256 2256 /*
2257 2257 * Allocate handle
2258 2258 */
2259 2259 err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
2260 2260 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
2261 2261 if (err != DDI_SUCCESS) {
2262 2262 dmfe_error(dmfep, "DMA handle allocation failed");
2263 2263 return (DDI_FAILURE);
2264 2264 }
2265 2265
2266 2266 /*
2267 2267 * Allocate memory
2268 2268 */
2269 2269 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
2270 2270 attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2271 2271 DDI_DMA_SLEEP, NULL,
2272 2272 &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
2273 2273 if (err != DDI_SUCCESS) {
2274 2274 dmfe_error(dmfep, "DMA memory allocation failed: %d", err);
2275 2275 return (DDI_FAILURE);
2276 2276 }
2277 2277
2278 2278 /*
2279 2279 * Bind the two together
2280 2280 */
2281 2281 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2282 2282 dma_p->mem_va, dma_p->alength, dma_flags,
2283 2283 DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
2284 2284 if (err != DDI_DMA_MAPPED) {
2285 2285 dmfe_error(dmfep, "DMA mapping failed: %d", err);
2286 2286 return (DDI_FAILURE);
2287 2287 }
2288 2288 if ((dma_p->ncookies = ncookies) != 1) {
2289 2289 		dmfe_error(dmfep, "Too many DMA cookies: %d", ncookies);
2290 2290 return (DDI_FAILURE);
2291 2291 }
2292 2292
2293 2293 dma_p->mem_dvma = dma_cookie.dmac_address;
2294 2294 if (setup > 0) {
2295 2295 dma_p->setup_dvma = dma_p->mem_dvma + memsize;
2296 2296 dma_p->setup_va = dma_p->mem_va + memsize;
2297 2297 } else {
2298 2298 dma_p->setup_dvma = 0;
2299 2299 dma_p->setup_va = NULL;
2300 2300 }
2301 2301
2302 2302 return (DDI_SUCCESS);
2303 2303 }
2304 2304
2305 2305 /*
2306 2306 * This function allocates the transmit and receive buffers and descriptors.
2307 2307 */
2308 2308 static int
2309 2309 dmfe_alloc_bufs(dmfe_t *dmfep)
2310 2310 {
2311 2311 size_t memsize;
2312 2312 int err;
2313 2313
2314 2314 /*
2315 2315 * Allocate memory & handles for TX descriptor ring
2316 2316 */
2317 2317 memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
2318 2318 err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
2319 2319 &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2320 2320 &dmfep->tx_desc);
2321 2321 if (err != DDI_SUCCESS) {
2322 2322 dmfe_error(dmfep, "TX descriptor allocation failed");
2323 2323 return (DDI_FAILURE);
2324 2324 }
2325 2325
2326 2326 /*
2327 2327 * Allocate memory & handles for TX buffers
2328 2328 */
2329 2329 memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
2330 2330 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2331 2331 &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
2332 2332 &dmfep->tx_buff);
2333 2333 if (err != DDI_SUCCESS) {
2334 2334 dmfe_error(dmfep, "TX buffer allocation failed");
2335 2335 return (DDI_FAILURE);
2336 2336 }
2337 2337
2338 2338 /*
2339 2339 * Allocate memory & handles for RX descriptor ring
2340 2340 */
2341 2341 memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
2342 2342 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
2343 2343 &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2344 2344 &dmfep->rx_desc);
2345 2345 if (err != DDI_SUCCESS) {
2346 2346 dmfe_error(dmfep, "RX descriptor allocation failed");
2347 2347 return (DDI_FAILURE);
2348 2348 }
2349 2349
2350 2350 /*
2351 2351 * Allocate memory & handles for RX buffers
2352 2352 */
2353 2353 memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
2354 2354 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2355 2355 &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
2356 2356 if (err != DDI_SUCCESS) {
2357 2357 dmfe_error(dmfep, "RX buffer allocation failed");
2358 2358 return (DDI_FAILURE);
2359 2359 }
2360 2360
2361 2361 /*
2362 2362 * Allocate bitmasks for tx packet type tracking
2363 2363 */
2364 2364 dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2365 2365 dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2366 2366
2367 2367 return (DDI_SUCCESS);
2368 2368 }
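
The two kmem_zalloc() calls above size each packet-type bitmask at one bit per TX descriptor, i.e. n_desc / NBBY bytes. A standalone sketch of that bookkeeping, assuming a hypothetical ring of 128 descriptors (16 bytes per mask):

	#include <stdio.h>
	#include <string.h>

	#define	NBBY	8	/* bits per byte, as in <sys/param.h> */
	#define	N_DESC	128	/* hypothetical ring size */

	int
	main(void)
	{
		unsigned char tx_mcast[N_DESC / NBBY];	/* 128 bits = 16 bytes */
		int slot = 42;

		(void) memset(tx_mcast, 0, sizeof (tx_mcast));

		/* Mark descriptor <slot> as carrying a multicast packet ... */
		tx_mcast[slot / NBBY] |= 1 << (slot % NBBY);

		/* ... and test the bit again at reclaim time */
		(void) printf("desc %d multicast: %d\n", slot,
		    (tx_mcast[slot / NBBY] >> (slot % NBBY)) & 1);
		return (0);
	}
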
2369 2369
2370 2370 static void
2371 2371 dmfe_free_dma_mem(dma_area_t *dma_p)
2372 2372 {
2373 2373 if (dma_p->dma_hdl != NULL) {
2374 2374 if (dma_p->ncookies) {
2375 2375 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2376 2376 dma_p->ncookies = 0;
2377 2377 }
2378 2378 ddi_dma_free_handle(&dma_p->dma_hdl);
2379 2379 dma_p->dma_hdl = NULL;
2380 2380 dma_p->mem_dvma = 0;
2381 2381 dma_p->setup_dvma = 0;
2382 2382 }
2383 2383
2384 2384 if (dma_p->acc_hdl != NULL) {
2385 2385 ddi_dma_mem_free(&dma_p->acc_hdl);
2386 2386 dma_p->acc_hdl = NULL;
2387 2387 dma_p->mem_va = NULL;
2388 2388 dma_p->setup_va = NULL;
2389 2389 }
2390 2390 }
2391 2391
2392 2392 /*
2393 2393 * This routine frees the transmit and receive buffers and descriptors.
2394 2394 * Make sure the chip is stopped before calling it!
2395 2395 */
2396 2396 static void
2397 2397 dmfe_free_bufs(dmfe_t *dmfep)
2398 2398 {
2399 2399 dmfe_free_dma_mem(&dmfep->rx_buff);
2400 2400 dmfe_free_dma_mem(&dmfep->rx_desc);
2401 2401 dmfe_free_dma_mem(&dmfep->tx_buff);
2402 2402 dmfe_free_dma_mem(&dmfep->tx_desc);
2403 2403 if (dmfep->tx_mcast)
2404 2404 kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
2405 2405 if (dmfep->tx_bcast)
2406 2406 kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
2407 2407 }
2408 2408
2409 2409 static void
2410 2410 dmfe_unattach(dmfe_t *dmfep)
2411 2411 {
2412 2412 /*
2413 2413 * Clean up and free all DMFE data structures
2414 2414 */
2415 2415 if (dmfep->cycid != NULL) {
2416 2416 ddi_periodic_delete(dmfep->cycid);
2417 2417 dmfep->cycid = NULL;
2418 2418 }
2419 2419
2420 2420 if (dmfep->ksp_drv != NULL)
2421 2421 kstat_delete(dmfep->ksp_drv);
2422 2422 if (dmfep->progress & PROGRESS_HWINT) {
2423 2423 ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
2424 2424 }
2425 2425 if (dmfep->progress & PROGRESS_SOFTINT)
2426 2426 ddi_remove_softintr(dmfep->factotum_id);
2427 2427 if (dmfep->mii != NULL)
2428 2428 mii_free(dmfep->mii);
2429 2429 if (dmfep->progress & PROGRESS_MUTEX) {
2430 2430 mutex_destroy(dmfep->txlock);
2431 2431 mutex_destroy(dmfep->rxlock);
2432 2432 mutex_destroy(dmfep->oplock);
2433 2433 }
2434 2434 dmfe_free_bufs(dmfep);
2435 2435 if (dmfep->io_handle != NULL)
2436 2436 ddi_regs_map_free(&dmfep->io_handle);
2437 2437
2438 2438 kmem_free(dmfep, sizeof (*dmfep));
2439 2439 }
2440 2440
2441 2441 static int
2442 2442 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
2443 2443 {
2444 2444 ddi_acc_handle_t handle;
2445 2445 uint32_t regval;
2446 2446
2447 2447 if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
2448 2448 return (DDI_FAILURE);
2449 2449
2450 2450 /*
2451 2451 * Get vendor/device/revision. We expect (but don't check) that
2452 2452 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
2453 2453 */
2454 2454 idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
2455 2455 idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
2456 2456 idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
2457 2457
2458 2458 /*
2459 2459 * Turn on Bus Master Enable bit and ensure the device is not asleep
2460 2460 */
2461 2461 regval = pci_config_get32(handle, PCI_CONF_COMM);
2462 2462 pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
2463 2463
2464 2464 regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
2465 2465 pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
2466 2466 regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
2467 2467
2468 2468 pci_config_teardown(&handle);
2469 2469 return (DDI_SUCCESS);
2470 2470 }
2471 2471
2472 2472 struct ks_index {
2473 2473 int index;
2474 2474 char *name;
2475 2475 };
2476 2476
2477 2477 static const struct ks_index ks_drv_names[] = {
2478 2478 { KS_INTERRUPT, "intr" },
2479 2479 { KS_CYCLIC_RUN, "cyclic_run" },
2480 2480
2481 2481 { KS_TX_STALL, "tx_stall_detect" },
2482 2482 { KS_CHIP_ERROR, "chip_error_interrupt" },
2483 2483
2484 2484 { KS_FACTOTUM_RUN, "factotum_run" },
2485 2485 { KS_RECOVERY, "factotum_recover" },
2486 2486
2487 2487 { -1, NULL }
2488 2488 };
2489 2489
2490 2490 static void
2491 2491 dmfe_init_kstats(dmfe_t *dmfep, int instance)
2492 2492 {
2493 2493 kstat_t *ksp;
2494 2494 kstat_named_t *knp;
2495 2495 const struct ks_index *ksip;
2496 2496
2497 2497 /* no need to create MII stats, the mac module already does it */
2498 2498
2499 2499 /* Create and initialise driver-defined kstats */
2500 2500 ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
2501 2501 KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
2502 2502 if (ksp != NULL) {
2503 2503 for (knp = ksp->ks_data, ksip = ks_drv_names;
2504 2504 ksip->name != NULL; ++ksip) {
2505 2505 kstat_named_init(&knp[ksip->index], ksip->name,
2506 2506 KSTAT_DATA_UINT64);
2507 2507 }
2508 2508 dmfep->ksp_drv = ksp;
2509 2509 dmfep->knp_drv = knp;
2510 2510 kstat_install(ksp);
2511 2511 } else {
2512 2512 dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
2513 2513 }
2514 2514 }
2515 2515
2516 2516 static int
2517 2517 dmfe_resume(dev_info_t *devinfo)
2518 2518 {
2519 2519 dmfe_t *dmfep; /* Our private data */
2520 2520 chip_id_t chipid;
2521 2521 boolean_t restart = B_FALSE;
2522 2522
2523 2523 dmfep = ddi_get_driver_private(devinfo);
2524 2524 if (dmfep == NULL)
2525 2525 return (DDI_FAILURE);
2526 2526
2527 2527 /*
2528 2528 * Refuse to resume if the data structures aren't consistent
2529 2529 */
2530 2530 if (dmfep->devinfo != devinfo)
2531 2531 return (DDI_FAILURE);
2532 2532
2533 2533 /*
2534 2534 * Refuse to resume if the chip's changed its identity (*boggle*)
2535 2535 */
2536 2536 if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
2537 2537 return (DDI_FAILURE);
2538 2538 if (chipid.vendor != dmfep->chipid.vendor)
2539 2539 return (DDI_FAILURE);
2540 2540 if (chipid.device != dmfep->chipid.device)
2541 2541 return (DDI_FAILURE);
2542 2542 if (chipid.revision != dmfep->chipid.revision)
2543 2543 return (DDI_FAILURE);
2544 2544
2545 2545 mutex_enter(dmfep->oplock);
2546 2546 mutex_enter(dmfep->txlock);
2547 2547 dmfep->suspended = B_FALSE;
2548 2548 mutex_exit(dmfep->txlock);
2549 2549
2550 2550 /*
2551 2551 * All OK, reinitialise h/w & kick off MAC scheduling
2552 2552 */
2553 2553 if (dmfep->mac_state == DMFE_MAC_STARTED) {
2554 2554 dmfe_restart(dmfep);
2555 2555 restart = B_TRUE;
2556 2556 }
2557 2557 mutex_exit(dmfep->oplock);
2558 2558
2559 2559 if (restart) {
2560 2560 mii_resume(dmfep->mii);
2561 2561 mac_tx_update(dmfep->mh);
2562 2562 }
2563 2563 return (DDI_SUCCESS);
2564 2564 }
2565 2565
2566 2566 /*
2567 2567 * attach(9E) -- Attach a device to the system
2568 2568 *
2569 2569 * Called once for each board successfully probed.
2570 2570 */
2571 2571 static int
2572 2572 dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2573 2573 {
2574 2574 mac_register_t *macp;
2575 2575 dmfe_t *dmfep; /* Our private data */
2576 2576 uint32_t csr6;
2577 2577 int instance;
2578 2578 int err;
2579 2579
2580 2580 instance = ddi_get_instance(devinfo);
2581 2581
2582 2582 switch (cmd) {
2583 2583 default:
2584 2584 return (DDI_FAILURE);
2585 2585
2586 2586 case DDI_RESUME:
2587 2587 return (dmfe_resume(devinfo));
2588 2588
2589 2589 case DDI_ATTACH:
2590 2590 break;
2591 2591 }
2592 2592
2593 2593 dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
2594 2594 ddi_set_driver_private(devinfo, dmfep);
2595 2595 dmfep->devinfo = devinfo;
2596 2596 dmfep->dmfe_guard = DMFE_GUARD;
2597 2597
2598 2598 /*
2599 2599 * Initialize more fields in DMFE private data
2600 2600 * Determine the local MAC address
2601 2601 */
2602 2602 #if DMFEDEBUG
2603 2603 dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
2604 2604 debug_propname, dmfe_debug);
2605 2605 #endif /* DMFEDEBUG */
2606 2606 dmfep->cycid = NULL;
2607 2607 (void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
2608 2608 instance);
2609 2609
2610 2610 /*
2611 2611 * Check for custom "opmode-reg-value" property;
2612 2612 * if none, use the defaults below for CSR6 ...
2613 2613 */
2614 2614 csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
2615 2615 dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2616 2616 DDI_PROP_DONTPASS, opmode_propname, csr6);
2617 2617
2618 2618 /*
2619 2619 * Read chip ID & set up config space command register(s)
2620 2620 */
2621 2621 if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
2622 2622 dmfe_error(dmfep, "dmfe_config_init() failed");
2623 2623 goto attach_fail;
2624 2624 }
2625 2625
2626 2626 /*
2627 2627 * Map operating registers
2628 2628 */
2629 2629 err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
2630 2630 &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
2631 2631 if (err != DDI_SUCCESS) {
2632 2632 dmfe_error(dmfep, "ddi_regs_map_setup() failed");
2633 2633 goto attach_fail;
2634 2634 }
2635 2635
2636 2636 /*
2637 2637 * Get our MAC address.
2638 2638 */
2639 2639 dmfe_find_mac_address(dmfep);
2640 2640
2641 2641 /*
2642 2642 * Allocate the TX and RX descriptors/buffers.
2643 2643 */
2644 2644 dmfep->tx.n_desc = dmfe_tx_desc;
2645 2645 dmfep->rx.n_desc = dmfe_rx_desc;
2646 2646 err = dmfe_alloc_bufs(dmfep);
2647 2647 if (err != DDI_SUCCESS) {
2648 2648 goto attach_fail;
2649 2649 }
2650 2650
2651 2651 /*
2652 2652 * Add the softint handler
2653 2653 */
2654 2654 if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
2655 2655 NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
2656 2656 dmfe_error(dmfep, "ddi_add_softintr() failed");
2657 2657 goto attach_fail;
2658 2658 }
2659 2659 dmfep->progress |= PROGRESS_SOFTINT;
2660 2660
2661 2661 /*
2662 2662 * Add the h/w interrupt handler & initialise mutexen
2663 2663 */
2664 2664 if (ddi_get_iblock_cookie(devinfo, 0, &dmfep->iblk) != DDI_SUCCESS) {
2665 2665 dmfe_error(dmfep, "ddi_get_iblock_cookie() failed");
2666 2666 goto attach_fail;
2667 2667 }
2668 2668
2669 2669 mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
2670 2670 mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
2671 2671 mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
2672 2672 mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
2673 2673 dmfep->progress |= PROGRESS_MUTEX;
2674 2674
2675 2675 if (ddi_add_intr(devinfo, 0, NULL, NULL,
2676 2676 dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
2677 2677 dmfe_error(dmfep, "ddi_add_intr() failed");
2678 2678 goto attach_fail;
2679 2679 }
2680 2680 dmfep->progress |= PROGRESS_HWINT;
2681 2681
2682 2682 /*
2683 2683 * Create & initialise named kstats
2684 2684 */
2685 2685 dmfe_init_kstats(dmfep, instance);
2686 2686
2687 2687 /*
2688 2688 * Reset & initialise the chip and the ring buffers
2689 2689 * Initialise the (internal) PHY
2690 2690 */
2691 2691 mutex_enter(dmfep->oplock);
2692 2692 mutex_enter(dmfep->rxlock);
2693 2693 mutex_enter(dmfep->txlock);
2694 2694
2695 2695 dmfe_reset(dmfep);
2696 2696
2697 2697 /*
2698 2698 * Prepare the setup packet
2699 2699 */
2700 2700 bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
2701 2701 bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
2702 2702 dmfep->addr_set = B_FALSE;
2703 2703 dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
2704 2704 dmfep->mac_state = DMFE_MAC_RESET;
2705 2705
2706 2706 mutex_exit(dmfep->txlock);
2707 2707 mutex_exit(dmfep->rxlock);
2708 2708 mutex_exit(dmfep->oplock);
2709 2709
2710 2710 if (dmfe_init_phy(dmfep) != B_TRUE)
2711 2711 goto attach_fail;
2712 2712
2713 2713 /*
2714 2714 * Send a reasonable setup frame. This configures our starting
2715 2715 * address and the broadcast address.
2716 2716 */
2717 2717 (void) dmfe_m_unicst(dmfep, dmfep->curr_addr);
2718 2718
2719 2719 /*
2720 2720 * Initialize pointers to device specific functions which
2721 2721 * will be used by the generic layer.
2722 2722 */
2723 2723 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2724 2724 goto attach_fail;
2725 2725 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2726 2726 macp->m_driver = dmfep;
2727 2727 macp->m_dip = devinfo;
2728 2728 macp->m_src_addr = dmfep->curr_addr;
2729 2729 macp->m_callbacks = &dmfe_m_callbacks;
2730 2730 macp->m_min_sdu = 0;
2731 2731 macp->m_max_sdu = ETHERMTU;
2732 2732 macp->m_margin = VLAN_TAGSZ;
2733 2733
2734 2734 /*
2735 2735 * Finally, we're ready to register ourselves with the MAC layer
2736 2736 * interface; if this succeeds, we're all ready to start()
2737 2737 */
2738 2738 err = mac_register(macp, &dmfep->mh);
2739 2739 mac_free(macp);
2740 2740 if (err != 0)
2741 2741 goto attach_fail;
2742 2742 ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
2743 2743
2744 2744 /*
2745 2745 * Install the cyclic callback that we use to check for link
2746 2746 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
2747 2747 	 * is invoked periodically in kernel context.
2748 2748 */
2749 2749 ASSERT(dmfep->cycid == NULL);
2750 2750 dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
2751 2751 dmfe_tick_us * 1000, DDI_IPL_0);
2752 2752 return (DDI_SUCCESS);
2753 2753
2754 2754 attach_fail:
2755 2755 dmfe_unattach(dmfep);
2756 2756 return (DDI_FAILURE);
2757 2757 }
2758 2758
2759 2759 /*
2760 2760 * dmfe_suspend() -- suspend transmit/receive for powerdown
2761 2761 */
2762 2762 static int
2763 2763 dmfe_suspend(dmfe_t *dmfep)
2764 2764 {
2765 2765 /*
2766 2766 * Just stop processing ...
2767 2767 */
2768 2768 mii_suspend(dmfep->mii);
2769 2769 mutex_enter(dmfep->oplock);
2770 2770 dmfe_stop(dmfep);
2771 2771
2772 2772 mutex_enter(dmfep->txlock);
2773 2773 dmfep->suspended = B_TRUE;
2774 2774 mutex_exit(dmfep->txlock);
2775 2775 mutex_exit(dmfep->oplock);
2776 2776
2777 2777 return (DDI_SUCCESS);
2778 2778 }
2779 2779
2780 2780 /*
2781 2781 * detach(9E) -- Detach a device from the system
2782 2782 */
2783 2783 static int
2784 2784 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2785 2785 {
2786 2786 dmfe_t *dmfep;
2787 2787
2788 2788 dmfep = ddi_get_driver_private(devinfo);
2789 2789
2790 2790 switch (cmd) {
2791 2791 default:
2792 2792 return (DDI_FAILURE);
2793 2793
2794 2794 case DDI_SUSPEND:
2795 2795 return (dmfe_suspend(dmfep));
2796 2796
2797 2797 case DDI_DETACH:
2798 2798 break;
2799 2799 }
2800 2800
2801 2801 /*
2802 2802 * Unregister from the MAC subsystem. This can fail, in
2803 2803 * particular if there are DLPI style-2 streams still open -
2804 2804 * in which case we just return failure without shutting
2805 2805 * down chip operations.
2806 2806 */
2807 2807 if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
2808 2808 return (DDI_FAILURE);
2809 2809
2810 2810 /*
2811 2811 * All activity stopped, so we can clean up & exit
2812 2812 */
2813 2813 dmfe_unattach(dmfep);
2814 2814 return (DDI_SUCCESS);
2815 2815 }
2816 2816
2817 2817
2818 2818 /*
2819 2819 * ========== Module Loading Data & Entry Points ==========
2820 2820 */
2821 2821
2822 2822 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
2823 2823 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
2824 2824
2825 2825 static struct modldrv dmfe_modldrv = {
2826 2826 &mod_driverops, /* Type of module. This one is a driver */
2827 2827 dmfe_ident, /* short description */
2828 2828 &dmfe_dev_ops /* driver specific ops */
2829 2829 };
2830 2830
2831 2831 static struct modlinkage modlinkage = {
2832 - MODREV_1, (void *)&dmfe_modldrv, NULL
2832 + MODREV_1, { (void *)&dmfe_modldrv, NULL }
2833 2833 };
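
The hunk above is the substance of this webrev: struct modlinkage ends in an array member (ml_linkage[]), so the old flat initializer, though valid C, trips gcc's -Wmissing-braces once the blanket -Wno-missing-braces is removed from Makefile.uts. A standalone sketch of the rule, using a made-up demo_linkage rather than the real modctl structure:

	#include <stddef.h>

	struct demo_linkage {
		int	rev;
		void	*linkage[4];	/* stands in for ml_linkage[] */
	};

	/* Valid C, but warns under -Wmissing-braces: array level unbraced */
	struct demo_linkage flat = {
		1, (void *)&flat, NULL
	};

	/* Fully braced, as in the fix above: warning-clean */
	struct demo_linkage braced = {
		1, { (void *)&braced, NULL }
	};
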
2834 2834
2835 2835 int
2836 2836 _info(struct modinfo *modinfop)
2837 2837 {
2838 2838 return (mod_info(&modlinkage, modinfop));
2839 2839 }
2840 2840
2841 2841 int
2842 2842 _init(void)
2843 2843 {
2844 2844 uint32_t tmp100;
2845 2845 uint32_t tmp10;
2846 2846 int i;
2847 2847 int status;
2848 2848
2849 2849 /* Calculate global timing parameters */
2850 2850 	tmp100 = (dmfe_tx100_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
2851 2851 	tmp10 = (dmfe_tx10_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
2852 2852
2853 2853 for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
2854 2854 switch (i) {
2855 2855 case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
2856 2856 case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
2857 2857 /*
2858 2858 * The chip doesn't spontaneously recover from
2859 2859 * a stall in these states, so we reset early
2860 2860 */
2861 2861 stall_100_tix[i] = tmp100;
2862 2862 stall_10_tix[i] = tmp10;
2863 2863 break;
2864 2864
2865 2865 case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
2866 2866 default:
2867 2867 /*
2868 2868 * The chip has been seen to spontaneously recover
2869 2869 * after an apparent stall in the SUSPEND state,
2870 2870 * so we'll allow it rather longer to do so. As
2871 2871 * stalls in other states have not been observed,
2872 2872 * we'll use long timeouts for them too ...
2873 2873 */
2874 2874 stall_100_tix[i] = tmp100 * 20;
2875 2875 stall_10_tix[i] = tmp10 * 20;
2876 2876 break;
2877 2877 }
2878 2878 }
2879 2879
2880 2880 mac_init_ops(&dmfe_dev_ops, "dmfe");
2881 2881 status = mod_install(&modlinkage);
2882 2882 if (status == DDI_SUCCESS)
2883 2883 dmfe_log_init();
2884 2884
2885 2885 return (status);
2886 2886 }
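
The tmp100/tmp10 expressions at the top of _init() are integer ceiling division, rounding each stall timeout in microseconds up to a whole number of cyclic ticks. A worked example with hypothetical figures (the driver's actual defaults are defined elsewhere in the file):

	/*
	 * Assume dmfe_tick_us = 25000 and a stall limit of 210000 us:
	 *
	 *	truncating:	210000 / 25000			= 8 ticks
	 *	ceiling:	(210000 + 25000 - 1) / 25000	= 9 ticks
	 *
	 * Rounding up guarantees the transmitter is allowed at least the
	 * full stall period before recovery is triggered.
	 */
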
2887 2887
2888 2888 int
2889 2889 _fini(void)
2890 2890 {
2891 2891 int status;
2892 2892
2893 2893 status = mod_remove(&modlinkage);
2894 2894 if (status == DDI_SUCCESS) {
2895 2895 mac_fini_ops(&dmfe_dev_ops);
2896 2896 dmfe_log_fini();
2897 2897 }
2898 2898
2899 2899 return (status);
2900 2900 }