Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/hme/hme.c
+++ new/usr/src/uts/common/io/hme/hme.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25
26 26 /*
27 27 * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/debug.h>
32 32 #include <sys/stream.h>
33 33 #include <sys/cmn_err.h>
34 34 #include <sys/kmem.h>
35 35 #include <sys/crc32.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/conf.h>
38 38 #include <sys/strsun.h>
39 39 #include <sys/kstat.h>
40 40 #include <sys/pattr.h>
41 41 #include <sys/dlpi.h>
42 42 #include <sys/strsubr.h>
43 43 #include <sys/mac_provider.h>
44 44 #include <sys/mac_ether.h>
45 45 #include <sys/mii.h>
46 46 #include <sys/ethernet.h>
47 47 #include <sys/vlan.h>
48 48 #include <sys/pci.h>
49 49 #include <sys/policy.h>
50 50 #include <sys/ddi.h>
51 51 #include <sys/sunddi.h>
52 52 #include <sys/byteorder.h>
53 53 #include "hme_phy.h"
54 54 #include "hme_mac.h"
55 55 #include "hme.h"
56 56
57 57 typedef void (*fptrv_t)();
58 58
/*
 * Debug message categories consumed by hme_fault_msg() via the
 * HME_FAULT_MSG*() macros.  The ordering must match msg_string[].
 */
typedef enum {
	NO_MSG = 0,
	AUTOCONFIG_MSG,
	DISPLAY_MSG,
	INIT_MSG,
	UNINIT_MSG,
	CONFIG_MSG,
	MII_MSG,
	FATAL_ERR_MSG,
	NFATAL_ERR_MSG,
	XCVR_MSG,
	NOXCVR_MSG,
	ERX_MSG,
	DDI_MSG,
} msg_t;

/* Current debug verbosity threshold; NO_MSG suppresses debug output. */
msg_t hme_debug_level = NO_MSG;
76 76
/*
 * Printable names for each msg_t level, indexed by the msg_t value.
 * NOTE: a missing comma after "DISPLAY " previously concatenated it
 * with "INIT " into a single array element, shifting every subsequent
 * name off by one relative to the msg_t enumeration.
 */
static char *msg_string[] = {
	"NONE ",
	"AUTOCONFIG ",
	"DISPLAY ",
	"INIT ",
	"UNINIT ",
	"CONFIG ",
	"MII ",
	"FATAL_ERR ",
	"NFATAL_ERR ",
	"XCVR ",
	"NOXCVR ",
	"ERX ",
	"DDI ",
};
92 92
93 93 #define SEVERITY_NONE 0
94 94 #define SEVERITY_LOW 0
95 95 #define SEVERITY_MID 1
96 96 #define SEVERITY_HIGH 2
97 97 #define SEVERITY_UNKNOWN 99
98 98
99 99 #define FEPS_URUN_BUG
100 100 #define HME_CODEVIOL_BUG
101 101
102 102 #define KIOIP KSTAT_INTR_PTR(hmep->hme_intrstats)
103 103
104 104 /*
105 105 * The following variables are used for checking fixes in Sbus/FEPS 2.0
106 106 */
107 107 static int hme_urun_fix = 0; /* Bug fixed in Sbus/FEPS 2.0 */
108 108
109 109 /*
110 110 * The following variables are used for configuring various features
111 111 */
112 112 static int hme_64bit_enable = 1; /* Use 64-bit sbus transfers */
113 113 static int hme_reject_own = 1; /* Reject packets with own SA */
114 114 static int hme_ngu_enable = 0; /* Never Give Up mode */
115 115
116 116 char *hme_priv_prop[] = {
117 117 "_ipg0",
118 118 "_ipg1",
119 119 "_ipg2",
120 120 "_lance_mode",
121 121 NULL
122 122 };
123 123
124 124 static int hme_lance_mode = 1; /* to enable lance mode */
125 125 static int hme_ipg0 = 16;
126 126 static int hme_ipg1 = 8;
127 127 static int hme_ipg2 = 4;
128 128
129 129 /*
130 130 * The following parameters may be configured by the user. If they are not
131 131 * configured by the user, the values will be based on the capabilities of
132 132 * the transceiver.
133 133 * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
134 134 * which are NOT configured by the user.
135 135 */
136 136
137 137 #define HME_NOTUSR 0x0f000000
138 138 #define HME_MASK_1BIT 0x1
139 139 #define HME_MASK_5BIT 0x1f
140 140 #define HME_MASK_8BIT 0xff
141 141
142 142 /*
143 143 * All strings used by hme messaging functions
144 144 */
145 145
146 146 static char *no_xcvr_msg =
147 147 "No transceiver found.";
148 148
149 149 static char *burst_size_msg =
150 150 "Could not identify the burst size";
151 151
152 152 static char *unk_rx_ringsz_msg =
153 153 "Unknown receive RINGSZ";
154 154
155 155 static char *add_intr_fail_msg =
156 156 "ddi_add_intr(9F) failed";
157 157
158 158 static char *mregs_4global_reg_fail_msg =
159 159 "ddi_regs_map_setup(9F) for global reg failed";
160 160
161 161 static char *mregs_4etx_reg_fail_msg =
162 162 "ddi_map_regs for etx reg failed";
163 163
164 164 static char *mregs_4erx_reg_fail_msg =
165 165 "ddi_map_regs for erx reg failed";
166 166
167 167 static char *mregs_4bmac_reg_fail_msg =
168 168 "ddi_map_regs for bmac reg failed";
169 169
170 170 static char *mregs_4mif_reg_fail_msg =
171 171 "ddi_map_regs for mif reg failed";
172 172
173 173 static char *init_fail_gen_msg =
174 174 "Failed to initialize hardware/driver";
175 175
176 176 static char *ddi_nregs_fail_msg =
177 177 "ddi_dev_nregs failed(9F), returned %d";
178 178
179 179 static char *bad_num_regs_msg =
180 180 "Invalid number of registers.";
181 181
182 182
183 183 /* FATAL ERR msgs */
184 184 /*
185 185 * Function prototypes.
186 186 */
187 187 /* these two are global so that qfe can use them */
188 188 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
189 189 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
190 190 int hmequiesce(dev_info_t *);
191 191 static boolean_t hmeinit_xfer_params(struct hme *);
192 192 static uint_t hmestop(struct hme *);
193 193 static void hmestatinit(struct hme *);
194 194 static int hmeallocthings(struct hme *);
195 195 static void hmefreethings(struct hme *);
196 196 static int hmeallocbuf(struct hme *, hmebuf_t *, int);
197 197 static int hmeallocbufs(struct hme *);
198 198 static void hmefreebufs(struct hme *);
199 199 static void hmeget_hm_rev_property(struct hme *);
200 200 static boolean_t hmestart(struct hme *, mblk_t *);
201 201 static uint_t hmeintr(caddr_t);
202 202 static void hmereclaim(struct hme *);
203 203 static int hmeinit(struct hme *);
204 204 static void hmeuninit(struct hme *hmep);
205 205 static mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
206 206 static void hmesavecntrs(struct hme *);
207 207 static void hme_fatal_err(struct hme *, uint_t);
208 208 static void hme_nonfatal_err(struct hme *, uint_t);
209 209 static int hmeburstsizes(struct hme *);
210 210 static void send_bit(struct hme *, uint16_t);
211 211 static uint16_t get_bit_std(uint8_t, struct hme *);
212 212 static uint16_t hme_bb_mii_read(struct hme *, uint8_t, uint8_t);
213 213 static void hme_bb_mii_write(struct hme *, uint8_t, uint8_t, uint16_t);
214 214 static void hme_bb_force_idle(struct hme *);
215 215 static uint16_t hme_mii_read(void *, uint8_t, uint8_t);
216 216 static void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
217 217 static void hme_setup_mac_address(struct hme *, dev_info_t *);
218 218 static void hme_mii_notify(void *, link_state_t);
219 219
220 220 static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);
221 221
222 222 static void hme_check_acc_handle(char *, uint_t, struct hme *,
223 223 ddi_acc_handle_t);
224 224
225 225 /*
226 226 * Nemo (GLDv3) Functions.
227 227 */
228 228 static int hme_m_stat(void *, uint_t, uint64_t *);
229 229 static int hme_m_start(void *);
230 230 static void hme_m_stop(void *);
231 231 static int hme_m_promisc(void *, boolean_t);
232 232 static int hme_m_multicst(void *, boolean_t, const uint8_t *);
233 233 static int hme_m_unicst(void *, const uint8_t *);
234 234 static mblk_t *hme_m_tx(void *, mblk_t *);
235 235 static boolean_t hme_m_getcapab(void *, mac_capab_t, void *);
236 236 static int hme_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
237 237 static void hme_m_propinfo(void *, const char *, mac_prop_id_t,
238 238 mac_prop_info_handle_t);
239 239 static int hme_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
240 240 const void *);
241 241
242 242 static mii_ops_t hme_mii_ops = {
243 243 MII_OPS_VERSION,
244 244 hme_mii_read,
245 245 hme_mii_write,
246 246 hme_mii_notify,
247 247 NULL
248 248 };
249 249
250 250 static mac_callbacks_t hme_m_callbacks = {
251 251 MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
252 252 hme_m_stat,
253 253 hme_m_start,
254 254 hme_m_stop,
255 255 hme_m_promisc,
256 256 hme_m_multicst,
257 257 hme_m_unicst,
258 258 hme_m_tx,
259 259 NULL,
260 260 NULL,
261 261 hme_m_getcapab,
262 262 NULL,
263 263 NULL,
264 264 hme_m_setprop,
265 265 hme_m_getprop,
266 266 hme_m_propinfo
267 267 };
268 268
269 269 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
270 270 nodev, NULL, D_MP, NULL, hmequiesce);
271 271
272 272 #define HME_FAULT_MSG1(p, s, t, f) \
273 273 hme_fault_msg((p), (s), (t), (f));
274 274
275 275 #define HME_FAULT_MSG2(p, s, t, f, a) \
276 276 hme_fault_msg((p), (s), (t), (f), (a));
277 277
278 278 #define HME_FAULT_MSG3(p, s, t, f, a, b) \
279 279 hme_fault_msg((p), (s), (t), (f), (a), (b));
280 280
281 281 #define HME_FAULT_MSG4(p, s, t, f, a, b, c) \
282 282 hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
283 283
284 284 #define CHECK_MIFREG() \
285 285 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
286 286 #define CHECK_ETXREG() \
287 287 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
288 288 #define CHECK_ERXREG() \
289 289 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
290 290 #define CHECK_MACREG() \
291 291 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
292 292 #define CHECK_GLOBREG() \
293 293 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
294 294
295 295 /*
296 296 * Claim the device is ultra-capable of burst in the beginning. Use
297 297 * the value returned by ddi_dma_burstsizes() to actually set the HME
298 298 * global configuration register later.
299 299 *
300 300 * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
301 301 * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
302 302   * the burstsizes in both the lo and hi words.
303 303 */
304 304 #define HMELIMADDRLO ((uint64_t)0x00000000)
305 305 #define HMELIMADDRHI ((uint64_t)0xffffffff)
306 306
307 307 /*
308 308 * Note that rx and tx data buffers can be arbitrarily aligned, but
309 309 * that the descriptor rings need to be aligned on 2K boundaries, per
310 310 * the spec.
311 311 */
312 312 static ddi_dma_attr_t hme_dma_attr = {
313 313 DMA_ATTR_V0, /* version number. */
314 314 (uint64_t)HMELIMADDRLO, /* low address */
315 315 (uint64_t)HMELIMADDRHI, /* high address */
316 316 (uint64_t)0x00ffffff, /* address counter max */
317 317 (uint64_t)HME_HMDALIGN, /* alignment */
318 318 (uint_t)0x00700070, /* dlim_burstsizes for 32 and 64 bit xfers */
319 319 (uint32_t)0x1, /* minimum transfer size */
320 320 (uint64_t)0x7fffffff, /* maximum transfer size */
321 321 (uint64_t)0x00ffffff, /* maximum segment size */
322 322 1, /* scatter/gather list length */
323 323 512, /* granularity */
324 324 0 /* attribute flags */
325 325 };
326 326
327 327 static ddi_device_acc_attr_t hme_buf_attr = {
328 328 DDI_DEVICE_ATTR_V0,
329 329 DDI_NEVERSWAP_ACC,
330 330 DDI_STRICTORDER_ACC, /* probably could allow merging & caching */
331 331 DDI_DEFAULT_ACC,
332 332 };
333 333
334 334 static uchar_t pci_latency_timer = 0;
335 335
↓ open down ↓ |
335 lines elided |
↑ open up ↑ |
336 336 /*
337 337 * Module linkage information for the kernel.
338 338 */
339 339 static struct modldrv modldrv = {
340 340 &mod_driverops, /* Type of module. This one is a driver */
341 341 "Sun HME 10/100 Mb Ethernet",
342 342 &hme_dev_ops, /* driver ops */
343 343 };
344 344
345 345 static struct modlinkage modlinkage = {
346 - MODREV_1, &modldrv, NULL
346 + MODREV_1, { &modldrv, NULL }
347 347 };
348 348
349 349 /* <<<<<<<<<<<<<<<<<<<<<< Register operations >>>>>>>>>>>>>>>>>>>>> */
350 350
351 351 #define GET_MIFREG(reg) \
352 352 ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
353 353 #define PUT_MIFREG(reg, value) \
354 354 ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
355 355
356 356 #define GET_ETXREG(reg) \
357 357 ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
358 358 #define PUT_ETXREG(reg, value) \
359 359 ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
360 360 #define GET_ERXREG(reg) \
361 361 ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
362 362 #define PUT_ERXREG(reg, value) \
363 363 ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
364 364 #define GET_MACREG(reg) \
365 365 ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
366 366 #define PUT_MACREG(reg, value) \
367 367 ddi_put32(hmep->hme_bmacregh, \
368 368 (uint32_t *)&hmep->hme_bmacregp->reg, value)
369 369 #define GET_GLOBREG(reg) \
370 370 ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
371 371 #define PUT_GLOBREG(reg, value) \
372 372 ddi_put32(hmep->hme_globregh, \
373 373 (uint32_t *)&hmep->hme_globregp->reg, value)
374 374 #define PUT_TMD(ptr, paddr, len, flags) \
375 375 ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
376 376 ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags, \
377 377 len | flags)
378 378 #define GET_TMD_FLAGS(ptr) \
379 379 ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
380 380 #define PUT_RMD(ptr, paddr) \
381 381 ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
382 382 ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags, \
383 383 (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
384 384 #define GET_RMD_FLAGS(ptr) \
385 385 ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
386 386
387 387 #define GET_ROM8(offset) \
388 388 ddi_get8((hmep->hme_romh), (offset))
389 389
390 390 /*
391 391 * Ether_copy is not endian-correct. Define an endian-correct version.
392 392 */
393 393 #define ether_bcopy(a, b) (bcopy(a, b, 6))
394 394
395 395 /*
396 396 * Ether-type is specifically big-endian, but data region is unknown endian
397 397 */
398 398 #define get_ether_type(ptr) \
399 399 (((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
400 400
401 401 /* <<<<<<<<<<<<<<<<<<<<<< Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
402 402
403 403 #define BMAC_DEFAULT_JAMSIZE (0x04) /* jamsize equals 4 */
404 404 #define BMAC_LONG_JAMSIZE (0x10) /* jamsize equals 0x10 */
405 405 static int jamsize = BMAC_DEFAULT_JAMSIZE;
406 406
407 407
408 408 /*
409 409 * Calculate the bit in the multicast address filter that selects the given
410 410 * address.
411 411 */
412 412
413 413 static uint32_t
414 414 hmeladrf_bit(const uint8_t *addr)
415 415 {
416 416 uint32_t crc;
417 417
418 418 CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
419 419
420 420 /*
421 421 * Just want the 6 most significant bits.
422 422 */
423 423 return (crc >> 26);
424 424 }
425 425
426 426 /* <<<<<<<<<<<<<<<<<<<<<<<< Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
427 427
428 428 static void
429 429 send_bit(struct hme *hmep, uint16_t x)
430 430 {
431 431 PUT_MIFREG(mif_bbdata, x);
432 432 PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
433 433 PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
434 434 }
435 435
436 436
437 437 /*
438 438 * To read the MII register bits according to the IEEE Standard
439 439 */
440 440 static uint16_t
441 441 get_bit_std(uint8_t phyad, struct hme *hmep)
442 442 {
443 443 uint16_t x;
444 444
445 445 PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
446 446 drv_usecwait(1); /* wait for >330 ns for stable data */
447 447 if (phyad == HME_INTERNAL_PHYAD)
448 448 x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
449 449 else
450 450 x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
451 451 PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
452 452 return (x);
453 453 }
454 454
455 455 #define SEND_BIT(x) send_bit(hmep, x)
456 456 #define GET_BIT_STD(phyad, x) x = get_bit_std(phyad, hmep)
457 457
458 458
459 459 static void
460 460 hme_bb_mii_write(struct hme *hmep, uint8_t phyad, uint8_t regad, uint16_t data)
461 461 {
462 462 int i;
463 463
464 464 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
465 465 (void) hme_bb_force_idle(hmep);
466 466 SEND_BIT(0); SEND_BIT(1); /* <ST> */
467 467 SEND_BIT(0); SEND_BIT(1); /* <OP> */
468 468
469 469 for (i = 4; i >= 0; i--) { /* <AAAAA> */
470 470 SEND_BIT((phyad >> i) & 1);
471 471 }
472 472
473 473 for (i = 4; i >= 0; i--) { /* <RRRRR> */
474 474 SEND_BIT((regad >> i) & 1);
475 475 }
476 476
477 477 SEND_BIT(1); SEND_BIT(0); /* <TA> */
478 478
479 479 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
480 480 SEND_BIT((data >> i) & 1);
481 481 }
482 482
483 483 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
484 484 CHECK_MIFREG();
485 485 }
486 486
487 487 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
488 488 static uint16_t
489 489 hme_bb_mii_read(struct hme *hmep, uint8_t phyad, uint8_t regad)
490 490 {
491 491 int i;
492 492 uint32_t x;
493 493 uint16_t data = 0;
494 494
495 495 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
496 496 (void) hme_bb_force_idle(hmep);
497 497 SEND_BIT(0); SEND_BIT(1); /* <ST> */
498 498 SEND_BIT(1); SEND_BIT(0); /* <OP> */
499 499 for (i = 4; i >= 0; i--) { /* <AAAAA> */
500 500 SEND_BIT((phyad >> i) & 1);
501 501 }
502 502 for (i = 4; i >= 0; i--) { /* <RRRRR> */
503 503 SEND_BIT((regad >> i) & 1);
504 504 }
505 505
506 506 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
507 507
508 508 GET_BIT_STD(phyad, x);
509 509 GET_BIT_STD(phyad, x); /* <TA> */
510 510 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
511 511 GET_BIT_STD(phyad, x);
512 512 data += (x << i);
513 513 }
514 514 /*
515 515 * Kludge to get the Transceiver out of hung mode
516 516 */
517 517 GET_BIT_STD(phyad, x);
518 518 GET_BIT_STD(phyad, x);
519 519 GET_BIT_STD(phyad, x);
520 520 CHECK_MIFREG();
521 521 return (data);
522 522 }
523 523
524 524
/*
 * Send 33 consecutive one bits to force the transceiver's management
 * interface back to the idle state (the standard requires >= 32).
 */
static void
hme_bb_force_idle(struct hme *hmep)
{
	int n;

	for (n = 33; n > 0; n--)
		SEND_BIT(1);
}
534 534
535 535 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
536 536
537 537
538 538 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
539 539
540 540 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
541 541 static uint16_t
542 542 hme_mii_read(void *arg, uint8_t phyad, uint8_t regad)
543 543 {
544 544 struct hme *hmep = arg;
545 545 uint32_t frame;
546 546 uint32_t tmp_mif;
547 547 uint32_t tmp_xif;
548 548
549 549 tmp_mif = GET_MIFREG(mif_cfg);
550 550 tmp_xif = GET_MACREG(xifc);
551 551
552 552 switch (phyad) {
553 553 case HME_EXTERNAL_PHYAD:
554 554 PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
555 555 PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
556 556 break;
557 557 case HME_INTERNAL_PHYAD:
558 558 PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
559 559 PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
560 560 break;
561 561 default:
562 562 return (0xffff);
563 563 }
564 564
565 565 if (!hmep->hme_frame_enable) {
566 566 frame = (hme_bb_mii_read(hmep, phyad, regad));
567 567 PUT_MACREG(xifc, tmp_xif);
568 568 PUT_MIFREG(mif_cfg, tmp_mif);
569 569 return (frame & 0xffff);
570 570 }
571 571
572 572 PUT_MIFREG(mif_frame,
573 573 HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
574 574 (regad << HME_MIF_FRREGAD_SHIFT));
575 575 /*
576 576 * HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
577 577 */
578 578 HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
579 579 frame = GET_MIFREG(mif_frame);
580 580 CHECK_MIFREG();
581 581
582 582 PUT_MACREG(xifc, tmp_xif);
583 583 PUT_MIFREG(mif_cfg, tmp_mif);
584 584
585 585 if ((frame & HME_MIF_FRTA0) == 0) {
586 586
587 587
588 588 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, MII_MSG,
589 589 "MIF Read failure");
590 590 return (0xffff);
591 591 }
592 592 return ((uint16_t)(frame & HME_MIF_FRDATA));
593 593 }
594 594
595 595 static void
596 596 hme_mii_write(void *arg, uint8_t phyad, uint8_t regad, uint16_t data)
597 597 {
598 598 struct hme *hmep = arg;
599 599 uint32_t frame;
600 600 uint32_t tmp_mif;
601 601 uint32_t tmp_xif;
602 602
603 603 tmp_mif = GET_MIFREG(mif_cfg);
604 604 tmp_xif = GET_MACREG(xifc);
605 605
606 606 switch (phyad) {
607 607 case HME_EXTERNAL_PHYAD:
608 608 PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
609 609 PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
610 610 break;
611 611 case HME_INTERNAL_PHYAD:
612 612 PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
613 613 PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
614 614 break;
615 615 default:
616 616 return;
617 617 }
618 618
619 619 if (!hmep->hme_frame_enable) {
620 620 hme_bb_mii_write(hmep, phyad, regad, data);
621 621 PUT_MACREG(xifc, tmp_xif);
622 622 PUT_MIFREG(mif_cfg, tmp_mif);
623 623 return;
624 624 }
625 625
626 626 PUT_MIFREG(mif_frame,
627 627 HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
628 628 (regad << HME_MIF_FRREGAD_SHIFT) | data);
629 629 /*
630 630 * HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
631 631 */
632 632 HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
633 633 frame = GET_MIFREG(mif_frame);
634 634 PUT_MACREG(xifc, tmp_xif);
635 635 PUT_MIFREG(mif_cfg, tmp_mif);
636 636 CHECK_MIFREG();
637 637 if ((frame & HME_MIF_FRTA0) == 0) {
638 638 HME_FAULT_MSG1(hmep, SEVERITY_MID, MII_MSG,
639 639 "MIF Write failure");
640 640 }
641 641 }
642 642
643 643 static void
644 644 hme_mii_notify(void *arg, link_state_t link)
645 645 {
646 646 struct hme *hmep = arg;
647 647
648 648 if (link == LINK_STATE_UP) {
649 649 (void) hmeinit(hmep);
650 650 }
651 651 mac_link_update(hmep->hme_mh, link);
652 652 }
653 653
654 654 /* <<<<<<<<<<<<<<<<<<<<<<<<<<< LOADABLE ENTRIES >>>>>>>>>>>>>>>>>>>>>>> */
655 655
656 656 int
657 657 _init(void)
658 658 {
659 659 int status;
660 660
661 661 mac_init_ops(&hme_dev_ops, "hme");
662 662 if ((status = mod_install(&modlinkage)) != 0) {
663 663 mac_fini_ops(&hme_dev_ops);
664 664 }
665 665 return (status);
666 666 }
667 667
668 668 int
669 669 _fini(void)
670 670 {
671 671 int status;
672 672
673 673 if ((status = mod_remove(&modlinkage)) == 0) {
674 674 mac_fini_ops(&hme_dev_ops);
675 675 }
676 676 return (status);
677 677 }
678 678
679 679 int
680 680 _info(struct modinfo *modinfop)
681 681 {
682 682 return (mod_info(&modlinkage, modinfop));
683 683 }
684 684
685 685 /*
686 686 * ddi_dma_sync() a TMD or RMD descriptor.
687 687 */
688 688 #define HMESYNCRMD(num, who) \
689 689 (void) ddi_dma_sync(hmep->hme_rmd_dmah, \
690 690 (num * sizeof (struct hme_rmd)), \
691 691 sizeof (struct hme_rmd), \
692 692 who)
693 693
↓ open down ↓ |
337 lines elided |
↑ open up ↑ |
694 694 #define HMESYNCTMD(num, who) \
695 695 (void) ddi_dma_sync(hmep->hme_tmd_dmah, \
696 696 (num * sizeof (struct hme_tmd)), \
697 697 sizeof (struct hme_tmd), \
698 698 who)
699 699
700 700 /*
701 701 * Ethernet broadcast address definition.
702 702 */
703 703 static struct ether_addr etherbroadcastaddr = {
704 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
704 + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
705 705 };
706 706
707 707 /*
708 708 * MIB II broadcast/multicast packets
709 709 */
710 710 #define IS_BROADCAST(pkt) (bcmp(pkt, ðerbroadcastaddr, ETHERADDRL) == 0)
711 711 #define IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
712 712 #define BUMP_InNUcast(hmep, pkt) \
713 713 if (IS_MULTICAST(pkt)) { \
714 714 if (IS_BROADCAST(pkt)) { \
715 715 hmep->hme_brdcstrcv++; \
716 716 } else { \
717 717 hmep->hme_multircv++; \
718 718 } \
719 719 }
720 720 #define BUMP_OutNUcast(hmep, pkt) \
721 721 if (IS_MULTICAST(pkt)) { \
722 722 if (IS_BROADCAST(pkt)) { \
723 723 hmep->hme_brdcstxmt++; \
724 724 } else { \
725 725 hmep->hme_multixmt++; \
726 726 } \
727 727 }
728 728
729 729 static int
730 730 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
731 731 {
732 732 char propstr[80];
733 733 int i, needprop = 0;
734 734 struct ether_addr local_mac;
735 735
736 736 if (strcmp(vpdname, "NA") == 0) {
737 737 (void) strcpy(propstr, "local-mac-address");
738 738 needprop = 1;
739 739 } else if (strcmp(vpdname, "Z0") == 0) {
740 740 (void) strcpy(propstr, "model");
741 741 needprop = 1;
742 742 } else if (strcmp(vpdname, "Z1") == 0) {
743 743 (void) strcpy(propstr, "board-model");
744 744 needprop = 1;
745 745 }
746 746
747 747 if (needprop == 1) {
748 748
749 749 if (strcmp(propstr, "local-mac-address") == 0) {
750 750 for (i = 0; i < ETHERADDRL; i++)
751 751 local_mac.ether_addr_octet[i] =
752 752 (uchar_t)vpdstr[i];
753 753 if (ddi_prop_create(DDI_DEV_T_NONE, dip,
754 754 DDI_PROP_CANSLEEP, propstr,
755 755 (char *)local_mac.ether_addr_octet, ETHERADDRL)
756 756 != DDI_SUCCESS) {
757 757 return (DDI_FAILURE);
758 758 }
759 759 } else {
760 760 if (ddi_prop_create(DDI_DEV_T_NONE, dip,
761 761 DDI_PROP_CANSLEEP, propstr, vpdstr,
762 762 strlen(vpdstr)+1) != DDI_SUCCESS) {
763 763 return (DDI_FAILURE);
764 764 }
765 765 }
766 766 }
767 767 return (0);
768 768 }
769 769
770 770 /*
771 771 * Get properties from old VPD
772 772 * for PCI cards
773 773 */
774 774 static int
775 775 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
776 776 {
777 777 struct hme *hmep;
778 778 int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
779 779 char kw_namestr[3];
780 780 char kw_fieldstr[256];
781 781 int i;
782 782
783 783 hmep = ddi_get_driver_private(dip);
784 784
785 785 vpd_start = vpd_base;
786 786
787 787 if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
788 788 return (1); /* error */
789 789 } else {
790 790 vpd_len = 9;
791 791 }
792 792
793 793 /* Get local-mac-address */
794 794 kw_start = vpd_start + 3; /* Location of 1st keyword */
795 795 kw_ptr = kw_start;
796 796 while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
797 797 kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
798 798 kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
799 799 kw_namestr[2] = '\0';
800 800 kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
801 801 for (i = 0, kw_ptr += 3; i < kw_len; i++)
802 802 kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
803 803 kw_fieldstr[i] = '\0';
804 804 if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
805 805 return (DDI_FAILURE);
806 806 }
807 807 kw_ptr += kw_len;
808 808 } /* next keyword */
809 809
810 810 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
811 811 "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
812 812 return (DDI_FAILURE);
813 813 }
814 814 return (0);
815 815 }
816 816
817 817
818 818 /*
819 819 * Get properties from new VPD
820 820 * for CompactPCI cards
821 821 */
822 822 static int
823 823 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
824 824 {
825 825 struct hme *hmep;
826 826 int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
827 827 char kw_namestr[3];
828 828 char kw_fieldstr[256];
829 829 int maxvpdsize, i;
830 830
831 831 hmep = ddi_get_driver_private(dip);
832 832
833 833 maxvpdsize = 1024; /* Real size not known until after it is read */
834 834
835 835 vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
836 836 ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) +3;
837 837 vpd_start = vpd_base + vpd_start;
838 838 while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
839 839 if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
840 840 break; /* no VPD found */
841 841 } else {
842 842 vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
843 843 + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
844 844 + 2]) & 0xff) << 8);
845 845 }
846 846 /* Get all keywords in this VPD */
847 847 kw_start = vpd_start + 3; /* Location of 1st keyword */
848 848 kw_ptr = kw_start;
849 849 while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
850 850 kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
851 851 kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
852 852 kw_namestr[2] = '\0';
853 853 kw_len =
854 854 (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
855 855 for (i = 0, kw_ptr += 3; i < kw_len; i++)
856 856 kw_fieldstr[i] =
857 857 GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
858 858 kw_fieldstr[i] = '\0';
859 859 if (hme_create_prop_from_kw(dip, kw_namestr,
860 860 kw_fieldstr)) {
861 861 return (DDI_FAILURE);
862 862 }
863 863 kw_ptr += kw_len;
864 864 } /* next keyword */
865 865 vpd_start += (vpd_len + 3);
866 866 } /* next VPD */
867 867 return (0);
868 868 }
869 869
870 870
871 871 /*
872 872 * Get properties from VPD
873 873 */
874 874 static int
875 875 hme_get_vpd_props(dev_info_t *dip)
876 876 {
877 877 struct hme *hmep;
878 878 int v0, v1, vpd_base;
879 879 int i, epromsrchlimit;
880 880
881 881
882 882 hmep = ddi_get_driver_private(dip);
883 883
884 884 v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
885 885 v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
886 886 v0 = ((v0 & 0xff) << 8 | v1);
887 887
888 888 if ((v0 & 0xffff) != 0x55aa) {
889 889 cmn_err(CE_NOTE, " Valid pci prom not found \n");
890 890 return (1);
891 891 }
892 892
893 893 epromsrchlimit = 4096;
894 894 for (i = 2; i < epromsrchlimit; i++) {
895 895 /* "PCIR" */
896 896 if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
897 897 ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
898 898 ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
899 899 ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
900 900 vpd_base =
901 901 (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
902 902 (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
903 903 break; /* VPD pointer found */
904 904 }
905 905 }
906 906
907 907 /* No VPD found */
908 908 if (vpd_base == 0) {
909 909 cmn_err(CE_NOTE, " Vital Product Data pointer not found \n");
910 910 return (1);
911 911 }
912 912
913 913 v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
914 914 if (v0 == 0x82) {
915 915 if (hme_get_newvpd_props(dip, vpd_base))
916 916 return (1);
917 917 return (0);
918 918 } else if (v0 == 0x90) {
919 919 /* If we are are SUNW,qfe card, look for the Nth "NA" descr */
920 920 if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12]) != 0x79) &&
921 921 GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
922 922 vpd_base += hmep->hme_devno * 12;
923 923 }
924 924 if (hme_get_oldvpd_props(dip, vpd_base))
925 925 return (1);
926 926 return (0);
927 927 } else
928 928 return (1); /* unknown start byte in VPD */
929 929 }
930 930
931 931 /*
932 932 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
933 933 * cards, so we have to extract it from the ebus bridge that is
934 934 * function zero of the same device. This is a bit of an ugly hack.
935 935 * (The ebus bridge leaves the entire ROM mapped at base address
936 936 * register 0x10.)
937 937 */
938 938
939 939 typedef struct {
940 940 struct hme *hmep;
941 941 dev_info_t *parent;
942 942 uint8_t bus, dev;
943 943 ddi_acc_handle_t acch;
944 944 caddr_t romp;
945 945 } ebus_rom_t;
946 946
947 947 static int
948 948 hme_mapebusrom(dev_info_t *dip, void *arg)
949 949 {
950 950 int *regs;
951 951 unsigned nregs;
952 952 int reg;
953 953 ebus_rom_t *rom = arg;
954 954 struct hme *hmep = rom->hmep;
955 955
956 956 /*
957 957 * We only want to look at our peers. Skip our parent.
958 958 */
959 959 if (dip == rom->parent) {
960 960 return (DDI_WALK_PRUNESIB);
961 961 }
962 962
963 963 if (ddi_get_parent(dip) != rom->parent)
964 964 return (DDI_WALK_CONTINUE);
965 965
966 966 if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
967 967 "reg", ®s, &nregs)) != DDI_PROP_SUCCESS) {
968 968 return (DDI_WALK_PRUNECHILD);
969 969 }
970 970
971 971 if (nregs < 1) {
972 972 ddi_prop_free(regs);
973 973 return (DDI_WALK_PRUNECHILD);
974 974 }
975 975 reg = regs[0];
976 976 ddi_prop_free(regs);
977 977
978 978 /*
979 979 * Look for function 0 on our bus and device. If the device doesn't
980 980 * match, it might be an alternate peer, in which case we don't want
981 981 * to examine any of its children.
982 982 */
983 983 if ((PCI_REG_BUS_G(reg) != rom->bus) ||
984 984 (PCI_REG_DEV_G(reg) != rom->dev) ||
985 985 (PCI_REG_FUNC_G(reg) != 0)) {
986 986 return (DDI_WALK_PRUNECHILD);
987 987 }
988 988
989 989 (void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
990 990 &rom->acch);
991 991 /*
992 992 * If we can't map the registers, the caller will notice that
993 993 * the acch is NULL.
994 994 */
995 995 return (DDI_WALK_TERMINATE);
996 996 }
997 997
998 998 static int
999 999 hmeget_promebus(dev_info_t *dip)
1000 1000 {
1001 1001 ebus_rom_t rom;
1002 1002 int *regs;
1003 1003 unsigned nregs;
1004 1004 struct hme *hmep;
1005 1005
1006 1006 hmep = ddi_get_driver_private(dip);
1007 1007
1008 1008 bzero(&rom, sizeof (rom));
1009 1009
1010 1010 /*
1011 1011 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
1012 1012 * cards, so we have to extract it from the eBus bridge that is
1013 1013 * function zero. This is a bit of an ugly hack.
1014 1014 */
1015 1015 if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
1016 1016 "reg", ®s, &nregs)) != DDI_PROP_SUCCESS) {
1017 1017 return (DDI_FAILURE);
1018 1018 }
1019 1019
1020 1020 if (nregs < 5) {
1021 1021 ddi_prop_free(regs);
1022 1022 return (DDI_FAILURE);
1023 1023 }
1024 1024 rom.hmep = hmep;
1025 1025 rom.bus = PCI_REG_BUS_G(regs[0]);
1026 1026 rom.dev = PCI_REG_DEV_G(regs[0]);
1027 1027 hmep->hme_devno = rom.dev;
1028 1028 rom.parent = ddi_get_parent(dip);
1029 1029
1030 1030 /*
1031 1031 * The implementation of ddi_walk_devs says that we must not
1032 1032 * be called during autoconfiguration. However, it turns out
1033 1033 * that it is safe to call this during our attach routine,
1034 1034 * because we are not a nexus device.
1035 1035 *
1036 1036 * Previously we rooted our search at our immediate parent,
1037 1037 * but this triggered an assertion panic in debug kernels.
1038 1038 */
1039 1039 ddi_walk_devs(ddi_root_node(), hme_mapebusrom, &rom);
1040 1040
1041 1041 if (rom.acch) {
1042 1042 hmep->hme_romh = rom.acch;
1043 1043 hmep->hme_romp = (unsigned char *)rom.romp;
1044 1044 return (DDI_SUCCESS);
1045 1045 }
1046 1046 return (DDI_FAILURE);
1047 1047 }
1048 1048
1049 1049 static int
1050 1050 hmeget_promprops(dev_info_t *dip)
1051 1051 {
1052 1052 struct hme *hmep;
1053 1053 int rom_bar;
1054 1054 ddi_acc_handle_t cfg_handle;
1055 1055 struct {
1056 1056 uint16_t vendorid;
1057 1057 uint16_t devid;
1058 1058 uint16_t command;
1059 1059 uint16_t status;
1060 1060 uint32_t junk1;
1061 1061 uint8_t cache_line;
1062 1062 uint8_t latency;
1063 1063 uint8_t header;
1064 1064 uint8_t bist;
1065 1065 uint32_t base;
1066 1066 uint32_t base14;
1067 1067 uint32_t base18;
1068 1068 uint32_t base1c;
1069 1069 uint32_t base20;
1070 1070 uint32_t base24;
1071 1071 uint32_t base28;
1072 1072 uint32_t base2c;
1073 1073 uint32_t base30;
1074 1074 } *cfg_ptr;
1075 1075
1076 1076 hmep = ddi_get_driver_private(dip);
1077 1077
1078 1078
1079 1079 /*
1080 1080 * map configuration space
1081 1081 */
1082 1082 if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1083 1083 0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1084 1084 return (DDI_FAILURE);
1085 1085 }
1086 1086
1087 1087 /*
1088 1088 * Enable bus-master and memory accesses
1089 1089 */
1090 1090 ddi_put16(cfg_handle, &cfg_ptr->command,
1091 1091 PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
1092 1092 PCI_COMM_MAE | PCI_COMM_ME);
1093 1093
1094 1094 /*
1095 1095 * Enable rom accesses
1096 1096 */
1097 1097 rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
1098 1098 ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
1099 1099
1100 1100
1101 1101 if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
1102 1102 &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
1103 1103 (hmeget_promebus(dip) != DDI_SUCCESS)) {
1104 1104
1105 1105 if (cfg_ptr)
1106 1106 ddi_regs_map_free(&cfg_handle);
1107 1107 return (DDI_FAILURE);
1108 1108 } else {
1109 1109 if (hme_get_vpd_props(dip))
1110 1110 return (DDI_FAILURE);
1111 1111 }
1112 1112 if (hmep->hme_romp)
1113 1113 ddi_regs_map_free(&hmep->hme_romh);
1114 1114 if (cfg_ptr)
1115 1115 ddi_regs_map_free(&cfg_handle);
1116 1116 return (DDI_SUCCESS);
1117 1117
1118 1118 }
1119 1119
1120 1120 static void
1121 1121 hmeget_hm_rev_property(struct hme *hmep)
1122 1122 {
1123 1123 int hm_rev;
1124 1124
1125 1125
1126 1126 hm_rev = hmep->asic_rev;
1127 1127 switch (hm_rev) {
1128 1128 case HME_2P1_REVID:
1129 1129 case HME_2P1_REVID_OBP:
1130 1130 HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1131 1131 "SBus 2.1 Found (Rev Id = %x)", hm_rev);
1132 1132 hmep->hme_frame_enable = 1;
1133 1133 break;
1134 1134
1135 1135 case HME_2P0_REVID:
1136 1136 HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1137 1137 "SBus 2.0 Found (Rev Id = %x)", hm_rev);
1138 1138 break;
1139 1139
1140 1140 case HME_1C0_REVID:
1141 1141 HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1142 1142 "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
1143 1143 break;
1144 1144
1145 1145 default:
1146 1146 HME_FAULT_MSG3(hmep, SEVERITY_NONE, DISPLAY_MSG,
1147 1147 "%s (Rev Id = %x) Found",
1148 1148 (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
1149 1149 hmep->hme_frame_enable = 1;
1150 1150 hmep->hme_lance_mode_enable = 1;
1151 1151 hmep->hme_rxcv_enable = 1;
1152 1152 break;
1153 1153 }
1154 1154 }
1155 1155
1156 1156 /*
1157 1157 * Interface exists: make available by filling in network interface
1158 1158 * record. System will initialize the interface when it is ready
1159 1159 * to accept packets.
1160 1160 */
1161 1161 int
1162 1162 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1163 1163 {
1164 1164 struct hme *hmep;
1165 1165 mac_register_t *macp = NULL;
1166 1166 int regno;
1167 1167 int hm_rev = 0;
1168 1168 int prop_len = sizeof (int);
1169 1169 ddi_acc_handle_t cfg_handle;
1170 1170 struct {
1171 1171 uint16_t vendorid;
1172 1172 uint16_t devid;
1173 1173 uint16_t command;
1174 1174 uint16_t status;
1175 1175 uint8_t revid;
1176 1176 uint8_t j1;
1177 1177 uint16_t j2;
1178 1178 } *cfg_ptr;
1179 1179
1180 1180 switch (cmd) {
1181 1181 case DDI_ATTACH:
1182 1182 break;
1183 1183
1184 1184 case DDI_RESUME:
1185 1185 if ((hmep = ddi_get_driver_private(dip)) == NULL)
1186 1186 return (DDI_FAILURE);
1187 1187
1188 1188 hmep->hme_flags &= ~HMESUSPENDED;
1189 1189
1190 1190 mii_resume(hmep->hme_mii);
1191 1191
1192 1192 if (hmep->hme_started)
1193 1193 (void) hmeinit(hmep);
1194 1194 return (DDI_SUCCESS);
1195 1195
1196 1196 default:
1197 1197 return (DDI_FAILURE);
1198 1198 }
1199 1199
1200 1200 /*
1201 1201 * Allocate soft device data structure
1202 1202 */
1203 1203 hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);
1204 1204
1205 1205 /*
1206 1206 * Might as well set up elements of data structure
1207 1207 */
1208 1208 hmep->dip = dip;
1209 1209 hmep->instance = ddi_get_instance(dip);
1210 1210 hmep->pagesize = ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
1211 1211
1212 1212 /*
1213 1213 * Might as well setup the driver private
1214 1214 * structure as part of the dip.
1215 1215 */
1216 1216 ddi_set_driver_private(dip, hmep);
1217 1217
1218 1218 /*
1219 1219 * Reject this device if it's in a slave-only slot.
1220 1220 */
1221 1221 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
1222 1222 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1223 1223 "Dev not used - dev in slave only slot");
1224 1224 goto error_state;
1225 1225 }
1226 1226
1227 1227 /*
1228 1228 * Map in the device registers.
1229 1229 *
1230 1230 * Reg # 0 is the Global register set
1231 1231 * Reg # 1 is the ETX register set
1232 1232 * Reg # 2 is the ERX register set
1233 1233 * Reg # 3 is the BigMAC register set.
1234 1234 * Reg # 4 is the MIF register set
1235 1235 */
1236 1236 if (ddi_dev_nregs(dip, ®no) != (DDI_SUCCESS)) {
1237 1237 HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
1238 1238 ddi_nregs_fail_msg, regno);
1239 1239 goto error_state;
1240 1240 }
1241 1241
1242 1242 switch (regno) {
1243 1243 case 5:
1244 1244 hmep->hme_cheerio_mode = 0;
1245 1245 break;
1246 1246 case 2:
1247 1247 case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
1248 1248 hmep->hme_cheerio_mode = 1;
1249 1249 break;
1250 1250 default:
1251 1251 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
1252 1252 bad_num_regs_msg);
1253 1253 goto error_state;
1254 1254 }
1255 1255
1256 1256 /* Initialize device attributes structure */
1257 1257 hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1258 1258
1259 1259 if (hmep->hme_cheerio_mode)
1260 1260 hmep->hme_dev_attr.devacc_attr_endian_flags =
1261 1261 DDI_STRUCTURE_LE_ACC;
1262 1262 else
1263 1263 hmep->hme_dev_attr.devacc_attr_endian_flags =
1264 1264 DDI_STRUCTURE_BE_ACC;
1265 1265
1266 1266 hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1267 1267
1268 1268 if (hmep->hme_cheerio_mode) {
1269 1269 uint8_t oldLT;
1270 1270 uint8_t newLT = 0;
1271 1271 dev_info_t *pdip;
1272 1272 const char *pdrvname;
1273 1273
1274 1274 /*
1275 1275 * Map the PCI config space
1276 1276 */
1277 1277 if (pci_config_setup(dip, &hmep->pci_config_handle) !=
1278 1278 DDI_SUCCESS) {
1279 1279 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1280 1280 "pci_config_setup() failed..");
1281 1281 goto error_state;
1282 1282 }
1283 1283
1284 1284 if (ddi_regs_map_setup(dip, 1,
1285 1285 (caddr_t *)&(hmep->hme_globregp), 0, 0,
1286 1286 &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1287 1287 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1288 1288 mregs_4global_reg_fail_msg);
1289 1289 goto error_unmap;
1290 1290 }
1291 1291 hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1292 1292 hmep->hme_mifregh = hmep->hme_globregh;
1293 1293
1294 1294 hmep->hme_etxregp =
1295 1295 (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
1296 1296 hmep->hme_erxregp =
1297 1297 (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
1298 1298 hmep->hme_bmacregp =
1299 1299 (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
1300 1300 hmep->hme_mifregp =
1301 1301 (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
1302 1302
1303 1303 /*
1304 1304 * Get parent pci bridge info.
1305 1305 */
1306 1306 pdip = ddi_get_parent(dip);
1307 1307 pdrvname = ddi_driver_name(pdip);
1308 1308
1309 1309 oldLT = pci_config_get8(hmep->pci_config_handle,
1310 1310 PCI_CONF_LATENCY_TIMER);
1311 1311 /*
1312 1312 * Honor value set in /etc/system
1313 1313 * "set hme:pci_latency_timer=0xYY"
1314 1314 */
1315 1315 if (pci_latency_timer)
1316 1316 newLT = pci_latency_timer;
1317 1317 /*
1318 1318 * Modify LT for simba
1319 1319 */
1320 1320 else if (strcmp("simba", pdrvname) == 0)
1321 1321 newLT = 0xf0;
1322 1322 /*
1323 1323 * Ensure minimum cheerio latency timer of 0x50
1324 1324 * Usually OBP or pci bridge should set this value
1325 1325 * based on cheerio
1326 1326 * min_grant * 8(33MHz) = 0x50 = 0xa * 0x8
1327 1327 * Some system set cheerio LT at 0x40
1328 1328 */
1329 1329 else if (oldLT < 0x40)
1330 1330 newLT = 0x50;
1331 1331
1332 1332 /*
1333 1333 * Now program cheerio's pci latency timer with newLT
1334 1334 */
1335 1335 if (newLT)
1336 1336 pci_config_put8(hmep->pci_config_handle,
1337 1337 PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
1338 1338 } else { /* Map register sets */
1339 1339 if (ddi_regs_map_setup(dip, 0,
1340 1340 (caddr_t *)&(hmep->hme_globregp), 0, 0,
1341 1341 &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1342 1342 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1343 1343 mregs_4global_reg_fail_msg);
1344 1344 goto error_state;
1345 1345 }
1346 1346 if (ddi_regs_map_setup(dip, 1,
1347 1347 (caddr_t *)&(hmep->hme_etxregp), 0, 0,
1348 1348 &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
1349 1349 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1350 1350 mregs_4etx_reg_fail_msg);
1351 1351 goto error_unmap;
1352 1352 }
1353 1353 if (ddi_regs_map_setup(dip, 2,
1354 1354 (caddr_t *)&(hmep->hme_erxregp), 0, 0,
1355 1355 &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
1356 1356 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1357 1357 mregs_4erx_reg_fail_msg);
1358 1358 goto error_unmap;
1359 1359 }
1360 1360 if (ddi_regs_map_setup(dip, 3,
1361 1361 (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
1362 1362 &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
1363 1363 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1364 1364 mregs_4bmac_reg_fail_msg);
1365 1365 goto error_unmap;
1366 1366 }
1367 1367
1368 1368 if (ddi_regs_map_setup(dip, 4,
1369 1369 (caddr_t *)&(hmep->hme_mifregp), 0, 0,
1370 1370 &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
1371 1371 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1372 1372 mregs_4mif_reg_fail_msg);
1373 1373 goto error_unmap;
1374 1374 }
1375 1375 } /* Endif cheerio_mode */
1376 1376
1377 1377 /*
1378 1378 * Based on the hm-rev, set some capabilities
1379 1379 * Set up default capabilities for HM 2.0
1380 1380 */
1381 1381 hmep->hme_frame_enable = 0;
1382 1382 hmep->hme_lance_mode_enable = 0;
1383 1383 hmep->hme_rxcv_enable = 0;
1384 1384
1385 1385 /* NEW routine to get the properties */
1386 1386
1387 1387 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
1388 1388 (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
1389 1389
1390 1390 hmep->asic_rev = hm_rev;
1391 1391 hmeget_hm_rev_property(hmep);
1392 1392 } else {
1393 1393 /*
1394 1394 * hm_rev property not found so, this is
1395 1395 * case of hot insertion of card without interpreting fcode.
1396 1396 * Get it from revid in config space after mapping it.
1397 1397 */
1398 1398 if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1399 1399 0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1400 1400 return (DDI_FAILURE);
1401 1401 }
1402 1402 /*
1403 1403 * Since this is cheerio-based PCI card, we write 0xC in the
1404 1404 * top 4 bits(4-7) of hm-rev and retain the bottom(0-3) bits
1405 1405 * for Cheerio version(1.0 or 2.0 = 0xC0 or 0xC1)
1406 1406 */
1407 1407 hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
1408 1408 hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
1409 1409 hmep->asic_rev = hm_rev;
1410 1410 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1411 1411 "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
1412 1412 DDI_SUCCESS) {
1413 1413 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1414 1414 "ddi_prop_create error for hm_rev");
1415 1415 }
1416 1416 ddi_regs_map_free(&cfg_handle);
1417 1417
1418 1418 hmeget_hm_rev_property(hmep);
1419 1419
1420 1420 /* get info via VPD */
1421 1421 if (hmeget_promprops(dip) != DDI_SUCCESS) {
1422 1422 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1423 1423 "no promprops");
1424 1424 }
1425 1425 }
1426 1426
1427 1427 if (ddi_intr_hilevel(dip, 0)) {
1428 1428 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
1429 1429 " high-level interrupts are not supported");
1430 1430 goto error_unmap;
1431 1431 }
1432 1432
1433 1433 /*
1434 1434 * Get intr. block cookie so that mutex locks can be initialized.
1435 1435 */
1436 1436 if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
1437 1437 goto error_unmap;
1438 1438
1439 1439 /*
1440 1440 * Initialize mutex's for this device.
1441 1441 */
1442 1442 mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1443 1443 mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1444 1444
1445 1445 /*
1446 1446 * Quiesce the hardware.
1447 1447 */
1448 1448 (void) hmestop(hmep);
1449 1449
1450 1450 /*
1451 1451 * Add interrupt to system
1452 1452 */
1453 1453 if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
1454 1454 (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
1455 1455 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1456 1456 add_intr_fail_msg);
1457 1457 goto error_mutex;
1458 1458 }
1459 1459
1460 1460 /*
1461 1461 * Set up the ethernet mac address.
1462 1462 */
1463 1463 hme_setup_mac_address(hmep, dip);
1464 1464
1465 1465 if (!hmeinit_xfer_params(hmep))
1466 1466 goto error_intr;
1467 1467
1468 1468 if (hmeburstsizes(hmep) == DDI_FAILURE) {
1469 1469 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
1470 1470 goto error_intr;
1471 1471 }
1472 1472
1473 1473 if (hmeallocthings(hmep) != DDI_SUCCESS) {
1474 1474 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1475 1475 "resource allocation failed");
1476 1476 goto error_intr;
1477 1477 }
1478 1478
1479 1479 if (hmeallocbufs(hmep) != DDI_SUCCESS) {
1480 1480 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1481 1481 "buffer allocation failed");
1482 1482 goto error_intr;
1483 1483 }
1484 1484
1485 1485 hmestatinit(hmep);
1486 1486
1487 1487 /* our external (preferred) PHY is at address 0 */
1488 1488 (void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, "first-phy", 0);
1489 1489
1490 1490 hmep->hme_mii = mii_alloc(hmep, dip, &hme_mii_ops);
1491 1491 if (hmep->hme_mii == NULL) {
1492 1492 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1493 1493 "mii_alloc failed");
1494 1494 goto error_intr;
1495 1495 }
1496 1496 /* force a probe for the PHY */
1497 1497 mii_probe(hmep->hme_mii);
1498 1498
1499 1499 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1500 1500 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1501 1501 "mac_alloc failed");
1502 1502 goto error_intr;
1503 1503 }
1504 1504 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1505 1505 macp->m_driver = hmep;
1506 1506 macp->m_dip = dip;
1507 1507 macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
1508 1508 macp->m_callbacks = &hme_m_callbacks;
1509 1509 macp->m_min_sdu = 0;
1510 1510 macp->m_max_sdu = ETHERMTU;
1511 1511 macp->m_margin = VLAN_TAGSZ;
1512 1512 macp->m_priv_props = hme_priv_prop;
1513 1513 if (mac_register(macp, &hmep->hme_mh) != 0) {
1514 1514 mac_free(macp);
1515 1515 goto error_intr;
1516 1516 }
1517 1517
1518 1518 mac_free(macp);
1519 1519
1520 1520 ddi_report_dev(dip);
1521 1521 return (DDI_SUCCESS);
1522 1522
1523 1523 /*
1524 1524 * Failure Exit
1525 1525 */
1526 1526
1527 1527 error_intr:
1528 1528 if (hmep->hme_cookie)
1529 1529 ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1530 1530
1531 1531 if (hmep->hme_mii)
1532 1532 mii_free(hmep->hme_mii);
1533 1533
1534 1534 error_mutex:
1535 1535 mutex_destroy(&hmep->hme_xmitlock);
1536 1536 mutex_destroy(&hmep->hme_intrlock);
1537 1537
1538 1538 error_unmap:
1539 1539 if (hmep->hme_globregh)
1540 1540 ddi_regs_map_free(&hmep->hme_globregh);
1541 1541 if (hmep->hme_cheerio_mode == 0) {
1542 1542 if (hmep->hme_etxregh)
1543 1543 ddi_regs_map_free(&hmep->hme_etxregh);
1544 1544 if (hmep->hme_erxregh)
1545 1545 ddi_regs_map_free(&hmep->hme_erxregh);
1546 1546 if (hmep->hme_bmacregh)
1547 1547 ddi_regs_map_free(&hmep->hme_bmacregh);
1548 1548 if (hmep->hme_mifregh)
1549 1549 ddi_regs_map_free(&hmep->hme_mifregh);
1550 1550 } else {
1551 1551 if (hmep->pci_config_handle)
1552 1552 (void) pci_config_teardown(&hmep->pci_config_handle);
1553 1553 hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1554 1554 hmep->hme_mifregh = hmep->hme_globregh = NULL;
1555 1555 }
1556 1556
1557 1557 error_state:
1558 1558 hmefreethings(hmep);
1559 1559 hmefreebufs(hmep);
1560 1560
1561 1561 if (hmep) {
1562 1562 kmem_free((caddr_t)hmep, sizeof (*hmep));
1563 1563 ddi_set_driver_private(dip, NULL);
1564 1564 }
1565 1565
1566 1566 return (DDI_FAILURE);
1567 1567 }
1568 1568
/*
 * Detach entry point.  DDI_SUSPEND quiesces the interface but keeps
 * all state; DDI_DETACH unregisters from the MAC framework and then
 * tears down interrupts, kstats, register mappings, mutexes and the
 * soft state, in that order.
 */
int
hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct hme *hmep;

	if ((hmep = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		/* quiesce the PHY and the chip; state is preserved */
		mii_suspend(hmep->hme_mii);
		hmep->hme_flags |= HMESUSPENDED;
		hmeuninit(hmep);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}


	/* fail the detach if the MAC layer still has us busy */
	if (mac_unregister(hmep->hme_mh) != 0) {
		return (DDI_FAILURE);
	}

	/*
	 * Make driver quiescent, we don't want to prevent the
	 * detach on failure.  Note that this should be redundant,
	 * since mac_stop should already have called hmeuninit().
	 */
	if (!(hmep->hme_flags & HMESUSPENDED)) {
		(void) hmestop(hmep);
	}

	if (hmep->hme_mii)
		mii_free(hmep->hme_mii);

	/*
	 * Remove instance of the intr
	 */
	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);

	/*
	 * Unregister kstats.
	 */
	if (hmep->hme_ksp != NULL)
		kstat_delete(hmep->hme_ksp);
	if (hmep->hme_intrstats != NULL)
		kstat_delete(hmep->hme_intrstats);

	hmep->hme_ksp = NULL;
	hmep->hme_intrstats = NULL;

	/*
	 * Destroy all mutexes and data structures allocated during
	 * attach time.
	 *
	 * Note: at this time we should be the only thread accessing
	 * the structures for this instance.
	 */

	if (hmep->hme_globregh)
		ddi_regs_map_free(&hmep->hme_globregh);
	if (hmep->hme_cheerio_mode == 0) {
		/* SBus: each register set was mapped separately */
		if (hmep->hme_etxregh)
			ddi_regs_map_free(&hmep->hme_etxregh);
		if (hmep->hme_erxregh)
			ddi_regs_map_free(&hmep->hme_erxregh);
		if (hmep->hme_bmacregh)
			ddi_regs_map_free(&hmep->hme_bmacregh);
		if (hmep->hme_mifregh)
			ddi_regs_map_free(&hmep->hme_mifregh);
	} else {
		/* PCI: one shared mapping (already freed above) + config */
		if (hmep->pci_config_handle)
			(void) pci_config_teardown(&hmep->pci_config_handle);
		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
	}

	mutex_destroy(&hmep->hme_xmitlock);
	mutex_destroy(&hmep->hme_intrlock);

	hmefreethings(hmep);
	hmefreebufs(hmep);

	ddi_set_driver_private(dip, NULL);
	kmem_free(hmep, sizeof (struct hme));

	return (DDI_SUCCESS);
}
1661 1661
1662 1662 int
1663 1663 hmequiesce(dev_info_t *dip)
1664 1664 {
1665 1665 struct hme *hmep;
1666 1666
1667 1667 if ((hmep = ddi_get_driver_private(dip)) == NULL)
1668 1668 return (DDI_FAILURE);
1669 1669
1670 1670 (void) hmestop(hmep);
1671 1671 return (DDI_SUCCESS);
1672 1672 }
1673 1673
1674 1674 static boolean_t
1675 1675 hmeinit_xfer_params(struct hme *hmep)
1676 1676 {
1677 1677 int hme_ipg1_conf, hme_ipg2_conf;
1678 1678 int hme_ipg0_conf, hme_lance_mode_conf;
1679 1679 int prop_len = sizeof (int);
1680 1680 dev_info_t *dip;
1681 1681
1682 1682 dip = hmep->dip;
1683 1683
1684 1684 /*
1685 1685 * Set up the start-up values for user-configurable parameters
1686 1686 * Get the values from the global variables first.
1687 1687 * Use the MASK to limit the value to allowed maximum.
1688 1688 */
1689 1689 hmep->hme_ipg1 = hme_ipg1 & HME_MASK_8BIT;
1690 1690 hmep->hme_ipg2 = hme_ipg2 & HME_MASK_8BIT;
1691 1691 hmep->hme_ipg0 = hme_ipg0 & HME_MASK_5BIT;
1692 1692
1693 1693 /*
1694 1694 * Get the parameter values configured in .conf file.
1695 1695 */
1696 1696 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
1697 1697 (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
1698 1698 hmep->hme_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
1699 1699 }
1700 1700
1701 1701 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
1702 1702 (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
1703 1703 hmep->hme_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
1704 1704 }
1705 1705
1706 1706 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
1707 1707 (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
1708 1708 hmep->hme_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
1709 1709 }
1710 1710
1711 1711 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
1712 1712 (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
1713 1713 hmep->hme_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
1714 1714 }
1715 1715
1716 1716 return (B_TRUE);
1717 1717 }
1718 1718
/*
 * Quiesce the chip: stop both DMA engines, then issue a global
 * software reset and wait for it to self-clear.
 *
 * Return 0 upon success, 1 on failure.
 */
static uint_t
hmestop(struct hme *hmep)
{
	/*
	 * Disable the Tx dma engine.
	 */
	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
	/* wait for the TX state machine to reach its idle state */
	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);

	/*
	 * Disable the Rx dma engine.
	 */
	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
	/* wait for the RX state machine to go idle */
	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);

	/*
	 * By this time all things should be quiet, so hit the
	 * chip with a reset.
	 */
	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);

	/* the reset bit clears itself when the reset completes */
	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
	if (GET_GLOBREG(reset)) {
		return (1);
	}

	CHECK_GLOBREG();
	return (0);
}
1751 1751
1752 1752 static int
1753 1753 hmestat_kstat_update(kstat_t *ksp, int rw)
1754 1754 {
1755 1755 struct hme *hmep;
1756 1756 struct hmekstat *hkp;
1757 1757
1758 1758 hmep = (struct hme *)ksp->ks_private;
1759 1759 hkp = (struct hmekstat *)ksp->ks_data;
1760 1760
1761 1761 if (rw != KSTAT_READ)
1762 1762 return (EACCES);
1763 1763
1764 1764 /*
1765 1765 * Update all the stats by reading all the counter registers.
1766 1766 * Counter register stats are not updated till they overflow
1767 1767 * and interrupt.
1768 1768 */
1769 1769
1770 1770 mutex_enter(&hmep->hme_xmitlock);
1771 1771 if (hmep->hme_flags & HMERUNNING) {
1772 1772 hmereclaim(hmep);
1773 1773 hmesavecntrs(hmep);
1774 1774 }
1775 1775 mutex_exit(&hmep->hme_xmitlock);
1776 1776
1777 1777 hkp->hk_cvc.value.ul = hmep->hme_cvc;
1778 1778 hkp->hk_lenerr.value.ul = hmep->hme_lenerr;
1779 1779 hkp->hk_buff.value.ul = hmep->hme_buff;
1780 1780 hkp->hk_missed.value.ul = hmep->hme_missed;
1781 1781 hkp->hk_allocbfail.value.ul = hmep->hme_allocbfail;
1782 1782 hkp->hk_babl.value.ul = hmep->hme_babl;
1783 1783 hkp->hk_tmder.value.ul = hmep->hme_tmder;
1784 1784 hkp->hk_txlaterr.value.ul = hmep->hme_txlaterr;
1785 1785 hkp->hk_rxlaterr.value.ul = hmep->hme_rxlaterr;
1786 1786 hkp->hk_slvparerr.value.ul = hmep->hme_slvparerr;
1787 1787 hkp->hk_txparerr.value.ul = hmep->hme_txparerr;
1788 1788 hkp->hk_rxparerr.value.ul = hmep->hme_rxparerr;
1789 1789 hkp->hk_slverrack.value.ul = hmep->hme_slverrack;
1790 1790 hkp->hk_txerrack.value.ul = hmep->hme_txerrack;
1791 1791 hkp->hk_rxerrack.value.ul = hmep->hme_rxerrack;
1792 1792 hkp->hk_txtagerr.value.ul = hmep->hme_txtagerr;
1793 1793 hkp->hk_rxtagerr.value.ul = hmep->hme_rxtagerr;
1794 1794 hkp->hk_eoperr.value.ul = hmep->hme_eoperr;
1795 1795 hkp->hk_notmds.value.ul = hmep->hme_notmds;
1796 1796 hkp->hk_notbufs.value.ul = hmep->hme_notbufs;
1797 1797 hkp->hk_norbufs.value.ul = hmep->hme_norbufs;
1798 1798
1799 1799 /*
1800 1800 * Debug kstats
1801 1801 */
1802 1802 hkp->hk_inits.value.ul = hmep->inits;
1803 1803 hkp->hk_phyfail.value.ul = hmep->phyfail;
1804 1804
1805 1805 /*
1806 1806 * xcvr kstats
1807 1807 */
1808 1808 hkp->hk_asic_rev.value.ul = hmep->asic_rev;
1809 1809
1810 1810 return (0);
1811 1811 }
1812 1812
1813 1813 static void
1814 1814 hmestatinit(struct hme *hmep)
1815 1815 {
1816 1816 struct kstat *ksp;
1817 1817 struct hmekstat *hkp;
1818 1818 const char *driver;
1819 1819 int instance;
1820 1820 char buf[16];
1821 1821
1822 1822 instance = hmep->instance;
1823 1823 driver = ddi_driver_name(hmep->dip);
1824 1824
1825 1825 if ((ksp = kstat_create(driver, instance,
1826 1826 "driver_info", "net", KSTAT_TYPE_NAMED,
1827 1827 sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
1828 1828 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
1829 1829 "kstat_create failed");
1830 1830 return;
1831 1831 }
1832 1832
1833 1833 (void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
1834 1834 hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
1835 1835 KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
1836 1836 if (hmep->hme_intrstats)
1837 1837 kstat_install(hmep->hme_intrstats);
1838 1838
1839 1839 hmep->hme_ksp = ksp;
1840 1840 hkp = (struct hmekstat *)ksp->ks_data;
1841 1841 kstat_named_init(&hkp->hk_cvc, "code_violations",
1842 1842 KSTAT_DATA_ULONG);
1843 1843 kstat_named_init(&hkp->hk_lenerr, "len_errors",
1844 1844 KSTAT_DATA_ULONG);
1845 1845 kstat_named_init(&hkp->hk_buff, "buff",
1846 1846 KSTAT_DATA_ULONG);
1847 1847 kstat_named_init(&hkp->hk_missed, "missed",
1848 1848 KSTAT_DATA_ULONG);
1849 1849 kstat_named_init(&hkp->hk_nocanput, "nocanput",
1850 1850 KSTAT_DATA_ULONG);
1851 1851 kstat_named_init(&hkp->hk_allocbfail, "allocbfail",
1852 1852 KSTAT_DATA_ULONG);
1853 1853 kstat_named_init(&hkp->hk_babl, "babble",
1854 1854 KSTAT_DATA_ULONG);
1855 1855 kstat_named_init(&hkp->hk_tmder, "tmd_error",
1856 1856 KSTAT_DATA_ULONG);
1857 1857 kstat_named_init(&hkp->hk_txlaterr, "tx_late_error",
1858 1858 KSTAT_DATA_ULONG);
1859 1859 kstat_named_init(&hkp->hk_rxlaterr, "rx_late_error",
1860 1860 KSTAT_DATA_ULONG);
1861 1861 kstat_named_init(&hkp->hk_slvparerr, "slv_parity_error",
1862 1862 KSTAT_DATA_ULONG);
1863 1863 kstat_named_init(&hkp->hk_txparerr, "tx_parity_error",
1864 1864 KSTAT_DATA_ULONG);
1865 1865 kstat_named_init(&hkp->hk_rxparerr, "rx_parity_error",
1866 1866 KSTAT_DATA_ULONG);
1867 1867 kstat_named_init(&hkp->hk_slverrack, "slv_error_ack",
1868 1868 KSTAT_DATA_ULONG);
1869 1869 kstat_named_init(&hkp->hk_txerrack, "tx_error_ack",
1870 1870 KSTAT_DATA_ULONG);
1871 1871 kstat_named_init(&hkp->hk_rxerrack, "rx_error_ack",
1872 1872 KSTAT_DATA_ULONG);
1873 1873 kstat_named_init(&hkp->hk_txtagerr, "tx_tag_error",
1874 1874 KSTAT_DATA_ULONG);
1875 1875 kstat_named_init(&hkp->hk_rxtagerr, "rx_tag_error",
1876 1876 KSTAT_DATA_ULONG);
1877 1877 kstat_named_init(&hkp->hk_eoperr, "eop_error",
1878 1878 KSTAT_DATA_ULONG);
1879 1879 kstat_named_init(&hkp->hk_notmds, "no_tmds",
1880 1880 KSTAT_DATA_ULONG);
1881 1881 kstat_named_init(&hkp->hk_notbufs, "no_tbufs",
1882 1882 KSTAT_DATA_ULONG);
1883 1883 kstat_named_init(&hkp->hk_norbufs, "no_rbufs",
1884 1884 KSTAT_DATA_ULONG);
1885 1885
1886 1886 /*
1887 1887 * Debugging kstats
1888 1888 */
1889 1889 kstat_named_init(&hkp->hk_inits, "inits",
1890 1890 KSTAT_DATA_ULONG);
1891 1891 kstat_named_init(&hkp->hk_phyfail, "phy_failures",
1892 1892 KSTAT_DATA_ULONG);
1893 1893
1894 1894 /*
1895 1895 * xcvr kstats
1896 1896 */
1897 1897 kstat_named_init(&hkp->hk_asic_rev, "asic_rev",
1898 1898 KSTAT_DATA_ULONG);
1899 1899
1900 1900 ksp->ks_update = hmestat_kstat_update;
1901 1901 ksp->ks_private = (void *) hmep;
1902 1902 kstat_install(ksp);
1903 1903 }
1904 1904
1905 1905 int
1906 1906 hme_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1907 1907 void *val)
1908 1908 {
1909 1909 struct hme *hmep = arg;
1910 1910 int value;
1911 1911 int rv;
1912 1912
1913 1913 rv = mii_m_getprop(hmep->hme_mii, name, num, sz, val);
1914 1914 if (rv != ENOTSUP)
1915 1915 return (rv);
1916 1916
1917 1917 switch (num) {
1918 1918 case MAC_PROP_PRIVATE:
1919 1919 break;
1920 1920 default:
1921 1921 return (ENOTSUP);
1922 1922 }
1923 1923
1924 1924 if (strcmp(name, "_ipg0") == 0) {
1925 1925 value = hmep->hme_ipg0;
1926 1926 } else if (strcmp(name, "_ipg1") == 0) {
1927 1927 value = hmep->hme_ipg1;
1928 1928 } else if (strcmp(name, "_ipg2") == 0) {
1929 1929 value = hmep->hme_ipg2;
1930 1930 } else if (strcmp(name, "_lance_mode") == 0) {
1931 1931 value = hmep->hme_lance_mode;
1932 1932 } else {
1933 1933 return (ENOTSUP);
1934 1934 }
1935 1935 (void) snprintf(val, sz, "%d", value);
1936 1936 return (0);
1937 1937 }
1938 1938
1939 1939 static void
1940 1940 hme_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1941 1941 mac_prop_info_handle_t mph)
1942 1942 {
1943 1943 struct hme *hmep = arg;
1944 1944
1945 1945 mii_m_propinfo(hmep->hme_mii, name, num, mph);
1946 1946
1947 1947 switch (num) {
1948 1948 case MAC_PROP_PRIVATE: {
1949 1949 char valstr[64];
1950 1950 int default_val;
1951 1951
1952 1952 if (strcmp(name, "_ipg0") == 0) {
1953 1953 default_val = hme_ipg0;
1954 1954 } else if (strcmp(name, "_ipg1") == 0) {
1955 1955 default_val = hme_ipg1;
1956 1956 } else if (strcmp(name, "_ipg2") == 0) {
1957 1957 default_val = hme_ipg2;
1958 1958 } if (strcmp(name, "_lance_mode") == 0) {
1959 1959 default_val = hme_lance_mode;
1960 1960 } else {
1961 1961 return;
1962 1962 }
1963 1963
1964 1964 (void) snprintf(valstr, sizeof (valstr), "%d", default_val);
1965 1965 mac_prop_info_set_default_str(mph, valstr);
1966 1966 break;
1967 1967 }
1968 1968 }
1969 1969 }
1970 1970
1971 1971 int
1972 1972 hme_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1973 1973 const void *val)
1974 1974 {
1975 1975 struct hme *hmep = arg;
1976 1976 int rv;
1977 1977 long lval;
1978 1978 boolean_t init = B_FALSE;
1979 1979
1980 1980 rv = mii_m_setprop(hmep->hme_mii, name, num, sz, val);
1981 1981 if (rv != ENOTSUP)
1982 1982 return (rv);
1983 1983 rv = 0;
1984 1984
1985 1985 switch (num) {
1986 1986 case MAC_PROP_PRIVATE:
1987 1987 break;
1988 1988 default:
1989 1989 return (ENOTSUP);
1990 1990 }
1991 1991
1992 1992 (void) ddi_strtol(val, NULL, 0, &lval);
1993 1993
1994 1994 if (strcmp(name, "_ipg1") == 0) {
1995 1995 if ((lval >= 0) && (lval <= 255)) {
1996 1996 hmep->hme_ipg1 = lval & 0xff;
1997 1997 init = B_TRUE;
1998 1998 } else {
1999 1999 return (EINVAL);
2000 2000 }
2001 2001
2002 2002 } else if (strcmp(name, "_ipg2") == 0) {
2003 2003 if ((lval >= 0) && (lval <= 255)) {
2004 2004 hmep->hme_ipg2 = lval & 0xff;
2005 2005 init = B_TRUE;
2006 2006 } else {
2007 2007 return (EINVAL);
2008 2008 }
2009 2009
2010 2010 } else if (strcmp(name, "_ipg0") == 0) {
2011 2011 if ((lval >= 0) && (lval <= 31)) {
2012 2012 hmep->hme_ipg0 = lval & 0xff;
2013 2013 init = B_TRUE;
2014 2014 } else {
2015 2015 return (EINVAL);
2016 2016 }
2017 2017 } else if (strcmp(name, "_lance_mode") == 0) {
2018 2018 if ((lval >= 0) && (lval <= 1)) {
2019 2019 hmep->hme_lance_mode = lval & 0xff;
2020 2020 init = B_TRUE;
2021 2021 } else {
2022 2022 return (EINVAL);
2023 2023 }
2024 2024
2025 2025 } else {
2026 2026 rv = ENOTSUP;
2027 2027 }
2028 2028
2029 2029 if (init) {
2030 2030 (void) hmeinit(hmep);
2031 2031 }
2032 2032 return (rv);
2033 2033 }
2034 2034
2035 2035
2036 2036 /*ARGSUSED*/
2037 2037 static boolean_t
2038 2038 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2039 2039 {
2040 2040 switch (cap) {
2041 2041 case MAC_CAPAB_HCKSUM:
2042 2042 *(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
2043 2043 return (B_TRUE);
2044 2044 default:
2045 2045 return (B_FALSE);
2046 2046 }
2047 2047 }
2048 2048
2049 2049 static int
2050 2050 hme_m_promisc(void *arg, boolean_t on)
2051 2051 {
2052 2052 struct hme *hmep = arg;
2053 2053
2054 2054 hmep->hme_promisc = on;
2055 2055 (void) hmeinit(hmep);
2056 2056 return (0);
2057 2057 }
2058 2058
2059 2059 static int
2060 2060 hme_m_unicst(void *arg, const uint8_t *macaddr)
2061 2061 {
2062 2062 struct hme *hmep = arg;
2063 2063
2064 2064 /*
2065 2065 * Set new interface local address and re-init device.
2066 2066 * This is destructive to any other streams attached
2067 2067 * to this device.
2068 2068 */
2069 2069 mutex_enter(&hmep->hme_intrlock);
2070 2070 bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
2071 2071 mutex_exit(&hmep->hme_intrlock);
2072 2072 (void) hmeinit(hmep);
2073 2073 return (0);
2074 2074 }
2075 2075
2076 2076 static int
2077 2077 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
2078 2078 {
2079 2079 struct hme *hmep = arg;
2080 2080 uint32_t ladrf_bit;
2081 2081 boolean_t doinit = B_FALSE;
2082 2082
2083 2083 /*
2084 2084 * If this address's bit was not already set in the local address
2085 2085 * filter, add it and re-initialize the Hardware.
2086 2086 */
2087 2087 ladrf_bit = hmeladrf_bit(macaddr);
2088 2088
2089 2089 mutex_enter(&hmep->hme_intrlock);
2090 2090 if (add) {
2091 2091 hmep->hme_ladrf_refcnt[ladrf_bit]++;
2092 2092 if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
2093 2093 hmep->hme_ladrf[ladrf_bit >> 4] |=
2094 2094 1 << (ladrf_bit & 0xf);
2095 2095 hmep->hme_multi++;
2096 2096 doinit = B_TRUE;
2097 2097 }
2098 2098 } else {
2099 2099 hmep->hme_ladrf_refcnt[ladrf_bit]--;
2100 2100 if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
2101 2101 hmep->hme_ladrf[ladrf_bit >> 4] &=
2102 2102 ~(1 << (ladrf_bit & 0xf));
2103 2103 doinit = B_TRUE;
2104 2104 }
2105 2105 }
2106 2106 mutex_exit(&hmep->hme_intrlock);
2107 2107
2108 2108 if (doinit) {
2109 2109 (void) hmeinit(hmep);
2110 2110 }
2111 2111
2112 2112 return (0);
2113 2113 }
2114 2114
2115 2115 static int
2116 2116 hme_m_start(void *arg)
2117 2117 {
2118 2118 struct hme *hmep = arg;
2119 2119
2120 2120 if (hmeinit(hmep) != 0) {
2121 2121 /* initialization failed -- really want DL_INITFAILED */
2122 2122 return (EIO);
2123 2123 } else {
2124 2124 hmep->hme_started = B_TRUE;
2125 2125 mii_start(hmep->hme_mii);
2126 2126 return (0);
2127 2127 }
2128 2128 }
2129 2129
2130 2130 static void
2131 2131 hme_m_stop(void *arg)
2132 2132 {
2133 2133 struct hme *hmep = arg;
2134 2134
2135 2135 mii_stop(hmep->hme_mii);
2136 2136 hmep->hme_started = B_FALSE;
2137 2137 hmeuninit(hmep);
2138 2138 }
2139 2139
/*
 * mac(9e) stat entry point: return one 64-bit statistic.
 *
 * If the device is running, first reclaim completed transmit
 * descriptors and fold the hardware error counters into the soft
 * state (both require hme_xmitlock).  MII/link statistics are
 * delegated to the common mii module; everything else is read from
 * the driver's accumulated counters.  Returns EINVAL for statistics
 * this driver does not maintain.
 */
static int
hme_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct hme *hmep = arg;

	mutex_enter(&hmep->hme_xmitlock);
	if (hmep->hme_flags & HMERUNNING) {
		/* refresh soft counters from the hardware */
		hmereclaim(hmep);
		hmesavecntrs(hmep);
	}
	mutex_exit(&hmep->hme_xmitlock);


	if (mii_m_getstat(hmep->hme_mii, stat, val) == 0) {
		return (0);
	}
	switch (stat) {
	case MAC_STAT_IPACKETS:
		*val = hmep->hme_ipackets;
		break;
	case MAC_STAT_RBYTES:
		*val = hmep->hme_rbytes;
		break;
	case MAC_STAT_IERRORS:
		*val = hmep->hme_ierrors;
		break;
	case MAC_STAT_OPACKETS:
		*val = hmep->hme_opackets;
		break;
	case MAC_STAT_OBYTES:
		*val = hmep->hme_obytes;
		break;
	case MAC_STAT_OERRORS:
		*val = hmep->hme_oerrors;
		break;
	case MAC_STAT_MULTIRCV:
		*val = hmep->hme_multircv;
		break;
	case MAC_STAT_MULTIXMT:
		*val = hmep->hme_multixmt;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = hmep->hme_brdcstrcv;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = hmep->hme_brdcstxmt;
		break;
	case MAC_STAT_UNDERFLOWS:
		*val = hmep->hme_uflo;
		break;
	case MAC_STAT_OVERFLOWS:
		*val = hmep->hme_oflo;
		break;
	case MAC_STAT_COLLISIONS:
		*val = hmep->hme_coll;
		break;
	case MAC_STAT_NORCVBUF:
		*val = hmep->hme_norcvbuf;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = hmep->hme_noxmtbuf;
		break;
	case ETHER_STAT_LINK_DUPLEX:
		*val = hmep->hme_duplex;
		break;
	case ETHER_STAT_ALIGN_ERRORS:
		*val = hmep->hme_align_errors;
		break;
	case ETHER_STAT_FCS_ERRORS:
		*val = hmep->hme_fcs_errors;
		break;
	case ETHER_STAT_EX_COLLISIONS:
		*val = hmep->hme_excol;
		break;
	case ETHER_STAT_DEFER_XMTS:
		*val = hmep->hme_defer_xmts;
		break;
	case ETHER_STAT_SQE_ERRORS:
		*val = hmep->hme_sqe_errors;
		break;
	case ETHER_STAT_FIRST_COLLISIONS:
		*val = hmep->hme_fstcol;
		break;
	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = hmep->hme_tlcol;
		break;
	case ETHER_STAT_TOOLONG_ERRORS:
		*val = hmep->hme_toolong_errors;
		break;
	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = hmep->hme_runt;
		break;
	case ETHER_STAT_CARRIER_ERRORS:
		*val = hmep->hme_carrier_errors;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
2240 2240
2241 2241 static mblk_t *
2242 2242 hme_m_tx(void *arg, mblk_t *mp)
2243 2243 {
2244 2244 struct hme *hmep = arg;
2245 2245 mblk_t *next;
2246 2246
2247 2247 while (mp != NULL) {
2248 2248 next = mp->b_next;
2249 2249 mp->b_next = NULL;
2250 2250 if (!hmestart(hmep, mp)) {
2251 2251 mp->b_next = next;
2252 2252 break;
2253 2253 }
2254 2254 mp = next;
2255 2255 }
2256 2256 return (mp);
2257 2257 }
2258 2258
2259 2259 /*
2260 2260 * Software IP checksum, for the edge cases that the
2261 2261 * hardware can't handle. See hmestart for more info.
2262 2262 */
2263 2263 static uint16_t
2264 2264 hme_cksum(void *data, int len)
2265 2265 {
2266 2266 uint16_t *words = data;
2267 2267 int i, nwords = len / 2;
2268 2268 uint32_t sum = 0;
2269 2269
2270 2270 /* just add up the words */
2271 2271 for (i = 0; i < nwords; i++) {
2272 2272 sum += *words++;
2273 2273 }
2274 2274
2275 2275 /* pick up residual byte ... assume even half-word allocations */
2276 2276 if (len % 2) {
2277 2277 sum += (*words & htons(0xff00));
2278 2278 }
2279 2279
2280 2280 sum = (sum >> 16) + (sum & 0xffff);
2281 2281 sum = (sum >> 16) + (sum & 0xffff);
2282 2282
2283 2283 return (~(sum & 0xffff));
2284 2284 }
2285 2285
/*
 * Queue one packet for transmission.  Returns B_TRUE if the packet was
 * consumed (sent or dropped), B_FALSE if no transmit descriptor was
 * available and the caller should retry later (hme_wantw is set so the
 * reclaim path can call mac_tx_update()).
 *
 * The packet is always bcopy'd into a pre-mapped DMA buffer.  If MAC
 * requested partial checksum offload but the packet is under 64 bytes
 * (hardware generates bad checksums for those) or the offsets exceed
 * the TMD field limits, the checksum is computed in software instead.
 */
static boolean_t
hmestart(struct hme *hmep, mblk_t *mp)
{
	uint32_t len;
	boolean_t retval = B_TRUE;
	hmebuf_t *tbuf;
	uint32_t txptr;

	uint32_t csflags = 0;
	uint32_t flags;
	uint32_t start_offset;
	uint32_t stuff_offset;

	mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);

	if (flags & HCK_PARTIALCKSUM) {
		/*
		 * MAC's offsets are relative to the IP header; convert
		 * them to frame-relative, accounting for a VLAN tag.
		 */
		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
			start_offset += sizeof (struct ether_header) + 4;
			stuff_offset += sizeof (struct ether_header) + 4;
		} else {
			start_offset += sizeof (struct ether_header);
			stuff_offset += sizeof (struct ether_header);
		}
		csflags = HMETMD_CSENABL |
		    (start_offset << HMETMD_CSSTART_SHIFT) |
		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
	}

	mutex_enter(&hmep->hme_xmitlock);

	if (hmep->hme_flags & HMESUSPENDED) {
		/* device suspended: count the drop and free the packet */
		hmep->hme_carrier_errors++;
		hmep->hme_oerrors++;
		goto bad;
	}

	/*
	 * TX interrupts are disabled for performance; descriptors are
	 * reclaimed here, on the transmit path (see hmeinit).
	 */
	if (hmep->hme_txindex != hmep->hme_txreclaim) {
		hmereclaim(hmep);
	}
	if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim)
		goto notmds;
	txptr = hmep->hme_txindex % HME_TMDMAX;
	tbuf = &hmep->hme_tbuf[txptr];

	/*
	 * Note that for checksum offload, the hardware cannot
	 * generate correct checksums if the packet is smaller than
	 * 64-bytes. In such a case, we bcopy the packet and use
	 * a software checksum.
	 */

	len = msgsize(mp);
	if (len < 64) {
		/* zero fill the padding */
		bzero(tbuf->kaddr, 64);
	}
	mcopymsg(mp, tbuf->kaddr);

	if ((csflags != 0) && ((len < 64) ||
	    (start_offset > HMETMD_CSSTART_MAX) ||
	    (stuff_offset > HMETMD_CSSTUFF_MAX))) {
		/* hardware can't do it -- checksum in software instead */
		uint16_t sum;
		sum = hme_cksum(tbuf->kaddr + start_offset,
		    len - start_offset);
		bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
		csflags = 0;
	}

	if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
	    DDI_FAILURE) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
		    "ddi_dma_sync failed");
	}

	/*
	 * update MIB II statistics
	 */
	BUMP_OutNUcast(hmep, tbuf->kaddr);

	/* hand the descriptor to the hardware and kick the transmitter */
	PUT_TMD(txptr, tbuf->paddr, len,
	    HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);

	HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
	hmep->hme_txindex++;

	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
	CHECK_ETXREG();

	mutex_exit(&hmep->hme_xmitlock);

	hmep->hme_starts++;
	return (B_TRUE);

bad:
	mutex_exit(&hmep->hme_xmitlock);
	freemsg(mp);
	return (B_TRUE);

notmds:
	/* ring full: ask for a tx update once descriptors are reclaimed */
	hmep->hme_notmds++;
	hmep->hme_wantw = B_TRUE;
	hmereclaim(hmep);
	retval = B_FALSE;
done:
	mutex_exit(&hmep->hme_xmitlock);

	return (retval);
}
2394 2394
2395 2395 /*
2396 2396 * Initialize channel.
2397 2397 * Return 0 on success, nonzero on error.
2398 2398 *
2399 2399 * The recommended sequence for initialization is:
2400 2400 * 1. Issue a Global Reset command to the Ethernet Channel.
2401 2401 * 2. Poll the Global_Reset bits until the execution of the reset has been
2402 2402 * completed.
2403 2403 * 2(a). Use the MIF Frame/Output register to reset the transceiver.
 *	Poll Register 0 until the Reset bit is 0.
 * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2406 2406 * 100Mbps and Non-Isolated mode. The main point here is to bring the
2407 2407 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2408 2408 * to the MII interface so that the Bigmac core can correctly reset
2409 2409 * upon a software reset.
2410 2410 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll
2411 2411 * the Global_Reset bits till completion.
2412 2412 * 3. Set up all the data structures in the host memory.
2413 2413 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2414 2414 * Register).
2415 2415 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2416 2416 * Register).
2417 2417 * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2418 2418 * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2419 2419 * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2420 2420 * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2421 2421 * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2422 2422 * 11. Program the XIF Configuration Register (enable the XIF).
2423 2423 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2424 2424 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2425 2425 */
2426 2426
2427 2427
#ifdef FEPS_URUN_BUG
/*
 * Preamble length written to the BigMAC PALEN register by hmeinit()
 * when hme_urun_fix is set, as a workaround for the FEPS transmit
 * underrun bug.
 */
static int hme_palen = 32;
#endif
2431 2431
/*
 * hmeinit: program the device from the soft state and bring it up.
 * Returns 0 on success (or while suspended), nonzero on failure.
 * Called with no locks held; acquires hme_intrlock then hme_xmitlock.
 * See the numbered initialization sequence in the comment above.
 */
static int
hmeinit(struct hme *hmep)
{
	uint32_t i;
	int ret;
	boolean_t fdx;
	int phyad;

	/*
	 * Lock sequence:
	 *	hme_intrlock, hme_xmitlock.
	 */
	mutex_enter(&hmep->hme_intrlock);

	/*
	 * Don't touch the hardware if we are suspended.  But don't
	 * fail either.  Some time later we may be resumed, and then
	 * we'll be back here to program the device using the settings
	 * in the soft state.
	 */
	if (hmep->hme_flags & HMESUSPENDED) {
		mutex_exit(&hmep->hme_intrlock);
		return (0);
	}

	/*
	 * This should prevent us from clearing any interrupts that
	 * may occur by temporarily stopping interrupts from occurring
	 * for a short time.  We need to update the interrupt mask
	 * later in this function.
	 */
	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);


	/*
	 * Rearranged the mutex acquisition order to solve the deadlock
	 * situation as described in bug ID 4065896.
	 */

	mutex_enter(&hmep->hme_xmitlock);

	hmep->hme_flags = 0;
	hmep->hme_wantw = B_FALSE;

	/* Preserve accumulated hardware counters across the reset. */
	if (hmep->inits)
		hmesavecntrs(hmep);

	/*
	 * Perform Global reset of the Sbus/FEPS ENET channel.
	 */
	(void) hmestop(hmep);

	/*
	 * Clear all descriptors.
	 */
	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));

	/*
	 * Hang out receive buffers.
	 */
	for (i = 0; i < HME_RMDMAX; i++) {
		PUT_RMD(i, hmep->hme_rbuf[i].paddr);
	}

	/*
	 * DMA sync descriptors.
	 */
	(void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Reset RMD and TMD 'walking' pointers.
	 */
	hmep->hme_rxindex = 0;
	hmep->hme_txindex = hmep->hme_txreclaim = 0;

	/*
	 * This is the right place to initialize MIF !!!
	 */

	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */

	if (!hmep->hme_frame_enable)
		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
	else
		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
						/* enable frame mode */

	/*
	 * Depending on the transceiver detected, select the source
	 * of the clocks for the MAC.  Without the clocks, TX_MAC does
	 * not reset.  When the Global Reset is issued to the Sbus/FEPS
	 * ASIC, it selects Internal by default.
	 */

	switch ((phyad = mii_get_addr(hmep->hme_mii))) {
	case -1:
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
		goto init_fail;	/* abort initialization */

	case HME_INTERNAL_PHYAD:
		PUT_MACREG(xifc, 0);
		break;
	case HME_EXTERNAL_PHYAD:
		/* Isolate the Int. xcvr */
		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
		break;
	}

	hmep->inits++;

	/*
	 * Initialize BigMAC registers.
	 * First set the tx enable bit in tx config reg to 0 and poll on
	 * it till it turns to 0.  Same for rx config, hash and address
	 * filter reg.
	 * Here is the sequence per the spec.
	 * MADD2 - MAC Address 2
	 * MADD1 - MAC Address 1
	 * MADD0 - MAC Address 0
	 * HASH3, HASH2, HASH1, HASH0 for group address
	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
	 * Program RXMIN and RXMAX for packet length if not 802.3
	 * RXCFG - Rx config for not stripping CRC
	 * XXX Anything else to hme configured in RXCFG
	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
	 * if not 802.3 compliant
	 * XIF register for speed selection
	 * MASK  - Interrupt mask
	 * Set bit 0 of TXCFG
	 * Set bit 0 of RXCFG
	 */

	/*
	 * Initialize the TX_MAC registers
	 * Initialization of jamsize to work around rx crc bug
	 */
	PUT_MACREG(jam, jamsize);

#ifdef	FEPS_URUN_BUG
	if (hme_urun_fix)
		PUT_MACREG(palen, hme_palen);
#endif

	PUT_MACREG(ipg1, hmep->hme_ipg1);
	PUT_MACREG(ipg2, hmep->hme_ipg2);

	/* Random backoff seed derived from the station address. */
	PUT_MACREG(rseed,
	    ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
	    hmep->hme_ouraddr.ether_addr_octet[1]);

	/* Initialize the RX_MAC registers */

	/*
	 * Program BigMAC with local individual ethernet address.
	 */
	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
	    hmep->hme_ouraddr.ether_addr_octet[5]);
	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
	    hmep->hme_ouraddr.ether_addr_octet[3]);
	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
	    hmep->hme_ouraddr.ether_addr_octet[1]);

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the
	 * low order 6 bits as a index into the 64 bit logical
	 * address filter.  The high order three bits select the word,
	 * while the rest of the bits select the bit within the word.
	 */
	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
	PUT_MACREG(hash3, hmep->hme_ladrf[3]);

	/*
	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
	 * four bytes.)
	 */
	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);

	/*
	 * Initialize HME Global registers, ETX registers and ERX registers.
	 */

	PUT_ETXREG(txring, hmep->hme_tmd_paddr);
	PUT_ERXREG(rxring, hmep->hme_rmd_paddr);

	/*
	 * ERX registers can be written only if they have even no. of bits set.
	 * So, if the value written is not read back, set the lsb and write
	 * again.
	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
	 */
	{
		uint32_t temp;
		temp  = hmep->hme_rmd_paddr;

		if (GET_ERXREG(rxring) != temp)
			PUT_ERXREG(rxring, (temp | 4));
	}

	PUT_GLOBREG(config, (hmep->hme_config |
	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));

	/*
	 * Significant performance improvements can be achieved by
	 * disabling transmit interrupt.  Thus TMD's are reclaimed only
	 * when we run out of them in hmestart().
	 */
	PUT_GLOBREG(intmask,
	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);

	PUT_ETXREG(txring_size, ((HME_TMDMAX -1)>> HMET_RINGSZ_SHIFT));
	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
	    | HMET_CONFIG_TXFIFOTH));
	/* get the rxring size bits */
	switch (HME_RMDMAX) {
	case 32:
		i = HMER_CONFIG_RXRINGSZ32;
		break;
	case 64:
		i = HMER_CONFIG_RXRINGSZ64;
		break;
	case 128:
		i = HMER_CONFIG_RXRINGSZ128;
		break;
	case 256:
		i = HMER_CONFIG_RXRINGSZ256;
		break;
	default:
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    unk_rx_ringsz_msg);
		goto init_fail;
	}
	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
	    | HMER_CONFIG_RXDMA_EN;

	/* h/w checks start offset in half words */
	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);

	PUT_ERXREG(config, i);

	/*
	 * Bug related to the parity handling in ERX.  When erxp-config is
	 * read back.
	 * Sbus/FEPS drives the parity bit.  This value is used while
	 * writing again.
	 * This fixes the RECV problem in SS5.
	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
	 */
	{
		uint32_t temp;
		temp = GET_ERXREG(config);
		PUT_ERXREG(config, i);

		if (GET_ERXREG(config) != i)
			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
			    "error:temp = %x erxp->config = %x, should be %x",
			    temp, GET_ERXREG(config), i);
	}

	/*
	 * Set up the rxconfig, txconfig and seed register without
	 * enabling the former two at this time.
	 *
	 * BigMAC strips the CRC bytes by default.  Since this is
	 * contrary to other pieces of hardware, this bit needs to
	 * enabled to tell BigMAC not to strip the CRC bytes.
	 * Do not filter this node's own packets.
	 */

	if (hme_reject_own) {
		PUT_MACREG(rxcfg,
		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
	} else {
		PUT_MACREG(rxcfg,
		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
		    BMAC_RXCFG_HASH));
	}

	drv_usecwait(10);	/* wait after setting Hash Enable bit */

	fdx = (mii_get_duplex(hmep->hme_mii) == LINK_DUPLEX_FULL);

	if (hme_ngu_enable)
		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0) |
		    BMAC_TXCFG_NGU);
	else
		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX: 0));

	i = 0;
	if ((hmep->hme_lance_mode) && (hmep->hme_lance_mode_enable))
		i = ((hmep->hme_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
		    | BMAC_XIFC_LANCE_ENAB;
	if (phyad == HME_INTERNAL_PHYAD)
		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
	else
		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));

	/* finally enable receive and transmit MACs */
	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);

	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
	/*
	 * Update the interrupt mask : this will re-allow interrupts to occur
	 */
	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
	mac_tx_update(hmep->hme_mh);

init_fail:
	/*
	 * Release the locks in reverse order
	 */
	mutex_exit(&hmep->hme_xmitlock);
	mutex_exit(&hmep->hme_intrlock);

	/* Success is defined by HMERUNNING having been set above. */
	ret = !(hmep->hme_flags & HMERUNNING);
	if (ret) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    init_fail_gen_msg);
	}

	/*
	 * Hardware checks.
	 */
	CHECK_GLOBREG();
	CHECK_MIFREG();
	CHECK_MACREG();
	CHECK_ERXREG();
	CHECK_ETXREG();

init_exit:
	return (ret);
}
2770 2770
2771 2771 /*
2772 2772 * Calculate the dvma burstsize by setting up a dvma temporarily. Return
2773 2773 * 0 as burstsize upon failure as it signifies no burst size.
2774 2774 * Requests for 64-bit transfer setup, if the platform supports it.
2775 2775 * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f),
2776 2776 * sun4u Ultra-2 incorrectly returns a 32bit transfer.
2777 2777 */
2778 2778 static int
2779 2779 hmeburstsizes(struct hme *hmep)
2780 2780 {
2781 2781 int burstsizes;
2782 2782 ddi_dma_handle_t handle;
2783 2783
2784 2784 if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2785 2785 DDI_DMA_DONTWAIT, NULL, &handle)) {
2786 2786 return (0);
2787 2787 }
2788 2788
2789 2789 hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
2790 2790 ddi_dma_free_handle(&handle);
2791 2791
2792 2792 /*
2793 2793 * Use user-configurable parameter for enabling 64-bit transfers
2794 2794 */
2795 2795 burstsizes = (hmep->hme_burstsizes >> 16);
2796 2796 if (burstsizes)
2797 2797 hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
2798 2798 else
2799 2799 burstsizes = hmep->hme_burstsizes;
2800 2800
2801 2801 if (hmep->hme_cheerio_mode)
2802 2802 hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
2803 2803
2804 2804 if (burstsizes & 0x40)
2805 2805 hmep->hme_config = HMEG_CONFIG_BURST64;
2806 2806 else if (burstsizes & 0x20)
2807 2807 hmep->hme_config = HMEG_CONFIG_BURST32;
2808 2808 else
2809 2809 hmep->hme_config = HMEG_CONFIG_BURST16;
2810 2810
2811 2811 return (DDI_SUCCESS);
2812 2812 }
2813 2813
/*
 * Allocate and DMA-map one packet buffer (HMEBUFSIZE rounded up to
 * 512 bytes), filling in buf->dmah, buf->kaddr, buf->acch and
 * buf->paddr.  'dir' is DDI_DMA_READ for receive buffers or
 * DDI_DMA_WRITE for transmit buffers.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 *
 * NOTE(review): on failure this returns with earlier resources
 * (handle/memory) still held; presumably the caller tears them down
 * via hmefreebufs(), which checks each field -- confirm at call sites.
 */
static int
hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
{
	ddi_dma_cookie_t	dmac;
	size_t			len;
	unsigned		ccnt;

	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    "cannot allocate buf dma handle - failed");
		return (DDI_FAILURE);
	}

	if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
	    &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    "cannot allocate buf memory - failed");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
	    len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &dmac, &ccnt) != DDI_DMA_MAPPED) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    "cannot map buf for dma - failed");
		return (DDI_FAILURE);
	}
	buf->paddr = dmac.dmac_address;

	/* apparently they don't handle multiple cookies */
	if (ccnt > 1) {
		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
		    "too many buf dma cookies");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
2853 2853
2854 2854 static int
2855 2855 hmeallocbufs(struct hme *hmep)
2856 2856 {
2857 2857 hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2858 2858 hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2859 2859
2860 2860 /* Alloc RX buffers. */
2861 2861 for (int i = 0; i < HME_RMDMAX; i++) {
2862 2862 if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
2863 2863 DDI_SUCCESS) {
2864 2864 return (DDI_FAILURE);
2865 2865 }
2866 2866 }
2867 2867
2868 2868 /* Alloc TX buffers. */
2869 2869 for (int i = 0; i < HME_TMDMAX; i++) {
2870 2870 if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
2871 2871 DDI_SUCCESS) {
2872 2872 return (DDI_FAILURE);
2873 2873 }
2874 2874 }
2875 2875 return (DDI_SUCCESS);
2876 2876 }
2877 2877
/*
 * Release all transmit and receive packet buffers: unbind the DMA
 * mapping, free the DMA memory and handle for each, then free the
 * buffer arrays themselves.
 */
static void
hmefreebufs(struct hme *hmep)
{
	int i;

	if (hmep->hme_rbuf == NULL)
		return;

	/*
	 * Free and unload pending xmit and recv buffers.
	 * The routine is written to be idempotent: each resource is
	 * only released if its field was actually populated.
	 */

	for (i = 0; i < HME_TMDMAX; i++) {
		hmebuf_t *tbuf = &hmep->hme_tbuf[i];
		if (tbuf->paddr) {
			(void) ddi_dma_unbind_handle(tbuf->dmah);
		}
		if (tbuf->kaddr) {
			ddi_dma_mem_free(&tbuf->acch);
		}
		if (tbuf->dmah) {
			ddi_dma_free_handle(&tbuf->dmah);
		}
	}
	for (i = 0; i < HME_RMDMAX; i++) {
		hmebuf_t *rbuf = &hmep->hme_rbuf[i];
		if (rbuf->paddr) {
			(void) ddi_dma_unbind_handle(rbuf->dmah);
		}
		if (rbuf->kaddr) {
			ddi_dma_mem_free(&rbuf->acch);
		}
		if (rbuf->dmah) {
			ddi_dma_free_handle(&rbuf->dmah);
		}
	}
	kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
	kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
}
2919 2919
/*
 * Un-initialize (STOP) HME channel.
 */
static void
hmeuninit(struct hme *hmep)
{
	/*
	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
	 * The reclaim index catching up to the transmit index means the
	 * TX ring has drained.
	 */
	HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);

	/*
	 * Take the interrupt lock before the transmit lock (same order
	 * as hmeintr) so neither the interrupt path nor a sender races
	 * with the stop.
	 */
	mutex_enter(&hmep->hme_intrlock);
	mutex_enter(&hmep->hme_xmitlock);

	/* Mark the channel down before touching the hardware. */
	hmep->hme_flags &= ~HMERUNNING;

	(void) hmestop(hmep);

	mutex_exit(&hmep->hme_xmitlock);
	mutex_exit(&hmep->hme_intrlock);
}
2941 2941
2942 2942 /*
2943 2943 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2944 2944 * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
2945 2945 * structures to use the DMA interface.
2946 2946 */
2947 2947 static int
2948 2948 hmeallocthings(struct hme *hmep)
2949 2949 {
2950 2950 int size;
2951 2951 int rval;
2952 2952 size_t real_len;
2953 2953 uint_t cookiec;
2954 2954 ddi_dma_cookie_t dmac;
2955 2955 dev_info_t *dip = hmep->dip;
2956 2956
2957 2957 /*
2958 2958 * Allocate the TMD and RMD descriptors and extra for page alignment.
2959 2959 */
2960 2960
2961 2961 rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2962 2962 &hmep->hme_rmd_dmah);
2963 2963 if (rval != DDI_SUCCESS) {
2964 2964 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2965 2965 "cannot allocate rmd handle - failed");
2966 2966 return (DDI_FAILURE);
2967 2967 }
2968 2968 size = HME_RMDMAX * sizeof (struct hme_rmd);
2969 2969 rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
2970 2970 &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2971 2971 &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
2972 2972 if (rval != DDI_SUCCESS) {
2973 2973 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2974 2974 "cannot allocate rmd dma mem - failed");
2975 2975 return (DDI_FAILURE);
2976 2976 }
2977 2977 hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
2978 2978 rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
2979 2979 hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2980 2980 DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2981 2981 if (rval != DDI_DMA_MAPPED) {
2982 2982 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2983 2983 "cannot allocate rmd dma - failed");
2984 2984 return (DDI_FAILURE);
2985 2985 }
2986 2986 hmep->hme_rmd_paddr = dmac.dmac_address;
2987 2987 if (cookiec != 1) {
2988 2988 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2989 2989 "too many rmd cookies - failed");
2990 2990 return (DDI_FAILURE);
2991 2991 }
2992 2992
2993 2993 rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2994 2994 &hmep->hme_tmd_dmah);
2995 2995 if (rval != DDI_SUCCESS) {
2996 2996 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2997 2997 "cannot allocate tmd handle - failed");
2998 2998 return (DDI_FAILURE);
2999 2999 }
3000 3000 size = HME_TMDMAX * sizeof (struct hme_rmd);
3001 3001 rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
3002 3002 &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
3003 3003 &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
3004 3004 if (rval != DDI_SUCCESS) {
3005 3005 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3006 3006 "cannot allocate tmd dma mem - failed");
3007 3007 return (DDI_FAILURE);
3008 3008 }
3009 3009 hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
3010 3010 rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
3011 3011 hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3012 3012 DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
3013 3013 if (rval != DDI_DMA_MAPPED) {
3014 3014 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3015 3015 "cannot allocate tmd dma - failed");
3016 3016 return (DDI_FAILURE);
3017 3017 }
3018 3018 hmep->hme_tmd_paddr = dmac.dmac_address;
3019 3019 if (cookiec != 1) {
3020 3020 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3021 3021 "too many tmd cookies - failed");
3022 3022 return (DDI_FAILURE);
3023 3023 }
3024 3024
3025 3025 return (DDI_SUCCESS);
3026 3026 }
3027 3027
3028 3028 static void
3029 3029 hmefreethings(struct hme *hmep)
3030 3030 {
3031 3031 if (hmep->hme_rmd_paddr) {
3032 3032 (void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
3033 3033 hmep->hme_rmd_paddr = 0;
3034 3034 }
3035 3035 if (hmep->hme_rmd_acch)
3036 3036 ddi_dma_mem_free(&hmep->hme_rmd_acch);
3037 3037 if (hmep->hme_rmd_dmah)
3038 3038 ddi_dma_free_handle(&hmep->hme_rmd_dmah);
3039 3039
3040 3040 if (hmep->hme_tmd_paddr) {
3041 3041 (void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
3042 3042 hmep->hme_tmd_paddr = 0;
3043 3043 }
3044 3044 if (hmep->hme_tmd_acch)
3045 3045 ddi_dma_mem_free(&hmep->hme_tmd_acch);
3046 3046 if (hmep->hme_tmd_dmah)
3047 3047 ddi_dma_free_handle(&hmep->hme_tmd_dmah);
3048 3048 }
3049 3049
/*
 * Interrupt service routine.  First check to see if it is our device
 * interrupting; then dispatch TX reclaim, RX harvesting and error
 * handling based on the latched status bits.
 */
static uint_t
hmeintr(caddr_t arg)
{
	struct hme	*hmep = (void *)arg;
	uint32_t	hmesbits;
	uint32_t	serviced = DDI_INTR_UNCLAIMED;
	uint32_t	num_reads = 0;
	uint32_t	rflags;
	/* Received packets are chained via b_next; tail points at the link */
	mblk_t		*mp, *head, **tail;


	head = NULL;
	tail = &head;

	mutex_enter(&hmep->hme_intrlock);

	/*
	 * The status register auto-clears on read except for
	 * MIF Interrupt bit
	 */
	hmesbits = GET_GLOBREG(status);
	CHECK_GLOBREG();

	/*
	 * Note: TINT is sometimes enabled in thr hmereclaim()
	 */

	/*
	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
	 * Claim the first interrupt after initialization
	 */
	if (hmep->hme_flags & HMEINITIALIZED) {
		hmep->hme_flags &= ~HMEINITIALIZED;
		serviced = DDI_INTR_CLAIMED;
	}

	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
		/* No interesting interrupt */
		if (hmep->hme_intrstats) {
			if (serviced == DDI_INTR_UNCLAIMED)
				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
			else
				KIOIP->intrs[KSTAT_INTR_HARD]++;
		}
		mutex_exit(&hmep->hme_intrlock);
		return (serviced);
	}

	serviced = DDI_INTR_CLAIMED;

	/* Interrupt arrived while the channel is down: finish stopping it. */
	if (!(hmep->hme_flags & HMERUNNING)) {
		if (hmep->hme_intrstats)
			KIOIP->intrs[KSTAT_INTR_HARD]++;
		mutex_exit(&hmep->hme_intrlock);
		hmeuninit(hmep);
		return (serviced);
	}

	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
		/* Fatal errors require a full channel reinitialization. */
		if (hmesbits & HMEG_STATUS_FATAL_ERR) {

			if (hmep->hme_intrstats)
				KIOIP->intrs[KSTAT_INTR_HARD]++;
			hme_fatal_err(hmep, hmesbits);

			mutex_exit(&hmep->hme_intrlock);
			(void) hmeinit(hmep);
			return (serviced);
		}
		/* Non-fatal errors only bump counters. */
		hme_nonfatal_err(hmep, hmesbits);
	}

	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
		mutex_enter(&hmep->hme_xmitlock);

		hmereclaim(hmep);
		mutex_exit(&hmep->hme_xmitlock);
	}

	if (hmesbits & HMEG_STATUS_RINT) {

		/*
		 * This dummy PIO is required to flush the SBus
		 * Bridge buffers in QFE.
		 */
		(void) GET_GLOBREG(config);

		/*
		 * Loop through each RMD no more than once.
		 */
		while (num_reads++ < HME_RMDMAX) {
			hmebuf_t *rbuf;
			int rxptr;

			rxptr = hmep->hme_rxindex % HME_RMDMAX;
			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);

			rflags = GET_RMD_FLAGS(rxptr);
			if (rflags & HMERMD_OWN) {
				/*
				 * Chip still owns it.  We're done.
				 */
				break;
			}

			/*
			 * Retrieve the packet.
			 */
			rbuf = &hmep->hme_rbuf[rxptr];
			mp = hmeread(hmep, rbuf, rflags);

			/*
			 * Return ownership of the RMD.
			 */
			PUT_RMD(rxptr, rbuf->paddr);
			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);

			if (mp != NULL) {
				*tail = mp;
				tail = &mp->b_next;
			}

			/*
			 * Advance to the next RMD.
			 */
			hmep->hme_rxindex++;
		}
	}

	if (hmep->hme_intrstats)
		KIOIP->intrs[KSTAT_INTR_HARD]++;

	mutex_exit(&hmep->hme_intrlock);

	/* Deliver the harvested chain to GLDv3 outside the lock. */
	if (head != NULL)
		mac_rx(hmep->hme_mh, NULL, head);

	return (serviced);
}
3192 3192
/*
 * Transmit completion reclaiming.
 * Called with hme_xmitlock held.  Walks the TX ring from the reclaim
 * index toward the transmit index, retiring every descriptor the chip
 * has released, then adjusts the interrupt mask accordingly.
 */
static void
hmereclaim(struct hme *hmep)
{
	boolean_t	reclaimed = B_FALSE;

	/*
	 * Loop through each TMD.
	 */
	while (hmep->hme_txindex > hmep->hme_txreclaim) {

		int		reclaim;
		uint32_t	flags;

		reclaim = hmep->hme_txreclaim % HME_TMDMAX;
		HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);

		flags = GET_TMD_FLAGS(reclaim);
		if (flags & HMETMD_OWN) {
			/*
			 * Chip still owns it.  We're done.
			 */
			break;
		}

		/*
		 * Count a chained packet only once.
		 */
		if (flags & HMETMD_SOP) {
			hmep->hme_opackets++;
		}

		/*
		 * MIB II
		 */
		hmep->hme_obytes += flags & HMETMD_BUFSIZE;

		reclaimed = B_TRUE;
		hmep->hme_txreclaim++;
	}

	if (reclaimed) {
		/*
		 * we could reclaim some TMDs so turn off interrupts
		 */
		if (hmep->hme_wantw) {
			PUT_GLOBREG(intmask,
			    HMEG_MASK_INTR | HMEG_MASK_TINT |
			    HMEG_MASK_TX_ALL);
			hmep->hme_wantw = B_FALSE;
			/* Ring freed up: let MAC resume sending. */
			mac_tx_update(hmep->hme_mh);
		}
	} else {
		/*
		 * enable TINTS: so that even if there is no further activity
		 * hmereclaim will get called
		 */
		if (hmep->hme_wantw)
			PUT_GLOBREG(intmask,
			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
	}
	CHECK_GLOBREG();
}
3258 3258
3259 3259 /*
3260 3260 * Handle interrupts for fatal errors
3261 3261 * Need reinitialization of the ENET channel.
3262 3262 */
3263 3263 static void
3264 3264 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
3265 3265 {
3266 3266
3267 3267 if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
3268 3268 hmep->hme_slvparerr++;
3269 3269 }
3270 3270
3271 3271 if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
3272 3272 hmep->hme_slverrack++;
3273 3273 }
3274 3274
3275 3275 if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
3276 3276 hmep->hme_txtagerr++;
3277 3277 hmep->hme_oerrors++;
3278 3278 }
3279 3279
3280 3280 if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
3281 3281 hmep->hme_txparerr++;
3282 3282 hmep->hme_oerrors++;
3283 3283 }
3284 3284
3285 3285 if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
3286 3286 hmep->hme_txlaterr++;
3287 3287 hmep->hme_oerrors++;
3288 3288 }
3289 3289
3290 3290 if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
3291 3291 hmep->hme_txerrack++;
3292 3292 hmep->hme_oerrors++;
3293 3293 }
3294 3294
3295 3295 if (hmesbits & HMEG_STATUS_EOP_ERR) {
3296 3296 hmep->hme_eoperr++;
3297 3297 }
3298 3298
3299 3299 if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
3300 3300 hmep->hme_rxtagerr++;
3301 3301 hmep->hme_ierrors++;
3302 3302 }
3303 3303
3304 3304 if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
3305 3305 hmep->hme_rxparerr++;
3306 3306 hmep->hme_ierrors++;
3307 3307 }
3308 3308
3309 3309 if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
3310 3310 hmep->hme_rxlaterr++;
3311 3311 hmep->hme_ierrors++;
3312 3312 }
3313 3313
3314 3314 if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
3315 3315 hmep->hme_rxerrack++;
3316 3316 hmep->hme_ierrors++;
3317 3317 }
3318 3318 }
3319 3319
3320 3320 /*
3321 3321 * Handle interrupts regarding non-fatal errors.
3322 3322 */
3323 3323 static void
3324 3324 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
3325 3325 {
3326 3326
3327 3327 if (hmesbits & HMEG_STATUS_RX_DROP) {
3328 3328 hmep->hme_missed++;
3329 3329 hmep->hme_ierrors++;
3330 3330 }
3331 3331
3332 3332 if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
3333 3333 hmep->hme_defer_xmts++;
3334 3334 }
3335 3335
3336 3336 if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
3337 3337 hmep->hme_fstcol += 256;
3338 3338 }
3339 3339
3340 3340 if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
3341 3341 hmep->hme_tlcol += 256;
3342 3342 hmep->hme_oerrors += 256;
3343 3343 }
3344 3344
3345 3345 if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
3346 3346 hmep->hme_excol += 256;
3347 3347 hmep->hme_oerrors += 256;
3348 3348 }
3349 3349
3350 3350 if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
3351 3351 hmep->hme_coll += 256;
3352 3352 }
3353 3353
3354 3354 if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
3355 3355 hmep->hme_babl++;
3356 3356 hmep->hme_oerrors++;
3357 3357 }
3358 3358
3359 3359 /*
3360 3360 * This error is fatal and the board needs to
3361 3361 * be reinitialized. Comments?
3362 3362 */
3363 3363 if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
3364 3364 hmep->hme_uflo++;
3365 3365 hmep->hme_oerrors++;
3366 3366 }
3367 3367
3368 3368 if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
3369 3369 hmep->hme_sqe_errors++;
3370 3370 }
3371 3371
3372 3372 if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
3373 3373 if (hmep->hme_rxcv_enable) {
3374 3374 hmep->hme_cvc += 256;
3375 3375 }
3376 3376 }
3377 3377
3378 3378 if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
3379 3379 hmep->hme_oflo++;
3380 3380 hmep->hme_ierrors++;
3381 3381 }
3382 3382
3383 3383 if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
3384 3384 hmep->hme_lenerr += 256;
3385 3385 hmep->hme_ierrors += 256;
3386 3386 }
3387 3387
3388 3388 if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
3389 3389 hmep->hme_align_errors += 256;
3390 3390 hmep->hme_ierrors += 256;
3391 3391 }
3392 3392
3393 3393 if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
3394 3394 hmep->hme_fcs_errors += 256;
3395 3395 hmep->hme_ierrors += 256;
3396 3396 }
3397 3397 }
3398 3398
/*
 * Copy one received frame out of the given receive buffer into a fresh
 * mblk.  Returns the mblk on success, or NULL when the frame is bad or
 * no memory is available (the buffer is reused either way).
 */
static mblk_t *
hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
{
	mblk_t		*bp;
	uint32_t	len;
	t_uscalar_t	type;

	/* Frame length is encoded in the descriptor flags. */
	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;

	/*
	 * Check for short packet
	 * and check for overflow packet also. The processing is the
	 * same for both the cases - reuse the buffer. Update the Buffer
	 * overflow counter.
	 */
	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
	    (len > (ETHERMAX + 4))) {
		if (len < ETHERMIN)
			hmep->hme_runt++;

		else {
			hmep->hme_buff++;
			hmep->hme_toolong_errors++;
		}
		hmep->hme_ierrors++;
		return (NULL);
	}

	/*
	 * Sync the received buffer before looking at it.
	 */

	(void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * copy the packet data and then recycle the descriptor.
	 */

	if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {

		hmep->hme_allocbfail++;
		hmep->hme_norcvbuf++;

		return (NULL);
	}

	bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);

	hmep->hme_ipackets++;

	/* Add the First Byte offset to the b_rptr and copy */
	bp->b_rptr += HME_FSTBYTE_OFFSET;
	bp->b_wptr = bp->b_rptr + len;

	/*
	 * update MIB II statistics
	 */
	BUMP_InNUcast(hmep, bp->b_rptr);
	hmep->hme_rbytes += len;

	type = get_ether_type(bp->b_rptr);

	/*
	 * TCP partial checksum in hardware
	 */
	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
		/* Hardware supplies a ones-complement sum in the flags. */
		uint16_t cksum = ~rflags & HMERMD_CKSUM;
		uint_t end = len - sizeof (struct ether_header);
		mac_hcksum_set(bp, 0, 0, end, htons(cksum), HCK_PARTIALCKSUM);
	}

	return (bp);
}
3472 3472
3473 3473 /*VARARGS*/
3474 3474 static void
3475 3475 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
3476 3476 {
3477 3477 char msg_buffer[255];
3478 3478 va_list ap;
3479 3479
3480 3480 va_start(ap, fmt);
3481 3481 (void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
3482 3482
3483 3483 if (hmep == NULL) {
3484 3484 cmn_err(CE_NOTE, "hme : %s", msg_buffer);
3485 3485
3486 3486 } else if (type == DISPLAY_MSG) {
3487 3487 cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
3488 3488 hmep->instance, msg_buffer);
3489 3489 } else if (severity == SEVERITY_HIGH) {
3490 3490 cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
3491 3491 ddi_driver_name(hmep->dip), hmep->instance,
3492 3492 msg_buffer, msg_string[type]);
3493 3493 } else {
3494 3494 cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
3495 3495 hmep->instance, msg_buffer);
3496 3496 }
3497 3497 va_end(ap);
3498 3498 }
3499 3499
/*
 * Latch the MAC's hardware error counters into the software statistics
 * and reset the hardware counters to zero.
 *
 * if this is the first init do not bother to save the
 * counters. They should be 0, but do not count on it.
 */
static void
hmesavecntrs(struct hme *hmep)
{
	uint32_t fecnt, aecnt, lecnt, rxcv;
	uint32_t ltcnt, excnt;

	/* XXX What all gets added in ierrors and oerrors? */

	/* Each counter is read then immediately cleared (read-and-clear). */
	fecnt = GET_MACREG(fecnt);
	PUT_MACREG(fecnt, 0);

	aecnt = GET_MACREG(aecnt);
	hmep->hme_align_errors += aecnt;
	PUT_MACREG(aecnt, 0);

	lecnt = GET_MACREG(lecnt);
	hmep->hme_lenerr += lecnt;
	PUT_MACREG(lecnt, 0);

	rxcv = GET_MACREG(rxcv);
#ifdef HME_CODEVIOL_BUG
	/*
	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
	 */
	if (!hmep->hme_rxcv_enable) {
		rxcv = 0;
	}
#endif
	hmep->hme_cvc += rxcv;
	PUT_MACREG(rxcv, 0);

	ltcnt = GET_MACREG(ltcnt);
	hmep->hme_tlcol += ltcnt;
	PUT_MACREG(ltcnt, 0);

	excnt = GET_MACREG(excnt);
	hmep->hme_excol += excnt;
	PUT_MACREG(excnt, 0);

	/* Roll the latched values into the aggregate MIB counters. */
	hmep->hme_fcs_errors += fecnt;
	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
	hmep->hme_oerrors += (ltcnt + excnt);
	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);

	PUT_MACREG(nccnt, 0);
	CHECK_MACREG();
}
3550 3550
/*
 * To set up the mac address for the network interface:
 * The adapter card may support a local mac address which is published
 * in a device node property "local-mac-address". This mac address is
 * treated as the factory-installed mac address for DLPI interface.
 * If the adapter firmware has used the device for diskless boot
 * operation it publishes a property called "mac-address" for use by
 * inetboot and the device driver.
 * If "mac-address" is not found, the system options property
 * "local-mac-address" is used to select the mac-address. If this option
 * is set to "true", and "local-mac-address" has been found, then
 * local-mac-address is used; otherwise the system mac address is used
 * by calling the "localetheraddr()" function.
 */
static void
hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
{
	char	*prop;
	int	prop_len = sizeof (int);

	hmep->hme_addrflags = 0;

	/*
	 * Check if it is an adapter with its own local mac address
	 * If it is present, save it as the "factory-address"
	 * for this adapter.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "local-mac-address",
	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
		/* Only accept a property of exactly Ethernet-address size */
		if (prop_len == ETHERADDRL) {
			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
			ether_bcopy(prop, &hmep->hme_factaddr);
			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
			    "Local Ethernet address = %s",
			    ether_sprintf(&hmep->hme_factaddr));
		}
		/* ddi_getlongprop allocates; always release the buffer */
		kmem_free(prop, prop_len);
	}

	/*
	 * Check if the adapter has published "mac-address" property.
	 * If it is present, use it as the mac address for this device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
		if (prop_len >= ETHERADDRL) {
			ether_bcopy(prop, &hmep->hme_ouraddr);
			kmem_free(prop, prop_len);
			return;
		}
		kmem_free(prop, prop_len);
	}

#ifdef __sparc
	/*
	 * On sparc, we might be able to use the mac address from the
	 * system. However, on all other systems, we need to use the
	 * address from the PROM.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
		if ((strncmp("true", prop, prop_len) == 0) &&
		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
			hmep->hme_addrflags |= HME_FACTADDR_USE;
			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
			kmem_free(prop, prop_len);
			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
			    "Using local MAC address");
			return;
		}
		kmem_free(prop, prop_len);
	}

	/*
	 * Get the system ethernet address.
	 */
	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
#else
	ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
#endif
}
3633 3633
/* ARGSUSED */
/*
 * Intentionally empty access-handle check.  NOTE(review): presumably
 * invoked through the CHECK_*REG() macros as a hook for FMA-style
 * handle validation that this driver does not implement — verify
 * against the macro definitions in the header.
 */
static void
hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
    ddi_acc_handle_t handle)
{
}
↓ open down ↓ |
2925 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX