7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/intel/io/amd8111s/amd8111s_main.c
+++ new/usr/src/uts/intel/io/amd8111s/amd8111s_main.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc. All rights reserved.
8 8 *
9 9 * Redistribution and use in source and binary forms, with or without
10 10 * modification, are permitted provided that the following conditions are met:
11 11 *
12 12 * + Redistributions of source code must retain the above copyright notice,
13 13 * + this list of conditions and the following disclaimer.
14 14 *
15 15 * + Redistributions in binary form must reproduce the above copyright
16 16 * + notice, this list of conditions and the following disclaimer in the
17 17 * + documentation and/or other materials provided with the distribution.
18 18 *
19 19 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
20 20 * + contributors may be used to endorse or promote products derived from
21 21 * + this software without specific prior written permission.
22 22 *
23 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
24 24 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
25 25 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 27 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
28 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29 29 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
30 30 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
34 34 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
35 35 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 36 *
37 37 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
38 38 * Compliance with Applicable Laws. Notice is hereby given that
39 39 * the software may be subject to restrictions on use, release,
40 40 * transfer, importation, exportation and/or re-exportation under
41 41 * the laws and regulations of the United States or other
42 42 * countries ("Applicable Laws"), which include but are not
43 43 * limited to U.S. export control laws such as the Export
44 44 * Administration Regulations and national security controls as
45 45 * defined thereunder, as well as State Department controls under
46 46 * the U.S. Munitions List. Permission to use and/or
47 47 * redistribute the software is conditioned upon compliance with
48 48 * all Applicable Laws, including U.S. export control laws
49 49 * regarding specifically designated persons, countries and
50 50 * nationals of countries subject to national security controls.
51 51 */
52 52
53 53 /* include files */
54 54 #include <sys/disp.h>
55 55 #include <sys/atomic.h>
56 56 #include <sys/vlan.h>
57 57 #include "amd8111s_main.h"
58 58
59 59 /* Global macro Definitions */
60 60 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
61 61 #define INTERFACE_NAME "amd8111s"
62 62 #define AMD8111S_SPLIT 128
63 63 #define AMD8111S_SEND_MAX 64
64 64
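/*
 * A quick sketch of the ROUNDUP arithmetic above (it assumes the
 * alignment 'a' is a power of two):
 *
 *	ROUNDUP(100, 16) = (100 + 15) & ~15 = 112
 *	ROUNDUP(112, 16) = (112 + 15) & ~15 = 112
 *
 * With a non-power-of-two 'a', ~(a - 1) is not a contiguous bit
 * mask and the result need not be a multiple of 'a'.
 */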
65 65 static char ident[] = "AMD8111 10/100M Ethernet";
66 66
67 67 /*
68 68 * Driver Entry Points
69 69 */
70 70 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
71 71 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);
72 72
73 73 /*
74 74 * GLD Entry points prototype
75 75 */
76 76 static int amd8111s_m_unicst(void *, const uint8_t *);
77 77 static int amd8111s_m_promisc(void *, boolean_t);
78 78 static int amd8111s_m_stat(void *, uint_t, uint64_t *);
79 79 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
80 80 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
81 81 static int amd8111s_m_start(void *);
82 82 static void amd8111s_m_stop(void *);
83 83 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
84 84 static uint_t amd8111s_intr(caddr_t);
85 85
86 86 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);
87 87
88 88 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
89 89 static int amd8111s_odlInit(struct LayerPointers *);
90 90 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
91 91 static void amd8111s_free_descriptors(struct LayerPointers *);
92 92 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
93 93 struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
94 94 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
95 95
96 96
97 97 static void amd8111s_log(struct LayerPointers *adapter, int level,
98 98 char *fmt, ...);
99 99
100 100 static struct cb_ops amd8111s_cb_ops = {
101 101 nulldev,
102 102 nulldev,
103 103 nodev,
104 104 nodev,
105 105 nodev,
106 106 nodev,
107 107 nodev,
108 108 nodev,
109 109 nodev,
110 110 nodev,
111 111 nodev,
112 112 nochpoll,
113 113 ddi_prop_op,
114 114 NULL,
115 115 D_NEW | D_MP,
116 116 CB_REV, /* cb_rev */
117 117 nodev, /* cb_aread */
118 118 nodev /* cb_awrite */
119 119 };
120 120
121 121 static struct dev_ops amd8111s_dev_ops = {
122 122 DEVO_REV, /* devo_rev */
123 123 0, /* devo_refcnt */
124 124 NULL, /* devo_getinfo */
125 125 nulldev, /* devo_identify */
126 126 nulldev, /* devo_probe */
127 127 amd8111s_attach, /* devo_attach */
128 128 amd8111s_detach, /* devo_detach */
129 129 nodev, /* devo_reset */
130 130 &amd8111s_cb_ops, /* devo_cb_ops */
131 131 NULL, /* devo_bus_ops */
132 132 nodev, /* devo_power */
133 133 ddi_quiesce_not_supported, /* devo_quiesce */
134 134 };
135 135
136 136 struct modldrv amd8111s_modldrv = {
137 137 &mod_driverops, /* Type of module. This one is a driver */
138 138 ident, /* short description */
139 139 &amd8111s_dev_ops /* driver specific ops */
140 140 };
141 141
142 142 struct modlinkage amd8111s_modlinkage = {
143 - MODREV_1, (void *)&amd8111s_modldrv, NULL
143 + MODREV_1, { (void *)&amd8111s_modldrv, NULL }
144 144 };
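/*
 * The one-line change above is the substance of this review:
 * struct modlinkage (sys/modctl.h) ends in an array of linkage
 * pointers, roughly
 *
 *	struct modlinkage {
 *		int	ml_rev;
 *		void	*ml_linkage[MODMAXLINK];
 *	};
 *
 * so the old flat initializer relied on implicit brace elision and
 * drew a -Wmissing-braces warning from gcc. The inner { } initializes
 * the array member explicitly; slots after the terminating NULL are
 * zero-filled either way, so the generated data is unchanged.
 */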
145 145
146 146 /*
147 147 * Global Variables
148 148 */
149 149 struct LayerPointers *amd8111sadapter;
150 150
151 151 static ddi_dma_attr_t pcn_buff_dma_attr_t = {
152 152 DMA_ATTR_V0, /* dma_attr_version */
153 153 (uint64_t)0, /* dma_attr_addr_lo */
154 154 (uint64_t)0xFFFFFFFF, /* dma_attr_addr_hi */
155 155 (uint64_t)0xFFFFFFFF, /* dma_attr_count_max */
156 156 (uint64_t)1, /* dma_attr_align */
157 157 (uint_t)0x7F, /* dma_attr_burstsizes */
158 158 (uint32_t)1, /* dma_attr_minxfer */
159 159 (uint64_t)0xFFFFFFFF, /* dma_attr_maxxfer */
160 160 (uint64_t)0xFFFFFFFF, /* dma_attr_seg */
161 161 (int)1, /* dma_attr_sgllen */
162 162 (uint32_t)1, /* granularity */
163 163 (uint_t)0 /* dma_attr_flags */
164 164 };
165 165
166 166 static ddi_dma_attr_t pcn_desc_dma_attr_t = {
167 167 DMA_ATTR_V0, /* dma_attr_version */
168 168 (uint64_t)0, /* dma_attr_addr_lo */
169 169 (uint64_t)0xFFFFFFFF, /* dma_attr_addr_hi */
170 170 (uint64_t)0x7FFFFFFF, /* dma_attr_count_max */
171 171 (uint64_t)0x10, /* dma_attr_align */
172 172 (uint_t)0xFFFFFFFFU, /* dma_attr_burstsizes */
173 173 (uint32_t)1, /* dma_attr_minxfer */
174 174 (uint64_t)0xFFFFFFFF, /* dma_attr_maxxfer */
175 175 (uint64_t)0xFFFFFFFF, /* dma_attr_seg */
176 176 (int)1, /* dma_attr_sgllen */
177 177 (uint32_t)1, /* granularity */
178 178 (uint_t)0 /* dma_attr_flags */
179 179 };
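/*
 * Notes on the two DMA attribute sets above (both constrain the
 * device to 32-bit DMA): dma_attr_addr_hi of 0xFFFFFFFF keeps all
 * buffers below 4 GB, dma_attr_sgllen of 1 forces each bind to
 * produce a single cookie (one contiguous window), and the
 * descriptor set requests 16-byte alignment (dma_attr_align = 0x10)
 * to match the controller's descriptor layout.
 */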
180 180
181 181 /* PIO access attributes for registers */
182 182 static ddi_device_acc_attr_t pcn_acc_attr = {
183 183 DDI_DEVICE_ATTR_V0,
184 184 DDI_STRUCTURE_LE_ACC,
185 185 DDI_STRICTORDER_ACC
186 186 };
187 187
188 188
189 189 static mac_callbacks_t amd8111s_m_callbacks = {
190 190 MC_IOCTL,
191 191 amd8111s_m_stat,
192 192 amd8111s_m_start,
193 193 amd8111s_m_stop,
194 194 amd8111s_m_promisc,
195 195 amd8111s_m_multicst,
196 196 amd8111s_m_unicst,
197 197 amd8111s_m_tx,
198 198 NULL,
199 199 amd8111s_m_ioctl
200 200 };
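/*
 * The first mac_callbacks_t field is a bitmask naming which optional
 * entry points are implemented; MC_IOCTL declares that mc_ioctl
 * (amd8111s_m_ioctl here) is valid. Mandatory callbacks such as
 * m_stat/m_start/m_stop/m_tx need no flag, and the NULL entry
 * appears to fill the reserved slot between mc_tx and mc_ioctl for
 * this MAC_VERSION.
 */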
201 201
202 202
203 203 /*
204 204 * Standard Driver Load Entry Point
205 205 * It will be called at driver load time.
206 206 */
207 207 int
208 208 _init()
209 209 {
210 210 int status;
211 211 mac_init_ops(&amd8111s_dev_ops, "amd8111s");
212 212
213 213 status = mod_install(&amd8111s_modlinkage);
214 214 if (status != DDI_SUCCESS) {
215 215 mac_fini_ops(&amd8111s_dev_ops);
216 216 }
217 217
218 218 return (status);
219 219 }
220 220
221 221 /*
222 222 * Standard Driver Entry Point for Query.
223 223 * It can be called at any time to get driver info.
224 224 */
225 225 int
226 226 _info(struct modinfo *modinfop)
227 227 {
228 228 return (mod_info(&amd8111s_modlinkage, modinfop));
229 229 }
230 230
231 231 /*
232 232 * Standard Driver Entry Point for Unload.
233 233 * It will be called at driver unload time.
234 234 */
235 235 int
236 236 _fini()
237 237 {
238 238 int status;
239 239
240 240 status = mod_remove(&amd8111s_modlinkage);
241 241 if (status == DDI_SUCCESS) {
242 242 mac_fini_ops(&amd8111s_dev_ops);
243 243 }
244 244
245 245 return (status);
246 246 }
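/*
 * mod_install()/mod_remove() return 0 or an errno value; the
 * comparisons against DDI_SUCCESS above work only because DDI_SUCCESS
 * is defined as 0. The mac_init_ops()/mac_fini_ops() pairing matters:
 * GLDv3 state hung off amd8111s_dev_ops must be torn down whenever
 * the module does not stay loaded.
 */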
247 247
248 248 /*
249 249 * Loopback Support
250 250 */
251 251 static lb_property_t loopmodes[] = {
252 252 { normal, "normal", AMD8111S_LB_NONE },
253 253 { external, "100Mbps", AMD8111S_LB_EXTERNAL_100 },
254 254 { external, "10Mbps", AMD8111S_LB_EXTERNAL_10 },
255 255 { internal, "MAC", AMD8111S_LB_INTERNAL_MAC }
256 256 };
257 257
258 258 static void
259 259 amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
260 260 {
261 261
262 262 /*
263 263 * If the mode isn't being changed, there's nothing to do ...
264 264 */
265 265 if (mode == adapter->pOdl->loopback_mode)
266 266 return;
267 267
268 268 /*
269 269 * Validate the requested mode and prepare a suitable message
270 270 * to explain the link down/up cycle that the change will
271 271 * probably induce ...
272 272 */
273 273 switch (mode) {
274 274 default:
275 275 return;
276 276
277 277 case AMD8111S_LB_NONE:
278 278 mdlStopChip(adapter);
279 279 if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
280 280 cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
281 281 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
282 282 INLOOP);
283 283 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
284 284 FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
285 285 } else {
286 286 cmn_err(CE_NOTE, "LB_NONE restored from External LB");
287 287 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
288 288 EXLOOP);
289 289 }
290 290
291 291 amd8111s_reset(adapter);
292 292 adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
293 293 adapter->pOdl->rx_fcs_stripped = B_FALSE;
294 294 mdlStartChip(adapter);
295 295 break;
296 296
297 297 case AMD8111S_LB_EXTERNAL_100:
298 298 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
299 299 mdlStopChip(adapter);
300 300 amd8111s_reset(adapter);
301 301 SetIntrCoalesc(adapter, B_FALSE);
302 302 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
303 303 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
304 304 VAL0 | EXLOOP);
305 305 adapter->pOdl->LinkStatus = LINK_STATE_UP;
306 306 adapter->pMdl->Speed = 100;
307 307 adapter->pMdl->FullDuplex = B_TRUE;
308 308 /* Tell GLD the state of the physical link. */
309 309 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
310 310
311 311 adapter->pOdl->rx_fcs_stripped = B_TRUE;
312 312
313 313 mdlStartChip(adapter);
314 314 break;
315 315
316 316 case AMD8111S_LB_EXTERNAL_10:
317 317 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
318 318 mdlStopChip(adapter);
319 319 amd8111s_reset(adapter);
320 320 SetIntrCoalesc(adapter, B_FALSE);
321 321 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
322 322 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
323 323 VAL0 | EXLOOP);
324 324 adapter->pOdl->LinkStatus = LINK_STATE_UP;
325 325 adapter->pMdl->Speed = 10;
326 326 adapter->pMdl->FullDuplex = B_TRUE;
327 327 /* Tell GLD the state of the physical link. */
328 328 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
329 329
330 330 adapter->pOdl->rx_fcs_stripped = B_TRUE;
331 331
332 332 mdlStartChip(adapter);
333 333 break;
334 334
335 335 case AMD8111S_LB_INTERNAL_MAC:
336 336 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
337 337 mdlStopChip(adapter);
338 338 amd8111s_reset(adapter);
339 339 SetIntrCoalesc(adapter, B_FALSE);
340 340 /* Disable Port Manager */
341 341 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
342 342 EN_PMGR);
343 343 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
344 344 VAL0 | INLOOP);
345 345
346 346 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
347 347 VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
348 348
349 349 adapter->pOdl->LinkStatus = LINK_STATE_UP;
350 350 adapter->pMdl->FullDuplex = B_TRUE;
351 351 /* Tell GLD the state of the physical link. */
352 352 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
353 353
354 354 adapter->pOdl->rx_fcs_stripped = B_TRUE;
355 355
356 356 mdlStartChip(adapter);
357 357 break;
358 358 }
359 359
360 360 /*
361 361 * All OK; tell the caller to reprogram
362 362 * the PHY and/or MAC for the new mode ...
363 363 */
364 364 adapter->pOdl->loopback_mode = mode;
365 365 }
366 366
367 367 static enum ioc_reply
368 368 amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
369 369 mblk_t *mp)
370 370 {
371 371 lb_info_sz_t *lbsp;
372 372 lb_property_t *lbpp;
373 373 uint32_t *lbmp;
374 374 int cmd;
375 375
376 376 /*
377 377 * Validate format of ioctl
378 378 */
379 379 if (mp->b_cont == NULL)
380 380 return (IOC_INVAL);
381 381
382 382 cmd = iocp->ioc_cmd;
383 383 switch (cmd) {
384 384 default:
385 385 /* NOTREACHED */
386 386 amd8111s_log(adapter, CE_NOTE,
387 387 "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
388 388 return (IOC_INVAL);
389 389
390 390 case LB_GET_INFO_SIZE:
391 391 if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
392 392 amd8111s_log(adapter, CE_NOTE,
393 393 "wrong LB_GET_INFO_SIZE size");
394 394 return (IOC_INVAL);
395 395 }
396 396 lbsp = (void *)mp->b_cont->b_rptr;
397 397 *lbsp = sizeof (loopmodes);
398 398 break;
399 399
400 400 case LB_GET_INFO:
401 401 if (iocp->ioc_count != sizeof (loopmodes)) {
402 402 amd8111s_log(adapter, CE_NOTE,
403 403 "Wrong LB_GET_INFO size");
404 404 return (IOC_INVAL);
405 405 }
406 406 lbpp = (void *)mp->b_cont->b_rptr;
407 407 bcopy(loopmodes, lbpp, sizeof (loopmodes));
408 408 break;
409 409
410 410 case LB_GET_MODE:
411 411 if (iocp->ioc_count != sizeof (uint32_t)) {
412 412 amd8111s_log(adapter, CE_NOTE,
413 413 "Wrong LB_GET_MODE size");
414 414 return (IOC_INVAL);
415 415 }
416 416 lbmp = (void *)mp->b_cont->b_rptr;
417 417 *lbmp = adapter->pOdl->loopback_mode;
418 418 break;
419 419
420 420 case LB_SET_MODE:
421 421 if (iocp->ioc_count != sizeof (uint32_t)) {
422 422 amd8111s_log(adapter, CE_NOTE,
423 423 "Wrong LB_SET_MODE size");
424 424 return (IOC_INVAL);
425 425 }
426 426 lbmp = (void *)mp->b_cont->b_rptr;
427 427 amd8111s_set_loop_mode(adapter, *lbmp);
428 428 break;
429 429 }
430 430 return (IOC_REPLY);
431 431 }
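/*
 * For illustration, a userland consumer would reach these cases
 * through the STREAMS I_STR interface; a hypothetical sketch
 * (assumes 'fd' is an open descriptor on the amd8111s device node):
 *
 *	uint32_t mode = AMD8111S_LB_INTERNAL_MAC;
 *	struct strioctl sioc;
 *
 *	sioc.ic_cmd = LB_SET_MODE;
 *	sioc.ic_timout = 0;
 *	sioc.ic_len = sizeof (mode);	(becomes ioc_count above)
 *	sioc.ic_dp = (char *)&mode;
 *	if (ioctl(fd, I_STR, &sioc) == -1)
 *		perror("LB_SET_MODE");
 */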
432 432
433 433 static void
434 434 amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
435 435 {
436 436 struct iocblk *iocp;
437 437 struct LayerPointers *adapter;
438 438 enum ioc_reply status;
439 439
440 440 iocp = (void *)mp->b_rptr;
441 441 iocp->ioc_error = 0;
442 442 adapter = arg;
443 443
444 444 ASSERT(adapter);
445 445 if (adapter == NULL) {
446 446 miocnak(q, mp, 0, EINVAL);
447 447 return;
448 448 }
449 449
450 450 switch (iocp->ioc_cmd) {
451 451
452 452 case LB_GET_INFO_SIZE:
453 453 case LB_GET_INFO:
454 454 case LB_GET_MODE:
455 455 case LB_SET_MODE:
456 456 status = amd8111s_loopback_ioctl(adapter, iocp, mp);
457 457 break;
458 458
459 459 default:
460 460 status = IOC_INVAL;
461 461 break;
462 462 }
463 463
464 464 /*
465 465 * Decide how to reply
466 466 */
467 467 switch (status) {
468 468 default:
469 469 case IOC_INVAL:
470 470 /*
471 471 * Error, reply with a NAK and EINVAL or the specified error
472 472 */
473 473 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
474 474 EINVAL : iocp->ioc_error);
475 475 break;
476 476
477 477 case IOC_DONE:
478 478 /*
479 479 * OK, reply already sent
480 480 */
481 481 break;
482 482
483 483 case IOC_ACK:
484 484 /*
485 485 * OK, reply with an ACK
486 486 */
487 487 miocack(q, mp, 0, 0);
488 488 break;
489 489
490 490 case IOC_REPLY:
491 491 /*
492 492 * OK, send prepared reply as ACK or NAK
493 493 */
494 494 mp->b_datap->db_type = iocp->ioc_error == 0 ?
495 495 M_IOCACK : M_IOCNAK;
496 496 qreply(q, mp);
497 497 break;
498 498 }
499 499 }
500 500
501 501 /*
502 502 * Copy one packet from DMA memory to an mblk; advance the DMA descriptor pointer.
503 503 */
504 504 static boolean_t
505 505 amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
506 506 {
507 507 int length = 0;
508 508 mblk_t *mp;
509 509 struct rx_desc *descriptor;
510 510 struct odl *pOdl = pLayerPointers->pOdl;
511 511 struct amd8111s_statistics *statistics = &pOdl->statistics;
512 512 struct nonphysical *pNonphysical = pLayerPointers->pMil
513 513 ->pNonphysical;
514 514
515 515 mutex_enter(&pOdl->mdlRcvLock);
516 516 descriptor = pNonphysical->RxBufDescQRead->descriptor;
517 517 (void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
518 518 pNonphysical->RxBufDescQRead->descriptor -
519 519 pNonphysical->RxBufDescQStart->descriptor,
520 520 sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
521 521 if ((descriptor->Rx_OWN) == 0) {
522 522 /*
523 523 * If the frame is received with errors, then set MCNT
524 524 * of that pkt in ReceiveArray to 0. This packet would
525 525 * be discarded later and not indicated to OS.
526 526 */
527 527 if (descriptor->Rx_ERR) {
528 528 statistics->rx_desc_err ++;
529 529 descriptor->Rx_ERR = 0;
530 530 if (descriptor->Rx_FRAM == 1) {
531 531 statistics->rx_desc_err_FRAM ++;
532 532 descriptor->Rx_FRAM = 0;
533 533 }
534 534 if (descriptor->Rx_OFLO == 1) {
535 535 statistics->rx_desc_err_OFLO ++;
536 536 descriptor->Rx_OFLO = 0;
537 537 pOdl->rx_overflow_counter ++;
538 538 if ((pOdl->rx_overflow_counter > 5) &&
539 539 (pOdl->pause_interval == 0)) {
540 540 statistics->rx_double_overflow ++;
541 541 mdlSendPause(pLayerPointers);
542 542 pOdl->rx_overflow_counter = 0;
543 543 pOdl->pause_interval = 25;
544 544 }
545 545 }
546 546 if (descriptor->Rx_CRC == 1) {
547 547 statistics->rx_desc_err_CRC ++;
548 548 descriptor->Rx_CRC = 0;
549 549 }
550 550 if (descriptor->Rx_BUFF == 1) {
551 551 statistics->rx_desc_err_BUFF ++;
552 552 descriptor->Rx_BUFF = 0;
553 553 }
554 554 goto Next_Descriptor;
555 555 }
556 556
557 557 /* Length of incoming packet */
558 558 if (pOdl->rx_fcs_stripped) {
559 559 length = descriptor->Rx_MCNT - 4;
560 560 } else {
561 561 length = descriptor->Rx_MCNT;
562 562 }
563 563 if (length < 62) {
564 564 statistics->rx_error_zerosize ++;
565 565 }
566 566
567 567 if ((mp = allocb(length, BPRI_MED)) == NULL) {
568 568 statistics->rx_allocfail ++;
569 569 goto failed;
570 570 }
571 571 /* Copy from virtual address of incoming packet */
572 572 bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
573 573 mp->b_rptr, length);
574 574 mp->b_wptr = mp->b_rptr + length;
575 575 statistics->rx_ok_packets ++;
576 576 if (*last_mp == NULL) {
577 577 *last_mp = mp;
578 578 } else {
579 579 (*last_mp)->b_next = mp;
580 580 *last_mp = mp;
581 581 }
582 582
583 583 Next_Descriptor:
584 584 descriptor->Rx_MCNT = 0;
585 585 descriptor->Rx_SOP = 0;
586 586 descriptor->Rx_EOP = 0;
587 587 descriptor->Rx_PAM = 0;
588 588 descriptor->Rx_BAM = 0;
589 589 descriptor->TT = 0;
590 590 descriptor->Rx_OWN = 1;
591 591 pNonphysical->RxBufDescQRead->descriptor++;
592 592 pNonphysical->RxBufDescQRead->USpaceMap++;
593 593 if (pNonphysical->RxBufDescQRead->descriptor >
594 594 pNonphysical->RxBufDescQEnd->descriptor) {
595 595 pNonphysical->RxBufDescQRead->descriptor =
596 596 pNonphysical->RxBufDescQStart->descriptor;
597 597 pNonphysical->RxBufDescQRead->USpaceMap =
598 598 pNonphysical->RxBufDescQStart->USpaceMap;
599 599 }
600 600 mutex_exit(&pOdl->mdlRcvLock);
601 601
602 602 return (B_TRUE);
603 603 }
604 604
605 605 failed:
606 606 mutex_exit(&pOdl->mdlRcvLock);
607 607 return (B_FALSE);
608 608 }
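/*
 * Ownership handshake used above: the controller clears Rx_OWN when
 * it has filled a descriptor; the driver copies the frame out, clears
 * the status bits, sets Rx_OWN back to 1 to return the descriptor to
 * hardware, and advances RxBufDescQRead with wraparound. Returning
 * B_FALSE (hardware still owns the next descriptor, or allocb failed)
 * ends the caller's poll loop.
 */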
609 609
610 610 /*
611 611 * Get the received packets from the NIC and send them to GLD.
612 612 */
613 613 static void
614 614 amd8111s_receive(struct LayerPointers *pLayerPointers)
615 615 {
616 616 int numOfPkts = 0;
617 617 struct odl *pOdl;
618 618 mblk_t *ret_mp = NULL, *last_mp = NULL;
619 619
620 620 pOdl = pLayerPointers->pOdl;
621 621
622 622 rw_enter(&pOdl->chip_lock, RW_READER);
623 623 if (!pLayerPointers->run) {
624 624 rw_exit(&pOdl->chip_lock);
625 625 return;
626 626 }
627 627
628 628 if (pOdl->pause_interval > 0)
629 629 pOdl->pause_interval --;
630 630
631 631 while (numOfPkts < RX_RING_SIZE) {
632 632
633 633 if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
634 634 break;
635 635 }
636 636 if (ret_mp == NULL)
637 637 ret_mp = last_mp;
638 638 numOfPkts++;
639 639 }
640 640
641 641 if (ret_mp) {
642 642 mac_rx(pOdl->mh, NULL, ret_mp);
643 643 }
644 644
645 645 (void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
646 646 DDI_DMA_SYNC_FORDEV);
647 647
648 648 mdlReceive(pLayerPointers);
649 649
650 650 rw_exit(&pOdl->chip_lock);
651 651
652 652 }
653 653
654 654 /*
655 655 * Print message in release-version driver.
656 656 */
657 657 static void
658 658 amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
659 659 {
660 660 auto char name[32];
661 661 auto char buf[256];
662 662 va_list ap;
663 663
664 664 if (adapter != NULL) {
665 665 (void) sprintf(name, "amd8111s%d",
666 666 ddi_get_instance(adapter->pOdl->devinfo));
667 667 } else {
668 668 (void) sprintf(name, "amd8111s");
669 669 }
670 670 va_start(ap, fmt);
671 671 (void) vsprintf(buf, fmt, ap);
672 672 va_end(ap);
673 673 cmn_err(level, "%s: %s", name, buf);
674 674 }
675 675
676 676 /*
677 677 * To allocate & initialize all resources.
678 678 * Called by amd8111s_attach().
679 679 */
680 680 static int
681 681 amd8111s_odlInit(struct LayerPointers *pLayerPointers)
682 682 {
683 683 unsigned long mem_req_array[MEM_REQ_MAX];
684 684 unsigned long mem_set_array[MEM_REQ_MAX];
685 685 unsigned long *pmem_req_array;
686 686 unsigned long *pmem_set_array;
687 687 int i, size;
688 688
689 689 for (i = 0; i < MEM_REQ_MAX; i++) {
690 690 mem_req_array[i] = 0;
691 691 mem_set_array[i] = 0;
692 692 }
693 693
694 694 milRequestResources(mem_req_array);
695 695
696 696 pmem_req_array = mem_req_array;
697 697 pmem_set_array = mem_set_array;
698 698 while (*pmem_req_array) {
699 699 switch (*pmem_req_array) {
700 700 case VIRTUAL:
701 701 *pmem_set_array = VIRTUAL;
702 702 pmem_req_array++;
703 703 pmem_set_array++;
704 704 *(pmem_set_array) = *(pmem_req_array);
705 705 pmem_set_array++;
706 706 *(pmem_set_array) = (unsigned long) kmem_zalloc(
707 707 *(pmem_req_array), KM_NOSLEEP);
708 708 if (*pmem_set_array == NULL)
709 709 goto odl_init_failure;
710 710 break;
711 711 }
712 712 pmem_req_array++;
713 713 pmem_set_array++;
714 714 }
715 715
716 716 /*
717 717 * Initialize memory on lower layers
718 718 */
719 719 milSetResources(pLayerPointers, mem_set_array);
720 720
721 721 /* Allocate Rx/Tx descriptors */
722 722 if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
723 723 *pmem_set_array = NULL;
724 724 goto odl_init_failure;
725 725 }
726 726
727 727 /*
728 728 * Allocate an Rx buffer for each Rx descriptor. Then call the mil layer
729 729 * routine to fill the physical address of each Rx buffer into its Rx descriptor.
730 730 */
731 731 if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
732 732 amd8111s_free_descriptors(pLayerPointers);
733 733 *pmem_set_array = NULL;
734 734 goto odl_init_failure;
735 735 }
736 736 milInitGlbds(pLayerPointers);
737 737
738 738 return (0);
739 739
740 740 odl_init_failure:
741 741 /*
742 742 * Free All memory allocated so far
743 743 */
744 744 pmem_req_array = mem_set_array;
745 745 while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
746 746 switch (*pmem_req_array) {
747 747 case VIRTUAL:
748 748 pmem_req_array++; /* Size */
749 749 size = *(pmem_req_array);
750 750 pmem_req_array++; /* Virtual Address */
751 751 if (pmem_req_array == NULL)
752 752 return (1);
753 753 kmem_free((int *)*pmem_req_array, size);
754 754 break;
755 755 }
756 756 pmem_req_array++;
757 757 }
758 758 return (1);
759 759 }
760 760
761 761 /*
762 762 * Allocate and initialize Tx/Rx descriptors
763 763 */
764 764 static boolean_t
765 765 amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
766 766 {
767 767 struct odl *pOdl = pLayerPointers->pOdl;
768 768 struct mil *pMil = pLayerPointers->pMil;
769 769 dev_info_t *devinfo = pOdl->devinfo;
770 770 uint_t length, count, i;
771 771 size_t real_length;
772 772
773 773 /*
774 774 * Allocate Rx descriptors
775 775 */
776 776 if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
777 777 NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
778 778 amd8111s_log(pLayerPointers, CE_WARN,
779 779 "ddi_dma_alloc_handle for Rx desc failed");
780 780 pOdl->rx_desc_dma_handle = NULL;
781 781 return (B_FALSE);
782 782 }
783 783
784 784 length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
785 785 if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
786 786 &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
787 787 NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
788 788 &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {
789 789
790 790 amd8111s_log(pLayerPointers, CE_WARN,
791 791 "ddi_dma_mem_handle for Rx desc failed");
792 792 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
793 793 pOdl->rx_desc_dma_handle = NULL;
794 794 return (B_FALSE);
795 795 }
796 796
797 797 if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
798 798 NULL, (caddr_t)pMil->Rx_desc_original, real_length,
799 799 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
800 800 NULL, &pOdl->rx_desc_dma_cookie,
801 801 &count) != DDI_SUCCESS) {
802 802
803 803 amd8111s_log(pLayerPointers, CE_WARN,
804 804 "ddi_dma_addr_bind_handle for Rx desc failed");
805 805 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
806 806 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
807 807 pOdl->rx_desc_dma_handle = NULL;
808 808 return (B_FALSE);
809 809 }
810 810 ASSERT(count == 1);
811 811
812 812 /* Initialize Rx descriptors related variables */
813 813 pMil->Rx_desc = (struct rx_desc *)
814 814 ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
815 815 pMil->Rx_desc_pa = (unsigned int)
816 816 ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
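/*
 * The rounding above assumes ALIGNMENT is a power-of-two mask
 * (2^n - 1, e.g. 0x0f): (addr + ALIGNMENT) & ~ALIGNMENT rounds
 * addr up to the next (ALIGNMENT + 1)-byte boundary, the same
 * trick as ROUNDUP() with a = ALIGNMENT + 1.
 */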
817 817
818 818 pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
819 819
820 820
821 821 /*
822 822 * Allocate Tx descriptors
823 823 */
824 824 if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
825 825 NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
826 826 amd8111s_log(pLayerPointers, CE_WARN,
827 827 "ddi_dma_alloc_handle for Tx desc failed");
828 828 goto allocate_desc_fail;
829 829 }
830 830
831 831 length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
832 832 if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
833 833 &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
834 834 NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
835 835 &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {
836 836
837 837 amd8111s_log(pLayerPointers, CE_WARN,
838 838 "ddi_dma_mem_handle for Tx desc failed");
839 839 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
840 840 goto allocate_desc_fail;
841 841 }
842 842
843 843 if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
844 844 NULL, (caddr_t)pMil->Tx_desc_original, real_length,
845 845 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
846 846 NULL, &pOdl->tx_desc_dma_cookie,
847 847 &count) != DDI_SUCCESS) {
848 848
849 849 amd8111s_log(pLayerPointers, CE_WARN,
850 850 "ddi_dma_addr_bind_handle for Tx desc failed");
851 851 ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
852 852 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
853 853 goto allocate_desc_fail;
854 854 }
855 855 ASSERT(count == 1);
856 856 /* Set the DMA area to all zeros */
857 857 bzero((caddr_t)pMil->Tx_desc_original, length);
858 858
859 859 /* Initialize Tx descriptors related variables */
860 860 pMil->Tx_desc = (struct tx_desc *)
861 861 ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
862 862 pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
863 863 pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
864 864 pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
865 865 pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE -1]);
866 866
867 867 /* Physical Addr of Tx_desc_original & Tx_desc */
868 868 pLayerPointers->pMil->Tx_desc_pa =
869 869 ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
870 870 ~ALIGNMENT);
871 871
872 872 /* Setting the reserved bits in the tx descriptors */
873 873 for (i = 0; i < TX_RING_SIZE; i++) {
874 874 pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
875 875 pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
876 876 pMil->pNonphysical->TxDescQWrite++;
877 877 }
878 878 pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;
879 879
880 880 pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;
881 881
882 882 return (B_TRUE);
883 883
884 884 allocate_desc_fail:
885 885 pOdl->tx_desc_dma_handle = NULL;
886 886 (void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
887 887 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
888 888 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
889 889 pOdl->rx_desc_dma_handle = NULL;
890 890 return (B_FALSE);
891 891 }
892 892
893 893 /*
894 894 * Free Tx/Rx descriptors
895 895 */
896 896 static void
897 897 amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
898 898 {
899 899 struct odl *pOdl = pLayerPointers->pOdl;
900 900
901 901 /* Free Rx descriptors */
902 902 if (pOdl->rx_desc_dma_handle) {
903 903 (void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
904 904 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
905 905 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
906 906 pOdl->rx_desc_dma_handle = NULL;
907 907 }
908 908
909 909 /* Free Tx descriptors */
910 910 if (pOdl->tx_desc_dma_handle) {
911 911 (void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
912 912 ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
913 913 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
914 914 pOdl->tx_desc_dma_handle = NULL;
915 915 }
916 916 }
917 917
918 918 /*
919 919 * Allocate Tx/Rx Ring buffer
920 920 */
921 921 static boolean_t
922 922 amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
923 923 struct amd8111s_dma_ringbuf *pRing,
924 924 uint32_t ring_size, uint32_t msg_size)
925 925 {
926 926 uint32_t idx, msg_idx = 0, msg_acc;
927 927 dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
928 928 size_t real_length;
929 929 uint_t count = 0;
930 930
931 931 ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
932 932 pRing->dma_buf_sz = msg_size;
933 933 pRing->ring_size = ring_size;
934 934 pRing->trunk_num = AMD8111S_SPLIT;
935 935 pRing->buf_sz = msg_size * ring_size;
936 936 if (ring_size < pRing->trunk_num)
937 937 pRing->trunk_num = ring_size;
938 938 ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);
939 939
940 940 pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
941 941 ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
942 942
943 943 pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
944 944 ring_size, KM_NOSLEEP);
945 945 pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
946 946 pRing->trunk_num, KM_NOSLEEP);
947 947 pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
948 948 pRing->trunk_num, KM_NOSLEEP);
949 949 pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
950 950 pRing->trunk_num, KM_NOSLEEP);
951 951 pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
952 952 pRing->trunk_num, KM_NOSLEEP);
953 953 if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
954 954 pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
955 955 pRing->dma_cookie == NULL) {
956 956 amd8111s_log(pLayerPointers, CE_NOTE,
957 957 "kmem_zalloc failed");
958 958 goto failed;
959 959 }
960 960
961 961 for (idx = 0; idx < pRing->trunk_num; ++idx) {
962 962 if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
963 963 DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
964 964 != DDI_SUCCESS) {
965 965
966 966 amd8111s_log(pLayerPointers, CE_WARN,
967 967 "ddi_dma_alloc_handle failed");
968 968 goto failed;
969 969 } else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
970 970 pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
971 971 DDI_DMA_SLEEP, NULL,
972 972 (caddr_t *)&(pRing->trunk_addr[idx]),
973 973 (size_t *)(&real_length), &pRing->acc_hdl[idx])
974 974 != DDI_SUCCESS) {
975 975
976 976 amd8111s_log(pLayerPointers, CE_WARN,
977 977 "ddi_dma_mem_alloc failed");
978 978 goto failed;
979 979 } else if (real_length != pRing->trunk_sz) {
980 980 amd8111s_log(pLayerPointers, CE_WARN,
981 981 "ddi_dma_mem_alloc failed");
982 982 goto failed;
983 983 } else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
984 984 NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
985 985 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
986 986 &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {
987 987
988 988 amd8111s_log(pLayerPointers, CE_WARN,
989 989 "ddi_dma_addr_bind_handle failed");
990 990 goto failed;
991 991 } else {
992 992 for (msg_acc = 0;
993 993 msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
994 994 ++ msg_acc) {
995 995 pRing->msg_buf[msg_idx].offset =
996 996 msg_acc * pRing->dma_buf_sz;
997 997 pRing->msg_buf[msg_idx].vir_addr =
998 998 pRing->trunk_addr[idx] +
999 999 pRing->msg_buf[msg_idx].offset;
1000 1000 pRing->msg_buf[msg_idx].phy_addr =
1001 1001 pRing->dma_cookie[idx].dmac_laddress +
1002 1002 pRing->msg_buf[msg_idx].offset;
1003 1003 pRing->msg_buf[msg_idx].p_hdl =
1004 1004 pRing->dma_hdl[idx];
1005 1005 msg_idx ++;
1006 1006 }
1007 1007 }
1008 1008 }
1009 1009
1010 1010 pRing->free = pRing->msg_buf;
1011 1011 pRing->next = pRing->msg_buf;
1012 1012 pRing->curr = pRing->msg_buf;
1013 1013
1014 1014 return (B_TRUE);
1015 1015 failed:
1016 1016 amd8111s_free_dma_ringbuf(pRing);
1017 1017 return (B_FALSE);
1018 1018 }
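/*
 * Layout produced above, with hypothetical sizes for concreteness
 * (say ring_size = 128 messages of msg_size = 2048 bytes): buf_sz is
 * 256 KiB, carved into trunk_num = 128 DMA trunks of trunk_sz = 2 KiB,
 * and msg_buf[] records, per message, its offset inside a trunk plus
 * the matching virtual/physical addresses and DMA handle. Splitting
 * into trunks keeps each ddi_dma_mem_alloc() request small and
 * contiguous instead of demanding one huge allocation.
 */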
1019 1019
1020 1020 /*
1021 1021 * Free Tx/Rx ring buffer
1022 1022 */
1023 1023 static void
1024 1024 amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
1025 1025 {
1026 1026 int idx;
1027 1027
1028 1028 if (pRing->dma_cookie != NULL) {
1029 1029 for (idx = 0; idx < pRing->trunk_num; idx ++) {
1030 1030 if (pRing->dma_cookie[idx].dmac_laddress == 0) {
1031 1031 break;
1032 1032 }
1033 1033 (void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
1034 1034 }
1035 1035 kmem_free(pRing->dma_cookie,
1036 1036 sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
1037 1037 }
1038 1038
1039 1039 if (pRing->acc_hdl != NULL) {
1040 1040 for (idx = 0; idx < pRing->trunk_num; idx ++) {
1041 1041 if (pRing->acc_hdl[idx] == NULL)
1042 1042 break;
1043 1043 ddi_dma_mem_free(&pRing->acc_hdl[idx]);
1044 1044 }
1045 1045 kmem_free(pRing->acc_hdl,
1046 1046 sizeof (ddi_acc_handle_t) * pRing->trunk_num);
1047 1047 }
1048 1048
1049 1049 if (pRing->dma_hdl != NULL) {
1050 1050 for (idx = 0; idx < pRing->trunk_num; idx ++) {
1051 1051 if (pRing->dma_hdl[idx] == 0) {
1052 1052 break;
1053 1053 }
1054 1054 ddi_dma_free_handle(&pRing->dma_hdl[idx]);
1055 1055 }
1056 1056 kmem_free(pRing->dma_hdl,
1057 1057 sizeof (ddi_dma_handle_t) * pRing->trunk_num);
1058 1058 }
1059 1059
1060 1060 if (pRing->msg_buf != NULL) {
1061 1061 kmem_free(pRing->msg_buf,
1062 1062 sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
1063 1063 }
1064 1064
1065 1065 if (pRing->trunk_addr != NULL) {
1066 1066 kmem_free(pRing->trunk_addr,
1067 1067 sizeof (caddr_t) * pRing->trunk_num);
1068 1068 }
1069 1069
1070 1070 bzero(pRing, sizeof (*pRing));
1071 1071 }
1072 1072
1073 1073
1074 1074 /*
1075 1075 * Allocate all Tx buffers.
1076 1076 * Allocate an Rx buffer for each Rx descriptor. Then
1077 1077 * call the mil routine to fill the physical address of each Rx
1078 1078 * buffer into its Rx descriptor.
1079 1079 */
1080 1080 static boolean_t
1081 1081 amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
1082 1082 {
1083 1083 struct odl *pOdl = pLayerPointers->pOdl;
1084 1084
1085 1085 /*
1086 1086 * Allocate Rx buffers
1087 1087 */
1088 1088 if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
1089 1089 RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
1090 1090 amd8111s_log(pLayerPointers, CE_WARN,
1091 1091 "amd8111s_alloc_dma_ringbuf for tx failed");
1092 1092 goto allocate_buf_fail;
1093 1093 }
1094 1094
1095 1095 /*
1096 1096 * Allocate Tx buffers
1097 1097 */
1098 1098 if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
1099 1099 TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
1100 1100 amd8111s_log(pLayerPointers, CE_WARN,
1101 1101 "amd8111s_alloc_dma_ringbuf for tx failed");
1102 1102 goto allocate_buf_fail;
1103 1103 }
1104 1104
1105 1105 /*
1106 1106 * Initialize the mil queues
1107 1107 */
1108 1108 milInitGlbds(pLayerPointers);
1109 1109
1110 1110 milInitRxQ(pLayerPointers);
1111 1111
1112 1112 return (B_TRUE);
1113 1113
1114 1114 allocate_buf_fail:
1115 1115
1116 1116 amd8111s_log(pLayerPointers, CE_WARN,
1117 1117 "amd8111s_allocate_buffers failed");
1118 1118 return (B_FALSE);
1119 1119 }
1120 1120
1121 1121 /*
1122 1122 * Free all Rx/Tx buffers
1123 1123 */
1124 1124
1125 1125 static void
1126 1126 amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
1127 1127 {
1128 1128 /* Free Tx buffers */
1129 1129 amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);
1130 1130
1131 1131 /* Free Rx Buffers */
1132 1132 amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
1133 1133 }
1134 1134
1135 1135 /*
1136 1136 * Try to recycle all the descriptors and Tx buffers
1137 1137 * that the hardware has already released.
1138 1138 */
1139 1139 static int
1140 1140 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
1141 1141 {
1142 1142 struct nonphysical *pNonphysical;
1143 1143 uint32_t count = 0;
1144 1144
1145 1145 pNonphysical = pLayerPointers->pMil->pNonphysical;
1146 1146 while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
1147 1147 pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
1148 1148 pLayerPointers->pOdl->tx_buf.free =
1149 1149 NEXT(pLayerPointers->pOdl->tx_buf, free);
1150 1150 pNonphysical->TxDescQRead++;
1151 1151 if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
1152 1152 pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
1153 1153 }
1154 1154 count ++;
1155 1155 }
1156 1156
1157 1157 if (pLayerPointers->pMil->tx_reschedule)
1158 1158 ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);
1159 1159
1160 1160 return (count);
1161 1161 }
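/*
 * NEXT() is assumed (from amd8111s_main.h) to advance a ring pointer
 * by one message buffer with wraparound, i.e. something like
 *
 *	ring.free == &ring.msg_buf[ring.ring_size - 1] ?
 *	    ring.msg_buf : ring.free + 1;
 *
 * Each descriptor the hardware has released (Tx_OWN == 0) frees
 * exactly one such buffer.
 */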
1162 1162
1163 1163 /*
1164 1164 * Get packets in the Tx buffer, then copy them to the send buffer.
1165 1165 * Trigger hardware to send out packets.
1166 1166 */
1167 1167 static void
1168 1168 amd8111s_send_serial(struct LayerPointers *pLayerPointers)
1169 1169 {
1170 1170 struct nonphysical *pNonphysical;
1171 1171 uint32_t count;
1172 1172
1173 1173 pNonphysical = pLayerPointers->pMil->pNonphysical;
1174 1174
1175 1175 mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1176 1176
1177 1177 for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
1178 1178 if (pLayerPointers->pOdl->tx_buf.curr ==
1179 1179 pLayerPointers->pOdl->tx_buf.next) {
1180 1180 break;
1181 1181 }
1182 1182 /* Check whether the Tx descriptor ring needs recycling */
1183 1183 if (((pNonphysical->TxDescQWrite + 1 >
1184 1184 pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
1185 1185 (pNonphysical->TxDescQWrite + 1)) ==
1186 1186 pNonphysical->TxDescQRead)
1187 1187 if (amd8111s_recycle_tx(pLayerPointers) == 0) {
1188 1188 pLayerPointers->pOdl
1189 1189 ->statistics.tx_no_descriptor ++;
1190 1190 break;
1191 1191 }
1192 1192
1193 1193 /* Fill packet length */
1194 1194 pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
1195 1195 ->pOdl->tx_buf.curr->msg_size;
1196 1196
1197 1197 /* Fill physical buffer address */
1198 1198 pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
1199 1199 pLayerPointers->pOdl->tx_buf.curr->phy_addr;
1200 1200
1201 1201 pNonphysical->TxDescQWrite->Tx_SOP = 1;
1202 1202 pNonphysical->TxDescQWrite->Tx_EOP = 1;
1203 1203 pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
1204 1204 pNonphysical->TxDescQWrite->Tx_LTINT = 1;
1205 1205 pNonphysical->TxDescQWrite->Tx_USPACE = 0;
1206 1206 pNonphysical->TxDescQWrite->Tx_OWN = 1;
1207 1207
1208 1208 pNonphysical->TxDescQWrite++;
1209 1209 if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
1210 1210 pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
1211 1211 }
1212 1212
1213 1213 pLayerPointers->pOdl->tx_buf.curr =
1214 1214 NEXT(pLayerPointers->pOdl->tx_buf, curr);
1215 1215
1216 1216 }
1217 1217
1218 1218 pLayerPointers->pOdl->statistics.tx_ok_packets += count;
1219 1219
1220 1220 mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1221 1221
1222 1222 /* Call mdlTransmit to send the pkt out on the network */
1223 1223 mdlTransmit(pLayerPointers);
1224 1224
1225 1225 }
1226 1226
1227 1227 /*
1228 1228 * Softintr entry point: try to send out the packets in the Tx buffer.
1229 1229 * If reschedule is true, call mac_tx_update() to re-enable the
1230 1230 * transmit path.
1231 1231 */
1232 1232 static uint_t
1233 1233 amd8111s_send_drain(caddr_t arg)
1234 1234 {
1235 1235 struct LayerPointers *pLayerPointers = (void *)arg;
1236 1236
1237 1237 amd8111s_send_serial(pLayerPointers);
1238 1238
1239 1239 if (pLayerPointers->pMil->tx_reschedule &&
1240 1240 NEXT(pLayerPointers->pOdl->tx_buf, next) !=
1241 1241 pLayerPointers->pOdl->tx_buf.free) {
1242 1242 mac_tx_update(pLayerPointers->pOdl->mh);
1243 1243 pLayerPointers->pMil->tx_reschedule = B_FALSE;
1244 1244 }
1245 1245
1246 1246 return (DDI_INTR_CLAIMED);
1247 1247 }
1248 1248
1249 1249 /*
1250 1250 * Get a Tx buffer
1251 1251 */
1252 1252 static struct amd8111s_msgbuf *
1253 1253 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
1254 1254 {
1255 1255 struct amd8111s_msgbuf *tmp, *next;
1256 1256
1257 1257 mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1258 1258 next = NEXT(pLayerPointers->pOdl->tx_buf, next);
1259 1259 if (next == pLayerPointers->pOdl->tx_buf.free) {
1260 1260 tmp = NULL;
1261 1261 } else {
1262 1262 tmp = pLayerPointers->pOdl->tx_buf.next;
1263 1263 pLayerPointers->pOdl->tx_buf.next = next;
1264 1264 }
1265 1265 mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1266 1266
1267 1267 return (tmp);
1268 1268 }
1269 1269
1270 1270 static boolean_t
1271 1271 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
1272 1272 {
1273 1273 struct odl *pOdl;
1274 1274 size_t frag_len;
1275 1275 mblk_t *tmp;
1276 1276 struct amd8111s_msgbuf *txBuf;
1277 1277 uint8_t *pMsg;
1278 1278
1279 1279 pOdl = pLayerPointers->pOdl;
1280 1280
1281 1281 /* alloc send buffer */
1282 1282 txBuf = amd8111s_getTxbuf(pLayerPointers);
1283 1283 if (txBuf == NULL) {
1284 1284 pOdl->statistics.tx_no_buffer ++;
1285 1285 pLayerPointers->pMil->tx_reschedule = B_TRUE;
1286 1286 amd8111s_send_serial(pLayerPointers);
1287 1287 return (B_FALSE);
1288 1288 }
1289 1289
1290 1290 /* copy packet to send buffer */
1291 1291 txBuf->msg_size = 0;
1292 1292 pMsg = (uint8_t *)txBuf->vir_addr;
1293 1293 for (tmp = mp; tmp; tmp = tmp->b_cont) {
1294 1294 frag_len = MBLKL(tmp);
1295 1295 bcopy(tmp->b_rptr, pMsg, frag_len);
1296 1296 txBuf->msg_size += frag_len;
1297 1297 pMsg += frag_len;
1298 1298 }
1299 1299 freemsg(mp);
1300 1300
1301 1301 amd8111s_send_serial(pLayerPointers);
1302 1302
1303 1303 return (B_TRUE);
1304 1304 }
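/*
 * Note the copy-based Tx path above: every mblk fragment is bcopy()ed
 * into a preallocated DMA-safe buffer rather than DMA-binding the
 * mblk itself. For a 10/100 part the copy is cheap and avoids
 * per-packet bind/unbind overhead and the sgllen = 1 fragmentation
 * limit.
 */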
1305 1305
1306 1306 /*
1307 1307 * (GLD Entry Point) Send the message block to the lower layer
1308 1308 */
1309 1309 static mblk_t *
1310 1310 amd8111s_m_tx(void *arg, mblk_t *mp)
1311 1311 {
1312 1312 struct LayerPointers *pLayerPointers = arg;
1313 1313 mblk_t *next;
1314 1314
1315 1315 rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
1316 1316 if (!pLayerPointers->run) {
1317 1317 pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
1318 1318 freemsgchain(mp);
1319 1319 mp = NULL;
1320 1320 }
1321 1321
1322 1322 while (mp != NULL) {
1323 1323 next = mp->b_next;
1324 1324 mp->b_next = NULL;
1325 1325 if (!amd8111s_send(pLayerPointers, mp)) {
1326 1326 /* Send fail */
1327 1327 mp->b_next = next;
1328 1328 break;
1329 1329 }
1330 1330 mp = next;
1331 1331 }
1332 1332
1333 1333 rw_exit(&pLayerPointers->pOdl->chip_lock);
1334 1334 return (mp);
1335 1335 }
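/*
 * Flow-control contract with the MAC layer: returning a non-NULL
 * chain from m_tx tells GLD the remaining packets were not sent and
 * transmission should pause; amd8111s_send() has already set
 * tx_reschedule, and amd8111s_send_drain() later calls
 * mac_tx_update() to ask GLD to retry.
 */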
1336 1336
1337 1337 /*
1338 1338 * (GLD Entry Point) Interrupt Service Routine
1339 1339 */
1340 1340 static uint_t
1341 1341 amd8111s_intr(caddr_t arg)
1342 1342 {
1343 1343 unsigned int intrCauses;
1344 1344 struct LayerPointers *pLayerPointers = (void *)arg;
1345 1345
1346 1346 /* Read the interrupt status from mdl */
1347 1347 intrCauses = mdlReadInterrupt(pLayerPointers);
1348 1348
1349 1349 if (intrCauses == 0) {
1350 1350 pLayerPointers->pOdl->statistics.intr_OTHER ++;
1351 1351 return (DDI_INTR_UNCLAIMED);
1352 1352 }
1353 1353
1354 1354 if (intrCauses & LCINT) {
1355 1355 if (mdlReadLink(pLayerPointers) == LINK_UP) {
1356 1356 mdlGetActiveMediaInfo(pLayerPointers);
1357 1357 /* Link status changed */
1358 1358 if (pLayerPointers->pOdl->LinkStatus !=
1359 1359 LINK_STATE_UP) {
1360 1360 pLayerPointers->pOdl->LinkStatus =
1361 1361 LINK_STATE_UP;
1362 1362 mac_link_update(pLayerPointers->pOdl->mh,
1363 1363 LINK_STATE_UP);
1364 1364 }
1365 1365 } else {
1366 1366 if (pLayerPointers->pOdl->LinkStatus !=
1367 1367 LINK_STATE_DOWN) {
1368 1368 pLayerPointers->pOdl->LinkStatus =
1369 1369 LINK_STATE_DOWN;
1370 1370 mac_link_update(pLayerPointers->pOdl->mh,
1371 1371 LINK_STATE_DOWN);
1372 1372 }
1373 1373 }
1374 1374 }
1375 1375 /*
1376 1376 * RINT0: Receive Interrupt is set by the controller after the last
1377 1377 * descriptor of a receive frame for this ring has been updated by
1378 1378 * writing a 0 to the OWNership bit.
1379 1379 */
1380 1380 if (intrCauses & RINT0) {
1381 1381 pLayerPointers->pOdl->statistics.intr_RINT0 ++;
1382 1382 amd8111s_receive(pLayerPointers);
1383 1383 }
1384 1384
1385 1385 /*
1386 1386 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
1387 1387 * in the last descriptor of a transmit frame in this particular ring
1388 1388 * has been cleared to indicate the frame has been copied to the
1389 1389 * transmit FIFO.
1390 1390 */
1391 1391 if (intrCauses & TINT0) {
1392 1392 pLayerPointers->pOdl->statistics.intr_TINT0 ++;
1393 1393 /*
1394 1394 * If the descriptor ring was exhausted while packets remained
1395 1395 * in the Tx buffer, drain them now.
1396 1396 */
1397 1397 amd8111s_send_serial(pLayerPointers);
1398 1398 }
1399 1399
1400 1400 if (intrCauses & STINT) {
1401 1401 pLayerPointers->pOdl->statistics.intr_STINT ++;
1402 1402 }
1403 1403
1404 1404
1405 1405 return (DDI_INTR_CLAIMED);
1406 1406 }
1407 1407
1408 1408 /*
1409 1409 * To re-initialize data structures.
1410 1410 */
1411 1411 static void
1412 1412 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
1413 1413 {
1414 1414 /* Reset all Tx/Rx queues and descriptors */
1415 1415 milResetTxQ(pLayerPointers);
1416 1416 milInitRxQ(pLayerPointers);
1417 1417 }
1418 1418
1419 1419 /*
1420 1420 * Send all pending tx packets
1421 1421 */
1422 1422 static void
1423 1423 amd8111s_tx_drain(struct LayerPointers *adapter)
1424 1424 {
1425 1425 struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
1426 1426 int i, desc_count = 0;
1427 1427 for (i = 0; i < 30; i++) {
1428 1428 while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
1429 1429 /* This packet has been transmitted */
1430 1430 pTx_desc ++;
1431 1431 desc_count ++;
1432 1432 }
1433 1433 if (desc_count == TX_RING_SIZE) {
1434 1434 break;
1435 1435 }
1436 1436 /* Wait 1 ms */
1437 1437 drv_usecwait(1000);
1438 1438 }
1439 1439 adapter->pOdl->statistics.tx_draintime = i;
1440 1440 }
1441 1441
1442 1442 /*
1443 1443 * (GLD Entry Point) Start the card; called at
1444 1444 * ifconfig plumb time
1445 1445 */
1446 1446 static int
1447 1447 amd8111s_m_start(void *arg)
1448 1448 {
1449 1449 struct LayerPointers *pLayerPointers = arg;
1450 1450 struct odl *pOdl = pLayerPointers->pOdl;
1451 1451
1452 1452 amd8111s_sw_reset(pLayerPointers);
1453 1453 mdlHWReset(pLayerPointers);
1454 1454 rw_enter(&pOdl->chip_lock, RW_WRITER);
1455 1455 pLayerPointers->run = B_TRUE;
1456 1456 rw_exit(&pOdl->chip_lock);
1457 1457 return (0);
1458 1458 }
1459 1459
1460 1460 /*
1461 1461 * (GLD Entry Point) Stop the card; called at
1462 1462 * ifconfig unplumb time
1463 1463 */
1464 1464 static void
1465 1465 amd8111s_m_stop(void *arg)
1466 1466 {
1467 1467 struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1468 1468 struct odl *pOdl = pLayerPointers->pOdl;
1469 1469
1470 1470 /* Ensure all pending Tx packets are sent */
1471 1471 amd8111s_tx_drain(pLayerPointers);
1472 1472 /*
1473 1473 * Stop the controller and disable the controller interrupt
1474 1474 */
1475 1475 rw_enter(&pOdl->chip_lock, RW_WRITER);
1476 1476 mdlStopChip(pLayerPointers);
1477 1477 pLayerPointers->run = B_FALSE;
1478 1478 rw_exit(&pOdl->chip_lock);
1479 1479 }
1480 1480
1481 1481 /*
1482 1482 * To clean up all resources
1483 1483 */
1484 1484 static void
1485 1485 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
1486 1486 {
1487 1487 unsigned long mem_free_array[100];
1488 1488 unsigned long *pmem_free_array, size;
1489 1489
1490 1490 /* Free Rx/Tx descriptors */
1491 1491 amd8111s_free_descriptors(pLayerPointers);
1492 1492
1493 1493 /* Free memory on lower layers */
1494 1494 milFreeResources(pLayerPointers, mem_free_array);
1495 1495 pmem_free_array = mem_free_array;
1496 1496 while (*pmem_free_array) {
1497 1497 switch (*pmem_free_array) {
1498 1498 case VIRTUAL:
1499 1499 size = *(++pmem_free_array);
1500 1500 pmem_free_array++;
1501 1501 kmem_free((void *)*(pmem_free_array), size);
1502 1502 break;
1503 1503 }
1504 1504 pmem_free_array++;
1505 1505 }
1506 1506
1507 1507 amd8111s_free_buffers(pLayerPointers);
1508 1508 }
1509 1509
1510 1510 /*
1511 1511 * (GLD Entry Point) To add/delete multicast addresses
1512 1512 *
1513 1513 */
1514 1514 static int
1515 1515 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1516 1516 {
1517 1517 struct LayerPointers *pLayerPointers = arg;
1518 1518
1519 1519 if (add) {
1520 1520 /* Add a multicast entry */
1521 1521 mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
1522 1522 } else {
1523 1523 /* Delete a multicast entry */
1524 1524 mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
1525 1525 }
1526 1526
1527 1527 return (0);
1528 1528 }
1529 1529
1530 1530 #ifdef AMD8111S_DEBUG
1531 1531 /*
1532 1532 * The size of MIB registers is only 32 bits. Dump them before one
1533 1533 * of them overflows.
1534 1534 */
1535 1535 static void
1536 1536 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
1537 1537 {
1538 1538 struct amd8111s_statistics *adapterStat;
1539 1539
1540 1540 adapterStat = &pLayerPointers->pOdl->statistics;
1541 1541
1542 1542 adapterStat->mib_dump_counter ++;
1543 1543
1544 1544 /*
1545 1545 * Rx Counters
1546 1546 */
1547 1547 adapterStat->rx_mib_unicst_packets +=
1548 1548 mdlReadMib(pLayerPointers, RcvUniCastPkts);
1549 1549 adapterStat->rx_mib_multicst_packets +=
1550 1550 mdlReadMib(pLayerPointers, RcvMultiCastPkts);
1551 1551 adapterStat->rx_mib_broadcst_packets +=
1552 1552 mdlReadMib(pLayerPointers, RcvBroadCastPkts);
1553 1553 adapterStat->rx_mib_macctrl_packets +=
1554 1554 mdlReadMib(pLayerPointers, RcvMACCtrl);
1555 1555 adapterStat->rx_mib_flowctrl_packets +=
1556 1556 mdlReadMib(pLayerPointers, RcvFlowCtrl);
1557 1557
1558 1558 adapterStat->rx_mib_bytes +=
1559 1559 mdlReadMib(pLayerPointers, RcvOctets);
1560 1560 adapterStat->rx_mib_good_bytes +=
1561 1561 mdlReadMib(pLayerPointers, RcvGoodOctets);
1562 1562
1563 1563 adapterStat->rx_mib_undersize_packets +=
1564 1564 mdlReadMib(pLayerPointers, RcvUndersizePkts);
1565 1565 adapterStat->rx_mib_oversize_packets +=
1566 1566 mdlReadMib(pLayerPointers, RcvOversizePkts);
1567 1567
1568 1568 adapterStat->rx_mib_drop_packets +=
1569 1569 mdlReadMib(pLayerPointers, RcvDropPktsRing0);
1570 1570 adapterStat->rx_mib_align_err_packets +=
1571 1571 mdlReadMib(pLayerPointers, RcvAlignmentErrors);
1572 1572 adapterStat->rx_mib_fcs_err_packets +=
1573 1573 mdlReadMib(pLayerPointers, RcvFCSErrors);
1574 1574 adapterStat->rx_mib_symbol_err_packets +=
1575 1575 mdlReadMib(pLayerPointers, RcvSymbolErrors);
1576 1576 adapterStat->rx_mib_miss_packets +=
1577 1577 mdlReadMib(pLayerPointers, RcvMissPkts);
1578 1578
1579 1579 /*
1580 1580 * Tx Counters
1581 1581 */
1582 1582 adapterStat->tx_mib_packets +=
1583 1583 mdlReadMib(pLayerPointers, XmtPackets);
1584 1584 adapterStat->tx_mib_multicst_packets +=
1585 1585 mdlReadMib(pLayerPointers, XmtMultiCastPkts);
1586 1586 adapterStat->tx_mib_broadcst_packets +=
1587 1587 mdlReadMib(pLayerPointers, XmtBroadCastPkts);
1588 1588 adapterStat->tx_mib_flowctrl_packets +=
1589 1589 mdlReadMib(pLayerPointers, XmtFlowCtrl);
1590 1590
1591 1591 adapterStat->tx_mib_bytes +=
1592 1592 mdlReadMib(pLayerPointers, XmtOctets);
1593 1593
1594 1594 adapterStat->tx_mib_defer_trans_packets +=
1595 1595 mdlReadMib(pLayerPointers, XmtDeferredTransmit);
1596 1596 adapterStat->tx_mib_collision_packets +=
1597 1597 mdlReadMib(pLayerPointers, XmtCollisions);
1598 1598 adapterStat->tx_mib_one_coll_packets +=
1599 1599 mdlReadMib(pLayerPointers, XmtOneCollision);
1600 1600 adapterStat->tx_mib_multi_coll_packets +=
1601 1601 mdlReadMib(pLayerPointers, XmtMultipleCollision);
1602 1602 adapterStat->tx_mib_late_coll_packets +=
1603 1603 mdlReadMib(pLayerPointers, XmtLateCollision);
1604 1604 adapterStat->tx_mib_ex_coll_packets +=
1605 1605 mdlReadMib(pLayerPointers, XmtExcessiveCollision);
1606 1606
1607 1607
1608 1608 /* Clear all MIB registers */
1609 1609 WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
1610 1610 + MIB_ADDR, MIB_CLEAR);
1611 1611 }
1612 1612 #endif
1613 1613
1614 1614 /*
1615 1615 * (GLD Entry Point) set/unset promiscuous mode
1616 1616 */
1617 1617 static int
1618 1618 amd8111s_m_promisc(void *arg, boolean_t on)
1619 1619 {
1620 1620 struct LayerPointers *pLayerPointers = arg;
1621 1621
1622 1622 if (on) {
1623 1623 mdlSetPromiscuous(pLayerPointers);
1624 1624 } else {
1625 1625 mdlDisablePromiscuous(pLayerPointers);
1626 1626 }
1627 1627
1628 1628 return (0);
1629 1629 }
1630 1630
1631 1631 /*
1632 1632 * (GLD Entry Point) Change the MAC address of the card
1633 1633 */
1634 1634 static int
1635 1635 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
1636 1636 {
1637 1637 struct LayerPointers *pLayerPointers = arg;
1638 1638
1639 1639 mdlDisableInterrupt(pLayerPointers);
1640 1640 mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
1641 1641 mdlEnableInterrupt(pLayerPointers);
1642 1642
1643 1643 return (0);
1644 1644 }
1645 1645
1646 1646 /*
1647 1647 * Reset the card
1648 1648 */
1649 1649 void
1650 1650 amd8111s_reset(struct LayerPointers *pLayerPointers)
1651 1651 {
1652 1652 amd8111s_sw_reset(pLayerPointers);
1653 1653 mdlHWReset(pLayerPointers);
1654 1654 }
1655 1655
1656 1656 /*
1657 1657 * attach(9E) -- Attach a device to the system
1658 1658 *
1659 1659 * Called once for each board after it is successfully probed.
1660 1660 * It will:
1661 1661 * a. create the minor device node for the instance.
1662 1662 * b. allocate & initialize the four layers (calls odlInit)
1663 1663 * c. get the MAC address
1664 1664 * d. set pLayerPointers as the gld private pointer
1665 1665 * e. register with GLD
1666 1666 * If any action fails, it cleans up & returns DDI_FAILURE;
1667 1667 * otherwise it returns DDI_SUCCESS.
1668 1668 */
1669 1669 static int
1670 1670 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1671 1671 {
1672 1672 mac_register_t *macp;
1673 1673 struct LayerPointers *pLayerPointers;
1674 1674 struct odl *pOdl;
1675 1675 ddi_acc_handle_t *pci_handle;
1676 1676 ddi_device_acc_attr_t dev_attr;
1677 1677 caddr_t addrp = NULL;
1678 1678
1679 1679 switch (cmd) {
1680 1680 case DDI_ATTACH:
1681 1681 break;
1682 1682 default:
1683 1683 return (DDI_FAILURE);
1684 1684 }
1685 1685
1686 1686 pLayerPointers = (struct LayerPointers *)
1687 1687 kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
1688 1688 amd8111sadapter = pLayerPointers;
1689 1689
1690 1690 /* Get device instance number */
1691 1691 pLayerPointers->instance = ddi_get_instance(devinfo);
1692 1692 ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
1693 1693
1694 1694 pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
1695 1695 pLayerPointers->pOdl = pOdl;
1696 1696
1697 1697 pOdl->devinfo = devinfo;
1698 1698
1699 1699 /*
1700 1700	 * Here, we only allocate memory for struct odl and initialize it.
1701 1701	 * All other memory allocation and initialization is done in odlInit
1702 1702	 * later in this routine.
1703 1703 */
1704 1704 if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
1705 1705 != DDI_SUCCESS) {
1706 1706 amd8111s_log(pLayerPointers, CE_NOTE,
1707 1707 "attach: get iblock cookies failed");
1708 1708 goto attach_failure;
1709 1709 }
1710 1710
1711 1711 rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
1712 1712 mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
1713 1713 MUTEX_DRIVER, (void *)pOdl->iblock);
1714 1714 mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
1715 1715 MUTEX_DRIVER, (void *)pOdl->iblock);
1716 1716
1717 1717 /* Setup PCI space */
1718 1718 if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
1719 1719		goto attach_failure;
1720 1720 }
1721 1721 pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
1722 1722 pci_handle = &pOdl->pci_handle;
1723 1723
1724 1724 pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
1725 1725 pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
1726 1726
1727 1727 /*
1728 1728 * Allocate and initialize all resource and map device registers.
1729 1729 * If failed, it returns a non-zero value.
1730 1730 */
1731 1731 if (amd8111s_odlInit(pLayerPointers) != 0) {
1732 1732 goto attach_failure;
1733 1733 }
1734 1734 pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
1735 1735
1736 1736 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1737 1737 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1738 1738 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1739 1739
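	/*
	 * Map 4KB of device registers from register set 1 (set 0 is PCI
	 * config space) for little-endian, strictly ordered access.
	 */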
1740 1740 if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
1741 1741 &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
1742 1742 amd8111s_log(pLayerPointers, CE_NOTE,
1743 1743 "attach: ddi_regs_map_setup failed");
1744 1744 goto attach_failure;
1745 1745 }
1746 1746 pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
1747 1747
1748 1748 /* Initialize HW */
1749 1749 mdlOpen(pLayerPointers);
1750 1750 mdlGetActiveMediaInfo(pLayerPointers);
1751 1751 pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
1752 1752
1753 1753 /*
1754 1754 * Setup the interrupt
1755 1755 */
1756 1756 if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
1757 1757 (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1758 1758 goto attach_failure;
1759 1759 }
1760 1760 pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
1761 1761
1762 1762 /*
1763 1763 * Setup soft intr
1764 1764 */
1765 1765 if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
1766 1766 NULL, NULL, amd8111s_send_drain,
1767 1767 (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1768 1768 goto attach_failure;
1769 1769 }
1770 1770 pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
1771 1771
1772 1772 /*
1773 1773	 * Initialize the mac structure
1774 1774 */
1775 1775 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
1776 1776 goto attach_failure;
1777 1777
1778 1778 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1779 1779 macp->m_driver = pLayerPointers;
1780 1780 macp->m_dip = devinfo;
1781 1781 /* Get MAC address */
1782 1782 mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
1783 1783 macp->m_src_addr = pOdl->MacAddress;
1784 1784 macp->m_callbacks = &amd8111s_m_callbacks;
1785 1785 macp->m_min_sdu = 0;
1786 1786 /* 1518 - 14 (ether header) - 4 (CRC) */
1787 1787 macp->m_max_sdu = ETHERMTU;
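	/* Advertise room for a VLAN tag (VLAN_TAGSZ bytes) beyond the SDU. */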
1788 1788 macp->m_margin = VLAN_TAGSZ;
1789 1789
1790 1790 /*
1791 1791 * Finally, we're ready to register ourselves with the MAC layer
1792 1792 * interface; if this succeeds, we're ready to start.
1793 1793 */
1794 1794 if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
1795 1795 mac_free(macp);
1796 1796 goto attach_failure;
1797 1797 }
1798 1798 mac_free(macp);
1799 1799
1800 1800 pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;
1801 1801
1802 1802 return (DDI_SUCCESS);
1803 1803
1804 1804 attach_failure:
1805 1805 (void) amd8111s_unattach(devinfo, pLayerPointers);
1806 1806 return (DDI_FAILURE);
1807 1807
1808 1808 }
1809 1809
1810 1810 /*
1811 1811 * detach(9E) -- Detach a device from the system
1812 1812 *
1813 1813 * It is called for each device instance when the system is preparing to
1814 1814 * unload a dynamically unloadable driver.
1815 1815  * It will:
1816 1816  * a. check whether any driver buffers are held by the OS.
1817 1817  * b. clean up all allocated memory if it is not in use by the OS.
1818 1818  * c. unregister with GLD.
1819 1819  * d. return DDI_SUCCESS on a successful free and unregister,
1820 1820  *    else DDI_FAILURE.
1821 1821 */
1822 1822 static int
1823 1823 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1824 1824 {
1825 1825 struct LayerPointers *pLayerPointers;
1826 1826
1827 1827 switch (cmd) {
1828 1828 case DDI_DETACH:
1829 1829 break;
1830 1830 default:
1831 1831 return (DDI_FAILURE);
1832 1832 }
1833 1833
1834 1834 /*
1835 1835 * Get the driver private (struct LayerPointers *) structure
1836 1836 */
1837 1837 if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
1838 1838 (devinfo)) == NULL) {
1839 1839 return (DDI_FAILURE);
1840 1840 }
1841 1841
1842 1842 return (amd8111s_unattach(devinfo, pLayerPointers));
1843 1843 }
1844 1844
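/*
 * Common teardown path for detach(9E) and a failed attach(9E): undo only
 * those attach steps that actually completed, as recorded by the
 * AMD8111S_ATTACH_* bits in attach_progress.
 */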
1845 1845 static int
1846 1846 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
1847 1847 {
1848 1848 struct odl *pOdl = pLayerPointers->pOdl;
1849 1849
1850 1850 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
1851 1851 /* Unregister driver from the GLD interface */
1852 1852 if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
1853 1853 return (DDI_FAILURE);
1854 1854 }
1855 1855 }
1856 1856
1857 1857 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
1858 1858 ddi_remove_intr(devinfo, 0, pOdl->iblock);
1859 1859 }
1860 1860
1861 1861 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
1862 1862 ddi_remove_softintr(pOdl->drain_id);
1863 1863 }
1864 1864
1865 1865 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
1866 1866 /* Stop HW */
1867 1867 mdlStopChip(pLayerPointers);
1868 1868 ddi_regs_map_free(&(pOdl->MemBasehandle));
1869 1869 }
1870 1870
1871 1871 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
1872 1872 /* Free All memory allocated */
1873 1873 amd8111s_free_resource(pLayerPointers);
1874 1874 }
1875 1875
1876 1876 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
1877 1877 pci_config_teardown(&pOdl->pci_handle);
1878 1878 mutex_destroy(&pOdl->mdlSendLock);
1879 1879 mutex_destroy(&pOdl->mdlRcvLock);
1880 1880 rw_destroy(&pOdl->chip_lock);
1881 1881 }
1882 1882
1883 1883 kmem_free(pOdl, sizeof (struct odl));
1884 1884 kmem_free(pLayerPointers, sizeof (struct LayerPointers));
1885 1885
1886 1886 return (DDI_SUCCESS);
1887 1887 }
1888 1888
1889 1889 /*
1890 1890  * (GLD Entry Point) GLD calls this entry point periodically to
1891 1891  * get driver statistics.
1892 1892 */
1893 1893 static int
1894 1894 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
1895 1895 {
1896 1896 struct LayerPointers *pLayerPointers = arg;
1897 1897 struct amd8111s_statistics *adapterStat;
1898 1898
1899 1899 adapterStat = &pLayerPointers->pOdl->statistics;
1900 1900
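	/*
	 * Each packet and byte counter below is the sum of the soft
	 * counter (hardware MIB values folded in earlier) and the live
	 * count still sitting in the hardware MIB register.
	 */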
1901 1901 switch (stat) {
1902 1902
1903 1903 /*
1904 1904 * Current Status
1905 1905 */
1906 1906 case MAC_STAT_IFSPEED:
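		/* Speed is kept in Mbit/s; MAC expects bits per second. */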
1907 1907 *val = pLayerPointers->pMdl->Speed * 1000000;
1908 1908 break;
1909 1909
1910 1910 case ETHER_STAT_LINK_DUPLEX:
1911 1911 if (pLayerPointers->pMdl->FullDuplex) {
1912 1912 *val = LINK_DUPLEX_FULL;
1913 1913 } else {
1914 1914 *val = LINK_DUPLEX_HALF;
1915 1915 }
1916 1916 break;
1917 1917
1918 1918 /*
1919 1919 * Capabilities
1920 1920 */
1921 1921 case ETHER_STAT_CAP_1000FDX:
1922 1922 *val = 0;
1923 1923 break;
1924 1924
1925 1925 case ETHER_STAT_CAP_1000HDX:
1926 1926 *val = 0;
1927 1927 break;
1928 1928
1929 1929 case ETHER_STAT_CAP_100FDX:
1930 1930 *val = 1;
1931 1931 break;
1932 1932
1933 1933 case ETHER_STAT_CAP_100HDX:
1934 1934 *val = 1;
1935 1935 break;
1936 1936
1937 1937 case ETHER_STAT_CAP_10FDX:
1938 1938 *val = 1;
1939 1939 break;
1940 1940
1941 1941 case ETHER_STAT_CAP_10HDX:
1942 1942 *val = 1;
1943 1943 break;
1944 1944
1945 1945 case ETHER_STAT_CAP_ASMPAUSE:
1946 1946 *val = 1;
1947 1947 break;
1948 1948
1949 1949 case ETHER_STAT_CAP_PAUSE:
1950 1950 *val = 1;
1951 1951 break;
1952 1952
1953 1953 case ETHER_STAT_CAP_AUTONEG:
1954 1954 *val = 1;
1955 1955 break;
1956 1956
1957 1957 case ETHER_STAT_ADV_CAP_1000FDX:
1958 1958 *val = 0;
1959 1959 break;
1960 1960
1961 1961 case ETHER_STAT_ADV_CAP_1000HDX:
1962 1962 *val = 0;
1963 1963 break;
1964 1964
1965 1965 case ETHER_STAT_ADV_CAP_100FDX:
1966 1966 *val = 1;
1967 1967 break;
1968 1968
1969 1969 case ETHER_STAT_ADV_CAP_100HDX:
1970 1970 *val = 1;
1971 1971 break;
1972 1972
1973 1973 case ETHER_STAT_ADV_CAP_10FDX:
1974 1974 *val = 1;
1975 1975 break;
1976 1976
1977 1977 case ETHER_STAT_ADV_CAP_10HDX:
1978 1978 *val = 1;
1979 1979 break;
1980 1980
1981 1981 case ETHER_STAT_ADV_CAP_ASMPAUSE:
1982 1982 *val = 1;
1983 1983 break;
1984 1984
1985 1985 case ETHER_STAT_ADV_CAP_PAUSE:
1986 1986 *val = 1;
1987 1987 break;
1988 1988
1989 1989 case ETHER_STAT_ADV_CAP_AUTONEG:
1990 1990 *val = 1;
1991 1991 break;
1992 1992
1993 1993 /*
1994 1994 * Rx Counters
1995 1995 */
1996 1996 case MAC_STAT_IPACKETS:
1997 1997 *val = adapterStat->rx_mib_unicst_packets +
1998 1998 adapterStat->rx_mib_multicst_packets +
1999 1999 adapterStat->rx_mib_broadcst_packets +
2000 2000 mdlReadMib(pLayerPointers, RcvUniCastPkts) +
2001 2001 mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
2002 2002 mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2003 2003 break;
2004 2004
2005 2005 case MAC_STAT_RBYTES:
2006 2006 *val = adapterStat->rx_mib_bytes +
2007 2007 mdlReadMib(pLayerPointers, RcvOctets);
2008 2008 break;
2009 2009
2010 2010 case MAC_STAT_MULTIRCV:
2011 2011 *val = adapterStat->rx_mib_multicst_packets +
2012 2012 mdlReadMib(pLayerPointers, RcvMultiCastPkts);
2013 2013 break;
2014 2014
2015 2015 case MAC_STAT_BRDCSTRCV:
2016 2016 *val = adapterStat->rx_mib_broadcst_packets +
2017 2017 mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2018 2018 break;
2019 2019
2020 2020 case MAC_STAT_NORCVBUF:
2021 2021 *val = adapterStat->rx_allocfail +
2022 2022 adapterStat->rx_mib_drop_packets +
2023 2023 mdlReadMib(pLayerPointers, RcvDropPktsRing0);
2024 2024 break;
2025 2025
2026 2026 case MAC_STAT_IERRORS:
2027 2027 *val = adapterStat->rx_mib_align_err_packets +
2028 2028 adapterStat->rx_mib_fcs_err_packets +
2029 2029 adapterStat->rx_mib_symbol_err_packets +
2030 2030 mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
2031 2031 mdlReadMib(pLayerPointers, RcvFCSErrors) +
2032 2032 mdlReadMib(pLayerPointers, RcvSymbolErrors);
2033 2033 break;
2034 2034
2035 2035 case ETHER_STAT_ALIGN_ERRORS:
2036 2036 *val = adapterStat->rx_mib_align_err_packets +
2037 2037 mdlReadMib(pLayerPointers, RcvAlignmentErrors);
2038 2038 break;
2039 2039
2040 2040 case ETHER_STAT_FCS_ERRORS:
2041 2041 *val = adapterStat->rx_mib_fcs_err_packets +
2042 2042 mdlReadMib(pLayerPointers, RcvFCSErrors);
2043 2043 break;
2044 2044
2045 2045 /*
2046 2046 * Tx Counters
2047 2047 */
2048 2048 case MAC_STAT_OPACKETS:
2049 2049 *val = adapterStat->tx_mib_packets +
2050 2050 mdlReadMib(pLayerPointers, XmtPackets);
2051 2051 break;
2052 2052
2053 2053 case MAC_STAT_OBYTES:
2054 2054 *val = adapterStat->tx_mib_bytes +
2055 2055 mdlReadMib(pLayerPointers, XmtOctets);
2056 2056 break;
2057 2057
2058 2058 case MAC_STAT_MULTIXMT:
2059 2059 *val = adapterStat->tx_mib_multicst_packets +
2060 2060 mdlReadMib(pLayerPointers, XmtMultiCastPkts);
2061 2061 break;
2062 2062
2063 2063 case MAC_STAT_BRDCSTXMT:
2064 2064 *val = adapterStat->tx_mib_broadcst_packets +
2065 2065 mdlReadMib(pLayerPointers, XmtBroadCastPkts);
2066 2066 break;
2067 2067
2068 2068 case MAC_STAT_NOXMTBUF:
2069 2069 *val = adapterStat->tx_no_descriptor;
2070 2070 break;
2071 2071
2072 2072 case MAC_STAT_OERRORS:
2073 2073 *val = adapterStat->tx_mib_ex_coll_packets +
2074 2074 mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2075 2075 break;
2076 2076
2077 2077 case MAC_STAT_COLLISIONS:
2078 2078		*val = adapterStat->tx_mib_collision_packets +
2079 2079 mdlReadMib(pLayerPointers, XmtCollisions);
2080 2080 break;
2081 2081
2082 2082 case ETHER_STAT_FIRST_COLLISIONS:
2083 2083 *val = adapterStat->tx_mib_one_coll_packets +
2084 2084 mdlReadMib(pLayerPointers, XmtOneCollision);
2085 2085 break;
2086 2086
2087 2087 case ETHER_STAT_MULTI_COLLISIONS:
2088 2088 *val = adapterStat->tx_mib_multi_coll_packets +
2089 2089 mdlReadMib(pLayerPointers, XmtMultipleCollision);
2090 2090 break;
2091 2091
2092 2092 case ETHER_STAT_EX_COLLISIONS:
2093 2093 *val = adapterStat->tx_mib_ex_coll_packets +
2094 2094 mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2095 2095 break;
2096 2096
2097 2097 case ETHER_STAT_TX_LATE_COLLISIONS:
2098 2098 *val = adapterStat->tx_mib_late_coll_packets +
2099 2099 mdlReadMib(pLayerPointers, XmtLateCollision);
2100 2100 break;
2101 2101
2102 2102 case ETHER_STAT_DEFER_XMTS:
2103 2103 *val = adapterStat->tx_mib_defer_trans_packets +
2104 2104 mdlReadMib(pLayerPointers, XmtDeferredTransmit);
2105 2105 break;
2106 2106
2107 2107 default:
2108 2108 return (ENOTSUP);
2109 2109 }
2110 2110 return (0);
2111 2111 }
2112 2112
2113 2113 /*
2114 2114  * Memory access functions used by MDL to read/write card registers.
2115 2115 */
2116 2116 unsigned char
2117 2117 READ_REG8(struct LayerPointers *pLayerPointers, long x)
2118 2118 {
2119 2119 return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
2120 2120 }
2121 2121
2122 2122 int
2123 2123 READ_REG16(struct LayerPointers *pLayerPointers, long x)
2124 2124 {
2125 2125 return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
2126 2126 (uint16_t *)(x)));
2127 2127 }
2128 2128
2129 2129 long
2130 2130 READ_REG32(struct LayerPointers *pLayerPointers, long x)
2131 2131 {
2132 2132 return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
2133 2133 (uint32_t *)(x)));
2134 2134 }
2135 2135
2136 2136 void
2137 2137 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
2138 2138 {
2139 2139 ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
2140 2140 }
2141 2141
2142 2142 void
2143 2143 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
2144 2144 {
2145 2145 ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
2146 2146 }
2147 2147
2148 2148 void
2149 2149 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
2150 2150 {
2151 2151 ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
2152 2152 }
2153 2153
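/*
 * Write an 8-byte buffer to eight consecutive register addresses, one
 * byte at a time.
 */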
2154 2154 void
2155 2155 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
2156 2156 {
2157 2157 int i;
2158 2158 for (i = 0; i < 8; i++) {
2159 2159 WRITE_REG8(pLayerPointers, (x + i), y[i]);
2160 2160 }
2161 2161 }
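/*
 * Illustrative sketch only, not part of the driver: MDL callers form a
 * register address by adding a register offset to the mapped base (see
 * the MIB clear above), and can combine the wrappers above for a
 * read-modify-write. "offset" and "bits" are hypothetical stand-ins for
 * a real register offset and bit mask.
 */
#ifdef AMD8111S_EXAMPLE_ONLY
static void
amd8111s_example_set_bits(struct LayerPointers *pLayerPointers,
    long offset, int bits)
{
	long reg = pLayerPointers->pMdl->Mem_Address + offset;

	WRITE_REG32(pLayerPointers, reg,
	    (int)READ_REG32(pLayerPointers, reg) | bits);
}
#endif	/* AMD8111S_EXAMPLE_ONLY */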
(2008 lines elided)