7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/chxge/ch.c
+++ new/usr/src/uts/common/io/chxge/ch.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * This file is part of the Chelsio T1 Ethernet driver.
29 29 *
30 30 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
31 31 */
32 32
33 33 /*
34 34 * Solaris Multithreaded STREAMS DLPI Chelsio PCI Ethernet Driver
35 35 */
36 36
37 37 /* #define CH_DEBUG 1 */
38 38 #ifdef CH_DEBUG
39 39 #define DEBUG_ENTER(a) debug_enter(a)
40 40 #define PRINT(a) printf a
41 41 #else
42 42 #define DEBUG_ENTER(a)
43 43 #define PRINT(a)
44 44 #endif
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/conf.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/stropts.h>
50 50 #include <sys/stream.h>
51 51 #include <sys/strlog.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/stat.h>
54 54 #include <sys/kstat.h>
55 55 #include <sys/modctl.h>
56 56 #include <sys/errno.h>
57 57 #include <sys/cmn_err.h>
58 58 #include <sys/ddi.h>
59 59 #include <sys/sunddi.h>
60 60 #include <sys/dlpi.h>
61 61 #include <sys/ethernet.h>
62 62 #include <sys/strsun.h>
63 63 #include <sys/strsubr.h>
64 64 #include <inet/common.h>
65 65 #include <inet/nd.h>
66 66 #include <inet/ip.h>
67 67 #include <inet/tcp.h>
68 68 #include <sys/pattr.h>
69 69 #include <sys/gld.h>
70 70 #include "ostypes.h"
71 71 #include "common.h"
72 72 #include "oschtoe.h"
73 73 #include "sge.h"
74 74 #include "regs.h"
75 75 #include "ch.h" /* Chelsio Driver specific parameters */
76 76 #include "version.h"
77 77
78 78 /*
79 79 * Function prototypes.
80 80 */
81 81 static int ch_attach(dev_info_t *, ddi_attach_cmd_t);
82 82 static int ch_detach(dev_info_t *, ddi_detach_cmd_t);
83 83 static int ch_quiesce(dev_info_t *);
84 84 static void ch_free_dma_handles(ch_t *chp);
85 85 static void ch_set_name(ch_t *chp, int unit);
86 86 static void ch_free_name(ch_t *chp);
87 87 static void ch_get_prop(ch_t *chp);
88 88
89 89 #if defined(__sparc)
90 90 static void ch_free_dvma_handles(ch_t *chp);
91 91 #endif
92 92
93 93 /* GLD interfaces */
94 94 static int ch_reset(gld_mac_info_t *);
95 95 static int ch_start(gld_mac_info_t *);
96 96 static int ch_stop(gld_mac_info_t *);
97 97 static int ch_set_mac_address(gld_mac_info_t *, uint8_t *);
98 98 static int ch_set_multicast(gld_mac_info_t *, uint8_t *, int);
99 99 static int ch_ioctl(gld_mac_info_t *, queue_t *, mblk_t *);
100 100 static int ch_set_promiscuous(gld_mac_info_t *, int);
101 101 static int ch_get_stats(gld_mac_info_t *, struct gld_stats *);
102 102 static int ch_send(gld_mac_info_t *, mblk_t *);
103 103 static uint_t ch_intr(gld_mac_info_t *);
104 104
105 105 /*
106 106 * Data access requirements.
107 107 */
108 108 static struct ddi_device_acc_attr le_attr = {
109 109 DDI_DEVICE_ATTR_V0,
110 110 DDI_STRUCTURE_LE_ACC,
111 111 DDI_STRICTORDER_ACC
112 112 };
113 113
114 114 /*
115 115 * No swap mapping device attributes
116 116 */
117 117 static struct ddi_device_acc_attr null_attr = {
118 118 DDI_DEVICE_ATTR_V0,
119 119 DDI_NEVERSWAP_ACC,
120 120 DDI_STRICTORDER_ACC
121 121 };
122 122
123 123 /*
124 124 * STREAMS driver identification structure module_info(9s)
125 125 *
126 126 * driver limit values
127 127 */
128 128
129 129 static struct module_info ch_minfo = {
130 130 CHIDNUM, /* mi_idnum */
131 131 CHNAME, /* mi_idname */
132 132 CHMINPSZ, /* mi_minpsz */
133 133 CHMAXPSZ, /* mi_maxpsz */
134 134 CHHIWAT, /* mi_hiwat */
135 135 CHLOWAT /* mi_lowat */
136 136 };
137 137
138 138 /*
139 139 * STREAMS queue processing procedures qinit(9s)
140 140 *
141 141 * read queue procedures
142 142 */
143 143
144 144 static struct qinit ch_rinit = {
145 145 (int (*)()) NULL, /* qi_putp */
146 146 gld_rsrv, /* qi_srvp */
147 147 gld_open, /* qi_qopen */
148 148 gld_close, /* qi_qclose */
149 149 (int (*)()) NULL, /* qi_qadmin */
150 150 &ch_minfo, /* qi_minfo */
151 151 NULL /* qi_mstat */
152 152 };
153 153
154 154 /*
155 155 * STREAMS queue processing procedures qinit(9s)
156 156 *
157 157 * write queue procedures
158 158 */
159 159
160 160 static struct qinit ch_winit = {
161 161 gld_wput, /* qi_putp */
162 162 gld_wsrv, /* qi_srvp */
163 163 (int (*)()) NULL, /* qi_qopen */
164 164 (int (*)()) NULL, /* qi_qclose */
165 165 (int (*)()) NULL, /* qi_qadmin */
166 166 &ch_minfo, /* qi_minfo */
167 167 NULL /* qi_mstat */
168 168 };
169 169
170 170 /*
171 171 * STREAMS entity declaration structure - streamtab(9s)
172 172 */
173 173 static struct streamtab chinfo = {
174 174 &ch_rinit, /* read queue information */
175 175 &ch_winit, /* write queue information */
176 176 NULL, /* st_muxrinit */
177 177 NULL /* st_muxwrinit */
178 178 };
179 179
180 180 /*
181 181 * Device driver ops vector - cb_ops(9s)
182 182 *
183 183 * character/block entry points structure.
184 184 * chinfo identifies driver as a STREAMS driver.
185 185 */
186 186
187 187 static struct cb_ops cb_ch_ops = {
188 188 nulldev, /* cb_open */
189 189 nulldev, /* cb_close */
190 190 nodev, /* cb_strategy */
191 191 nodev, /* cb_print */
192 192 nodev, /* cb_dump */
193 193 nodev, /* cb_read */
194 194 nodev, /* cb_write */
195 195 nodev, /* cb_ioctl */
196 196 nodev, /* cb_devmap */
197 197 nodev, /* cb_mmap */
198 198 nodev, /* cb_segmap */
199 199 nochpoll, /* cb_chpoll */
200 200 ddi_prop_op, /* report driver property information - prop_op(9e) */
201 201 &chinfo, /* cb_stream */
202 202 #if defined(__sparc)
203 203 D_MP | D_64BIT,
204 204 #else
205 205 D_MP, /* cb_flag (supports multi-threading) */
206 206 #endif
207 207 CB_REV, /* cb_rev */
208 208 nodev, /* cb_aread */
209 209 nodev /* cb_awrite */
210 210 };
211 211
212 212 /*
213 213 * dev_ops(9S) structure
214 214 *
215 215 * Device Operations table, for autoconfiguration
216 216 */
217 217
218 218 static struct dev_ops ch_ops = {
219 219 DEVO_REV, /* Driver build version */
220 220 0, /* Initial driver reference count */
221 221 gld_getinfo, /* funcp: get driver information - getinfo(9e) */
222 222 nulldev, /* funcp: entry point obsolete - identify(9e) */
223 223 nulldev, /* funp: probe for device - probe(9e) */
224 224 ch_attach, /* funp: attach driver to dev_info - attach(9e) */
225 225 ch_detach, /* funp: detach driver to unload - detach(9e) */
226 226 nodev, /* funp: reset device (not supported) - dev_ops(9s) */
227 227 &cb_ch_ops, /* ptr to cb_ops structure */
228 228 NULL, /* ptr to nexus bus operations structure (leaf) */
229 229 NULL, /* funp: change device power level - power(9e) */
230 230 ch_quiesce, /* devo_quiesce */
231 231 };
232 232
233 233 /*
234 234 * modldrv(9s) structure
235 235 *
236 236 * Definition for module specific device driver linkage structures (modctl.h)
237 237 */
238 238
239 239 static struct modldrv modldrv = {
240 240 &mod_driverops, /* driver module */
241 241 VERSION,
242 242 &ch_ops, /* driver ops */
243 243 };
244 244
245 245 /*
246 246 * modlinkage(9s) structure
247 247 *
248 248 * module linkage base structure (modctl.h)
249 249 */
250 250
251 251 static struct modlinkage modlinkage = {
252 252 MODREV_1, /* revision # of system */
253 - &modldrv, /* NULL terminated list of linkage strucures */
254 - NULL
253 + { &modldrv, NULL } /* NULL terminated list of linkage strucures */
255 254 };
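
The hunk above is the first of the two missing-braces fixes in this file: the linkage list member of struct modlinkage is itself an array, so with -Wmissing-braces now in effect its initializers get an explicit inner brace level. A minimal, self-contained sketch of the pattern the warning flags (the struct below is an illustrative stand-in, not the real modctl.h definition):

	struct linkage {
		int	rev;
		void	*list[4];	/* aggregate member, like the linkage array */
	};

	static int dummy;

	/* warns under -Wmissing-braces: the array member is initialized flat */
	static struct linkage flat   = { 1, &dummy, NULL };

	/* clean: inner braces delimit the array's own initializer list */
	static struct linkage braced = { 1, { &dummy, NULL } };

Both forms initialize identical bytes (brace elision is legal C); the change only makes the nesting explicit so the warning can be enabled tree-wide.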
256 255
257 256 /* ===================== start of STREAMS driver code ================== */
258 257
259 258 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
260 259 /*
261 260 * global pointer to toe per-driver control structure.
262 261 */
263 262 #define MAX_CARDS 4
264 263 ch_t *gchp[MAX_CARDS];
265 264 #endif
266 265
267 266 kmutex_t in_use_l;
268 267 uint32_t buffers_in_use[SZ_INUSE];
269 268 uint32_t in_use_index;
270 269
271 270 /*
272 271 * Ethernet broadcast address definition.
273 272 */
274 273 static struct ether_addr etherbroadcastaddr = {
275 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
274 + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
276 275 };
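
The second hunk applies the same treatment to the broadcast address: struct ether_addr wraps a single octet array (the driver later dereferences it as etherbroadcastaddr.ether_addr_octet), so the six 0xff bytes are now enclosed in an inner brace pair. A small sketch of the braced form in isolation, assuming only <sys/ethernet.h>, which this file already includes:

	#include <sys/ethernet.h>

	/* inner braces initialize the ether_addr_octet[] array member explicitly */
	static struct ether_addr bcast = {
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
	};

As before, the generated data is unchanged; only the initializer's nesting now matches the structure's layout.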
277 276
278 277 /*
279 278 * Module initialization functions.
280 279 *
281 280 * Routine Called by
282 281 * _init(9E) modload(9F)
283 282 * _info(9E) modinfo(9F)
284 283 * _fini(9E) modunload(9F)
285 284 */
286 285
287 286 /*
288 287 * _init(9E):
289 288 *
290 289 * Initial, one-time, resource allocation and data initialization.
291 290 */
292 291
293 292 int
294 293 _init(void)
295 294 {
296 295 int status;
297 296
298 297 status = mod_install(&modlinkage);
299 298
300 299 mutex_init(&in_use_l, NULL, MUTEX_DRIVER, NULL);
301 300
302 301 return (status);
303 302 }
304 303
305 304 /*
306 305 * _fini(9E): It is here that any device information that was allocated
307 306 * during the _init(9E) routine should be released and the module removed
308 307 * from the system. In the case of per-instance information, that information
309 308 * should be released in the _detach(9E) routine.
310 309 */
311 310
312 311 int
313 312 _fini(void)
314 313 {
315 314 int status;
316 315 int i;
317 316 uint32_t t = 0;
318 317
319 318 for (i = 0; i < SZ_INUSE; i++)
320 319 t += buffers_in_use[i];
321 320
322 321 if (t != NULL)
323 322 return (DDI_FAILURE);
324 323
325 324 status = mod_remove(&modlinkage);
326 325
327 326 if (status == DDI_SUCCESS)
328 327 mutex_destroy(&in_use_l);
329 328
330 329 return (status);
331 330 }
332 331
333 332 int
334 333 _info(struct modinfo *modinfop)
335 334 {
336 335 int status;
337 336
338 337
339 338 status = mod_info(&modlinkage, modinfop);
340 339
341 340 return (status);
342 341 }
343 342
344 343 /*
345 344 * Attach(9E) - This is called on the open to the device. It creates
346 345 * an instance of the driver. In this routine we create the minor
347 346 * device node. The routine also initializes all per-unit
348 347 * mutexes and condition variables.
349 348 *
350 349 * If we were resuming a suspended instance of a device due to power
351 350 * management, then that would be handled here as well. For more on
352 351 * that subject see the man page for pm(9E)
353 352 *
354 353 * Interface exists: make available by filling in network interface
355 354 * record. System will initialize the interface when it is ready
356 355 * to accept packets.
357 356 */
358 357 int chdebug = 0;
359 358 int ch_abort_debug = 0;
360 359
361 360 static int
362 361 ch_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
363 362 {
364 363 ch_t *chp;
365 364 int rv;
366 365 int unit;
367 366 #ifdef CH_DEBUG
368 367 int Version;
369 368 int VendorID;
370 369 int DeviceID;
371 370 int SubDeviceID;
372 371 int Command;
373 372 #endif
374 373 gld_mac_info_t *macinfo; /* GLD stuff follows */
375 374 char *driver;
376 375
377 376 if (ch_abort_debug)
378 377 debug_enter("ch_attach");
379 378
380 379 if (chdebug)
381 380 return (DDI_FAILURE);
382 381
383 382
384 383 if (cmd == DDI_ATTACH) {
385 384
386 385 unit = ddi_get_instance(dip);
387 386
388 387 driver = (char *)ddi_driver_name(dip);
389 388
390 389 PRINT(("driver %s unit: %d\n", driver, unit));
391 390
392 391 macinfo = gld_mac_alloc(dip);
393 392 if (macinfo == NULL) {
394 393 PRINT(("macinfo allocation failed\n"));
395 394 DEBUG_ENTER("ch_attach");
396 395 return (DDI_FAILURE);
397 396 }
398 397
399 398 chp = (ch_t *)kmem_zalloc(sizeof (ch_t), KM_SLEEP);
400 399
401 400 if (chp == NULL) {
402 401 PRINT(("zalloc of chp failed\n"));
403 402 DEBUG_ENTER("ch_attach");
404 403
405 404 gld_mac_free(macinfo);
406 405
407 406 return (DDI_FAILURE);
408 407 }
409 408
410 409 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
411 410 /* Solaris TOE support */
412 411 gchp[unit] = chp;
413 412 #endif
414 413
415 414 PRINT(("attach macinfo: %p chp: %p\n", macinfo, chp));
416 415
417 416 chp->ch_dip = dip;
418 417 chp->ch_macp = macinfo;
419 418 chp->ch_unit = unit;
420 419 ch_set_name(chp, unit);
421 420
422 421 /*
423 422 * map in PCI register spaces
424 423 *
425 424 * PCI register set 0 - PCI configuration space
426 425 * PCI register set 1 - T101 card register space #1
427 426 */
428 427
429 428 /* map in T101 PCI configuration space */
430 429 rv = pci_config_setup(
431 430 dip, /* ptr to dev's dev_info struct */
432 431 &chp->ch_hpci); /* ptr to data access handle */
433 432
434 433 if (rv != DDI_SUCCESS) {
435 434 PRINT(("PCI config setup failed\n"));
436 435 DEBUG_ENTER("ch_attach");
437 436 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
438 437 gchp[unit] = NULL;
439 438 #endif
440 439 cmn_err(CE_WARN, "%s: ddi_config_setup PCI error %d\n",
441 440 chp->ch_name, rv);
442 441
443 442 ch_free_name(chp);
444 443 kmem_free(chp, sizeof (ch_t));
445 444 gld_mac_free(macinfo);
446 445
447 446 return (DDI_FAILURE);
448 447 }
449 448
450 449 ch_get_prop(chp);
451 450
452 451 macinfo->gldm_devinfo = dip;
453 452 macinfo->gldm_private = (caddr_t)chp;
454 453 macinfo->gldm_reset = ch_reset;
455 454 macinfo->gldm_start = ch_start;
456 455 macinfo->gldm_stop = ch_stop;
457 456 macinfo->gldm_set_mac_addr = ch_set_mac_address;
458 457 macinfo->gldm_send = ch_send;
459 458 macinfo->gldm_set_promiscuous = ch_set_promiscuous;
460 459 macinfo->gldm_get_stats = ch_get_stats;
461 460 macinfo->gldm_ioctl = ch_ioctl;
462 461 macinfo->gldm_set_multicast = ch_set_multicast;
463 462 macinfo->gldm_intr = ch_intr;
464 463 macinfo->gldm_mctl = NULL;
465 464
466 465 macinfo->gldm_ident = driver;
467 466 macinfo->gldm_type = DL_ETHER;
468 467 macinfo->gldm_minpkt = 0;
469 468 macinfo->gldm_maxpkt = chp->ch_mtu;
470 469 macinfo->gldm_addrlen = ETHERADDRL;
471 470 macinfo->gldm_saplen = -2;
472 471 macinfo->gldm_ppa = unit;
473 472 macinfo->gldm_broadcast_addr =
474 473 etherbroadcastaddr.ether_addr_octet;
475 474
476 475
477 476 /*
478 477 * do a power reset of card
479 478 *
480 479 * 1. set PwrState to D3hot (3)
481 480 * 2. clear PwrState flags
482 481 */
483 482 pci_config_put32(chp->ch_hpci, 0x44, 3);
484 483 pci_config_put32(chp->ch_hpci, 0x44, 0);
485 484
486 485 /* delay .5 sec */
487 486 DELAY(500000);
488 487
489 488 #ifdef CH_DEBUG
490 489 VendorID = pci_config_get16(chp->ch_hpci, 0);
491 490 DeviceID = pci_config_get16(chp->ch_hpci, 2);
492 491 SubDeviceID = pci_config_get16(chp->ch_hpci, 0x2e);
493 492 Command = pci_config_get16(chp->ch_hpci, 4);
494 493
495 494 PRINT(("IDs: %x,%x,%x\n", VendorID, DeviceID, SubDeviceID));
496 495 PRINT(("Command: %x\n", Command));
497 496 #endif
498 497 /* map in T101 register space (BAR0) */
499 498 rv = ddi_regs_map_setup(
500 499 dip, /* ptr to dev's dev_info struct */
501 500 BAR0, /* register address space */
502 501 &chp->ch_bar0, /* address of offset */
503 502 0, /* offset into register address space */
504 503 0, /* length mapped (everything) */
505 504 &le_attr, /* ptr to device attr structure */
506 505 &chp->ch_hbar0); /* ptr to data access handle */
507 506
508 507 if (rv != DDI_SUCCESS) {
509 508 PRINT(("map registers failed\n"));
510 509 DEBUG_ENTER("ch_attach");
511 510 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
512 511 gchp[unit] = NULL;
513 512 #endif
514 513 cmn_err(CE_WARN,
515 514 "%s: ddi_regs_map_setup BAR0 error %d\n",
516 515 chp->ch_name, rv);
517 516
518 517 pci_config_teardown(&chp->ch_hpci);
519 518 ch_free_name(chp);
520 519 kmem_free(chp, sizeof (ch_t));
521 520 gld_mac_free(macinfo);
522 521
523 522 return (DDI_FAILURE);
524 523 }
525 524
526 525 #ifdef CH_DEBUG
527 526 Version = ddi_get32(chp->ch_hbar0,
528 527 (uint32_t *)(chp->ch_bar0+0x6c));
529 528 #endif
530 529
531 530 (void) ddi_dev_regsize(dip, 1, &chp->ch_bar0sz);
532 531
533 532 PRINT(("PCI BAR0 space addr: %p\n", chp->ch_bar0));
534 533 PRINT(("PCI BAR0 space size: %x\n", chp->ch_bar0sz));
535 534 PRINT(("PE Version: %x\n", Version));
536 535
537 536 /*
538 537 * Add interrupt to system.
539 538 */
540 539 rv = ddi_get_iblock_cookie(
541 540 dip, /* ptr to dev's dev_info struct */
542 541 0, /* interrupt # (0) */
543 542 &chp->ch_icookp); /* ptr to interrupt block cookie */
544 543
545 544 if (rv != DDI_SUCCESS) {
546 545 PRINT(("iblock cookie failed\n"));
547 546 DEBUG_ENTER("ch_attach");
548 547 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
549 548 gchp[unit] = NULL;
550 549 #endif
551 550 cmn_err(CE_WARN,
552 551 "%s: ddi_get_iblock_cookie error %d\n",
553 552 chp->ch_name, rv);
554 553
555 554 ddi_regs_map_free(&chp->ch_hbar0);
556 555 pci_config_teardown(&chp->ch_hpci);
557 556 ch_free_name(chp);
558 557 kmem_free(chp, sizeof (ch_t));
559 558 gld_mac_free(macinfo);
560 559
561 560 return (DDI_FAILURE);
562 561 }
563 562
564 563 /*
565 564 * add interrupt handler before card setup.
566 565 */
567 566 rv = ddi_add_intr(
568 567 dip, /* ptr to dev's dev_info struct */
569 568 0, /* interrupt # (0) */
570 569 0, /* iblock cookie ptr (NULL) */
571 570 0, /* idevice cookie ptr (NULL) */
572 571 gld_intr, /* function ptr to interrupt handler */
573 572 (caddr_t)macinfo); /* handler argument */
574 573
575 574 if (rv != DDI_SUCCESS) {
576 575 PRINT(("add_intr failed\n"));
577 576 DEBUG_ENTER("ch_attach");
578 577 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
579 578 gchp[unit] = NULL;
580 579 #endif
581 580 cmn_err(CE_WARN, "%s: ddi_add_intr error %d\n",
582 581 chp->ch_name, rv);
583 582
584 583 ddi_regs_map_free(&chp->ch_hbar0);
585 584 pci_config_teardown(&chp->ch_hpci);
586 585 ch_free_name(chp);
587 586 kmem_free(chp, sizeof (ch_t));
588 587 gld_mac_free(macinfo);
589 588
590 589 return (DDI_FAILURE);
591 590 }
592 591
593 592 /* initialize all the remaining per-card locks */
594 593 mutex_init(&chp->ch_lock, NULL, MUTEX_DRIVER,
595 594 (void *)chp->ch_icookp);
596 595 mutex_init(&chp->ch_intr, NULL, MUTEX_DRIVER,
597 596 (void *)chp->ch_icookp);
598 597 mutex_init(&chp->ch_mc_lck, NULL, MUTEX_DRIVER, NULL);
599 598 mutex_init(&chp->ch_dh_lck, NULL, MUTEX_DRIVER, NULL);
600 599 mutex_init(&chp->mac_lock, NULL, MUTEX_DRIVER, NULL);
601 600
602 601 /* ------- initialize Chelsio card ------- */
603 602
604 603 if (pe_attach(chp)) {
605 604 PRINT(("card initialization failed\n"));
606 605 DEBUG_ENTER("ch_attach");
607 606 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
608 607 gchp[unit] = NULL;
609 608 #endif
610 609 cmn_err(CE_WARN, "%s: pe_attach failed\n",
611 610 chp->ch_name);
612 611
613 612 mutex_destroy(&chp->ch_lock);
614 613 mutex_destroy(&chp->ch_intr);
615 614 mutex_destroy(&chp->ch_mc_lck);
616 615 mutex_destroy(&chp->ch_dh_lck);
617 616 mutex_destroy(&chp->mac_lock);
618 617 ddi_remove_intr(dip, 0, chp->ch_icookp);
619 618 ddi_regs_map_free(&chp->ch_hbar0);
620 619 pci_config_teardown(&chp->ch_hpci);
621 620 ch_free_name(chp);
622 621 kmem_free(chp, sizeof (ch_t));
623 622 gld_mac_free(macinfo);
624 623
625 624 return (DDI_FAILURE);
626 625 }
627 626
628 627 /* ------- done with Chelsio card ------- */
629 628
630 629 /* now can set mac address */
631 630 macinfo->gldm_vendor_addr = pe_get_mac(chp);
632 631
633 632 macinfo->gldm_cookie = chp->ch_icookp;
634 633
635 634 /*
636 635 * We only activate checksum offload for T2 architectures.
637 636 */
638 637 if (is_T2(chp)) {
639 638 if (chp->ch_config.cksum_enabled)
640 639 macinfo->gldm_capabilities |=
641 640 GLD_CAP_CKSUM_FULL_V4;
642 641 } else
643 642 chp->ch_config.cksum_enabled = 0;
644 643
645 644 rv = gld_register(
646 645 dip, /* ptr to dev's dev_info struct */
647 646 (char *)ddi_driver_name(dip), /* driver name */
648 647 macinfo); /* ptr to gld macinfo buffer */
649 648
650 649 /*
651 650 * The Jumbo frames capability is not yet available
652 651 * in Solaris 10 so registration will fail. MTU > 1500 is
653 652 * supported in Update 1.
654 653 */
655 654 if (rv != DDI_SUCCESS) {
656 655 cmn_err(CE_NOTE, "MTU > 1500 not supported by GLD.\n");
657 656 cmn_err(CE_NOTE, "Setting MTU to 1500. \n");
658 657 macinfo->gldm_maxpkt = chp->ch_mtu = 1500;
659 658 rv = gld_register(
660 659 dip, /* ptr to dev's dev_info struct */
661 660 (char *)ddi_driver_name(dip), /* driver name */
662 661 macinfo); /* ptr to gld macinfo buffer */
663 662 }
664 663
665 664
666 665 if (rv != DDI_SUCCESS) {
667 666 PRINT(("gld_register failed\n"));
668 667 DEBUG_ENTER("ch_attach");
669 668
670 669 cmn_err(CE_WARN, "%s: gld_register error %d\n",
671 670 chp->ch_name, rv);
672 671
673 672 pe_detach(chp);
674 673
675 674 mutex_destroy(&chp->ch_lock);
676 675 mutex_destroy(&chp->ch_intr);
677 676 mutex_destroy(&chp->ch_mc_lck);
678 677 mutex_destroy(&chp->ch_dh_lck);
679 678 mutex_destroy(&chp->mac_lock);
680 679 ddi_remove_intr(dip, 0, chp->ch_icookp);
681 680 ddi_regs_map_free(&chp->ch_hbar0);
682 681 pci_config_teardown(&chp->ch_hpci);
683 682 ch_free_name(chp);
684 683 kmem_free(chp, sizeof (ch_t));
685 684 gld_mac_free(macinfo);
686 685
687 686 return (DDI_FAILURE);
688 687 }
689 688
690 689 /*
691 690 * print a banner at boot time (verbose mode), announcing
692 691 * the device pointed to by dip
693 692 */
694 693 ddi_report_dev(dip);
695 694
696 695 if (ch_abort_debug)
697 696 debug_enter("ch_attach");
698 697
699 698 return (DDI_SUCCESS);
700 699
701 700 } else if (cmd == DDI_RESUME) {
702 701 PRINT(("attach resume\n"));
703 702 DEBUG_ENTER("ch_attach");
704 703 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
705 704 return (DDI_FAILURE);
706 705
707 706 mutex_enter(&chp->ch_lock);
708 707 chp->ch_flags &= ~PESUSPENDED;
709 708 mutex_exit(&chp->ch_lock);
710 709 return (DDI_SUCCESS);
711 710 } else {
712 711 PRINT(("attach: bad command\n"));
713 712 DEBUG_ENTER("ch_attach");
714 713
715 714 return (DDI_FAILURE);
716 715 }
717 716 }
718 717
719 718 /*
720 719 * quiesce(9E) entry point.
721 720 *
722 721 * This function is called when the system is single-threaded at high
723 722 * PIL with preemption disabled. Therefore, this function must not be
724 723 * blocked.
725 724 *
726 725 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
727 726 * DDI_FAILURE indicates an error condition and should almost never happen.
728 727 */
729 728 static int
730 729 ch_quiesce(dev_info_t *dip)
731 730 {
732 731 ch_t *chp;
733 732 gld_mac_info_t *macinfo =
734 733 (gld_mac_info_t *)ddi_get_driver_private(dip);
735 734
736 735 chp = (ch_t *)macinfo->gldm_private;
737 736 chdebug = 0;
738 737 ch_abort_debug = 0;
739 738
740 739 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
741 740 gchp[chp->ch_unit] = NULL;
742 741 #endif
743 742
744 743 /* Set driver state for this card to IDLE */
745 744 chp->ch_state = PEIDLE;
746 745
747 746 /*
748 747 * Do a power reset of card
749 748 * 1. set PwrState to D3hot (3)
750 749 * 2. clear PwrState flags
751 750 */
752 751 pci_config_put32(chp->ch_hpci, 0x44, 3);
753 752 pci_config_put32(chp->ch_hpci, 0x44, 0);
754 753
755 754 /* Wait 0.5 sec */
756 755 drv_usecwait(500000);
757 756
758 757 /*
759 758 * Now stop the chip
760 759 */
761 760 chp->ch_refcnt = 0;
762 761 chp->ch_state = PESTOP;
763 762
764 763 /* Disables all interrupts */
765 764 t1_interrupts_disable(chp);
766 765
767 766 /* Disables SGE queues */
768 767 t1_write_reg_4(chp->sge->obj, A_SG_CONTROL, 0x0);
769 768 t1_write_reg_4(chp->sge->obj, A_SG_INT_CAUSE, 0x0);
770 769
771 770 return (DDI_SUCCESS);
772 771 }
773 772
774 773 static int
775 774 ch_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
776 775 {
777 776 gld_mac_info_t *macinfo;
778 777 ch_t *chp;
779 778
780 779 if (cmd == DDI_DETACH) {
781 780 macinfo = (gld_mac_info_t *)ddi_get_driver_private(dip);
782 781 chp = (ch_t *)macinfo->gldm_private;
783 782
784 783 /*
785 784 * fail detach if there are outstanding mblks still
786 785 * in use somewhere.
787 786 */
788 787 DEBUG_ENTER("ch_detach");
789 788 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
790 789 mutex_enter(&chp->ch_lock);
791 790 if (chp->ch_refcnt > 0) {
792 791 mutex_exit(&chp->ch_lock);
793 792 return (DDI_FAILURE);
794 793 }
795 794 mutex_exit(&chp->ch_lock);
796 795 gchp[chp->ch_unit] = NULL;
797 796 #endif
798 797 /*
799 798 * set driver state for this card to IDLE. We're
800 799 * shutting down.
801 800 */
802 801 mutex_enter(&chp->ch_lock);
803 802 chp->ch_state = PEIDLE;
804 803 mutex_exit(&chp->ch_lock);
805 804
806 805 /*
807 806 * do a power reset of card
808 807 *
809 808 * 1. set PwrState to D3hot (3)
810 809 * 2. clear PwrState flags
811 810 */
812 811 pci_config_put32(chp->ch_hpci, 0x44, 3);
813 812 pci_config_put32(chp->ch_hpci, 0x44, 0);
814 813
815 814 /* delay .5 sec */
816 815 DELAY(500000);
817 816
818 817 /* free register resources */
819 818 (void) gld_unregister(macinfo);
820 819
821 820 /* make sure no interrupts while shutting down card */
822 821 ddi_remove_intr(dip, 0, chp->ch_icookp);
823 822
824 823 /*
825 824 * reset device and recover resources
826 825 */
827 826 pe_detach(chp);
828 827
829 828 ddi_regs_map_free(&chp->ch_hbar0);
830 829 pci_config_teardown(&chp->ch_hpci);
831 830 mutex_destroy(&chp->ch_lock);
832 831 mutex_destroy(&chp->ch_intr);
833 832 mutex_destroy(&chp->ch_mc_lck);
834 833 mutex_destroy(&chp->ch_dh_lck);
835 834 mutex_destroy(&chp->mac_lock);
836 835 ch_free_dma_handles(chp);
837 836 #if defined(__sparc)
838 837 ch_free_dvma_handles(chp);
839 838 #endif
840 839 ch_free_name(chp);
841 840 kmem_free(chp, sizeof (ch_t));
842 841 gld_mac_free(macinfo);
843 842
844 843 DEBUG_ENTER("ch_detach end");
845 844
846 845 return (DDI_SUCCESS);
847 846
848 847 } else if ((cmd == DDI_SUSPEND) || (cmd == DDI_PM_SUSPEND)) {
849 848 DEBUG_ENTER("suspend");
850 849 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
851 850 return (DDI_FAILURE);
852 851 mutex_enter(&chp->ch_lock);
853 852 chp->ch_flags |= PESUSPENDED;
854 853 mutex_exit(&chp->ch_lock);
855 854 #ifdef TODO
856 855 /* Un-initialize (STOP) T101 */
857 856 #endif
858 857 return (DDI_SUCCESS);
859 858 } else
860 859 return (DDI_FAILURE);
861 860 }
862 861
863 862 /*
864 863 * ch_alloc_dma_mem
865 864 *
866 865 * allocates DMA handle
867 866 * allocates kernel memory
868 867 * allocates DMA access handle
869 868 *
870 869 * chp - per-board descriptor
871 870 * type - byteswap mapping?
872 871 * flags - type of mapping
873 872 * size - # bytes mapped
874 873 * paddr - physical address
875 874 * dh - ddi dma handle
876 875 * ah - ddi access handle
877 876 */
878 877
879 878 void *
880 879 ch_alloc_dma_mem(ch_t *chp, int type, int flags, int size, uint64_t *paddr,
881 880 ulong_t *dh, ulong_t *ah)
882 881 {
883 882 ddi_dma_attr_t ch_dma_attr;
884 883 ddi_dma_cookie_t cookie;
885 884 ddi_dma_handle_t ch_dh;
886 885 ddi_acc_handle_t ch_ah;
887 886 ddi_device_acc_attr_t *dev_attrp;
888 887 caddr_t ch_vaddr;
889 888 size_t rlen;
890 889 uint_t count;
891 890 uint_t mapping;
892 891 uint_t align;
893 892 uint_t rv;
894 893 uint_t direction;
895 894
896 895 mapping = (flags&DMA_STREAM)?DDI_DMA_STREAMING:DDI_DMA_CONSISTENT;
897 896 if (flags & DMA_4KALN)
898 897 align = 0x4000;
899 898 else if (flags & DMA_SMALN)
900 899 align = chp->ch_sm_buf_aln;
901 900 else if (flags & DMA_BGALN)
902 901 align = chp->ch_bg_buf_aln;
903 902 else {
904 903 cmn_err(CE_WARN, "ch_alloc_dma_mem(%s): bad alignment flag\n",
905 904 chp->ch_name);
906 905 return (0);
907 906 }
908 907 direction = (flags&DMA_OUT)?DDI_DMA_WRITE:DDI_DMA_READ;
909 908
910 909 /*
911 910 * dynamically create a dma attribute structure
912 911 */
913 912 ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
914 913 ch_dma_attr.dma_attr_addr_lo = 0;
915 914 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
916 915 ch_dma_attr.dma_attr_count_max = 0x00ffffff;
917 916 ch_dma_attr.dma_attr_align = align;
918 917 ch_dma_attr.dma_attr_burstsizes = 0xfff;
919 918 ch_dma_attr.dma_attr_minxfer = 1;
920 919 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
921 920 ch_dma_attr.dma_attr_seg = 0xffffffff;
922 921 ch_dma_attr.dma_attr_sgllen = 1;
923 922 ch_dma_attr.dma_attr_granular = 1;
924 923 ch_dma_attr.dma_attr_flags = 0;
925 924
926 925 rv = ddi_dma_alloc_handle(
927 926 chp->ch_dip, /* device dev_info structure */
928 927 &ch_dma_attr, /* DMA attributes */
929 928 DDI_DMA_SLEEP, /* Wait if no memory */
930 929 NULL, /* no argument to callback */
931 930 &ch_dh); /* DMA handle */
932 931 if (rv != DDI_SUCCESS) {
933 932
934 933 cmn_err(CE_WARN,
935 934 "%s: ch_alloc_dma_mem: ddi_dma_alloc_handle error %d\n",
936 935 chp->ch_name, rv);
937 936
938 937 return (0);
939 938 }
940 939
941 940 /* set byte order for data xfer */
942 941 if (type)
943 942 dev_attrp = &null_attr;
944 943 else
945 944 dev_attrp = &le_attr;
946 945
947 946 rv = ddi_dma_mem_alloc(
948 947 ch_dh, /* dma handle */
949 948 size, /* size desired allocate */
950 949 dev_attrp, /* access attributes */
951 950 mapping,
952 951 DDI_DMA_SLEEP, /* wait for resources */
953 952 NULL, /* no argument */
954 953 &ch_vaddr, /* allocated memory */
955 954 &rlen, /* real size allocated */
956 955 &ch_ah); /* data access handle */
957 956 if (rv != DDI_SUCCESS) {
958 957 ddi_dma_free_handle(&ch_dh);
959 958
960 959 cmn_err(CE_WARN,
961 960 "%s: ch_alloc_dma_mem: ddi_dma_mem_alloc error %d\n",
962 961 chp->ch_name, rv);
963 962
964 963 return (0);
965 964 }
966 965
967 966 rv = ddi_dma_addr_bind_handle(
968 967 ch_dh, /* dma handle */
969 968 (struct as *)0, /* kernel address space */
970 969 ch_vaddr, /* virtual address */
971 970 rlen, /* length of object */
972 971 direction|mapping,
973 972 DDI_DMA_SLEEP, /* Wait for resources */
974 973 NULL, /* no argument */
975 974 &cookie, /* dma cookie */
976 975 &count);
977 976 if (rv != DDI_DMA_MAPPED) {
978 977 ddi_dma_mem_free(&ch_ah);
979 978 ddi_dma_free_handle(&ch_dh);
980 979
981 980 cmn_err(CE_WARN,
982 981 "%s: ch_alloc_dma_mem: ddi_dma_addr_bind_handle error %d\n",
983 982 chp->ch_name, rv);
984 983
985 984 return (0);
986 985 }
987 986
988 987 if (count != 1) {
989 988 cmn_err(CE_WARN,
990 989 "%s: ch_alloc_dma_mem: ch_alloc_dma_mem cookie count %d\n",
991 990 chp->ch_name, count);
992 991 PRINT(("ch_alloc_dma_mem cookie count %d\n", count));
993 992
994 993 ddi_dma_mem_free(&ch_ah);
995 994 ddi_dma_free_handle(&ch_dh);
996 995
997 996 return (0);
998 997 }
999 998
1000 999 *paddr = cookie.dmac_laddress;
1001 1000
1002 1001 *(ddi_dma_handle_t *)dh = ch_dh;
1003 1002 *(ddi_acc_handle_t *)ah = ch_ah;
1004 1003
1005 1004 return ((void *)ch_vaddr);
1006 1005 }
1007 1006
1008 1007 /*
1009 1008 * ch_free_dma_mem
1010 1009 *
1011 1010 * frees resources allocated by ch_alloc_dma_mem()
1012 1011 *
1013 1012 * frees DMA handle
1014 1013 * frees kernel memory
1015 1014 * frees DMA access handle
1016 1015 */
1017 1016
1018 1017 void
1019 1018 ch_free_dma_mem(ulong_t dh, ulong_t ah)
1020 1019 {
1021 1020 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dh;
1022 1021 ddi_acc_handle_t ch_ah = (ddi_acc_handle_t)ah;
1023 1022
1024 1023 (void) ddi_dma_unbind_handle(ch_dh);
1025 1024 ddi_dma_mem_free(&ch_ah);
1026 1025 ddi_dma_free_handle(&ch_dh);
1027 1026 }
1028 1027
1029 1028 /*
1030 1029 * create a dma handle and return a dma handle entry.
1031 1030 */
1032 1031 free_dh_t *
1033 1032 ch_get_dma_handle(ch_t *chp)
1034 1033 {
1035 1034 ddi_dma_handle_t ch_dh;
1036 1035 ddi_dma_attr_t ch_dma_attr;
1037 1036 free_dh_t *dhe;
1038 1037 int rv;
1039 1038
1040 1039 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
1041 1040
1042 1041 ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
1043 1042 ch_dma_attr.dma_attr_addr_lo = 0;
1044 1043 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
1045 1044 ch_dma_attr.dma_attr_count_max = 0x00ffffff;
1046 1045 ch_dma_attr.dma_attr_align = 1;
1047 1046 ch_dma_attr.dma_attr_burstsizes = 0xfff;
1048 1047 ch_dma_attr.dma_attr_minxfer = 1;
1049 1048 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
1050 1049 ch_dma_attr.dma_attr_seg = 0xffffffff;
1051 1050 ch_dma_attr.dma_attr_sgllen = 5;
1052 1051 ch_dma_attr.dma_attr_granular = 1;
1053 1052 ch_dma_attr.dma_attr_flags = 0;
1054 1053
1055 1054 rv = ddi_dma_alloc_handle(
1056 1055 chp->ch_dip, /* device dev_info */
1057 1056 &ch_dma_attr, /* DMA attributes */
1058 1057 DDI_DMA_SLEEP, /* Wait if no memory */
1059 1058 NULL, /* no argument */
1060 1059 &ch_dh); /* DMA handle */
1061 1060 if (rv != DDI_SUCCESS) {
1062 1061
1063 1062 cmn_err(CE_WARN,
1064 1063 "%s: ch_get_dma_handle: ddi_dma_alloc_handle error %d\n",
1065 1064 chp->ch_name, rv);
1066 1065
1067 1066 kmem_free(dhe, sizeof (*dhe));
1068 1067
1069 1068 return ((free_dh_t *)0);
1070 1069 }
1071 1070
1072 1071 dhe->dhe_dh = (ulong_t)ch_dh;
1073 1072
1074 1073 return (dhe);
1075 1074 }
1076 1075
1077 1076 /*
1078 1077 * free the linked list of dma descriptor entries.
1079 1078 */
1080 1079 static void
1081 1080 ch_free_dma_handles(ch_t *chp)
1082 1081 {
1083 1082 free_dh_t *dhe, *the;
1084 1083
1085 1084 dhe = chp->ch_dh;
1086 1085 while (dhe) {
1087 1086 ddi_dma_free_handle((ddi_dma_handle_t *)&dhe->dhe_dh);
1088 1087 the = dhe;
1089 1088 dhe = dhe->dhe_next;
1090 1089 kmem_free(the, sizeof (*the));
1091 1090 }
1092 1091 chp->ch_dh = NULL;
1093 1092 }
1094 1093
1095 1094 /*
1096 1095 * ch_bind_dma_handle()
1097 1096 *
1098 1097 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs.
1099 1098 *
1100 1099 * chp - per-board descriptor
1101 1100 * size - # bytes mapped
1102 1101 * vaddr - virtual address
1103 1102 * cmp - array of cmdQ_ce_t entries
1104 1103 * cnt - # free entries in cmp array
1105 1104 */
1106 1105
1107 1106 uint32_t
1108 1107 ch_bind_dma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1109 1108 uint32_t cnt)
1110 1109 {
1111 1110 ddi_dma_cookie_t cookie;
1112 1111 ddi_dma_handle_t ch_dh;
1113 1112 uint_t count;
1114 1113 uint32_t n = 1;
1115 1114 free_dh_t *dhe;
1116 1115 uint_t rv;
1117 1116
1118 1117 mutex_enter(&chp->ch_dh_lck);
1119 1118 if ((dhe = chp->ch_dh) != NULL) {
1120 1119 chp->ch_dh = dhe->dhe_next;
1121 1120 }
1122 1121 mutex_exit(&chp->ch_dh_lck);
1123 1122
1124 1123 if (dhe == NULL) {
1125 1124 return (0);
1126 1125 }
1127 1126
1128 1127 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1129 1128
1130 1129 rv = ddi_dma_addr_bind_handle(
1131 1130 ch_dh, /* dma handle */
1132 1131 (struct as *)0, /* kernel address space */
1133 1132 vaddr, /* virtual address */
1134 1133 size, /* length of object */
1135 1134 DDI_DMA_WRITE|DDI_DMA_STREAMING,
1136 1135 DDI_DMA_SLEEP, /* Wait for resources */
1137 1136 NULL, /* no argument */
1138 1137 &cookie, /* dma cookie */
1139 1138 &count);
1140 1139 if (rv != DDI_DMA_MAPPED) {
1141 1140
1142 1141 /* return dma header descriptor back to free list */
1143 1142 mutex_enter(&chp->ch_dh_lck);
1144 1143 dhe->dhe_next = chp->ch_dh;
1145 1144 chp->ch_dh = dhe;
1146 1145 mutex_exit(&chp->ch_dh_lck);
1147 1146
1148 1147 cmn_err(CE_WARN,
1149 1148 "%s: ch_bind_dma_handle: ddi_dma_addr_bind_handle err %d\n",
1150 1149 chp->ch_name, rv);
1151 1150
1152 1151 return (0);
1153 1152 }
1154 1153
1155 1154 /*
1156 1155 * abort if we've run out of space
1157 1156 */
1158 1157 if (count > cnt) {
1159 1158 /* return dma header descriptor back to free list */
1160 1159 mutex_enter(&chp->ch_dh_lck);
1161 1160 dhe->dhe_next = chp->ch_dh;
1162 1161 chp->ch_dh = dhe;
1163 1162 mutex_exit(&chp->ch_dh_lck);
1164 1163
1165 1164 return (0);
1166 1165 }
1167 1166
1168 1167 cmp->ce_pa = cookie.dmac_laddress;
1169 1168 cmp->ce_dh = NULL;
1170 1169 cmp->ce_len = cookie.dmac_size;
1171 1170 cmp->ce_mp = NULL;
1172 1171 cmp->ce_flg = DH_DMA;
1173 1172
1174 1173 while (--count) {
1175 1174 cmp++;
1176 1175 n++;
1177 1176 ddi_dma_nextcookie(ch_dh, &cookie);
1178 1177 cmp->ce_pa = cookie.dmac_laddress;
1179 1178 cmp->ce_dh = NULL;
1180 1179 cmp->ce_len = cookie.dmac_size;
1181 1180 cmp->ce_mp = NULL;
1182 1181 cmp->ce_flg = DH_DMA;
1183 1182 }
1184 1183
1185 1184 cmp->ce_dh = dhe;
1186 1185
1187 1186 return (n);
1188 1187 }
1189 1188
1190 1189 /*
1191 1190 * ch_unbind_dma_handle()
1192 1191 *
1193 1192 * frees resources allocated by ch_bind_dma_handle().
1194 1193 *
1195 1194 * frees DMA handle
1196 1195 */
1197 1196
1198 1197 void
1199 1198 ch_unbind_dma_handle(ch_t *chp, free_dh_t *dhe)
1200 1199 {
1201 1200 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1202 1201
1203 1202 if (ddi_dma_unbind_handle(ch_dh))
1204 1203 cmn_err(CE_WARN, "%s: ddi_dma_unbind_handle failed",
1205 1204 chp->ch_name);
1206 1205
1207 1206 mutex_enter(&chp->ch_dh_lck);
1208 1207 dhe->dhe_next = chp->ch_dh;
1209 1208 chp->ch_dh = dhe;
1210 1209 mutex_exit(&chp->ch_dh_lck);
1211 1210 }
1212 1211
1213 1212 #if defined(__sparc)
1214 1213 /*
1215 1214 * DVMA stuff. Solaris only.
1216 1215 */
1217 1216
1218 1217 /*
1219 1218 * create a dvma handle and return a dma handle entry.
1220 1219 * DVMA is on sparc only!
1221 1220 */
1222 1221
1223 1222 free_dh_t *
1224 1223 ch_get_dvma_handle(ch_t *chp)
1225 1224 {
1226 1225 ddi_dma_handle_t ch_dh;
1227 1226 ddi_dma_lim_t ch_dvma_attr;
1228 1227 free_dh_t *dhe;
1229 1228 int rv;
1230 1229
1231 1230 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
1232 1231
1233 1232 ch_dvma_attr.dlim_addr_lo = 0;
1234 1233 ch_dvma_attr.dlim_addr_hi = 0xffffffff;
1235 1234 ch_dvma_attr.dlim_cntr_max = 0xffffffff;
1236 1235 ch_dvma_attr.dlim_burstsizes = 0xfff;
1237 1236 ch_dvma_attr.dlim_minxfer = 1;
1238 1237 ch_dvma_attr.dlim_dmaspeed = 0;
1239 1238
1240 1239 rv = dvma_reserve(
1241 1240 chp->ch_dip, /* device dev_info */
1242 1241 &ch_dvma_attr, /* DVMA attributes */
1243 1242 3, /* number of pages */
1244 1243 &ch_dh); /* DVMA handle */
1245 1244
1246 1245 if (rv != DDI_SUCCESS) {
1247 1246
1248 1247 cmn_err(CE_WARN,
1249 1248 "%s: ch_get_dvma_handle: dvma_reserve() error %d\n",
1250 1249 chp->ch_name, rv);
1251 1250
1252 1251 kmem_free(dhe, sizeof (*dhe));
1253 1252
1254 1253 return ((free_dh_t *)0);
1255 1254 }
1256 1255
1257 1256 dhe->dhe_dh = (ulong_t)ch_dh;
1258 1257
1259 1258 return (dhe);
1260 1259 }
1261 1260
1262 1261 /*
1263 1262 * free the linked list of dvma descriptor entries.
1264 1263 * DVMA is only on sparc!
1265 1264 */
1266 1265
1267 1266 static void
1268 1267 ch_free_dvma_handles(ch_t *chp)
1269 1268 {
1270 1269 free_dh_t *dhe, *the;
1271 1270
1272 1271 dhe = chp->ch_vdh;
1273 1272 while (dhe) {
1274 1273 dvma_release((ddi_dma_handle_t)dhe->dhe_dh);
1275 1274 the = dhe;
1276 1275 dhe = dhe->dhe_next;
1277 1276 kmem_free(the, sizeof (*the));
1278 1277 }
1279 1278 chp->ch_vdh = NULL;
1280 1279 }
1281 1280
1282 1281 /*
1283 1282 * ch_bind_dvma_handle()
1284 1283 *
1285 1284 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs.
1286 1285 * DVMA in sparc only
1287 1286 *
1288 1287 * chp - per-board descriptor
1289 1288 * size - # bytes mapped
1290 1289 * vaddr - virtual address
1291 1290 * cmp - array of cmdQ_ce_t entries
1292 1291 * cnt - # free entries in cmp array
1293 1292 */
1294 1293
1295 1294 uint32_t
1296 1295 ch_bind_dvma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1297 1296 uint32_t cnt)
1298 1297 {
1299 1298 ddi_dma_cookie_t cookie;
1300 1299 ddi_dma_handle_t ch_dh;
1301 1300 uint32_t n = 1;
1302 1301 free_dh_t *dhe;
1303 1302
1304 1303 mutex_enter(&chp->ch_dh_lck);
1305 1304 if ((dhe = chp->ch_vdh) != NULL) {
1306 1305 chp->ch_vdh = dhe->dhe_next;
1307 1306 }
1308 1307 mutex_exit(&chp->ch_dh_lck);
1309 1308
1310 1309 if (dhe == NULL) {
1311 1310 return (0);
1312 1311 }
1313 1312
1314 1313 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1315 1314 n = cnt;
1316 1315
1317 1316 dvma_kaddr_load(
1318 1317 ch_dh, /* dvma handle */
1319 1318 vaddr, /* virtual address */
1320 1319 size, /* length of object */
1321 1320 0, /* start at index 0 */
1322 1321 &cookie);
1323 1322
1324 1323 dvma_sync(ch_dh, 0, DDI_DMA_SYNC_FORDEV);
1325 1324
1326 1325 cookie.dmac_notused = 0;
1327 1326 n = 1;
1328 1327
1329 1328 cmp->ce_pa = cookie.dmac_laddress;
1330 1329 cmp->ce_dh = dhe;
1331 1330 cmp->ce_len = cookie.dmac_size;
1332 1331 cmp->ce_mp = NULL;
1333 1332 cmp->ce_flg = DH_DVMA; /* indicate a dvma descriptor */
1334 1333
1335 1334 return (n);
1336 1335 }
1337 1336
1338 1337 /*
1339 1338 * ch_unbind_dvma_handle()
1340 1339 *
1341 1340 * frees resources allocated by ch_bind_dvma_handle().
1342 1341 *
1343 1342 * frees DMA handle
1344 1343 */
1345 1344
1346 1345 void
1347 1346 ch_unbind_dvma_handle(ch_t *chp, free_dh_t *dhe)
1348 1347 {
1349 1348 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1350 1349
1351 1350 dvma_unload(ch_dh, 0, -1);
1352 1351
1353 1352 mutex_enter(&chp->ch_dh_lck);
1354 1353 dhe->dhe_next = chp->ch_vdh;
1355 1354 chp->ch_vdh = dhe;
1356 1355 mutex_exit(&chp->ch_dh_lck);
1357 1356 }
1358 1357
1359 1358 #endif /* defined(__sparc) */
1360 1359
1361 1360 /*
1362 1361 * send received packet up stream.
1363 1362 *
1364 1363 * if driver has been stopped, then we drop the message.
1365 1364 */
1366 1365 void
1367 1366 ch_send_up(ch_t *chp, mblk_t *mp, uint32_t cksum, int flg)
1368 1367 {
1369 1368 /*
1370 1369 * probably do not need a lock here. When we set PESTOP in
1371 1370 * ch_stop() a packet could have just passed here and gone
1372 1371 * upstream. The next one will be dropped.
1373 1372 */
1374 1373 if (chp->ch_state == PERUNNING) {
1375 1374 /*
1376 1375 * note that flg will not be set unless enable_checksum_offload
1377 1376 * set in /etc/system (see sge.c).
1378 1377 */
1379 1378 if (flg)
1380 1379 (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, cksum,
1381 1380 HCK_FULLCKSUM, 0);
1382 1381 gld_recv(chp->ch_macp, mp);
1383 1382 } else {
1384 1383 freemsg(mp);
1385 1384 }
1386 1385 }
1387 1386
1388 1387 /*
1389 1388 * unblock gld driver.
1390 1389 */
1391 1390 void
1392 1391 ch_gld_ok(ch_t *chp)
1393 1392 {
1394 1393 gld_sched(chp->ch_macp);
1395 1394 }
1396 1395
1397 1396
1398 1397 /*
1399 1398 * reset the card.
1400 1399 *
1401 1400 * Note: we only do this after the card has been initialized.
1402 1401 */
1403 1402 static int
1404 1403 ch_reset(gld_mac_info_t *mp)
1405 1404 {
1406 1405 ch_t *chp;
1407 1406
1408 1407 if (mp == NULL) {
1409 1408 return (GLD_FAILURE);
1410 1409 }
1411 1410
1412 1411 chp = (ch_t *)mp->gldm_private;
1413 1412
1414 1413 if (chp == NULL) {
1415 1414 return (GLD_FAILURE);
1416 1415 }
1417 1416
1418 1417 #ifdef NOTYET
1419 1418 /*
1420 1419 * do a reset of card
1421 1420 *
1422 1421 * 1. set PwrState to D3hot (3)
1423 1422 * 2. clear PwrState flags
1424 1423 */
1425 1424 /*
1426 1425 * When we did this, the card didn't start. First guess is that
1427 1426 * the initialization is not quite correct. For now, we don't
1428 1427 * reset things.
1429 1428 */
1430 1429 if (chp->ch_hpci) {
1431 1430 pci_config_put32(chp->ch_hpci, 0x44, 3);
1432 1431 pci_config_put32(chp->ch_hpci, 0x44, 0);
1433 1432
1434 1433 /* delay .5 sec */
1435 1434 DELAY(500000);
1436 1435 }
1437 1436 #endif
1438 1437
1439 1438 return (GLD_SUCCESS);
1440 1439 }
1441 1440
1442 1441 static int
1443 1442 ch_start(gld_mac_info_t *macinfo)
1444 1443 {
1445 1444 ch_t *chp = (ch_t *)macinfo->gldm_private;
1446 1445 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1447 1446 /* only initialize card on first attempt */
1448 1447 mutex_enter(&chp->ch_lock);
1449 1448 chp->ch_refcnt++;
1450 1449 if (chp->ch_refcnt == 1) {
1451 1450 chp->ch_state = PERUNNING;
1452 1451 mutex_exit(&chp->ch_lock);
1453 1452 pe_init((void *)chp);
1454 1453 } else
1455 1454 mutex_exit(&chp->ch_lock);
1456 1455 #else
1457 1456 pe_init((void *)chp);
1458 1457
1459 1458 /* go to running state, we're being started */
1460 1459 mutex_enter(&chp->ch_lock);
1461 1460 chp->ch_state = PERUNNING;
1462 1461 mutex_exit(&chp->ch_lock);
1463 1462 #endif
1464 1463
1465 1464 return (GLD_SUCCESS);
1466 1465 }
1467 1466
1468 1467 static int
1469 1468 ch_stop(gld_mac_info_t *mp)
1470 1469 {
1471 1470 ch_t *chp = (ch_t *)mp->gldm_private;
1472 1471
1473 1472 /*
1474 1473 * can only stop the chip if it's been initialized
1475 1474 */
1476 1475 mutex_enter(&chp->ch_lock);
1477 1476 if (chp->ch_state == PEIDLE) {
1478 1477 mutex_exit(&chp->ch_lock);
1479 1478 return (GLD_FAILURE);
1480 1479 }
1481 1480 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1482 1481 chp->ch_refcnt--;
1483 1482 if (chp->ch_refcnt == 0) {
1484 1483 chp->ch_state = PESTOP;
1485 1484 mutex_exit(&chp->ch_lock);
1486 1485 pe_stop(chp);
1487 1486 } else
1488 1487 mutex_exit(&chp->ch_lock);
1489 1488 #else
1490 1489 chp->ch_state = PESTOP;
1491 1490 mutex_exit(&chp->ch_lock);
1492 1491 pe_stop(chp);
1493 1492 #endif
1494 1493 return (GLD_SUCCESS);
1495 1494 }
1496 1495
1497 1496 static int
1498 1497 ch_set_mac_address(gld_mac_info_t *mp, uint8_t *mac)
1499 1498 {
1500 1499 ch_t *chp;
1501 1500
1502 1501 if (mp) {
1503 1502 chp = (ch_t *)mp->gldm_private;
1504 1503 } else {
1505 1504 return (GLD_FAILURE);
1506 1505 }
1507 1506
1508 1507 pe_set_mac(chp, mac);
1509 1508
1510 1509 return (GLD_SUCCESS);
1511 1510 }
1512 1511
1513 1512 static int
1514 1513 ch_set_multicast(gld_mac_info_t *mp, uint8_t *ep, int flg)
1515 1514 {
1516 1515 ch_t *chp = (ch_t *)mp->gldm_private;
1517 1516
1518 1517 return (pe_set_mc(chp, ep, flg));
1519 1518 }
1520 1519
1521 1520 static int
1522 1521 ch_ioctl(gld_mac_info_t *macinfo, queue_t *q, mblk_t *mp)
1523 1522 {
1524 1523 struct iocblk *iocp;
1525 1524
1526 1525 switch (mp->b_datap->db_type) {
1527 1526 case M_IOCTL:
1528 1527 /* pe_ioctl() does qreply() */
1529 1528 pe_ioctl((ch_t *)(macinfo->gldm_private), q, mp);
1530 1529 break;
1531 1530
1532 1531 default:
1533 1532 /*
1534 1533 * cmn_err(CE_NOTE, "ch_ioctl not M_IOCTL\n");
1535 1534 * debug_enter("bad ch_ioctl");
1536 1535 */
1537 1536
1538 1537 iocp = (struct iocblk *)mp->b_rptr;
1539 1538
1540 1539 if (mp->b_cont)
1541 1540 freemsg(mp->b_cont);
1542 1541 mp->b_cont = NULL;
1543 1542
1544 1543 mp->b_datap->db_type = M_IOCNAK;
1545 1544 iocp->ioc_error = EINVAL;
1546 1545 qreply(q, mp);
1547 1546 break;
1548 1547 }
1549 1548
1550 1549 return (GLD_SUCCESS);
1551 1550 }
1552 1551
1553 1552 static int
1554 1553 ch_set_promiscuous(gld_mac_info_t *mp, int flag)
1555 1554 {
1556 1555 ch_t *chp = (ch_t *)mp->gldm_private;
1557 1556
1558 1557 switch (flag) {
1559 1558 case GLD_MAC_PROMISC_MULTI:
1560 1559 pe_set_promiscuous(chp, 2);
1561 1560 break;
1562 1561
1563 1562 case GLD_MAC_PROMISC_NONE:
1564 1563 pe_set_promiscuous(chp, 0);
1565 1564 break;
1566 1565
1567 1566 case GLD_MAC_PROMISC_PHYS:
1568 1567 default:
1569 1568 pe_set_promiscuous(chp, 1);
1570 1569 break;
1571 1570 }
1572 1571
1573 1572 return (GLD_SUCCESS);
1574 1573 }
1575 1574
1576 1575 static int
1577 1576 ch_get_stats(gld_mac_info_t *mp, struct gld_stats *gs)
1578 1577 {
1579 1578 ch_t *chp = (ch_t *)mp->gldm_private;
1580 1579 uint64_t speed;
1581 1580 uint32_t intrcnt;
1582 1581 uint32_t norcvbuf;
1583 1582 uint32_t oerrors;
1584 1583 uint32_t ierrors;
1585 1584 uint32_t underrun;
1586 1585 uint32_t overrun;
1587 1586 uint32_t framing;
1588 1587 uint32_t crc;
1589 1588 uint32_t carrier;
1590 1589 uint32_t collisions;
1591 1590 uint32_t xcollisions;
1592 1591 uint32_t late;
1593 1592 uint32_t defer;
1594 1593 uint32_t xerrs;
1595 1594 uint32_t rerrs;
1596 1595 uint32_t toolong;
1597 1596 uint32_t runt;
1598 1597 ulong_t multixmt;
1599 1598 ulong_t multircv;
1600 1599 ulong_t brdcstxmt;
1601 1600 ulong_t brdcstrcv;
1602 1601
1603 1602 /*
1604 1603 * race looks benign here.
1605 1604 */
1606 1605 if (chp->ch_state != PERUNNING) {
1607 1606 return (GLD_FAILURE);
1608 1607 }
1609 1608
1610 1609 (void) pe_get_stats(chp,
1611 1610 &speed,
1612 1611 &intrcnt,
1613 1612 &norcvbuf,
1614 1613 &oerrors,
1615 1614 &ierrors,
1616 1615 &underrun,
1617 1616 &overrun,
1618 1617 &framing,
1619 1618 &crc,
1620 1619 &carrier,
1621 1620 &collisions,
1622 1621 &xcollisions,
1623 1622 &late,
1624 1623 &defer,
1625 1624 &xerrs,
1626 1625 &rerrs,
1627 1626 &toolong,
1628 1627 &runt,
1629 1628 &multixmt,
1630 1629 &multircv,
1631 1630 &brdcstxmt,
1632 1631 &brdcstrcv);
1633 1632
1634 1633 gs->glds_speed = speed;
1635 1634 gs->glds_media = GLDM_UNKNOWN;
1636 1635 gs->glds_intr = intrcnt;
1637 1636 gs->glds_norcvbuf = norcvbuf;
1638 1637 gs->glds_errxmt = oerrors;
1639 1638 gs->glds_errrcv = ierrors;
1640 1639 gs->glds_missed = ierrors; /* ??? */
1641 1640 gs->glds_underflow = underrun;
1642 1641 gs->glds_overflow = overrun;
1643 1642 gs->glds_frame = framing;
1644 1643 gs->glds_crc = crc;
1645 1644 gs->glds_duplex = GLD_DUPLEX_FULL;
1646 1645 gs->glds_nocarrier = carrier;
1647 1646 gs->glds_collisions = collisions;
1648 1647 gs->glds_excoll = xcollisions;
1649 1648 gs->glds_xmtlatecoll = late;
1650 1649 gs->glds_defer = defer;
1651 1650 gs->glds_dot3_first_coll = 0; /* Not available */
1652 1651 gs->glds_dot3_multi_coll = 0; /* Not available */
1653 1652 gs->glds_dot3_sqe_error = 0; /* Not available */
1654 1653 gs->glds_dot3_mac_xmt_error = xerrs;
1655 1654 gs->glds_dot3_mac_rcv_error = rerrs;
1656 1655 gs->glds_dot3_frame_too_long = toolong;
1657 1656 gs->glds_short = runt;
1658 1657
1659 1658 gs->glds_noxmtbuf = 0; /* not documented */
1660 1659 gs->glds_xmtretry = 0; /* not documented */
1661 1660 gs->glds_multixmt = multixmt; /* not documented */
1662 1661 gs->glds_multircv = multircv; /* not documented */
1663 1662 gs->glds_brdcstxmt = brdcstxmt; /* not documented */
1664 1663 gs->glds_brdcstrcv = brdcstrcv; /* not documented */
1665 1664
1666 1665 return (GLD_SUCCESS);
1667 1666 }
1668 1667
1669 1668
1670 1669 static int
1671 1670 ch_send(gld_mac_info_t *macinfo, mblk_t *mp)
1672 1671 {
1673 1672 ch_t *chp = (ch_t *)macinfo->gldm_private;
1674 1673 uint32_t flg;
1675 1674 uint32_t msg_flg;
1676 1675
1677 1676 #ifdef TX_CKSUM_FIX
1678 1677 mblk_t *nmp;
1679 1678 int frags;
1680 1679 size_t msg_len;
1681 1680 struct ether_header *ehdr;
1682 1681 ipha_t *ihdr;
1683 1682 int tflg = 0;
1684 1683 #endif /* TX_CKSUM_FIX */
1685 1684
1686 1685 /*
1687 1686 * race looks benign here.
1688 1687 */
1689 1688 if (chp->ch_state != PERUNNING) {
1690 1689 return (GLD_FAILURE);
1691 1690 }
1692 1691
1693 1692 msg_flg = 0;
1694 1693 if (chp->ch_config.cksum_enabled) {
1695 1694 if (is_T2(chp)) {
1696 1695 hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
1697 1696 NULL, &msg_flg);
1698 1697 flg = (msg_flg & HCK_FULLCKSUM)?
1699 1698 CH_NO_CPL: CH_NO_HWCKSUM|CH_NO_CPL;
1700 1699 } else
1701 1700 flg = CH_NO_CPL;
1702 1701 } else
1703 1702 flg = CH_NO_HWCKSUM | CH_NO_CPL;
1704 1703
1705 1704 #ifdef TX_CKSUM_FIX
1706 1705 /*
1707 1706 * Check if the message spans more than one mblk or
1708 1707 * if it does and the ip header is not in the first
1709 1708 * fragment then pull up the message. This case is
1710 1709 * expected to be rare.
1711 1710 */
1712 1711 frags = 0;
1713 1712 msg_len = 0;
1714 1713 nmp = mp;
1715 1714 do {
1716 1715 frags++;
1717 1716 msg_len += MBLKL(nmp);
1718 1717 nmp = nmp->b_cont;
1719 1718 } while (nmp);
1720 1719 #define MAX_ALL_HDRLEN SZ_CPL_TX_PKT + sizeof (struct ether_header) + \
1721 1720 TCP_MAX_COMBINED_HEADER_LENGTH
1722 1721 /*
1723 1722 * If the first mblk has enough space at the beginning of
1724 1723 * the data buffer to hold a CPL header, then we'll expand
1725 1724 * the front of the buffer so a pullup will leave space for
1726 1725 * pe_start() to add the CPL header in line. We need to remember
1727 1726 * that we've done this so we can undo it after the pullup.
1728 1727 *
1729 1728 * Note that if we decide to do an allocb to hold the CPL header,
1730 1729 * we need to catch the case where we've added an empty mblk for
1731 1730 * the header but never did a pullup. This would result in the
1732 1731 * tests for etherheader, etc. being done on the initial, empty,
1733 1732 * mblk instead of the one with data. See PR3646 for further
1734 1733 * details. (note this PR is closed since it is no longer relevant).
1735 1734 *
1736 1735 * Another point is that if we do add an allocb to add space for
1737 1736 * a CPL header, after a pullup, the initial pointer, mp, in GLD will
1738 1737 * no longer point to a valid mblk. When we get the mblk (by allocb),
1739 1738 * we need to switch the mblk structure values between it and the
1740 1739 * mp structure values referenced by GLD. This handles the case where
1741 1740 * we've run out of cmdQ entries and report GLD_NORESOURCES back to
1742 1741 * GLD. The pointer to the mblk data will have been modified to hold
1743 1742 * an empty 8 bytes for the CPL header, For now, we let the pe_start()
1744 1743 * routine prepend an 8 byte mblk.
1745 1744 */
1746 1745 if (MBLKHEAD(mp) >= SZ_CPL_TX_PKT) {
1747 1746 mp->b_rptr -= SZ_CPL_TX_PKT;
1748 1747 tflg = 1;
1749 1748 }
1750 1749 if (frags > 3) {
1751 1750 chp->sge->intr_cnt.tx_msg_pullups++;
1752 1751 if (pullupmsg(mp, -1) == 0) {
1753 1752 freemsg(mp);
1754 1753 return (GLD_SUCCESS);
1755 1754 }
1756 1755 } else if ((msg_len > MAX_ALL_HDRLEN) &&
1757 1756 (MBLKL(mp) < MAX_ALL_HDRLEN)) {
1758 1757 chp->sge->intr_cnt.tx_hdr_pullups++;
1759 1758 if (pullupmsg(mp, MAX_ALL_HDRLEN) == 0) {
1760 1759 freemsg(mp);
1761 1760 return (GLD_SUCCESS);
1762 1761 }
1763 1762 }
1764 1763 if (tflg)
1765 1764 mp->b_rptr += SZ_CPL_TX_PKT;
1766 1765
1767 1766 ehdr = (struct ether_header *)mp->b_rptr;
1768 1767 if (ehdr->ether_type == htons(ETHERTYPE_IP)) {
1769 1768 ihdr = (ipha_t *)&mp->b_rptr[sizeof (struct ether_header)];
1770 1769 if ((ihdr->ipha_fragment_offset_and_flags & IPH_MF)) {
1771 1770 if (ihdr->ipha_protocol == IPPROTO_UDP) {
1772 1771 flg |= CH_UDP_MF;
1773 1772 chp->sge->intr_cnt.tx_udp_ip_frag++;
1774 1773 } else if (ihdr->ipha_protocol == IPPROTO_TCP) {
1775 1774 flg |= CH_TCP_MF;
1776 1775 chp->sge->intr_cnt.tx_tcp_ip_frag++;
1777 1776 }
1778 1777 } else if (ihdr->ipha_protocol == IPPROTO_UDP)
1779 1778 flg |= CH_UDP;
1780 1779 }
1781 1780 #endif /* TX_CKSUM_FIX */
1782 1781
1783 1782 /*
1784 1783 * return 0 - data send successfully
1785 1784 * return 1 - no resources, reschedule
1786 1785 */
1787 1786 if (pe_start(chp, mp, flg))
1788 1787 return (GLD_NORESOURCES);
1789 1788 else
1790 1789 return (GLD_SUCCESS);
1791 1790 }
1792 1791
1793 1792 static uint_t
1794 1793 ch_intr(gld_mac_info_t *mp)
1795 1794 {
1796 1795 return (pe_intr((ch_t *)mp->gldm_private));
1797 1796 }
1798 1797
1799 1798 /*
1800 1799 * generate name of driver with unit# postpended.
1801 1800 */
1802 1801 void
1803 1802 ch_set_name(ch_t *chp, int unit)
1804 1803 {
1805 1804 chp->ch_name = (char *)kmem_alloc(sizeof ("chxge00"), KM_SLEEP);
1806 1805 if (unit > 9) {
1807 1806 bcopy("chxge00", (void *)chp->ch_name, sizeof ("chxge00"));
1808 1807 chp->ch_name[5] += unit/10;
1809 1808 chp->ch_name[6] += unit%10;
1810 1809 } else {
1811 1810 bcopy("chxge0", (void *)chp->ch_name, sizeof ("chxge0"));
1812 1811 chp->ch_name[5] += unit;
1813 1812 }
1814 1813 }
1815 1814
1816 1815 void
1817 1816 ch_free_name(ch_t *chp)
1818 1817 {
1819 1818 if (chp->ch_name)
1820 1819 kmem_free(chp->ch_name, sizeof ("chxge00"));
1821 1820 chp->ch_name = NULL;
1822 1821 }
1823 1822
1824 1823 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1825 1824 /*
1826 1825 * register toe offload.
1827 1826 */
1828 1827 void *
1829 1828 ch_register(void *instp, void *toe_rcv, void *toe_free, void *toe_tunnel,
1830 1829 kmutex_t *toe_tx_mx, kcondvar_t *toe_of_cv, int unit)
1831 1830 {
1832 1831 ch_t *chp = gchp[unit];
1833 1832 if (chp != NULL) {
1834 1833 mutex_enter(&chp->ch_lock);
1835 1834
1836 1835 chp->toe_rcv = (void (*)(void *, mblk_t *))toe_rcv;
1837 1836 chp->ch_toeinst = instp;
1838 1837 chp->toe_free = (void (*)(void *, tbuf_t *))toe_free;
1839 1838 chp->toe_tunnel = (int (*)(void *, mblk_t *))toe_tunnel;
1840 1839 chp->ch_tx_overflow_mutex = toe_tx_mx;
1841 1840 chp->ch_tx_overflow_cv = toe_of_cv;
1842 1841 chp->open_device_map |= TOEDEV_DEVMAP_BIT;
1843 1842
1844 1843 /* start up adapter if first user */
1845 1844 chp->ch_refcnt++;
1846 1845 if (chp->ch_refcnt == 1) {
1847 1846 chp->ch_state = PERUNNING;
1848 1847 mutex_exit(&chp->ch_lock);
1849 1848 pe_init((void *)chp);
1850 1849 } else
1851 1850 mutex_exit(&chp->ch_lock);
1852 1851 }
1853 1852 return ((void *)gchp[unit]);
1854 1853 }
1855 1854
1856 1855 /*
1857 1856 * unregister toe offload.
1858 1857 * XXX Need to fix races here.
1859 1858 * 1. turn off SGE interrupts.
1860 1859 * 2. do update
1861 1860 * 3. re-enable SGE interrupts
1862 1861 * 4. SGE doorbell to make sure things get restarted.
1863 1862 */
1864 1863 void
1865 1864 ch_unregister(void)
1866 1865 {
1867 1866 int i;
1868 1867 ch_t *chp;
1869 1868
1870 1869 for (i = 0; i < MAX_CARDS; i++) {
1871 1870 chp = gchp[i];
1872 1871 if (chp == NULL)
1873 1872 continue;
1874 1873
1875 1874 mutex_enter(&chp->ch_lock);
1876 1875
1877 1876 chp->ch_refcnt--;
1878 1877 if (chp->ch_refcnt == 0) {
1879 1878 chp->ch_state = PESTOP;
1880 1879 mutex_exit(&chp->ch_lock);
1881 1880 pe_stop(chp);
1882 1881 } else
1883 1882 mutex_exit(&chp->ch_lock);
1884 1883
1885 1884 chp->open_device_map &= ~TOEDEV_DEVMAP_BIT;
1886 1885 chp->toe_rcv = NULL;
1887 1886 chp->ch_toeinst = NULL;
1888 1887 chp->toe_free = NULL;
1889 1888 chp->toe_tunnel = NULL;
1890 1889 chp->ch_tx_overflow_mutex = NULL;
1891 1890 chp->ch_tx_overflow_cv = NULL;
1892 1891 }
1893 1892 }
1894 1893 #endif /* CONFIG_CHELSIO_T1_OFFLOAD */
1895 1894
1896 1895 /*
1897 1896 * get properties from chxge.conf
1898 1897 */
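/*
 * An illustrative chxge.conf fragment (the property names match the
 * lookups below; the values shown are examples only, not recommendations):
 *
 *	enable_dvma=1;
 *	pci_burstsize=1024;
 *	pci_split_transaction_cnt=2;
 *	pci_relaxed_ordering_on=1;
 *	maximum_mtu=9198;
 *	accept_jumbo=1;
 *	enable_checksum_offload=1;
 *	sge_cmdq0_cnt=512;
 *	sge_respq_cnt=2048;
 *
 * Each property may also be spelled with '-' instead of '_'; both forms
 * are looked up.
 */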
1899 1898 static void
1900 1899 ch_get_prop(ch_t *chp)
1901 1900 {
1902 1901 int val;
1903 1902 int tval = 0;
1904 1903 extern int enable_latency_timer;
1905 1904 extern uint32_t sge_cmdq0_cnt;
1906 1905 extern uint32_t sge_cmdq1_cnt;
1907 1906 extern uint32_t sge_flq0_cnt;
1908 1907 extern uint32_t sge_flq1_cnt;
1909 1908 extern uint32_t sge_respq_cnt;
1910 1909 extern uint32_t sge_cmdq0_cnt_orig;
1911 1910 extern uint32_t sge_cmdq1_cnt_orig;
1912 1911 extern uint32_t sge_flq0_cnt_orig;
1913 1912 extern uint32_t sge_flq1_cnt_orig;
1914 1913 extern uint32_t sge_respq_cnt_orig;
1915 1914 dev_info_t *pdip;
1916 1915 uint32_t vendor_id, device_id, revision_id;
1917 1916 uint32_t *prop_val = NULL;
1918 1917 	uint32_t prop_len = 0;
1919 1918
1920 1919 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1921 1920 "enable_dvma", -1);
1922 1921 if (val == -1)
1923 1922 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1924 1923 "enable-dvma", -1);
1925 1924 if (val != -1) {
1926 1925 if (val != 0)
1927 1926 chp->ch_config.enable_dvma = 1;
1928 1927 }
1929 1928
1930 1929 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1931 1930 "amd_bug_workaround", -1);
1932 1931 if (val == -1)
1933 1932 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1934 1933 "amd-bug-workaround", -1);
1935 1934
1936 1935 if (val != -1) {
1937 1936 if (val == 0) {
1938 1937 chp->ch_config.burstsize_set = 0;
1939 1938 chp->ch_config.transaction_cnt_set = 0;
1940 1939 goto fail_exit;
1941 1940 }
1942 1941 }
1943 1942 /*
1944 1943 	 * Step up to the parent node. That's the node above us
1945 1944 	 * in the device tree, and it will typically be the PCI
1946 1945 	 * host controller.
1947 1946 */
1948 1947 pdip = ddi_get_parent(chp->ch_dip);
1949 1948
1950 1949 /*
1951 1950 * Now get the 'Vendor id' properties
1952 1951 */
1953 1952 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "vendor-id",
1954 1953 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1955 1954 chp->ch_config.burstsize_set = 0;
1956 1955 chp->ch_config.transaction_cnt_set = 0;
1957 1956 goto fail_exit;
1958 1957 }
1959 1958 vendor_id = *(uint32_t *)prop_val;
1960 1959 ddi_prop_free(prop_val);
1961 1960
1962 1961 /*
1963 1962 * Now get the 'Device id' properties
1964 1963 */
1965 1964 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "device-id",
1966 1965 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1967 1966 chp->ch_config.burstsize_set = 0;
1968 1967 chp->ch_config.transaction_cnt_set = 0;
1969 1968 goto fail_exit;
1970 1969 }
1971 1970 device_id = *(uint32_t *)prop_val;
1972 1971 ddi_prop_free(prop_val);
1973 1972
1974 1973 /*
1975 1974 * Now get the 'Revision id' properties
1976 1975 */
1977 1976 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "revision-id",
1978 1977 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1979 1978 chp->ch_config.burstsize_set = 0;
1980 1979 chp->ch_config.transaction_cnt_set = 0;
1981 1980 goto fail_exit;
1982 1981 }
1983 1982 revision_id = *(uint32_t *)prop_val;
1984 1983 ddi_prop_free(prop_val);
1985 1984
1986 1985 /*
1987 1986 * set default values based on node above us.
1988 1987 */
1989 1988 if ((vendor_id == AMD_VENDOR_ID) && (device_id == AMD_BRIDGE) &&
1990 1989 (revision_id <= AMD_BRIDGE_REV)) {
1991 1990 uint32_t v;
1992 1991 uint32_t burst;
1993 1992 uint32_t cnt;
1994 1993
1995 1994 		/* if 133 MHz is not enabled, do nothing - we're not PCI-X */
1996 1995 		v = pci_config_get32(chp->ch_hpci, 0x64);
1997 1996 		if ((v & 0x20000) == 0) {
1998 1997 chp->ch_config.burstsize_set = 0;
1999 1998 chp->ch_config.transaction_cnt_set = 0;
2000 1999 goto fail_exit;
2001 2000 }
2002 2001
2003 2002 /* check burst size and transaction count */
2004 2003 v = pci_config_get32(chp->ch_hpci, 0x60);
2005 2004 burst = (v >> 18) & 3;
2006 2005 cnt = (v >> 20) & 7;
2007 2006
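		/*
		 * Bits 19:18 of the value read above encode a burst size
		 * (0 = 512, 1 = 1024, 2 = 2048, 3 = 4096 bytes) and bits
		 * 22:20 a split-transaction count, apparently from the
		 * device's PCI-X command settings. When the combination is
		 * one the case comments call legal, the code takes the
		 * fail_exit path and forces no override; otherwise it falls
		 * through and applies the 1024-byte/2-transaction defaults
		 * that follow the switch.
		 */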
2008 2007 switch (burst) {
2009 2008 case 0: /* 512 */
2010 2009 /* 512 burst size legal with split cnts 1,2,3 */
2011 2010 if (cnt <= 2) {
2012 2011 chp->ch_config.burstsize_set = 0;
2013 2012 chp->ch_config.transaction_cnt_set = 0;
2014 2013 goto fail_exit;
2015 2014 }
2016 2015 break;
2017 2016 case 1: /* 1024 */
2018 2017 /* 1024 burst size legal with split cnts 1,2 */
2019 2018 if (cnt <= 1) {
2020 2019 chp->ch_config.burstsize_set = 0;
2021 2020 chp->ch_config.transaction_cnt_set = 0;
2022 2021 goto fail_exit;
2023 2022 }
2024 2023 break;
2025 2024 case 2: /* 2048 */
2026 2025 /* 2048 burst size legal with split cnts 1 */
2027 2026 if (cnt == 0) {
2028 2027 chp->ch_config.burstsize_set = 0;
2029 2028 chp->ch_config.transaction_cnt_set = 0;
2030 2029 goto fail_exit;
2031 2030 }
2032 2031 break;
2033 2032 case 3: /* 4096 */
2034 2033 break;
2035 2034 }
2036 2035 } else {
2037 2036 goto fail_exit;
2038 2037 }
2039 2038
2040 2039 /*
2041 2040 * if illegal burst size seen, then default to 1024 burst size
2042 2041 */
2043 2042 chp->ch_config.burstsize = 1;
2044 2043 chp->ch_config.burstsize_set = 1;
2045 2044 /*
2046 2045 * if illegal transaction cnt seen, then default to 2
2047 2046 */
2048 2047 chp->ch_config.transaction_cnt = 1;
2049 2048 chp->ch_config.transaction_cnt_set = 1;
2050 2049
2051 2050
2052 2051 fail_exit:
2053 2052
2054 2053 /*
2055 2054 * alter the burstsize parameter via an entry
2056 2055 * in chxge.conf
2057 2056 */
2058 2057
2059 2058 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2060 2059 "pci_burstsize", -1);
2061 2060 if (val == -1)
2062 2061 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2063 2062 "pci-burstsize", -1);
2064 2063
2065 2064 if (val != -1) {
2066 2065
2067 2066 switch (val) {
2068 2067 case 0: /* use default */
2069 2068 chp->ch_config.burstsize_set = 0;
2070 2069 break;
2071 2070
2072 2071 case 1024:
2073 2072 chp->ch_config.burstsize_set = 1;
2074 2073 chp->ch_config.burstsize = 1;
2075 2074 break;
2076 2075
2077 2076 case 2048:
2078 2077 chp->ch_config.burstsize_set = 1;
2079 2078 chp->ch_config.burstsize = 2;
2080 2079 break;
2081 2080
2082 2081 case 4096:
2083 2082 			cmn_err(CE_WARN, "%s: burst size %d not supported\n",
2084 2083 			    chp->ch_name, val);
2085 2084 break;
2086 2085
2087 2086 default:
2088 2087 cmn_err(CE_WARN, "%s illegal burst size %d\n",
2089 2088 chp->ch_name, val);
2090 2089 break;
2091 2090 }
2092 2091 }
2093 2092
2094 2093 /*
2095 2094 * set transaction count
2096 2095 */
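	/*
	 * The chxge.conf value is the desired number of outstanding split
	 * transactions; the switch below maps it to the encoded value kept
	 * in ch_config.transaction_cnt (1->0, 2->1, 3->2, 4->3, 8->4,
	 * 12->5, 16->6, 32->7).
	 */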
2097 2096 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2098 2097 "pci_split_transaction_cnt", -1);
2099 2098 if (val == -1)
2100 2099 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2101 2100 "pci-split-transaction-cnt", -1);
2102 2101
2103 2102 if (val != -1) {
2104 2103 switch (val) {
2105 2104 case 0: /* use default */
2106 2105 chp->ch_config.transaction_cnt_set = 0;
2107 2106 break;
2108 2107
2109 2108 case 1:
2110 2109 chp->ch_config.transaction_cnt_set = 1;
2111 2110 chp->ch_config.transaction_cnt = 0;
2112 2111 break;
2113 2112
2114 2113 case 2:
2115 2114 chp->ch_config.transaction_cnt_set = 1;
2116 2115 chp->ch_config.transaction_cnt = 1;
2117 2116 break;
2118 2117
2119 2118 case 3:
2120 2119 chp->ch_config.transaction_cnt_set = 1;
2121 2120 chp->ch_config.transaction_cnt = 2;
2122 2121 break;
2123 2122
2124 2123 case 4:
2125 2124 chp->ch_config.transaction_cnt_set = 1;
2126 2125 chp->ch_config.transaction_cnt = 3;
2127 2126 break;
2128 2127
2129 2128 case 8:
2130 2129 chp->ch_config.transaction_cnt_set = 1;
2131 2130 chp->ch_config.transaction_cnt = 4;
2132 2131 break;
2133 2132
2134 2133 case 12:
2135 2134 chp->ch_config.transaction_cnt_set = 1;
2136 2135 chp->ch_config.transaction_cnt = 5;
2137 2136 break;
2138 2137
2139 2138 case 16:
2140 2139 chp->ch_config.transaction_cnt_set = 1;
2141 2140 chp->ch_config.transaction_cnt = 6;
2142 2141 break;
2143 2142
2144 2143 case 32:
2145 2144 chp->ch_config.transaction_cnt_set = 1;
2146 2145 chp->ch_config.transaction_cnt = 7;
2147 2146 break;
2148 2147
2149 2148 default:
2150 2149 cmn_err(CE_WARN, "%s illegal transaction cnt %d\n",
2151 2150 chp->ch_name, val);
2152 2151 break;
2153 2152 }
2154 2153 }
2155 2154
2156 2155 /*
2157 2156 * set relaxed ordering bit?
2158 2157 */
2159 2158 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2160 2159 "pci_relaxed_ordering_on", -1);
2161 2160 if (val == -1)
2162 2161 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2163 2162 "pci-relaxed-ordering-on", -1);
2164 2163
2165 2164 /*
2166 2165 * default is to use system default value.
2167 2166 */
2168 2167 chp->ch_config.relaxed_ordering = 0;
2169 2168
2170 2169 if (val != -1) {
2171 2170 if (val)
2172 2171 chp->ch_config.relaxed_ordering = 1;
2173 2172 }
2174 2173
2175 2174 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2176 2175 "enable_latency_timer", -1);
2177 2176 if (val == -1)
2178 2177 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2179 2178 "enable-latency-timer", -1);
2180 2179 if (val != -1)
2181 2180 enable_latency_timer = (val == 0)? 0: 1;
2182 2181
2183 2182 /*
2184 2183 * default maximum Jumbo Frame size.
2185 2184 */
2186 2185 chp->ch_maximum_mtu = 9198; /* tunable via chxge.conf */
2187 2186 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2188 2187 "maximum_mtu", -1);
2189 2188 if (val == -1) {
2190 2189 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2191 2190 "maximum-mtu", -1);
2192 2191 }
2193 2192 if (val != -1) {
2194 2193 if (val > 9582) {
2195 2194 cmn_err(CE_WARN,
2196 2195 "maximum_mtu value %d > 9582. Value set to 9582",
2197 2196 val);
2198 2197 val = 9582;
2199 2198 } else if (val < 1500) {
2200 2199 cmn_err(CE_WARN,
2201 2200 "maximum_mtu value %d < 1500. Value set to 1500",
2202 2201 val);
2203 2202 val = 1500;
2204 2203 }
2205 2204
2206 2205 if (val)
2207 2206 chp->ch_maximum_mtu = val;
2208 2207 }
2209 2208
2210 2209 /*
2211 2210 * default value for this instance mtu
2212 2211 */
2213 2212 chp->ch_mtu = ETHERMTU;
2214 2213
2215 2214 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2216 2215 "accept_jumbo", -1);
2217 2216 if (val == -1) {
2218 2217 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2219 2218 "accept-jumbo", -1);
2220 2219 }
2221 2220 if (val != -1) {
2222 2221 if (val)
2223 2222 chp->ch_mtu = chp->ch_maximum_mtu;
2224 2223 }
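	/*
	 * The small-buffer and big-buffer sizes and alignments set below are
	 * scaled with the configured MTU; offload (CONFIG_CHELSIO_T1_OFFLOAD)
	 * builds use fixed 2 KB small and 16 KB big buffers instead.
	 */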
2225 2224 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
2226 2225 chp->ch_sm_buf_sz = 0x800;
2227 2226 chp->ch_sm_buf_aln = 0x800;
2228 2227 chp->ch_bg_buf_sz = 0x4000;
2229 2228 chp->ch_bg_buf_aln = 0x4000;
2230 2229 #else
2231 2230 chp->ch_sm_buf_sz = 0x200;
2232 2231 chp->ch_sm_buf_aln = 0x200;
2233 2232 chp->ch_bg_buf_sz = 0x800;
2234 2233 chp->ch_bg_buf_aln = 0x800;
2235 2234 if ((chp->ch_mtu > 0x800) && (chp->ch_mtu <= 0x1000)) {
2236 2235 chp->ch_sm_buf_sz = 0x400;
2237 2236 chp->ch_sm_buf_aln = 0x400;
2238 2237 chp->ch_bg_buf_sz = 0x1000;
2239 2238 chp->ch_bg_buf_aln = 0x1000;
2240 2239 } else if ((chp->ch_mtu > 0x1000) && (chp->ch_mtu <= 0x2000)) {
2241 2240 chp->ch_sm_buf_sz = 0x400;
2242 2241 chp->ch_sm_buf_aln = 0x400;
2243 2242 chp->ch_bg_buf_sz = 0x2000;
2244 2243 chp->ch_bg_buf_aln = 0x2000;
2245 2244 } else if (chp->ch_mtu > 0x2000) {
2246 2245 chp->ch_sm_buf_sz = 0x400;
2247 2246 chp->ch_sm_buf_aln = 0x400;
2248 2247 chp->ch_bg_buf_sz = 0x3000;
2249 2248 chp->ch_bg_buf_aln = 0x4000;
2250 2249 }
2251 2250 #endif
2252 2251 chp->ch_config.cksum_enabled = 1;
2253 2252
2254 2253 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2255 2254 "enable_checksum_offload", -1);
2256 2255 if (val == -1)
2257 2256 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2258 2257 "enable-checksum-offload", -1);
2259 2258 if (val != -1) {
2260 2259 		if (val == 0)
2261 2260 chp->ch_config.cksum_enabled = 0;
2262 2261 }
2263 2262
2264 2263 /*
2265 2264 * Provides a tuning capability for the command queue 0 size.
2266 2265 */
2267 2266 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2268 2267 "sge_cmdq0_cnt", -1);
2269 2268 if (val == -1)
2270 2269 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2271 2270 "sge-cmdq0-cnt", -1);
2272 2271 if (val != -1) {
2273 2272 if (val > 10)
2274 2273 sge_cmdq0_cnt = val;
2275 2274 }
2276 2275
2277 2276 if (sge_cmdq0_cnt > 65535) {
2278 2277 cmn_err(CE_WARN,
2279 2278 "%s: sge-cmdQ0-cnt > 65535 - resetting value to default",
2280 2279 chp->ch_name);
2281 2280 sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2282 2281 }
2283 2282 tval += sge_cmdq0_cnt;
2284 2283
2285 2284 /*
2286 2285 * Provides a tuning capability for the command queue 1 size.
2287 2286 */
2288 2287 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2289 2288 "sge_cmdq1_cnt", -1);
2290 2289 if (val == -1)
2291 2290 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2292 2291 "sge-cmdq1-cnt", -1);
2293 2292 if (val != -1) {
2294 2293 if (val > 10)
2295 2294 sge_cmdq1_cnt = val;
2296 2295 }
2297 2296
2298 2297 if (sge_cmdq1_cnt > 65535) {
2299 2298 cmn_err(CE_WARN,
2300 2299 		    "%s: sge-cmdQ1-cnt > 65535 - resetting value to default",
2301 2300 chp->ch_name);
2302 2301 sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2303 2302 }
2304 2303
2305 2304 /*
2306 2305 * Provides a tuning capability for the free list 0 size.
2307 2306 */
2308 2307 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2309 2308 "sge_flq0_cnt", -1);
2310 2309 if (val == -1)
2311 2310 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2312 2311 "sge-flq0-cnt", -1);
2313 2312 if (val != -1) {
2314 2313 if (val > 512)
2315 2314 sge_flq0_cnt = val;
2316 2315 }
2317 2316
2318 2317 if (sge_flq0_cnt > 65535) {
2319 2318 cmn_err(CE_WARN,
2320 2319 "%s: sge-flq0-cnt > 65535 - resetting value to default",
2321 2320 chp->ch_name);
2322 2321 sge_flq0_cnt = sge_flq0_cnt_orig;
2323 2322 }
2324 2323
2325 2324 tval += sge_flq0_cnt;
2326 2325
2327 2326 /*
2328 2327 * Provides a tuning capability for the free list 1 size.
2329 2328 */
2330 2329 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2331 2330 "sge_flq1_cnt", -1);
2332 2331 if (val == -1)
2333 2332 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2334 2333 "sge-flq1-cnt", -1);
2335 2334 if (val != -1) {
2336 2335 if (val > 512)
2337 2336 sge_flq1_cnt = val;
2338 2337 }
2339 2338
2340 2339 if (sge_flq1_cnt > 65535) {
2341 2340 cmn_err(CE_WARN,
2342 2341 "%s: sge-flq1-cnt > 65535 - resetting value to default",
2343 2342 chp->ch_name);
2344 2343 sge_flq1_cnt = sge_flq1_cnt_orig;
2345 2344 }
2346 2345
2347 2346 tval += sge_flq1_cnt;
2348 2347
2349 2348 /*
2350 2349 	 * Provides a tuning capability for the response queue size.
2351 2350 */
2352 2351 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2353 2352 "sge_respq_cnt", -1);
2354 2353 if (val == -1)
2355 2354 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2356 2355 "sge-respq-cnt", -1);
2357 2356 if (val != -1) {
2358 2357 if (val > 30)
2359 2358 sge_respq_cnt = val;
2360 2359 }
2361 2360
2362 2361 if (sge_respq_cnt > 65535) {
2363 2362 cmn_err(CE_WARN,
2364 2363 "%s: sge-respq-cnt > 65535 - resetting value to default",
2365 2364 chp->ch_name);
2366 2365 sge_respq_cnt = sge_respq_cnt_orig;
2367 2366 }
2368 2367
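	/*
	 * tval accumulated cmdQ0 + flq0 + flq1 above; the response queue is
	 * sized to at least that total, presumably because each command or
	 * free-list entry can produce a response entry. If the total itself
	 * exceeds 65535, all queue sizes fall back to their defaults.
	 */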
2369 2368 if (tval > sge_respq_cnt) {
2370 2369 if (tval <= 65535) {
2371 2370 cmn_err(CE_WARN,
2372 2371 "%s: sge-respq-cnt < %d - setting value to %d (cmdQ+flq0+flq1)",
2373 2372 chp->ch_name, tval, tval);
2374 2373
2375 2374 sge_respq_cnt = tval;
2376 2375 } else {
2377 2376 cmn_err(CE_WARN,
2378 2377 "%s: Q sizes invalid - resetting to default values",
2379 2378 chp->ch_name);
2380 2379
2381 2380 sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2382 2381 sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2383 2382 sge_flq0_cnt = sge_flq0_cnt_orig;
2384 2383 sge_flq1_cnt = sge_flq1_cnt_orig;
2385 2384 sge_respq_cnt = sge_respq_cnt_orig;
2386 2385 }
2387 2386 }
2388 2387 }
2103 lines elided