7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 QLogic Corporation. All rights reserved.
24 24 */
25 25
26 26 #include <qlge.h>
27 27 #include <sys/atomic.h>
28 28 #include <sys/strsubr.h>
29 29 #include <sys/pattr.h>
30 30 #include <netinet/in.h>
31 31 #include <netinet/ip.h>
32 32 #include <netinet/ip6.h>
33 33 #include <netinet/tcp.h>
34 34 #include <netinet/udp.h>
35 35 #include <inet/ip.h>
36 36
37 37
38 38
39 39 /*
40 40 * Local variables
41 41 */
42 -static struct ether_addr ql_ether_broadcast_addr =
43 - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
42 +static struct ether_addr ql_ether_broadcast_addr = {
43 + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
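This two-line hunk is the qlge part of the 7127 cleanup: struct ether_addr wraps a byte array (ether_addr_octet), so a fully braced initializer needs an inner set of braces around that array, which is exactly what -Wmissing-braces flags in the old form. A minimal standalone sketch of the warning and the fix; the struct below is an illustrative stand-in for the real system definition:

    #include <stdint.h>

    struct eaddr {                          /* stand-in for struct ether_addr */
            uint8_t octet[6];
    };

    /* warns under -Wmissing-braces: the array member has no braces of its own */
    static struct eaddr bcast_old = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

    /* clean: the inner braces initialize the array member explicitly */
    static struct eaddr bcast_new = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};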
44 44 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
45 45
46 46 /*
47 47 * Local function prototypes
48 48 */
49 49 static void ql_free_resources(qlge_t *);
50 50 static void ql_fini_kstats(qlge_t *);
51 51 static uint32_t ql_get_link_state(qlge_t *);
52 52 static void ql_read_conf(qlge_t *);
53 53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
54 54 ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
55 55 size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
56 56 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
57 57 ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
58 58 size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
59 59 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
60 60 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
61 61 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
62 62 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
63 63 static int ql_bringdown_adapter(qlge_t *);
64 64 static int ql_bringup_adapter(qlge_t *);
65 65 static int ql_asic_reset(qlge_t *);
66 66 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
67 67 static void ql_stop_timer(qlge_t *qlge);
68 68 static void ql_fm_fini(qlge_t *qlge);
69 69 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
70 70
71 71 /*
72 72 * TX dma mapping handlers allow multiple scatter-gather lists
73 73 */
74 74 ddi_dma_attr_t tx_mapping_dma_attr = {
75 75 DMA_ATTR_V0, /* dma_attr_version */
76 76 QL_DMA_LOW_ADDRESS, /* low DMA address range */
77 77 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
78 78 QL_DMA_XFER_COUNTER, /* DMA counter register */
79 79 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
80 80 QL_DMA_BURSTSIZES, /* DMA burstsizes */
81 81 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
82 82 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
83 83 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
84 84 QL_MAX_TX_DMA_HANDLES, /* s/g list length */
85 85 QL_DMA_GRANULARITY, /* granularity of device */
86 86 DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
87 87 };
88 88
89 89 /*
90 90 * Receive buffers and Request/Response queues do not allow scatter-gather lists
91 91 */
92 92 ddi_dma_attr_t dma_attr = {
93 93 DMA_ATTR_V0, /* dma_attr_version */
94 94 QL_DMA_LOW_ADDRESS, /* low DMA address range */
95 95 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
96 96 QL_DMA_XFER_COUNTER, /* DMA counter register */
97 97 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
98 98 QL_DMA_BURSTSIZES, /* DMA burstsizes */
99 99 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
100 100 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
101 101 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
102 102 1, /* s/g list length, i.e no sg list */
103 103 QL_DMA_GRANULARITY, /* granularity of device */
104 104 QL_DMA_XFER_FLAGS /* DMA transfer flags */
105 105 };
106 106 /*
107 107 * Receive buffers do not allow scatter-gather lists
108 108 */
109 109 ddi_dma_attr_t dma_attr_rbuf = {
110 110 DMA_ATTR_V0, /* dma_attr_version */
111 111 QL_DMA_LOW_ADDRESS, /* low DMA address range */
112 112 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
113 113 QL_DMA_XFER_COUNTER, /* DMA counter register */
114 114 0x1, /* DMA address alignment, default - 8 */
115 115 QL_DMA_BURSTSIZES, /* DMA burstsizes */
116 116 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
117 117 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
118 118 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
119 119 1, /* s/g list length, i.e no sg list */
120 120 QL_DMA_GRANULARITY, /* granularity of device */
121 121 DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
122 122 };
123 123 /*
124 124 * DMA access attribute structure.
125 125 */
126 126 /* device register access from host */
127 127 ddi_device_acc_attr_t ql_dev_acc_attr = {
128 128 DDI_DEVICE_ATTR_V0,
129 129 DDI_STRUCTURE_LE_ACC,
130 130 DDI_STRICTORDER_ACC
131 131 };
132 132
133 133 /* host ring descriptors */
134 134 ddi_device_acc_attr_t ql_desc_acc_attr = {
135 135 DDI_DEVICE_ATTR_V0,
136 136 DDI_NEVERSWAP_ACC,
137 137 DDI_STRICTORDER_ACC
138 138 };
139 139
140 140 /* host ring buffer */
141 141 ddi_device_acc_attr_t ql_buf_acc_attr = {
142 142 DDI_DEVICE_ATTR_V0,
143 143 DDI_NEVERSWAP_ACC,
144 144 DDI_STRICTORDER_ACC
145 145 };
146 146
147 147 /*
148 148 * Hash key table for Receive Side Scaling (RSS) support
149 149 */
150 150 const uint8_t key_data[] = {
151 151 0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
152 152 0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
153 153 0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
154 154 0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
155 155
156 156 /*
157 157 * Shadow Registers:
158 158 * Outbound queues have a consumer index that is maintained by the chip.
159 159 * Inbound queues have a producer index that is maintained by the chip.
160 160 * For lower overhead, these registers are "shadowed" to host memory
161 161 * which allows the device driver to track the queue progress without
162 162 * PCI reads. When an entry is placed on an inbound queue, the chip will
163 163 * update the relevant index register and then copy the value to the
164 164 * shadow register in host memory.
165 165 * Currently, ql_read_sh_reg only reads the Inbound queues' producer index.
166 166 */
167 167
168 168 static inline unsigned int
169 169 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
170 170 {
171 171 uint32_t rtn;
172 172
173 173 /* re-synchronize shadow prod index dma buffer before reading */
174 174 (void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
175 175 rx_ring->prod_idx_sh_reg_offset,
176 176 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
177 177
178 178 rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
179 179 (uint32_t *)rx_ring->prod_idx_sh_reg);
180 180
181 181 return (rtn);
182 182 }
183 183
184 184 /*
185 185 * Read 32 bit atomically
186 186 */
187 187 uint32_t
188 188 ql_atomic_read_32(volatile uint32_t *target)
189 189 {
190 190 /*
191 191 * atomic_add_32_nv returns the new value after the add,
192 192 * we are adding 0 so we should get the original value
193 193 */
194 194 return (atomic_add_32_nv(target, 0));
195 195 }
196 196
197 197 /*
198 198 * Set 32 bit atomically
199 199 */
200 200 void
201 201 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
202 202 {
203 203 (void) atomic_swap_32(target, newval);
204 204 }
205 205
206 206
207 207 /*
208 208 * Setup device PCI configuration registers.
209 209 * Kernel context.
210 210 */
211 211 static void
212 212 ql_pci_config(qlge_t *qlge)
213 213 {
214 214 uint16_t w;
215 215
216 216 qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
217 217 PCI_CONF_VENID);
218 218 qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
219 219 PCI_CONF_DEVID);
220 220
221 221 /*
222 222 * we want to respect framework's setting of PCI
223 223 * configuration space command register and also
224 224 * want to make sure that all bits of interest to us
225 225 * are properly set in PCI Command register(0x04).
226 226 * PCI_COMM_IO 0x1 I/O access enable
227 227 * PCI_COMM_MAE 0x2 Memory access enable
228 228 * PCI_COMM_ME 0x4 bus master enable
229 229 * PCI_COMM_MEMWR_INVAL 0x10 memory write and invalidate enable.
230 230 */
231 231 w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
232 232 w = (uint16_t)(w & (~PCI_COMM_IO));
233 233 w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
234 234 /* PCI_COMM_MEMWR_INVAL | */
235 235 PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
236 236
237 237 pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
238 238
239 239 w = pci_config_get16(qlge->pci_handle, 0x54);
240 240 w = (uint16_t)(w & (~0x7000));
241 241 w = (uint16_t)(w | 0x5000);
242 242 pci_config_put16(qlge->pci_handle, 0x54, w);
243 243
244 244 ql_dump_pci_config(qlge);
245 245 }
246 246
247 247 /*
248 248 * This routine performs the necessary steps to set GLD mac information
249 249 * such as Function number, xgmac mask and shift bits
250 250 */
251 251 static int
252 252 ql_set_mac_info(qlge_t *qlge)
253 253 {
254 254 uint32_t value;
255 255 int rval = DDI_FAILURE;
256 256 uint32_t fn0_net, fn1_net;
257 257
258 258 /* set default value */
259 259 qlge->fn0_net = FN0_NET;
260 260 qlge->fn1_net = FN1_NET;
261 261
262 262 if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
263 263 cmn_err(CE_WARN, "%s(%d) read MPI register failed",
264 264 __func__, qlge->instance);
265 265 goto exit;
266 266 } else {
267 267 fn0_net = (value >> 1) & 0x07;
268 268 fn1_net = (value >> 5) & 0x07;
269 269 if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
270 270 cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
271 271 "nic0 function number %d,"
272 272 "nic1 function number %d "
273 273 "use default\n",
274 274 __func__, qlge->instance, value, fn0_net, fn1_net);
275 275 goto exit;
276 276 } else {
277 277 qlge->fn0_net = fn0_net;
278 278 qlge->fn1_net = fn1_net;
279 279 }
280 280 }
281 281
282 282 /* Get the function number that the driver is associated with */
283 283 value = ql_read_reg(qlge, REG_STATUS);
284 284 qlge->func_number = (uint8_t)((value >> 6) & 0x03);
285 285 QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
286 286 value, qlge->func_number));
287 287
288 288 /* The driver is loaded on a non-NIC function? */
289 289 if ((qlge->func_number != qlge->fn0_net) &&
290 290 (qlge->func_number != qlge->fn1_net)) {
291 291 cmn_err(CE_WARN,
292 292 "Invalid function number = 0x%x\n", qlge->func_number);
293 293 goto exit;
294 294 }
295 295 /* network port 0? */
296 296 if (qlge->func_number == qlge->fn0_net) {
297 297 qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
298 298 qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
299 299 } else {
300 300 qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
301 301 qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
302 302 }
303 303 rval = DDI_SUCCESS;
304 304 exit:
305 305 return (rval);
306 306
307 307 }
308 308
309 309 /*
310 310 * write to doorbell register
311 311 */
312 312 void
313 313 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
314 314 {
315 315 ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
316 316 }
317 317
318 318 /*
319 319 * read from doorbell register
320 320 */
321 321 uint32_t
322 322 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
323 323 {
324 324 uint32_t ret;
325 325
326 326 ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
327 327
328 328 return (ret);
329 329 }
330 330
331 331 /*
332 332 * This function waits for a specific bit to come ready
333 333 * in a given register. It is used mostly by the initialize
334 334 * process, but is also used in kernel thread API such as
335 335 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
336 336 */
337 337 static int
338 338 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
339 339 {
340 340 uint32_t temp;
341 341 int count = UDELAY_COUNT;
342 342
343 343 while (count) {
344 344 temp = ql_read_reg(qlge, reg);
345 345
346 346 /* check for errors */
347 347 if ((temp & err_bit) != 0) {
348 348 break;
349 349 } else if ((temp & bit) != 0)
350 350 return (DDI_SUCCESS);
351 351 qlge_delay(UDELAY_DELAY);
352 352 count--;
353 353 }
354 354 cmn_err(CE_WARN,
355 355 "Waiting for reg %x to come ready failed.", reg);
356 356 if (qlge->fm_enable) {
357 357 ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
358 358 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
359 359 }
360 360 return (DDI_FAILURE);
361 361 }
362 362
363 363 /*
364 364 * The CFG register is used to download TX and RX control blocks
365 365 * to the chip. This function waits for an operation to complete.
366 366 */
367 367 static int
368 368 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
369 369 {
370 370 return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
371 371 }
372 372
373 373
374 374 /*
375 375 * Used to issue init control blocks to hw. Maps control block,
376 376 * sets address, triggers download, waits for completion.
377 377 */
378 378 static int
379 379 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
380 380 {
381 381 int status = DDI_SUCCESS;
382 382 uint32_t mask;
383 383 uint32_t value;
384 384
385 385 status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
386 386 if (status != DDI_SUCCESS) {
387 387 goto exit;
388 388 }
389 389 status = ql_wait_cfg(qlge, bit);
390 390 if (status != DDI_SUCCESS) {
391 391 goto exit;
392 392 }
393 393
394 394 ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
395 395 ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
396 396
397 397 mask = CFG_Q_MASK | (bit << 16);
398 398 value = bit | (q_id << CFG_Q_SHIFT);
399 399 ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
400 400
401 401 /*
402 402 * Wait for the bit to clear after signaling hw.
403 403 */
404 404 status = ql_wait_cfg(qlge, bit);
405 405 ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
406 406
407 407 exit:
408 408 return (status);
409 409 }
410 410
411 411 /*
412 412 * Initialize adapter instance
413 413 */
414 414 static int
415 415 ql_init_instance(qlge_t *qlge)
416 416 {
417 417 int i;
418 418
419 419 /* Default value */
420 420 qlge->mac_flags = QL_MAC_INIT;
421 421 qlge->mtu = ETHERMTU; /* set normal size as default */
422 422 qlge->page_size = VM_PAGE_SIZE; /* default page size */
423 423
424 424 for (i = 0; i < MAX_RX_RINGS; i++) {
425 425 qlge->rx_polls[i] = 0;
426 426 qlge->rx_interrupts[i] = 0;
427 427 }
428 428
429 429 /*
430 430 * Set up the operating parameters.
431 431 */
432 432 qlge->multicast_list_count = 0;
433 433
434 434 /*
435 435 * Set up the maximum size of the unicast address list
436 436 */
437 437 qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
438 438 qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
439 439
440 440 /*
441 441 * read user defined properties in .conf file
442 442 */
443 443 ql_read_conf(qlge); /* mtu, pause, LSO etc */
444 444 qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
445 445
446 446 QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
447 447
448 448 /* choose Memory Space mapping and get Vendor Id, Device ID etc */
449 449 ql_pci_config(qlge);
450 450 qlge->ip_hdr_offset = 0;
451 451
452 452 if (qlge->device_id == 0x8000) {
453 453 /* Schultz card */
454 454 qlge->cfg_flags |= CFG_CHIP_8100;
455 455 /* enable just ipv4 chksum offload for Schultz */
456 456 qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
457 457 /*
458 458 * Schultz firmware does not do pseudo IP header checksum
459 459 * calculation; it must be done by the driver
460 460 */
461 461 qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
462 462 if (qlge->lso_enable)
463 463 qlge->cfg_flags |= CFG_LSO;
464 464 qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
465 465 /* Schultz must split packet header */
466 466 qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
467 467 qlge->max_read_mbx = 5;
468 468 qlge->ip_hdr_offset = 2;
469 469 }
470 470
471 471 /* Set Function Number and some of the iocb mac information */
472 472 if (ql_set_mac_info(qlge) != DDI_SUCCESS)
473 473 return (DDI_FAILURE);
474 474
475 475 /* Read network settings from NVRAM */
476 476 /* After nvram is read successfully, update dev_addr */
477 477 if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
478 478 QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
479 479 for (i = 0; i < ETHERADDRL; i++) {
480 480 qlge->dev_addr.ether_addr_octet[i] =
481 481 qlge->nic_config.factory_MAC[i];
482 482 }
483 483 } else {
484 484 cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
485 485 __func__, qlge->instance);
486 486 return (DDI_FAILURE);
487 487 }
488 488
489 489 bcopy(qlge->dev_addr.ether_addr_octet,
490 490 qlge->unicst_addr[0].addr.ether_addr_octet,
491 491 ETHERADDRL);
492 492 QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
493 493 &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
494 494
495 495 qlge->port_link_state = LS_DOWN;
496 496
497 497 return (DDI_SUCCESS);
498 498 }
499 499
500 500
501 501 /*
502 502 * This hardware semaphore provides the mechanism for exclusive access to
503 503 * resources shared between the NIC driver, MPI firmware,
504 504 * FCOE firmware and the FC driver.
505 505 */
506 506 static int
507 507 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
508 508 {
509 509 uint32_t sem_bits = 0;
510 510
511 511 switch (sem_mask) {
512 512 case SEM_XGMAC0_MASK:
513 513 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
514 514 break;
515 515 case SEM_XGMAC1_MASK:
516 516 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
517 517 break;
518 518 case SEM_ICB_MASK:
519 519 sem_bits = SEM_SET << SEM_ICB_SHIFT;
520 520 break;
521 521 case SEM_MAC_ADDR_MASK:
522 522 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
523 523 break;
524 524 case SEM_FLASH_MASK:
525 525 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
526 526 break;
527 527 case SEM_PROBE_MASK:
528 528 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
529 529 break;
530 530 case SEM_RT_IDX_MASK:
531 531 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
532 532 break;
533 533 case SEM_PROC_REG_MASK:
534 534 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
535 535 break;
536 536 default:
537 537 cmn_err(CE_WARN, "Bad Semaphore mask!.");
538 538 return (DDI_FAILURE);
539 539 }
540 540
541 541 ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
542 542 return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
543 543 }
544 544
545 545 /*
546 546 * Lock a specific bit of Semaphore register to gain
547 547 * access to a particular shared register
548 548 */
549 549 int
550 550 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
551 551 {
552 552 unsigned int wait_count = 30;
553 553
554 554 while (wait_count) {
555 555 if (!ql_sem_trylock(qlge, sem_mask))
556 556 return (DDI_SUCCESS);
557 557 qlge_delay(100);
558 558 wait_count--;
559 559 }
560 560 cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
561 561 __func__, qlge->instance, sem_mask);
562 562 return (DDI_FAILURE);
563 563 }
564 564
565 565 /*
566 566 * Unlock a specific bit of Semaphore register to release
567 567 * access to a particular shared register
568 568 */
569 569 void
570 570 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
571 571 {
572 572 ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
573 573 (void) ql_read_reg(qlge, REG_SEMAPHORE); /* flush */
574 574 }
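The three routines above are used together in an acquire/use/release pattern. A minimal sketch of that call sequence, modeled on how ql_write_cfg() above takes the ICB semaphore; the body between lock and unlock is a placeholder, not code from this driver:

    if (ql_sem_spinlock(qlge, SEM_ICB_MASK) != DDI_SUCCESS)
            return (DDI_FAILURE);           /* another agent (MPI/FCoE/FC) holds it */

    /* ... access the shared resource guarded by SEM_ICB_MASK ... */

    ql_sem_unlock(qlge, SEM_ICB_MASK);      /* releases the bit and flushes the write */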
575 575
576 576 /*
577 577 * Get property value from configuration file.
578 578 *
579 579 * string = property string pointer.
580 580 *
581 581 * Returns:
582 582 * 0xFFFFFFFF = no property else property value.
583 583 */
584 584 static uint32_t
585 585 ql_get_prop(qlge_t *qlge, char *string)
586 586 {
587 587 char buf[256];
588 588 uint32_t data;
589 589
590 590 /* Get adapter instance parameter. */
591 591 (void) sprintf(buf, "hba%d-%s", qlge->instance, string);
592 592 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
593 593 (int)0xffffffff);
594 594
595 595 /* Adapter instance parameter found? */
596 596 if (data == 0xffffffff) {
597 597 /* No, get default parameter. */
598 598 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
599 599 string, (int)0xffffffff);
600 600 }
601 601
602 602 return (data);
603 603 }
604 604
605 605 /*
606 606 * Read user setting from configuration file.
607 607 */
608 608 static void
609 609 ql_read_conf(qlge_t *qlge)
610 610 {
611 611 uint32_t data;
612 612
613 613 /* clear configuration flags */
614 614 qlge->cfg_flags = 0;
615 615
616 616 /* Set up the default ring sizes. */
617 617 qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
618 618 data = ql_get_prop(qlge, "tx_ring_size");
619 619 /* if data is valid */
620 620 if ((data != 0xffffffff) && data) {
621 621 if (qlge->tx_ring_size != data) {
622 622 qlge->tx_ring_size = (uint16_t)data;
623 623 }
624 624 }
625 625
626 626 qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
627 627 data = ql_get_prop(qlge, "rx_ring_size");
628 628 /* if data is valid */
629 629 if ((data != 0xffffffff) && data) {
630 630 if (qlge->rx_ring_size != data) {
631 631 qlge->rx_ring_size = (uint16_t)data;
632 632 }
633 633 }
634 634
635 635 qlge->tx_ring_count = 8;
636 636 data = ql_get_prop(qlge, "tx_ring_count");
637 637 /* if data is valid */
638 638 if ((data != 0xffffffff) && data) {
639 639 if (qlge->tx_ring_count != data) {
640 640 qlge->tx_ring_count = (uint16_t)data;
641 641 }
642 642 }
643 643
644 644 qlge->rss_ring_count = 8;
645 645 data = ql_get_prop(qlge, "rss_ring_count");
646 646 /* if data is valid */
647 647 if ((data != 0xffffffff) && data) {
648 648 if (qlge->rss_ring_count != data) {
649 649 qlge->rss_ring_count = (uint16_t)data;
650 650 }
651 651 }
652 652
653 653 /* Get default rx_copy enable/disable. */
654 654 if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
655 655 data == 0) {
656 656 qlge->rx_copy = B_FALSE;
657 657 QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
658 658 } else if (data == 1) {
659 659 qlge->rx_copy = B_TRUE;
660 660 QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
661 661 }
662 662
663 663 qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
664 664 data = ql_get_prop(qlge, "rx_copy_threshold");
665 665 if ((data != 0xffffffff) && (data != 0)) {
666 666 qlge->rx_copy_threshold = data;
667 667 cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
668 668 qlge->rx_copy_threshold);
669 669 }
670 670
671 671 /* Get mtu packet size. */
672 672 data = ql_get_prop(qlge, "mtu");
673 673 if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
674 674 if (qlge->mtu != data) {
675 675 qlge->mtu = data;
676 676 cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
677 677 }
678 678 }
679 679
680 680 if (qlge->mtu == JUMBO_MTU) {
681 681 qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
682 682 qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
683 683 qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
684 684 qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
685 685 }
686 686
687 687
688 688 /* Get pause mode, default is Per Priority mode. */
689 689 qlge->pause = PAUSE_MODE_PER_PRIORITY;
690 690 data = ql_get_prop(qlge, "pause");
691 691 if (data <= PAUSE_MODE_PER_PRIORITY) {
692 692 if (qlge->pause != data) {
693 693 qlge->pause = data;
694 694 cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
695 695 }
696 696 }
697 697 /* Receive interrupt delay */
698 698 qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
699 699 data = ql_get_prop(qlge, "rx_intr_delay");
700 700 /* if data is valid */
701 701 if ((data != 0xffffffff) && data) {
702 702 if (qlge->rx_coalesce_usecs != data) {
703 703 qlge->rx_coalesce_usecs = (uint16_t)data;
704 704 }
705 705 }
706 706 /* Rx inter-packet delay. */
707 707 qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
708 708 data = ql_get_prop(qlge, "rx_ipkt_delay");
709 709 /* if data is valid */
710 710 if ((data != 0xffffffff) && data) {
711 711 if (qlge->rx_max_coalesced_frames != data) {
712 712 qlge->rx_max_coalesced_frames = (uint16_t)data;
713 713 }
714 714 }
715 715 /* Transmit interrupt delay */
716 716 qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
717 717 data = ql_get_prop(qlge, "tx_intr_delay");
718 718 /* if data is valid */
719 719 if ((data != 0xffffffff) && data) {
720 720 if (qlge->tx_coalesce_usecs != data) {
721 721 qlge->tx_coalesce_usecs = (uint16_t)data;
722 722 }
723 723 }
724 724 /* Tx inter-packet delay. */
725 725 qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
726 726 data = ql_get_prop(qlge, "tx_ipkt_delay");
727 727 /* if data is valid */
728 728 if ((data != 0xffffffff) && data) {
729 729 if (qlge->tx_max_coalesced_frames != data) {
730 730 qlge->tx_max_coalesced_frames = (uint16_t)data;
731 731 }
732 732 }
733 733
734 734 /* Get split header payload_copy_thresh. */
735 735 qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
736 736 data = ql_get_prop(qlge, "payload_copy_thresh");
737 737 /* if data is valid */
738 738 if ((data != 0xffffffff) && (data != 0)) {
739 739 if (qlge->payload_copy_thresh != data) {
740 740 qlge->payload_copy_thresh = data;
741 741 }
742 742 }
743 743
744 744 /* large send offload (LSO) capability. */
745 745 qlge->lso_enable = 1;
746 746 data = ql_get_prop(qlge, "lso_enable");
747 747 /* if data is valid */
748 748 if ((data == 0) || (data == 1)) {
749 749 if (qlge->lso_enable != data) {
750 750 qlge->lso_enable = (uint16_t)data;
751 751 }
752 752 }
753 753
754 754 /* dcbx capability. */
755 755 qlge->dcbx_enable = 1;
756 756 data = ql_get_prop(qlge, "dcbx_enable");
757 757 /* if data is valid */
758 758 if ((data == 0) || (data == 1)) {
759 759 if (qlge->dcbx_enable != data) {
760 760 qlge->dcbx_enable = (uint16_t)data;
761 761 }
762 762 }
763 763 /* fault management enable */
764 764 qlge->fm_enable = B_TRUE;
765 765 data = ql_get_prop(qlge, "fm-enable");
766 766 if ((data == 0x1) || (data == 0)) {
767 767 qlge->fm_enable = (boolean_t)data;
768 768 }
769 769
770 770 }
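Each tunable above is fetched through ql_get_prop(), which first looks for a per-instance name built as "hba<instance>-<property>" and then falls back to the plain property name. A minimal sketch of what the matching .conf entries could look like, assuming the usual driver.conf syntax for this driver; the property names come from the code above and the values are purely illustrative:

    # defaults applied to all instances
    tx_ring_count=4;
    lso_enable=0;

    # per-instance override, matched by the hba0- prefix built in ql_get_prop()
    hba0-rx_intr_delay=60;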
771 771
772 772 /*
773 773 * Enable global interrupt
774 774 */
775 775 static void
776 776 ql_enable_global_interrupt(qlge_t *qlge)
777 777 {
778 778 ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
779 779 (INTR_EN_EI << 16) | INTR_EN_EI);
780 780 qlge->flags |= INTERRUPTS_ENABLED;
781 781 }
782 782
783 783 /*
784 784 * Disable global interrupt
785 785 */
786 786 static void
787 787 ql_disable_global_interrupt(qlge_t *qlge)
788 788 {
789 789 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
790 790 qlge->flags &= ~INTERRUPTS_ENABLED;
791 791 }
792 792
793 793 /*
794 794 * Enable one ring interrupt
795 795 */
796 796 void
797 797 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
798 798 {
799 799 struct intr_ctx *ctx = qlge->intr_ctx + intr;
800 800
801 801 QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
802 802 __func__, qlge->instance, intr, ctx->irq_cnt));
803 803
804 804 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
805 805 /*
806 806 * Always enable if we're MSIX multi interrupts and
807 807 * it's not the default (zeroeth) interrupt.
808 808 */
809 809 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
810 810 return;
811 811 }
812 812
813 813 if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
814 814 mutex_enter(&qlge->hw_mutex);
815 815 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
816 816 mutex_exit(&qlge->hw_mutex);
817 817 QL_PRINT(DBG_INTR,
818 818 ("%s(%d): write %x to intr enable register \n",
819 819 __func__, qlge->instance, ctx->intr_en_mask));
820 820 }
821 821 }
822 822
823 823 /*
824 824 * ql_forced_disable_completion_interrupt
825 825 * Called from the OS, possibly without a pending
826 826 * interrupt, so force the disable
827 827 */
828 828 uint32_t
829 829 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
830 830 {
831 831 uint32_t var = 0;
832 832 struct intr_ctx *ctx = qlge->intr_ctx + intr;
833 833
834 834 QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
835 835 __func__, qlge->instance, intr, ctx->irq_cnt));
836 836
837 837 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
838 838 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
839 839 var = ql_read_reg(qlge, REG_STATUS);
840 840 return (var);
841 841 }
842 842
843 843 mutex_enter(&qlge->hw_mutex);
844 844 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
845 845 var = ql_read_reg(qlge, REG_STATUS);
846 846 mutex_exit(&qlge->hw_mutex);
847 847
848 848 return (var);
849 849 }
850 850
851 851 /*
852 852 * Disable a completion interrupt
853 853 */
854 854 void
855 855 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
856 856 {
857 857 struct intr_ctx *ctx;
858 858
859 859 ctx = qlge->intr_ctx + intr;
860 860 QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
861 861 __func__, qlge->instance, intr, ctx->irq_cnt));
862 862 /*
863 863 * HW disables for us if we're MSIX multi interrupts and
864 864 * it's not the default (zeroeth) interrupt.
865 865 */
866 866 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
867 867 return;
868 868
869 869 if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
870 870 mutex_enter(&qlge->hw_mutex);
871 871 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
872 872 mutex_exit(&qlge->hw_mutex);
873 873 }
874 874 atomic_inc_32(&ctx->irq_cnt);
875 875 }
876 876
877 877 /*
878 878 * Enable all completion interrupts
879 879 */
880 880 static void
881 881 ql_enable_all_completion_interrupts(qlge_t *qlge)
882 882 {
883 883 int i;
884 884 uint32_t value = 1;
885 885
886 886 for (i = 0; i < qlge->intr_cnt; i++) {
887 887 /*
888 888 * Set the count to 1 for Legacy / MSI interrupts or for the
889 889 * default interrupt (0)
890 890 */
891 891 if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
892 892 ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
893 893 }
894 894 ql_enable_completion_interrupt(qlge, i);
895 895 }
896 896 }
897 897
898 898 /*
899 899 * Disable all completion interrupts
900 900 */
901 901 static void
902 902 ql_disable_all_completion_interrupts(qlge_t *qlge)
903 903 {
904 904 int i;
905 905 uint32_t value = 0;
906 906
907 907 for (i = 0; i < qlge->intr_cnt; i++) {
908 908
909 909 /*
910 910 * Set the count to 0 for Legacy / MSI interrupts or for the
911 911 * default interrupt (0)
912 912 */
913 913 if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
914 914 ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
915 915
916 916 ql_disable_completion_interrupt(qlge, i);
917 917 }
918 918 }
919 919
920 920 /*
921 921 * Update small buffer queue producer index
922 922 */
923 923 static void
924 924 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
925 925 {
926 926 /* Update the buffer producer index */
927 927 QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
928 928 rx_ring->sbq_prod_idx));
929 929 ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
930 930 rx_ring->sbq_prod_idx);
931 931 }
932 932
933 933 /*
934 934 * Update large buffer queue producer index
935 935 */
936 936 static void
937 937 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
938 938 {
939 939 /* Update the buffer producer index */
940 940 QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
941 941 rx_ring->lbq_prod_idx));
942 942 ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
943 943 rx_ring->lbq_prod_idx);
944 944 }
945 945
946 946 /*
947 947 * Adds a small buffer descriptor to the end of its in-use list;
948 948 * assumes sbq_lock is already held
949 949 */
950 950 static void
951 951 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
952 952 struct bq_desc *sbq_desc)
953 953 {
954 954 uint32_t inuse_idx = rx_ring->sbq_use_tail;
955 955
956 956 rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
957 957 inuse_idx++;
958 958 if (inuse_idx >= rx_ring->sbq_len)
959 959 inuse_idx = 0;
960 960 rx_ring->sbq_use_tail = inuse_idx;
961 961 atomic_inc_32(&rx_ring->sbuf_in_use_count);
962 962 ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
963 963 }
964 964
965 965 /*
966 966 * Get a small buffer descriptor from its in use list
967 967 */
968 968 static struct bq_desc *
969 969 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
970 970 {
971 971 struct bq_desc *sbq_desc = NULL;
972 972 uint32_t inuse_idx;
973 973
974 974 /* Pick from head of in use list */
975 975 inuse_idx = rx_ring->sbq_use_head;
976 976 sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
977 977 rx_ring->sbuf_in_use[inuse_idx] = NULL;
978 978
979 979 if (sbq_desc != NULL) {
980 980 inuse_idx++;
981 981 if (inuse_idx >= rx_ring->sbq_len)
982 982 inuse_idx = 0;
983 983 rx_ring->sbq_use_head = inuse_idx;
984 984 atomic_dec_32(&rx_ring->sbuf_in_use_count);
985 985 atomic_inc_32(&rx_ring->rx_indicate);
986 986 sbq_desc->upl_inuse = 1;
987 987 /* if mp is NULL */
988 988 if (sbq_desc->mp == NULL) {
989 989 /* try to remap mp again */
990 990 sbq_desc->mp =
991 991 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
992 992 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
993 993 }
994 994 }
995 995
996 996 return (sbq_desc);
997 997 }
998 998
999 999 /*
1000 1000 * Add a small buffer descriptor to its free list
1001 1001 */
1002 1002 static void
1003 1003 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1004 1004 struct bq_desc *sbq_desc)
1005 1005 {
1006 1006 uint32_t free_idx;
1007 1007
1008 1008 /* Add to the end of free list */
1009 1009 free_idx = rx_ring->sbq_free_tail;
1010 1010 rx_ring->sbuf_free[free_idx] = sbq_desc;
1011 1011 ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1012 1012 free_idx++;
1013 1013 if (free_idx >= rx_ring->sbq_len)
1014 1014 free_idx = 0;
1015 1015 rx_ring->sbq_free_tail = free_idx;
1016 1016 atomic_inc_32(&rx_ring->sbuf_free_count);
1017 1017 }
1018 1018
1019 1019 /*
1020 1020 * Get a small buffer descriptor from its free list
1021 1021 */
1022 1022 static struct bq_desc *
1023 1023 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1024 1024 {
1025 1025 struct bq_desc *sbq_desc;
1026 1026 uint32_t free_idx;
1027 1027
1028 1028 free_idx = rx_ring->sbq_free_head;
1029 1029 /* Pick from top of free list */
1030 1030 sbq_desc = rx_ring->sbuf_free[free_idx];
1031 1031 rx_ring->sbuf_free[free_idx] = NULL;
1032 1032 if (sbq_desc != NULL) {
1033 1033 free_idx++;
1034 1034 if (free_idx >= rx_ring->sbq_len)
1035 1035 free_idx = 0;
1036 1036 rx_ring->sbq_free_head = free_idx;
1037 1037 atomic_dec_32(&rx_ring->sbuf_free_count);
1038 1038 }
1039 1039 return (sbq_desc);
1040 1040 }
1041 1041
1042 1042 /*
1043 1043 * Add a large buffer descriptor to its in use list
1044 1044 */
1045 1045 static void
1046 1046 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1047 1047 struct bq_desc *lbq_desc)
1048 1048 {
1049 1049 uint32_t inuse_idx;
1050 1050
1051 1051 inuse_idx = rx_ring->lbq_use_tail;
1052 1052
1053 1053 rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1054 1054 inuse_idx++;
1055 1055 if (inuse_idx >= rx_ring->lbq_len)
1056 1056 inuse_idx = 0;
1057 1057 rx_ring->lbq_use_tail = inuse_idx;
1058 1058 atomic_inc_32(&rx_ring->lbuf_in_use_count);
1059 1059 }
1060 1060
1061 1061 /*
1062 1062 * Get a large buffer descriptor from in use list
1063 1063 */
1064 1064 static struct bq_desc *
1065 1065 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1066 1066 {
1067 1067 struct bq_desc *lbq_desc;
1068 1068 uint32_t inuse_idx;
1069 1069
1070 1070 /* Pick from head of in use list */
1071 1071 inuse_idx = rx_ring->lbq_use_head;
1072 1072 lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1073 1073 rx_ring->lbuf_in_use[inuse_idx] = NULL;
1074 1074
1075 1075 if (lbq_desc != NULL) {
1076 1076 inuse_idx++;
1077 1077 if (inuse_idx >= rx_ring->lbq_len)
1078 1078 inuse_idx = 0;
1079 1079 rx_ring->lbq_use_head = inuse_idx;
1080 1080 atomic_dec_32(&rx_ring->lbuf_in_use_count);
1081 1081 atomic_inc_32(&rx_ring->rx_indicate);
1082 1082 lbq_desc->upl_inuse = 1;
1083 1083
1084 1084 /* if mp is NULL */
1085 1085 if (lbq_desc->mp == NULL) {
1086 1086 /* try to remap mp again */
1087 1087 lbq_desc->mp =
1088 1088 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1089 1089 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1090 1090 }
1091 1091 }
1092 1092 return (lbq_desc);
1093 1093 }
1094 1094
1095 1095 /*
1096 1096 * Add a large buffer descriptor to free list
1097 1097 */
1098 1098 static void
1099 1099 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1100 1100 struct bq_desc *lbq_desc)
1101 1101 {
1102 1102 uint32_t free_idx;
1103 1103
1104 1104 /* Add to the end of free list */
1105 1105 free_idx = rx_ring->lbq_free_tail;
1106 1106 rx_ring->lbuf_free[free_idx] = lbq_desc;
1107 1107 free_idx++;
1108 1108 if (free_idx >= rx_ring->lbq_len)
1109 1109 free_idx = 0;
1110 1110 rx_ring->lbq_free_tail = free_idx;
1111 1111 atomic_inc_32(&rx_ring->lbuf_free_count);
1112 1112 ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1113 1113 }
1114 1114
1115 1115 /*
1116 1116 * Get a large buffer descriptor from its free list
1117 1117 */
1118 1118 static struct bq_desc *
1119 1119 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1120 1120 {
1121 1121 struct bq_desc *lbq_desc;
1122 1122 uint32_t free_idx;
1123 1123
1124 1124 free_idx = rx_ring->lbq_free_head;
1125 1125 /* Pick from head of free list */
1126 1126 lbq_desc = rx_ring->lbuf_free[free_idx];
1127 1127 rx_ring->lbuf_free[free_idx] = NULL;
1128 1128
1129 1129 if (lbq_desc != NULL) {
1130 1130 free_idx++;
1131 1131 if (free_idx >= rx_ring->lbq_len)
1132 1132 free_idx = 0;
1133 1133 rx_ring->lbq_free_head = free_idx;
1134 1134 atomic_dec_32(&rx_ring->lbuf_free_count);
1135 1135 }
1136 1136 return (lbq_desc);
1137 1137 }
1138 1138
1139 1139 /*
1140 1140 * Return a small buffer to its free list and re-arm the small buffer queue
1141 1141 */
1142 1142 static void
1143 1143 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1144 1144 {
1145 1145 struct rx_ring *rx_ring = sbq_desc->rx_ring;
1146 1146 uint64_t *sbq_entry;
1147 1147 qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1148 1148 /*
1149 1149 * Sync access
1150 1150 */
1151 1151 mutex_enter(&rx_ring->sbq_lock);
1152 1152
1153 1153 sbq_desc->upl_inuse = 0;
1154 1154
1155 1155 /*
1156 1156 * If we are freeing the buffers as a result of adapter unload, get out
1157 1157 */
1158 1158 if ((sbq_desc->free_buf != NULL) ||
1159 1159 (qlge->mac_flags == QL_MAC_DETACH)) {
1160 1160 if (sbq_desc->free_buf == NULL)
1161 1161 atomic_dec_32(&rx_ring->rx_indicate);
1162 1162 mutex_exit(&rx_ring->sbq_lock);
1163 1163 return;
1164 1164 }
1165 1165 #ifdef QLGE_LOAD_UNLOAD
1166 1166 if (rx_ring->rx_indicate == 0)
1167 1167 cmn_err(CE_WARN, "sbq: indicate wrong");
1168 1168 #endif
1169 1169 #ifdef QLGE_TRACK_BUFFER_USAGE
1170 1170 uint32_t sb_consumer_idx;
1171 1171 uint32_t sb_producer_idx;
1172 1172 uint32_t num_free_buffers;
1173 1173 uint32_t temp;
1174 1174
1175 1175 temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1176 1176 sb_producer_idx = temp & 0x0000ffff;
1177 1177 sb_consumer_idx = (temp >> 16);
1178 1178
1179 1179 if (sb_consumer_idx > sb_producer_idx)
1180 1180 num_free_buffers = NUM_SMALL_BUFFERS -
1181 1181 (sb_consumer_idx - sb_producer_idx);
1182 1182 else
1183 1183 num_free_buffers = sb_producer_idx - sb_consumer_idx;
1184 1184
1185 1185 if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1186 1186 qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1187 1187
1188 1188 #endif
1189 1189
1190 1190 #ifdef QLGE_LOAD_UNLOAD
1191 1191 if (rx_ring->rx_indicate > 0xFF000000)
1192 1192 cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1193 1193 " sbq_desc index %d.",
1194 1194 rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1195 1195 sbq_desc->index);
1196 1196 #endif
1197 1197 if (alloc_memory) {
1198 1198 sbq_desc->mp =
1199 1199 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1200 1200 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1201 1201 if (sbq_desc->mp == NULL) {
1202 1202 rx_ring->rx_failed_sbq_allocs++;
1203 1203 }
1204 1204 }
1205 1205
1206 1206 /* Got the packet back from the stack; decrement the rx_indicate count */
1207 1207 atomic_dec_32(&rx_ring->rx_indicate);
1208 1208
1209 1209 ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1210 1210
1211 1211 /* Rearm if possible */
1212 1212 if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1213 1213 (qlge->mac_flags == QL_MAC_STARTED)) {
1214 1214 sbq_entry = rx_ring->sbq_dma.vaddr;
1215 1215 sbq_entry += rx_ring->sbq_prod_idx;
1216 1216
1217 1217 while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1218 1218 /* Get first one from free list */
1219 1219 sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1220 1220
1221 1221 *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1222 1222 sbq_entry++;
1223 1223 rx_ring->sbq_prod_idx++;
1224 1224 if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1225 1225 rx_ring->sbq_prod_idx = 0;
1226 1226 sbq_entry = rx_ring->sbq_dma.vaddr;
1227 1227 }
1228 1228 /* Add to end of in use list */
1229 1229 ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1230 1230 }
1231 1231
1232 1232 /* Update small buffer queue producer index */
1233 1233 ql_update_sbq_prod_idx(qlge, rx_ring);
1234 1234 }
1235 1235
1236 1236 mutex_exit(&rx_ring->sbq_lock);
1237 1237 QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1238 1238 __func__, qlge->instance, rx_ring->sbuf_free_count));
1239 1239 }
1240 1240
1241 1241 /*
1242 1242 * rx recycle callback function
1243 1243 */
1244 1244 static void
1245 1245 ql_release_to_sbuf_free_list(caddr_t p)
1246 1246 {
1247 1247 struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1248 1248
1249 1249 if (sbq_desc == NULL)
1250 1250 return;
1251 1251 ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1252 1252 }
1253 1253
1254 1254 /*
1255 1255 * Return a large buffer to its free list and re-arm the large buffer queue
1256 1256 */
1257 1257 static void
1258 1258 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1259 1259 {
1260 1260 struct rx_ring *rx_ring = lbq_desc->rx_ring;
1261 1261 uint64_t *lbq_entry;
1262 1262 qlge_t *qlge = rx_ring->qlge;
1263 1263
1264 1264 /* Sync access */
1265 1265 mutex_enter(&rx_ring->lbq_lock);
1266 1266
1267 1267 lbq_desc->upl_inuse = 0;
1268 1268 /*
1269 1269 * If we are freeing the buffers as a result of adapter unload, get out
1270 1270 */
1271 1271 if ((lbq_desc->free_buf != NULL) ||
1272 1272 (qlge->mac_flags == QL_MAC_DETACH)) {
1273 1273 if (lbq_desc->free_buf == NULL)
1274 1274 atomic_dec_32(&rx_ring->rx_indicate);
1275 1275 mutex_exit(&rx_ring->lbq_lock);
1276 1276 return;
1277 1277 }
1278 1278 #ifdef QLGE_LOAD_UNLOAD
1279 1279 if (rx_ring->rx_indicate == 0)
1280 1280 cmn_err(CE_WARN, "lbq: indicate wrong");
1281 1281 #endif
1282 1282 #ifdef QLGE_TRACK_BUFFER_USAGE
1283 1283 uint32_t lb_consumer_idx;
1284 1284 uint32_t lb_producer_idx;
1285 1285 uint32_t num_free_buffers;
1286 1286 uint32_t temp;
1287 1287
1288 1288 temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1289 1289
1290 1290 lb_producer_idx = temp & 0x0000ffff;
1291 1291 lb_consumer_idx = (temp >> 16);
1292 1292
1293 1293 if (lb_consumer_idx > lb_producer_idx)
1294 1294 num_free_buffers = NUM_LARGE_BUFFERS -
1295 1295 (lb_consumer_idx - lb_producer_idx);
1296 1296 else
1297 1297 num_free_buffers = lb_producer_idx - lb_consumer_idx;
1298 1298
1299 1299 if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1300 1300 qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1301 1301 }
1302 1302 #endif
1303 1303
1304 1304 #ifdef QLGE_LOAD_UNLOAD
1305 1305 if (rx_ring->rx_indicate > 0xFF000000)
1306 1306 cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1307 1307 "lbq_desc index %d",
1308 1308 rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1309 1309 lbq_desc->index);
1310 1310 #endif
1311 1311 if (alloc_memory) {
1312 1312 lbq_desc->mp =
1313 1313 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1314 1314 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1315 1315 if (lbq_desc->mp == NULL) {
1316 1316 rx_ring->rx_failed_lbq_allocs++;
1317 1317 }
1318 1318 }
1319 1319
1320 1320 /* Got the packet back from the stack; decrement the rx_indicate count */
1321 1321 atomic_dec_32(&rx_ring->rx_indicate);
1322 1322
1323 1323 ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1324 1324
1325 1325 /* Rearm if possible */
1326 1326 if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1327 1327 (qlge->mac_flags == QL_MAC_STARTED)) {
1328 1328 lbq_entry = rx_ring->lbq_dma.vaddr;
1329 1329 lbq_entry += rx_ring->lbq_prod_idx;
1330 1330 while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1331 1331 /* Get first one from free list */
1332 1332 lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1333 1333
1334 1334 *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1335 1335 lbq_entry++;
1336 1336 rx_ring->lbq_prod_idx++;
1337 1337 if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1338 1338 rx_ring->lbq_prod_idx = 0;
1339 1339 lbq_entry = rx_ring->lbq_dma.vaddr;
1340 1340 }
1341 1341
1342 1342 /* Add to end of in use list */
1343 1343 ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1344 1344 }
1345 1345
1346 1346 /* Update large buffer queue producer index */
1347 1347 ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1348 1348 }
1349 1349
1350 1350 mutex_exit(&rx_ring->lbq_lock);
1351 1351 QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1352 1352 __func__, rx_ring->lbuf_free_count));
1353 1353 }
1354 1354 /*
1355 1355 * rx recycle call back function
1356 1356 * rx recycle callback function
1357 1357 static void
1358 1358 ql_release_to_lbuf_free_list(caddr_t p)
1359 1359 {
1360 1360 struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1361 1361
1362 1362 if (lbq_desc == NULL)
1363 1363 return;
1364 1364 ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1365 1365 }
1366 1366
1367 1367 /*
1368 1368 * free small buffer queue buffers
1369 1369 */
1370 1370 static void
1371 1371 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1372 1372 {
1373 1373 struct bq_desc *sbq_desc;
1374 1374 uint32_t i;
1375 1375 uint32_t j = rx_ring->sbq_free_head;
1376 1376 int force_cnt = 0;
1377 1377
1378 1378 for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1379 1379 sbq_desc = rx_ring->sbuf_free[j];
1380 1380 sbq_desc->free_buf = 1;
1381 1381 j++;
1382 1382 if (j >= rx_ring->sbq_len) {
1383 1383 j = 0;
1384 1384 }
1385 1385 if (sbq_desc->mp != NULL) {
1386 1386 freemsg(sbq_desc->mp);
1387 1387 sbq_desc->mp = NULL;
1388 1388 }
1389 1389 }
1390 1390 rx_ring->sbuf_free_count = 0;
1391 1391
1392 1392 j = rx_ring->sbq_use_head;
1393 1393 for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1394 1394 sbq_desc = rx_ring->sbuf_in_use[j];
1395 1395 sbq_desc->free_buf = 1;
1396 1396 j++;
1397 1397 if (j >= rx_ring->sbq_len) {
1398 1398 j = 0;
1399 1399 }
1400 1400 if (sbq_desc->mp != NULL) {
1401 1401 freemsg(sbq_desc->mp);
1402 1402 sbq_desc->mp = NULL;
1403 1403 }
1404 1404 }
1405 1405 rx_ring->sbuf_in_use_count = 0;
1406 1406
1407 1407 sbq_desc = &rx_ring->sbq_desc[0];
1408 1408 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1409 1409 /*
1410 1410 * Set flag so that the callback does not allocate a new buffer
1411 1411 */
1412 1412 sbq_desc->free_buf = 1;
1413 1413 if (sbq_desc->upl_inuse != 0) {
1414 1414 force_cnt++;
1415 1415 }
1416 1416 if (sbq_desc->bd_dma.dma_handle != NULL) {
1417 1417 ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1418 1418 &sbq_desc->bd_dma.acc_handle);
1419 1419 sbq_desc->bd_dma.dma_handle = NULL;
1420 1420 sbq_desc->bd_dma.acc_handle = NULL;
1421 1421 }
1422 1422 }
1423 1423 #ifdef QLGE_LOAD_UNLOAD
1424 1424 cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1425 1425 rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1426 1426 #endif
1427 1427 if (rx_ring->sbuf_in_use != NULL) {
1428 1428 kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1429 1429 sizeof (struct bq_desc *)));
1430 1430 rx_ring->sbuf_in_use = NULL;
1431 1431 }
1432 1432
1433 1433 if (rx_ring->sbuf_free != NULL) {
1434 1434 kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1435 1435 sizeof (struct bq_desc *)));
1436 1436 rx_ring->sbuf_free = NULL;
1437 1437 }
1438 1438 }
1439 1439
1440 1440 /* Allocate small buffers */
1441 1441 static int
1442 1442 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1443 1443 {
1444 1444 struct bq_desc *sbq_desc;
1445 1445 int i;
1446 1446 ddi_dma_cookie_t dma_cookie;
1447 1447
1448 1448 rx_ring->sbq_use_head = 0;
1449 1449 rx_ring->sbq_use_tail = 0;
1450 1450 rx_ring->sbuf_in_use_count = 0;
1451 1451 rx_ring->sbq_free_head = 0;
1452 1452 rx_ring->sbq_free_tail = 0;
1453 1453 rx_ring->sbuf_free_count = 0;
1454 1454 rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1455 1455 sizeof (struct bq_desc *), KM_NOSLEEP);
1456 1456 if (rx_ring->sbuf_free == NULL) {
1457 1457 cmn_err(CE_WARN,
1458 1458 "!%s: sbuf_free_list alloc: failed",
1459 1459 __func__);
1460 1460 goto alloc_sbuf_err;
1461 1461 }
1462 1462
1463 1463 rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1464 1464 sizeof (struct bq_desc *), KM_NOSLEEP);
1465 1465 if (rx_ring->sbuf_in_use == NULL) {
1466 1466 cmn_err(CE_WARN,
1467 1467 "!%s: sbuf_inuse_list alloc: failed",
1468 1468 __func__);
1469 1469 goto alloc_sbuf_err;
1470 1470 }
1471 1471
1472 1472 sbq_desc = &rx_ring->sbq_desc[0];
1473 1473
1474 1474 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1475 1475 /* Allocate buffer */
1476 1476 if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1477 1477 &ql_buf_acc_attr,
1478 1478 DDI_DMA_READ | DDI_DMA_STREAMING,
1479 1479 &sbq_desc->bd_dma.acc_handle,
1480 1480 (size_t)rx_ring->sbq_buf_size, /* mem size */
1481 1481 (size_t)0, /* default alignment */
1482 1482 (caddr_t *)&sbq_desc->bd_dma.vaddr,
1483 1483 &dma_cookie) != 0) {
1484 1484 cmn_err(CE_WARN,
1485 1485 "!%s: ddi_dma_alloc_handle: failed",
1486 1486 __func__);
1487 1487 goto alloc_sbuf_err;
1488 1488 }
1489 1489
1490 1490 /* Set context for Return buffer callback */
1491 1491 sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1492 1492 sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1493 1493 sbq_desc->rx_recycle.free_arg = (caddr_t)sbq_desc;
1494 1494 sbq_desc->rx_ring = rx_ring;
1495 1495 sbq_desc->upl_inuse = 0;
1496 1496 sbq_desc->free_buf = 0;
1497 1497
1498 1498 sbq_desc->mp =
1499 1499 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1500 1500 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1501 1501 if (sbq_desc->mp == NULL) {
1502 1502 cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1503 1503 goto alloc_sbuf_err;
1504 1504 }
1505 1505 ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1506 1506 }
1507 1507
1508 1508 return (DDI_SUCCESS);
1509 1509
1510 1510 alloc_sbuf_err:
1511 1511 ql_free_sbq_buffers(rx_ring);
1512 1512 return (DDI_FAILURE);
1513 1513 }
1514 1514
1515 1515 static void
1516 1516 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1517 1517 {
1518 1518 struct bq_desc *lbq_desc;
1519 1519 uint32_t i, j;
1520 1520 int force_cnt = 0;
1521 1521
1522 1522 j = rx_ring->lbq_free_head;
1523 1523 for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1524 1524 lbq_desc = rx_ring->lbuf_free[j];
1525 1525 lbq_desc->free_buf = 1;
1526 1526 j++;
1527 1527 if (j >= rx_ring->lbq_len)
1528 1528 j = 0;
1529 1529 if (lbq_desc->mp != NULL) {
1530 1530 freemsg(lbq_desc->mp);
1531 1531 lbq_desc->mp = NULL;
1532 1532 }
1533 1533 }
1534 1534 rx_ring->lbuf_free_count = 0;
1535 1535
1536 1536 j = rx_ring->lbq_use_head;
1537 1537 for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1538 1538 lbq_desc = rx_ring->lbuf_in_use[j];
1539 1539 lbq_desc->free_buf = 1;
1540 1540 j++;
1541 1541 if (j >= rx_ring->lbq_len) {
1542 1542 j = 0;
1543 1543 }
1544 1544 if (lbq_desc->mp != NULL) {
1545 1545 freemsg(lbq_desc->mp);
1546 1546 lbq_desc->mp = NULL;
1547 1547 }
1548 1548 }
1549 1549 rx_ring->lbuf_in_use_count = 0;
1550 1550
1551 1551 lbq_desc = &rx_ring->lbq_desc[0];
1552 1552 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1553 1553 /* Set flag so that callback will not allocate a new buffer */
1554 1554 lbq_desc->free_buf = 1;
1555 1555 if (lbq_desc->upl_inuse != 0) {
1556 1556 force_cnt++;
1557 1557 }
1558 1558 if (lbq_desc->bd_dma.dma_handle != NULL) {
1559 1559 ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1560 1560 &lbq_desc->bd_dma.acc_handle);
1561 1561 lbq_desc->bd_dma.dma_handle = NULL;
1562 1562 lbq_desc->bd_dma.acc_handle = NULL;
1563 1563 }
1564 1564 }
1565 1565 #ifdef QLGE_LOAD_UNLOAD
1566 1566 if (force_cnt) {
1567 1567 cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1568 1568 rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1569 1569 force_cnt);
1570 1570 }
1571 1571 #endif
1572 1572 if (rx_ring->lbuf_in_use != NULL) {
1573 1573 kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1574 1574 sizeof (struct bq_desc *)));
1575 1575 rx_ring->lbuf_in_use = NULL;
1576 1576 }
1577 1577
1578 1578 if (rx_ring->lbuf_free != NULL) {
1579 1579 kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1580 1580 sizeof (struct bq_desc *)));
1581 1581 rx_ring->lbuf_free = NULL;
1582 1582 }
1583 1583 }
1584 1584
1585 1585 /* Allocate large buffers */
1586 1586 static int
1587 1587 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1588 1588 {
1589 1589 struct bq_desc *lbq_desc;
1590 1590 ddi_dma_cookie_t dma_cookie;
1591 1591 int i;
1592 1592 uint32_t lbq_buf_size;
1593 1593
1594 1594 rx_ring->lbq_use_head = 0;
1595 1595 rx_ring->lbq_use_tail = 0;
1596 1596 rx_ring->lbuf_in_use_count = 0;
1597 1597 rx_ring->lbq_free_head = 0;
1598 1598 rx_ring->lbq_free_tail = 0;
1599 1599 rx_ring->lbuf_free_count = 0;
1600 1600 rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1601 1601 sizeof (struct bq_desc *), KM_NOSLEEP);
1602 1602 if (rx_ring->lbuf_free == NULL) {
1603 1603 cmn_err(CE_WARN,
1604 1604 "!%s: lbuf_free_list alloc: failed",
1605 1605 __func__);
1606 1606 goto alloc_lbuf_err;
1607 1607 }
1608 1608
1609 1609 rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1610 1610 sizeof (struct bq_desc *), KM_NOSLEEP);
1611 1611
1612 1612 if (rx_ring->lbuf_in_use == NULL) {
1613 1613 cmn_err(CE_WARN,
1614 1614 "!%s: lbuf_inuse_list alloc: failed",
1615 1615 __func__);
1616 1616 goto alloc_lbuf_err;
1617 1617 }
1618 1618
1619 1619 lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1620 1620 LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1621 1621
1622 1622 lbq_desc = &rx_ring->lbq_desc[0];
1623 1623 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1624 1624 rx_ring->lbq_buf_size = lbq_buf_size;
1625 1625 /* Allocate buffer */
1626 1626 if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1627 1627 &ql_buf_acc_attr,
1628 1628 DDI_DMA_READ | DDI_DMA_STREAMING,
1629 1629 &lbq_desc->bd_dma.acc_handle,
1630 1630 (size_t)rx_ring->lbq_buf_size, /* mem size */
1631 1631 (size_t)0, /* default alignment */
1632 1632 (caddr_t *)&lbq_desc->bd_dma.vaddr,
1633 1633 &dma_cookie) != 0) {
1634 1634 cmn_err(CE_WARN,
1635 1635 "!%s: ddi_dma_alloc_handle: failed",
1636 1636 __func__);
1637 1637 goto alloc_lbuf_err;
1638 1638 }
1639 1639
1640 1640 /* Set context for Return buffer callback */
1641 1641 lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1642 1642 lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1643 1643 lbq_desc->rx_recycle.free_arg = (caddr_t)lbq_desc;
1644 1644 lbq_desc->rx_ring = rx_ring;
1645 1645 lbq_desc->upl_inuse = 0;
1646 1646 lbq_desc->free_buf = 0;
1647 1647
1648 1648 lbq_desc->mp =
1649 1649 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1650 1650 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1651 1651 if (lbq_desc->mp == NULL) {
1652 1652 cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1653 1653 goto alloc_lbuf_err;
1654 1654 }
1655 1655 ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1656 1656 } /* For all large buffers */
1657 1657
1658 1658 return (DDI_SUCCESS);
1659 1659
1660 1660 alloc_lbuf_err:
1661 1661 ql_free_lbq_buffers(rx_ring);
1662 1662 return (DDI_FAILURE);
1663 1663 }
1664 1664
1665 1665 /*
1666 1666 * Free rx buffers
1667 1667 */
1668 1668 static void
1669 1669 ql_free_rx_buffers(qlge_t *qlge)
1670 1670 {
1671 1671 int i;
1672 1672 struct rx_ring *rx_ring;
1673 1673
1674 1674 for (i = 0; i < qlge->rx_ring_count; i++) {
1675 1675 rx_ring = &qlge->rx_ring[i];
1676 1676 if (rx_ring->type != TX_Q) {
1677 1677 ql_free_lbq_buffers(rx_ring);
1678 1678 ql_free_sbq_buffers(rx_ring);
1679 1679 }
1680 1680 }
1681 1681 }
1682 1682
1683 1683 /*
1684 1684 * Allocate rx buffers
1685 1685 */
1686 1686 static int
1687 1687 ql_alloc_rx_buffers(qlge_t *qlge)
1688 1688 {
1689 1689 struct rx_ring *rx_ring;
1690 1690 int i;
1691 1691
1692 1692 for (i = 0; i < qlge->rx_ring_count; i++) {
1693 1693 rx_ring = &qlge->rx_ring[i];
1694 1694 if (rx_ring->type != TX_Q) {
1695 1695 if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1696 1696 goto alloc_err;
1697 1697 if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1698 1698 goto alloc_err;
1699 1699 }
1700 1700 }
1701 1701 #ifdef QLGE_TRACK_BUFFER_USAGE
1702 1702 for (i = 0; i < qlge->rx_ring_count; i++) {
1703 1703 if (qlge->rx_ring[i].type == RX_Q) {
1704 1704 qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1705 1705 qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1706 1706 }
1707 1707 qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1708 1708 }
1709 1709 #endif
1710 1710 return (DDI_SUCCESS);
1711 1711
1712 1712 alloc_err:
1713 1713 ql_free_rx_buffers(qlge);
1714 1714 return (DDI_FAILURE);
1715 1715 }
1716 1716
1717 1717 /*
1718 1718 * Initialize large buffer queue ring
1719 1719 */
1720 1720 static void
1721 1721 ql_init_lbq_ring(struct rx_ring *rx_ring)
1722 1722 {
1723 1723 uint16_t i;
1724 1724 struct bq_desc *lbq_desc;
1725 1725
1726 1726 bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1727 1727 for (i = 0; i < rx_ring->lbq_len; i++) {
1728 1728 lbq_desc = &rx_ring->lbq_desc[i];
1729 1729 lbq_desc->index = i;
1730 1730 }
1731 1731 }
1732 1732
1733 1733 /*
1734 1734 * Initialize small buffer queue ring
1735 1735 */
1736 1736 static void
1737 1737 ql_init_sbq_ring(struct rx_ring *rx_ring)
1738 1738 {
1739 1739 uint16_t i;
1740 1740 struct bq_desc *sbq_desc;
1741 1741
1742 1742 bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1743 1743 for (i = 0; i < rx_ring->sbq_len; i++) {
1744 1744 sbq_desc = &rx_ring->sbq_desc[i];
1745 1745 sbq_desc->index = i;
1746 1746 }
1747 1747 }
1748 1748
1749 1749 /*
1750 1750  * Calculate the TCP/UDP pseudo-header checksum when the hardware cannot do it
1751 1751 */
1752 1752 static void
1753 1753 ql_pseudo_cksum(uint8_t *buf)
1754 1754 {
1755 1755 uint32_t cksum;
1756 1756 uint16_t iphl;
1757 1757 uint16_t proto;
1758 1758
1759 1759 iphl = (uint16_t)(4 * (buf[0] & 0xF));
1760 1760 cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1761 1761 cksum += proto = buf[9];
1762 1762 cksum += (((uint16_t)buf[12])<<8) + buf[13];
1763 1763 cksum += (((uint16_t)buf[14])<<8) + buf[15];
1764 1764 cksum += (((uint16_t)buf[16])<<8) + buf[17];
1765 1765 cksum += (((uint16_t)buf[18])<<8) + buf[19];
1766 1766 cksum = (cksum>>16) + (cksum & 0xFFFF);
1767 1767 cksum = (cksum>>16) + (cksum & 0xFFFF);
1768 1768
1769 1769 /*
1770 1770 * Point it to the TCP/UDP header, and
1771 1771 * update the checksum field.
1772 1772 */
1773 1773 buf += iphl + ((proto == IPPROTO_TCP) ?
1774 1774 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1775 1775
1776 1776 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1777 1777
1778 1778 }
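
/*
 * Worked example (editorial illustration, not driver code): for an IPv4
 * header whose IHL nibble is 5, iphl = 4 * 5 = 20 bytes.  If the
 * total-length field in buf[2..3] is 48, the TCP/UDP length folded into
 * the sum is 48 - 20 = 28.  The protocol byte (buf[9]) and the source
 * and destination addresses (buf[12..19]) are then added as big-endian
 * 16-bit words, the carries are folded back twice, and the result is
 * stored at the TCP or UDP checksum offset, presumably so the hardware
 * can complete the full checksum over the segment.
 */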
1779 1779
1780 1780 /*
1781 1781 * Transmit an incoming packet.
1782 1782 */
1783 1783 mblk_t *
1784 1784 ql_ring_tx(void *arg, mblk_t *mp)
1785 1785 {
1786 1786 struct tx_ring *tx_ring = (struct tx_ring *)arg;
1787 1787 qlge_t *qlge = tx_ring->qlge;
1788 1788 mblk_t *next;
1789 1789 int rval;
1790 1790 uint32_t tx_count = 0;
1791 1791
1792 1792 if (qlge->port_link_state == LS_DOWN) {
1793 1793 		/* cannot send packets while the link is down */
1794 1794 mblk_t *tp;
1795 1795
1796 1796 while (mp != NULL) {
1797 1797 tp = mp->b_next;
1798 1798 mp->b_next = NULL;
1799 1799 freemsg(mp);
1800 1800 mp = tp;
1801 1801 }
1802 1802 goto exit;
1803 1803 }
1804 1804
1805 1805 mutex_enter(&tx_ring->tx_lock);
1806 1806 	/* if the MAC is not started, the driver is not ready and cannot send */
1807 1807 if (tx_ring->mac_flags != QL_MAC_STARTED) {
1808 1808 cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
1809 1809 " return packets",
1810 1810 __func__, qlge->instance, tx_ring->mac_flags);
1811 1811 mutex_exit(&tx_ring->tx_lock);
1812 1812 goto exit;
1813 1813 }
1814 1814
1815 1815 /* we must try to send all */
1816 1816 while (mp != NULL) {
1817 1817 /*
1818 1818 		 * if the number of available slots drops to the stop
1819 1819 		 * threshold or below, stop queuing and return the rest
1820 1820 */
1821 1821 if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1822 1822 tx_ring->queue_stopped = 1;
1823 1823 rval = DDI_FAILURE;
1824 1824 #ifdef QLGE_LOAD_UNLOAD
1825 1825 cmn_err(CE_WARN, "%s(%d) no resources",
1826 1826 __func__, qlge->instance);
1827 1827 #endif
1828 1828 tx_ring->defer++;
1829 1829 /*
1830 1830 			 * If we return the mblk chain to the caller, we are expected to call
1831 1831 * mac_tx_ring_update() when resources are available
1832 1832 */
1833 1833 break;
1834 1834 }
1835 1835
1836 1836 next = mp->b_next;
1837 1837 mp->b_next = NULL;
1838 1838
1839 1839 rval = ql_send_common(tx_ring, mp);
1840 1840
1841 1841 if (rval != DDI_SUCCESS) {
1842 1842 mp->b_next = next;
1843 1843 break;
1844 1844 }
1845 1845 tx_count++;
1846 1846 mp = next;
1847 1847 }
1848 1848
1849 1849 /*
1850 1850 * After all msg blocks are mapped or copied to tx buffer,
1851 1851 * trigger the hardware to send!
1852 1852 */
1853 1853 if (tx_count > 0) {
1854 1854 ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1855 1855 tx_ring->prod_idx);
1856 1856 }
1857 1857
1858 1858 mutex_exit(&tx_ring->tx_lock);
1859 1859 exit:
1860 1860 return (mp);
1861 1861 }
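
/*
 * Flow-control note: when the free count drops to TX_STOP_THRESHOLD or
 * below, the loop above sets queue_stopped and returns the unsent part
 * of the chain.  Under GLDv3 a non-NULL return tells the framework the
 * ring is out of resources; transmission resumes from
 * ql_clean_outbound_rx_ring(), which clears queue_stopped and calls
 * RESUME_TX() once completions have freed enough descriptors.
 */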
1862 1862
1863 1863
1864 1864 /*
1865 1865 * This function builds an mblk list for the given inbound
1866 1866 * completion.
1867 1867 */
1868 1868
1869 1869 static mblk_t *
1870 1870 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1871 1871 struct ib_mac_iocb_rsp *ib_mac_rsp)
1872 1872 {
1873 1873 mblk_t *mp = NULL;
1874 1874 mblk_t *mp1 = NULL; /* packet header */
1875 1875 mblk_t *mp2 = NULL; /* packet content */
1876 1876 struct bq_desc *lbq_desc;
1877 1877 struct bq_desc *sbq_desc;
1878 1878 uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1879 1879 uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1880 1880 uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1881 1881 uint32_t pkt_len = payload_len + header_len;
1882 1882 uint32_t done;
1883 1883 uint64_t *curr_ial_ptr;
1884 1884 uint32_t ial_data_addr_low;
1885 1885 uint32_t actual_data_addr_low;
1886 1886 mblk_t *mp_ial = NULL; /* ial chained packets */
1887 1887 uint32_t size;
1888 1888 uint32_t cp_offset;
1889 1889 boolean_t rx_copy = B_FALSE;
1890 1890 mblk_t *tp = NULL;
1891 1891
1892 1892 /*
1893 1893 * Check if error flags are set
1894 1894 */
1895 1895 if (err_flag != 0) {
1896 1896 if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1897 1897 rx_ring->frame_too_long++;
1898 1898 if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1899 1899 rx_ring->frame_too_short++;
1900 1900 if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1901 1901 rx_ring->fcs_err++;
1902 1902 #ifdef QLGE_LOAD_UNLOAD
1903 1903 cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1904 1904 #endif
1905 1905 QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1906 1906 (uint8_t *)ib_mac_rsp, 8,
1907 1907 (size_t)sizeof (struct ib_mac_iocb_rsp));
1908 1908 }
1909 1909
1910 1910 /* header should not be in large buffer */
1911 1911 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1912 1912 cmn_err(CE_WARN, "header in large buffer or invalid!");
1913 1913 err_flag |= 1;
1914 1914 }
1915 1915 	/* if the whole packet is bigger than the rx buffer size */
1916 1916 if (pkt_len > qlge->max_frame_size) {
1917 1917 		cmn_err(CE_WARN, "ql_build_rx_mp: frame too long(%d)!", pkt_len);
1918 1918 err_flag |= 1;
1919 1919 }
1920 1920 if (qlge->rx_copy ||
1921 1921 (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1922 1922 (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1923 1923 rx_copy = B_TRUE;
1924 1924 }
1925 1925
1926 1926 /* if using rx copy mode, we need to allocate a big enough buffer */
1927 1927 if (rx_copy) {
1928 1928 qlge->stats.norcvbuf++;
1929 1929 tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1930 1930 BPRI_MED);
1931 1931 if (tp == NULL) {
1932 1932 cmn_err(CE_WARN, "rx copy failed to allocate memory");
1933 1933 } else {
1934 1934 tp->b_rptr += qlge->ip_hdr_offset;
1935 1935 }
1936 1936 }
1937 1937 /*
1938 1938 * Handle the header buffer if present.
1939 1939 	 * The packet header must be valid and saved in one small buffer;
1940 1940 	 * broadcast/multicast packet headers are not split.
1941 1941 */
1942 1942 if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1943 1943 (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1944 1944 QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1945 1945 header_len));
1946 1946 /* Sync access */
1947 1947 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1948 1948
1949 1949 ASSERT(sbq_desc != NULL);
1950 1950
1951 1951 /*
1952 1952 * Validate addresses from the ASIC with the
1953 1953 * expected sbuf address
1954 1954 */
1955 1955 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1956 1956 != ib_mac_rsp->hdr_addr) {
1957 1957 /* Small buffer address mismatch */
1958 1958 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1959 1959 " in wrong small buffer",
1960 1960 __func__, qlge->instance, rx_ring->cq_id);
1961 1961 goto fatal_error;
1962 1962 }
1963 1963 /* get this packet */
1964 1964 mp1 = sbq_desc->mp;
1965 1965 /* Flush DMA'd data */
1966 1966 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1967 1967 0, header_len, DDI_DMA_SYNC_FORKERNEL);
1968 1968
1969 1969 		if ((err_flag != 0) || (mp1 == NULL)) {
1970 1970 /* failed on this packet, put it back for re-arming */
1971 1971 #ifdef QLGE_LOAD_UNLOAD
1972 1972 cmn_err(CE_WARN, "get header from small buffer fail");
1973 1973 #endif
1974 1974 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1975 1975 mp1 = NULL;
1976 1976 } else if (rx_copy) {
1977 1977 if (tp != NULL) {
1978 1978 bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1979 1979 header_len);
1980 1980 }
1981 1981 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1982 1982 mp1 = NULL;
1983 1983 } else {
1984 1984 			if ((qlge->ip_hdr_offset != 0) &&
1985 1985 (header_len < SMALL_BUFFER_SIZE)) {
1986 1986 /*
1987 1987 				 * copy the entire header to a 2-byte boundary
1988 1988 				 * address for 8100 adapters so that the IP
1989 1989 				 * header lands on a 4-byte boundary address
1990 1990 */
1991 1991 bcopy(mp1->b_rptr,
1992 1992 (mp1->b_rptr + SMALL_BUFFER_SIZE +
1993 1993 qlge->ip_hdr_offset),
1994 1994 header_len);
1995 1995 mp1->b_rptr += SMALL_BUFFER_SIZE +
1996 1996 qlge->ip_hdr_offset;
1997 1997 }
1998 1998
1999 1999 /*
2000 2000 			 * Adjust the mblk write pointer to match
2001 2001 			 * the packet header length
2002 2002 */
2003 2003 mp1->b_wptr = mp1->b_rptr + header_len;
2004 2004 mp1->b_next = mp1->b_cont = NULL;
2005 2005 QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2006 2006 (uint8_t *)mp1->b_rptr, 8, header_len);
2007 2007 }
2008 2008 }
2009 2009
2010 2010 /*
2011 2011 	 * The packet data, or the whole packet, can be in one small
2012 2012 	 * buffer or in one or more large buffers
2013 2013 */
2014 2014 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2015 2015 /*
2016 2016 * The data is in a single small buffer.
2017 2017 */
2018 2018 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2019 2019
2020 2020 ASSERT(sbq_desc != NULL);
2021 2021
2022 2022 QL_PRINT(DBG_RX,
2023 2023 ("%d bytes in a single small buffer, sbq_desc = %p, "
2024 2024 "sbq_desc->bd_dma.dma_addr = %x,"
2025 2025 " ib_mac_rsp->data_addr = %x, mp = %p\n",
2026 2026 payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2027 2027 ib_mac_rsp->data_addr, sbq_desc->mp));
2028 2028
2029 2029 /*
2030 2030 * Validate addresses from the ASIC with the
2031 2031 * expected sbuf address
2032 2032 */
2033 2033 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2034 2034 != ib_mac_rsp->data_addr) {
2035 2035 /* Small buffer address mismatch */
2036 2036 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2037 2037 " in wrong small buffer",
2038 2038 __func__, qlge->instance, rx_ring->cq_id);
2039 2039 goto fatal_error;
2040 2040 }
2041 2041 /* get this packet */
2042 2042 mp2 = sbq_desc->mp;
2043 2043 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2044 2044 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2045 2045 if ((err_flag != 0) || (mp2 == NULL)) {
2046 2046 #ifdef QLGE_LOAD_UNLOAD
2047 2047 /* failed on this packet, put it back for re-arming */
2048 2048 cmn_err(CE_WARN, "ignore bad data from small buffer");
2049 2049 #endif
2050 2050 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2051 2051 mp2 = NULL;
2052 2052 } else if (rx_copy) {
2053 2053 if (tp != NULL) {
2054 2054 bcopy(sbq_desc->bd_dma.vaddr,
2055 2055 tp->b_rptr + header_len, payload_len);
2056 2056 tp->b_wptr =
2057 2057 tp->b_rptr + header_len + payload_len;
2058 2058 }
2059 2059 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2060 2060 mp2 = NULL;
2061 2061 } else {
2062 2062 /* Adjust the buffer length to match the payload_len */
2063 2063 mp2->b_wptr = mp2->b_rptr + payload_len;
2064 2064 mp2->b_next = mp2->b_cont = NULL;
2065 2065 /* Flush DMA'd data */
2066 2066 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2067 2067 (uint8_t *)mp2->b_rptr, 8, payload_len);
2068 2068 /*
2069 2069 			 * if the payload is small enough, copy it to
2070 2070 			 * the end of the packet header
2071 2071 */
2072 2072 if ((mp1 != NULL) &&
2073 2073 (payload_len <= qlge->payload_copy_thresh) &&
2074 2074 (pkt_len <
2075 2075 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2076 2076 bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2077 2077 mp1->b_wptr += payload_len;
2078 2078 freemsg(mp2);
2079 2079 mp2 = NULL;
2080 2080 }
2081 2081 }
2082 2082 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083 2083 /*
2084 2084 * The data is in a single large buffer.
2085 2085 */
2086 2086 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2087 2087
2088 2088 QL_PRINT(DBG_RX,
2089 2089 ("%d bytes in a single large buffer, lbq_desc = %p, "
2090 2090 "lbq_desc->bd_dma.dma_addr = %x,"
2091 2091 " ib_mac_rsp->data_addr = %x, mp = %p\n",
2092 2092 payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2093 2093 ib_mac_rsp->data_addr, lbq_desc->mp));
2094 2094
2095 2095 ASSERT(lbq_desc != NULL);
2096 2096
2097 2097 /*
2098 2098 * Validate addresses from the ASIC with
2099 2099 * the expected lbuf address
2100 2100 */
2101 2101 if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2102 2102 != ib_mac_rsp->data_addr) {
2103 2103 /* Large buffer address mismatch */
2104 2104 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2105 2105 " in wrong large buffer",
2106 2106 __func__, qlge->instance, rx_ring->cq_id);
2107 2107 goto fatal_error;
2108 2108 }
2109 2109 mp2 = lbq_desc->mp;
2110 2110 /* Flush DMA'd data */
2111 2111 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2112 2112 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2113 2113 if ((err_flag != 0) || (mp2 == NULL)) {
2114 2114 #ifdef QLGE_LOAD_UNLOAD
2115 2115 cmn_err(CE_WARN, "ignore bad data from large buffer");
2116 2116 #endif
2117 2117 /* failed on this packet, put it back for re-arming */
2118 2118 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2119 2119 mp2 = NULL;
2120 2120 } else if (rx_copy) {
2121 2121 if (tp != NULL) {
2122 2122 bcopy(lbq_desc->bd_dma.vaddr,
2123 2123 tp->b_rptr + header_len, payload_len);
2124 2124 tp->b_wptr =
2125 2125 tp->b_rptr + header_len + payload_len;
2126 2126 }
2127 2127 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2128 2128 mp2 = NULL;
2129 2129 } else {
2130 2130 /*
2131 2131 * Adjust the buffer length to match
2132 2132 * the packet payload_len
2133 2133 */
2134 2134 mp2->b_wptr = mp2->b_rptr + payload_len;
2135 2135 mp2->b_next = mp2->b_cont = NULL;
2136 2136 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2137 2137 (uint8_t *)mp2->b_rptr, 8, payload_len);
2138 2138 /*
2139 2139 			 * if the payload is small enough, copy it to
2140 2140 			 * the end of the packet header
2141 2141 */
2142 2142 if ((mp1 != NULL) &&
2143 2143 (payload_len <= qlge->payload_copy_thresh) &&
2144 2144 			    (pkt_len <
2145 2145 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2146 2146 bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2147 2147 mp1->b_wptr += payload_len;
2148 2148 freemsg(mp2);
2149 2149 mp2 = NULL;
2150 2150 }
2151 2151 }
2152 2152 } else if (payload_len) { /* ial case */
2153 2153 /*
2154 2154 		 * The payload is present but is in neither a single small
2155 2155 		 * nor a single large buffer, so it is described by an IAL
2156 2156 */
2157 2157 #ifdef QLGE_LOAD_UNLOAD
2158 2158 cmn_err(CE_NOTE, "packet chained in IAL \n");
2159 2159 #endif
2160 2160 /* lrg buf addresses are saved in one small buffer */
2161 2161 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2162 2162 curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2163 2163 done = 0;
2164 2164 cp_offset = 0;
2165 2165
2166 2166 while (!done) {
2167 2167 ial_data_addr_low =
2168 2168 (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2169 2169 0xFFFFFFFE);
2170 2170 /* check if this is the last packet fragment */
2171 2171 done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2172 2172 curr_ial_ptr++;
2173 2173 /*
2174 2174 * The data is in one or several large buffer(s).
2175 2175 */
2176 2176 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2177 2177 actual_data_addr_low =
2178 2178 (uint32_t)(lbq_desc->bd_dma.dma_addr &
2179 2179 0xFFFFFFFE);
2180 2180 if (ial_data_addr_low != actual_data_addr_low) {
2181 2181 cmn_err(CE_WARN,
2182 2182 "packet saved in wrong ial lrg buffer"
2183 2183 " expected %x, actual %lx",
2184 2184 ial_data_addr_low,
2185 2185 (uintptr_t)lbq_desc->bd_dma.dma_addr);
2186 2186 goto fatal_error;
2187 2187 }
2188 2188
2189 2189 			size = (payload_len < rx_ring->lbq_buf_size) ?
2190 2190 payload_len : rx_ring->lbq_buf_size;
2191 2191 payload_len -= size;
2192 2192 mp2 = lbq_desc->mp;
2193 2193 if ((err_flag != 0) || (mp2 == NULL)) {
2194 2194 #ifdef QLGE_LOAD_UNLOAD
2195 2195 cmn_err(CE_WARN,
2196 2196 "ignore bad data from large buffer");
2197 2197 #endif
2198 2198 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2199 2199 mp2 = NULL;
2200 2200 } else if (rx_copy) {
2201 2201 if (tp != NULL) {
2202 2202 (void) ddi_dma_sync(
2203 2203 lbq_desc->bd_dma.dma_handle,
2204 2204 0, size, DDI_DMA_SYNC_FORKERNEL);
2205 2205 bcopy(lbq_desc->bd_dma.vaddr,
2206 2206 tp->b_rptr + header_len + cp_offset,
2207 2207 size);
2208 2208 tp->b_wptr =
2209 2209 tp->b_rptr + size + cp_offset +
2210 2210 header_len;
2211 2211 cp_offset += size;
2212 2212 }
2213 2213 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2214 2214 mp2 = NULL;
2215 2215 } else {
2216 2216 if (mp_ial == NULL) {
2217 2217 mp_ial = mp2;
2218 2218 } else {
2219 2219 linkb(mp_ial, mp2);
2220 2220 }
2221 2221
2222 2222 mp2->b_next = NULL;
2223 2223 mp2->b_cont = NULL;
2224 2224 mp2->b_wptr = mp2->b_rptr + size;
2225 2225 /* Flush DMA'd data */
2226 2226 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2227 2227 0, size, DDI_DMA_SYNC_FORKERNEL);
2228 2228 QL_PRINT(DBG_RX, ("ial %d payload received \n",
2229 2229 size));
2230 2230 QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2231 2231 (uint8_t *)mp2->b_rptr, 8, size);
2232 2232 }
2233 2233 }
2234 2234 if (err_flag != 0) {
2235 2235 #ifdef QLGE_LOAD_UNLOAD
2236 2236 /* failed on this packet, put it back for re-arming */
2237 2237 cmn_err(CE_WARN, "ignore bad data from small buffer");
2238 2238 #endif
2239 2239 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2240 2240 } else {
2241 2241 mp2 = mp_ial;
2242 2242 freemsg(sbq_desc->mp);
2243 2243 }
2244 2244 }
2245 2245 /*
2246 2246 	 * If the packet header was not split out, send mp2 upstream by itself;
2247 2247 	 * otherwise concatenate the payload mblk mp2 to the tail of the header mblk mp1
2248 2248 */
2249 2249 if (!err_flag) {
2250 2250 if (rx_copy) {
2251 2251 if (tp != NULL) {
2252 2252 tp->b_next = NULL;
2253 2253 tp->b_cont = NULL;
2254 2254 tp->b_wptr = tp->b_rptr +
2255 2255 header_len + payload_len;
2256 2256 }
2257 2257 mp = tp;
2258 2258 } else {
2259 2259 if (mp1) {
2260 2260 if (mp2) {
2261 2261 QL_PRINT(DBG_RX,
2262 2262 ("packet in mp1 and mp2\n"));
2263 2263 /* mp1->b_cont = mp2; */
2264 2264 linkb(mp1, mp2);
2265 2265 mp = mp1;
2266 2266 } else {
2267 2267 QL_PRINT(DBG_RX,
2268 2268 ("packet in mp1 only\n"));
2269 2269 mp = mp1;
2270 2270 }
2271 2271 } else if (mp2) {
2272 2272 QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2273 2273 mp = mp2;
2274 2274 }
2275 2275 }
2276 2276 }
2277 2277 return (mp);
2278 2278
2279 2279 fatal_error:
2280 2280 /* fatal Error! */
2281 2281 if (qlge->fm_enable) {
2282 2282 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2283 2283 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2284 2284 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2285 2285 }
2286 2286 if (tp) {
2287 2287 freemsg(tp);
2288 2288 }
2289 2289
2290 2290 /* *mp->b_wptr = 0; */
2291 2291 ql_wake_asic_reset_soft_intr(qlge);
2292 2292 return (NULL);
2293 2293
2294 2294 }
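
/*
 * Summary of the receive paths above: when the chip splits the header
 * it always lands in a small buffer; the payload can then follow in a
 * small buffer, in a single large buffer, or in several large buffers
 * whose addresses are chained through an IAL stored in one small
 * buffer.  In rx_copy mode every path copies the data into the freshly
 * allocated mblk tp so the DMA buffers can be refilled immediately.
 */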
2295 2295
2296 2296 /*
2297 2297 * Bump completion queue consumer index.
2298 2298 */
2299 2299 static void
2300 2300 ql_update_cq(struct rx_ring *rx_ring)
2301 2301 {
2302 2302 rx_ring->cnsmr_idx++;
2303 2303 rx_ring->curr_entry++;
2304 2304 if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2305 2305 rx_ring->cnsmr_idx = 0;
2306 2306 rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2307 2307 }
2308 2308 }
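
/*
 * Note: cnsmr_idx and curr_entry advance in lock step; curr_entry is
 * the virtual-address mirror of the index, so when the index wraps at
 * cq_len the entry pointer is rewound to the base of the completion
 * queue DMA area (cq_dma.vaddr).
 */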
2309 2309
2310 2310 /*
2311 2311  * Write the completion queue consumer index to its doorbell register.
2312 2312 */
2313 2313 static void
2314 2314 ql_write_cq_idx(struct rx_ring *rx_ring)
2315 2315 {
2316 2316 qlge_t *qlge = rx_ring->qlge;
2317 2317
2318 2318 ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2319 2319 rx_ring->cnsmr_idx);
2320 2320 }
2321 2321
2322 2322 /*
2323 2323 * Processes a SYS-Chip Event Notification Completion Event.
2324 2324  * The incoming notification event describes a link up/down
2325 2325  * transition or some sort of error condition.
2326 2326 */
2327 2327 static void
2328 2328 ql_process_chip_ae_intr(qlge_t *qlge,
2329 2329 struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2330 2330 {
2331 2331 uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2332 2332 uint32_t soft_req = 0;
2333 2333
2334 2334 switch (eventType) {
2335 2335 case SYS_EVENT_PORT_LINK_UP: /* 0x0h */
2336 2336 QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2337 2337 break;
2338 2338
2339 2339 case SYS_EVENT_PORT_LINK_DOWN: /* 0x1h */
2340 2340 QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2341 2341 break;
2342 2342
2343 2343 	case SYS_EVENT_MULTIPLE_CAM_HITS: /* 0x6h */
2344 2344 		cmn_err(CE_WARN, "A multiple CAM hits lookup error "
2345 2345 "occurred");
2346 2346 soft_req |= NEED_HW_RESET;
2347 2347 break;
2348 2348
2349 2349 case SYS_EVENT_SOFT_ECC_ERR: /* 0x7h */
2350 2350 cmn_err(CE_WARN, "Soft ECC error detected");
2351 2351 soft_req |= NEED_HW_RESET;
2352 2352 break;
2353 2353
2354 2354 case SYS_EVENT_MGMT_FATAL_ERR: /* 0x8h */
2355 2355 cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2356 2356 		    " error occurred");
2357 2357 soft_req |= NEED_MPI_RESET;
2358 2358 break;
2359 2359
2360 2360 case SYS_EVENT_MAC_INTERRUPT: /* 0x9h */
2361 2361 QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2362 2362 break;
2363 2363
2364 2364 case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF: /* 0x40h */
2365 2365 cmn_err(CE_WARN, "PCI Error reading small/large "
2366 2366 		    "buffers occurred");
2367 2367 soft_req |= NEED_HW_RESET;
2368 2368 break;
2369 2369
2370 2370 default:
2371 2371 QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2372 2372 		    "type 0x%x occurred",
2373 2373 __func__, qlge->instance, eventType));
2374 2374 break;
2375 2375 }
2376 2376
2377 2377 if ((soft_req & NEED_MPI_RESET) != 0) {
2378 2378 ql_wake_mpi_reset_soft_intr(qlge);
2379 2379 if (qlge->fm_enable) {
2380 2380 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2381 2381 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2382 2382 }
2383 2383 } else if ((soft_req & NEED_HW_RESET) != 0) {
2384 2384 ql_wake_asic_reset_soft_intr(qlge);
2385 2385 if (qlge->fm_enable) {
2386 2386 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2387 2387 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2388 2388 }
2389 2389 }
2390 2390 }
2391 2391
2392 2392 /*
2393 2393 * set received packet checksum flag
2394 2394 */
2395 2395 void
2396 2396 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2397 2397 {
2398 2398 uint32_t flags;
2399 2399
2400 2400 /* Not TCP or UDP packet? nothing more to do */
2401 2401 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2402 2402 ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2403 2403 return;
2404 2404
2405 2405 /* No CKO support for IPv6 */
2406 2406 if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2407 2407 return;
2408 2408
2409 2409 /*
2410 2410 	 * If there is a checksum error, don't set the flags; the stack will
2411 2411 	 * calculate the checksum, detect the error and update its statistics
2412 2412 */
2413 2413 if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2414 2414 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2415 2415 return;
2416 2416
2417 2417 /* TCP or UDP packet and checksum valid */
2418 2418 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2419 2419 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2420 2420 flags = HCK_FULLCKSUM_OK;
2421 2421 mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2422 2422 }
2423 2423 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2424 2424 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2425 2425 flags = HCK_FULLCKSUM_OK;
2426 2426 mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2427 2427 }
2428 2428 }
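
/*
 * Note: HCK_FULLCKSUM_OK tells the stack that the full TCP/UDP checksum
 * was already verified by the hardware, so software verification can be
 * skipped for this packet.  The flag is deliberately left clear for
 * IPv6 and for frames whose IOCB reports a checksum error, leaving the
 * stack to validate those packets itself.
 */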
2429 2429
2430 2430 /*
2431 2431  * This function walks the h/w descriptors of the specified rx ring and
2432 2432  * receives the data when the descriptor status shows it is ready.
2433 2433 * It returns a chain of mblks containing the received data, to be
2434 2434 * passed up to mac_rx_ring().
2435 2435 */
2436 2436 mblk_t *
2437 2437 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2438 2438 {
2439 2439 qlge_t *qlge = rx_ring->qlge;
2440 2440 uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2441 2441 struct ib_mac_iocb_rsp *net_rsp;
2442 2442 mblk_t *mp;
2443 2443 mblk_t *mblk_head;
2444 2444 mblk_t **mblk_tail;
2445 2445 uint32_t received_bytes = 0;
2446 2446 uint32_t length;
2447 2447 #ifdef QLGE_PERFORMANCE
2448 2448 uint32_t pkt_ct = 0;
2449 2449 #endif
2450 2450
2451 2451 #ifdef QLGE_TRACK_BUFFER_USAGE
2452 2452 uint32_t consumer_idx;
2453 2453 uint32_t producer_idx;
2454 2454 uint32_t num_free_entries;
2455 2455 uint32_t temp;
2456 2456
2457 2457 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2458 2458 consumer_idx = temp & 0x0000ffff;
2459 2459 producer_idx = (temp >> 16);
2460 2460
2461 2461 if (consumer_idx > producer_idx)
2462 2462 num_free_entries = (consumer_idx - producer_idx);
2463 2463 else
2464 2464 num_free_entries = NUM_RX_RING_ENTRIES - (
2465 2465 producer_idx - consumer_idx);
2466 2466
2467 2467 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2468 2468 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2469 2469
2470 2470 #endif
2471 2471 mblk_head = NULL;
2472 2472 mblk_tail = &mblk_head;
2473 2473
2474 2474 while ((prod != rx_ring->cnsmr_idx)) {
2475 2475 QL_PRINT(DBG_RX,
2476 2476 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2477 2477 __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2478 2478
2479 2479 net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2480 2480 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2481 2481 (off_t)((uintptr_t)net_rsp -
2482 2482 (uintptr_t)rx_ring->cq_dma.vaddr),
2483 2483 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2484 2484 QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2485 2485 rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2486 2486
2487 2487 switch (net_rsp->opcode) {
2488 2488
2489 2489 case OPCODE_IB_MAC_IOCB:
2490 2490 /* Adding length of pkt header and payload */
2491 2491 length = le32_to_cpu(net_rsp->data_len) +
2492 2492 le32_to_cpu(net_rsp->hdr_len);
2493 2493 if ((poll_bytes != QLGE_POLL_ALL) &&
2494 2494 ((received_bytes + length) > poll_bytes)) {
2495 2495 continue;
2496 2496 }
2497 2497 received_bytes += length;
2498 2498
2499 2499 #ifdef QLGE_PERFORMANCE
2500 2500 pkt_ct++;
2501 2501 #endif
2502 2502 mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2503 2503 if (mp != NULL) {
2504 2504 if (rx_ring->mac_flags != QL_MAC_STARTED) {
2505 2505 /*
2506 2506 * Increment number of packets we have
2507 2507 * indicated to the stack, should be
2508 2508 * decremented when we get it back
2509 2509 * or when freemsg is called
2510 2510 */
2511 2511 ASSERT(rx_ring->rx_indicate
2512 2512 <= rx_ring->cq_len);
2513 2513 #ifdef QLGE_LOAD_UNLOAD
2514 2514 cmn_err(CE_WARN, "%s do not send to OS,"
2515 2515 " mac_flags %d, indicate %d",
2516 2516 __func__, rx_ring->mac_flags,
2517 2517 rx_ring->rx_indicate);
2518 2518 #endif
2519 2519 QL_PRINT(DBG_RX,
2520 2520 ("cq_id = %d, packet "
2521 2521 "dropped, mac not "
2522 2522 "enabled.\n",
2523 2523 rx_ring->cq_id));
2524 2524 rx_ring->rx_pkt_dropped_mac_unenabled++;
2525 2525
2526 2526 /* rx_lock is expected to be held */
2527 2527 mutex_exit(&rx_ring->rx_lock);
2528 2528 freemsg(mp);
2529 2529 mutex_enter(&rx_ring->rx_lock);
2530 2530 mp = NULL;
2531 2531 }
2532 2532
2533 2533 if (mp != NULL) {
2534 2534 /*
2535 2535 * IP full packet has been
2536 2536 * successfully verified by
2537 2537 * H/W and is correct
2538 2538 */
2539 2539 ql_set_rx_cksum(mp, net_rsp);
2540 2540
2541 2541 rx_ring->rx_packets++;
2542 2542 rx_ring->rx_bytes += length;
2543 2543 *mblk_tail = mp;
2544 2544 mblk_tail = &mp->b_next;
2545 2545 }
2546 2546 } else {
2547 2547 QL_PRINT(DBG_RX,
2548 2548 ("cq_id = %d, packet dropped\n",
2549 2549 rx_ring->cq_id));
2550 2550 rx_ring->rx_packets_dropped_no_buffer++;
2551 2551 }
2552 2552 break;
2553 2553
2554 2554 case OPCODE_IB_SYS_EVENT_IOCB:
2555 2555 ql_process_chip_ae_intr(qlge,
2556 2556 (struct ib_sys_event_iocb_rsp *)
2557 2557 net_rsp);
2558 2558 break;
2559 2559
2560 2560 default:
2561 2561 cmn_err(CE_WARN,
2562 2562 "%s Ring(%d)Hit default case, not handled!"
2563 2563 " dropping the packet, "
2564 2564 "opcode = %x.", __func__, rx_ring->cq_id,
2565 2565 net_rsp->opcode);
2566 2566 break;
2567 2567 }
2568 2568 /* increment cnsmr_idx and curr_entry */
2569 2569 ql_update_cq(rx_ring);
2570 2570 prod = ql_read_sh_reg(qlge, rx_ring);
2571 2571
2572 2572 }
2573 2573
2574 2574 #ifdef QLGE_PERFORMANCE
2575 2575 if (pkt_ct >= 7)
2576 2576 rx_ring->hist[7]++;
2577 2577 else if (pkt_ct == 6)
2578 2578 rx_ring->hist[6]++;
2579 2579 else if (pkt_ct == 5)
2580 2580 rx_ring->hist[5]++;
2581 2581 else if (pkt_ct == 4)
2582 2582 rx_ring->hist[4]++;
2583 2583 else if (pkt_ct == 3)
2584 2584 rx_ring->hist[3]++;
2585 2585 else if (pkt_ct == 2)
2586 2586 rx_ring->hist[2]++;
2587 2587 else if (pkt_ct == 1)
2588 2588 rx_ring->hist[1]++;
2589 2589 else if (pkt_ct == 0)
2590 2590 rx_ring->hist[0]++;
2591 2591 #endif
2592 2592
2593 2593 /* update cnsmr_idx */
2594 2594 ql_write_cq_idx(rx_ring);
2595 2595 /* do not enable interrupt for polling mode */
2596 2596 if (poll_bytes == QLGE_POLL_ALL)
2597 2597 ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2598 2598 return (mblk_head);
2599 2599 }
2600 2600
2601 2601 /* Process an outbound completion from an rx ring. */
2602 2602 static void
2603 2603 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2604 2604 {
2605 2605 struct tx_ring *tx_ring;
2606 2606 struct tx_ring_desc *tx_ring_desc;
2607 2607 int j;
2608 2608
2609 2609 tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2610 2610 tx_ring_desc = tx_ring->wq_desc;
2611 2611 tx_ring_desc += mac_rsp->tid;
2612 2612
2613 2613 if (tx_ring_desc->tx_type == USE_DMA) {
2614 2614 QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2615 2615 __func__, qlge->instance));
2616 2616
2617 2617 /*
2618 2618 * Release the DMA resource that is used for
2619 2619 * DMA binding.
2620 2620 */
2621 2621 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2622 2622 (void) ddi_dma_unbind_handle(
2623 2623 tx_ring_desc->tx_dma_handle[j]);
2624 2624 }
2625 2625
2626 2626 tx_ring_desc->tx_dma_handle_used = 0;
2627 2627 /*
2628 2628 		 * Free the mblk now that the send has completed
2629 2629 */
2630 2630 if (tx_ring_desc->mp != NULL) {
2631 2631 freemsg(tx_ring_desc->mp);
2632 2632 tx_ring_desc->mp = NULL;
2633 2633 }
2634 2634 }
2635 2635
2636 2636 tx_ring->obytes += tx_ring_desc->tx_bytes;
2637 2637 tx_ring->opackets++;
2638 2638
2639 2639 if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2640 2640 OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2641 2641 tx_ring->errxmt++;
2642 2642 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2643 2643 /* EMPTY */
2644 2644 QL_PRINT(DBG_TX,
2645 2645 ("Total descriptor length did not match "
2646 2646 "transfer length.\n"));
2647 2647 }
2648 2648 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2649 2649 /* EMPTY */
2650 2650 QL_PRINT(DBG_TX,
2651 2651 ("Frame too short to be legal, not sent.\n"));
2652 2652 }
2653 2653 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2654 2654 /* EMPTY */
2655 2655 QL_PRINT(DBG_TX,
2656 2656 ("Frame too long, but sent anyway.\n"));
2657 2657 }
2658 2658 if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2659 2659 /* EMPTY */
2660 2660 QL_PRINT(DBG_TX,
2661 2661 ("PCI backplane error. Frame not sent.\n"));
2662 2662 }
2663 2663 }
2664 2664 atomic_inc_32(&tx_ring->tx_free_count);
2665 2665 }
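
/*
 * Note: the handles unbound above are the per-descriptor DMA handles
 * pre-allocated in ql_alloc_tx_resources(); only the bindings are torn
 * down at completion time, while the handles themselves are kept until
 * ql_free_tx_resources() destroys the ring.
 */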
2666 2666
2667 2667 /*
2668 2668 * clean up tx completion iocbs
2669 2669 */
2670 2670 int
2671 2671 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2672 2672 {
2673 2673 qlge_t *qlge = rx_ring->qlge;
2674 2674 uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2675 2675 struct ob_mac_iocb_rsp *net_rsp = NULL;
2676 2676 int count = 0;
2677 2677 struct tx_ring *tx_ring;
2678 2678 boolean_t resume_tx = B_FALSE;
2679 2679
2680 2680 mutex_enter(&rx_ring->rx_lock);
2681 2681 #ifdef QLGE_TRACK_BUFFER_USAGE
2682 2682 {
2683 2683 uint32_t consumer_idx;
2684 2684 uint32_t producer_idx;
2685 2685 uint32_t num_free_entries;
2686 2686 uint32_t temp;
2687 2687
2688 2688 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2689 2689 consumer_idx = temp & 0x0000ffff;
2690 2690 producer_idx = (temp >> 16);
2691 2691
2692 2692 if (consumer_idx > producer_idx)
2693 2693 num_free_entries = (consumer_idx - producer_idx);
2694 2694 else
2695 2695 num_free_entries = NUM_RX_RING_ENTRIES -
2696 2696 (producer_idx - consumer_idx);
2697 2697
2698 2698 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2699 2699 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2700 2700
2701 2701 }
2702 2702 #endif
2703 2703 /* While there are entries in the completion queue. */
2704 2704 while (prod != rx_ring->cnsmr_idx) {
2705 2705
2706 2706 QL_PRINT(DBG_RX,
2707 2707 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2708 2708 rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2709 2709
2710 2710 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2711 2711 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2712 2712 (off_t)((uintptr_t)net_rsp -
2713 2713 (uintptr_t)rx_ring->cq_dma.vaddr),
2714 2714 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2715 2715
2716 2716 QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2717 2717 "response packet data\n",
2718 2718 rx_ring->curr_entry, 8,
2719 2719 (size_t)sizeof (*net_rsp));
2720 2720
2721 2721 switch (net_rsp->opcode) {
2722 2722
2723 2723 case OPCODE_OB_MAC_OFFLOAD_IOCB:
2724 2724 case OPCODE_OB_MAC_IOCB:
2725 2725 ql_process_mac_tx_intr(qlge, net_rsp);
2726 2726 break;
2727 2727
2728 2728 default:
2729 2729 cmn_err(CE_WARN,
2730 2730 "%s Hit default case, not handled! "
2731 2731 "dropping the packet,"
2732 2732 " opcode = %x.",
2733 2733 __func__, net_rsp->opcode);
2734 2734 break;
2735 2735 }
2736 2736 count++;
2737 2737 ql_update_cq(rx_ring);
2738 2738 prod = ql_read_sh_reg(qlge, rx_ring);
2739 2739 }
2740 2740 ql_write_cq_idx(rx_ring);
2741 2741
2742 2742 mutex_exit(&rx_ring->rx_lock);
2743 2743
2744 2744 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2745 2745 tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2746 2746
2747 2747 mutex_enter(&tx_ring->tx_lock);
2748 2748
2749 2749 if (tx_ring->queue_stopped &&
2750 2750 (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2751 2751 /*
2752 2752 * The queue got stopped because the tx_ring was full.
2753 2753 * Wake it up, because it's now at least 25% empty.
2754 2754 */
2755 2755 tx_ring->queue_stopped = 0;
2756 2756 resume_tx = B_TRUE;
2757 2757 }
2758 2758
2759 2759 mutex_exit(&tx_ring->tx_lock);
2760 2760 /* Don't hold the lock during OS callback */
2761 2761 if (resume_tx)
2762 2762 RESUME_TX(tx_ring);
2763 2763 return (count);
2764 2764 }
2765 2765
2766 2766 /*
2767 2767  * Reset the ASIC when a fatal error happens
2768 2768 */
2769 2769 /* ARGSUSED */
2770 2770 static uint_t
2771 2771 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2772 2772 {
2773 2773 qlge_t *qlge = (qlge_t *)((void *)arg1);
2774 2774 int status;
2775 2775
2776 2776 mutex_enter(&qlge->gen_mutex);
2777 2777 (void) ql_do_stop(qlge);
2778 2778 /*
2779 2779 	 * Write the default Ethernet address to chip MAC address
2780 2780 	 * register slot 0 and enable the primary MAC function.
2781 2781 */
2782 2782 mutex_enter(&qlge->hw_mutex);
2783 2783 (void) ql_unicst_set(qlge,
2784 2784 (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2785 2785 mutex_exit(&qlge->hw_mutex);
2786 2786 qlge->mac_flags = QL_MAC_INIT;
2787 2787 status = ql_do_start(qlge);
2788 2788 if (status != DDI_SUCCESS)
2789 2789 goto error;
2790 2790 qlge->mac_flags = QL_MAC_STARTED;
2791 2791 mutex_exit(&qlge->gen_mutex);
2792 2792 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2793 2793
2794 2794 return (DDI_INTR_CLAIMED);
2795 2795
2796 2796 error:
2797 2797 mutex_exit(&qlge->gen_mutex);
2798 2798 cmn_err(CE_WARN,
2799 2799 "qlge up/down cycle failed, closing device");
2800 2800 if (qlge->fm_enable) {
2801 2801 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2802 2802 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2803 2803 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2804 2804 }
2805 2805 return (DDI_INTR_CLAIMED);
2806 2806 }
2807 2807
2808 2808 /*
2809 2809 * Reset MPI
2810 2810 */
2811 2811 /* ARGSUSED */
2812 2812 static uint_t
2813 2813 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2814 2814 {
2815 2815 qlge_t *qlge = (qlge_t *)((void *)arg1);
2816 2816
2817 2817 (void) ql_reset_mpi_risc(qlge);
2818 2818 return (DDI_INTR_CLAIMED);
2819 2819 }
2820 2820
2821 2821 /*
2822 2822 * Process MPI mailbox messages
2823 2823 */
2824 2824 /* ARGSUSED */
2825 2825 static uint_t
2826 2826 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2827 2827 {
2828 2828 qlge_t *qlge = (qlge_t *)((void *)arg1);
2829 2829
2830 2830 ql_do_mpi_intr(qlge);
2831 2831 return (DDI_INTR_CLAIMED);
2832 2832 }
2833 2833
2834 2834 /* Fire up the soft interrupt handler that resets the ASIC. */
2835 2835 void
2836 2836 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2837 2837 {
2838 2838 (void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2839 2839 }
2840 2840
2841 2841 static void
2842 2842 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2843 2843 {
2844 2844 (void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2845 2845 }
2846 2846
2847 2847 static void
2848 2848 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2849 2849 {
2850 2850 (void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2851 2851 }
2852 2852
2853 2853 /*
2854 2854 * This handles a fatal error, MPI activity, and the default
2855 2855 * rx_ring in an MSI-X multiple interrupt vector environment.
2856 2856  * In an MSI/Legacy environment it also processes the rest of
2857 2857 * the rx_rings.
2858 2858 */
2859 2859 /* ARGSUSED */
2860 2860 static uint_t
2861 2861 ql_isr(caddr_t arg1, caddr_t arg2)
2862 2862 {
2863 2863 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2864 2864 struct rx_ring *ob_ring;
2865 2865 qlge_t *qlge = rx_ring->qlge;
2866 2866 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2867 2867 uint32_t var, prod;
2868 2868 int i;
2869 2869 int work_done = 0;
2870 2870
2871 2871 mblk_t *mp;
2872 2872
2873 2873 _NOTE(ARGUNUSED(arg2));
2874 2874
2875 2875 ++qlge->rx_interrupts[rx_ring->cq_id];
2876 2876
2877 2877 if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2878 2878 ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2879 2879 var = ql_read_reg(qlge, REG_ERROR_STATUS);
2880 2880 var = ql_read_reg(qlge, REG_STATUS);
2881 2881 var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2882 2882 return (DDI_INTR_CLAIMED);
2883 2883 }
2884 2884
2885 2885 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2886 2886
2887 2887 /*
2888 2888 	 * process send completions on the first stride tx ring, if available
2889 2889 */
2890 2890 if (qlge->isr_stride) {
2891 2891 ob_ring = &qlge->rx_ring[qlge->isr_stride];
2892 2892 if (ql_read_sh_reg(qlge, ob_ring) !=
2893 2893 ob_ring->cnsmr_idx) {
2894 2894 (void) ql_clean_outbound_rx_ring(ob_ring);
2895 2895 }
2896 2896 }
2897 2897 /*
2898 2898 * Check the default queue and wake handler if active.
2899 2899 */
2900 2900 rx_ring = &qlge->rx_ring[0];
2901 2901 prod = ql_read_sh_reg(qlge, rx_ring);
2902 2902 QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2903 2903 prod, rx_ring->cnsmr_idx));
2904 2904 /* check if interrupt is due to incoming packet */
2905 2905 if (prod != rx_ring->cnsmr_idx) {
2906 2906 QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2907 2907 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2908 2908 mutex_enter(&rx_ring->rx_lock);
2909 2909 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2910 2910 mutex_exit(&rx_ring->rx_lock);
2911 2911
2912 2912 if (mp != NULL)
2913 2913 RX_UPSTREAM(rx_ring, mp);
2914 2914 work_done++;
2915 2915 } else {
2916 2916 /*
2917 2917 		 * If the interrupt is not due to an incoming packet, read the
2918 2918 		 * status register to see whether an error or a mailbox interrupt is pending.
2919 2919 */
2920 2920 var = ql_read_reg(qlge, REG_STATUS);
2921 2921 if ((var & STATUS_FE) != 0) {
2922 2922 ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2923 2923 if (qlge->fm_enable) {
2924 2924 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2925 2925 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2926 2926 ddi_fm_service_impact(qlge->dip,
2927 2927 DDI_SERVICE_LOST);
2928 2928 }
2929 2929 cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2930 2930 var = ql_read_reg(qlge, REG_ERROR_STATUS);
2931 2931 cmn_err(CE_WARN,
2932 2932 "Resetting chip. Error Status Register = 0x%x",
2933 2933 var);
2934 2934 ql_wake_asic_reset_soft_intr(qlge);
2935 2935 return (DDI_INTR_CLAIMED);
2936 2936 }
2937 2937
2938 2938 /*
2939 2939 * Check MPI processor activity.
2940 2940 */
2941 2941 if ((var & STATUS_PI) != 0) {
2942 2942 /*
2943 2943 * We've got an async event or mailbox completion.
2944 2944 * Handle it and clear the source of the interrupt.
2945 2945 */
2946 2946 ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2947 2947
2948 2948 QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2949 2949 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2950 2950 ql_wake_mpi_event_soft_intr(qlge);
2951 2951 work_done++;
2952 2952 }
2953 2953 }
2954 2954
2955 2955
2956 2956 if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2957 2957 /*
2958 2958 * Start the DPC for each active queue.
2959 2959 */
2960 2960 for (i = 1; i < qlge->rx_ring_count; i++) {
2961 2961 rx_ring = &qlge->rx_ring[i];
2962 2962
2963 2963 if (ql_read_sh_reg(qlge, rx_ring) !=
2964 2964 rx_ring->cnsmr_idx) {
2965 2965 QL_PRINT(DBG_INTR,
2966 2966 ("Waking handler for rx_ring[%d].\n", i));
2967 2967
2968 2968 ql_disable_completion_interrupt(qlge,
2969 2969 rx_ring->irq);
2970 2970 if (rx_ring->type == TX_Q) {
2971 2971 (void) ql_clean_outbound_rx_ring(
2972 2972 rx_ring);
2973 2973 ql_enable_completion_interrupt(
2974 2974 rx_ring->qlge, rx_ring->irq);
2975 2975 } else {
2976 2976 mutex_enter(&rx_ring->rx_lock);
2977 2977 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2978 2978 mutex_exit(&rx_ring->rx_lock);
2979 2979 if (mp != NULL)
2980 2980 RX_UPSTREAM(rx_ring, mp);
2981 2981 #ifdef QLGE_LOAD_UNLOAD
2982 2982 if (rx_ring->mac_flags ==
2983 2983 QL_MAC_STOPPED)
2984 2984 cmn_err(CE_NOTE,
2985 2985 "%s rx_indicate(%d) %d\n",
2986 2986 __func__, i,
2987 2987 rx_ring->rx_indicate);
2988 2988 #endif
2989 2989 }
2990 2990 work_done++;
2991 2991 }
2992 2992 }
2993 2993 }
2994 2994
2995 2995 ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2996 2996
2997 2997 return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2998 2998 }
2999 2999
3000 3000 /*
3001 3001 * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3002 3002 */
3003 3003 /* ARGSUSED */
3004 3004 static uint_t
3005 3005 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3006 3006 {
3007 3007 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3008 3008 qlge_t *qlge = rx_ring->qlge;
3009 3009 _NOTE(ARGUNUSED(arg2));
3010 3010
3011 3011 ++qlge->rx_interrupts[rx_ring->cq_id];
3012 3012 (void) ql_clean_outbound_rx_ring(rx_ring);
3013 3013 ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3014 3014
3015 3015 return (DDI_INTR_CLAIMED);
3016 3016 }
3017 3017
3018 3018 /*
3019 3019 * MSI-X Multiple Vector Interrupt Handler
3020 3020 */
3021 3021 /* ARGSUSED */
3022 3022 static uint_t
3023 3023 ql_msix_isr(caddr_t arg1, caddr_t arg2)
3024 3024 {
3025 3025 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3026 3026 struct rx_ring *ob_ring;
3027 3027 qlge_t *qlge = rx_ring->qlge;
3028 3028 mblk_t *mp;
3029 3029 _NOTE(ARGUNUSED(arg2));
3030 3030
3031 3031 QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3032 3032
3033 3033 ql_disable_completion_interrupt(qlge, rx_ring->irq);
3034 3034
3035 3035 /*
3036 3036 	 * process send completions on the stride tx ring, if available
3037 3037 */
3038 3038 if (qlge->isr_stride) {
3039 3039 ob_ring = rx_ring + qlge->isr_stride;
3040 3040 if (ql_read_sh_reg(qlge, ob_ring) !=
3041 3041 ob_ring->cnsmr_idx) {
3042 3042 ++qlge->rx_interrupts[ob_ring->cq_id];
3043 3043 (void) ql_clean_outbound_rx_ring(ob_ring);
3044 3044 }
3045 3045 }
3046 3046
3047 3047 ++qlge->rx_interrupts[rx_ring->cq_id];
3048 3048
3049 3049 mutex_enter(&rx_ring->rx_lock);
3050 3050 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3051 3051 mutex_exit(&rx_ring->rx_lock);
3052 3052
3053 3053 if (mp != NULL)
3054 3054 RX_UPSTREAM(rx_ring, mp);
3055 3055
3056 3056 return (DDI_INTR_CLAIMED);
3057 3057 }
3058 3058
3059 3059 /*
3060 3060 * Poll n_bytes of chained incoming packets
3061 3061 */
3062 3062 mblk_t *
3063 3063 ql_ring_rx_poll(void *arg, int n_bytes)
3064 3064 {
3065 3065 struct rx_ring *rx_ring = (struct rx_ring *)arg;
3066 3066 qlge_t *qlge = rx_ring->qlge;
3067 3067 mblk_t *mp = NULL;
3068 3068 uint32_t var;
3069 3069
3070 3070 ASSERT(n_bytes >= 0);
3071 3071 QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3072 3072 __func__, rx_ring->cq_id, n_bytes));
3073 3073
3074 3074 ++qlge->rx_polls[rx_ring->cq_id];
3075 3075
3076 3076 if (n_bytes == 0)
3077 3077 return (mp);
3078 3078 mutex_enter(&rx_ring->rx_lock);
3079 3079 mp = ql_ring_rx(rx_ring, n_bytes);
3080 3080 mutex_exit(&rx_ring->rx_lock);
3081 3081
3082 3082 if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3083 3083 var = ql_read_reg(qlge, REG_STATUS);
3084 3084 /*
3085 3085 * Check for fatal error.
3086 3086 */
3087 3087 if ((var & STATUS_FE) != 0) {
3088 3088 ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3089 3089 var = ql_read_reg(qlge, REG_ERROR_STATUS);
3090 3090 cmn_err(CE_WARN, "Got fatal error %x.", var);
3091 3091 ql_wake_asic_reset_soft_intr(qlge);
3092 3092 if (qlge->fm_enable) {
3093 3093 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3094 3094 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3095 3095 ddi_fm_service_impact(qlge->dip,
3096 3096 DDI_SERVICE_LOST);
3097 3097 }
3098 3098 }
3099 3099 /*
3100 3100 * Check MPI processor activity.
3101 3101 */
3102 3102 if ((var & STATUS_PI) != 0) {
3103 3103 /*
3104 3104 * We've got an async event or mailbox completion.
3105 3105 * Handle it and clear the source of the interrupt.
3106 3106 */
3107 3107 ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3108 3108 ql_do_mpi_intr(qlge);
3109 3109 }
3110 3110 }
3111 3111
3112 3112 return (mp);
3113 3113 }
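
/*
 * Note: in this polling path ql_ring_rx() is given a byte budget and,
 * unlike the interrupt path, does not re-enable the completion
 * interrupt (see the QLGE_POLL_ALL check at the end of ql_ring_rx), so
 * the ring stays silent until the MAC layer re-enables interrupts on
 * the ring.
 */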
3114 3114
3115 3115 /*
3116 3116 * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3117 3117 */
3118 3118 /* ARGSUSED */
3119 3119 static uint_t
3120 3120 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3121 3121 {
3122 3122 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3123 3123 qlge_t *qlge = rx_ring->qlge;
3124 3124 mblk_t *mp;
3125 3125 _NOTE(ARGUNUSED(arg2));
3126 3126
3127 3127 QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3128 3128
3129 3129 ++qlge->rx_interrupts[rx_ring->cq_id];
3130 3130
3131 3131 mutex_enter(&rx_ring->rx_lock);
3132 3132 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3133 3133 mutex_exit(&rx_ring->rx_lock);
3134 3134
3135 3135 if (mp != NULL)
3136 3136 RX_UPSTREAM(rx_ring, mp);
3137 3137
3138 3138 return (DDI_INTR_CLAIMED);
3139 3139 }
3140 3140
3141 3141
3142 3142 /*
3143 3143 *
3144 3144 * Allocate DMA Buffer for ioctl service
3145 3145 *
3146 3146 */
3147 3147 static int
3148 3148 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3149 3149 {
3150 3150 uint64_t phy_addr;
3151 3151 uint64_t alloc_size;
3152 3152 ddi_dma_cookie_t dma_cookie;
3153 3153
3154 3154 alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3155 3155 max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3156 3156 if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3157 3157 &ql_buf_acc_attr,
3158 3158 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3159 3159 &qlge->ioctl_buf_dma_attr.acc_handle,
3160 3160 (size_t)alloc_size, /* mem size */
3161 3161 (size_t)0, /* alignment */
3162 3162 (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3163 3163 &dma_cookie) != 0) {
3164 3164 cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3165 3165 __func__, qlge->instance);
3166 3166 return (DDI_FAILURE);
3167 3167 }
3168 3168
3169 3169 phy_addr = dma_cookie.dmac_laddress;
3170 3170
3171 3171 if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3172 3172 cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3173 3173 return (DDI_FAILURE);
3174 3174 }
3175 3175
3176 3176 qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3177 3177
3178 3178 QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3179 3179 "phy_addr = 0x%lx\n",
3180 3180 __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3181 3181
3182 3182 return (DDI_SUCCESS);
3183 3183 }
3184 3184
3185 3185
3186 3186 /*
3187 3187 * Function to free physical memory.
3188 3188 */
3189 3189 static void
3190 3190 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3191 3191 {
3192 3192 if (*dma_handle != NULL) {
3193 3193 (void) ddi_dma_unbind_handle(*dma_handle);
3194 3194 if (*acc_handle != NULL)
3195 3195 ddi_dma_mem_free(acc_handle);
3196 3196 ddi_dma_free_handle(dma_handle);
3197 3197 *acc_handle = NULL;
3198 3198 *dma_handle = NULL;
3199 3199 }
3200 3200 }
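
/*
 * Note: teardown follows the usual DDI order in reverse of allocation:
 * the DMA binding is undone first, the memory is then freed through the
 * access handle, and the DMA handle is released last, with both handle
 * pointers cleared so a repeated call becomes a no-op.
 */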
3201 3201
3202 3202 /*
3203 3203 * Function to free ioctl dma buffer.
3204 3204 */
3205 3205 static void
3206 3206 ql_free_ioctl_dma_buf(qlge_t *qlge)
3207 3207 {
3208 3208 if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3209 3209 ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3210 3210 &qlge->ioctl_buf_dma_attr.acc_handle);
3211 3211
3212 3212 qlge->ioctl_buf_dma_attr.vaddr = NULL;
3213 3213 qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3214 3214 }
3215 3215 }
3216 3216
3217 3217 /*
3218 3218 * Free shadow register space used for request and completion queues
3219 3219 */
3220 3220 static void
3221 3221 ql_free_shadow_space(qlge_t *qlge)
3222 3222 {
3223 3223 if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3224 3224 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3225 3225 &qlge->host_copy_shadow_dma_attr.acc_handle);
3226 3226 bzero(&qlge->host_copy_shadow_dma_attr,
3227 3227 sizeof (qlge->host_copy_shadow_dma_attr));
3228 3228 }
3229 3229
3230 3230 if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3231 3231 ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3232 3232 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3233 3233 bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3234 3234 sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3235 3235 }
3236 3236 }
3237 3237
3238 3238 /*
3239 3239 * Allocate shadow register space for request and completion queues
3240 3240 */
3241 3241 static int
3242 3242 ql_alloc_shadow_space(qlge_t *qlge)
3243 3243 {
3244 3244 ddi_dma_cookie_t dma_cookie;
3245 3245
3246 3246 if (ql_alloc_phys(qlge->dip,
3247 3247 &qlge->host_copy_shadow_dma_attr.dma_handle,
3248 3248 &ql_dev_acc_attr,
3249 3249 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3250 3250 &qlge->host_copy_shadow_dma_attr.acc_handle,
3251 3251 (size_t)VM_PAGE_SIZE, /* mem size */
3252 3252 (size_t)4, /* 4 bytes alignment */
3253 3253 (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3254 3254 &dma_cookie) != 0) {
3255 3255 bzero(&qlge->host_copy_shadow_dma_attr,
3256 3256 sizeof (qlge->host_copy_shadow_dma_attr));
3257 3257
3258 3258 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3259 3259 "response shadow registers", __func__, qlge->instance);
3260 3260 return (DDI_FAILURE);
3261 3261 }
3262 3262
3263 3263 qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3264 3264
3265 3265 if (ql_alloc_phys(qlge->dip,
3266 3266 &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3267 3267 &ql_desc_acc_attr,
3268 3268 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3269 3269 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3270 3270 (size_t)VM_PAGE_SIZE, /* mem size */
3271 3271 (size_t)4, /* 4 bytes alignment */
3272 3272 (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3273 3273 &dma_cookie) != 0) {
3274 3274 bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3275 3275 sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3276 3276
3277 3277 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3278 3278 "for request shadow registers",
3279 3279 __func__, qlge->instance);
3280 3280 goto err_wqp_sh_area;
3281 3281 }
3282 3282 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3283 3283
3284 3284 return (DDI_SUCCESS);
3285 3285
3286 3286 err_wqp_sh_area:
3287 3287 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3288 3288 &qlge->host_copy_shadow_dma_attr.acc_handle);
3289 3289 bzero(&qlge->host_copy_shadow_dma_attr,
3290 3290 sizeof (qlge->host_copy_shadow_dma_attr));
3291 3291
3292 3292 return (DDI_FAILURE);
3293 3293 }
3294 3294
3295 3295 /*
3296 3296 * Initialize a tx ring
3297 3297 */
3298 3298 static void
3299 3299 ql_init_tx_ring(struct tx_ring *tx_ring)
3300 3300 {
3301 3301 int i;
3302 3302 struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3303 3303 struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3304 3304
3305 3305 for (i = 0; i < tx_ring->wq_len; i++) {
3306 3306 tx_ring_desc->index = i;
3307 3307 tx_ring_desc->queue_entry = mac_iocb_ptr;
3308 3308 mac_iocb_ptr++;
3309 3309 tx_ring_desc++;
3310 3310 }
3311 3311 tx_ring->tx_free_count = tx_ring->wq_len;
3312 3312 tx_ring->queue_stopped = 0;
3313 3313 }
3314 3314
3315 3315 /*
3316 3316 * Free one tx ring resources
3317 3317 */
3318 3318 static void
3319 3319 ql_free_tx_resources(struct tx_ring *tx_ring)
3320 3320 {
3321 3321 struct tx_ring_desc *tx_ring_desc;
3322 3322 int i, j;
3323 3323
3324 3324 if (tx_ring->wq_dma.dma_handle != NULL) {
3325 3325 ql_free_phys(&tx_ring->wq_dma.dma_handle,
3326 3326 &tx_ring->wq_dma.acc_handle);
3327 3327 bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3328 3328 }
3329 3329 if (tx_ring->wq_desc != NULL) {
3330 3330 tx_ring_desc = tx_ring->wq_desc;
3331 3331 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3332 3332 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3333 3333 if (tx_ring_desc->tx_dma_handle[j]) {
3334 3334 /*
3335 3335 					 * The unbinding happens at tx
3336 3336 					 * completion time; here we just
3337 3337 					 * free the handles
3338 3338 */
3339 3339 ddi_dma_free_handle(
3340 3340 &(tx_ring_desc->tx_dma_handle[j]));
3341 3341 tx_ring_desc->tx_dma_handle[j] = NULL;
3342 3342 }
3343 3343 }
3344 3344 if (tx_ring_desc->oal != NULL) {
3345 3345 tx_ring_desc->oal_dma_addr = 0;
3346 3346 tx_ring_desc->oal = NULL;
3347 3347 tx_ring_desc->copy_buffer = NULL;
3348 3348 tx_ring_desc->copy_buffer_dma_addr = 0;
3349 3349
3350 3350 ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3351 3351 &tx_ring_desc->oal_dma.acc_handle);
3352 3352 }
3353 3353 }
3354 3354 kmem_free(tx_ring->wq_desc,
3355 3355 tx_ring->wq_len * sizeof (struct tx_ring_desc));
3356 3356 tx_ring->wq_desc = NULL;
3357 3357 }
3358 3358 /* free the wqicb struct */
3359 3359 if (tx_ring->wqicb_dma.dma_handle) {
3360 3360 ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3361 3361 &tx_ring->wqicb_dma.acc_handle);
3362 3362 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3363 3363 }
3364 3364 }
3365 3365
3366 3366 /*
3367 3367 * Allocate work (request) queue memory and transmit
3368 3368 * descriptors for this transmit ring
3369 3369 */
3370 3370 static int
3371 3371 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3372 3372 {
3373 3373 ddi_dma_cookie_t dma_cookie;
3374 3374 struct tx_ring_desc *tx_ring_desc;
3375 3375 int i, j;
3376 3376 uint32_t length;
3377 3377
3378 3378 /* allocate dma buffers for obiocbs */
3379 3379 if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3380 3380 &ql_desc_acc_attr,
3381 3381 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3382 3382 &tx_ring->wq_dma.acc_handle,
3383 3383 (size_t)tx_ring->wq_size, /* mem size */
3384 3384 (size_t)128, /* alignment:128 bytes boundary */
3385 3385 (caddr_t *)&tx_ring->wq_dma.vaddr,
3386 3386 &dma_cookie) != 0) {
3387 3387		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3388 3388 cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3389 3389 __func__, qlge->instance);
3390 3390 return (DDI_FAILURE);
3391 3391 }
3392 3392 tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3393 3393
3394 3394 tx_ring->wq_desc =
3395 3395 kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3396 3396 KM_NOSLEEP);
3397 3397 if (tx_ring->wq_desc == NULL) {
3398 3398 goto err;
3399 3399 } else {
3400 3400 tx_ring_desc = tx_ring->wq_desc;
3401 3401 /*
3402 3402		 * Allocate a structure large enough to hold the following:
3403 3403		 * 1. oal buffer, MAX_SG_ELEMENTS * sizeof (oal_entry) bytes
3404 3404 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3405 3405 */
3406 3406 length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3407 3407 + QL_MAX_COPY_LENGTH;
3408 3408 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3409 3409
3410 3410 if (ql_alloc_phys(qlge->dip,
3411 3411 &tx_ring_desc->oal_dma.dma_handle,
3412 3412 &ql_desc_acc_attr,
3413 3413 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3414 3414 &tx_ring_desc->oal_dma.acc_handle,
3415 3415 (size_t)length, /* mem size */
3416 3416 (size_t)0, /* default alignment:8 bytes boundary */
3417 3417 (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3418 3418 &dma_cookie) != 0) {
3419 3419 bzero(&tx_ring_desc->oal_dma,
3420 3420 sizeof (tx_ring_desc->oal_dma));
3421 3421 cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3422 3422 "oal alloc failed.",
3423 3423 __func__, qlge->instance);
3424 3424 goto err;
3425 3425 }
3426 3426
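			/*
			 * The oal array occupies the start of this DMA area;
			 * the per-descriptor copy buffer follows it.
			 */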
3427 3427 tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3428 3428 tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3429 3429 tx_ring_desc->copy_buffer =
3430 3430 (caddr_t)((uint8_t *)tx_ring_desc->oal
3431 3431 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3432 3432 tx_ring_desc->copy_buffer_dma_addr =
3433 3433 (tx_ring_desc->oal_dma_addr
3434 3434 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3435 3435
3436 3436 /* Allocate dma handles for transmit buffers */
3437 3437 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3438 3438 if (ddi_dma_alloc_handle(qlge->dip,
3439 3439 &tx_mapping_dma_attr,
3440 3440 DDI_DMA_DONTWAIT,
3441 3441 0, &tx_ring_desc->tx_dma_handle[j])
3442 3442 != DDI_SUCCESS) {
3443 3443 tx_ring_desc->tx_dma_handle[j] = NULL;
3444 3444 cmn_err(CE_WARN,
3445 3445 "!%s: ddi_dma_alloc_handle: "
3446 3446 "tx_dma_handle "
3447 3447 "alloc failed", __func__);
3448 3448 ql_free_phys(
3449 3449 &tx_ring_desc->oal_dma.dma_handle,
3450 3450 &tx_ring_desc->oal_dma.acc_handle);
3451 3451 goto err;
3452 3452 }
3453 3453 }
3454 3454 }
3455 3455 }
3456 3456 /* alloc a wqicb control block to load this tx ring to hw */
3457 3457 if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3458 3458 &ql_desc_acc_attr,
3459 3459 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3460 3460 &tx_ring->wqicb_dma.acc_handle,
3461 3461 (size_t)sizeof (struct wqicb_t), /* mem size */
3462 3462	    (size_t)0, /* default alignment */
3463 3463 (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3464 3464 &dma_cookie) != 0) {
3465 3465 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3466 3466 cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3467 3467 __func__, qlge->instance);
3468 3468 goto err;
3469 3469 }
3470 3470 tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3471 3471
3472 3472 return (DDI_SUCCESS);
3473 3473
3474 3474 err:
3475 3475 ql_free_tx_resources(tx_ring);
3476 3476 return (DDI_FAILURE);
3477 3477 }
3478 3478
3479 3479 /*
3480 3480 * Free one rx ring resources
3481 3481 */
3482 3482 static void
3483 3483 ql_free_rx_resources(struct rx_ring *rx_ring)
3484 3484 {
3485 3485 /* Free the small buffer queue. */
3486 3486 if (rx_ring->sbq_dma.dma_handle) {
3487 3487 ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3488 3488 &rx_ring->sbq_dma.acc_handle);
3489 3489 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3490 3490 }
3491 3491
3492 3492 /* Free the small buffer queue control blocks. */
3493 3493 if (rx_ring->sbq_desc != NULL) {
3494 3494 kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3495 3495 sizeof (struct bq_desc));
3496 3496 rx_ring->sbq_desc = NULL;
3497 3497 }
3498 3498
3499 3499 /* Free the large buffer queue. */
3500 3500 if (rx_ring->lbq_dma.dma_handle) {
3501 3501 ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3502 3502 &rx_ring->lbq_dma.acc_handle);
3503 3503 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3504 3504 }
3505 3505
3506 3506 /* Free the large buffer queue control blocks. */
3507 3507 if (rx_ring->lbq_desc != NULL) {
3508 3508 kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3509 3509 sizeof (struct bq_desc));
3510 3510 rx_ring->lbq_desc = NULL;
3511 3511 }
3512 3512
3513 3513 /* Free cqicb struct */
3514 3514 if (rx_ring->cqicb_dma.dma_handle) {
3515 3515 ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3516 3516 &rx_ring->cqicb_dma.acc_handle);
3517 3517 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3518 3518 }
3519 3519 /* Free the rx queue. */
3520 3520 if (rx_ring->cq_dma.dma_handle) {
3521 3521 ql_free_phys(&rx_ring->cq_dma.dma_handle,
3522 3522 &rx_ring->cq_dma.acc_handle);
3523 3523 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3524 3524 }
3525 3525 }
3526 3526
3527 3527 /*
3528 3528 * Allocate queues and buffers for this completions queue based
3529 3529 * on the values in the parameter structure.
3530 3530 */
3531 3531 static int
3532 3532 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3533 3533 {
3534 3534 ddi_dma_cookie_t dma_cookie;
3535 3535
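	/*
	 * Allocation order: the completion queue itself, then the optional
	 * small and large buffer queues, and finally the cqicb used to load
	 * this ring into the chip.
	 */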
3536 3536 if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3537 3537 &ql_desc_acc_attr,
3538 3538 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3539 3539 &rx_ring->cq_dma.acc_handle,
3540 3540 (size_t)rx_ring->cq_size, /* mem size */
3541 3541 (size_t)128, /* alignment:128 bytes boundary */
3542 3542 (caddr_t *)&rx_ring->cq_dma.vaddr,
3543 3543 &dma_cookie) != 0) {
3544 3544 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3545 3545 cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3546 3546 __func__, qlge->instance);
3547 3547 return (DDI_FAILURE);
3548 3548 }
3549 3549 rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3550 3550
3551 3551 if (rx_ring->sbq_len != 0) {
3552 3552 /*
3553 3553 * Allocate small buffer queue.
3554 3554 */
3555 3555 if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3556 3556 &ql_desc_acc_attr,
3557 3557 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3558 3558 &rx_ring->sbq_dma.acc_handle,
3559 3559 (size_t)rx_ring->sbq_size, /* mem size */
3560 3560 (size_t)128, /* alignment:128 bytes boundary */
3561 3561 (caddr_t *)&rx_ring->sbq_dma.vaddr,
3562 3562 &dma_cookie) != 0) {
3563 3563 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3564 3564 cmn_err(CE_WARN,
3565 3565 "%s(%d): small buffer queue allocation failed.",
3566 3566 __func__, qlge->instance);
3567 3567 goto err_mem;
3568 3568 }
3569 3569 rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3570 3570
3571 3571 /*
3572 3572 * Allocate small buffer queue control blocks.
3573 3573 */
3574 3574 rx_ring->sbq_desc =
3575 3575 kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3576 3576 KM_NOSLEEP);
3577 3577 if (rx_ring->sbq_desc == NULL) {
3578 3578 cmn_err(CE_WARN,
3579 3579 "sbq control block allocation failed.");
3580 3580 goto err_mem;
3581 3581 }
3582 3582
3583 3583 ql_init_sbq_ring(rx_ring);
3584 3584 }
3585 3585
3586 3586 if (rx_ring->lbq_len != 0) {
3587 3587 /*
3588 3588 * Allocate large buffer queue.
3589 3589 */
3590 3590 if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3591 3591 &ql_desc_acc_attr,
3592 3592 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3593 3593 &rx_ring->lbq_dma.acc_handle,
3594 3594 (size_t)rx_ring->lbq_size, /* mem size */
3595 3595 (size_t)128, /* alignment:128 bytes boundary */
3596 3596 (caddr_t *)&rx_ring->lbq_dma.vaddr,
3597 3597 &dma_cookie) != 0) {
3598 3598 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3599 3599 cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3600 3600 __func__, qlge->instance);
3601 3601 goto err_mem;
3602 3602 }
3603 3603 rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3604 3604
3605 3605 /*
3606 3606 * Allocate large buffer queue control blocks.
3607 3607 */
3608 3608 rx_ring->lbq_desc =
3609 3609 kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3610 3610 KM_NOSLEEP);
3611 3611 if (rx_ring->lbq_desc == NULL) {
3612 3612 cmn_err(CE_WARN,
3613 3613 "Large buffer queue control block allocation "
3614 3614 "failed.");
3615 3615 goto err_mem;
3616 3616 }
3617 3617 ql_init_lbq_ring(rx_ring);
3618 3618 }
3619 3619
3620 3620 if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3621 3621 &ql_desc_acc_attr,
3622 3622 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3623 3623 &rx_ring->cqicb_dma.acc_handle,
3624 3624 (size_t)sizeof (struct cqicb_t), /* mem size */
3625 3625	    (size_t)0, /* default alignment */
3626 3626 (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3627 3627 &dma_cookie) != 0) {
3628 3628 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3629 3629 cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3630 3630 __func__, qlge->instance);
3631 3631 goto err_mem;
3632 3632 }
3633 3633 rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3634 3634
3635 3635 return (DDI_SUCCESS);
3636 3636
3637 3637 err_mem:
3638 3638 ql_free_rx_resources(rx_ring);
3639 3639 return (DDI_FAILURE);
3640 3640 }
3641 3641
3642 3642 /*
3643 3643 * Frees tx/rx queues memory resources
3644 3644 */
3645 3645 static void
3646 3646 ql_free_mem_resources(qlge_t *qlge)
3647 3647 {
3648 3648 int i;
3649 3649
3650 3650 if (qlge->ricb_dma.dma_handle) {
3651 3651 /* free the ricb struct */
3652 3652 ql_free_phys(&qlge->ricb_dma.dma_handle,
3653 3653 &qlge->ricb_dma.acc_handle);
3654 3654 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3655 3655 }
3656 3656
3657 3657 ql_free_rx_buffers(qlge);
3658 3658
3659 3659 ql_free_ioctl_dma_buf(qlge);
3660 3660
3661 3661 for (i = 0; i < qlge->tx_ring_count; i++)
3662 3662 ql_free_tx_resources(&qlge->tx_ring[i]);
3663 3663
3664 3664 for (i = 0; i < qlge->rx_ring_count; i++)
3665 3665 ql_free_rx_resources(&qlge->rx_ring[i]);
3666 3666
3667 3667 ql_free_shadow_space(qlge);
3668 3668 }
3669 3669
3670 3670 /*
3671 3671  * Allocate buffer queues, large buffers, small buffers, etc.
3672 3672  *
3673 3673  * This API is called from the gld_attach member function and is called
3674 3674  * only once. Later resets or reboots should not re-allocate all rings and
3675 3675 * buffers.
3676 3676 */
3677 3677 static int
3678 3678 ql_alloc_mem_resources(qlge_t *qlge)
3679 3679 {
3680 3680 int i;
3681 3681 ddi_dma_cookie_t dma_cookie;
3682 3682
3683 3683 /* Allocate space for our shadow registers */
3684 3684 if (ql_alloc_shadow_space(qlge))
3685 3685 return (DDI_FAILURE);
3686 3686
3687 3687 for (i = 0; i < qlge->rx_ring_count; i++) {
3688 3688 if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3689 3689 cmn_err(CE_WARN, "RX resource allocation failed.");
3690 3690 goto err_mem;
3691 3691 }
3692 3692 }
3693 3693 /* Allocate tx queue resources */
3694 3694 for (i = 0; i < qlge->tx_ring_count; i++) {
3695 3695 if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3696 3696 cmn_err(CE_WARN, "Tx resource allocation failed.");
3697 3697 goto err_mem;
3698 3698 }
3699 3699 }
3700 3700
3701 3701 if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3702 3702 goto err_mem;
3703 3703 }
3704 3704
3705 3705 if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3706 3706 cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3707 3707 __func__, qlge->instance);
3708 3708 goto err_mem;
3709 3709 }
3710 3710
3711 3711 qlge->sequence |= INIT_ALLOC_RX_BUF;
3712 3712
3713 3713 if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3714 3714 &ql_desc_acc_attr,
3715 3715 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3716 3716 &qlge->ricb_dma.acc_handle,
3717 3717 (size_t)sizeof (struct ricb), /* mem size */
3718 3718	    (size_t)0, /* default alignment */
3719 3719 (caddr_t *)&qlge->ricb_dma.vaddr,
3720 3720 &dma_cookie) != 0) {
3721 3721 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3722 3722 cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3723 3723 __func__, qlge->instance);
3724 3724 goto err_mem;
3725 3725 }
3726 3726 qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3727 3727
3728 3728 return (DDI_SUCCESS);
3729 3729
3730 3730 err_mem:
3731 3731 ql_free_mem_resources(qlge);
3732 3732 return (DDI_FAILURE);
3733 3733 }
3734 3734
3735 3735
3736 3736 /*
3737 3737 * Function used to allocate physical memory and zero it.
3738 3738 */
3739 3739
3740 3740 static int
3741 3741 ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3742 3742 ddi_device_acc_attr_t *device_acc_attr,
3743 3743 uint_t dma_flags,
3744 3744 ddi_acc_handle_t *acc_handle,
3745 3745 size_t size,
3746 3746 size_t alignment,
3747 3747 caddr_t *vaddr,
3748 3748 ddi_dma_cookie_t *dma_cookie)
3749 3749 {
3750 3750 size_t rlen;
3751 3751 uint_t cnt;
3752 3752
3753 3753 /*
3754 3754	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
3755 3755	 * boundary, else the hardware will overrun the buffer. The simple fix
3756 3756	 * is to make sure the buffer has enough room for the overrun.
3757 3757 */
3758 3758 if (size & 7) {
3759 3759 size += 8 - (size & 7);
3760 3760 }
3761 3761
3762 3762 /* Adjust the alignment if requested */
3763 3763 if (alignment) {
3764 3764 dma_attr.dma_attr_align = alignment;
3765 3765 }
3766 3766
3767 3767 /*
3768 3768 * Allocate DMA handle
3769 3769 */
3770 3770 if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3771 3771 dma_handle) != DDI_SUCCESS) {
3772 3772 cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED",
3773 3773 __func__);
3774 3774 *dma_handle = NULL;
3775 3775 return (QL_ERROR);
3776 3776 }
3777 3777 /*
3778 3778 * Allocate DMA memory
3779 3779 */
3780 3780 if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3781 3781 dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3782 3782 DDI_DMA_DONTWAIT,
3783 3783 NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3784 3784 cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3785 3785 ddi_dma_free_handle(dma_handle);
3786 3786 *acc_handle = NULL;
3787 3787 *dma_handle = NULL;
3788 3788 return (QL_ERROR);
3789 3789 }
3790 3790
3791 3791 if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3792 3792 dma_flags, DDI_DMA_DONTWAIT, NULL,
3793 3793 dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3794 3794 ddi_dma_mem_free(acc_handle);
3795 3795
3796 3796 ddi_dma_free_handle(dma_handle);
3797 3797 cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3798 3798 __func__);
3799 3799 *acc_handle = NULL;
3800 3800 *dma_handle = NULL;
3801 3801 return (QL_ERROR);
3802 3802 }
3803 3803
3804 3804 if (cnt != 1) {
3805 3805
3806 3806 ql_free_phys(dma_handle, acc_handle);
3807 3807
3808 3808 cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3809 3809 __func__);
3810 3810 return (QL_ERROR);
3811 3811 }
3812 3812
3813 3813 bzero((caddr_t)*vaddr, rlen);
3814 3814
3815 3815 return (0);
3816 3816 }
3817 3817
3818 3818 /*
3819 3819 * Function used to allocate physical memory and zero it.
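 * Identical to ql_alloc_phys_rbuf() above, except that the DMA handle is
 * allocated with the general dma_attr rather than dma_attr_rbuf.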
3820 3820 */
3821 3821 static int
3822 3822 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3823 3823 ddi_device_acc_attr_t *device_acc_attr,
3824 3824 uint_t dma_flags,
3825 3825 ddi_acc_handle_t *acc_handle,
3826 3826 size_t size,
3827 3827 size_t alignment,
3828 3828 caddr_t *vaddr,
3829 3829 ddi_dma_cookie_t *dma_cookie)
3830 3830 {
3831 3831 size_t rlen;
3832 3832 uint_t cnt;
3833 3833
3834 3834 /*
3835 3835	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
3836 3836	 * boundary, else the hardware will overrun the buffer. The simple fix
3837 3837	 * is to make sure the buffer has enough room for the overrun.
3838 3838 */
3839 3839 if (size & 7) {
3840 3840 size += 8 - (size & 7);
3841 3841 }
3842 3842
3843 3843 /* Adjust the alignment if requested */
3844 3844 if (alignment) {
3845 3845 dma_attr.dma_attr_align = alignment;
3846 3846 }
3847 3847
3848 3848 /*
3849 3849 * Allocate DMA handle
3850 3850 */
3851 3851 if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3852 3852 dma_handle) != DDI_SUCCESS) {
3853 3853 cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED",
3854 3854 __func__);
3855 3855 *dma_handle = NULL;
3856 3856 return (QL_ERROR);
3857 3857 }
3858 3858 /*
3859 3859 * Allocate DMA memory
3860 3860 */
3861 3861 if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3862 3862 dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3863 3863 DDI_DMA_DONTWAIT,
3864 3864 NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3865 3865 cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3866 3866 ddi_dma_free_handle(dma_handle);
3867 3867 *acc_handle = NULL;
3868 3868 *dma_handle = NULL;
3869 3869 return (QL_ERROR);
3870 3870 }
3871 3871
3872 3872 if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3873 3873 dma_flags, DDI_DMA_DONTWAIT, NULL,
3874 3874 dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3875 3875 ddi_dma_mem_free(acc_handle);
3876 3876 ddi_dma_free_handle(dma_handle);
3877 3877 cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3878 3878 __func__);
3879 3879 *acc_handle = NULL;
3880 3880 *dma_handle = NULL;
3881 3881 return (QL_ERROR);
3882 3882 }
3883 3883
3884 3884 if (cnt != 1) {
3885 3885
3886 3886 ql_free_phys(dma_handle, acc_handle);
3887 3887
3888 3888 cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3889 3889 __func__);
3890 3890 return (QL_ERROR);
3891 3891 }
3892 3892
3893 3893 bzero((caddr_t)*vaddr, rlen);
3894 3894
3895 3895 return (0);
3896 3896 }
3897 3897
3898 3898 /*
3899 3899 * Add interrupt handlers based on the interrupt type.
3900 3900  * Before adding the interrupt handlers, the interrupt vectors and the
3901 3901  * rx/tx rings must already have been allocated.
3902 3902 */
3903 3903 static int
3904 3904 ql_add_intr_handlers(qlge_t *qlge)
3905 3905 {
3906 3906 int vector = 0;
3907 3907 int rc, i;
3908 3908	uint32_t value = 0;
3909 3909 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3910 3910
3911 3911 switch (qlge->intr_type) {
3912 3912 case DDI_INTR_TYPE_MSIX:
3913 3913 /*
3914 3914 * Add interrupt handler for rx and tx rings: vector[0 -
3915 3915 * (qlge->intr_cnt -1)].
3916 3916 */
3917 3917 value = 0;
3918 3918 for (vector = 0; vector < qlge->intr_cnt; vector++) {
3919 3919 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3920 3920
3921 3921 /*
3922 3922 * associate interrupt vector with interrupt handler
3923 3923 */
3924 3924 rc = ddi_intr_add_handler(qlge->htable[vector],
3925 3925 (ddi_intr_handler_t *)intr_ctx->handler,
3926 3926 (void *)&qlge->rx_ring[vector], NULL);
3927 3927
3928 3928 QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3929 3929 vector, &qlge->rx_ring[vector]));
3930 3930 if (rc != DDI_SUCCESS) {
3931 3931 QL_PRINT(DBG_INIT,
3932 3932 ("Add rx interrupt handler failed. "
3933 3933 "return: %d, vector: %d", rc, vector));
3934 3934 for (vector--; vector >= 0; vector--) {
3935 3935 (void) ddi_intr_remove_handler(
3936 3936 qlge->htable[vector]);
3937 3937 }
3938 3938 return (DDI_FAILURE);
3939 3939 }
3940 3940 intr_ctx++;
3941 3941 }
3942 3942 break;
3943 3943
3944 3944 case DDI_INTR_TYPE_MSI:
3945 3945 /*
3946 3946 * Add interrupt handlers for the only vector
3947 3947 */
3948 3948 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3949 3949
3950 3950 rc = ddi_intr_add_handler(qlge->htable[vector],
3951 3951 ql_isr,
3952 3952 (caddr_t)&qlge->rx_ring[0], NULL);
3953 3953
3954 3954 if (rc != DDI_SUCCESS) {
3955 3955 QL_PRINT(DBG_INIT,
3956 3956 ("Add MSI interrupt handler failed: %d\n", rc));
3957 3957 return (DDI_FAILURE);
3958 3958 }
3959 3959 break;
3960 3960
3961 3961 case DDI_INTR_TYPE_FIXED:
3962 3962 /*
3963 3963 * Add interrupt handlers for the only vector
3964 3964 */
3965 3965 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3966 3966
3967 3967 rc = ddi_intr_add_handler(qlge->htable[vector],
3968 3968 ql_isr,
3969 3969 (caddr_t)&qlge->rx_ring[0], NULL);
3970 3970
3971 3971 if (rc != DDI_SUCCESS) {
3972 3972 QL_PRINT(DBG_INIT,
3973 3973 ("Add legacy interrupt handler failed: %d\n", rc));
3974 3974 return (DDI_FAILURE);
3975 3975 }
3976 3976 break;
3977 3977
3978 3978 default:
3979 3979 return (DDI_FAILURE);
3980 3980 }
3981 3981
3982 3982 /* Enable interrupts */
3983 3983 /* Block enable */
3984 3984 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3985 3985 QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3986 3986 qlge->intr_cnt));
3987 3987 (void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3988 3988 } else { /* Non block enable */
3989 3989 for (i = 0; i < qlge->intr_cnt; i++) {
3990 3990 QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3991 3991 "handle 0x%x\n", i, qlge->htable[i]));
3992 3992 (void) ddi_intr_enable(qlge->htable[i]);
3993 3993 }
3994 3994 }
3995 3995 qlge->sequence |= INIT_INTR_ENABLED;
3996 3996
3997 3997 return (DDI_SUCCESS);
3998 3998 }
3999 3999
4000 4000 /*
4001 4001 * Here we build the intr_ctx structures based on
4002 4002 * our rx_ring count and intr vector count.
4003 4003 * The intr_ctx structure is used to hook each vector
4004 4004 * to possibly different handlers.
4005 4005 */
4006 4006 static void
4007 4007 ql_resolve_queues_to_irqs(qlge_t *qlge)
4008 4008 {
4009 4009 int i = 0;
4010 4010 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
4011 4011
4012 4012 if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
4013 4013 /*
4014 4014 * Each rx_ring has its own intr_ctx since we
4015 4015 * have separate vectors for each queue.
4016 4016		 * This is only true when MSI-X is enabled.
4017 4017 */
4018 4018 for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
4019 4019 qlge->rx_ring[i].irq = i;
4020 4020 intr_ctx->intr = i;
4021 4021 intr_ctx->qlge = qlge;
4022 4022
4023 4023 /*
4024 4024			 * We set up each vector's enable/disable/read bits so
4025 4025			 * there are no bit/mask calculations in the critical path.
4026 4026 */
4027 4027 intr_ctx->intr_en_mask =
4028 4028 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4029 4029 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4030 4030 INTR_EN_IHD | i;
4031 4031 intr_ctx->intr_dis_mask =
4032 4032 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4033 4033 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4034 4034 INTR_EN_IHD | i;
4035 4035 intr_ctx->intr_read_mask =
4036 4036 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4037 4037 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4038 4038 | i;
4039 4039
4040 4040 if (i == 0) {
4041 4041 /*
4042 4042 * Default queue handles bcast/mcast plus
4043 4043 * async events.
4044 4044 */
4045 4045 intr_ctx->handler = ql_isr;
4046 4046 } else if (qlge->rx_ring[i].type == TX_Q) {
4047 4047 /*
4048 4048 * Outbound queue is for outbound completions
4049 4049 * only.
4050 4050 */
4051 4051 if (qlge->isr_stride)
4052 4052 intr_ctx->handler = ql_msix_isr;
4053 4053 else
4054 4054 intr_ctx->handler = ql_msix_tx_isr;
4055 4055 } else {
4056 4056 /*
4057 4057 * Inbound queues handle unicast frames only.
4058 4058 */
4059 4059 if (qlge->isr_stride)
4060 4060 intr_ctx->handler = ql_msix_isr;
4061 4061 else
4062 4062 intr_ctx->handler = ql_msix_rx_isr;
4063 4063 }
4064 4064 }
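		/*
		 * Rings beyond the allocated vector count share vectors with
		 * earlier rings: ring i reuses vector (i - isr_stride).
		 */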
4065 4065 i = qlge->intr_cnt;
4066 4066 for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4067 4067 int iv = i - qlge->isr_stride;
4068 4068 qlge->rx_ring[i].irq = iv;
4069 4069 intr_ctx->intr = iv;
4070 4070 intr_ctx->qlge = qlge;
4071 4071
4072 4072 /*
4073 4073			 * We set up each vector's enable/disable/read bits so
4074 4074			 * there are no bit/mask calculations in the critical path.
4075 4075 */
4076 4076 intr_ctx->intr_en_mask =
4077 4077 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4078 4078 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4079 4079 INTR_EN_IHD | iv;
4080 4080 intr_ctx->intr_dis_mask =
4081 4081 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4082 4082 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4083 4083 INTR_EN_IHD | iv;
4084 4084 intr_ctx->intr_read_mask =
4085 4085 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4086 4086 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4087 4087 | iv;
4088 4088
4089 4089 if (qlge->rx_ring[i].type == TX_Q) {
4090 4090 /*
4091 4091 * Outbound queue is for outbound completions
4092 4092 * only.
4093 4093 */
4094 4094 intr_ctx->handler = ql_msix_isr;
4095 4095 } else {
4096 4096 /*
4097 4097 * Inbound queues handle unicast frames only.
4098 4098 */
4099 4099 intr_ctx->handler = ql_msix_rx_isr;
4100 4100 }
4101 4101 }
4102 4102 } else {
4103 4103 /*
4104 4104 * All rx_rings use the same intr_ctx since
4105 4105 * there is only one vector.
4106 4106 */
4107 4107 intr_ctx->intr = 0;
4108 4108 intr_ctx->qlge = qlge;
4109 4109 /*
4110 4110		 * We set up each vector's enable/disable/read bits so
4111 4111		 * there are no bit/mask calculations in the critical path.
4112 4112 */
4113 4113 intr_ctx->intr_en_mask =
4114 4114 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4115 4115 INTR_EN_TYPE_ENABLE;
4116 4116 intr_ctx->intr_dis_mask =
4117 4117 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4118 4118 INTR_EN_TYPE_DISABLE;
4119 4119 intr_ctx->intr_read_mask =
4120 4120 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4121 4121 INTR_EN_TYPE_READ;
4122 4122 /*
4123 4123 * Single interrupt means one handler for all rings.
4124 4124 */
4125 4125 intr_ctx->handler = ql_isr;
4126 4126 for (i = 0; i < qlge->rx_ring_count; i++)
4127 4127 qlge->rx_ring[i].irq = 0;
4128 4128 }
4129 4129 }
4130 4130
4131 4131
4132 4132 /*
4133 4133 * Free allocated interrupts.
4134 4134 */
4135 4135 static void
4136 4136 ql_free_irq_vectors(qlge_t *qlge)
4137 4137 {
4138 4138 int i;
4139 4139 int rc;
4140 4140
4141 4141 if (qlge->sequence & INIT_INTR_ENABLED) {
4142 4142 /* Disable all interrupts */
4143 4143 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4144 4144 /* Call ddi_intr_block_disable() */
4145 4145 (void) ddi_intr_block_disable(qlge->htable,
4146 4146 qlge->intr_cnt);
4147 4147 } else {
4148 4148 for (i = 0; i < qlge->intr_cnt; i++) {
4149 4149 (void) ddi_intr_disable(qlge->htable[i]);
4150 4150 }
4151 4151 }
4152 4152
4153 4153 qlge->sequence &= ~INIT_INTR_ENABLED;
4154 4154 }
4155 4155
4156 4156 for (i = 0; i < qlge->intr_cnt; i++) {
4157 4157
4158 4158 if (qlge->sequence & INIT_ADD_INTERRUPT)
4159 4159 (void) ddi_intr_remove_handler(qlge->htable[i]);
4160 4160
4161 4161 if (qlge->sequence & INIT_INTR_ALLOC) {
4162 4162 rc = ddi_intr_free(qlge->htable[i]);
4163 4163 if (rc != DDI_SUCCESS) {
4164 4164 /* EMPTY */
4165 4165 QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4166 4166 rc));
4167 4167 }
4168 4168 }
4169 4169 }
4170 4170 if (qlge->sequence & INIT_INTR_ALLOC)
4171 4171 qlge->sequence &= ~INIT_INTR_ALLOC;
4172 4172
4173 4173 if (qlge->sequence & INIT_ADD_INTERRUPT)
4174 4174 qlge->sequence &= ~INIT_ADD_INTERRUPT;
4175 4175
4176 4176 if (qlge->htable) {
4177 4177 kmem_free(qlge->htable, qlge->intr_size);
4178 4178 qlge->htable = NULL;
4179 4179 }
4180 4180 }
4181 4181
4182 4182 /*
4183 4183 * Allocate interrupt vectors
4184 4184 * For legacy and MSI, only 1 handle is needed.
4185 4185 * For MSI-X, if fewer than 2 vectors are available, return failure.
4186 4186 * Upon success, this maps the vectors to rx and tx rings for
4187 4187 * interrupts.
4188 4188 */
4189 4189 static int
4190 4190 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4191 4191 {
4192 4192 dev_info_t *devinfo;
4193 4193 uint32_t request, orig;
4194 4194 int count, avail, actual;
4195 4195 int minimum;
4196 4196 int rc;
4197 4197
4198 4198 devinfo = qlge->dip;
4199 4199
4200 4200 switch (intr_type) {
4201 4201 case DDI_INTR_TYPE_FIXED:
4202 4202 request = 1; /* Request 1 legacy interrupt handle */
4203 4203 minimum = 1;
4204 4204 QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4205 4205 break;
4206 4206
4207 4207 case DDI_INTR_TYPE_MSI:
4208 4208 request = 1; /* Request 1 MSI interrupt handle */
4209 4209 minimum = 1;
4210 4210 QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4211 4211 break;
4212 4212
4213 4213 case DDI_INTR_TYPE_MSIX:
4214 4214 /*
4215 4215 * Ideal number of vectors for the adapter is
4216 4216 * # rss rings + tx completion rings for default completion
4217 4217 * queue.
4218 4218 */
4219 4219 request = qlge->rx_ring_count;
4220 4220
4221 4221 orig = request;
4222 4222 if (request > (MAX_RX_RINGS))
4223 4223 request = MAX_RX_RINGS;
4224 4224 minimum = 2;
4225 4225 QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4226 4226 break;
4227 4227
4228 4228 default:
4229 4229 QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4230 4230 return (DDI_FAILURE);
4231 4231 }
4232 4232
4233 4233 QL_PRINT(DBG_INIT, ("interrupt handles requested: %d minimum: %d\n",
4234 4234 request, minimum));
4235 4235
4236 4236 /*
4237 4237 * Get number of supported interrupts
4238 4238 */
4239 4239 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4240 4240 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4241 4241 QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4242 4242 "count: %d\n", rc, count));
4243 4243 return (DDI_FAILURE);
4244 4244 }
4245 4245 QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4246 4246
4247 4247 /*
4248 4248 * Get number of available interrupts
4249 4249 */
4250 4250 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4251 4251 if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4252 4252 QL_PRINT(DBG_INIT,
4253 4253 ("Get interrupt available number failed. Return:"
4254 4254 " %d, available: %d\n", rc, avail));
4255 4255 return (DDI_FAILURE);
4256 4256 }
4257 4257 QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4258 4258
4259 4259 if (avail < request) {
4260 4260 QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4261 4261 request, avail));
4262 4262 request = avail;
4263 4263 }
4264 4264
4265 4265 actual = 0;
4266 4266 qlge->intr_cnt = 0;
4267 4267
4268 4268 /*
4269 4269 * Allocate an array of interrupt handles
4270 4270 */
4271 4271 qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4272 4272 qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4273 4273
4274 4274 rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4275 4275 (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4276 4276 if (rc != DDI_SUCCESS) {
4277 4277 cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4278 4278 " %d, request: %d, actual: %d",
4279 4279 __func__, qlge->instance, rc, request, actual);
4280 4280 goto ql_intr_alloc_fail;
4281 4281 }
4282 4282 qlge->intr_cnt = actual;
4283 4283
4284 4284 qlge->sequence |= INIT_INTR_ALLOC;
4285 4285
4286 4286 /*
4287 4287	 * If the actual number of vectors is less than the minimum
4288 4288 * then fail.
4289 4289 */
4290 4290 if (actual < minimum) {
4291 4291 cmn_err(CE_WARN,
4292 4292 "Insufficient interrupt handles available: %d", actual);
4293 4293 goto ql_intr_alloc_fail;
4294 4294 }
4295 4295
4296 4296 /*
4297 4297	 * For MSI-X, fewer granted vectors may force us to reduce the tx & rx rings.
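	 * Scale both ring counts down to the largest of orig/2, orig/4 or
	 * orig/8 that the granted count covers; isr_stride then lets the
	 * extra rings share vectors with earlier rings.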
4298 4298 */
4299 4299 if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4300 4300 if (actual >= (orig / 2)) {
4301 4301 count = orig / 2;
4302 4302 qlge->rss_ring_count = count;
4303 4303 qlge->tx_ring_count = count;
4304 4304 qlge->isr_stride = count;
4305 4305 } else if (actual >= (orig / 4)) {
4306 4306 count = orig / 4;
4307 4307 qlge->rss_ring_count = count;
4308 4308 qlge->tx_ring_count = count;
4309 4309 qlge->isr_stride = count;
4310 4310 } else if (actual >= (orig / 8)) {
4311 4311 count = orig / 8;
4312 4312 qlge->rss_ring_count = count;
4313 4313 qlge->tx_ring_count = count;
4314 4314 qlge->isr_stride = count;
4315 4315 } else if (actual < MAX_RX_RINGS) {
4316 4316 qlge->tx_ring_count = 1;
4317 4317 qlge->rss_ring_count = actual - 1;
4318 4318 }
4319 4319 qlge->intr_cnt = count;
4320 4320 qlge->rx_ring_count = qlge->tx_ring_count +
4321 4321 qlge->rss_ring_count;
4322 4322 }
4323 4323 cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4324 4324 qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4325 4325
4326 4326 /*
4327 4327 * Get priority for first vector, assume remaining are all the same
4328 4328 */
4329 4329 rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4330 4330 if (rc != DDI_SUCCESS) {
4331 4331 QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4332 4332 goto ql_intr_alloc_fail;
4333 4333 }
4334 4334
4335 4335 rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4336 4336 if (rc != DDI_SUCCESS) {
4337 4337 QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4338 4338 goto ql_intr_alloc_fail;
4339 4339 }
4340 4340
4341 4341 qlge->intr_type = intr_type;
4342 4342
4343 4343 return (DDI_SUCCESS);
4344 4344
4345 4345 ql_intr_alloc_fail:
4346 4346 ql_free_irq_vectors(qlge);
4347 4347
4348 4348 return (DDI_FAILURE);
4349 4349 }
4350 4350
4351 4351 /*
4352 4352 * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
4353 4353 * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
4354 4354 * transmit queue.
4355 4355 */
4356 4356 int
4357 4357 ql_alloc_irqs(qlge_t *qlge)
4358 4358 {
4359 4359 int intr_types;
4360 4360 int rval;
4361 4361
4362 4362 /*
4363 4363 * Get supported interrupt types
4364 4364 */
4365 4365 if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4366 4366 != DDI_SUCCESS) {
4367 4367 cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4368 4368 __func__, qlge->instance);
4369 4369
4370 4370 return (DDI_FAILURE);
4371 4371 }
4372 4372
4373 4373 QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4374 4374 __func__, qlge->instance, intr_types));
4375 4375
4376 4376 /* Install MSI-X interrupts */
4377 4377 if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4378 4378 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4379 4379 __func__, qlge->instance, intr_types));
4380 4380 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4381 4381 if (rval == DDI_SUCCESS) {
4382 4382 return (rval);
4383 4383 }
4384 4384 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4385 4385 " trying MSI interrupts ...\n", __func__, qlge->instance));
4386 4386 }
4387 4387
4388 4388 /*
4389 4389 * We will have 2 completion queues in MSI / Legacy mode,
4390 4390 * Queue 0 for default completions
4391 4391 * Queue 1 for transmit completions
4392 4392 */
4393 4393 qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4394 4394 qlge->tx_ring_count = 1; /* Single tx completion queue */
4395 4395 qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4396 4396
4397 4397 QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4398 4398 __func__, qlge->instance));
4399 4399 /*
4400 4400 * Add the h/w interrupt handler and initialise mutexes
4401 4401 */
4402 4402 rval = DDI_FAILURE;
4403 4403
4404 4404 /*
4405 4405	 * If the OS supports MSI-X but allocation fails, then try MSI
4406 4406	 * interrupts. If MSI allocation also fails, then fall back to a
4407 4407	 * fixed (legacy) interrupt.
4408 4408 */
4409 4409 if (intr_types & DDI_INTR_TYPE_MSI) {
4410 4410 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4411 4411 if (rval == DDI_SUCCESS) {
4412 4412 qlge->intr_type = DDI_INTR_TYPE_MSI;
4413 4413 QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4414 4414 __func__, qlge->instance));
4415 4415 }
4416 4416 }
4417 4417
4418 4418 /* Try Fixed interrupt Legacy mode */
4419 4419 if (rval != DDI_SUCCESS) {
4420 4420 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4421 4421 if (rval != DDI_SUCCESS) {
4422 4422 cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4423 4423 "allocation failed",
4424 4424 __func__, qlge->instance);
4425 4425 } else {
4426 4426 qlge->intr_type = DDI_INTR_TYPE_FIXED;
4427 4427 QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4428 4428 __func__, qlge->instance));
4429 4429 }
4430 4430 }
4431 4431
4432 4432 return (rval);
4433 4433 }
4434 4434
4435 4435 static void
4436 4436 ql_free_rx_tx_locks(qlge_t *qlge)
4437 4437 {
4438 4438 int i;
4439 4439 struct rx_ring *rx_ring;
4440 4440 struct tx_ring *tx_ring;
4441 4441
4442 4442 for (i = 0; i < qlge->tx_ring_count; i++) {
4443 4443 tx_ring = &qlge->tx_ring[i];
4444 4444 mutex_destroy(&tx_ring->tx_lock);
4445 4445 }
4446 4446
4447 4447 for (i = 0; i < qlge->rx_ring_count; i++) {
4448 4448 rx_ring = &qlge->rx_ring[i];
4449 4449 mutex_destroy(&rx_ring->rx_lock);
4450 4450 mutex_destroy(&rx_ring->sbq_lock);
4451 4451 mutex_destroy(&rx_ring->lbq_lock);
4452 4452 }
4453 4453 }
4454 4454
4455 4455 /*
4456 4456 * Frees all resources allocated during attach.
4457 4457 *
4458 4458 * Input:
4459 4459  *	qlge = adapter state pointer; its sequence field indicates
4460 4460  *	which resources were allocated and must now be freed.
4461 4461 *
4462 4462 * Context:
4463 4463 * Kernel context.
4464 4464 */
4465 4465 static void
4466 4466 ql_free_resources(qlge_t *qlge)
4467 4467 {
4468 4468
4469 4469 /* Disable driver timer */
4470 4470 ql_stop_timer(qlge);
4471 4471
4472 4472 if (qlge->sequence & INIT_MAC_REGISTERED) {
4473 4473 (void) mac_unregister(qlge->mh);
4474 4474 qlge->sequence &= ~INIT_MAC_REGISTERED;
4475 4475 }
4476 4476
4477 4477 if (qlge->sequence & INIT_MAC_ALLOC) {
4478 4478 /* Nothing to do, macp is already freed */
4479 4479 qlge->sequence &= ~INIT_MAC_ALLOC;
4480 4480 }
4481 4481
4482 4482 if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4483 4483 pci_config_teardown(&qlge->pci_handle);
4484 4484 qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4485 4485 }
4486 4486
4487 4487 if (qlge->sequence & INIT_INTR_ALLOC) {
4488 4488 ql_free_irq_vectors(qlge);
4489 4489 qlge->sequence &= ~INIT_ADD_INTERRUPT;
4490 4490 }
4491 4491
4492 4492 if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4493 4493 (void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4494 4494 (void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4495 4495 (void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4496 4496 qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4497 4497 }
4498 4498
4499 4499 if (qlge->sequence & INIT_KSTATS) {
4500 4500 ql_fini_kstats(qlge);
4501 4501 qlge->sequence &= ~INIT_KSTATS;
4502 4502 }
4503 4503
4504 4504 if (qlge->sequence & INIT_MUTEX) {
4505 4505 mutex_destroy(&qlge->gen_mutex);
4506 4506 mutex_destroy(&qlge->hw_mutex);
4507 4507 mutex_destroy(&qlge->mbx_mutex);
4508 4508 cv_destroy(&qlge->cv_mbx_intr);
4509 4509 qlge->sequence &= ~INIT_MUTEX;
4510 4510 }
4511 4511
4512 4512 if (qlge->sequence & INIT_LOCKS_CREATED) {
4513 4513 ql_free_rx_tx_locks(qlge);
4514 4514 qlge->sequence &= ~INIT_LOCKS_CREATED;
4515 4515 }
4516 4516
4517 4517 if (qlge->sequence & INIT_MEMORY_ALLOC) {
4518 4518 ql_free_mem_resources(qlge);
4519 4519 qlge->sequence &= ~INIT_MEMORY_ALLOC;
4520 4520 }
4521 4521
4522 4522 if (qlge->sequence & INIT_REGS_SETUP) {
4523 4523 ddi_regs_map_free(&qlge->dev_handle);
4524 4524 qlge->sequence &= ~INIT_REGS_SETUP;
4525 4525 }
4526 4526
4527 4527 if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4528 4528 ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4529 4529 qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4530 4530 }
4531 4531
4532 4532 /*
4533 4533	 * free the flash FLT table that was allocated in the attach stage
4534 4534 */
4535 4535	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
4536 4536 (qlge->flt.header.length != 0)) {
4537 4537 kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4538 4538 qlge->flt.ql_flt_entry_ptr = NULL;
4539 4539 }
4540 4540
4541 4541 if (qlge->sequence & INIT_FM) {
4542 4542 ql_fm_fini(qlge);
4543 4543 qlge->sequence &= ~INIT_FM;
4544 4544 }
4545 4545
4546 4546 ddi_prop_remove_all(qlge->dip);
4547 4547 ddi_set_driver_private(qlge->dip, NULL);
4548 4548
4549 4549 /* finally, free qlge structure */
4550 4550 if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4551 4551 kmem_free(qlge, sizeof (qlge_t));
4552 4552 }
4553 4553 }
4554 4554
4555 4555 /*
4556 4556 * Set promiscuous mode of the driver
4557 4557  * Caller must hold HW_LOCK
4558 4558 */
4559 4559 void
4560 4560 ql_set_promiscuous(qlge_t *qlge, int mode)
4561 4561 {
4562 4562 if (mode) {
4563 4563 (void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4564 4564 RT_IDX_VALID, 1);
4565 4565 } else {
4566 4566 (void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4567 4567 RT_IDX_VALID, 0);
4568 4568 }
4569 4569 }
4570 4570 /*
4571 4571 * Write 'data1' to Mac Protocol Address Index Register and
4572 4572 * 'data2' to Mac Protocol Address Data Register
4573 4573 * Assuming that the Mac Protocol semaphore lock has been acquired.
4574 4574 */
4575 4575 static int
4576 4576 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4577 4577 {
4578 4578 int return_value = DDI_SUCCESS;
4579 4579
4580 4580 if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4581 4581 MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4582 4582 cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4583 4583 "timeout.");
4584 4584 return_value = DDI_FAILURE;
4585 4585 goto out;
4586 4586 }
4587 4587 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4588 4588 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4589 4589 out:
4590 4590 return (return_value);
4591 4591 }
4592 4592 /*
4593 4593 * Enable the 'index'ed multicast address in the host memory's multicast_list
4594 4594 */
4595 4595 int
4596 4596 ql_add_multicast_address(qlge_t *qlge, int index)
4597 4597 {
4598 4598 int rtn_val = DDI_FAILURE;
4599 4599 uint32_t offset;
4600 4600 uint32_t value1, value2;
4601 4601
4602 4602 /* Acquire the required semaphore */
4603 4603 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4604 4604 return (rtn_val);
4605 4605 }
4606 4606
4607 4607 /* Program Offset0 - lower 32 bits of the MAC address */
4608 4608 offset = 0;
4609 4609 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4610 4610 (index << 4) | offset;
4611 4611 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4612 4612 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4613 4613 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4614 4614 |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4615 4615 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4616 4616 goto out;
4617 4617
4618 4618 /* Program offset1: upper 16 bits of the MAC address */
4619 4619 offset = 1;
4620 4620 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4621 4621 (index<<4) | offset;
4622 4622 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4623 4623 |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4624 4624 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4625 4625 goto out;
4626 4626 }
4627 4627 rtn_val = DDI_SUCCESS;
4628 4628 out:
4629 4629 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4630 4630 return (rtn_val);
4631 4631 }
4632 4632
4633 4633 /*
4634 4634 * Disable the 'index'ed multicast address in the host memory's multicast_list
4635 4635 */
4636 4636 int
4637 4637 ql_remove_multicast_address(qlge_t *qlge, int index)
4638 4638 {
4639 4639 int rtn_val = DDI_FAILURE;
4640 4640 uint32_t offset;
4641 4641 uint32_t value1, value2;
4642 4642
4643 4643 /* Acquire the required semaphore */
4644 4644 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4645 4645 return (rtn_val);
4646 4646 }
4647 4647 /* Program Offset0 - lower 32 bits of the MAC address */
4648 4648 offset = 0;
4649 4649 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4650 4650 value2 =
4651 4651 ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4652 4652 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4653 4653 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4654 4654 |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4655 4655 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4656 4656 goto out;
4657 4657 }
4658 4658 /* Program offset1: upper 16 bits of the MAC address */
4659 4659 offset = 1;
4660 4660 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4661 4661 value2 = 0;
4662 4662 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4663 4663 goto out;
4664 4664 }
4665 4665 rtn_val = DDI_SUCCESS;
4666 4666 out:
4667 4667 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4668 4668 return (rtn_val);
4669 4669 }
4670 4670
4671 4671  * Add a new multicast address to the list of supported multicast addresses.
4672 4672  * This API is called after the OS calls gld_set_multicast (GLDv2)
4673 4673  * or m_multicst (GLDv3).
4674 4674 * or m_multicst (GLDv3)
4675 4675 *
4676 4676  * The maximum number of multicast addresses is limited by hardware.
4677 4677 * The number of maximum multicast address is limited by hardware.
4678 4678 */
4679 4679 int
4680 4680 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4681 4681 {
4682 4682 uint32_t index = qlge->multicast_list_count;
4683 4683 int rval = DDI_SUCCESS;
4684 4684 int status;
4685 4685
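	/* only addresses with the multicast (group) bit set are acceptable */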
4686 4686 if ((ep[0] & 01) == 0) {
4687 4687 rval = EINVAL;
4688 4688 goto exit;
4689 4689 }
4690 4690
4691 4691	/* if there is available space in multicast_list, then add it */
4692 4692 if (index < MAX_MULTICAST_LIST_SIZE) {
4693 4693 bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4694 4694 ETHERADDRL);
4695 4695 /* increment the total number of addresses in multicast list */
4696 4696 (void) ql_add_multicast_address(qlge, index);
4697 4697 qlge->multicast_list_count++;
4698 4698 QL_PRINT(DBG_GLD,
4699 4699 ("%s(%d): added to index of multicast list= 0x%x, "
4700 4700 "total %d\n", __func__, qlge->instance, index,
4701 4701 qlge->multicast_list_count));
4702 4702
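		/*
		 * Past the hardware filter limit, fall back to multicast
		 * promiscuous routing.
		 */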
4703 4703 if (index > MAX_MULTICAST_HW_SIZE) {
4704 4704 if (!qlge->multicast_promisc) {
4705 4705 status = ql_set_routing_reg(qlge,
4706 4706 RT_IDX_ALLMULTI_SLOT,
4707 4707 RT_IDX_MCAST, 1);
4708 4708 if (status) {
4709 4709 cmn_err(CE_WARN,
4710 4710 "Failed to init routing reg "
4711 4711 "for mcast promisc mode.");
4712 4712 rval = ENOENT;
4713 4713 goto exit;
4714 4714 }
4715 4715 qlge->multicast_promisc = B_TRUE;
4716 4716 }
4717 4717 }
4718 4718 } else {
4719 4719 rval = ENOENT;
4720 4720 }
4721 4721 exit:
4722 4722 return (rval);
4723 4723 }
4724 4724
4725 4725 /*
4726 4726 * Remove an old multicast address from the list of supported multicast
4727 4727  * addresses. This API is called after the OS calls gld_set_multicast (GLDv2)
4728 4728  * or m_multicst (GLDv3).
4729 4729  * The maximum number of multicast addresses is limited by hardware.
4730 4730 */
4731 4731 int
4732 4732 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4733 4733 {
4734 4734 uint32_t total = qlge->multicast_list_count;
4735 4735 int i = 0;
4736 4736 int rmv_index = 0;
4737 4737 size_t length = sizeof (ql_multicast_addr);
4738 4738 int status;
4739 4739
4740 4740 for (i = 0; i < total; i++) {
4741 4741 if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4742 4742 continue;
4743 4743 }
4744 4744
4745 4745 rmv_index = i;
4746 4746		/* move the rest of the multicast addresses forward to fill the gap */
4747 4747		length = ((total - 1) - i) * sizeof (ql_multicast_addr);
4748 4748 if (length > 0) {
4749 4749 bcopy(&qlge->multicast_list[i+1],
4750 4750 &qlge->multicast_list[i], length);
4751 4751 }
4752 4752 qlge->multicast_list_count--;
4753 4753 if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4754 4754 /*
4755 4755			 * an entry was deleted from the multicast list,
4756 4756			 * so re-program the remaining entries
4757 4757 */
4758 4758 for (i = rmv_index; i < qlge->multicast_list_count;
4759 4759 i++) {
4760 4760 (void) ql_add_multicast_address(qlge, i);
4761 4761 }
4762 4762 /* and disable the last one */
4763 4763 (void) ql_remove_multicast_address(qlge, i);
4764 4764
4765 4765 /* disable multicast promiscuous mode */
4766 4766 if (qlge->multicast_promisc) {
4767 4767 status = ql_set_routing_reg(qlge,
4768 4768 RT_IDX_ALLMULTI_SLOT,
4769 4769 RT_IDX_MCAST, 0);
4770 4770 if (status) {
4771 4771 cmn_err(CE_WARN,
4772 4772 "Failed to init routing reg for "
4773 4773 "mcast promisc mode.");
4774 4774 goto exit;
4775 4775 }
4776 4776 /* write to config register */
4777 4777 qlge->multicast_promisc = B_FALSE;
4778 4778 }
4779 4779 }
4780 4780 break;
4781 4781 }
4782 4782 exit:
4783 4783 return (DDI_SUCCESS);
4784 4784 }
4785 4785
4786 4786 /*
4787 4787  * Read an XGMAC register
4788 4788 */
4789 4789 int
4790 4790 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4791 4791 {
4792 4792 int rtn_val = DDI_FAILURE;
4793 4793
4794 4794 /* wait for XGMAC Address register RDY bit set */
4795 4795 if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4796 4796 BIT_SET, 10) != DDI_SUCCESS) {
4797 4797 goto out;
4798 4798 }
4799 4799	/* start the read transaction */
4800 4800 ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4801 4801
4802 4802 /*
4803 4803 * wait for XGMAC Address register RDY bit set,
4804 4804 * which indicates data is ready
4805 4805 */
4806 4806 if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4807 4807 BIT_SET, 10) != DDI_SUCCESS) {
4808 4808 goto out;
4809 4809 }
4810 4810	/* read data from the XGMAC_DATA register */
4811 4811 *val = ql_read_reg(qlge, REG_XGMAC_DATA);
4812 4812 rtn_val = DDI_SUCCESS;
4813 4813 out:
4814 4814 return (rtn_val);
4815 4815 }
4816 4816
4817 4817 /*
4818 4818 * Implement checksum offload for IPv4 IP packets
4819 4819 */
4820 4820 static void
4821 4821 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4822 4822 struct ob_mac_iocb_req *mac_iocb_ptr)
4823 4823 {
4824 4824 struct ip *iphdr = NULL;
4825 4825 struct ether_header *ethhdr;
4826 4826 struct ether_vlan_header *ethvhdr;
4827 4827 struct tcphdr *tcp_hdr;
4828 4828 uint32_t etherType;
4829 4829 int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4830 4830 int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4831 4831
4832 4832 ethhdr = (struct ether_header *)((void *)bp);
4833 4833 ethvhdr = (struct ether_vlan_header *)((void *)bp);
4834 4834	/* Is this a VLAN packet? */
4835 4835 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4836 4836 mac_hdr_len = sizeof (struct ether_vlan_header);
4837 4837 etherType = ntohs(ethvhdr->ether_type);
4838 4838 } else {
4839 4839 mac_hdr_len = sizeof (struct ether_header);
4840 4840 etherType = ntohs(ethhdr->ether_type);
4841 4841 }
4842 4842 /* Is this IPv4 or IPv6 packet? */
4843 4843 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4844 4844 IPV4_VERSION) {
4845 4845 if (etherType == ETHERTYPE_IP /* 0800 */) {
4846 4846 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4847 4847 } else {
4848 4848 /* EMPTY */
4849 4849 QL_PRINT(DBG_TX,
4850 4850 ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4851 4851 __func__, qlge->instance, etherType));
4852 4852 }
4853 4853 }
4854 4854	/* IPv4 packets */
4855 4855 if (iphdr != NULL) {
4856 4856
4857 4857 ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4858 4858 QL_PRINT(DBG_TX,
4859 4859 ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4860 4860 " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4861 4861
4862 4862 ip_hdr_off = mac_hdr_len;
4863 4863 QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4864 4864 __func__, qlge->instance, ip_hdr_len));
4865 4865
4866 4866 mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4867 4867 OB_MAC_IOCB_REQ_IPv4);
4868 4868
4869 4869 if (pflags & HCK_IPV4_HDRCKSUM) {
4870 4870 QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4871 4871 __func__, qlge->instance));
4872 4872 mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4873 4873 mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4874 4874 OB_MAC_IOCB_REQ_IC);
4875 4875 iphdr->ip_sum = 0;
4876 4876 mac_iocb_ptr->hdr_off = (uint16_t)
4877 4877 cpu_to_le16(ip_hdr_off);
4878 4878 }
4879 4879 if (pflags & HCK_FULLCKSUM) {
4880 4880 if (iphdr->ip_p == IPPROTO_TCP) {
4881 4881 tcp_hdr =
4882 4882 (struct tcphdr *)(void *)
4883 4883 ((uint8_t *)(void *)iphdr + ip_hdr_len);
4884 4884 QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4885 4885 __func__, qlge->instance));
4886 4886 mac_iocb_ptr->opcode =
4887 4887 OPCODE_OB_MAC_OFFLOAD_IOCB;
4888 4888 mac_iocb_ptr->flag1 =
4889 4889 (uint8_t)(mac_iocb_ptr->flag1 |
4890 4890 OB_MAC_IOCB_REQ_TC);
4891 4891 mac_iocb_ptr->flag2 =
4892 4892 (uint8_t)(mac_iocb_ptr->flag2 |
4893 4893 OB_MAC_IOCB_REQ_IC);
4894 4894 iphdr->ip_sum = 0;
4895 4895 tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4896 4896 tcp_udp_hdr_len = tcp_hdr->th_off*4;
4897 4897 QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4898 4898 __func__, qlge->instance, tcp_udp_hdr_len));
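				/*
				 * hdr_off carries the IP header offset in its
				 * low bits and the TCP header offset shifted
				 * left by 6 in the bits above them.
				 */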
4899 4899 hdr_off = ip_hdr_off;
4900 4900 tcp_udp_hdr_off <<= 6;
4901 4901 hdr_off |= tcp_udp_hdr_off;
4902 4902 mac_iocb_ptr->hdr_off =
4903 4903 (uint16_t)cpu_to_le16(hdr_off);
4904 4904 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4905 4905 cpu_to_le16(mac_hdr_len + ip_hdr_len +
4906 4906 tcp_udp_hdr_len);
4907 4907
4908 4908 /*
4909 4909				 * if the chip is unable to do the pseudo header
4910 4910				 * cksum calculation, do it in software and put
4911 4911				 * the result into the data passed to the chip
4912 4912 */
4913 4913 if (qlge->cfg_flags &
4914 4914 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4915 4915 ql_pseudo_cksum((uint8_t *)iphdr);
4916 4916 }
4917 4917 } else if (iphdr->ip_p == IPPROTO_UDP) {
4918 4918 QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4919 4919 __func__, qlge->instance));
4920 4920 mac_iocb_ptr->opcode =
4921 4921 OPCODE_OB_MAC_OFFLOAD_IOCB;
4922 4922 mac_iocb_ptr->flag1 =
4923 4923 (uint8_t)(mac_iocb_ptr->flag1 |
4924 4924 OB_MAC_IOCB_REQ_UC);
4925 4925 mac_iocb_ptr->flag2 =
4926 4926 (uint8_t)(mac_iocb_ptr->flag2 |
4927 4927 OB_MAC_IOCB_REQ_IC);
4928 4928 iphdr->ip_sum = 0;
4929 4929 tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4930 4930 tcp_udp_hdr_len = sizeof (struct udphdr);
4931 4931 QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4932 4932 __func__, qlge->instance, tcp_udp_hdr_len));
4933 4933 hdr_off = ip_hdr_off;
4934 4934 tcp_udp_hdr_off <<= 6;
4935 4935 hdr_off |= tcp_udp_hdr_off;
4936 4936 mac_iocb_ptr->hdr_off =
4937 4937 (uint16_t)cpu_to_le16(hdr_off);
4938 4938 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4939 4939 cpu_to_le16(mac_hdr_len + ip_hdr_len
4940 4940 + tcp_udp_hdr_len);
4941 4941
4942 4942 /*
4943 4943				 * if the chip is unable to calculate the pseudo
4944 4944				 * hdr cksum, do it in software and put the result
4945 4945				 * into the data passed to the chip
4946 4946 */
4947 4947 if (qlge->cfg_flags &
4948 4948 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4949 4949 ql_pseudo_cksum((uint8_t *)iphdr);
4950 4950 }
4951 4951 }
4952 4952 }
4953 4953 }
4954 4954 }
4955 4955
4956 4956 /*
4957 4957 * For TSO/LSO:
4958 4958 * MAC frame transmission with TCP large segment offload is performed in the
4959 4959 * same way as the MAC frame transmission with checksum offload with the
4960 4960 * exception that the maximum TCP segment size (MSS) must be specified to
4961 4961 * allow the chip to segment the data into legal sized frames.
4962 4962 * The host also needs to calculate a pseudo-header checksum over the
4963 4963 * following fields:
4964 4964 * Source IP Address, Destination IP Address, and the Protocol.
4965 4965 * The TCP length is not included in the pseudo-header calculation.
4966 4966  * The pseudo-header checksum is placed in the TCP checksum field of the
4967 4967 * prototype header.
4968 4968 */
4969 4969 static void
4970 4970 ql_lso_pseudo_cksum(uint8_t *buf)
4971 4971 {
4972 4972 uint32_t cksum;
4973 4973 uint16_t iphl;
4974 4974 uint16_t proto;
4975 4975
4976 4976 /*
4977 4977 * Calculate the LSO pseudo-header checksum.
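	 * The fixed offsets below index the IPv4 header: the low nibble of
	 * buf[0] is the header length in 32-bit words, buf[9] is the
	 * protocol, and buf[12..19] hold the source and destination addresses.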
4978 4978 */
4979 4979 iphl = (uint16_t)(4 * (buf[0] & 0xF));
4980 4980 cksum = proto = buf[9];
4981 4981 cksum += (((uint16_t)buf[12])<<8) + buf[13];
4982 4982 cksum += (((uint16_t)buf[14])<<8) + buf[15];
4983 4983 cksum += (((uint16_t)buf[16])<<8) + buf[17];
4984 4984 cksum += (((uint16_t)buf[18])<<8) + buf[19];
4985 4985 cksum = (cksum>>16) + (cksum & 0xFFFF);
4986 4986 cksum = (cksum>>16) + (cksum & 0xFFFF);
4987 4987
4988 4988 /*
4989 4989 * Point it to the TCP/UDP header, and
4990 4990 * update the checksum field.
4991 4991 */
4992 4992 buf += iphl + ((proto == IPPROTO_TCP) ?
4993 4993 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4994 4994
4995 4995 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4996 4996 }
4997 4997
4998 4998 /*
4999 4999 * For IPv4 IP packets, distribute the tx packets evenly among tx rings
5000 5000 */
5001 5001 typedef uint32_t ub4; /* unsigned 4-byte quantities */
5002 5002 typedef uint8_t ub1;
5003 5003
5004 5004 #define hashsize(n) ((ub4)1<<(n))
5005 5005 #define hashmask(n) (hashsize(n)-1)
5006 5006
5007 5007 #define mix(a, b, c) \
5008 5008 { \
5009 5009 a -= b; a -= c; a ^= (c>>13); \
5010 5010 b -= c; b -= a; b ^= (a<<8); \
5011 5011 c -= a; c -= b; c ^= (b>>13); \
5012 5012 a -= b; a -= c; a ^= (c>>12); \
5013 5013 b -= c; b -= a; b ^= (a<<16); \
5014 5014 c -= a; c -= b; c ^= (b>>5); \
5015 5015 a -= b; a -= c; a ^= (c>>3); \
5016 5016 b -= c; b -= a; b ^= (a<<10); \
5017 5017 c -= a; c -= b; c ^= (b>>15); \
5018 5018 }
5019 5019
5020 5020 ub4
5021 5021 hash(k, length, initval)
5022 5022 register ub1 *k; /* the key */
5023 5023 register ub4 length; /* the length of the key */
5024 5024 register ub4 initval; /* the previous hash, or an arbitrary value */
5025 5025 {
5026 5026 register ub4 a, b, c, len;
5027 5027
5028 5028 /* Set up the internal state */
5029 5029 len = length;
5030 5030 a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
5031 5031 c = initval; /* the previous hash value */
5032 5032
5033 5033 /* handle most of the key */
5034 5034 while (len >= 12) {
5035 5035 a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
5036 5036 b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
5037 5037 c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
5038 5038 mix(a, b, c);
5039 5039 k += 12;
5040 5040 len -= 12;
5041 5041 }
5042 5042
5043 5043 /* handle the last 11 bytes */
5044 5044 c += length;
5045 5045 /* all the case statements fall through */
5046 5046 switch (len) {
5047 5047 /* FALLTHRU */
5048 5048 case 11: c += ((ub4)k[10]<<24);
5049 5049 /* FALLTHRU */
5050 5050 case 10: c += ((ub4)k[9]<<16);
5051 5051 /* FALLTHRU */
5052 5052 case 9 : c += ((ub4)k[8]<<8);
5053 5053 /* the first byte of c is reserved for the length */
5054 5054 /* FALLTHRU */
5055 5055 case 8 : b += ((ub4)k[7]<<24);
5056 5056 /* FALLTHRU */
5057 5057 case 7 : b += ((ub4)k[6]<<16);
5058 5058 /* FALLTHRU */
5059 5059 case 6 : b += ((ub4)k[5]<<8);
5060 5060 /* FALLTHRU */
5061 5061 case 5 : b += k[4];
5062 5062 /* FALLTHRU */
5063 5063 case 4 : a += ((ub4)k[3]<<24);
5064 5064 /* FALLTHRU */
5065 5065 case 3 : a += ((ub4)k[2]<<16);
5066 5066 /* FALLTHRU */
5067 5067 case 2 : a += ((ub4)k[1]<<8);
5068 5068 /* FALLTHRU */
5069 5069 case 1 : a += k[0];
5070 5070 /* case 0: nothing left to add */
5071 5071 }
5072 5072 mix(a, b, c);
5073 5073 /* report the result */
5074 5074 return (c);
5075 5075 }
5076 5076
5077 5077 uint8_t
5078 5078 ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5079 5079 {
5080 5080 struct ip *iphdr = NULL;
5081 5081 struct ether_header *ethhdr;
5082 5082 struct ether_vlan_header *ethvhdr;
5083 5083 struct tcphdr *tcp_hdr;
5084 5084 struct udphdr *udp_hdr;
5085 5085 uint32_t etherType;
5086 5086 int mac_hdr_len, ip_hdr_len;
5087 5087 uint32_t h = 0; /* 0 by default */
5088 5088 uint8_t tx_ring_id = 0;
5089 5089 uint32_t ip_src_addr = 0;
5090 5090 uint32_t ip_desc_addr = 0;
5091 5091 uint16_t src_port = 0;
5092 5092 uint16_t dest_port = 0;
5093 5093 uint8_t key[12];
5094 5094 QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5095 5095
5096 5096 ethhdr = (struct ether_header *)((void *)bp);
5097 5097 ethvhdr = (struct ether_vlan_header *)((void *)bp);
5098 5098
5099 5099 if (qlge->tx_ring_count == 1)
5100 5100 return (tx_ring_id);
5101 5101
5102 5102 	/* Is this a VLAN packet? */
5103 5103 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5104 5104 mac_hdr_len = sizeof (struct ether_vlan_header);
5105 5105 etherType = ntohs(ethvhdr->ether_type);
5106 5106 } else {
5107 5107 mac_hdr_len = sizeof (struct ether_header);
5108 5108 etherType = ntohs(ethhdr->ether_type);
5109 5109 }
5110 5110 	/* Is this an IPv4 or IPv6 packet? */
5111 5111 if (etherType == ETHERTYPE_IP /* 0800 */) {
5112 5112 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5113 5113 == IPV4_VERSION) {
5114 5114 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5115 5115 }
5116 5116 if (((unsigned long)iphdr) & 0x3) {
5117 5117 /* IP hdr not 4-byte aligned */
5118 5118 return (tx_ring_id);
5119 5119 }
5120 5120 }
5121 5121 /* ipV4 packets */
5122 5122 if (iphdr) {
5123 5123
5124 5124 ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5125 5125 ip_src_addr = iphdr->ip_src.s_addr;
5126 5126 ip_desc_addr = iphdr->ip_dst.s_addr;
5127 5127
5128 5128 if (iphdr->ip_p == IPPROTO_TCP) {
5129 5129 tcp_hdr = (struct tcphdr *)(void *)
5130 5130 ((uint8_t *)iphdr + ip_hdr_len);
5131 5131 src_port = tcp_hdr->th_sport;
5132 5132 dest_port = tcp_hdr->th_dport;
5133 5133 } else if (iphdr->ip_p == IPPROTO_UDP) {
5134 5134 udp_hdr = (struct udphdr *)(void *)
5135 5135 ((uint8_t *)iphdr + ip_hdr_len);
5136 5136 src_port = udp_hdr->uh_sport;
5137 5137 dest_port = udp_hdr->uh_dport;
5138 5138 }
5139 5139 key[0] = (uint8_t)((ip_src_addr) &0xFF);
5140 5140 key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5141 5141 key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5142 5142 key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5143 5143 key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5144 5144 key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5145 5145 key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5146 5146 key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5147 5147 key[8] = (uint8_t)((src_port) &0xFF);
5148 5148 key[9] = (uint8_t)((src_port >> 8) &0xFF);
5149 5149 key[10] = (uint8_t)((dest_port) &0xFF);
5150 5150 key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5151 5151 h = hash(key, 12, 0); /* return 32 bit */
5152 5152 tx_ring_id = (h & (qlge->tx_ring_count - 1));
5153 5153 if (tx_ring_id >= qlge->tx_ring_count) {
5154 5154 cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5155 5155 __func__, tx_ring_id);
5156 5156 tx_ring_id = 0;
5157 5157 }
5158 5158 }
5159 5159 return (tx_ring_id);
5160 5160 }
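The final ring selection above only works as a modulo because qlge->tx_ring_count is a power of two, which is the same assumption hashmask() encodes. Below is a small stand-alone sketch of the 12-byte flow key layout and the masking step, using made-up addresses, ports, and a stand-in hash value; it is illustration only, not driver code.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t src = 0xc0a80101;	/* example 32-bit source address */
	uint32_t dst = 0xc0a80102;	/* example 32-bit destination address */
	uint16_t sport = 49152;
	uint16_t dport = 80;
	uint32_t h = 0x9ae16a3b;	/* stand-in for hash(key, 12, 0) */
	uint32_t tx_ring_count = 8;	/* must be a power of two */
	uint8_t key[12];
	int i;

	/* same byte layout as key[0..11] built in ql_tx_hashing() above */
	for (i = 0; i < 4; i++) {
		key[i] = (uint8_t)(src >> (8 * i));
		key[4 + i] = (uint8_t)(dst >> (8 * i));
	}
	key[8] = (uint8_t)sport;
	key[9] = (uint8_t)(sport >> 8);
	key[10] = (uint8_t)dport;
	key[11] = (uint8_t)(dport >> 8);

	(void) printf("key[0..3] = %02x %02x %02x %02x\n",
	    key[0], key[1], key[2], key[3]);
	/* h & (n - 1) is only a valid modulo when n is a power of two */
	(void) printf("tx_ring_id = %u\n", h & (tx_ring_count - 1));
	return (0);
}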
5161 5161
5162 5162 /*
5163 5163 * Tell the hardware to do Large Send Offload (LSO)
5164 5164 *
5165 5165 * Some fields in ob_mac_iocb need to be set so hardware can know what is
5166 5166 * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
5167 5167 * in the right place of the packet etc, thus, hardware can process the
5168 5168 * packet correctly.
5169 5169 */
5170 5170 static void
5171 5171 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5172 5172 struct ob_mac_iocb_req *mac_iocb_ptr)
5173 5173 {
5174 5174 struct ip *iphdr = NULL;
5175 5175 struct ether_header *ethhdr;
5176 5176 struct ether_vlan_header *ethvhdr;
5177 5177 struct tcphdr *tcp_hdr;
5178 5178 struct udphdr *udp_hdr;
5179 5179 uint32_t etherType;
5180 5180 uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5181 5181 uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5182 5182
5183 5183 ethhdr = (struct ether_header *)(void *)bp;
5184 5184 ethvhdr = (struct ether_vlan_header *)(void *)bp;
5185 5185
5186 5186 	/* Is this a VLAN packet? */
5187 5187 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5188 5188 mac_hdr_len = sizeof (struct ether_vlan_header);
5189 5189 etherType = ntohs(ethvhdr->ether_type);
5190 5190 } else {
5191 5191 mac_hdr_len = sizeof (struct ether_header);
5192 5192 etherType = ntohs(ethhdr->ether_type);
5193 5193 }
5194 5194 	/* Is this an IPv4 or IPv6 packet? */
5195 5195 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5196 5196 IPV4_VERSION) {
5197 5197 if (etherType == ETHERTYPE_IP /* 0800 */) {
5198 5198 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5199 5199 } else {
5200 5200 /* EMPTY */
5201 5201 QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5202 5202 " type 0x%x\n",
5203 5203 __func__, qlge->instance, etherType));
5204 5204 }
5205 5205 }
5206 5206
5207 5207 if (iphdr != NULL) { /* ipV4 packets */
5208 5208 ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5209 5209 QL_PRINT(DBG_TX,
5210 5210 ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5211 5211 " bytes \n", __func__, qlge->instance, ip_hdr_len));
5212 5212
5213 5213 ip_hdr_off = mac_hdr_len;
5214 5214 QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5215 5215 __func__, qlge->instance, ip_hdr_len));
5216 5216
5217 5217 mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5218 5218 OB_MAC_IOCB_REQ_IPv4);
5219 5219 if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5220 5220 if (iphdr->ip_p == IPPROTO_TCP) {
5221 5221 tcp_hdr = (struct tcphdr *)(void *)
5222 5222 ((uint8_t *)(void *)iphdr +
5223 5223 ip_hdr_len);
5224 5224 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5225 5225 "packet\n",
5226 5226 __func__, qlge->instance));
5227 5227 mac_iocb_ptr->opcode =
5228 5228 OPCODE_OB_MAC_OFFLOAD_IOCB;
5229 5229 mac_iocb_ptr->flag1 =
5230 5230 (uint8_t)(mac_iocb_ptr->flag1 |
5231 5231 OB_MAC_IOCB_REQ_LSO);
5232 5232 iphdr->ip_sum = 0;
5233 5233 tcp_udp_hdr_off =
5234 5234 (uint16_t)(mac_hdr_len+ip_hdr_len);
5235 5235 tcp_udp_hdr_len =
5236 5236 (uint16_t)(tcp_hdr->th_off*4);
5237 5237 QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5238 5238 __func__, qlge->instance, tcp_udp_hdr_len));
5239 5239 hdr_off = ip_hdr_off;
5240 5240 tcp_udp_hdr_off <<= 6;
5241 5241 hdr_off |= tcp_udp_hdr_off;
5242 5242 mac_iocb_ptr->hdr_off =
5243 5243 (uint16_t)cpu_to_le16(hdr_off);
5244 5244 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5245 5245 cpu_to_le16(mac_hdr_len + ip_hdr_len +
5246 5246 tcp_udp_hdr_len);
5247 5247 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5248 5248
5249 5249 /*
5250 5250 				 * if the chip is unable to calculate the
5251 5251 				 * pseudo header checksum, do it here, then put
5252 5252 				 * the result into the data passed to the chip
5253 5253 */
5254 5254 if (qlge->cfg_flags &
5255 5255 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5256 5256 ql_lso_pseudo_cksum((uint8_t *)iphdr);
5257 5257 } else if (iphdr->ip_p == IPPROTO_UDP) {
5258 5258 udp_hdr = (struct udphdr *)(void *)
5259 5259 ((uint8_t *)(void *)iphdr
5260 5260 + ip_hdr_len);
5261 5261 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5262 5262 "packet\n",
5263 5263 __func__, qlge->instance));
5264 5264 mac_iocb_ptr->opcode =
5265 5265 OPCODE_OB_MAC_OFFLOAD_IOCB;
5266 5266 mac_iocb_ptr->flag1 =
5267 5267 (uint8_t)(mac_iocb_ptr->flag1 |
5268 5268 OB_MAC_IOCB_REQ_LSO);
5269 5269 iphdr->ip_sum = 0;
5270 5270 tcp_udp_hdr_off =
5271 5271 (uint16_t)(mac_hdr_len+ip_hdr_len);
5272 5272 tcp_udp_hdr_len =
5273 5273 (uint16_t)(udp_hdr->uh_ulen*4);
5274 5274 QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5275 5275 __func__, qlge->instance, tcp_udp_hdr_len));
5276 5276 hdr_off = ip_hdr_off;
5277 5277 tcp_udp_hdr_off <<= 6;
5278 5278 hdr_off |= tcp_udp_hdr_off;
5279 5279 mac_iocb_ptr->hdr_off =
5280 5280 (uint16_t)cpu_to_le16(hdr_off);
5281 5281 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5282 5282 cpu_to_le16(mac_hdr_len + ip_hdr_len +
5283 5283 tcp_udp_hdr_len);
5284 5284 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5285 5285
5286 5286 /*
5287 5287 				 * if the chip is unable to do the pseudo
5288 5288 				 * header checksum calculation, do it here, then
5289 5289 				 * put the result into the data passed to the chip
5290 5290 */
5291 5291 if (qlge->cfg_flags &
5292 5292 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5293 5293 ql_lso_pseudo_cksum((uint8_t *)iphdr);
5294 5294 }
5295 5295 }
5296 5296 }
5297 5297 }
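Both the checksum and LSO paths pack two offsets into the 16-bit hdr_off field: the IP header offset goes in the low 6 bits and the TCP/UDP header offset is shifted left by 6 before the two are OR-ed together. The stand-alone sketch below walks that packing for a plain Ethernet plus 20-byte IPv4 frame; the frame layout is a hypothetical example and the code is user-level C, not part of the driver.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t mac_hdr_len = 14;	/* untagged Ethernet header */
	uint16_t ip_hdr_len = 20;	/* IPv4 header, no options */
	uint16_t ip_hdr_off = mac_hdr_len;
	uint16_t tcp_udp_hdr_off = (uint16_t)(mac_hdr_len + ip_hdr_len);
	uint16_t hdr_off;

	tcp_udp_hdr_off <<= 6;			/* L4 offset in bits 6..15 */
	hdr_off = (uint16_t)(ip_hdr_off | tcp_udp_hdr_off);

	(void) printf("hdr_off = 0x%04x (ip off %u, l4 off %u)\n",
	    (unsigned)hdr_off, (unsigned)(hdr_off & 0x3f),
	    (unsigned)(hdr_off >> 6));
	return (0);
}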
5298 5298
5299 5299 /*
5300 5300 * Generic packet sending function which is used to send one packet.
5301 5301 */
5302 5302 int
5303 5303 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5304 5304 {
5305 5305 struct tx_ring_desc *tx_cb;
5306 5306 struct ob_mac_iocb_req *mac_iocb_ptr;
5307 5307 mblk_t *tp;
5308 5308 size_t msg_len = 0;
5309 5309 size_t off;
5310 5310 caddr_t bp;
5311 5311 size_t nbyte, total_len;
5312 5312 uint_t i = 0;
5313 5313 int j = 0, frags = 0;
5314 5314 uint32_t phy_addr_low, phy_addr_high;
5315 5315 uint64_t phys_addr;
5316 5316 clock_t now;
5317 5317 uint32_t pflags = 0;
5318 5318 uint32_t mss = 0;
5319 5319 enum tx_mode_t tx_mode;
5320 5320 struct oal_entry *oal_entry;
5321 5321 int status;
5322 5322 uint_t ncookies, oal_entries, max_oal_entries;
5323 5323 size_t max_seg_len = 0;
5324 5324 boolean_t use_lso = B_FALSE;
5325 5325 struct oal_entry *tx_entry = NULL;
5326 5326 struct oal_entry *last_oal_entry;
5327 5327 qlge_t *qlge = tx_ring->qlge;
5328 5328 ddi_dma_cookie_t dma_cookie;
5329 5329 size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5330 5330 int force_pullup = 0;
5331 5331
5332 5332 tp = mp;
5333 5333 total_len = msg_len = 0;
5334 5334 max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5335 5335
5336 5336 	/* Calculate total length and segment count of the incoming message */
5337 5337 for (tp = mp; tp != NULL; tp = tp->b_cont) {
5338 5338 nbyte = MBLKL(tp);
5339 5339 total_len += nbyte;
5340 5340 max_seg_len = max(nbyte, max_seg_len);
5341 5341 QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5342 5342 "total length: %d\n", frags, nbyte));
5343 5343 frags++;
5344 5344 }
5345 5345
5346 5346 if (total_len >= QL_LSO_MAX) {
5347 5347 freemsg(mp);
5348 5348 #ifdef QLGE_LOAD_UNLOAD
5349 5349 cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5350 5350 __func__, (int)total_len);
5351 5351 #endif
5352 5352 return (NULL);
5353 5353 }
5354 5354
5355 5355 bp = (caddr_t)mp->b_rptr;
5356 5356 if (bp[0] & 1) {
5357 5357 if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5358 5358 ETHERADDRL) == 0) {
5359 5359 QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5360 5360 tx_ring->brdcstxmt++;
5361 5361 } else {
5362 5362 QL_PRINT(DBG_TX, ("multicast packet\n"));
5363 5363 tx_ring->multixmt++;
5364 5364 }
5365 5365 }
5366 5366
5367 5367 tx_ring->obytes += total_len;
5368 5368 tx_ring->opackets ++;
5369 5369
5370 5370 QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5371 5371 " max seg len: %d\n", total_len, frags, max_seg_len));
5372 5372
5373 5373 /* claim a free slot in tx ring */
5374 5374 tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5375 5375
5376 5376 /* get the tx descriptor */
5377 5377 mac_iocb_ptr = tx_cb->queue_entry;
5378 5378
5379 5379 bzero((void *)mac_iocb_ptr, 20);
5380 5380
5381 5381 ASSERT(tx_cb->mp == NULL);
5382 5382
5383 5383 /*
5384 5384 * Decide to use DMA map or copy mode.
5385 5385 * DMA map mode must be used when the total msg length is more than the
5386 5386 * tx buffer length.
5387 5387 */
5388 5388
5389 5389 if (total_len > tx_buf_len)
5390 5390 tx_mode = USE_DMA;
5391 5391 else if (max_seg_len > QL_MAX_COPY_LENGTH)
5392 5392 tx_mode = USE_DMA;
5393 5393 else
5394 5394 tx_mode = USE_COPY;
5395 5395
5396 5396 if (qlge->chksum_cap) {
5397 5397 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5398 5398 QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5399 5399 "is 0x%x \n", pflags, qlge->chksum_cap));
5400 5400 if (qlge->lso_enable) {
5401 5401 uint32_t lso_flags = 0;
5402 5402 mac_lso_get(mp, &mss, &lso_flags);
5403 5403 use_lso = (lso_flags == HW_LSO);
5404 5404 }
5405 5405 QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5406 5406 mss, use_lso));
5407 5407 }
5408 5408
5409 5409 do_pullup:
5410 5410
5411 5411 /* concatenate all frags into one large packet if too fragmented */
5412 5412 if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
5413 5413 force_pullup) {
5414 5414 mblk_t *mp1;
5415 5415 if ((mp1 = msgpullup(mp, -1)) != NULL) {
5416 5416 freemsg(mp);
5417 5417 mp = mp1;
5418 5418 frags = 1;
5419 5419 } else {
5420 5420 tx_ring->tx_fail_dma_bind++;
5421 5421 goto bad;
5422 5422 }
5423 5423 }
5424 5424
5425 5425 tx_cb->tx_bytes = (uint32_t)total_len;
5426 5426 tx_cb->mp = mp;
5427 5427 tx_cb->tx_dma_handle_used = 0;
5428 5428
5429 5429 if (tx_mode == USE_DMA) {
5430 5430 msg_len = total_len;
5431 5431
5432 5432 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5433 5433 mac_iocb_ptr->tid = tx_ring->prod_idx;
5434 5434 mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5435 5435 mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5436 5436
5437 5437 tx_entry = &mac_iocb_ptr->oal_entry[0];
5438 5438 oal_entry = NULL;
5439 5439
5440 5440 for (tp = mp, oal_entries = j = 0; tp != NULL;
5441 5441 tp = tp->b_cont) {
5442 5442 /* if too many tx dma handles needed */
5443 5443 if (j >= QL_MAX_TX_DMA_HANDLES) {
5444 5444 tx_ring->tx_no_dma_handle++;
5445 5445 if (!force_pullup) {
5446 5446 force_pullup = 1;
5447 5447 goto do_pullup;
5448 5448 } else {
5449 5449 goto bad;
5450 5450 }
5451 5451 }
5452 5452 nbyte = (uint16_t)MBLKL(tp);
5453 5453 if (nbyte == 0)
5454 5454 continue;
5455 5455
5456 5456 status = ddi_dma_addr_bind_handle(
5457 5457 tx_cb->tx_dma_handle[j], NULL,
5458 5458 (caddr_t)tp->b_rptr, nbyte,
5459 5459 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5460 5460 0, &dma_cookie, &ncookies);
5461 5461
5462 5462 QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5463 5463 "length: %d, spans in %d cookies\n",
5464 5464 j, nbyte, ncookies));
5465 5465
5466 5466 if (status != DDI_DMA_MAPPED) {
5467 5467 goto bad;
5468 5468 }
5469 5469 /*
5470 5470 * Each fragment can span several cookies. One cookie
5471 5471 * will use one tx descriptor to transmit.
5472 5472 */
5473 5473 for (i = ncookies; i > 0; i--, tx_entry++,
5474 5474 oal_entries++) {
5475 5475 /*
5476 5476 * The number of TX descriptors that can be
5477 5477 * saved in tx iocb and oal list is limited
5478 5478 */
5479 5479 if (oal_entries > max_oal_entries) {
5480 5480 tx_ring->tx_no_dma_cookie++;
5481 5481 if (!force_pullup) {
5482 5482 force_pullup = 1;
5483 5483 goto do_pullup;
5484 5484 } else {
5485 5485 goto bad;
5486 5486 }
5487 5487 }
5488 5488
5489 5489 if ((oal_entries == TX_DESC_PER_IOCB) &&
5490 5490 !oal_entry) {
5491 5491 /*
5492 5492 					 * Time to switch to an oal list.
5493 5493 					 * The last entry should be copied
5494 5494 					 * to the first entry in the oal list
5495 5495 */
5496 5496 oal_entry = tx_cb->oal;
5497 5497 tx_entry =
5498 5498 &mac_iocb_ptr->oal_entry[
5499 5499 TX_DESC_PER_IOCB-1];
5500 5500 bcopy(tx_entry, oal_entry,
5501 5501 sizeof (*oal_entry));
5502 5502
5503 5503 /*
5504 5504 * last entry should be updated to
5505 5505 * point to the extended oal list itself
5506 5506 */
5507 5507 tx_entry->buf_addr_low =
5508 5508 cpu_to_le32(
5509 5509 LS_64BITS(tx_cb->oal_dma_addr));
5510 5510 tx_entry->buf_addr_high =
5511 5511 cpu_to_le32(
5512 5512 MS_64BITS(tx_cb->oal_dma_addr));
5513 5513 /*
5514 5514 * Point tx_entry to the oal list
5515 5515 * second entry
5516 5516 */
5517 5517 tx_entry = &oal_entry[1];
5518 5518 }
5519 5519
5520 5520 tx_entry->buf_len =
5521 5521 (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5522 5522 phys_addr = dma_cookie.dmac_laddress;
5523 5523 tx_entry->buf_addr_low =
5524 5524 cpu_to_le32(LS_64BITS(phys_addr));
5525 5525 tx_entry->buf_addr_high =
5526 5526 cpu_to_le32(MS_64BITS(phys_addr));
5527 5527
5528 5528 last_oal_entry = tx_entry;
5529 5529
5530 5530 if (i > 1)
5531 5531 ddi_dma_nextcookie(
5532 5532 tx_cb->tx_dma_handle[j],
5533 5533 &dma_cookie);
5534 5534 }
5535 5535 j++;
5536 5536 }
5537 5537 /*
5538 5538 * if OAL is used, the last oal entry in tx iocb indicates
5539 5539 * number of additional address/len pairs in OAL
5540 5540 */
5541 5541 if (oal_entries > TX_DESC_PER_IOCB) {
5542 5542 tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
5543 5543 tx_entry->buf_len = (uint32_t)
5544 5544 (cpu_to_le32((sizeof (struct oal_entry) *
5545 5545 (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
5546 5546 }
5547 5547 last_oal_entry->buf_len = cpu_to_le32(
5548 5548 le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5549 5549
5550 5550 tx_cb->tx_dma_handle_used = j;
5551 5551 QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5552 5552 j, oal_entries));
5553 5553
5554 5554 bp = (caddr_t)mp->b_rptr;
5555 5555 }
5556 5556 if (tx_mode == USE_COPY) {
5557 5557 bp = tx_cb->copy_buffer;
5558 5558 off = 0;
5559 5559 nbyte = 0;
5560 5560 frags = 0;
5561 5561 /*
5562 5562 * Copy up to tx_buf_len of the transmit data
5563 5563 * from mp to tx buffer
5564 5564 */
5565 5565 for (tp = mp; tp != NULL; tp = tp->b_cont) {
5566 5566 nbyte = MBLKL(tp);
5567 5567 if ((off + nbyte) <= tx_buf_len) {
5568 5568 bcopy(tp->b_rptr, &bp[off], nbyte);
5569 5569 off += nbyte;
5570 5570 frags ++;
5571 5571 }
5572 5572 }
5573 5573
5574 5574 msg_len = off;
5575 5575
5576 5576 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5577 5577 mac_iocb_ptr->tid = tx_ring->prod_idx;
5578 5578 mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5579 5579 mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5580 5580
5581 5581 QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
5582 5582 		    "from %d segments\n", msg_len, frags));
5583 5583
5584 5584 phys_addr = tx_cb->copy_buffer_dma_addr;
5585 5585 phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5586 5586 phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5587 5587
5588 5588 QL_DUMP(DBG_TX, "\t requested sending data:\n",
5589 5589 (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5590 5590
5591 5591 mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5592 5592 cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5593 5593 mac_iocb_ptr->oal_entry[0].buf_addr_low = phy_addr_low;
5594 5594 mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5595 5595
5596 5596 freemsg(mp); /* no need, we have copied */
5597 5597 tx_cb->mp = NULL;
5598 5598 } /* End of Copy Mode */
5599 5599
5600 5600 /* Do TSO/LSO on TCP packet? */
5601 5601 if (use_lso && mss) {
5602 5602 ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5603 5603 } else if (pflags & qlge->chksum_cap) {
5604 5604 /* Do checksum offloading */
5605 5605 ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5606 5606 }
5607 5607
5608 5608 /* let device know the latest outbound IOCB */
5609 5609 (void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5610 5610 (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5611 5611 (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5612 5612
5613 5613 if (tx_mode == USE_DMA) {
5614 5614 /* let device know the latest outbound OAL if necessary */
5615 5615 if (oal_entries > TX_DESC_PER_IOCB) {
5616 5616 (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5617 5617 (off_t)0,
5618 5618 (sizeof (struct oal_entry) *
5619 5619 (oal_entries -TX_DESC_PER_IOCB+1)),
5620 5620 DDI_DMA_SYNC_FORDEV);
5621 5621 }
5622 5622 } else { /* for USE_COPY mode, tx buffer has changed */
5623 5623 /* let device know the latest change */
5624 5624 (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5625 5625 /* copy buf offset */
5626 5626 (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5627 5627 msg_len, DDI_DMA_SYNC_FORDEV);
5628 5628 }
5629 5629
5630 5630 /* save how the packet was sent */
5631 5631 tx_cb->tx_type = tx_mode;
5632 5632
5633 5633 QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
5634 5634 /* reduce the number of available tx slot */
5635 5635 atomic_dec_32(&tx_ring->tx_free_count);
5636 5636
5637 5637 tx_ring->prod_idx++;
5638 5638 if (tx_ring->prod_idx >= tx_ring->wq_len)
5639 5639 tx_ring->prod_idx = 0;
5640 5640
5641 5641 now = ddi_get_lbolt();
5642 5642 qlge->last_tx_time = now;
5643 5643
5644 5644 return (DDI_SUCCESS);
5645 5645
5646 5646 bad:
5647 5647 /*
5648 5648 	 * if for any reason the driver cannot send, free
5649 5649 	 * the message pointed to by mp
5650 5650 */
5651 5651 now = ddi_get_lbolt();
5652 5652 freemsg(mp);
5653 5653 mp = NULL;
5654 5654 tx_cb->mp = NULL;
5655 5655 for (i = 0; i < j; i++)
5656 5656 (void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5657 5657
5658 5658 QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5659 5659 __func__, qlge->instance, (int)now));
5660 5660
5661 5661 return (DDI_SUCCESS);
5662 5662 }
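The copy-versus-DMA decision near the top of ql_send_common() can be read in isolation: small, lightly fragmented messages are copied into the preallocated bounce buffer, while anything larger is DMA-bound. A minimal sketch of that predicate follows, using 256 bytes as a hypothetical stand-in for QL_MAX_COPY_LENGTH and local names so it is clearly not the driver's enum.

#include <stdio.h>
#include <stddef.h>

#define	EXAMPLE_MAX_COPY	256	/* stand-in for QL_MAX_COPY_LENGTH */

enum example_tx_mode { EXAMPLE_USE_COPY, EXAMPLE_USE_DMA };

static enum example_tx_mode
pick_tx_mode(size_t total_len, size_t max_seg_len)
{
	/* mirrors the checks at the top of ql_send_common() */
	if (total_len > EXAMPLE_MAX_COPY)
		return (EXAMPLE_USE_DMA);
	if (max_seg_len > EXAMPLE_MAX_COPY)
		return (EXAMPLE_USE_DMA);
	return (EXAMPLE_USE_COPY);
}

int
main(void)
{
	(void) printf("64-byte message   -> %s\n",
	    pick_tx_mode(64, 64) == EXAMPLE_USE_COPY ? "copy" : "dma");
	(void) printf("1500-byte message -> %s\n",
	    pick_tx_mode(1500, 1500) == EXAMPLE_USE_COPY ? "copy" : "dma");
	return (0);
}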
5663 5663
5664 5664
5665 5665 /*
5666 5666 * Initializes hardware and driver software flags before the driver
5667 5667 * is finally ready to work.
5668 5668 */
5669 5669 int
5670 5670 ql_do_start(qlge_t *qlge)
5671 5671 {
5672 5672 int i;
5673 5673 struct rx_ring *rx_ring;
5674 5674 uint16_t lbq_buf_size;
5675 5675 int rings_done;
5676 5676
5677 5677 ASSERT(qlge != NULL);
5678 5678
5679 5679 mutex_enter(&qlge->hw_mutex);
5680 5680
5681 5681 /* Reset adapter */
5682 5682 (void) ql_asic_reset(qlge);
5683 5683
5684 5684 lbq_buf_size = (uint16_t)
5685 5685 ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5686 5686 if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5687 5687 #ifdef QLGE_LOAD_UNLOAD
5688 5688 cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5689 5689 qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5690 5690 #endif
5691 5691 /*
5692 5692 * Check if any ring has buffers still with upper layers
5693 5693 * If buffers are pending with upper layers, we use the
5694 5694 * existing buffers and don't reallocate new ones
5695 5695 * Unfortunately there is no way to evict buffers from
5696 5696 * upper layers. Using buffers with the current size may
5697 5697 * cause slightly sub-optimal performance, but that seems
5698 5698 * to be the easiest way to handle this situation.
5699 5699 */
5700 5700 rings_done = 0;
5701 5701 for (i = 0; i < qlge->rx_ring_count; i++) {
5702 5702 rx_ring = &qlge->rx_ring[i];
5703 5703 if (rx_ring->rx_indicate == 0)
5704 5704 rings_done++;
5705 5705 else
5706 5706 break;
5707 5707 }
5708 5708 /*
5709 5709 * No buffers pending with upper layers;
5710 5710 		 * reallocate them for the new MTU size
5711 5711 */
5712 5712 if (rings_done >= qlge->rx_ring_count) {
5713 5713 /* free large buffer pool */
5714 5714 for (i = 0; i < qlge->rx_ring_count; i++) {
5715 5715 rx_ring = &qlge->rx_ring[i];
5716 5716 if (rx_ring->type != TX_Q) {
5717 5717 ql_free_sbq_buffers(rx_ring);
5718 5718 ql_free_lbq_buffers(rx_ring);
5719 5719 }
5720 5720 }
5721 5721 /* reallocate large buffer pool */
5722 5722 for (i = 0; i < qlge->rx_ring_count; i++) {
5723 5723 rx_ring = &qlge->rx_ring[i];
5724 5724 if (rx_ring->type != TX_Q) {
5725 5725 (void) ql_alloc_sbufs(qlge, rx_ring);
5726 5726 (void) ql_alloc_lbufs(qlge, rx_ring);
5727 5727 }
5728 5728 }
5729 5729 }
5730 5730 }
5731 5731
5732 5732 if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5733 5733 cmn_err(CE_WARN, "qlge bringup adapter failed");
5734 5734 mutex_exit(&qlge->hw_mutex);
5735 5735 if (qlge->fm_enable) {
5736 5736 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5737 5737 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5738 5738 }
5739 5739 return (DDI_FAILURE);
5740 5740 }
5741 5741
5742 5742 mutex_exit(&qlge->hw_mutex);
5743 5743 /* if adapter is up successfully but was bad before */
5744 5744 if (qlge->flags & ADAPTER_ERROR) {
5745 5745 atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5746 5746 if (qlge->fm_enable) {
5747 5747 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5748 5748 }
5749 5749 }
5750 5750
5751 5751 /* Get current link state */
5752 5752 qlge->port_link_state = ql_get_link_state(qlge);
5753 5753
5754 5754 if (qlge->port_link_state == LS_UP) {
5755 5755 QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5756 5756 __func__, qlge->instance));
5757 5757 /* If driver detects a carrier on */
5758 5758 CARRIER_ON(qlge);
5759 5759 } else {
5760 5760 QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5761 5761 __func__, qlge->instance));
5762 5762 /* If driver detects a lack of carrier */
5763 5763 CARRIER_OFF(qlge);
5764 5764 }
5765 5765 qlge->mac_flags = QL_MAC_STARTED;
5766 5766 return (DDI_SUCCESS);
5767 5767 }
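The large-buffer sizing rule used here (and again in ql_setup_rings()) keys solely off the MTU: a standard MTU gets the normal receive buffer, anything larger gets the jumbo buffer. A sketch with hypothetical byte counts standing in for LRG_BUF_NORMAL_SIZE and LRG_BUF_JUMBO_SIZE:

#include <stdio.h>
#include <stdint.h>

#define	EXAMPLE_ETHERMTU	1500
#define	EXAMPLE_NORMAL_SIZE	2048	/* stand-in for LRG_BUF_NORMAL_SIZE */
#define	EXAMPLE_JUMBO_SIZE	9216	/* stand-in for LRG_BUF_JUMBO_SIZE */

static uint16_t
pick_lbq_buf_size(uint32_t mtu)
{
	return ((mtu == EXAMPLE_ETHERMTU) ?
	    EXAMPLE_NORMAL_SIZE : EXAMPLE_JUMBO_SIZE);
}

int
main(void)
{
	(void) printf("mtu 1500 -> %u bytes, mtu 9000 -> %u bytes\n",
	    (unsigned)pick_lbq_buf_size(1500),
	    (unsigned)pick_lbq_buf_size(9000));
	return (0);
}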
5768 5768
5769 5769 /*
5770 5770 * Stop currently running driver
5771 5771  * The driver needs to stop accepting new packets from the stack and
5772 5772  * wait until all pending tx/rx buffers have been freed.
5773 5773 */
5774 5774 int
5775 5775 ql_do_stop(qlge_t *qlge)
5776 5776 {
5777 5777 int rc = DDI_FAILURE;
5778 5778 uint32_t i, j, k;
5779 5779 struct bq_desc *sbq_desc, *lbq_desc;
5780 5780 struct rx_ring *rx_ring;
5781 5781
5782 5782 ASSERT(qlge != NULL);
5783 5783
5784 5784 CARRIER_OFF(qlge);
5785 5785
5786 5786 rc = ql_bringdown_adapter(qlge);
5787 5787 if (rc != DDI_SUCCESS) {
5788 5788 cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5789 5789 } else
5790 5790 rc = DDI_SUCCESS;
5791 5791
5792 5792 for (k = 0; k < qlge->rx_ring_count; k++) {
5793 5793 rx_ring = &qlge->rx_ring[k];
5794 5794 if (rx_ring->type != TX_Q) {
5795 5795 j = rx_ring->lbq_use_head;
5796 5796 #ifdef QLGE_LOAD_UNLOAD
5797 5797 cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5798 5798 " to free list %d\n total %d\n",
5799 5799 k, rx_ring->lbuf_in_use_count,
5800 5800 rx_ring->lbuf_free_count,
5801 5801 rx_ring->lbuf_in_use_count +
5802 5802 rx_ring->lbuf_free_count);
5803 5803 #endif
5804 5804 for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5805 5805 lbq_desc = rx_ring->lbuf_in_use[j];
5806 5806 j++;
5807 5807 if (j >= rx_ring->lbq_len) {
5808 5808 j = 0;
5809 5809 }
5810 5810 if (lbq_desc->mp) {
5811 5811 atomic_inc_32(&rx_ring->rx_indicate);
5812 5812 freemsg(lbq_desc->mp);
5813 5813 }
5814 5814 }
5815 5815 rx_ring->lbq_use_head = j;
5816 5816 rx_ring->lbq_use_tail = j;
5817 5817 rx_ring->lbuf_in_use_count = 0;
5818 5818 j = rx_ring->sbq_use_head;
5819 5819 #ifdef QLGE_LOAD_UNLOAD
5820 5820 cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5821 5821 " to free list %d\n total %d \n",
5822 5822 k, rx_ring->sbuf_in_use_count,
5823 5823 rx_ring->sbuf_free_count,
5824 5824 rx_ring->sbuf_in_use_count +
5825 5825 rx_ring->sbuf_free_count);
5826 5826 #endif
5827 5827 for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5828 5828 sbq_desc = rx_ring->sbuf_in_use[j];
5829 5829 j++;
5830 5830 if (j >= rx_ring->sbq_len) {
5831 5831 j = 0;
5832 5832 }
5833 5833 if (sbq_desc->mp) {
5834 5834 atomic_inc_32(&rx_ring->rx_indicate);
5835 5835 freemsg(sbq_desc->mp);
5836 5836 }
5837 5837 }
5838 5838 rx_ring->sbq_use_head = j;
5839 5839 rx_ring->sbq_use_tail = j;
5840 5840 rx_ring->sbuf_in_use_count = 0;
5841 5841 }
5842 5842 }
5843 5843
5844 5844 qlge->mac_flags = QL_MAC_STOPPED;
5845 5845
5846 5846 return (rc);
5847 5847 }
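The buffer recycling above is a simple wrap-around walk of the circular "in use" list: the index advances from the head and wraps to zero at the ring length. A toy sketch of that index arithmetic, with a hypothetical 8-entry ring and 3 outstanding buffers:

#include <stdio.h>

int
main(void)
{
	int ring_len = 8;	/* hypothetical lbq_len */
	int in_use_count = 3;	/* hypothetical lbuf_in_use_count */
	int j = 6;		/* hypothetical lbq_use_head */
	int i;

	for (i = 0; i < in_use_count; i++) {
		(void) printf("free in-use slot %d\n", j);
		j++;
		if (j >= ring_len)
			j = 0;
	}
	/* j is now both the new head and tail of the (empty) in-use list */
	(void) printf("new head/tail = %d\n", j);
	return (0);
}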
5848 5848
5849 5849 /*
5850 5850 * Support
5851 5851 */
5852 5852
5853 5853 void
5854 5854 ql_disable_isr(qlge_t *qlge)
5855 5855 {
5856 5856 /*
5857 5857 * disable the hardware interrupt
5858 5858 */
5859 5859 ISP_DISABLE_GLOBAL_INTRS(qlge);
5860 5860
5861 5861 qlge->flags &= ~INTERRUPTS_ENABLED;
5862 5862 }
5863 5863
5864 5864
5865 5865
5866 5866 /*
5867 5867 * busy wait for 'usecs' microseconds.
5868 5868 */
5869 5869 void
5870 5870 qlge_delay(clock_t usecs)
5871 5871 {
5872 5872 drv_usecwait(usecs);
5873 5873 }
5874 5874
5875 5875 /*
5876 5876  * retrieve PCI configuration details.
5877 5877 */
5878 5878
5879 5879 pci_cfg_t *
5880 5880 ql_get_pci_config(qlge_t *qlge)
5881 5881 {
5882 5882 return (&(qlge->pci_cfg));
5883 5883 }
5884 5884
5885 5885 /*
5886 5886 * Get current Link status
5887 5887 */
5888 5888 static uint32_t
5889 5889 ql_get_link_state(qlge_t *qlge)
5890 5890 {
5891 5891 uint32_t bitToCheck = 0;
5892 5892 uint32_t temp, linkState;
5893 5893
5894 5894 if (qlge->func_number == qlge->fn0_net) {
5895 5895 bitToCheck = STS_PL0;
5896 5896 } else {
5897 5897 bitToCheck = STS_PL1;
5898 5898 }
5899 5899 temp = ql_read_reg(qlge, REG_STATUS);
5900 5900 QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5901 5901 __func__, qlge->instance, temp));
5902 5902
5903 5903 if (temp & bitToCheck) {
5904 5904 linkState = LS_UP;
5905 5905 } else {
5906 5906 linkState = LS_DOWN;
5907 5907 }
5908 5908 if (CFG_IST(qlge, CFG_CHIP_8100)) {
5909 5909 /* for Schultz, link Speed is fixed to 10G, full duplex */
5910 5910 qlge->speed = SPEED_10G;
5911 5911 qlge->duplex = 1;
5912 5912 }
5913 5913 return (linkState);
5914 5914 }
5915 5915 /*
5916 5916 * Get current link status and report to OS
5917 5917 */
5918 5918 static void
5919 5919 ql_get_and_report_link_state(qlge_t *qlge)
5920 5920 {
5921 5921 uint32_t cur_link_state;
5922 5922
5923 5923 /* Get current link state */
5924 5924 cur_link_state = ql_get_link_state(qlge);
5925 5925 /* if link state has changed */
5926 5926 if (cur_link_state != qlge->port_link_state) {
5927 5927
5928 5928 qlge->port_link_state = cur_link_state;
5929 5929
5930 5930 if (qlge->port_link_state == LS_UP) {
5931 5931 QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5932 5932 __func__, qlge->instance));
5933 5933 /* If driver detects a carrier on */
5934 5934 CARRIER_ON(qlge);
5935 5935 } else {
5936 5936 QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5937 5937 __func__, qlge->instance));
5938 5938 /* If driver detects a lack of carrier */
5939 5939 CARRIER_OFF(qlge);
5940 5940 }
5941 5941 }
5942 5942 }
5943 5943
5944 5944 /*
5945 5945 * timer callback function executed after timer expires
5946 5946 */
5947 5947 static void
5948 5948 ql_timer(void* arg)
5949 5949 {
5950 5950 ql_get_and_report_link_state((qlge_t *)arg);
5951 5951 }
5952 5952
5953 5953 /*
5954 5954 * stop the running timer if activated
5955 5955 */
5956 5956 static void
5957 5957 ql_stop_timer(qlge_t *qlge)
5958 5958 {
5959 5959 timeout_id_t timer_id;
5960 5960 /* Disable driver timer */
5961 5961 if (qlge->ql_timer_timeout_id != NULL) {
5962 5962 timer_id = qlge->ql_timer_timeout_id;
5963 5963 qlge->ql_timer_timeout_id = NULL;
5964 5964 (void) untimeout(timer_id);
5965 5965 }
5966 5966 }
5967 5967
5968 5968 /*
5969 5969 * stop then restart timer
5970 5970 */
5971 5971 void
5972 5972 ql_restart_timer(qlge_t *qlge)
5973 5973 {
5974 5974 ql_stop_timer(qlge);
5975 5975 qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5976 5976 qlge->ql_timer_timeout_id = timeout(ql_timer,
5977 5977 (void *)qlge, qlge->ql_timer_ticks);
5978 5978 }
5979 5979
5980 5980 /* ************************************************************************* */
5981 5981 /*
5982 5982 * Hardware K-Stats Data Structures and Subroutines
5983 5983 */
5984 5984 /* ************************************************************************* */
5985 5985 static const ql_ksindex_t ql_kstats_hw[] = {
5986 5986 /* PCI related hardware information */
5987 5987 { 0, "Vendor Id" },
5988 5988 { 1, "Device Id" },
5989 5989 { 2, "Command" },
5990 5990 { 3, "Status" },
5991 5991 { 4, "Revision Id" },
5992 5992 { 5, "Cache Line Size" },
5993 5993 { 6, "Latency Timer" },
5994 5994 { 7, "Header Type" },
5995 5995 { 9, "I/O base addr" },
5996 5996 { 10, "Control Reg Base addr low" },
5997 5997 { 11, "Control Reg Base addr high" },
5998 5998 { 12, "Doorbell Reg Base addr low" },
5999 5999 { 13, "Doorbell Reg Base addr high" },
6000 6000 { 14, "Subsystem Vendor Id" },
6001 6001 { 15, "Subsystem Device ID" },
6002 6002 { 16, "PCIe Device Control" },
6003 6003 { 17, "PCIe Link Status" },
6004 6004
6005 6005 { -1, NULL },
6006 6006 };
6007 6007
6008 6008 /*
6009 6009 * kstat update function for PCI registers
6010 6010 */
6011 6011 static int
6012 6012 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
6013 6013 {
6014 6014 qlge_t *qlge;
6015 6015 kstat_named_t *knp;
6016 6016
6017 6017 if (flag != KSTAT_READ)
6018 6018 return (EACCES);
6019 6019
6020 6020 qlge = ksp->ks_private;
6021 6021 knp = ksp->ks_data;
6022 6022 (knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6023 6023 (knp++)->value.ui32 = qlge->pci_cfg.device_id;
6024 6024 (knp++)->value.ui32 = qlge->pci_cfg.command;
6025 6025 (knp++)->value.ui32 = qlge->pci_cfg.status;
6026 6026 (knp++)->value.ui32 = qlge->pci_cfg.revision;
6027 6027 (knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6028 6028 (knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6029 6029 (knp++)->value.ui32 = qlge->pci_cfg.header_type;
6030 6030 (knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6031 6031 (knp++)->value.ui32 =
6032 6032 qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6033 6033 (knp++)->value.ui32 =
6034 6034 qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6035 6035 (knp++)->value.ui32 =
6036 6036 qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6037 6037 (knp++)->value.ui32 =
6038 6038 qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6039 6039 (knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6040 6040 (knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6041 6041 (knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6042 6042 (knp++)->value.ui32 = qlge->pci_cfg.link_status;
6043 6043
6044 6044 return (0);
6045 6045 }
6046 6046
6047 6047 static const ql_ksindex_t ql_kstats_mii[] = {
6048 6048 /* MAC/MII related hardware information */
6049 6049 { 0, "mtu"},
6050 6050
6051 6051 { -1, NULL},
6052 6052 };
6053 6053
6054 6054
6055 6055 /*
6056 6056 * kstat update function for MII related information.
6057 6057 */
6058 6058 static int
6059 6059 ql_kstats_mii_update(kstat_t *ksp, int flag)
6060 6060 {
6061 6061 qlge_t *qlge;
6062 6062 kstat_named_t *knp;
6063 6063
6064 6064 if (flag != KSTAT_READ)
6065 6065 return (EACCES);
6066 6066
6067 6067 qlge = ksp->ks_private;
6068 6068 knp = ksp->ks_data;
6069 6069
6070 6070 (knp++)->value.ui32 = qlge->mtu;
6071 6071
6072 6072 return (0);
6073 6073 }
6074 6074
6075 6075 static const ql_ksindex_t ql_kstats_reg[] = {
6076 6076 /* Register information */
6077 6077 { 0, "System (0x08)" },
6078 6078 	{ 1, "Reset/Fail Over(0x0C)" },
6079 6079 { 2, "Function Specific Control(0x10)" },
6080 6080 { 3, "Status (0x30)" },
6081 6081 { 4, "Intr Enable (0x34)" },
6082 6082 { 5, "Intr Status1 (0x3C)" },
6083 6083 { 6, "Error Status (0x54)" },
6084 6084 { 7, "XGMAC Flow Control(0x11C)" },
6085 6085 { 8, "XGMAC Tx Pause Frames(0x230)" },
6086 6086 { 9, "XGMAC Rx Pause Frames(0x388)" },
6087 6087 { 10, "XGMAC Rx FIFO Drop Count(0x5B8)" },
6088 6088 { 11, "interrupts actually allocated" },
6089 6089 { 12, "interrupts on rx ring 0" },
6090 6090 { 13, "interrupts on rx ring 1" },
6091 6091 { 14, "interrupts on rx ring 2" },
6092 6092 { 15, "interrupts on rx ring 3" },
6093 6093 { 16, "interrupts on rx ring 4" },
6094 6094 { 17, "interrupts on rx ring 5" },
6095 6095 { 18, "interrupts on rx ring 6" },
6096 6096 { 19, "interrupts on rx ring 7" },
6097 6097 { 20, "polls on rx ring 0" },
6098 6098 { 21, "polls on rx ring 1" },
6099 6099 { 22, "polls on rx ring 2" },
6100 6100 { 23, "polls on rx ring 3" },
6101 6101 { 24, "polls on rx ring 4" },
6102 6102 { 25, "polls on rx ring 5" },
6103 6103 { 26, "polls on rx ring 6" },
6104 6104 { 27, "polls on rx ring 7" },
6105 6105 { 28, "tx no resource on ring 0" },
6106 6106 { 29, "tx dma bind fail on ring 0" },
6107 6107 { 30, "tx dma no handle on ring 0" },
6108 6108 { 31, "tx dma no cookie on ring 0" },
6109 6109 { 32, "MPI firmware major version" },
6110 6110 { 33, "MPI firmware minor version" },
6111 6111 { 34, "MPI firmware sub version" },
6112 6112 { 35, "rx no resource" },
6113 6113
6114 6114 { -1, NULL},
6115 6115 };
6116 6116
6117 6117
6118 6118 /*
6119 6119 * kstat update function for device register set
6120 6120 */
6121 6121 static int
6122 6122 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6123 6123 {
6124 6124 qlge_t *qlge;
6125 6125 kstat_named_t *knp;
6126 6126 uint32_t val32;
6127 6127 int i = 0;
6128 6128 struct tx_ring *tx_ring;
6129 6129 struct rx_ring *rx_ring;
6130 6130
6131 6131 if (flag != KSTAT_READ)
6132 6132 return (EACCES);
6133 6133
6134 6134 qlge = ksp->ks_private;
6135 6135 knp = ksp->ks_data;
6136 6136
6137 6137 (knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6138 6138 (knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6139 6139 (knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6140 6140 (knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6141 6141 (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6142 6142 (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6143 6143 (knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6144 6144
6145 6145 if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6146 6146 return (0);
6147 6147 }
6148 6148 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6149 6149 (knp++)->value.ui32 = val32;
6150 6150
6151 6151 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6152 6152 (knp++)->value.ui32 = val32;
6153 6153
6154 6154 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6155 6155 (knp++)->value.ui32 = val32;
6156 6156
6157 6157 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6158 6158 (knp++)->value.ui32 = val32;
6159 6159
6160 6160 ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6161 6161
6162 6162 (knp++)->value.ui32 = qlge->intr_cnt;
6163 6163
6164 6164 for (i = 0; i < 8; i++) {
6165 6165 (knp++)->value.ui32 = qlge->rx_interrupts[i];
6166 6166 }
6167 6167
6168 6168 for (i = 0; i < 8; i++) {
6169 6169 (knp++)->value.ui32 = qlge->rx_polls[i];
6170 6170 }
6171 6171
6172 6172 tx_ring = &qlge->tx_ring[0];
6173 6173 (knp++)->value.ui32 = tx_ring->defer;
6174 6174 (knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6175 6175 (knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6176 6176 (knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6177 6177
6178 6178 (knp++)->value.ui32 = qlge->fw_version_info.major_version;
6179 6179 (knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6180 6180 (knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6181 6181
6182 6182 for (i = 0; i < qlge->rx_ring_count; i++) {
6183 6183 rx_ring = &qlge->rx_ring[i];
6184 6184 val32 += rx_ring->rx_packets_dropped_no_buffer;
6185 6185 }
6186 6186 (knp++)->value.ui32 = val32;
6187 6187
6188 6188 return (0);
6189 6189 }
6190 6190
6191 6191
6192 6192 static kstat_t *
6193 6193 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6194 6194 const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6195 6195 {
6196 6196 kstat_t *ksp;
6197 6197 kstat_named_t *knp;
6198 6198 char *np;
6199 6199 int type;
6200 6200
6201 6201 size /= sizeof (ql_ksindex_t);
6202 6202 ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6203 6203 KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6204 6204 if (ksp == NULL)
6205 6205 return (NULL);
6206 6206
6207 6207 ksp->ks_private = qlge;
6208 6208 ksp->ks_update = update;
6209 6209 for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6210 6210 switch (*np) {
6211 6211 default:
6212 6212 type = KSTAT_DATA_UINT32;
6213 6213 break;
6214 6214 case '&':
6215 6215 np += 1;
6216 6216 type = KSTAT_DATA_CHAR;
6217 6217 break;
6218 6218 }
6219 6219 kstat_named_init(knp, np, (uint8_t)type);
6220 6220 }
6221 6221 kstat_install(ksp);
6222 6222
6223 6223 return (ksp);
6224 6224 }
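The name tables passed to ql_setup_named_kstat() follow a small convention: a leading '&' in the name selects a character-type statistic, everything else defaults to a 32-bit counter, and a { -1, NULL } entry terminates the table. A stand-alone sketch of that convention follows; the table contents and the "&fw version" entry are hypothetical, and the local struct just mirrors the shape of ql_ksindex_t.

#include <stdio.h>

typedef struct {
	int		index;
	const char	*name;
} example_ksindex_t;		/* same shape as ql_ksindex_t */

static const example_ksindex_t example_table[] = {
	{ 0, "mtu" },
	{ 1, "&fw version" },	/* hypothetical character-type entry */
	{ -1, NULL },
};

int
main(void)
{
	const example_ksindex_t *ksip;
	const char *np;

	for (ksip = example_table; (np = ksip->name) != NULL; ksip++) {
		if (*np == '&') {
			np++;			/* skip the marker */
			(void) printf("%-12s -> KSTAT_DATA_CHAR\n", np);
		} else {
			(void) printf("%-12s -> KSTAT_DATA_UINT32\n", np);
		}
	}
	return (0);
}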
6225 6225
6226 6226 /*
6227 6227 * Setup various kstat
6228 6228 */
6229 6229 int
6230 6230 ql_init_kstats(qlge_t *qlge)
6231 6231 {
6232 6232 /* Hardware KStats */
6233 6233 qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6234 6234 qlge->instance, "chip", ql_kstats_hw,
6235 6235 sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6236 6236 if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6237 6237 return (DDI_FAILURE);
6238 6238 }
6239 6239
6240 6240 /* MII KStats */
6241 6241 qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6242 6242 qlge->instance, "mii", ql_kstats_mii,
6243 6243 sizeof (ql_kstats_mii), ql_kstats_mii_update);
6244 6244 if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6245 6245 return (DDI_FAILURE);
6246 6246 }
6247 6247
6248 6248 /* REG KStats */
6249 6249 qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6250 6250 qlge->instance, "reg", ql_kstats_reg,
6251 6251 sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6252 6252 if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6253 6253 return (DDI_FAILURE);
6254 6254 }
6255 6255 return (DDI_SUCCESS);
6256 6256 }
6257 6257
6258 6258 /*
6259 6259 * delete all kstat
6260 6260 */
6261 6261 void
6262 6262 ql_fini_kstats(qlge_t *qlge)
6263 6263 {
6264 6264 int i;
6265 6265
6266 6266 for (i = 0; i < QL_KSTAT_COUNT; i++) {
6267 6267 if (qlge->ql_kstats[i] != NULL)
6268 6268 kstat_delete(qlge->ql_kstats[i]);
6269 6269 }
6270 6270 }
6271 6271
6272 6272 /* ************************************************************************* */
6273 6273 /*
6274 6274 * kstat end
6275 6275 */
6276 6276 /* ************************************************************************* */
6277 6277
6278 6278 /*
6279 6279 * Setup the parameters for receive and transmit rings including buffer sizes
6280 6280 * and completion queue sizes
6281 6281 */
6282 6282 static int
6283 6283 ql_setup_rings(qlge_t *qlge)
6284 6284 {
6285 6285 uint8_t i;
6286 6286 struct rx_ring *rx_ring;
6287 6287 struct tx_ring *tx_ring;
6288 6288 uint16_t lbq_buf_size;
6289 6289
6290 6290 lbq_buf_size = (uint16_t)
6291 6291 ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6292 6292
6293 6293 /*
6294 6294 * rx_ring[0] is always the default queue.
6295 6295 */
6296 6296 /*
6297 6297 * qlge->rx_ring_count:
6298 6298 * Total number of rx_rings. This includes a number
6299 6299 * of outbound completion handler rx_rings, and a
6300 6300 * number of inbound completion handler rx_rings.
6301 6301 * rss is only enabled if we have more than 1 rx completion
6302 6302 	 * queue. If we have a single rx completion queue,
6303 6303 	 * then all rx completions go to that one queue,
6304 6304 	 * which is also the default completion queue.
6305 6305 */
6306 6306
6307 6307 qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6308 6308
6309 6309 for (i = 0; i < qlge->tx_ring_count; i++) {
6310 6310 tx_ring = &qlge->tx_ring[i];
6311 6311 bzero((void *)tx_ring, sizeof (*tx_ring));
6312 6312 tx_ring->qlge = qlge;
6313 6313 tx_ring->wq_id = i;
6314 6314 tx_ring->wq_len = qlge->tx_ring_size;
6315 6315 tx_ring->wq_size = (uint32_t)(
6316 6316 tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6317 6317
6318 6318 /*
6319 6319 		 * The completion queue IDs for the tx rings start
6320 6320 		 * immediately after the last rss completion queue.
6321 6321 */
6322 6322 tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6323 6323 }
6324 6324
6325 6325 for (i = 0; i < qlge->rx_ring_count; i++) {
6326 6326 rx_ring = &qlge->rx_ring[i];
6327 6327 bzero((void *)rx_ring, sizeof (*rx_ring));
6328 6328 rx_ring->qlge = qlge;
6329 6329 rx_ring->cq_id = i;
6330 6330 if (i != 0)
6331 6331 rx_ring->cpu = (i) % qlge->rx_ring_count;
6332 6332 else
6333 6333 rx_ring->cpu = 0;
6334 6334
6335 6335 if (i < qlge->rss_ring_count) {
6336 6336 /*
6337 6337 * Inbound completions (RSS) queues
6338 6338 * Default queue is queue 0 which handles
6339 6339 * unicast plus bcast/mcast and async events.
6340 6340 * Other inbound queues handle unicast frames only.
6341 6341 */
6342 6342 rx_ring->cq_len = qlge->rx_ring_size;
6343 6343 rx_ring->cq_size = (uint32_t)
6344 6344 (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6345 6345 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6346 6346 rx_ring->lbq_size = (uint32_t)
6347 6347 (rx_ring->lbq_len * sizeof (uint64_t));
6348 6348 rx_ring->lbq_buf_size = lbq_buf_size;
6349 6349 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6350 6350 rx_ring->sbq_size = (uint32_t)
6351 6351 (rx_ring->sbq_len * sizeof (uint64_t));
6352 6352 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6353 6353 rx_ring->type = RX_Q;
6354 6354
6355 6355 QL_PRINT(DBG_GLD,
6356 6356 ("%s(%d)Allocating rss completion queue %d "
6357 6357 "on cpu %d\n", __func__, qlge->instance,
6358 6358 rx_ring->cq_id, rx_ring->cpu));
6359 6359 } else {
6360 6360 /*
6361 6361 * Outbound queue handles outbound completions only
6362 6362 */
6363 6363 /* outbound cq is same size as tx_ring it services. */
6364 6364 QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6365 6365 rx_ring->cq_len = qlge->tx_ring_size;
6366 6366 rx_ring->cq_size = (uint32_t)
6367 6367 (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6368 6368 rx_ring->lbq_len = 0;
6369 6369 rx_ring->lbq_size = 0;
6370 6370 rx_ring->lbq_buf_size = 0;
6371 6371 rx_ring->sbq_len = 0;
6372 6372 rx_ring->sbq_size = 0;
6373 6373 rx_ring->sbq_buf_size = 0;
6374 6374 rx_ring->type = TX_Q;
6375 6375
6376 6376 QL_PRINT(DBG_GLD,
6377 6377 ("%s(%d)Allocating TX completion queue %d on"
6378 6378 " cpu %d\n", __func__, qlge->instance,
6379 6379 rx_ring->cq_id, rx_ring->cpu));
6380 6380 }
6381 6381 }
6382 6382
6383 6383 return (DDI_SUCCESS);
6384 6384 }
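Put together, the layout produced here is: the first rss_ring_count completion queues are inbound RSS queues, the remaining ones service the tx rings, and tx ring i uses completion queue rss_ring_count + i. A sketch of that numbering, assuming a hypothetical 4 RSS rings and 2 tx rings (so 6 rx_rings in total):

#include <stdio.h>

int
main(void)
{
	int rss_ring_count = 4;	/* hypothetical inbound RSS rings */
	int tx_ring_count = 2;	/* hypothetical tx rings */
	int rx_ring_count = rss_ring_count + tx_ring_count;
	int i;

	for (i = 0; i < rx_ring_count; i++)
		(void) printf("cq %d: %s\n", i,
		    (i < rss_ring_count) ? "RX_Q (inbound RSS)" :
		    "TX_Q (outbound completion)");
	for (i = 0; i < tx_ring_count; i++)
		(void) printf("tx ring %d -> cq %d\n", i,
		    rss_ring_count + i);
	return (0);
}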
6385 6385
6386 6386 static int
6387 6387 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6388 6388 {
6389 6389 struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6390 6390 void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6391 6391 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6392 6392 /* first shadow area is used by wqicb's host copy of consumer index */
6393 6393 + sizeof (uint64_t);
6394 6394 uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6395 6395 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6396 6396 + sizeof (uint64_t);
6397 6397 /* lrg/sml bufq pointers */
6398 6398 uint8_t *buf_q_base_reg =
6399 6399 (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6400 6400 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6401 6401 uint64_t buf_q_base_reg_dma =
6402 6402 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6403 6403 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6404 6404 caddr_t doorbell_area =
6405 6405 qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6406 6406 int err = 0;
6407 6407 uint16_t bq_len;
6408 6408 uint64_t tmp;
6409 6409 uint64_t *base_indirect_ptr;
6410 6410 int page_entries;
6411 6411
6412 6412 /* Set up the shadow registers for this ring. */
6413 6413 rx_ring->prod_idx_sh_reg = shadow_reg;
6414 6414 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6415 6415 rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6416 6416 sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6417 6417
6418 6418 rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6419 6419 rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6420 6420
6421 6421 QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6422 6422 " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6423 6423 rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6424 6424
6425 6425 buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6426 6426 buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6427 6427 rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6428 6428 rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6429 6429
6430 6430 /* PCI doorbell mem area + 0x00 for consumer index register */
6431 6431 rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6432 6432 rx_ring->cnsmr_idx = 0;
6433 6433 *rx_ring->prod_idx_sh_reg = 0;
6434 6434 rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6435 6435
6436 6436 /* PCI doorbell mem area + 0x04 for valid register */
6437 6437 rx_ring->valid_db_reg = (uint32_t *)(void *)
6438 6438 ((uint8_t *)(void *)doorbell_area + 0x04);
6439 6439
6440 6440 /* PCI doorbell mem area + 0x18 for large buffer consumer */
6441 6441 rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6442 6442 ((uint8_t *)(void *)doorbell_area + 0x18);
6443 6443
6444 6444 /* PCI doorbell mem area + 0x1c */
6445 6445 rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6446 6446 ((uint8_t *)(void *)doorbell_area + 0x1c);
6447 6447
6448 6448 bzero((void *)cqicb, sizeof (*cqicb));
6449 6449
6450 6450 cqicb->msix_vect = (uint8_t)rx_ring->irq;
6451 6451
6452 6452 bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6453 6453 (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6454 6454 cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6455 6455
6456 6456 cqicb->cq_base_addr_lo =
6457 6457 cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6458 6458 cqicb->cq_base_addr_hi =
6459 6459 cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6460 6460
6461 6461 cqicb->prod_idx_addr_lo =
6462 6462 cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6463 6463 cqicb->prod_idx_addr_hi =
6464 6464 cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6465 6465
6466 6466 /*
6467 6467 * Set up the control block load flags.
6468 6468 */
6469 6469 cqicb->flags = FLAGS_LC | /* Load queue base address */
6470 6470 FLAGS_LV | /* Load MSI-X vector */
6471 6471 FLAGS_LI; /* Load irq delay values */
6472 6472 if (rx_ring->lbq_len) {
6473 6473 /* Load lbq values */
6474 6474 cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6475 6475 tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6476 6476 base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6477 6477 page_entries = 0;
6478 6478 do {
6479 6479 *base_indirect_ptr = cpu_to_le64(tmp);
6480 6480 tmp += VM_PAGE_SIZE;
6481 6481 base_indirect_ptr++;
6482 6482 page_entries++;
6483 6483 } while (page_entries < (int)(
6484 6484 ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6485 6485
6486 6486 cqicb->lbq_addr_lo =
6487 6487 cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6488 6488 cqicb->lbq_addr_hi =
6489 6489 cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6490 6490 bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6491 6491 (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6492 6492 cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6493 6493 bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6494 6494 (uint16_t)rx_ring->lbq_len);
6495 6495 cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6496 6496 rx_ring->lbq_prod_idx = 0;
6497 6497 rx_ring->lbq_curr_idx = 0;
6498 6498 }
6499 6499 if (rx_ring->sbq_len) {
6500 6500 /* Load sbq values */
6501 6501 cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6502 6502 tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6503 6503 base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6504 6504 page_entries = 0;
6505 6505
6506 6506 do {
6507 6507 *base_indirect_ptr = cpu_to_le64(tmp);
6508 6508 tmp += VM_PAGE_SIZE;
6509 6509 base_indirect_ptr++;
6510 6510 page_entries++;
6511 6511 } while (page_entries < (uint32_t)
6512 6512 (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6513 6513
6514 6514 cqicb->sbq_addr_lo =
6515 6515 cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6516 6516 cqicb->sbq_addr_hi =
6517 6517 cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6518 6518 cqicb->sbq_buf_size = (uint16_t)
6519 6519 cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6520 6520 bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6521 6521 (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6522 6522 cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6523 6523 rx_ring->sbq_prod_idx = 0;
6524 6524 rx_ring->sbq_curr_idx = 0;
6525 6525 }
6526 6526 switch (rx_ring->type) {
6527 6527 case TX_Q:
6528 6528 cqicb->irq_delay = (uint16_t)
6529 6529 cpu_to_le16(qlge->tx_coalesce_usecs);
6530 6530 cqicb->pkt_delay = (uint16_t)
6531 6531 cpu_to_le16(qlge->tx_max_coalesced_frames);
6532 6532 break;
6533 6533
6534 6534 case DEFAULT_Q:
6535 6535 cqicb->irq_delay = (uint16_t)
6536 6536 cpu_to_le16(qlge->rx_coalesce_usecs);
6537 6537 cqicb->pkt_delay = (uint16_t)
6538 6538 cpu_to_le16(qlge->rx_max_coalesced_frames);
6539 6539 break;
6540 6540
6541 6541 case RX_Q:
6542 6542 /*
6543 6543 * Inbound completion handling rx_rings run in
6544 6544 * separate NAPI contexts.
6545 6545 */
6546 6546 cqicb->irq_delay = (uint16_t)
6547 6547 cpu_to_le16(qlge->rx_coalesce_usecs);
6548 6548 cqicb->pkt_delay = (uint16_t)
6549 6549 cpu_to_le16(qlge->rx_max_coalesced_frames);
6550 6550 break;
6551 6551 default:
6552 6552 cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6553 6553 rx_ring->type);
6554 6554 }
6555 6555 QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6556 6556 rx_ring->cq_id));
6557 6557 /* QL_DUMP_CQICB(qlge, cqicb); */
6558 6558 err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6559 6559 rx_ring->cq_id);
6560 6560 if (err) {
6561 6561 cmn_err(CE_WARN, "Failed to load CQICB.");
6562 6562 return (err);
6563 6563 }
6564 6564
6565 6565 rx_ring->rx_packets_dropped_no_buffer = 0;
6566 6566 rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6567 6567 rx_ring->rx_failed_sbq_allocs = 0;
6568 6568 rx_ring->rx_failed_lbq_allocs = 0;
6569 6569 rx_ring->rx_packets = 0;
6570 6570 rx_ring->rx_bytes = 0;
6571 6571 rx_ring->frame_too_long = 0;
6572 6572 rx_ring->frame_too_short = 0;
6573 6573 rx_ring->fcs_err = 0;
6574 6574
6575 6575 return (err);
6576 6576 }
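As the doorbell assignments above show, each rx completion queue owns one doorbell page at (128 + cq_id) pages into the doorbell BAR, with the consumer index at +0x00, the valid register at +0x04, and the large/small buffer-queue producer indexes at +0x18/+0x1c. A sketch of those offsets, assuming a hypothetical 4 KB page as a stand-in for VM_PAGE_SIZE:

#include <stdio.h>

int
main(void)
{
	unsigned int page_size = 4096;	/* stand-in for VM_PAGE_SIZE */
	unsigned int cq_id = 2;		/* hypothetical completion queue */
	unsigned int base = page_size * (128 + cq_id);

	(void) printf("consumer index @ BAR + 0x%x\n", base + 0x00);
	(void) printf("valid register @ BAR + 0x%x\n", base + 0x04);
	(void) printf("lbq producer   @ BAR + 0x%x\n", base + 0x18);
	(void) printf("sbq producer   @ BAR + 0x%x\n", base + 0x1c);
	return (0);
}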
6577 6577
6578 6578 /*
6579 6579 * start RSS
6580 6580 */
6581 6581 static int
6582 6582 ql_start_rss(qlge_t *qlge)
6583 6583 {
6584 6584 struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6585 6585 int status = 0;
6586 6586 int i;
6587 6587 uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6588 6588
6589 6589 bzero((void *)ricb, sizeof (*ricb));
6590 6590
6591 6591 ricb->base_cq = RSS_L4K;
6592 6592 ricb->flags =
6593 6593 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6594 6594 RSS_RT6);
6595 6595 ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6596 6596
6597 6597 /*
6598 6598 * Fill out the Indirection Table.
6599 6599 */
6600 6600 for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6601 6601 hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
6602 6602
6603 6603 (void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6604 6604 (void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6605 6605
6606 6606 QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6607 6607
6608 6608 status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6609 6609 if (status) {
6610 6610 cmn_err(CE_WARN, "Failed to load RICB.");
6611 6611 return (status);
6612 6612 }
6613 6613
6614 6614 return (status);
6615 6615 }
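The indirection table fill above maps every hash bucket to an RSS ring with a simple power-of-two wrap, the same masking trick used for tx ring selection. A sketch assuming a hypothetical 256-entry table (standing in for RSS_HASH_CQ_ID_MAX) and 4 RSS rings:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t hash_id[256];	/* stand-in for RSS_HASH_CQ_ID_MAX entries */
	int rss_ring_count = 4;	/* hypothetical, must be a power of two */
	int i;

	for (i = 0; i < 256; i++)
		hash_id[i] = (uint8_t)(i & (rss_ring_count - 1));

	(void) printf("hash_id[0..7] = %d %d %d %d %d %d %d %d\n",
	    hash_id[0], hash_id[1], hash_id[2], hash_id[3],
	    hash_id[4], hash_id[5], hash_id[6], hash_id[7]);
	return (0);
}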
6616 6616
6617 6617 /*
6618 6618 * load a tx ring control block to hw and start this ring
6619 6619 */
6620 6620 static int
6621 6621 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6622 6622 {
6623 6623 struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6624 6624 caddr_t doorbell_area =
6625 6625 qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6626 6626 void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6627 6627 (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6628 6628 uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6629 6629 (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6630 6630 int err = 0;
6631 6631
6632 6632 /*
6633 6633 * Assign doorbell registers for this tx_ring.
6634 6634 */
6635 6635
6636 6636 /* TX PCI doorbell mem area for tx producer index */
6637 6637 tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6638 6638 tx_ring->prod_idx = 0;
6639 6639 /* TX PCI doorbell mem area + 0x04 */
6640 6640 tx_ring->valid_db_reg = (uint32_t *)(void *)
6641 6641 ((uint8_t *)(void *)doorbell_area + 0x04);
6642 6642
6643 6643 /*
6644 6644 * Assign shadow registers for this tx_ring.
6645 6645 */
6646 6646 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6647 6647 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6648 6648 *tx_ring->cnsmr_idx_sh_reg = 0;
6649 6649
6650 6650 QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6651 6651 " phys_addr 0x%lx\n",
6652 6652 __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6653 6653 tx_ring->cnsmr_idx_sh_reg_dma));
6654 6654
6655 6655 wqicb->len =
6656 6656 (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6657 6657 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6658 6658 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6659 6659 wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6660 6660 wqicb->rid = 0;
6661 6661 wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6662 6662 wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6663 6663 wqicb->cnsmr_idx_addr_lo =
6664 6664 cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6665 6665 wqicb->cnsmr_idx_addr_hi =
6666 6666 cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6667 6667
6668 6668 ql_init_tx_ring(tx_ring);
6669 6669 /* QL_DUMP_WQICB(qlge, wqicb); */
6670 6670 err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6671 6671 tx_ring->wq_id);
6672 6672
6673 6673 if (err) {
6674 6674 cmn_err(CE_WARN, "Failed to load WQICB.");
6675 6675 return (err);
6676 6676 }
6677 6677 return (err);
6678 6678 }
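
The WQICB carries each 64-bit DMA address as separate low and high 32-bit words. Assuming LS_64BITS and MS_64BITS simply take the low and high halves of the address, as their use for wq_addr_lo/wq_addr_hi suggests, the split amounts to the following standalone sketch (the address value is made up):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dma_addr = 0x00000001234567c0ULL;	/* example only */
	uint32_t lo = (uint32_t)(dma_addr & 0xffffffffULL);	/* low half */
	uint32_t hi = (uint32_t)(dma_addr >> 32);		/* high half */

	(void) printf("addr_lo = 0x%08x addr_hi = 0x%08x\n", lo, hi);
	return (0);
}
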
6679 6679
6680 6680 /*
6681 6681 * Set up a MAC, multicast or VLAN address for the
6682 6682 * inbound frame matching.
6683 6683 */
6684 6684 int
6685 6685 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6686 6686 uint16_t index)
6687 6687 {
6688 6688 uint32_t offset = 0;
6689 6689 int status = DDI_SUCCESS;
6690 6690
6691 6691 switch (type) {
6692 6692 case MAC_ADDR_TYPE_MULTI_MAC:
6693 6693 case MAC_ADDR_TYPE_CAM_MAC: {
6694 6694 uint32_t cam_output;
6695 6695 uint32_t upper = (addr[0] << 8) | addr[1];
6696 6696 uint32_t lower =
6697 6697 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6698 6698 (addr[5]);
6699 6699
6700 6700 QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6701 6701 MAC_ADDR_TYPE_MULTI_MAC) ?
6702 6702 "MULTICAST" : "UNICAST"));
6703 6703 QL_PRINT(DBG_INIT,
6704 6704 ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6705 6705 "the CAM.\n",
6706 6706 addr[0], addr[1], addr[2], addr[3], addr[4],
6707 6707 addr[5], index));
6708 6708
6709 6709 status = ql_wait_reg_rdy(qlge,
6710 6710 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6711 6711 if (status)
6712 6712 goto exit;
6713 6713 /* offset 0 - lower 32 bits of the MAC address */
6714 6714 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6715 6715 (offset++) |
6716 6716 (index << MAC_ADDR_IDX_SHIFT) | /* index */
6717 6717 type); /* type */
6718 6718 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6719 6719 status = ql_wait_reg_rdy(qlge,
6720 6720 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6721 6721 if (status)
6722 6722 goto exit;
6723 6723 /* offset 1 - upper 16 bits of the MAC address */
6724 6724 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6725 6725 (offset++) |
6726 6726 (index << MAC_ADDR_IDX_SHIFT) | /* index */
6727 6727 type); /* type */
6728 6728 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6729 6729 status = ql_wait_reg_rdy(qlge,
6730 6730 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6731 6731 if (status)
6732 6732 goto exit;
6733 6733 /* offset 2 - CQ ID associated with this MAC address */
6734 6734 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6735 6735 (offset) | (index << MAC_ADDR_IDX_SHIFT) | /* index */
6736 6736 type); /* type */
6737 6737 /*
6738 6738 * This field should also include the queue id
6739 6739 * and possibly the function id. Right now we hardcode
6740 6740 * the route field to NIC core.
6741 6741 */
6742 6742 if (type == MAC_ADDR_TYPE_CAM_MAC) {
6743 6743 cam_output = (CAM_OUT_ROUTE_NIC |
6744 6744 (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6745 6745 (0 <<
6746 6746 CAM_OUT_CQ_ID_SHIFT));
6747 6747
6748 6748 /* route to NIC core */
6749 6749 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6750 6750 cam_output);
6751 6751 }
6752 6752 break;
6753 6753 }
6754 6754 default:
6755 6755 cmn_err(CE_WARN,
6756 6756 "Address type %d not yet supported.", type);
6757 6757 status = DDI_FAILURE;
6758 6758 }
6759 6759 exit:
6760 6760 return (status);
6761 6761 }
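
The upper/lower word packing that ql_set_mac_addr_reg() writes at CAM offsets 0 and 1 can be checked in isolation. A standalone sketch with an example address:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* example address only */
	uint8_t addr[6] = { 0x00, 0xc0, 0xdd, 0x01, 0x02, 0x03 };
	uint32_t upper = ((uint32_t)addr[0] << 8) | addr[1];
	uint32_t lower = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
	    ((uint32_t)addr[4] << 8) | addr[5];

	/* offset 0 takes the lower 32 bits, offset 1 the upper 16 bits */
	(void) printf("upper = 0x%04x lower = 0x%08x\n", upper, lower);
	return (0);
}
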
6762 6762
6763 6763 /*
6764 6764 * The NIC function for this chip has 16 routing indexes. Each one can be used
6765 6765 * to route different frame types to various inbound queues. We send broadcast
6766 6766  * to route different frame types to various inbound queues. We send broadcast,
6767 6767  * multicast, and error frames to the default queue for slow handling,
6768 6768 */
6769 6769 static int
6770 6770 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6771 6771 {
6772 6772 int status;
6773 6773 uint32_t value = 0;
6774 6774
6775 6775 QL_PRINT(DBG_INIT,
6776 6776 ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6777 6777 (enable ? "Adding" : "Removing"),
6778 6778 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6779 6779 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6780 6780 ((index ==
6781 6781 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6782 6782 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6783 6783 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6784 6784 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6785 6785 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6786 6786 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6787 6787 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6788 6788 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6789 6789 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6790 6790 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6791 6791 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6792 6792 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6793 6793 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6794 6794 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6795 6795 (enable ? "to" : "from")));
6796 6796
6797 6797 switch (mask) {
6798 6798 case RT_IDX_CAM_HIT:
6799 6799 value = RT_IDX_DST_CAM_Q | /* dest */
6800 6800 RT_IDX_TYPE_NICQ | /* type */
6801 6801 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6802 6802 break;
6803 6803
6804 6804 case RT_IDX_VALID: /* Promiscuous Mode frames. */
6805 6805 value = RT_IDX_DST_DFLT_Q | /* dest */
6806 6806 RT_IDX_TYPE_NICQ | /* type */
6807 6807 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6808 6808 break;
6809 6809
6810 6810 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
6811 6811 value = RT_IDX_DST_DFLT_Q | /* dest */
6812 6812 RT_IDX_TYPE_NICQ | /* type */
6813 6813 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6814 6814 break;
6815 6815
6816 6816 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
6817 6817 value = RT_IDX_DST_DFLT_Q | /* dest */
6818 6818 RT_IDX_TYPE_NICQ | /* type */
6819 6819 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6820 6820 break;
6821 6821
6822 6822 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
6823 6823 value = RT_IDX_DST_CAM_Q | /* dest */
6824 6824 RT_IDX_TYPE_NICQ | /* type */
6825 6825 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6826 6826 break;
6827 6827
6828 6828 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
6829 6829 value = RT_IDX_DST_CAM_Q | /* dest */
6830 6830 RT_IDX_TYPE_NICQ | /* type */
6831 6831 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6832 6832 break;
6833 6833
6834 6834 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
6835 6835 value = RT_IDX_DST_RSS | /* dest */
6836 6836 RT_IDX_TYPE_NICQ | /* type */
6837 6837 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6838 6838 break;
6839 6839
6840 6840 case 0: /* Clear the E-bit on an entry. */
6841 6841 value = RT_IDX_DST_DFLT_Q | /* dest */
6842 6842 RT_IDX_TYPE_NICQ | /* type */
6843 6843 (index << RT_IDX_IDX_SHIFT); /* index */
6844 6844 break;
6845 6845
6846 6846 default:
6847 6847 cmn_err(CE_WARN, "Mask type %d not yet supported.",
6848 6848 mask);
6849 6849 status = -EPERM;
6850 6850 goto exit;
6851 6851 }
6852 6852
6853 6853 if (value != 0) {
6854 6854 status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6855 6855 if (status)
6856 6856 goto exit;
6857 6857 value |= (enable ? RT_IDX_E : 0);
6858 6858 ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6859 6859 ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6860 6860 }
6861 6861
6862 6862 exit:
6863 6863 return (status);
6864 6864 }
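
As an illustration only, and not code from this driver, a promiscuous-mode toggle could reuse ql_set_routing_reg() with the RT_IDX_VALID mask: with enable set, the E bit is added and the mask is written to REG_ROUTING_DATA; with enable clear, the same slot is selected but the data register is written as 0, which disables the entry. The caller is assumed to hold the SEM_RT_IDX_MASK semaphore, as ql_route_initialize() and ql_bringdown_adapter() do around their routing updates.

/*
 * Hypothetical helper, for illustration only: route (or stop routing)
 * all valid frames to the default queue, i.e. promiscuous mode.
 * Caller is assumed to hold the routing-index semaphore.
 */
static int
ql_sketch_set_promiscuous(qlge_t *qlge, int enable)
{
	return (ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
	    RT_IDX_VALID, enable));
}
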
6865 6865
6866 6866 /*
6867 6867 * Clear all the entries in the routing table.
6868 6868 * Caller must get semaphore in advance.
6869 6869 */
6870 6870
6871 6871 static int
6872 6872 ql_stop_routing(qlge_t *qlge)
6873 6873 {
6874 6874 int status = 0;
6875 6875 int i;
6876 6876 /* Clear all the entries in the routing table. */
6877 6877 for (i = 0; i < 16; i++) {
6878 6878 status = ql_set_routing_reg(qlge, i, 0, 0);
6879 6879 if (status) {
6880 6880 cmn_err(CE_WARN, "Stop routing failed. ");
6881 6881 }
6882 6882 }
6883 6883 return (status);
6884 6884 }
6885 6885
6886 6886 /* Initialize the frame-to-queue routing. */
6887 6887 int
6888 6888 ql_route_initialize(qlge_t *qlge)
6889 6889 {
6890 6890 int status = 0;
6891 6891
6892 6892 status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6893 6893 if (status != DDI_SUCCESS)
6894 6894 return (status);
6895 6895
6896 6896 /* Clear all the entries in the routing table. */
6897 6897 status = ql_stop_routing(qlge);
6898 6898 if (status) {
6899 6899 goto exit;
6900 6900 }
6901 6901 status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6902 6902 if (status) {
6903 6903 cmn_err(CE_WARN,
6904 6904 "Failed to init routing register for broadcast packets.");
6905 6905 goto exit;
6906 6906 }
6907 6907 /*
6908 6908 * If we have more than one inbound queue, then turn on RSS in the
6909 6909 * routing block.
6910 6910 */
6911 6911 if (qlge->rss_ring_count > 1) {
6912 6912 status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6913 6913 RT_IDX_RSS_MATCH, 1);
6914 6914 if (status) {
6915 6915 cmn_err(CE_WARN,
6916 6916 "Failed to init routing register for MATCH RSS "
6917 6917 "packets.");
6918 6918 goto exit;
6919 6919 }
6920 6920 }
6921 6921
6922 6922 status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6923 6923 RT_IDX_CAM_HIT, 1);
6924 6924 if (status) {
6925 6925 cmn_err(CE_WARN,
6926 6926 "Failed to init routing register for CAM packets.");
6927 6927 goto exit;
6928 6928 }
6929 6929
6930 6930 status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6931 6931 RT_IDX_MCAST_MATCH, 1);
6932 6932 if (status) {
6933 6933 cmn_err(CE_WARN,
6934 6934 "Failed to init routing register for Multicast "
6935 6935 "packets.");
6936 6936 }
6937 6937
6938 6938 exit:
6939 6939 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6940 6940 return (status);
6941 6941 }
6942 6942
6943 6943 /*
6944 6944 * Initialize hardware
6945 6945 */
6946 6946 static int
6947 6947 ql_device_initialize(qlge_t *qlge)
6948 6948 {
6949 6949 uint32_t value, mask;
6950 6950 int i;
6951 6951 int status = 0;
6952 6952 uint16_t pause = PAUSE_MODE_DISABLED;
6953 6953 boolean_t update_port_config = B_FALSE;
6954 6954 uint32_t pause_bit_mask;
6955 6955 boolean_t dcbx_enable = B_FALSE;
6956 6956 uint32_t dcbx_bit_mask = 0x10;
6957 6957 /*
6958 6958 * Set up the System register to halt on errors.
6959 6959 */
6960 6960 value = SYS_EFE | SYS_FAE;
6961 6961 mask = value << 16;
6962 6962 ql_write_reg(qlge, REG_SYSTEM, mask | value);
6963 6963
6964 6964 /* Set the default queue. */
6965 6965 value = NIC_RCV_CFG_DFQ;
6966 6966 mask = NIC_RCV_CFG_DFQ_MASK;
6967 6967
6968 6968 ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6969 6969
6970 6970 /* Enable the MPI interrupt. */
6971 6971 ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6972 6972 | INTR_MASK_PI);
6973 6973 /* Enable the function, set pagesize, enable error checking. */
6974 6974 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6975 6975 FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6976 6976 /* Set/clear header splitting. */
6977 6977 if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6978 6978 value |= FSC_SH;
6979 6979 ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6980 6980 }
6981 6981 mask = FSC_VM_PAGESIZE_MASK |
6982 6982 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6983 6983 ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6984 6984 /*
6985 6985 * check current port max frame size, if different from OS setting,
6986 6986 * then we need to change
6987 6987 */
6988 6988 qlge->max_frame_size =
6989 6989 (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6990 6990
6991 6991 mutex_enter(&qlge->mbx_mutex);
6992 6992 status = ql_get_port_cfg(qlge);
6993 6993 mutex_exit(&qlge->mbx_mutex);
6994 6994
6995 6995 if (status == DDI_SUCCESS) {
6996 6996 /* if current frame size is smaller than required size */
6997 6997 if (qlge->port_cfg_info.max_frame_size <
6998 6998 qlge->max_frame_size) {
6999 6999 QL_PRINT(DBG_MBX,
7000 7000 ("update frame size, current %d, new %d\n",
7001 7001 qlge->port_cfg_info.max_frame_size,
7002 7002 qlge->max_frame_size));
7003 7003 qlge->port_cfg_info.max_frame_size =
7004 7004 qlge->max_frame_size;
7005 7005 qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
7006 7006 update_port_config = B_TRUE;
7007 7007 }
7008 7008
7009 7009 if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
7010 7010 pause = PAUSE_MODE_STANDARD;
7011 7011 else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
7012 7012 pause = PAUSE_MODE_PER_PRIORITY;
7013 7013
7014 7014 if (pause != qlge->pause) {
7015 7015 pause_bit_mask = 0x60; /* bit 5-6 */
7016 7016 /* clear pause bits */
7017 7017 qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
7018 7018 if (qlge->pause == PAUSE_MODE_STANDARD)
7019 7019 qlge->port_cfg_info.link_cfg |= STD_PAUSE;
7020 7020 else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
7021 7021 qlge->port_cfg_info.link_cfg |= PP_PAUSE;
7022 7022 update_port_config = B_TRUE;
7023 7023 }
7024 7024
7025 7025 if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
7026 7026 dcbx_enable = B_TRUE;
7027 7027 if (dcbx_enable != qlge->dcbx_enable) {
7028 7028 qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
7029 7029 if (qlge->dcbx_enable)
7030 7030 qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
7031 7031 }
7032 7032
7033 7033 update_port_config = B_TRUE;
7034 7034
7035 7035 /* if need to update port configuration */
7036 7036 if (update_port_config) {
7037 7037 mutex_enter(&qlge->mbx_mutex);
7038 7038 (void) ql_set_mpi_port_config(qlge,
7039 7039 qlge->port_cfg_info);
7040 7040 mutex_exit(&qlge->mbx_mutex);
7041 7041 }
7042 7042 } else
7043 7043 cmn_err(CE_WARN, "ql_get_port_cfg failed");
7044 7044
7045 7045 /* Start up the rx queues. */
7046 7046 for (i = 0; i < qlge->rx_ring_count; i++) {
7047 7047 status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
7048 7048 if (status) {
7049 7049 cmn_err(CE_WARN,
7050 7050 "Failed to start rx ring[%d]", i);
7051 7051 return (status);
7052 7052 }
7053 7053 }
7054 7054
7055 7055 /*
7056 7056 * If there is more than one inbound completion queue
7057 7057 * then download a RICB to configure RSS.
7058 7058 */
7059 7059 if (qlge->rss_ring_count > 1) {
7060 7060 status = ql_start_rss(qlge);
7061 7061 if (status) {
7062 7062 cmn_err(CE_WARN, "Failed to start RSS.");
7063 7063 return (status);
7064 7064 }
7065 7065 }
7066 7066
7067 7067 /* Start up the tx queues. */
7068 7068 for (i = 0; i < qlge->tx_ring_count; i++) {
7069 7069 status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
7070 7070 if (status) {
7071 7071 cmn_err(CE_WARN,
7072 7072 "Failed to start tx ring[%d]", i);
7073 7073 return (status);
7074 7074 }
7075 7075 }
7076 7076 qlge->selected_tx_ring = 0;
7077 7077 /* Set the frame routing filter. */
7078 7078 status = ql_route_initialize(qlge);
7079 7079 if (status) {
7080 7080 cmn_err(CE_WARN,
7081 7081 "Failed to init CAM/Routing tables.");
7082 7082 return (status);
7083 7083 }
7084 7084
7085 7085 return (status);
7086 7086 }
7087 7087 /*
7088 7088 * Issue soft reset to chip.
7089 7089 */
7090 7090 static int
7091 7091 ql_asic_reset(qlge_t *qlge)
7092 7092 {
7093 7093 int status = DDI_SUCCESS;
7094 7094
7095 7095 ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7096 7096 |FUNCTION_RESET);
7097 7097
7098 7098 if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7099 7099 BIT_RESET, 0) != DDI_SUCCESS) {
7100 7100 cmn_err(CE_WARN,
7101 7101 "TIMEOUT!!! errored out of resetting the chip!");
7102 7102 status = DDI_FAILURE;
7103 7103 }
7104 7104
7105 7105 return (status);
7106 7106 }
7107 7107
7108 7108 /*
7109 7109 * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7110 7110  * its free list, move up to (sbq_len - MIN_BUFFERS_ARM_COUNT) descriptors to its in-use list
7111 7111 * to be used by hardware.
7112 7112 */
7113 7113 static void
7114 7114 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7115 7115 {
7116 7116 struct bq_desc *sbq_desc;
7117 7117 int i;
7118 7118 uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7119 7119 uint32_t arm_count;
7120 7120
7121 7121 if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7122 7122 arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7123 7123 else {
7124 7124 /* Adjust to a multiple of 16 */
7125 7125 arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7126 7126 #ifdef QLGE_LOAD_UNLOAD
7127 7127 cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7128 7128 #endif
7129 7129 }
7130 7130 for (i = 0; i < arm_count; i++) {
7131 7131 sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7132 7132 if (sbq_desc == NULL)
7133 7133 break;
7134 7134 /* Arm asic */
7135 7135 *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7136 7136 sbq_entry++;
7137 7137
7138 7138 /* link the descriptors to in_use_list */
7139 7139 ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7140 7140 rx_ring->sbq_prod_idx++;
7141 7141 }
7142 7142 ql_update_sbq_prod_idx(qlge, rx_ring);
7143 7143 }
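
The "Adjust to a multiple of 16" step above is plain truncating integer division; a standalone sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t free_count = 57;	/* example value */
	/* same rounding as ql_arm_sbuf()/ql_arm_lbuf(): truncate to x16 */
	uint32_t arm_count = (free_count / 16) * 16;

	(void) printf("free %u -> arm %u\n", free_count, arm_count); /* 48 */
	return (0);
}
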
7144 7144
7145 7145 /*
7146 7146 * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7147 7147  * its free list, move up to (lbq_len - MIN_BUFFERS_ARM_COUNT) descriptors to its in-use list
7148 7148 * to be used by hardware.
7149 7149 */
7150 7150 static void
7151 7151 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7152 7152 {
7153 7153 struct bq_desc *lbq_desc;
7154 7154 int i;
7155 7155 uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7156 7156 uint32_t arm_count;
7157 7157
7158 7158 if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7159 7159 arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7160 7160 else {
7161 7161 /* Adjust to a multiple of 16 */
7162 7162 arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7163 7163 #ifdef QLGE_LOAD_UNLOAD
7164 7164 cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7165 7165 #endif
7166 7166 }
7167 7167 for (i = 0; i < arm_count; i++) {
7168 7168 lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7169 7169 if (lbq_desc == NULL)
7170 7170 break;
7171 7171 /* Arm asic */
7172 7172 *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7173 7173 lbq_entry++;
7174 7174
7175 7175 /* link the descriptors to in_use_list */
7176 7176 ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7177 7177 rx_ring->lbq_prod_idx++;
7178 7178 }
7179 7179 ql_update_lbq_prod_idx(qlge, rx_ring);
7180 7180 }
7181 7181
7182 7182
7183 7183 /*
7184 7184  * Initializes the adapter by configuring the request and response queues
7185 7185  * and by arming small and large receive buffers for use by the
7186 7186  * hardware.
7187 7187 */
7188 7188 static int
7189 7189 ql_bringup_adapter(qlge_t *qlge)
7190 7190 {
7191 7191 int i;
7192 7192
7193 7193 if (ql_device_initialize(qlge) != DDI_SUCCESS) {
7194 7194 cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
7195 7195 __func__, qlge->instance);
7196 7196 goto err_bringup;
7197 7197 }
7198 7198 qlge->sequence |= INIT_ADAPTER_UP;
7199 7199
7200 7200 #ifdef QLGE_TRACK_BUFFER_USAGE
7201 7201 for (i = 0; i < qlge->rx_ring_count; i++) {
7202 7202 if (qlge->rx_ring[i].type != TX_Q) {
7203 7203 qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
7204 7204 qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
7205 7205 }
7206 7206 qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
7207 7207 }
7208 7208 #endif
7209 7209 /* Arm buffers */
7210 7210 for (i = 0; i < qlge->rx_ring_count; i++) {
7211 7211 if (qlge->rx_ring[i].type != TX_Q) {
7212 7212 ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
7213 7213 ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
7214 7214 }
7215 7215 }
7216 7216
7217 7217 /* Enable work/request queues */
7218 7218 for (i = 0; i < qlge->tx_ring_count; i++) {
7219 7219 if (qlge->tx_ring[i].valid_db_reg)
7220 7220 ql_write_doorbell_reg(qlge,
7221 7221 qlge->tx_ring[i].valid_db_reg,
7222 7222 REQ_Q_VALID);
7223 7223 }
7224 7224
7225 7225 /* Enable completion queues */
7226 7226 for (i = 0; i < qlge->rx_ring_count; i++) {
7227 7227 if (qlge->rx_ring[i].valid_db_reg)
7228 7228 ql_write_doorbell_reg(qlge,
7229 7229 qlge->rx_ring[i].valid_db_reg,
7230 7230 RSP_Q_VALID);
7231 7231 }
7232 7232
7233 7233 for (i = 0; i < qlge->tx_ring_count; i++) {
7234 7234 mutex_enter(&qlge->tx_ring[i].tx_lock);
7235 7235 qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
7236 7236 mutex_exit(&qlge->tx_ring[i].tx_lock);
7237 7237 }
7238 7238
7239 7239 for (i = 0; i < qlge->rx_ring_count; i++) {
7240 7240 mutex_enter(&qlge->rx_ring[i].rx_lock);
7241 7241 qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
7242 7242 mutex_exit(&qlge->rx_ring[i].rx_lock);
7243 7243 }
7244 7244
7245 7245 /* This mutex will get re-acquired in enable_completion interrupt */
7246 7246 mutex_exit(&qlge->hw_mutex);
7247 7247 /* Traffic can start flowing now */
7248 7248 ql_enable_all_completion_interrupts(qlge);
7249 7249 mutex_enter(&qlge->hw_mutex);
7250 7250
7251 7251 ql_enable_global_interrupt(qlge);
7252 7252
7253 7253 qlge->sequence |= ADAPTER_INIT;
7254 7254 return (DDI_SUCCESS);
7255 7255
7256 7256 err_bringup:
7257 7257 (void) ql_asic_reset(qlge);
7258 7258 return (DDI_FAILURE);
7259 7259 }
7260 7260
7261 7261 /*
7262 7262 * Initialize mutexes of each rx/tx rings
7263 7263 */
7264 7264 static int
7265 7265 ql_init_rx_tx_locks(qlge_t *qlge)
7266 7266 {
7267 7267 struct tx_ring *tx_ring;
7268 7268 struct rx_ring *rx_ring;
7269 7269 int i;
7270 7270
7271 7271 for (i = 0; i < qlge->tx_ring_count; i++) {
7272 7272 tx_ring = &qlge->tx_ring[i];
7273 7273 mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7274 7274 DDI_INTR_PRI(qlge->intr_pri));
7275 7275 }
7276 7276
7277 7277 for (i = 0; i < qlge->rx_ring_count; i++) {
7278 7278 rx_ring = &qlge->rx_ring[i];
7279 7279 mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7280 7280 DDI_INTR_PRI(qlge->intr_pri));
7281 7281 mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7282 7282 DDI_INTR_PRI(qlge->intr_pri));
7283 7283 mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7284 7284 DDI_INTR_PRI(qlge->intr_pri));
7285 7285 }
7286 7286
7287 7287 return (DDI_SUCCESS);
7288 7288 }
7289 7289
7290 7290 /*ARGSUSED*/
7291 7291 /*
7292 7292 * Simply call pci_ereport_post which generates ereports for errors
7293 7293 * that occur in the PCI local bus configuration status registers.
7294 7294 */
7295 7295 static int
7296 7296 ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7297 7297 {
7298 7298 pci_ereport_post(dip, err, NULL);
7299 7299 return (err->fme_status);
7300 7300 }
7301 7301
7302 7302 static void
7303 7303 ql_fm_init(qlge_t *qlge)
7304 7304 {
7305 7305 ddi_iblock_cookie_t iblk;
7306 7306
7307 7307 QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7308 7308 qlge->instance, qlge->fm_capabilities));
7309 7309 /*
7310 7310 * Register capabilities with IO Fault Services. The capabilities
7311 7311 * set above may not be supported by the parent nexus, in that case
7312 7312 * some capability bits may be cleared.
7313 7313 */
7314 7314 if (qlge->fm_capabilities)
7315 7315 ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7316 7316
7317 7317 /*
7318 7318 * Initialize pci ereport capabilities if ereport capable
7319 7319 */
7320 7320 if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7321 7321 DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7322 7322 pci_ereport_setup(qlge->dip);
7323 7323 }
7324 7324
7325 7325 /* Register error callback if error callback capable */
7326 7326 if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7327 7327 ddi_fm_handler_register(qlge->dip,
7328 7328 ql_fm_error_cb, (void*) qlge);
7329 7329 }
7330 7330
7331 7331 /*
7332 7332  * DDI_FLAGERR_ACC indicates:
7333 7333 * Driver will check its access handle(s) for faults on
7334 7334 * a regular basis by calling ddi_fm_acc_err_get
7335 7335 * Driver is able to cope with incorrect results of I/O
7336 7336  * operations resulting from an I/O fault
7337 7337 */
7338 7338 if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7339 7339 ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7340 7340 }
7341 7341
7342 7342 /*
7343 7343 * DDI_DMA_FLAGERR indicates:
7344 7344 * Driver will check its DMA handle(s) for faults on a
7345 7345 * regular basis using ddi_fm_dma_err_get
7346 7346 * Driver is able to cope with incorrect results of DMA
7347 7347  * operations resulting from an I/O fault
7348 7348 */
7349 7349 if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7350 7350 tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7351 7351 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7352 7352 }
7353 7353 QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7354 7354 qlge->instance));
7355 7355 }
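
Given the DDI_FLAGERR_ACC and DDI_DMA_FLAGERR settings above, the driver is expected to poll its handles with ddi_fm_acc_err_get() and ddi_fm_dma_err_get(). A minimal sketch, not taken from this driver, of what such a check on the register access handle mapped in ql_attach() might look like:

/*
 * Sketch only: poll the register access handle for faults and report
 * the service impact.  The real driver's policy may differ.
 */
static int
ql_sketch_check_acc_handle(qlge_t *qlge)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(qlge->dev_handle, &de, DDI_FME_VERSION);
	if (de.fme_status != DDI_FM_OK) {
		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
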
7356 7356
7357 7357 static void
7358 7358 ql_fm_fini(qlge_t *qlge)
7359 7359 {
7360 7360 QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7361 7361 qlge->instance));
7362 7362 /* Only unregister FMA capabilities if we registered some */
7363 7363 if (qlge->fm_capabilities) {
7364 7364
7365 7365 /*
7366 7366 * Release any resources allocated by pci_ereport_setup()
7367 7367 */
7368 7368 if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7369 7369 DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7370 7370 pci_ereport_teardown(qlge->dip);
7371 7371
7372 7372 /*
7373 7373 * Un-register error callback if error callback capable
7374 7374 */
7375 7375 if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7376 7376 ddi_fm_handler_unregister(qlge->dip);
7377 7377
7378 7378 /* Unregister from IO Fault Services */
7379 7379 ddi_fm_fini(qlge->dip);
7380 7380 }
7381 7381 QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7382 7382 qlge->instance));
7383 7383 }
7384 7384 /*
7385 7385 * ql_attach - Driver attach.
7386 7386 */
7387 7387 static int
7388 7388 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7389 7389 {
7390 7390 int instance;
7391 7391 qlge_t *qlge = NULL;
7392 7392 int rval;
7393 7393 uint16_t w;
7394 7394 mac_register_t *macp = NULL;
7395 7395 uint32_t data;
7396 7396
7397 7397 rval = DDI_FAILURE;
7398 7398
7399 7399 /* first get the instance */
7400 7400 instance = ddi_get_instance(dip);
7401 7401
7402 7402 switch (cmd) {
7403 7403 case DDI_ATTACH:
7404 7404 /*
7405 7405 * Allocate our per-device-instance structure
7406 7406 */
7407 7407 qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7408 7408 ASSERT(qlge != NULL);
7409 7409 qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7410 7410
7411 7411 qlge->dip = dip;
7412 7412 qlge->instance = instance;
7413 7413 /* Set up the coalescing parameters. */
7414 7414 qlge->ql_dbgprnt = 0;
7415 7415 #if QL_DEBUG
7416 7416 qlge->ql_dbgprnt = QL_DEBUG;
7417 7417 #endif /* QL_DEBUG */
7418 7418
7419 7419 /*
7420 7420 * Initialize for fma support
7421 7421 */
7422 7422 /* fault management (fm) capabilities. */
7423 7423 qlge->fm_capabilities =
7424 7424 DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
7425 7425 data = ql_get_prop(qlge, "fm-capable");
7426 7426 if (data <= 0xf) {
7427 7427 qlge->fm_capabilities = data;
7428 7428 }
7429 7429 ql_fm_init(qlge);
7430 7430 qlge->sequence |= INIT_FM;
7431 7431 QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7432 7432 qlge->instance));
7433 7433
7434 7434 /*
7435 7435 * Setup the ISP8x00 registers address mapping to be
7436 7436 * accessed by this particular driver.
7437 7437 * 0x0 Configuration Space
7438 7438 * 0x1 I/O Space
7439 7439 * 0x2 1st Memory Space address - Control Register Set
7440 7440 * 0x3 2nd Memory Space address - Doorbell Memory Space
7441 7441 */
7442 7442 w = 2;
7443 7443 if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7444 7444 sizeof (dev_reg_t), &ql_dev_acc_attr,
7445 7445 &qlge->dev_handle) != DDI_SUCCESS) {
7446 7446 cmn_err(CE_WARN, "%s(%d): Unable to map device "
7447 7447 "registers", ADAPTER_NAME, instance);
7448 7448 break;
7449 7449 }
7450 7450 QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7451 7451 qlge->iobase));
7452 7452 qlge->sequence |= INIT_REGS_SETUP;
7453 7453
7454 7454 /* map Doorbell memory space */
7455 7455 w = 3;
7456 7456 if (ddi_regs_map_setup(dip, w,
7457 7457 (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7458 7458 0x100000 /* sizeof (dev_doorbell_reg_t) */,
7459 7459 &ql_dev_acc_attr,
7460 7460 &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7461 7461 cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7462 7462 "registers",
7463 7463 ADAPTER_NAME, instance);
7464 7464 break;
7465 7465 }
7466 7466 QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7467 7467 qlge->doorbell_reg_iobase));
7468 7468 qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7469 7469
7470 7470 /*
7471 7471 * Allocate a macinfo structure for this instance
7472 7472 */
7473 7473 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7474 7474 cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7475 7475 __func__, instance);
7476 7476 break;
7477 7477 }
7478 7478 /* save adapter status to dip private data */
7479 7479 ddi_set_driver_private(dip, qlge);
7480 7480 QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7481 7481 ADAPTER_NAME, instance));
7482 7482 qlge->sequence |= INIT_MAC_ALLOC;
7483 7483
7484 7484 /*
7485 7485 * Attach this instance of the device
7486 7486 */
7487 7487 /* Setup PCI Local Bus Configuration resource. */
7488 7488 if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7489 7489 cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7490 7490 ADAPTER_NAME, instance);
7491 7491 if (qlge->fm_enable) {
7492 7492 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7493 7493 ddi_fm_service_impact(qlge->dip,
7494 7494 DDI_SERVICE_LOST);
7495 7495 }
7496 7496 break;
7497 7497 }
7498 7498 qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7499 7499 QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7500 7500 instance));
7501 7501
7502 7502 if (ql_init_instance(qlge) != DDI_SUCCESS) {
7503 7503 cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7504 7504 "instance", ADAPTER_NAME, instance);
7505 7505 if (qlge->fm_enable) {
7506 7506 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7507 7507 ddi_fm_service_impact(qlge->dip,
7508 7508 DDI_SERVICE_LOST);
7509 7509 }
7510 7510 break;
7511 7511 }
7512 7512 QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7513 7513 instance));
7514 7514
7515 7515 /* Setup interrupt vectors */
7516 7516 if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7517 7517 break;
7518 7518 }
7519 7519 qlge->sequence |= INIT_INTR_ALLOC;
7520 7520 QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7521 7521 instance));
7522 7522
7523 7523 /* Configure queues */
7524 7524 if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7525 7525 break;
7526 7526 }
7527 7527 qlge->sequence |= INIT_SETUP_RINGS;
7528 7528 QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7529 7529 instance));
7530 7530
7531 7531 /*
7532 7532 * Allocate memory resources
7533 7533 */
7534 7534 if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7535 7535 cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7536 7536 __func__, qlge->instance);
7537 7537 break;
7538 7538 }
7539 7539 qlge->sequence |= INIT_MEMORY_ALLOC;
7540 7540 QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7541 7541 instance));
7542 7542
7543 7543 /*
7544 7544 * Map queues to interrupt vectors
7545 7545 */
7546 7546 ql_resolve_queues_to_irqs(qlge);
7547 7547
7548 7548 /* Initialize mutex, need the interrupt priority */
7549 7549 (void) ql_init_rx_tx_locks(qlge);
7550 7550 qlge->sequence |= INIT_LOCKS_CREATED;
7551 7551 QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7552 7552 ADAPTER_NAME, instance));
7553 7553
7554 7554 /*
7555 7555 * Use a soft interrupt to do something that we do not want
7556 7556  * to do in regular network functions or with mutexes being held
7557 7557 */
7558 7558 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7559 7559 DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7560 7560 != DDI_SUCCESS) {
7561 7561 break;
7562 7562 }
7563 7563
7564 7564 if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7565 7565 DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7566 7566 != DDI_SUCCESS) {
7567 7567 break;
7568 7568 }
7569 7569
7570 7570 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7571 7571 DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7572 7572 != DDI_SUCCESS) {
7573 7573 break;
7574 7574 }
7575 7575 qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7576 7576 QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7577 7577 ADAPTER_NAME, instance));
7578 7578
7579 7579 /*
7580 7580 * mutex to protect the adapter state structure.
7581 7581 * initialize mutexes according to the interrupt priority
7582 7582 */
7583 7583 mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7584 7584 DDI_INTR_PRI(qlge->intr_pri));
7585 7585 mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7586 7586 DDI_INTR_PRI(qlge->intr_pri));
7587 7587 mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7588 7588 DDI_INTR_PRI(qlge->intr_pri));
7589 7589
7590 7590 /* Mailbox wait and interrupt conditional variable. */
7591 7591 cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7592 7592 qlge->sequence |= INIT_MUTEX;
7593 7593 QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7594 7594 ADAPTER_NAME, instance));
7595 7595
7596 7596 /*
7597 7597 * KStats
7598 7598 */
7599 7599 if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7600 7600  cmn_err(CE_WARN, "%s(%d): kstats initialization failed",
7601 7601 ADAPTER_NAME, instance);
7602 7602 break;
7603 7603 }
7604 7604 qlge->sequence |= INIT_KSTATS;
7605 7605 QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7606 7606 ADAPTER_NAME, instance));
7607 7607
7608 7608 /*
7609 7609 * Initialize gld macinfo structure
7610 7610 */
7611 7611 ql_gld3_init(qlge, macp);
7612 7612 /*
7613 7613 * Add interrupt handlers
7614 7614 */
7615 7615 if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7616 7616 cmn_err(CE_WARN, "Failed to add interrupt "
7617 7617 "handlers");
7618 7618 break;
7619 7619 }
7620 7620 qlge->sequence |= INIT_ADD_INTERRUPT;
7621 7621 QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7622 7622 ADAPTER_NAME, instance));
7623 7623
7624 7624 /*
7625 7625 * MAC Register
7626 7626 */
7627 7627 if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7628 7628 cmn_err(CE_WARN, "%s(%d): mac_register failed",
7629 7629 __func__, instance);
7630 7630 break;
7631 7631 }
7632 7632 qlge->sequence |= INIT_MAC_REGISTERED;
7633 7633 QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7634 7634 ADAPTER_NAME, instance));
7635 7635
7636 7636 mac_free(macp);
7637 7637 macp = NULL;
7638 7638
7639 7639 qlge->mac_flags = QL_MAC_ATTACHED;
7640 7640
7641 7641 ddi_report_dev(dip);
7642 7642
7643 7643 rval = DDI_SUCCESS;
7644 7644
7645 7645 break;
7646 7646 /*
7647 7647 * DDI_RESUME
7648 7648 * When called with cmd set to DDI_RESUME, attach() must
7649 7649 * restore the hardware state of a device (power may have been
7650 7650  * removed from the device), allow pending requests to
7651 7651  * continue, and service new requests. In this case, the driver
7652 7652 * must not make any assumptions about the state of the
7653 7653 * hardware, but must restore the state of the device except
7654 7654 * for the power level of components.
7655 7655 *
7656 7656 */
7657 7657 case DDI_RESUME:
7658 7658
7659 7659 if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7660 7660 return (DDI_FAILURE);
7661 7661
7662 7662 QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7663 7663 __func__, qlge->instance));
7664 7664
7665 7665 mutex_enter(&qlge->gen_mutex);
7666 7666 rval = ql_do_start(qlge);
7667 7667 mutex_exit(&qlge->gen_mutex);
7668 7668 break;
7669 7669
7670 7670 default:
7671 7671 break;
7672 7672 }
7673 7673
7674 7674 /* if failed to attach */
7675 7675 if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7676 7676 cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7677 7677 qlge->sequence);
7678 7678 ql_free_resources(qlge);
7679 7679 }
7680 7680
7681 7681 return (rval);
7682 7682 }
7683 7683
7684 7684 /*
7685 7685 * Unbind all pending tx dma handles during driver bring down
7686 7686 */
7687 7687 static void
7688 7688 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7689 7689 {
7690 7690 struct tx_ring_desc *tx_ring_desc;
7691 7691 int i, j;
7692 7692
7693 7693 if (tx_ring->wq_desc) {
7694 7694 tx_ring_desc = tx_ring->wq_desc;
7695 7695 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7696 7696 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7697 7697 if (tx_ring_desc->tx_dma_handle[j]) {
7698 7698 (void) ddi_dma_unbind_handle(
7699 7699 tx_ring_desc->tx_dma_handle[j]);
7700 7700 }
7701 7701 }
7702 7702 tx_ring_desc->tx_dma_handle_used = 0;
7703 7703 } /* end of for loop */
7704 7704 }
7705 7705 }
7706 7706 /*
7707 7707 * Wait for all the packets sent to the chip to finish transmission
7708 7708 * to prevent buffers to be unmapped before or during a transmit operation
7709 7709  * to prevent buffers from being unmapped before or during a transmit operation
7710 7710 static int
7711 7711 ql_wait_tx_quiesce(qlge_t *qlge)
7712 7712 {
7713 7713 int count = MAX_TX_WAIT_COUNT, i;
7714 7714 int rings_done;
7715 7715 volatile struct tx_ring *tx_ring;
7716 7716 uint32_t consumer_idx;
7717 7717 uint32_t producer_idx;
7718 7718 uint32_t temp;
7719 7719 int done = 0;
7720 7720 int rval = DDI_FAILURE;
7721 7721
7722 7722 while (!done) {
7723 7723 rings_done = 0;
7724 7724
7725 7725 for (i = 0; i < qlge->tx_ring_count; i++) {
7726 7726 tx_ring = &qlge->tx_ring[i];
7727 7727 temp = ql_read_doorbell_reg(qlge,
7728 7728 tx_ring->prod_idx_db_reg);
7729 7729 producer_idx = temp & 0x0000ffff;
7730 7730 consumer_idx = (temp >> 16);
7731 7731
7732 7732 if (qlge->isr_stride) {
7733 7733 struct rx_ring *ob_ring;
7734 7734 ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7735 7735 if (producer_idx != ob_ring->cnsmr_idx) {
7736 7736 cmn_err(CE_NOTE, " force clean \n");
7737 7737 (void) ql_clean_outbound_rx_ring(
7738 7738 ob_ring);
7739 7739 }
7740 7740 }
7741 7741 /*
7742 7742 * Get the pending iocb count, ones which have not been
7743 7743 * pulled down by the chip
7744 7744 */
7745 7745 if (producer_idx >= consumer_idx)
7746 7746 temp = (producer_idx - consumer_idx);
7747 7747 else
7748 7748 temp = (tx_ring->wq_len - consumer_idx) +
7749 7749 producer_idx;
7750 7750
7751 7751 if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7752 7752 rings_done++;
7753 7753 else {
7754 7754 done = 1;
7755 7755 break;
7756 7756 }
7757 7757 }
7758 7758
7759 7759 /* If all the rings are done */
7760 7760 if (rings_done >= qlge->tx_ring_count) {
7761 7761 #ifdef QLGE_LOAD_UNLOAD
7762 7762 cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7763 7763 __func__, qlge->instance);
7764 7764 #endif
7765 7765 rval = DDI_SUCCESS;
7766 7766 break;
7767 7767 }
7768 7768
7769 7769 qlge_delay(100);
7770 7770
7771 7771 count--;
7772 7772 if (!count) {
7773 7773
7774 7774 count = MAX_TX_WAIT_COUNT;
7775 7775 #ifdef QLGE_LOAD_UNLOAD
7776 7776 volatile struct rx_ring *rx_ring;
7777 7777 cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
7778 7778  " transmits on queue %d to complete.\n",
7779 7779 __func__, qlge->instance,
7780 7780 (qlge->tx_ring[i].wq_len -
7781 7781 qlge->tx_ring[i].tx_free_count),
7782 7782 i);
7783 7783
7784 7784 rx_ring = &qlge->rx_ring[i+1];
7785 7785 temp = ql_read_doorbell_reg(qlge,
7786 7786 rx_ring->cnsmr_idx_db_reg);
7787 7787 consumer_idx = temp & 0x0000ffff;
7788 7788 producer_idx = (temp >> 16);
7789 7789 cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7790 7790 " Producer %d, Consumer %d\n",
7791 7791 __func__, qlge->instance,
7792 7792 i+1,
7793 7793 producer_idx, consumer_idx);
7794 7794
7795 7795 temp = ql_read_doorbell_reg(qlge,
7796 7796 tx_ring->prod_idx_db_reg);
7797 7797 producer_idx = temp & 0x0000ffff;
7798 7798 consumer_idx = (temp >> 16);
7799 7799 cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7800 7800 " Producer %d, Consumer %d\n",
7801 7801 __func__, qlge->instance, i,
7802 7802 producer_idx, consumer_idx);
7803 7803 #endif
7804 7804
7805 7805 /* For now move on */
7806 7806 break;
7807 7807 }
7808 7808 }
7809 7809 /* Stop the request queue */
7810 7810 mutex_enter(&qlge->hw_mutex);
7811 7811 for (i = 0; i < qlge->tx_ring_count; i++) {
7812 7812 if (qlge->tx_ring[i].valid_db_reg) {
7813 7813 ql_write_doorbell_reg(qlge,
7814 7814 qlge->tx_ring[i].valid_db_reg, 0);
7815 7815 }
7816 7816 }
7817 7817 mutex_exit(&qlge->hw_mutex);
7818 7818 return (rval);
7819 7819 }
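
The pending-IOCB computation in ql_wait_tx_quiesce() has to allow for the producer index wrapping past the end of the work queue. A standalone sketch of the same arithmetic, with a made-up ring size standing in for wq_len:

#include <stdio.h>
#include <stdint.h>

/* entries the chip has not consumed yet, allowing for index wrap */
static uint32_t
pending_entries(uint32_t prod, uint32_t cons, uint32_t wq_len)
{
	return ((prod >= cons) ? (prod - cons) : (wq_len - cons) + prod);
}

int
main(void)
{
	(void) printf("%u\n", pending_entries(10, 4, 128));	/* 6 */
	(void) printf("%u\n", pending_entries(3, 120, 128));	/* 11, wrapped */
	return (0);
}
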
7820 7820
7821 7821 /*
7822 7822 * Wait for all the receives indicated to the stack to come back
7823 7823 */
7824 7824 static int
7825 7825 ql_wait_rx_complete(qlge_t *qlge)
7826 7826 {
7827 7827 int i;
7828 7828 /* Disable all the completion queues */
7829 7829 mutex_enter(&qlge->hw_mutex);
7830 7830 for (i = 0; i < qlge->rx_ring_count; i++) {
7831 7831 if (qlge->rx_ring[i].valid_db_reg) {
7832 7832 ql_write_doorbell_reg(qlge,
7833 7833 qlge->rx_ring[i].valid_db_reg, 0);
7834 7834 }
7835 7835 }
7836 7836 mutex_exit(&qlge->hw_mutex);
7837 7837
7838 7838 /* Wait for OS to return all rx buffers */
7839 7839 qlge_delay(QL_ONE_SEC_DELAY);
7840 7840 return (DDI_SUCCESS);
7841 7841 }
7842 7842
7843 7843 /*
7844 7844 * stop the driver
7845 7845 */
7846 7846 static int
7847 7847 ql_bringdown_adapter(qlge_t *qlge)
7848 7848 {
7849 7849 int i;
7850 7850 int status = DDI_SUCCESS;
7851 7851
7852 7852 qlge->mac_flags = QL_MAC_BRINGDOWN;
7853 7853 if (qlge->sequence & ADAPTER_INIT) {
7854 7854 /* stop forwarding external packets to driver */
7855 7855 status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7856 7856 if (status)
7857 7857 return (status);
7858 7858 (void) ql_stop_routing(qlge);
7859 7859 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7860 7860 /*
7861 7861 * Set the flag for receive and transmit
7862 7862 * operations to cease
7863 7863 */
7864 7864 for (i = 0; i < qlge->tx_ring_count; i++) {
7865 7865 mutex_enter(&qlge->tx_ring[i].tx_lock);
7866 7866 qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7867 7867 mutex_exit(&qlge->tx_ring[i].tx_lock);
7868 7868 }
7869 7869
7870 7870 for (i = 0; i < qlge->rx_ring_count; i++) {
7871 7871 mutex_enter(&qlge->rx_ring[i].rx_lock);
7872 7872 qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7873 7873 mutex_exit(&qlge->rx_ring[i].rx_lock);
7874 7874 }
7875 7875
7876 7876 /*
7877 7877 * Need interrupts to be running while the transmit
7878 7878 * completions are cleared. Wait for the packets
7879 7879 * queued to the chip to be sent out
7880 7880 */
7881 7881 (void) ql_wait_tx_quiesce(qlge);
7882 7882 /* Interrupts not needed from now */
7883 7883 ql_disable_all_completion_interrupts(qlge);
7884 7884
7885 7885 mutex_enter(&qlge->hw_mutex);
7886 7886 /* Disable Global interrupt */
7887 7887 ql_disable_global_interrupt(qlge);
7888 7888 mutex_exit(&qlge->hw_mutex);
7889 7889
7890 7890 /* Wait for all the indicated packets to come back */
7891 7891 status = ql_wait_rx_complete(qlge);
7892 7892
7893 7893 mutex_enter(&qlge->hw_mutex);
7894 7894 /* Reset adapter */
7895 7895 (void) ql_asic_reset(qlge);
7896 7896 /*
7897 7897 * Unbind all tx dma handles to prevent pending tx descriptors'
7898 7898 * dma handles from being re-used.
7899 7899 */
7900 7900 for (i = 0; i < qlge->tx_ring_count; i++) {
7901 7901 ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7902 7902 }
7903 7903
7904 7904 qlge->sequence &= ~ADAPTER_INIT;
7905 7905
7906 7906 mutex_exit(&qlge->hw_mutex);
7907 7907 }
7908 7908 return (status);
7909 7909 }
7910 7910
7911 7911 /*
7912 7912 * ql_detach
7913 7913  * Used to remove all the state associated with a given
7914 7914  * instance of a device node prior to the removal of that
7915 7915 * instance from the system.
7916 7916 */
7917 7917 static int
7918 7918 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7919 7919 {
7920 7920 qlge_t *qlge;
7921 7921 int rval;
7922 7922
7923 7923 rval = DDI_SUCCESS;
7924 7924
7925 7925 switch (cmd) {
7926 7926 case DDI_DETACH:
7927 7927
7928 7928 if ((qlge = QL_GET_DEV(dip)) == NULL)
7929 7929 return (DDI_FAILURE);
7930 7930 rval = ql_bringdown_adapter(qlge);
7931 7931 if (rval != DDI_SUCCESS)
7932 7932 break;
7933 7933
7934 7934 qlge->mac_flags = QL_MAC_DETACH;
7935 7935
7936 7936 /* free memory resources */
7937 7937 if (qlge->sequence & INIT_MEMORY_ALLOC) {
7938 7938 ql_free_mem_resources(qlge);
7939 7939 qlge->sequence &= ~INIT_MEMORY_ALLOC;
7940 7940 }
7941 7941 ql_free_resources(qlge);
7942 7942
7943 7943 break;
7944 7944
7945 7945 case DDI_SUSPEND:
7946 7946 if ((qlge = QL_GET_DEV(dip)) == NULL)
7947 7947 return (DDI_FAILURE);
7948 7948
7949 7949 mutex_enter(&qlge->gen_mutex);
7950 7950 if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7951 7951 (qlge->mac_flags == QL_MAC_STARTED)) {
7952 7952 (void) ql_do_stop(qlge);
7953 7953 }
7954 7954 qlge->mac_flags = QL_MAC_SUSPENDED;
7955 7955 mutex_exit(&qlge->gen_mutex);
7956 7956
7957 7957 break;
7958 7958 default:
7959 7959 rval = DDI_FAILURE;
7960 7960 break;
7961 7961 }
7962 7962
7963 7963 return (rval);
7964 7964 }
7965 7965
7966 7966 /*
7967 7967 * quiesce(9E) entry point.
7968 7968 *
7969 7969 * This function is called when the system is single-threaded at high
7970 7970 * PIL with preemption disabled. Therefore, this function must not be
7971 7971 * blocked.
7972 7972 *
7973 7973 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7974 7974 */
7975 7975 int
7976 7976 ql_quiesce(dev_info_t *dip)
7977 7977 {
7978 7978 qlge_t *qlge;
7979 7979 int i;
7980 7980
7981 7981 if ((qlge = QL_GET_DEV(dip)) == NULL)
7982 7982 return (DDI_FAILURE);
7983 7983
7984 7984 if (CFG_IST(qlge, CFG_CHIP_8100)) {
7985 7985 /* stop forwarding external packets to driver */
7986 7986 (void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7987 7987 (void) ql_stop_routing(qlge);
7988 7988 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7989 7989 /* Stop all the request queues */
7990 7990 for (i = 0; i < qlge->tx_ring_count; i++) {
7991 7991 if (qlge->tx_ring[i].valid_db_reg) {
7992 7992 ql_write_doorbell_reg(qlge,
7993 7993 qlge->tx_ring[i].valid_db_reg, 0);
7994 7994 }
7995 7995 }
7996 7996 qlge_delay(QL_ONE_SEC_DELAY/4);
7997 7997 /* Interrupts not needed from now */
7998 7998 /* Disable MPI interrupt */
7999 7999 ql_write_reg(qlge, REG_INTERRUPT_MASK,
8000 8000 (INTR_MASK_PI << 16));
8001 8001 ql_disable_global_interrupt(qlge);
8002 8002
8003 8003 /* Disable all the rx completion queues */
8004 8004 for (i = 0; i < qlge->rx_ring_count; i++) {
8005 8005 if (qlge->rx_ring[i].valid_db_reg) {
8006 8006 ql_write_doorbell_reg(qlge,
8007 8007 qlge->rx_ring[i].valid_db_reg, 0);
8008 8008 }
8009 8009 }
8010 8010 qlge_delay(QL_ONE_SEC_DELAY/4);
8011 8011 qlge->mac_flags = QL_MAC_STOPPED;
8012 8012 /* Reset adapter */
8013 8013 (void) ql_asic_reset(qlge);
8014 8014 qlge_delay(100);
8015 8015 }
8016 8016
8017 8017 return (DDI_SUCCESS);
8018 8018 }
8019 8019
8020 8020 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
8021 8021
8022 8022 /*
8023 8023 * Loadable Driver Interface Structures.
8024 8024 * Declare and initialize the module configuration section...
8025 8025 */
8026 8026 static struct modldrv modldrv = {
8027 8027 &mod_driverops, /* type of module: driver */
8028 8028 version, /* name of module */
8029 8029 &ql_ops /* driver dev_ops */
8030 8030 };
8031 8031
8032 8032 static struct modlinkage modlinkage = {
8033 - MODREV_1, &modldrv, NULL
8033 + MODREV_1, { &modldrv, NULL }
8034 8034 };
8035 8035
8036 8036 /*
8037 8037 * Loadable Module Routines
8038 8038 */
8039 8039
8040 8040 /*
8041 8041 * _init
8042 8042 * Initializes a loadable module. It is called before any other
8043 8043 * routine in a loadable module.
8044 8044 */
8045 8045 int
8046 8046 _init(void)
8047 8047 {
8048 8048 int rval;
8049 8049
8050 8050 mac_init_ops(&ql_ops, ADAPTER_NAME);
8051 8051 rval = mod_install(&modlinkage);
8052 8052 if (rval != DDI_SUCCESS) {
8053 8053 mac_fini_ops(&ql_ops);
8054 8054 cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8055 8055 ADAPTER_NAME);
8056 8056 }
8057 8057
8058 8058 return (rval);
8059 8059 }
8060 8060
8061 8061 /*
8062 8062 * _fini
8063 8063 * Prepares a module for unloading. It is called when the system
8064 8064 * wants to unload a module. If the module determines that it can
8065 8065 * be unloaded, then _fini() returns the value returned by
8066 8066 * mod_remove(). Upon successful return from _fini() no other
8067 8067 * routine in the module will be called before _init() is called.
8068 8068 */
8069 8069 int
8070 8070 _fini(void)
8071 8071 {
8072 8072 int rval;
8073 8073
8074 8074 rval = mod_remove(&modlinkage);
8075 8075 if (rval == DDI_SUCCESS) {
8076 8076 mac_fini_ops(&ql_ops);
8077 8077 }
8078 8078
8079 8079 return (rval);
8080 8080 }
8081 8081
8082 8082 /*
8083 8083 * _info
8084 8084 * Returns information about loadable module.
8085 8085 */
8086 8086 int
8087 8087 _info(struct modinfo *modinfop)
8088 8088 {
8089 8089 return (mod_info(&modlinkage, modinfop));
8090 8090 }