7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_init.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_init.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright 2010 QLogic Corporation */
23 23
24 24 /*
25 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27
28 28 #pragma ident "Copyright 2010 QLogic Corporation; ql_init.c"
29 29
30 30 /*
31 31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 32 *
33 33 * ***********************************************************************
34 34 * * **
35 35 * * NOTICE **
36 36 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
37 37 * * ALL RIGHTS RESERVED **
38 38 * * **
39 39 * ***********************************************************************
40 40 *
41 41 */
42 42
43 43 #include <ql_apps.h>
44 44 #include <ql_api.h>
45 45 #include <ql_debug.h>
46 46 #include <ql_init.h>
47 47 #include <ql_iocb.h>
48 48 #include <ql_isr.h>
49 49 #include <ql_mbx.h>
50 50 #include <ql_nx.h>
51 51 #include <ql_xioctl.h>
52 52
53 53 /*
54 54 * Local data
55 55 */
56 56
57 57 /*
58 58 * Local prototypes
59 59 */
60 60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 62 static void ql_23_properties(ql_adapter_state_t *, nvram_t *);
63 63 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *);
64 64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 65 static int ql_chip_diag(ql_adapter_state_t *);
66 66 static int ql_load_flash_fw(ql_adapter_state_t *);
67 67 static int ql_configure_loop(ql_adapter_state_t *);
68 68 static int ql_configure_hba(ql_adapter_state_t *);
69 69 static int ql_configure_fabric(ql_adapter_state_t *);
70 70 static int ql_configure_device_d_id(ql_adapter_state_t *);
71 71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 73 static void ql_clear_mcp(ql_adapter_state_t *);
74 74 static void ql_mps_reset(ql_adapter_state_t *);
75 75
76 76 /*
77 77 * ql_initialize_adapter
78 78 * Initialize board.
79 79 *
80 80 * Input:
81 81 * ha = adapter state pointer.
82 82 *
83 83 * Returns:
84 84 * ql local function return status code.
85 85 *
86 86 * Context:
87 87 * Kernel context.
88 88 */
89 89 int
90 90 ql_initialize_adapter(ql_adapter_state_t *ha)
91 91 {
92 92 int rval;
93 93 class_svc_param_t *class3_param;
94 94 caddr_t msg;
95 95 la_els_logi_t *els = &ha->loginparams;
96 96 int retries = 5;
97 97
98 98 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
99 99
100 100 do {
101 101 /* Clear adapter flags. */
102 102 TASK_DAEMON_LOCK(ha);
103 103 ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
104 104 TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
105 105 TASK_DAEMON_IDLE_CHK_FLG;
106 106 ha->task_daemon_flags |= LOOP_DOWN;
107 107 TASK_DAEMON_UNLOCK(ha);
108 108
109 109 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
110 110 ADAPTER_STATE_LOCK(ha);
111 111 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
112 112 ha->flags &= ~ONLINE;
113 113 ADAPTER_STATE_UNLOCK(ha);
114 114
115 115 ha->state = FC_STATE_OFFLINE;
116 116 msg = "Loop OFFLINE";
117 117
118 118 rval = ql_pci_sbus_config(ha);
119 119 if (rval != QL_SUCCESS) {
120 120 TASK_DAEMON_LOCK(ha);
121 121 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
122 122 EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
123 123 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
124 124 }
125 125 TASK_DAEMON_UNLOCK(ha);
126 126 continue;
127 127 }
128 128
129 129 (void) ql_setup_fcache(ha);
130 130
131 131 /* Reset ISP chip. */
132 132 ql_reset_chip(ha);
133 133
134 134 /* Get NVRAM configuration if needed. */
135 135 if (ha->init_ctrl_blk.cb.version == 0) {
136 136 (void) ql_nvram_config(ha);
137 137 }
138 138
139 139 /* Set login parameters. */
140 140 if (CFG_IST(ha, CFG_CTRL_24258081)) {
141 141 els->common_service.rx_bufsize = CHAR_TO_SHORT(
142 142 ha->init_ctrl_blk.cb24.max_frame_length[0],
143 143 ha->init_ctrl_blk.cb24.max_frame_length[1]);
144 144 bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0],
145 145 (void *)&els->nport_ww_name.raw_wwn[0], 8);
146 146 bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0],
147 147 (void *)&els->node_ww_name.raw_wwn[0], 8);
148 148 } else {
149 149 els->common_service.rx_bufsize = CHAR_TO_SHORT(
150 150 ha->init_ctrl_blk.cb.max_frame_length[0],
151 151 ha->init_ctrl_blk.cb.max_frame_length[1]);
152 152 bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0],
153 153 (void *)&els->nport_ww_name.raw_wwn[0], 8);
154 154 bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0],
155 155 (void *)&els->node_ww_name.raw_wwn[0], 8);
156 156 }
157 157 bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv,
158 158 strlen(QL_VERSION));
159 159
160 160 /* Determine which RISC code to use. */
161 161 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
162 162 if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) {
163 163 rval = ql_load_isp_firmware(ha);
164 164 }
165 165 }
166 166
167 167 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
168 168 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
169 169
170 170 (void) ql_fw_ready(ha, ha->fwwait);
171 171
172 172 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
173 173 ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
174 174 if (ha->topology & QL_LOOP_CONNECTION) {
175 175 ha->state = ha->state | FC_STATE_LOOP;
176 176 msg = "Loop ONLINE";
177 177 ha->task_daemon_flags |= STATE_ONLINE;
178 178 } else if (ha->topology & QL_P2P_CONNECTION) {
179 179 ha->state = ha->state |
180 180 FC_STATE_ONLINE;
181 181 msg = "Link ONLINE";
182 182 ha->task_daemon_flags |= STATE_ONLINE;
183 183 } else {
184 184 msg = "Unknown Link state";
185 185 }
186 186 }
187 187 } else {
188 188 TASK_DAEMON_LOCK(ha);
189 189 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
190 190 EL(ha, "failed, isp_abort_needed\n");
191 191 ha->task_daemon_flags |= ISP_ABORT_NEEDED |
192 192 LOOP_DOWN;
193 193 }
194 194 TASK_DAEMON_UNLOCK(ha);
195 195 }
196 196
197 197 } while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
198 198
199 199 cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
200 200
201 201 /* Enable ISP interrupts and login parameters. */
202 202 if (CFG_IST(ha, CFG_CTRL_8021)) {
203 203 ql_8021_enable_intrs(ha);
204 204 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
205 205 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
206 206 } else {
207 207 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
208 208 }
209 209
210 210 ADAPTER_STATE_LOCK(ha);
211 211 ha->flags |= (INTERRUPTS_ENABLED | ONLINE);
212 212 ADAPTER_STATE_UNLOCK(ha);
213 213
214 214 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED |
215 215 COMMAND_WAIT_NEEDED);
216 216
217 217 /*
218 218 * Setup login parameters.
219 219 */
220 220 els->common_service.fcph_version = 0x2006;
221 221 els->common_service.btob_credit = 3;
222 222 els->common_service.cmn_features = 0x8800;
223 223 els->common_service.conc_sequences = 0xff;
224 224 els->common_service.relative_offset = 3;
225 225 els->common_service.e_d_tov = 0x07d0;
226 226
227 227 class3_param = (class_svc_param_t *)&els->class_3;
228 228 class3_param->class_valid_svc_opt = 0x8800;
229 229 class3_param->rcv_data_size = els->common_service.rx_bufsize;
230 230 class3_param->conc_sequences = 0xff;
231 231
232 232 if (rval != QL_SUCCESS) {
233 233 EL(ha, "failed, rval = %xh\n", rval);
234 234 } else {
235 235 /*EMPTY*/
236 236 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
237 237 }
238 238 return (rval);
239 239 }
240 240
241 241 /*
242 242 * ql_pci_sbus_config
243 243 * Setup device PCI/SBUS configuration registers.
244 244 *
245 245 * Input:
246 246 * ha = adapter state pointer.
247 247 *
248 248 * Returns:
249 249 * ql local function return status code.
250 250 *
251 251 * Context:
252 252 * Kernel context.
253 253 */
254 254 int
255 255 ql_pci_sbus_config(ql_adapter_state_t *ha)
256 256 {
257 257 uint32_t timer;
258 258 uint16_t cmd, w16;
259 259
260 260 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
261 261
262 262 if (CFG_IST(ha, CFG_SBUS_CARD)) {
263 263 w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
264 264 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
265 265 EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
266 266 w16 & 0xf);
267 267 } else {
268 268 /*
269 269 * We want to respect the framework's setting of the PCI
270 270 * configuration space command register and also want to
271 271 * make sure that all bits of interest to us are properly
272 272 * set in the command register.
273 273 */
274 274 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
275 275 cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
276 276 PCI_COMM_ME | PCI_COMM_MEMWR_INVAL |
277 277 PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
278 278
279 279 /*
280 280 * If this is a 2300 card and not 2312, reset the
281 281 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
282 282 * 2310 also reports itself as a 2300 so we need to get the
283 283 * fb revision level -- a 6 indicates it really is a 2300 and
284 284 * not a 2310.
285 285 */
286 286
287 287 if (ha->device_id == 0x2300) {
288 288 /* Pause RISC. */
289 289 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
290 290 for (timer = 0; timer < 30000; timer++) {
291 291 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
292 292 0) {
293 293 break;
294 294 } else {
295 295 drv_usecwait(MILLISEC);
296 296 }
297 297 }
298 298
299 299 /* Select FPM registers. */
300 300 WRT16_IO_REG(ha, ctrl_status, 0x20);
301 301
302 302 /* Get the fb rev level */
303 303 if (RD16_IO_REG(ha, fb_cmd) == 6) {
304 304 cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
305 305 }
306 306
307 307 /* Deselect FPM registers. */
308 308 WRT16_IO_REG(ha, ctrl_status, 0x0);
309 309
310 310 /* Release RISC module. */
311 311 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
312 312 for (timer = 0; timer < 30000; timer++) {
313 313 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
314 314 0) {
315 315 break;
316 316 } else {
317 317 drv_usecwait(MILLISEC);
318 318 }
319 319 }
320 320 } else if (ha->device_id == 0x2312) {
321 321 /*
322 322 * cPCI ISP2312 specific code to service function 1
323 323 * hot-swap registers.
324 324 */
325 325 if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
326 326 != 0) {
327 327 ql_pci_config_put8(ha, 0x66, 0xc2);
328 328 }
329 329 }
330 330
331 331 if (!(CFG_IST(ha, CFG_CTRL_8021)) &&
332 332 ha->pci_max_read_req != 0) {
333 333 ql_set_max_read_req(ha);
334 334 }
335 335
336 336 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
337 337
338 338 /* Set cache line register. */
339 339 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
340 340
341 341 /* Set latency register. */
342 342 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
343 343
344 344 /* Reset expansion ROM address decode enable. */
345 345 w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
346 346 w16 = (uint16_t)(w16 & ~BIT_0);
347 347 ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
348 348 }
349 349
350 350 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
351 351
352 352 return (QL_SUCCESS);
353 353 }
354 354
355 355 /*
356 356 * Set the PCI max read request value.
357 357 *
358 358 * Input:
359 359 * ha: adapter state pointer.
360 360 *
361 361 * Output:
362 362 * none.
363 363 *
364 364 * Returns:
365 365 *
366 366 * Context:
367 367 * Kernel context.
368 368 */
369 369
370 370 static void
371 371 ql_set_max_read_req(ql_adapter_state_t *ha)
372 372 {
373 373 uint16_t read_req, w16;
374 374 uint16_t tmp = ha->pci_max_read_req;
375 375
376 376 if ((ha->device_id == 0x2422) ||
377 377 ((ha->device_id & 0xff00) == 0x2300)) {
378 378 /* check for valid override value */
379 379 if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
380 380 tmp == 4096) {
381 381 /* shift away the don't cares */
382 382 tmp = (uint16_t)(tmp >> 10);
383 383 /* convert bit pos to request value */
384 384 for (read_req = 0; tmp != 0; read_req++) {
385 385 tmp = (uint16_t)(tmp >> 1);
386 386 }
387 387 w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e);
388 388 w16 = (uint16_t)(w16 & ~(BIT_3 & BIT_2));
389 389 w16 = (uint16_t)(w16 | (read_req << 2));
390 390 ql_pci_config_put16(ha, 0x4e, w16);
391 391 } else {
392 392 EL(ha, "invalid parameter value for "
393 393 "'pci-max-read-request': %d; using system "
394 394 "default\n", tmp);
395 395 }
396 396 } else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) ==
397 397 0x2500) || (ha->device_id == 0x8432)) {
398 398 /* check for valid override value */
399 399 if (tmp == 128 || tmp == 256 || tmp == 512 ||
400 400 tmp == 1024 || tmp == 2048 || tmp == 4096) {
401 401 /* shift away the don't cares */
402 402 tmp = (uint16_t)(tmp >> 8);
403 403 /* convert bit pos to request value */
404 404 for (read_req = 0; tmp != 0; read_req++) {
405 405 tmp = (uint16_t)(tmp >> 1);
406 406 }
407 407 w16 = (uint16_t)ql_pci_config_get16(ha, 0x54);
408 408 w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
409 409 BIT_12));
410 410 w16 = (uint16_t)(w16 | (read_req << 12));
411 411 ql_pci_config_put16(ha, 0x54, w16);
412 412 } else {
413 413 EL(ha, "invalid parameter value for "
414 414 "'pci-max-read-request': %d; using system "
415 415 "default\n", tmp);
416 416 }
417 417 }
418 418 }
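/*
 * For reference, the shift-and-count loops above reduce the override
 * value to the following register encodings (derived from the code;
 * the named offsets are presumably the PCI-X command and PCIe device
 * control registers):
 *
 *	ISP2422/23xx, config offset 0x4e bits 3:2:
 *	    512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3
 *	ISP2432/25xx/8432, config offset 0x54 bits 14:12:
 *	    128 -> 0, 256 -> 1, 512 -> 2, 1024 -> 3, 2048 -> 4, 4096 -> 5
 */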
419 419
420 420 /*
421 421 * NVRAM configuration.
422 422 *
423 423 * Input:
424 424 * ha: adapter state pointer.
425 425 * ha->hba_buf = request and response rings
426 426 *
427 427 * Output:
428 428 * ha->init_ctrl_blk = initialization control block
429 429 * host adapters parameters in host adapter block
430 430 *
431 431 * Returns:
432 432 * ql local function return status code.
433 433 *
434 434 * Context:
435 435 * Kernel context.
436 436 */
437 437 int
438 438 ql_nvram_config(ql_adapter_state_t *ha)
439 439 {
440 440 uint32_t cnt;
441 441 caddr_t dptr1, dptr2;
442 442 ql_init_cb_t *icb = &ha->init_ctrl_blk.cb;
443 443 ql_ip_init_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb;
444 444 nvram_t *nv = (nvram_t *)ha->request_ring_bp;
445 445 uint16_t *wptr = (uint16_t *)ha->request_ring_bp;
446 446 uint8_t chksum = 0;
447 447 int rval;
448 448 int idpromlen;
449 449 char idprombuf[32];
450 450 uint32_t start_addr;
451 451
452 452 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
453 453
454 454 if (CFG_IST(ha, CFG_CTRL_24258081)) {
455 455 return (ql_nvram_24xx_config(ha));
456 456 }
457 457
458 458 start_addr = 0;
459 459 if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
460 460 QL_SUCCESS) {
461 461 /* Verify valid NVRAM checksum. */
462 462 for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) {
463 463 *wptr = (uint16_t)ql_get_nvram_word(ha,
464 464 (uint32_t)(cnt + start_addr));
465 465 chksum = (uint8_t)(chksum + (uint8_t)*wptr);
466 466 chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
467 467 wptr++;
468 468 }
469 469 ql_release_nvram(ha);
470 470 }
471 471
472 472 /* Bad NVRAM data, set default parameters. */
473 473 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
474 474 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
475 475 nv->nvram_version < 1) {
476 476
477 477 EL(ha, "failed, rval=%xh, checksum=%xh, "
478 478 "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
479 479 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
480 480 nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
481 481 ha->subven_id, nv->nvram_version);
482 482
483 483 /* Don't print nvram message if it's an on-board 2200 */
484 484 if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
485 485 (ha->xioctl->fdesc.flash_size == 0))) {
486 486 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
487 487 " using driver defaults.", QL_NAME, ha->instance);
488 488 }
489 489
490 490 /* Reset NVRAM data. */
491 491 bzero((void *)nv, sizeof (nvram_t));
492 492
493 493 /*
494 494 * Set default initialization control block.
495 495 */
496 496 nv->parameter_block_version = ICB_VERSION;
497 497 nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
498 498 nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
499 499
500 500 nv->max_frame_length[1] = 4;
501 501
502 502 /*
503 503 * Allow 2048 byte frames for 2300
504 504 */
505 505 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
506 506 nv->max_frame_length[1] = 8;
507 507 }
508 508 nv->max_iocb_allocation[1] = 1;
509 509 nv->execution_throttle[0] = 16;
510 510 nv->login_retry_count = 8;
511 511
512 512 idpromlen = 32;
513 513
514 514 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
515 515 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
516 516 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
517 517 &idpromlen) != DDI_PROP_SUCCESS) {
518 518
519 519 QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom "
520 520 "property\n", ha->instance);
521 521 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
522 522 "property", QL_NAME, ha->instance);
523 523
524 524 nv->port_name[2] = 33;
525 525 nv->port_name[3] = 224;
526 526 nv->port_name[4] = 139;
527 527 nv->port_name[7] = (uint8_t)
528 528 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
529 529 } else {
530 530
531 531 nv->port_name[2] = idprombuf[2];
532 532 nv->port_name[3] = idprombuf[3];
533 533 nv->port_name[4] = idprombuf[4];
534 534 nv->port_name[5] = idprombuf[5];
535 535 nv->port_name[6] = idprombuf[6];
536 536 nv->port_name[7] = idprombuf[7];
537 537 nv->port_name[0] = (uint8_t)
538 538 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
539 539 }
540 540
541 541 /* Don't print nvram message if it's an on-board 2200 */
542 542 if (!(CFG_IST(ha, CFG_CTRL_2200)) &&
543 543 (ha->xioctl->fdesc.flash_size == 0)) {
544 544 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
545 545 " default HBA parameters and temporary WWPN:"
546 546 " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
547 547 ha->instance, nv->port_name[0], nv->port_name[1],
548 548 nv->port_name[2], nv->port_name[3],
549 549 nv->port_name[4], nv->port_name[5],
550 550 nv->port_name[6], nv->port_name[7]);
551 551 }
552 552
553 553 nv->login_timeout = 4;
554 554
555 555 /* Set default connection options for the 23xx to 2 */
556 556 if (!(CFG_IST(ha, CFG_CTRL_2200))) {
557 557 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
558 558 BIT_5);
559 559 }
560 560
561 561 /*
562 562 * Set default host adapter parameters
563 563 */
564 564 nv->host_p[0] = BIT_1;
565 565 nv->host_p[1] = BIT_2;
566 566 nv->reset_delay = 5;
567 567 nv->port_down_retry_count = 8;
568 568 nv->maximum_luns_per_target[0] = 8;
569 569
570 570 rval = QL_FUNCTION_FAILED;
571 571 }
572 572
573 573 /* Check for adapter node name (big endian). */
574 574 for (cnt = 0; cnt < 8; cnt++) {
575 575 if (nv->node_name[cnt] != 0) {
576 576 break;
577 577 }
578 578 }
579 579
580 580 /* Copy port name if no node name (big endian). */
581 581 if (cnt == 8) {
582 582 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
583 583 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
584 584 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
585 585 }
586 586
587 587 /* Reset initialization control blocks. */
588 588 bzero((void *)icb, sizeof (ql_init_cb_t));
589 589
590 590 /* Get driver properties. */
591 591 ql_23_properties(ha, nv);
592 592
593 593 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
594 594 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
595 595 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
596 596 nv->port_name[2], nv->port_name[3], nv->port_name[4],
597 597 nv->port_name[5], nv->port_name[6], nv->port_name[7],
598 598 nv->node_name[0], nv->node_name[1], nv->node_name[2],
599 599 nv->node_name[3], nv->node_name[4], nv->node_name[5],
600 600 nv->node_name[6], nv->node_name[7]);
601 601
602 602 /*
603 603 * Copy over NVRAM RISC parameter block
604 604 * to initialization control block.
605 605 */
606 606 dptr1 = (caddr_t)icb;
607 607 dptr2 = (caddr_t)&nv->parameter_block_version;
608 608 cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
609 609 (uintptr_t)&icb->version);
610 610 while (cnt-- != 0) {
611 611 *dptr1++ = *dptr2++;
612 612 }
613 613
614 614 /* Copy 2nd half. */
615 615 dptr1 = (caddr_t)&icb->add_fw_opt[0];
616 616 cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
617 617 (uintptr_t)&icb->add_fw_opt[0]);
618 618
619 619 while (cnt-- != 0) {
620 620 *dptr1++ = *dptr2++;
621 621 }
622 622
623 623 /*
624 624 * Setup driver firmware options.
625 625 */
626 626 icb->firmware_options[0] = (uint8_t)
627 627 (icb->firmware_options[0] | BIT_6 | BIT_1);
628 628
629 629 /*
630 630 * There is no use enabling fast post for SBUS or 2300
631 631 * Always enable 64bit addressing, except SBUS cards.
632 632 */
633 633 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
634 634 if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) {
635 635 icb->firmware_options[0] = (uint8_t)
636 636 (icb->firmware_options[0] & ~BIT_3);
637 637 if (CFG_IST(ha, CFG_SBUS_CARD)) {
638 638 icb->special_options[0] = (uint8_t)
639 639 (icb->special_options[0] | BIT_5);
640 640 ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
641 641 }
642 642 } else {
643 643 icb->firmware_options[0] = (uint8_t)
644 644 (icb->firmware_options[0] | BIT_3);
645 645 }
646 646 /* RIO and ZIO not supported. */
647 647 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
648 648 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
649 649
650 650 icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
651 651 BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
652 652 icb->firmware_options[0] = (uint8_t)
653 653 (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
654 654 icb->firmware_options[1] = (uint8_t)
655 655 (icb->firmware_options[1] & ~BIT_4);
656 656
657 657 icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
658 658 icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
659 659
660 660 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
661 661 if ((icb->special_options[1] & 0x20) == 0) {
662 662 EL(ha, "50 ohm is not set\n");
663 663 }
664 664 }
665 665 icb->execution_throttle[0] = 0xff;
666 666 icb->execution_throttle[1] = 0xff;
667 667
668 668 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
669 669 icb->firmware_options[1] = (uint8_t)
670 670 (icb->firmware_options[1] | BIT_7 | BIT_6);
671 671 icb->add_fw_opt[1] = (uint8_t)
672 672 (icb->add_fw_opt[1] | BIT_5 | BIT_4);
673 673 }
674 674
675 675 /*
676 676 * Set host adapter parameters
677 677 */
678 678 ADAPTER_STATE_LOCK(ha);
679 679 ha->nvram_version = nv->nvram_version;
680 680 ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
681 681 nv->adapter_features[1]);
682 682
683 683 nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) :
684 684 (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD);
685 685 nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) :
686 686 (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1);
687 687
688 688 nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) :
689 689 (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET);
690 690 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
691 691 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
692 692 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
693 693 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
694 694
695 695 nv->adapter_features[0] & BIT_3 ?
696 696 (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) :
697 697 (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER);
698 698
699 699 ADAPTER_STATE_UNLOCK(ha);
700 700
701 701 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
702 702 nv->execution_throttle[1]);
703 703 ha->loop_reset_delay = nv->reset_delay;
704 704 ha->port_down_retry_count = nv->port_down_retry_count;
705 705 ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
706 706 R_A_TOV_DEFAULT : icb->login_timeout);
707 707 ha->maximum_luns_per_target = CHAR_TO_SHORT(
708 708 nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
709 709 if (ha->maximum_luns_per_target == 0) {
710 710 ha->maximum_luns_per_target++;
711 711 }
712 712
713 713 /*
714 714 * Setup ring parameters in initialization control block
715 715 */
716 716 cnt = REQUEST_ENTRY_CNT;
717 717 icb->request_q_length[0] = LSB(cnt);
718 718 icb->request_q_length[1] = MSB(cnt);
719 719 cnt = RESPONSE_ENTRY_CNT;
720 720 icb->response_q_length[0] = LSB(cnt);
721 721 icb->response_q_length[1] = MSB(cnt);
722 722
723 723 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
724 724 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
725 725 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
726 726 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
727 727 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
728 728 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
729 729 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
730 730 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
731 731
732 732 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
733 733 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
734 734 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
735 735 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
736 736 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
737 737 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
738 738 icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
739 739 icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
740 740
741 741 /*
742 742 * Setup IP initialization control block
743 743 */
744 744 ip_icb->version = IP_ICB_VERSION;
745 745
746 746 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
747 747 ip_icb->ip_firmware_options[0] = (uint8_t)
748 748 (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
749 749 } else {
750 750 ip_icb->ip_firmware_options[0] = (uint8_t)
751 751 (ip_icb->ip_firmware_options[0] | BIT_2);
752 752 }
753 753
754 754 cnt = RCVBUF_CONTAINER_CNT;
755 755 ip_icb->queue_size[0] = LSB(cnt);
756 756 ip_icb->queue_size[1] = MSB(cnt);
757 757
758 758 ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma)));
759 759 ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma)));
760 760 ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma)));
761 761 ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma)));
762 762 ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma)));
763 763 ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma)));
764 764 ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma)));
765 765 ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma)));
766 766
767 767 if (rval != QL_SUCCESS) {
768 768 EL(ha, "failed, rval = %xh\n", rval);
769 769 } else {
770 770 /*EMPTY*/
771 771 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
772 772 }
773 773 return (rval);
774 774 }
775 775
776 776 /*
777 777 * Get NVRAM data word
778 778 * Calculates word position in NVRAM and calls request routine to
779 779 * get the word from NVRAM.
780 780 *
781 781 * Input:
782 782 * ha = adapter state pointer.
783 783 * address = NVRAM word address.
784 784 *
785 785 * Returns:
786 786 * data word.
787 787 *
788 788 * Context:
789 789 * Kernel context.
790 790 */
791 791 uint16_t
792 792 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
793 793 {
794 794 uint32_t nv_cmd;
795 795 uint16_t rval;
796 796
797 797 QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance);
798 798
799 799 nv_cmd = address << 16;
800 800 nv_cmd = nv_cmd | NV_READ_OP;
801 801
802 802 rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
803 803
804 804 QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval);
805 805
806 806 return (rval);
807 807 }
808 808
809 809 /*
810 810 * NVRAM request
811 811 * Sends read command to NVRAM and gets data from NVRAM.
812 812 *
813 813 * Input:
814 814 * ha = adapter state pointer.
815 815 * nv_cmd = Bit 26= start bit
816 816 * Bit 25, 24 = opcode
817 817 * Bit 23-16 = address
818 818 * Bit 15-0 = write data
819 819 *
820 820 * Returns:
821 821 * data word.
822 822 *
823 823 * Context:
824 824 * Kernel context.
825 825 */
826 826 static uint16_t
827 827 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
828 828 {
829 829 uint8_t cnt;
830 830 uint16_t reg_data;
831 831 uint16_t data = 0;
832 832
833 833 /* Send command to NVRAM. */
834 834
835 835 nv_cmd <<= 5;
836 836 for (cnt = 0; cnt < 11; cnt++) {
837 837 if (nv_cmd & BIT_31) {
838 838 ql_nv_write(ha, NV_DATA_OUT);
839 839 } else {
840 840 ql_nv_write(ha, 0);
841 841 }
842 842 nv_cmd <<= 1;
843 843 }
844 844
845 845 /* Read data from NVRAM. */
846 846
847 847 for (cnt = 0; cnt < 16; cnt++) {
848 848 WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK);
849 849 ql_nv_delay();
850 850 data <<= 1;
851 851 reg_data = RD16_IO_REG(ha, nvram);
852 852 if (reg_data & NV_DATA_IN) {
853 853 data = (uint16_t)(data | BIT_0);
854 854 }
855 855 WRT16_IO_REG(ha, nvram, NV_SELECT);
856 856 ql_nv_delay();
857 857 }
858 858
859 859 /* Deselect chip. */
860 860
861 861 WRT16_IO_REG(ha, nvram, NV_DESELECT);
862 862 ql_nv_delay();
863 863
864 864 return (data);
865 865 }
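/*
 * Worked example of the command framing above (derived from the bit
 * layout documented in the ql_nvram_request header): a read of NVRAM
 * word 0x05 arrives here as nv_cmd = (0x05 << 16) | NV_READ_OP.  The
 * nv_cmd <<= 5 places the start bit (bit 26) at bit 31, the 11-clock
 * loop shifts out the start bit, two opcode bits and eight address
 * bits MSB first, and the following 16-clock loop shifts the data
 * word back in through NV_DATA_IN.
 */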
866 866
867 867 void
868 868 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
869 869 {
870 870 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
871 871 ql_nv_delay();
872 872 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
873 873 ql_nv_delay();
874 874 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
875 875 ql_nv_delay();
876 876 }
877 877
878 878 void
879 879 ql_nv_delay(void)
880 880 {
881 881 drv_usecwait(NV_DELAY_COUNT);
882 882 }
883 883
884 884 /*
885 885 * ql_nvram_24xx_config
886 886 * ISP2400 nvram.
887 887 *
888 888 * Input:
889 889 * ha: adapter state pointer.
890 890 * ha->hba_buf = request and response rings
891 891 *
892 892 * Output:
893 893 * ha->init_ctrl_blk = initialization control block
894 894 * host adapters parameters in host adapter block
895 895 *
896 896 * Returns:
897 897 * ql local function return status code.
898 898 *
899 899 * Context:
900 900 * Kernel context.
901 901 */
902 902 int
903 903 ql_nvram_24xx_config(ql_adapter_state_t *ha)
904 904 {
905 905 uint32_t index, addr, chksum, saved_chksum;
906 906 uint32_t *longptr;
907 907 nvram_24xx_t nvram;
908 908 int idpromlen;
909 909 char idprombuf[32];
910 910 caddr_t src, dst;
911 911 uint16_t w1;
912 912 int rval;
913 913 nvram_24xx_t *nv = (nvram_24xx_t *)&nvram;
914 914 ql_init_24xx_cb_t *icb =
915 915 (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
916 916 ql_ip_init_24xx_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb24;
917 917
918 918 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
919 919
920 920 if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
921 921
922 922 /* Get NVRAM data and calculate checksum. */
923 923 longptr = (uint32_t *)nv;
924 924 chksum = saved_chksum = 0;
925 925 for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
926 926 rval = ql_24xx_read_flash(ha, addr++, longptr);
927 927 if (rval != QL_SUCCESS) {
928 928 EL(ha, "24xx_read_flash failed=%xh\n", rval);
929 929 break;
930 930 }
931 931 saved_chksum = chksum;
932 932 chksum += *longptr;
933 933 LITTLE_ENDIAN_32(longptr);
934 934 longptr++;
935 935 }
936 936
937 937 ql_release_nvram(ha);
938 938 }
939 939
940 940 /* Bad NVRAM data, set default parameters. */
941 941 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
942 942 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
943 943 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
944 944
945 945 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
946 946 "driver defaults.", QL_NAME, ha->instance);
947 947
948 948 EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
949 949 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
950 950 nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
951 951 nv->nvram_version[1]));
952 952
953 953 saved_chksum = ~saved_chksum + 1;
954 954
955 955 (void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
956 956 MSW(saved_chksum), LSW(saved_chksum));
957 957
958 958 /* Reset NVRAM data. */
959 959 bzero((void *)nv, sizeof (nvram_24xx_t));
960 960
961 961 /*
962 962 * Set default initialization control block.
963 963 */
964 964 nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
965 965 nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
966 966
967 967 nv->version[0] = 1;
968 968 nv->max_frame_length[1] = 8;
969 969 nv->execution_throttle[0] = 16;
970 970 nv->exchange_count[0] = 128;
971 971 nv->max_luns_per_target[0] = 8;
972 972
973 973 idpromlen = 32;
974 974
975 975 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
976 976 if (rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
977 977 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
978 978 &idpromlen) != DDI_PROP_SUCCESS) {
979 979
980 980 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
981 981 "property, rval=%x", QL_NAME, ha->instance, rval);
982 982
983 983 nv->port_name[0] = 33;
984 984 nv->port_name[3] = 224;
985 985 nv->port_name[4] = 139;
986 986 nv->port_name[7] = (uint8_t)
987 987 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
988 988 } else {
989 989 nv->port_name[2] = idprombuf[2];
990 990 nv->port_name[3] = idprombuf[3];
991 991 nv->port_name[4] = idprombuf[4];
992 992 nv->port_name[5] = idprombuf[5];
993 993 nv->port_name[6] = idprombuf[6];
994 994 nv->port_name[7] = idprombuf[7];
995 995 nv->port_name[0] = (uint8_t)
996 996 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
997 997 }
998 998
999 999 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
1000 1000 "HBA parameters and temporary "
1001 1001 "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
1002 1002 ha->instance, nv->port_name[0], nv->port_name[1],
1003 1003 nv->port_name[2], nv->port_name[3], nv->port_name[4],
1004 1004 nv->port_name[5], nv->port_name[6], nv->port_name[7]);
1005 1005
1006 1006 nv->login_retry_count[0] = 8;
1007 1007
1008 1008 nv->firmware_options_1[0] = BIT_2 | BIT_1;
1009 1009 nv->firmware_options_1[1] = BIT_5;
1010 1010 nv->firmware_options_2[0] = BIT_5;
1011 1011 nv->firmware_options_2[1] = BIT_4;
1012 1012 nv->firmware_options_3[1] = BIT_6;
1013 1013
1014 1014 /*
1015 1015 * Set default host adapter parameters
1016 1016 */
1017 1017 nv->host_p[0] = BIT_4 | BIT_1;
1018 1018 nv->host_p[1] = BIT_3 | BIT_2;
1019 1019 nv->reset_delay = 5;
1020 1020 nv->max_luns_per_target[0] = 128;
1021 1021 nv->port_down_retry_count[0] = 30;
1022 1022 nv->link_down_timeout[0] = 30;
1023 1023
1024 1024 if (CFG_IST(ha, CFG_CTRL_8081)) {
1025 1025 nv->firmware_options_3[2] = BIT_4;
1026 1026 nv->feature_mask_l[0] = 9;
1027 1027 nv->ext_blk.version[0] = 1;
1028 1028 nv->ext_blk.fcf_vlan_match = 1;
1029 1029 nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1030 1030 nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1031 1031 nv->fw.isp8001.e_node_mac_addr[1] = 2;
1032 1032 nv->fw.isp8001.e_node_mac_addr[2] = 3;
1033 1033 nv->fw.isp8001.e_node_mac_addr[3] = 4;
1034 1034 nv->fw.isp8001.e_node_mac_addr[4] = MSB(ha->instance);
1035 1035 nv->fw.isp8001.e_node_mac_addr[5] = LSB(ha->instance);
1036 1036 }
1037 1037
1038 1038 rval = QL_FUNCTION_FAILED;
1039 1039 }
1040 1040
1041 1041 /* Check for adapter node name (big endian). */
1042 1042 for (index = 0; index < 8; index++) {
1043 1043 if (nv->node_name[index] != 0) {
1044 1044 break;
1045 1045 }
1046 1046 }
1047 1047
1048 1048 /* Copy port name if no node name (big endian). */
1049 1049 if (index == 8) {
1050 1050 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
1051 1051 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
1052 1052 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
1053 1053 }
1054 1054
1055 1055 /* Reset initialization control blocks. */
1056 1056 bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1057 1057
1058 1058 /* Get driver properties. */
1059 1059 ql_24xx_properties(ha, nv);
1060 1060
1061 1061 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1062 1062 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1063 1063 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
1064 1064 nv->port_name[2], nv->port_name[3], nv->port_name[4],
1065 1065 nv->port_name[5], nv->port_name[6], nv->port_name[7],
1066 1066 nv->node_name[0], nv->node_name[1], nv->node_name[2],
1067 1067 nv->node_name[3], nv->node_name[4], nv->node_name[5],
1068 1068 nv->node_name[6], nv->node_name[7]);
1069 1069
1070 1070 /*
1071 1071 * Copy over NVRAM Firmware Initialization Control Block.
1072 1072 */
1073 1073 dst = (caddr_t)icb;
1074 1074 src = (caddr_t)&nv->version;
1075 1075 index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1076 1076 (uintptr_t)icb);
1077 1077 while (index--) {
1078 1078 *dst++ = *src++;
1079 1079 }
1080 1080 icb->login_retry_count[0] = nv->login_retry_count[0];
1081 1081 icb->login_retry_count[1] = nv->login_retry_count[1];
1082 1082 icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1083 1083 icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1084 1084
1085 1085 dst = (caddr_t)&icb->interrupt_delay_timer;
1086 1086 src = (caddr_t)&nv->interrupt_delay_timer;
1087 1087 index = (uint32_t)((uintptr_t)&icb->qos -
1088 1088 (uintptr_t)&icb->interrupt_delay_timer);
1089 1089 while (index--) {
1090 1090 *dst++ = *src++;
1091 1091 }
1092 1092
1093 1093 /*
1094 1094 * Setup driver firmware options.
1095 1095 */
1096 1096 if (CFG_IST(ha, CFG_CTRL_8081)) {
1097 1097 dst = (caddr_t)icb->enode_mac_addr;
1098 1098 src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1099 1099 index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1100 1100 while (index--) {
1101 1101 *dst++ = *src++;
1102 1102 }
1103 1103 dst = (caddr_t)&icb->ext_blk;
1104 1104 src = (caddr_t)&nv->ext_blk;
1105 1105 index = sizeof (ql_ext_icb_8100_t);
1106 1106 while (index--) {
1107 1107 *dst++ = *src++;
1108 1108 }
1109 1109 EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1110 1110 icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1111 1111 icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1112 1112 icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1113 1113 } else {
1114 1114 icb->firmware_options_1[0] = (uint8_t)
1115 1115 (icb->firmware_options_1[0] | BIT_1);
1116 1116 icb->firmware_options_1[1] = (uint8_t)
1117 1117 (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1118 1118 icb->firmware_options_3[0] = (uint8_t)
1119 1119 (icb->firmware_options_3[0] | BIT_1);
1120 1120 }
1121 1121 icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1122 1122 ~(BIT_5 | BIT_4));
1123 1123 icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1124 1124 BIT_6);
1125 1125 icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1126 1126 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1127 1127 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1128 1128 icb->firmware_options_2[1] = (uint8_t)
1129 1129 (icb->firmware_options_2[1] | BIT_4);
1130 1130 } else {
1131 1131 icb->firmware_options_2[1] = (uint8_t)
1132 1132 (icb->firmware_options_2[1] & ~BIT_4);
1133 1133 }
1134 1134
1135 1135 icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1136 1136 ~BIT_7);
1137 1137
1138 1138 /* Enable special N_Port-to-N_Port login behavior. */
1139 1139 if (CFG_IST(ha, CFG_CTRL_2425)) {
1140 1140 icb->firmware_options_3[1] =
1141 1141 (uint8_t)(icb->firmware_options_3[1] | BIT_0);
1142 1142 }
1143 1143
1144 1144 icb->execution_throttle[0] = 0xff;
1145 1145 icb->execution_throttle[1] = 0xff;
1146 1146
1147 1147 /*
1148 1148 * Set host adapter parameters
1149 1149 */
1150 1150 ADAPTER_STATE_LOCK(ha);
1151 1151 ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0],
1152 1152 nv->nvram_version[1]);
1153 1153 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
1154 1154 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
1155 1155 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
1156 1156 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
1157 1157 ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD | CFG_LR_SUPPORT |
1158 1158 CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER);
1159 1159 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1160 1160 if (CFG_IST(ha, CFG_CTRL_81XX) && nv->enhanced_features[0] & BIT_0) {
1161 1161 ha->cfg_flags |= CFG_LR_SUPPORT;
1162 1162 }
1163 1163 ADAPTER_STATE_UNLOCK(ha);
1164 1164
1165 1165 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
1166 1166 nv->execution_throttle[1]);
1167 1167 ha->loop_reset_delay = nv->reset_delay;
1168 1168 ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1169 1169 nv->port_down_retry_count[1]);
1170 1170 w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1171 1171 ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1172 1172 ha->maximum_luns_per_target = CHAR_TO_SHORT(
1173 1173 nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1174 1174 if (ha->maximum_luns_per_target == 0) {
1175 1175 ha->maximum_luns_per_target++;
1176 1176 }
1177 1177
1178 1178 /* ISP2422 Serial Link Control */
1179 1179 if (CFG_IST(ha, CFG_CTRL_2422)) {
1180 1180 ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1181 1181 nv->fw.isp2400.swing_opt[1]);
1182 1182 ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1183 1183 nv->fw.isp2400.swing_1g[1]);
1184 1184 ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1185 1185 nv->fw.isp2400.swing_2g[1]);
1186 1186 ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1187 1187 nv->fw.isp2400.swing_4g[1]);
1188 1188 }
1189 1189
1190 1190 /*
1191 1191 * Setup ring parameters in initialization control block
1192 1192 */
1193 1193 w1 = REQUEST_ENTRY_CNT;
1194 1194 icb->request_q_length[0] = LSB(w1);
1195 1195 icb->request_q_length[1] = MSB(w1);
1196 1196 w1 = RESPONSE_ENTRY_CNT;
1197 1197 icb->response_q_length[0] = LSB(w1);
1198 1198 icb->response_q_length[1] = MSB(w1);
1199 1199
1200 1200 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
1201 1201 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
1202 1202 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
1203 1203 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
1204 1204 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
1205 1205 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
1206 1206 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
1207 1207 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
1208 1208
1209 1209 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
1210 1210 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
1211 1211 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
1212 1212 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
1213 1213 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
1214 1214 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
1215 1215 icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
1216 1216 icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
1217 1217
1218 1218 /*
1219 1219 * Setup IP initialization control block
1220 1220 */
1221 1221 ip_icb->version = IP_ICB_24XX_VERSION;
1222 1222
1223 1223 ip_icb->ip_firmware_options[0] = (uint8_t)
1224 1224 (ip_icb->ip_firmware_options[0] | BIT_2);
1225 1225
1226 1226 if (rval != QL_SUCCESS) {
1227 1227 EL(ha, "failed, rval = %xh\n", rval);
1228 1228 } else {
1229 1229 /*EMPTY*/
1230 1230 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1231 1231 }
1232 1232 return (rval);
1233 1233 }
1234 1234
1235 1235 /*
1236 1236 * ql_lock_nvram
1237 1237 * Locks NVRAM access and returns starting address of NVRAM.
1238 1238 *
1239 1239 * Input:
1240 1240 * ha: adapter state pointer.
1241 1241 * addr: pointer for start address.
1242 1242 * flags: Are mutually exclusive:
1243 1243 * LNF_NVRAM_DATA --> get nvram
1244 1244 * LNF_VPD_DATA --> get vpd data (24/25xx only).
1245 1245 *
1246 1246 * Returns:
1247 1247 * ql local function return status code.
1248 1248 *
1249 1249 * Context:
1250 1250 * Kernel context.
1251 1251 */
1252 1252 int
1253 1253 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1254 1254 {
1255 1255 int i;
1256 1256
1257 1257 if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1258 1258 EL(ha, "invalid options for function");
1259 1259 return (QL_FUNCTION_FAILED);
1260 1260 }
1261 1261
1262 1262 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1263 1263 if ((flags & LNF_NVRAM_DATA) == 0) {
1264 1264 EL(ha, "invalid 2312/2322 option for HBA");
1265 1265 return (QL_FUNCTION_FAILED);
1266 1266 }
1267 1267
1268 1268 /* if function number is non-zero, then adjust offset */
1269 1269 *addr = ha->flash_nvram_addr;
1270 1270
1271 1271 /* Try to get resource lock. Wait for 10 seconds max */
1272 1272 for (i = 0; i < 10000; i++) {
1273 1273 /* if nvram busy bit is reset, acquire sema */
1274 1274 if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1275 1275 WRT16_IO_REG(ha, host_to_host_sema, 1);
1276 1276 drv_usecwait(MILLISEC);
1277 1277 if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1278 1278 break;
1279 1279 }
1280 1280 }
1281 1281 drv_usecwait(MILLISEC);
1282 1282 }
1283 1283 if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1284 1284 cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1285 1285 QL_NAME, ha->instance);
1286 1286 return (QL_FUNCTION_FAILED);
1287 1287 }
1288 1288 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
1289 1289 if (flags & LNF_VPD_DATA) {
1290 1290 *addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1291 1291 } else if (flags & LNF_NVRAM_DATA) {
1292 1292 *addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1293 1293 } else {
1294 1294 EL(ha, "invalid 2422 option for HBA");
1295 1295 return (QL_FUNCTION_FAILED);
1296 1296 }
1297 1297
1298 1298 GLOBAL_HW_LOCK();
1299 1299 } else if (CFG_IST(ha, CFG_CTRL_258081)) {
1300 1300 if (flags & LNF_VPD_DATA) {
1301 1301 *addr = ha->flash_data_addr | ha->flash_vpd_addr;
1302 1302 } else if (flags & LNF_NVRAM_DATA) {
1303 1303 *addr = ha->flash_data_addr | ha->flash_nvram_addr;
1304 1304 } else {
1305 1305 EL(ha, "invalid 2581 option for HBA");
1306 1306 return (QL_FUNCTION_FAILED);
1307 1307 }
1308 1308
1309 1309 GLOBAL_HW_LOCK();
1310 1310 } else {
1311 1311 if ((flags & LNF_NVRAM_DATA) == 0) {
1312 1312 EL(ha, "invalid option for HBA");
1313 1313 return (QL_FUNCTION_FAILED);
1314 1314 }
1315 1315 *addr = 0;
1316 1316 GLOBAL_HW_LOCK();
1317 1317 }
1318 1318
1319 1319 return (QL_SUCCESS);
1320 1320 }
1321 1321
1322 1322 /*
1323 1323 * ql_release_nvram
1324 1324 * Releases NVRAM access.
1325 1325 *
1326 1326 * Input:
1327 1327 * ha: adapter state pointer.
1328 1328 *
1329 1329 * Context:
1330 1330 * Kernel context.
1331 1331 */
1332 1332 void
1333 1333 ql_release_nvram(ql_adapter_state_t *ha)
1334 1334 {
1335 1335 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1336 1336 /* Release resource lock */
1337 1337 WRT16_IO_REG(ha, host_to_host_sema, 0);
1338 1338 } else {
1339 1339 GLOBAL_HW_UNLOCK();
1340 1340 }
1341 1341 }
1342 1342
1343 1343 /*
1344 1344 * ql_23_properties
1345 1345 * Copies driver properties to NVRAM or adapter structure.
1346 1346 *
1347 1347 * Driver properties are by design global variables and hidden
1348 1348 * completely from administrators. Knowledgeable folks can
1349 1349 * override the default values using driver.conf
1350 1350 *
1351 1351 * Input:
1352 1352 * ha: adapter state pointer.
1353 1353 * nv: NVRAM structure pointer.
1354 1354 *
1355 1355 * Context:
1356 1356 * Kernel context.
1357 1357 */
1358 1358 static void
1359 1359 ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
1360 1360 {
1361 1361 uint32_t data, cnt;
1362 1362
1363 1363 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1364 1364
1365 1365 /* Get frame payload size. */
1366 1366 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1367 1367 data = 2048;
1368 1368 }
1369 1369 if (data == 512 || data == 1024 || data == 2048) {
1370 1370 nv->max_frame_length[0] = LSB(data);
1371 1371 nv->max_frame_length[1] = MSB(data);
1372 1372 } else {
1373 1373 EL(ha, "invalid parameter value for 'max-frame-length': "
1374 1374 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1375 1375 nv->max_frame_length[0], nv->max_frame_length[1]));
1376 1376 }
1377 1377
1378 1378 /* Get max IOCB allocation. */
1379 1379 nv->max_iocb_allocation[0] = 0;
1380 1380 nv->max_iocb_allocation[1] = 1;
1381 1381
1382 1382 /* Get execution throttle. */
1383 1383 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1384 1384 data = 32;
1385 1385 }
1386 1386 if (data != 0 && data < 65536) {
1387 1387 nv->execution_throttle[0] = LSB(data);
1388 1388 nv->execution_throttle[1] = MSB(data);
1389 1389 } else {
1390 1390 EL(ha, "invalid parameter value for 'execution-throttle': "
1391 1391 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1392 1392 nv->execution_throttle[0], nv->execution_throttle[1]));
1393 1393 }
1394 1394
1395 1395 /* Get Login timeout. */
1396 1396 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1397 1397 data = 3;
1398 1398 }
1399 1399 if (data < 256) {
1400 1400 nv->login_timeout = (uint8_t)data;
1401 1401 } else {
1402 1402 EL(ha, "invalid parameter value for 'login-timeout': "
1403 1403 "%d; using nvram value of %d\n", data, nv->login_timeout);
1404 1404 }
1405 1405
1406 1406 /* Get retry count. */
1407 1407 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1408 1408 data = 4;
1409 1409 }
1410 1410 if (data < 256) {
1411 1411 nv->login_retry_count = (uint8_t)data;
1412 1412 } else {
1413 1413 EL(ha, "invalid parameter value for 'login-retry-count': "
1414 1414 "%d; using nvram value of %d\n", data,
1415 1415 nv->login_retry_count);
1416 1416 }
1417 1417
1418 1418 /* Get adapter hard loop ID enable. */
1419 1419 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1420 1420 if (data == 0) {
1421 1421 nv->firmware_options[0] =
1422 1422 (uint8_t)(nv->firmware_options[0] & ~BIT_0);
1423 1423 } else if (data == 1) {
1424 1424 nv->firmware_options[0] =
1425 1425 (uint8_t)(nv->firmware_options[0] | BIT_0);
1426 1426 } else if (data != 0xffffffff) {
1427 1427 EL(ha, "invalid parameter value for "
1428 1428 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1429 1429 "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
1430 1430 }
1431 1431
1432 1432 /* Get adapter hard loop ID. */
1433 1433 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1434 1434 if (data < 126) {
1435 1435 nv->hard_address[0] = (uint8_t)data;
1436 1436 } else if (data != 0xffffffff) {
1437 1437 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1438 1438 "%d; using nvram value of %d\n",
1439 1439 data, nv->hard_address[0]);
1440 1440 }
1441 1441
1442 1442 /* Get LIP reset. */
1443 1443 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1444 1444 0xffffffff) {
1445 1445 data = 0;
1446 1446 }
1447 1447 if (data == 0) {
1448 1448 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
1449 1449 } else if (data == 1) {
1450 1450 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
1451 1451 } else {
1452 1452 EL(ha, "invalid parameter value for "
1453 1453 "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1454 1454 "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
1455 1455 }
1456 1456
1457 1457 /* Get LIP full login. */
1458 1458 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1459 1459 0xffffffff) {
1460 1460 data = 1;
1461 1461 }
1462 1462 if (data == 0) {
1463 1463 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1464 1464 } else if (data == 1) {
1465 1465 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1466 1466 } else {
1467 1467 EL(ha, "invalid parameter value for "
1468 1468 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1469 1469 "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1470 1470 }
1471 1471
1472 1472 /* Get target reset. */
1473 1473 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1474 1474 0xffffffff) {
1475 1475 data = 0;
1476 1476 }
1477 1477 if (data == 0) {
1478 1478 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1479 1479 } else if (data == 1) {
1480 1480 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1481 1481 } else {
1482 1482 EL(ha, "invalid parameter value for "
1483 1483 "'enable-target-reset-on-bus-reset': %d; using nvram "
1484 1484 "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1485 1485 }
1486 1486
1487 1487 /* Get reset delay. */
1488 1488 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1489 1489 data = 5;
1490 1490 }
1491 1491 if (data != 0 && data < 256) {
1492 1492 nv->reset_delay = (uint8_t)data;
1493 1493 } else {
1494 1494 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1495 1495 "using nvram value of %d", data, nv->reset_delay);
1496 1496 }
1497 1497
1498 1498 /* Get port down retry count. */
1499 1499 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1500 1500 data = 8;
1501 1501 }
1502 1502 if (data < 256) {
1503 1503 nv->port_down_retry_count = (uint8_t)data;
1504 1504 } else {
1505 1505 EL(ha, "invalid parameter value for 'port-down-retry-count':"
1506 1506 " %d; using nvram value of %d\n", data,
1507 1507 nv->port_down_retry_count);
1508 1508 }
1509 1509
1510 1510 /* Get connection mode setting. */
1511 1511 if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1512 1512 data = 2;
1513 1513 }
1514 1514 cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
1515 1515 if (data <= cnt) {
1516 1516 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
1517 1517 ~(BIT_6 | BIT_5 | BIT_4));
1518 1518 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
1519 1519 (uint8_t)(data << 4));
1520 1520 } else {
1521 1521 EL(ha, "invalid parameter value for 'connection-options': "
1522 1522 "%d; using nvram value of %d\n", data,
1523 1523 (nv->add_fw_opt[0] >> 4) & 0x3);
1524 1524 }
1525 1525
1526 1526 /* Get data rate setting. */
1527 1527 if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
1528 1528 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1529 1529 data = 2;
1530 1530 }
1531 1531 if (data < 3) {
1532 1532 nv->special_options[1] = (uint8_t)
1533 1533 (nv->special_options[1] & 0x3f);
1534 1534 nv->special_options[1] = (uint8_t)
1535 1535 (nv->special_options[1] | (uint8_t)(data << 6));
1536 1536 } else {
1537 1537 EL(ha, "invalid parameter value for 'fc-data-rate': "
1538 1538 "%d; using nvram value of %d\n", data,
1539 1539 (nv->special_options[1] >> 6) & 0x3);
1540 1540 }
1541 1541 }
1542 1542
1543 1543 /* Get adapter id string for Sun branded 23xx only */
1544 1544 if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
1545 1545 (void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
1546 1546 nv->adapInfo);
1547 1547 }
1548 1548
1549 1549 /* Get IP FW container count. */
1550 1550 ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1551 1551 ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1552 1552
1553 1553 /* Get IP low water mark. */
1554 1554 ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1555 1555 ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1556 1556
1557 1557 /* Get IP fast register post count. */
1558 1558 ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1559 1559 ql_ip_fast_post_count;
1560 1560
1561 1561 ADAPTER_STATE_LOCK(ha);
1562 1562
1563 1563 ql_common_properties(ha);
1564 1564
1565 1565 ADAPTER_STATE_UNLOCK(ha);
1566 1566
1567 1567 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1568 1568 }
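/*
 * A minimal driver.conf sketch (illustrative values only; assuming the
 * usual qlc.conf location for this driver) showing how the properties
 * read by ql_23_properties() and ql_common_properties() can be
 * overridden:
 *
 *	max-frame-length=2048;
 *	execution-throttle=32;
 *	login-retry-count=8;
 *	connection-options=2;
 *	extended-logging=1;
 */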
1569 1569
1570 1570 /*
1571 1571 * ql_common_properties
1572 1572 * Copies driver properties to adapter structure.
1573 1573 *
1574 1574 * Driver properties are by design global variables and hidden
1575 1575 * completely from administrators. Knowledgeable folks can
1576 1576 * override the default values using driver.conf
1577 1577 *
1578 1578 * Input:
1579 1579 * ha: adapter state pointer.
1580 1580 *
1581 1581 * Context:
1582 1582 * Kernel context.
1583 1583 */
1584 1584 void
1585 1585 ql_common_properties(ql_adapter_state_t *ha)
1586 1586 {
1587 1587 uint32_t data;
1588 1588
1589 1589 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1590 1590
1591 1591 /* Get extended logging trace buffer size. */
1592 1592 if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
1593 1593 0xffffffff && data != 0) {
1594 1594 char *new_trace;
1595 1595 uint32_t new_size;
1596 1596
1597 1597 if (ha->el_trace_desc->trace_buffer != NULL) {
1598 1598 new_size = 1024 * data;
1599 1599 new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);
1600 1600
1601 1601 if (new_trace == NULL) {
1602 1602 cmn_err(CE_WARN, "%s(%d): can't get new"
1603 1603 " trace buffer",
1604 1604 QL_NAME, ha->instance);
1605 1605 } else {
1606 1606 /* free the previous */
1607 1607 kmem_free(ha->el_trace_desc->trace_buffer,
1608 1608 ha->el_trace_desc->trace_buffer_size);
1609 1609 /* Use the new one */
1610 1610 ha->el_trace_desc->trace_buffer = new_trace;
1611 1611 ha->el_trace_desc->trace_buffer_size = new_size;
1612 1612 }
1613 1613 }
1614 1614
1615 1615 }
1616 1616
1617 1617 /* Get extended logging enable. */
1618 1618 if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1619 1619 data == 0) {
1620 1620 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1621 1621 } else if (data == 1) {
1622 1622 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1623 1623 } else {
1624 1624 EL(ha, "invalid parameter value for 'extended-logging': %d;"
1625 1625 " using default value of 0\n", data);
1626 1626 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1627 1627 }
1628 1628
1629 1629 /* Get extended logging trace disable. */
1630 1630 if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
1631 1631 0xffffffff || data == 0) {
1632 1632 ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1633 1633 } else if (data == 1) {
1634 1634 ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1635 1635 } else {
1636 1636 EL(ha, "invalid parameter value for "
1637 1637 "'disable-extended-logging-trace': %d;"
1638 1638 " using default value of 0\n", data);
1639 1639 ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1640 1640 }
1641 1641
1642 1642 /* Get FCP 2 Error Recovery. */
1643 1643 if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1644 1644 0xffffffff || data == 1) {
1645 1645 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1646 1646 } else if (data == 0) {
1647 1647 ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1648 1648 } else {
1649 1649 EL(ha, "invalid parameter value for "
1650 1650 "'enable-FCP-2-error-recovery': %d; using nvram value of "
1651 1651 "1\n", data);
1652 1652 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1653 1653 }
1654 1654
1655 1655 #ifdef QL_DEBUG_LEVEL_2
1656 1656 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1657 1657 #endif
1658 1658
1659 1659 /* Get port down retry delay. */
1660 1660 if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1661 1661 ha->port_down_retry_delay = PORT_RETRY_TIME;
1662 1662 } else if (data < 256) {
1663 1663 ha->port_down_retry_delay = (uint8_t)data;
1664 1664 } else {
1665 1665 EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1666 1666 " %d; using default value of %d", data, PORT_RETRY_TIME);
1667 1667 ha->port_down_retry_delay = PORT_RETRY_TIME;
1668 1668 }
1669 1669
1670 1670 /* Get queue full retry count. */
1671 1671 if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1672 1672 ha->qfull_retry_count = 16;
1673 1673 } else if (data < 256) {
1674 1674 ha->qfull_retry_count = (uint8_t)data;
1675 1675 } else {
1676 1676 EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1677 1677 " %d; using default value of 16", data);
1678 1678 ha->qfull_retry_count = 16;
1679 1679 }
1680 1680
1681 1681 /* Get queue full retry delay. */
1682 1682 if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1683 1683 ha->qfull_retry_delay = PORT_RETRY_TIME;
1684 1684 } else if (data < 256) {
1685 1685 ha->qfull_retry_delay = (uint8_t)data;
1686 1686 } else {
1687 1687 EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1688 1688 " %d; using default value of %d", data, PORT_RETRY_TIME);
1689 1689 ha->qfull_retry_delay = PORT_RETRY_TIME;
1690 1690 }
1691 1691
1692 1692 /* Get loop down timeout. */
1693 1693 if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1694 1694 data = 0;
1695 1695 } else if (data > 255) {
1696 1696 EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1697 1697 " using nvram value of 0\n", data);
1698 1698 data = 0;
1699 1699 }
1700 1700 ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1701 1701 if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1702 1702 ha->loop_down_abort_time--;
1703 1703 } else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1704 1704 ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1705 1705 }
1706 1706
1707 1707 /* Get link down error enable. */
1708 1708 if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1709 1709 data == 1) {
1710 1710 ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1711 1711 } else if (data == 0) {
1712 1712 ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1713 1713 } else {
1714 1714 EL(ha, "invalid parameter value for 'link-down-error': %d;"
1715 1715 " using default value of 1\n", data);
1716 1716 }
1717 1717
1718 1718 /*
1719 1719 * Get firmware dump flags.
1720 1720 * TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT BIT_0
1721 1721 * TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR BIT_1
1722 1722 * TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT BIT_2
1723 1723 * TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT BIT_3
1724 1724 */
1725 1725 ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1726 1726 CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1727 1727 CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1728 1728 if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1729 1729 if (data & BIT_0) {
1730 1730 ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1731 1731 }
1732 1732 if (data & BIT_1) {
1733 1733 ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1734 1734 }
1735 1735 if (data & BIT_2) {
1736 1736 ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1737 1737 }
1738 1738 if (data & BIT_3) {
1739 1739 ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1740 1740 }
1741 1741 }
1742 1742
1743 1743 /* Get the PCI max read request size override. */
1744 1744 ha->pci_max_read_req = 0;
1745 1745 if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1746 1746 data != 0) {
1747 1747 ha->pci_max_read_req = (uint16_t)(data);
1748 1748 }
1749 1749
1750 1750 /*
1751 1751  	 * Set default fw wait, adjusted for slow FCFs.
1752 1752  	 * Revisit when FCFs are as fast as FC switches.
1753 1753 */
1754 1754 ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_8081) ? 45 : 10);
1755 1755 /* Get the attach fw_ready override value. */
1756 1756 if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1757 1757 if (data > 0 && data <= 240) {
1758 1758 ha->fwwait = (uint8_t)data;
1759 1759 } else {
1760 1760 EL(ha, "invalid parameter value for "
1761 1761 "'init-loop-sync-wait': %d; using default "
1762 1762 "value of %d\n", data, ha->fwwait);
1763 1763 }
1764 1764 }
1765 1765
1766 1766 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1767 1767 }
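
For reference, the firmware-dump-flags property read above is a plain bitmask; the bit meanings come straight from the comment in the code, while the value below is only a hypothetical illustration. A qlc.conf entry such as

	firmware-dump-flags=5;

is 5 = BIT_0 | BIT_2, so ql_common_properties() would set CFG_DUMP_MAILBOX_TIMEOUT and CFG_DUMP_DRIVER_COMMAND_TIMEOUT while leaving the ISP-system-error and loop-offline-timeout dump flags clear.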
1768 1768
1769 1769 /*
1770 1770 * ql_24xx_properties
1771 1771 * Copies driver properties to NVRAM or adapter structure.
1772 1772 *
1773 1773 * Driver properties are by design global variables and hidden
1774 1774 * completely from administrators. Knowledgeable folks can
1775 1775 * override the default values using /etc/system.
1776 1776 *
1777 1777 * Input:
1778 1778 * ha: adapter state pointer.
1779 1779 * nv: NVRAM structure pointer.
1780 1780 *
1781 1781 * Context:
1782 1782 * Kernel context.
1783 1783 */
1784 1784 static void
1785 1785 ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
1786 1786 {
1787 1787 uint32_t data;
1788 1788
1789 1789 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1790 1790
1791 1791 /* Get frame size */
1792 1792 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1793 1793 data = 2048;
1794 1794 }
1795 1795 if (data == 512 || data == 1024 || data == 2048 || data == 2112) {
1796 1796 nv->max_frame_length[0] = LSB(data);
1797 1797 nv->max_frame_length[1] = MSB(data);
1798 1798 } else {
1799 1799 EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1800 1800 " using nvram default of %d\n", data, CHAR_TO_SHORT(
1801 1801 nv->max_frame_length[0], nv->max_frame_length[1]));
1802 1802 }
1803 1803
1804 1804 /* Get execution throttle. */
1805 1805 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1806 1806 data = 32;
1807 1807 }
1808 1808 if (data != 0 && data < 65536) {
1809 1809 nv->execution_throttle[0] = LSB(data);
1810 1810 nv->execution_throttle[1] = MSB(data);
1811 1811 } else {
1812 1812 EL(ha, "invalid parameter value for 'execution-throttle':"
1813 1813 " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1814 1814 nv->execution_throttle[0], nv->execution_throttle[1]));
1815 1815 }
1816 1816
1817 1817 /* Get Login timeout. */
1818 1818 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1819 1819 data = 3;
1820 1820 }
1821 1821 if (data < 65536) {
1822 1822 nv->login_timeout[0] = LSB(data);
1823 1823 nv->login_timeout[1] = MSB(data);
1824 1824 } else {
1825 1825 EL(ha, "invalid parameter value for 'login-timeout': %d; "
1826 1826 "using nvram value of %d\n", data, CHAR_TO_SHORT(
1827 1827 nv->login_timeout[0], nv->login_timeout[1]));
1828 1828 }
1829 1829
1830 1830 /* Get retry count. */
1831 1831 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1832 1832 data = 4;
1833 1833 }
1834 1834 if (data < 65536) {
1835 1835 nv->login_retry_count[0] = LSB(data);
1836 1836 nv->login_retry_count[1] = MSB(data);
1837 1837 } else {
1838 1838 EL(ha, "invalid parameter value for 'login-retry-count': "
1839 1839 "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1840 1840 nv->login_retry_count[0], nv->login_retry_count[1]));
1841 1841 }
1842 1842
1843 1843 /* Get adapter hard loop ID enable. */
1844 1844 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1845 1845 if (data == 0) {
1846 1846 nv->firmware_options_1[0] =
1847 1847 (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
1848 1848 } else if (data == 1) {
1849 1849 nv->firmware_options_1[0] =
1850 1850 (uint8_t)(nv->firmware_options_1[0] | BIT_0);
1851 1851 } else if (data != 0xffffffff) {
1852 1852 EL(ha, "invalid parameter value for "
1853 1853 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1854 1854 "of %d\n", data,
1855 1855 nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
1856 1856 }
1857 1857
1858 1858 /* Get adapter hard loop ID. */
1859 1859 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1860 1860 if (data < 126) {
1861 1861 nv->hard_address[0] = LSB(data);
1862 1862 nv->hard_address[1] = MSB(data);
1863 1863 } else if (data != 0xffffffff) {
1864 1864 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1865 1865 " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1866 1866 nv->hard_address[0], nv->hard_address[1]));
1867 1867 }
1868 1868
1869 1869 /* Get LIP reset. */
1870 1870 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1871 1871 0xffffffff) {
1872 1872 data = 0;
1873 1873 }
1874 1874 if (data == 0) {
1875 1875 ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1876 1876 } else if (data == 1) {
1877 1877 ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1878 1878 } else {
1879 1879 EL(ha, "invalid parameter value for "
1880 1880 "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1881 1881 data);
1882 1882 }
1883 1883
1884 1884 /* Get LIP full login. */
1885 1885 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1886 1886 0xffffffff) {
1887 1887 data = 1;
1888 1888 }
1889 1889 if (data == 0) {
1890 1890 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1891 1891 } else if (data == 1) {
1892 1892 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1893 1893 } else {
1894 1894 EL(ha, "invalid parameter value for "
1895 1895 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1896 1896 "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1897 1897 }
1898 1898
1899 1899 /* Get target reset. */
1900 1900 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1901 1901 0xffffffff) {
1902 1902 data = 0;
1903 1903 }
1904 1904 if (data == 0) {
1905 1905 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1906 1906 } else if (data == 1) {
1907 1907 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1908 1908 } else {
1909 1909 EL(ha, "invalid parameter value for "
1910 1910 "'enable-target-reset-on-bus-reset': %d; using nvram "
1911 1911 "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1912 1912 }
1913 1913
1914 1914 /* Get reset delay. */
1915 1915 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1916 1916 data = 5;
1917 1917 }
1918 1918 if (data != 0 && data < 256) {
1919 1919 nv->reset_delay = (uint8_t)data;
1920 1920 } else {
1921 1921 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1922 1922 "using nvram value of %d", data, nv->reset_delay);
1923 1923 }
1924 1924
1925 1925 /* Get port down retry count. */
1926 1926 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1927 1927 data = 8;
1928 1928 }
1929 1929 if (data < 256) {
1930 1930 nv->port_down_retry_count[0] = LSB(data);
1931 1931 nv->port_down_retry_count[1] = MSB(data);
1932 1932 } else {
1933 1933 EL(ha, "invalid parameter value for 'port-down-retry-count':"
1934 1934 " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1935 1935 nv->port_down_retry_count[0],
1936 1936 nv->port_down_retry_count[1]));
1937 1937 }
1938 1938
1939 1939 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
1940 1940 /* Get connection mode setting. */
1941 1941 if ((data = ql_get_prop(ha, "connection-options")) ==
1942 1942 0xffffffff) {
1943 1943 data = 2;
1944 1944 }
1945 1945 if (data <= 2) {
1946 1946 nv->firmware_options_2[0] = (uint8_t)
1947 1947 (nv->firmware_options_2[0] &
1948 1948 ~(BIT_6 | BIT_5 | BIT_4));
1949 1949 nv->firmware_options_2[0] = (uint8_t)
1950 1950 (nv->firmware_options_2[0] | (uint8_t)(data << 4));
1951 1951 } else {
1952 1952 EL(ha, "invalid parameter value for 'connection-"
1953 1953 "options': %d; using nvram value of %d\n", data,
1954 1954 (nv->firmware_options_2[0] >> 4) & 0x3);
1955 1955 }
1956 1956
1957 1957 /* Get data rate setting. */
1958 1958 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1959 1959 data = 2;
1960 1960 }
1961 1961 if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
1962 1962 (CFG_IST(ha, CFG_CTRL_258081) && data < 5)) {
1963 1963 nv->firmware_options_3[1] = (uint8_t)
1964 1964 (nv->firmware_options_3[1] & 0x1f);
1965 1965 nv->firmware_options_3[1] = (uint8_t)
1966 1966 (nv->firmware_options_3[1] | (uint8_t)(data << 5));
1967 1967 } else {
1968 1968 EL(ha, "invalid parameter value for 'fc-data-rate': "
1969 1969 "%d; using nvram value of %d\n", data,
1970 1970 (nv->firmware_options_3[1] >> 5) & 0x7);
1971 1971 }
1972 1972 }
1973 1973
1974 1974 /* Get IP FW container count. */
1975 1975 ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
1976 1976 ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
1977 1977
1978 1978 /* Get IP low water mark. */
1979 1979 ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
1980 1980 ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
1981 1981
1982 1982 ADAPTER_STATE_LOCK(ha);
1983 1983
1984 1984 /* Get enable flash load. */
1985 1985 if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
1986 1986 data == 0) {
1987 1987 ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
1988 1988 } else if (data == 1) {
1989 1989 ha->cfg_flags |= CFG_LOAD_FLASH_FW;
1990 1990 } else {
1991 1991 EL(ha, "invalid parameter value for 'enable-flash-load': "
1992 1992 "%d; using default value of 0\n", data);
1993 1993 }
1994 1994
1995 1995 /* Enable firmware extended tracing */
1996 1996 if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
1997 1997 if (data != 0) {
1998 1998 ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
1999 1999 }
2000 2000 }
2001 2001
2002 2002 /* Enable firmware fc tracing */
2003 2003 if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
2004 2004 ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
2005 2005 ha->fwfcetraceopt = data;
2006 2006 }
2007 2007
2008 2008 /* Enable fast timeout */
2009 2009 if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
2010 2010 if (data != 0) {
2011 2011 ha->cfg_flags |= CFG_FAST_TIMEOUT;
2012 2012 }
2013 2013 }
2014 2014
2015 2015 ql_common_properties(ha);
2016 2016
2017 2017 ADAPTER_STATE_UNLOCK(ha);
2018 2018
2019 2019 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2020 2020 }
2021 2021
2022 2022 /*
2023 2023 * ql_get_prop
2024 2024 * Get property value from configuration file.
2025 2025 *
2026 2026 * Input:
2027 2027  *	ha = adapter state pointer.
2028 2028 * string = property string pointer.
2029 2029 *
2030 2030 * Returns:
2031 2031  *	0xFFFFFFFF if the property is not found, else the property value.
2032 2032 *
2033 2033 * Context:
2034 2034 * Kernel context.
2035 2035 */
2036 2036 uint32_t
2037 2037 ql_get_prop(ql_adapter_state_t *ha, char *string)
2038 2038 {
2039 2039 char buf[256];
2040 2040 uint32_t data = 0xffffffff;
2041 2041
2042 2042 /*
2043 2043  	 * Look for an adapter instance NPIV (virtual port) specific parameter
2044 2044 */
2045 2045 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2046 2046 (void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2047 2047 ha->vp_index, string);
2048 2048 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2049 2049 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2050 2050 buf, (int)0xffffffff);
2051 2051 }
2052 2052
2053 2053 /*
2054 2054 * Get adapter instance parameter if a vp specific one isn't found.
2055 2055 */
2056 2056 if (data == 0xffffffff) {
2057 2057 (void) sprintf(buf, "hba%d-%s", ha->instance, string);
2058 2058 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2059 2059 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2060 2060 0, buf, (int)0xffffffff);
2061 2061 }
2062 2062
2063 2063 /* Adapter instance parameter found? */
2064 2064 if (data == 0xffffffff) {
2065 2065 /* No, get default parameter. */
2066 2066 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2067 2067 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2068 2068 string, (int)0xffffffff);
2069 2069 }
2070 2070
2071 2071 return (data);
2072 2072 }
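
For reference, the lookup order implemented above gives a driver.conf entry three levels of specificity: per virtual port ("hba<inst>-vp<idx>-<name>"), then per adapter instance ("hba<inst>-<name>"), then the bare property name. A hypothetical qlc.conf fragment (the instance and vp numbers are illustrative only):

	hba0-vp1-login-timeout=8;
	hba0-login-timeout=5;
	login-timeout=3;

With these entries, vp 1 of instance 0 resolves login-timeout to 8, other ports of instance 0 to 5, and every other instance to 3; a result of 0xffffffff (property absent at all three levels) falls through to the caller's built-in default.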
2073 2073
2074 2074 /*
2075 2075 * ql_check_isp_firmware
2076 2076  *	Checks if using already loaded RISC code or the driver's copy.
2077 2077 * If using already loaded code, save a copy of it.
2078 2078 *
2079 2079 * Input:
2080 2080 * ha = adapter state pointer.
2081 2081 *
2082 2082 * Returns:
2083 2083 * ql local function return status code.
2084 2084 *
2085 2085 * Context:
2086 2086 * Kernel context.
2087 2087 */
2088 2088 static int
2089 2089 ql_check_isp_firmware(ql_adapter_state_t *ha)
2090 2090 {
2091 2091 int rval;
2092 2092 uint16_t word_count;
2093 2093 uint32_t byte_count;
2094 2094 uint32_t fw_size, *lptr;
2095 2095 caddr_t bufp;
2096 2096 uint16_t risc_address = (uint16_t)ha->risc_fw[0].addr;
2097 2097
2098 2098 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2099 2099
2100 2100 /* Test for firmware running. */
2101 2101 if (CFG_IST(ha, CFG_CTRL_8021)) {
2102 2102 if (ql_8021_idc_handler(ha) != NX_DEV_READY) {
2103 2103 rval = QL_FUNCTION_FAILED;
2104 2104 } else {
2105 2105 rval = ql_start_firmware(ha);
2106 2106 }
2107 2107 } else if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2108 2108 if (ha->risc_code != NULL) {
2109 2109 kmem_free(ha->risc_code, ha->risc_code_size);
2110 2110 ha->risc_code = NULL;
2111 2111 ha->risc_code_size = 0;
2112 2112 }
2113 2113
2114 2114 /* Get RISC code length. */
2115 2115 rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
2116 2116 1);
2117 2117 if (rval == QL_SUCCESS) {
2118 2118 lptr = (uint32_t *)ha->request_ring_bp;
2119 2119 fw_size = *lptr << 1;
2120 2120
2121 2121 if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2122 2122 ha->risc_code_size = fw_size;
2123 2123 ha->risc_code = bufp;
2124 2124 ha->fw_transfer_size = 128;
2125 2125
2126 2126 /* Dump RISC code. */
2127 2127 do {
2128 2128 if (fw_size > ha->fw_transfer_size) {
2129 2129 byte_count =
2130 2130 ha->fw_transfer_size;
2131 2131 } else {
2132 2132 byte_count = fw_size;
2133 2133 }
2134 2134
2135 2135 word_count =
2136 2136 (uint16_t)(byte_count >> 1);
2137 2137
2138 2138 rval = ql_rd_risc_ram(ha, risc_address,
2139 2139 ha->request_dvma, word_count);
2140 2140 if (rval != QL_SUCCESS) {
2141 2141 kmem_free(ha->risc_code,
2142 2142 ha->risc_code_size);
2143 2143 ha->risc_code = NULL;
2144 2144 ha->risc_code_size = 0;
2145 2145 break;
2146 2146 }
2147 2147
2148 2148 (void) ddi_dma_sync(
2149 2149 ha->hba_buf.dma_handle,
2150 2150 REQUEST_Q_BUFFER_OFFSET,
2151 2151 byte_count,
2152 2152 DDI_DMA_SYNC_FORKERNEL);
2153 2153 ddi_rep_get16(ha->hba_buf.acc_handle,
2154 2154 (uint16_t *)bufp,
2155 2155 (uint16_t *)ha->request_ring_bp,
2156 2156 word_count, DDI_DEV_AUTOINCR);
2157 2157
2158 2158 risc_address += word_count;
2159 2159 fw_size -= byte_count;
2160 2160 bufp += byte_count;
2161 2161 } while (fw_size != 0);
2162 2162 }
2163 2163 rval = QL_FUNCTION_FAILED;
2164 2164 }
2165 2165 } else {
2166 2166 rval = QL_FUNCTION_FAILED;
2167 2167 }
2168 2168
2169 2169 if (rval != QL_SUCCESS) {
2170 2170 EL(ha, "Load RISC code\n");
2171 2171 } else {
2172 2172 /*EMPTY*/
2173 2173 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2174 2174 }
2175 2175 return (rval);
2176 2176 }
2177 2177
2178 2178 /*
2179 2179  * ql_chip_diag
2180 2180 * Test chip for proper operation.
2181 2181 *
2182 2182 * Input:
2183 2183 * ha = adapter state pointer.
2184 2184 *
2185 2185 * Returns:
2186 2186 * ql local function return status code.
2187 2187 *
2188 2188 * Context:
2189 2189 * Kernel context.
2190 2190 */
2191 2191 static int
2192 2192 ql_chip_diag(ql_adapter_state_t *ha)
2193 2193 {
2194 2194 ql_mbx_data_t mr;
2195 2195 int rval;
2196 2196 int32_t retries = 4;
2197 2197 uint16_t id;
2198 2198
2199 2199 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2200 2200
2201 2201 do {
2202 2202 /* Reset ISP chip. */
2203 2203 TASK_DAEMON_LOCK(ha);
2204 2204 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
2205 2205 TASK_DAEMON_UNLOCK(ha);
2206 2206
2207 2207 /* For ISP2200A reduce firmware load size. */
2208 2208 if (CFG_IST(ha, CFG_CTRL_2200) &&
2209 2209 RD16_IO_REG(ha, mailbox_out[7]) == 4) {
2210 2210 ha->fw_transfer_size = 128;
2211 2211 } else {
2212 2212 ha->fw_transfer_size = REQUEST_QUEUE_SIZE;
2213 2213 }
2214 2214
2215 2215 rval = QL_SUCCESS;
2216 2216 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
2217 2217 ql_reset_chip(ha);
2218 2218
2219 2219 /* Check product ID of chip */
2220 2220 mr.mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
2221 2221 mr.mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
2222 2222 mr.mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
2223 2223
2224 2224 if (ha->device_id == 0x5432 ||
2225 2225 ha->device_id == 0x8432) {
2226 2226 id = 0x2432;
2227 2227 } else if (ha->device_id == 0x5422 ||
2228 2228 ha->device_id == 0x8422) {
2229 2229 id = 0x2422;
2230 2230 } else {
2231 2231 id = ha->device_id;
2232 2232 }
2233 2233
2234 2234 if (mr.mb[1] == PROD_ID_1 &&
2235 2235 (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) &&
2236 2236 (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) {
2237 2237 ha->adapter_stats->revlvl.isp2200 =
2238 2238 RD16_IO_REG(ha, mailbox_out[4]);
2239 2239 ha->adapter_stats->revlvl.risc =
2240 2240 RD16_IO_REG(ha, mailbox_out[5]);
2241 2241 ha->adapter_stats->revlvl.frmbfr =
2242 2242 RD16_IO_REG(ha, mailbox_out[6]);
2243 2243 ha->adapter_stats->revlvl.riscrom =
2244 2244 RD16_IO_REG(ha, mailbox_out[7]);
2245 2245 } else {
2246 2246 cmn_err(CE_WARN, "%s(%d) - prod id failed!, "
2247 2247 "mb1=%xh, mb2=%xh, mb3=%xh", QL_NAME,
2248 2248 ha->instance, mr.mb[1], mr.mb[2], mr.mb[3]);
2249 2249 rval = QL_FUNCTION_FAILED;
2250 2250 }
2251 2251 } else if (!(ha->task_daemon_flags & FIRMWARE_LOADED)) {
2252 2252 break;
2253 2253 }
2254 2254
2255 2255 if (rval == QL_SUCCESS) {
2256 2256 /* Wrap Incoming Mailboxes Test. */
2257 2257 mr.mb[1] = 0xAAAA;
2258 2258 mr.mb[2] = 0x5555;
2259 2259 mr.mb[3] = 0xAA55;
2260 2260 mr.mb[4] = 0x55AA;
2261 2261 mr.mb[5] = 0xA5A5;
2262 2262 mr.mb[6] = 0x5A5A;
2263 2263 mr.mb[7] = 0x2525;
2264 2264 rval = ql_mbx_wrap_test(ha, &mr);
2265 2265 if (rval == QL_SUCCESS) {
2266 2266 if (mr.mb[1] != 0xAAAA ||
2267 2267 mr.mb[2] != 0x5555 ||
2268 2268 mr.mb[3] != 0xAA55 ||
2269 2269 mr.mb[4] != 0x55AA ||
2270 2270 mr.mb[5] != 0xA5A5 ||
2271 2271 mr.mb[6] != 0x5A5A ||
2272 2272 mr.mb[7] != 0x2525) {
2273 2273 rval = QL_FUNCTION_FAILED;
2274 2274 (void) ql_flash_errlog(ha,
2275 2275 FLASH_ERRLOG_ISP_ERR, 0,
2276 2276 RD16_IO_REG(ha, hccr),
2277 2277 RD16_IO_REG(ha, istatus));
2278 2278 }
2279 2279 } else {
2280 2280 cmn_err(CE_WARN, "%s(%d) - reg test failed="
2281 2281 "%xh!", QL_NAME, ha->instance, rval);
2282 2282 }
2283 2283 }
2284 2284 } while ((retries-- != 0) && (rval != QL_SUCCESS));
2285 2285
2286 2286 if (rval != QL_SUCCESS) {
2287 2287 EL(ha, "failed, rval = %xh\n", rval);
2288 2288 } else {
2289 2289 /*EMPTY*/
2290 2290 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2291 2291 }
2292 2292 return (rval);
2293 2293 }
2294 2294
2295 2295 /*
2296 2296 * ql_load_isp_firmware
2297 2297 * Load and start RISC firmware.
2298 2298 * Uses request ring for DMA buffer.
2299 2299 *
2300 2300 * Input:
2301 2301 * ha = adapter state pointer.
2302 2302 *
2303 2303 * Returns:
2304 2304 * ql local function return status code.
2305 2305 *
2306 2306 * Context:
2307 2307 * Kernel context.
2308 2308 */
2309 2309 int
2310 2310 ql_load_isp_firmware(ql_adapter_state_t *vha)
2311 2311 {
2312 2312 caddr_t risc_code_address;
2313 2313 uint32_t risc_address, risc_code_size;
2314 2314 int rval;
2315 2315 uint32_t word_count, cnt;
2316 2316 size_t byte_count;
2317 2317 ql_adapter_state_t *ha = vha->pha;
2318 2318
2319 2319 if (CFG_IST(ha, CFG_CTRL_8021)) {
2320 2320 rval = ql_8021_load_risc(ha);
2321 2321 } else {
2322 2322 if (CFG_IST(ha, CFG_CTRL_81XX)) {
2323 2323 ql_mps_reset(ha);
2324 2324 }
2325 2325
2326 2326 if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2327 2327 return (ql_load_flash_fw(ha));
2328 2328 }
2329 2329
2330 2330 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2331 2331
2332 2332 /* Load firmware segments */
2333 2333 for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2334 2334 ha->risc_fw[cnt].code != NULL; cnt++) {
2335 2335
2336 2336 risc_code_address = ha->risc_fw[cnt].code;
2337 2337 risc_address = ha->risc_fw[cnt].addr;
2338 2338 risc_code_size = ha->risc_fw[cnt].length;
2339 2339
2340 2340 while (risc_code_size) {
2341 2341 if (CFG_IST(ha, CFG_CTRL_242581)) {
2342 2342 word_count = ha->fw_transfer_size >> 2;
2343 2343 if (word_count > risc_code_size) {
2344 2344 word_count = risc_code_size;
2345 2345 }
2346 2346 byte_count = word_count << 2;
2347 2347
2348 2348 ddi_rep_put32(ha->hba_buf.acc_handle,
2349 2349 (uint32_t *)risc_code_address,
2350 2350 (uint32_t *)ha->request_ring_bp,
2351 2351 word_count, DDI_DEV_AUTOINCR);
2352 2352 } else {
2353 2353 word_count = ha->fw_transfer_size >> 1;
2354 2354 if (word_count > risc_code_size) {
2355 2355 word_count = risc_code_size;
2356 2356 }
2357 2357 byte_count = word_count << 1;
2358 2358
2359 2359 ddi_rep_put16(ha->hba_buf.acc_handle,
2360 2360 (uint16_t *)risc_code_address,
2361 2361 (uint16_t *)ha->request_ring_bp,
2362 2362 word_count, DDI_DEV_AUTOINCR);
2363 2363 }
2364 2364
2365 2365 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2366 2366 REQUEST_Q_BUFFER_OFFSET, byte_count,
2367 2367 DDI_DMA_SYNC_FORDEV);
2368 2368
2369 2369 rval = ql_wrt_risc_ram(ha, risc_address,
2370 2370 ha->request_dvma, word_count);
2371 2371 if (rval != QL_SUCCESS) {
2372 2372 EL(ha, "failed, load=%xh\n", rval);
2373 2373 cnt = MAX_RISC_CODE_SEGMENTS;
2374 2374 break;
2375 2375 }
2376 2376
2377 2377 risc_address += word_count;
2378 2378 risc_code_size -= word_count;
2379 2379 risc_code_address += byte_count;
2380 2380 }
2381 2381 }
2382 2382 }
2383 2383
2384 2384 /* Start firmware. */
2385 2385 if (rval == QL_SUCCESS) {
2386 2386 rval = ql_start_firmware(ha);
2387 2387 }
2388 2388
2389 2389 if (rval != QL_SUCCESS) {
2390 2390 EL(ha, "failed, rval = %xh\n", rval);
2391 2391 } else {
2392 2392 /*EMPTY*/
2393 2393 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2394 2394 }
2395 2395
2396 2396 return (rval);
2397 2397 }
2398 2398
2399 2399 /*
2400 2400 * ql_load_flash_fw
2401 2401 * Gets ISP24xx firmware from flash and loads ISP.
2402 2402 *
2403 2403 * Input:
2404 2404 * ha: adapter state pointer.
2405 2405 *
2406 2406 * Returns:
2407 2407 * ql local function return status code.
2408 2408 */
2409 2409 static int
2410 2410 ql_load_flash_fw(ql_adapter_state_t *ha)
2411 2411 {
2412 2412 int rval;
2413 2413 uint8_t seg_cnt;
2414 2414 uint32_t risc_address, xfer_size, count, *bp, faddr;
2415 2415 uint32_t risc_code_size = 0;
2416 2416
2417 2417 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2418 2418
2419 2419 faddr = ha->flash_data_addr | ha->flash_fw_addr;
2420 2420
2421 2421 for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2422 2422 xfer_size = ha->fw_transfer_size >> 2;
2423 2423 do {
2424 2424 GLOBAL_HW_LOCK();
2425 2425
2426 2426 /* Read data from flash. */
2427 2427 bp = (uint32_t *)ha->request_ring_bp;
2428 2428 for (count = 0; count < xfer_size; count++) {
2429 2429 rval = ql_24xx_read_flash(ha, faddr++, bp);
2430 2430 if (rval != QL_SUCCESS) {
2431 2431 break;
2432 2432 }
2433 2433 ql_chg_endian((uint8_t *)bp++, 4);
2434 2434 }
2435 2435
2436 2436 GLOBAL_HW_UNLOCK();
2437 2437
2438 2438 if (rval != QL_SUCCESS) {
2439 2439 EL(ha, "24xx_read_flash failed=%xh\n", rval);
2440 2440 break;
2441 2441 }
2442 2442
2443 2443 if (risc_code_size == 0) {
2444 2444 bp = (uint32_t *)ha->request_ring_bp;
2445 2445 risc_address = bp[2];
2446 2446 risc_code_size = bp[3];
2447 2447 ha->risc_fw[seg_cnt].addr = risc_address;
2448 2448 }
2449 2449
2450 2450 if (risc_code_size < xfer_size) {
2451 2451 faddr -= xfer_size - risc_code_size;
2452 2452 xfer_size = risc_code_size;
2453 2453 }
2454 2454
2455 2455 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2456 2456 REQUEST_Q_BUFFER_OFFSET, xfer_size << 2,
2457 2457 DDI_DMA_SYNC_FORDEV);
2458 2458
2459 2459 rval = ql_wrt_risc_ram(ha, risc_address,
2460 2460 ha->request_dvma, xfer_size);
2461 2461 if (rval != QL_SUCCESS) {
2462 2462 EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval);
2463 2463 break;
2464 2464 }
2465 2465
2466 2466 risc_address += xfer_size;
2467 2467 risc_code_size -= xfer_size;
2468 2468 } while (risc_code_size);
2469 2469
2470 2470 if (rval != QL_SUCCESS) {
2471 2471 break;
2472 2472 }
2473 2473 }
2474 2474
2475 2475 /* Start firmware. */
2476 2476 if (rval == QL_SUCCESS) {
2477 2477 rval = ql_start_firmware(ha);
2478 2478 }
2479 2479
2480 2480 if (rval != QL_SUCCESS) {
2481 2481 EL(ha, "failed, rval = %xh\n", rval);
2482 2482 } else {
2483 2483 /*EMPTY*/
2484 2484 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2485 2485 }
2486 2486 return (rval);
2487 2487 }
2488 2488
2489 2489 /*
2490 2490 * ql_start_firmware
2491 2491 * Starts RISC code.
2492 2492 *
2493 2493 * Input:
2494 2494 * ha = adapter state pointer.
2495 2495 *
2496 2496 * Returns:
2497 2497 * ql local function return status code.
2498 2498 *
2499 2499 * Context:
2500 2500 * Kernel context.
2501 2501 */
2502 2502 int
2503 2503 ql_start_firmware(ql_adapter_state_t *vha)
2504 2504 {
2505 2505 int rval, rval2;
2506 2506 uint32_t data;
2507 2507 ql_mbx_data_t mr;
2508 2508 ql_adapter_state_t *ha = vha->pha;
2509 2509
2510 2510 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2511 2511
2512 2512 if (CFG_IST(ha, CFG_CTRL_8021)) {
2513 2513 /* Save firmware version. */
2514 2514 rval = ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2515 2515 ha->fw_major_version = mr.mb[1];
2516 2516 ha->fw_minor_version = mr.mb[2];
2517 2517 ha->fw_subminor_version = mr.mb[3];
2518 2518 ha->fw_attributes = mr.mb[6];
2519 2519 } else if ((rval = ql_verify_checksum(ha)) == QL_SUCCESS) {
2520 2520 /* Verify checksum of loaded RISC code. */
2521 2521 /* Start firmware execution. */
2522 2522 (void) ql_execute_fw(ha);
2523 2523
2524 2524 /* Save firmware version. */
2525 2525 (void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2526 2526 ha->fw_major_version = mr.mb[1];
2527 2527 ha->fw_minor_version = mr.mb[2];
2528 2528 ha->fw_subminor_version = mr.mb[3];
2529 2529 ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) -
2530 2530 0x100000) + 1) * 4;
2531 2531 ha->fw_attributes = mr.mb[6];
2532 2532
2533 2533 if (CFG_IST(ha, CFG_CTRL_81XX)) {
2534 2534 ha->phy_fw_major_version = LSB(mr.mb[8]);
2535 2535 ha->phy_fw_minor_version = MSB(mr.mb[9]);
2536 2536 ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2537 2537 ha->mpi_fw_major_version = LSB(mr.mb[10]);
2538 2538 ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2539 2539 ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2540 2540 ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13],
2541 2541 mr.mb[12]);
2542 2542 if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2543 2543 0, 0, &data)) == QL_SUCCESS) {
2544 2544 ha->xioctl->fdesc.block_size = data << 2;
2545 2545 QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size="
2546 2546 "%xh\n", ha->instance,
2547 2547 ha->xioctl->fdesc.block_size);
2548 2548 } else {
2549 2549 EL(ha, "flash_access status=%xh\n", rval2);
2550 2550 }
2551 2551 }
2552 2552
2553 2553 /* Set Serdes Transmit Parameters. */
2554 2554 if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) {
2555 2555 mr.mb[1] = ha->serdes_param[0];
2556 2556 mr.mb[2] = ha->serdes_param[1];
2557 2557 mr.mb[3] = ha->serdes_param[2];
2558 2558 mr.mb[4] = ha->serdes_param[3];
2559 2559 (void) ql_serdes_param(ha, &mr);
2560 2560 }
2561 2561 }
2562 2562 /* ETS workaround */
2563 2563 if (CFG_IST(ha, CFG_CTRL_81XX) && ql_enable_ets) {
2564 2564 if (ql_get_firmware_option(ha, &mr) == QL_SUCCESS) {
2565 2565 mr.mb[2] = (uint16_t)
2566 2566 (mr.mb[2] | FO2_FCOE_512_MAX_MEM_WR_BURST);
2567 2567 (void) ql_set_firmware_option(ha, &mr);
2568 2568 }
2569 2569 }
2570 2570 if (rval != QL_SUCCESS) {
2571 2571 ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2572 2572 EL(ha, "failed, rval = %xh\n", rval);
2573 2573 } else {
2574 2574 ha->task_daemon_flags |= FIRMWARE_LOADED;
2575 2575 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2576 2576 }
2577 2577 return (rval);
2578 2578 }
2579 2579
2580 2580 /*
2581 2581 * ql_set_cache_line
2582 2582 * Sets PCI cache line parameter.
2583 2583 *
2584 2584 * Input:
2585 2585 * ha = adapter state pointer.
2586 2586 *
2587 2587 * Returns:
2588 2588 * ql local function return status code.
2589 2589 *
2590 2590 * Context:
2591 2591 * Kernel context.
2592 2592 */
2593 2593 int
2594 2594 ql_set_cache_line(ql_adapter_state_t *ha)
2595 2595 {
2596 2596 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2597 2597
2598 2598 /* Set the cache line. */
2599 2599 if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2600 2600 /* Set cache line register. */
2601 2601 ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2602 2602 }
2603 2603
2604 2604 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2605 2605
2606 2606 return (QL_SUCCESS);
2607 2607 }
2608 2608
2609 2609 /*
2610 2610 * ql_init_rings
2611 2611 * Initializes firmware and ring pointers.
2612 2612 *
2613 2613 * Beginning of response ring has initialization control block
2614 2614 * already built by nvram config routine.
2615 2615 *
2616 2616 * Input:
2617 2617 * ha = adapter state pointer.
2618 2618 * ha->hba_buf = request and response rings
2619 2619 * ha->init_ctrl_blk = initialization control block
2620 2620 *
2621 2621 * Returns:
2622 2622 * ql local function return status code.
2623 2623 *
2624 2624 * Context:
2625 2625 * Kernel context.
2626 2626 */
2627 2627 int
2628 2628 ql_init_rings(ql_adapter_state_t *vha2)
2629 2629 {
2630 2630 int rval, rval2;
2631 2631 uint16_t index;
2632 2632 ql_mbx_data_t mr;
2633 2633 ql_adapter_state_t *ha = vha2->pha;
2634 2634
2635 2635 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2636 2636
2637 2637 /* Clear outstanding commands array. */
2638 2638 for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) {
2639 2639 ha->outstanding_cmds[index] = NULL;
2640 2640 }
2641 2641 ha->osc_index = 1;
2642 2642
2643 2643 ha->pending_cmds.first = NULL;
2644 2644 ha->pending_cmds.last = NULL;
2645 2645
2646 2646 /* Initialize firmware. */
2647 2647 ha->request_ring_ptr = ha->request_ring_bp;
2648 2648 ha->req_ring_index = 0;
2649 2649 ha->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2650 2650 ha->response_ring_ptr = ha->response_ring_bp;
2651 2651 ha->rsp_ring_index = 0;
2652 2652
2653 2653 if (ha->flags & VP_ENABLED) {
2654 2654 ql_adapter_state_t *vha;
2655 2655 uint16_t cnt;
2656 2656 uint32_t max_vports;
2657 2657 ql_init_24xx_cb_t *icb = &ha->init_ctrl_blk.cb24;
2658 2658
2659 2659 max_vports = (CFG_IST(ha, CFG_CTRL_2422) ?
2660 2660 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS);
2661 2661 bzero(icb->vp_count,
2662 2662 ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2663 2663 (uintptr_t)icb->vp_count);
2664 2664 icb->vp_count[0] = (uint8_t)max_vports;
2665 2665
2666 2666 /* Allow connection option 2. */
2667 2667 icb->global_vp_option[0] = BIT_1;
2668 2668
2669 2669 for (cnt = 0, vha = ha->vp_next; cnt < max_vports &&
2670 2670 vha != NULL; vha = vha->vp_next, cnt++) {
2671 2671
2672 2672 index = (uint8_t)(vha->vp_index - 1);
2673 2673 bcopy(vha->loginparams.node_ww_name.raw_wwn,
2674 2674 icb->vpc[index].node_name, 8);
2675 2675 bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2676 2676 icb->vpc[index].port_name, 8);
2677 2677
2678 2678 icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2679 2679 VPO_INITIATOR_MODE_ENABLED;
2680 2680 if (vha->flags & VP_ENABLED) {
2681 2681 icb->vpc[index].options = (uint8_t)
2682 2682 (icb->vpc[index].options | VPO_ENABLED);
2683 2683 }
2684 2684 }
2685 2685 }
2686 2686
2687 2687 for (index = 0; index < 2; index++) {
2688 2688 rval = ql_init_firmware(ha);
2689 2689 if (rval == QL_COMMAND_ERROR) {
2690 2690 EL(ha, "stopping firmware\n");
2691 2691 (void) ql_stop_firmware(ha);
2692 2692 } else {
2693 2693 break;
2694 2694 }
2695 2695 }
2696 2696
2697 2697 if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2698 2698 /* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2699 2699 rval = ql_get_firmware_option(ha, &mr);
2700 2700 if (rval == QL_SUCCESS) {
2701 2701 mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2702 2702 mr.mb[2] = 0;
2703 2703 mr.mb[3] = BIT_10;
2704 2704 rval = ql_set_firmware_option(ha, &mr);
2705 2705 }
2706 2706 }
2707 2707
2708 2708 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2709 2709 /* Firmware Fibre Channel Event Trace Buffer */
2710 2710 if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2711 2711 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2712 2712 EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2713 2713 } else {
2714 2714 if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2715 2715 FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) {
2716 2716 EL(ha, "fcetrace enable failed: %xh\n", rval2);
2717 2717 ql_free_phys(ha, &ha->fwfcetracebuf);
2718 2718 }
2719 2719 }
2720 2720 }
2721 2721
2722 2722 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2723 2723 /* Firmware Extended Trace Buffer */
2724 2724 if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2725 2725 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2726 2726 EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2727 2727 } else {
2728 2728 if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2729 2729 FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) {
2730 2730 EL(ha, "exttrace enable failed: %xh\n", rval2);
2731 2731 ql_free_phys(ha, &ha->fwexttracebuf);
2732 2732 }
2733 2733 }
2734 2734 }
2735 2735
2736 2736 if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2737 2737 ql_mbx_iocb_t *pkt;
2738 2738 clock_t timer;
2739 2739
2740 2740 /* Wait for firmware login of menlo. */
2741 2741 for (timer = 3000; timer; timer--) {
2742 2742 if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2743 2743 break;
2744 2744 }
2745 2745
2746 2746 if (!(ha->flags & INTERRUPTS_ENABLED) ||
2747 2747 ddi_in_panic()) {
2748 2748 if (INTERRUPT_PENDING(ha)) {
2749 2749 (void) ql_isr((caddr_t)ha);
2750 2750 INTR_LOCK(ha);
2751 2751 ha->intr_claimed = B_TRUE;
2752 2752 INTR_UNLOCK(ha);
2753 2753 }
2754 2754 }
2755 2755
2756 2756 /* Delay for 1 tick (10 milliseconds). */
2757 2757 ql_delay(ha, 10000);
2758 2758 }
2759 2759
2760 2760 if (timer == 0) {
2761 2761 rval = QL_FUNCTION_TIMEOUT;
2762 2762 } else {
2763 2763 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2764 2764 if (pkt == NULL) {
2765 2765 EL(ha, "failed, kmem_zalloc\n");
2766 2766 rval = QL_MEMORY_ALLOC_FAILED;
2767 2767 } else {
2768 2768 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2769 2769 pkt->mvfy.entry_count = 1;
2770 2770 pkt->mvfy.options_status =
2771 2771 LE_16(VMF_DO_NOT_UPDATE_FW);
2772 2772
2773 2773 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2774 2774 sizeof (ql_mbx_iocb_t));
2775 2775 LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2776 2776 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2777 2777
2778 2778 if (rval != QL_SUCCESS ||
2779 2779 (pkt->mvfy.entry_status & 0x3c) != 0 ||
2780 2780 pkt->mvfy.options_status != CS_COMPLETE) {
2781 2781 EL(ha, "failed, status=%xh, es=%xh, "
2782 2782 "cs=%xh, fc=%xh\n", rval,
2783 2783 pkt->mvfy.entry_status & 0x3c,
2784 2784 pkt->mvfy.options_status,
2785 2785 pkt->mvfy.failure_code);
2786 2786 if (rval == QL_SUCCESS) {
2787 2787 rval = QL_FUNCTION_FAILED;
2788 2788 }
2789 2789 }
2790 2790
2791 2791 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2792 2792 }
2793 2793 }
2794 2794 }
2795 2795
2796 2796 if (rval != QL_SUCCESS) {
2797 2797 TASK_DAEMON_LOCK(ha);
2798 2798 ha->task_daemon_flags &= ~FIRMWARE_UP;
2799 2799 TASK_DAEMON_UNLOCK(ha);
2800 2800 EL(ha, "failed, rval = %xh\n", rval);
2801 2801 } else {
2802 2802 TASK_DAEMON_LOCK(ha);
2803 2803 ha->task_daemon_flags |= FIRMWARE_UP;
2804 2804 TASK_DAEMON_UNLOCK(ha);
2805 2805 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2806 2806 }
2807 2807 return (rval);
2808 2808 }
2809 2809
2810 2810 /*
2811 2811 * ql_fw_ready
2812 2812  *	Waits for firmware ready. If firmware becomes ready,
2813 2813 * device queues and RISC code are synchronized.
2814 2814 *
2815 2815 * Input:
2816 2816 * ha = adapter state pointer.
2817 2817 * secs = max wait time, in seconds (0-255).
2818 2818 *
2819 2819 * Returns:
2820 2820 * ql local function return status code.
2821 2821 *
2822 2822 * Context:
2823 2823 * Kernel context.
2824 2824 */
2825 2825 int
2826 2826 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2827 2827 {
2828 2828 ql_mbx_data_t mr;
2829 2829 clock_t timer;
2830 2830 clock_t dly = 250000;
2831 2831 clock_t sec_delay = MICROSEC / dly;
2832 2832 clock_t wait = secs * sec_delay;
2833 2833 int rval = QL_FUNCTION_FAILED;
2834 2834 uint16_t state = 0xffff;
2835 2835
2836 2836 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2837 2837
2838 2838 timer = ha->r_a_tov < secs ? secs : ha->r_a_tov;
2839 2839 timer = (timer + 2) * sec_delay;
2840 2840
2841 2841 /* Wait for ISP to finish LIP */
2842 2842 while (timer != 0 && wait != 0 &&
2843 2843 !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
2844 2844
2845 2845 rval = ql_get_firmware_state(ha, &mr);
2846 2846 if (rval == QL_SUCCESS) {
2847 2847 if (ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2848 2848 LOOP_DOWN)) {
2849 2849 wait--;
2850 2850 } else if (mr.mb[1] != FSTATE_READY) {
2851 2851 if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2852 2852 wait--;
2853 2853 }
2854 2854 rval = QL_FUNCTION_FAILED;
2855 2855 } else {
2856 2856 /* Firmware is ready. Get 2 * R_A_TOV. */
2857 2857 rval = ql_get_timeout_parameters(ha,
2858 2858 &ha->r_a_tov);
2859 2859 if (rval != QL_SUCCESS) {
2860 2860 EL(ha, "failed, get_timeout_param"
2861 2861 "=%xh\n", rval);
2862 2862 }
2863 2863
2864 2864 /* Configure loop. */
2865 2865 rval = ql_configure_loop(ha);
2866 2866 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2867 2867
2868 2868 if (ha->task_daemon_flags &
2869 2869 LOOP_RESYNC_NEEDED) {
2870 2870 wait--;
2871 2871 EL(ha, "loop trans; tdf=%xh\n",
2872 2872 ha->task_daemon_flags);
2873 2873 } else {
2874 2874 break;
2875 2875 }
2876 2876 }
2877 2877 } else {
2878 2878 wait--;
2879 2879 }
2880 2880
2881 2881 if (state != mr.mb[1]) {
2882 2882 EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]);
2883 2883 state = mr.mb[1];
2884 2884 }
2885 2885
2886 2886 /* Delay for a tick if waiting. */
2887 2887 if (timer-- != 0 && wait != 0) {
2888 2888 if (timer % 4 == 0) {
2889 2889 delay(drv_usectohz(dly));
2890 2890 } else {
2891 2891 drv_usecwait(dly);
2892 2892 }
2893 2893 } else {
2894 2894 rval = QL_FUNCTION_TIMEOUT;
2895 2895 }
2896 2896 }
2897 2897
2898 2898 if (rval != QL_SUCCESS) {
2899 2899 EL(ha, "failed, rval = %xh\n", rval);
2900 2900 } else {
2901 2901 /*EMPTY*/
2902 2902 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2903 2903 }
2904 2904 return (rval);
2905 2905 }
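
As a rough worked example of the timing above (the numbers come from the constants in the function, not from measured behavior): dly is 250000 microseconds, so sec_delay = MICROSEC / dly = 4 polls per second. With secs = 10 and r_a_tov = 10, wait starts at 40 polls (about 10 seconds) and timer at (10 + 2) * 4 = 48 polls (about 12 seconds); delay() is used on every fourth poll and drv_usecwait() busy-waits the rest.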
2906 2906
2907 2907 /*
2908 2908 * ql_configure_loop
2909 2909 * Setup configurations based on loop.
2910 2910 *
2911 2911 * Input:
2912 2912 * ha = adapter state pointer.
2913 2913 *
2914 2914 * Returns:
2915 2915 * ql local function return status code.
2916 2916 *
2917 2917 * Context:
2918 2918 * Kernel context.
2919 2919 */
2920 2920 static int
2921 2921 ql_configure_loop(ql_adapter_state_t *ha)
2922 2922 {
2923 2923 int rval;
2924 2924 ql_adapter_state_t *vha;
2925 2925
2926 2926 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2927 2927
2928 2928 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2929 2929 TASK_DAEMON_LOCK(ha);
2930 2930 if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
2931 2931 vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2932 2932 TASK_DAEMON_UNLOCK(ha);
2933 2933 continue;
2934 2934 }
2935 2935 vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
2936 2936 TASK_DAEMON_UNLOCK(ha);
2937 2937
2938 2938 rval = ql_configure_hba(vha);
2939 2939 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2940 2940 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2941 2941 rval = ql_configure_device_d_id(vha);
2942 2942 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2943 2943 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2944 2944 (void) ql_configure_fabric(vha);
2945 2945 }
2946 2946 }
2947 2947 }
2948 2948
2949 2949 if (rval != QL_SUCCESS) {
2950 2950 EL(ha, "failed, rval = %xh\n", rval);
2951 2951 } else {
2952 2952 /*EMPTY*/
2953 2953 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2954 2954 }
2955 2955 return (rval);
2956 2956 }
2957 2957
2958 2958 /*
2959 2959 * ql_configure_n_port_info
2960 2960  *	Set up configuration for N_Port to N_Port (point-to-point) topology.
2961 2961 *
2962 2962 * Input:
2963 2963 * ha = adapter state pointer.
2964 2964 *
2965 2965  * Returns:
2966 2966  *	Nothing (void function).
2967 2967 *
2968 2968 * Context:
2969 2969 * Kernel context.
2970 2970 */
2971 2971 static void
2972 2972 ql_configure_n_port_info(ql_adapter_state_t *ha)
2973 2973 {
2974 2974 ql_tgt_t tmp_tq;
2975 2975 ql_tgt_t *tq;
2976 2976 uint8_t *cb_port_name;
2977 2977 ql_link_t *link;
2978 2978 int index, rval;
2979 2979
2980 2980 tq = &tmp_tq;
2981 2981
2982 2982 /* Free existing target queues. */
2983 2983 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
2984 2984 link = ha->dev[index].first;
2985 2985 while (link != NULL) {
2986 2986 tq = link->base_address;
2987 2987 link = link->next;
2988 2988 ql_remove_link(&ha->dev[index], &tq->device);
2989 2989 ql_dev_free(ha, tq);
2990 2990 }
2991 2991 }
2992 2992
2993 2993 /*
2994 2994  	 * If the N_Port's WWPN is larger than ours, then it has the
2995 2995 * N_Port login initiative. It will have determined that and
2996 2996 * logged in with the firmware. This results in a device
2997 2997 * database entry. In this situation we will later send up a PLOGI
2998 2998 * by proxy for the N_Port to get things going.
2999 2999 *
3000 3000  	 * If the N_Port's WWPN is smaller, then the firmware has the
3001 3001  	 * N_Port login initiative and does a FLOGI in order to obtain the
3002 3002  	 * N_Port's WWNN and WWPN. These names are required later
3003 3003  	 * during Leadville's FLOGI. No PLOGI is done by the firmware in
3004 3004  	 * anticipation of a PLOGI via the driver from the upper layers.
3005 3005  	 * Upon receipt of said PLOGI, the driver issues an ELS PLOGI
3006 3006 * pass-through command and the firmware assumes the s_id
3007 3007 * and the N_Port assumes the d_id and Bob's your uncle.
3008 3008 */
3009 3009
3010 3010 /*
3011 3011  	 * In N_Port to N_Port topology the FW provides a port database entry at
3012 3012  	 * loop_id 0x7fe which allows us to acquire the N_Port's WWPN.
3013 3013 */
3014 3014 tq->d_id.b.al_pa = 0;
3015 3015 tq->d_id.b.area = 0;
3016 3016 tq->d_id.b.domain = 0;
3017 3017 tq->loop_id = 0x7fe;
3018 3018
3019 3019 rval = ql_get_port_database(ha, tq, PDF_NONE);
3020 3020 if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) {
3021 3021 ql_dev_id_list_t *list;
3022 3022 uint32_t list_size;
3023 3023 ql_mbx_data_t mr;
3024 - port_id_t d_id = {0, 0, 0, 0};
3024 + port_id_t d_id = {{{0, 0, 0}, 0}};
3025 3025 uint16_t loop_id = 0;
3026 3026
3027 3027 cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_24258081) ?
3028 3028 &ha->init_ctrl_blk.cb24.port_name[0] :
3029 3029 &ha->init_ctrl_blk.cb.port_name[0]);
3030 3030
3031 3031 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
3032 3032 (la_wwn_t *)cb_port_name) == 1)) {
3033 3033 EL(ha, "target port has N_Port login initiative\n");
3034 3034 } else {
3035 3035 EL(ha, "host port has N_Port login initiative\n");
3036 3036 }
3037 3037
3038 3038  		/* Capture the N_Port's WWPN */
3039 3039
3040 3040 bcopy((void *)&tq->port_name[0],
3041 3041 (void *)&ha->n_port->port_name[0], 8);
3042 3042 bcopy((void *)&tq->node_name[0],
3043 3043 (void *)&ha->n_port->node_name[0], 8);
3044 3044
3045 3045 /* Resolve an n_port_handle */
3046 3046 ha->n_port->n_port_handle = 0x7fe;
3047 3047
3048 3048 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3049 3049 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
3050 3050
3051 3051 if (list != NULL &&
3052 3052 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
3053 3053 QL_SUCCESS) {
3054 3054 if (mr.mb[1]) {
3055 3055 EL(ha, "id list entries = %d\n", mr.mb[1]);
3056 3056 for (index = 0; index < mr.mb[1]; index++) {
3057 3057 ql_dev_list(ha, list, index,
3058 3058 &d_id, &loop_id);
3059 3059 ha->n_port->n_port_handle = loop_id;
3060 3060 }
3061 3061 } else {
3062 3062 for (index = 0; index <= LAST_LOCAL_LOOP_ID;
3063 3063 index++) {
3064 3064  					/* reuse tq */
3065 3065 tq->loop_id = (uint16_t)index;
3066 3066 rval = ql_get_port_database(ha, tq,
3067 3067 PDF_NONE);
3068 3068 if (rval == QL_NOT_LOGGED_IN) {
3069 3069 if (tq->master_state ==
3070 3070 PD_STATE_PLOGI_PENDING) {
3071 3071 ha->n_port->
3072 3072 n_port_handle =
3073 3073 tq->loop_id;
3074 3074 break;
3075 3075 }
3076 3076 } else {
3077 3077 ha->n_port->n_port_handle =
3078 3078 tq->loop_id;
3079 3079 break;
3080 3080 }
3081 3081 }
3082 3082 }
3083 3083 } else {
3084 3084 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
3085 3085 QL_NAME, ha->instance, d_id.b24);
3086 3086 }
3087 3087 if (list != NULL) {
3088 3088 kmem_free(list, list_size);
3089 3089 }
3090 3090 }
3091 3091 }
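
The d_id initializer change earlier in this function is the only delta in this stretch of the file, and it exists purely to satisfy -Wmissing-braces. A minimal sketch of the warning, using a hypothetical two-level aggregate (the driver's real port_id_t evidently nests one level deeper, hence the three brace levels in the new hunk):

	/* Illustration only -- not the driver's actual port_id_t definition. */
	typedef union {
		struct {
			uint8_t		al_pa;
			uint8_t		area;
			uint8_t		domain;
			uint8_t		rsvd_1;
		} b;
		uint32_t	b24;
	} eg_port_id_t;

	eg_port_id_t flat = {0, 0, 0, 0};	/* brace elision: warns under -Wmissing-braces */
	eg_port_id_t full = {{0, 0, 0, 0}};	/* one brace level per aggregate level: clean */

Both forms zero-initialize the same object; the warning only asks that the nesting be spelled out explicitly, which is what the new initializer in the hunk does.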
3092 3092
3093 3093
3094 3094 /*
3095 3095 * ql_configure_hba
3096 3096 * Setup adapter context.
3097 3097 *
3098 3098 * Input:
3099 3099 * ha = adapter state pointer.
3100 3100 *
3101 3101 * Returns:
3102 3102 * ql local function return status code.
3103 3103 *
3104 3104 * Context:
3105 3105 * Kernel context.
3106 3106 */
3107 3107 static int
3108 3108 ql_configure_hba(ql_adapter_state_t *ha)
3109 3109 {
3110 3110 uint8_t *bp;
3111 3111 int rval;
3112 3112 uint32_t state;
3113 3113 ql_mbx_data_t mr;
3114 3114
3115 3115 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3116 3116
3117 3117 /* Get host addresses. */
3118 3118 rval = ql_get_adapter_id(ha, &mr);
3119 3119 if (rval == QL_SUCCESS) {
3120 3120 ha->topology = (uint8_t)(ha->topology &
3121 3121 ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3122 3122
3123 3123 /* Save Host d_id, alpa, loop ID. */
3124 3124 ha->loop_id = mr.mb[1];
3125 3125 ha->d_id.b.al_pa = LSB(mr.mb[2]);
3126 3126 ha->d_id.b.area = MSB(mr.mb[2]);
3127 3127 ha->d_id.b.domain = LSB(mr.mb[3]);
3128 3128
3129 3129 ADAPTER_STATE_LOCK(ha);
3130 3130 ha->flags &= ~FDISC_ENABLED;
3131 3131
3132 3132 /* Get loop topology. */
3133 3133 switch (mr.mb[6]) {
3134 3134 case CNX_LOOP_NO_FABRIC:
3135 3135 ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3136 3136 break;
3137 3137 case CNX_FLPORT_IN_LOOP:
3138 3138 ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3139 3139 break;
3140 3140 case CNX_NPORT_2_NPORT_P2P:
3141 3141 case CNX_NPORT_2_NPORT_NO_TGT_RSP:
3142 3142 ha->flags |= POINT_TO_POINT;
3143 3143 ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3144 3144 if (CFG_IST(ha, CFG_CTRL_2425)) {
3145 3145 ql_configure_n_port_info(ha);
3146 3146 }
3147 3147 break;
3148 3148 case CNX_FLPORT_P2P:
3149 3149 ha->flags |= POINT_TO_POINT;
3150 3150 ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3151 3151
3152 3152 /* Get supported option. */
3153 3153 if (CFG_IST(ha, CFG_CTRL_24258081) &&
3154 3154 mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3155 3155 ha->flags |= FDISC_ENABLED;
3156 3156 }
3157 3157 /* Get VLAN ID, mac address */
3158 3158 if (CFG_IST(ha, CFG_CTRL_8081)) {
3159 3159 ha->fabric_params = mr.mb[7];
3160 3160 ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3161 3161 ha->fcoe_fcf_idx = mr.mb[10];
3162 3162 ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
3163 3163 ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
3164 3164 ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
3165 3165 ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
3166 3166 ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
3167 3167 ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
3168 3168 }
3169 3169 break;
3170 3170 default:
3171 3171 QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
3172 3172 "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
3173 3173 ha->d_id.b24);
3174 3174 rval = QL_FUNCTION_FAILED;
3175 3175 break;
3176 3176 }
3177 3177 ADAPTER_STATE_UNLOCK(ha);
3178 3178
3179 3179 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
3180 3180 CFG_CTRL_24258081))) {
3181 3181 mr.mb[1] = 0;
3182 3182 mr.mb[2] = 0;
3183 3183 rval = ql_data_rate(ha, &mr);
3184 3184 if (rval != QL_SUCCESS) {
3185 3185 EL(ha, "data_rate status=%xh\n", rval);
3186 3186 state = FC_STATE_FULL_SPEED;
3187 3187 } else {
3188 3188 ha->iidma_rate = mr.mb[1];
3189 3189 if (mr.mb[1] == IIDMA_RATE_1GB) {
3190 3190 state = FC_STATE_1GBIT_SPEED;
3191 3191 } else if (mr.mb[1] == IIDMA_RATE_2GB) {
3192 3192 state = FC_STATE_2GBIT_SPEED;
3193 3193 } else if (mr.mb[1] == IIDMA_RATE_4GB) {
3194 3194 state = FC_STATE_4GBIT_SPEED;
3195 3195 } else if (mr.mb[1] == IIDMA_RATE_8GB) {
3196 3196 state = FC_STATE_8GBIT_SPEED;
3197 3197 } else if (mr.mb[1] == IIDMA_RATE_10GB) {
3198 3198 state = FC_STATE_10GBIT_SPEED;
3199 3199 } else {
3200 3200 state = 0;
3201 3201 }
3202 3202 }
3203 3203 } else {
3204 3204 ha->iidma_rate = IIDMA_RATE_1GB;
3205 3205 state = FC_STATE_FULL_SPEED;
3206 3206 }
3207 3207 ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3208 3208 } else if (rval == MBS_COMMAND_ERROR) {
3209 3209 EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3210 3210 rval, mr.mb[1]);
3211 3211 }
3212 3212
3213 3213 if (rval != QL_SUCCESS) {
3214 3214 EL(ha, "failed, rval = %xh\n", rval);
3215 3215 } else {
3216 3216 bp = ha->loginparams.nport_ww_name.raw_wwn;
3217 3217 EL(ha, "topology=%xh, d_id=%xh, "
3218 3218 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3219 3219 ha->topology, ha->d_id.b24, bp[0], bp[1],
3220 3220 bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3221 3221 }
3222 3222 return (rval);
3223 3223 }
3224 3224
3225 3225 /*
3226 3226 * ql_configure_device_d_id
3227 3227 * Updates device loop ID.
3228 3228 * Also adds to device queue any new devices found on private loop.
3229 3229 *
3230 3230 * Input:
3231 3231 * ha = adapter state pointer.
3232 3232 *
3233 3233 * Returns:
3234 3234 * ql local function return status code.
3235 3235 *
3236 3236 * Context:
3237 3237 * Kernel context.
3238 3238 */
3239 3239 static int
3240 3240 ql_configure_device_d_id(ql_adapter_state_t *ha)
3241 3241 {
3242 3242 port_id_t d_id;
3243 3243 ql_link_t *link;
3244 3244 int rval;
3245 3245 int loop;
3246 3246 ql_tgt_t *tq;
3247 3247 ql_dev_id_list_t *list;
3248 3248 uint32_t list_size;
3249 3249 uint16_t index, loop_id;
3250 3250 ql_mbx_data_t mr;
3251 3251 uint8_t retries = MAX_DEVICE_LOST_RETRY;
3252 3252
3253 3253 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3254 3254
3255 3255 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3256 3256 list = kmem_zalloc(list_size, KM_SLEEP);
3257 3257 if (list == NULL) {
3258 3258 rval = QL_MEMORY_ALLOC_FAILED;
3259 3259 EL(ha, "failed, rval = %xh\n", rval);
3260 3260 return (rval);
3261 3261 }
3262 3262
3263 3263 do {
3264 3264 /*
3265 3265 * Get data from RISC code d_id list to init each device queue.
3266 3266 */
3267 3267 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3268 3268 if (rval != QL_SUCCESS) {
3269 3269 kmem_free(list, list_size);
3270 3270 EL(ha, "failed, rval = %xh\n", rval);
3271 3271 return (rval);
3272 3272 }
3273 3273
3274 3274 /* Acquire adapter state lock. */
3275 3275 ADAPTER_STATE_LOCK(ha);
3276 3276
3277 3277 /* Mark all queues as unusable. */
3278 3278 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3279 3279 for (link = ha->dev[index].first; link != NULL;
3280 3280 link = link->next) {
3281 3281 tq = link->base_address;
3282 3282 DEVICE_QUEUE_LOCK(tq);
3283 3283 if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3284 3284 !(ha->topology & QL_N_PORT)) {
3285 3285 tq->loop_id = (uint16_t)
3286 3286 (tq->loop_id | PORT_LOST_ID);
3287 3287 }
3288 3288 DEVICE_QUEUE_UNLOCK(tq);
3289 3289 }
3290 3290 }
3291 3291
3292 3292 /* If a device is not in the queues, add a new queue. */
3293 3293 for (index = 0; index < mr.mb[1]; index++) {
3294 3294 ql_dev_list(ha, list, index, &d_id, &loop_id);
3295 3295
3296 3296 if (VALID_DEVICE_ID(ha, loop_id)) {
3297 3297 tq = ql_dev_init(ha, d_id, loop_id);
3298 3298 if (tq != NULL) {
3299 3299 tq->loop_id = loop_id;
3300 3300
3301 3301 /* Test for fabric device. */
3302 3302 if (d_id.b.domain !=
3303 3303 ha->d_id.b.domain ||
3304 3304 d_id.b.area != ha->d_id.b.area) {
3305 3305 tq->flags |= TQF_FABRIC_DEVICE;
3306 3306 }
3307 3307
3308 3308 ADAPTER_STATE_UNLOCK(ha);
3309 3309 if (ql_get_port_database(ha, tq,
3310 3310 PDF_NONE) == QL_SUCCESS) {
3311 3311 ADAPTER_STATE_LOCK(ha);
3312 3312 tq->loop_id = (uint16_t)
3313 3313 (tq->loop_id &
3314 3314 ~PORT_LOST_ID);
3315 3315 } else {
3316 3316 ADAPTER_STATE_LOCK(ha);
3317 3317 }
3318 3318 }
3319 3319 }
3320 3320 }
3321 3321
3322 3322 /* 24xx does not report switch devices in ID list. */
3323 3323 if ((CFG_IST(ha, CFG_CTRL_24258081)) &&
3324 3324 ha->topology & (QL_F_PORT | QL_FL_PORT)) {
3325 3325 d_id.b24 = 0xfffffe;
3326 3326 tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3327 3327 if (tq != NULL) {
3328 3328 tq->flags |= TQF_FABRIC_DEVICE;
3329 3329 ADAPTER_STATE_UNLOCK(ha);
3330 3330 (void) ql_get_port_database(ha, tq, PDF_NONE);
3331 3331 ADAPTER_STATE_LOCK(ha);
3332 3332 }
3333 3333 d_id.b24 = 0xfffffc;
3334 3334 tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3335 3335 if (tq != NULL) {
3336 3336 tq->flags |= TQF_FABRIC_DEVICE;
3337 3337 ADAPTER_STATE_UNLOCK(ha);
3338 3338 if (ha->vp_index != 0) {
3339 3339 (void) ql_login_fport(ha, tq,
3340 3340 SNS_24XX_HDL, LFF_NONE, NULL);
3341 3341 }
3342 3342 (void) ql_get_port_database(ha, tq, PDF_NONE);
3343 3343 ADAPTER_STATE_LOCK(ha);
3344 3344 }
3345 3345 }
3346 3346
3347 3347 /* If F_port exists, allocate queue for FL_Port. */
3348 3348 index = ql_alpa_to_index[0xfe];
3349 3349 d_id.b24 = 0;
3350 3350 if (ha->dev[index].first != NULL) {
3351 3351 tq = ql_dev_init(ha, d_id, (uint16_t)
3352 3352 (CFG_IST(ha, CFG_CTRL_24258081) ?
3353 3353 FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
3354 3354 if (tq != NULL) {
3355 3355 tq->flags |= TQF_FABRIC_DEVICE;
3356 3356 ADAPTER_STATE_UNLOCK(ha);
3357 3357 (void) ql_get_port_database(ha, tq, PDF_NONE);
3358 3358 ADAPTER_STATE_LOCK(ha);
3359 3359 }
3360 3360 }
3361 3361
3362 3362 /* Allocate queue for broadcast. */
3363 3363 d_id.b24 = 0xffffff;
3364 3364 (void) ql_dev_init(ha, d_id, (uint16_t)
3365 3365 (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
3366 3366 IP_BROADCAST_LOOP_ID));
3367 3367
3368 3368 /* Check for any devices lost. */
3369 3369 loop = FALSE;
3370 3370 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3371 3371 for (link = ha->dev[index].first; link != NULL;
3372 3372 link = link->next) {
3373 3373 tq = link->base_address;
3374 3374
3375 3375 if ((tq->loop_id & PORT_LOST_ID) &&
3376 3376 !(tq->flags & (TQF_INITIATOR_DEVICE |
3377 3377 TQF_FABRIC_DEVICE))) {
3378 3378 loop = TRUE;
3379 3379 }
3380 3380 }
3381 3381 }
3382 3382
3383 3383 /* Release adapter state lock. */
3384 3384 ADAPTER_STATE_UNLOCK(ha);
3385 3385
3386 3386 /* Give devices time to recover. */
3387 3387 if (loop == TRUE) {
3388 3388 drv_usecwait(1000000);
3389 3389 }
3390 3390 } while (retries-- && loop == TRUE &&
3391 3391 !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3392 3392
3393 3393 kmem_free(list, list_size);
3394 3394
3395 3395 if (rval != QL_SUCCESS) {
3396 3396 EL(ha, "failed=%xh\n", rval);
3397 3397 } else {
3398 3398 /*EMPTY*/
3399 3399 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3400 3400 }
3401 3401
3402 3402 return (rval);
3403 3403 }
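/*
 * A minimal sketch of the mark-and-clear cycle used above: when the
 * topology is not N_Port, each queue without a PLOGI in progress is
 * first marked lost by OR-ing PORT_LOST_ID into its loop ID, and the
 * bit is cleared only for devices the firmware still reports and whose
 * port database can be fetched.
 *
 *	tq->loop_id |= PORT_LOST_ID;		assume lost
 *	if (ql_get_port_database(ha, tq, PDF_NONE) == QL_SUCCESS)
 *		tq->loop_id &= ~PORT_LOST_ID;	device answered
 *
 * Queues still carrying PORT_LOST_ID (and not flagged as initiator or
 * fabric devices) trigger up to MAX_DEVICE_LOST_RETRY one-second waits
 * before the retry loop gives up.
 */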
3404 3404
3405 3405 /*
3406 3406 * ql_dev_list
3407 3407 * Gets device d_id and loop ID from firmware device list.
3408 3408 *
3409 3409 * Input:
3410 3410 * ha: adapter state pointer.
3411 3411 * list: device list pointer.
3412 3412 * index: list index of device data.
3413 3413 * d_id: pointer for d_id data.
3414 3414 * id: pointer for loop ID.
3415 3415 *
3416 3416 * Context:
3417 3417 * Kernel context.
3418 3418 */
3419 3419 void
3420 3420 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3421 3421 uint32_t index, port_id_t *d_id, uint16_t *id)
3422 3422 {
3423 3423 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3424 3424 struct ql_24_dev_id *list24 = (struct ql_24_dev_id *)list;
3425 3425
3426 3426 d_id->b.al_pa = list24[index].al_pa;
3427 3427 d_id->b.area = list24[index].area;
3428 3428 d_id->b.domain = list24[index].domain;
3429 3429 *id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3430 3430 list24[index].n_port_hdl_h);
3431 3431
3432 3432 } else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3433 3433 struct ql_ex_dev_id *list23 = (struct ql_ex_dev_id *)list;
3434 3434
3435 3435 d_id->b.al_pa = list23[index].al_pa;
3436 3436 d_id->b.area = list23[index].area;
3437 3437 d_id->b.domain = list23[index].domain;
3438 3438 *id = CHAR_TO_SHORT(list23[index].loop_id_l,
3439 3439 list23[index].loop_id_h);
3440 3440
3441 3441 } else {
3442 3442 struct ql_dev_id *list22 = (struct ql_dev_id *)list;
3443 3443
3444 3444 d_id->b.al_pa = list22[index].al_pa;
3445 3445 d_id->b.area = list22[index].area;
3446 3446 d_id->b.domain = list22[index].domain;
3447 3447 *id = (uint16_t)list22[index].loop_id;
3448 3448 }
3449 3449 }
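/*
 * ql_dev_list() hides the three firmware ID-list layouts (24xx,
 * extended 23xx, legacy 22xx) behind one call. A typical caller walks
 * the list returned by ql_get_id_list(), as ql_configure_device_d_id()
 * does above; a minimal sketch:
 *
 *	for (index = 0; index < mr.mb[1]; index++) {
 *		ql_dev_list(ha, list, index, &d_id, &loop_id);
 *		if (VALID_DEVICE_ID(ha, loop_id))
 *			(void) ql_dev_init(ha, d_id, loop_id);
 *	}
 */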
3450 3450
3451 3451 /*
3452 3452 * ql_configure_fabric
3453 3453 * Setup fabric context.
3454 3454 *
3455 3455 * Input:
3456 3456 * ha = adapter state pointer.
3457 3457 *
3458 3458 * Returns:
3459 3459 * ql local function return status code.
3460 3460 *
3461 3461 * Context:
3462 3462 * Kernel context.
3463 3463 */
3464 3464 static int
3465 3465 ql_configure_fabric(ql_adapter_state_t *ha)
3466 3466 {
3467 3467 port_id_t d_id;
3468 3468 ql_tgt_t *tq;
3469 3469 int rval = QL_FUNCTION_FAILED;
3470 3470
3471 3471 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3472 3472
3473 3473 ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);
3474 3474
3475 3475 /* Test switch fabric controller present. */
3476 3476 d_id.b24 = FS_FABRIC_F_PORT;
3477 3477 tq = ql_d_id_to_queue(ha, d_id);
3478 3478 if (tq != NULL) {
3479 3479 /* Get port/node names of F_Port. */
3480 3480 (void) ql_get_port_database(ha, tq, PDF_NONE);
3481 3481
3482 3482 d_id.b24 = FS_NAME_SERVER;
3483 3483 tq = ql_d_id_to_queue(ha, d_id);
3484 3484 if (tq != NULL) {
3485 3485 (void) ql_get_port_database(ha, tq, PDF_NONE);
3486 3486 ha->topology = (uint8_t)
3487 3487 (ha->topology | QL_SNS_CONNECTION);
3488 3488 rval = QL_SUCCESS;
3489 3489 }
3490 3490 }
3491 3491
3492 3492 if (rval != QL_SUCCESS) {
3493 3493 EL(ha, "failed=%xh\n", rval);
3494 3494 } else {
3495 3495 /*EMPTY*/
3496 3496 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3497 3497 }
3498 3498 return (rval);
3499 3499 }
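/*
 * FS_FABRIC_F_PORT and FS_NAME_SERVER above are the Fibre Channel
 * well-known addresses 0xFFFFFE (fabric F_Port controller) and
 * 0xFFFFFC (directory/name server), the same d_id values written out
 * literally in ql_configure_device_d_id() above.
 */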
3500 3500
3501 3501 /*
3502 3502 * ql_reset_chip
3503 3503 * Reset ISP chip.
3504 3504 *
3505 3505 * Input:
3506 3506 * ha = adapter block pointer.
3507 3507 * All activity on chip must be already stopped.
3508 3508 * ADAPTER_STATE_LOCK must be released.
3509 3509 *
3510 3510 * Context:
3511 3511 * Interrupt or Kernel context, no mailbox commands allowed.
3512 3512 */
3513 3513 void
3514 3514 ql_reset_chip(ql_adapter_state_t *vha)
3515 3515 {
3516 3516 uint32_t cnt;
3517 3517 uint16_t cmd;
3518 3518 ql_adapter_state_t *ha = vha->pha;
3519 3519
3520 3520 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3521 3521
3522 3522 /*
3523 3523 * Accessing PCI space while not powered can cause panics
3524 3524 * on some platforms (e.g. Sun Blade 1000s).
3525 3525 */
3526 3526 if (ha->power_level == PM_LEVEL_D3) {
3527 3527 QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
3528 3528 return;
3529 3529 }
3530 3530
3531 3531 /* Reset all outbound mailbox registers */
3532 3532 for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3533 3533 WRT16_IO_REG(ha, mailbox_in[cnt], (uint16_t)0);
3534 3534 }
3535 3535
3536 3536 if (CFG_IST(ha, CFG_CTRL_8021)) {
3537 3537 ha->timeout_cnt = 0;
3538 3538 ql_8021_reset_chip(ha);
3539 3539 QL_PRINT_3(CE_CONT, "(%d): 8021 exit\n", ha->instance);
3540 3540 return;
3541 3541 }
3542 3542
3543 3543 /* Disable ISP interrupts. */
3544 3544 WRT16_IO_REG(ha, ictrl, 0);
3545 3545 ADAPTER_STATE_LOCK(ha);
3546 3546 ha->flags &= ~INTERRUPTS_ENABLED;
3547 3547 ADAPTER_STATE_UNLOCK(ha);
3548 3548
3549 3549 if (CFG_IST(ha, CFG_CTRL_242581)) {
3550 3550 RD32_IO_REG(ha, ictrl);
3551 3551 ql_reset_24xx_chip(ha);
3552 3552 QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
3553 3553 return;
3554 3554 }
3555 3555
3556 3556 /*
3557 3557 * We are going to reset the chip in the case of the 2300. That
3558 3558 * might cause a PBM error if a DMA transaction is in progress. One way of
3559 3559 * avoiding it is to disable Bus Master operation before we start
3560 3560 * the reset activity.
3561 3561 */
3562 3562 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3563 3563 cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3564 3564 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3565 3565
3566 3566 /* Pause RISC. */
3567 3567 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3568 3568 for (cnt = 0; cnt < 30000; cnt++) {
3569 3569 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3570 3570 break;
3571 3571 }
3572 3572 drv_usecwait(MILLISEC);
3573 3573 }
3574 3574
3575 3575 /*
3576 3576 * A call to ql_isr() can still happen through
3577 3577 * ql_mailbox_command(), so mark that we are (or will be)
3578 3578 * running from ROM code now.
3579 3579 */
3580 3580 TASK_DAEMON_LOCK(ha);
3581 3581 ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3582 3582 TASK_DAEMON_UNLOCK(ha);
3583 3583
3584 3584 /* Select FPM registers. */
3585 3585 WRT16_IO_REG(ha, ctrl_status, 0x20);
3586 3586
3587 3587 /* FPM Soft Reset. */
3588 3588 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3589 3589
3590 3590 /* Toggle FPM reset for 2300 */
3591 3591 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3592 3592 WRT16_IO_REG(ha, fpm_diag_config, 0);
3593 3593 }
3594 3594
3595 3595 /* Select frame buffer registers. */
3596 3596 WRT16_IO_REG(ha, ctrl_status, 0x10);
3597 3597
3598 3598 /* Reset frame buffer FIFOs. */
3599 3599 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3600 3600 WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3601 3601 /* read back fb_cmd until zero or 3 seconds max */
3602 3602 for (cnt = 0; cnt < 300000; cnt++) {
3603 3603 if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3604 3604 break;
3605 3605 }
3606 3606 drv_usecwait(10);
3607 3607 }
3608 3608 } else {
3609 3609 WRT16_IO_REG(ha, fb_cmd, 0xa000);
3610 3610 }
3611 3611
3612 3612 /* Select RISC module registers. */
3613 3613 WRT16_IO_REG(ha, ctrl_status, 0);
3614 3614
3615 3615 /* Reset RISC module. */
3616 3616 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3617 3617
3618 3618 /* Reset ISP semaphore. */
3619 3619 WRT16_IO_REG(ha, semaphore, 0);
3620 3620
3621 3621 /* Release RISC module. */
3622 3622 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3623 3623
3624 3624 /* Ensure mailbox registers are free. */
3625 3625 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3626 3626 WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3627 3627
3628 3628 /* clear the mailbox command pointer. */
3629 3629 ql_clear_mcp(ha);
3630 3630
3631 3631 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3632 3632 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3633 3633
3634 3634 /* Bus Master is disabled so chip reset is safe. */
3635 3635 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3636 3636 WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3637 3637 drv_usecwait(MILLISEC);
3638 3638
3639 3639 /* Wait for reset to finish. */
3640 3640 for (cnt = 0; cnt < 30000; cnt++) {
3641 3641 if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3642 3642 break;
3643 3643 }
3644 3644 drv_usecwait(MILLISEC);
3645 3645 }
3646 3646 }
3647 3647
3648 3648 /* Wait for RISC to recover from reset. */
3649 3649 for (cnt = 0; cnt < 30000; cnt++) {
3650 3650 if (RD16_IO_REG(ha, mailbox_out[0]) != MBS_BUSY) {
3651 3651 break;
3652 3652 }
3653 3653 drv_usecwait(MILLISEC);
3654 3654 }
3655 3655
3656 3656 /* restore bus master */
3657 3657 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3658 3658 cmd = (uint16_t)(cmd | PCI_COMM_ME);
3659 3659 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3660 3660
3661 3661 /* Disable RISC pause on FPM parity error. */
3662 3662 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3663 3663
3664 3664 /* Initialize probe registers */
3665 3665 if (CFG_IST(ha, CFG_SBUS_CARD)) {
3666 3666 /* Pause RISC. */
3667 3667 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3668 3668 for (cnt = 0; cnt < 30000; cnt++) {
3669 3669 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3670 3670 break;
3671 3671 } else {
3672 3672 drv_usecwait(MILLISEC);
3673 3673 }
3674 3674 }
3675 3675
3676 3676 /* Select FPM registers. */
3677 3677 WRT16_IO_REG(ha, ctrl_status, 0x30);
3678 3678
3679 3679 /* Set probe register */
3680 3680 WRT16_IO_REG(ha, mailbox_in[23], 0x204c);
3681 3681
3682 3682 /* Select RISC module registers. */
3683 3683 WRT16_IO_REG(ha, ctrl_status, 0);
3684 3684
3685 3685 /* Release RISC module. */
3686 3686 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3687 3687 }
3688 3688
3689 3689 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3690 3690 }
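/*
 * The register polls above share one bounded-wait idiom; assuming
 * MILLISEC is 1000 microseconds, 30000 iterations give roughly a
 * 30 second budget before the routine gives up and moves on
 * (reset_done() below is only a stand-in for the register test):
 *
 *	for (cnt = 0; cnt < 30000; cnt++) {
 *		if (reset_done(ha))
 *			break;
 *		drv_usecwait(MILLISEC);
 *	}
 */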
3691 3691
3692 3692 /*
3693 3693 * ql_reset_24xx_chip
3694 3694 * Reset ISP24xx chip.
3695 3695 *
3696 3696 * Input:
3697 3697 * ha = adapter block pointer.
3698 3698 * All activity on chip must be already stopped.
3699 3699 *
3700 3700 * Context:
3701 3701 * Interrupt or Kernel context, no mailbox commands allowed.
3702 3702 */
3703 3703 void
3704 3704 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3705 3705 {
3706 3706 uint32_t timer, stat;
3707 3707
3708 3708 /* Shutdown DMA. */
3709 3709 WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3710 3710
3711 3711 /* Wait for DMA to stop. */
3712 3712 for (timer = 0; timer < 30000; timer++) {
3713 3713 if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3714 3714 break;
3715 3715 }
3716 3716 drv_usecwait(100);
3717 3717 }
3718 3718
3719 3719 /* Stop the firmware. */
3720 3720 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3721 3721 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
3722 3722 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3723 3723 for (timer = 0; timer < 30000; timer++) {
3724 3724 stat = RD32_IO_REG(ha, risc2host);
3725 3725 if (stat & BIT_15) {
3726 3726 if ((stat & 0xff) < 0x12) {
3727 3727 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3728 3728 break;
3729 3729 }
3730 3730 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3731 3731 }
3732 3732 drv_usecwait(100);
3733 3733 }
3734 3734
3735 3735 /* Reset the chip. */
3736 3736 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
3737 3737 MWB_4096_BYTES);
3738 3738 drv_usecwait(100);
3739 3739
3740 3740 /* Wait for idle status from ROM firmware. */
3741 3741 for (timer = 0; timer < 30000; timer++) {
3742 3742 if (RD16_IO_REG(ha, mailbox_out[0]) == 0) {
3743 3743 break;
3744 3744 }
3745 3745 drv_usecwait(100);
3746 3746 }
3747 3747
3748 3748 /* Wait for reset to finish. */
3749 3749 for (timer = 0; timer < 30000; timer++) {
3750 3750 if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3751 3751 break;
3752 3752 }
3753 3753 drv_usecwait(100);
3754 3754 }
3755 3755
3756 3756 /* clear the mailbox command pointer. */
3757 3757 ql_clear_mcp(ha);
3758 3758
3759 3759 /* Ensure mailbox registers are free. */
3760 3760 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3761 3761 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3762 3762
3763 3763 if (ha->flags & MPI_RESET_NEEDED) {
3764 3764 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3765 3765 WRT16_IO_REG(ha, mailbox_in[0], MBC_RESTART_MPI);
3766 3766 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3767 3767 for (timer = 0; timer < 30000; timer++) {
3768 3768 stat = RD32_IO_REG(ha, risc2host);
3769 3769 if (stat & BIT_15) {
3770 3770 if ((stat & 0xff) < 0x12) {
3771 3771 WRT32_IO_REG(ha, hccr,
3772 3772 HC24_CLR_RISC_INT);
3773 3773 break;
3774 3774 }
3775 3775 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3776 3776 }
3777 3777 drv_usecwait(100);
3778 3778 }
3779 3779 ADAPTER_STATE_LOCK(ha);
3780 3780 ha->flags &= ~MPI_RESET_NEEDED;
3781 3781 ADAPTER_STATE_UNLOCK(ha);
3782 3782 }
3783 3783
3784 3784 /*
3785 3785 * Set flash write-protection.
3786 3786 */
3787 3787 if ((ha->flags & ONLINE) == 0) {
3788 3788 ql_24xx_protect_flash(ha);
3789 3789 }
3790 3790 }
3791 3791
3792 3792 /*
3793 3793 * ql_clear_mcp
3794 3794 * Carefully clear the mailbox command pointer in the ha struct.
3795 3795 *
3796 3796 * Input:
3797 3797 * ha = adapter block pointer.
3798 3798 *
3799 3799 * Context:
3800 3800 * Interrupt or Kernel context, no mailbox commands allowed.
3801 3801 */
3802 3802
3803 3803 static void
3804 3804 ql_clear_mcp(ql_adapter_state_t *ha)
3805 3805 {
3806 3806 uint32_t cnt;
3807 3807
3808 3808 /* Don't null ha->mcp without the lock, but don't hang either. */
3809 3809 if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
3810 3810 ha->mcp = NULL;
3811 3811 } else {
3812 3812 for (cnt = 0; cnt < 300000; cnt++) {
3813 3813 if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
3814 3814 ha->mcp = NULL;
3815 3815 MBX_REGISTER_UNLOCK(ha);
3816 3816 break;
3817 3817 } else {
3818 3818 drv_usecwait(10);
3819 3819 }
3820 3820 }
3821 3821 }
3822 3822 }
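/*
 * The try-lock loop above bounds the wait rather than risking a hang
 * from interrupt context: at 300000 attempts with a 10 microsecond
 * pause each, the worst case is roughly 3 seconds before ha->mcp is
 * simply left untouched.
 */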
3823 3823
3824 3824
3825 3825 /*
3826 3826 * ql_abort_isp
3827 3827 * Resets ISP and aborts all outstanding commands.
3828 3828 *
3829 3829 * Input:
3830 3830 * ha = adapter state pointer.
3831 3831 * DEVICE_QUEUE_LOCK must be released.
3832 3832 *
3833 3833 * Returns:
3834 3834 * ql local function return status code.
3835 3835 *
3836 3836 * Context:
3837 3837 * Kernel context.
3838 3838 */
3839 3839 int
3840 3840 ql_abort_isp(ql_adapter_state_t *vha)
3841 3841 {
3842 3842 ql_link_t *link, *link2;
3843 3843 ddi_devstate_t state;
3844 3844 uint16_t index;
3845 3845 ql_tgt_t *tq;
3846 3846 ql_lun_t *lq;
3847 3847 ql_srb_t *sp;
3848 3848 int rval = QL_SUCCESS;
3849 3849 ql_adapter_state_t *ha = vha->pha;
3850 3850
3851 3851 QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3852 3852
3853 3853 TASK_DAEMON_LOCK(ha);
3854 3854 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
3855 3855 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
3856 3856 (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
3857 3857 TASK_DAEMON_UNLOCK(ha);
3858 3858 return (rval);
3859 3859 }
3860 3860
3861 3861 ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
3862 3862 ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
3863 3863 FIRMWARE_LOADED);
3864 3864 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3865 3865 vha->task_daemon_flags |= LOOP_DOWN;
3866 3866 vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
3867 3867 LOOP_RESYNC_NEEDED);
3868 3868 }
3869 3869
3870 3870 TASK_DAEMON_UNLOCK(ha);
3871 3871
3872 3872 if (ha->mailbox_flags & MBX_BUSY_FLG) {
3873 3873 /* Acquire mailbox register lock. */
3874 3874 MBX_REGISTER_LOCK(ha);
3875 3875
3876 3876 /* Wake up mailbox routine. */
3877 3877 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
3878 3878 cv_broadcast(&ha->cv_mbx_intr);
3879 3879
3880 3880 /* Release mailbox register lock. */
3881 3881 MBX_REGISTER_UNLOCK(ha);
3882 3882
3883 3883 /* Wait for mailbox. */
3884 3884 for (index = 100; index &&
3885 3885 ha->mailbox_flags & MBX_ABORT; index--) {
3886 3886 drv_usecwait(50000);
3887 3887 }
3888 3888 }
3889 3889
3890 3890 /* Wait for commands to end gracefully if not in panic. */
3891 3891 if (ha->flags & PARITY_ERROR) {
3892 3892 ADAPTER_STATE_LOCK(ha);
3893 3893 ha->flags &= ~PARITY_ERROR;
3894 3894 ADAPTER_STATE_UNLOCK(ha);
3895 3895 } else if (ddi_in_panic() == 0) {
3896 3896 ql_cmd_wait(ha);
3897 3897 }
3898 3898
3899 3899 /* Shutdown IP. */
3900 3900 if (ha->flags & IP_INITIALIZED) {
3901 3901 (void) ql_shutdown_ip(ha);
3902 3902 }
3903 3903
3904 3904 /* Reset the chip. */
3905 3905 ql_reset_chip(ha);
3906 3906
3907 3907 /*
3908 3908 * Even though we have waited for outstanding commands to complete,
3909 3909 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
3910 3910 * there could still be an interrupt thread active. The interrupt
3911 3911 * lock will prevent us from getting an sp from the outstanding
3912 3912 * cmds array that the ISR may be using.
3913 3913 */
3914 3914
3915 3915 /* Place all commands in outstanding cmd list on device queue. */
3916 3916 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
3917 3917 REQUEST_RING_LOCK(ha);
3918 3918 INTR_LOCK(ha);
3919 3919 if ((link = ha->pending_cmds.first) != NULL) {
3920 3920 sp = link->base_address;
3921 3921 ql_remove_link(&ha->pending_cmds, &sp->cmd);
3922 3922
3923 3923 REQUEST_RING_UNLOCK(ha);
3924 3924 index = 0;
3925 3925 } else {
3926 3926 REQUEST_RING_UNLOCK(ha);
3927 3927 if ((sp = ha->outstanding_cmds[index]) == NULL) {
3928 3928 INTR_UNLOCK(ha);
3929 3929 continue;
3930 3930 }
3931 3931 }
3932 3932
3933 3933 /*
3934 3934 * It's not obvious, but the index for commands pulled from the
3935 3935 * pending list will be zero, and that entry in the outstanding
3936 3936 * array is unused, so nulling it is "no harm, no foul".
3937 3937 */
3938 3938
3939 3939 ha->outstanding_cmds[index] = NULL;
3940 3940 sp->handle = 0;
3941 3941 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3942 3942
3943 3943 INTR_UNLOCK(ha);
3944 3944
3945 3945 /* If the command timed out. */
3946 3946 if (sp->flags & SRB_COMMAND_TIMEOUT) {
3947 3947 sp->pkt->pkt_reason = CS_TIMEOUT;
3948 3948 sp->flags &= ~SRB_RETRY;
3949 3949 sp->flags |= SRB_ISP_COMPLETED;
3950 3950
3951 3951 /* Call done routine to handle completion. */
3952 3952 ql_done(&sp->cmd);
3953 3953 continue;
3954 3954 }
3955 3955
3956 3956 /* Acquire target queue lock. */
3957 3957 lq = sp->lun_queue;
3958 3958 tq = lq->target_queue;
3959 3959 DEVICE_QUEUE_LOCK(tq);
3960 3960
3961 3961 /* Reset watchdog time. */
3962 3962 sp->wdg_q_time = sp->init_wdg_q_time;
3963 3963
3964 3964 /* Place request back on top of device queue. */
3965 3965 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
3966 3966 SRB_RETRY);
3967 3967
3968 3968 ql_add_link_t(&lq->cmd, &sp->cmd);
3969 3969 sp->flags |= SRB_IN_DEVICE_QUEUE;
3970 3970
3971 3971 /* Release target queue lock. */
3972 3972 DEVICE_QUEUE_UNLOCK(tq);
3973 3973 }
3974 3974
3975 3975 /*
3976 3976 * Clear per-LUN active counts, because there should not be
3977 3977 * any I/O outstanding at this time.
3978 3978 */
3979 3979 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3980 3980 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3981 3981 link = vha->dev[index].first;
3982 3982 while (link != NULL) {
3983 3983 tq = link->base_address;
3984 3984 link = link->next;
3985 3985 DEVICE_QUEUE_LOCK(tq);
3986 3986 tq->outcnt = 0;
3987 3987 tq->flags &= ~TQF_QUEUE_SUSPENDED;
3988 3988 for (link2 = tq->lun_queues.first;
3989 3989 link2 != NULL; link2 = link2->next) {
3990 3990 lq = link2->base_address;
3991 3991 lq->lun_outcnt = 0;
3992 3992 lq->flags &= ~LQF_UNTAGGED_PENDING;
3993 3993 }
3994 3994 DEVICE_QUEUE_UNLOCK(tq);
3995 3995 }
3996 3996 }
3997 3997 }
3998 3998
3999 3999 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
4000 4000 if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) {
4001 4001 rval = ql_load_isp_firmware(ha);
4002 4002 }
4003 4003 }
4004 4004
4005 4005 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
4006 4006 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
4007 4007 (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
4008 4008
4009 4009 /* Clear any reset/abort flags that may have been set. */
4010 4010 TASK_DAEMON_LOCK(ha);
4011 4011 ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
4012 4012 ABORT_ISP_ACTIVE);
4013 4013 TASK_DAEMON_UNLOCK(ha);
4014 4014
4015 4015 /* Enable ISP interrupts. */
4016 4016 if (CFG_IST(ha, CFG_CTRL_8021)) {
4017 4017 ql_8021_enable_intrs(ha);
4018 4018 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
4019 4019 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
4020 4020 } else {
4021 4021 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
4022 4022 }
4023 4023
4024 4024 ADAPTER_STATE_LOCK(ha);
4025 4025 ha->flags |= INTERRUPTS_ENABLED;
4026 4026 ADAPTER_STATE_UNLOCK(ha);
4027 4027
4028 4028 /* Set loop online, if it really is. */
4029 4029 ql_loop_online(ha);
4030 4030
4031 4031 state = ddi_get_devstate(ha->dip);
4032 4032 if (state != DDI_DEVSTATE_UP) {
4033 4033 /*EMPTY*/
4034 4034 ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
4035 4035 DDI_DEVICE_FAULT, "Device reset succeeded");
4036 4036 }
4037 4037 } else {
4038 4038 /* Enable ISP interrupts. */
4039 4039 if (CFG_IST(ha, CFG_CTRL_8021)) {
4040 4040 ql_8021_enable_intrs(ha);
4041 4041 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
4042 4042 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
4043 4043 } else {
4044 4044 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
4045 4045 }
4046 4046
4047 4047 ADAPTER_STATE_LOCK(ha);
4048 4048 ha->flags |= INTERRUPTS_ENABLED;
4049 4049 ADAPTER_STATE_UNLOCK(ha);
4050 4050
4051 4051 TASK_DAEMON_LOCK(ha);
4052 4052 ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
4053 4053 ha->task_daemon_flags |= LOOP_DOWN;
4054 4054 TASK_DAEMON_UNLOCK(ha);
4055 4055
4056 4056 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4057 4057 }
4058 4058
4059 4059 if (rval != QL_SUCCESS) {
4060 4060 EL(ha, "failed, rval = %xh\n", rval);
4061 4061 } else {
4062 4062 /*EMPTY*/
4063 4063 QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
4064 4064 }
4065 4065 return (rval);
4066 4066 }
4067 4067
4068 4068 /*
4069 4069 * ql_vport_control
4070 4070 * Issue Virtual Port Control command.
4071 4071 *
4072 4072 * Input:
4073 4073 * ha = virtual adapter state pointer.
4074 4074 * cmd = control command.
4075 4075 *
4076 4076 * Returns:
4077 4077 * ql local function return status code.
4078 4078 *
4079 4079 * Context:
4080 4080 * Kernel context.
4081 4081 */
4082 4082 int
4083 4083 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4084 4084 {
4085 4085 ql_mbx_iocb_t *pkt;
4086 4086 uint8_t bit;
4087 4087 int rval;
4088 4088 uint32_t pkt_size;
4089 4089
4090 4090 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4091 4091
4092 4092 if (ha->vp_index != 0) {
4093 4093 pkt_size = sizeof (ql_mbx_iocb_t);
4094 4094 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4095 4095 if (pkt == NULL) {
4096 4096 EL(ha, "failed, kmem_zalloc\n");
4097 4097 return (QL_MEMORY_ALLOC_FAILED);
4098 4098 }
4099 4099
4100 4100 pkt->vpc.entry_type = VP_CONTROL_TYPE;
4101 4101 pkt->vpc.entry_count = 1;
4102 4102 pkt->vpc.command = cmd;
4103 4103 pkt->vpc.vp_count = 1;
4104 4104 bit = (uint8_t)(ha->vp_index - 1);
4105 4105 pkt->vpc.vp_index[bit / 8] = (uint8_t)
4106 4106 (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4107 4107
4108 4108 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4109 4109 if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4110 4110 rval = QL_COMMAND_ERROR;
4111 4111 }
4112 4112
4113 4113 kmem_free(pkt, pkt_size);
4114 4114 } else {
4115 4115 rval = QL_SUCCESS;
4116 4116 }
4117 4117
4118 4118 if (rval != QL_SUCCESS) {
4119 4119 EL(ha, "failed, rval = %xh\n", rval);
4120 4120 } else {
4121 4121 /*EMPTY*/
4122 4122 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4123 4123 ha->vp_index);
4124 4124 }
4125 4125 return (rval);
4126 4126 }
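/*
 * The VP index bitmap math above selects one bit per virtual port
 * (with BIT_0 == 0x1): virtual port 1 maps to bit 0, and the physical
 * port (vp_index 0) skips the IOCB entirely. For example, with
 * ha->vp_index == 5:
 *
 *	bit = 5 - 1 = 4
 *	vp_index[4 / 8] |= BIT_0 << (4 % 8)	->  vp_index[0] |= 0x10
 */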
4127 4127
4128 4128 /*
4129 4129 * ql_vport_modify
4130 4130 * Issue Modify Virtual Port command.
4131 4131 *
4132 4132 * Input:
4133 4133 * ha = virtual adapter state pointer.
4134 4134 * cmd = command.
4135 4135 * opt = option.
4136 4136 *
4137 4137 * Context:
4138 4138 * Interrupt or Kernel context, no mailbox commands allowed.
4139 4139 */
4140 4140 int
4141 4141 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4142 4142 {
4143 4143 ql_mbx_iocb_t *pkt;
4144 4144 int rval;
4145 4145 uint32_t pkt_size;
4146 4146
4147 4147 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4148 4148
4149 4149 pkt_size = sizeof (ql_mbx_iocb_t);
4150 4150 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4151 4151 if (pkt == NULL) {
4152 4152 EL(ha, "failed, kmem_zalloc\n");
4153 4153 return (QL_MEMORY_ALLOC_FAILED);
4154 4154 }
4155 4155
4156 4156 pkt->vpm.entry_type = VP_MODIFY_TYPE;
4157 4157 pkt->vpm.entry_count = 1;
4158 4158 pkt->vpm.command = cmd;
4159 4159 pkt->vpm.vp_count = 1;
4160 4160 pkt->vpm.first_vp_index = ha->vp_index;
4161 4161 pkt->vpm.first_options = opt;
4162 4162 bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4163 4163 8);
4164 4164 bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4165 4165 8);
4166 4166
4167 4167 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4168 4168 if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4169 4169 EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4170 4170 pkt->vpm.status);
4171 4171 rval = QL_COMMAND_ERROR;
4172 4172 }
4173 4173
4174 4174 kmem_free(pkt, pkt_size);
4175 4175
4176 4176 if (rval != QL_SUCCESS) {
4177 4177 EL(ha, "failed, rval = %xh\n", rval);
4178 4178 } else {
4179 4179 /*EMPTY*/
4180 4180 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4181 4181 ha->vp_index);
4182 4182 }
4183 4183 return (rval);
4184 4184 }
4185 4185
4186 4186 /*
4187 4187 * ql_vport_enable
4188 4188 * Enable virtual port.
4189 4189 *
4190 4190 * Input:
4191 4191 * ha = virtual adapter state pointer.
4192 4192 *
4193 4193 * Context:
4194 4194 * Kernel context.
4195 4195 */
4196 4196 int
4197 4197 ql_vport_enable(ql_adapter_state_t *ha)
4198 4198 {
4199 4199 int timer;
4200 4200
4201 4201 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4202 4202
4203 4203 ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4204 4204 TASK_DAEMON_LOCK(ha);
4205 4205 ha->task_daemon_flags |= LOOP_DOWN;
4206 4206 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4207 4207 TASK_DAEMON_UNLOCK(ha);
4208 4208
4209 4209 ADAPTER_STATE_LOCK(ha);
4210 4210 ha->flags |= VP_ENABLED;
4211 4211 ADAPTER_STATE_UNLOCK(ha);
4212 4212
4213 4213 if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4214 4214 VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4215 4215 QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
4216 4216 ha->instance, ha->vp_index);
4217 4217 return (QL_FUNCTION_FAILED);
4218 4218 }
4219 4219 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4220 4220 /* Wait for loop to come up. */
4221 4221 for (timer = 0; timer < 3000 &&
4222 4222 !(ha->task_daemon_flags & STATE_ONLINE);
4223 4223 timer++) {
4224 4224 delay(1);
4225 4225 }
4226 4226 }
4227 4227
4228 4228 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4229 4229
4230 4230 return (QL_SUCCESS);
4231 4231 }
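/*
 * The enable path above waits for STATE_ONLINE through at most 3000
 * delay(1) calls; assuming the usual 100 Hz system clock, each tick is
 * about 10 milliseconds, so the cap is roughly 30 seconds.
 */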
4232 4232
4233 4233 /*
4234 4234 * ql_vport_create
4235 4235 * Create virtual port context.
4236 4236 *
4237 4237 * Input:
4238 4238 * ha: parent adapter state pointer.
4239 4239 * index: virtual port index number.
4240 4240 *
4241 4241 * Context:
4242 4242 * Kernel context.
4243 4243 */
4244 4244 ql_adapter_state_t *
4245 4245 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4246 4246 {
4247 4247 ql_adapter_state_t *vha;
4248 4248
4249 4249 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4250 4250
4251 4251 /* Inherit the parent's data. */
4252 4252 vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4253 4253
4254 4254 ADAPTER_STATE_LOCK(ha);
4255 4255 bcopy(ha, vha, sizeof (ql_adapter_state_t));
4256 4256 vha->pi_attrs = NULL;
4257 4257 vha->ub_outcnt = 0;
4258 4258 vha->ub_allocated = 0;
4259 4259 vha->flags = 0;
4260 4260 vha->task_daemon_flags = 0;
4261 4261 ha->vp_next = vha;
4262 4262 vha->pha = ha;
4263 4263 vha->vp_index = index;
4264 4264 ADAPTER_STATE_UNLOCK(ha);
4265 4265
4266 4266 vha->hba.next = NULL;
4267 4267 vha->hba.prev = NULL;
4268 4268 vha->hba.base_address = vha;
4269 4269 vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4270 4270 vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4271 4271 KM_SLEEP);
4272 4272 vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4273 4273 KM_SLEEP);
4274 4274
4275 4275 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4276 4276
4277 4277 return (vha);
4278 4278 }
4279 4279
4280 4280 /*
4281 4281 * ql_vport_destroy
4282 4282 * Destroys virtual port context.
4283 4283 *
4284 4284 * Input:
4285 4285 * ha = virtual adapter state pointer.
4286 4286 *
4287 4287 * Context:
4288 4288 * Kernel context.
4289 4289 */
4290 4290 void
4291 4291 ql_vport_destroy(ql_adapter_state_t *ha)
4292 4292 {
4293 4293 ql_adapter_state_t *vha;
4294 4294
4295 4295 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4296 4296
4297 4297 /* Remove port from list. */
4298 4298 ADAPTER_STATE_LOCK(ha);
4299 4299 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4300 4300 if (vha->vp_next == ha) {
4301 4301 vha->vp_next = ha->vp_next;
4302 4302 break;
4303 4303 }
4304 4304 }
4305 4305 ADAPTER_STATE_UNLOCK(ha);
4306 4306
4307 4307 if (ha->ub_array != NULL) {
4308 4308 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4309 4309 }
4310 4310 if (ha->dev != NULL) {
4311 4311 kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4312 4312 }
4313 4313 kmem_free(ha, sizeof (ql_adapter_state_t));
4314 4314
4315 4315 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4316 4316 }
4317 4317
4318 4318 /*
4319 4319 * ql_mps_reset
4320 4320 * Reset MPS for FCoE functions.
4321 4321 *
4322 4322 * Input:
4323 4323 * ha = virtual adapter state pointer.
4324 4324 *
4325 4325 * Context:
4326 4326 * Kernel context.
4327 4327 */
4328 4328 static void
4329 4329 ql_mps_reset(ql_adapter_state_t *ha)
4330 4330 {
4331 4331 uint32_t data, dctl = 1000;
4332 4332
4333 4333 do {
4334 4334 if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4335 4335 QL_SUCCESS) {
4336 4336 return;
4337 4337 }
4338 4338 if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4339 4339 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4340 4340 return;
4341 4341 }
4342 4342 } while (!(data & BIT_0));
4343 4343
4344 4344 if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4345 4345 dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4346 4346 if ((data & 0xe0) != (dctl & 0xe0)) {
4347 4347 data &= 0xff1f;
4348 4348 data |= dctl & 0xe0;
4349 4349 (void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4350 4350 }
4351 4351 }
4352 4352 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4353 4353 }
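/*
 * The sequence above is a simple semaphore handshake through RISC RAM
 * word 0x7c00: write 1 and poll until BIT_0 reads back, mirror bits
 * 5-7 of PCI config offset 0x54 into word 0x7A15 if they differ, then
 * write 0 to release.
 */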
1319 lines elided