7127 remove -Wno-missing-braces from Makefile.uts
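The one-line change in this diff adds inner braces to the modlinkage initializer so the file compiles cleanly under gcc's -Wmissing-braces, which is what lets the -Wno-missing-braces override be dropped from Makefile.uts. As a minimal standalone sketch (the struct layout and names below are invented for illustration only and are not the driver's or <sys/modctl.h>'s definitions), the warning fires when a nested array member is initialized without its own braces:

	#include <stddef.h>

	/* illustration only: a struct with a nested array member */
	struct linkage {
		int	rev;		/* interface revision */
		void	*link[7];	/* NULL-terminated list of linkage pointers */
	};

	static int a_module;

	/* flat initializer: legal C, but the array member has no braces,
	 * so gcc -Wmissing-braces warns */
	static struct linkage flat = { 1, &a_module, NULL };

	/* fully braced initializer: warning-clean, identical in effect */
	static struct linkage braced = { 1, { &a_module, NULL } };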
--- old/usr/src/uts/common/io/comstar/port/qlt/qlt.c
+++ new/usr/src/uts/common/io/comstar/port/qlt/qlt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 QLogic Corporation. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29 29 */
30 30
31 31 #include <sys/conf.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/stat.h>
34 34 #include <sys/pci.h>
35 35 #include <sys/sunddi.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/file.h>
38 38 #include <sys/cred.h>
39 39 #include <sys/byteorder.h>
40 40 #include <sys/atomic.h>
41 41 #include <sys/scsi/scsi.h>
42 42
43 43 #include <sys/stmf_defines.h>
44 44 #include <sys/fct_defines.h>
45 45 #include <sys/stmf.h>
46 46 #include <sys/stmf_ioctl.h>
47 47 #include <sys/portif.h>
48 48 #include <sys/fct.h>
49 49
50 50 #include "qlt.h"
51 51 #include "qlt_dma.h"
52 52 #include "qlt_ioctl.h"
53 53 #include "qlt_open.h"
54 54
55 55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 57 static void qlt_enable_intr(qlt_state_t *);
58 58 static void qlt_disable_intr(qlt_state_t *);
59 59 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 60 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 61 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62 62 uint32_t word_count, uint32_t risc_addr);
63 63 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 64 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 65 uint32_t dma_size);
66 66 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 67 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 68 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
69 69 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70 70 stmf_state_change_info_t *ssci);
71 71 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 72 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 73 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 74 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
75 75 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 76 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 77 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 78 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79 79 uint8_t *rsp);
80 80 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 81 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 82 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
83 83 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
84 84 static void qlt_verify_fw(qlt_state_t *qlt);
85 85 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 86 fct_status_t qlt_port_start(caddr_t arg);
87 87 fct_status_t qlt_port_stop(caddr_t arg);
88 88 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 89 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 90 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91 91 fct_link_info_t *li);
92 92 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 93 static fct_status_t qlt_force_lip(qlt_state_t *);
94 94 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 95 fct_flogi_xchg_t *fx);
96 96 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 97 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
98 98 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99 99 fct_remote_port_t *rp, fct_cmd_t *login);
100 100 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101 101 fct_remote_port_t *rp);
102 102 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 103 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 104 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105 105 fct_cmd_t *cmd, int terminate);
106 106 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 107 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 108 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109 109 fct_cmd_t *cmd, uint32_t flags);
110 110 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 111 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 112 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 113 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 114 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 115 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
116 116 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117 117 stmf_data_buf_t *dbuf, uint32_t ioflags);
118 118 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 119 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 120 static void qlt_release_intr(qlt_state_t *qlt);
121 121 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 122 static void qlt_destroy_mutex(qlt_state_t *qlt);
123 123
124 124 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125 125 uint32_t words);
126 126 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127 127 caddr_t buf, uint_t size_left);
128 128 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129 129 caddr_t buf, uint_t size_left);
130 130 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131 131 int count, uint_t size_left);
132 132 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133 133 cred_t *credp, int *rval);
134 134 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 135 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136 136
137 137 static int qlt_setup_msi(qlt_state_t *qlt);
138 138 static int qlt_setup_msix(qlt_state_t *qlt);
139 139
140 140 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 141 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 142 static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 143 static char *qlt_find_trace_start(qlt_state_t *qlt);
144 144
145 145 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 146 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 147 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148 148 char **prop_val);
149 149 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 150 static int qlt_convert_string_to_ull(char *prop, int radix,
151 151 u_longlong_t *result);
152 152 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 153 static int qlt_quiesce(dev_info_t *dip);
154 154 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155 155 uint32_t);
156 156 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157 157 uint32_t *);
158 158 static void qlt_mps_reset(qlt_state_t *qlt);
159 159 static void qlt_properties(qlt_state_t *qlt);
160 160
161 161
162 162 #define SETELSBIT(bmp, els) (bmp)[((els) >> 3) & 0x1F] = \
163 163 (uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
164 164
165 165 int qlt_enable_msix = 0;
166 166 int qlt_enable_msi = 1;
167 167
168 168
169 169 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
170 170
171 171 /* Array to quickly calculate next free buf index to use */
172 172 #if 0
173 173 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
174 174 #endif
175 175
176 176 static struct cb_ops qlt_cb_ops = {
177 177 qlt_open,
178 178 qlt_close,
179 179 nodev,
180 180 nodev,
181 181 nodev,
182 182 nodev,
183 183 nodev,
184 184 qlt_ioctl,
185 185 nodev,
186 186 nodev,
187 187 nodev,
188 188 nochpoll,
189 189 ddi_prop_op,
190 190 0,
191 191 D_MP | D_NEW
192 192 };
193 193
194 194 static struct dev_ops qlt_ops = {
195 195 DEVO_REV,
196 196 0,
197 197 nodev,
198 198 nulldev,
199 199 nulldev,
200 200 qlt_attach,
201 201 qlt_detach,
202 202 nodev,
203 203 &qlt_cb_ops,
204 204 NULL,
205 205 ddi_power,
206 206 qlt_quiesce
207 207 };
208 208
209 209 #ifndef PORT_SPEED_10G
210 210 #define PORT_SPEED_10G 16
211 211 #endif
212 212
213 213 static struct modldrv modldrv = {
214 214 &mod_driverops,
215 215 QLT_NAME" "QLT_VERSION,
216 216 &qlt_ops,
217 217 };
218 218
219 219 static struct modlinkage modlinkage = {
220 - MODREV_1, &modldrv, NULL
220 + MODREV_1, { &modldrv, NULL }
221 221 };
222 222
223 223 void *qlt_state = NULL;
224 224 kmutex_t qlt_global_lock;
225 225 static uint32_t qlt_loaded_counter = 0;
226 226
227 227 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
228 228 "-X Mode 1 133", "--Invalid--",
229 229 "-X Mode 2 66", "-X Mode 2 100",
230 230 "-X Mode 2 133", " 66" };
231 231
232 232 /* Always use 64 bit DMA. */
233 233 static ddi_dma_attr_t qlt_queue_dma_attr = {
234 234 DMA_ATTR_V0, /* dma_attr_version */
235 235 0, /* low DMA address range */
236 236 0xffffffffffffffff, /* high DMA address range */
237 237 0xffffffff, /* DMA counter register */
238 238 64, /* DMA address alignment */
239 239 0xff, /* DMA burstsizes */
240 240 1, /* min effective DMA size */
241 241 0xffffffff, /* max DMA xfer size */
242 242 0xffffffff, /* segment boundary */
243 243 1, /* s/g list length */
244 244 1, /* granularity of device */
245 245 0 /* DMA transfer flags */
246 246 };
247 247
248 248 /* qlogic logging */
249 249 int enable_extended_logging = 0;
250 250
251 251 static char qlt_provider_name[] = "qlt";
252 252 static struct stmf_port_provider *qlt_pp;
253 253
254 254 int
255 255 _init(void)
256 256 {
257 257 int ret;
258 258
259 259 ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 260 if (ret == 0) {
261 261 mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 262 qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 263 STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 264 qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 265 qlt_pp->pp_name = qlt_provider_name;
266 266 if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 267 stmf_free(qlt_pp);
268 268 mutex_destroy(&qlt_global_lock);
269 269 ddi_soft_state_fini(&qlt_state);
270 270 return (EIO);
271 271 }
272 272 ret = mod_install(&modlinkage);
273 273 if (ret != 0) {
274 274 (void) stmf_deregister_port_provider(qlt_pp);
275 275 stmf_free(qlt_pp);
276 276 mutex_destroy(&qlt_global_lock);
277 277 ddi_soft_state_fini(&qlt_state);
278 278 }
279 279 }
280 280 return (ret);
281 281 }
282 282
283 283 int
284 284 _fini(void)
285 285 {
286 286 int ret;
287 287
288 288 if (qlt_loaded_counter)
289 289 return (EBUSY);
290 290 ret = mod_remove(&modlinkage);
291 291 if (ret == 0) {
292 292 (void) stmf_deregister_port_provider(qlt_pp);
293 293 stmf_free(qlt_pp);
294 294 mutex_destroy(&qlt_global_lock);
295 295 ddi_soft_state_fini(&qlt_state);
296 296 }
297 297 return (ret);
298 298 }
299 299
300 300 int
301 301 _info(struct modinfo *modinfop)
302 302 {
303 303 return (mod_info(&modlinkage, modinfop));
304 304 }
305 305
306 306
307 307 static int
308 308 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
309 309 {
310 310 int instance;
311 311 qlt_state_t *qlt;
312 312 ddi_device_acc_attr_t dev_acc_attr;
313 313 uint16_t did;
314 314 uint16_t val;
315 315 uint16_t mr;
316 316 size_t discard;
317 317 uint_t ncookies;
318 318 int max_read_size;
319 319 int max_payload_size;
320 320 fct_status_t ret;
321 321
322 322 /* No support for suspend resume yet */
323 323 if (cmd != DDI_ATTACH)
324 324 return (DDI_FAILURE);
325 325 instance = ddi_get_instance(dip);
326 326
327 327 if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
328 328 return (DDI_FAILURE);
329 329 }
330 330
331 331 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
332 332 NULL) {
333 333 goto attach_fail_1;
334 334 }
335 335
336 336 qlt->instance = instance;
337 337
338 338 qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
339 339 qlt->dip = dip;
340 340
341 341 if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
342 342 cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
343 343 goto attach_fail_1;
344 344 }
345 345
346 346 EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
347 347
348 348 if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
349 349 goto attach_fail_2;
350 350 }
351 351 did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
352 352 if ((did != 0x2422) && (did != 0x2432) &&
353 353 (did != 0x8432) && (did != 0x2532) &&
354 354 (did != 0x8001)) {
355 355 cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
356 356 instance, did);
357 357 goto attach_fail_4;
358 358 }
359 359
360 360 if ((did & 0xFF00) == 0x8000)
361 361 qlt->qlt_81xx_chip = 1;
362 362 else if ((did & 0xFF00) == 0x2500)
363 363 qlt->qlt_25xx_chip = 1;
364 364
365 365 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
366 366 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
367 367 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
368 368 if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
369 369 &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
370 370 goto attach_fail_4;
371 371 }
372 372 if (did == 0x2422) {
373 373 uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
374 374 uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
375 375 pci_bits >>= 8;
376 376 pci_bits &= 0xf;
377 377 if ((pci_bits == 3) || (pci_bits == 7)) {
378 378 cmn_err(CE_NOTE,
379 379 "!qlt(%d): HBA running at PCI%sMHz (%d)",
380 380 instance, pci_speeds[pci_bits], pci_bits);
381 381 } else {
382 382 cmn_err(CE_WARN,
383 383 "qlt(%d): HBA running at PCI%sMHz %s(%d)",
384 384 instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
385 385 "(Invalid)", ((pci_bits == 0) ||
386 386 (pci_bits == 8)) ? (slot ? "64 bit slot " :
387 387 "32 bit slot ") : "", pci_bits);
388 388 }
389 389 }
390 390 if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
391 391 cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
392 392 (unsigned long long)ret);
393 393 goto attach_fail_5;
394 394 }
395 395
396 396 qlt_properties(qlt);
397 397
398 398 if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
399 399 0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
400 400 goto attach_fail_5;
401 401 }
402 402 if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
403 403 &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
404 404 &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
405 405 DDI_SUCCESS) {
406 406 goto attach_fail_6;
407 407 }
408 408 if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
409 409 qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
410 410 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
411 411 &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
412 412 goto attach_fail_7;
413 413 }
414 414 if (ncookies != 1)
415 415 goto attach_fail_8;
416 416 qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
417 417 qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
418 418 qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
419 419 qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
420 420
421 421 /* mutexes are initialized in this function */
422 422 if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
423 423 goto attach_fail_8;
424 424
425 425 (void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
426 426 "qlt%d", instance);
427 427 (void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
428 428 "%s,0", qlt->qlt_minor_name);
429 429
430 430 if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
431 431 instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
432 432 goto attach_fail_9;
433 433 }
434 434
435 435 cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
436 436 cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
437 437 mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
438 438
439 439 /* Setup PCI cfg space registers */
440 440 max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
441 441 if (max_read_size == 11)
442 442 goto over_max_read_xfer_setting;
443 443 if (did == 0x2422) {
444 444 if (max_read_size == 512)
445 445 val = 0;
446 446 else if (max_read_size == 1024)
447 447 val = 1;
448 448 else if (max_read_size == 2048)
449 449 val = 2;
450 450 else if (max_read_size == 4096)
451 451 val = 3;
452 452 else {
453 453 cmn_err(CE_WARN, "qlt(%d) malformed "
454 454 "pci-max-read-request in qlt.conf. Valid values "
455 455 "for this HBA are 512/1024/2048/4096", instance);
456 456 goto over_max_read_xfer_setting;
457 457 }
458 458 mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
459 459 mr = (uint16_t)(mr & 0xfff3);
460 460 mr = (uint16_t)(mr | (val << 2));
461 461 PCICFG_WR16(qlt, 0x4E, mr);
462 462 } else if ((did == 0x2432) || (did == 0x8432) ||
463 463 (did == 0x2532) || (did == 0x8001)) {
464 464 if (max_read_size == 128)
465 465 val = 0;
466 466 else if (max_read_size == 256)
467 467 val = 1;
468 468 else if (max_read_size == 512)
469 469 val = 2;
470 470 else if (max_read_size == 1024)
471 471 val = 3;
472 472 else if (max_read_size == 2048)
473 473 val = 4;
474 474 else if (max_read_size == 4096)
475 475 val = 5;
476 476 else {
477 477 cmn_err(CE_WARN, "qlt(%d) malformed "
478 478 "pci-max-read-request in qlt.conf. Valid values "
479 479 "for this HBA are 128/256/512/1024/2048/4096",
480 480 instance);
481 481 goto over_max_read_xfer_setting;
482 482 }
483 483 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
484 484 mr = (uint16_t)(mr & 0x8fff);
485 485 mr = (uint16_t)(mr | (val << 12));
486 486 PCICFG_WR16(qlt, 0x54, mr);
487 487 } else {
488 488 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
489 489 "pci-max-read-request for this device (%x)",
490 490 instance, did);
491 491 }
492 492 over_max_read_xfer_setting:;
493 493
494 494 max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
495 495 if (max_payload_size == 11)
496 496 goto over_max_payload_setting;
497 497 if ((did == 0x2432) || (did == 0x8432) ||
498 498 (did == 0x2532) || (did == 0x8001)) {
499 499 if (max_payload_size == 128)
500 500 val = 0;
501 501 else if (max_payload_size == 256)
502 502 val = 1;
503 503 else if (max_payload_size == 512)
504 504 val = 2;
505 505 else if (max_payload_size == 1024)
506 506 val = 3;
507 507 else {
508 508 cmn_err(CE_WARN, "qlt(%d) malformed "
509 509 "pcie-max-payload-size in qlt.conf. Valid values "
510 510 "for this HBA are 128/256/512/1024",
511 511 instance);
512 512 goto over_max_payload_setting;
513 513 }
514 514 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
515 515 mr = (uint16_t)(mr & 0xff1f);
516 516 mr = (uint16_t)(mr | (val << 5));
517 517 PCICFG_WR16(qlt, 0x54, mr);
518 518 } else {
519 519 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
520 520 "pcie-max-payload-size for this device (%x)",
521 521 instance, did);
522 522 }
523 523
524 524 over_max_payload_setting:;
525 525
526 526 qlt_enable_intr(qlt);
527 527
528 528 if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
529 529 goto attach_fail_10;
530 530
531 531 ddi_report_dev(dip);
532 532 return (DDI_SUCCESS);
533 533
534 534 attach_fail_10:;
535 535 mutex_destroy(&qlt->qlt_ioctl_lock);
536 536 cv_destroy(&qlt->mbox_cv);
537 537 cv_destroy(&qlt->rp_dereg_cv);
538 538 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
539 539 attach_fail_9:;
540 540 qlt_destroy_mutex(qlt);
541 541 qlt_release_intr(qlt);
542 542 attach_fail_8:;
543 543 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
544 544 attach_fail_7:;
545 545 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
546 546 attach_fail_6:;
547 547 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
548 548 attach_fail_5:;
549 549 ddi_regs_map_free(&qlt->regs_acc_handle);
550 550 attach_fail_4:;
551 551 pci_config_teardown(&qlt->pcicfg_acc_handle);
552 552 kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
553 553 (void) qlt_el_trace_desc_dtor(qlt);
554 554 attach_fail_2:;
555 555 attach_fail_1:;
556 556 ddi_soft_state_free(qlt_state, instance);
557 557 return (DDI_FAILURE);
558 558 }
559 559
560 560 #define FCT_I_EVENT_BRING_PORT_OFFLINE 0x83
561 561
562 562 /* ARGSUSED */
563 563 static int
564 564 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
565 565 {
566 566 qlt_state_t *qlt;
567 567
568 568 int instance;
569 569
570 570 instance = ddi_get_instance(dip);
571 571 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
572 572 NULL) {
573 573 return (DDI_FAILURE);
574 574 }
575 575
576 576 if (qlt->fw_code01) {
577 577 return (DDI_FAILURE);
578 578 }
579 579
580 580 if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
581 581 qlt->qlt_state_not_acked) {
582 582 return (DDI_FAILURE);
583 583 }
584 584 if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
585 585 return (DDI_FAILURE);
586 586 }
587 587
588 588 qlt_disable_intr(qlt);
589 589
590 590 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
591 591 qlt_destroy_mutex(qlt);
592 592 qlt_release_intr(qlt);
593 593 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
594 594 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
595 595 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
596 596 ddi_regs_map_free(&qlt->regs_acc_handle);
597 597 pci_config_teardown(&qlt->pcicfg_acc_handle);
598 598 kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
599 599 cv_destroy(&qlt->mbox_cv);
600 600 cv_destroy(&qlt->rp_dereg_cv);
601 601 (void) qlt_el_trace_desc_dtor(qlt);
602 602 ddi_soft_state_free(qlt_state, instance);
603 603
604 604 return (DDI_SUCCESS);
605 605 }
606 606
607 607 /*
608 608 * qlt_quiesce quiesces a device attached to the system.
609 609 */
610 610 static int
611 611 qlt_quiesce(dev_info_t *dip)
612 612 {
613 613 qlt_state_t *qlt;
614 614 uint32_t timer;
615 615 uint32_t stat;
616 616
617 617 qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
618 618 if (qlt == NULL) {
619 619 /* Oh well.... */
620 620 return (DDI_SUCCESS);
621 621 }
622 622
623 623 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
624 624 REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
625 625 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
626 626 for (timer = 0; timer < 30000; timer++) {
627 627 stat = REG_RD32(qlt, REG_RISC_STATUS);
628 628 if (stat & RISC_HOST_INTR_REQUEST) {
629 629 if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
630 630 REG_WR32(qlt, REG_HCCR,
631 631 HCCR_CMD(CLEAR_RISC_PAUSE));
632 632 break;
633 633 }
634 634 REG_WR32(qlt, REG_HCCR,
635 635 HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
636 636 }
637 637 drv_usecwait(100);
638 638 }
639 639 /* Reset the chip. */
640 640 REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
641 641 PCI_X_XFER_CTRL);
642 642 drv_usecwait(100);
643 643
644 644 qlt_disable_intr(qlt);
645 645
646 646 return (DDI_SUCCESS);
647 647 }
648 648
649 649 static void
650 650 qlt_enable_intr(qlt_state_t *qlt)
651 651 {
652 652 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 653 (void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
654 654 } else {
655 655 int i;
656 656 for (i = 0; i < qlt->intr_cnt; i++)
657 657 (void) ddi_intr_enable(qlt->htable[i]);
658 658 }
659 659 qlt->qlt_intr_enabled = 1;
660 660 }
661 661
662 662 static void
663 663 qlt_disable_intr(qlt_state_t *qlt)
664 664 {
665 665 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 666 (void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 667 } else {
668 668 int i;
669 669 for (i = 0; i < qlt->intr_cnt; i++)
670 670 (void) ddi_intr_disable(qlt->htable[i]);
671 671 }
672 672 qlt->qlt_intr_enabled = 0;
673 673 }
674 674
675 675 static void
676 676 qlt_release_intr(qlt_state_t *qlt)
677 677 {
678 678 if (qlt->htable) {
679 679 int i;
680 680 for (i = 0; i < qlt->intr_cnt; i++) {
681 681 (void) ddi_intr_remove_handler(qlt->htable[i]);
682 682 (void) ddi_intr_free(qlt->htable[i]);
683 683 }
684 684 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 685 }
686 686 qlt->htable = NULL;
687 687 qlt->intr_pri = 0;
688 688 qlt->intr_cnt = 0;
689 689 qlt->intr_size = 0;
690 690 qlt->intr_cap = 0;
691 691 }
692 692
693 693
694 694 static void
695 695 qlt_init_mutex(qlt_state_t *qlt)
696 696 {
697 697 mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
698 698 INT2PTR(qlt->intr_pri, void *));
699 699 mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 700 INT2PTR(qlt->intr_pri, void *));
701 701 mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 702 INT2PTR(qlt->intr_pri, void *));
703 703 mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 704 INT2PTR(qlt->intr_pri, void *));
705 705 }
706 706
707 707 static void
708 708 qlt_destroy_mutex(qlt_state_t *qlt)
709 709 {
710 710 mutex_destroy(&qlt->req_lock);
711 711 mutex_destroy(&qlt->preq_lock);
712 712 mutex_destroy(&qlt->mbox_lock);
713 713 mutex_destroy(&qlt->intr_lock);
714 714 }
715 715
716 716
717 717 static int
718 718 qlt_setup_msix(qlt_state_t *qlt)
719 719 {
720 720 int count, avail, actual;
721 721 int ret;
722 722 int itype = DDI_INTR_TYPE_MSIX;
723 723 int i;
724 724
725 725 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
726 726 if (ret != DDI_SUCCESS || count == 0) {
727 727 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
728 728 count);
729 729 return (DDI_FAILURE);
730 730 }
731 731 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
732 732 if (ret != DDI_SUCCESS || avail == 0) {
733 733 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
734 734 avail);
735 735 return (DDI_FAILURE);
736 736 }
737 737 if (avail < count) {
738 738 stmf_trace(qlt->qlt_port_alias,
739 739 "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
740 740 }
741 741
742 742 qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
743 743 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
744 744 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
745 745 DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
746 746 /* we need at least 2 interrupt vectors */
747 747 if (ret != DDI_SUCCESS || actual < 2) {
748 748 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
749 749 actual);
750 750 ret = DDI_FAILURE;
751 751 goto release_intr;
752 752 }
753 753 if (actual < count) {
754 754 EL(qlt, "requested: %d, received: %d\n", count, actual);
755 755 }
756 756
757 757 qlt->intr_cnt = actual;
758 758 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
759 759 if (ret != DDI_SUCCESS) {
760 760 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
761 761 ret = DDI_FAILURE;
762 762 goto release_intr;
763 763 }
764 764 qlt_init_mutex(qlt);
765 765 for (i = 0; i < actual; i++) {
766 766 ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
767 767 qlt, INT2PTR((uint_t)i, void *));
768 768 if (ret != DDI_SUCCESS) {
769 769 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
770 770 goto release_mutex;
771 771 }
772 772 }
773 773
774 774 (void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
775 775 qlt->intr_flags |= QLT_INTR_MSIX;
776 776 return (DDI_SUCCESS);
777 777
778 778 release_mutex:
779 779 qlt_destroy_mutex(qlt);
780 780 release_intr:
781 781 for (i = 0; i < actual; i++)
782 782 (void) ddi_intr_free(qlt->htable[i]);
783 783 #if 0
784 784 free_mem:
785 785 #endif
786 786 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
787 787 qlt->htable = NULL;
788 788 qlt_release_intr(qlt);
789 789 return (ret);
790 790 }
791 791
792 792
793 793 static int
794 794 qlt_setup_msi(qlt_state_t *qlt)
795 795 {
796 796 int count, avail, actual;
797 797 int itype = DDI_INTR_TYPE_MSI;
798 798 int ret;
799 799 int i;
800 800
801 801 /* get the # of interrupts */
802 802 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 803 if (ret != DDI_SUCCESS || count == 0) {
804 804 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 805 count);
806 806 return (DDI_FAILURE);
807 807 }
808 808 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 809 if (ret != DDI_SUCCESS || avail == 0) {
810 810 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 811 avail);
812 812 return (DDI_FAILURE);
813 813 }
814 814 if (avail < count) {
815 815 EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 816 }
817 817 /* MSI requires only 1 interrupt. */
818 818 count = 1;
819 819
820 820 /* allocate interrupt */
821 821 qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
822 822 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
823 823 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
824 824 0, count, &actual, DDI_INTR_ALLOC_NORMAL);
825 825 if (ret != DDI_SUCCESS || actual == 0) {
826 826 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
827 827 actual);
828 828 ret = DDI_FAILURE;
829 829 goto free_mem;
830 830 }
831 831 if (actual < count) {
832 832 EL(qlt, "requested: %d, received: %d\n", count, actual);
833 833 }
834 834 qlt->intr_cnt = actual;
835 835
836 836 /*
837 837 * Get priority for first msi, assume remaining are all the same.
838 838 */
839 839 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
840 840 if (ret != DDI_SUCCESS) {
841 841 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
842 842 ret = DDI_FAILURE;
843 843 goto release_intr;
844 844 }
845 845 qlt_init_mutex(qlt);
846 846
847 847 /* add handler */
848 848 for (i = 0; i < actual; i++) {
849 849 ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
850 850 qlt, INT2PTR((uint_t)i, void *));
851 851 if (ret != DDI_SUCCESS) {
852 852 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
853 853 goto release_mutex;
854 854 }
855 855 }
856 856
857 857 (void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
858 858 qlt->intr_flags |= QLT_INTR_MSI;
859 859 return (DDI_SUCCESS);
860 860
861 861 release_mutex:
862 862 qlt_destroy_mutex(qlt);
863 863 release_intr:
864 864 for (i = 0; i < actual; i++)
865 865 (void) ddi_intr_free(qlt->htable[i]);
866 866 free_mem:
867 867 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
868 868 qlt->htable = NULL;
869 869 qlt_release_intr(qlt);
870 870 return (ret);
871 871 }
872 872
873 873 static int
874 874 qlt_setup_fixed(qlt_state_t *qlt)
875 875 {
876 876 int count;
877 877 int actual;
878 878 int ret;
879 879 int itype = DDI_INTR_TYPE_FIXED;
880 880
881 881 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
882 882 /* Fixed interrupts can only have one interrupt. */
883 883 if (ret != DDI_SUCCESS || count != 1) {
884 884 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
885 885 count);
886 886 return (DDI_FAILURE);
887 887 }
888 888
889 889 qlt->intr_size = sizeof (ddi_intr_handle_t);
890 890 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
891 891 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
892 892 DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
893 893 if (ret != DDI_SUCCESS || actual != 1) {
894 894 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
895 895 actual);
896 896 ret = DDI_FAILURE;
897 897 goto free_mem;
898 898 }
899 899
900 900 qlt->intr_cnt = actual;
901 901 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
902 902 if (ret != DDI_SUCCESS) {
903 903 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
904 904 ret = DDI_FAILURE;
905 905 goto release_intr;
906 906 }
907 907 qlt_init_mutex(qlt);
908 908 ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
909 909 if (ret != DDI_SUCCESS) {
910 910 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
911 911 goto release_mutex;
912 912 }
913 913
914 914 qlt->intr_flags |= QLT_INTR_FIXED;
915 915 return (DDI_SUCCESS);
916 916
917 917 release_mutex:
918 918 qlt_destroy_mutex(qlt);
919 919 release_intr:
920 920 (void) ddi_intr_free(qlt->htable[0]);
921 921 free_mem:
922 922 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 923 qlt->htable = NULL;
924 924 qlt_release_intr(qlt);
925 925 return (ret);
926 926 }
927 927
928 928 static int
929 929 qlt_setup_interrupts(qlt_state_t *qlt)
930 930 {
931 931 int itypes = 0;
932 932
933 933 /*
934 934 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
935 935 */
936 936 #ifndef __sparc
937 937 if (qlt_enable_msi != 0) {
938 938 #endif
939 939 if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
940 940 itypes = DDI_INTR_TYPE_FIXED;
941 941 }
942 942
943 943 if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
944 944 if (qlt_setup_msix(qlt) == DDI_SUCCESS)
945 945 return (DDI_SUCCESS);
946 946 }
947 947
948 948 if (itypes & DDI_INTR_TYPE_MSI) {
949 949 if (qlt_setup_msi(qlt) == DDI_SUCCESS)
950 950 return (DDI_SUCCESS);
951 951 }
952 952 #ifndef __sparc
953 953 }
954 954 #endif
955 955 return (qlt_setup_fixed(qlt));
956 956 }
957 957
958 958 /*
959 959 * Fill in the HBA attributes
960 960 */
961 961 void
962 962 qlt_populate_hba_fru_details(struct fct_local_port *port,
963 963 struct fct_port_attrs *port_attrs)
964 964 {
965 965 caddr_t bufp;
966 966 int len;
967 967 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
968 968
969 969 (void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
970 970 "QLogic Corp.");
971 971 (void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
972 972 "%s", QLT_NAME);
973 973 (void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
974 974 "%s", QLT_VERSION);
975 975 port_attrs->serial_number[0] = '\0';
976 976 port_attrs->hardware_version[0] = '\0';
977 977
978 978 (void) snprintf(port_attrs->firmware_version,
979 979 FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
980 980 qlt->fw_minor, qlt->fw_subminor);
981 981
982 982 /* Get FCode version */
983 983 if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
984 984 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
985 985 (int *)&len) == DDI_PROP_SUCCESS) {
986 986 (void) snprintf(port_attrs->option_rom_version,
987 987 FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
988 988 kmem_free(bufp, (uint_t)len);
989 989 bufp = NULL;
990 990 } else {
991 991 #ifdef __sparc
992 992 (void) snprintf(port_attrs->option_rom_version,
993 993 FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
994 994 #else
995 995 (void) snprintf(port_attrs->option_rom_version,
996 996 FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
997 997 #endif
998 998 }
999 999 port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1000 1000 qlt->nvram->subsystem_vendor_id[1] << 8;
1001 1001
1002 1002 port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1003 1003 qlt->nvram->max_frame_length[0];
1004 1004
1005 1005 port_attrs->supported_cos = 0x10000000;
1006 1006 port_attrs->supported_speed = PORT_SPEED_1G |
1007 1007 PORT_SPEED_2G | PORT_SPEED_4G;
1008 1008 if (qlt->qlt_25xx_chip)
1009 1009 port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1010 1010 PORT_SPEED_8G;
1011 1011 if (qlt->qlt_81xx_chip)
1012 1012 port_attrs->supported_speed = PORT_SPEED_10G;
1013 1013
1014 1014 /* limit string length to nvr model_name length */
1015 1015 len = (qlt->qlt_81xx_chip) ? 16 : 8;
1016 1016 (void) snprintf(port_attrs->model,
1017 1017 (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1018 1018 "%s", qlt->nvram->model_name);
1019 1019
1020 1020 (void) snprintf(port_attrs->model_description,
1021 1021 (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1022 1022 FCHBA_MODEL_DESCRIPTION_LEN),
1023 1023 "%s", qlt->nvram->model_name);
1024 1024 }
1025 1025
1026 1026 /* ARGSUSED */
1027 1027 fct_status_t
1028 1028 qlt_info(uint32_t cmd, fct_local_port_t *port,
1029 1029 void *arg, uint8_t *buf, uint32_t *bufsizep)
1030 1030 {
1031 1031 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1032 1032 mbox_cmd_t *mcp;
1033 1033 fct_status_t ret = FCT_SUCCESS;
1034 1034 uint8_t *p;
1035 1035 fct_port_link_status_t *link_status;
1036 1036
1037 1037 switch (cmd) {
1038 1038 case FC_TGT_PORT_RLS:
1039 1039 if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1040 1040 EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1041 1041 "fct_port_link_status_t=%xh\n", *bufsizep,
1042 1042 sizeof (fct_port_link_status_t));
1043 1043 ret = FCT_FAILURE;
1044 1044 break;
1045 1045 }
1046 1046 /* send mailbox command to get link status */
1047 1047 mcp = qlt_alloc_mailbox_command(qlt, 156);
1048 1048 if (mcp == NULL) {
1049 1049 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1050 1050 ret = FCT_ALLOC_FAILURE;
1051 1051 break;
1052 1052 }
1053 1053
1054 1054 /* GET LINK STATUS count */
1055 1055 mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1056 1056 mcp->to_fw[8] = 156/4;
1057 1057 mcp->to_fw_mask |= BIT_1 | BIT_8;
1058 1058 mcp->from_fw_mask |= BIT_1 | BIT_2;
1059 1059
1060 1060 ret = qlt_mailbox_command(qlt, mcp);
1061 1061 if (ret != QLT_SUCCESS) {
1062 1062 EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1063 1063 qlt_free_mailbox_command(qlt, mcp);
1064 1064 break;
1065 1065 }
1066 1066 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1067 1067
1068 1068 p = mcp->dbuf->db_sglist[0].seg_addr;
1069 1069 link_status = (fct_port_link_status_t *)buf;
1070 1070 link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1071 1071 link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1072 1072 link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1073 1073 link_status->PrimitiveSeqProtocolErrorCount =
1074 1074 LE_32(*((uint32_t *)(p + 12)));
1075 1075 link_status->InvalidTransmissionWordCount =
1076 1076 LE_32(*((uint32_t *)(p + 16)));
1077 1077 link_status->InvalidCRCCount =
1078 1078 LE_32(*((uint32_t *)(p + 20)));
1079 1079
1080 1080 qlt_free_mailbox_command(qlt, mcp);
1081 1081 break;
1082 1082 default:
1083 1083 EL(qlt, "Unknown cmd=%xh\n", cmd);
1084 1084 ret = FCT_FAILURE;
1085 1085 break;
1086 1086 }
1087 1087 return (ret);
1088 1088 }
1089 1089
1090 1090 fct_status_t
1091 1091 qlt_port_start(caddr_t arg)
1092 1092 {
1093 1093 qlt_state_t *qlt = (qlt_state_t *)arg;
1094 1094 fct_local_port_t *port;
1095 1095 fct_dbuf_store_t *fds;
1096 1096 fct_status_t ret;
1097 1097
1098 1098 if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1099 1099 return (FCT_FAILURE);
1100 1100 }
1101 1101 /* Initialize the ddi_dma_handle free pool */
1102 1102 qlt_dma_handle_pool_init(qlt);
1103 1103
1104 1104 port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1105 1105 if (port == NULL) {
1106 1106 goto qlt_pstart_fail_1;
1107 1107 }
1108 1108 fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1109 1109 if (fds == NULL) {
1110 1110 goto qlt_pstart_fail_2;
1111 1111 }
1112 1112 qlt->qlt_port = port;
1113 1113 fds->fds_alloc_data_buf = qlt_dmem_alloc;
1114 1114 fds->fds_free_data_buf = qlt_dmem_free;
1115 1115 fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
1116 1116 fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
1117 1117 fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
1118 1118 fds->fds_copy_threshold = MMU_PAGESIZE;
1119 1119 fds->fds_fca_private = (void *)qlt;
1120 1120 /*
1121 1121 * Since we keep everything in the state struct and don't allocate any
1122 1122 * port private area, just use that pointer to point to the
1123 1123 * state struct.
1124 1124 */
1125 1125 port->port_fca_private = qlt;
1126 1126 port->port_fca_abort_timeout = 5 * 1000; /* 5 seconds */
1127 1127 bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1128 1128 bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1129 1129 fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1130 1130 fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1131 1131 port->port_default_alias = qlt->qlt_port_alias;
1132 1132 port->port_pp = qlt_pp;
1133 1133 port->port_fds = fds;
1134 1134 port->port_max_logins = QLT_MAX_LOGINS;
1135 1135 port->port_max_xchges = QLT_MAX_XCHGES;
1136 1136 port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1137 1137 port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1138 1138 port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1139 1139 port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1140 1140 port->port_get_link_info = qlt_get_link_info;
1141 1141 port->port_register_remote_port = qlt_register_remote_port;
1142 1142 port->port_deregister_remote_port = qlt_deregister_remote_port;
1143 1143 port->port_send_cmd = qlt_send_cmd;
1144 1144 port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1145 1145 port->port_send_cmd_response = qlt_send_cmd_response;
1146 1146 port->port_abort_cmd = qlt_abort_cmd;
1147 1147 port->port_ctl = qlt_ctl;
1148 1148 port->port_flogi_xchg = qlt_do_flogi;
1149 1149 port->port_populate_hba_details = qlt_populate_hba_fru_details;
1150 1150 port->port_info = qlt_info;
1151 1151 port->port_fca_version = FCT_FCA_MODREV_1;
1152 1152
1153 1153 if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1154 1154 EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1155 1155 goto qlt_pstart_fail_2_5;
1156 1156 }
1157 1157
1158 1158 return (QLT_SUCCESS);
1159 1159 #if 0
1160 1160 qlt_pstart_fail_3:
1161 1161 (void) fct_deregister_local_port(port);
1162 1162 #endif
1163 1163 qlt_pstart_fail_2_5:
1164 1164 fct_free(fds);
1165 1165 qlt_pstart_fail_2:
1166 1166 fct_free(port);
1167 1167 qlt->qlt_port = NULL;
1168 1168 qlt_pstart_fail_1:
1169 1169 qlt_dma_handle_pool_fini(qlt);
1170 1170 qlt_dmem_fini(qlt);
1171 1171 return (QLT_FAILURE);
1172 1172 }
1173 1173
1174 1174 fct_status_t
1175 1175 qlt_port_stop(caddr_t arg)
1176 1176 {
1177 1177 qlt_state_t *qlt = (qlt_state_t *)arg;
1178 1178 fct_status_t ret;
1179 1179
1180 1180 if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1181 1181 EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1182 1182 return (QLT_FAILURE);
1183 1183 }
1184 1184 fct_free(qlt->qlt_port->port_fds);
1185 1185 fct_free(qlt->qlt_port);
1186 1186 qlt->qlt_port = NULL;
1187 1187 qlt_dma_handle_pool_fini(qlt);
1188 1188 qlt_dmem_fini(qlt);
1189 1189 return (QLT_SUCCESS);
1190 1190 }
1191 1191
1192 1192 /*
1193 1193 * Called by framework to init the HBA.
1194 1194 * Can be called in the middle of I/O. (Why ??)
1195 1195 * Should make sure of a sane state both before and after initialization
1196 1196 */
1197 1197 fct_status_t
1198 1198 qlt_port_online(qlt_state_t *qlt)
1199 1199 {
1200 1200 uint64_t da;
1201 1201 int instance, i;
1202 1202 fct_status_t ret;
1203 1203 uint16_t rcount;
1204 1204 caddr_t icb;
1205 1205 mbox_cmd_t *mcp;
1206 1206 uint8_t *elsbmp;
1207 1207
1208 1208 instance = ddi_get_instance(qlt->dip);
1209 1209
1210 1210 /* XXX Make sure a sane state */
1211 1211
1212 1212 if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1213 1213 cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1214 1214 return (ret);
1215 1215 }
1216 1216
1217 1217 bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1218 1218
1219 1219 /* Get resource count */
1220 1220 REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1221 1221 ret = qlt_raw_mailbox_command(qlt);
1222 1222 rcount = REG_RD16(qlt, REG_MBOX(3));
1223 1223 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1224 1224 if (ret != QLT_SUCCESS) {
1225 1225 EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1226 1226 return (ret);
1227 1227 }
1228 1228
1229 1229 /* Enable PUREX */
1230 1230 REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1231 1231 REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1232 1232 REG_WR16(qlt, REG_MBOX(2), 0x0);
1233 1233 REG_WR16(qlt, REG_MBOX(3), 0x0);
1234 1234 ret = qlt_raw_mailbox_command(qlt);
1235 1235 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1236 1236 if (ret != QLT_SUCCESS) {
1237 1237 EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1238 1238 cmn_err(CE_NOTE, "Enable PUREX failed");
1239 1239 return (ret);
1240 1240 }
1241 1241
1242 1242 /* Pass ELS bitmap to fw */
1243 1243 REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1244 1244 REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1245 1245 elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1246 1246 bzero(elsbmp, 32);
1247 1247 da = qlt->queue_mem_cookie.dmac_laddress;
1248 1248 da += MBOX_DMA_MEM_OFFSET;
1249 1249 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1250 1250 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1251 1251 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1252 1252 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1253 1253 SETELSBIT(elsbmp, ELS_OP_PLOGI);
1254 1254 SETELSBIT(elsbmp, ELS_OP_LOGO);
1255 1255 SETELSBIT(elsbmp, ELS_OP_ABTX);
1256 1256 SETELSBIT(elsbmp, ELS_OP_ECHO);
1257 1257 SETELSBIT(elsbmp, ELS_OP_PRLI);
1258 1258 SETELSBIT(elsbmp, ELS_OP_PRLO);
1259 1259 SETELSBIT(elsbmp, ELS_OP_SCN);
1260 1260 SETELSBIT(elsbmp, ELS_OP_TPRLO);
1261 1261 SETELSBIT(elsbmp, ELS_OP_PDISC);
1262 1262 SETELSBIT(elsbmp, ELS_OP_ADISC);
1263 1263 SETELSBIT(elsbmp, ELS_OP_RSCN);
1264 1264 SETELSBIT(elsbmp, ELS_OP_RNID);
1265 1265 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1266 1266 DDI_DMA_SYNC_FORDEV);
1267 1267 ret = qlt_raw_mailbox_command(qlt);
1268 1268 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1269 1269 if (ret != QLT_SUCCESS) {
1270 1270 EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
1271 1271 cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1272 1272 "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1273 1273 elsbmp[1]);
1274 1274 return (ret);
1275 1275 }
1276 1276
1277 1277 /* Init queue pointers */
1278 1278 REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1279 1279 REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1280 1280 REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1281 1281 REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1282 1282 REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1283 1283 REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1284 1284 REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1285 1285 REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1286 1286 qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1287 1287 qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1288 1288 qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1289 1289 qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1290 1290 qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1291 1291
1292 1292 /*
1293 1293 * XXX support for tunables. Also should we cache icb ?
1294 1294 */
1295 1295 if (qlt->qlt_81xx_chip) {
1296 1296 /* allocate extra 64 bytes for Extended init control block */
1297 1297 mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1298 1298 } else {
1299 1299 mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1300 1300 }
1301 1301 if (mcp == NULL) {
1302 1302 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1303 1303 return (STMF_ALLOC_FAILURE);
1304 1304 }
1305 1305 icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1306 1306 if (qlt->qlt_81xx_chip) {
1307 1307 bzero(icb, 0xC0);
1308 1308 } else {
1309 1309 bzero(icb, 0x80);
1310 1310 }
1311 1311 da = qlt->queue_mem_cookie.dmac_laddress;
1312 1312 DMEM_WR16(qlt, icb, 1); /* Version */
1313 1313 DMEM_WR16(qlt, icb+4, 2112); /* Max frame length */
1314 1314 DMEM_WR16(qlt, icb+6, 16); /* Execution throttle */
1315 1315 DMEM_WR16(qlt, icb+8, rcount); /* Xchg count */
1316 1316 DMEM_WR16(qlt, icb+0x0a, 0x00); /* Hard address (not used) */
1317 1317 bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1318 1318 bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1319 1319 DMEM_WR16(qlt, icb+0x20, 3); /* Login retry count */
1320 1320 DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1321 1321 DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1322 1322 if (!qlt->qlt_81xx_chip) {
1323 1323 DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1324 1324 }
1325 1325 DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1326 1326 DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1327 1327 DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1328 1328 DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1329 1329 DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1330 1330 DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1331 1331 DMEM_WR16(qlt, icb+0x58, 2); /* Interrupt delay Timer */
1332 1332 DMEM_WR16(qlt, icb+0x5a, 4); /* Login timeout (secs) */
1333 1333 if (qlt->qlt_81xx_chip) {
1334 1334 qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1335 1335
1336 1336 DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1337 1337 DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1338 1338 DMEM_WR32(qlt, icb+0x70,
1339 1339 qlt81nvr->enode_mac[0] |
1340 1340 (qlt81nvr->enode_mac[1] << 8) |
1341 1341 (qlt81nvr->enode_mac[2] << 16) |
1342 1342 (qlt81nvr->enode_mac[3] << 24));
1343 1343 DMEM_WR16(qlt, icb+0x74,
1344 1344 qlt81nvr->enode_mac[4] |
1345 1345 (qlt81nvr->enode_mac[5] << 8));
1346 1346 } else {
1347 1347 DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1348 1348 BIT_2 | BIT_1 | BIT_0);
1349 1349 DMEM_WR32(qlt, icb+0x60, BIT_5);
1350 1350 DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1351 1351 BIT_4);
1352 1352 }
1353 1353
1354 1354 if (qlt->qlt_81xx_chip) {
1355 1355 qlt_dmem_bctl_t *bctl;
1356 1356 uint32_t index;
1357 1357 caddr_t src;
1358 1358 caddr_t dst;
1359 1359 qlt_nvram_81xx_t *qlt81nvr;
1360 1360
1361 1361 dst = icb+0x80;
1362 1362 qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1363 1363 src = (caddr_t)&qlt81nvr->ext_blk;
1364 1364 index = sizeof (qlt_ext_icb_81xx_t);
1365 1365
1366 1366 /* Use defaults for cases where we find nothing in NVR */
1367 1367 if (*src == 0) {
1368 1368 EL(qlt, "nvram eicb=null\n");
1369 1369 cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1370 1370 instance);
1371 1371 qlt81nvr->ext_blk.version[0] = 1;
1372 1372 /*
1373 1373 * not yet, for !FIP firmware at least
1374 1374 *
1375 1375 * qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1376 1376 */
1377 1377 #ifdef _LITTLE_ENDIAN
1378 1378 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1379 1379 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1380 1380 #else
1381 1381 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1382 1382 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1383 1383 #endif
1384 1384 }
1385 1385
1386 1386 while (index--) {
1387 1387 *dst++ = *src++;
1388 1388 }
1389 1389
1390 1390 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1391 1391 da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1392 1392
1393 1393 mcp->to_fw[11] = LSW(LSD(da));
1394 1394 mcp->to_fw[10] = MSW(LSD(da));
1395 1395 mcp->to_fw[13] = LSW(MSD(da));
1396 1396 mcp->to_fw[12] = MSW(MSD(da));
1397 1397 mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1398 1398 0xffff);
1399 1399
1400 1400 /* eicb enable */
1401 1401 mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1402 1402 mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1403 1403 BIT_1;
1404 1404 }
1405 1405
1406 1406 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1407 1407 mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1408 1408
1409 1409 /*
1410 1410 * This is the 1st command after adapter initialization that will
1411 1411 * use interrupts and the regular mailbox interface.
1412 1412 */
1413 1413 qlt->mbox_io_state = MBOX_STATE_READY;
1414 1414 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1415 1415 /* Issue mailbox to firmware */
1416 1416 ret = qlt_mailbox_command(qlt, mcp);
1417 1417 if (ret != QLT_SUCCESS) {
1418 1418 EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1419 1419 cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1420 1420 instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1421 1421 }
1422 1422
1423 1423 mcp->to_fw_mask = BIT_0;
1424 1424 mcp->from_fw_mask = BIT_0 | BIT_1;
1425 1425 mcp->to_fw[0] = 0x28;
1426 1426 ret = qlt_mailbox_command(qlt, mcp);
1427 1427 if (ret != QLT_SUCCESS) {
1428 1428 EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1429 1429 cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1430 1430 (long long)ret);
1431 1431 }
1432 1432
1433 1433 /*
1434 1434 * Report FW versions for 81xx - MPI rev is useful
1435 1435 */
1436 1436 if (qlt->qlt_81xx_chip) {
1437 1437 mcp->to_fw_mask = BIT_0;
1438 1438 mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1439 1439 BIT_0;
1440 1440 mcp->to_fw[0] = 0x8;
1441 1441 ret = qlt_mailbox_command(qlt, mcp);
1442 1442 if (ret != QLT_SUCCESS) {
1443 1443 EL(qlt, "about fw failed: %llx\n", (long long)ret);
1444 1444 } else {
1445 1445 EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1446 1446 mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1447 1447 mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1448 1448 mcp->from_fw[11] & 0xff);
1449 1449 }
1450 1450 }
1451 1451
1452 1452 qlt_free_mailbox_command(qlt, mcp);
1453 1453
1454 1454 for (i = 0; i < 5; i++) {
1455 1455 qlt->qlt_bufref[i] = 0;
1456 1456 }
1457 1457 qlt->qlt_bumpbucket = 0;
1458 1458 qlt->qlt_pmintry = 0;
1459 1459 qlt->qlt_pmin_ok = 0;
1460 1460
1461 1461 if (ret != QLT_SUCCESS)
1462 1462 return (ret);
1463 1463 return (FCT_SUCCESS);
1464 1464 }
1465 1465
1466 1466 fct_status_t
1467 1467 qlt_port_offline(qlt_state_t *qlt)
1468 1468 {
1469 1469 int retries;
1470 1470
1471 1471 mutex_enter(&qlt->mbox_lock);
1472 1472
1473 1473 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1474 1474 mutex_exit(&qlt->mbox_lock);
1475 1475 goto poff_mbox_done;
1476 1476 }
1477 1477
1478 1478 /* Wait to grab the mailboxes */
1479 1479 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1480 1480 retries++) {
1481 1481 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1482 1482 if ((retries > 5) ||
1483 1483 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1484 1484 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1485 1485 mutex_exit(&qlt->mbox_lock);
1486 1486 goto poff_mbox_done;
1487 1487 }
1488 1488 }
1489 1489 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1490 1490 mutex_exit(&qlt->mbox_lock);
1491 1491 poff_mbox_done:;
1492 1492 qlt->intr_sneak_counter = 10;
1493 1493 mutex_enter(&qlt->intr_lock);
1494 1494 (void) qlt_reset_chip(qlt);
1495 1495 drv_usecwait(20);
1496 1496 qlt->intr_sneak_counter = 0;
1497 1497 mutex_exit(&qlt->intr_lock);
1498 1498
1499 1499 return (FCT_SUCCESS);
1500 1500 }
1501 1501
1502 1502 static fct_status_t
1503 1503 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1504 1504 {
1505 1505 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1506 1506 mbox_cmd_t *mcp;
1507 1507 fct_status_t fc_ret;
1508 1508 fct_status_t ret;
1509 1509 clock_t et;
1510 1510
1511 1511 et = ddi_get_lbolt() + drv_usectohz(5000000);
1512 1512 mcp = qlt_alloc_mailbox_command(qlt, 0);
1513 1513 link_info_retry:
1514 1514 mcp->to_fw[0] = MBC_GET_ID;
1515 1515 mcp->to_fw[9] = 0;
1516 1516 mcp->to_fw_mask |= BIT_0 | BIT_9;
1517 1517 mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1518 1518 /* Issue mailbox to firmware */
1519 1519 ret = qlt_mailbox_command(qlt, mcp);
1520 1520 if (ret != QLT_SUCCESS) {
1521 1521 EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1522 1522 if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1523 1523 /* Firmware is not ready */
1524 1524 if (ddi_get_lbolt() < et) {
1525 1525 delay(drv_usectohz(50000));
1526 1526 goto link_info_retry;
1527 1527 }
1528 1528 }
1529 1529 stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1530 1530 "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1531 1531 fc_ret = FCT_FAILURE;
1532 1532 } else {
1533 1533 li->portid = ((uint32_t)(mcp->from_fw[2])) |
1534 1534 (((uint32_t)(mcp->from_fw[3])) << 16);
1535 1535
1536 1536 li->port_speed = qlt->link_speed;
1537 1537 switch (mcp->from_fw[6]) {
1538 1538 case 1:
1539 1539 li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1540 1540 li->port_fca_flogi_done = 1;
1541 1541 break;
1542 1542 case 0:
1543 1543 li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1544 1544 li->port_no_fct_flogi = 1;
1545 1545 break;
1546 1546 case 3:
1547 1547 li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1548 1548 li->port_fca_flogi_done = 1;
1549 1549 break;
1550 1550 case 2: /*FALLTHROUGH*/
1551 1551 case 4:
1552 1552 li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1553 1553 li->port_fca_flogi_done = 1;
1554 1554 break;
1555 1555 default:
1556 1556 li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1557 1557 EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1558 1558 }
1559 1559 qlt->cur_topology = li->port_topology;
1560 1560 fc_ret = FCT_SUCCESS;
1561 1561 }
1562 1562 qlt_free_mailbox_command(qlt, mcp);
1563 1563
1564 1564 if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1565 1565 mcp = qlt_alloc_mailbox_command(qlt, 64);
1566 1566 mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
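		/*
		 * Handle 0x7FE below is the fixed handle for the fabric
		 * F_port (port ID 0xFFFFFE); see the well-known-address
		 * mapping in qlt_register_remote_port().
		 */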
1567 1567 mcp->to_fw[1] = 0x7FE;
1568 1568 mcp->to_fw[9] = 0;
1569 1569 mcp->to_fw[10] = 0;
1570 1570 mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1571 1571 fc_ret = qlt_mailbox_command(qlt, mcp);
1572 1572 if (fc_ret != QLT_SUCCESS) {
1573 1573 EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1574 1574 fc_ret);
1575 1575 stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1576 1576 "database for F_port failed, ret = %llx", fc_ret);
1577 1577 } else {
1578 1578 uint8_t *p;
1579 1579
1580 1580 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1581 1581 p = mcp->dbuf->db_sglist[0].seg_addr;
1582 1582 bcopy(p + 0x18, li->port_rpwwn, 8);
1583 1583 bcopy(p + 0x20, li->port_rnwwn, 8);
1584 1584 }
1585 1585 qlt_free_mailbox_command(qlt, mcp);
1586 1586 }
1587 1587 return (fc_ret);
1588 1588 }
1589 1589
1590 1590 static int
1591 1591 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1592 1592 {
1593 1593 int instance;
1594 1594 qlt_state_t *qlt;
1595 1595
1596 1596 if (otype != OTYP_CHR) {
1597 1597 return (EINVAL);
1598 1598 }
1599 1599
1600 1600 /*
1601 1601 	 * Since this is for debugging only, only allow root to issue ioctls for now
1602 1602 */
1603 1603 if (drv_priv(credp)) {
1604 1604 return (EPERM);
1605 1605 }
1606 1606
1607 1607 instance = (int)getminor(*devp);
1608 1608 qlt = ddi_get_soft_state(qlt_state, instance);
1609 1609 if (qlt == NULL) {
1610 1610 return (ENXIO);
1611 1611 }
1612 1612
1613 1613 mutex_enter(&qlt->qlt_ioctl_lock);
1614 1614 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1615 1615 /*
1616 1616 * It is already open for exclusive access.
1617 1617 * So shut the door on this caller.
1618 1618 */
1619 1619 mutex_exit(&qlt->qlt_ioctl_lock);
1620 1620 return (EBUSY);
1621 1621 }
1622 1622
1623 1623 if (flag & FEXCL) {
1624 1624 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1625 1625 /*
1626 1626 * Exclusive operation not possible
1627 1627 * as it is already opened
1628 1628 */
1629 1629 mutex_exit(&qlt->qlt_ioctl_lock);
1630 1630 return (EBUSY);
1631 1631 }
1632 1632 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1633 1633 }
1634 1634 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1635 1635 mutex_exit(&qlt->qlt_ioctl_lock);
1636 1636
1637 1637 return (0);
1638 1638 }
1639 1639
1640 1640 /* ARGSUSED */
1641 1641 static int
1642 1642 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1643 1643 {
1644 1644 int instance;
1645 1645 qlt_state_t *qlt;
1646 1646
1647 1647 if (otype != OTYP_CHR) {
1648 1648 return (EINVAL);
1649 1649 }
1650 1650
1651 1651 instance = (int)getminor(dev);
1652 1652 qlt = ddi_get_soft_state(qlt_state, instance);
1653 1653 if (qlt == NULL) {
1654 1654 return (ENXIO);
1655 1655 }
1656 1656
1657 1657 mutex_enter(&qlt->qlt_ioctl_lock);
1658 1658 if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1659 1659 mutex_exit(&qlt->qlt_ioctl_lock);
1660 1660 return (ENODEV);
1661 1661 }
1662 1662
1663 1663 /*
1664 1664 	 * There is a potential hole here: there could be several concurrent
1665 1665 	 * shared open sessions, but we never check for that case.
1666 1666 	 * It should not hurt much, so disregard it for now.
1667 1667 */
1668 1668 qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1669 1669 mutex_exit(&qlt->qlt_ioctl_lock);
1670 1670
1671 1671 return (0);
1672 1672 }
1673 1673
1674 1674 /*
1675 1675 * All of these ioctls are unstable interfaces which are meant to be used
1676 1676 * in a controlled lab env. No formal testing will be (or needs to be) done
1677 1677  * for these ioctls. Note especially that running with an additional
1678 1678 * uploaded firmware is not supported and is provided here for test
1679 1679 * purposes only.
1680 1680 */
1681 1681 /* ARGSUSED */
1682 1682 static int
1683 1683 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1684 1684 cred_t *credp, int *rval)
1685 1685 {
1686 1686 qlt_state_t *qlt;
1687 1687 int ret = 0;
1688 1688 #ifdef _LITTLE_ENDIAN
1689 1689 int i;
1690 1690 #endif
1691 1691 stmf_iocdata_t *iocd;
1692 1692 void *ibuf = NULL;
1693 1693 void *obuf = NULL;
1694 1694 uint32_t *intp;
1695 1695 qlt_fw_info_t *fwi;
1696 1696 mbox_cmd_t *mcp;
1697 1697 fct_status_t st;
1698 1698 char info[QLT_INFO_LEN];
1699 1699 fct_status_t ret2;
1700 1700
1701 1701 if (drv_priv(credp) != 0)
1702 1702 return (EPERM);
1703 1703
1704 1704 qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1705 1705 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1706 1706 if (ret)
1707 1707 return (ret);
1708 1708 iocd->stmf_error = 0;
1709 1709
1710 1710 switch (cmd) {
1711 1711 case QLT_IOCTL_FETCH_FWDUMP:
1712 1712 if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1713 1713 EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1714 1714 iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1715 1715 ret = EINVAL;
1716 1716 break;
1717 1717 }
1718 1718 mutex_enter(&qlt->qlt_ioctl_lock);
1719 1719 if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1720 1720 mutex_exit(&qlt->qlt_ioctl_lock);
1721 1721 ret = ENODATA;
1722 1722 EL(qlt, "no fwdump\n");
1723 1723 iocd->stmf_error = QLTIO_NO_DUMP;
1724 1724 break;
1725 1725 }
1726 1726 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1727 1727 mutex_exit(&qlt->qlt_ioctl_lock);
1728 1728 ret = EBUSY;
1729 1729 EL(qlt, "fwdump inprogress\n");
1730 1730 iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1731 1731 break;
1732 1732 }
1733 1733 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1734 1734 mutex_exit(&qlt->qlt_ioctl_lock);
1735 1735 ret = EEXIST;
1736 1736 EL(qlt, "fwdump already fetched\n");
1737 1737 iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1738 1738 break;
1739 1739 }
1740 1740 bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1741 1741 qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1742 1742 mutex_exit(&qlt->qlt_ioctl_lock);
1743 1743
1744 1744 break;
1745 1745
1746 1746 case QLT_IOCTL_TRIGGER_FWDUMP:
1747 1747 if (qlt->qlt_state != FCT_STATE_ONLINE) {
1748 1748 ret = EACCES;
1749 1749 iocd->stmf_error = QLTIO_NOT_ONLINE;
1750 1750 break;
1751 1751 }
1752 1752 (void) snprintf(info, sizeof (info), "qlt_ioctl: qlt-%p, "
1753 1753 "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1754 1754 if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1755 1755 STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1756 1756 STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1757 1757 EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1758 1758 "%llxh\n", ret2);
1759 1759 ret = EIO;
1760 1760 }
1761 1761 break;
1762 1762 case QLT_IOCTL_UPLOAD_FW:
1763 1763 if ((iocd->stmf_ibuf_size < 1024) ||
1764 1764 (iocd->stmf_ibuf_size & 3)) {
1765 1765 EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1766 1766 iocd->stmf_ibuf_size);
1767 1767 ret = EINVAL;
1768 1768 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1769 1769 break;
1770 1770 }
1771 1771 intp = (uint32_t *)ibuf;
1772 1772 #ifdef _LITTLE_ENDIAN
1773 1773 for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1774 1774 intp[i] = BSWAP_32(intp[i]);
1775 1775 }
1776 1776 #endif
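		/*
		 * Image layout assumed by the checks and the copy below:
		 * word 2 of the image is the load address and word 3 the
		 * word count of the first segment; the second segment starts
		 * right after it with the same header layout (its words 2
		 * and 3 are read into fw_addr02/fw_length02 further down).
		 */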
1777 1777 if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1778 1778 (((intp[intp[3] + 3] + intp[3]) << 2) !=
1779 1779 iocd->stmf_ibuf_size)) {
1780 1780 EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1781 1781 iocd->stmf_ibuf_size);
1782 1782 ret = EINVAL;
1783 1783 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1784 1784 break;
1785 1785 }
1786 1786 if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1787 1787 (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1788 1788 (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1789 1789 ((intp[8] & 3) == 0))) {
1790 1790 EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1791 1791 ret = EACCES;
1792 1792 iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1793 1793 break;
1794 1794 }
1795 1795
1796 1796 		/* Everything looks ok, let's copy this firmware */
1797 1797 if (qlt->fw_code01) {
1798 1798 kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1799 1799 qlt->fw_length02) << 2);
1800 1800 qlt->fw_code01 = NULL;
1801 1801 } else {
1802 1802 atomic_inc_32(&qlt_loaded_counter);
1803 1803 }
1804 1804 qlt->fw_length01 = intp[3];
1805 1805 qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1806 1806 KM_SLEEP);
1807 1807 bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1808 1808 qlt->fw_addr01 = intp[2];
1809 1809 qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1810 1810 qlt->fw_addr02 = qlt->fw_code02[2];
1811 1811 qlt->fw_length02 = qlt->fw_code02[3];
1812 1812 break;
1813 1813
1814 1814 case QLT_IOCTL_CLEAR_FW:
1815 1815 if (qlt->fw_code01) {
1816 1816 kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1817 1817 qlt->fw_length02) << 2);
1818 1818 qlt->fw_code01 = NULL;
1819 1819 atomic_dec_32(&qlt_loaded_counter);
1820 1820 }
1821 1821 break;
1822 1822
1823 1823 case QLT_IOCTL_GET_FW_INFO:
1824 1824 if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1825 1825 EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1826 1826 iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1827 1827 ret = EINVAL;
1828 1828 break;
1829 1829 }
1830 1830 fwi = (qlt_fw_info_t *)obuf;
1831 1831 if (qlt->qlt_stay_offline) {
1832 1832 fwi->fwi_stay_offline = 1;
1833 1833 }
1834 1834 if (qlt->qlt_state == FCT_STATE_ONLINE) {
1835 1835 fwi->fwi_port_active = 1;
1836 1836 }
1837 1837 fwi->fwi_active_major = qlt->fw_major;
1838 1838 fwi->fwi_active_minor = qlt->fw_minor;
1839 1839 fwi->fwi_active_subminor = qlt->fw_subminor;
1840 1840 fwi->fwi_active_attr = qlt->fw_attr;
1841 1841 if (qlt->fw_code01) {
1842 1842 fwi->fwi_fw_uploaded = 1;
1843 1843 fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1844 1844 fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1845 1845 fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1846 1846 fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1847 1847 }
1848 1848 if (qlt->qlt_81xx_chip) {
1849 1849 fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1850 1850 fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1851 1851 fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1852 1852 fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1853 1853 } else if (qlt->qlt_25xx_chip) {
1854 1854 fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1855 1855 fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1856 1856 fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1857 1857 fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1858 1858 } else {
1859 1859 fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1860 1860 fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1861 1861 fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1862 1862 fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1863 1863 }
1864 1864 break;
1865 1865
1866 1866 case QLT_IOCTL_STAY_OFFLINE:
1867 1867 if (!iocd->stmf_ibuf_size) {
1868 1868 EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1869 1869 iocd->stmf_ibuf_size);
1870 1870 ret = EINVAL;
1871 1871 break;
1872 1872 }
1873 1873 if (*((char *)ibuf)) {
1874 1874 qlt->qlt_stay_offline = 1;
1875 1875 } else {
1876 1876 qlt->qlt_stay_offline = 0;
1877 1877 }
1878 1878 break;
1879 1879
1880 1880 case QLT_IOCTL_MBOX:
1881 1881 if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1882 1882 (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1883 1883 EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1884 1884 iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1885 1885 ret = EINVAL;
1886 1886 break;
1887 1887 }
1888 1888 mcp = qlt_alloc_mailbox_command(qlt, 0);
1889 1889 if (mcp == NULL) {
1890 1890 EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1891 1891 ret = ENOMEM;
1892 1892 break;
1893 1893 }
1894 1894 bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1895 1895 st = qlt_mailbox_command(qlt, mcp);
1896 1896 bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1897 1897 qlt_free_mailbox_command(qlt, mcp);
1898 1898 if (st != QLT_SUCCESS) {
1899 1899 if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1900 1900 st = QLT_SUCCESS;
1901 1901 }
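		/*
		 * QLT_MBOX_FAILED is ORed with the firmware's mailbox-0
		 * status (see qlt_mailbox_command()), so a plain mailbox
		 * failure is not treated as an ioctl error here; the caller
		 * reads the outcome from the copied-out mailboxes instead.
		 */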
1902 1902 if (st != QLT_SUCCESS) {
1903 1903 EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1904 1904 ret = EIO;
1905 1905 switch (st) {
1906 1906 case QLT_MBOX_NOT_INITIALIZED:
1907 1907 iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1908 1908 break;
1909 1909 case QLT_MBOX_BUSY:
1910 1910 iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1911 1911 break;
1912 1912 case QLT_MBOX_TIMEOUT:
1913 1913 iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1914 1914 break;
1915 1915 case QLT_MBOX_ABORTED:
1916 1916 iocd->stmf_error = QLTIO_MBOX_ABORTED;
1917 1917 break;
1918 1918 }
1919 1919 }
1920 1920 break;
1921 1921
1922 1922 case QLT_IOCTL_ELOG:
1923 1923 qlt_dump_el_trace_buffer(qlt);
1924 1924 break;
1925 1925
1926 1926 default:
1927 1927 EL(qlt, "Unknown ioctl-%xh\n", cmd);
1928 1928 ret = ENOTTY;
1929 1929 }
1930 1930
1931 1931 if (ret == 0) {
1932 1932 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1933 1933 } else if (iocd->stmf_error) {
1934 1934 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1935 1935 }
1936 1936 if (obuf) {
1937 1937 kmem_free(obuf, iocd->stmf_obuf_size);
1938 1938 obuf = NULL;
1939 1939 }
1940 1940 if (ibuf) {
1941 1941 kmem_free(ibuf, iocd->stmf_ibuf_size);
1942 1942 ibuf = NULL;
1943 1943 }
1944 1944 kmem_free(iocd, sizeof (stmf_iocdata_t));
1945 1945 return (ret);
1946 1946 }
1947 1947
1948 1948 static fct_status_t
1949 1949 qlt_force_lip(qlt_state_t *qlt)
1950 1950 {
1951 1951 mbox_cmd_t *mcp;
1952 1952 fct_status_t rval;
1953 1953
1954 1954 mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 1955 mcp->to_fw[0] = 0x0072;
1956 1956 mcp->to_fw[1] = BIT_4;
1957 1957 mcp->to_fw[3] = 1;
1958 1958 mcp->to_fw_mask |= BIT_1 | BIT_3;
1959 1959 rval = qlt_mailbox_command(qlt, mcp);
1960 1960 if (rval != FCT_SUCCESS) {
1961 1961 EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1962 1962 } else {
1963 1963 if (mcp->from_fw[0] != 0x4000) {
1964 1964 QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 1965 mcp->from_fw[0]);
1966 1966 rval = FCT_FAILURE;
1967 1967 }
1968 1968 }
1969 1969 qlt_free_mailbox_command(qlt, mcp);
1970 1970 return (rval);
1971 1971 }
1972 1972
1973 1973 static void
1974 1974 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1975 1975 {
1976 1976 stmf_change_status_t st;
1977 1977 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;
1978 1978 qlt_state_t *qlt;
1979 1979 fct_status_t ret;
1980 1980
1981 1981 ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1982 1982 (cmd == FCT_CMD_PORT_OFFLINE) ||
1983 1983 (cmd == FCT_CMD_FORCE_LIP) ||
1984 1984 (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1985 1985 (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1986 1986
1987 1987 qlt = (qlt_state_t *)port->port_fca_private;
1988 1988 st.st_completion_status = FCT_SUCCESS;
1989 1989 st.st_additional_info = NULL;
1990 1990
1991 1991 switch (cmd) {
1992 1992 case FCT_CMD_PORT_ONLINE:
1993 1993 if (qlt->qlt_state == FCT_STATE_ONLINE)
1994 1994 st.st_completion_status = STMF_ALREADY;
1995 1995 else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1996 1996 st.st_completion_status = FCT_FAILURE;
1997 1997 if (st.st_completion_status == FCT_SUCCESS) {
1998 1998 qlt->qlt_state = FCT_STATE_ONLINING;
1999 1999 qlt->qlt_state_not_acked = 1;
2000 2000 st.st_completion_status = qlt_port_online(qlt);
2001 2001 if (st.st_completion_status != STMF_SUCCESS) {
2002 2002 EL(qlt, "PORT_ONLINE status=%xh\n",
2003 2003 st.st_completion_status);
2004 2004 qlt->qlt_state = FCT_STATE_OFFLINE;
2005 2005 qlt->qlt_state_not_acked = 0;
2006 2006 } else {
2007 2007 qlt->qlt_state = FCT_STATE_ONLINE;
2008 2008 }
2009 2009 }
2010 2010 fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2011 2011 qlt->qlt_change_state_flags = 0;
2012 2012 break;
2013 2013
2014 2014 case FCT_CMD_PORT_OFFLINE:
2015 2015 if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2016 2016 st.st_completion_status = STMF_ALREADY;
2017 2017 } else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2018 2018 st.st_completion_status = FCT_FAILURE;
2019 2019 }
2020 2020 if (st.st_completion_status == FCT_SUCCESS) {
2021 2021 qlt->qlt_state = FCT_STATE_OFFLINING;
2022 2022 qlt->qlt_state_not_acked = 1;
2023 2023
2024 2024 if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2025 2025 (void) qlt_firmware_dump(port, ssci);
2026 2026 }
2027 2027 qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2028 2028 st.st_completion_status = qlt_port_offline(qlt);
2029 2029 if (st.st_completion_status != STMF_SUCCESS) {
2030 2030 EL(qlt, "PORT_OFFLINE status=%xh\n",
2031 2031 st.st_completion_status);
2032 2032 qlt->qlt_state = FCT_STATE_ONLINE;
2033 2033 qlt->qlt_state_not_acked = 0;
2034 2034 } else {
2035 2035 qlt->qlt_state = FCT_STATE_OFFLINE;
2036 2036 }
2037 2037 }
2038 2038 fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2039 2039 break;
2040 2040
2041 2041 case FCT_ACK_PORT_ONLINE_COMPLETE:
2042 2042 qlt->qlt_state_not_acked = 0;
2043 2043 break;
2044 2044
2045 2045 case FCT_ACK_PORT_OFFLINE_COMPLETE:
2046 2046 qlt->qlt_state_not_acked = 0;
2047 2047 if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2048 2048 (qlt->qlt_stay_offline == 0)) {
2049 2049 if ((ret = fct_port_initialize(port,
2050 2050 qlt->qlt_change_state_flags,
2051 2051 "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2052 2052 "with RLFLAG_RESET")) != FCT_SUCCESS) {
2053 2053 EL(qlt, "fct_port_initialize status=%llxh\n",
2054 2054 ret);
2055 2055 cmn_err(CE_WARN, "qlt_ctl: "
2056 2056 "fct_port_initialize failed, please use "
2057 2057 			    "stmfstate to start the port-%s manually",
2058 2058 qlt->qlt_port_alias);
2059 2059 }
2060 2060 }
2061 2061 break;
2062 2062
2063 2063 case FCT_CMD_FORCE_LIP:
2064 2064 if (qlt->qlt_81xx_chip) {
2065 2065 EL(qlt, "force lip is an unsupported command "
2066 2066 "for this adapter type\n");
2067 2067 } else {
2068 2068 *((fct_status_t *)arg) = qlt_force_lip(qlt);
2069 2069 EL(qlt, "forcelip done\n");
2070 2070 }
2071 2071 break;
2072 2072
2073 2073 default:
2074 2074 		EL(qlt, "unsupported cmd - 0x%02X", cmd);
2075 2075 break;
2076 2076 }
2077 2077 }
2078 2078
2079 2079 /* ARGSUSED */
2080 2080 static fct_status_t
2081 2081 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 2082 {
2083 2083 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
2084 2084
2085 2085 	EL(qlt, "FLOGI request not supported\n");
2086 2086 cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 2087 return (FCT_FAILURE);
2088 2088 }
2089 2089
2090 2090 /*
2091 2091 * Return a pointer to n entries in the request queue. Assumes that
2092 2092 * request queue lock is held. Does a very short busy wait if
2093 2093  * less/zero entries are available. Returns NULL if it still cannot
2094 2094  * fulfill the request.
2095 2095 * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096 2096 */
2097 2097 caddr_t
2098 2098 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2099 2099 {
2100 2100 int try = 0;
2101 2101
2102 2102 while (qlt->req_available < n) {
2103 2103 uint32_t val1, val2, val3;
2104 2104 val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2105 2105 val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2106 2106 val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2107 2107 if ((val1 != val2) || (val2 != val3))
2108 2108 continue;
2109 2109
2110 2110 qlt->req_ndx_from_fw = val1;
2111 2111 qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2112 2112 ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2113 2113 (REQUEST_QUEUE_ENTRIES - 1));
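		/*
		 * Ring arithmetic sketch (assuming, for illustration, that
		 * REQUEST_QUEUE_ENTRIES is 512): with req_ndx_to_fw = 10 and
		 * req_ndx_from_fw = 500, in-use = (10 - 500) & 511 = 22, so
		 * req_available = 511 - 22 = 489. One entry is always held
		 * back, presumably so a completely full queue remains
		 * distinguishable from an empty one.
		 */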
2114 2114 if (qlt->req_available < n) {
2115 2115 if (try < 2) {
2116 2116 drv_usecwait(100);
2117 2117 try++;
2118 2118 continue;
2119 2119 } else {
2120 2120 stmf_trace(qlt->qlt_port_alias,
2121 2121 "Req Q is full");
2122 2122 return (NULL);
2123 2123 }
2124 2124 }
2125 2125 break;
2126 2126 }
2127 2127 	/* We don't change anything until the entries are submitted */
2128 2128 return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2129 2129 }
2130 2130
2131 2131 /*
2132 2132  * Updates the req-in pointer to fw. Assumes that the req lock is held.
2133 2133 */
2134 2134 void
2135 2135 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2136 2136 {
2137 2137 ASSERT(n >= 1);
2138 2138 qlt->req_ndx_to_fw += n;
2139 2139 qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 2140 qlt->req_available -= n;
2141 2141 REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 2142 }
2143 2143
2144 2144
2145 2145 /*
2146 2146 * Return a pointer to n entries in the priority request queue. Assumes that
2147 2147 * priority request queue lock is held. Does a very short busy wait if
2148 2148  * less/zero entries are available. Returns NULL if it still cannot
2149 2149  * fulfill the request.
2150 2150 * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151 2151 */
2152 2152 caddr_t
2153 2153 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 2154 {
2155 2155 int try = 0;
2156 2156 uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 2157 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 2158 (PRIORITY_QUEUE_ENTRIES - 1));
2159 2159
2160 2160 while (req_available < n) {
2161 2161 uint32_t val1, val2, val3;
2162 2162 val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 2163 val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 2164 val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2165 2165 if ((val1 != val2) || (val2 != val3))
2166 2166 continue;
2167 2167
2168 2168 qlt->preq_ndx_from_fw = val1;
2169 2169 req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2170 2170 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2171 2171 (PRIORITY_QUEUE_ENTRIES - 1));
2172 2172 if (req_available < n) {
2173 2173 if (try < 2) {
2174 2174 drv_usecwait(100);
2175 2175 try++;
2176 2176 continue;
2177 2177 } else {
2178 2178 return (NULL);
2179 2179 }
2180 2180 }
2181 2181 break;
2182 2182 }
2183 2183 	/* We don't change anything until the entries are submitted */
2184 2184 return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2185 2185 }
2186 2186
2187 2187 /*
2188 2188  * Updates the priority req-in pointer to fw. Assumes that the preq lock is held.
2189 2189 */
2190 2190 void
2191 2191 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2192 2192 {
2193 2193 ASSERT(n >= 1);
2194 2194 qlt->preq_ndx_to_fw += n;
2195 2195 qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2196 2196 REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2197 2197 }
2198 2198
2199 2199 /*
2200 2200 * - Should not be called from Interrupt.
2201 2201 * - A very hardware specific function. Does not touch driver state.
2202 2202 * - Assumes that interrupts are disabled or not there.
2203 2203 * - Expects that the caller makes sure that all activity has stopped
2204 2204  *   and it's ok now to go ahead and reset the chip. Also the caller
2205 2205  *   takes care of post reset damage control.
2206 2206  * - Called by initialize_adapter() and dump_fw() (for reset only).
2207 2207 * - During attach() nothing much is happening and during initialize_adapter()
2208 2208 * the function (caller) does all the housekeeping so that this function
2209 2209 * can execute in peace.
2210 2210 * - Returns 0 on success.
2211 2211 */
2212 2212 static fct_status_t
2213 2213 qlt_reset_chip(qlt_state_t *qlt)
2214 2214 {
2215 2215 int cntr;
2216 2216
2217 2217 EL(qlt, "initiated\n");
2218 2218
2219 2219 /* XXX: Switch off LEDs */
2220 2220
2221 2221 /* Disable Interrupts */
2222 2222 REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 2223 (void) REG_RD32(qlt, REG_INTR_CTRL);
2224 2224 /* Stop DMA */
2225 2225 REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226 2226
2227 2227 /* Wait for DMA to be stopped */
2228 2228 cntr = 0;
2229 2229 while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 2230 delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 2231 cntr++;
2232 2232 /* 3 sec should be more than enough */
2233 2233 if (cntr == 300)
2234 2234 return (QLT_DMA_STUCK);
2235 2235 }
2236 2236
2237 2237 /* Reset the Chip */
2238 2238 REG_WR32(qlt, REG_CTRL_STATUS,
2239 2239 DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240 2240
2241 2241 qlt->qlt_link_up = 0;
2242 2242
2243 2243 drv_usecwait(100);
2244 2244
2245 2245 /* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 2246 cntr = 0;
2247 2247 while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 2248 delay(drv_usectohz(10000));
2249 2249 cntr++;
2250 2250 /* 3 sec should be more than enough */
2251 2251 if (cntr == 300)
2252 2252 return (QLT_ROM_STUCK);
2253 2253 }
2254 2254 /* Disable Interrupts (Probably not needed) */
2255 2255 REG_WR32(qlt, REG_INTR_CTRL, 0);
2256 2256
2257 2257 return (QLT_SUCCESS);
2258 2258 }
2259 2259 /*
2260 2260 * - Should not be called from Interrupt.
2261 2261 * - A very hardware specific function. Does not touch driver state.
2262 2262 * - Assumes that interrupts are disabled or not there.
2263 2263 * - Expects that the caller makes sure that all activity has stopped
2264 2264  *   and it's ok now to go ahead and reset the chip. Also the caller
2265 2265  *   takes care of post reset damage control.
2266 2266  * - Called by initialize_adapter() and dump_fw() (for reset only).
2267 2267 * - During attach() nothing much is happening and during initialize_adapter()
2268 2268 * the function (caller) does all the housekeeping so that this function
2269 2269 * can execute in peace.
2270 2270 * - Returns 0 on success.
2271 2271 */
2272 2272 static fct_status_t
2273 2273 qlt_download_fw(qlt_state_t *qlt)
2274 2274 {
2275 2275 uint32_t start_addr;
2276 2276 fct_status_t ret;
2277 2277
2278 2278 EL(qlt, "initiated\n");
2279 2279
2280 2280 (void) qlt_reset_chip(qlt);
2281 2281
2282 2282 if (qlt->qlt_81xx_chip) {
2283 2283 qlt_mps_reset(qlt);
2284 2284 }
2285 2285
2286 2286 /* Load the two segments */
2287 2287 if (qlt->fw_code01 != NULL) {
2288 2288 ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2289 2289 qlt->fw_addr01);
2290 2290 if (ret == QLT_SUCCESS) {
2291 2291 ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2292 2292 qlt->fw_length02, qlt->fw_addr02);
2293 2293 }
2294 2294 start_addr = qlt->fw_addr01;
2295 2295 } else if (qlt->qlt_81xx_chip) {
2296 2296 ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2297 2297 fw8100_addr01);
2298 2298 if (ret == QLT_SUCCESS) {
2299 2299 ret = qlt_load_risc_ram(qlt, fw8100_code02,
2300 2300 fw8100_length02, fw8100_addr02);
2301 2301 }
2302 2302 start_addr = fw8100_addr01;
2303 2303 } else if (qlt->qlt_25xx_chip) {
2304 2304 ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2305 2305 fw2500_addr01);
2306 2306 if (ret == QLT_SUCCESS) {
2307 2307 ret = qlt_load_risc_ram(qlt, fw2500_code02,
2308 2308 fw2500_length02, fw2500_addr02);
2309 2309 }
2310 2310 start_addr = fw2500_addr01;
2311 2311 } else {
2312 2312 ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2313 2313 fw2400_addr01);
2314 2314 if (ret == QLT_SUCCESS) {
2315 2315 ret = qlt_load_risc_ram(qlt, fw2400_code02,
2316 2316 fw2400_length02, fw2400_addr02);
2317 2317 }
2318 2318 start_addr = fw2400_addr01;
2319 2319 }
2320 2320 if (ret != QLT_SUCCESS) {
2321 2321 EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2322 2322 return (ret);
2323 2323 }
2324 2324
2325 2325 /* Verify Checksum */
2326 2326 REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2327 2327 REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2328 2328 REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2329 2329 ret = qlt_raw_mailbox_command(qlt);
2330 2330 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2331 2331 if (ret != QLT_SUCCESS) {
2332 2332 EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2333 2333 return (ret);
2334 2334 }
2335 2335
2336 2336 /* Execute firmware */
2337 2337 REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2338 2338 REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2339 2339 REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2340 2340 REG_WR16(qlt, REG_MBOX(3), 0);
2341 2341 REG_WR16(qlt, REG_MBOX(4), 1); /* 25xx enable additional credits */
2342 2342 ret = qlt_raw_mailbox_command(qlt);
2343 2343 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2344 2344 if (ret != QLT_SUCCESS) {
2345 2345 EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2346 2346 return (ret);
2347 2347 }
2348 2348
2349 2349 /* Get revisions (About Firmware) */
2350 2350 REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2351 2351 ret = qlt_raw_mailbox_command(qlt);
2352 2352 qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2353 2353 qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2354 2354 qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2355 2355 qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2356 2356 qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2357 2357 qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2358 2358 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2359 2359 if (ret != QLT_SUCCESS) {
2360 2360 EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2361 2361 return (ret);
2362 2362 }
2363 2363
2364 2364 return (QLT_SUCCESS);
2365 2365 }
2366 2366
2367 2367 /*
2368 2368 * Used only from qlt_download_fw().
2369 2369 */
2370 2370 static fct_status_t
2371 2371 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2372 2372 uint32_t word_count, uint32_t risc_addr)
2373 2373 {
2374 2374 uint32_t words_sent = 0;
2375 2375 uint32_t words_being_sent;
2376 2376 uint32_t *cur_host_addr;
2377 2377 uint32_t cur_risc_addr;
2378 2378 uint64_t da;
2379 2379 fct_status_t ret;
2380 2380
2381 2381 while (words_sent < word_count) {
2382 2382 cur_host_addr = &(host_addr[words_sent]);
2383 2383 cur_risc_addr = risc_addr + (words_sent << 2);
2384 2384 words_being_sent = min(word_count - words_sent,
2385 2385 TOTAL_DMA_MEM_SIZE >> 2);
2386 2386 ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2387 2387 (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2388 2388 DDI_DEV_AUTOINCR);
2389 2389 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2390 2390 words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2391 2391 da = qlt->queue_mem_cookie.dmac_laddress;
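		/*
		 * LOAD RAM EXTENDED (0x0B) mailbox layout as written below:
		 * mailboxes 1 and 8 carry the low and high 16 bits of the
		 * RISC address, 2/3 the low 32 bits of the 64-bit DMA
		 * address, 6/7 its high 32 bits, and 4/5 the 32-bit word
		 * count of this chunk.
		 */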
2392 2392 REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2393 2393 REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2394 2394 REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2395 2395 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2396 2396 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2397 2397 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2398 2398 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2399 2399 REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
2400 2400 REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2401 2401 ret = qlt_raw_mailbox_command(qlt);
2402 2402 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2403 2403 if (ret != QLT_SUCCESS) {
2404 2404 EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2405 2405 ret);
2406 2406 return (ret);
2407 2407 }
2408 2408 words_sent += words_being_sent;
2409 2409 }
2410 2410 return (QLT_SUCCESS);
2411 2411 }
2412 2412
2413 2413 /*
2414 2414 * Not used during normal operation. Only during driver init.
2415 2415 * Assumes that interrupts are disabled and mailboxes are loaded.
2416 2416  * Just triggers the mailbox command and waits for the completion.
2417 2417  * Also expects that there is nothing else going on and we will only
2418 2418 * get back a mailbox completion from firmware.
2419 2419 * ---DOES NOT CLEAR INTERRUPT---
2420 2420 * Used only from the code path originating from
2421 2421 * qlt_reset_chip_and_download_fw()
2422 2422 */
2423 2423 static fct_status_t
2424 2424 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 2425 {
2426 2426 int cntr = 0;
2427 2427 uint32_t status;
2428 2428
2429 2429 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2430 2430 while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 2431 cntr++;
2432 2432 if (cntr == 100) {
2433 2433 return (QLT_MAILBOX_STUCK);
2434 2434 }
2435 2435 delay(drv_usectohz(10000));
2436 2436 }
2437 2437 status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438 2438
2439 2439 if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 2440 (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 2441 (status == MBX_CMD_SUCCESSFUL) ||
2442 2442 (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 2443 uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 2444 if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 2445 return (QLT_SUCCESS);
2446 2446 } else {
2447 2447 return (QLT_MBOX_FAILED | mbox0);
2448 2448 }
2449 2449 }
2450 2450 /* This is unexpected, dump a message */
2451 2451 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2452 2452 ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 2453 return (QLT_UNEXPECTED_RESPONSE);
2454 2454 }
2455 2455
2456 2456 static mbox_cmd_t *
2457 2457 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2458 2458 {
2459 2459 mbox_cmd_t *mcp;
2460 2460
2461 2461 mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2462 2462 if (dma_size) {
2463 2463 qlt_dmem_bctl_t *bctl;
2464 2464 uint64_t da;
2465 2465
2466 2466 mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2467 2467 if (mcp->dbuf == NULL) {
2468 2468 kmem_free(mcp, sizeof (*mcp));
2469 2469 return (NULL);
2470 2470 }
2471 2471 mcp->dbuf->db_data_size = dma_size;
2472 2472 ASSERT(mcp->dbuf->db_sglist_length == 1);
2473 2473
2474 2474 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2475 2475 da = bctl->bctl_dev_addr;
2476 2476 /* This is the most common initialization of dma ptrs */
2477 2477 mcp->to_fw[3] = LSW(LSD(da));
2478 2478 mcp->to_fw[2] = MSW(LSD(da));
2479 2479 mcp->to_fw[7] = LSW(MSD(da));
2480 2480 mcp->to_fw[6] = MSW(MSD(da));
2481 2481 mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2482 2482 }
2483 2483 mcp->to_fw_mask |= BIT_0;
2484 2484 mcp->from_fw_mask |= BIT_0;
2485 2485 return (mcp);
2486 2486 }
2487 2487
2488 2488 void
2489 2489 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 2490 {
2491 2491 if (mcp->dbuf)
2492 2492 qlt_i_dmem_free(qlt, mcp->dbuf);
2493 2493 kmem_free(mcp, sizeof (*mcp));
2494 2494 }
2495 2495
2496 2496 /*
2497 2497 * This can sleep. Should never be called from interrupt context.
2498 2498 */
2499 2499 static fct_status_t
2500 2500 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2501 2501 {
2502 2502 int retries;
2503 2503 int i;
2504 2504 char info[QLT_INFO_LEN];
2505 2505
2506 2506 if (curthread->t_flag & T_INTR_THREAD) {
2507 2507 ASSERT(0);
2508 2508 return (QLT_MBOX_FAILED);
2509 2509 }
2510 2510
2511 2511 mutex_enter(&qlt->mbox_lock);
2512 2512 /* See if mailboxes are still uninitialized */
2513 2513 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2514 2514 mutex_exit(&qlt->mbox_lock);
2515 2515 return (QLT_MBOX_NOT_INITIALIZED);
2516 2516 }
2517 2517
2518 2518 /* Wait to grab the mailboxes */
2519 2519 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2520 2520 retries++) {
2521 2521 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2522 2522 if ((retries > 5) ||
2523 2523 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2524 2524 mutex_exit(&qlt->mbox_lock);
2525 2525 return (QLT_MBOX_BUSY);
2526 2526 }
2527 2527 }
2528 2528 /* Make sure we always ask for mailbox 0 */
2529 2529 mcp->from_fw_mask |= BIT_0;
2530 2530
2531 2531 /* Load mailboxes, set state and generate RISC interrupt */
2532 2532 qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2533 2533 qlt->mcp = mcp;
2534 2534 for (i = 0; i < MAX_MBOXES; i++) {
2535 2535 if (mcp->to_fw_mask & ((uint32_t)1 << i))
2536 2536 REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2537 2537 }
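	/*
	 * Only mailboxes whose bit is set in to_fw_mask are written above;
	 * from_fw_mask likewise tells the interrupt handler (qlt_isr) which
	 * mailboxes to read back on completion.
	 */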
2538 2538 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2539 2539
2540 2540 qlt_mbox_wait_loop:;
2541 2541 /* Wait for mailbox command completion */
2542 2542 if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2543 2543 + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2544 2544 (void) snprintf(info, sizeof (info),
2545 2545 "qlt_mailbox_command: qlt-%p, "
2546 2546 "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2547 2547 qlt->mcp = NULL;
2548 2548 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2549 2549 mutex_exit(&qlt->mbox_lock);
2550 2550
2551 2551 /*
2552 2552 * XXX Throw HBA fatal error event
2553 2553 */
2554 2554 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2555 2555 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2556 2556 return (QLT_MBOX_TIMEOUT);
2557 2557 }
2558 2558 if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2559 2559 goto qlt_mbox_wait_loop;
2560 2560
2561 2561 qlt->mcp = NULL;
2562 2562
2563 2563 	/* Make sure it's a completion */
2564 2564 if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2565 2565 ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2566 2566 mutex_exit(&qlt->mbox_lock);
2567 2567 return (QLT_MBOX_ABORTED);
2568 2568 }
2569 2569
2570 2570 	/* MBox command completed. Clear state, return based on mbox 0 */
2571 2571 /* Mailboxes are already loaded by interrupt routine */
2572 2572 qlt->mbox_io_state = MBOX_STATE_READY;
2573 2573 mutex_exit(&qlt->mbox_lock);
2574 2574 if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
2575 2575 return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2576 2576
2577 2577 return (QLT_SUCCESS);
2578 2578 }
2579 2579
2580 2580 /*
2581 2581 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582 2582 */
2583 2583 /* ARGSUSED */
2584 2584 static uint_t
2585 2585 qlt_isr(caddr_t arg, caddr_t arg2)
2586 2586 {
2587 2587 qlt_state_t *qlt = (qlt_state_t *)arg;
2588 2588 uint32_t risc_status, intr_type;
2589 2589 int i;
2590 2590 int intr_loop_count;
2591 2591 char info[QLT_INFO_LEN];
2592 2592
2593 2593 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2594 2594 if (!mutex_tryenter(&qlt->intr_lock)) {
2595 2595 /*
2596 2596 * Normally we will always get this lock. If tryenter is
2597 2597 * failing then it means that driver is trying to do
2598 2598 * some cleanup and is masking the intr but some intr
2599 2599 * has sneaked in between. See if our device has generated
2600 2600 * this intr. If so then wait a bit and return claimed.
2601 2601 * If not then return claimed if this is the 1st instance
2602 2602 		 * of an interrupt after the driver has grabbed the lock.
2603 2603 */
2604 2604 if (risc_status & BIT_15) {
2605 2605 drv_usecwait(10);
2606 2606 return (DDI_INTR_CLAIMED);
2607 2607 } else if (qlt->intr_sneak_counter) {
2608 2608 qlt->intr_sneak_counter--;
2609 2609 return (DDI_INTR_CLAIMED);
2610 2610 } else {
2611 2611 return (DDI_INTR_UNCLAIMED);
2612 2612 }
2613 2613 }
2614 2614 if (((risc_status & BIT_15) == 0) ||
2615 2615 (qlt->qlt_intr_enabled == 0)) {
2616 2616 /*
2617 2617 		 * This might be a pure coincidence that we are operating
2618 2618 		 * in an interrupt-disabled mode and another device
2619 2619 * sharing the interrupt line has generated an interrupt
2620 2620 * while an interrupt from our device might be pending. Just
2621 2621 * ignore it and let the code handling the interrupt
2622 2622 * disabled mode handle it.
2623 2623 */
2624 2624 mutex_exit(&qlt->intr_lock);
2625 2625 return (DDI_INTR_UNCLAIMED);
2626 2626 }
2627 2627
2628 2628 /*
2629 2629 	 * XXX take care of the MSI case: disable intrs.
2630 2630 	 * It's going to be complicated because of the max iterations,
2631 2631 	 * as the hba will have posted the intr which did not go on PCI,
2632 2632 	 * but we did not service it either because of max iterations.
2633 2633 	 * Maybe offload the intr to a different thread.
2634 2634 */
2635 2635 intr_loop_count = 0;
2636 2636
2637 2637 REG_WR32(qlt, REG_INTR_CTRL, 0);
2638 2638
2639 2639 intr_again:;
2640 2640
2641 2641 /* check for risc pause */
2642 2642 if (risc_status & BIT_8) {
2643 2643 EL(qlt, "Risc Pause status=%xh\n", risc_status);
2644 2644 cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2645 2645 qlt->instance, risc_status);
2646 2646 (void) snprintf(info, sizeof (info), "Risc Pause %08x",
2647 2647 risc_status);
2648 2648 (void) fct_port_shutdown(qlt->qlt_port,
2649 2649 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2650 2650 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2651 2651 }
2652 2652
2653 2653 /* First check for high performance path */
2654 2654 intr_type = risc_status & 0xff;
2655 2655 if (intr_type == 0x1D) {
2656 2656 qlt->atio_ndx_from_fw = (uint16_t)
2657 2657 REG_RD32(qlt, REG_ATIO_IN_PTR);
2658 2658 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 2659 qlt->resp_ndx_from_fw = risc_status >> 16;
2660 2660 qlt_handle_atio_queue_update(qlt);
2661 2661 qlt_handle_resp_queue_update(qlt);
2662 2662 } else if (intr_type == 0x1C) {
2663 2663 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2664 2664 qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2665 2665 qlt_handle_atio_queue_update(qlt);
2666 2666 } else if (intr_type == 0x13) {
2667 2667 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2668 2668 qlt->resp_ndx_from_fw = risc_status >> 16;
2669 2669 qlt_handle_resp_queue_update(qlt);
2670 2670 } else if (intr_type == 0x12) {
2671 2671 uint16_t code = (uint16_t)(risc_status >> 16);
2672 2672 uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2673 2673 uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2674 2674 uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2675 2675 uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2676 2676 uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2677 2677 uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2678 2678
2679 2679 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2680 2680 stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2681 2681 " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2682 2682 mbox5, mbox6);
2683 2683 EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2684 2684 code, mbox1, mbox2, mbox3, mbox5, mbox6);
2685 2685
2686 2686 if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2687 2687 if (qlt->qlt_link_up) {
2688 2688 fct_handle_event(qlt->qlt_port,
2689 2689 FCT_EVENT_LINK_RESET, 0, 0);
2690 2690 }
2691 2691 } else if (code == 0x8012) {
2692 2692 qlt->qlt_link_up = 0;
2693 2693 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2694 2694 0, 0);
2695 2695 } else if (code == 0x8011) {
2696 2696 switch (mbox1) {
2697 2697 case 0: qlt->link_speed = PORT_SPEED_1G;
2698 2698 break;
2699 2699 case 1: qlt->link_speed = PORT_SPEED_2G;
2700 2700 break;
2701 2701 case 3: qlt->link_speed = PORT_SPEED_4G;
2702 2702 break;
2703 2703 case 4: qlt->link_speed = PORT_SPEED_8G;
2704 2704 break;
2705 2705 case 0x13: qlt->link_speed = PORT_SPEED_10G;
2706 2706 break;
2707 2707 default:
2708 2708 qlt->link_speed = PORT_SPEED_UNKNOWN;
2709 2709 }
2710 2710 qlt->qlt_link_up = 1;
2711 2711 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2712 2712 0, 0);
2713 2713 } else if ((code == 0x8002) || (code == 0x8003) ||
2714 2714 (code == 0x8004) || (code == 0x8005)) {
2715 2715 (void) snprintf(info, sizeof (info),
2716 2716 "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2717 2717 code, mbox1, mbox2, mbox5, mbox6);
2718 2718 (void) fct_port_shutdown(qlt->qlt_port,
2719 2719 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2720 2720 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2721 2721 } else if (code == 0x800F) {
2722 2722 (void) snprintf(info, sizeof (info),
2723 2723 "Got 800F, mb1=%x mb2=%x mb3=%x",
2724 2724 mbox1, mbox2, mbox3);
2725 2725
2726 2726 if (mbox1 != 1) {
2727 2727 /* issue "verify fw" */
2728 2728 qlt_verify_fw(qlt);
2729 2729 }
2730 2730 } else if (code == 0x8101) {
2731 2731 (void) snprintf(info, sizeof (info),
2732 2732 "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2733 2733 code, mbox1, mbox2, mbox3);
2734 2734
2735 2735 /* check if "ACK" is required (timeout != 0) */
2736 2736 if (mbox1 & 0x0f00) {
2737 2737 caddr_t req;
2738 2738
2739 2739 /*
2740 2740 * Ack the request (queue work to do it?)
2741 2741 * using a mailbox iocb
2742 2742 */
2743 2743 mutex_enter(&qlt->req_lock);
2744 2744 req = qlt_get_req_entries(qlt, 1);
2745 2745 if (req) {
2746 2746 bzero(req, IOCB_SIZE);
2747 2747 req[0] = 0x39; req[1] = 1;
2748 2748 QMEM_WR16(qlt, req+8, 0x101);
2749 2749 QMEM_WR16(qlt, req+10, mbox1);
2750 2750 QMEM_WR16(qlt, req+12, mbox2);
2751 2751 QMEM_WR16(qlt, req+14, mbox3);
2752 2752 QMEM_WR16(qlt, req+16, mbox4);
2753 2753 QMEM_WR16(qlt, req+18, mbox5);
2754 2754 QMEM_WR16(qlt, req+20, mbox6);
2755 2755 qlt_submit_req_entries(qlt, 1);
2756 2756 } else {
2757 2757 (void) snprintf(info, sizeof (info),
2758 2758 "IDC ACK failed");
2759 2759 }
2760 2760 mutex_exit(&qlt->req_lock);
2761 2761 }
2762 2762 }
2763 2763 } else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2764 2764 /* Handle mailbox completion */
2765 2765 mutex_enter(&qlt->mbox_lock);
2766 2766 if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2767 2767 cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2768 2768 " when driver wasn't waiting for it %d",
2769 2769 qlt->instance, qlt->mbox_io_state);
2770 2770 } else {
2771 2771 for (i = 0; i < MAX_MBOXES; i++) {
2772 2772 if (qlt->mcp->from_fw_mask &
2773 2773 (((uint32_t)1) << i)) {
2774 2774 qlt->mcp->from_fw[i] =
2775 2775 REG_RD16(qlt, REG_MBOX(i));
2776 2776 }
2777 2777 }
2778 2778 qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2779 2779 }
2780 2780 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2781 2781 cv_broadcast(&qlt->mbox_cv);
2782 2782 mutex_exit(&qlt->mbox_lock);
2783 2783 } else {
2784 2784 cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2785 2785 qlt->instance, intr_type);
2786 2786 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2787 2787 }
2788 2788
2789 2789 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting */
2790 2790 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2791 2791 if ((risc_status & BIT_15) &&
2792 2792 (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2793 2793 goto intr_again;
2794 2794 }
2795 2795
2796 2796 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2797 2797
2798 2798 mutex_exit(&qlt->intr_lock);
2799 2799 return (DDI_INTR_CLAIMED);
2800 2800 }
2801 2801
2802 2802 /* **************** NVRAM Functions ********************** */
2803 2803
2804 2804 fct_status_t
2805 2805 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2806 2806 {
2807 2807 uint32_t timer;
2808 2808
2809 2809 /* Clear access error flag */
2810 2810 REG_WR32(qlt, REG_CTRL_STATUS,
2811 2811 REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2812 2812
2813 2813 REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
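	/*
	 * BIT_31 of REG_FLASH_ADDR doubles as the done flag: it is written
	 * clear to start the read and polled below until the controller
	 * sets it (or the roughly 30ms timeout expires).
	 */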
2814 2814
2815 2815 /* Wait for READ cycle to complete. */
2816 2816 for (timer = 3000; timer; timer--) {
2817 2817 if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2818 2818 break;
2819 2819 }
2820 2820 drv_usecwait(10);
2821 2821 }
2822 2822 if (timer == 0) {
2823 2823 EL(qlt, "flash timeout\n");
2824 2824 return (QLT_FLASH_TIMEOUT);
2825 2825 } else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2826 2826 EL(qlt, "flash access error\n");
2827 2827 return (QLT_FLASH_ACCESS_ERROR);
2828 2828 }
2829 2829
2830 2830 *bp = REG_RD32(qlt, REG_FLASH_DATA);
2831 2831
2832 2832 return (QLT_SUCCESS);
2833 2833 }
2834 2834
2835 2835 fct_status_t
2836 2836 qlt_read_nvram(qlt_state_t *qlt)
2837 2837 {
2838 2838 uint32_t index, addr, chksum;
2839 2839 uint32_t val, *ptr;
2840 2840 fct_status_t ret;
2841 2841 qlt_nvram_t *nv;
2842 2842 uint64_t empty_node_name = 0;
2843 2843
2844 2844 if (qlt->qlt_81xx_chip) {
2845 2845 addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2846 2846 QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2847 2847 } else if (qlt->qlt_25xx_chip) {
2848 2848 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2849 2849 QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2850 2850 } else {
2851 2851 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2852 2852 NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2853 2853 }
2854 2854 mutex_enter(&qlt_global_lock);
2855 2855
2856 2856 /* Pause RISC. */
2857 2857 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2858 2858 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
2859 2859
2860 2860 /* Get NVRAM data and calculate checksum. */
2861 2861 ptr = (uint32_t *)qlt->nvram;
2862 2862 chksum = 0;
2863 2863 for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2864 2864 ret = qlt_read_flash_word(qlt, addr++, &val);
2865 2865 if (ret != QLT_SUCCESS) {
2866 2866 EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2867 2867 mutex_exit(&qlt_global_lock);
2868 2868 return (ret);
2869 2869 }
2870 2870 chksum += val;
2871 2871 *ptr = LE_32(val);
2872 2872 ptr++;
2873 2873 }
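	/*
	 * chksum is the 32-bit sum of every word read; a valid NVRAM image
	 * sums to zero, which is what the sanity check below relies on
	 * (along with the "ISP " signature and a non-zero version).
	 */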
2874 2874
2875 2875 /* Release RISC Pause */
2876 2876 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
2877 2877 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
2878 2878
2879 2879 mutex_exit(&qlt_global_lock);
2880 2880
2881 2881 /* Sanity check NVRAM Data */
2882 2882 nv = qlt->nvram;
2883 2883 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2884 2884 nv->id[2] != 'P' || nv->id[3] != ' ' ||
2885 2885 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2886 2886 EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2887 2887 nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2888 2888 nv->nvram_version[1], nv->nvram_version[0]);
2889 2889 return (QLT_BAD_NVRAM_DATA);
2890 2890 }
2891 2891
2892 2892 /* If node name is zero, hand craft it from port name */
2893 2893 if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2894 2894 bcopy(nv->port_name, nv->node_name, 8);
2895 2895 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2896 2896 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2897 2897 }
2898 2898
2899 2899 return (QLT_SUCCESS);
2900 2900 }
2901 2901
2902 2902 uint32_t
2903 2903 qlt_sync_atio_queue(qlt_state_t *qlt)
2904 2904 {
2905 2905 uint32_t total_ent;
2906 2906
2907 2907 if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2908 2908 total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2909 2909 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2910 2910 + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2911 2911 DDI_DMA_SYNC_FORCPU);
2912 2912 } else {
2913 2913 total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2914 2914 qlt->atio_ndx_from_fw;
2915 2915 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2916 2916 + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2917 2917 qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2918 2918 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2919 2919 ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2920 2920 DDI_DMA_SYNC_FORCPU);
2921 2921 }
2922 2922 return (total_ent);
2923 2923 }
2924 2924
2925 2925 void
2926 2926 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2927 2927 {
2928 2928 uint32_t total_ent;
2929 2929
2930 2930 if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2931 2931 return;
2932 2932
2933 2933 total_ent = qlt_sync_atio_queue(qlt);
2934 2934
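	/*
	 * Each ATIO queue entry is 64 bytes (hence the << 6 indexing);
	 * byte 0 is the entry type and byte 1 the number of 64-byte
	 * entries the IOCB occupies, which is how far the index advances.
	 */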
2935 2935 do {
2936 2936 uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2937 2937 qlt->atio_ndx_to_fw << 6];
2938 2938 uint32_t ent_cnt;
2939 2939
2940 2940 ent_cnt = (uint32_t)(atio[1]);
2941 2941 if (ent_cnt > total_ent) {
2942 2942 break;
2943 2943 }
2944 2944 switch ((uint8_t)(atio[0])) {
2945 2945 case 0x0d: /* INOT */
2946 2946 qlt_handle_inot(qlt, atio);
2947 2947 break;
2948 2948 case 0x06: /* ATIO */
2949 2949 qlt_handle_atio(qlt, atio);
2950 2950 break;
2951 2951 default:
2952 2952 EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2953 2953 cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2954 2954 "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2955 2955 break;
2956 2956 }
2957 2957 qlt->atio_ndx_to_fw = (uint16_t)(
2958 2958 (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2959 2959 total_ent -= ent_cnt;
2960 2960 } while (total_ent > 0);
2961 2961 REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2962 2962 }
2963 2963
2964 2964 uint32_t
2965 2965 qlt_sync_resp_queue(qlt_state_t *qlt)
2966 2966 {
2967 2967 uint32_t total_ent;
2968 2968
2969 2969 if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2970 2970 total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2971 2971 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2972 2972 RESPONSE_QUEUE_OFFSET
2973 2973 + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2974 2974 DDI_DMA_SYNC_FORCPU);
2975 2975 } else {
2976 2976 total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2977 2977 qlt->resp_ndx_from_fw;
2978 2978 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2979 2979 RESPONSE_QUEUE_OFFSET
2980 2980 + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2981 2981 qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2982 2982 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2983 2983 RESPONSE_QUEUE_OFFSET,
2984 2984 qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2985 2985 }
2986 2986 return (total_ent);
2987 2987 }
2988 2988
2989 2989 void
2990 2990 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2991 2991 {
2992 2992 uint32_t total_ent;
2993 2993 uint8_t c;
2994 2994
2995 2995 if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2996 2996 return;
2997 2997
2998 2998 total_ent = qlt_sync_resp_queue(qlt);
2999 2999
3000 3000 do {
3001 3001 caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
3002 3002 uint32_t ent_cnt;
3003 3003
3004 3004 ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
3005 3005 if (ent_cnt > total_ent) {
3006 3006 break;
3007 3007 }
3008 3008 switch ((uint8_t)(resp[0])) {
3009 3009 case 0x12: /* CTIO completion */
3010 3010 qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
3011 3011 break;
3012 3012 case 0x0e: /* NACK */
3013 3013 /* Do Nothing */
3014 3014 break;
3015 3015 case 0x1b: /* Verify FW */
3016 3016 qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3017 3017 break;
3018 3018 case 0x29: /* CT PassThrough */
3019 3019 qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3020 3020 break;
3021 3021 case 0x33: /* Abort IO IOCB completion */
3022 3022 qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3023 3023 break;
3024 3024 case 0x51: /* PUREX */
3025 3025 qlt_handle_purex(qlt, (uint8_t *)resp);
3026 3026 break;
3027 3027 case 0x52:
3028 3028 qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3029 3029 break;
3030 3030 case 0x53: /* ELS passthrough */
3031 3031 c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3032 3032 if (c == 0) {
3033 3033 qlt_handle_sol_els_completion(qlt,
3034 3034 (uint8_t *)resp);
3035 3035 } else if (c == 3) {
3036 3036 qlt_handle_unsol_els_abort_completion(qlt,
3037 3037 (uint8_t *)resp);
3038 3038 } else {
3039 3039 qlt_handle_unsol_els_completion(qlt,
3040 3040 (uint8_t *)resp);
3041 3041 }
3042 3042 break;
3043 3043 case 0x54: /* ABTS received */
3044 3044 qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
3045 3045 break;
3046 3046 case 0x55: /* ABTS completion */
3047 3047 qlt_handle_abts_completion(qlt, (uint8_t *)resp);
3048 3048 break;
3049 3049 default:
3050 3050 EL(qlt, "response entry=%xh\n", resp[0]);
3051 3051 break;
3052 3052 }
3053 3053 qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3054 3054 (RESPONSE_QUEUE_ENTRIES - 1);
3055 3055 total_ent -= ent_cnt;
3056 3056 } while (total_ent > 0);
3057 3057 REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
3058 3058 }
3059 3059
3060 3060 fct_status_t
3061 3061 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3062 3062 uint16_t *ret_handle)
3063 3063 {
3064 3064 fct_status_t ret;
3065 3065 mbox_cmd_t *mcp;
3066 3066 uint16_t n;
3067 3067 uint16_t h;
3068 3068 uint32_t ent_id;
3069 3069 uint8_t *p;
3070 3070 int found = 0;
3071 3071
3072 3072 mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3073 3073 if (mcp == NULL) {
3074 3074 return (STMF_ALLOC_FAILURE);
3075 3075 }
3076 3076 mcp->to_fw[0] = MBC_GET_ID_LIST;
3077 3077 mcp->to_fw[8] = 2048 * 8;
3078 3078 mcp->to_fw[9] = 0;
3079 3079 mcp->to_fw_mask |= BIT_9 | BIT_8;
3080 3080 mcp->from_fw_mask |= BIT_1 | BIT_2;
3081 3081
3082 3082 ret = qlt_mailbox_command(qlt, mcp);
3083 3083 if (ret != QLT_SUCCESS) {
3084 3084 EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3085 3085 cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3086 3086 "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3087 3087 mcp->from_fw[1], mcp->from_fw[2]);
3088 3088 qlt_free_mailbox_command(qlt, mcp);
3089 3089 return (ret);
3090 3090 }
3091 3091 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3092 3092 p = mcp->dbuf->db_sglist[0].seg_addr;
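	/*
	 * As parsed below, each GET ID LIST entry is 8 bytes: a
	 * little-endian 24-bit port ID in bytes 0-2 and the 16-bit firmware
	 * handle in bytes 4-5; from_fw[1] holds the number of entries.
	 */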
3093 3093 for (n = 0; n < mcp->from_fw[1]; n++) {
3094 3094 ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3095 3095 h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3096 3096 if (ent_id == id) {
3097 3097 found = 1;
3098 3098 *ret_handle = h;
3099 3099 if ((cmd_handle != FCT_HANDLE_NONE) &&
3100 3100 (cmd_handle != h)) {
3101 3101 cmn_err(CE_WARN, "login for portid %x came in "
3102 3102 "with handle %x, while the portid was "
3103 3103 "already using a different handle %x",
3104 3104 id, cmd_handle, h);
3105 3105 qlt_free_mailbox_command(qlt, mcp);
3106 3106 return (QLT_FAILURE);
3107 3107 }
3108 3108 break;
3109 3109 }
3110 3110 if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3111 3111 cmn_err(CE_WARN, "login for portid %x came in with "
3112 3112 "handle %x, while the handle was already in use "
3113 3113 "for portid %x", id, cmd_handle, ent_id);
3114 3114 qlt_free_mailbox_command(qlt, mcp);
3115 3115 return (QLT_FAILURE);
3116 3116 }
3117 3117 p += 8;
3118 3118 }
3119 3119 if (!found) {
3120 3120 *ret_handle = cmd_handle;
3121 3121 }
3122 3122 qlt_free_mailbox_command(qlt, mcp);
3123 3123 return (FCT_SUCCESS);
3124 3124 }
3125 3125
3126 3126 /* ARGSUSED */
3127 3127 fct_status_t
3128 3128 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3129 3129 fct_cmd_t *login)
3130 3130 {
3131 3131 uint8_t *p;
3132 3132
3133 3133 p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3134 3134 p[0] = ELS_OP_PLOGI;
3135 3135 *((uint16_t *)(&p[4])) = 0x2020;
3136 3136 p[7] = 3;
3137 3137 p[8] = 0x88;
3138 3138 p[10] = 8;
3139 3139 p[13] = 0xff; p[15] = 0x1f;
3140 3140 p[18] = 7; p[19] = 0xd0;
3141 3141
3142 3142 bcopy(port->port_pwwn, p + 20, 8);
3143 3143 bcopy(port->port_nwwn, p + 28, 8);
3144 3144
3145 3145 p[68] = 0x80;
3146 3146 p[74] = 8;
3147 3147 p[77] = 0xff;
3148 3148 p[81] = 1;
3149 3149
3150 3150 return (FCT_SUCCESS);
3151 3151 }
3152 3152
3153 3153 /* ARGSUSED */
3154 3154 fct_status_t
3155 3155 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3156 3156 fct_cmd_t *login)
3157 3157 {
3158 3158 return (FCT_SUCCESS);
3159 3159 }
3160 3160
3161 3161 fct_status_t
3162 3162 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3163 3163 fct_cmd_t *login)
3164 3164 {
3165 3165 uint16_t h;
3166 3166 fct_status_t ret;
3167 3167 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3168 3168
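	/*
	 * Well-known fabric addresses (0xFFFFFC-0xFFFFFF) map to fixed
	 * firmware handles; everything else is looked up in the firmware's
	 * ID list.
	 */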
3169 3169 switch (rp->rp_id) {
3170 3170 case 0xFFFFFC: h = 0x7FC; break;
3171 3171 case 0xFFFFFD: h = 0x7FD; break;
3172 3172 case 0xFFFFFE: h = 0x7FE; break;
3173 3173 case 0xFFFFFF: h = 0x7FF; break;
3174 3174 default:
3175 3175 ret = qlt_portid_to_handle(qlt, rp->rp_id,
3176 3176 login->cmd_rp_handle, &h);
3177 3177 if (ret != FCT_SUCCESS) {
3178 3178 EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3179 3179 return (ret);
3180 3180 }
3181 3181 }
3182 3182
3183 3183 if (login->cmd_type == FCT_CMD_SOL_ELS) {
3184 3184 ret = qlt_fill_plogi_req(port, rp, login);
3185 3185 } else {
3186 3186 ret = qlt_fill_plogi_resp(port, rp, login);
3187 3187 }
3188 3188
3189 3189 if (ret != FCT_SUCCESS) {
3190 3190 EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3191 3191 return (ret);
3192 3192 }
3193 3193
3194 3194 if (h == FCT_HANDLE_NONE)
3195 3195 return (FCT_SUCCESS);
3196 3196
3197 3197 if (rp->rp_handle == FCT_HANDLE_NONE) {
3198 3198 rp->rp_handle = h;
3199 3199 return (FCT_SUCCESS);
3200 3200 }
3201 3201
3202 3202 if (rp->rp_handle == h)
3203 3203 return (FCT_SUCCESS);
3204 3204
3205 3205 EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3206 3206 return (FCT_FAILURE);
3207 3207 }
3208 3208 /* invoked in single thread */
3209 3209 fct_status_t
3210 3210 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3211 3211 {
3212 3212 uint8_t *req;
3213 3213 qlt_state_t *qlt;
3214 3214 clock_t dereg_req_timer;
3215 3215 fct_status_t ret;
3216 3216
3217 3217 qlt = (qlt_state_t *)port->port_fca_private;
3218 3218
3219 3219 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3220 3220 (qlt->qlt_state == FCT_STATE_OFFLINING))
3221 3221 return (FCT_SUCCESS);
3222 3222 ASSERT(qlt->rp_id_in_dereg == 0);
3223 3223
3224 3224 mutex_enter(&qlt->preq_lock);
3225 3225 req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3226 3226 if (req == NULL) {
3227 3227 mutex_exit(&qlt->preq_lock);
3228 3228 return (FCT_BUSY);
3229 3229 }
3230 3230 bzero(req, IOCB_SIZE);
3231 3231 req[0] = 0x52; req[1] = 1;
3232 3232 /* QMEM_WR32(qlt, (&req[4]), 0xffffffff); */
3233 3233 QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3234 3234 QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3235 3235 QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3236 3236 qlt->rp_id_in_dereg = rp->rp_id;
3237 3237 qlt_submit_preq_entries(qlt, 1);
3238 3238
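	/*
	 * The firmware completes this request with a 0x52 entry on the
	 * response queue; qlt_handle_dereg_completion() records the result
	 * in rp_dereg_status and signals rp_dereg_cv, which the timed wait
	 * below picks up.
	 */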
3239 3239 dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3240 3240 if (cv_timedwait(&qlt->rp_dereg_cv,
3241 3241 &qlt->preq_lock, dereg_req_timer) > 0) {
3242 3242 ret = qlt->rp_dereg_status;
3243 3243 } else {
3244 3244 ret = FCT_BUSY;
3245 3245 }
3246 3246 qlt->rp_dereg_status = 0;
3247 3247 qlt->rp_id_in_dereg = 0;
3248 3248 mutex_exit(&qlt->preq_lock);
3249 3249 return (ret);
3250 3250 }
3251 3251
3252 3252 /*
3253 3253 * Pass received ELS up to framework.
3254 3254 */
3255 3255 static void
3256 3256 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3257 3257 {
3258 3258 fct_cmd_t *cmd;
3259 3259 fct_els_t *els;
3260 3260 qlt_cmd_t *qcmd;
3261 3261 uint32_t payload_size;
3262 3262 uint32_t remote_portid;
3263 3263 uint8_t *pldptr, *bndrptr;
3264 3264 int i, off;
3265 3265 uint16_t iocb_flags;
3266 3266 char info[QLT_INFO_LEN];
3267 3267
3268 3268 remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3269 3269 ((uint32_t)(resp[0x1A])) << 16;
3270 3270 iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3271 3271 if (iocb_flags & BIT_15) {
3272 3272 payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3273 3273 } else {
3274 3274 payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3275 3275 }
3276 3276
3277 3277 if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3278 3278 EL(qlt, "payload is too large = %xh\n", payload_size);
3279 3279 cmn_err(CE_WARN, "handle_purex: payload is too large");
3280 3280 goto cmd_null;
3281 3281 }
3282 3282
3283 3283 cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3284 3284 (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3285 3285 if (cmd == NULL) {
3286 3286 EL(qlt, "fct_alloc cmd==NULL\n");
3287 3287 cmd_null:;
3288 3288 (void) snprintf(info, sizeof (info),
3289 3289 "qlt_handle_purex: qlt-%p, "
3290 3290 "can't allocate space for fct_cmd", (void *)qlt);
3291 3291 (void) fct_port_shutdown(qlt->qlt_port,
3292 3292 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3293 3293 return;
3294 3294 }
3295 3295
3296 3296 cmd->cmd_port = qlt->qlt_port;
3297 3297 cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3298 3298 if (cmd->cmd_rp_handle == 0xFFFF) {
3299 3299 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3300 3300 }
3301 3301
3302 3302 els = (fct_els_t *)cmd->cmd_specific;
3303 3303 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3304 3304 els->els_req_size = (uint16_t)payload_size;
3305 3305 els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3306 3306 GET_STRUCT_SIZE(qlt_cmd_t));
3307 3307 qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3308 3308 cmd->cmd_rportid = remote_portid;
3309 3309 cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3310 3310 ((uint32_t)(resp[0x16])) << 16;
3311 3311 cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3312 3312 cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3313 3313 pldptr = &resp[0x2C];
3314 3314 bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
3315 3315 for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3316 3316 /* Take care of fw's swapping of payload */
3317 3317 els->els_req_payload[i] = pldptr[3];
3318 3318 els->els_req_payload[i+1] = pldptr[2];
3319 3319 els->els_req_payload[i+2] = pldptr[1];
3320 3320 els->els_req_payload[i+3] = pldptr[0];
3321 3321 pldptr += 4;
3322 3322 if (pldptr == bndrptr)
3323 3323 pldptr = (uint8_t *)qlt->resp_ptr;
3324 3324 off += 4;
3325 3325 if (off >= IOCB_SIZE) {
3326 3326 off = 4;
3327 3327 pldptr += 4;
3328 3328 }
3329 3329 }
3330 3330 fct_post_rcvd_cmd(cmd, 0);
3331 3331 }
3332 3332
3333 3333 fct_status_t
3334 3334 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3335 3335 {
3336 3336 qlt_state_t *qlt;
3337 3337 char info[QLT_INFO_LEN];
3338 3338
3339 3339 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3340 3340
3341 3341 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3342 3342 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3343 3343 EL(qlt, "ioflags = %xh\n", ioflags);
3344 3344 goto fatal_panic;
3345 3345 } else {
3346 3346 return (qlt_send_status(qlt, cmd));
3347 3347 }
3348 3348 }
3349 3349
3350 3350 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3351 3351 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3352 3352 goto fatal_panic;
3353 3353 } else {
3354 3354 return (qlt_send_els_response(qlt, cmd));
3355 3355 }
3356 3356 }
3357 3357
3358 3358 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3359 3359 cmd->cmd_handle = 0;
3360 3360 }
3361 3361
3362 3362 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3363 3363 return (qlt_send_abts_response(qlt, cmd, 0));
3364 3364 } else {
3365 3365 EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3366 3366 ASSERT(0);
3367 3367 return (FCT_FAILURE);
3368 3368 }
3369 3369
3370 3370 fatal_panic:;
3371 3371 (void) snprintf(info, sizeof (info),
3372 3372 "qlt_send_cmd_response: can not handle "
3373 3373 "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3374 3374 ioflags);
3375 3375 (void) fct_port_shutdown(qlt->qlt_port,
3376 3376 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3377 3377 return (FCT_FAILURE);
3378 3378 }
3379 3379
3380 3380 /* ARGSUSED */
3381 3381 fct_status_t
3382 3382 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3383 3383 {
3384 3384 qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3385 3385 qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3386 3386 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3387 3387 uint8_t *req, rcnt;
3388 3388 uint16_t flags;
3389 3389 uint16_t cookie_count;
3390 3390
3391 3391 if (dbuf->db_handle == 0)
3392 3392 qcmd->dbuf = dbuf;
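	/*
	 * The upper nibble of ATIO byte 3, saved by qlt_handle_atio(), is
	 * carried into the CTIO flags word; the low bits set below select
	 * the data direction, and BIT_15 asks the firmware to send good
	 * status along with the data.
	 */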
3393 3393 flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3394 3394 if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3395 3395 flags = (uint16_t)(flags | 2);
3396 3396 qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3397 3397 } else {
3398 3398 flags = (uint16_t)(flags | 1);
3399 3399 }
3400 3400
3401 3401 if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3402 3402 flags = (uint16_t)(flags | BIT_15);
3403 3403
3404 3404 if (dbuf->db_flags & DB_LU_DATA_BUF) {
3405 3405 /*
3406 3406 * Data bufs from LU are in scatter/gather list format.
3407 3407 */
3408 3408 cookie_count = qlt_get_cookie_count(dbuf);
3409 3409 rcnt = qlt_get_iocb_count(cookie_count);
3410 3410 } else {
3411 3411 cookie_count = 1;
3412 3412 rcnt = 1;
3413 3413 }
3414 3414 mutex_enter(&qlt->req_lock);
3415 3415 req = (uint8_t *)qlt_get_req_entries(qlt, rcnt);
3416 3416 if (req == NULL) {
3417 3417 mutex_exit(&qlt->req_lock);
3418 3418 return (FCT_BUSY);
3419 3419 }
3420 3420 bzero(req, IOCB_SIZE); /* XXX needed ? */
3421 3421 req[0] = 0x12;
3422 3422 req[1] = rcnt;
3423 3423 req[2] = dbuf->db_handle;
3424 3424 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3425 3425 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3426 3426 QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
3427 3427 QMEM_WR16(qlt, req+12, cookie_count);
3428 3428 QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3429 3429 QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3430 3430 QMEM_WR16(qlt, req+0x1A, flags);
3431 3431 QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3432 3432 QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3433 3433 QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
3434 3434 if (dbuf->db_flags & DB_LU_DATA_BUF) {
3435 3435 uint8_t *qptr; /* qlt continuation segs */
3436 3436 uint16_t cookie_resid;
3437 3437 uint16_t cont_segs;
3438 3438 ddi_dma_cookie_t cookie, *ckp;
3439 3439
3440 3440 /*
3441 3441 * See if the dma cookies are in simple array format.
3442 3442 */
3443 3443 ckp = qlt_get_cookie_array(dbuf);
3444 3444
3445 3445 /*
3446 3446 * Program the first segment into main record.
3447 3447 */
3448 3448 if (ckp) {
3449 3449 ASSERT(ckp->dmac_size);
3450 3450 QMEM_WR64(qlt, req+0x34, ckp->dmac_laddress);
3451 3451 QMEM_WR32(qlt, req+0x3c, ckp->dmac_size);
3452 3452 } else {
3453 3453 qlt_ddi_dma_nextcookie(dbuf, &cookie);
3454 3454 ASSERT(cookie.dmac_size);
3455 3455 QMEM_WR64(qlt, req+0x34, cookie.dmac_laddress);
3456 3456 QMEM_WR32(qlt, req+0x3c, cookie.dmac_size);
3457 3457 }
3458 3458 cookie_resid = cookie_count-1;
3459 3459
3460 3460 /*
3461 3461 * Program remaining segments into continuation records.
3462 3462 */
3463 3463 while (cookie_resid) {
3464 3464 req += IOCB_SIZE;
3465 3465 if (req >= (uint8_t *)qlt->resp_ptr) {
3466 3466 req = (uint8_t *)qlt->req_ptr;
3467 3467 }
3468 3468 req[0] = 0x0a;
3469 3469 req[1] = 1;
3470 3470 req[2] = req[3] = 0; /* tidy */
3471 3471 qptr = &req[4];
3472 3472 for (cont_segs = CONT_A64_DATA_SEGMENTS;
3473 3473 cont_segs && cookie_resid; cont_segs--) {
3474 3474
3475 3475 if (ckp) {
3476 3476 ++ckp; /* next cookie */
3477 3477 ASSERT(ckp->dmac_size != 0);
3478 3478 QMEM_WR64(qlt, qptr,
3479 3479 ckp->dmac_laddress);
3480 3480 qptr += 8; /* skip over laddress */
3481 3481 QMEM_WR32(qlt, qptr, ckp->dmac_size);
3482 3482 qptr += 4; /* skip over size */
3483 3483 } else {
3484 3484 qlt_ddi_dma_nextcookie(dbuf, &cookie);
3485 3485 ASSERT(cookie.dmac_size != 0);
3486 3486 QMEM_WR64(qlt, qptr,
3487 3487 cookie.dmac_laddress);
3488 3488 qptr += 8; /* skip over laddress */
3489 3489 QMEM_WR32(qlt, qptr, cookie.dmac_size);
3490 3490 qptr += 4; /* skip over size */
3491 3491 }
3492 3492 cookie_resid--;
3493 3493 }
3494 3494 /*
3495 3495 * zero unused remainder of IOCB
3496 3496 */
3497 3497 if (cont_segs) {
3498 3498 size_t resid;
3499 3499 resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
3500 3500 (uintptr_t)qptr);
3501 3501 ASSERT(resid < IOCB_SIZE);
3502 3502 bzero(qptr, resid);
3503 3503 }
3504 3504 }
3505 3505 } else {
3506 3506 /* Single, contiguous buffer */
3507 3507 QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3508 3508 QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3509 3509 }
3510 3510
3511 3511 qlt_submit_req_entries(qlt, rcnt);
3512 3512 mutex_exit(&qlt->req_lock);
3513 3513
3514 3514 return (STMF_SUCCESS);
3515 3515 }
3516 3516
3517 3517 /*
3518 3518 * We must construct proper FCP_RSP_IU now. Here we only focus on
3519 3519  * the handling of FCP_SNS_INFO. If there are protocol failures (FCP_RSP_INFO),
3520 3520  * we would have caught them before entering here.
3521 3521 */
3522 3522 fct_status_t
3523 3523 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3524 3524 {
3525 3525 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3526 3526 scsi_task_t *task = (scsi_task_t *)cmd->cmd_specific;
3527 3527 qlt_dmem_bctl_t *bctl;
3528 3528 uint32_t size;
3529 3529 uint8_t *req, *fcp_rsp_iu;
3530 3530 uint8_t *psd, sensbuf[24]; /* sense data */
3531 3531 uint16_t flags;
3532 3532 uint16_t scsi_status;
3533 3533 int use_mode2;
3534 3534 int ndx;
3535 3535
3536 3536 /*
3537 3537 	 * Fast path for the non-check-condition case
3538 3538 */
3539 3539 if (task->task_scsi_status != STATUS_CHECK) {
3540 3540 /*
3541 3541 * We will use mode1
3542 3542 */
3543 3543 flags = (uint16_t)(BIT_6 | BIT_15 |
3544 3544 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3545 3545 scsi_status = (uint16_t)task->task_scsi_status;
3546 3546 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3547 3547 scsi_status = (uint16_t)(scsi_status | BIT_10);
3548 3548 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3549 3549 scsi_status = (uint16_t)(scsi_status | BIT_11);
3550 3550 }
3551 3551 qcmd->dbuf_rsp_iu = NULL;
3552 3552
3553 3553 /*
3554 3554 * Fillout CTIO type 7 IOCB
3555 3555 */
3556 3556 mutex_enter(&qlt->req_lock);
3557 3557 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3558 3558 if (req == NULL) {
3559 3559 mutex_exit(&qlt->req_lock);
3560 3560 return (FCT_BUSY);
3561 3561 }
3562 3562
3563 3563 /*
3564 3564 * Common fields
3565 3565 */
3566 3566 bzero(req, IOCB_SIZE);
3567 3567 req[0x00] = 0x12;
3568 3568 req[0x01] = 0x1;
3569 3569 req[0x02] = BIT_7; /* indicate if it's a pure status req */
3570 3570 QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3571 3571 QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3572 3572 QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3573 3573 QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3574 3574
3575 3575 /*
3576 3576 * Mode-specific fields
3577 3577 */
3578 3578 QMEM_WR16(qlt, req + 0x1A, flags);
3579 3579 QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3580 3580 QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3581 3581 QMEM_WR16(qlt, req + 0x22, scsi_status);
3582 3582
3583 3583 /*
3584 3584 * Trigger FW to send SCSI status out
3585 3585 */
3586 3586 qlt_submit_req_entries(qlt, 1);
3587 3587 mutex_exit(&qlt->req_lock);
3588 3588 return (STMF_SUCCESS);
3589 3589 }
3590 3590
3591 3591 ASSERT(task->task_scsi_status == STATUS_CHECK);
3592 3592 /*
3593 3593 	 * Decide which SCSI status mode should be used
3594 3594 */
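	/*
	 * Mode 1 (sense data of 24 bytes or less) carries the sense bytes
	 * inline in the CTIO; mode 2 builds a complete FCP_RSP IU in a
	 * separate DMA buffer and points the CTIO at it, as the two
	 * branches below show.
	 */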
3595 3595 use_mode2 = (task->task_sense_length > 24);
3596 3596
3597 3597 /*
3598 3598 * Prepare required information per the SCSI status mode
3599 3599 */
3600 3600 flags = (uint16_t)(BIT_15 |
3601 3601 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3602 3602 if (use_mode2) {
3603 3603 flags = (uint16_t)(flags | BIT_7);
3604 3604
3605 3605 size = task->task_sense_length;
3606 3606 qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3607 3607 task->task_sense_length, &size, 0);
3608 3608 if (!qcmd->dbuf_rsp_iu) {
3609 3609 return (FCT_ALLOC_FAILURE);
3610 3610 }
3611 3611
3612 3612 /*
3613 3613 * Start to construct FCP_RSP IU
3614 3614 */
3615 3615 fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3616 3616 bzero(fcp_rsp_iu, 24);
3617 3617
3618 3618 /*
3619 3619 * FCP_RSP IU flags, byte10
3620 3620 */
3621 3621 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3622 3622 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3623 3623 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3624 3624 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3625 3625 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3626 3626 }
3627 3627
3628 3628 /*
3629 3629 * SCSI status code, byte11
3630 3630 */
3631 3631 fcp_rsp_iu[11] = task->task_scsi_status;
3632 3632
3633 3633 /*
3634 3634 * FCP_RESID (Overrun or underrun)
3635 3635 */
3636 3636 fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3637 3637 fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3638 3638 fcp_rsp_iu[14] = (uint8_t)((task->task_resid >> 8) & 0xFF);
3639 3639 fcp_rsp_iu[15] = (uint8_t)((task->task_resid >> 0) & 0xFF);
3640 3640
3641 3641 /*
3642 3642 * FCP_SNS_LEN
3643 3643 */
3644 3644 fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3645 3645 0xFF);
3646 3646 fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3647 3647 0xFF);
3648 3648
3649 3649 /*
3650 3650 * FCP_RSP_LEN
3651 3651 */
3652 3652 /*
3653 3653 * no FCP_RSP_INFO
3654 3654 */
3655 3655 /*
3656 3656 * FCP_SNS_INFO
3657 3657 */
3658 3658 bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3659 3659 task->task_sense_length);
3660 3660
3661 3661 /*
3662 3662 * Ensure dma data consistency
3663 3663 */
3664 3664 qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3665 3665 } else {
3666 3666 flags = (uint16_t)(flags | BIT_6);
3667 3667
3668 3668 scsi_status = (uint16_t)task->task_scsi_status;
3669 3669 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3670 3670 scsi_status = (uint16_t)(scsi_status | BIT_10);
3671 3671 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3672 3672 scsi_status = (uint16_t)(scsi_status | BIT_11);
3673 3673 }
3674 3674 if (task->task_sense_length) {
3675 3675 scsi_status = (uint16_t)(scsi_status | BIT_9);
3676 3676 }
3677 3677 bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3678 3678 qcmd->dbuf_rsp_iu = NULL;
3679 3679 }
3680 3680
3681 3681 /*
3682 3682 * Fillout CTIO type 7 IOCB
3683 3683 */
3684 3684 mutex_enter(&qlt->req_lock);
3685 3685 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3686 3686 if (req == NULL) {
3687 3687 mutex_exit(&qlt->req_lock);
3688 3688 if (use_mode2) {
3689 3689 qlt_dmem_free(cmd->cmd_port->port_fds,
3690 3690 qcmd->dbuf_rsp_iu);
3691 3691 qcmd->dbuf_rsp_iu = NULL;
3692 3692 }
3693 3693 return (FCT_BUSY);
3694 3694 }
3695 3695
3696 3696 /*
3697 3697 * Common fields
3698 3698 */
3699 3699 bzero(req, IOCB_SIZE);
3700 3700 req[0x00] = 0x12;
3701 3701 req[0x01] = 0x1;
3702 3702 req[0x02] = BIT_7; /* to indicate if it's a pure status req */
3703 3703 QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3704 3704 QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3705 3705 QMEM_WR16(qlt, req + 0x0A, 0); /* not timed by FW */
3706 3706 if (use_mode2) {
3707 3707 QMEM_WR16(qlt, req+0x0C, 1); /* FCP RSP IU data field */
3708 3708 }
3709 3709 QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3710 3710 QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3711 3711
3712 3712 /*
3713 3713 * Mode-specific fields
3714 3714 */
3715 3715 if (!use_mode2) {
3716 3716 QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3717 3717 }
3718 3718 QMEM_WR16(qlt, req + 0x1A, flags);
3719 3719 QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3720 3720 QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3721 3721 if (use_mode2) {
3722 3722 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3723 3723 QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3724 3724 QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3725 3725 QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3726 3726 } else {
3727 3727 QMEM_WR16(qlt, req + 0x22, scsi_status);
3728 3728 psd = req+0x28;
3729 3729
3730 3730 /*
3731 3731 * Data in sense buf is always big-endian, data in IOCB
3732 3732 * should always be little-endian, so we must do swapping.
3733 3733 */
3734 3734 size = ((task->task_sense_length + 3) & (~3));
3735 3735 for (ndx = 0; ndx < size; ndx += 4) {
3736 3736 psd[ndx + 0] = sensbuf[ndx + 3];
3737 3737 psd[ndx + 1] = sensbuf[ndx + 2];
3738 3738 psd[ndx + 2] = sensbuf[ndx + 1];
3739 3739 psd[ndx + 3] = sensbuf[ndx + 0];
3740 3740 }
3741 3741 }
3742 3742
3743 3743 /*
3744 3744 * Trigger FW to send SCSI status out
3745 3745 */
3746 3746 qlt_submit_req_entries(qlt, 1);
3747 3747 mutex_exit(&qlt->req_lock);
3748 3748
3749 3749 return (STMF_SUCCESS);
3750 3750 }
3751 3751
3752 3752 fct_status_t
3753 3753 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3754 3754 {
3755 3755 qlt_cmd_t *qcmd;
3756 3756 fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3757 3757 uint8_t *req, *addr;
3758 3758 qlt_dmem_bctl_t *bctl;
3759 3759 uint32_t minsize;
3760 3760 uint8_t elsop, req1f;
3761 3761
3762 3762 addr = els->els_resp_payload;
3763 3763 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3764 3764
3765 3765 minsize = els->els_resp_size;
3766 3766 qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3767 3767 if (qcmd->dbuf == NULL)
3768 3768 return (FCT_BUSY);
3769 3769
3770 3770 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3771 3771
3772 3772 bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3773 3773 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3774 3774
3775 3775 if (addr[0] == 0x02) { /* ACC */
3776 3776 req1f = BIT_5;
3777 3777 } else {
3778 3778 req1f = BIT_6;
3779 3779 }
3780 3780 elsop = els->els_req_payload[0];
3781 3781 if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3782 3782 (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3783 3783 req1f = (uint8_t)(req1f | BIT_4);
3784 3784 }
3785 3785
3786 3786 mutex_enter(&qlt->req_lock);
3787 3787 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3788 3788 if (req == NULL) {
3789 3789 mutex_exit(&qlt->req_lock);
3790 3790 qlt_dmem_free(NULL, qcmd->dbuf);
3791 3791 qcmd->dbuf = NULL;
3792 3792 return (FCT_BUSY);
3793 3793 }
3794 3794 bzero(req, IOCB_SIZE);
3795 3795 req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3796 3796 req[0x16] = elsop; req[0x1f] = req1f;
3797 3797 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3798 3798 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3799 3799 QMEM_WR16(qlt, (&req[0xC]), 1);
3800 3800 QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3801 3801 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3802 3802 if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3803 3803 req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3804 3804 req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3805 3805 req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3806 3806 }
3807 3807 QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3808 3808 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3809 3809 QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3810 3810 qlt_submit_req_entries(qlt, 1);
3811 3811 mutex_exit(&qlt->req_lock);
3812 3812
3813 3813 return (FCT_SUCCESS);
3814 3814 }
3815 3815
3816 3816 fct_status_t
3817 3817 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3818 3818 {
3819 3819 qlt_abts_cmd_t *qcmd;
3820 3820 fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3821 3821 uint8_t *req;
3822 3822 uint32_t lportid;
3823 3823 uint32_t fctl;
3824 3824 int i;
3825 3825
3826 3826 qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3827 3827
3828 3828 mutex_enter(&qlt->req_lock);
3829 3829 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3830 3830 if (req == NULL) {
3831 3831 mutex_exit(&qlt->req_lock);
3832 3832 return (FCT_BUSY);
3833 3833 }
3834 3834 bcopy(qcmd->buf, req, IOCB_SIZE);
3835 3835 lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3836 3836 fctl = QMEM_RD32(qlt, req+0x1C);
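	/*
	 * Derive the response F_CTL from the received ABTS: flip the
	 * exchange-context bit, clear sequence context and, assuming the
	 * standard F_CTL bit layout, set the end-sequence and
	 * sequence-initiative bits.
	 */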
3837 3837 fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3838 3838 req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3839 3839 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3840 3840 if (cmd->cmd_rp)
3841 3841 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3842 3842 else
3843 3843 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3844 3844 if (terminate) {
3845 3845 QMEM_WR16(qlt, (&req[0xC]), 1);
3846 3846 }
3847 3847 QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3848 3848 req[0x17] = abts->abts_resp_rctl;
3849 3849 QMEM_WR32(qlt, req+0x18, lportid);
3850 3850 QMEM_WR32(qlt, req+0x1C, fctl);
3851 3851 req[0x23]++;
3852 3852 for (i = 0; i < 12; i += 4) {
3853 3853 /* Take care of firmware's LE requirement */
3854 3854 req[0x2C+i] = abts->abts_resp_payload[i+3];
3855 3855 req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3856 3856 req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3857 3857 req[0x2C+i+3] = abts->abts_resp_payload[i];
3858 3858 }
3859 3859 qlt_submit_req_entries(qlt, 1);
3860 3860 mutex_exit(&qlt->req_lock);
3861 3861
3862 3862 return (FCT_SUCCESS);
3863 3863 }
3864 3864
3865 3865 static void
3866 3866 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3867 3867 {
3868 3868 int i;
3869 3869 uint32_t d;
3870 3870 caddr_t req;
3871 3871 /* Just put it on the request queue */
3872 3872 mutex_enter(&qlt->req_lock);
3873 3873 req = qlt_get_req_entries(qlt, 1);
3874 3874 if (req == NULL) {
3875 3875 mutex_exit(&qlt->req_lock);
3876 3876 /* XXX handle this */
3877 3877 return;
3878 3878 }
3879 3879 for (i = 0; i < 16; i++) {
3880 3880 d = QMEM_RD32(qlt, inot);
3881 3881 inot += 4;
3882 3882 QMEM_WR32(qlt, req, d);
3883 3883 req += 4;
3884 3884 }
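	/*
	 * Reuse the 64 bytes just copied as the acknowledgement, changing
	 * only the entry type to 0x0e before submitting it back to the
	 * firmware.
	 */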
3885 3885 req -= 64;
3886 3886 req[0] = 0x0e;
3887 3887 qlt_submit_req_entries(qlt, 1);
3888 3888 mutex_exit(&qlt->req_lock);
3889 3889 }
3890 3890
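/*
 * Lookup table indexed by the low three bits of the task-attribute byte in
 * the FCP_CMND IU (see qlt_handle_atio() below); the values are presumably
 * the corresponding STMF task-flag encodings.
 */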
3891 3891 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3892 3892 static void
3893 3893 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3894 3894 {
3895 3895 fct_cmd_t *cmd;
3896 3896 scsi_task_t *task;
3897 3897 qlt_cmd_t *qcmd;
3898 3898 uint32_t rportid, fw_xchg_addr;
3899 3899 uint8_t *p, *q, *req, tm;
3900 3900 uint16_t cdb_size, flags, oxid;
3901 3901 char info[QLT_INFO_LEN];
3902 3902
3903 3903 /*
3904 3904 	 * If either a bidirectional xfer is requested or there is an extended
3905 3905 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3906 3906 */
3907 3907 cdb_size = 16;
3908 3908 if (atio[0x20 + 11] >= 3) {
3909 3909 uint8_t b = atio[0x20 + 11];
3910 3910 uint16_t b1;
3911 3911 if ((b & 3) == 3) {
3912 3912 EL(qlt, "bidirectional I/O not supported\n");
3913 3913 cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3914 3914 "received, dropping the cmd as bidirectional "
3915 3915 " transfers are not yet supported", qlt->instance);
3916 3916 /* XXX abort the I/O */
3917 3917 return;
3918 3918 }
3919 3919 cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3920 3920 /*
3921 3921 		 * Verify that we have enough entries. Without an additional CDB,
3922 3922 		 * everything fits nicely within the same 64 bytes, so the
3923 3923 		 * additional CDB size is essentially the number of additional
3924 3924 		 * bytes we need.
3925 3925 */
3926 3926 b1 = (uint16_t)b;
3927 3927 if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3928 3928 EL(qlt, "extended cdb received\n");
3929 3929 cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3930 3930 " cdb (cdb size = %d bytes), however the firmware "
3931 3931 			    " did not DMA the entire FCP_CMD IU, entry count "
3932 3932 " is %d while it should be %d", qlt->instance,
3933 3933 cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3934 3934 /* XXX abort the I/O */
3935 3935 return;
3936 3936 }
3937 3937 }
3938 3938
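	/*
	 * The ATIO embeds the received FC frame header starting at offset 8:
	 * D_ID (our local port ID) at bytes 1-3, S_ID (the remote port ID)
	 * at bytes 5-7, and OX_ID/RX_ID at bytes 16-19.  The extractions
	 * below and the cmd_lportid/cmd_oxid/cmd_rxid assignments further
	 * down read from those offsets.
	 */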
3939 3939 rportid = (((uint32_t)atio[8 + 5]) << 16) |
3940 3940 (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3941 3941 fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3942 3942 oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3943 3943
3944 3944 if (fw_xchg_addr == 0xFFFFFFFF) {
3945 3945 EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3946 3946 cmd = NULL;
3947 3947 } else {
3948 3948 cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3949 3949 rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3950 3950 if (cmd == NULL) {
3951 3951 EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3952 3952 }
3953 3953 }
3954 3954 if (cmd == NULL) {
3955 3955 EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3956 3956 /* Abort this IO */
3957 3957 flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3958 3958
3959 3959 mutex_enter(&qlt->req_lock);
3960 3960 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3961 3961 if (req == NULL) {
3962 3962 mutex_exit(&qlt->req_lock);
3963 3963
3964 3964 (void) snprintf(info, sizeof (info),
3965 3965 "qlt_handle_atio: qlt-%p, can't "
3966 3966 "allocate space for scsi_task", (void *)qlt);
3967 3967 (void) fct_port_shutdown(qlt->qlt_port,
3968 3968 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3969 3969 return;
3970 3970 }
3971 3971 bzero(req, IOCB_SIZE);
3972 3972 req[0] = 0x12; req[1] = 0x1;
3973 3973 QMEM_WR32(qlt, req+4, 0);
3974 3974 QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3975 3975 rportid));
3976 3976 QMEM_WR16(qlt, req+10, 60);
3977 3977 QMEM_WR32(qlt, req+0x10, rportid);
3978 3978 QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3979 3979 QMEM_WR16(qlt, req+0x1A, flags);
3980 3980 QMEM_WR16(qlt, req+0x20, oxid);
3981 3981 qlt_submit_req_entries(qlt, 1);
3982 3982 mutex_exit(&qlt->req_lock);
3983 3983
3984 3984 return;
3985 3985 }
3986 3986
3987 3987 task = (scsi_task_t *)cmd->cmd_specific;
3988 3988 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3989 3989 qcmd->fw_xchg_addr = fw_xchg_addr;
3990 3990 qcmd->param.atio_byte3 = atio[3];
3991 3991 cmd->cmd_oxid = oxid;
3992 3992 cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3993 3993 atio[8+19]);
3994 3994 cmd->cmd_rportid = rportid;
3995 3995 cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3996 3996 (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3997 3997 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3998 3998 	/* Don't do a 64-byte read as this is IOMMU */
3999 3999 q = atio+0x28;
4000 4000 /* XXX Handle fcp_cntl */
4001 4001 task->task_cmd_seq_no = (uint32_t)(*q++);
4002 4002 task->task_csn_size = 8;
4003 4003 task->task_flags = qlt_task_flags[(*q++) & 7];
4004 4004 tm = *q++;
4005 4005 if (tm) {
4006 4006 if (tm & BIT_1)
4007 4007 task->task_mgmt_function = TM_ABORT_TASK_SET;
4008 4008 else if (tm & BIT_2)
4009 4009 task->task_mgmt_function = TM_CLEAR_TASK_SET;
4010 4010 else if (tm & BIT_4)
4011 4011 task->task_mgmt_function = TM_LUN_RESET;
4012 4012 else if (tm & BIT_5)
4013 4013 task->task_mgmt_function = TM_TARGET_COLD_RESET;
4014 4014 else if (tm & BIT_6)
4015 4015 task->task_mgmt_function = TM_CLEAR_ACA;
4016 4016 else
4017 4017 task->task_mgmt_function = TM_ABORT_TASK;
4018 4018 }
4019 4019 task->task_max_nbufs = STMF_BUFS_MAX;
4020 4020 task->task_csn_size = 8;
4021 4021 task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
4022 4022 p = task->task_cdb;
4023 4023 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4024 4024 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4025 4025 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4026 4026 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4027 4027 if (cdb_size > 16) {
4028 4028 uint16_t xtra = (uint16_t)(cdb_size - 16);
4029 4029 uint16_t i;
4030 4030 uint8_t cb[4];
4031 4031
4032 4032 while (xtra) {
4033 4033 *p++ = *q++;
4034 4034 xtra--;
4035 4035 if (q == ((uint8_t *)qlt->queue_mem_ptr +
4036 4036 ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4037 4037 q = (uint8_t *)qlt->queue_mem_ptr +
4038 4038 ATIO_QUEUE_OFFSET;
4039 4039 }
4040 4040 }
4041 4041 for (i = 0; i < 4; i++) {
4042 4042 cb[i] = *q++;
4043 4043 if (q == ((uint8_t *)qlt->queue_mem_ptr +
4044 4044 ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4045 4045 q = (uint8_t *)qlt->queue_mem_ptr +
4046 4046 ATIO_QUEUE_OFFSET;
4047 4047 }
4048 4048 }
4049 4049 task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
4050 4050 (((uint32_t)cb[1]) << 16) |
4051 4051 (((uint32_t)cb[2]) << 8) | cb[3];
4052 4052 } else {
4053 4053 task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
4054 4054 (((uint32_t)q[1]) << 16) |
4055 4055 (((uint32_t)q[2]) << 8) | q[3];
4056 4056 }
4057 4057 fct_post_rcvd_cmd(cmd, 0);
4058 4058 }
4059 4059
4060 4060 static void
4061 4061 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
4062 4062 {
4063 4063 uint16_t status;
4064 4064 uint32_t portid;
4065 4065 uint32_t subcode1, subcode2;
4066 4066
4067 4067 status = QMEM_RD16(qlt, rsp+8);
4068 4068 portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
4069 4069 subcode1 = QMEM_RD32(qlt, rsp+0x14);
4070 4070 subcode2 = QMEM_RD32(qlt, rsp+0x18);
4071 4071
4072 4072 mutex_enter(&qlt->preq_lock);
4073 4073 if (portid != qlt->rp_id_in_dereg) {
4074 4074 int instance = ddi_get_instance(qlt->dip);
4075 4075
4076 4076 		EL(qlt, "implicit logout received portid = %xh\n", portid);
4077 4077 cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
4078 4078 " received when driver wasn't waiting for it",
4079 4079 instance, portid);
4080 4080 mutex_exit(&qlt->preq_lock);
4081 4081 return;
4082 4082 }
4083 4083
4084 4084 if (status != 0) {
4085 4085 EL(qlt, "implicit logout completed for %xh with status %xh, "
4086 4086 "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
4087 4087 subcode2);
4088 4088 if (status == 0x31 && subcode1 == 0x0a) {
4089 4089 qlt->rp_dereg_status = FCT_SUCCESS;
4090 4090 } else {
4091 4091 EL(qlt, "implicit logout portid=%xh, status=%xh, "
4092 4092 "subcode1=%xh, subcode2=%xh\n", portid, status,
4093 4093 subcode1, subcode2);
4094 4094 qlt->rp_dereg_status =
4095 4095 QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4096 4096 }
4097 4097 } else {
4098 4098 qlt->rp_dereg_status = FCT_SUCCESS;
4099 4099 }
4100 4100 cv_signal(&qlt->rp_dereg_cv);
4101 4101 mutex_exit(&qlt->preq_lock);
4102 4102 }
4103 4103
4104 4104 /*
4105 4105 * Note that when an ELS is aborted, the regular or aborted completion
4106 4106 * (if any) gets posted before the abort IOCB comes back on response queue.
4107 4107 */
4108 4108 static void
4109 4109 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4110 4110 {
4111 4111 char info[QLT_INFO_LEN];
4112 4112 fct_cmd_t *cmd;
4113 4113 qlt_cmd_t *qcmd;
4114 4114 uint32_t hndl;
4115 4115 uint32_t subcode1, subcode2;
4116 4116 uint16_t status;
4117 4117
4118 4118 hndl = QMEM_RD32(qlt, rsp+4);
4119 4119 status = QMEM_RD16(qlt, rsp+8);
4120 4120 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4121 4121 subcode2 = QMEM_RD32(qlt, rsp+0x28);
4122 4122
4123 4123 if (!CMD_HANDLE_VALID(hndl)) {
4124 4124 EL(qlt, "handle = %xh\n", hndl);
4125 4125 /*
4126 4126 * This cannot happen for unsol els completion. This can
4127 4127 * only happen when abort for an unsol els completes.
4128 4128 * This condition indicates a firmware bug.
4129 4129 */
4130 4130 (void) snprintf(info, sizeof (info),
4131 4131 "qlt_handle_unsol_els_completion: "
4132 4132 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4133 4133 hndl, status, subcode1, subcode2, (void *)rsp);
4134 4134 (void) fct_port_shutdown(qlt->qlt_port,
4135 4135 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4136 4136 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4137 4137 return;
4138 4138 }
4139 4139
4140 4140 if (status == 5) {
4141 4141 /*
4142 4142 * When an unsolicited els is aborted, the abort is done
4143 4143 		 * by an ELSPT IOCB with abort control. This is the aborted IOCB
4144 4144 		 * and not the abortee. We will do the cleanup when the
4145 4145 		 * IOCB that caused the abort returns.
4146 4146 */
4147 4147 EL(qlt, "status = %xh\n", status);
4148 4148 stmf_trace(0, "--UNSOL ELS returned with status 5 --");
4149 4149 return;
4150 4150 }
4151 4151
4152 4152 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4153 4153 if (cmd == NULL) {
4154 4154 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4155 4155 /*
4156 4156 * Now why would this happen ???
4157 4157 */
4158 4158 (void) snprintf(info, sizeof (info),
4159 4159 "qlt_handle_unsol_els_completion: can not "
4160 4160 "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4161 4161 (void *)rsp);
4162 4162 (void) fct_port_shutdown(qlt->qlt_port,
4163 4163 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4164 4164
4165 4165 return;
4166 4166 }
4167 4167
4168 4168 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4169 4169 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4170 4170 if (qcmd->flags & QLT_CMD_ABORTING) {
4171 4171 /*
4172 4172 * This is the same case as "if (status == 5)" above. The
4173 4173 * only difference is that in this case the firmware actually
4174 4174 * finished sending the response. So the abort attempt will
4175 4175 * come back with status ?. We will handle it there.
4176 4176 */
4177 4177 stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4178 4178 "abort it");
4179 4179 return;
4180 4180 }
4181 4181
4182 4182 if (qcmd->dbuf != NULL) {
4183 4183 qlt_dmem_free(NULL, qcmd->dbuf);
4184 4184 qcmd->dbuf = NULL;
4185 4185 }
4186 4186
4187 4187 if (status == 0) {
4188 4188 fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4189 4189 } else {
4190 4190 fct_send_response_done(cmd,
4191 4191 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4192 4192 }
4193 4193 }
4194 4194
4195 4195 static void
4196 4196 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4197 4197 {
4198 4198 char info[QLT_INFO_LEN];
4199 4199 fct_cmd_t *cmd;
4200 4200 qlt_cmd_t *qcmd;
4201 4201 uint32_t hndl;
4202 4202 uint32_t subcode1, subcode2;
4203 4203 uint16_t status;
4204 4204
4205 4205 hndl = QMEM_RD32(qlt, rsp+4);
4206 4206 status = QMEM_RD16(qlt, rsp+8);
4207 4207 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4208 4208 subcode2 = QMEM_RD32(qlt, rsp+0x28);
4209 4209
4210 4210 if (!CMD_HANDLE_VALID(hndl)) {
4211 4211 EL(qlt, "handle = %xh\n", hndl);
4212 4212 ASSERT(hndl == 0);
4213 4213 /*
4214 4214 * Someone has requested to abort it, but no one is waiting for
4215 4215 * this completion.
4216 4216 */
4217 4217 if ((status != 0) && (status != 8)) {
4218 4218 EL(qlt, "status = %xh\n", status);
4219 4219 /*
4220 4220 * There could be exchange resource leakage, so
4221 4221 			 * throw an HBA fatal error event now
4222 4222 */
4223 4223 (void) snprintf(info, sizeof (info),
4224 4224 "qlt_handle_unsol_els_abort_completion: "
4225 4225 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4226 4226 hndl, status, subcode1, subcode2, (void *)rsp);
4227 4227 (void) fct_port_shutdown(qlt->qlt_port,
4228 4228 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4229 4229 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4230 4230 return;
4231 4231 }
4232 4232
4233 4233 return;
4234 4234 }
4235 4235
4236 4236 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4237 4237 if (cmd == NULL) {
4238 4238 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4239 4239 /*
4240 4240 * Why would this happen ??
4241 4241 */
4242 4242 (void) snprintf(info, sizeof (info),
4243 4243 "qlt_handle_unsol_els_abort_completion: can not get "
4244 4244 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4245 4245 (void *)rsp);
4246 4246 (void) fct_port_shutdown(qlt->qlt_port,
4247 4247 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4248 4248
4249 4249 return;
4250 4250 }
4251 4251
4252 4252 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4253 4253 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4254 4254 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4255 4255
4256 4256 if (qcmd->dbuf != NULL) {
4257 4257 qlt_dmem_free(NULL, qcmd->dbuf);
4258 4258 qcmd->dbuf = NULL;
4259 4259 }
4260 4260
4261 4261 if (status == 0) {
4262 4262 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4263 4263 } else if (status == 8) {
4264 4264 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4265 4265 } else {
4266 4266 fct_cmd_fca_aborted(cmd,
4267 4267 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4268 4268 }
4269 4269 }
4270 4270
4271 4271 static void
4272 4272 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4273 4273 {
4274 4274 char info[QLT_INFO_LEN];
4275 4275 fct_cmd_t *cmd;
4276 4276 fct_els_t *els;
4277 4277 qlt_cmd_t *qcmd;
4278 4278 uint32_t hndl;
4279 4279 uint32_t subcode1, subcode2;
4280 4280 uint16_t status;
4281 4281
4282 4282 hndl = QMEM_RD32(qlt, rsp+4);
4283 4283 status = QMEM_RD16(qlt, rsp+8);
4284 4284 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4285 4285 subcode2 = QMEM_RD32(qlt, rsp+0x28);
4286 4286
4287 4287 if (!CMD_HANDLE_VALID(hndl)) {
4288 4288 EL(qlt, "handle = %xh\n", hndl);
4289 4289 /*
4290 4290 * This cannot happen for sol els completion.
4291 4291 */
4292 4292 (void) snprintf(info, sizeof (info),
4293 4293 "qlt_handle_sol_els_completion: "
4294 4294 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4295 4295 hndl, status, subcode1, subcode2, (void *)rsp);
4296 4296 (void) fct_port_shutdown(qlt->qlt_port,
4297 4297 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4298 4298 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4299 4299 return;
4300 4300 }
4301 4301
4302 4302 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4303 4303 if (cmd == NULL) {
4304 4304 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4305 4305 (void) snprintf(info, sizeof (info),
4306 4306 "qlt_handle_sol_els_completion: can not "
4307 4307 "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4308 4308 (void *)rsp);
4309 4309 (void) fct_port_shutdown(qlt->qlt_port,
4310 4310 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4311 4311
4312 4312 return;
4313 4313 }
4314 4314
4315 4315 ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4316 4316 els = (fct_els_t *)cmd->cmd_specific;
4317 4317 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4318 4318 qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4319 4319
4320 4320 if (qcmd->flags & QLT_CMD_ABORTING) {
4321 4321 /*
4322 4322 * We will handle it when the ABORT IO IOCB returns.
4323 4323 */
4324 4324 return;
4325 4325 }
4326 4326
4327 4327 if (qcmd->dbuf != NULL) {
4328 4328 if (status == 0) {
4329 4329 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4330 4330 bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4331 4331 qcmd->param.resp_offset,
4332 4332 els->els_resp_payload, els->els_resp_size);
4333 4333 }
4334 4334 qlt_dmem_free(NULL, qcmd->dbuf);
4335 4335 qcmd->dbuf = NULL;
4336 4336 }
4337 4337
4338 4338 if (status == 0) {
4339 4339 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4340 4340 } else {
4341 4341 fct_send_cmd_done(cmd,
4342 4342 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4343 4343 }
4344 4344 }
4345 4345
4346 4346 static void
4347 4347 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4348 4348 {
4349 4349 fct_cmd_t *cmd;
4350 4350 fct_sol_ct_t *ct;
4351 4351 qlt_cmd_t *qcmd;
4352 4352 uint32_t hndl;
4353 4353 uint16_t status;
4354 4354 char info[QLT_INFO_LEN];
4355 4355
4356 4356 hndl = QMEM_RD32(qlt, rsp+4);
4357 4357 status = QMEM_RD16(qlt, rsp+8);
4358 4358
4359 4359 if (!CMD_HANDLE_VALID(hndl)) {
4360 4360 EL(qlt, "handle = %xh\n", hndl);
4361 4361 /*
4362 4362 * Solicited commands will always have a valid handle.
4363 4363 */
4364 4364 (void) snprintf(info, sizeof (info),
4365 4365 "qlt_handle_ct_completion: "
4366 4366 "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4367 4367 (void) fct_port_shutdown(qlt->qlt_port,
4368 4368 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4369 4369 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4370 4370 return;
4371 4371 }
4372 4372
4373 4373 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4374 4374 if (cmd == NULL) {
4375 4375 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4376 4376 (void) snprintf(info, sizeof (info),
4377 4377 "qlt_handle_ct_completion: cannot find "
4378 4378 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4379 4379 (void *)rsp);
4380 4380 (void) fct_port_shutdown(qlt->qlt_port,
4381 4381 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4382 4382
4383 4383 return;
4384 4384 }
4385 4385
4386 4386 ct = (fct_sol_ct_t *)cmd->cmd_specific;
4387 4387 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4388 4388 ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4389 4389
4390 4390 if (qcmd->flags & QLT_CMD_ABORTING) {
4391 4391 /*
4392 4392 * We will handle it when ABORT IO IOCB returns;
4393 4393 */
4394 4394 return;
4395 4395 }
4396 4396
4397 4397 ASSERT(qcmd->dbuf);
4398 4398 if (status == 0) {
4399 4399 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4400 4400 bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4401 4401 qcmd->param.resp_offset,
4402 4402 ct->ct_resp_payload, ct->ct_resp_size);
4403 4403 }
4404 4404 qlt_dmem_free(NULL, qcmd->dbuf);
4405 4405 qcmd->dbuf = NULL;
4406 4406
4407 4407 if (status == 0) {
4408 4408 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4409 4409 } else {
4410 4410 fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4411 4411 }
4412 4412 }
4413 4413
4414 4414 static void
4415 4415 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4416 4416 {
4417 4417 fct_cmd_t *cmd;
4418 4418 scsi_task_t *task;
4419 4419 qlt_cmd_t *qcmd;
4420 4420 stmf_data_buf_t *dbuf;
4421 4421 fct_status_t fc_st;
4422 4422 uint32_t iof = 0;
4423 4423 uint32_t hndl;
4424 4424 uint16_t status;
4425 4425 uint16_t flags;
4426 4426 uint8_t abort_req;
4427 4427 uint8_t n;
4428 4428 char info[QLT_INFO_LEN];
4429 4429
4430 4430 /* XXX: Check validity of the IOCB by checking 4th byte. */
4431 4431 hndl = QMEM_RD32(qlt, rsp+4);
4432 4432 status = QMEM_RD16(qlt, rsp+8);
4433 4433 flags = QMEM_RD16(qlt, rsp+0x1a);
4434 4434 n = rsp[2];
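	/*
	 * Byte 2 echoes the value we placed there when building the CTIO:
	 * the dbuf handle from qlt_xfer_scsi_data(), or BIT_7 for a pure
	 * status CTIO from qlt_send_status().
	 */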
4435 4435
4436 4436 if (!CMD_HANDLE_VALID(hndl)) {
4437 4437 EL(qlt, "handle = %xh\n", hndl);
4438 4438 ASSERT(hndl == 0);
4439 4439 /*
4440 4440 * Someone has requested to abort it, but no one is waiting for
4441 4441 * this completion.
4442 4442 */
4443 4443 EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4444 4444 (void *)rsp);
4445 4445 if ((status != 1) && (status != 2)) {
4446 4446 EL(qlt, "status = %xh\n", status);
4447 4447 /*
4448 4448 * There could be exchange resource leakage, so
4449 4449 			 * throw an HBA fatal error event now
4450 4450 */
4451 4451 (void) snprintf(info, sizeof (info),
4452 4452 "qlt_handle_ctio_completion: hndl-"
4453 4453 "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4454 4454 (void) fct_port_shutdown(qlt->qlt_port,
4455 4455 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4456 4456
4457 4457 }
4458 4458
4459 4459 return;
4460 4460 }
4461 4461
4462 4462 if (flags & BIT_14) {
4463 4463 abort_req = 1;
4464 4464 EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4465 4465 (void *)rsp);
4466 4466 } else {
4467 4467 abort_req = 0;
4468 4468 }
4469 4469
4470 4470 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4471 4471 if (cmd == NULL) {
4472 4472 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4473 4473 (void) snprintf(info, sizeof (info),
4474 4474 "qlt_handle_ctio_completion: cannot find "
4475 4475 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4476 4476 (void *)rsp);
4477 4477 (void) fct_port_shutdown(qlt->qlt_port,
4478 4478 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4479 4479
4480 4480 return;
4481 4481 }
4482 4482
4483 4483 task = (scsi_task_t *)cmd->cmd_specific;
4484 4484 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4485 4485 if (qcmd->dbuf_rsp_iu) {
4486 4486 ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4487 4487 qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4488 4488 qcmd->dbuf_rsp_iu = NULL;
4489 4489 }
4490 4490
4491 4491 if ((status == 1) || (status == 2)) {
4492 4492 if (abort_req) {
4493 4493 fc_st = FCT_ABORT_SUCCESS;
4494 4494 iof = FCT_IOF_FCA_DONE;
4495 4495 } else {
4496 4496 fc_st = FCT_SUCCESS;
4497 4497 if (flags & BIT_15) {
4498 4498 iof = FCT_IOF_FCA_DONE;
4499 4499 }
4500 4500 }
4501 4501 } else {
4502 4502 EL(qlt, "status = %xh\n", status);
4503 4503 if ((status == 8) && abort_req) {
4504 4504 fc_st = FCT_NOT_FOUND;
4505 4505 iof = FCT_IOF_FCA_DONE;
4506 4506 } else {
4507 4507 fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4508 4508 }
4509 4509 }
4510 4510 dbuf = NULL;
4511 4511 if (((n & BIT_7) == 0) && (!abort_req)) {
4512 4512 /* A completion of data xfer */
4513 4513 if (n == 0) {
4514 4514 dbuf = qcmd->dbuf;
4515 4515 } else {
4516 4516 dbuf = stmf_handle_to_buf(task, n);
4517 4517 }
4518 4518
4519 4519 ASSERT(dbuf != NULL);
4520 4520 if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4521 4521 qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4522 4522 if (flags & BIT_15) {
4523 4523 dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4524 4524 DB_STATUS_GOOD_SENT);
4525 4525 }
4526 4526
4527 4527 dbuf->db_xfer_status = fc_st;
4528 4528 fct_scsi_data_xfer_done(cmd, dbuf, iof);
4529 4529 return;
4530 4530 }
4531 4531 if (!abort_req) {
4532 4532 /*
4533 4533 * This was just a pure status xfer.
4534 4534 */
4535 4535 fct_send_response_done(cmd, fc_st, iof);
4536 4536 return;
4537 4537 }
4538 4538
4539 4539 fct_cmd_fca_aborted(cmd, fc_st, iof);
4540 4540 }
4541 4541
4542 4542 static void
4543 4543 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4544 4544 {
4545 4545 char info[QLT_INFO_LEN];
4546 4546 fct_cmd_t *cmd;
4547 4547 qlt_cmd_t *qcmd;
4548 4548 uint32_t h;
4549 4549 uint16_t status;
4550 4550
4551 4551 h = QMEM_RD32(qlt, rsp+4);
4552 4552 status = QMEM_RD16(qlt, rsp+8);
4553 4553
4554 4554 if (!CMD_HANDLE_VALID(h)) {
4555 4555 EL(qlt, "handle = %xh\n", h);
4556 4556 /*
4557 4557 * Solicited commands always have a valid handle.
4558 4558 */
4559 4559 (void) snprintf(info, sizeof (info),
4560 4560 "qlt_handle_sol_abort_completion: hndl-"
4561 4561 "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4562 4562 (void) fct_port_shutdown(qlt->qlt_port,
4563 4563 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4564 4564 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4565 4565 return;
4566 4566 }
4567 4567 cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4568 4568 if (cmd == NULL) {
4569 4569 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4570 4570 /*
4571 4571 * What happened to the cmd ??
4572 4572 */
4573 4573 (void) snprintf(info, sizeof (info),
4574 4574 "qlt_handle_sol_abort_completion: cannot "
4575 4575 "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4576 4576 (void *)rsp);
4577 4577 (void) fct_port_shutdown(qlt->qlt_port,
4578 4578 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4579 4579
4580 4580 return;
4581 4581 }
4582 4582
4583 4583 ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4584 4584 (cmd->cmd_type == FCT_CMD_SOL_CT));
4585 4585 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4586 4586 if (qcmd->dbuf != NULL) {
4587 4587 qlt_dmem_free(NULL, qcmd->dbuf);
4588 4588 qcmd->dbuf = NULL;
4589 4589 }
4590 4590 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4591 4591 if (status == 0) {
4592 4592 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4593 4593 } else if (status == 0x31) {
4594 4594 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4595 4595 } else {
4596 4596 fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4597 4597 }
4598 4598 }
4599 4599
4600 4600 static void
4601 4601 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4602 4602 {
4603 4603 qlt_abts_cmd_t *qcmd;
4604 4604 fct_cmd_t *cmd;
4605 4605 uint32_t remote_portid;
4606 4606 char info[QLT_INFO_LEN];
4607 4607
4608 4608 remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4609 4609 ((uint32_t)(resp[0x1A])) << 16;
4610 4610 cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4611 4611 sizeof (qlt_abts_cmd_t), 0);
4612 4612 if (cmd == NULL) {
4613 4613 EL(qlt, "fct_alloc cmd==NULL\n");
4614 4614 (void) snprintf(info, sizeof (info),
4615 4615 "qlt_handle_rcvd_abts: qlt-%p, can't "
4616 4616 "allocate space for fct_cmd", (void *)qlt);
4617 4617 (void) fct_port_shutdown(qlt->qlt_port,
4618 4618 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4619 4619 return;
4620 4620 }
4621 4621
4622 4622 resp[0xC] = resp[0xD] = resp[0xE] = 0;
4623 4623 qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4624 4624 bcopy(resp, qcmd->buf, IOCB_SIZE);
4625 4625 cmd->cmd_port = qlt->qlt_port;
4626 4626 cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4627 4627 if (cmd->cmd_rp_handle == 0xFFFF)
4628 4628 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4629 4629
4630 4630 cmd->cmd_rportid = remote_portid;
4631 4631 cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4632 4632 ((uint32_t)(resp[0x16])) << 16;
4633 4633 cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4634 4634 cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4635 4635 fct_post_rcvd_cmd(cmd, 0);
4636 4636 }
4637 4637
4638 4638 static void
4639 4639 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4640 4640 {
4641 4641 uint16_t status;
4642 4642 char info[QLT_INFO_LEN];
4643 4643
4644 4644 status = QMEM_RD16(qlt, resp+8);
4645 4645
4646 4646 if ((status == 0) || (status == 5)) {
4647 4647 return;
4648 4648 }
4649 4649 EL(qlt, "status = %xh\n", status);
4650 4650 (void) snprintf(info, sizeof (info),
4651 4651 "ABTS completion failed %x/%x/%x resp_off %x",
4652 4652 status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4653 4653 ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4654 4654 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4655 4655 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4656 4656 }
4657 4657
4658 4658 #ifdef DEBUG
4659 4659 uint32_t qlt_drop_abort_counter = 0;
4660 4660 #endif
4661 4661
4662 4662 fct_status_t
4663 4663 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4664 4664 {
4665 4665 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4666 4666
4667 4667 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4668 4668 (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4669 4669 return (FCT_NOT_FOUND);
4670 4670 }
4671 4671
4672 4672 #ifdef DEBUG
4673 4673 if (qlt_drop_abort_counter > 0) {
4674 4674 if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
4675 4675 return (FCT_SUCCESS);
4676 4676 }
4677 4677 #endif
4678 4678
4679 4679 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4680 4680 return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4681 4681 }
4682 4682
4683 4683 if (flags & FCT_IOF_FORCE_FCA_DONE) {
4684 4684 cmd->cmd_handle = 0;
4685 4685 }
4686 4686
4687 4687 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4688 4688 return (qlt_send_abts_response(qlt, cmd, 1));
4689 4689 }
4690 4690
4691 4691 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4692 4692 return (qlt_abort_purex(qlt, cmd));
4693 4693 }
4694 4694
4695 4695 if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4696 4696 (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4697 4697 return (qlt_abort_sol_cmd(qlt, cmd));
4698 4698 }
4699 4699 EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4700 4700
4701 4701 ASSERT(0);
4702 4702 return (FCT_FAILURE);
4703 4703 }
4704 4704
4705 4705 fct_status_t
4706 4706 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4707 4707 {
4708 4708 uint8_t *req;
4709 4709 qlt_cmd_t *qcmd;
4710 4710
4711 4711 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4712 4712 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4713 4713 EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4714 4714
4715 4715 mutex_enter(&qlt->req_lock);
4716 4716 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4717 4717 if (req == NULL) {
4718 4718 mutex_exit(&qlt->req_lock);
4719 4719
4720 4720 return (FCT_BUSY);
4721 4721 }
4722 4722 bzero(req, IOCB_SIZE);
4723 4723 req[0] = 0x33; req[1] = 1;
4724 4724 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4725 4725 if (cmd->cmd_rp) {
4726 4726 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4727 4727 } else {
4728 4728 QMEM_WR16(qlt, req+8, 0xFFFF);
4729 4729 }
4730 4730
4731 4731 QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4732 4732 QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4733 4733 qlt_submit_req_entries(qlt, 1);
4734 4734 mutex_exit(&qlt->req_lock);
4735 4735
4736 4736 return (FCT_SUCCESS);
4737 4737 }
4738 4738
4739 4739 fct_status_t
4740 4740 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4741 4741 {
4742 4742 uint8_t *req;
4743 4743 qlt_cmd_t *qcmd;
4744 4744 fct_els_t *els;
4745 4745 uint8_t elsop, req1f;
4746 4746
4747 4747 els = (fct_els_t *)cmd->cmd_specific;
4748 4748 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4749 4749 elsop = els->els_req_payload[0];
4750 4750 EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4751 4751 elsop);
4752 4752 req1f = 0x60; /* Terminate xchg */
4753 4753 if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4754 4754 (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4755 4755 req1f = (uint8_t)(req1f | BIT_4);
4756 4756 }
4757 4757
4758 4758 mutex_enter(&qlt->req_lock);
4759 4759 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4760 4760 if (req == NULL) {
4761 4761 mutex_exit(&qlt->req_lock);
4762 4762
4763 4763 return (FCT_BUSY);
4764 4764 }
4765 4765
4766 4766 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4767 4767 bzero(req, IOCB_SIZE);
4768 4768 req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4769 4769 req[0x16] = elsop; req[0x1f] = req1f;
4770 4770 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4771 4771 if (cmd->cmd_rp) {
4772 4772 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4773 4773 EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4774 4774 } else {
4775 4775 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4776 4776 EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4777 4777 }
4778 4778
4779 4779 QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4780 4780 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4781 4781 qlt_submit_req_entries(qlt, 1);
4782 4782 mutex_exit(&qlt->req_lock);
4783 4783
4784 4784 return (FCT_SUCCESS);
4785 4785 }
4786 4786
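/*
 * Abort an unsolicited SCSI (FCP_XCHG) command. An IOCB of entry type
 * 0x12 is queued carrying the command's firmware exchange address,
 * OX_ID and remote port ID, with BIT_14 set in the flags field and a
 * 60 second timeout.
 */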
4787 4787 fct_status_t
4788 4788 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4789 4789 {
4790 4790 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4791 4791 uint8_t *req;
4792 4792 uint16_t flags;
4793 4793
4794 4794 flags = (uint16_t)(BIT_14 |
4795 4795 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4796 4796 EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4797 4797
4798 4798 mutex_enter(&qlt->req_lock);
4799 4799 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4800 4800 if (req == NULL) {
4801 4801 mutex_exit(&qlt->req_lock);
4802 4802
4803 4803 return (FCT_BUSY);
4804 4804 }
4805 4805
4806 4806 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4807 4807 bzero(req, IOCB_SIZE);
4808 4808 req[0] = 0x12; req[1] = 0x1;
4809 4809 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4810 4810 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4811 4811 QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
4812 4812 QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4813 4813 QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4814 4814 QMEM_WR16(qlt, req+0x1A, flags);
4815 4815 QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4816 4816 qlt_submit_req_entries(qlt, 1);
4817 4817 mutex_exit(&qlt->req_lock);
4818 4818
4819 4819 return (FCT_SUCCESS);
4820 4820 }
4821 4821
4822 4822 fct_status_t
4823 4823 qlt_send_cmd(fct_cmd_t *cmd)
4824 4824 {
4825 4825 qlt_state_t *qlt;
4826 4826
4827 4827 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4828 4828 if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4829 4829 return (qlt_send_els(qlt, cmd));
4830 4830 } else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4831 4831 return (qlt_send_ct(qlt, cmd));
4832 4832 }
4833 4833 EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4834 4834
4835 4835 ASSERT(0);
4836 4836 return (FCT_FAILURE);
4837 4837 }
4838 4838
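/*
 * Send a solicited ELS request. The request payload and the response
 * buffer share one DMA allocation; the response area starts at the
 * request size rounded up to 8 bytes. An ELS Passthrough IOCB (entry
 * type 0x53) is queued with one command DSD and one response DSD
 * pointing into that allocation.
 */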
4839 4839 fct_status_t
4840 4840 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4841 4841 {
4842 4842 uint8_t *req;
4843 4843 fct_els_t *els;
4844 4844 qlt_cmd_t *qcmd;
4845 4845 stmf_data_buf_t *buf;
4846 4846 qlt_dmem_bctl_t *bctl;
4847 4847 uint32_t sz, minsz;
4848 4848
4849 4849 els = (fct_els_t *)cmd->cmd_specific;
4850 4850 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4851 4851 qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4852 4852 qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4853 4853 sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4854 4854 buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4855 4855 if (buf == NULL) {
4856 4856 return (FCT_BUSY);
4857 4857 }
4858 4858 bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4859 4859
4860 4860 qcmd->dbuf = buf;
4861 4861 bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4862 4862 els->els_req_size);
4863 4863 qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4864 4864
4865 4865 mutex_enter(&qlt->req_lock);
4866 4866 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4867 4867 if (req == NULL) {
4868 4868 qlt_dmem_free(NULL, buf);
4869 4869 mutex_exit(&qlt->req_lock);
4870 4870 return (FCT_BUSY);
4871 4871 }
4872 4872 bzero(req, IOCB_SIZE);
4873 4873 req[0] = 0x53; req[1] = 1;
4874 4874 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4875 4875 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4876 4876 QMEM_WR16(qlt, (&req[0xC]), 1);
4877 4877 QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4878 4878 QMEM_WR16(qlt, (&req[0x14]), 1);
4879 4879 req[0x16] = els->els_req_payload[0];
4880 4880 if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4881 4881 req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4882 4882 req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4883 4883 req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4884 4884 }
4885 4885 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4886 4886 QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4887 4887 QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4888 4888 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4889 4889 QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4890 4890 QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4891 4891 qcmd->param.resp_offset));
4892 4892 QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4893 4893 qlt_submit_req_entries(qlt, 1);
4894 4894 mutex_exit(&qlt->req_lock);
4895 4895
4896 4896 return (FCT_SUCCESS);
4897 4897 }
4898 4898
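/*
 * Send a solicited CT passthrough request. As in qlt_send_els(), the
 * request and response share a single DMA buffer and a CT Passthrough
 * IOCB (entry type 0x29) is queued with separate command and response
 * DSDs; the command timeout is set to 0x20 (> 2 * RA_TOV).
 */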
4899 4899 fct_status_t
4900 4900 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4901 4901 {
4902 4902 uint8_t *req;
4903 4903 fct_sol_ct_t *ct;
4904 4904 qlt_cmd_t *qcmd;
4905 4905 stmf_data_buf_t *buf;
4906 4906 qlt_dmem_bctl_t *bctl;
4907 4907 uint32_t sz, minsz;
4908 4908
4909 4909 ct = (fct_sol_ct_t *)cmd->cmd_specific;
4910 4910 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4911 4911 qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4912 4912 qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4913 4913 sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4914 4914 buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4915 4915 if (buf == NULL) {
4916 4916 return (FCT_BUSY);
4917 4917 }
4918 4918 bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4919 4919
4920 4920 qcmd->dbuf = buf;
4921 4921 bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4922 4922 ct->ct_req_size);
4923 4923 qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4924 4924
4925 4925 mutex_enter(&qlt->req_lock);
4926 4926 req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4927 4927 if (req == NULL) {
4928 4928 qlt_dmem_free(NULL, buf);
4929 4929 mutex_exit(&qlt->req_lock);
4930 4930 return (FCT_BUSY);
4931 4931 }
4932 4932 bzero(req, IOCB_SIZE);
4933 4933 req[0] = 0x29; req[1] = 1;
4934 4934 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4935 4935 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4936 4936 QMEM_WR16(qlt, (&req[0xC]), 1);
4937 4937 QMEM_WR16(qlt, (&req[0x10]), 0x20); /* > (2 * RA_TOV) */
4938 4938 QMEM_WR16(qlt, (&req[0x14]), 1);
4939 4939
4940 4940 QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4941 4941 QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4942 4942
4943 4943 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4944 4944 QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4945 4945 QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4946 4946 qcmd->param.resp_offset)); /* RESPONSE DSD */
4947 4947 QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4948 4948
4949 4949 qlt_submit_req_entries(qlt, 1);
4950 4950 mutex_exit(&qlt->req_lock);
4951 4951
4952 4952 return (FCT_SUCCESS);
4953 4953 }
4954 4954
4955 4955
4956 4956 /*
4957 4957  * All QLT_FIRMWARE_* flags are mainly handled in this function.
4958 4958  * It cannot be called in interrupt context.
4959 4959  *
4960 4960  * The firmware dump exists to serve the ioctl path, so it uses
4961 4961  * qlt_ioctl_flags and qlt_ioctl_lock.
4962 4962 */
4963 4963 static fct_status_t
4964 4964 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4965 4965 {
4966 4966 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4967 4967 int i;
4968 4968 int retries, n;
4969 4969 uint_t size_left;
4970 4970 char c = ' ';
4971 4971 uint32_t addr, endaddr, words_to_read;
4972 4972 caddr_t buf;
4973 4973 fct_status_t ret;
4974 4974
4975 4975 mutex_enter(&qlt->qlt_ioctl_lock);
4976 4976 /*
4977 4977 	 * Make sure there is no dump already in progress.
4978 4978 */
4979 4979 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4980 4980 mutex_exit(&qlt->qlt_ioctl_lock);
4981 4981 EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4982 4982 qlt->qlt_ioctl_flags);
4983 4983 EL(qlt, "outstanding\n");
4984 4984 return (FCT_FAILURE);
4985 4985 }
4986 4986
4987 4987 /*
4988 4988 	 * Make sure not to overwrite an existing dump.
4989 4989 */
4990 4990 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4991 4991 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4992 4992 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4993 4993 /*
4994 4994 		 * If we already have a dump that was not triggered by the
4995 4995 		 * user and the user hasn't fetched it, we shouldn't dump again.
4996 4996 */
4997 4997 mutex_exit(&qlt->qlt_ioctl_lock);
4998 4998 EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4999 4999 qlt->qlt_ioctl_flags);
5000 5000 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
5001 5001 "is one already outstanding.", qlt->instance);
5002 5002 return (FCT_FAILURE);
5003 5003 }
5004 5004 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
5005 5005 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5006 5006 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
5007 5007 } else {
5008 5008 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
5009 5009 }
5010 5010 mutex_exit(&qlt->qlt_ioctl_lock);
5011 5011
5012 5012 size_left = QLT_FWDUMP_BUFSIZE;
5013 5013 if (!qlt->qlt_fwdump_buf) {
5014 5014 ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
5015 5015 /*
5016 5016 		 * This is the only place where the dump buffer is allocated.
5017 5017 		 * Once allocated, it is reused until the port is detached.
5018 5018 */
5019 5019 qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
5020 5020 }
5021 5021
5022 5022 /*
5023 5023 * Start to dump firmware
5024 5024 */
5025 5025 buf = (caddr_t)qlt->qlt_fwdump_buf;
5026 5026
5027 5027 /*
5028 5028 * Print the ISP firmware revision number and attributes information
5029 5029 * Read the RISC to Host Status register
5030 5030 */
5031 5031 n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
5032 5032 "Attributes %04x\n\nR2H Status Register\n%08x",
5033 5033 qlt->fw_major, qlt->fw_minor,
5034 5034 qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
5035 5035 buf += n; size_left -= n;
5036 5036
5037 5037 /*
5038 5038 	 * Before pausing the RISC, make sure no mailbox command can execute
5039 5039 */
5040 5040 mutex_enter(&qlt->mbox_lock);
5041 5041 if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
5042 5042 /*
5043 5043 * Wait to grab the mailboxes
5044 5044 */
5045 5045 for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
5046 5046 (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
5047 5047 (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
5048 5048 ddi_get_lbolt() + drv_usectohz(1000000));
5049 5049 if (retries > 5) {
5050 5050 mutex_exit(&qlt->mbox_lock);
5051 5051 EL(qlt, "can't drain out mailbox commands\n");
5052 5052 goto dump_fail;
5053 5053 }
5054 5054 }
5055 5055 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
5056 5056 cv_broadcast(&qlt->mbox_cv);
5057 5057 }
5058 5058 mutex_exit(&qlt->mbox_lock);
5059 5059
5060 5060 /*
5061 5061 * Pause the RISC processor
5062 5062 */
5063 5063 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
5064 5064
5065 5065 /*
5066 5066 * Wait for the RISC processor to pause
5067 5067 */
5068 5068 for (i = 0; i < 200; i++) {
5069 5069 if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
5070 5070 break;
5071 5071 }
5072 5072 drv_usecwait(1000);
5073 5073 }
5074 5074 if (i == 200) {
5075 5075 EL(qlt, "can't pause\n");
5076 5076 		goto dump_fail;
5077 5077 }
5078 5078
5079 5079 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
5080 5080 goto over_25xx_specific_dump;
5081 5081 }
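	/*
	 * Each register block below is captured the same way: write the
	 * block's base address to the window select register at offset
	 * 0x54, then read the registers through the window starting at
	 * offset 0xC0 via qlt_fwdump_dump_regs().
	 */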
5082 5082 n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5083 5083 buf += n; size_left -= n;
5084 5084 REG_WR32(qlt, 0x54, 0x7000);
5085 5085 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 5086 buf += n; size_left -= n;
5087 5087 REG_WR32(qlt, 0x54, 0x7010);
5088 5088 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 5089 buf += n; size_left -= n;
5090 5090 REG_WR32(qlt, 0x54, 0x7C00);
5091 5091
5092 5092 n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5093 5093 buf += n; size_left -= n;
5094 5094 REG_WR32(qlt, 0xC0, 0x1);
5095 5095 n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5096 5096 buf += n; size_left -= n;
5097 5097 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5098 5098 buf += n; size_left -= n;
5099 5099 REG_WR32(qlt, 0xC0, 0x0);
5100 5100
5101 5101 over_25xx_specific_dump:;
5102 5102 n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
5103 5103 buf += n; size_left -= n;
5104 5104 /*
5105 5105 	 * Capture data from 32 registers
5106 5106 */
5107 5107 n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5108 5108 buf += n; size_left -= n;
5109 5109
5110 5110 /*
5111 5111 * Disable interrupts
5112 5112 */
5113 5113 REG_WR32(qlt, 0xc, 0);
5114 5114
5115 5115 /*
5116 5116 * Shadow registers
5117 5117 */
5118 5118 n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
5119 5119 buf += n; size_left -= n;
5120 5120
5121 5121 REG_WR32(qlt, 0x54, 0xF70);
5122 5122 addr = 0xb0000000;
5123 5123 for (i = 0; i < 0xb; i++) {
5124 5124 if ((!qlt->qlt_25xx_chip) &&
5125 5125 (!qlt->qlt_81xx_chip) &&
5126 5126 (i >= 7)) {
5127 5127 break;
5128 5128 }
5129 5129 if (i && ((i & 7) == 0)) {
5130 5130 n = (int)snprintf(buf, size_left, "\n");
5131 5131 buf += n; size_left -= n;
5132 5132 }
5133 5133 REG_WR32(qlt, 0xF0, addr);
5134 5134 n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5135 5135 buf += n; size_left -= n;
5136 5136 addr += 0x100000;
5137 5137 }
5138 5138
5139 5139 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5140 5140 REG_WR32(qlt, 0x54, 0x10);
5141 5141 n = (int)snprintf(buf, size_left,
5142 5142 "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
5143 5143 buf += n; size_left -= n;
5144 5144 }
5145 5145
5146 5146 /*
5147 5147 * Mailbox registers
5148 5148 */
5149 5149 n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
5150 5150 buf += n; size_left -= n;
5151 5151 for (i = 0; i < 32; i += 2) {
5152 5152 if ((i + 2) & 15) {
5153 5153 c = ' ';
5154 5154 } else {
5155 5155 c = '\n';
5156 5156 }
5157 5157 n = (int)snprintf(buf, size_left, "%04x %04x%c",
5158 5158 REG_RD16(qlt, 0x80 + (i << 1)),
5159 5159 REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5160 5160 buf += n; size_left -= n;
5161 5161 }
5162 5162
5163 5163 /*
5164 5164 * Transfer sequence registers
5165 5165 */
5166 5166 n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5167 5167 buf += n; size_left -= n;
5168 5168
5169 5169 REG_WR32(qlt, 0x54, 0xBF00);
5170 5170 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5171 5171 buf += n; size_left -= n;
5172 5172 REG_WR32(qlt, 0x54, 0xBF10);
5173 5173 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5174 5174 buf += n; size_left -= n;
5175 5175 REG_WR32(qlt, 0x54, 0xBF20);
5176 5176 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5177 5177 buf += n; size_left -= n;
5178 5178 REG_WR32(qlt, 0x54, 0xBF30);
5179 5179 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5180 5180 buf += n; size_left -= n;
5181 5181 REG_WR32(qlt, 0x54, 0xBF40);
5182 5182 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5183 5183 buf += n; size_left -= n;
5184 5184 REG_WR32(qlt, 0x54, 0xBF50);
5185 5185 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5186 5186 buf += n; size_left -= n;
5187 5187 REG_WR32(qlt, 0x54, 0xBF60);
5188 5188 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5189 5189 buf += n; size_left -= n;
5190 5190 REG_WR32(qlt, 0x54, 0xBF70);
5191 5191 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5192 5192 buf += n; size_left -= n;
5193 5193 n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5194 5194 buf += n; size_left -= n;
5195 5195 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5196 5196 REG_WR32(qlt, 0x54, 0xBFC0);
5197 5197 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5198 5198 buf += n; size_left -= n;
5199 5199 REG_WR32(qlt, 0x54, 0xBFD0);
5200 5200 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5201 5201 buf += n; size_left -= n;
5202 5202 }
5203 5203 REG_WR32(qlt, 0x54, 0xBFE0);
5204 5204 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 5205 buf += n; size_left -= n;
5206 5206 n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5207 5207 buf += n; size_left -= n;
5208 5208 REG_WR32(qlt, 0x54, 0xBFF0);
5209 5209 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5210 5210 buf += n; size_left -= n;
5211 5211
5212 5212 /*
5213 5213 * Receive sequence registers
5214 5214 */
5215 5215 n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5216 5216 buf += n; size_left -= n;
5217 5217 REG_WR32(qlt, 0x54, 0xFF00);
5218 5218 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 5219 buf += n; size_left -= n;
5220 5220 REG_WR32(qlt, 0x54, 0xFF10);
5221 5221 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5222 5222 buf += n; size_left -= n;
5223 5223 REG_WR32(qlt, 0x54, 0xFF20);
5224 5224 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5225 5225 buf += n; size_left -= n;
5226 5226 REG_WR32(qlt, 0x54, 0xFF30);
5227 5227 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5228 5228 buf += n; size_left -= n;
5229 5229 REG_WR32(qlt, 0x54, 0xFF40);
5230 5230 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5231 5231 buf += n; size_left -= n;
5232 5232 REG_WR32(qlt, 0x54, 0xFF50);
5233 5233 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5234 5234 buf += n; size_left -= n;
5235 5235 REG_WR32(qlt, 0x54, 0xFF60);
5236 5236 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 5237 buf += n; size_left -= n;
5238 5238 REG_WR32(qlt, 0x54, 0xFF70);
5239 5239 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 5240 buf += n; size_left -= n;
5241 5241 n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5242 5242 buf += n; size_left -= n;
5243 5243 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5244 5244 REG_WR32(qlt, 0x54, 0xFFC0);
5245 5245 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5246 5246 buf += n; size_left -= n;
5247 5247 }
5248 5248 REG_WR32(qlt, 0x54, 0xFFD0);
5249 5249 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5250 5250 buf += n; size_left -= n;
5251 5251 n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5252 5252 buf += n; size_left -= n;
5253 5253 REG_WR32(qlt, 0x54, 0xFFE0);
5254 5254 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5255 5255 buf += n; size_left -= n;
5256 5256 n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5257 5257 buf += n; size_left -= n;
5258 5258 REG_WR32(qlt, 0x54, 0xFFF0);
5259 5259 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5260 5260 buf += n; size_left -= n;
5261 5261
5262 5262 if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5263 5263 goto over_aseq_regs;
5264 5264
5265 5265 /*
5266 5266 * Auxiliary sequencer registers
5267 5267 */
5268 5268 n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5269 5269 buf += n; size_left -= n;
5270 5270 REG_WR32(qlt, 0x54, 0xB000);
5271 5271 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5272 5272 buf += n; size_left -= n;
5273 5273 REG_WR32(qlt, 0x54, 0xB010);
5274 5274 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5275 5275 buf += n; size_left -= n;
5276 5276 REG_WR32(qlt, 0x54, 0xB020);
5277 5277 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 5278 buf += n; size_left -= n;
5279 5279 REG_WR32(qlt, 0x54, 0xB030);
5280 5280 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5281 5281 buf += n; size_left -= n;
5282 5282 REG_WR32(qlt, 0x54, 0xB040);
5283 5283 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5284 5284 buf += n; size_left -= n;
5285 5285 REG_WR32(qlt, 0x54, 0xB050);
5286 5286 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 5287 buf += n; size_left -= n;
5288 5288 REG_WR32(qlt, 0x54, 0xB060);
5289 5289 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5290 5290 buf += n; size_left -= n;
5291 5291 REG_WR32(qlt, 0x54, 0xB070);
5292 5292 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5293 5293 buf += n; size_left -= n;
5294 5294 n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5295 5295 buf += n; size_left -= n;
5296 5296 REG_WR32(qlt, 0x54, 0xB0C0);
5297 5297 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 5298 buf += n; size_left -= n;
5299 5299 REG_WR32(qlt, 0x54, 0xB0D0);
5300 5300 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 5301 buf += n; size_left -= n;
5302 5302 n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5303 5303 buf += n; size_left -= n;
5304 5304 REG_WR32(qlt, 0x54, 0xB0E0);
5305 5305 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 5306 buf += n; size_left -= n;
5307 5307 n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5308 5308 buf += n; size_left -= n;
5309 5309 REG_WR32(qlt, 0x54, 0xB0F0);
5310 5310 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 5311 buf += n; size_left -= n;
5312 5312
5313 5313 over_aseq_regs:;
5314 5314
5315 5315 /*
5316 5316 * Command DMA registers
5317 5317 */
5318 5318 n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5319 5319 buf += n; size_left -= n;
5320 5320 REG_WR32(qlt, 0x54, 0x7100);
5321 5321 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5322 5322 buf += n; size_left -= n;
5323 5323
5324 5324 /*
5325 5325 * Queues
5326 5326 */
5327 5327 n = (int)snprintf(buf, size_left,
5328 5328 "\nRequest0 Queue DMA Channel registers\n");
5329 5329 buf += n; size_left -= n;
5330 5330 REG_WR32(qlt, 0x54, 0x7200);
5331 5331 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5332 5332 buf += n; size_left -= n;
5333 5333 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5334 5334 buf += n; size_left -= n;
5335 5335
5336 5336 n = (int)snprintf(buf, size_left,
5337 5337 "\n\nResponse0 Queue DMA Channel registers\n");
5338 5338 buf += n; size_left -= n;
5339 5339 REG_WR32(qlt, 0x54, 0x7300);
5340 5340 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5341 5341 buf += n; size_left -= n;
5342 5342 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5343 5343 buf += n; size_left -= n;
5344 5344
5345 5345 n = (int)snprintf(buf, size_left,
5346 5346 "\n\nRequest1 Queue DMA Channel registers\n");
5347 5347 buf += n; size_left -= n;
5348 5348 REG_WR32(qlt, 0x54, 0x7400);
5349 5349 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5350 5350 buf += n; size_left -= n;
5351 5351 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5352 5352 buf += n; size_left -= n;
5353 5353
5354 5354 /*
5355 5355 * Transmit DMA registers
5356 5356 */
5357 5357 n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5358 5358 buf += n; size_left -= n;
5359 5359 REG_WR32(qlt, 0x54, 0x7600);
5360 5360 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5361 5361 buf += n; size_left -= n;
5362 5362 REG_WR32(qlt, 0x54, 0x7610);
5363 5363 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5364 5364 buf += n; size_left -= n;
5365 5365 n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5366 5366 buf += n; size_left -= n;
5367 5367 REG_WR32(qlt, 0x54, 0x7620);
5368 5368 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5369 5369 buf += n; size_left -= n;
5370 5370 REG_WR32(qlt, 0x54, 0x7630);
5371 5371 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5372 5372 buf += n; size_left -= n;
5373 5373 n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5374 5374 buf += n; size_left -= n;
5375 5375 REG_WR32(qlt, 0x54, 0x7640);
5376 5376 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5377 5377 buf += n; size_left -= n;
5378 5378 REG_WR32(qlt, 0x54, 0x7650);
5379 5379 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5380 5380 buf += n; size_left -= n;
5381 5381 n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5382 5382 buf += n; size_left -= n;
5383 5383 REG_WR32(qlt, 0x54, 0x7660);
5384 5384 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5385 5385 buf += n; size_left -= n;
5386 5386 REG_WR32(qlt, 0x54, 0x7670);
5387 5387 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5388 5388 buf += n; size_left -= n;
5389 5389 n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5390 5390 buf += n; size_left -= n;
5391 5391 REG_WR32(qlt, 0x54, 0x7680);
5392 5392 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5393 5393 buf += n; size_left -= n;
5394 5394 REG_WR32(qlt, 0x54, 0x7690);
5395 5395 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5396 5396 buf += n; size_left -= n;
5397 5397 n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5398 5398 buf += n; size_left -= n;
5399 5399 REG_WR32(qlt, 0x54, 0x76A0);
5400 5400 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5401 5401 buf += n; size_left -= n;
5402 5402
5403 5403 /*
5404 5404 * Receive DMA registers
5405 5405 */
5406 5406 n = (int)snprintf(buf, size_left,
5407 5407 "\nRCV Thread 0 Data DMA registers\n");
5408 5408 buf += n; size_left -= n;
5409 5409 REG_WR32(qlt, 0x54, 0x7700);
5410 5410 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5411 5411 buf += n; size_left -= n;
5412 5412 REG_WR32(qlt, 0x54, 0x7710);
5413 5413 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5414 5414 buf += n; size_left -= n;
5415 5415 n = (int)snprintf(buf, size_left,
5416 5416 "\nRCV Thread 1 Data DMA registers\n");
5417 5417 buf += n; size_left -= n;
5418 5418 REG_WR32(qlt, 0x54, 0x7720);
5419 5419 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5420 5420 buf += n; size_left -= n;
5421 5421 REG_WR32(qlt, 0x54, 0x7730);
5422 5422 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5423 5423 buf += n; size_left -= n;
5424 5424
5425 5425 /*
5426 5426 * RISC registers
5427 5427 */
5428 5428 n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5429 5429 buf += n; size_left -= n;
5430 5430 REG_WR32(qlt, 0x54, 0x0F00);
5431 5431 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5432 5432 buf += n; size_left -= n;
5433 5433 REG_WR32(qlt, 0x54, 0x0F10);
5434 5434 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5435 5435 buf += n; size_left -= n;
5436 5436 REG_WR32(qlt, 0x54, 0x0F20);
5437 5437 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5438 5438 buf += n; size_left -= n;
5439 5439 REG_WR32(qlt, 0x54, 0x0F30);
5440 5440 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5441 5441 buf += n; size_left -= n;
5442 5442 REG_WR32(qlt, 0x54, 0x0F40);
5443 5443 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5444 5444 buf += n; size_left -= n;
5445 5445 REG_WR32(qlt, 0x54, 0x0F50);
5446 5446 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5447 5447 buf += n; size_left -= n;
5448 5448 REG_WR32(qlt, 0x54, 0x0F60);
5449 5449 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5450 5450 buf += n; size_left -= n;
5451 5451 REG_WR32(qlt, 0x54, 0x0F70);
5452 5452 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5453 5453 buf += n; size_left -= n;
5454 5454
5455 5455 /*
5456 5456 * Local memory controller registers
5457 5457 */
5458 5458 n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5459 5459 buf += n; size_left -= n;
5460 5460 REG_WR32(qlt, 0x54, 0x3000);
5461 5461 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5462 5462 buf += n; size_left -= n;
5463 5463 REG_WR32(qlt, 0x54, 0x3010);
5464 5464 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5465 5465 buf += n; size_left -= n;
5466 5466 REG_WR32(qlt, 0x54, 0x3020);
5467 5467 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5468 5468 buf += n; size_left -= n;
5469 5469 REG_WR32(qlt, 0x54, 0x3030);
5470 5470 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5471 5471 buf += n; size_left -= n;
5472 5472 REG_WR32(qlt, 0x54, 0x3040);
5473 5473 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5474 5474 buf += n; size_left -= n;
5475 5475 REG_WR32(qlt, 0x54, 0x3050);
5476 5476 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5477 5477 buf += n; size_left -= n;
5478 5478 REG_WR32(qlt, 0x54, 0x3060);
5479 5479 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5480 5480 buf += n; size_left -= n;
5481 5481
5482 5482 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5483 5483 REG_WR32(qlt, 0x54, 0x3070);
5484 5484 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5485 5485 buf += n; size_left -= n;
5486 5486 }
5487 5487
5488 5488 /*
5489 5489 	 * Fibre protocol module registers
5490 5490 */
5491 5491 n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5492 5492 buf += n; size_left -= n;
5493 5493 REG_WR32(qlt, 0x54, 0x4000);
5494 5494 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5495 5495 buf += n; size_left -= n;
5496 5496 REG_WR32(qlt, 0x54, 0x4010);
5497 5497 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5498 5498 buf += n; size_left -= n;
5499 5499 REG_WR32(qlt, 0x54, 0x4020);
5500 5500 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5501 5501 buf += n; size_left -= n;
5502 5502 REG_WR32(qlt, 0x54, 0x4030);
5503 5503 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5504 5504 buf += n; size_left -= n;
5505 5505 REG_WR32(qlt, 0x54, 0x4040);
5506 5506 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5507 5507 buf += n; size_left -= n;
5508 5508 REG_WR32(qlt, 0x54, 0x4050);
5509 5509 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5510 5510 buf += n; size_left -= n;
5511 5511 REG_WR32(qlt, 0x54, 0x4060);
5512 5512 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5513 5513 buf += n; size_left -= n;
5514 5514 REG_WR32(qlt, 0x54, 0x4070);
5515 5515 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5516 5516 buf += n; size_left -= n;
5517 5517 REG_WR32(qlt, 0x54, 0x4080);
5518 5518 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5519 5519 buf += n; size_left -= n;
5520 5520 REG_WR32(qlt, 0x54, 0x4090);
5521 5521 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5522 5522 buf += n; size_left -= n;
5523 5523 REG_WR32(qlt, 0x54, 0x40A0);
5524 5524 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5525 5525 buf += n; size_left -= n;
5526 5526 REG_WR32(qlt, 0x54, 0x40B0);
5527 5527 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5528 5528 buf += n; size_left -= n;
5529 5529 if (qlt->qlt_81xx_chip) {
5530 5530 REG_WR32(qlt, 0x54, 0x40C0);
5531 5531 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5532 5532 buf += n; size_left -= n;
5533 5533 REG_WR32(qlt, 0x54, 0x40D0);
5534 5534 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5535 5535 buf += n; size_left -= n;
5536 5536 }
5537 5537
5538 5538 /*
5539 5539 * Fibre buffer registers
5540 5540 */
5541 5541 n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5542 5542 buf += n; size_left -= n;
5543 5543 REG_WR32(qlt, 0x54, 0x6000);
5544 5544 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5545 5545 buf += n; size_left -= n;
5546 5546 REG_WR32(qlt, 0x54, 0x6010);
5547 5547 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5548 5548 buf += n; size_left -= n;
5549 5549 REG_WR32(qlt, 0x54, 0x6020);
5550 5550 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5551 5551 buf += n; size_left -= n;
5552 5552 REG_WR32(qlt, 0x54, 0x6030);
5553 5553 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5554 5554 buf += n; size_left -= n;
5555 5555 REG_WR32(qlt, 0x54, 0x6040);
5556 5556 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5557 5557 buf += n; size_left -= n;
5558 5558 REG_WR32(qlt, 0x54, 0x6100);
5559 5559 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5560 5560 buf += n; size_left -= n;
5561 5561 REG_WR32(qlt, 0x54, 0x6130);
5562 5562 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5563 5563 buf += n; size_left -= n;
5564 5564 REG_WR32(qlt, 0x54, 0x6150);
5565 5565 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5566 5566 buf += n; size_left -= n;
5567 5567 REG_WR32(qlt, 0x54, 0x6170);
5568 5568 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5569 5569 buf += n; size_left -= n;
5570 5570 REG_WR32(qlt, 0x54, 0x6190);
5571 5571 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5572 5572 buf += n; size_left -= n;
5573 5573 REG_WR32(qlt, 0x54, 0x61B0);
5574 5574 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5575 5575 buf += n; size_left -= n;
5576 5576 if (qlt->qlt_81xx_chip) {
5577 5577 REG_WR32(qlt, 0x54, 0x61C0);
5578 5578 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5579 5579 buf += n; size_left -= n;
5580 5580 }
5581 5581 if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5582 5582 REG_WR32(qlt, 0x54, 0x6F00);
5583 5583 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5584 5584 buf += n; size_left -= n;
5585 5585 }
5586 5586
5587 5587 qlt->intr_sneak_counter = 10;
5588 5588 mutex_enter(&qlt->intr_lock);
5589 5589 (void) qlt_reset_chip(qlt);
5590 5590 drv_usecwait(20);
5591 5591 qlt->intr_sneak_counter = 0;
5592 5592 mutex_exit(&qlt->intr_lock);
5593 5593
5594 5594 /*
5595 5595 * Memory
5596 5596 */
5597 5597 n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5598 5598 buf += n; size_left -= n;
5599 5599
5600 5600 addr = 0x20000;
5601 5601 endaddr = 0x22000;
5602 5602 words_to_read = 0;
5603 5603 while (addr < endaddr) {
5604 5604 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5605 5605 if ((words_to_read + addr) > endaddr) {
5606 5606 words_to_read = endaddr - addr;
5607 5607 }
5608 5608 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5609 5609 QLT_SUCCESS) {
5610 5610 EL(qlt, "Error reading risc ram - CODE RAM status="
5611 5611 "%llxh\n", ret);
5612 5612 goto dump_fail;
5613 5613 }
5614 5614
5615 5615 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5616 5616 buf += n; size_left -= n;
5617 5617
5618 5618 if (size_left < 100000) {
5619 5619 EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5620 5620 size_left);
5621 5621 goto dump_ok;
5622 5622 }
5623 5623 addr += words_to_read;
5624 5624 }
5625 5625
5626 5626 n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5627 5627 buf += n; size_left -= n;
5628 5628
5629 5629 addr = 0x100000;
5630 5630 endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5631 5631 endaddr++;
5632 5632 if (endaddr & 7) {
5633 5633 endaddr = (endaddr + 7) & 0xFFFFFFF8;
5634 5634 }
5635 5635
5636 5636 words_to_read = 0;
5637 5637 while (addr < endaddr) {
5638 5638 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5639 5639 if ((words_to_read + addr) > endaddr) {
5640 5640 words_to_read = endaddr - addr;
5641 5641 }
5642 5642 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5643 5643 QLT_SUCCESS) {
5644 5644 EL(qlt, "Error reading risc ram - EXT RAM status="
5645 5645 "%llxh\n", ret);
5646 5646 goto dump_fail;
5647 5647 }
5648 5648 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5649 5649 buf += n; size_left -= n;
5650 5650 if (size_left < 100000) {
5651 5651 EL(qlt, "run out of space - EXT RAM\n");
5652 5652 goto dump_ok;
5653 5653 }
5654 5654 addr += words_to_read;
5655 5655 }
5656 5656
5657 5657 /*
5658 5658 * Label the end tag
5659 5659 */
5660 5660 n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5661 5661 buf += n; size_left -= n;
5662 5662
5663 5663 /*
5664 5664 * Queue dumping
5665 5665 */
5666 5666 n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5667 5667 buf += n; size_left -= n;
5668 5668 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5669 5669 REQUEST_QUEUE_ENTRIES, buf, size_left);
5670 5670 buf += n; size_left -= n;
5671 5671
5672 5672 n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5673 5673 buf += n; size_left -= n;
5674 5674 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5675 5675 PRIORITY_QUEUE_ENTRIES, buf, size_left);
5676 5676 buf += n; size_left -= n;
5677 5677
5678 5678 n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5679 5679 buf += n; size_left -= n;
5680 5680 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5681 5681 RESPONSE_QUEUE_ENTRIES, buf, size_left);
5682 5682 buf += n; size_left -= n;
5683 5683
5684 5684 n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5685 5685 buf += n; size_left -= n;
5686 5686 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5687 5687 ATIO_QUEUE_ENTRIES, buf, size_left);
5688 5688 buf += n; size_left -= n;
5689 5689
5690 5690 /*
5691 5691 * Label dump reason
5692 5692 */
5693 5693 n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5694 5694 qlt->qlt_port_alias, ssci->st_additional_info);
5695 5695 buf += n; size_left -= n;
5696 5696
5697 5697 dump_ok:
5698 5698 EL(qlt, "left-%d\n", size_left);
5699 5699
5700 5700 mutex_enter(&qlt->qlt_ioctl_lock);
5701 5701 qlt->qlt_ioctl_flags &=
5702 5702 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5703 5703 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5704 5704 mutex_exit(&qlt->qlt_ioctl_lock);
5705 5705 return (FCT_SUCCESS);
5706 5706
5707 5707 dump_fail:
5708 5708 EL(qlt, "dump not done\n");
5709 5709 mutex_enter(&qlt->qlt_ioctl_lock);
5710 5710 qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5711 5711 mutex_exit(&qlt->qlt_ioctl_lock);
5712 5712 return (FCT_FAILURE);
5713 5713 }
5714 5714
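/*
 * Read 'count' 32-bit registers starting at PCI register offset
 * 'startaddr' and format them into 'buf', eight values per line.
 * Returns the number of bytes written so the caller can advance its
 * buffer pointer and remaining size.
 */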
5715 5715 static int
5716 5716 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5717 5717 uint_t size_left)
5718 5718 {
5719 5719 int i;
5720 5720 int n;
5721 5721 char c = ' ';
5722 5722
5723 5723 for (i = 0, n = 0; i < count; i++) {
5724 5724 if ((i + 1) & 7) {
5725 5725 c = ' ';
5726 5726 } else {
5727 5727 c = '\n';
5728 5728 }
5729 5729 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5730 5730 "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5731 5731 }
5732 5732 return (n);
5733 5733 }
5734 5734
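/*
 * Format 'words' 32-bit words of RISC RAM into 'buf'. The words were
 * previously transferred into the mailbox DMA area of the queue memory
 * by qlt_read_risc_ram(); each output line holds eight words and is
 * prefixed with the RISC address of its first word.
 */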
5735 5735 static int
5736 5736 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5737 5737 caddr_t buf, uint_t size_left)
5738 5738 {
5739 5739 int i;
5740 5740 int n;
5741 5741 char c = ' ';
5742 5742 uint32_t *ptr;
5743 5743
5744 5744 ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5745 5745 for (i = 0, n = 0; i < words; i++) {
5746 5746 if ((i & 7) == 0) {
5747 5747 n = (int)(n + (int)snprintf(&buf[n],
5748 5748 (uint_t)(size_left - n), "%08x: ", addr + i));
5749 5749 }
5750 5750 if ((i + 1) & 7) {
5751 5751 c = ' ';
5752 5752 } else {
5753 5753 c = '\n';
5754 5754 }
5755 5755 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5756 5756 "%08x%c", ptr[i], c));
5757 5757 }
5758 5758 return (n);
5759 5759 }
5760 5760
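/*
 * Format a queue for the dump: each entry is emitted as 32 16-bit
 * words (read with QMEM_RD16() so byte order matches the chip), eight
 * words per line, prefixed with the word offset into the queue.
 */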
5761 5761 static int
5762 5762 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5763 5763 uint_t size_left)
5764 5764 {
5765 5765 int i;
5766 5766 int n;
5767 5767 char c = ' ';
5768 5768 int words;
5769 5769 uint16_t *ptr;
5770 5770 uint16_t w;
5771 5771
5772 5772 words = entries * 32;
5773 5773 ptr = (uint16_t *)qadr;
5774 5774 for (i = 0, n = 0; i < words; i++) {
5775 5775 if ((i & 7) == 0) {
5776 5776 n = (int)(n + (int)snprintf(&buf[n],
5777 5777 (uint_t)(size_left - n), "%05x: ", i));
5778 5778 }
5779 5779 if ((i + 1) & 7) {
5780 5780 c = ' ';
5781 5781 } else {
5782 5782 c = '\n';
5783 5783 }
5784 5784 w = QMEM_RD16(qlt, &ptr[i]);
5785 5785 n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5786 5786 w, c));
5787 5787 }
5788 5788 return (n);
5789 5789 }
5790 5790
5791 5791 /*
5792 5792  * Only called by the debug dump. Interrupts are disabled and the
5793 5793  * mailboxes, along with the mailbox DMA RAM, are available.
5794 5794  * Copies data from RISC RAM to system memory.
5795 5795 */
5796 5796 static fct_status_t
5797 5797 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5798 5798 {
5799 5799 uint64_t da;
5800 5800 fct_status_t ret;
5801 5801
5802 5802 REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
5803 5803 da = qlt->queue_mem_cookie.dmac_laddress;
5804 5804 da += MBOX_DMA_MEM_OFFSET;
5805 5805
5806 5806 /* System destination address */
5807 5807 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
5808 5808 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
5809 5809 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
5810 5810 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
5811 5811
5812 5812 /* Length */
5813 5813 REG_WR16(qlt, REG_MBOX(5), LSW(words));
5814 5814 REG_WR16(qlt, REG_MBOX(4), MSW(words));
5815 5815
5816 5816 /* RISC source address */
5817 5817 REG_WR16(qlt, REG_MBOX(1), LSW(addr));
5818 5818 REG_WR16(qlt, REG_MBOX(8), MSW(addr));
5819 5819
5820 5820 ret = qlt_raw_mailbox_command(qlt);
5821 5821 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
5822 5822 if (ret == QLT_SUCCESS) {
5823 5823 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5824 5824 MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5825 5825 } else {
5826 5826 		EL(qlt, "qlt_raw_mailbox_command=MBC_DUMP_RAM_EXTENDED status=%llxh\n", ret);
5827 5827 }
5828 5828 return (ret);
5829 5829 }
5830 5830
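/*
 * Queue a firmware verify IOCB (entry type 0x1b) with the "don't
 * update" option set. The completion status is examined in
 * qlt_handle_verify_fw_completion(); if no request queue entry is
 * available the verify is silently skipped (see the XXX below).
 */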
5831 5831 static void
5832 5832 qlt_verify_fw(qlt_state_t *qlt)
5833 5833 {
5834 5834 caddr_t req;
5835 5835 /* Just put it on the request queue */
5836 5836 mutex_enter(&qlt->req_lock);
5837 5837 req = qlt_get_req_entries(qlt, 1);
5838 5838 if (req == NULL) {
5839 5839 mutex_exit(&qlt->req_lock);
5840 5840 /* XXX handle this */
5841 5841 return;
5842 5842 }
5843 5843
5844 5844 bzero(req, IOCB_SIZE);
5845 5845
5846 5846 req[0] = 0x1b;
5847 5847 req[1] = 1;
5848 5848
5849 5849 QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5850 5850 QMEM_WR16(qlt, (&req[0x8]), 1); /* options - don't update */
5851 5851 QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5852 5852
5853 5853 qlt_submit_req_entries(qlt, 1);
5854 5854 mutex_exit(&qlt->req_lock);
5855 5855 }
5856 5856
5857 5857 static void
5858 5858 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5859 5859 {
5860 5860 uint16_t status;
5861 5861 char info[QLT_INFO_LEN];
5862 5862
5863 5863 status = QMEM_RD16(qlt, rsp+8);
5864 5864 if (status != 0) {
5865 5865 (void) snprintf(info, sizeof (info),
5866 5866 "qlt_handle_verify_fw_completion: "
5867 5867 "status:%x, rsp:%p", status, (void *)rsp);
5868 5868 if (status == 3) {
5869 5869 uint16_t error_code;
5870 5870
5871 5871 error_code = QMEM_RD16(qlt, rsp+0xA);
5872 5872 (void) snprintf(info, sizeof (info),
5873 5873 "qlt_handle_verify_fw_completion: error code:%x",
5874 5874 error_code);
5875 5875 }
5876 5876 }
5877 5877 }
5878 5878
5879 5879 /*
5880 5880 * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5881 5881 *
5882 5882 * Input: Pointer to the adapter state structure.
5883 5883 * Returns: Success or Failure.
5884 5884 * Context: Kernel context.
5885 5885 */
5886 5886 static int
5887 5887 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5888 5888 {
5889 5889 int rval = DDI_SUCCESS;
5890 5890
5891 5891 qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5892 5892 kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5893 5893
5894 5894 if (qlt->el_trace_desc == NULL) {
5895 5895 cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5896 5896 qlt->instance);
5897 5897 rval = DDI_FAILURE;
5898 5898 } else {
5899 5899 qlt->el_trace_desc->next = 0;
5900 5900 qlt->el_trace_desc->trace_buffer =
5901 5901 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5902 5902
5903 5903 if (qlt->el_trace_desc->trace_buffer == NULL) {
5904 5904 cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5905 5905 qlt->instance);
5906 5906 kmem_free(qlt->el_trace_desc,
5907 5907 sizeof (qlt_el_trace_desc_t));
5908 5908 qlt->el_trace_desc = NULL;
5909 5909 rval = DDI_FAILURE;
5910 5910 } else {
5911 5911 qlt->el_trace_desc->trace_buffer_size =
5912 5912 EL_TRACE_BUF_SIZE;
5913 5913 mutex_init(&qlt->el_trace_desc->mutex, NULL,
5914 5914 MUTEX_DRIVER, NULL);
5915 5915 }
5916 5916 }
5917 5917
5918 5918 return (rval);
5919 5919 }
5920 5920
5921 5921 /*
5922 5922 * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5923 5923 *
5924 5924 * Input: Pointer to the adapter state structure.
5925 5925 * Returns: Success or Failure.
5926 5926 * Context: Kernel context.
5927 5927 */
5928 5928 static int
5929 5929 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5930 5930 {
5931 5931 int rval = DDI_SUCCESS;
5932 5932
5933 5933 if (qlt->el_trace_desc == NULL) {
5934 5934 cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5935 5935 qlt->instance);
5936 5936 rval = DDI_FAILURE;
5937 5937 } else {
5938 5938 if (qlt->el_trace_desc->trace_buffer != NULL) {
5939 5939 kmem_free(qlt->el_trace_desc->trace_buffer,
5940 5940 qlt->el_trace_desc->trace_buffer_size);
5941 5941 }
5942 5942 mutex_destroy(&qlt->el_trace_desc->mutex);
5943 5943 kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5944 5944 qlt->el_trace_desc = NULL;
5945 5945 }
5946 5946
5947 5947 return (rval);
5948 5948 }
5949 5949
5950 5950 /*
5951 5951 * qlt_el_msg
5952 5952 * Extended logging message
5953 5953 *
5954 5954 * Input:
5955 5955 * qlt: adapter state pointer.
5956 5956 * fn: function name.
5957 5957 * ce: level
5958 5958 * ...: Variable argument list.
5959 5959 *
5960 5960 * Context:
5961 5961 * Kernel/Interrupt context.
5962 5962 */
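/*
 * Callers normally go through the EL() macro used throughout this
 * file, e.g. EL(qlt, "status=%xh\n", status), which supplies the
 * calling function's name and the message level; the first variable
 * argument seen here is therefore the format string.
 */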
5963 5963 void
5964 5964 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5965 5965 {
5966 5966 char *s, *fmt = 0, *fmt1 = 0;
5967 5967 char fmt2[EL_BUFFER_RESERVE];
5968 5968 int rval, tmp;
5969 5969 int tracing = 0;
5970 5970 va_list vl;
5971 5971
5972 5972 /* Tracing is the default but it can be disabled. */
5973 5973 if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5974 5974 tracing = 1;
5975 5975
5976 5976 mutex_enter(&qlt->el_trace_desc->mutex);
5977 5977
5978 5978 /*
5979 5979 * Ensure enough space for the string. Wrap to
5980 5980 * start when default message allocation size
5981 5981 * would overrun the end.
5982 5982 */
5983 5983 if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5984 5984 qlt->el_trace_desc->trace_buffer_size) {
5985 5985 fmt = qlt->el_trace_desc->trace_buffer;
5986 5986 qlt->el_trace_desc->next = 0;
5987 5987 } else {
5988 5988 fmt = qlt->el_trace_desc->trace_buffer +
5989 5989 qlt->el_trace_desc->next;
5990 5990 }
5991 5991 }
5992 5992
5993 5993 /* if no buffer use the stack */
5994 5994 if (fmt == NULL) {
5995 5995 fmt = fmt2;
5996 5996 }
5997 5997
5998 5998 va_start(vl, ce);
5999 5999
6000 6000 s = va_arg(vl, char *);
6001 6001
6002 6002 rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
6003 6003 "QEL qlt(%d): %s, ", qlt->instance, fn);
6004 6004 fmt1 = fmt + rval;
6005 6005 tmp = (int)vsnprintf(fmt1,
6006 6006 (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
6007 6007 rval += tmp;
6008 6008
6009 6009 /*
6010 6010 * Calculate the offset where the next message will go,
6011 6011 * skipping the NULL.
6012 6012 */
6013 6013 if (tracing) {
6014 6014 uint16_t next = (uint16_t)(rval += 1);
6015 6015 qlt->el_trace_desc->next += next;
6016 6016 mutex_exit(&qlt->el_trace_desc->mutex);
6017 6017 }
6018 6018
6019 6019 if (enable_extended_logging) {
6020 6020 cmn_err(ce, fmt);
6021 6021 }
6022 6022
6023 6023 va_end(vl);
6024 6024 }
6025 6025
6026 6026 /*
6027 6027 * qlt_dump_el_trace_buffer
6028 6028 * Outputs extended logging trace buffer.
6029 6029 *
6030 6030 * Input:
6031 6031 * qlt: adapter state pointer.
6032 6032 */
6033 6033 void
6034 6034 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
6035 6035 {
6036 6036 char *dump_start = NULL;
6037 6037 char *dump_current = NULL;
6038 6038 char *trace_start;
6039 6039 char *trace_end;
6040 6040 int wrapped = 0;
6041 6041 int rval;
6042 6042
6043 6043 mutex_enter(&qlt->el_trace_desc->mutex);
6044 6044
6045 6045 rval = qlt_validate_trace_desc(qlt);
6046 6046 	if (rval != DDI_SUCCESS) {
6047 6047 cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
6048 6048 qlt->instance);
6049 6049 } else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
6050 6050 dump_current = dump_start;
6051 6051 trace_start = qlt->el_trace_desc->trace_buffer;
6052 6052 trace_end = trace_start +
6053 6053 qlt->el_trace_desc->trace_buffer_size;
6054 6054
6055 6055 cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
6056 6056 qlt->instance,
6057 6057 (void *)dump_start, (void *)trace_start);
6058 6058
6059 6059 while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
6060 6060 (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
6061 6061 /* Show it... */
6062 6062 cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
6063 6063 dump_current);
6064 6064 /* Make the next the current */
6065 6065 dump_current += (strlen(dump_current) + 1);
6066 6066 /* check for wrap */
6067 6067 if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
6068 6068 dump_current = trace_start;
6069 6069 wrapped = 1;
6070 6070 } else if (wrapped) {
6071 6071 /* Don't go past next. */
6072 6072 if ((trace_start + qlt->el_trace_desc->next) <=
6073 6073 dump_current) {
6074 6074 break;
6075 6075 }
6076 6076 			} else if (*dump_current == '\0') {
6077 6077 break;
6078 6078 }
6079 6079 }
6080 6080 }
6081 6081 mutex_exit(&qlt->el_trace_desc->mutex);
6082 6082 }
6083 6083
6084 6084 /*
6085 6085 * qlt_validate_trace_desc
6086 6086 * Ensures the extended logging trace descriptor is good.
6087 6087 *
6088 6088 * Input:
6089 6089 * qlt: adapter state pointer.
6090 6090 *
6091 6091 * Returns:
6092 6092  *	qlt local function return status code.
6093 6093 */
6094 6094 static int
6095 6095 qlt_validate_trace_desc(qlt_state_t *qlt)
6096 6096 {
6097 6097 int rval = DDI_SUCCESS;
6098 6098
6099 6099 if (qlt->el_trace_desc == NULL) {
6100 6100 rval = DDI_FAILURE;
6101 6101 } else if (qlt->el_trace_desc->trace_buffer == NULL) {
6102 6102 rval = DDI_FAILURE;
6103 6103 }
6104 6104 return (rval);
6105 6105 }
6106 6106
6107 6107 /*
6108 6108 * qlt_find_trace_start
6109 6109 * Locate the oldest extended logging trace entry.
6110 6110 *
6111 6111 * Input:
6112 6112 * qlt: adapter state pointer.
6113 6113 *
6114 6114 * Returns:
6115 6115 * Pointer to a string.
6116 6116 *
6117 6117 * Context:
6118 6118 * Kernel/Interrupt context.
6119 6119 */
6120 6120 static char *
6121 6121 qlt_find_trace_start(qlt_state_t *qlt)
6122 6122 {
6123 6123 char *trace_start = 0;
6124 6124 char *trace_next = 0;
6125 6125
6126 6126 trace_next = qlt->el_trace_desc->trace_buffer +
6127 6127 qlt->el_trace_desc->next;
6128 6128
6129 6129 /*
6130 6130 	 * If the buffer has not wrapped, next points at a null, so the
6131 6131 	 * start is the beginning of the buffer. If next points at a
6132 6132 	 * character, we must traverse the buffer until a null is found;
6133 6133 	 * the entry following it is the oldest whole entry in the buffer,
6134 6134 	 * which is the start.
6135 6135 */
6136 6136
6137 6137 if ((trace_next + EL_BUFFER_RESERVE) >=
6138 6138 (qlt->el_trace_desc->trace_buffer +
6139 6139 qlt->el_trace_desc->trace_buffer_size)) {
6140 6140 trace_start = qlt->el_trace_desc->trace_buffer;
6141 6141 	} else if (*trace_next != '\0') {
6142 6142 trace_start = trace_next + (strlen(trace_next) + 1);
6143 6143 } else {
6144 6144 trace_start = qlt->el_trace_desc->trace_buffer;
6145 6145 }
6146 6146 return (trace_start);
6147 6147 }
6148 6148
6149 6149
6150 6150 static int
6151 6151 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
6152 6152 {
6153 6153 return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
6154 6154 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
6155 6155 }
6156 6156
6157 6157 static int
6158 6158 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6159 6159 {
6160 6160 return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6161 6161 DDI_PROP_DONTPASS, prop, prop_val));
6162 6162 }
6163 6163
6164 6164 static int
6165 6165 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6166 6166 {
6167 6167 char inst_prop[256];
6168 6168 int val;
6169 6169
6170 6170 /*
6171 6171 * Get adapter instance specific parameters. If the instance
6172 6172 * specific parameter isn't there, try the global parameter.
6173 6173 */
6174 6174
6175 6175 (void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
6176 6176
6177 6177 if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
6178 6178 val = qlt_read_int_prop(qlt, prop, defval);
6179 6179 }
6180 6180
6181 6181 return (val);
6182 6182 }
6183 6183
6184 6184 static int
6185 6185 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6186 6186 {
6187 6187 char instance_prop[256];
6188 6188
6189 6189 /* Get adapter instance specific parameter. */
6190 6190 (void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6191 6191 return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6192 6192 }
6193 6193
6194 6194 static int
6195 6195 qlt_convert_string_to_ull(char *prop, int radix,
6196 6196 u_longlong_t *result)
6197 6197 {
6198 6198 return (ddi_strtoull((const char *)prop, 0, radix, result));
6199 6199 }
6200 6200
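/*
 * Look for per-instance "adapter-wwnn" and "adapter-wwpn" string
 * properties (e.g. "hba0-adapter-wwnn" for instance 0). If both parse
 * as hex values, overwrite the node and port names in the in-memory
 * NVRAM copy and return TRUE.
 */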
6201 6201 static boolean_t
6202 6202 qlt_wwn_overload_prop(qlt_state_t *qlt)
6203 6203 {
6204 6204 char *prop_val = 0;
6205 6205 int rval;
6206 6206 int radix;
6207 6207 u_longlong_t wwnn = 0, wwpn = 0;
6208 6208 boolean_t overloaded = FALSE;
6209 6209
6210 6210 radix = 16;
6211 6211
6212 6212 rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6213 6213 if (rval == DDI_PROP_SUCCESS) {
6214 6214 rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6215 6215 }
6216 6216 if (rval == DDI_PROP_SUCCESS) {
6217 6217 rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6218 6218 &prop_val);
6219 6219 if (rval == DDI_PROP_SUCCESS) {
6220 6220 rval = qlt_convert_string_to_ull(prop_val, radix,
6221 6221 &wwpn);
6222 6222 }
6223 6223 }
6224 6224 if (rval == DDI_PROP_SUCCESS) {
6225 6225 overloaded = TRUE;
6226 6226 /* Overload the current node/port name nvram copy */
6227 6227 bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6228 6228 BIG_ENDIAN_64(qlt->nvram->node_name);
6229 6229 bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6230 6230 BIG_ENDIAN_64(qlt->nvram->port_name);
6231 6231 }
6232 6232 return (overloaded);
6233 6233 }
6234 6234
6235 6235 /*
6236 6236 * prop_text - Return a pointer to a string describing the status
6237 6237 *
6238 6238 * Input: prop_status = the return status from a property function.
6239 6239 * Returns: pointer to a string.
6240 6240 * Context: Kernel context.
6241 6241 */
6242 6242 char *
6243 6243 prop_text(int prop_status)
6244 6244 {
6245 6245 string_table_t *entry = &prop_status_tbl[0];
6246 6246
6247 6247 return (value2string(entry, prop_status, 0xFFFF));
6248 6248 }
6249 6249
6250 6250 /*
6251 6251 * value2string Return a pointer to a string associated with the value
6252 6252 *
6253 6253 * Input: entry = the value to string table
6254 6254 * value = the value
6255 6255 * Returns: pointer to a string.
6256 6256 * Context: Kernel context.
6257 6257 */
6258 6258 char *
6259 6259 value2string(string_table_t *entry, int value, int delimiter)
6260 6260 {
6261 6261 for (; entry->value != delimiter; entry++) {
6262 6262 if (entry->value == value) {
6263 6263 break;
6264 6264 }
6265 6265 }
6266 6266 return (entry->string);
6267 6267 }
6268 6268
6269 6269 /*
6270 6270  * qlt_chg_endian	Change endianness of byte array.
6271 6271 *
6272 6272 * Input: buf = array pointer.
6273 6273 * size = size of array in bytes.
6274 6274 *
6275 6275 * Context: Interrupt or Kernel context.
6276 6276 */
6277 6277 void
6278 6278 qlt_chg_endian(uint8_t buf[], size_t size)
6279 6279 {
6280 6280 uint8_t byte;
6281 6281 size_t cnt1;
6282 6282 size_t cnt;
6283 6283
6284 6284 cnt1 = size - 1;
6285 6285 for (cnt = 0; cnt < size / 2; cnt++) {
6286 6286 byte = buf[cnt1];
6287 6287 buf[cnt1] = buf[cnt];
6288 6288 buf[cnt] = byte;
6289 6289 cnt1--;
6290 6290 }
6291 6291 }
6292 6292
6293 6293 /*
6294 6294  * qlt_mps_reset
6295 6295 * Reset MPS for FCoE functions.
6296 6296 *
6297 6297 * Input:
6298 6298 * ha = virtual adapter state pointer.
6299 6299 *
6300 6300 * Context:
6301 6301 * Kernel context.
6302 6302 */
6303 6303 static void
6304 6304 qlt_mps_reset(qlt_state_t *qlt)
6305 6305 {
6306 6306 uint32_t data, dctl = 1000;
6307 6307
6308 6308 do {
6309 6309 if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
6310 6310 QLT_SUCCESS) {
6311 6311 return;
6312 6312 }
6313 6313 if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
6314 6314 QLT_SUCCESS) {
6315 6315 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6316 6316 return;
6317 6317 }
6318 6318 } while (!(data & BIT_0));
6319 6319
6320 6320 if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
6321 6321 dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
6322 6322 if ((data & 0xe0) != (dctl & 0xe0)) {
6323 6323 data &= 0xff1f;
6324 6324 data |= dctl & 0xe0;
6325 6325 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
6326 6326 }
6327 6327 }
6328 6328 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6329 6329 }
6330 6330
6331 6331 /*
6332 6332 * qlt_raw_wrt_risc_ram_word
6333 6333 * Write RISC RAM word.
6334 6334 *
6335 6335 * Input: qlt: adapter state pointer.
6336 6336 * risc_address: risc ram word address.
6337 6337 * data: data.
6338 6338 *
6339 6339 * Returns: qlt local function return status code.
6340 6340 *
6341 6341 * Context: Kernel context.
6342 6342 */
6343 6343 static fct_status_t
6344 6344 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6345 6345 uint32_t data)
6346 6346 {
6347 6347 fct_status_t ret;
6348 6348
6349 6349 REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
6350 6350 REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6351 6351 REG_WR16(qlt, REG_MBOX(2), LSW(data));
6352 6352 REG_WR16(qlt, REG_MBOX(3), MSW(data));
6353 6353 REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
6354 6354 ret = qlt_raw_mailbox_command(qlt);
6355 6355 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6356 6356 if (ret != QLT_SUCCESS) {
6357 6357 EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
6358 6358 "=%llxh\n", ret);
6359 6359 }
6360 6360 return (ret);
6361 6361 }
6362 6362
6363 6363 /*
6364 6364  * qlt_raw_rd_risc_ram_word
6365 6365 * Read RISC RAM word.
6366 6366 *
6367 6367 * Input: qlt: adapter state pointer.
6368 6368 * risc_address: risc ram word address.
6369 6369 * data: data pointer.
6370 6370 *
6371 6371  * Returns:	qlt local function return status code.
6372 6372 *
6373 6373 * Context: Kernel context.
6374 6374 */
6375 6375 static fct_status_t
6376 6376 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6377 6377 uint32_t *data)
6378 6378 {
6379 6379 fct_status_t ret;
6380 6380
6381 6381 REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
6382 6382 REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6383 6383 REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
6384 6384 ret = qlt_raw_mailbox_command(qlt);
6385 6385 *data = REG_RD16(qlt, REG_MBOX(2));
6386 6386 *data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6387 6387 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6388 6388 if (ret != QLT_SUCCESS) {
6389 6389 EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6390 6390 "=%llxh\n", ret);
6391 6391 }
6392 6392 return (ret);
6393 6393 }
6394 6394
6395 6395 static void
6396 6396 qlt_properties(qlt_state_t *qlt)
6397 6397 {
6398 6398 int32_t cnt = 0;
6399 6399 int32_t defval = 0xffff;
6400 6400
6401 6401 if (qlt_wwn_overload_prop(qlt) == TRUE) {
6402 6402 EL(qlt, "wwnn overloaded.\n");
6403 6403 }
6404 6404
6405 6405 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6406 6406 defval) {
6407 6407 qlt->qlt_bucketcnt[0] = cnt;
6408 6408 EL(qlt, "2k bucket o/l=%d\n", cnt);
6409 6409 }
6410 6410
6411 6411 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6412 6412 defval) {
6413 6413 qlt->qlt_bucketcnt[1] = cnt;
6414 6414 EL(qlt, "8k bucket o/l=%d\n", cnt);
6415 6415 }
6416 6416
6417 6417 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6418 6418 defval) {
6419 6419 qlt->qlt_bucketcnt[2] = cnt;
6420 6420 EL(qlt, "64k bucket o/l=%d\n", cnt);
6421 6421 }
6422 6422
6423 6423 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
6424 6424 defval) {
6425 6425 qlt->qlt_bucketcnt[3] = cnt;
6426 6426 EL(qlt, "128k bucket o/l=%d\n", cnt);
6427 6427 }
6428 6428
6429 6429 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6430 6430 defval) {
6431 6431 qlt->qlt_bucketcnt[4] = cnt;
6432 6432 EL(qlt, "256k bucket o/l=%d\n", cnt);
6433 6433 }
6434 6434 }
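qlt_properties() applies optional per-instance overrides: a bucket count is taken only when the property lookup returns something other than the 0xffff default sentinel. qlt_read_int_instance_prop() itself lies outside this hunk; the sketch below shows how such a per-instance integer lookup could be built on ddi_prop_get_int(9F). The helper name, the use of qlt->dip, and the buffer size are assumptions, following the "hba%d-%s" naming used by the string variant earlier in the file.

/*
 * Sketch (assumption, not the driver's actual helper): look up an
 * "hba<instance>-<name>" integer property via ddi_prop_get_int(9F),
 * returning defval when the property is absent from qlt.conf.
 */
static int
qlt_read_int_instance_prop_sketch(qlt_state_t *qlt, char *prop, int defval)
{
	char	inst_prop[256];	/* buffer size is an assumption */

	(void) snprintf(inst_prop, sizeof (inst_prop), "hba%d-%s",
	    qlt->instance, prop);
	return (ddi_prop_get_int(DDI_DEV_T_ANY, qlt->dip,
	    DDI_PROP_DONTPASS, inst_prop, defval));
}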
6204 lines elided