1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27 /*
28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 */
30
31 #pragma ident "Copyright 2010 QLogic Corporation; ql_api.c"
32
33 /*
34 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
35 *
36 * ***********************************************************************
37 * * **
38 * * NOTICE **
39 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
40 * * ALL RIGHTS RESERVED **
41 * * **
42 * ***********************************************************************
43 *
44 */
45
46 #include <ql_apps.h>
47 #include <ql_api.h>
48 #include <ql_debug.h>
49 #include <ql_init.h>
50 #include <ql_iocb.h>
51 #include <ql_ioctl.h>
52 #include <ql_isr.h>
53 #include <ql_mbx.h>
54 #include <ql_nx.h>
55 #include <ql_xioctl.h>
56
57 /*
 * Solaris external declarations.
59 */
60 extern pri_t minclsyspri;
61 extern pri_t maxclsyspri;
62
63 /*
64 * dev_ops functions prototypes
65 */
66 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
67 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
68 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
69 static int ql_power(dev_info_t *, int, int);
70 static int ql_quiesce(dev_info_t *);
71
72 /*
73 * FCA functions prototypes exported by means of the transport table
74 */
75 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
76 fc_fca_bind_info_t *);
77 static void ql_unbind_port(opaque_t);
78 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
79 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
80 static int ql_els_send(opaque_t, fc_packet_t *);
81 static int ql_get_cap(opaque_t, char *, void *);
82 static int ql_set_cap(opaque_t, char *, void *);
83 static int ql_getmap(opaque_t, fc_lilpmap_t *);
84 static int ql_transport(opaque_t, fc_packet_t *);
85 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
86 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
87 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
88 static int ql_abort(opaque_t, fc_packet_t *, int);
89 static int ql_reset(opaque_t, uint32_t);
90 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
91 static opaque_t ql_get_device(opaque_t, fc_portid_t);
92
93 /*
94 * FCA Driver Support Function Prototypes.
95 */
96 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
97 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
98 ql_srb_t *);
99 static void ql_task_daemon(void *);
100 static void ql_task_thread(ql_adapter_state_t *);
101 static void ql_unsol_callback(ql_srb_t *);
102 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
103 fc_unsol_buf_t *);
104 static void ql_timer(void *);
105 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
106 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
107 uint32_t *, uint32_t *);
108 static void ql_halt(ql_adapter_state_t *, int);
109 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
123 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
124 static int ql_login_port(ql_adapter_state_t *, port_id_t);
125 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
126 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
127 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
128 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
129 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
131 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
132 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
133 ql_srb_t *);
134 static int ql_kstat_update(kstat_t *, int);
135 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
136 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
137 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
138 static void ql_rst_aen(ql_adapter_state_t *);
139 static void ql_restart_queues(ql_adapter_state_t *);
140 static void ql_abort_queues(ql_adapter_state_t *);
141 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
142 static void ql_idle_check(ql_adapter_state_t *);
143 static int ql_loop_resync(ql_adapter_state_t *);
144 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
145 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 static int ql_save_config_regs(dev_info_t *);
147 static int ql_restore_config_regs(dev_info_t *);
148 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
149 static int ql_handle_rscn_update(ql_adapter_state_t *);
150 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
151 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
152 static int ql_dump_firmware(ql_adapter_state_t *);
153 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
154 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
155 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
157 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
158 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
159 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
160 void *);
161 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
162 uint8_t);
163 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
164 static int ql_suspend_adapter(ql_adapter_state_t *);
165 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
166 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
167 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
170 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
171 static int ql_setup_interrupts(ql_adapter_state_t *);
172 static int ql_setup_msi(ql_adapter_state_t *);
173 static int ql_setup_msix(ql_adapter_state_t *);
174 static int ql_setup_fixed(ql_adapter_state_t *);
175 static void ql_release_intr(ql_adapter_state_t *);
176 static void ql_disable_intr(ql_adapter_state_t *);
177 static int ql_legacy_intr(ql_adapter_state_t *);
178 static int ql_init_mutex(ql_adapter_state_t *);
179 static void ql_destroy_mutex(ql_adapter_state_t *);
180 static void ql_iidma(ql_adapter_state_t *);
181
182 static int ql_n_port_plogi(ql_adapter_state_t *);
183 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
184 els_descriptor_t *);
185 static void ql_isp_els_request_ctor(els_descriptor_t *,
186 els_passthru_entry_t *);
187 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
188 static int ql_wait_for_td_stop(ql_adapter_state_t *);
189 static void ql_process_idc_event(ql_adapter_state_t *);
190
191 /*
192 * Global data
193 */
194 static uint8_t ql_enable_pm = 1;
195 static int ql_flash_sbus_fpga = 0;
196 uint32_t ql_os_release_level;
197 uint32_t ql_disable_aif = 0;
198 uint32_t ql_disable_msi = 0;
199 uint32_t ql_disable_msix = 0;
200 uint32_t ql_enable_ets = 0;
201 uint16_t ql_osc_wait_count = 1000;
202
203 /* Timer routine variables. */
204 static timeout_id_t ql_timer_timeout_id = NULL;
205 static clock_t ql_timer_ticks;
206
207 /* Soft state head pointer. */
208 void *ql_state = NULL;
209
210 /* Head adapter link. */
211 ql_head_t ql_hba = {
212 NULL,
213 NULL
214 };
215
216 /* Global hba index */
217 uint32_t ql_gfru_hba_index = 1;
218
219 /*
220 * Some IP defines and globals
221 */
222 uint32_t ql_ip_buffer_count = 128;
223 uint32_t ql_ip_low_water = 10;
224 uint8_t ql_ip_fast_post_count = 5;
225 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
226
227 /* Device AL_PA to Device Head Queue index array. */
228 uint8_t ql_alpa_to_index[] = {
229 0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
230 0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
231 0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
232 0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
233 0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
234 0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
235 0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
236 0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
237 0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
238 0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
239 0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
240 0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
241 0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
242 0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
243 0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
244 0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
245 0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
246 0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
247 0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
248 0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
249 0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
250 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
251 0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
252 0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
253 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
254 0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
255 };
256
257 /* Device loop_id to ALPA array. */
258 static uint8_t ql_index_to_alpa[] = {
259 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
260 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
261 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
262 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
263 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
264 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
265 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
266 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
267 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
268 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
269 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
270 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
271 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
272 };
273
274 /* 2200 register offsets */
275 static reg_off_t reg_off_2200 = {
276 0x00, /* flash_address */
277 0x02, /* flash_data */
278 0x06, /* ctrl_status */
279 0x08, /* ictrl */
280 0x0a, /* istatus */
281 0x0c, /* semaphore */
282 0x0e, /* nvram */
283 0x18, /* req_in */
284 0x18, /* req_out */
285 0x1a, /* resp_in */
286 0x1a, /* resp_out */
287 0xff, /* risc2host - n/a */
288 24, /* Number of mailboxes */
289
290 /* Mailbox in register offsets 0 - 23 */
291 { 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
292 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
293 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
294 /* 2200 does not have mailbox 24-31 - n/a */
295 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
296
297 /* Mailbox out register offsets 0 - 23 */
298 { 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
299 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
300 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
301 /* 2200 does not have mailbox 24-31 - n/a */
302 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
303
304 0x96, /* fpm_diag_config */
305 0xa4, /* pcr */
306 0xb0, /* mctr */
307 0xb8, /* fb_cmd */
308 0xc0, /* hccr */
309 0xcc, /* gpiod */
310 0xce, /* gpioe */
311 0xff, /* host_to_host_sema - n/a */
312 0xff, /* pri_req_in - n/a */
313 0xff, /* pri_req_out - n/a */
314 0xff, /* atio_req_in - n/a */
315 0xff, /* atio_req_out - n/a */
316 0xff, /* io_base_addr - n/a */
317 0xff, /* nx_host_int - n/a */
318 0xff /* nx_risc_int - n/a */
319 };
320
321 /* 2300 register offsets */
322 static reg_off_t reg_off_2300 = {
323 0x00, /* flash_address */
324 0x02, /* flash_data */
325 0x06, /* ctrl_status */
326 0x08, /* ictrl */
327 0x0a, /* istatus */
328 0x0c, /* semaphore */
329 0x0e, /* nvram */
330 0x10, /* req_in */
331 0x12, /* req_out */
332 0x14, /* resp_in */
333 0x16, /* resp_out */
334 0x18, /* risc2host */
335 32, /* Number of mailboxes */
336
337 /* Mailbox in register offsets 0 - 31 */
338 { 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
339 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
340 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
341 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e },
342
343 /* Mailbox out register offsets 0 - 31 */
344 { 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
345 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
346 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
347 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e },
348
349 0x96, /* fpm_diag_config */
350 0xa4, /* pcr */
351 0xb0, /* mctr */
352 0x80, /* fb_cmd */
353 0xc0, /* hccr */
354 0xcc, /* gpiod */
355 0xce, /* gpioe */
356 0x1c, /* host_to_host_sema */
357 0xff, /* pri_req_in - n/a */
358 0xff, /* pri_req_out - n/a */
359 0xff, /* atio_req_in - n/a */
360 0xff, /* atio_req_out - n/a */
361 0xff, /* io_base_addr - n/a */
362 0xff, /* nx_host_int - n/a */
363 0xff /* nx_risc_int - n/a */
364 };
365
366 /* 2400/2500 register offsets */
367 reg_off_t reg_off_2400_2500 = {
368 0x00, /* flash_address */
369 0x04, /* flash_data */
370 0x08, /* ctrl_status */
371 0x0c, /* ictrl */
372 0x10, /* istatus */
373 0xff, /* semaphore - n/a */
374 0xff, /* nvram - n/a */
375 0x1c, /* req_in */
376 0x20, /* req_out */
377 0x24, /* resp_in */
378 0x28, /* resp_out */
379 0x44, /* risc2host */
380 32, /* Number of mailboxes */
381
382 /* Mailbox in register offsets 0 - 31 */
383 { 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
384 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
385 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
386 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe },
387
388 /* Mailbox out register offsets 0 - 31 */
389 { 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
390 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
391 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
392 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe },
393
394 0xff, /* fpm_diag_config - n/a */
395 0xff, /* pcr - n/a */
396 0xff, /* mctr - n/a */
397 0xff, /* fb_cmd - n/a */
398 0x48, /* hccr */
399 0x4c, /* gpiod */
400 0x50, /* gpioe */
401 0xff, /* host_to_host_sema - n/a */
402 0x2c, /* pri_req_in */
403 0x30, /* pri_req_out */
404 0x3c, /* atio_req_in */
405 0x40, /* atio_req_out */
406 0x54, /* io_base_addr */
407 0xff, /* nx_host_int - n/a */
408 0xff /* nx_risc_int - n/a */
409 };
410
411 /* P3 register offsets */
412 static reg_off_t reg_off_8021 = {
413 0x00, /* flash_address */
414 0x04, /* flash_data */
415 0x08, /* ctrl_status */
416 0x0c, /* ictrl */
417 0x10, /* istatus */
418 0xff, /* semaphore - n/a */
419 0xff, /* nvram - n/a */
420 0xff, /* req_in - n/a */
421 0x0, /* req_out */
422 0x100, /* resp_in */
423 0x200, /* resp_out */
424 0x500, /* risc2host */
425 32, /* Number of mailboxes */
426
427 /* Mailbox in register offsets 0 - 31 */
428 { 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
429 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
430 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
431 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e },
432
433 /* Mailbox out register offsets 0 - 31 */
434 { 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
435 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
436 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
437 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e },
438
439 0xff, /* fpm_diag_config - n/a */
440 0xff, /* pcr - n/a */
441 0xff, /* mctr - n/a */
442 0xff, /* fb_cmd - n/a */
443 0x48, /* hccr */
444 0x4c, /* gpiod */
445 0x50, /* gpioe */
446 0xff, /* host_to_host_sema - n/a */
447 0x2c, /* pri_req_in */
448 0x30, /* pri_req_out */
449 0x3c, /* atio_req_in */
450 0x40, /* atio_req_out */
451 0x54, /* io_base_addr */
452 0x380, /* nx_host_int */
453 0x504 /* nx_risc_int */
454 };
455
456 /* mutex for protecting variables shared by all instances of the driver */
457 kmutex_t ql_global_mutex;
458 kmutex_t ql_global_hw_mutex;
459 kmutex_t ql_global_el_mutex;
460
461 /* DMA access attribute structure. */
462 static ddi_device_acc_attr_t ql_dev_acc_attr = {
463 DDI_DEVICE_ATTR_V0,
464 DDI_STRUCTURE_LE_ACC,
465 DDI_STRICTORDER_ACC
466 };
467
468 /* I/O DMA attributes structures. */
469 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
470 DMA_ATTR_V0, /* dma_attr_version */
471 QL_DMA_LOW_ADDRESS, /* low DMA address range */
472 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
473 QL_DMA_XFER_COUNTER, /* DMA counter register */
474 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
475 QL_DMA_BURSTSIZES, /* DMA burstsizes */
476 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
477 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
478 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
479 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
480 QL_DMA_GRANULARITY, /* granularity of device */
481 QL_DMA_XFER_FLAGS /* DMA transfer flags */
482 };
483
484 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
485 DMA_ATTR_V0, /* dma_attr_version */
486 QL_DMA_LOW_ADDRESS, /* low DMA address range */
487 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */
488 QL_DMA_XFER_COUNTER, /* DMA counter register */
489 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
490 QL_DMA_BURSTSIZES, /* DMA burstsizes */
491 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
492 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
493 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
494 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
495 QL_DMA_GRANULARITY, /* granularity of device */
496 QL_DMA_XFER_FLAGS /* DMA transfer flags */
497 };
498
/* Per-command-type DMA attributes, initialized in _init(). */
500 static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
501 static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
502 static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
503 static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
504 static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
505 static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
506 static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
507 static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
508 static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
509 static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
510 static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
511 static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
512 static ddi_dma_attr_t ql_32fcp_data_dma_attr;
513 static ddi_dma_attr_t ql_64fcp_data_dma_attr;
514
515 /* Static declarations of cb_ops entry point functions... */
516 static struct cb_ops ql_cb_ops = {
517 ql_open, /* b/c open */
518 ql_close, /* b/c close */
519 nodev, /* b strategy */
520 nodev, /* b print */
521 nodev, /* b dump */
522 nodev, /* c read */
523 nodev, /* c write */
524 ql_ioctl, /* c ioctl */
525 nodev, /* c devmap */
526 nodev, /* c mmap */
527 nodev, /* c segmap */
528 nochpoll, /* c poll */
529 nodev, /* cb_prop_op */
530 NULL, /* streamtab */
531 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */
532 CB_REV, /* cb_ops revision */
533 nodev, /* c aread */
534 nodev /* c awrite */
535 };
536
537 /* Static declarations of dev_ops entry point functions... */
538 static struct dev_ops ql_devops = {
539 DEVO_REV, /* devo_rev */
540 0, /* refcnt */
541 ql_getinfo, /* getinfo */
542 nulldev, /* identify */
543 nulldev, /* probe */
544 ql_attach, /* attach */
545 ql_detach, /* detach */
546 nodev, /* reset */
547 &ql_cb_ops, /* char/block ops */
548 NULL, /* bus operations */
549 ql_power, /* power management */
550 ql_quiesce /* quiesce device */
551 };
552
553 /* ELS command code to text converter */
554 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
555 /* Mailbox command code to text converter */
556 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
557
558 char qlc_driver_version[] = QL_VERSION;
559
560 /*
561 * Loadable Driver Interface Structures.
562 * Declare and initialize the module configuration section...
563 */
564 static struct modldrv modldrv = {
565 &mod_driverops, /* type of module: driver */
566 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */
567 &ql_devops /* driver dev_ops */
568 };
569
570 static struct modlinkage modlinkage = {
571 MODREV_1,
572 { &modldrv, NULL }
573 };
574
575 /* ************************************************************************ */
576 /* Loadable Module Routines. */
577 /* ************************************************************************ */
578
579 /*
580 * _init
581 * Initializes a loadable module. It is called before any other
582 * routine in a loadable module.
583 *
584 * Returns:
585 * 0 = success
586 *
587 * Context:
588 * Kernel context.
589 */
590 int
591 _init(void)
592 {
593 uint16_t w16;
594 int rval = 0;
595
596 /* Get OS major release level. */
597 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
598 if (utsname.release[w16] == '.') {
599 w16++;
600 break;
601 }
602 }
603 if (w16 < sizeof (utsname.release)) {
604 (void) ql_bstr_to_dec(&utsname.release[w16],
605 &ql_os_release_level, 0);
606 } else {
607 ql_os_release_level = 0;
608 }
609 if (ql_os_release_level < 6) {
610 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
611 QL_NAME, ql_os_release_level);
612 rval = EINVAL;
613 }
614 if (ql_os_release_level == 6) {
615 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
616 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
617 }
618
619 if (rval == 0) {
620 rval = ddi_soft_state_init(&ql_state,
621 sizeof (ql_adapter_state_t), 0);
622 }
623 if (rval == 0) {
624 /* allow the FC Transport to tweak the dev_ops */
625 fc_fca_init(&ql_devops);
626
627 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
628 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
629 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
630 rval = mod_install(&modlinkage);
631 if (rval != 0) {
632 mutex_destroy(&ql_global_hw_mutex);
633 mutex_destroy(&ql_global_mutex);
634 mutex_destroy(&ql_global_el_mutex);
635 ddi_soft_state_fini(&ql_state);
636 } else {
637 /*EMPTY*/
638 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
639 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
640 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
641 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
642 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
643 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
644 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
645 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
646 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
647 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
648 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
649 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
650 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
651 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
652 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
653 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
654 QL_FCSM_CMD_SGLLEN;
655 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
656 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
657 QL_FCSM_RSP_SGLLEN;
658 ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
659 ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
660 QL_FCIP_CMD_SGLLEN;
661 ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
662 ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
663 QL_FCIP_RSP_SGLLEN;
664 ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
665 ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
666 QL_FCP_CMD_SGLLEN;
667 ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
668 ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
669 QL_FCP_RSP_SGLLEN;
670 }
671 }
672
673 if (rval != 0) {
674 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
675 QL_NAME);
676 }
677
678 return (rval);
679 }
680
681 /*
682 * _fini
683 * Prepares a module for unloading. It is called when the system
684 * wants to unload a module. If the module determines that it can
685 * be unloaded, then _fini() returns the value returned by
686 * mod_remove(). Upon successful return from _fini() no other
687 * routine in the module will be called before _init() is called.
688 *
689 * Returns:
690 * 0 = success
691 *
692 * Context:
693 * Kernel context.
694 */
695 int
696 _fini(void)
697 {
698 int rval;
699
700 rval = mod_remove(&modlinkage);
701 if (rval == 0) {
702 mutex_destroy(&ql_global_hw_mutex);
703 mutex_destroy(&ql_global_mutex);
704 mutex_destroy(&ql_global_el_mutex);
705 ddi_soft_state_fini(&ql_state);
706 }
707
708 return (rval);
709 }
710
711 /*
712 * _info
713 * Returns information about loadable module.
714 *
715 * Input:
716 * modinfo = pointer to module information structure.
717 *
718 * Returns:
719 * Value returned by mod_info().
720 *
721 * Context:
722 * Kernel context.
723 */
724 int
725 _info(struct modinfo *modinfop)
726 {
727 return (mod_info(&modlinkage, modinfop));
728 }
729
730 /* ************************************************************************ */
731 /* dev_ops functions */
732 /* ************************************************************************ */
733
734 /*
735 * ql_getinfo
 *	Returns the dev_info_t pointer associated with arg when cmd
 *	is set to DDI_INFO_DEVT2DEVINFO, or the instance number
 *	associated with arg when cmd is set
 *	to DDI_INFO_DEVT2INSTANCE.
740 *
741 * Input:
742 * dip = Do not use.
743 * cmd = command argument.
744 * arg = command specific argument.
745 * resultp = pointer to where request information is stored.
746 *
747 * Returns:
748 * DDI_SUCCESS or DDI_FAILURE.
749 *
750 * Context:
751 * Kernel context.
752 */
753 /* ARGSUSED */
754 static int
755 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
756 {
757 ql_adapter_state_t *ha;
758 int minor;
759 int rval = DDI_FAILURE;
760
761 minor = (int)(getminor((dev_t)arg));
762 ha = ddi_get_soft_state(ql_state, minor);
763 if (ha == NULL) {
764 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
765 getminor((dev_t)arg));
766 *resultp = NULL;
767 return (rval);
768 }
769
770 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
771
772 switch (cmd) {
773 case DDI_INFO_DEVT2DEVINFO:
774 *resultp = ha->dip;
775 rval = DDI_SUCCESS;
776 break;
777 case DDI_INFO_DEVT2INSTANCE:
778 *resultp = (void *)(uintptr_t)(ha->instance);
779 rval = DDI_SUCCESS;
780 break;
781 default:
782 EL(ha, "failed, unsupported cmd=%d\n", cmd);
783 rval = DDI_FAILURE;
784 break;
785 }
786
787 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
788
789 return (rval);
790 }
791
792 /*
793 * ql_attach
794 * Configure and attach an instance of the driver
795 * for a port.
796 *
797 * Input:
798 * dip = pointer to device information structure.
799 * cmd = attach type.
800 *
801 * Returns:
802 * DDI_SUCCESS or DDI_FAILURE.
803 *
804 * Context:
805 * Kernel context.
806 */
807 static int
808 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
809 {
810 off_t regsize;
811 uint32_t size;
812 int rval, *ptr;
813 int instance;
814 uint_t progress = 0;
815 char *buf;
816 ushort_t caps_ptr, cap;
817 fc_fca_tran_t *tran;
818 ql_adapter_state_t *ha = NULL;
819
820 static char *pmcomps[] = {
821 NULL,
822 PM_LEVEL_D3_STR, /* Device OFF */
823 PM_LEVEL_D0_STR, /* Device ON */
824 };
825
826 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
827 ddi_get_instance(dip), cmd);
828
829 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
830
831 switch (cmd) {
832 case DDI_ATTACH:
833 /* first get the instance */
834 instance = ddi_get_instance(dip);
835
836 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
837 QL_NAME, instance, QL_VERSION);
838
839 /* Correct OS version? */
840 if (ql_os_release_level != 11) {
841 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
842 "11", QL_NAME, instance);
843 goto attach_failed;
844 }
845
846 /* Hardware is installed in a DMA-capable slot? */
847 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
848 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
849 instance);
850 goto attach_failed;
851 }
852
853 /* No support for high-level interrupts */
854 if (ddi_intr_hilevel(dip, 0) != 0) {
855 cmn_err(CE_WARN, "%s(%d): High level interrupt"
856 " not supported", QL_NAME, instance);
857 goto attach_failed;
858 }
859
860 /* Allocate our per-device-instance structure */
861 if (ddi_soft_state_zalloc(ql_state,
862 instance) != DDI_SUCCESS) {
863 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
864 QL_NAME, instance);
865 goto attach_failed;
866 }
867 progress |= QL_SOFT_STATE_ALLOCED;
868
869 ha = ddi_get_soft_state(ql_state, instance);
870 if (ha == NULL) {
871 cmn_err(CE_WARN, "%s(%d): can't get soft state",
872 QL_NAME, instance);
873 goto attach_failed;
874 }
875 ha->dip = dip;
876 ha->instance = instance;
877 ha->hba.base_address = ha;
878 ha->pha = ha;
879
880 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
881 cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
882 QL_NAME, instance);
883 goto attach_failed;
884 }
885
886 /* Get extended logging and dump flags. */
887 ql_common_properties(ha);
888
889 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
890 "sbus") == 0) {
891 EL(ha, "%s SBUS card detected", QL_NAME);
892 ha->cfg_flags |= CFG_SBUS_CARD;
893 }
894
895 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
896 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
897
898 ha->outstanding_cmds = kmem_zalloc(
899 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
900 KM_SLEEP);
901
902 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
903 QL_UB_LIMIT, KM_SLEEP);
904
905 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
906 KM_SLEEP);
907
908 (void) ddi_pathname(dip, buf);
909 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
910 if (ha->devpath == NULL) {
911 EL(ha, "devpath mem alloc failed\n");
912 } else {
913 (void) strcpy(ha->devpath, buf);
914 EL(ha, "devpath is: %s\n", ha->devpath);
915 }
916
917 if (CFG_IST(ha, CFG_SBUS_CARD)) {
918 /*
			 * For cards where PCI is mapped to SBus, e.g. Ivory.
920 *
921 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
922 * : 0x100 - 0x3FF PCI IO space for 2200
923 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
924 * : 0x100 - 0x3FF PCI IO Space for fpga
925 */
926 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
927 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
928 DDI_SUCCESS) {
929 cmn_err(CE_WARN, "%s(%d): Unable to map device"
930 " registers", QL_NAME, instance);
931 goto attach_failed;
932 }
933 if (ddi_regs_map_setup(dip, 1,
934 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
935 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
936 DDI_SUCCESS) {
937 /* We should not fail attach here */
938 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
939 QL_NAME, instance);
940 ha->sbus_fpga_iobase = NULL;
941 }
942 progress |= QL_REGS_MAPPED;
943
944 /*
			 * Map config space before adding the interrupt,
			 * so that the chip type (2200 or 2300) can be
			 * determined before the interrupt routine gets a
			 * chance to execute.
949 */
950 if (ddi_regs_map_setup(dip, 0,
951 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
952 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
953 DDI_SUCCESS) {
954 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
955 "config registers", QL_NAME, instance);
956 goto attach_failed;
957 }
958 progress |= QL_CONFIG_SPACE_SETUP;
959 } else {
960 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
961 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
962 DDI_PROP_DONTPASS, "reg", &ptr, &size);
963 if (rval != DDI_PROP_SUCCESS) {
964 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
965 "address registers", QL_NAME, instance);
966 goto attach_failed;
967 } else {
968 ha->pci_bus_addr = ptr[0];
969 ha->function_number = (uint8_t)
970 (ha->pci_bus_addr >> 8 & 7);
971 ddi_prop_free(ptr);
972 }
973
974 /*
			 * Map config space before adding the interrupt,
			 * so that the chip type (2200 or 2300) can be
			 * determined before the interrupt routine gets a
			 * chance to execute.
979 */
980 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
981 DDI_SUCCESS) {
982 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
983 "config space", QL_NAME, instance);
984 goto attach_failed;
985 }
986 progress |= QL_CONFIG_SPACE_SETUP;
987
988 /*
989 * Setup the ISP2200 registers address mapping to be
990 * accessed by this particular driver.
991 * 0x0 Configuration Space
992 * 0x1 I/O Space
993 * 0x2 32-bit Memory Space address
994 * 0x3 64-bit Memory Space address
995 */
996 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
997 2 : 1;
			if (ddi_dev_regsize(dip, size, &regsize) !=
999 DDI_SUCCESS ||
1000 ddi_regs_map_setup(dip, size, &ha->iobase,
1001 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1002 DDI_SUCCESS) {
1003 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1004 "failed", QL_NAME, instance);
1005 goto attach_failed;
1006 }
1007 progress |= QL_REGS_MAPPED;
1008
1009 /*
1010 * We need I/O space mappings for 23xx HBAs for
			 * loading flash (FCode).  The chip has a bug that
			 * causes flash loads to fail through memory-space
			 * mappings in PCI-X mode.
1014 */
1015 if (size == 1) {
1016 ha->iomap_iobase = ha->iobase;
1017 ha->iomap_dev_handle = ha->dev_handle;
1018 } else {
				if (ddi_dev_regsize(dip, 1, &regsize) !=
1020 DDI_SUCCESS ||
1021 ddi_regs_map_setup(dip, 1,
1022 &ha->iomap_iobase, 0, regsize,
1023 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1024 DDI_SUCCESS) {
1025 cmn_err(CE_WARN, "%s(%d): regs_map_"
1026 "setup(I/O) failed", QL_NAME,
1027 instance);
1028 goto attach_failed;
1029 }
1030 progress |= QL_IOMAP_IOBASE_MAPPED;
1031 }
1032 }
1033
1034 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1035 PCI_CONF_SUBSYSID);
1036 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1037 PCI_CONF_SUBVENID);
1038 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1039 PCI_CONF_VENID);
1040 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1041 PCI_CONF_DEVID);
1042 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1043 PCI_CONF_REVID);
1044
1045 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1046 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1047 ha->subven_id, ha->subsys_id);
1048
1049 switch (ha->device_id) {
1050 case 0x2300:
1051 case 0x2312:
1052 case 0x2322:
1053 case 0x6312:
1054 case 0x6322:
1055 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1056 ha->flags |= FUNCTION_1;
1057 }
1058 if ((ha->device_id == 0x6322) ||
1059 (ha->device_id == 0x2322)) {
1060 ha->cfg_flags |= CFG_CTRL_6322;
1061 ha->fw_class = 0x6322;
1062 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1063 } else {
1064 ha->cfg_flags |= CFG_CTRL_2300;
1065 ha->fw_class = 0x2300;
1066 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1067 }
			ha->reg_off = &reg_off_2300;
1069 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1070 goto attach_failed;
1071 }
1072 ha->fcp_cmd = ql_command_iocb;
1073 ha->ip_cmd = ql_ip_iocb;
1074 ha->ms_cmd = ql_ms_iocb;
1075 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1076 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1077 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1078 } else {
1079 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1080 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1081 }
1082 break;
1083
1084 case 0x2200:
1085 ha->cfg_flags |= CFG_CTRL_2200;
			ha->reg_off = &reg_off_2200;
1087 ha->fw_class = 0x2200;
1088 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1089 goto attach_failed;
1090 }
1091 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1092 ha->fcp_cmd = ql_command_iocb;
1093 ha->ip_cmd = ql_ip_iocb;
1094 ha->ms_cmd = ql_ms_iocb;
1095 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1096 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1097 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1098 } else {
1099 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1100 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1101 }
1102 break;
1103
1104 case 0x2422:
1105 case 0x2432:
1106 case 0x5422:
1107 case 0x5432:
1108 case 0x8432:
1109 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1110 ha->flags |= FUNCTION_1;
1111 }
1112 ha->cfg_flags |= CFG_CTRL_2422;
1113 if (ha->device_id == 0x8432) {
1114 ha->cfg_flags |= CFG_CTRL_MENLO;
1115 } else {
1116 ha->flags |= VP_ENABLED;
1117 }
1118
			ha->reg_off = &reg_off_2400_2500;
1120 ha->fw_class = 0x2400;
1121 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1122 goto attach_failed;
1123 }
1124 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1125 ha->fcp_cmd = ql_command_24xx_iocb;
1126 ha->ip_cmd = ql_ip_24xx_iocb;
1127 ha->ms_cmd = ql_ms_24xx_iocb;
1128 ha->els_cmd = ql_els_24xx_iocb;
1129 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1130 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1131 break;
1132
1133 case 0x2522:
1134 case 0x2532:
1135 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1136 ha->flags |= FUNCTION_1;
1137 }
1138 ha->cfg_flags |= CFG_CTRL_25XX;
1139 ha->flags |= VP_ENABLED;
1140 ha->fw_class = 0x2500;
			ha->reg_off = &reg_off_2400_2500;
1142 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1143 goto attach_failed;
1144 }
1145 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1146 ha->fcp_cmd = ql_command_24xx_iocb;
1147 ha->ip_cmd = ql_ip_24xx_iocb;
1148 ha->ms_cmd = ql_ms_24xx_iocb;
1149 ha->els_cmd = ql_els_24xx_iocb;
1150 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1151 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1152 break;
1153
1154 case 0x8001:
1155 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1156 ha->flags |= FUNCTION_1;
1157 }
1158 ha->cfg_flags |= CFG_CTRL_81XX;
1159 ha->flags |= VP_ENABLED;
1160 ha->fw_class = 0x8100;
			ha->reg_off = &reg_off_2400_2500;
1162 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1163 goto attach_failed;
1164 }
1165 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1166 ha->fcp_cmd = ql_command_24xx_iocb;
1167 ha->ip_cmd = ql_ip_24xx_iocb;
1168 ha->ms_cmd = ql_ms_24xx_iocb;
1169 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1170 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1171 break;
1172
1173 case 0x8021:
1174 if (ha->function_number & BIT_0) {
1175 ha->flags |= FUNCTION_1;
1176 }
1177 ha->cfg_flags |= CFG_CTRL_8021;
			ha->reg_off = &reg_off_8021;
1179 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1180 ha->fcp_cmd = ql_command_24xx_iocb;
1181 ha->ms_cmd = ql_ms_24xx_iocb;
1182 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1183 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1184
1185 ha->nx_pcibase = ha->iobase;
1186 ha->iobase += 0xBC000 + (ha->function_number << 11);
1187 ha->iomap_iobase += 0xBC000 +
1188 (ha->function_number << 11);
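			/*
			 * (Note: each of the 8021's PCI functions owns a
			 * 0x800-byte register window within the CRB space
			 * starting at offset 0xBC000; function_number << 11,
			 * i.e. * 0x800, selects this function's window.)
			 */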
1189
1190 /* map doorbell */
			if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1192 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1193 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1194 DDI_SUCCESS) {
1195 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1196 "(doorbell) failed", QL_NAME, instance);
1197 goto attach_failed;
1198 }
1199 progress |= QL_DB_IOBASE_MAPPED;
1200
1201 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1202 (ha->function_number << 12));
1203 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1204 (ha->function_number * 8);
1205
1206 ql_8021_update_crb_int_ptr(ha);
1207 ql_8021_set_drv_active(ha);
1208 break;
1209
1210 default:
1211 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1212 QL_NAME, instance, ha->device_id);
1213 goto attach_failed;
1214 }
1215
1216 /* Setup hba buffer. */
1217
1218 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1219 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1220 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1221 RCVBUF_QUEUE_SIZE);
1222
1223 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1224 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1225 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1226 "alloc failed", QL_NAME, instance);
1227 goto attach_failed;
1228 }
1229 progress |= QL_HBA_BUFFER_SETUP;
1230
1231 /* Setup buffer pointers. */
1232 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1233 REQUEST_Q_BUFFER_OFFSET;
1234 ha->request_ring_bp = (struct cmd_entry *)
1235 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1236
1237 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1238 RESPONSE_Q_BUFFER_OFFSET;
1239 ha->response_ring_bp = (struct sts_entry *)
1240 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1241
1242 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1243 RCVBUF_Q_BUFFER_OFFSET;
1244 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1245 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
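
		/*
		 * Resulting layout of the single hba_buf DMA allocation
		 * (offsets are the *_Q_BUFFER_OFFSET constants above):
		 *
		 *	REQUEST_Q_BUFFER_OFFSET:  request ring
		 *	RESPONSE_Q_BUFFER_OFFSET: response ring
		 *	RCVBUF_Q_BUFFER_OFFSET:   IP receive-buffer ring
		 *	    (space only allocated on pre-24xx chips; see
		 *	    the size computation above)
		 */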
1246
1247 /* Allocate resource for QLogic IOCTL */
1248 (void) ql_alloc_xioctl_resource(ha);
1249
1250 /* Setup interrupts */
1251 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1252 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1253 "rval=%xh", QL_NAME, instance, rval);
1254 goto attach_failed;
1255 }
1256
1257 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1258
1259 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1260 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1261 QL_NAME, instance);
1262 goto attach_failed;
1263 }
1264
1265 /*
1266 * Allocate an N Port information structure
1267 * for use when in P2P topology.
1268 */
1269 ha->n_port = (ql_n_port_info_t *)
1270 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1271 if (ha->n_port == NULL) {
1272 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1273 QL_NAME, instance);
1274 goto attach_failed;
1275 }
1276
1277 progress |= QL_N_PORT_INFO_CREATED;
1278
1279 /*
1280 * Determine support for Power Management
1281 */
1282 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1283
1284 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1285 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1286 if (cap == PCI_CAP_ID_PM) {
1287 ha->pm_capable = 1;
1288 break;
1289 }
1290 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1291 PCI_CAP_NEXT_PTR);
1292 }
1293
1294 if (ha->pm_capable) {
1295 /*
1296 * Enable PM for 2200 based HBAs only.
1297 */
1298 if (ha->device_id != 0x2200) {
1299 ha->pm_capable = 0;
1300 }
1301 }
1302
1303 if (ha->pm_capable) {
1304 ha->pm_capable = ql_enable_pm;
1305 }
1306
1307 if (ha->pm_capable) {
1308 /*
1309 * Initialize power management bookkeeping;
1310 * components are created idle.
1311 */
1312 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1313 pmcomps[0] = buf;
1314
1315 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1316 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1317 dip, "pm-components", pmcomps,
1318 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1319 DDI_PROP_SUCCESS) {
1320 cmn_err(CE_WARN, "%s(%d): failed to create"
1321 " pm-components property", QL_NAME,
1322 instance);
1323
1324 /* Initialize adapter. */
1325 ha->power_level = PM_LEVEL_D0;
1326 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1327 cmn_err(CE_WARN, "%s(%d): failed to"
1328 " initialize adapter", QL_NAME,
1329 instance);
1330 goto attach_failed;
1331 }
1332 } else {
1333 ha->power_level = PM_LEVEL_D3;
1334 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1335 PM_LEVEL_D0) != DDI_SUCCESS) {
1336 cmn_err(CE_WARN, "%s(%d): failed to"
1337 " raise power or initialize"
1338 " adapter", QL_NAME, instance);
1339 }
1340 }
1341 } else {
1342 /* Initialize adapter. */
1343 ha->power_level = PM_LEVEL_D0;
1344 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1345 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1346 " adapter", QL_NAME, instance);
1347 }
1348 }
1349
1350 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1351 ha->fw_subminor_version == 0) {
1352 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1353 QL_NAME, ha->instance);
1354 } else {
1355 int rval;
1356 char ver_fmt[256];
1357
1358 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1359 "Firmware version %d.%d.%d", ha->fw_major_version,
1360 ha->fw_minor_version, ha->fw_subminor_version);
1361
1362 if (CFG_IST(ha, CFG_CTRL_81XX)) {
			/* Keep rval as the running length of ver_fmt. */
			rval += (int)snprintf(ver_fmt + rval,
			    (size_t)(sizeof (ver_fmt) - rval),
			    ", MPI fw version %d.%d.%d",
			    ha->mpi_fw_major_version,
			    ha->mpi_fw_minor_version,
			    ha->mpi_fw_subminor_version);

			if (ha->subsys_id == 0x17B ||
			    ha->subsys_id == 0x17D) {
				(void) snprintf(ver_fmt + rval,
				    (size_t)(sizeof (ver_fmt) - rval),
				    ", PHY fw version %d.%d.%d",
				    ha->phy_fw_major_version,
				    ha->phy_fw_minor_version,
				    ha->phy_fw_subminor_version);
1378 }
1379 }
1380 cmn_err(CE_NOTE, "!%s(%d): %s",
1381 QL_NAME, ha->instance, ver_fmt);
1382 }
1383
1384 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1385 "controller", KSTAT_TYPE_RAW,
1386 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1387 if (ha->k_stats == NULL) {
1388 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1389 QL_NAME, instance);
1390 goto attach_failed;
1391 }
1392 progress |= QL_KSTAT_CREATED;
1393
1394 ha->adapter_stats->version = 1;
1395 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1396 ha->k_stats->ks_private = ha;
1397 ha->k_stats->ks_update = ql_kstat_update;
1398 ha->k_stats->ks_ndata = 1;
1399 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1400 kstat_install(ha->k_stats);
1401
1402 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1403 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1404 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1405 QL_NAME, instance);
1406 goto attach_failed;
1407 }
1408 progress |= QL_MINOR_NODE_CREATED;
1409
1410 /* Allocate a transport structure for this instance */
1411 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1412 if (tran == NULL) {
1413 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1414 QL_NAME, instance);
1415 goto attach_failed;
1416 }
1417
1418 progress |= QL_FCA_TRAN_ALLOCED;
1419
1420 /* fill in the structure */
1421 tran->fca_numports = 1;
1422 tran->fca_version = FCTL_FCA_MODREV_5;
1423 if (CFG_IST(ha, CFG_CTRL_2422)) {
1424 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1425 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1426 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1427 }
1428 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1429 tran->fca_perm_pwwn.raw_wwn, 8);
1430
1431 EL(ha, "FCA version %d\n", tran->fca_version);
1432
1433 /* Specify the amount of space needed in each packet */
1434 tran->fca_pkt_size = sizeof (ql_srb_t);
1435
1436 /* command limits are usually dictated by hardware */
1437 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1438
		/* DMA attributes are static, set elsewhere. */
1440 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1441 tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1442 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1443 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1444 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1445 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1446 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1447 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1448 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1449 } else {
1450 tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1451 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1452 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1453 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1454 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1455 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1456 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1457 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1458 }
1459
1460 tran->fca_acc_attr = &ql_dev_acc_attr;
1461 tran->fca_iblock = &(ha->iblock_cookie);
1462
1463 /* the remaining values are simply function vectors */
1464 tran->fca_bind_port = ql_bind_port;
1465 tran->fca_unbind_port = ql_unbind_port;
1466 tran->fca_init_pkt = ql_init_pkt;
1467 tran->fca_un_init_pkt = ql_un_init_pkt;
1468 tran->fca_els_send = ql_els_send;
1469 tran->fca_get_cap = ql_get_cap;
1470 tran->fca_set_cap = ql_set_cap;
1471 tran->fca_getmap = ql_getmap;
1472 tran->fca_transport = ql_transport;
1473 tran->fca_ub_alloc = ql_ub_alloc;
1474 tran->fca_ub_free = ql_ub_free;
1475 tran->fca_ub_release = ql_ub_release;
1476 tran->fca_abort = ql_abort;
1477 tran->fca_reset = ql_reset;
1478 tran->fca_port_manage = ql_port_manage;
1479 tran->fca_get_device = ql_get_device;
1480
1481 /* give it to the FC transport */
1482 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1483 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1484 instance);
1485 goto attach_failed;
1486 }
1487 progress |= QL_FCA_ATTACH_DONE;
1488
1489 /* Stash the structure so it can be freed at detach */
1490 ha->tran = tran;
1491
1492 /* Acquire global state lock. */
1493 GLOBAL_STATE_LOCK();
1494
1495 /* Add adapter structure to link list. */
1496 ql_add_link_b(&ql_hba, &ha->hba);
1497
1498 /* Start one second driver timer. */
1499 if (ql_timer_timeout_id == NULL) {
1500 ql_timer_ticks = drv_usectohz(1000000);
1501 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1502 ql_timer_ticks);
1503 }
1504
1505 /* Release global state lock. */
1506 GLOBAL_STATE_UNLOCK();
1507
		/* Determine and populate HBA FRU info */
1509 ql_setup_fruinfo(ha);
1510
1511 /* Setup task_daemon thread. */
1512 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1513 0, &p0, TS_RUN, minclsyspri);
1514
1515 progress |= QL_TASK_DAEMON_STARTED;
1516
1517 ddi_report_dev(dip);
1518
1519 /* Disable link reset in panic path */
1520 ha->lip_on_panic = 1;
1521
1522 rval = DDI_SUCCESS;
1523 break;
1524
1525 attach_failed:
1526 if (progress & QL_FCA_ATTACH_DONE) {
1527 (void) fc_fca_detach(dip);
1528 progress &= ~QL_FCA_ATTACH_DONE;
1529 }
1530
1531 if (progress & QL_FCA_TRAN_ALLOCED) {
1532 kmem_free(tran, sizeof (fc_fca_tran_t));
1533 progress &= ~QL_FCA_TRAN_ALLOCED;
1534 }
1535
1536 if (progress & QL_MINOR_NODE_CREATED) {
1537 ddi_remove_minor_node(dip, "devctl");
1538 progress &= ~QL_MINOR_NODE_CREATED;
1539 }
1540
1541 if (progress & QL_KSTAT_CREATED) {
1542 kstat_delete(ha->k_stats);
1543 progress &= ~QL_KSTAT_CREATED;
1544 }
1545
1546 if (progress & QL_N_PORT_INFO_CREATED) {
1547 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1548 progress &= ~QL_N_PORT_INFO_CREATED;
1549 }
1550
1551 if (progress & QL_TASK_DAEMON_STARTED) {
1552 TASK_DAEMON_LOCK(ha);
1553
1554 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1555
1556 cv_signal(&ha->cv_task_daemon);
1557
1558 /* Release task daemon lock. */
1559 TASK_DAEMON_UNLOCK(ha);
1560
			/* Wait for task daemon to stop running. */
1562 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1563 ql_delay(ha, 10000);
1564 }
1565 progress &= ~QL_TASK_DAEMON_STARTED;
1566 }
1567
1568 if (progress & QL_DB_IOBASE_MAPPED) {
1569 ql_8021_clr_drv_active(ha);
1570 ddi_regs_map_free(&ha->db_dev_handle);
1571 progress &= ~QL_DB_IOBASE_MAPPED;
1572 }
1573 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1574 ddi_regs_map_free(&ha->iomap_dev_handle);
1575 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1576 }
1577
1578 if (progress & QL_CONFIG_SPACE_SETUP) {
1579 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1580 ddi_regs_map_free(&ha->sbus_config_handle);
1581 } else {
1582 pci_config_teardown(&ha->pci_handle);
1583 }
1584 progress &= ~QL_CONFIG_SPACE_SETUP;
1585 }
1586
1587 if (progress & QL_INTR_ADDED) {
1588 ql_disable_intr(ha);
1589 ql_release_intr(ha);
1590 progress &= ~QL_INTR_ADDED;
1591 }
1592
1593 if (progress & QL_MUTEX_CV_INITED) {
1594 ql_destroy_mutex(ha);
1595 progress &= ~QL_MUTEX_CV_INITED;
1596 }
1597
1598 if (progress & QL_HBA_BUFFER_SETUP) {
1599 ql_free_phys(ha, &ha->hba_buf);
1600 progress &= ~QL_HBA_BUFFER_SETUP;
1601 }
1602
1603 if (progress & QL_REGS_MAPPED) {
1604 ddi_regs_map_free(&ha->dev_handle);
1605 if (ha->sbus_fpga_iobase != NULL) {
1606 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1607 }
1608 progress &= ~QL_REGS_MAPPED;
1609 }
1610
1611 if (progress & QL_SOFT_STATE_ALLOCED) {
1612
1613 ql_fcache_rel(ha->fcache);
1614
1615 kmem_free(ha->adapter_stats,
1616 sizeof (*ha->adapter_stats));
1617
1618 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1619 QL_UB_LIMIT);
1620
1621 kmem_free(ha->outstanding_cmds,
1622 sizeof (*ha->outstanding_cmds) *
1623 MAX_OUTSTANDING_COMMANDS);
1624
1625 if (ha->devpath != NULL) {
1626 kmem_free(ha->devpath,
1627 strlen(ha->devpath) + 1);
1628 }
1629
1630 kmem_free(ha->dev, sizeof (*ha->dev) *
1631 DEVICE_HEAD_LIST_SIZE);
1632
1633 if (ha->xioctl != NULL) {
1634 ql_free_xioctl_resource(ha);
1635 }
1636
1637 if (ha->fw_module != NULL) {
1638 (void) ddi_modclose(ha->fw_module);
1639 }
1640 (void) ql_el_trace_desc_dtor(ha);
1641 (void) ql_nvram_cache_desc_dtor(ha);
1642
1643 ddi_soft_state_free(ql_state, instance);
1644 progress &= ~QL_SOFT_STATE_ALLOCED;
1645 }
1646
1647 ddi_prop_remove_all(dip);
1648 rval = DDI_FAILURE;
1649 break;
1650
1651 case DDI_RESUME:
1652 rval = DDI_FAILURE;
1653
1654 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1655 if (ha == NULL) {
1656 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1657 QL_NAME, instance);
1658 break;
1659 }
1660
1661 ha->power_level = PM_LEVEL_D3;
1662 if (ha->pm_capable) {
1663 /*
1664 * Get ql_power to do power on initialization
1665 */
1666 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1667 PM_LEVEL_D0) != DDI_SUCCESS) {
1668 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1669 " power", QL_NAME, instance);
1670 }
1671 }
1672
1673 /*
		 * There is a bug in DR that prevents the PM framework
		 * from calling ql_power.
1676 */
1677 if (ha->power_level == PM_LEVEL_D3) {
1678 ha->power_level = PM_LEVEL_D0;
1679
1680 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1681 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1682 " adapter", QL_NAME, instance);
1683 }
1684
1685 /* Wake up task_daemon. */
1686 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1687 0);
1688 }
1689
1690 /* Acquire global state lock. */
1691 GLOBAL_STATE_LOCK();
1692
1693 /* Restart driver timer. */
1694 if (ql_timer_timeout_id == NULL) {
1695 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1696 ql_timer_ticks);
1697 }
1698
1699 /* Release global state lock. */
1700 GLOBAL_STATE_UNLOCK();
1701
1702 /* Wake up command start routine. */
1703 ADAPTER_STATE_LOCK(ha);
1704 ha->flags &= ~ADAPTER_SUSPENDED;
1705 ADAPTER_STATE_UNLOCK(ha);
1706
1707 /*
		 * The transport doesn't perform FC discovery in polled
		 * mode, so we need the daemon thread's services
		 * right here.
1711 */
1712 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1713
1714 rval = DDI_SUCCESS;
1715
1716 /* Restart IP if it was running. */
1717 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1718 (void) ql_initialize_ip(ha);
1719 ql_isp_rcvbuf(ha);
1720 }
1721 break;
1722
1723 default:
1724 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1725 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1726 rval = DDI_FAILURE;
1727 break;
1728 }
1729
1730 kmem_free(buf, MAXPATHLEN);
1731
1732 if (rval != DDI_SUCCESS) {
1733 /*EMPTY*/
1734 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1735 ddi_get_instance(dip), rval);
1736 } else {
1737 /*EMPTY*/
1738 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1739 }
1740
1741 return (rval);
1742 }
1743
1744 /*
1745 * ql_detach
 *	Removes all the state associated with a given instance
 *	of a device node prior to the removal of that
 *	instance from the system.
1749 *
1750 * Input:
1751 * dip = pointer to device information structure.
1752 * cmd = type of detach.
1753 *
1754 * Returns:
1755 * DDI_SUCCESS or DDI_FAILURE.
1756 *
1757 * Context:
1758 * Kernel context.
1759 */
1760 static int
1761 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1762 {
1763 ql_adapter_state_t *ha, *vha;
1764 ql_tgt_t *tq;
1765 int delay_cnt;
1766 uint16_t index;
1767 ql_link_t *link;
1768 char *buf;
1769 timeout_id_t timer_id = NULL;
1770 int suspend, rval = DDI_SUCCESS;
1771
1772 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1773 if (ha == NULL) {
1774 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1775 ddi_get_instance(dip));
1776 return (DDI_FAILURE);
1777 }
1778
1779 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1780
1781 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1782
1783 switch (cmd) {
1784 case DDI_DETACH:
1785 ADAPTER_STATE_LOCK(ha);
1786 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1787 ADAPTER_STATE_UNLOCK(ha);
1788
1789 TASK_DAEMON_LOCK(ha);
1790
1791 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1792 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1793 cv_signal(&ha->cv_task_daemon);
1794
1795 TASK_DAEMON_UNLOCK(ha);
1796
1797 (void) ql_wait_for_td_stop(ha);
1798
1799 TASK_DAEMON_LOCK(ha);
1800 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1801 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1802 EL(ha, "failed, could not stop task daemon\n");
1803 }
1804 }
1805 TASK_DAEMON_UNLOCK(ha);
1806
1807 GLOBAL_STATE_LOCK();
1808
1809 /* Disable driver timer if no adapters. */
1810 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1811 ql_hba.last == &ha->hba) {
1812 timer_id = ql_timer_timeout_id;
1813 ql_timer_timeout_id = NULL;
1814 }
1815 ql_remove_link(&ql_hba, &ha->hba);
1816
1817 GLOBAL_STATE_UNLOCK();
1818
1819 if (timer_id) {
1820 (void) untimeout(timer_id);
1821 }
1822
1823 if (ha->pm_capable) {
1824 if (pm_lower_power(dip, QL_POWER_COMPONENT,
1825 PM_LEVEL_D3) != DDI_SUCCESS) {
1826 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1827 " power", QL_NAME, ha->instance);
1828 }
1829 }
1830
1831		/*
1832		 * If pm_lower_power shut down the adapter, there
1833		 * isn't much else to do.
1834		 */
1835 if (ha->power_level != PM_LEVEL_D3) {
1836 ql_halt(ha, PM_LEVEL_D3);
1837 }
1838
1839 /* Remove virtual ports. */
1840 while ((vha = ha->vp_next) != NULL) {
1841 ql_vport_destroy(vha);
1842 }
1843
1844 /* Free target queues. */
1845 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1846 link = ha->dev[index].first;
1847 while (link != NULL) {
1848 tq = link->base_address;
1849 link = link->next;
1850 ql_dev_free(ha, tq);
1851 }
1852 }
1853
1854		/*
1855		 * Free unsolicited buffers.
1856		 * If we are here, then there are no ULPs still
1857		 * alive that wish to talk to ql, so free up
1858		 * any SRB_IP_UB_UNUSED buffers that are
1859		 * lingering around.
1860		 */
1861 QL_UB_LOCK(ha);
1862 for (index = 0; index < QL_UB_LIMIT; index++) {
1863 fc_unsol_buf_t *ubp = ha->ub_array[index];
1864
1865 if (ubp != NULL) {
1866 ql_srb_t *sp = ubp->ub_fca_private;
1867
1868 sp->flags |= SRB_UB_FREE_REQUESTED;
1869
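				/*
				 * Busy-wait (100 ms per pass, dropping
				 * QL_UB_LOCK each time) until the buffer
				 * is back in the FCA and no callback
				 * owns it before tearing it down.
				 */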
1870 while (!(sp->flags & SRB_UB_IN_FCA) ||
1871 (sp->flags & (SRB_UB_CALLBACK |
1872 SRB_UB_ACQUIRED))) {
1873 QL_UB_UNLOCK(ha);
1874 delay(drv_usectohz(100000));
1875 QL_UB_LOCK(ha);
1876 }
1877 ha->ub_array[index] = NULL;
1878
1879 QL_UB_UNLOCK(ha);
1880 ql_free_unsolicited_buffer(ha, ubp);
1881 QL_UB_LOCK(ha);
1882 }
1883 }
1884 QL_UB_UNLOCK(ha);
1885
1886 /* Free any saved RISC code. */
1887 if (ha->risc_code != NULL) {
1888 kmem_free(ha->risc_code, ha->risc_code_size);
1889 ha->risc_code = NULL;
1890 ha->risc_code_size = 0;
1891 }
1892
1893 if (ha->fw_module != NULL) {
1894 (void) ddi_modclose(ha->fw_module);
1895 ha->fw_module = NULL;
1896 }
1897
1898 /* Free resources. */
1899 ddi_prop_remove_all(dip);
1900 (void) fc_fca_detach(dip);
1901 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1902 ddi_remove_minor_node(dip, "devctl");
1903 if (ha->k_stats != NULL) {
1904 kstat_delete(ha->k_stats);
1905 }
1906
1907 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1908 ddi_regs_map_free(&ha->sbus_config_handle);
1909 } else {
1910 if (CFG_IST(ha, CFG_CTRL_8021)) {
1911 ql_8021_clr_drv_active(ha);
1912 ddi_regs_map_free(&ha->db_dev_handle);
1913 }
1914 if (ha->iomap_dev_handle != ha->dev_handle) {
1915 ddi_regs_map_free(&ha->iomap_dev_handle);
1916 }
1917 pci_config_teardown(&ha->pci_handle);
1918 }
1919
1920 ql_disable_intr(ha);
1921 ql_release_intr(ha);
1922
1923 ql_free_xioctl_resource(ha);
1924
1925 ql_destroy_mutex(ha);
1926
1927 ql_free_phys(ha, &ha->hba_buf);
1928 ql_free_phys(ha, &ha->fwexttracebuf);
1929 ql_free_phys(ha, &ha->fwfcetracebuf);
1930
1931 ddi_regs_map_free(&ha->dev_handle);
1932 if (ha->sbus_fpga_iobase != NULL) {
1933 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1934 }
1935
1936 ql_fcache_rel(ha->fcache);
1937 if (ha->vcache != NULL) {
1938 kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1939 }
1940
1941 if (ha->pi_attrs != NULL) {
1942 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1943 }
1944
1945 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1946
1947 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1948
1949 kmem_free(ha->outstanding_cmds,
1950 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1951
1952 if (ha->n_port != NULL) {
1953 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1954 }
1955
1956 if (ha->devpath != NULL) {
1957 kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1958 }
1959
1960 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1961
1962 EL(ha, "detached\n");
1963
1964 ddi_soft_state_free(ql_state, (int)ha->instance);
1965
1966 break;
1967
1968 case DDI_SUSPEND:
1969 ADAPTER_STATE_LOCK(ha);
1970
1971 delay_cnt = 0;
1972 ha->flags |= ADAPTER_SUSPENDED;
1973 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1974 ADAPTER_STATE_UNLOCK(ha);
1975 delay(drv_usectohz(1000000));
1976 ADAPTER_STATE_LOCK(ha);
1977 }
1978 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1979 ha->flags &= ~ADAPTER_SUSPENDED;
1980 ADAPTER_STATE_UNLOCK(ha);
1981 rval = DDI_FAILURE;
1982 cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1983 " busy %xh flags %xh", QL_NAME, ha->instance,
1984 ha->busy, ha->flags);
1985 break;
1986 }
1987
1988 ADAPTER_STATE_UNLOCK(ha);
1989
1990 if (ha->flags & IP_INITIALIZED) {
1991 (void) ql_shutdown_ip(ha);
1992 }
1993
1994 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1995 ADAPTER_STATE_LOCK(ha);
1996 ha->flags &= ~ADAPTER_SUSPENDED;
1997 ADAPTER_STATE_UNLOCK(ha);
1998 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1999 QL_NAME, ha->instance, suspend);
2000
2001 /* Restart IP if it was running. */
2002 if (ha->flags & IP_ENABLED &&
2003 !(ha->flags & IP_INITIALIZED)) {
2004 (void) ql_initialize_ip(ha);
2005 ql_isp_rcvbuf(ha);
2006 }
2007 rval = DDI_FAILURE;
2008 break;
2009 }
2010
2011 /* Acquire global state lock. */
2012 GLOBAL_STATE_LOCK();
2013
2014 /* Disable driver timer if last adapter. */
2015 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2016 ql_hba.last == &ha->hba) {
2017 timer_id = ql_timer_timeout_id;
2018 ql_timer_timeout_id = NULL;
2019 }
2020 GLOBAL_STATE_UNLOCK();
2021
2022 if (timer_id) {
2023 (void) untimeout(timer_id);
2024 }
2025
2026 EL(ha, "suspended\n");
2027
2028 break;
2029
2030 default:
2031 rval = DDI_FAILURE;
2032 break;
2033 }
2034
2035 kmem_free(buf, MAXPATHLEN);
2036
2037 if (rval != DDI_SUCCESS) {
2038 if (ha != NULL) {
2039 EL(ha, "failed, rval = %xh\n", rval);
2040 } else {
2041 /*EMPTY*/
2042 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2043 ddi_get_instance(dip), rval);
2044 }
2045 } else {
2046 /*EMPTY*/
2047 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2048 }
2049
2050 return (rval);
2051 }
2052
2053
2054 /*
2055 * ql_power
2056 * Power a device attached to the system.
2057 *
2058 * Input:
2059 * dip = pointer to device information structure.
2060 * component = device.
2061 * level = power level.
2062 *
2063 * Returns:
2064 * DDI_SUCCESS or DDI_FAILURE.
2065 *
2066 * Context:
2067 * Kernel context.
2068 */
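/*
 * A minimal, illustrative sketch (not part of the driver) of how a
 * caller gets here: it asks the PM framework to raise the power
 * component, and the framework in turn invokes ql_power() with
 * PM_LEVEL_D0.
 */
#if 0
	if (pm_raise_power(dip, QL_POWER_COMPONENT, PM_LEVEL_D0) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "pm_raise_power failed");
	}
#endif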
2069 /* ARGSUSED */
2070 static int
2071 ql_power(dev_info_t *dip, int component, int level)
2072 {
2073 int rval = DDI_FAILURE;
2074 off_t csr;
2075 uint8_t saved_pm_val;
2076 ql_adapter_state_t *ha;
2077 char *buf;
2078 char *path;
2079
2080 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2081 if (ha == NULL || ha->pm_capable == 0) {
2082 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2083 ddi_get_instance(dip));
2084 return (rval);
2085 }
2086
2087 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2088
2089 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2090 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2091
2092	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2093	    level != PM_LEVEL_D3)) {
2094		EL(ha, "invalid, component=%xh or level=%xh\n",
2095		    component, level);
		kmem_free(buf, MAXPATHLEN);
		kmem_free(path, MAXPATHLEN);
2096		return (rval);
2097	}
2098
2099 GLOBAL_HW_LOCK();
2100 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2101 GLOBAL_HW_UNLOCK();
2102
2103	(void) snprintf(buf, MAXPATHLEN,
2104 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2105 ddi_pathname(dip, path));
2106
2107 switch (level) {
2108 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2109
2110 QL_PM_LOCK(ha);
2111 if (ha->power_level == PM_LEVEL_D0) {
2112 QL_PM_UNLOCK(ha);
2113 rval = DDI_SUCCESS;
2114 break;
2115 }
2116
2117 /*
2118 * Enable interrupts now
2119 */
2120 saved_pm_val = ha->power_level;
2121 ha->power_level = PM_LEVEL_D0;
2122 QL_PM_UNLOCK(ha);
2123
2124 GLOBAL_HW_LOCK();
2125
2126 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2127
2128		/*
2129		 * Delay after reset to give the chip time to recover;
2130		 * otherwise the system can panic.
2131		 */
2132 drv_usecwait(200000);
2133
2134 GLOBAL_HW_UNLOCK();
2135
2136 if (ha->config_saved) {
2137 ha->config_saved = 0;
2138 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2139 QL_PM_LOCK(ha);
2140 ha->power_level = saved_pm_val;
2141 QL_PM_UNLOCK(ha);
2142 cmn_err(CE_WARN, "%s failed to restore "
2143 "config regs", buf);
2144 break;
2145 }
2146 }
2147
2148 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2149 cmn_err(CE_WARN, "%s adapter initialization failed",
2150 buf);
2151 }
2152
2153 /* Wake up task_daemon. */
2154 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2155 TASK_DAEMON_SLEEPING_FLG, 0);
2156
2157 /* Restart IP if it was running. */
2158 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2159 (void) ql_initialize_ip(ha);
2160 ql_isp_rcvbuf(ha);
2161 }
2162
2163 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2164 ha->instance, QL_NAME);
2165
2166 rval = DDI_SUCCESS;
2167 break;
2168
2169 case PM_LEVEL_D3: /* power down to D3 state - off */
2170
2171 QL_PM_LOCK(ha);
2172
2173 if (ha->busy || ((ha->task_daemon_flags &
2174 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2175 QL_PM_UNLOCK(ha);
2176 break;
2177 }
2178
2179 if (ha->power_level == PM_LEVEL_D3) {
2180 rval = DDI_SUCCESS;
2181 QL_PM_UNLOCK(ha);
2182 break;
2183 }
2184 QL_PM_UNLOCK(ha);
2185
2186 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2187 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2188 " config regs", QL_NAME, ha->instance, buf);
2189 break;
2190 }
2191 ha->config_saved = 1;
2192
2193 /*
2194 * Don't enable interrupts. Running mailbox commands with
2195 * interrupts enabled could cause hangs since pm_run_scan()
2196 * runs out of a callout thread and on single cpu systems
2197 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2198 * would not get to run.
2199 */
2200 TASK_DAEMON_LOCK(ha);
2201 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2202 TASK_DAEMON_UNLOCK(ha);
2203
2204 ql_halt(ha, PM_LEVEL_D3);
2205
2206		/*
2207		 * Set up ql_intr to ignore interrupts from here on.
2208		 */
2209 QL_PM_LOCK(ha);
2210 ha->power_level = PM_LEVEL_D3;
2211 QL_PM_UNLOCK(ha);
2212
2213 /*
2214 * Wait for ISR to complete.
2215 */
2216 INTR_LOCK(ha);
2217 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2218 INTR_UNLOCK(ha);
2219
2220 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2221 ha->instance, QL_NAME);
2222
2223 rval = DDI_SUCCESS;
2224 break;
2225 }
2226
2227 kmem_free(buf, MAXPATHLEN);
2228 kmem_free(path, MAXPATHLEN);
2229
2230 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2231
2232 return (rval);
2233 }
2234
2235 /*
2236 * ql_quiesce
2237 *	Quiesce a device attached to the system.
2238 *
2239 * Input:
2240 * dip = pointer to device information structure.
2241 *
2242 * Returns:
2243 * DDI_SUCCESS
2244 *
2245 * Context:
2246 * Kernel context.
2247 */
2248 static int
2249 ql_quiesce(dev_info_t *dip)
2250 {
2251 ql_adapter_state_t *ha;
2252 uint32_t timer;
2253 uint32_t stat;
2254
2255 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2256 if (ha == NULL) {
2257 /* Oh well.... */
2258 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2259 ddi_get_instance(dip));
2260 return (DDI_SUCCESS);
2261 }
2262
2263 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2264
2265 if (CFG_IST(ha, CFG_CTRL_8021)) {
2266 (void) ql_stop_firmware(ha);
2267 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
2268 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2269 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2270 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
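		/*
		 * Poll up to ~3 seconds (30000 passes of 100 us) for
		 * the RISC to acknowledge the MBC_STOP_FIRMWARE
		 * request before resetting the chip below.
		 */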
2271 for (timer = 0; timer < 30000; timer++) {
2272 stat = RD32_IO_REG(ha, risc2host);
2273 if (stat & BIT_15) {
2274 if ((stat & 0xff) < 0x12) {
2275 WRT32_IO_REG(ha, hccr,
2276 HC24_CLR_RISC_INT);
2277 break;
2278 }
2279 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2280 }
2281 drv_usecwait(100);
2282 }
2283 /* Reset the chip. */
2284 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2285 MWB_4096_BYTES);
2286 drv_usecwait(100);
2287
2288 } else {
2289 /* Disable ISP interrupts. */
2290 WRT16_IO_REG(ha, ictrl, 0);
2291 /* Select RISC module registers. */
2292 WRT16_IO_REG(ha, ctrl_status, 0);
2293 /* Reset ISP semaphore. */
2294 WRT16_IO_REG(ha, semaphore, 0);
2295 /* Reset RISC module. */
2296 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2297 /* Release RISC module. */
2298 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2299 }
2300
2301 ql_disable_intr(ha);
2302
2303 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2304
2305 return (DDI_SUCCESS);
2306 }
2307
2308 /* ************************************************************************ */
2309 /* Fibre Channel Adapter (FCA) Transport Functions. */
2310 /* ************************************************************************ */
2311
2312 /*
2313 * ql_bind_port
2314 *	Handles port binding. The FC Transport attempts to bind an FCA port
2315 * when it is ready to start transactions on the port. The FC Transport
2316 * will call the fca_bind_port() function specified in the fca_transport
2317 * structure it receives. The FCA must fill in the port_info structure
2318 * passed in the call and also stash the information for future calls.
2319 *
2320 * Input:
2321 * dip = pointer to FCA information structure.
2322 * port_info = pointer to port information structure.
2323 * bind_info = pointer to bind information structure.
2324 *
2325 * Returns:
2326 *	NULL = failure; otherwise an opaque handle for the bound port.
2327 *
2328 * Context:
2329 * Kernel context.
2330 */
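/*
 * A minimal sketch of the transport side of this call; tran, port_info
 * and bind_info are illustrative locals, tran being the fc_fca_tran_t
 * this driver exports at attach time.
 */
#if 0
	opaque_t handle;

	handle = tran->fca_bind_port(dip, &port_info, &bind_info);
	if (handle == NULL) {
		/* port_info.pi_error holds the FC_* failure reason. */
	}
#endif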
2331 static opaque_t
2332 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2333 fc_fca_bind_info_t *bind_info)
2334 {
2335 ql_adapter_state_t *ha, *vha;
2336 opaque_t fca_handle = NULL;
2337 port_id_t d_id;
2338 int port_npiv = bind_info->port_npiv;
2339 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2340 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2341
2342 /* get state info based on the dip */
2343 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2344 if (ha == NULL) {
2345 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2346 ddi_get_instance(dip));
2347 return (NULL);
2348 }
2349 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2350
2351 /* Verify port number is supported. */
2352 if (port_npiv != 0) {
2353 if (!(ha->flags & VP_ENABLED)) {
2354 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2355 ha->instance);
2356 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2357 return (NULL);
2358 }
2359 if (!(ha->flags & POINT_TO_POINT)) {
2360 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2361 ha->instance);
2362 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2363 return (NULL);
2364 }
2365 if (!(ha->flags & FDISC_ENABLED)) {
2366 QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2367 "FDISC\n", ha->instance);
2368 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2369 return (NULL);
2370 }
2371 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2372 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2373			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2374			    "FC_OUTOFBOUNDS\n", ha->instance,
			    bind_info->port_num);
2375 port_info->pi_error = FC_OUTOFBOUNDS;
2376 return (NULL);
2377 }
2378 } else if (bind_info->port_num != 0) {
2379 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2380 "supported\n", ha->instance, bind_info->port_num);
2381 port_info->pi_error = FC_OUTOFBOUNDS;
2382 return (NULL);
2383 }
2384
2385 /* Locate port context. */
2386 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2387 if (vha->vp_index == bind_info->port_num) {
2388 break;
2389 }
2390 }
2391
2392	/* If virtual port does not exist, create it. */
2393	if (vha == NULL) {
2394		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
		/* Guard against virtual port create failure. */
		if (vha == NULL) {
			EL(ha, "failed to create virtual port=%d\n",
			    bind_info->port_num);
			port_info->pi_error = FC_FAILURE;
			return (NULL);
		}
2395	}
2396
2397 /* make sure this port isn't already bound */
2398 if (vha->flags & FCA_BOUND) {
2399 port_info->pi_error = FC_ALREADY;
2400 } else {
2401 if (vha->vp_index != 0) {
2402 bcopy(port_nwwn,
2403 vha->loginparams.node_ww_name.raw_wwn, 8);
2404 bcopy(port_pwwn,
2405 vha->loginparams.nport_ww_name.raw_wwn, 8);
2406 }
2407 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2408 if (ql_vport_enable(vha) != QL_SUCCESS) {
2409 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2410 "virtual port=%d\n", ha->instance,
2411 vha->vp_index);
2412 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2413 return (NULL);
2414 }
2415 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2416 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2417 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2418 QL_NAME, ha->instance, vha->vp_index,
2419 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2420 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2421 port_pwwn[6], port_pwwn[7],
2422 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2423 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2424 port_nwwn[6], port_nwwn[7]);
2425 }
2426
2427 /* stash the bind_info supplied by the FC Transport */
2428 vha->bind_info.port_handle = bind_info->port_handle;
2429 vha->bind_info.port_statec_cb =
2430 bind_info->port_statec_cb;
2431 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2432
2433 /* Set port's source ID. */
2434 port_info->pi_s_id.port_id = vha->d_id.b24;
2435
2436 /* copy out the default login parameters */
2437 bcopy((void *)&vha->loginparams,
2438 (void *)&port_info->pi_login_params,
2439 sizeof (la_els_logi_t));
2440
2441 /* Set port's hard address if enabled. */
2442 port_info->pi_hard_addr.hard_addr = 0;
2443 if (bind_info->port_num == 0) {
2444 d_id.b24 = ha->d_id.b24;
2445 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2446 if (ha->init_ctrl_blk.cb24.
2447 firmware_options_1[0] & BIT_0) {
2448 d_id.b.al_pa = ql_index_to_alpa[ha->
2449 init_ctrl_blk.cb24.
2450 hard_address[0]];
2451 port_info->pi_hard_addr.hard_addr =
2452 d_id.b24;
2453 }
2454 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2455 BIT_0) {
2456 d_id.b.al_pa = ql_index_to_alpa[ha->
2457 init_ctrl_blk.cb.hard_address[0]];
2458 port_info->pi_hard_addr.hard_addr = d_id.b24;
2459 }
2460
2461 /* Set the node id data */
2462 if (ql_get_rnid_params(ha,
2463 sizeof (port_info->pi_rnid_params.params),
2464 (caddr_t)&port_info->pi_rnid_params.params) ==
2465 QL_SUCCESS) {
2466 port_info->pi_rnid_params.status = FC_SUCCESS;
2467 } else {
2468 port_info->pi_rnid_params.status = FC_FAILURE;
2469 }
2470
2471 /* Populate T11 FC-HBA details */
2472 ql_populate_hba_fru_details(ha, port_info);
2473 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2474 KM_SLEEP);
2475 if (ha->pi_attrs != NULL) {
2476 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2477 sizeof (fca_port_attrs_t));
2478 }
2479 } else {
2480 port_info->pi_rnid_params.status = FC_FAILURE;
2481 if (ha->pi_attrs != NULL) {
2482 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2483 sizeof (fca_port_attrs_t));
2484 }
2485 }
2486
2487 /* Generate handle for this FCA. */
2488 fca_handle = (opaque_t)vha;
2489
2490 ADAPTER_STATE_LOCK(ha);
2491 vha->flags |= FCA_BOUND;
2492 ADAPTER_STATE_UNLOCK(ha);
2493 /* Set port's current state. */
2494 port_info->pi_port_state = vha->state;
2495 }
2496
2497 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2498 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2499 port_info->pi_port_state, port_info->pi_s_id.port_id);
2500
2501 return (fca_handle);
2502 }
2503
2504 /*
2505 * ql_unbind_port
2506 * To unbind a Fibre Channel Adapter from an FC Port driver.
2507 *
2508 * Input:
2509 * fca_handle = handle setup by ql_bind_port().
2510 *
2511 * Context:
2512 * Kernel context.
2513 */
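/*
 * Sketch (illustrative names): the transport undoes a successful
 * ql_bind_port() by passing back the handle it was given.
 */
#if 0
	tran->fca_unbind_port(handle);
#endif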
2514 static void
2515 ql_unbind_port(opaque_t fca_handle)
2516 {
2517 ql_adapter_state_t *ha;
2518 ql_tgt_t *tq;
2519 uint32_t flgs;
2520
2521 ha = ql_fca_handle_to_state(fca_handle);
2522 if (ha == NULL) {
2523 /*EMPTY*/
2524 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2525 (void *)fca_handle);
2526 } else {
2527 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2528 ha->vp_index);
2529
2530 if (!(ha->flags & FCA_BOUND)) {
2531 /*EMPTY*/
2532 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2533 ha->instance, ha->vp_index);
2534 } else {
2535 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2536 if ((tq = ql_loop_id_to_queue(ha,
2537 FL_PORT_24XX_HDL)) != NULL) {
2538 (void) ql_logout_fabric_port(ha, tq);
2539 }
2540 (void) ql_vport_control(ha, (uint8_t)
2541 (CFG_IST(ha, CFG_CTRL_2425) ?
2542 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2543 flgs = FCA_BOUND | VP_ENABLED;
2544 } else {
2545 flgs = FCA_BOUND;
2546 }
2547 ADAPTER_STATE_LOCK(ha);
2548 ha->flags &= ~flgs;
2549 ADAPTER_STATE_UNLOCK(ha);
2550 }
2551
2552 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2553 ha->vp_index);
2554 }
2555 }
2556
2557 /*
2558 * ql_init_pkt
2559 * Initialize FCA portion of packet.
2560 *
2561 * Input:
2562 * fca_handle = handle setup by ql_bind_port().
2563 * pkt = pointer to fc_packet.
2564 *
2565 * Returns:
2566 * FC_SUCCESS - the packet has successfully been initialized.
2567 * FC_UNBOUND - the fca_handle specified is not bound.
2568 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2569 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2570 *
2571 * Context:
2572 * Kernel context.
2573 */
2574 /* ARGSUSED */
2575 static int
2576 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2577 {
2578 ql_adapter_state_t *ha;
2579 ql_srb_t *sp;
2580 int rval = FC_SUCCESS;
2581
2582 ha = ql_fca_handle_to_state(fca_handle);
2583 if (ha == NULL) {
2584 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2585 (void *)fca_handle);
2586 return (FC_UNBOUND);
2587 }
2588 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2589
2590 sp = (ql_srb_t *)pkt->pkt_fca_private;
2591 sp->flags = 0;
2592
2593 /* init cmd links */
2594 sp->cmd.base_address = sp;
2595 sp->cmd.prev = NULL;
2596 sp->cmd.next = NULL;
2597 sp->cmd.head = NULL;
2598
2599 /* init watchdog links */
2600 sp->wdg.base_address = sp;
2601 sp->wdg.prev = NULL;
2602 sp->wdg.next = NULL;
2603 sp->wdg.head = NULL;
2604 sp->pkt = pkt;
2605 sp->ha = ha;
2606 sp->magic_number = QL_FCA_BRAND;
2607 sp->sg_dma.dma_handle = NULL;
2608 #ifndef __sparc
2609 if (CFG_IST(ha, CFG_CTRL_8021)) {
2610 /* Setup DMA for scatter gather list. */
2611 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2612 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2613 sp->sg_dma.cookie_count = 1;
2614 sp->sg_dma.alignment = 64;
2615 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2616 rval = FC_NOMEM;
2617 }
2618 }
2619 #endif /* __sparc */
2620
2621 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2622
2623 return (rval);
2624 }
2625
2626 /*
2627 * ql_un_init_pkt
2628 * Release all local resources bound to packet.
2629 *
2630 * Input:
2631 * fca_handle = handle setup by ql_bind_port().
2632 * pkt = pointer to fc_packet.
2633 *
2634 * Returns:
2635 * FC_SUCCESS - the packet has successfully been invalidated.
2636 * FC_UNBOUND - the fca_handle specified is not bound.
2637 * FC_BADPACKET - the packet has not been initialized or has
2638 * already been freed by this FCA.
2639 *
2640 * Context:
2641 * Kernel context.
2642 */
2643 static int
2644 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2645 {
2646 ql_adapter_state_t *ha;
2647 int rval;
2648 ql_srb_t *sp;
2649
2650 ha = ql_fca_handle_to_state(fca_handle);
2651 if (ha == NULL) {
2652 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2653 (void *)fca_handle);
2654 return (FC_UNBOUND);
2655 }
2656 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2657
2658 sp = (ql_srb_t *)pkt->pkt_fca_private;
2659
2660 if (sp->magic_number != QL_FCA_BRAND) {
2661 EL(ha, "failed, FC_BADPACKET\n");
2662 rval = FC_BADPACKET;
2663 } else {
2664		sp->magic_number = 0;
2665 ql_free_phys(ha, &sp->sg_dma);
2666 rval = FC_SUCCESS;
2667 }
2668
2669 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2670
2671 return (rval);
2672 }
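/*
 * A minimal sketch, with illustrative names, of the packet lifecycle
 * the transport drives through these entry points: init once, transport
 * one or more times, then un-init before freeing the packet.
 */
#if 0
	(void) tran->fca_init_pkt(handle, pkt, KM_SLEEP);
	(void) tran->fca_transport(handle, pkt);
	(void) tran->fca_un_init_pkt(handle, pkt);
#endif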
2673
2674 /*
2675 * ql_els_send
2676 *	Issue an extended link service request.
2677 *
2678 * Input:
2679 * fca_handle = handle setup by ql_bind_port().
2680 * pkt = pointer to fc_packet.
2681 *
2682 * Returns:
2683 * FC_SUCCESS - the command was successful.
2684 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2685 * FC_ELS_PREJECT - the command was rejected by an N-port.
2686 * FC_TRANSPORT_ERROR - a transport error occurred.
2687 * FC_UNBOUND - the fca_handle specified is not bound.
2688 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2689 *
2690 * Context:
2691 * Kernel context.
2692 */
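/*
 * Sketch of what arrives here (illustrative names): the transport puts
 * the ELS code at the start of pkt_cmd; ql_els_send() reads it back and
 * dispatches to the matching ql_els_*() handler below.
 */
#if 0
	ls_code_t els;

	bzero(&els, sizeof (els));
	els.ls_code = LA_ELS_PLOGI;
	ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
	rval = tran->fca_els_send(handle, pkt);
#endif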
2693 static int
2694 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2695 {
2696 ql_adapter_state_t *ha;
2697 int rval;
2698 clock_t timer = drv_usectohz(30000000);
2699 ls_code_t els;
2700 la_els_rjt_t rjt;
2701 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
2702
2703 /* Verify proper command. */
2704 ha = ql_cmd_setup(fca_handle, pkt, &rval);
2705 if (ha == NULL) {
2706 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2707 rval, fca_handle);
2708 return (FC_INVALID_REQUEST);
2709 }
2710 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2711
2712 /* Wait for suspension to end. */
2713 TASK_DAEMON_LOCK(ha);
2714 while (ha->task_daemon_flags & QL_SUSPENDED) {
2715 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2716
2717 /* 30 seconds from now */
2718 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2719 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2720 /*
2721 * The timeout time 'timer' was
2722 * reached without the condition
2723 * being signaled.
2724 */
2725 pkt->pkt_state = FC_PKT_TRAN_BSY;
2726 pkt->pkt_reason = FC_REASON_XCHG_BSY;
2727
2728 /* Release task daemon lock. */
2729 TASK_DAEMON_UNLOCK(ha);
2730
2731 EL(ha, "QL_SUSPENDED failed=%xh\n",
2732 QL_FUNCTION_TIMEOUT);
2733 return (FC_TRAN_BUSY);
2734 }
2735 }
2736 /* Release task daemon lock. */
2737 TASK_DAEMON_UNLOCK(ha);
2738
2739 /* Setup response header. */
2740 bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2741 sizeof (fc_frame_hdr_t));
2742
2743 if (pkt->pkt_rsplen) {
2744 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2745 }
2746
2747 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2748 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2749 pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2750 R_CTL_SOLICITED_CONTROL;
2751 pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2752 F_CTL_END_SEQ;
2753
2754 sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2755 SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2756 SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2757
2758 sp->flags |= SRB_ELS_PKT;
2759
2760 /* map the type of ELS to a function */
2761 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2762 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2763
2764 #if 0
2765 QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2766 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2767 sizeof (fc_frame_hdr_t) / 4);
2768 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2769 QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2770 #endif
2771
2772 sp->iocb = ha->els_cmd;
2773 sp->req_cnt = 1;
2774
2775 switch (els.ls_code) {
2776	case LA_ELS_RJT:
2777	case LA_ELS_ACC:
2778		EL(ha, "LA_ELS_RJT/ACC\n");
2779 pkt->pkt_state = FC_PKT_SUCCESS;
2780 rval = FC_SUCCESS;
2781 break;
2782 case LA_ELS_PLOGI:
2783 case LA_ELS_PDISC:
2784 rval = ql_els_plogi(ha, pkt);
2785 break;
2786 case LA_ELS_FLOGI:
2787 case LA_ELS_FDISC:
2788 rval = ql_els_flogi(ha, pkt);
2789 break;
2790 case LA_ELS_LOGO:
2791 rval = ql_els_logo(ha, pkt);
2792 break;
2793 case LA_ELS_PRLI:
2794 rval = ql_els_prli(ha, pkt);
2795 break;
2796 case LA_ELS_PRLO:
2797 rval = ql_els_prlo(ha, pkt);
2798 break;
2799 case LA_ELS_ADISC:
2800 rval = ql_els_adisc(ha, pkt);
2801 break;
2802 case LA_ELS_LINIT:
2803 rval = ql_els_linit(ha, pkt);
2804 break;
2805 case LA_ELS_LPC:
2806 rval = ql_els_lpc(ha, pkt);
2807 break;
2808 case LA_ELS_LSTS:
2809 rval = ql_els_lsts(ha, pkt);
2810 break;
2811 case LA_ELS_SCR:
2812 rval = ql_els_scr(ha, pkt);
2813 break;
2814 case LA_ELS_RSCN:
2815 rval = ql_els_rscn(ha, pkt);
2816 break;
2817 case LA_ELS_FARP_REQ:
2818 rval = ql_els_farp_req(ha, pkt);
2819 break;
2820 case LA_ELS_FARP_REPLY:
2821 rval = ql_els_farp_reply(ha, pkt);
2822 break;
2823 case LA_ELS_RLS:
2824 rval = ql_els_rls(ha, pkt);
2825 break;
2826 case LA_ELS_RNID:
2827 rval = ql_els_rnid(ha, pkt);
2828 break;
2829 default:
2830 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2831 els.ls_code);
2832 /* Build RJT. */
2833 bzero(&rjt, sizeof (rjt));
2834 rjt.ls_code.ls_code = LA_ELS_RJT;
2835 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2836
2837 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2838 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2839
2840 pkt->pkt_state = FC_PKT_LOCAL_RJT;
2841 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2842 rval = FC_SUCCESS;
2843 break;
2844 }
2845
2846 #if 0
2847 QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2848 QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2849 sizeof (fc_frame_hdr_t) / 4);
2850 #endif
2851 /*
2852 * Return success if the srb was consumed by an iocb. The packet
2853 * completion callback will be invoked by the response handler.
2854 */
2855 if (rval == QL_CONSUMED) {
2856 rval = FC_SUCCESS;
2857 } else if (rval == FC_SUCCESS &&
2858 !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2859 /* Do command callback only if no error */
2860 ql_awaken_task_daemon(ha, sp, 0, 0);
2861 }
2862
2863 if (rval != FC_SUCCESS) {
2864 EL(ha, "failed, rval = %xh\n", rval);
2865 } else {
2866 /*EMPTY*/
2867 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2868 }
2869 return (rval);
2870 }
2871
2872 /*
2873 * ql_get_cap
2874 * Export FCA hardware and software capabilities.
2875 *
2876 * Input:
2877 * fca_handle = handle setup by ql_bind_port().
2878 * cap = pointer to the capabilities string.
2879 * ptr = buffer pointer for return capability.
2880 *
2881 * Returns:
2882 * FC_CAP_ERROR - no such capability
2883 * FC_CAP_FOUND - the capability was returned and cannot be set
2884 * FC_CAP_SETTABLE - the capability was returned and can be set
2885 * FC_UNBOUND - the fca_handle specified is not bound.
2886 *
2887 * Context:
2888 * Kernel context.
2889 */
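/*
 * Sketch (illustrative names): querying the unsolicited buffer limit,
 * which this routine answers with QL_UB_LIMIT.
 */
#if 0
	uint32_t ub_limit;

	if (tran->fca_get_cap(handle, FC_CAP_UNSOL_BUF,
	    (void *)&ub_limit) == FC_CAP_FOUND) {
		/* ub_limit now holds the per-port buffer ceiling. */
	}
#endif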
2890 static int
2891 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2892 {
2893 ql_adapter_state_t *ha;
2894 int rval;
2895 uint32_t *rptr = (uint32_t *)ptr;
2896
2897 ha = ql_fca_handle_to_state(fca_handle);
2898 if (ha == NULL) {
2899 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2900 (void *)fca_handle);
2901 return (FC_UNBOUND);
2902 }
2903 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2904
2905 if (strcmp(cap, FC_NODE_WWN) == 0) {
2906 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2907 ptr, 8);
2908 rval = FC_CAP_FOUND;
2909 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2910 bcopy((void *)&ha->loginparams, ptr,
2911 sizeof (la_els_logi_t));
2912 rval = FC_CAP_FOUND;
2913 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2914 *rptr = (uint32_t)QL_UB_LIMIT;
2915 rval = FC_CAP_FOUND;
2916 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2917
2918 dev_info_t *psydip = NULL;
2919 #ifdef __sparc
2920		/*
2921		 * Disable streaming for certain 2-chip adapters below
2922		 * Psycho to work around the Psycho byte-hole issue.
2923		 */
2924 if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2925 (!CFG_IST(ha, CFG_SBUS_CARD))) {
2926 for (psydip = ddi_get_parent(ha->dip); psydip;
2927 psydip = ddi_get_parent(psydip)) {
2928 if (strcmp(ddi_driver_name(psydip),
2929 "pcipsy") == 0) {
2930 break;
2931 }
2932 }
2933 }
2934 #endif /* __sparc */
2935
2936 if (psydip) {
2937 *rptr = (uint32_t)FC_NO_STREAMING;
2938 EL(ha, "No Streaming\n");
2939 } else {
2940 *rptr = (uint32_t)FC_ALLOW_STREAMING;
2941 EL(ha, "Allow Streaming\n");
2942 }
2943 rval = FC_CAP_FOUND;
2944 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2945 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2946 *rptr = (uint32_t)CHAR_TO_SHORT(
2947 ha->init_ctrl_blk.cb24.max_frame_length[0],
2948 ha->init_ctrl_blk.cb24.max_frame_length[1]);
2949 } else {
2950 *rptr = (uint32_t)CHAR_TO_SHORT(
2951 ha->init_ctrl_blk.cb.max_frame_length[0],
2952 ha->init_ctrl_blk.cb.max_frame_length[1]);
2953 }
2954 rval = FC_CAP_FOUND;
2955 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2956 *rptr = FC_RESET_RETURN_ALL;
2957 rval = FC_CAP_FOUND;
2958 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2959 *rptr = FC_NO_DVMA_SPACE;
2960 rval = FC_CAP_FOUND;
2961 } else {
2962 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2963 rval = FC_CAP_ERROR;
2964 }
2965
2966 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2967
2968 return (rval);
2969 }
2970
2971 /*
2972 * ql_set_cap
2973 * Allow the FC Transport to set FCA capabilities if possible.
2974 *
2975 * Input:
2976 * fca_handle = handle setup by ql_bind_port().
2977 * cap = pointer to the capabilities string.
2978 * ptr = buffer pointer for capability.
2979 *
2980 * Returns:
2981 * FC_CAP_ERROR - no such capability
2982 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2983 * FC_CAP_SETTABLE - the capability was successfully set.
2984 * FC_UNBOUND - the fca_handle specified is not bound.
2985 *
2986 * Context:
2987 * Kernel context.
2988 */
2989 /* ARGSUSED */
2990 static int
2991 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2992 {
2993 ql_adapter_state_t *ha;
2994 int rval;
2995
2996 ha = ql_fca_handle_to_state(fca_handle);
2997 if (ha == NULL) {
2998 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2999 (void *)fca_handle);
3000 return (FC_UNBOUND);
3001 }
3002 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3003
3004 if (strcmp(cap, FC_NODE_WWN) == 0) {
3005 rval = FC_CAP_FOUND;
3006 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3007 rval = FC_CAP_FOUND;
3008 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3009 rval = FC_CAP_FOUND;
3010 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3011 rval = FC_CAP_FOUND;
3012 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3013 rval = FC_CAP_FOUND;
3014 } else {
3015 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3016 rval = FC_CAP_ERROR;
3017 }
3018
3019 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3020
3021 return (rval);
3022 }
3023
3024 /*
3025 * ql_getmap
3026 *	Request the Arbitrated Loop (AL-PA) map.
3027 *
3028 * Input:
3029 * fca_handle = handle setup by ql_bind_port().
3030 *	mapbuf = buffer pointer for map.
3031 *
3032 * Returns:
3033 * FC_OLDPORT - the specified port is not operating in loop mode.
3034 * FC_OFFLINE - the specified port is not online.
3035 * FC_NOMAP - there is no loop map available for this port.
3036 * FC_UNBOUND - the fca_handle specified is not bound.
3037 * FC_SUCCESS - a valid map has been placed in mapbuf.
3038 *
3039 * Context:
3040 * Kernel context.
3041 */
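/*
 * Sketch (illustrative names): walking the AL_PA list returned in
 * mapbuf; lilp_length counts the valid entries in lilp_alpalist[].
 */
#if 0
	fc_lilpmap_t map;
	uint16_t i;

	if (tran->fca_getmap(handle, &map) == FC_SUCCESS) {
		for (i = 0; i < map.lilp_length; i++) {
			cmn_err(CE_CONT, "AL_PA[%d] = %xh\n", i,
			    map.lilp_alpalist[i]);
		}
	}
#endif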
3042 static int
3043 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3044 {
3045 ql_adapter_state_t *ha;
3046 clock_t timer = drv_usectohz(30000000);
3047 int rval = FC_SUCCESS;
3048
3049 ha = ql_fca_handle_to_state(fca_handle);
3050 if (ha == NULL) {
3051 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3052 (void *)fca_handle);
3053 return (FC_UNBOUND);
3054 }
3055 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3056
3057 mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3058 mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3059
3060 /* Wait for suspension to end. */
3061 TASK_DAEMON_LOCK(ha);
3062 while (ha->task_daemon_flags & QL_SUSPENDED) {
3063 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3064
3065 /* 30 seconds from now */
3066 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3067 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3068 /*
3069 * The timeout time 'timer' was
3070 * reached without the condition
3071 * being signaled.
3072 */
3073
3074 /* Release task daemon lock. */
3075 TASK_DAEMON_UNLOCK(ha);
3076
3077 EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3078 return (FC_TRAN_BUSY);
3079 }
3080 }
3081 /* Release task daemon lock. */
3082 TASK_DAEMON_UNLOCK(ha);
3083
3084 if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3085 (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3086		/*
3087		 * Now, since transport drivers consider this an
3088		 * offline condition, let's wait a few seconds
3089		 * for any loop transitions before we reset the
3090		 * chip and restart all over again.
3091		 */
3092 ql_delay(ha, 2000000);
3093 EL(ha, "failed, FC_NOMAP\n");
3094 rval = FC_NOMAP;
3095 } else {
3096 /*EMPTY*/
3097 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3098 "data %xh %xh %xh %xh\n", ha->instance,
3099 mapbuf->lilp_myalpa, mapbuf->lilp_length,
3100 mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3101 mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3102 }
3103
3104 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3105 #if 0
3106 QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3107 #endif
3108 return (rval);
3109 }
3110
3111 /*
3112 * ql_transport
3113 * Issue an I/O request. Handles all regular requests.
3114 *
3115 * Input:
3116 * fca_handle = handle setup by ql_bind_port().
3117 * pkt = pointer to fc_packet.
3118 *
3119 * Returns:
3120 * FC_SUCCESS - the packet was accepted for transport.
3121 * FC_TRANSPORT_ERROR - a transport error occurred.
3122 * FC_BADPACKET - the packet to be transported had not been
3123 * initialized by this FCA.
3124 * FC_UNBOUND - the fca_handle specified is not bound.
3125 *
3126 * Context:
3127 * Kernel context.
3128 */
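/*
 * Sketch (illustrative names): an FCP command is tagged by r_ctl/type
 * before the transport hands it to this routine, which then routes it
 * to ql_fcp_scsi_cmd().
 */
#if 0
	pkt->pkt_cmd_fhdr.r_ctl = R_CTL_COMMAND;
	pkt->pkt_cmd_fhdr.type = FC_TYPE_SCSI_FCP;
	rval = tran->fca_transport(handle, pkt);
#endif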
3129 static int
3130 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3131 {
3132 ql_adapter_state_t *ha;
3133 int rval = FC_TRANSPORT_ERROR;
3134 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
3135
3136 /* Verify proper command. */
3137 ha = ql_cmd_setup(fca_handle, pkt, &rval);
3138 if (ha == NULL) {
3139 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3140 rval, fca_handle);
3141 return (rval);
3142 }
3143 QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
3144 #if 0
3145 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
3146 sizeof (fc_frame_hdr_t) / 4);
3147 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
3148 QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
3149 #endif
3150
3151 /* Reset SRB flags. */
3152 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3153 SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
3154 SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3155 SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3156 SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3157 SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3158 SRB_MS_PKT | SRB_ELS_PKT);
3159
3160 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3161 pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3162 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3163 pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3164 pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3165
3166 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3167 case R_CTL_COMMAND:
3168 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3169 sp->flags |= SRB_FCP_CMD_PKT;
3170 rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3171 }
3172 break;
3173
3174 default:
3175 /* Setup response header and buffer. */
3176 if (pkt->pkt_rsplen) {
3177 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3178 }
3179
3180 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3181 case R_CTL_UNSOL_DATA:
3182 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3183 sp->flags |= SRB_IP_PKT;
3184 rval = ql_fcp_ip_cmd(ha, pkt, sp);
3185 }
3186 break;
3187
3188 case R_CTL_UNSOL_CONTROL:
3189 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3190 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3191 rval = ql_fc_services(ha, pkt);
3192 }
3193 break;
3194
3195 case R_CTL_SOLICITED_DATA:
3196 case R_CTL_STATUS:
3197 default:
3198 pkt->pkt_state = FC_PKT_LOCAL_RJT;
3199 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3200 rval = FC_TRANSPORT_ERROR;
3201 EL(ha, "unknown, r_ctl=%xh\n",
3202 pkt->pkt_cmd_fhdr.r_ctl);
3203 break;
3204 }
3205 }
3206
3207 if (rval != FC_SUCCESS) {
3208 EL(ha, "failed, rval = %xh\n", rval);
3209 } else {
3210 /*EMPTY*/
3211 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3212 }
3213
3214 return (rval);
3215 }
3216
3217 /*
3218 * ql_ub_alloc
3219 * Allocate buffers for unsolicited exchanges.
3220 *
3221 * Input:
3222 * fca_handle = handle setup by ql_bind_port().
3223 * tokens = token array for each buffer.
3224 * size = size of each buffer.
3225 * count = pointer to number of buffers.
3226 * type = the FC-4 type the buffers are reserved for.
3227 * 1 = Extended Link Services, 5 = LLC/SNAP
3228 *
3229 * Returns:
3230 * FC_FAILURE - buffers could not be allocated.
3231 * FC_TOOMANY - the FCA could not allocate the requested
3232 * number of buffers.
3233 * FC_SUCCESS - unsolicited buffers were allocated.
3234 * FC_UNBOUND - the fca_handle specified is not bound.
3235 *
3236 * Context:
3237 * Kernel context.
3238 */
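/*
 * Sketch of the token lifecycle (illustrative names and sizes): the
 * tokens returned here identify the buffers in later release and free
 * calls.
 */
#if 0
	uint64_t tokens[8];
	uint32_t cnt = 8;

	if (tran->fca_ub_alloc(handle, tokens, 2048, &cnt,
	    FC_TYPE_EXTENDED_LS) == FC_SUCCESS) {
		/* ...buffers arrive via the port_unsol_cb callback... */
		(void) tran->fca_ub_release(handle, cnt, tokens);
		(void) tran->fca_ub_free(handle, cnt, tokens);
	}
#endif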
3239 static int
3240 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3241 uint32_t *count, uint32_t type)
3242 {
3243 ql_adapter_state_t *ha;
3244 caddr_t bufp = NULL;
3245 fc_unsol_buf_t *ubp;
3246 ql_srb_t *sp;
3247 uint32_t index;
3248 uint32_t cnt;
3249 uint32_t ub_array_index = 0;
3250 int rval = FC_SUCCESS;
3251 int ub_updated = FALSE;
3252
3253 /* Check handle. */
3254 ha = ql_fca_handle_to_state(fca_handle);
3255 if (ha == NULL) {
3256 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3257 (void *)fca_handle);
3258 return (FC_UNBOUND);
3259 }
3260 QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3261 ha->instance, ha->vp_index, *count);
3262
3263 QL_PM_LOCK(ha);
3264 if (ha->power_level != PM_LEVEL_D0) {
3265 QL_PM_UNLOCK(ha);
3266 QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3267 ha->vp_index);
3268 return (FC_FAILURE);
3269 }
3270 QL_PM_UNLOCK(ha);
3271
3272 /* Acquire adapter state lock. */
3273 ADAPTER_STATE_LOCK(ha);
3274
3275 /* Check the count. */
3276 if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3277 *count = 0;
3278 EL(ha, "failed, FC_TOOMANY\n");
3279 rval = FC_TOOMANY;
3280 }
3281
3282 /*
3283 * reset ub_array_index
3284 */
3285 ub_array_index = 0;
3286
3287 /*
3288 * Now proceed to allocate any buffers required
3289 */
3290 for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3291 /* Allocate all memory needed. */
3292 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3293 KM_SLEEP);
3294 if (ubp == NULL) {
3295 EL(ha, "failed, FC_FAILURE\n");
3296 rval = FC_FAILURE;
3297 } else {
3298 sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3299 if (sp == NULL) {
3300 kmem_free(ubp, sizeof (fc_unsol_buf_t));
3301 rval = FC_FAILURE;
3302 } else {
3303 if (type == FC_TYPE_IS8802_SNAP) {
3304 #ifdef __sparc
3305 if (ql_get_dma_mem(ha,
3306 &sp->ub_buffer, size,
3307 BIG_ENDIAN_DMA,
3308 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3309 rval = FC_FAILURE;
3310 kmem_free(ubp,
3311 sizeof (fc_unsol_buf_t));
3312 kmem_free(sp,
3313 sizeof (ql_srb_t));
3314 } else {
3315 bufp = sp->ub_buffer.bp;
3316 sp->ub_size = size;
3317 }
3318 #else
3319 if (ql_get_dma_mem(ha,
3320 &sp->ub_buffer, size,
3321 LITTLE_ENDIAN_DMA,
3322 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3323 rval = FC_FAILURE;
3324 kmem_free(ubp,
3325 sizeof (fc_unsol_buf_t));
3326 kmem_free(sp,
3327 sizeof (ql_srb_t));
3328 } else {
3329 bufp = sp->ub_buffer.bp;
3330 sp->ub_size = size;
3331 }
3332 #endif
3333 } else {
3334 bufp = kmem_zalloc(size, KM_SLEEP);
3335 if (bufp == NULL) {
3336 rval = FC_FAILURE;
3337 kmem_free(ubp,
3338 sizeof (fc_unsol_buf_t));
3339 kmem_free(sp,
3340 sizeof (ql_srb_t));
3341 } else {
3342 sp->ub_size = size;
3343 }
3344 }
3345 }
3346 }
3347
3348 if (rval == FC_SUCCESS) {
3349 /* Find next available slot. */
3350 QL_UB_LOCK(ha);
3351 while (ha->ub_array[ub_array_index] != NULL) {
3352 ub_array_index++;
3353 }
3354
3355 ubp->ub_fca_private = (void *)sp;
3356
3357 /* init cmd links */
3358 sp->cmd.base_address = sp;
3359 sp->cmd.prev = NULL;
3360 sp->cmd.next = NULL;
3361 sp->cmd.head = NULL;
3362
3363 /* init wdg links */
3364 sp->wdg.base_address = sp;
3365 sp->wdg.prev = NULL;
3366 sp->wdg.next = NULL;
3367 sp->wdg.head = NULL;
3368 sp->ha = ha;
3369
3370 ubp->ub_buffer = bufp;
3371 ubp->ub_bufsize = size;
3372 ubp->ub_port_handle = fca_handle;
3373 ubp->ub_token = ub_array_index;
3374
3375 /* Save the token. */
3376 tokens[index] = ub_array_index;
3377
3378 /* Setup FCA private information. */
3379 sp->ub_type = type;
3380 sp->handle = ub_array_index;
3381 sp->flags |= SRB_UB_IN_FCA;
3382
3383 ha->ub_array[ub_array_index] = ubp;
3384 ha->ub_allocated++;
3385 ub_updated = TRUE;
3386 QL_UB_UNLOCK(ha);
3387 }
3388 }
3389
3390 /* Release adapter state lock. */
3391 ADAPTER_STATE_UNLOCK(ha);
3392
3393 /* IP buffer. */
3394 if (ub_updated) {
3395 if ((type == FC_TYPE_IS8802_SNAP) &&
3396 (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3397
3398 ADAPTER_STATE_LOCK(ha);
3399 ha->flags |= IP_ENABLED;
3400 ADAPTER_STATE_UNLOCK(ha);
3401
3402 if (!(ha->flags & IP_INITIALIZED)) {
3403 if (CFG_IST(ha, CFG_CTRL_2422)) {
3404 ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3405 LSB(ql_ip_mtu);
3406 ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3407 MSB(ql_ip_mtu);
3408 ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3409 LSB(size);
3410 ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3411 MSB(size);
3412
3413 cnt = CHAR_TO_SHORT(
3414 ha->ip_init_ctrl_blk.cb24.cc[0],
3415 ha->ip_init_ctrl_blk.cb24.cc[1]);
3416
3417 if (cnt < *count) {
3418 ha->ip_init_ctrl_blk.cb24.cc[0]
3419 = LSB(*count);
3420 ha->ip_init_ctrl_blk.cb24.cc[1]
3421 = MSB(*count);
3422 }
3423 } else {
3424 ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3425 LSB(ql_ip_mtu);
3426 ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3427 MSB(ql_ip_mtu);
3428 ha->ip_init_ctrl_blk.cb.buf_size[0] =
3429 LSB(size);
3430 ha->ip_init_ctrl_blk.cb.buf_size[1] =
3431 MSB(size);
3432
3433 cnt = CHAR_TO_SHORT(
3434 ha->ip_init_ctrl_blk.cb.cc[0],
3435 ha->ip_init_ctrl_blk.cb.cc[1]);
3436
3437 if (cnt < *count) {
3438 ha->ip_init_ctrl_blk.cb.cc[0] =
3439 LSB(*count);
3440 ha->ip_init_ctrl_blk.cb.cc[1] =
3441 MSB(*count);
3442 }
3443 }
3444
3445 (void) ql_initialize_ip(ha);
3446 }
3447 ql_isp_rcvbuf(ha);
3448 }
3449 }
3450
3451 if (rval != FC_SUCCESS) {
3452 EL(ha, "failed=%xh\n", rval);
3453 } else {
3454 /*EMPTY*/
3455 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3456 ha->vp_index);
3457 }
3458 return (rval);
3459 }
3460
3461 /*
3462 * ql_ub_free
3463 * Free unsolicited buffers.
3464 *
3465 * Input:
3466 * fca_handle = handle setup by ql_bind_port().
3467 * count = number of buffers.
3468 * tokens = token array for each buffer.
3469 *
3470 * Returns:
3471 * FC_SUCCESS - the requested buffers have been freed.
3472 * FC_UNBOUND - the fca_handle specified is not bound.
3473 * FC_UB_BADTOKEN - an invalid token was encountered.
3474 * No buffers have been released.
3475 *
3476 * Context:
3477 * Kernel context.
3478 */
3479 static int
3480 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3481 {
3482 ql_adapter_state_t *ha;
3483 ql_srb_t *sp;
3484 uint32_t index;
3485 uint64_t ub_array_index;
3486 int rval = FC_SUCCESS;
3487
3488 /* Check handle. */
3489 ha = ql_fca_handle_to_state(fca_handle);
3490 if (ha == NULL) {
3491 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3492 (void *)fca_handle);
3493 return (FC_UNBOUND);
3494 }
3495 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3496
3497 /* Acquire adapter state lock. */
3498 ADAPTER_STATE_LOCK(ha);
3499
3500 /* Check all returned tokens. */
3501 for (index = 0; index < count; index++) {
3502 fc_unsol_buf_t *ubp;
3503
3504 /* Check the token range. */
3505 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3506 EL(ha, "failed, FC_UB_BADTOKEN\n");
3507 rval = FC_UB_BADTOKEN;
3508 break;
3509 }
3510
3511 /* Check the unsolicited buffer array. */
3512 QL_UB_LOCK(ha);
3513 ubp = ha->ub_array[ub_array_index];
3514
3515 if (ubp == NULL) {
3516 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3517 rval = FC_UB_BADTOKEN;
3518 QL_UB_UNLOCK(ha);
3519 break;
3520 }
3521
3522 /* Check the state of the unsolicited buffer. */
3523 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3524 sp->flags |= SRB_UB_FREE_REQUESTED;
3525
3526 while (!(sp->flags & SRB_UB_IN_FCA) ||
3527 (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3528 QL_UB_UNLOCK(ha);
3529 ADAPTER_STATE_UNLOCK(ha);
3530 delay(drv_usectohz(100000));
3531 ADAPTER_STATE_LOCK(ha);
3532 QL_UB_LOCK(ha);
3533 }
3534 ha->ub_array[ub_array_index] = NULL;
3535 QL_UB_UNLOCK(ha);
3536 ql_free_unsolicited_buffer(ha, ubp);
3537 }
3538
3539 if (rval == FC_SUCCESS) {
3540 /*
3541 * Signal any pending hardware reset when there are
3542 * no more unsolicited buffers in use.
3543 */
3544 if (ha->ub_allocated == 0) {
3545 cv_broadcast(&ha->pha->cv_ub);
3546 }
3547 }
3548
3549 /* Release adapter state lock. */
3550 ADAPTER_STATE_UNLOCK(ha);
3551
3552 if (rval != FC_SUCCESS) {
3553 EL(ha, "failed=%xh\n", rval);
3554 } else {
3555 /*EMPTY*/
3556 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3557 }
3558 return (rval);
3559 }
3560
3561 /*
3562 * ql_ub_release
3563 * Release unsolicited buffers from FC Transport
3564 * to FCA for future use.
3565 *
3566 * Input:
3567 * fca_handle = handle setup by ql_bind_port().
3568 * count = number of buffers.
3569 * tokens = token array for each buffer.
3570 *
3571 * Returns:
3572 * FC_SUCCESS - the requested buffers have been released.
3573 * FC_UNBOUND - the fca_handle specified is not bound.
3574 * FC_UB_BADTOKEN - an invalid token was encountered.
3575 * No buffers have been released.
3576 *
3577 * Context:
3578 * Kernel context.
3579 */
3580 static int
3581 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3582 {
3583 ql_adapter_state_t *ha;
3584 ql_srb_t *sp;
3585 uint32_t index;
3586 uint64_t ub_array_index;
3587 int rval = FC_SUCCESS;
3588 int ub_ip_updated = FALSE;
3589
3590 /* Check handle. */
3591 ha = ql_fca_handle_to_state(fca_handle);
3592 if (ha == NULL) {
3593		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3594 (void *)fca_handle);
3595 return (FC_UNBOUND);
3596 }
3597 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3598
3599 /* Acquire adapter state lock. */
3600 ADAPTER_STATE_LOCK(ha);
3601 QL_UB_LOCK(ha);
3602
3603 /* Check all returned tokens. */
3604 for (index = 0; index < count; index++) {
3605 /* Check the token range. */
3606 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3607 EL(ha, "failed, FC_UB_BADTOKEN\n");
3608 rval = FC_UB_BADTOKEN;
3609 break;
3610 }
3611
3612 /* Check the unsolicited buffer array. */
3613 if (ha->ub_array[ub_array_index] == NULL) {
3614 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3615 rval = FC_UB_BADTOKEN;
3616 break;
3617 }
3618
3619 /* Check the state of the unsolicited buffer. */
3620 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3621 if (sp->flags & SRB_UB_IN_FCA) {
3622 EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3623 rval = FC_UB_BADTOKEN;
3624 break;
3625 }
3626 }
3627
3628 /* If all tokens checkout, release the buffers. */
3629 if (rval == FC_SUCCESS) {
3630 /* Check all returned tokens. */
3631 for (index = 0; index < count; index++) {
3632 fc_unsol_buf_t *ubp;
3633
3634 ub_array_index = tokens[index];
3635 ubp = ha->ub_array[ub_array_index];
3636 sp = ubp->ub_fca_private;
3637
3638 ubp->ub_resp_flags = 0;
3639 sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3640 sp->flags |= SRB_UB_IN_FCA;
3641
3642 /* IP buffer. */
3643 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3644 ub_ip_updated = TRUE;
3645 }
3646 }
3647 }
3648
3649 QL_UB_UNLOCK(ha);
3650 /* Release adapter state lock. */
3651 ADAPTER_STATE_UNLOCK(ha);
3652
3653 /*
3654 * XXX: We should call ql_isp_rcvbuf() to return a
3655 * buffer to ISP only if the number of buffers fall below
3656 * the low water mark.
3657 */
3658 if (ub_ip_updated) {
3659 ql_isp_rcvbuf(ha);
3660 }
3661
3662 if (rval != FC_SUCCESS) {
3663 EL(ha, "failed, rval = %xh\n", rval);
3664 } else {
3665 /*EMPTY*/
3666 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3667 }
3668 return (rval);
3669 }
3670
3671 /*
3672 * ql_abort
3673 * Abort a packet.
3674 *
3675 * Input:
3676 * fca_handle = handle setup by ql_bind_port().
3677 * pkt = pointer to fc_packet.
3678 * flags = KM_SLEEP flag.
3679 *
3680 * Returns:
3681 * FC_SUCCESS - the packet has successfully aborted.
3682 * FC_ABORTED - the packet has successfully aborted.
3683 * FC_ABORTING - the packet is being aborted.
3684 * FC_ABORT_FAILED - the packet could not be aborted.
3685 * FC_TRANSPORT_ERROR - a transport error occurred while attempting
3686 * to abort the packet.
3687 * FC_BADEXCHANGE - no packet found.
3688 * FC_UNBOUND - the fca_handle specified is not bound.
3689 *
3690 * Context:
3691 * Kernel context.
3692 */
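/*
 * Sketch (illustrative names): abort a previously transported packet,
 * sleeping until the abort completes.
 */
#if 0
	if (tran->fca_abort(handle, pkt, KM_SLEEP) == FC_ABORTED) {
		/* pkt->pkt_reason is now CS_ABORTED. */
	}
#endif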
3693 static int
3694 ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
3695 {
3696 port_id_t d_id;
3697 ql_link_t *link;
3698 ql_adapter_state_t *ha, *pha;
3699 ql_srb_t *sp;
3700 ql_tgt_t *tq;
3701 ql_lun_t *lq;
3702 int rval = FC_ABORTED;
3703
3704 ha = ql_fca_handle_to_state(fca_handle);
3705 if (ha == NULL) {
3706 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3707 (void *)fca_handle);
3708 return (FC_UNBOUND);
3709 }
3710
3711 pha = ha->pha;
3712
3713 QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3714
3715 /* Get target queue pointer. */
3716 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
3717 tq = ql_d_id_to_queue(ha, d_id);
3718
3719 if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
3720 if (tq == NULL) {
3721 EL(ha, "failed, FC_TRANSPORT_ERROR\n");
3722 rval = FC_TRANSPORT_ERROR;
3723 } else {
3724 EL(ha, "failed, FC_OFFLINE\n");
3725 rval = FC_OFFLINE;
3726 }
3727 return (rval);
3728 }
3729
3730 sp = (ql_srb_t *)pkt->pkt_fca_private;
3731 lq = sp->lun_queue;
3732
3733 /* Set poll flag if sleep wanted. */
3734 if (flags == KM_SLEEP) {
3735 sp->flags |= SRB_POLL;
3736 }
3737
3738 /* Acquire target queue lock. */
3739 DEVICE_QUEUE_LOCK(tq);
3740 REQUEST_RING_LOCK(ha);
3741
3742 /* If command not already started. */
3743 if (!(sp->flags & SRB_ISP_STARTED)) {
3744 /* Check pending queue for command. */
3745 sp = NULL;
3746 for (link = pha->pending_cmds.first; link != NULL;
3747 link = link->next) {
3748 sp = link->base_address;
3749 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3750 /* Remove srb from q. */
3751 ql_remove_link(&pha->pending_cmds, &sp->cmd);
3752 break;
3753 } else {
3754 sp = NULL;
3755 }
3756 }
3757 REQUEST_RING_UNLOCK(ha);
3758
3759 if (sp == NULL) {
3760 /* Check for cmd on device queue. */
3761 for (link = lq->cmd.first; link != NULL;
3762 link = link->next) {
3763 sp = link->base_address;
3764 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3765 /* Remove srb from q. */
3766 ql_remove_link(&lq->cmd, &sp->cmd);
3767 break;
3768 } else {
3769 sp = NULL;
3770 }
3771 }
3772 }
3773 /* Release device lock */
3774 DEVICE_QUEUE_UNLOCK(tq);
3775
3776 /* If command on target queue. */
3777 if (sp != NULL) {
3778 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
3779
3780 /* Set return status */
3781 pkt->pkt_reason = CS_ABORTED;
3782
3783 sp->cmd.next = NULL;
3784 ql_done(&sp->cmd);
3785 rval = FC_ABORTED;
3786 } else {
3787 EL(ha, "failed, FC_BADEXCHANGE\n");
3788 rval = FC_BADEXCHANGE;
3789 }
3790 } else if (sp->flags & SRB_ISP_COMPLETED) {
3791 /* Release device queue lock. */
3792 REQUEST_RING_UNLOCK(ha);
3793 DEVICE_QUEUE_UNLOCK(tq);
3794 EL(ha, "failed, already done, FC_FAILURE\n");
3795 rval = FC_FAILURE;
3796 } else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
3797 (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, the target data/response CTIO is with the
		 * firmware. Since the firmware is supposed to terminate
		 * such I/Os with an error, we need not do anything. If
		 * the firmware decides not to terminate those I/Os and
		 * simply keeps quiet, then we need to initiate cleanup
		 * here by calling ql_done.
		 */
3806 REQUEST_RING_UNLOCK(ha);
3807 DEVICE_QUEUE_UNLOCK(tq);
3808 rval = FC_ABORTED;
3809 } else {
3810 request_t *ep = pha->request_ring_bp;
3811 uint16_t cnt;
3812
3813 if (sp->handle != 0) {
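			/*
			 * The command may still be sitting in the request
			 * ring; invalidate its entry type so the ISP
			 * firmware discards it rather than starting it.
			 */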
3814 for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
3815 if (sp->handle == ddi_get32(
3816 pha->hba_buf.acc_handle, &ep->handle)) {
3817 ep->entry_type = INVALID_ENTRY_TYPE;
3818 break;
3819 }
3820 ep++;
3821 }
3822 }
3823
		/* Release request ring and device queue locks. */
3825 REQUEST_RING_UNLOCK(ha);
3826 DEVICE_QUEUE_UNLOCK(tq);
3827
3828 sp->flags |= SRB_ABORTING;
3829 (void) ql_abort_command(ha, sp);
3830 pkt->pkt_reason = CS_ABORTED;
3831 rval = FC_ABORTED;
3832 }
3833
3834 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
3835
3836 return (rval);
3837 }
3838
3839 /*
3840 * ql_reset
3841 * Reset link or hardware.
3842 *
3843 * Input:
3844 * fca_handle = handle setup by ql_bind_port().
3845 * cmd = reset type command.
3846 *
3847 * Returns:
3848 * FC_SUCCESS - reset has successfully finished.
3849 * FC_UNBOUND - the fca_handle specified is not bound.
3850 * FC_FAILURE - reset failed.
3851 *
3852 * Context:
3853 * Kernel context.
3854 */
3855 static int
3856 ql_reset(opaque_t fca_handle, uint32_t cmd)
3857 {
3858 ql_adapter_state_t *ha;
3859 int rval = FC_SUCCESS, rval2;
3860
3861 ha = ql_fca_handle_to_state(fca_handle);
3862 if (ha == NULL) {
3863 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3864 (void *)fca_handle);
3865 return (FC_UNBOUND);
3866 }
3867
3868 QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3869 ha->vp_index, cmd);
3870
3871 switch (cmd) {
3872 case FC_FCA_CORE:
3873 /* dump firmware core if specified. */
3874 if (ha->vp_index == 0) {
3875 if (ql_dump_firmware(ha) != QL_SUCCESS) {
3876 EL(ha, "failed, FC_FAILURE\n");
3877 rval = FC_FAILURE;
3878 }
3879 }
3880 break;
3881 case FC_FCA_LINK_RESET:
3882 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3883 if (ql_loop_reset(ha) != QL_SUCCESS) {
3884 EL(ha, "failed, FC_FAILURE-2\n");
3885 rval = FC_FAILURE;
3886 }
3887 }
3888 break;
3889 case FC_FCA_RESET_CORE:
3890 case FC_FCA_RESET:
		/* Dump firmware core if specified. */
3892 if (cmd == FC_FCA_RESET_CORE) {
3893 if (ha->vp_index != 0) {
3894 rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
3895 ? QL_SUCCESS : ql_loop_reset(ha);
3896 } else {
3897 rval2 = ql_dump_firmware(ha);
3898 }
3899 if (rval2 != QL_SUCCESS) {
3900 EL(ha, "failed, FC_FAILURE-3\n");
3901 rval = FC_FAILURE;
3902 }
3903 }
3904
3905 /* Free up all unsolicited buffers. */
3906 if (ha->ub_allocated != 0) {
			/* Tell the transport to release its buffers. */
3908 ha->state = FC_PORT_SPEED_MASK(ha->state);
3909 ha->state |= FC_STATE_RESET_REQUESTED;
3910 if (ha->flags & FCA_BOUND) {
3911 (ha->bind_info.port_statec_cb)
3912 (ha->bind_info.port_handle,
3913 ha->state);
3914 }
3915 }
3916
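		/* Keep only the port speed bits of the saved state. */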
3917 ha->state = FC_PORT_SPEED_MASK(ha->state);
3918
3919 /* All buffers freed */
3920 if (ha->ub_allocated == 0) {
3921 /* Hardware reset. */
3922 if (cmd == FC_FCA_RESET) {
3923 if (ha->vp_index == 0) {
3924 (void) ql_abort_isp(ha);
3925 } else if (!(ha->pha->task_daemon_flags &
3926 LOOP_DOWN)) {
3927 (void) ql_loop_reset(ha);
3928 }
3929 }
3930
3931 /* Inform that the hardware has been reset */
3932 ha->state |= FC_STATE_RESET;
3933 } else {
			/*
			 * The port driver expects an online state if
			 * the buffers have not all been freed.
			 */
3938 if (ha->topology & QL_LOOP_CONNECTION) {
3939 ha->state |= FC_STATE_LOOP;
3940 } else {
3941 ha->state |= FC_STATE_ONLINE;
3942 }
3943 }
3944
3945 TASK_DAEMON_LOCK(ha);
3946 ha->task_daemon_flags |= FC_STATE_CHANGE;
3947 TASK_DAEMON_UNLOCK(ha);
3948
3949 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3950
3951 break;
3952 default:
3953 EL(ha, "unknown cmd=%xh\n", cmd);
3954 break;
3955 }
3956
3957 if (rval != FC_SUCCESS) {
3958 EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3959 } else {
3960 /*EMPTY*/
3961 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3962 ha->vp_index);
3963 }
3964
3965 return (rval);
3966 }
3967
3968 /*
3969 * ql_port_manage
3970 * Perform port management or diagnostics.
3971 *
3972 * Input:
3973 * fca_handle = handle setup by ql_bind_port().
3974 * cmd = pointer to command structure.
3975 *
 * Returns:
 *	FC_SUCCESS - the request completed successfully.
 *	FC_FAILURE - the request did not complete successfully.
 *	FC_TRAN_BUSY - the adapter is busy, retry the request later.
 *	FC_TOOMANY, FC_NOMEM, FC_INVALID_REQUEST, FC_BADCMD - the request
 *	parameters were invalid or could not be satisfied.
 *	FC_UNBOUND - the fca_handle specified is not bound.
3980 *
3981 * Context:
3982 * Kernel context.
3983 */
3984 static int
3985 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3986 {
3987 clock_t timer;
3988 uint16_t index;
3989 uint32_t *bp;
3990 port_id_t d_id;
3991 ql_link_t *link;
3992 ql_adapter_state_t *ha, *pha;
3993 ql_tgt_t *tq;
3994 dma_mem_t buffer_xmt, buffer_rcv;
3995 size_t length;
3996 uint32_t cnt;
3997 char buf[80];
3998 lbp_t *lb;
3999 ql_mbx_data_t mr;
4000 app_mbx_cmd_t *mcp;
4001 int i0;
4002 uint8_t *bptr;
4003 int rval2, rval = FC_SUCCESS;
4004 uint32_t opcode;
4005 uint32_t set_flags = 0;
4006
4007 ha = ql_fca_handle_to_state(fca_handle);
4008 if (ha == NULL) {
4009 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4010 (void *)fca_handle);
4011 return (FC_UNBOUND);
4012 }
4013 pha = ha->pha;
4014
4015 QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4016 cmd->pm_cmd_code);
4017
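	/* Stall the driver so no new commands are started. */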
4018 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4019
4020 /*
4021 * Wait for all outstanding commands to complete
4022 */
4023 index = (uint16_t)ql_wait_outstanding(ha);
4024
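	/*
	 * A return of MAX_OUTSTANDING_COMMANDS means everything drained;
	 * any other index is a command that is still active.
	 */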
4025 if (index != MAX_OUTSTANDING_COMMANDS) {
4026 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4027 ql_restart_queues(ha);
4028 EL(ha, "failed, FC_TRAN_BUSY\n");
4029 return (FC_TRAN_BUSY);
4030 }
4031
4032 switch (cmd->pm_cmd_code) {
4033 case FC_PORT_BYPASS:
4034 d_id.b24 = *cmd->pm_cmd_buf;
4035 tq = ql_d_id_to_queue(ha, d_id);
4036 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4037 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4038 rval = FC_FAILURE;
4039 }
4040 break;
4041 case FC_PORT_UNBYPASS:
4042 d_id.b24 = *cmd->pm_cmd_buf;
4043 tq = ql_d_id_to_queue(ha, d_id);
4044 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4045 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4046 rval = FC_FAILURE;
4047 }
4048 break;
4049 case FC_PORT_GET_FW_REV:
4050 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4051 pha->fw_minor_version, pha->fw_subminor_version);
4052 length = strlen(buf) + 1;
4053 if (cmd->pm_data_len < length) {
4054 cmd->pm_data_len = length;
4055 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4056 rval = FC_FAILURE;
4057 } else {
4058 (void) strcpy(cmd->pm_data_buf, buf);
4059 }
4060 break;
4061
4062 case FC_PORT_GET_FCODE_REV: {
4063 caddr_t fcode_ver_buf = NULL;
4064
4065 i0 = 0;
4066 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4067 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4068 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4069 (caddr_t)&fcode_ver_buf, &i0);
4070 length = (uint_t)i0;
4071
4072 if (rval2 != DDI_PROP_SUCCESS) {
4073 EL(ha, "failed, getting version = %xh\n", rval2);
4074 length = 20;
4075 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4076 if (fcode_ver_buf != NULL) {
4077 (void) sprintf(fcode_ver_buf,
4078 "NO FCODE FOUND");
4079 }
4080 }
4081
4082 if (cmd->pm_data_len < length) {
4083 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4084 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4085 cmd->pm_data_len = length;
4086 rval = FC_FAILURE;
4087 } else if (fcode_ver_buf != NULL) {
4088 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4089 length);
4090 }
4091
4092 if (fcode_ver_buf != NULL) {
4093 kmem_free(fcode_ver_buf, length);
4094 }
4095 break;
4096 }
4097
4098 case FC_PORT_GET_DUMP:
4099 QL_DUMP_LOCK(pha);
4100 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4101 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4102 "length=%lxh\n", cmd->pm_data_len);
4103 cmd->pm_data_len = pha->risc_dump_size;
4104 rval = FC_FAILURE;
4105 } else if (pha->ql_dump_state & QL_DUMPING) {
4106 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4107 rval = FC_TRAN_BUSY;
4108 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4109 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4110 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4111 } else {
4112 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4113 rval = FC_FAILURE;
4114 }
4115 QL_DUMP_UNLOCK(pha);
4116 break;
4117 case FC_PORT_FORCE_DUMP:
4118 PORTMANAGE_LOCK(ha);
4119 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4120 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4121 rval = FC_FAILURE;
4122 }
4123 PORTMANAGE_UNLOCK(ha);
4124 break;
4125 case FC_PORT_DOWNLOAD_FW:
4126 PORTMANAGE_LOCK(ha);
4127 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4128 if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4129 (uint32_t)cmd->pm_data_len,
4130 ha->flash_fw_addr << 2) != QL_SUCCESS) {
4131 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4132 rval = FC_FAILURE;
4133 }
4134 ql_reset_chip(ha);
4135 set_flags |= ISP_ABORT_NEEDED;
4136 } else {
4137 /* Save copy of the firmware. */
4138 if (pha->risc_code != NULL) {
4139 kmem_free(pha->risc_code, pha->risc_code_size);
4140 pha->risc_code = NULL;
4141 pha->risc_code_size = 0;
4142 }
4143
4144 pha->risc_code = kmem_alloc(cmd->pm_data_len,
4145 KM_SLEEP);
4146 if (pha->risc_code != NULL) {
4147 pha->risc_code_size =
4148 (uint32_t)cmd->pm_data_len;
4149 bcopy(cmd->pm_data_buf, pha->risc_code,
4150 cmd->pm_data_len);
4151
4152 /* Do abort to force reload. */
4153 ql_reset_chip(ha);
4154 if (ql_abort_isp(ha) != QL_SUCCESS) {
4155 kmem_free(pha->risc_code,
4156 pha->risc_code_size);
4157 pha->risc_code = NULL;
4158 pha->risc_code_size = 0;
4159 ql_reset_chip(ha);
4160 (void) ql_abort_isp(ha);
4161 EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4162 " FC_FAILURE\n");
4163 rval = FC_FAILURE;
4164 }
4165 }
4166 }
4167 PORTMANAGE_UNLOCK(ha);
4168 break;
4169 case FC_PORT_GET_DUMP_SIZE:
4170 bp = (uint32_t *)cmd->pm_data_buf;
4171 *bp = pha->risc_dump_size;
4172 break;
4173 case FC_PORT_DIAG:
4174 /*
4175 * Prevents concurrent diags
4176 */
4177 PORTMANAGE_LOCK(ha);
4178
4179 /* Wait for suspension to end. */
4180 for (timer = 0; timer < 3000 &&
4181 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4182 ql_delay(ha, 10000);
4183 }
4184
4185 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4186 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4187 rval = FC_TRAN_BUSY;
4188 PORTMANAGE_UNLOCK(ha);
4189 break;
4190 }
4191
4192 switch (cmd->pm_cmd_flags) {
4193 case QL_DIAG_EXEFMW:
4194 if (ql_start_firmware(ha) != QL_SUCCESS) {
4195 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4196 rval = FC_FAILURE;
4197 }
4198 break;
4199 case QL_DIAG_CHKCMDQUE:
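			/*
			 * Count the commands still outstanding with the
			 * ISP; the diagnostic passes only when none remain.
			 */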
4200 for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4201 i0++) {
4202 cnt += (pha->outstanding_cmds[i0] != NULL);
4203 }
4204 if (cnt != 0) {
4205 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4206 "FC_FAILURE\n");
4207 rval = FC_FAILURE;
4208 }
4209 break;
4210 case QL_DIAG_FMWCHKSUM:
4211 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4212 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4213 "FC_FAILURE\n");
4214 rval = FC_FAILURE;
4215 }
4216 break;
4217 case QL_DIAG_SLFTST:
4218 if (ql_online_selftest(ha) != QL_SUCCESS) {
4219 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4220 rval = FC_FAILURE;
4221 }
4222 ql_reset_chip(ha);
4223 set_flags |= ISP_ABORT_NEEDED;
4224 break;
4225 case QL_DIAG_REVLVL:
4226 if (cmd->pm_stat_len <
4227 sizeof (ql_adapter_revlvl_t)) {
4228 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4229 "slen=%lxh, rlvllen=%lxh\n",
4230 cmd->pm_stat_len,
4231 sizeof (ql_adapter_revlvl_t));
4232 rval = FC_NOMEM;
4233 } else {
4234 bcopy((void *)&(pha->adapter_stats->revlvl),
4235 cmd->pm_stat_buf,
4236 (size_t)cmd->pm_stat_len);
4237 cmd->pm_stat_len =
4238 sizeof (ql_adapter_revlvl_t);
4239 }
4240 break;
4241 case QL_DIAG_LPBMBX:
4242
4243 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4244 EL(ha, "failed, QL_DIAG_LPBMBX "
4245 "FC_INVALID_REQUEST, pmlen=%lxh, "
4246 "reqd=%lxh\n", cmd->pm_data_len,
4247 sizeof (struct app_mbx_cmd));
4248 rval = FC_INVALID_REQUEST;
4249 break;
4250 }
4251 /*
4252 * Don't do the wrap test on a 2200 when the
4253 * firmware is running.
4254 */
4255 if (!CFG_IST(ha, CFG_CTRL_2200)) {
4256 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4257 mr.mb[1] = mcp->mb[1];
4258 mr.mb[2] = mcp->mb[2];
4259 mr.mb[3] = mcp->mb[3];
4260 mr.mb[4] = mcp->mb[4];
4261 mr.mb[5] = mcp->mb[5];
4262 mr.mb[6] = mcp->mb[6];
4263 mr.mb[7] = mcp->mb[7];
4264
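				/*
				 * Save a copy of mailboxes 0-7 in mb[10..17]
				 * so the results of the wrap test can be
				 * compared with the original values below.
				 */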
4265 bcopy(&mr.mb[0], &mr.mb[10],
4266 sizeof (uint16_t) * 8);
4267
4268 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4269 EL(ha, "failed, QL_DIAG_LPBMBX "
4270 "FC_FAILURE\n");
4271 rval = FC_FAILURE;
4272 break;
4273 } else {
4274 for (i0 = 1; i0 < 8; i0++) {
4275 if (mr.mb[i0] !=
4276 mr.mb[i0 + 10]) {
4277 EL(ha, "failed, "
4278 "QL_DIAG_LPBMBX "
4279 "FC_FAILURE-2\n");
4280 rval = FC_FAILURE;
4281 break;
4282 }
4283 }
4284 }
4285
4286 if (rval == FC_FAILURE) {
4287 (void) ql_flash_errlog(ha,
4288 FLASH_ERRLOG_ISP_ERR, 0,
4289 RD16_IO_REG(ha, hccr),
4290 RD16_IO_REG(ha, istatus));
4291 set_flags |= ISP_ABORT_NEEDED;
4292 }
4293 }
4294 break;
4295 case QL_DIAG_LPBDTA:
4296 /*
4297 * For loopback data, we receive the
4298 * data back in pm_stat_buf. This provides
4299 * the user an opportunity to compare the
4300 * transmitted and received data.
4301 *
4302 * NB: lb->options are:
4303 * 0 --> Ten bit loopback
4304 * 1 --> One bit loopback
4305 * 2 --> External loopback
4306 */
4307 if (cmd->pm_data_len > 65536) {
4308 rval = FC_TOOMANY;
4309 EL(ha, "failed, QL_DIAG_LPBDTA "
4310 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4311 break;
4312 }
4313 if (ql_get_dma_mem(ha, &buffer_xmt,
4314 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4315 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4316 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4317 rval = FC_NOMEM;
4318 break;
4319 }
			if (ql_get_dma_mem(ha, &buffer_rcv,
			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
				/* Free the transmit buffer allocated above. */
				ql_free_phys(ha, &buffer_xmt);
				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
				rval = FC_NOMEM;
				break;
			}
4327 ddi_rep_put8(buffer_xmt.acc_handle,
4328 (uint8_t *)cmd->pm_data_buf,
4329 (uint8_t *)buffer_xmt.bp,
4330 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4331
4332 /* 22xx's adapter must be in loop mode for test. */
4333 if (CFG_IST(ha, CFG_CTRL_2200)) {
4334 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4335 if (ha->flags & POINT_TO_POINT ||
4336 (ha->task_daemon_flags & LOOP_DOWN &&
4337 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4338 cnt = *bptr;
4339 *bptr = (uint8_t)
4340 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4341 (void) ql_abort_isp(ha);
4342 *bptr = (uint8_t)cnt;
4343 }
4344 }
4345
4346 /* Shutdown IP. */
4347 if (pha->flags & IP_INITIALIZED) {
4348 (void) ql_shutdown_ip(pha);
4349 }
4350
4351 lb = (lbp_t *)cmd->pm_cmd_buf;
4352 lb->transfer_count =
4353 (uint32_t)cmd->pm_data_len;
4354 lb->transfer_segment_count = 0;
4355 lb->receive_segment_count = 0;
4356 lb->transfer_data_address =
4357 buffer_xmt.cookie.dmac_address;
4358 lb->receive_data_address =
4359 buffer_rcv.cookie.dmac_address;
4360
4361 if (ql_loop_back(ha, 0, lb,
4362 buffer_xmt.cookie.dmac_notused,
4363 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4364 bzero((void *)cmd->pm_stat_buf,
4365 cmd->pm_stat_len);
4366 ddi_rep_get8(buffer_rcv.acc_handle,
4367 (uint8_t *)cmd->pm_stat_buf,
4368 (uint8_t *)buffer_rcv.bp,
4369 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4370 rval = FC_SUCCESS;
4371 } else {
4372 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4373 rval = FC_FAILURE;
4374 }
4375
4376 ql_free_phys(ha, &buffer_xmt);
4377 ql_free_phys(ha, &buffer_rcv);
4378
4379 /* Needed to recover the f/w */
4380 set_flags |= ISP_ABORT_NEEDED;
4381
4382 /* Restart IP if it was shutdown. */
4383 if (pha->flags & IP_ENABLED &&
4384 !(pha->flags & IP_INITIALIZED)) {
4385 (void) ql_initialize_ip(pha);
4386 ql_isp_rcvbuf(pha);
4387 }
4388
4389 break;
4390 case QL_DIAG_ECHO: {
4391 /*
4392 * issue an echo command with a user supplied
4393 * data pattern and destination address
4394 */
4395 echo_t echo; /* temp echo struct */
4396
4397 /* Setup echo cmd & adjust for platform */
4398 opcode = QL_ECHO_CMD;
4399 BIG_ENDIAN_32(&opcode);
4400
			/*
			 * Due to limitations in the QLogic
			 * firmware, the echo data field is
			 * limited to 220 bytes.
			 */
4406 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4407 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4408 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4409 "cmdl1=%lxh, statl2=%lxh\n",
4410 cmd->pm_cmd_len, cmd->pm_stat_len);
4411 rval = FC_TOOMANY;
4412 break;
4413 }
4414
			/*
			 * The input data buffer holds the user
			 * supplied data pattern. The "echoed"
			 * data will be DMAed into the output
			 * data buffer. Therefore the length
			 * of the output buffer must be equal
			 * to or greater than the input buffer
			 * length.
			 */
4424 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4425 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4426 " cmdl1=%lxh, statl2=%lxh\n",
4427 cmd->pm_cmd_len, cmd->pm_stat_len);
4428 rval = FC_TOOMANY;
4429 break;
4430 }
4431 /* add four bytes for the opcode */
4432 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4433
			/*
			 * Are we 32 or 64 bit addressed? We
			 * need to get the appropriate DMA and
			 * set the command options: 64 bit
			 * (bit 6) or 32 bit (no bit 6)
			 * addressing. While we are at it,
			 * request a real echo (bit 15).
			 */
4443 echo.options = BIT_15;
4444 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4445 !(CFG_IST(ha, CFG_CTRL_8081))) {
4446 echo.options = (uint16_t)
4447 (echo.options | BIT_6);
4448 }
4449
4450 /*
4451 * Set up the DMA mappings for the
4452 * output and input data buffers.
4453 * First the output buffer
4454 */
4455 if (ql_get_dma_mem(ha, &buffer_xmt,
4456 (uint32_t)(cmd->pm_data_len + 4),
4457 LITTLE_ENDIAN_DMA,
4458 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4459 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4460 rval = FC_NOMEM;
4461 break;
4462 }
4463 echo.transfer_data_address = buffer_xmt.cookie;
4464
4465 /* Next the input buffer */
4466 if (ql_get_dma_mem(ha, &buffer_rcv,
4467 (uint32_t)(cmd->pm_data_len + 4),
4468 LITTLE_ENDIAN_DMA,
4469 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
				/*
				 * We could not allocate DMA space
				 * for the input buffer, so clean
				 * up by freeing the DMA space
				 * allocated for the output buffer.
				 */
4478 ql_free_phys(ha, &buffer_xmt);
4479 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4480 rval = FC_NOMEM;
4481 break;
4482 }
4483 echo.receive_data_address = buffer_rcv.cookie;
4484
4485 /*
4486 * copy the 4 byte ECHO op code to the
4487 * allocated DMA space
4488 */
4489 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4490 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4491
4492 /*
4493 * copy the user supplied data to the
4494 * allocated DMA space
4495 */
4496 ddi_rep_put8(buffer_xmt.acc_handle,
4497 (uint8_t *)cmd->pm_cmd_buf,
4498 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4499 DDI_DEV_AUTOINCR);
4500
4501 /* Shutdown IP. */
4502 if (pha->flags & IP_INITIALIZED) {
4503 (void) ql_shutdown_ip(pha);
4504 }
4505
4506 /* send the echo */
4507 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4508 ddi_rep_put8(buffer_rcv.acc_handle,
4509 (uint8_t *)buffer_rcv.bp + 4,
4510 (uint8_t *)cmd->pm_stat_buf,
4511 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4512 } else {
4513 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4514 rval = FC_FAILURE;
4515 }
4516
4517 /* Restart IP if it was shutdown. */
4518 if (pha->flags & IP_ENABLED &&
4519 !(pha->flags & IP_INITIALIZED)) {
4520 (void) ql_initialize_ip(pha);
4521 ql_isp_rcvbuf(pha);
4522 }
4523 /* free up our DMA buffers */
4524 ql_free_phys(ha, &buffer_xmt);
4525 ql_free_phys(ha, &buffer_rcv);
4526 break;
4527 }
4528 default:
4529 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4530 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4531 rval = FC_INVALID_REQUEST;
4532 break;
4533 }
4534 PORTMANAGE_UNLOCK(ha);
4535 break;
4536 case FC_PORT_LINK_STATE:
	/* Check for a null (all-zero) port name. */
4538 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4539 index++) {
4540 if (cmd->pm_cmd_buf[index] != 0) {
4541 break;
4542 }
4543 }
4544
4545 /* If name not null. */
4546 if (index < 8 && cmd->pm_cmd_len >= 8) {
4547 /* Locate device queue. */
4548 tq = NULL;
4549 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4550 tq == NULL; index++) {
4551 for (link = ha->dev[index].first; link != NULL;
4552 link = link->next) {
4553 tq = link->base_address;
4554
4555 if (bcmp((void *)&tq->port_name[0],
4556 (void *)cmd->pm_cmd_buf, 8) == 0) {
4557 break;
4558 } else {
4559 tq = NULL;
4560 }
4561 }
4562 }
4563
4564 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4565 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4566 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4567 } else {
4568 cnt = FC_PORT_SPEED_MASK(ha->state) |
4569 FC_STATE_OFFLINE;
4570 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4571 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4572 }
4573 } else {
4574 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4575 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4576 }
4577 break;
4578 case FC_PORT_INITIALIZE:
4579 if (cmd->pm_cmd_len >= 8) {
4580 tq = NULL;
4581 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4582 tq == NULL; index++) {
4583 for (link = ha->dev[index].first; link != NULL;
4584 link = link->next) {
4585 tq = link->base_address;
4586
4587 if (bcmp((void *)&tq->port_name[0],
4588 (void *)cmd->pm_cmd_buf, 8) == 0) {
4589 if (!VALID_DEVICE_ID(ha,
4590 tq->loop_id)) {
4591 tq = NULL;
4592 }
4593 break;
4594 } else {
4595 tq = NULL;
4596 }
4597 }
4598 }
4599
4600 if (tq == NULL || ql_target_reset(ha, tq,
4601 ha->loop_reset_delay) != QL_SUCCESS) {
4602 EL(ha, "failed, FC_PORT_INITIALIZE "
4603 "FC_FAILURE\n");
4604 rval = FC_FAILURE;
4605 }
4606 } else {
4607 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4608 "clen=%lxh\n", cmd->pm_cmd_len);
4609
4610 rval = FC_FAILURE;
4611 }
4612 break;
4613 case FC_PORT_RLS:
4614 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4615 EL(ha, "failed, buffer size passed: %lxh, "
4616 "req: %lxh\n", cmd->pm_data_len,
4617 (sizeof (fc_rls_acc_t)));
4618 rval = FC_FAILURE;
4619 } else if (LOOP_NOT_READY(pha)) {
4620 EL(ha, "loop NOT ready\n");
4621 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4622 } else if (ql_get_link_status(ha, ha->loop_id,
4623 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4624 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4625 rval = FC_FAILURE;
4626 #ifdef _BIG_ENDIAN
4627 } else {
4628 fc_rls_acc_t *rls;
4629
4630 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4631 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4632 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4633 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4634 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4635 #endif /* _BIG_ENDIAN */
4636 }
4637 break;
4638 case FC_PORT_GET_NODE_ID:
4639 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4640 cmd->pm_data_buf) != QL_SUCCESS) {
4641 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4642 rval = FC_FAILURE;
4643 }
4644 break;
4645 case FC_PORT_SET_NODE_ID:
4646 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4647 cmd->pm_data_buf) != QL_SUCCESS) {
4648 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4649 rval = FC_FAILURE;
4650 }
4651 break;
4652 case FC_PORT_DOWNLOAD_FCODE:
4653 PORTMANAGE_LOCK(ha);
4654 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4655 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4656 (uint32_t)cmd->pm_data_len);
4657 } else {
4658 if (cmd->pm_data_buf[0] == 4 &&
4659 cmd->pm_data_buf[8] == 0 &&
4660 cmd->pm_data_buf[9] == 0x10 &&
4661 cmd->pm_data_buf[10] == 0 &&
4662 cmd->pm_data_buf[11] == 0) {
4663 rval = ql_24xx_load_flash(ha,
4664 (uint8_t *)cmd->pm_data_buf,
4665 (uint32_t)cmd->pm_data_len,
4666 ha->flash_fw_addr << 2);
4667 } else {
4668 rval = ql_24xx_load_flash(ha,
4669 (uint8_t *)cmd->pm_data_buf,
4670 (uint32_t)cmd->pm_data_len, 0);
4671 }
4672 }
4673
4674 if (rval != QL_SUCCESS) {
4675 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4676 rval = FC_FAILURE;
4677 } else {
4678 rval = FC_SUCCESS;
4679 }
4680 ql_reset_chip(ha);
4681 set_flags |= ISP_ABORT_NEEDED;
4682 PORTMANAGE_UNLOCK(ha);
4683 break;
4684 default:
4685 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4686 rval = FC_BADCMD;
4687 break;
4688 }
4689
4690 /* Wait for suspension to end. */
4691 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4692 timer = 0;
4693
4694 while (timer++ < 3000 &&
4695 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4696 ql_delay(ha, 10000);
4697 }
4698
4699 ql_restart_queues(ha);
4700
4701 if (rval != FC_SUCCESS) {
4702 EL(ha, "failed, rval = %xh\n", rval);
4703 } else {
4704 /*EMPTY*/
4705 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4706 }
4707
4708 return (rval);
4709 }
4710
4711 static opaque_t
4712 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4713 {
4714 port_id_t id;
4715 ql_adapter_state_t *ha;
4716 ql_tgt_t *tq;
4717
4718 id.r.rsvd_1 = 0;
4719 id.b24 = d_id.port_id;
4720
4721 ha = ql_fca_handle_to_state(fca_handle);
4722 if (ha == NULL) {
4723 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4724 (void *)fca_handle);
4725 return (NULL);
4726 }
4727 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4728
4729 tq = ql_d_id_to_queue(ha, id);
4730
4731 if (tq == NULL) {
4732 EL(ha, "failed, tq=NULL\n");
4733 } else {
4734 /*EMPTY*/
4735 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4736 }
4737 return (tq);
4738 }
4739
4740 /* ************************************************************************ */
4741 /* FCA Driver Local Support Functions. */
4742 /* ************************************************************************ */
4743
4744 /*
4745 * ql_cmd_setup
4746 * Verifies proper command.
4747 *
4748 * Input:
4749 * fca_handle = handle setup by ql_bind_port().
4750 * pkt = pointer to fc_packet.
4751 * rval = pointer for return value.
4752 *
4753 * Returns:
4754 * Adapter state pointer, NULL = failure.
4755 *
4756 * Context:
4757 * Kernel context.
4758 */
4759 static ql_adapter_state_t *
4760 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4761 {
4762 ql_adapter_state_t *ha, *pha;
4763 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
4764 ql_tgt_t *tq;
4765 port_id_t d_id;
4766
4767 pkt->pkt_resp_resid = 0;
4768 pkt->pkt_data_resid = 0;
4769
4770 /* check that the handle is assigned by this FCA */
4771 ha = ql_fca_handle_to_state(fca_handle);
4772 if (ha == NULL) {
4773 *rval = FC_UNBOUND;
4774 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4775 (void *)fca_handle);
4776 return (NULL);
4777 }
4778 pha = ha->pha;
4779
4780 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4781
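	/* During a panic crash dump, bypass the state checks. */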
4782 if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4783 return (ha);
4784 }
4785
4786 if (!(pha->flags & ONLINE)) {
4787 pkt->pkt_state = FC_PKT_LOCAL_RJT;
4788 pkt->pkt_reason = FC_REASON_HW_ERROR;
4789 *rval = FC_TRANSPORT_ERROR;
4790 EL(ha, "failed, not online hf=%xh\n", pha->flags);
4791 return (NULL);
4792 }
4793
4794 /* Exit on loop down. */
4795 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4796 pha->task_daemon_flags & LOOP_DOWN &&
4797 pha->loop_down_timer <= pha->loop_down_abort_time) {
4798 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4799 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4800 *rval = FC_OFFLINE;
4801 EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4802 return (NULL);
4803 }
4804
4805 if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4806 pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4807 tq = (ql_tgt_t *)pkt->pkt_fca_device;
4808 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4809 d_id.r.rsvd_1 = 0;
4810 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4811 tq = ql_d_id_to_queue(ha, d_id);
4812
4813 pkt->pkt_fca_device = (opaque_t)tq;
4814 }
4815
4816 if (tq != NULL) {
4817 DEVICE_QUEUE_LOCK(tq);
4818 if (tq->flags & (TQF_RSCN_RCVD |
4819 TQF_NEED_AUTHENTICATION)) {
4820 *rval = FC_DEVICE_BUSY;
4821 DEVICE_QUEUE_UNLOCK(tq);
4822 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4823 tq->flags, tq->d_id.b24);
4824 return (NULL);
4825 }
4826 DEVICE_QUEUE_UNLOCK(tq);
4827 }
4828 }
4829
4830 /*
4831 * Check DMA pointers.
4832 */
4833 *rval = DDI_SUCCESS;
4834 if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4835 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4836 *rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4837 if (*rval == DDI_SUCCESS) {
4838 *rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4839 }
4840 }
4841
4842 if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4843 pkt->pkt_rsplen != 0) {
4844 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4845 *rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4846 if (*rval == DDI_SUCCESS) {
4847 *rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4848 }
4849 }
4850
	/*
	 * Minimum branch conditional; change it with care. The bitwise
	 * ANDs below intentionally evaluate every term without
	 * short-circuit branches.
	 */
4854 if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4855 (pkt->pkt_datalen != 0)) != 0) {
4856 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4857 *rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4858 if (*rval == DDI_SUCCESS) {
4859 *rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4860 }
4861 }
4862
4863 if (*rval != DDI_SUCCESS) {
4864 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4865 pkt->pkt_reason = FC_REASON_DMA_ERROR;
4866
4867 /* Do command callback. */
4868 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4869 ql_awaken_task_daemon(ha, sp, 0, 0);
4870 }
4871 *rval = FC_BADPACKET;
4872 EL(ha, "failed, bad DMA pointers\n");
4873 return (NULL);
4874 }
4875
4876 if (sp->magic_number != QL_FCA_BRAND) {
4877 *rval = FC_BADPACKET;
4878 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4879 return (NULL);
4880 }
4881 *rval = FC_SUCCESS;
4882
4883 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4884
4885 return (ha);
4886 }
4887
4888 /*
4889 * ql_els_plogi
 *	Issue an extended link service port login request.
4891 *
4892 * Input:
4893 * ha = adapter state pointer.
4894 * pkt = pointer to fc_packet.
4895 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRAN_BUSY - the exchange or port is busy, retry later.
 *	FC_OFFLINE - the adapter is offline.
 *	QL_CONSUMED - a p2p passthru iocb was queued for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
4899 *
4900 * Context:
4901 * Kernel context.
4902 */
4903 static int
4904 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4905 {
4906 ql_tgt_t *tq = NULL;
4907 port_id_t d_id;
4908 la_els_logi_t acc;
4909 class_svc_param_t *class3_param;
4910 int ret;
4911 int rval = FC_SUCCESS;
4912
4913 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4914 pkt->pkt_cmd_fhdr.d_id);
4915
4916 TASK_DAEMON_LOCK(ha);
4917 if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4918 TASK_DAEMON_UNLOCK(ha);
4919 QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4920 return (FC_OFFLINE);
4921 }
4922 TASK_DAEMON_UNLOCK(ha);
4923
4924 bzero(&acc, sizeof (acc));
4925 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4926
4927 ret = QL_SUCCESS;
4928
4929 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology the adapter sends a PLOGI after
		 * determining that it has the N_Port login initiative.
		 */
4934 ret = ql_p2p_plogi(ha, pkt);
4935 }
4936 if (ret == QL_CONSUMED) {
4937 return (ret);
4938 }
4939
4940 switch (ret = ql_login_port(ha, d_id)) {
4941 case QL_SUCCESS:
4942 tq = ql_d_id_to_queue(ha, d_id);
4943 break;
4944
4945 case QL_LOOP_ID_USED:
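		/* The loop ID was in use; retry the login once. */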
4946 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4947 tq = ql_d_id_to_queue(ha, d_id);
4948 }
4949 break;
4950
4951 default:
4952 break;
4953 }
4954
4955 if (ret != QL_SUCCESS) {
4956 /*
4957 * Invalidate this entry so as to seek a fresh loop ID
4958 * in case firmware reassigns it to something else
4959 */
4960 tq = ql_d_id_to_queue(ha, d_id);
4961 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4962 tq->loop_id = PORT_NO_LOOP_ID;
4963 }
4964 } else if (tq) {
4965 (void) ql_get_port_database(ha, tq, PDF_ADISC);
4966 }
4967
4968 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4969 (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4970
4971 /* Build ACC. */
4972 acc.ls_code.ls_code = LA_ELS_ACC;
4973 acc.common_service.fcph_version = 0x2006;
4974 acc.common_service.cmn_features = 0x8800;
4975 acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
4976 acc.common_service.conc_sequences = 0xff;
4977 acc.common_service.relative_offset = 0x03;
4978 acc.common_service.e_d_tov = 0x7d0;
4979
4980 bcopy((void *)&tq->port_name[0],
4981 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4982 bcopy((void *)&tq->node_name[0],
4983 (void *)&acc.node_ww_name.raw_wwn[0], 8);
4984
4985 class3_param = (class_svc_param_t *)&acc.class_3;
4986 class3_param->class_valid_svc_opt = 0x8000;
4987 class3_param->recipient_ctl = tq->class3_recipient_ctl;
4988 class3_param->rcv_data_size = tq->class3_rcv_data_size;
4989 class3_param->conc_sequences = tq->class3_conc_sequences;
4990 class3_param->open_sequences_per_exch =
4991 tq->class3_open_sequences_per_exch;
4992
		if (ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY) {
4994 acc.ls_code.ls_code = LA_ELS_RJT;
4995 pkt->pkt_state = FC_PKT_TRAN_BSY;
4996 pkt->pkt_reason = FC_REASON_XCHG_BSY;
4997 EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
4998 rval = FC_TRAN_BUSY;
4999 } else {
5000 DEVICE_QUEUE_LOCK(tq);
5001 tq->logout_sent = 0;
5002 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5003 if (CFG_IST(ha, CFG_CTRL_242581)) {
5004 tq->flags |= TQF_IIDMA_NEEDED;
5005 }
5006 DEVICE_QUEUE_UNLOCK(tq);
5007
5008 if (CFG_IST(ha, CFG_CTRL_242581)) {
5009 TASK_DAEMON_LOCK(ha);
5010 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5011 TASK_DAEMON_UNLOCK(ha);
5012 }
5013
5014 pkt->pkt_state = FC_PKT_SUCCESS;
5015 }
5016 } else {
5017 /* Build RJT. */
5018 acc.ls_code.ls_code = LA_ELS_RJT;
5019
5020 switch (ret) {
5021 case QL_FUNCTION_TIMEOUT:
5022 pkt->pkt_state = FC_PKT_TIMEOUT;
5023 pkt->pkt_reason = FC_REASON_HW_ERROR;
5024 break;
5025
5026 case QL_MEMORY_ALLOC_FAILED:
5027 pkt->pkt_state = FC_PKT_LOCAL_BSY;
5028 pkt->pkt_reason = FC_REASON_NOMEM;
5029 rval = FC_TRAN_BUSY;
5030 break;
5031
5032 case QL_FABRIC_NOT_INITIALIZED:
5033 pkt->pkt_state = FC_PKT_FABRIC_BSY;
5034 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5035 rval = FC_TRAN_BUSY;
5036 break;
5037
5038 default:
5039 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5040 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5041 break;
5042 }
5043
		EL(ha, "Plogi unsuccessful for %xh state %xh reason %xh "
5045 "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
5046 pkt->pkt_reason, ret, rval);
5047 }
5048
5049 if (tq != NULL) {
5050 DEVICE_QUEUE_LOCK(tq);
5051 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5052 if (rval == FC_TRAN_BUSY) {
5053 if (tq->d_id.b24 != BROADCAST_ADDR) {
5054 tq->flags |= TQF_NEED_AUTHENTICATION;
5055 }
5056 }
5057 DEVICE_QUEUE_UNLOCK(tq);
5058 }
5059
5060 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5061 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5062
5063 if (rval != FC_SUCCESS) {
5064 EL(ha, "failed, rval = %xh\n", rval);
5065 } else {
5066 /*EMPTY*/
5067 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5068 }
5069 return (rval);
5070 }
5071
5072 /*
5073 * ql_p2p_plogi
5074 * Start an extended link service port login request using
5075 * an ELS Passthru iocb.
5076 *
5077 * Input:
5078 * ha = adapter state pointer.
5079 * pkt = pointer to fc_packet.
5080 *
5081 * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
5083 *
5084 * Context:
5085 * Kernel context.
5086 */
5087 static int
5088 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5089 {
5090 uint16_t id;
5091 ql_tgt_t tmp;
5092 ql_tgt_t *tq = &tmp;
5093 int rval;
5094 port_id_t d_id;
5095 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5096
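	/* tmp is a stack scratch entry used for port database queries. */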
5097 tq->d_id.b.al_pa = 0;
5098 tq->d_id.b.area = 0;
5099 tq->d_id.b.domain = 0;
5100
	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary. This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
5106 for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5107 tq->loop_id = id;
5108 rval = ql_get_port_database(ha, tq, PDF_NONE);
5109 EL(ha, "rval=%xh\n", rval);
5110 /* check all the ones not logged in for possible use */
5111 if (rval == QL_NOT_LOGGED_IN) {
5112 if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5113 ha->n_port->n_port_handle = tq->loop_id;
5114 EL(ha, "n_port_handle =%xh, master state=%x\n",
5115 tq->loop_id, tq->master_state);
5116 break;
5117 }
5118 /*
5119 * Use a 'port unavailable' entry only
5120 * if we used it before.
5121 */
5122 if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5123 /* if the port_id matches, reuse it */
5124 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5125 EL(ha, "n_port_handle =%xh,"
5126 "master state=%xh\n",
5127 tq->loop_id, tq->master_state);
5128 break;
5129 } else if (tq->loop_id ==
5130 ha->n_port->n_port_handle) {
					/* use a temp to avoid a lint error */
5132 uint16_t *hndl;
5133 uint16_t val;
5134
5135 hndl = &ha->n_port->n_port_handle;
5136 val = *hndl;
5137 val++;
5138 val++;
5139 *hndl = val;
5140 }
5141 EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5142 "master state=%x\n", rval, id, tq->loop_id,
5143 tq->master_state);
5144 }
5145
5146 }
5147 if (rval == QL_SUCCESS) {
5148 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5149 ha->n_port->n_port_handle = tq->loop_id;
5150 EL(ha, "n_port_handle =%xh, master state=%x\n",
5151 tq->loop_id, tq->master_state);
5152 break;
5153 }
5154 EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5155 "master state=%x\n", rval, id, tq->loop_id,
5156 tq->master_state);
5157 }
5158 }
5159 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5160
5161 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5162 tq = ql_d_id_to_queue(ha, d_id);
5163 ql_timeout_insert(ha, tq, sp);
5164 ql_start_iocb(ha, sp);
5165
5166 return (QL_CONSUMED);
5167 }
5168
5169
5170 /*
5171 * ql_els_flogi
 *	Issue an extended link service fabric login request.
5173 *
5174 * Input:
5175 * ha = adapter state pointer.
5176 * pkt = pointer to fc_packet.
5177 *
5178 * Returns:
5179 * FC_SUCCESS - the packet was accepted for transport.
5180 * FC_TRANSPORT_ERROR - a transport error occurred.
5181 *
5182 * Context:
5183 * Kernel context.
5184 */
5185 static int
5186 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5187 {
5188 ql_tgt_t *tq = NULL;
5189 port_id_t d_id;
5190 la_els_logi_t acc;
5191 class_svc_param_t *class3_param;
5192 int rval = FC_SUCCESS;
5193 int accept = 0;
5194
5195 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5196 pkt->pkt_cmd_fhdr.d_id);
5197
5198 bzero(&acc, sizeof (acc));
5199 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5200
5201 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * A d_id of zero in a FLOGI accept response in a
		 * point-to-point topology triggers evaluation of N_Port
		 * login initiative.
		 */
5206 pkt->pkt_resp_fhdr.d_id = 0;
5207 /*
5208 * An N_Port already logged in with the firmware
5209 * will have the only database entry.
5210 */
5211 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5212 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5213 }
5214
5215 if (tq != NULL) {
5216 /*
5217 * If the target port has initiative send
5218 * up a PLOGI about the new device.
5219 */
5220 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5221 (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5222 &ha->init_ctrl_blk.cb24.port_name[0] :
5223 &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5224 ha->send_plogi_timer = 3;
5225 } else {
5226 ha->send_plogi_timer = 0;
5227 }
5228 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5229 } else {
5230 /*
5231 * An N_Port not logged in with the firmware will not
5232 * have a database entry. We accept anyway and rely
5233 * on a PLOGI from the upper layers to set the d_id
5234 * and s_id.
5235 */
5236 accept = 1;
5237 }
5238 } else {
5239 tq = ql_d_id_to_queue(ha, d_id);
5240 }
	if ((tq != NULL) || (accept != 0)) {
5242 /* Build ACC. */
5243 pkt->pkt_state = FC_PKT_SUCCESS;
5244 class3_param = (class_svc_param_t *)&acc.class_3;
5245
5246 acc.ls_code.ls_code = LA_ELS_ACC;
5247 acc.common_service.fcph_version = 0x2006;
5248 if (ha->topology & QL_N_PORT) {
5249 /* clear F_Port indicator */
5250 acc.common_service.cmn_features = 0x0800;
5251 } else {
5252 acc.common_service.cmn_features = 0x1b00;
5253 }
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			acc.common_service.rx_bufsize = CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			acc.common_service.rx_bufsize = CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
5261 acc.common_service.conc_sequences = 0xff;
5262 acc.common_service.relative_offset = 0x03;
5263 acc.common_service.e_d_tov = 0x7d0;
5264 if (accept) {
5265 /* Use the saved N_Port WWNN and WWPN */
5266 if (ha->n_port != NULL) {
5267 bcopy((void *)&ha->n_port->port_name[0],
5268 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5269 bcopy((void *)&ha->n_port->node_name[0],
5270 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5271 /* mark service options invalid */
5272 class3_param->class_valid_svc_opt = 0x0800;
5273 } else {
5274 EL(ha, "ha->n_port is NULL\n");
5275 /* Build RJT. */
5276 acc.ls_code.ls_code = LA_ELS_RJT;
5277
5278 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5279 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5280 }
5281 } else {
5282 bcopy((void *)&tq->port_name[0],
5283 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5284 bcopy((void *)&tq->node_name[0],
5285 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5286
5287 class3_param = (class_svc_param_t *)&acc.class_3;
5288 class3_param->class_valid_svc_opt = 0x8800;
5289 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5290 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5291 class3_param->conc_sequences =
5292 tq->class3_conc_sequences;
5293 class3_param->open_sequences_per_exch =
5294 tq->class3_open_sequences_per_exch;
5295 }
5296 } else {
5297 /* Build RJT. */
5298 acc.ls_code.ls_code = LA_ELS_RJT;
5299
5300 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5301 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5302 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5303 }
5304
5305 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5306 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5307
5308 if (rval != FC_SUCCESS) {
5309 EL(ha, "failed, rval = %xh\n", rval);
5310 } else {
5311 /*EMPTY*/
5312 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5313 }
5314 return (rval);
5315 }
5316
5317 /*
5318 * ql_els_logo
 *	Issue an extended link service logout request.
5320 *
5321 * Input:
5322 * ha = adapter state pointer.
5323 * pkt = pointer to fc_packet.
5324 *
5325 * Returns:
5326 * FC_SUCCESS - the packet was accepted for transport.
5327 * FC_TRANSPORT_ERROR - a transport error occurred.
5328 *
5329 * Context:
5330 * Kernel context.
5331 */
5332 static int
5333 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5334 {
5335 port_id_t d_id;
5336 ql_tgt_t *tq;
5337 la_els_logo_t acc;
5338 int rval = FC_SUCCESS;
5339
5340 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5341 pkt->pkt_cmd_fhdr.d_id);
5342
5343 bzero(&acc, sizeof (acc));
5344 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5345
5346 tq = ql_d_id_to_queue(ha, d_id);
5347 if (tq) {
5348 DEVICE_QUEUE_LOCK(tq);
5349 if (tq->d_id.b24 == BROADCAST_ADDR) {
5350 DEVICE_QUEUE_UNLOCK(tq);
5351 return (FC_SUCCESS);
5352 }
5353
5354 tq->flags |= TQF_NEED_AUTHENTICATION;
5355
5356 do {
5357 DEVICE_QUEUE_UNLOCK(tq);
5358 (void) ql_abort_device(ha, tq, 1);
5359
5360 /*
5361 * Wait for commands to drain in F/W (doesn't
5362 * take more than a few milliseconds)
5363 */
5364 ql_delay(ha, 10000);
5365
5366 DEVICE_QUEUE_LOCK(tq);
5367 } while (tq->outcnt);
5368
5369 DEVICE_QUEUE_UNLOCK(tq);
5370 }
5371
5372 if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5373 /* Build ACC. */
5374 acc.ls_code.ls_code = LA_ELS_ACC;
5375
5376 pkt->pkt_state = FC_PKT_SUCCESS;
5377 } else {
5378 /* Build RJT. */
5379 acc.ls_code.ls_code = LA_ELS_RJT;
5380
5381 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5382 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5383 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5384 }
5385
5386 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5387 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5388
5389 if (rval != FC_SUCCESS) {
5390 EL(ha, "failed, rval = %xh\n", rval);
5391 } else {
5392 /*EMPTY*/
5393 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5394 }
5395 return (rval);
5396 }
5397
5398 /*
5399 * ql_els_prli
 *	Issue an extended link service process login request.
5401 *
5402 * Input:
5403 * ha = adapter state pointer.
5404 * pkt = pointer to fc_packet.
5405 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	QL_CONSUMED - a passthru iocb was queued for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
5409 *
5410 * Context:
5411 * Kernel context.
5412 */
5413 static int
5414 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5415 {
5416 ql_tgt_t *tq;
5417 port_id_t d_id;
5418 la_els_prli_t acc;
5419 prli_svc_param_t *param;
5420 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5421 int rval = FC_SUCCESS;
5422
5423 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5424 pkt->pkt_cmd_fhdr.d_id);
5425
5426 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5427
5428 tq = ql_d_id_to_queue(ha, d_id);
5429 if (tq != NULL) {
5430 (void) ql_get_port_database(ha, tq, PDF_NONE);
5431
5432 if ((ha->topology & QL_N_PORT) &&
5433 (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5434 ql_timeout_insert(ha, tq, sp);
5435 ql_start_iocb(ha, sp);
5436 rval = QL_CONSUMED;
5437 } else {
5438 /* Build ACC. */
5439 bzero(&acc, sizeof (acc));
5440 acc.ls_code = LA_ELS_ACC;
5441 acc.page_length = 0x10;
5442 acc.payload_length = tq->prli_payload_length;
5443
5444 param = (prli_svc_param_t *)&acc.service_params[0];
5445 param->type = 0x08;
5446 param->rsvd = 0x00;
5447 param->process_assoc_flags = tq->prli_svc_param_word_0;
5448 param->process_flags = tq->prli_svc_param_word_3;
5449
5450 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5451 (uint8_t *)pkt->pkt_resp, sizeof (acc),
5452 DDI_DEV_AUTOINCR);
5453
5454 pkt->pkt_state = FC_PKT_SUCCESS;
5455 }
5456 } else {
5457 la_els_rjt_t rjt;
5458
5459 /* Build RJT. */
5460 bzero(&rjt, sizeof (rjt));
5461 rjt.ls_code.ls_code = LA_ELS_RJT;
5462
5463 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5464 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5465
5466 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5467 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5468 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5469 }
5470
5471 if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5472 EL(ha, "failed, rval = %xh\n", rval);
5473 } else {
5474 /*EMPTY*/
5475 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5476 }
5477 return (rval);
5478 }
5479
5480 /*
5481 * ql_els_prlo
 *	Issue an extended link service process logout request.
5483 *
5484 * Input:
5485 * ha = adapter state pointer.
5486 * pkt = pointer to fc_packet.
5487 *
5488 * Returns:
5489 * FC_SUCCESS - the packet was accepted for transport.
5490 * FC_TRANSPORT_ERROR - a transport error occurred.
5491 *
5492 * Context:
5493 * Kernel context.
5494 */
5495 /* ARGSUSED */
5496 static int
5497 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5498 {
5499 la_els_prli_t acc;
5500 int rval = FC_SUCCESS;
5501
5502 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5503 pkt->pkt_cmd_fhdr.d_id);
5504
5505 /* Build ACC. */
5506 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5507 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5508
5509 acc.ls_code = LA_ELS_ACC;
5510 acc.service_params[2] = 1;
5511
5512 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5513 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5514
5515 pkt->pkt_state = FC_PKT_SUCCESS;
5516
5517 if (rval != FC_SUCCESS) {
5518 EL(ha, "failed, rval = %xh\n", rval);
5519 } else {
5520 /*EMPTY*/
5521 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5522 }
5523 return (rval);
5524 }
5525
5526 /*
5527 * ql_els_adisc
 *	Issue an extended link service address discovery request.
5529 *
5530 * Input:
5531 * ha = adapter state pointer.
5532 * pkt = pointer to fc_packet.
5533 *
5534 * Returns:
5535 * FC_SUCCESS - the packet was accepted for transport.
5536 * FC_TRANSPORT_ERROR - a transport error occurred.
5537 *
5538 * Context:
5539 * Kernel context.
5540 */
5541 static int
5542 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5543 {
5544 ql_dev_id_list_t *list;
5545 uint32_t list_size;
5546 ql_link_t *link;
5547 ql_tgt_t *tq;
5548 ql_lun_t *lq;
5549 port_id_t d_id;
5550 la_els_adisc_t acc;
5551 uint16_t index, loop_id;
5552 ql_mbx_data_t mr;
5553 int rval = FC_SUCCESS;
5554
5555 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5556
5557 bzero(&acc, sizeof (acc));
5558 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5559
	/*
	 * MBC_GET_PORT_DATABASE causes the firmware to send an
	 * ADISC out to the device.
	 */
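	/* Locate the device queue hashed by AL_PA. */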
5564 index = ql_alpa_to_index[d_id.b.al_pa];
5565 tq = NULL;
5566 for (link = ha->dev[index].first; link != NULL; link = link->next) {
5567 tq = link->base_address;
5568 if (tq->d_id.b24 == d_id.b24) {
5569 break;
5570 } else {
5571 tq = NULL;
5572 }
5573 }
5574
5575 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5576 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5577 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5578
5579 if (list != NULL &&
5580 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5581 QL_SUCCESS) {
5582
5583 for (index = 0; index < mr.mb[1]; index++) {
5584 ql_dev_list(ha, list, index, &d_id, &loop_id);
5585
5586 if (tq->d_id.b24 == d_id.b24) {
5587 tq->loop_id = loop_id;
5588 break;
5589 }
5590 }
5591 } else {
5592 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5593 QL_NAME, ha->instance, d_id.b24);
5594 tq = NULL;
5595 }
5596 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5597 cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5598 QL_NAME, ha->instance, tq->d_id.b24);
5599 tq = NULL;
5600 }
5601
5602 if (list != NULL) {
5603 kmem_free(list, list_size);
5604 }
5605 }
5606
5607 if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5608 ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5609
5610 /* Build ACC. */
5611
5612 DEVICE_QUEUE_LOCK(tq);
5613 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5614 if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5615 for (link = tq->lun_queues.first; link != NULL;
5616 link = link->next) {
5617 lq = link->base_address;
5618
5619 if (lq->cmd.first != NULL) {
5620 ql_next(ha, lq);
5621 DEVICE_QUEUE_LOCK(tq);
5622 }
5623 }
5624 }
5625 DEVICE_QUEUE_UNLOCK(tq);
5626
5627 acc.ls_code.ls_code = LA_ELS_ACC;
5628 acc.hard_addr.hard_addr = tq->hard_addr.b24;
5629
5630 bcopy((void *)&tq->port_name[0],
5631 (void *)&acc.port_wwn.raw_wwn[0], 8);
5632 bcopy((void *)&tq->node_name[0],
5633 (void *)&acc.node_wwn.raw_wwn[0], 8);
5634
5635 acc.nport_id.port_id = tq->d_id.b24;
5636
5637 pkt->pkt_state = FC_PKT_SUCCESS;
5638 } else {
5639 /* Build RJT. */
5640 acc.ls_code.ls_code = LA_ELS_RJT;
5641
5642 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5643 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5644 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5645 }
5646
5647 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5648 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5649
5650 if (rval != FC_SUCCESS) {
5651 EL(ha, "failed, rval = %xh\n", rval);
5652 } else {
5653 /*EMPTY*/
5654 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5655 }
5656 return (rval);
5657 }
5658
5659 /*
5660 * ql_els_linit
 *	Issue an extended link service loop initialize request.
5662 *
5663 * Input:
5664 * ha = adapter state pointer.
5665 * pkt = pointer to fc_packet.
5666 *
5667 * Returns:
5668 * FC_SUCCESS - the packet was accepted for transport.
5669 * FC_TRANSPORT_ERROR - a transport error occurred.
5670 *
5671 * Context:
5672 * Kernel context.
5673 */
5674 static int
5675 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5676 {
5677 ddi_dma_cookie_t *cp;
5678 uint32_t cnt;
5679 conv_num_t n;
5680 port_id_t d_id;
5681 int rval = FC_SUCCESS;
5682
5683 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5684
5685 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5686 if (ha->topology & QL_SNS_CONNECTION) {
5687 fc_linit_req_t els;
5688 lfa_cmd_t lfa;
5689
5690 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5691 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5692
5693 /* Setup LFA mailbox command data. */
5694 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5695
5696 lfa.resp_buffer_length[0] = 4;
5697
5698 cp = pkt->pkt_resp_cookie;
5699 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5700 n.size64 = (uint64_t)cp->dmac_laddress;
5701 LITTLE_ENDIAN_64(&n.size64);
5702 } else {
5703 n.size32[0] = LSD(cp->dmac_laddress);
5704 LITTLE_ENDIAN_32(&n.size32[0]);
5705 n.size32[1] = MSD(cp->dmac_laddress);
5706 LITTLE_ENDIAN_32(&n.size32[1]);
5707 }
5708
5709 /* Set buffer address. */
5710 for (cnt = 0; cnt < 8; cnt++) {
5711 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5712 }
5713
5714 lfa.subcommand_length[0] = 4;
5715 n.size32[0] = d_id.b24;
5716 LITTLE_ENDIAN_32(&n.size32[0]);
5717 lfa.addr[0] = n.size8[0];
5718 lfa.addr[1] = n.size8[1];
5719 lfa.addr[2] = n.size8[2];
5720 lfa.subcommand[1] = 0x70;
5721 lfa.payload[2] = els.func;
5722 lfa.payload[4] = els.lip_b3;
5723 lfa.payload[5] = els.lip_b4;
5724
5725 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5726 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5727 } else {
5728 pkt->pkt_state = FC_PKT_SUCCESS;
5729 }
5730 } else {
5731 fc_linit_resp_t rjt;
5732
5733 /* Build RJT. */
5734 bzero(&rjt, sizeof (rjt));
5735 rjt.ls_code.ls_code = LA_ELS_RJT;
5736
5737 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5738 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5739
5740 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5741 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5742 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5743 }
5744
5745 if (rval != FC_SUCCESS) {
5746 EL(ha, "failed, rval = %xh\n", rval);
5747 } else {
5748 /*EMPTY*/
5749 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5750 }
5751 return (rval);
5752 }
5753
5754 /*
5755 * ql_els_lpc
 *	Issue an extended link service loop control request.
5757 *
5758 * Input:
5759 * ha = adapter state pointer.
5760 * pkt = pointer to fc_packet.
5761 *
5762 * Returns:
5763 * FC_SUCCESS - the packet was accepted for transport.
5764 * FC_TRANSPORT_ERROR - a transport error occurred.
5765 *
5766 * Context:
5767 * Kernel context.
5768 */
5769 static int
5770 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5771 {
5772 ddi_dma_cookie_t *cp;
5773 uint32_t cnt;
5774 conv_num_t n;
5775 port_id_t d_id;
5776 int rval = FC_SUCCESS;
5777
5778 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5779
5780 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5781 if (ha->topology & QL_SNS_CONNECTION) {
5782 ql_lpc_t els;
5783 lfa_cmd_t lfa;
5784
5785 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5786 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5787
5788 /* Setup LFA mailbox command data. */
5789 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5790
5791 lfa.resp_buffer_length[0] = 4;
5792
5793 cp = pkt->pkt_resp_cookie;
5794 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5795 n.size64 = (uint64_t)(cp->dmac_laddress);
5796 LITTLE_ENDIAN_64(&n.size64);
5797 } else {
5798 n.size32[0] = cp->dmac_address;
5799 LITTLE_ENDIAN_32(&n.size32[0]);
5800 n.size32[1] = 0;
5801 }
5802
5803 /* Set buffer address. */
5804 for (cnt = 0; cnt < 8; cnt++) {
5805 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5806 }
5807
5808 lfa.subcommand_length[0] = 20;
5809 n.size32[0] = d_id.b24;
5810 LITTLE_ENDIAN_32(&n.size32[0]);
5811 lfa.addr[0] = n.size8[0];
5812 lfa.addr[1] = n.size8[1];
5813 lfa.addr[2] = n.size8[2];
5814 lfa.subcommand[1] = 0x71;
5815 lfa.payload[4] = els.port_control;
5816 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5817
5818 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5819 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5820 } else {
5821 pkt->pkt_state = FC_PKT_SUCCESS;
5822 }
5823 } else {
5824 ql_lpc_resp_t rjt;
5825
5826 /* Build RJT. */
5827 bzero(&rjt, sizeof (rjt));
5828 rjt.ls_code.ls_code = LA_ELS_RJT;
5829
5830 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5831 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5832
5833 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5834 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5835 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5836 }
5837
5838 if (rval != FC_SUCCESS) {
5839 EL(ha, "failed, rval = %xh\n", rval);
5840 } else {
5841 /*EMPTY*/
5842 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5843 }
5844 return (rval);
5845 }
5846
5847 /*
5848 * ql_els_lsts
 *	Issue an extended link service loop status request.
5850 *
5851 * Input:
5852 * ha = adapter state pointer.
5853 * pkt = pointer to fc_packet.
5854 *
5855 * Returns:
5856 * FC_SUCCESS - the packet was accepted for transport.
5857 * FC_TRANSPORT_ERROR - a transport error occurred.
5858 *
5859 * Context:
5860 * Kernel context.
5861 */
5862 static int
5863 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5864 {
5865 ddi_dma_cookie_t *cp;
5866 uint32_t cnt;
5867 conv_num_t n;
5868 port_id_t d_id;
5869 int rval = FC_SUCCESS;
5870
5871 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5872
5873 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5874 if (ha->topology & QL_SNS_CONNECTION) {
5875 fc_lsts_req_t els;
5876 lfa_cmd_t lfa;
5877
5878 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5879 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5880
5881 /* Setup LFA mailbox command data. */
5882 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5883
5884 lfa.resp_buffer_length[0] = 84;
5885
5886 cp = pkt->pkt_resp_cookie;
5887 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5888 n.size64 = cp->dmac_laddress;
5889 LITTLE_ENDIAN_64(&n.size64);
5890 } else {
5891 n.size32[0] = cp->dmac_address;
5892 LITTLE_ENDIAN_32(&n.size32[0]);
5893 n.size32[1] = 0;
5894 }
5895
5896 /* Set buffer address. */
5897 for (cnt = 0; cnt < 8; cnt++) {
5898 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5899 }
5900
5901 lfa.subcommand_length[0] = 2;
5902 n.size32[0] = d_id.b24;
5903 LITTLE_ENDIAN_32(&n.size32[0]);
5904 lfa.addr[0] = n.size8[0];
5905 lfa.addr[1] = n.size8[1];
5906 lfa.addr[2] = n.size8[2];
5907 lfa.subcommand[1] = 0x72;
5908
5909 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5910 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5911 } else {
5912 pkt->pkt_state = FC_PKT_SUCCESS;
5913 }
5914 } else {
5915 fc_lsts_resp_t rjt;
5916
5917 /* Build RJT. */
5918 bzero(&rjt, sizeof (rjt));
5919 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5920
5921 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5922 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5923
5924 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5925 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5926 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5927 }
5928
5929 if (rval != FC_SUCCESS) {
5930 EL(ha, "failed=%xh\n", rval);
5931 } else {
5932 /*EMPTY*/
5933 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5934 }
5935 return (rval);
5936 }
5937
5938 /*
5939 * ql_els_scr
 *	Issue an extended link service state change registration request.
5941 *
5942 * Input:
5943 * ha = adapter state pointer.
5944 * pkt = pointer to fc_packet.
5945 *
5946 * Returns:
5947 * FC_SUCCESS - the packet was accepted for transport.
5948 * FC_TRANSPORT_ERROR - a transport error occurred.
5949 *
5950 * Context:
5951 * Kernel context.
5952 */
5953 static int
5954 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5955 {
5956 fc_scr_resp_t acc;
5957 int rval = FC_SUCCESS;
5958
5959 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5960
5961 bzero(&acc, sizeof (acc));
5962 if (ha->topology & QL_SNS_CONNECTION) {
5963 fc_scr_req_t els;
5964
5965 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5966 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5967
5968 if (ql_send_change_request(ha, els.scr_func) ==
5969 QL_SUCCESS) {
5970 /* Build ACC. */
5971 acc.scr_acc = LA_ELS_ACC;
5972
5973 pkt->pkt_state = FC_PKT_SUCCESS;
5974 } else {
5975 /* Build RJT. */
5976 acc.scr_acc = LA_ELS_RJT;
5977
5978 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5979 pkt->pkt_reason = FC_REASON_HW_ERROR;
5980 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5981 }
5982 } else {
5983 /* Build RJT. */
5984 acc.scr_acc = LA_ELS_RJT;
5985
5986 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5987 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5988 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5989 }
5990
5991 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5992 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5993
5994 if (rval != FC_SUCCESS) {
5995 EL(ha, "failed, rval = %xh\n", rval);
5996 } else {
5997 /*EMPTY*/
5998 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5999 }
6000 return (rval);
6001 }
6002
6003 /*
6004 * ql_els_rscn
 *	Issue an extended link service register state
6006 * change notification request.
6007 *
6008 * Input:
6009 * ha = adapter state pointer.
6010 * pkt = pointer to fc_packet.
6011 *
6012 * Returns:
6013 * FC_SUCCESS - the packet was accepted for transport.
6014 * FC_TRANSPORT_ERROR - a transport error occurred.
6015 *
6016 * Context:
6017 * Kernel context.
6018 */
6019 static int
6020 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6021 {
6022 ql_rscn_resp_t acc;
6023 int rval = FC_SUCCESS;
6024
6025 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6026
6027 bzero(&acc, sizeof (acc));
6028 if (ha->topology & QL_SNS_CONNECTION) {
6029 /* Build ACC. */
6030 acc.scr_acc = LA_ELS_ACC;
6031
6032 pkt->pkt_state = FC_PKT_SUCCESS;
6033 } else {
6034 /* Build RJT. */
6035 acc.scr_acc = LA_ELS_RJT;
6036
6037 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6038 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6039 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6040 }
6041
6042 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6043 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6044
6045 if (rval != FC_SUCCESS) {
6046 EL(ha, "failed, rval = %xh\n", rval);
6047 } else {
6048 /*EMPTY*/
6049 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6050 }
6051 return (rval);
6052 }
6053
6054 /*
6055 * ql_els_farp_req
6056 * Issue FC Address Resolution Protocol (FARP)
6057 * extended link service request.
6058 *
6059 * Note: not supported.
6060 *
6061 * Input:
6062 * ha = adapter state pointer.
6063 * pkt = pointer to fc_packet.
6064 *
6065 * Returns:
6066 * FC_SUCCESS - the packet was accepted for transport.
6067 * FC_TRANSPORT_ERROR - a transport error occurred.
6068 *
6069 * Context:
6070 * Kernel context.
6071 */
6072 static int
6073 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6074 {
6075 ql_acc_rjt_t acc;
6076 int rval = FC_SUCCESS;
6077
6078 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6079
6080 bzero(&acc, sizeof (acc));
6081
6082 /* Build ACC. */
6083 acc.ls_code.ls_code = LA_ELS_ACC;
6084
6085 pkt->pkt_state = FC_PKT_SUCCESS;
6086
6087 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6088 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6089
6090 if (rval != FC_SUCCESS) {
6091 EL(ha, "failed, rval = %xh\n", rval);
6092 } else {
6093 /*EMPTY*/
6094 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6095 }
6096 return (rval);
6097 }
6098
6099 /*
6100 * ql_els_farp_reply
6101 * Issue FC Address Resolution Protocol (FARP)
6102 * extended link service reply.
6103 *
6104 * Note: not supported.
6105 *
6106 * Input:
6107 * ha = adapter state pointer.
6108 * pkt = pointer to fc_packet.
6109 *
6110 * Returns:
6111 * FC_SUCCESS - the packet was accepted for transport.
6112 * FC_TRANSPORT_ERROR - a transport error occurred.
6113 *
6114 * Context:
6115 * Kernel context.
6116 */
6117 /* ARGSUSED */
6118 static int
6119 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6120 {
6121 ql_acc_rjt_t acc;
6122 int rval = FC_SUCCESS;
6123
6124 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6125
6126 bzero(&acc, sizeof (acc));
6127
6128 /* Build ACC. */
6129 acc.ls_code.ls_code = LA_ELS_ACC;
6130
6131 pkt->pkt_state = FC_PKT_SUCCESS;
6132
6133 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6134 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6135
6136 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6137
6138 return (rval);
6139 }
6140
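/*
 * ql_els_rnid
 *	Issue a request node identification data (RNID)
 *	extended link service.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the RNID response was returned.
 *	FC_FAILURE - the device is unknown or the request failed.
 *
 * Context:
 *	Kernel context.
 */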
6141 static int
6142 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6143 {
6144 uchar_t *rnid_acc;
6145 port_id_t d_id;
6146 ql_link_t *link;
6147 ql_tgt_t *tq;
6148 uint16_t index;
6149 la_els_rnid_acc_t acc;
6150 la_els_rnid_t *req;
6151 size_t req_len;
6152
6153 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6154
6155 req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6156 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6157 index = ql_alpa_to_index[d_id.b.al_pa];
6158
6159 tq = NULL;
6160 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6161 tq = link->base_address;
6162 if (tq->d_id.b24 == d_id.b24) {
6163 break;
6164 } else {
6165 tq = NULL;
6166 }
6167 }
6168
6169 /* Allocate memory for rnid status block */
6170 rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6171
6172 bzero(&acc, sizeof (acc));
6173
6174 req = (la_els_rnid_t *)pkt->pkt_cmd;
6175 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6176 (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6177 (caddr_t)rnid_acc) != QL_SUCCESS)) {
6178
6179 kmem_free(rnid_acc, req_len);
6180 acc.ls_code.ls_code = LA_ELS_RJT;
6181
6182 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6183 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6184
6185 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6186 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6187 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6188
6189 return (FC_FAILURE);
6190 }
6191
6192 acc.ls_code.ls_code = LA_ELS_ACC;
6193 bcopy(rnid_acc, &acc.hdr, req_len);
6194 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6195 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6196
6197 kmem_free(rnid_acc, req_len);
6198 pkt->pkt_state = FC_PKT_SUCCESS;
6199
6200 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6201
6202 return (FC_SUCCESS);
6203 }
6204
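/*
 * ql_els_rls
 *	Issue a read link error status (RLS) extended link service.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the link error status block was returned.
 *	FC_FAILURE - the device is unknown or the request failed.
 *
 * Context:
 *	Kernel context.
 */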
6205 static int
6206 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6207 {
6208 fc_rls_acc_t *rls_acc;
6209 port_id_t d_id;
6210 ql_link_t *link;
6211 ql_tgt_t *tq;
6212 uint16_t index;
6213 la_els_rls_acc_t acc;
6214
6215 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6216
6217 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6218 index = ql_alpa_to_index[d_id.b.al_pa];
6219
6220 tq = NULL;
6221 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6222 tq = link->base_address;
6223 if (tq->d_id.b24 == d_id.b24) {
6224 break;
6225 } else {
6226 tq = NULL;
6227 }
6228 }
6229
6230 /* Allocate memory for link error status block */
6231 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6232
6233 bzero(&acc, sizeof (la_els_rls_acc_t));
6234
6235 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6236 (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6237 (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6238
6239 kmem_free(rls_acc, sizeof (*rls_acc));
6240 acc.ls_code.ls_code = LA_ELS_RJT;
6241
6242 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6243 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6244
6245 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6246 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6247 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6248
6249 return (FC_FAILURE);
6250 }
6251
6252 LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6253 LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6254 LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6255 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6256 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6257
6258 acc.ls_code.ls_code = LA_ELS_ACC;
6259 acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6260 acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6261 acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6262 acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6263 acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6264 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6265 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6266
6267 kmem_free(rls_acc, sizeof (*rls_acc));
6268 pkt->pkt_state = FC_PKT_SUCCESS;
6269
6270 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6271
6272 return (FC_SUCCESS);
6273 }
6274
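/*
 * ql_busy_plogi
 *	Holds off a PLOGI while commands for the target are still
 *	outstanding or completions for it remain on the callback
 *	queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - the PLOGI can proceed.
 *	FC_TRAN_BUSY - the device is busy; the transport will retry.
 *
 * Context:
 *	Kernel context.
 */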
6275 static int
6276 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6277 {
6278 port_id_t d_id;
6279 ql_srb_t *sp;
6280 fc_unsol_buf_t *ubp;
6281 ql_link_t *link, *next_link;
6282 int rval = FC_SUCCESS;
6283 int cnt = 5;
6284
6285 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6286
	/*
	 * We need to ensure that tq->outcnt == 0; otherwise any
	 * command completed with PKT_PORT_OFFLINE after the PLOGI
	 * will confuse the ULPs.
	 */
6292
6293 DEVICE_QUEUE_LOCK(tq);
6294 do {
		/*
		 * Wait for the commands to drain. If they do not
		 * drain, the transport will retry the PLOGI after a
		 * few seconds.
		 */
6300 if (tq->outcnt != 0) {
6301 rval = FC_TRAN_BUSY;
6302 DEVICE_QUEUE_UNLOCK(tq);
6303 ql_delay(ha, 10000);
6304 DEVICE_QUEUE_LOCK(tq);
6305 cnt--;
6306 if (!cnt) {
6307 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6308 " for %xh outcount %xh", QL_NAME,
6309 ha->instance, tq->d_id.b24, tq->outcnt);
6310 }
6311 } else {
6312 rval = FC_SUCCESS;
6313 break;
6314 }
6315 } while (cnt > 0);
6316 DEVICE_QUEUE_UNLOCK(tq);
6317
	/*
	 * Return if busy or if the PLOGI was asynchronous.
	 */
6321 if ((rval != FC_SUCCESS) ||
6322 (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6323 pkt->pkt_comp)) {
6324 QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
6325 ha->instance);
6326 return (rval);
6327 }
6328
	/*
	 * Give the task daemon sufficient time; by the time the
	 * transport retries the PLOGI, it should have flushed the
	 * callback queue.
	 */
6334 TASK_DAEMON_LOCK(ha);
6335 for (link = ha->callback_queue.first; link != NULL;
6336 link = next_link) {
6337 next_link = link->next;
6338 sp = link->base_address;
6339 if (sp->flags & SRB_UB_CALLBACK) {
6340 ubp = ha->ub_array[sp->handle];
6341 d_id.b24 = ubp->ub_frame.s_id;
6342 } else {
6343 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6344 }
6345 if (tq->d_id.b24 == d_id.b24) {
6346 cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6347 ha->instance, tq->d_id.b24);
6348 rval = FC_TRAN_BUSY;
6349 break;
6350 }
6351 }
6352 TASK_DAEMON_UNLOCK(ha);
6353
6354 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6355
6356 return (rval);
6357 }
6358
6359 /*
6360 * ql_login_port
6361 * Logs in a device if not already logged in.
6362 *
6363 * Input:
6364 * ha = adapter state pointer.
6365 * d_id = 24 bit port ID.
6366 * DEVICE_QUEUE_LOCK must be released.
6367 *
6368 * Returns:
6369 * QL local function return status code.
6370 *
6371 * Context:
6372 * Kernel context.
6373 */
6374 static int
6375 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6376 {
6377 ql_adapter_state_t *vha;
6378 ql_link_t *link;
6379 uint16_t index;
6380 ql_tgt_t *tq, *tq2;
6381 uint16_t loop_id, first_loop_id, last_loop_id;
6382 int rval = QL_SUCCESS;
6383
6384 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6385 d_id.b24);
6386
6387 /* Get head queue index. */
6388 index = ql_alpa_to_index[d_id.b.al_pa];
6389
	/* Check whether the device already has a queue. */
6391 tq = NULL;
6392 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6393 tq = link->base_address;
6394 if (tq->d_id.b24 == d_id.b24) {
6395 loop_id = tq->loop_id;
6396 break;
6397 } else {
6398 tq = NULL;
6399 }
6400 }
6401
	/* Stop issuing any I/O and unsolicited LOGOs. */
6403 if ((tq != NULL) && (!(ddi_in_panic()))) {
6404 DEVICE_QUEUE_LOCK(tq);
6405 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6406 tq->flags &= ~TQF_RSCN_RCVD;
6407 DEVICE_QUEUE_UNLOCK(tq);
6408 }
6409 if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6410 !(tq->flags & TQF_FABRIC_DEVICE)) {
6411 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6412 }
6413
6414 /* Special case for Nameserver */
6415 if (d_id.b24 == 0xFFFFFC) {
6416 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6417 SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6418 if (tq == NULL) {
6419 ADAPTER_STATE_LOCK(ha);
6420 tq = ql_dev_init(ha, d_id, loop_id);
6421 ADAPTER_STATE_UNLOCK(ha);
6422 if (tq == NULL) {
6423 EL(ha, "failed=%xh, d_id=%xh\n",
6424 QL_FUNCTION_FAILED, d_id.b24);
6425 return (QL_FUNCTION_FAILED);
6426 }
6427 }
6428 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6429 rval = ql_login_fabric_port(ha, tq, loop_id);
6430 if (rval == QL_SUCCESS) {
6431 tq->loop_id = loop_id;
6432 tq->flags |= TQF_FABRIC_DEVICE;
6433 (void) ql_get_port_database(ha, tq, PDF_NONE);
6434 }
6435 } else {
6436 ha->topology = (uint8_t)
6437 (ha->topology | QL_SNS_CONNECTION);
6438 }
6439 /* Check for device already logged in. */
6440 } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6441 if (tq->flags & TQF_FABRIC_DEVICE) {
6442 rval = ql_login_fabric_port(ha, tq, loop_id);
6443 if (rval == QL_PORT_ID_USED) {
6444 rval = QL_SUCCESS;
6445 }
6446 } else if (LOCAL_LOOP_ID(loop_id)) {
6447 rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6448 (tq->flags & TQF_INITIATOR_DEVICE ?
6449 LLF_NONE : LLF_PLOGI));
6450 if (rval == QL_SUCCESS) {
6451 DEVICE_QUEUE_LOCK(tq);
6452 tq->loop_id = loop_id;
6453 DEVICE_QUEUE_UNLOCK(tq);
6454 }
6455 }
6456 } else if (ha->topology & QL_SNS_CONNECTION) {
6457 /* Locate unused loop ID. */
6458 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6459 first_loop_id = 0;
6460 last_loop_id = LAST_N_PORT_HDL;
6461 } else if (ha->topology & QL_F_PORT) {
6462 first_loop_id = 0;
6463 last_loop_id = SNS_LAST_LOOP_ID;
6464 } else {
6465 first_loop_id = SNS_FIRST_LOOP_ID;
6466 last_loop_id = SNS_LAST_LOOP_ID;
6467 }
6468
6469 /* Acquire adapter state lock. */
6470 ADAPTER_STATE_LOCK(ha);
6471
6472 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6473 if (tq == NULL) {
6474 EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6475 d_id.b24);
6476
6477 ADAPTER_STATE_UNLOCK(ha);
6478
6479 return (QL_FUNCTION_FAILED);
6480 }
6481
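		/*
		 * Round-robin search of the handle range, starting at
		 * the last allocated loop ID and skipping IDs that are
		 * reserved, assigned to another queue, or our own.
		 */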
6482 rval = QL_FUNCTION_FAILED;
6483 loop_id = ha->pha->free_loop_id++;
6484 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6485 index--) {
6486 if (loop_id < first_loop_id ||
6487 loop_id > last_loop_id) {
6488 loop_id = first_loop_id;
6489 ha->pha->free_loop_id = (uint16_t)
6490 (loop_id + 1);
6491 }
6492
6493 /* Bypass if loop ID used. */
6494 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6495 tq2 = ql_loop_id_to_queue(vha, loop_id);
6496 if (tq2 != NULL && tq2 != tq) {
6497 break;
6498 }
6499 }
6500 if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6501 loop_id == ha->loop_id) {
6502 loop_id = ha->pha->free_loop_id++;
6503 continue;
6504 }
6505
6506 ADAPTER_STATE_UNLOCK(ha);
6507 rval = ql_login_fabric_port(ha, tq, loop_id);
6508
			/*
			 * If QL_PORT_ID_USED is returned,
			 * ql_login_fabric_port() has updated
			 * tq->loop_id with the correct loop ID.
			 */
6514 switch (rval) {
6515 case QL_PORT_ID_USED:
				/*
				 * Use the firmware handle and try to
				 * log in again.
				 */
6520 ADAPTER_STATE_LOCK(ha);
6521 ha->pha->free_loop_id--;
6522 ADAPTER_STATE_UNLOCK(ha);
6523 loop_id = tq->loop_id;
6524 break;
6525
6526 case QL_SUCCESS:
6527 tq->flags |= TQF_FABRIC_DEVICE;
6528 (void) ql_get_port_database(ha,
6529 tq, PDF_NONE);
6530 index = 1;
6531 break;
6532
6533 case QL_LOOP_ID_USED:
6534 tq->loop_id = PORT_NO_LOOP_ID;
6535 loop_id = ha->pha->free_loop_id++;
6536 break;
6537
6538 case QL_ALL_IDS_IN_USE:
6539 tq->loop_id = PORT_NO_LOOP_ID;
6540 index = 1;
6541 break;
6542
6543 default:
6544 tq->loop_id = PORT_NO_LOOP_ID;
6545 index = 1;
6546 break;
6547 }
6548
6549 ADAPTER_STATE_LOCK(ha);
6550 }
6551
6552 ADAPTER_STATE_UNLOCK(ha);
6553 } else {
6554 rval = QL_FUNCTION_FAILED;
6555 }
6556
6557 if (rval != QL_SUCCESS) {
6558 EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6559 } else {
6560 EL(ha, "d_id=%xh, loop_id=%xh, "
6561 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6562 tq->loop_id, tq->port_name[0], tq->port_name[1],
6563 tq->port_name[2], tq->port_name[3], tq->port_name[4],
6564 tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6565 }
6566 return (rval);
6567 }
6568
6569 /*
6570 * ql_login_fabric_port
6571 * Issue login fabric port mailbox command.
6572 *
6573 * Input:
6574 * ha: adapter state pointer.
6575 * tq: target queue pointer.
6576 * loop_id: FC Loop ID.
6577 *
6578 * Returns:
6579 * ql local function return status code.
6580 *
6581 * Context:
6582 * Kernel context.
6583 */
6584 static int
6585 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6586 {
6587 int rval;
6588 int index;
6589 int retry = 0;
6590 port_id_t d_id;
6591 ql_tgt_t *newq;
6592 ql_mbx_data_t mr;
6593
6594 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6595 tq->d_id.b24);
6596
	/*
	 * QL_PARAMETER_ERROR also means that the firmware was not
	 * able to allocate a PCB entry due to resource issues or a
	 * collision.
	 */
6602 do {
6603 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6604 if ((rval == QL_PARAMETER_ERROR) ||
6605 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6606 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6607 retry++;
6608 drv_usecwait(10 * MILLISEC);
6609 } else {
6610 break;
6611 }
6612 } while (retry < 5);
6613
6614 switch (rval) {
6615 case QL_SUCCESS:
6616 tq->loop_id = loop_id;
6617 break;
6618
6619 case QL_PORT_ID_USED:
		/*
		 * This loop ID should NOT already be in use by the driver.
		 */
6623 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6624
6625 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6626 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6627 "dup loop_id=%xh, d_id=%xh", ha->instance,
6628 newq->loop_id, newq->d_id.b24);
6629 ql_send_logo(ha, newq, NULL);
6630 }
6631
6632 tq->loop_id = mr.mb[1];
6633 break;
6634
6635 case QL_LOOP_ID_USED:
6636 d_id.b.al_pa = LSB(mr.mb[2]);
6637 d_id.b.area = MSB(mr.mb[2]);
6638 d_id.b.domain = LSB(mr.mb[1]);
6639
6640 newq = ql_d_id_to_queue(ha, d_id);
6641 if (newq && (newq->loop_id != loop_id)) {
			/*
			 * This should NEVER happen, but this code is
			 * needed to bail out when the worst case does
			 * happen.
			 */
6647 QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6648 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6649 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6650 ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6651 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6652 newq->d_id.b24, loop_id);
6653
6654 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6655 ADAPTER_STATE_LOCK(ha);
6656
6657 index = ql_alpa_to_index[newq->d_id.b.al_pa];
				ql_remove_link(&ha->dev[index], &newq->device);
6659
6660 newq->d_id.b24 = d_id.b24;
6661
6662 index = ql_alpa_to_index[d_id.b.al_pa];
6663 ql_add_link_b(&ha->dev[index], &newq->device);
6664
6665 ADAPTER_STATE_UNLOCK(ha);
6666 }
6667
6668 (void) ql_get_port_database(ha, newq, PDF_NONE);
6669
6670 }
6671
		/*
		 * Invalidate the loop ID so that we obtain a new one.
		 */
6676 tq->loop_id = PORT_NO_LOOP_ID;
6677 break;
6678
6679 case QL_ALL_IDS_IN_USE:
6680 rval = QL_FUNCTION_FAILED;
6681 EL(ha, "no loop id's available\n");
6682 break;
6683
6684 default:
6685 if (rval == QL_COMMAND_ERROR) {
6686 switch (mr.mb[1]) {
6687 case 2:
6688 case 3:
6689 rval = QL_MEMORY_ALLOC_FAILED;
6690 break;
6691
6692 case 4:
6693 rval = QL_FUNCTION_TIMEOUT;
6694 break;
6695 case 7:
6696 rval = QL_FABRIC_NOT_INITIALIZED;
6697 break;
6698 default:
6699 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6700 break;
6701 }
6702 } else {
6703 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6704 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6705 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6706 }
6707 break;
6708 }
6709
6710 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6711 rval != QL_LOOP_ID_USED) {
6712 EL(ha, "failed=%xh\n", rval);
6713 } else {
6714 /*EMPTY*/
6715 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6716 }
6717 return (rval);
6718 }
6719
6720 /*
6721 * ql_logout_port
6722 * Logs out a device if possible.
6723 *
6724 * Input:
6725 * ha: adapter state pointer.
6726 * d_id: 24 bit port ID.
6727 *
6728 * Returns:
6729 * QL local function return status code.
6730 *
6731 * Context:
6732 * Kernel context.
6733 */
6734 static int
6735 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6736 {
6737 ql_link_t *link;
6738 ql_tgt_t *tq;
6739 uint16_t index;
6740
6741 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6742
6743 /* Get head queue index. */
6744 index = ql_alpa_to_index[d_id.b.al_pa];
6745
6746 /* Get device queue. */
6747 tq = NULL;
6748 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6749 tq = link->base_address;
6750 if (tq->d_id.b24 == d_id.b24) {
6751 break;
6752 } else {
6753 tq = NULL;
6754 }
6755 }
6756
6757 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6758 (void) ql_logout_fabric_port(ha, tq);
6759 tq->loop_id = PORT_NO_LOOP_ID;
6760 }
6761
6762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6763
6764 return (QL_SUCCESS);
6765 }
6766
6767 /*
6768 * ql_dev_init
6769 * Initialize/allocate device queue.
6770 *
6771 * Input:
6772 * ha: adapter state pointer.
6773 * d_id: device destination ID
6774 * loop_id: device loop ID
6775 * ADAPTER_STATE_LOCK must be already obtained.
6776 *
6777 * Returns:
6778 * NULL = failure
6779 *
6780 * Context:
6781 * Kernel context.
6782 */
6783 ql_tgt_t *
6784 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6785 {
6786 ql_link_t *link;
6787 uint16_t index;
6788 ql_tgt_t *tq;
6789
6790 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6791 ha->instance, d_id.b24, loop_id);
6792
6793 index = ql_alpa_to_index[d_id.b.al_pa];
6794
6795 /* If device queue exists, set proper loop ID. */
6796 tq = NULL;
6797 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6798 tq = link->base_address;
6799 if (tq->d_id.b24 == d_id.b24) {
6800 tq->loop_id = loop_id;
6801
6802 /* Reset port down retry count. */
6803 tq->port_down_retry_count = ha->port_down_retry_count;
6804 tq->qfull_retry_count = ha->qfull_retry_count;
6805
6806 break;
6807 } else {
6808 tq = NULL;
6809 }
6810 }
6811
	/* If the device does not have a queue, allocate one. */
6813 if (tq == NULL) {
6814 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6815 if (tq != NULL) {
6816 /*
6817 * mutex to protect the device queue,
6818 * does not block interrupts.
6819 */
6820 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6821 (ha->iflags & IFLG_INTR_AIF) ?
6822 (void *)(uintptr_t)ha->intr_pri :
6823 (void *)(uintptr_t)ha->iblock_cookie);
6824
6825 tq->d_id.b24 = d_id.b24;
6826 tq->loop_id = loop_id;
6827 tq->device.base_address = tq;
6828 tq->iidma_rate = IIDMA_RATE_INIT;
6829
6830 /* Reset port down retry count. */
6831 tq->port_down_retry_count = ha->port_down_retry_count;
6832 tq->qfull_retry_count = ha->qfull_retry_count;
6833
6834 /* Add device to device queue. */
6835 ql_add_link_b(&ha->dev[index], &tq->device);
6836 }
6837 }
6838
6839 if (tq == NULL) {
6840 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6841 } else {
6842 /*EMPTY*/
6843 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6844 }
6845 return (tq);
6846 }
6847
6848 /*
6849 * ql_dev_free
 *	Removes the queue from the device list and frees its resources.
6851 *
6852 * Input:
6853 * ha: adapter state pointer.
6854 * tq: target queue pointer.
6855 * ADAPTER_STATE_LOCK must be already obtained.
6856 *
6857 * Context:
6858 * Kernel context.
6859 */
6860 void
6861 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6862 {
6863 ql_link_t *link;
6864 uint16_t index;
6865 ql_lun_t *lq;
6866
6867 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6868
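	/* Don't free the queue while commands remain on any LUN queue. */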
6869 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6870 lq = link->base_address;
6871 if (lq->cmd.first != NULL) {
6872 return;
6873 }
6874 }
6875
6876 if (tq->outcnt == 0) {
6877 /* Get head queue index. */
6878 index = ql_alpa_to_index[tq->d_id.b.al_pa];
6879 for (link = ha->dev[index].first; link != NULL;
6880 link = link->next) {
6881 if (link->base_address == tq) {
6882 ql_remove_link(&ha->dev[index], link);
6883
6884 link = tq->lun_queues.first;
6885 while (link != NULL) {
6886 lq = link->base_address;
6887 link = link->next;
6888
6889 ql_remove_link(&tq->lun_queues,
6890 &lq->link);
6891 kmem_free(lq, sizeof (ql_lun_t));
6892 }
6893
6894 mutex_destroy(&tq->mutex);
6895 kmem_free(tq, sizeof (ql_tgt_t));
6896 break;
6897 }
6898 }
6899 }
6900
6901 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6902 }
6903
6904 /*
6905 * ql_lun_queue
 *	Allocate a LUN queue if one does not exist.
6907 *
6908 * Input:
6909 * ha: adapter state pointer.
6910 * tq: target queue.
6911 * lun: LUN number.
6912 *
6913 * Returns:
6914 * NULL = failure
6915 *
6916 * Context:
6917 * Kernel context.
6918 */
6919 static ql_lun_t *
6920 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6921 {
6922 ql_lun_t *lq;
6923 ql_link_t *link;
6924
6925 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6926
6927 /* Fast path. */
6928 if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6929 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6930 return (tq->last_lun_queue);
6931 }
6932
6933 if (lun >= MAX_LUNS) {
6934 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6935 return (NULL);
6936 }
	/* If the LUN queue already exists, return it. */
6938 lq = NULL;
6939 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6940 lq = link->base_address;
6941 if (lq->lun_no == lun) {
6942 QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6943 tq->last_lun_queue = lq;
6944 return (lq);
6945 }
6946 }
6947
	/* If the queue does not exist, allocate it. */
6949 lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6950
6951 /* Initialize LUN queue. */
6952 if (lq != NULL) {
6953 lq->link.base_address = lq;
6954
6955 lq->lun_no = lun;
6956 lq->target_queue = tq;
6957
6958 DEVICE_QUEUE_LOCK(tq);
6959 ql_add_link_b(&tq->lun_queues, &lq->link);
6960 DEVICE_QUEUE_UNLOCK(tq);
6961 tq->last_lun_queue = lq;
6962 }
6963
6964 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6965
6966 return (lq);
6967 }
6968
6969 /*
6970 * ql_fcp_scsi_cmd
6971 * Process fibre channel (FCP) SCSI protocol commands.
6972 *
6973 * Input:
6974 * ha = adapter state pointer.
6975 * pkt = pointer to fc_packet.
6976 * sp = srb pointer.
6977 *
6978 * Returns:
6979 * FC_SUCCESS - the packet was accepted for transport.
6980 * FC_TRANSPORT_ERROR - a transport error occurred.
6981 *
6982 * Context:
6983 * Kernel context.
6984 */
6985 static int
6986 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6987 {
6988 port_id_t d_id;
6989 ql_tgt_t *tq;
6990 uint64_t *ptr;
6991 uint16_t lun;
6992
6993 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6994
6995 tq = (ql_tgt_t *)pkt->pkt_fca_device;
6996 if (tq == NULL) {
6997 d_id.r.rsvd_1 = 0;
6998 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6999 tq = ql_d_id_to_queue(ha, d_id);
7000 }
7001
7002 sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
7003 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7004 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7005
7006 if (tq != NULL &&
7007 (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
7008
7009 /*
7010 * zero out FCP response; 24 Bytes
7011 */
7012 ptr = (uint64_t *)pkt->pkt_resp;
7013 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7014
7015 /* Handle task management function. */
7016 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7017 sp->fcp->fcp_cntl.cntl_clr_aca |
7018 sp->fcp->fcp_cntl.cntl_reset_tgt |
7019 sp->fcp->fcp_cntl.cntl_reset_lun |
7020 sp->fcp->fcp_cntl.cntl_clr_tsk |
7021 sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7022 ql_task_mgmt(ha, tq, pkt, sp);
7023 } else {
7024 ha->pha->xioctl->IosRequested++;
7025 ha->pha->xioctl->BytesRequested += (uint32_t)
7026 sp->fcp->fcp_data_len;
7027
7028 /*
7029 * Setup for commands with data transfer
7030 */
7031 sp->iocb = ha->fcp_cmd;
7032 sp->req_cnt = 1;
7033 if (sp->fcp->fcp_data_len != 0) {
7034 /*
7035 * FCP data is bound to pkt_data_dma
7036 */
7037 if (sp->fcp->fcp_cntl.cntl_write_data) {
7038 (void) ddi_dma_sync(pkt->pkt_data_dma,
7039 0, 0, DDI_DMA_SYNC_FORDEV);
7040 }
7041
7042 /* Setup IOCB count. */
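				/*
				 * One command IOCB holds ha->cmd_segs
				 * data segments; each continuation
				 * IOCB holds ha->cmd_cont_segs more.
				 * req_cnt is one command IOCB plus
				 * enough continuation IOCBs for the
				 * remaining DMA cookies.
				 */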
7043 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7044 (!CFG_IST(ha, CFG_CTRL_8021) ||
7045 sp->sg_dma.dma_handle == NULL)) {
7046 uint32_t cnt;
7047
7048 cnt = pkt->pkt_data_cookie_cnt -
7049 ha->cmd_segs;
7050 sp->req_cnt = (uint16_t)
7051 (cnt / ha->cmd_cont_segs);
7052 if (cnt % ha->cmd_cont_segs) {
7053 sp->req_cnt = (uint16_t)
7054 (sp->req_cnt + 2);
7055 } else {
7056 sp->req_cnt++;
7057 }
7058 }
7059 }
7060 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7061
7062 return (ql_start_cmd(ha, tq, pkt, sp));
7063 }
7064 } else {
7065 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7066 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7067
7068 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7069 ql_awaken_task_daemon(ha, sp, 0, 0);
7070 }
7071
7072 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7073
7074 return (FC_SUCCESS);
7075 }
7076
7077 /*
7078 * ql_task_mgmt
7079 * Task management function processor.
7080 *
7081 * Input:
7082 * ha: adapter state pointer.
7083 * tq: target queue pointer.
7084 * pkt: pointer to fc_packet.
7085 * sp: SRB pointer.
7086 *
7087 * Context:
7088 * Kernel context.
7089 */
7090 static void
7091 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7092 ql_srb_t *sp)
7093 {
7094 fcp_rsp_t *fcpr;
7095 struct fcp_rsp_info *rsp;
7096 uint16_t lun;
7097
7098 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7099
7100 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7102
7103 bzero(fcpr, pkt->pkt_rsplen);
7104
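	/* The 8-byte FCP_RSP_INFO field follows the FCP response header. */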
7105 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7106 fcpr->fcp_response_len = 8;
7107 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7108 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7109
7110 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7111 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7112 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7113 }
7114 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7115 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7116 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7117 }
7118 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7119 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7120 QL_SUCCESS) {
7121 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7122 }
7123 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7124 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7125 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7126 }
7127 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7128 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7129 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7130 }
7131 } else {
7132 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7133 }
7134
7135 pkt->pkt_state = FC_PKT_SUCCESS;
7136
7137 /* Do command callback. */
7138 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7139 ql_awaken_task_daemon(ha, sp, 0, 0);
7140 }
7141
7142 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7143 }
7144
7145 /*
7146 * ql_fcp_ip_cmd
 *	Process fibre channel (FCP) Internet protocol (IP) commands.
7148 *
7149 * Input:
7150 * ha: adapter state pointer.
7151 * pkt: pointer to fc_packet.
7152 * sp: SRB pointer.
7153 *
7154 * Returns:
7155 * FC_SUCCESS - the packet was accepted for transport.
7156 * FC_TRANSPORT_ERROR - a transport error occurred.
7157 *
7158 * Context:
7159 * Kernel context.
7160 */
7161 static int
7162 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7163 {
7164 port_id_t d_id;
7165 ql_tgt_t *tq;
7166
7167 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7168
7169 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7170 if (tq == NULL) {
7171 d_id.r.rsvd_1 = 0;
7172 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7173 tq = ql_d_id_to_queue(ha, d_id);
7174 }
7175
7176 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7177 /*
7178 * IP data is bound to pkt_cmd_dma
7179 */
7180 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7181 0, 0, DDI_DMA_SYNC_FORDEV);
7182
7183 /* Setup IOCB count. */
7184 sp->iocb = ha->ip_cmd;
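		/*
		 * As in ql_fcp_scsi_cmd(): one command IOCB plus
		 * enough continuation IOCBs for the remaining DMA
		 * cookies.
		 */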
7185 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7186 uint32_t cnt;
7187
7188 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7189 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7190 if (cnt % ha->cmd_cont_segs) {
7191 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7192 } else {
7193 sp->req_cnt++;
7194 }
7195 } else {
7196 sp->req_cnt = 1;
7197 }
7198 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7199
7200 return (ql_start_cmd(ha, tq, pkt, sp));
7201 } else {
7202 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7203 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7204
7205 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7206 ql_awaken_task_daemon(ha, sp, 0, 0);
7207 }
7208
7209 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7210
7211 return (FC_SUCCESS);
7212 }
7213
7214 /*
7215 * ql_fc_services
7216 * Process fibre channel services (name server).
7217 *
7218 * Input:
7219 * ha: adapter state pointer.
7220 * pkt: pointer to fc_packet.
7221 *
7222 * Returns:
7223 * FC_SUCCESS - the packet was accepted for transport.
7224 * FC_TRANSPORT_ERROR - a transport error occurred.
7225 *
7226 * Context:
7227 * Kernel context.
7228 */
7229 static int
7230 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7231 {
7232 uint32_t cnt;
7233 fc_ct_header_t hdr;
7234 la_els_rjt_t rjt;
7235 port_id_t d_id;
7236 ql_tgt_t *tq;
7237 ql_srb_t *sp;
7238 int rval;
7239
7240 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7241
7242 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7243 (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7244
7245 bzero(&rjt, sizeof (rjt));
7246
7247 /* Do some sanity checks */
7248 cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7249 sizeof (fc_ct_header_t));
7250 if (cnt > (uint32_t)pkt->pkt_rsplen) {
7251 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7252 pkt->pkt_rsplen);
7253 return (FC_ELS_MALFORMED);
7254 }
7255
7256 switch (hdr.ct_fcstype) {
7257 case FCSTYPE_DIRECTORY:
7258 case FCSTYPE_MGMTSERVICE:
7259 /* An FCA must make sure that the header is in big endian */
7260 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7261
7262 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7263 tq = ql_d_id_to_queue(ha, d_id);
7264 sp = (ql_srb_t *)pkt->pkt_fca_private;
7265 if (tq == NULL ||
7266 (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7267 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7268 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7269 rval = QL_SUCCESS;
7270 break;
7271 }
7272
7273 /*
7274 * Services data is bound to pkt_cmd_dma
7275 */
7276 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7277 DDI_DMA_SYNC_FORDEV);
7278
7279 sp->flags |= SRB_MS_PKT;
7280 sp->retry_count = 32;
7281
7282 /* Setup IOCB count. */
7283 sp->iocb = ha->ms_cmd;
7284 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7285 cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7286 sp->req_cnt =
7287 (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7288 if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7289 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7290 } else {
7291 sp->req_cnt++;
7292 }
7293 } else {
7294 sp->req_cnt = 1;
7295 }
7296 rval = ql_start_cmd(ha, tq, pkt, sp);
7297
7298 QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7299 ha->instance, rval);
7300
7301 return (rval);
7302
7303 default:
7304 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7305 rval = QL_FUNCTION_PARAMETER_ERROR;
7306 break;
7307 }
7308
7309 if (rval != QL_SUCCESS) {
7310 /* Build RJT. */
7311 rjt.ls_code.ls_code = LA_ELS_RJT;
7312 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7313
7314 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7315 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7316
7317 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7318 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7319 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7320 }
7321
7322 /* Do command callback. */
7323 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7324 ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7325 0, 0);
7326 }
7327
7328 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7329
7330 return (FC_SUCCESS);
7331 }
7332
7333 /*
7334 * ql_cthdr_endian
 *	Change the endianness of a CT passthrough header and payload.
7336 *
7337 * Input:
7338 * acc_handle: DMA buffer access handle.
7339 * ct_hdr: Pointer to header.
7340 * restore: Restore first flag.
7341 *
7342 * Context:
7343 * Interrupt or Kernel context, no mailbox commands allowed.
7344 */
7345 void
7346 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7347 boolean_t restore)
7348 {
7349 uint8_t i, *bp;
7350 fc_ct_header_t hdr;
7351 uint32_t *hdrp = (uint32_t *)&hdr;
7352
7353 ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7354 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7355
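	/*
	 * On restore, the header arrives in big-endian (wire) order
	 * and must be converted to host order before ct_fcstype and
	 * ct_cmdrsp are examined; otherwise the header is already in
	 * host order and is converted to wire order after the payload
	 * swap below.
	 */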
7356 if (restore) {
7357 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7358 *hdrp = BE_32(*hdrp);
7359 hdrp++;
7360 }
7361 }
7362
7363 if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7364 bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7365
7366 switch (hdr.ct_cmdrsp) {
7367 case NS_GA_NXT:
7368 case NS_GPN_ID:
7369 case NS_GNN_ID:
7370 case NS_GCS_ID:
7371 case NS_GFT_ID:
7372 case NS_GSPN_ID:
7373 case NS_GPT_ID:
7374 case NS_GID_FT:
7375 case NS_GID_PT:
7376 case NS_RPN_ID:
7377 case NS_RNN_ID:
7378 case NS_RSPN_ID:
7379 case NS_DA_ID:
7380 BIG_ENDIAN_32(bp);
7381 break;
7382 case NS_RFT_ID:
7383 case NS_RCS_ID:
7384 case NS_RPT_ID:
7385 BIG_ENDIAN_32(bp);
7386 bp += 4;
7387 BIG_ENDIAN_32(bp);
7388 break;
7389 case NS_GNN_IP:
7390 case NS_GIPA_IP:
7391 BIG_ENDIAN(bp, 16);
7392 break;
7393 case NS_RIP_NN:
7394 bp += 8;
7395 BIG_ENDIAN(bp, 16);
7396 break;
7397 case NS_RIPA_NN:
7398 bp += 8;
7399 BIG_ENDIAN_64(bp);
7400 break;
7401 default:
7402 break;
7403 }
7404 }
7405
7406 if (restore == B_FALSE) {
7407 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7408 *hdrp = BE_32(*hdrp);
7409 hdrp++;
7410 }
7411 }
7412
7413 ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7414 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7415 }
7416
7417 /*
7418 * ql_start_cmd
 *	Finishes starting a fibre channel protocol (FCP) command.
7420 *
7421 * Input:
7422 * ha: adapter state pointer.
7423 * tq: target queue pointer.
7424 * pkt: pointer to fc_packet.
7425 * sp: SRB pointer.
7426 *
7427 * Context:
7428 * Kernel context.
7429 */
7430 static int
7431 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7432 ql_srb_t *sp)
7433 {
7434 int rval = FC_SUCCESS;
7435 time_t poll_wait = 0;
7436 ql_lun_t *lq = sp->lun_queue;
7437
7438 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7439
7440 sp->handle = 0;
7441
7442 /* Set poll for finish. */
7443 if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7444 sp->flags |= SRB_POLL;
7445 if (pkt->pkt_timeout == 0) {
7446 pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7447 }
7448 }
7449
7450 /* Acquire device queue lock. */
7451 DEVICE_QUEUE_LOCK(tq);
7452
	/*
	 * If we need authentication, report device busy to the
	 * upper layers so they retry later.
	 */
7457 if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7458 DEVICE_QUEUE_UNLOCK(tq);
7459 EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7460 tq->d_id.b24);
7461 return (FC_DEVICE_BUSY);
7462 }
7463
7464 /* Insert command onto watchdog queue. */
7465 if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7466 ql_timeout_insert(ha, tq, sp);
7467 } else {
7468 /*
7469 * Run dump requests in polled mode as kernel threads
7470 * and interrupts may have been disabled.
7471 */
7472 sp->flags |= SRB_POLL;
7473 sp->init_wdg_q_time = 0;
7474 sp->isp_timeout = 0;
7475 }
7476
	/* If this is a polling command, set up the wait time. */
7478 if (sp->flags & SRB_POLL) {
7479 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7480 poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7481 } else {
7482 poll_wait = pkt->pkt_timeout;
7483 }
7484 }
7485
7486 if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7487 (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7488 /* Set ending status. */
7489 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7490
7491 /* Call done routine to handle completions. */
7492 sp->cmd.next = NULL;
7493 DEVICE_QUEUE_UNLOCK(tq);
7494 ql_done(&sp->cmd);
7495 } else {
7496 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7497 int do_lip = 0;
7498
7499 DEVICE_QUEUE_UNLOCK(tq);
7500
7501 ADAPTER_STATE_LOCK(ha);
7502 if ((do_lip = ha->pha->lip_on_panic) == 0) {
7503 ha->pha->lip_on_panic++;
7504 }
7505 ADAPTER_STATE_UNLOCK(ha);
7506
7507 if (!do_lip) {
7508
				/*
				 * It helps here that the QLogic
				 * firmware performs PLOGI, PRLI, etc.
				 * on its own. If a PLOGI fails for
				 * some reason, we would get
				 * CS_PORT_LOGGED_OUT or some such
				 * error, and a careful polled-mode
				 * login is kicked off inside this
				 * driver itself. The FC transport's
				 * services are not available, as all
				 * threads are suspended, interrupts
				 * disabled, and so on. Right now we
				 * re-login if the packet state isn't
				 * FC_PKT_SUCCESS.
				 */
7521 (void) ql_abort_isp(ha);
7522 }
7523
7524 ql_start_iocb(ha, sp);
7525 } else {
7526 /* Add the command to the device queue */
7527 if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7528 ql_add_link_t(&lq->cmd, &sp->cmd);
7529 } else {
7530 ql_add_link_b(&lq->cmd, &sp->cmd);
7531 }
7532
7533 sp->flags |= SRB_IN_DEVICE_QUEUE;
7534
7535 /* Check whether next message can be processed */
7536 ql_next(ha, lq);
7537 }
7538 }
7539
7540 /* If polling, wait for finish. */
7541 if (poll_wait) {
7542 if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7543 int res;
7544
7545 res = ql_abort((opaque_t)ha, pkt, 0);
7546 if (res != FC_SUCCESS && res != FC_ABORTED) {
7547 DEVICE_QUEUE_LOCK(tq);
7548 ql_remove_link(&lq->cmd, &sp->cmd);
7549 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7550 DEVICE_QUEUE_UNLOCK(tq);
7551 }
7552 }
7553
7554 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7555 EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7556 rval = FC_TRANSPORT_ERROR;
7557 }
7558
7559 if (ddi_in_panic()) {
7560 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7561 port_id_t d_id;
7562
				/*
				 * A successful LOGIN implies by
				 * design that the PRLI also succeeded
				 * for disks. Note also that there is
				 * no special mailbox command to send
				 * a PRLI.
				 */
7569 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7570 (void) ql_login_port(ha, d_id);
7571 }
7572 }
7573
7574 /*
7575 * This should only happen during CPR dumping
7576 */
7577 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7578 pkt->pkt_comp) {
7579 sp->flags &= ~SRB_POLL;
7580 (*pkt->pkt_comp)(pkt);
7581 }
7582 }
7583
7584 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7585
7586 return (rval);
7587 }
7588
7589 /*
7590 * ql_poll_cmd
7591 * Polls commands for completion.
7592 *
7593 * Input:
7594 * ha = adapter state pointer.
7595 * sp = SRB command pointer.
7596 * poll_wait = poll wait time in seconds.
7597 *
7598 * Returns:
7599 * QL local function return status code.
7600 *
7601 * Context:
7602 * Kernel context.
7603 */
7604 static int
7605 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7606 {
7607 int rval = QL_SUCCESS;
7608 time_t msecs_left = poll_wait * 100; /* 10ms inc */
7609 ql_adapter_state_t *ha = vha->pha;
7610
7611 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7612
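	/*
	 * While polling, service pending interrupts and run the task
	 * thread by hand, since this path may execute with interrupts
	 * disabled or at panic time.
	 */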
7613 while (sp->flags & SRB_POLL) {
7614
7615 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7616 ha->idle_timer >= 15 || ddi_in_panic()) {
7617
7618 /* If waiting for restart, do it now. */
7619 if (ha->port_retry_timer != 0) {
7620 ADAPTER_STATE_LOCK(ha);
7621 ha->port_retry_timer = 0;
7622 ADAPTER_STATE_UNLOCK(ha);
7623
7624 TASK_DAEMON_LOCK(ha);
7625 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7626 TASK_DAEMON_UNLOCK(ha);
7627 }
7628
7629 if (INTERRUPT_PENDING(ha)) {
7630 (void) ql_isr((caddr_t)ha);
7631 INTR_LOCK(ha);
7632 ha->intr_claimed = TRUE;
7633 INTR_UNLOCK(ha);
7634 }
7635
7636 /*
7637 * Call task thread function in case the
7638 * daemon is not running.
7639 */
7640 TASK_DAEMON_LOCK(ha);
7641
7642 if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7643 QL_TASK_PENDING(ha)) {
7644 ha->task_daemon_flags |= TASK_THREAD_CALLED;
7645 ql_task_thread(ha);
7646 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7647 }
7648
7649 TASK_DAEMON_UNLOCK(ha);
7650 }
7651
7652 if (msecs_left < 10) {
7653 rval = QL_FUNCTION_TIMEOUT;
7654 break;
7655 }
7656
		/*
		 * The polling interval is 10 milliseconds. Increasing
		 * it to seconds, since disk I/O timeout values are
		 * ~60 seconds, is tempting enough, but CPR dump time
		 * increases, and so will the crash dump time. Don't
		 * toy with these settings without due consideration
		 * for all the scenarios that will be impacted.
		 */
7666 ql_delay(ha, 10000);
7667 msecs_left -= 10;
7668 }
7669
7670 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7671
7672 return (rval);
7673 }
7674
7675 /*
7676 * ql_next
7677 * Retrieve and process next job in the device queue.
7678 *
7679 * Input:
7680 * ha: adapter state pointer.
7681 * lq: LUN queue pointer.
7682 * DEVICE_QUEUE_LOCK must be already obtained.
7683 *
7684 * Output:
7685 * Releases DEVICE_QUEUE_LOCK upon exit.
7686 *
7687 * Context:
7688 * Interrupt or Kernel context, no mailbox commands allowed.
7689 */
7690 void
7691 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7692 {
7693 ql_srb_t *sp;
7694 ql_link_t *link;
7695 ql_tgt_t *tq = lq->target_queue;
7696 ql_adapter_state_t *ha = vha->pha;
7697
7698 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7699
7700 if (ddi_in_panic()) {
7701 DEVICE_QUEUE_UNLOCK(tq);
7702 QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7703 ha->instance);
7704 return;
7705 }
7706
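	/* Start as many queued commands as the throttles allow. */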
7707 while ((link = lq->cmd.first) != NULL) {
7708 sp = link->base_address;
7709
		/* Exit if commands cannot be started. */
7711 if (DRIVER_SUSPENDED(ha) ||
7712 (ha->flags & ONLINE) == 0 ||
7713 !VALID_DEVICE_ID(ha, tq->loop_id) ||
7714 sp->flags & SRB_ABORT ||
7715 tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7716 TQF_QUEUE_SUSPENDED)) {
7717 EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7718 "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7719 ha->task_daemon_flags, tq->flags, sp->flags,
7720 ha->flags, tq->loop_id);
7721 break;
7722 }
7723
		/*
		 * Find the LUN number for untagged command use. If an
		 * untagged command is pending for the LUN, or the LUN
		 * execution throttle has been reached, do not submit
		 * another command.
		 */
7730 if (sp->flags & SRB_FCP_CMD_PKT) {
7731 if (lq->flags & LQF_UNTAGGED_PENDING ||
7732 lq->lun_outcnt >= ha->execution_throttle) {
7733 QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7734 "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7735 tq->d_id.b24, lq->flags, lq->lun_outcnt);
7736 break;
7737 }
7738 if (sp->fcp->fcp_cntl.cntl_qtype ==
7739 FCP_QTYPE_UNTAGGED) {
7740 /*
7741 * Set the untagged-flag for the LUN
7742 * so that no more untagged commands
7743 * can be submitted for this LUN.
7744 */
7745 lq->flags |= LQF_UNTAGGED_PENDING;
7746 }
7747
7748 /* Count command as sent. */
7749 lq->lun_outcnt++;
7750 }
7751
7752 /* Remove srb from device queue. */
7753 ql_remove_link(&lq->cmd, &sp->cmd);
7754 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7755
7756 tq->outcnt++;
7757
7758 ql_start_iocb(vha, sp);
7759 }
7760
7761 /* Release device queue lock. */
7762 DEVICE_QUEUE_UNLOCK(tq);
7763
7764 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7765 }
7766
7767 /*
7768 * ql_done
7769 * Process completed commands.
7770 *
7771 * Input:
7772 * link: first command link in chain.
7773 *
7774 * Context:
7775 * Interrupt or Kernel context, no mailbox commands allowed.
7776 */
7777 void
7778 ql_done(ql_link_t *link)
7779 {
7780 ql_adapter_state_t *ha;
7781 ql_link_t *next_link;
7782 ql_srb_t *sp;
7783 ql_tgt_t *tq;
7784 ql_lun_t *lq;
7785
7786 QL_PRINT_3(CE_CONT, "started\n");
7787
7788 for (; link != NULL; link = next_link) {
7789 next_link = link->next;
7790 sp = link->base_address;
7791 ha = sp->ha;
7792
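		/*
		 * Unsolicited buffer callback: if the buffer was in
		 * the ISP, credit it back and replenish the ISP
		 * receive buffer queue.
		 */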
7793 if (sp->flags & SRB_UB_CALLBACK) {
7794 QL_UB_LOCK(ha);
7795 if (sp->flags & SRB_UB_IN_ISP) {
7796 if (ha->ub_outcnt != 0) {
7797 ha->ub_outcnt--;
7798 }
7799 QL_UB_UNLOCK(ha);
7800 ql_isp_rcvbuf(ha);
7801 QL_UB_LOCK(ha);
7802 }
7803 QL_UB_UNLOCK(ha);
7804 ql_awaken_task_daemon(ha, sp, 0, 0);
7805 } else {
7806 /* Free outstanding command slot. */
7807 if (sp->handle != 0) {
7808 ha->outstanding_cmds[
7809 sp->handle & OSC_INDEX_MASK] = NULL;
7810 sp->handle = 0;
7811 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7812 }
7813
7814 /* Acquire device queue lock. */
7815 lq = sp->lun_queue;
7816 tq = lq->target_queue;
7817 DEVICE_QUEUE_LOCK(tq);
7818
7819 /* Decrement outstanding commands on device. */
7820 if (tq->outcnt != 0) {
7821 tq->outcnt--;
7822 }
7823
7824 if (sp->flags & SRB_FCP_CMD_PKT) {
7825 if (sp->fcp->fcp_cntl.cntl_qtype ==
7826 FCP_QTYPE_UNTAGGED) {
7827 /*
7828 * Clear the flag for this LUN so that
7829 * untagged commands can be submitted
7830 * for it.
7831 */
7832 lq->flags &= ~LQF_UNTAGGED_PENDING;
7833 }
7834
7835 if (lq->lun_outcnt != 0) {
7836 lq->lun_outcnt--;
7837 }
7838 }
7839
7840 /* Reset port down retry count on good completion. */
7841 if (sp->pkt->pkt_reason == CS_COMPLETE) {
7842 tq->port_down_retry_count =
7843 ha->port_down_retry_count;
7844 tq->qfull_retry_count = ha->qfull_retry_count;
7845 }
7846
7847
7848 /* Alter aborted status for fast timeout feature */
7849 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7850 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7851 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7852 sp->flags & SRB_RETRY &&
7853 (sp->flags & SRB_WATCHDOG_ENABLED &&
7854 sp->wdg_q_time > 1)) {
7855 EL(ha, "fast abort modify change\n");
7856 sp->flags &= ~(SRB_RETRY);
7857 sp->pkt->pkt_reason = CS_TIMEOUT;
7858 }
7859
7860 /* Place request back on top of target command queue */
7861 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7862 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7863 sp->flags & SRB_RETRY &&
7864 (sp->flags & SRB_WATCHDOG_ENABLED &&
7865 sp->wdg_q_time > 1)) {
7866 sp->flags &= ~(SRB_ISP_STARTED |
7867 SRB_ISP_COMPLETED | SRB_RETRY);
7868
7869 /* Reset watchdog timer */
7870 sp->wdg_q_time = sp->init_wdg_q_time;
7871
7872 /* Issue marker command on reset status. */
7873 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7874 (sp->pkt->pkt_reason == CS_RESET ||
7875 (CFG_IST(ha, CFG_CTRL_24258081) &&
7876 sp->pkt->pkt_reason == CS_ABORTED))) {
7877 (void) ql_marker(ha, tq->loop_id, 0,
7878 MK_SYNC_ID);
7879 }
7880
7881 ql_add_link_t(&lq->cmd, &sp->cmd);
7882 sp->flags |= SRB_IN_DEVICE_QUEUE;
7883 ql_next(ha, lq);
7884 } else {
7885 /* Remove command from watchdog queue. */
7886 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7887 ql_remove_link(&tq->wdg, &sp->wdg);
7888 sp->flags &= ~SRB_WATCHDOG_ENABLED;
7889 }
7890
7891 if (lq->cmd.first != NULL) {
7892 ql_next(ha, lq);
7893 } else {
7894 /* Release LU queue specific lock. */
7895 DEVICE_QUEUE_UNLOCK(tq);
7896 if (ha->pha->pending_cmds.first !=
7897 NULL) {
7898 ql_start_iocb(ha, NULL);
7899 }
7900 }
7901
7902 /* Sync buffers if required. */
7903 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7904 (void) ddi_dma_sync(
7905 sp->pkt->pkt_resp_dma,
7906 0, 0, DDI_DMA_SYNC_FORCPU);
7907 }
7908
7909 /* Map ISP completion codes. */
7910 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7911 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7912 switch (sp->pkt->pkt_reason) {
7913 case CS_COMPLETE:
7914 sp->pkt->pkt_state = FC_PKT_SUCCESS;
7915 break;
7916 case CS_RESET:
7917 /* Issue marker command. */
7918 if (!(ha->task_daemon_flags &
7919 LOOP_DOWN)) {
7920 (void) ql_marker(ha,
7921 tq->loop_id, 0,
7922 MK_SYNC_ID);
7923 }
7924 sp->pkt->pkt_state =
7925 FC_PKT_PORT_OFFLINE;
7926 sp->pkt->pkt_reason =
7927 FC_REASON_ABORTED;
7928 break;
7929 case CS_RESOUCE_UNAVAILABLE:
7930 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7931 sp->pkt->pkt_reason =
7932 FC_REASON_PKT_BUSY;
7933 break;
7934
7935 case CS_TIMEOUT:
7936 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7937 sp->pkt->pkt_reason =
7938 FC_REASON_HW_ERROR;
7939 break;
7940 case CS_DATA_OVERRUN:
7941 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7942 sp->pkt->pkt_reason =
7943 FC_REASON_OVERRUN;
7944 break;
7945 case CS_PORT_UNAVAILABLE:
7946 case CS_PORT_LOGGED_OUT:
7947 sp->pkt->pkt_state =
7948 FC_PKT_PORT_OFFLINE;
7949 sp->pkt->pkt_reason =
7950 FC_REASON_LOGIN_REQUIRED;
7951 ql_send_logo(ha, tq, NULL);
7952 break;
7953 case CS_PORT_CONFIG_CHG:
7954 sp->pkt->pkt_state =
7955 FC_PKT_PORT_OFFLINE;
7956 sp->pkt->pkt_reason =
7957 FC_REASON_OFFLINE;
7958 break;
7959 case CS_QUEUE_FULL:
7960 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7961 sp->pkt->pkt_reason = FC_REASON_QFULL;
7962 break;
7963
7964 case CS_ABORTED:
7965 DEVICE_QUEUE_LOCK(tq);
7966 if (tq->flags & (TQF_RSCN_RCVD |
7967 TQF_NEED_AUTHENTICATION)) {
7968 sp->pkt->pkt_state =
7969 FC_PKT_PORT_OFFLINE;
7970 sp->pkt->pkt_reason =
7971 FC_REASON_LOGIN_REQUIRED;
7972 } else {
7973 sp->pkt->pkt_state =
7974 FC_PKT_LOCAL_RJT;
7975 sp->pkt->pkt_reason =
7976 FC_REASON_ABORTED;
7977 }
7978 DEVICE_QUEUE_UNLOCK(tq);
7979 break;
7980
7981 case CS_TRANSPORT:
7982 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7983 sp->pkt->pkt_reason =
7984 FC_PKT_TRAN_ERROR;
7985 break;
7986
7987 case CS_DATA_UNDERRUN:
7988 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7989 sp->pkt->pkt_reason =
7990 FC_REASON_UNDERRUN;
7991 break;
7992 case CS_DMA_ERROR:
7993 case CS_BAD_PAYLOAD:
7994 case CS_UNKNOWN:
7995 case CS_CMD_FAILED:
7996 default:
7997 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7998 sp->pkt->pkt_reason =
7999 FC_REASON_HW_ERROR;
8000 break;
8001 }
8002
8003 /* Now call the pkt completion callback */
8004 if (sp->flags & SRB_POLL) {
8005 sp->flags &= ~SRB_POLL;
8006 } else if (sp->pkt->pkt_comp) {
8007 if (sp->pkt->pkt_tran_flags &
8008 FC_TRAN_IMMEDIATE_CB) {
8009 (*sp->pkt->pkt_comp)(sp->pkt);
8010 } else {
8011 ql_awaken_task_daemon(ha, sp,
8012 0, 0);
8013 }
8014 }
8015 }
8016 }
8017 }
8018
8019 QL_PRINT_3(CE_CONT, "done\n");
8020 }
8021
8022 /*
8023 * ql_awaken_task_daemon
8024 * Adds command completion callback to callback queue and/or
8025 * awakens task daemon thread.
8026 *
8027 * Input:
8028 * ha: adapter state pointer.
8029 * sp: srb pointer.
8030 * set_flags: task daemon flags to set.
8031 * reset_flags: task daemon flags to reset.
8032 *
8033 * Context:
8034 * Interrupt or Kernel context, no mailbox commands allowed.
8035 */
8036 void
8037 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8038 uint32_t set_flags, uint32_t reset_flags)
8039 {
8040 ql_adapter_state_t *ha = vha->pha;
8041
8042 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8043
8044 /* Acquire task daemon lock. */
8045 TASK_DAEMON_LOCK(ha);
8046
8047 if (set_flags & ISP_ABORT_NEEDED) {
8048 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8049 set_flags &= ~ISP_ABORT_NEEDED;
8050 }
8051 }
8052
8053 ha->task_daemon_flags |= set_flags;
8054 ha->task_daemon_flags &= ~reset_flags;
8055
8056 if (QL_DAEMON_SUSPENDED(ha)) {
8057 if (sp != NULL) {
8058 TASK_DAEMON_UNLOCK(ha);
8059
8060 /* Do callback. */
8061 if (sp->flags & SRB_UB_CALLBACK) {
8062 ql_unsol_callback(sp);
8063 } else {
8064 (*sp->pkt->pkt_comp)(sp->pkt);
8065 }
8066 } else {
8067 if (!(curthread->t_flag & T_INTR_THREAD) &&
8068 !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8069 ha->task_daemon_flags |= TASK_THREAD_CALLED;
8070 ql_task_thread(ha);
8071 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8072 }
8073
8074 TASK_DAEMON_UNLOCK(ha);
8075 }
8076 } else {
8077 if (sp != NULL) {
8078 ql_add_link_b(&ha->callback_queue, &sp->cmd);
8079 }
8080
8081 if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8082 cv_broadcast(&ha->cv_task_daemon);
8083 }
8084 TASK_DAEMON_UNLOCK(ha);
8085 }
8086
8087 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8088 }
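/*
 * For orientation, a sketch of how completions flow through
 * ql_awaken_task_daemon() (illustrative only; this is the calling
 * pattern used throughout this file, not a new interface):
 *
 *	sp->pkt->pkt_reason = CS_COMPLETE;
 *	ql_awaken_task_daemon(ha, sp, 0, 0);
 *
 * If the daemon is suspended the callback is invoked inline in the
 * caller's context; otherwise the srb is appended to
 * ha->callback_queue and the daemon is awakened to deliver it.
 * Passing sp == NULL, as the one second timer does, just updates
 * task_daemon_flags and wakes the daemon.
 */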
8089
8090 /*
8091 * ql_task_daemon
8092  *	Thread that is awakened by the driver when
8093  *	background processing needs to be done.
8094 *
8095 * Input:
8096 * arg = adapter state pointer.
8097 *
8098 * Context:
8099 * Kernel context.
8100 */
8101 static void
8102 ql_task_daemon(void *arg)
8103 {
8104 ql_adapter_state_t *ha = (void *)arg;
8105
8106 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8107
8108 CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8109 "ql_task_daemon");
8110
8111 /* Acquire task daemon lock. */
8112 TASK_DAEMON_LOCK(ha);
8113
8114 ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8115
8116 while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8117 ql_task_thread(ha);
8118
8119 QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8120
8121 /*
8122 * Before we wait on the conditional variable, we
8123 * need to check if STOP_FLG is set for us to terminate
8124 */
8125 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8126 break;
8127 }
8128
8129 /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8130 CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8131
8132 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8133
8134 /* If killed, stop task daemon */
8135 if (cv_wait_sig(&ha->cv_task_daemon,
8136 &ha->task_daemon_mutex) == 0) {
8137 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8138 }
8139
8140 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8141
8142 /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8143 CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8144
8145 QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8146 }
8147
8148 ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8149 TASK_DAEMON_ALIVE_FLG);
8150
8151 /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8152 CALLB_CPR_EXIT(&ha->cprinfo);
8153
8154 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8155
8156 thread_exit();
8157 }
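/*
 * The loop above follows the standard Solaris CPR (checkpoint/
 * resume) daemon pattern from <sys/callb.h>. A minimal sketch of
 * the same protocol, assuming a mutex "lk" and condvar "cv":
 *
 *	CALLB_CPR_INIT(&cprinfo, &lk, callb_generic_cpr, "name");
 *	mutex_enter(&lk);
 *	while (!stop) {
 *		CALLB_CPR_SAFE_BEGIN(&cprinfo);
 *		(void) cv_wait_sig(&cv, &lk);
 *		CALLB_CPR_SAFE_END(&cprinfo, &lk);
 *	}
 *	CALLB_CPR_EXIT(&cprinfo);
 *
 * Note that CALLB_CPR_EXIT() releases the mutex itself, which is
 * why ql_task_daemon() never calls TASK_DAEMON_UNLOCK() on the way
 * out to thread_exit().
 */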
8158
8159 /*
8160 * ql_task_thread
8161  *	Work function run by the task daemon thread.
8162 *
8163 * Input:
8164 * ha = adapter state pointer.
8165 * TASK_DAEMON_LOCK must be acquired prior to call.
8166 *
8167 * Context:
8168 * Kernel context.
8169 */
8170 static void
8171 ql_task_thread(ql_adapter_state_t *ha)
8172 {
8173 int loop_again;
8174 ql_srb_t *sp;
8175 ql_head_t *head;
8176 ql_link_t *link;
8177 caddr_t msg;
8178 ql_adapter_state_t *vha;
8179
8180 do {
8181 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8182 ha->instance, ha->task_daemon_flags);
8183
8184 loop_again = FALSE;
8185
8186 QL_PM_LOCK(ha);
8187 if (ha->power_level != PM_LEVEL_D0) {
8188 QL_PM_UNLOCK(ha);
8189 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8190 break;
8191 }
8192 QL_PM_UNLOCK(ha);
8193
8194 /* IDC event. */
8195 if (ha->task_daemon_flags & IDC_EVENT) {
8196 ha->task_daemon_flags &= ~IDC_EVENT;
8197 TASK_DAEMON_UNLOCK(ha);
8198 ql_process_idc_event(ha);
8199 TASK_DAEMON_LOCK(ha);
8200 loop_again = TRUE;
8201 }
8202
8203 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8204 (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8205 (ha->flags & ONLINE) == 0) {
8206 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8207 break;
8208 }
8209 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8210
8211 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8212 TASK_DAEMON_UNLOCK(ha);
8213 if (ha->log_parity_pause == B_TRUE) {
8214 (void) ql_flash_errlog(ha,
8215 FLASH_ERRLOG_PARITY_ERR, 0,
8216 MSW(ha->parity_stat_err),
8217 LSW(ha->parity_stat_err));
8218 ha->log_parity_pause = B_FALSE;
8219 }
8220 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8221 TASK_DAEMON_LOCK(ha);
8222 loop_again = TRUE;
8223 }
8224
8225 /* Idle Check. */
8226 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8227 ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8228 if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8229 TASK_DAEMON_UNLOCK(ha);
8230 ql_idle_check(ha);
8231 TASK_DAEMON_LOCK(ha);
8232 loop_again = TRUE;
8233 }
8234 }
8235
8236 /* Crystal+ port#0 bypass transition */
8237 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8238 ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8239 TASK_DAEMON_UNLOCK(ha);
8240 (void) ql_initiate_lip(ha);
8241 TASK_DAEMON_LOCK(ha);
8242 loop_again = TRUE;
8243 }
8244
8245 /* Abort queues needed. */
8246 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8247 ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8248 TASK_DAEMON_UNLOCK(ha);
8249 ql_abort_queues(ha);
8250 TASK_DAEMON_LOCK(ha);
8251 }
8252
8253 /* Not suspended, awaken waiting routines. */
8254 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8255 ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8256 ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8257 cv_broadcast(&ha->cv_dr_suspended);
8258 loop_again = TRUE;
8259 }
8260
8261 /* Handle RSCN changes. */
8262 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8263 if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8264 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8265 TASK_DAEMON_UNLOCK(ha);
8266 (void) ql_handle_rscn_update(vha);
8267 TASK_DAEMON_LOCK(ha);
8268 loop_again = TRUE;
8269 }
8270 }
8271
8272 /* Handle state changes. */
8273 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8274 if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8275 !(ha->task_daemon_flags &
8276 TASK_DAEMON_POWERING_DOWN)) {
8277 /* Report state change. */
8278 EL(vha, "state change = %xh\n", vha->state);
8279 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8280
8281 if (vha->task_daemon_flags &
8282 COMMAND_WAIT_NEEDED) {
8283 vha->task_daemon_flags &=
8284 ~COMMAND_WAIT_NEEDED;
8285 if (!(ha->task_daemon_flags &
8286 COMMAND_WAIT_ACTIVE)) {
8287 ha->task_daemon_flags |=
8288 COMMAND_WAIT_ACTIVE;
8289 TASK_DAEMON_UNLOCK(ha);
8290 ql_cmd_wait(ha);
8291 TASK_DAEMON_LOCK(ha);
8292 ha->task_daemon_flags &=
8293 ~COMMAND_WAIT_ACTIVE;
8294 }
8295 }
8296
8297 msg = NULL;
8298 if (FC_PORT_STATE_MASK(vha->state) ==
8299 FC_STATE_OFFLINE) {
8300 if (vha->task_daemon_flags &
8301 STATE_ONLINE) {
8302 if (ha->topology &
8303 QL_LOOP_CONNECTION) {
8304 msg = "Loop OFFLINE";
8305 } else {
8306 msg = "Link OFFLINE";
8307 }
8308 }
8309 vha->task_daemon_flags &=
8310 ~STATE_ONLINE;
8311 } else if (FC_PORT_STATE_MASK(vha->state) ==
8312 FC_STATE_LOOP) {
8313 if (!(vha->task_daemon_flags &
8314 STATE_ONLINE)) {
8315 msg = "Loop ONLINE";
8316 }
8317 vha->task_daemon_flags |= STATE_ONLINE;
8318 } else if (FC_PORT_STATE_MASK(vha->state) ==
8319 FC_STATE_ONLINE) {
8320 if (!(vha->task_daemon_flags &
8321 STATE_ONLINE)) {
8322 msg = "Link ONLINE";
8323 }
8324 vha->task_daemon_flags |= STATE_ONLINE;
8325 } else {
8326 msg = "Unknown Link state";
8327 }
8328
8329 if (msg != NULL) {
8330 cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8331 "%s", QL_NAME, ha->instance,
8332 vha->vp_index, msg);
8333 }
8334
8335 if (vha->flags & FCA_BOUND) {
8336 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8337 "cb state=%xh\n", ha->instance,
8338 vha->vp_index, vha->state);
8339 TASK_DAEMON_UNLOCK(ha);
8340 (vha->bind_info.port_statec_cb)
8341 (vha->bind_info.port_handle,
8342 vha->state);
8343 TASK_DAEMON_LOCK(ha);
8344 }
8345 loop_again = TRUE;
8346 }
8347 }
8348
8349 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8350 !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8351 EL(ha, "processing LIP reset\n");
8352 ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8353 TASK_DAEMON_UNLOCK(ha);
8354 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8355 if (vha->flags & FCA_BOUND) {
8356 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8357 "cb reset\n", ha->instance,
8358 vha->vp_index);
8359 (vha->bind_info.port_statec_cb)
8360 (vha->bind_info.port_handle,
8361 FC_STATE_TARGET_PORT_RESET);
8362 }
8363 }
8364 TASK_DAEMON_LOCK(ha);
8365 loop_again = TRUE;
8366 }
8367
8368 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8369 FIRMWARE_UP)) {
8370 /*
8371 			 * The firmware needs more unsolicited
8372 			 * buffers. We cannot allocate any new
8373 			 * buffers unless the ULP module requests
8374 			 * them. All we can do here is hand back
8375 			 * received buffers from the pool that is
8376 			 * already allocated.
8377 */
8378 ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8379 TASK_DAEMON_UNLOCK(ha);
8380 ql_isp_rcvbuf(ha);
8381 TASK_DAEMON_LOCK(ha);
8382 loop_again = TRUE;
8383 }
8384
8385 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8386 TASK_DAEMON_UNLOCK(ha);
8387 (void) ql_abort_isp(ha);
8388 TASK_DAEMON_LOCK(ha);
8389 loop_again = TRUE;
8390 }
8391
8392 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8393 COMMAND_WAIT_NEEDED))) {
8394 if (QL_IS_SET(ha->task_daemon_flags,
8395 RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8396 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8397 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8398 ha->task_daemon_flags |= RESET_ACTIVE;
8399 TASK_DAEMON_UNLOCK(ha);
8400 for (vha = ha; vha != NULL;
8401 vha = vha->vp_next) {
8402 ql_rst_aen(vha);
8403 }
8404 TASK_DAEMON_LOCK(ha);
8405 ha->task_daemon_flags &= ~RESET_ACTIVE;
8406 loop_again = TRUE;
8407 }
8408 }
8409
8410 if (QL_IS_SET(ha->task_daemon_flags,
8411 LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8412 if (!(ha->task_daemon_flags &
8413 LOOP_RESYNC_ACTIVE)) {
8414 ha->task_daemon_flags |=
8415 LOOP_RESYNC_ACTIVE;
8416 TASK_DAEMON_UNLOCK(ha);
8417 (void) ql_loop_resync(ha);
8418 TASK_DAEMON_LOCK(ha);
8419 loop_again = TRUE;
8420 }
8421 }
8422 }
8423
8424 /* Port retry needed. */
8425 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8426 ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8427 ADAPTER_STATE_LOCK(ha);
8428 ha->port_retry_timer = 0;
8429 ADAPTER_STATE_UNLOCK(ha);
8430
8431 TASK_DAEMON_UNLOCK(ha);
8432 ql_restart_queues(ha);
8433 TASK_DAEMON_LOCK(ha);
8434 loop_again = B_TRUE;
8435 }
8436
8437 /* iiDMA setting needed? */
8438 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8439 ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8440
8441 TASK_DAEMON_UNLOCK(ha);
8442 ql_iidma(ha);
8443 TASK_DAEMON_LOCK(ha);
8444 loop_again = B_TRUE;
8445 }
8446
8447 if (ha->task_daemon_flags & SEND_PLOGI) {
8448 ha->task_daemon_flags &= ~SEND_PLOGI;
8449 TASK_DAEMON_UNLOCK(ha);
8450 (void) ql_n_port_plogi(ha);
8451 TASK_DAEMON_LOCK(ha);
8452 }
8453
8454 head = &ha->callback_queue;
8455 if (head->first != NULL) {
8456 sp = head->first->base_address;
8457 link = &sp->cmd;
8458
8459 /* Dequeue command. */
8460 ql_remove_link(head, link);
8461
8462 /* Release task daemon lock. */
8463 TASK_DAEMON_UNLOCK(ha);
8464
8465 /* Do callback. */
8466 if (sp->flags & SRB_UB_CALLBACK) {
8467 ql_unsol_callback(sp);
8468 } else {
8469 (*sp->pkt->pkt_comp)(sp->pkt);
8470 }
8471
8472 /* Acquire task daemon lock. */
8473 TASK_DAEMON_LOCK(ha);
8474
8475 loop_again = TRUE;
8476 }
8477
8478 } while (loop_again);
8479 }
8480
8481 /*
8482 * ql_idle_check
8483  *	Tests that the adapter is alive and well.
8484 *
8485 * Input:
8486 * ha: adapter state pointer.
8487 *
8488 * Context:
8489 * Kernel context.
8490 */
8491 static void
8492 ql_idle_check(ql_adapter_state_t *ha)
8493 {
8494 ddi_devstate_t state;
8495 int rval;
8496 ql_mbx_data_t mr;
8497
8498 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8499
8500 /* Firmware Ready Test. */
8501 rval = ql_get_firmware_state(ha, &mr);
8502 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8503 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8504 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8505 state = ddi_get_devstate(ha->dip);
8506 if (state == DDI_DEVSTATE_UP) {
8507 /*EMPTY*/
8508 ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8509 DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8510 }
8511 TASK_DAEMON_LOCK(ha);
8512 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8513 EL(ha, "fstate_ready, isp_abort_needed\n");
8514 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8515 }
8516 TASK_DAEMON_UNLOCK(ha);
8517 }
8518
8519 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8520 }
8521
8522 /*
8523 * ql_unsol_callback
8524 * Handle unsolicited buffer callbacks.
8525 *
8526 * Input:
8527  *	sp = srb pointer (the adapter state is taken
8528  *	     from sp->ha).
8529 *
8530 * Context:
8531 * Kernel context.
8532 */
8533 static void
8534 ql_unsol_callback(ql_srb_t *sp)
8535 {
8536 fc_affected_id_t *af;
8537 fc_unsol_buf_t *ubp;
8538 uchar_t r_ctl;
8539 uchar_t ls_code;
8540 ql_tgt_t *tq;
8541 ql_adapter_state_t *ha = sp->ha, *pha = sp->ha->pha;
8542
8543 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8544
8545 ubp = ha->ub_array[sp->handle];
8546 r_ctl = ubp->ub_frame.r_ctl;
8547 ls_code = ubp->ub_buffer[0];
8548
8549 if (sp->lun_queue == NULL) {
8550 tq = NULL;
8551 } else {
8552 tq = sp->lun_queue->target_queue;
8553 }
8554
8555 QL_UB_LOCK(ha);
8556 if (sp->flags & SRB_UB_FREE_REQUESTED ||
8557 pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8558 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8559 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8560 sp->flags |= SRB_UB_IN_FCA;
8561 QL_UB_UNLOCK(ha);
8562 return;
8563 }
8564
8565 /* Process RSCN */
8566 if (sp->flags & SRB_UB_RSCN) {
8567 int sendup = 1;
8568
8569 /*
8570 * Defer RSCN posting until commands return
8571 */
8572 QL_UB_UNLOCK(ha);
8573
8574 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8575
8576 /* Abort outstanding commands */
8577 sendup = ql_process_rscn(ha, af);
8578 if (sendup == 0) {
8579
8580 TASK_DAEMON_LOCK(ha);
8581 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8582 TASK_DAEMON_UNLOCK(ha);
8583
8584 /*
8585 * Wait for commands to drain in F/W (doesn't take
8586 * more than a few milliseconds)
8587 */
8588 ql_delay(ha, 10000);
8589
8590 QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8591 "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8592 af->aff_format, af->aff_d_id);
8593 return;
8594 }
8595
8596 QL_UB_LOCK(ha);
8597
8598 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8599 af->aff_format, af->aff_d_id);
8600 }
8601
8602 /* Process UNSOL LOGO */
8603 if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8604 QL_UB_UNLOCK(ha);
8605
8606 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8607 TASK_DAEMON_LOCK(ha);
8608 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8609 TASK_DAEMON_UNLOCK(ha);
8610 QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8611 "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8612 return;
8613 }
8614
8615 QL_UB_LOCK(ha);
8616 EL(ha, "sending unsol logout for %xh to transport\n",
8617 ubp->ub_frame.s_id);
8618 }
8619
8620 sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8621 SRB_UB_FCP);
8622
8623 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8624 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8625 ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8626 }
8627 QL_UB_UNLOCK(ha);
8628
8629 (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8630 ubp, sp->ub_type);
8631
8632 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8633 }
8634
8635 /*
8636 * ql_send_logo
8637  *	Passes an emulated received LOGO up to the transport.
8638 * Input:
8639 * ha: adapter state pointer.
8640 * tq: target queue pointer.
8641 * done_q: done queue pointer.
8642 *
8643 * Context:
8644 * Interrupt or Kernel context, no mailbox commands allowed.
8645 */
8646 void
8647 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8648 {
8649 fc_unsol_buf_t *ubp;
8650 ql_srb_t *sp;
8651 la_els_logo_t *payload;
8652 ql_adapter_state_t *ha = vha->pha;
8653
8654 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8655 tq->d_id.b24);
8656
8657 if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8658 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8659 return;
8660 }
8661
8662 if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8663 tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8664
8665 /* Locate a buffer to use. */
8666 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8667 if (ubp == NULL) {
8668 EL(vha, "Failed, get_unsolicited_buffer\n");
8669 return;
8670 }
8671
8672 DEVICE_QUEUE_LOCK(tq);
8673 tq->flags |= TQF_NEED_AUTHENTICATION;
8674 tq->logout_sent++;
8675 DEVICE_QUEUE_UNLOCK(tq);
8676
8677 		EL(vha, "Emulating received LOGO from = %xh\n", tq->d_id.b24);
8678
8679 sp = ubp->ub_fca_private;
8680
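		/*
		 * The header below is built so the LOGO appears to
		 * have come *from* the target, so the usual roles of
		 * d_id and s_id are inverted. Illustratively, for a
		 * target at 0x0a0100 seen by a port at 0x0a0200
		 * (both addresses made up):
		 *
		 *	ubp->ub_frame.d_id = 0x0a0200	this port
		 *	ubp->ub_frame.s_id = 0x0a0100	the target
		 */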
8681 /* Set header. */
8682 ubp->ub_frame.d_id = vha->d_id.b24;
8683 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8684 ubp->ub_frame.s_id = tq->d_id.b24;
8685 ubp->ub_frame.rsvd = 0;
8686 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8687 F_CTL_SEQ_INITIATIVE;
8688 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8689 ubp->ub_frame.seq_cnt = 0;
8690 ubp->ub_frame.df_ctl = 0;
8691 ubp->ub_frame.seq_id = 0;
8692 ubp->ub_frame.rx_id = 0xffff;
8693 ubp->ub_frame.ox_id = 0xffff;
8694
8695 /* set payload. */
8696 payload = (la_els_logo_t *)ubp->ub_buffer;
8697 bzero(payload, sizeof (la_els_logo_t));
8698 /* Make sure ls_code in payload is always big endian */
8699 ubp->ub_buffer[0] = LA_ELS_LOGO;
8700 ubp->ub_buffer[1] = 0;
8701 ubp->ub_buffer[2] = 0;
8702 ubp->ub_buffer[3] = 0;
8703 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8704 &payload->nport_ww_name.raw_wwn[0], 8);
8705 payload->nport_id.port_id = tq->d_id.b24;
8706
8707 QL_UB_LOCK(ha);
8708 sp->flags |= SRB_UB_CALLBACK;
8709 QL_UB_UNLOCK(ha);
8710 if (tq->lun_queues.first != NULL) {
8711 sp->lun_queue = (tq->lun_queues.first)->base_address;
8712 } else {
8713 sp->lun_queue = ql_lun_queue(vha, tq, 0);
8714 }
8715 if (done_q) {
8716 ql_add_link_b(done_q, &sp->cmd);
8717 } else {
8718 ql_awaken_task_daemon(ha, sp, 0, 0);
8719 }
8720 }
8721
8722 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8723 }
8724
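/*
 * ql_process_logo_for_device
 *	Determines whether a received LOGO may be sent up to the
 *	transport immediately.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *
 * Returns:
 *	0 = defer; the target still has outstanding commands or
 *	    commands waiting on the callback queue.
 *	1 = send the LOGO up now.
 *
 * Context:
 *	Kernel context.
 */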
8725 static int
8726 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8727 {
8728 port_id_t d_id;
8729 ql_srb_t *sp;
8730 ql_link_t *link;
8731 int sendup = 1;
8732
8733 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8734
8735 DEVICE_QUEUE_LOCK(tq);
8736 if (tq->outcnt) {
8737 DEVICE_QUEUE_UNLOCK(tq);
8738 sendup = 0;
8739 (void) ql_abort_device(ha, tq, 1);
8740 ql_delay(ha, 10000);
8741 } else {
8742 DEVICE_QUEUE_UNLOCK(tq);
8743 TASK_DAEMON_LOCK(ha);
8744
8745 for (link = ha->pha->callback_queue.first; link != NULL;
8746 link = link->next) {
8747 sp = link->base_address;
8748 if (sp->flags & SRB_UB_CALLBACK) {
8749 continue;
8750 }
8751 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8752
8753 if (tq->d_id.b24 == d_id.b24) {
8754 sendup = 0;
8755 break;
8756 }
8757 }
8758
8759 TASK_DAEMON_UNLOCK(ha);
8760 }
8761
8762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8763
8764 return (sendup);
8765 }
8766
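/*
 * ql_send_plogi
 *	Builds an emulated PLOGI from the given target, using the
 *	service parameters saved in the target queue, and passes it
 *	up to the transport as an unsolicited buffer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	done_q:	done queue pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */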
8767 static int
8768 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8769 {
8770 fc_unsol_buf_t *ubp;
8771 ql_srb_t *sp;
8772 la_els_logi_t *payload;
8773 class_svc_param_t *class3_param;
8774
8775 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8776
8777 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8778 LOOP_DOWN)) {
8779 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8780 return (QL_FUNCTION_FAILED);
8781 }
8782
8783 /* Locate a buffer to use. */
8784 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8785 if (ubp == NULL) {
8786 EL(ha, "Failed\n");
8787 return (QL_FUNCTION_FAILED);
8788 }
8789
8790 	QL_PRINT_3(CE_CONT, "(%d): emulating PLOGI from = %xh\n",
8791 	    ha->instance, tq->d_id.b24);
8792
8793 	EL(ha, "Emulate PLOGI from = %xh tq = %ph\n", tq->d_id.b24, (void *)tq);
8794
8795 sp = ubp->ub_fca_private;
8796
8797 /* Set header. */
8798 ubp->ub_frame.d_id = ha->d_id.b24;
8799 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8800 ubp->ub_frame.s_id = tq->d_id.b24;
8801 ubp->ub_frame.rsvd = 0;
8802 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8803 F_CTL_SEQ_INITIATIVE;
8804 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8805 ubp->ub_frame.seq_cnt = 0;
8806 ubp->ub_frame.df_ctl = 0;
8807 ubp->ub_frame.seq_id = 0;
8808 ubp->ub_frame.rx_id = 0xffff;
8809 ubp->ub_frame.ox_id = 0xffff;
8810
8811 /* set payload. */
8812 payload = (la_els_logi_t *)ubp->ub_buffer;
8813 	bzero(payload, sizeof (la_els_logi_t));
8814
8815 payload->ls_code.ls_code = LA_ELS_PLOGI;
8816 payload->common_service.fcph_version = 0x2006;
8817 payload->common_service.cmn_features = 0x8800;
8818
8819 CFG_IST(ha, CFG_CTRL_24258081) ?
8820 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8821 ha->init_ctrl_blk.cb24.max_frame_length[0],
8822 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8823 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8824 ha->init_ctrl_blk.cb.max_frame_length[0],
8825 ha->init_ctrl_blk.cb.max_frame_length[1]));
8826
8827 payload->common_service.conc_sequences = 0xff;
8828 payload->common_service.relative_offset = 0x03;
8829 payload->common_service.e_d_tov = 0x7d0;
8830
8831 bcopy((void *)&tq->port_name[0],
8832 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8833
8834 bcopy((void *)&tq->node_name[0],
8835 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8836
8837 class3_param = (class_svc_param_t *)&payload->class_3;
8838 class3_param->class_valid_svc_opt = 0x8000;
8839 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8840 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8841 class3_param->conc_sequences = tq->class3_conc_sequences;
8842 class3_param->open_sequences_per_exch =
8843 tq->class3_open_sequences_per_exch;
8844
8845 QL_UB_LOCK(ha);
8846 sp->flags |= SRB_UB_CALLBACK;
8847 QL_UB_UNLOCK(ha);
8848
8849 ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8850
8851 if (done_q) {
8852 ql_add_link_b(done_q, &sp->cmd);
8853 } else {
8854 ql_awaken_task_daemon(ha, sp, 0, 0);
8855 }
8856
8857 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8858
8859 return (QL_SUCCESS);
8860 }
8861
8862 /*
8863  * Abort outstanding commands in the firmware, clear internally
8864  * queued commands in the driver, and synchronize the target
8865  * with the firmware.
8866 */
8867 int
8868 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8869 {
8870 ql_link_t *link, *link2;
8871 ql_lun_t *lq;
8872 int rval = QL_SUCCESS;
8873 ql_srb_t *sp;
8874 ql_head_t done_q = { NULL, NULL };
8875
8876 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8877
8878 /*
8879 	 * First, clear internally queued commands.
8880 */
8881 DEVICE_QUEUE_LOCK(tq);
8882 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8883 lq = link->base_address;
8884
8885 link2 = lq->cmd.first;
8886 while (link2 != NULL) {
8887 sp = link2->base_address;
8888 link2 = link2->next;
8889
8890 if (sp->flags & SRB_ABORT) {
8891 continue;
8892 }
8893
8894 /* Remove srb from device command queue. */
8895 ql_remove_link(&lq->cmd, &sp->cmd);
8896 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8897
8898 /* Set ending status. */
8899 sp->pkt->pkt_reason = CS_ABORTED;
8900
8901 /* Call done routine to handle completions. */
8902 ql_add_link_b(&done_q, &sp->cmd);
8903 }
8904 }
8905 DEVICE_QUEUE_UNLOCK(tq);
8906
8907 if (done_q.first != NULL) {
8908 ql_done(done_q.first);
8909 }
8910
8911 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8912 rval = ql_abort_target(ha, tq, 0);
8913 }
8914
8915 if (rval != QL_SUCCESS) {
8916 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8917 } else {
8918 /*EMPTY*/
8919 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8920 ha->vp_index);
8921 }
8922
8923 return (rval);
8924 }
8925
8926 /*
8927 * ql_rcv_rscn_els
8928 * Processes received RSCN extended link service.
8929 *
8930 * Input:
8931 * ha: adapter state pointer.
8932 * mb: array containing input mailbox registers.
8933 * done_q: done queue pointer.
8934 *
8935 * Context:
8936 * Interrupt or Kernel context, no mailbox commands allowed.
8937 */
8938 void
8939 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8940 {
8941 fc_unsol_buf_t *ubp;
8942 ql_srb_t *sp;
8943 fc_rscn_t *rn;
8944 fc_affected_id_t *af;
8945 port_id_t d_id;
8946
8947 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8948
8949 /* Locate a buffer to use. */
8950 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8951 if (ubp != NULL) {
8952 sp = ubp->ub_fca_private;
8953
8954 /* Set header. */
8955 ubp->ub_frame.d_id = ha->d_id.b24;
8956 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8957 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8958 ubp->ub_frame.rsvd = 0;
8959 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8960 F_CTL_SEQ_INITIATIVE;
8961 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8962 ubp->ub_frame.seq_cnt = 0;
8963 ubp->ub_frame.df_ctl = 0;
8964 ubp->ub_frame.seq_id = 0;
8965 ubp->ub_frame.rx_id = 0xffff;
8966 ubp->ub_frame.ox_id = 0xffff;
8967
8968 /* set payload. */
8969 rn = (fc_rscn_t *)ubp->ub_buffer;
8970 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8971
8972 rn->rscn_code = LA_ELS_RSCN;
8973 rn->rscn_len = 4;
8974 rn->rscn_payload_len = 8;
8975 d_id.b.al_pa = LSB(mb[2]);
8976 d_id.b.area = MSB(mb[2]);
8977 d_id.b.domain = LSB(mb[1]);
8978 af->aff_d_id = d_id.b24;
8979 af->aff_format = MSB(mb[1]);
8980
8981 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8982 af->aff_d_id);
8983
8984 ql_update_rscn(ha, af);
8985
8986 QL_UB_LOCK(ha);
8987 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8988 QL_UB_UNLOCK(ha);
8989 ql_add_link_b(done_q, &sp->cmd);
8990 }
8991
8992 if (ubp == NULL) {
8993 EL(ha, "Failed, get_unsolicited_buffer\n");
8994 } else {
8995 /*EMPTY*/
8996 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8997 }
8998 }
8999
9000 /*
9001 * ql_update_rscn
9002 * Update devices from received RSCN.
9003 *
9004 * Input:
9005 * ha: adapter state pointer.
9006 * af: pointer to RSCN data.
9007 *
9008 * Context:
9009 * Interrupt or Kernel context, no mailbox commands allowed.
9010 */
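/*
 * For reference, the RSCN formats handled below widen in scope as
 * follows; the example d_ids are made up, the masks are the ones
 * used in the switch:
 *
 *	port address	0x0a0110	that one port only
 *	area address	0x0a0100	d_id & 0xffff00 == aff_d_id
 *	domain address	0x0a0000	d_id & 0xff0000 == aff_d_id
 *	fabric address			every non-reserved port
 */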
9011 static void
9012 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9013 {
9014 ql_link_t *link;
9015 uint16_t index;
9016 ql_tgt_t *tq;
9017
9018 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9019
9020 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9021 port_id_t d_id;
9022
9023 d_id.r.rsvd_1 = 0;
9024 d_id.b24 = af->aff_d_id;
9025
9026 tq = ql_d_id_to_queue(ha, d_id);
9027 if (tq) {
9028 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9029 DEVICE_QUEUE_LOCK(tq);
9030 tq->flags |= TQF_RSCN_RCVD;
9031 DEVICE_QUEUE_UNLOCK(tq);
9032 }
9033 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9034 ha->instance);
9035
9036 return;
9037 }
9038
9039 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9040 for (link = ha->dev[index].first; link != NULL;
9041 link = link->next) {
9042 tq = link->base_address;
9043
9044 switch (af->aff_format) {
9045 case FC_RSCN_FABRIC_ADDRESS:
9046 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9047 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9048 tq->d_id.b24);
9049 DEVICE_QUEUE_LOCK(tq);
9050 tq->flags |= TQF_RSCN_RCVD;
9051 DEVICE_QUEUE_UNLOCK(tq);
9052 }
9053 break;
9054
9055 case FC_RSCN_AREA_ADDRESS:
9056 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9057 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9058 tq->d_id.b24);
9059 DEVICE_QUEUE_LOCK(tq);
9060 tq->flags |= TQF_RSCN_RCVD;
9061 DEVICE_QUEUE_UNLOCK(tq);
9062 }
9063 break;
9064
9065 case FC_RSCN_DOMAIN_ADDRESS:
9066 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9067 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9068 tq->d_id.b24);
9069 DEVICE_QUEUE_LOCK(tq);
9070 tq->flags |= TQF_RSCN_RCVD;
9071 DEVICE_QUEUE_UNLOCK(tq);
9072 }
9073 break;
9074
9075 default:
9076 break;
9077 }
9078 }
9079 }
9080 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9081 }
9082
9083 /*
9084 * ql_process_rscn
9085 *
9086 * Input:
9087 * ha: adapter state pointer.
9088 * af: RSCN payload pointer.
9089 *
9090 * Context:
9091 * Kernel context.
9092 */
9093 static int
9094 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9095 {
9096 int sendit;
9097 int sendup = 1;
9098 ql_link_t *link;
9099 uint16_t index;
9100 ql_tgt_t *tq;
9101
9102 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9103
9104 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9105 port_id_t d_id;
9106
9107 d_id.r.rsvd_1 = 0;
9108 d_id.b24 = af->aff_d_id;
9109
9110 tq = ql_d_id_to_queue(ha, d_id);
9111 if (tq) {
9112 sendup = ql_process_rscn_for_device(ha, tq);
9113 }
9114
9115 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9116
9117 return (sendup);
9118 }
9119
9120 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9121 for (link = ha->dev[index].first; link != NULL;
9122 link = link->next) {
9123
9124 tq = link->base_address;
9125 if (tq == NULL) {
9126 continue;
9127 }
9128
9129 switch (af->aff_format) {
9130 case FC_RSCN_FABRIC_ADDRESS:
9131 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9132 sendit = ql_process_rscn_for_device(
9133 ha, tq);
9134 if (sendup) {
9135 sendup = sendit;
9136 }
9137 }
9138 break;
9139
9140 case FC_RSCN_AREA_ADDRESS:
9141 if ((tq->d_id.b24 & 0xffff00) ==
9142 af->aff_d_id) {
9143 sendit = ql_process_rscn_for_device(
9144 ha, tq);
9145
9146 if (sendup) {
9147 sendup = sendit;
9148 }
9149 }
9150 break;
9151
9152 case FC_RSCN_DOMAIN_ADDRESS:
9153 if ((tq->d_id.b24 & 0xff0000) ==
9154 af->aff_d_id) {
9155 sendit = ql_process_rscn_for_device(
9156 ha, tq);
9157
9158 if (sendup) {
9159 sendup = sendit;
9160 }
9161 }
9162 break;
9163
9164 default:
9165 break;
9166 }
9167 }
9168 }
9169
9170 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9171
9172 return (sendup);
9173 }
9174
9175 /*
9176 * ql_process_rscn_for_device
9177 *
9178 * Input:
9179 * ha: adapter state pointer.
9180 * tq: target queue pointer.
9181 *
9182 * Context:
9183 * Kernel context.
9184 */
9185 static int
9186 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9187 {
9188 int sendup = 1;
9189
9190 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9191
9192 DEVICE_QUEUE_LOCK(tq);
9193
9194 /*
9195 * Let FCP-2 compliant devices continue I/Os
9196 * with their low level recoveries.
9197 */
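	/*
	 * PRLI_W3_RETRY is the retry capability bit from PRLI
	 * service parameter word 3. When it is set, the port
	 * database refresh below revalidates the device (causing
	 * an ADISC) without disturbing its outstanding I/O.
	 */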
9198 if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9199 (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9200 /*
9201 * Cause ADISC to go out
9202 */
9203 DEVICE_QUEUE_UNLOCK(tq);
9204
9205 (void) ql_get_port_database(ha, tq, PDF_NONE);
9206
9207 DEVICE_QUEUE_LOCK(tq);
9208 tq->flags &= ~TQF_RSCN_RCVD;
9209
9210 } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9211 if (tq->d_id.b24 != BROADCAST_ADDR) {
9212 tq->flags |= TQF_NEED_AUTHENTICATION;
9213 }
9214
9215 DEVICE_QUEUE_UNLOCK(tq);
9216
9217 (void) ql_abort_device(ha, tq, 1);
9218
9219 DEVICE_QUEUE_LOCK(tq);
9220
9221 if (tq->outcnt) {
9222 sendup = 0;
9223 } else {
9224 tq->flags &= ~TQF_RSCN_RCVD;
9225 }
9226 } else {
9227 tq->flags &= ~TQF_RSCN_RCVD;
9228 }
9229
9230 if (sendup) {
9231 if (tq->d_id.b24 != BROADCAST_ADDR) {
9232 tq->flags |= TQF_NEED_AUTHENTICATION;
9233 }
9234 }
9235
9236 DEVICE_QUEUE_UNLOCK(tq);
9237
9238 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9239
9240 return (sendup);
9241 }
9242
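/*
 * ql_handle_rscn_update
 *	Refreshes the driver's device queues from the firmware d_id
 *	list after an RSCN, and emulates a PLOGI to the transport
 *	for each newly discovered device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */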
9243 static int
9244 ql_handle_rscn_update(ql_adapter_state_t *ha)
9245 {
9246 int rval;
9247 ql_tgt_t *tq;
9248 uint16_t index, loop_id;
9249 ql_dev_id_list_t *list;
9250 uint32_t list_size;
9251 port_id_t d_id;
9252 ql_mbx_data_t mr;
9253 ql_head_t done_q = { NULL, NULL };
9254
9255 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9256
9257 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9258 list = kmem_zalloc(list_size, KM_SLEEP);
9259 if (list == NULL) {
9260 rval = QL_MEMORY_ALLOC_FAILED;
9261 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9262 return (rval);
9263 }
9264
9265 /*
9266 * Get data from RISC code d_id list to init each device queue.
9267 */
9268 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9269 if (rval != QL_SUCCESS) {
9270 kmem_free(list, list_size);
9271 EL(ha, "get_id_list failed=%xh\n", rval);
9272 return (rval);
9273 }
9274
9275 /* Acquire adapter state lock. */
9276 ADAPTER_STATE_LOCK(ha);
9277
9278 /* Check for new devices */
9279 for (index = 0; index < mr.mb[1]; index++) {
9280 ql_dev_list(ha, list, index, &d_id, &loop_id);
9281
9282 if (VALID_DEVICE_ID(ha, loop_id)) {
9283 d_id.r.rsvd_1 = 0;
9284
9285 tq = ql_d_id_to_queue(ha, d_id);
9286 if (tq != NULL) {
9287 continue;
9288 }
9289
9290 tq = ql_dev_init(ha, d_id, loop_id);
9291
9292 /* Test for fabric device. */
9293 if (d_id.b.domain != ha->d_id.b.domain ||
9294 d_id.b.area != ha->d_id.b.area) {
9295 tq->flags |= TQF_FABRIC_DEVICE;
9296 }
9297
9298 ADAPTER_STATE_UNLOCK(ha);
9299 if (ql_get_port_database(ha, tq, PDF_NONE) !=
9300 QL_SUCCESS) {
9301 tq->loop_id = PORT_NO_LOOP_ID;
9302 }
9303 ADAPTER_STATE_LOCK(ha);
9304
9305 /*
9306 * Send up a PLOGI about the new device
9307 */
9308 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9309 (void) ql_send_plogi(ha, tq, &done_q);
9310 }
9311 }
9312 }
9313
9314 /* Release adapter state lock. */
9315 ADAPTER_STATE_UNLOCK(ha);
9316
9317 if (done_q.first != NULL) {
9318 ql_done(done_q.first);
9319 }
9320
9321 kmem_free(list, list_size);
9322
9323 if (rval != QL_SUCCESS) {
9324 EL(ha, "failed=%xh\n", rval);
9325 } else {
9326 /*EMPTY*/
9327 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9328 }
9329
9330 return (rval);
9331 }
9332
9333 /*
9334 * ql_free_unsolicited_buffer
9335 * Frees allocated buffer.
9336 *
9337 * Input:
9338 * ha = adapter state pointer.
9339  *	ubp = unsolicited buffer pointer.
9340 * ADAPTER_STATE_LOCK must be already obtained.
9341 *
9342 * Context:
9343 * Kernel context.
9344 */
9345 static void
9346 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9347 {
9348 ql_srb_t *sp;
9349 int status;
9350
9351 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9352
9353 sp = ubp->ub_fca_private;
9354 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9355 /* Disconnect IP from system buffers. */
9356 if (ha->flags & IP_INITIALIZED) {
9357 ADAPTER_STATE_UNLOCK(ha);
9358 status = ql_shutdown_ip(ha);
9359 ADAPTER_STATE_LOCK(ha);
9360 if (status != QL_SUCCESS) {
9361 cmn_err(CE_WARN,
9362 "!Qlogic %s(%d): Failed to shutdown IP",
9363 QL_NAME, ha->instance);
9364 return;
9365 }
9366
9367 ha->flags &= ~IP_ENABLED;
9368 }
9369
9370 ql_free_phys(ha, &sp->ub_buffer);
9371 } else {
9372 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9373 }
9374
9375 kmem_free(sp, sizeof (ql_srb_t));
9376 kmem_free(ubp, sizeof (fc_unsol_buf_t));
9377
9378 if (ha->ub_allocated != 0) {
9379 ha->ub_allocated--;
9380 }
9381
9382 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9383 }
9384
9385 /*
9386 * ql_get_unsolicited_buffer
9387 * Locates a free unsolicited buffer.
9388 *
9389 * Input:
9390 * ha = adapter state pointer.
9391 * type = buffer type.
9392 *
9393 * Returns:
9394 * Unsolicited buffer pointer.
9395 *
9396 * Context:
9397 * Interrupt or Kernel context, no mailbox commands allowed.
9398 */
9399 fc_unsol_buf_t *
9400 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9401 {
9402 fc_unsol_buf_t *ubp;
9403 ql_srb_t *sp;
9404 uint16_t index;
9405
9406 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9407
9408 /* Locate a buffer to use. */
9409 ubp = NULL;
9410
9411 QL_UB_LOCK(ha);
9412 for (index = 0; index < QL_UB_LIMIT; index++) {
9413 ubp = ha->ub_array[index];
9414 if (ubp != NULL) {
9415 sp = ubp->ub_fca_private;
9416 if ((sp->ub_type == type) &&
9417 (sp->flags & SRB_UB_IN_FCA) &&
9418 (!(sp->flags & (SRB_UB_CALLBACK |
9419 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9420 sp->flags |= SRB_UB_ACQUIRED;
9421 ubp->ub_resp_flags = 0;
9422 break;
9423 }
9424 ubp = NULL;
9425 }
9426 }
9427 QL_UB_UNLOCK(ha);
9428
9429 if (ubp) {
9430 ubp->ub_resp_token = NULL;
9431 ubp->ub_class = FC_TRAN_CLASS3;
9432 }
9433
9434 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9435
9436 return (ubp);
9437 }
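/*
 * A sketch of the calling convention, as used by ql_send_logo()
 * and ql_rcv_rscn_els() above; the buffer comes back with
 * SRB_UB_ACQUIRED set and class defaulted to FC_TRAN_CLASS3:
 *
 *	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
 *	if (ubp == NULL) {
 *		no free buffer of this type, bail out
 *	}
 *	sp = ubp->ub_fca_private;
 *	fill in ubp->ub_frame and ubp->ub_buffer, then queue sp
 */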
9438
9439 /*
9440 * ql_ub_frame_hdr
9441 * Processes received unsolicited buffers from ISP.
9442 *
9443 * Input:
9444 * ha: adapter state pointer.
9445 * tq: target queue pointer.
9446 * index: unsolicited buffer array index.
9447 * done_q: done queue pointer.
9448 *
9449 * Returns:
9450 * ql local function return status code.
9451 *
9452 * Context:
9453 * Interrupt or Kernel context, no mailbox commands allowed.
9454 */
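/*
 * For an IP sequence that arrives split across three unsolicited
 * buffers of sizes n1, n2 and n3, the bookkeeping below yields
 * (illustrative trace):
 *
 *	buffer	seq_cnt	ro		f_ctl set here
 *	1	1	0		RO_PRESENT | FIRST_SEQ
 *	2	2	n1		(left unchanged)
 *	3	3	n1 + n2		RO_PRESENT | END_SEQ
 *
 * ub_frame.ro carries the running relative offset, and a single
 * buffer sequence gets FIRST_SEQ and END_SEQ on the same frame.
 */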
9455 int
9456 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9457 ql_head_t *done_q)
9458 {
9459 fc_unsol_buf_t *ubp;
9460 ql_srb_t *sp;
9461 uint16_t loop_id;
9462 int rval = QL_FUNCTION_FAILED;
9463
9464 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9465
9466 QL_UB_LOCK(ha);
9467 if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9468 EL(ha, "Invalid buffer index=%xh\n", index);
9469 QL_UB_UNLOCK(ha);
9470 return (rval);
9471 }
9472
9473 sp = ubp->ub_fca_private;
9474 if (sp->flags & SRB_UB_FREE_REQUESTED) {
9475 EL(ha, "buffer freed index=%xh\n", index);
9476 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9477 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9478
9479 sp->flags |= SRB_UB_IN_FCA;
9480
9481 QL_UB_UNLOCK(ha);
9482 return (rval);
9483 }
9484
9485 if ((sp->handle == index) &&
9486 (sp->flags & SRB_UB_IN_ISP) &&
9487 (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9488 (!(sp->flags & SRB_UB_ACQUIRED))) {
9489 /* set broadcast D_ID */
9490 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9491 BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9492 if (tq->ub_loop_id == loop_id) {
9493 if (ha->topology & QL_FL_PORT) {
9494 ubp->ub_frame.d_id = 0x000000;
9495 } else {
9496 ubp->ub_frame.d_id = 0xffffff;
9497 }
9498 } else {
9499 ubp->ub_frame.d_id = ha->d_id.b24;
9500 }
9501 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9502 ubp->ub_frame.rsvd = 0;
9503 ubp->ub_frame.s_id = tq->d_id.b24;
9504 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9505 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9506 ubp->ub_frame.df_ctl = 0;
9507 ubp->ub_frame.seq_id = tq->ub_seq_id;
9508 ubp->ub_frame.rx_id = 0xffff;
9509 ubp->ub_frame.ox_id = 0xffff;
9510 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9511 sp->ub_size : tq->ub_sequence_length;
9512 ubp->ub_frame.ro = tq->ub_frame_ro;
9513
9514 tq->ub_sequence_length = (uint16_t)
9515 (tq->ub_sequence_length - ubp->ub_bufsize);
9516 tq->ub_frame_ro += ubp->ub_bufsize;
9517 tq->ub_seq_cnt++;
9518
9519 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9520 if (tq->ub_seq_cnt == 1) {
9521 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9522 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9523 } else {
9524 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9525 F_CTL_END_SEQ;
9526 }
9527 tq->ub_total_seg_cnt = 0;
9528 } else if (tq->ub_seq_cnt == 1) {
9529 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9530 F_CTL_FIRST_SEQ;
9531 ubp->ub_frame.df_ctl = 0x20;
9532 }
9533
9534 QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9535 ha->instance, ubp->ub_frame.d_id);
9536 QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9537 ha->instance, ubp->ub_frame.s_id);
9538 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9539 ha->instance, ubp->ub_frame.seq_cnt);
9540 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9541 ha->instance, ubp->ub_frame.seq_id);
9542 QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9543 ha->instance, ubp->ub_frame.ro);
9544 QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9545 ha->instance, ubp->ub_frame.f_ctl);
9546 QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9547 ha->instance, ubp->ub_bufsize);
9548 QL_DUMP_3(ubp->ub_buffer, 8,
9549 ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9550
9551 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9552 ql_add_link_b(done_q, &sp->cmd);
9553 rval = QL_SUCCESS;
9554 } else {
9555 if (sp->handle != index) {
9556 EL(ha, "Bad index=%xh, expect=%xh\n", index,
9557 sp->handle);
9558 }
9559 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9560 EL(ha, "buffer was already in driver, index=%xh\n",
9561 index);
9562 }
9563 		if (sp->ub_type != FC_TYPE_IS8802_SNAP) {
9564 EL(ha, "buffer was not an IP buffer, index=%xh\n",
9565 index);
9566 }
9567 if (sp->flags & SRB_UB_ACQUIRED) {
9568 EL(ha, "buffer was being used by driver, index=%xh\n",
9569 index);
9570 }
9571 }
9572 QL_UB_UNLOCK(ha);
9573
9574 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9575
9576 return (rval);
9577 }
9578
9579 /*
9580 * ql_timer
9581 * One second timer function.
9582 *
9583 * Input:
9584 * ql_hba.first = first link in adapter list.
9585 *
9586 * Context:
9587 * Interrupt context, no mailbox commands allowed.
9588 */
9589 static void
9590 ql_timer(void *arg)
9591 {
9592 ql_link_t *link;
9593 uint32_t set_flags;
9594 uint32_t reset_flags;
9595 ql_adapter_state_t *ha = NULL, *vha;
9596
9597 QL_PRINT_6(CE_CONT, "started\n");
9598
9599 /* Acquire global state lock. */
9600 GLOBAL_STATE_LOCK();
9601 if (ql_timer_timeout_id == NULL) {
9602 /* Release global state lock. */
9603 GLOBAL_STATE_UNLOCK();
9604 return;
9605 }
9606
9607 for (link = ql_hba.first; link != NULL; link = link->next) {
9608 ha = link->base_address;
9609
9610 		/* Skip adapter if suspended or stalled. */
9611 ADAPTER_STATE_LOCK(ha);
9612 if (ha->flags & ADAPTER_SUSPENDED ||
9613 ha->task_daemon_flags & DRIVER_STALL) {
9614 ADAPTER_STATE_UNLOCK(ha);
9615 continue;
9616 }
9617 ha->flags |= ADAPTER_TIMER_BUSY;
9618 ADAPTER_STATE_UNLOCK(ha);
9619
9620 QL_PM_LOCK(ha);
9621 if (ha->power_level != PM_LEVEL_D0) {
9622 QL_PM_UNLOCK(ha);
9623
9624 ADAPTER_STATE_LOCK(ha);
9625 ha->flags &= ~ADAPTER_TIMER_BUSY;
9626 ADAPTER_STATE_UNLOCK(ha);
9627 continue;
9628 }
9629 ha->busy++;
9630 QL_PM_UNLOCK(ha);
9631
9632 set_flags = 0;
9633 reset_flags = 0;
9634
9635 /* Port retry timer handler. */
9636 if (LOOP_READY(ha)) {
9637 ADAPTER_STATE_LOCK(ha);
9638 if (ha->port_retry_timer != 0) {
9639 ha->port_retry_timer--;
9640 if (ha->port_retry_timer == 0) {
9641 set_flags |= PORT_RETRY_NEEDED;
9642 }
9643 }
9644 ADAPTER_STATE_UNLOCK(ha);
9645 }
9646
9647 /* Loop down timer handler. */
9648 if (LOOP_RECONFIGURE(ha) == 0) {
9649 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9650 ha->loop_down_timer--;
9651 /*
9652 * give the firmware loop down dump flag
9653 * a chance to work.
9654 */
9655 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9656 if (CFG_IST(ha,
9657 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9658 (void) ql_binary_fw_dump(ha,
9659 TRUE);
9660 }
9661 EL(ha, "loop_down_reset, "
9662 "isp_abort_needed\n");
9663 set_flags |= ISP_ABORT_NEEDED;
9664 }
9665 }
9666 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9667 /* Command abort time handler. */
9668 if (ha->loop_down_timer ==
9669 ha->loop_down_abort_time) {
9670 ADAPTER_STATE_LOCK(ha);
9671 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9672 ADAPTER_STATE_UNLOCK(ha);
9673 set_flags |= ABORT_QUEUES_NEEDED;
9674 EL(ha, "loop_down_abort_time, "
9675 "abort_queues_needed\n");
9676 }
9677
9678 /* Watchdog timer handler. */
9679 if (ha->watchdog_timer == 0) {
9680 ha->watchdog_timer = WATCHDOG_TIME;
9681 } else if (LOOP_READY(ha)) {
9682 ha->watchdog_timer--;
9683 if (ha->watchdog_timer == 0) {
9684 for (vha = ha; vha != NULL;
9685 vha = vha->vp_next) {
9686 ql_watchdog(vha,
9687 &set_flags,
9688 &reset_flags);
9689 }
9690 ha->watchdog_timer =
9691 WATCHDOG_TIME;
9692 }
9693 }
9694 }
9695 }
9696
9697 /* Idle timer handler. */
9698 if (!DRIVER_SUSPENDED(ha)) {
9699 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9700 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9701 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9702 #endif
9703 ha->idle_timer = 0;
9704 }
9705 			if (ha->send_plogi_timer != 0) {
9706 				ha->send_plogi_timer--;
9707 				if (ha->send_plogi_timer == 0) {
9708 set_flags |= SEND_PLOGI;
9709 }
9710 }
9711 }
9712 ADAPTER_STATE_LOCK(ha);
9713 if (ha->idc_restart_timer != 0) {
9714 ha->idc_restart_timer--;
9715 if (ha->idc_restart_timer == 0) {
9716 ha->idc_restart_cnt = 0;
9717 reset_flags |= DRIVER_STALL;
9718 }
9719 }
9720 if (ha->idc_flash_acc_timer != 0) {
9721 ha->idc_flash_acc_timer--;
9722 if (ha->idc_flash_acc_timer == 0 &&
9723 ha->idc_flash_acc != 0) {
9724 ha->idc_flash_acc = 1;
9725 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9726 ha->idc_mb[1] = 0;
9727 ha->idc_mb[2] = IDC_OPC_DRV_START;
9728 set_flags |= IDC_EVENT;
9729 }
9730 }
9731 ADAPTER_STATE_UNLOCK(ha);
9732
9733 if (set_flags != 0 || reset_flags != 0) {
9734 ql_awaken_task_daemon(ha, NULL, set_flags,
9735 reset_flags);
9736 }
9737
9738 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9739 ql_blink_led(ha);
9740 }
9741
9742 /* Update the IO stats */
9743 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9744 ha->xioctl->IOInputMByteCnt +=
9745 (ha->xioctl->IOInputByteCnt / 0x100000);
9746 ha->xioctl->IOInputByteCnt %= 0x100000;
9747 }
9748
9749 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9750 ha->xioctl->IOOutputMByteCnt +=
9751 (ha->xioctl->IOOutputByteCnt / 0x100000);
9752 ha->xioctl->IOOutputByteCnt %= 0x100000;
9753 }
9754
9755 if (CFG_IST(ha, CFG_CTRL_8021)) {
9756 (void) ql_8021_idc_handler(ha);
9757 }
9758
9759 ADAPTER_STATE_LOCK(ha);
9760 ha->flags &= ~ADAPTER_TIMER_BUSY;
9761 ADAPTER_STATE_UNLOCK(ha);
9762
9763 QL_PM_LOCK(ha);
9764 ha->busy--;
9765 QL_PM_UNLOCK(ha);
9766 }
9767
9768 /* Restart timer, if not being stopped. */
9769 if (ql_timer_timeout_id != NULL) {
9770 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9771 }
9772
9773 /* Release global state lock. */
9774 GLOBAL_STATE_UNLOCK();
9775
9776 QL_PRINT_6(CE_CONT, "done\n");
9777 }
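/*
 * The rearm above is the usual self-restarting timeout() pattern:
 * ql_timer_timeout_id is only tested and reassigned under
 * GLOBAL_STATE_LOCK, so the stop side can do (a sketch; the actual
 * teardown lives with the attach/detach code):
 *
 *	GLOBAL_STATE_LOCK();
 *	tid = ql_timer_timeout_id;
 *	ql_timer_timeout_id = NULL;
 *	GLOBAL_STATE_UNLOCK();
 *	(void) untimeout(tid);
 *
 * and know that no further ql_timer() call will be scheduled.
 */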
9778
9779 /*
9780 * ql_timeout_insert
9781 * Function used to insert a command block onto the
9782 * watchdog timer queue.
9783 *
9784  * Note: Must ensure that pkt_timeout is not zero
9785  *       before calling ql_timeout_insert.
9786 *
9787 * Input:
9788 * ha: adapter state pointer.
9789 * tq: target queue pointer.
9790 * sp: SRB pointer.
9791 * DEVICE_QUEUE_LOCK must be already obtained.
9792 *
9793 * Context:
9794 * Kernel context.
9795 */
9796 /* ARGSUSED */
9797 static void
9798 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9799 {
9800 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9801
9802 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9803 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9804 /*
9805 		 * wdg_q_time must be rounded up to whole WATCHDOG_TIME
9806 		 * intervals. As an example, consider a 1 second timeout:
9807 		 * if wdg_q_time could be 1, it might expire on the very
9808 		 * next watchdog call, perhaps only microseconds away.
9809 *
9810 */
9811 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9812 WATCHDOG_TIME;
9813 /*
9814 * Added an additional 10 to account for the
9815 * firmware timer drift which can occur with
9816 * very long timeout values.
9817 */
9818 sp->wdg_q_time += 10;
9819
9820 /*
9821 		 * Add 6 more to ensure the watchdog does not time out at the
9822 		 * same time as the ISP RISC code timeout.
9823 */
9824 sp->wdg_q_time += 6;
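		/*
		 * Worked example, assuming WATCHDOG_TIME were 5
		 * seconds (illustrative value only): a 12 second
		 * pkt_timeout gives (12 + 5 - 1) / 5 = 3 ticks,
		 * plus 10 for drift and 6 for RISC skew, so
		 * wdg_q_time would start at 19 intervals.
		 */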
9825
9826 /* Save initial time for resetting watchdog time. */
9827 sp->init_wdg_q_time = sp->wdg_q_time;
9828
9829 /* Insert command onto watchdog queue. */
9830 ql_add_link_b(&tq->wdg, &sp->wdg);
9831
9832 sp->flags |= SRB_WATCHDOG_ENABLED;
9833 } else {
9834 sp->isp_timeout = 0;
9835 sp->wdg_q_time = 0;
9836 sp->init_wdg_q_time = 0;
9837 }
9838
9839 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9840 }
9841
9842 /*
9843 * ql_watchdog
9844  *	Timeout handler that runs in interrupt context. The
9845  *	ql_adapter_state_t argument is the state structure pointer
9846  *	set up when the timeout was initialized.
9847  *	Walks the watchdog queues updating timeout values; any
9848  *	command whose timeout has expired is aborted.
9849 *
9850 * Input:
9851 * ha: adapter state pointer.
9852 * set_flags: task daemon flags to set.
9853 * reset_flags: task daemon flags to reset.
9854 *
9855 * Context:
9856 * Interrupt context, no mailbox commands allowed.
9857 */
9858 static void
9859 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9860 {
9861 ql_srb_t *sp;
9862 ql_link_t *link;
9863 ql_link_t *next_cmd;
9864 ql_link_t *next_device;
9865 ql_tgt_t *tq;
9866 ql_lun_t *lq;
9867 uint16_t index;
9868 int q_sane;
9869
9870 QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9871
9872 /* Loop through all targets. */
9873 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9874 for (link = ha->dev[index].first; link != NULL;
9875 link = next_device) {
9876 tq = link->base_address;
9877
9878 /* Try to acquire device queue lock. */
9879 if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9880 next_device = NULL;
9881 continue;
9882 }
9883
9884 next_device = link->next;
9885
9886 if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9887 (tq->port_down_retry_count == 0)) {
9888 /* Release device queue lock. */
9889 DEVICE_QUEUE_UNLOCK(tq);
9890 continue;
9891 }
9892
9893 /* Find out if this device is in a sane state. */
9894 if (tq->flags & (TQF_RSCN_RCVD |
9895 TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9896 q_sane = 0;
9897 } else {
9898 q_sane = 1;
9899 }
9900 /* Loop through commands on watchdog queue. */
9901 for (link = tq->wdg.first; link != NULL;
9902 link = next_cmd) {
9903 next_cmd = link->next;
9904 sp = link->base_address;
9905 lq = sp->lun_queue;
9906
9907 /*
9908 * For SCSI commands, if everything seems to
9909 * be going fine and this packet is stuck
9910 * because of throttling at LUN or target
9911 * level then do not decrement the
9912 * sp->wdg_q_time
9913 */
9914 if (ha->task_daemon_flags & STATE_ONLINE &&
9915 (sp->flags & SRB_ISP_STARTED) == 0 &&
9916 q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9917 lq->lun_outcnt >= ha->execution_throttle) {
9918 continue;
9919 }
9920
9921 if (sp->wdg_q_time != 0) {
9922 sp->wdg_q_time--;
9923
9924 /* Timeout? */
9925 if (sp->wdg_q_time != 0) {
9926 continue;
9927 }
9928
9929 ql_remove_link(&tq->wdg, &sp->wdg);
9930 sp->flags &= ~SRB_WATCHDOG_ENABLED;
9931
9932 if (sp->flags & SRB_ISP_STARTED) {
9933 ql_cmd_timeout(ha, tq, sp,
9934 set_flags, reset_flags);
9935
9936 DEVICE_QUEUE_UNLOCK(tq);
9937 tq = NULL;
9938 next_cmd = NULL;
9939 next_device = NULL;
9940 index = DEVICE_HEAD_LIST_SIZE;
9941 } else {
9942 ql_cmd_timeout(ha, tq, sp,
9943 set_flags, reset_flags);
9944 }
9945 }
9946 }
9947
9948 /* Release device queue lock. */
9949 if (tq != NULL) {
9950 DEVICE_QUEUE_UNLOCK(tq);
9951 }
9952 }
9953 }
9954
9955 QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9956 }
9957
9958 /*
9959 * ql_cmd_timeout
9960 * Command timeout handler.
9961 *
9962 * Input:
9963 * ha: adapter state pointer.
9964 * tq: target queue pointer.
9965 * sp: SRB pointer.
9966 * set_flags: task daemon flags to set.
9967 * reset_flags: task daemon flags to reset.
9968 *
9969 * Context:
9970 * Interrupt context, no mailbox commands allowed.
9971 */
9972 /* ARGSUSED */
9973 static void
9974 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9975 uint32_t *set_flags, uint32_t *reset_flags)
9976 {
9977 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9978
9979 if (!(sp->flags & SRB_ISP_STARTED)) {
9980
9981 EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9982
9983 REQUEST_RING_LOCK(ha);
9984
9985 /* if it's on a queue */
9986 if (sp->cmd.head) {
9987 /*
9988 			 * The pending_cmds queue needs to be
9989 			 * protected by the request ring lock.
9990 */
9991 ql_remove_link(sp->cmd.head, &sp->cmd);
9992 }
9993 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9994
9995 		/* Release request ring and device queue locks. */
9996 REQUEST_RING_UNLOCK(ha);
9997 DEVICE_QUEUE_UNLOCK(tq);
9998
9999 /* Set timeout status */
10000 sp->pkt->pkt_reason = CS_TIMEOUT;
10001
10002 /* Ensure no retry */
10003 sp->flags &= ~SRB_RETRY;
10004
10005 /* Call done routine to handle completion. */
10006 ql_done(&sp->cmd);
10007
10008 DEVICE_QUEUE_LOCK(tq);
10009 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
10010 int rval;
10011 uint32_t index;
10012
10013 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10014 "spf=%xh\n", (void *)sp,
10015 (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10016 sp->handle & OSC_INDEX_MASK, sp->flags);
10017
10018 DEVICE_QUEUE_UNLOCK(tq);
10019
10020 INTR_LOCK(ha);
10021 ha->pha->xioctl->ControllerErrorCount++;
10022 if (sp->handle) {
10023 ha->pha->timeout_cnt++;
10024 index = sp->handle & OSC_INDEX_MASK;
10025 if (ha->pha->outstanding_cmds[index] == sp) {
10026 sp->request_ring_ptr->entry_type =
10027 INVALID_ENTRY_TYPE;
10028 sp->request_ring_ptr->entry_count = 0;
10029 				ha->pha->outstanding_cmds[index] = NULL;
10030 }
10031 INTR_UNLOCK(ha);
10032
10033 rval = ql_abort_command(ha, sp);
10034 if (rval == QL_FUNCTION_TIMEOUT ||
10035 rval == QL_LOCK_TIMEOUT ||
10036 rval == QL_FUNCTION_PARAMETER_ERROR ||
10037 ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
10038 *set_flags |= ISP_ABORT_NEEDED;
10039 EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
10040 "needed\n", rval, ha->pha->timeout_cnt);
10041 }
10042
10043 sp->handle = 0;
10044 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10045 } else {
10046 INTR_UNLOCK(ha);
10047 }
10048
10049 /* Set timeout status */
10050 sp->pkt->pkt_reason = CS_TIMEOUT;
10051
10052 /* Ensure no retry */
10053 sp->flags &= ~SRB_RETRY;
10054
10055 /* Call done routine to handle completion. */
10056 ql_done(&sp->cmd);
10057
10058 DEVICE_QUEUE_LOCK(tq);
10059
10060 } else {
10061 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10062 "spf=%xh, isp_abort_needed\n", (void *)sp,
10063 (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10064 sp->handle & OSC_INDEX_MASK, sp->flags);
10065
10066 /* Release device queue lock. */
10067 DEVICE_QUEUE_UNLOCK(tq);
10068
10069 INTR_LOCK(ha);
10070 ha->pha->xioctl->ControllerErrorCount++;
10071 INTR_UNLOCK(ha);
10072
10073 /* Set ISP needs to be reset */
10074 sp->flags |= SRB_COMMAND_TIMEOUT;
10075
10076 if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10077 (void) ql_binary_fw_dump(ha, TRUE);
10078 }
10079
10080 *set_flags |= ISP_ABORT_NEEDED;
10081
10082 DEVICE_QUEUE_LOCK(tq);
10083 }
10084
10085 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10086 }
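/*
 * In summary, ql_cmd_timeout() distinguishes three cases: a
 * command that never reached the ISP completes immediately with
 * CS_TIMEOUT; on 8021 class adapters the single command is aborted
 * in the firmware, escalating to a full ISP abort only after
 * repeated failures; on all other adapters a command lost by the
 * ISP always forces ISP_ABORT_NEEDED.
 */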
10087
10088 /*
10089 * ql_rst_aen
10090 * Processes asynchronous reset.
10091 *
10092 * Input:
10093 * ha = adapter state pointer.
10094 *
10095 * Context:
10096 * Kernel context.
10097 */
10098 static void
10099 ql_rst_aen(ql_adapter_state_t *ha)
10100 {
10101 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10102
10103 /* Issue marker command. */
10104 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10105
10106 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10107 }
10108
10109 /*
10110 * ql_cmd_wait
10111 * Stall driver until all outstanding commands are returned.
10112 *
10113 * Input:
10114 * ha = adapter state pointer.
10115 *
10116 * Context:
10117 * Kernel context.
10118 */
10119 void
10120 ql_cmd_wait(ql_adapter_state_t *ha)
10121 {
10122 uint16_t index;
10123 ql_link_t *link;
10124 ql_tgt_t *tq;
10125 ql_adapter_state_t *vha;
10126
10127 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10128
10129 /* Wait for all outstanding commands to be returned. */
10130 (void) ql_wait_outstanding(ha);
10131
10132 /*
10133 * clear out internally queued commands
10134 */
10135 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10136 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10137 for (link = vha->dev[index].first; link != NULL;
10138 link = link->next) {
10139 tq = link->base_address;
10140 if (tq &&
10141 (!(tq->prli_svc_param_word_3 &
10142 PRLI_W3_RETRY))) {
10143 (void) ql_abort_device(vha, tq, 0);
10144 }
10145 }
10146 }
10147 }
10148
10149 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10150 }
10151
10152 /*
10153 * ql_wait_outstanding
10154 * Wait for all outstanding commands to complete.
10155 *
10156 * Input:
10157 * ha = adapter state pointer.
10158 *
10159 * Returns:
 *	index = MAX_OUTSTANDING_COMMANDS when all commands completed,
 *	    else the outstanding_cmds index of a command that did not
 *	    complete within the wait period.
10161 *
10162 * Context:
10163 * Kernel context.
10164 */
10165 static uint16_t
10166 ql_wait_outstanding(ql_adapter_state_t *ha)
10167 {
10168 ql_srb_t *sp;
10169 uint16_t index, count;
10170
10171 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10172
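	/*
	 * Scan the outstanding command array; whenever a slot is still
	 * busy, delay and restart the scan from the beginning.  Queued
	 * commands are kicked as well so they can drain.  The wait
	 * gives up after ql_osc_wait_count delay periods.
	 */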
10173 count = ql_osc_wait_count;
10174 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10175 if (ha->pha->pending_cmds.first != NULL) {
10176 ql_start_iocb(ha, NULL);
10177 index = 1;
10178 }
10179 if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10180 (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10181 if (count-- != 0) {
10182 ql_delay(ha, 10000);
10183 index = 0;
10184 } else {
10185 EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
10186 (void *)sp, index, sp->handle);
10187 break;
10188 }
10189 }
10190 }
10191
10192 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10193
10194 return (index);
10195 }
10196
10197 /*
10198 * ql_restart_queues
10199 * Restart device queues.
10200 *
10201 * Input:
10202 * ha = adapter state pointer.
10203 * DEVICE_QUEUE_LOCK must be released.
10204 *
10205 * Context:
10206 * Interrupt or Kernel context, no mailbox commands allowed.
10207 */
10208 static void
10209 ql_restart_queues(ql_adapter_state_t *ha)
10210 {
10211 ql_link_t *link, *link2;
10212 ql_tgt_t *tq;
10213 ql_lun_t *lq;
10214 uint16_t index;
10215 ql_adapter_state_t *vha;
10216
10217 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10218
10219 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10220 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10221 for (link = vha->dev[index].first; link != NULL;
10222 link = link->next) {
10223 tq = link->base_address;
10224
10225 /* Acquire device queue lock. */
10226 DEVICE_QUEUE_LOCK(tq);
10227
10228 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10229
10230 for (link2 = tq->lun_queues.first;
10231 link2 != NULL; link2 = link2->next) {
10232 lq = link2->base_address;
10233
10234 if (lq->cmd.first != NULL) {
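						/*
						 * ql_next() starts queued
						 * commands and drops the
						 * device queue lock, so
						 * reacquire it before the
						 * scan continues.
						 */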
10235 ql_next(vha, lq);
10236 DEVICE_QUEUE_LOCK(tq);
10237 }
10238 }
10239
10240 /* Release device queue lock. */
10241 DEVICE_QUEUE_UNLOCK(tq);
10242 }
10243 }
10244 }
10245
10246 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10247 }
10248
10249 /*
10250 * ql_iidma
 *	Set up iiDMA rate parameters in the firmware.
10252 *
10253 * Input:
10254 * ha = adapter state pointer.
10255 * DEVICE_QUEUE_LOCK must be released.
10256 *
10257 * Context:
10258 * Interrupt or Kernel context, no mailbox commands allowed.
10259 */
10260 static void
10261 ql_iidma(ql_adapter_state_t *ha)
10262 {
10263 ql_link_t *link;
10264 ql_tgt_t *tq;
10265 uint16_t index;
10266 char buf[256];
10267 uint32_t data;
10268
10269 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10270
10271 if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10272 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10273 return;
10274 }
10275
10276 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10277 for (link = ha->dev[index].first; link != NULL;
10278 link = link->next) {
10279 tq = link->base_address;
10280
10281 /* Acquire device queue lock. */
10282 DEVICE_QUEUE_LOCK(tq);
10283
10284 if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10285 DEVICE_QUEUE_UNLOCK(tq);
10286 continue;
10287 }
10288
10289 tq->flags &= ~TQF_IIDMA_NEEDED;
10290
10291 if ((tq->loop_id > LAST_N_PORT_HDL) ||
10292 (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10293 DEVICE_QUEUE_UNLOCK(tq);
10294 continue;
10295 }
10296
10297 /* Get the iiDMA persistent data */
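			/*
			 * The property name is "iidma-rate-" followed by
			 * the 8-byte port WWN in hex; e.g., a (purely
			 * illustrative) port name 21:00:00:e0:8b:01:02:03
			 * is looked up as "iidma-rate-210000e08b010203".
			 */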
10298 if (tq->iidma_rate == IIDMA_RATE_INIT) {
10299 (void) sprintf(buf,
10300 "iidma-rate-%02x%02x%02x%02x%02x"
10301 "%02x%02x%02x", tq->port_name[0],
10302 tq->port_name[1], tq->port_name[2],
10303 tq->port_name[3], tq->port_name[4],
10304 tq->port_name[5], tq->port_name[6],
10305 tq->port_name[7]);
10306
10307 if ((data = ql_get_prop(ha, buf)) ==
10308 0xffffffff) {
10309 tq->iidma_rate = IIDMA_RATE_NDEF;
10310 } else {
10311 switch (data) {
10312 case IIDMA_RATE_1GB:
10313 case IIDMA_RATE_2GB:
10314 case IIDMA_RATE_4GB:
10315 case IIDMA_RATE_10GB:
10316 tq->iidma_rate = data;
10317 break;
10318 case IIDMA_RATE_8GB:
10319 if (CFG_IST(ha,
10320 CFG_CTRL_25XX)) {
10321 tq->iidma_rate = data;
10322 } else {
10323 tq->iidma_rate =
10324 IIDMA_RATE_4GB;
10325 }
10326 break;
10327 default:
10328 EL(ha, "invalid data for "
10329 "parameter: %s: %xh\n",
10330 buf, data);
10331 tq->iidma_rate =
10332 IIDMA_RATE_NDEF;
10333 break;
10334 }
10335 }
10336 }
10337
10338 /* Set the firmware's iiDMA rate */
10339 if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10340 !(CFG_IST(ha, CFG_CTRL_8081))) {
10341 data = ql_iidma_rate(ha, tq->loop_id,
10342 &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10343 if (data != QL_SUCCESS) {
10344 EL(ha, "mbx failed: %xh\n", data);
10345 }
10346 }
10347
10348 /* Release device queue lock. */
10349 DEVICE_QUEUE_UNLOCK(tq);
10350 }
10351 }
10352
10353 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10354 }
10355
10356 /*
10357 * ql_abort_queues
10358 * Abort all commands on device queues.
10359 *
10360 * Input:
10361 * ha = adapter state pointer.
10362 *
10363 * Context:
10364 * Interrupt or Kernel context, no mailbox commands allowed.
10365 */
10366 static void
10367 ql_abort_queues(ql_adapter_state_t *ha)
10368 {
10369 ql_link_t *link;
10370 ql_tgt_t *tq;
10371 ql_srb_t *sp;
10372 uint16_t index;
10373 ql_adapter_state_t *vha;
10374
10375 QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10376
10377 /* Return all commands in outstanding command list. */
10378 INTR_LOCK(ha);
10379
10380 /* Place all commands in outstanding cmd list on device queue. */
10381 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10382 if (ha->pending_cmds.first != NULL) {
10383 INTR_UNLOCK(ha);
10384 ql_start_iocb(ha, NULL);
10385 /* Delay for system */
10386 ql_delay(ha, 10000);
10387 INTR_LOCK(ha);
10388 index = 1;
10389 }
10390 sp = ha->outstanding_cmds[index];
10391
		/* Skip devices capable of FCP-2 retries. */
10393 if ((sp != NULL) &&
10394 ((tq = sp->lun_queue->target_queue) != NULL) &&
10395 (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10396 ha->outstanding_cmds[index] = NULL;
10397 sp->handle = 0;
10398 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10399
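			/*
			 * The interrupt lock is dropped around ql_done()
			 * since completion processing can call back into
			 * the driver.
			 */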
10400 INTR_UNLOCK(ha);
10401
10402 /* Set ending status. */
10403 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10404 sp->flags |= SRB_ISP_COMPLETED;
10405
10406 /* Call done routine to handle completions. */
10407 sp->cmd.next = NULL;
10408 ql_done(&sp->cmd);
10409
10410 INTR_LOCK(ha);
10411 }
10412 }
10413 INTR_UNLOCK(ha);
10414
10415 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10416 QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10417 vha->instance, vha->vp_index);
10418 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10419 for (link = vha->dev[index].first; link != NULL;
10420 link = link->next) {
10421 tq = link->base_address;
				/* Skip devices capable of FCP-2 retries. */
10423 if (!(tq->prli_svc_param_word_3 &
10424 PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status
					 * and return all commands on
					 * the device's queues.
					 */
10430 ql_abort_device_queues(ha, tq);
10431 }
10432 }
10433 }
10434 }
10435 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10436 }
10437
/*
 * ql_abort_device_queues
 *	Abort all commands queued on a single device's queues.
 *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
10448 static void
10449 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10450 {
10451 ql_link_t *lun_link, *cmd_link;
10452 ql_srb_t *sp;
10453 ql_lun_t *lq;
10454
10455 QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10456
10457 DEVICE_QUEUE_LOCK(tq);
10458
10459 for (lun_link = tq->lun_queues.first; lun_link != NULL;
10460 lun_link = lun_link->next) {
10461 lq = lun_link->base_address;
10462
10463 cmd_link = lq->cmd.first;
10464 while (cmd_link != NULL) {
10465 sp = cmd_link->base_address;
10466
10467 if (sp->flags & SRB_ABORT) {
10468 cmd_link = cmd_link->next;
10469 continue;
10470 }
10471
10472 /* Remove srb from device cmd queue. */
10473 ql_remove_link(&lq->cmd, &sp->cmd);
10474
10475 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10476
10477 DEVICE_QUEUE_UNLOCK(tq);
10478
10479 /* Set ending status. */
10480 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10481
10482 /* Call done routine to handle completion. */
10483 ql_done(&sp->cmd);
10484
10485 /* Delay for system */
10486 ql_delay(ha, 10000);
10487
10488 DEVICE_QUEUE_LOCK(tq);
10489 cmd_link = lq->cmd.first;
10490 }
10491 }
10492 DEVICE_QUEUE_UNLOCK(tq);
10493
10494 QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10495 }
10496
10497 /*
10498 * ql_loop_resync
10499 * Resync with fibre channel devices.
10500 *
10501 * Input:
10502 * ha = adapter state pointer.
10503 * DEVICE_QUEUE_LOCK must be released.
10504 *
10505 * Returns:
10506 * ql local function return status code.
10507 *
10508 * Context:
10509 * Kernel context.
10510 */
10511 static int
10512 ql_loop_resync(ql_adapter_state_t *ha)
10513 {
10514 int rval;
10515
10516 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10517
10518 if (ha->flags & IP_INITIALIZED) {
10519 (void) ql_shutdown_ip(ha);
10520 }
10521
10522 rval = ql_fw_ready(ha, 10);
10523
10524 TASK_DAEMON_LOCK(ha);
10525 ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10526 TASK_DAEMON_UNLOCK(ha);
10527
10528 /* Set loop online, if it really is. */
10529 if (rval == QL_SUCCESS) {
10530 ql_loop_online(ha);
10531 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10532 } else {
10533 EL(ha, "failed, rval = %xh\n", rval);
10534 }
10535
10536 return (rval);
10537 }
10538
10539 /*
10540 * ql_loop_online
10541 * Set loop online status if it really is online.
10542 *
10543 * Input:
10544 * ha = adapter state pointer.
10545 * DEVICE_QUEUE_LOCK must be released.
10546 *
10547 * Context:
10548 * Kernel context.
10549 */
10550 void
10551 ql_loop_online(ql_adapter_state_t *ha)
10552 {
10553 ql_adapter_state_t *vha;
10554
10555 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10556
10557 /* Inform the FC Transport that the hardware is online. */
10558 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10559 if (!(vha->task_daemon_flags &
10560 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10561 /* Restart IP if it was shutdown. */
10562 if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10563 !(vha->flags & IP_INITIALIZED)) {
10564 (void) ql_initialize_ip(vha);
10565 ql_isp_rcvbuf(vha);
10566 }
10567
10568 if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10569 FC_PORT_STATE_MASK(vha->state) !=
10570 FC_STATE_ONLINE) {
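				/*
				 * Keep the port speed bits and replace
				 * the state bits with LOOP or ONLINE as
				 * appropriate for the topology.
				 */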
10571 vha->state = FC_PORT_SPEED_MASK(vha->state);
10572 if (vha->topology & QL_LOOP_CONNECTION) {
10573 vha->state |= FC_STATE_LOOP;
10574 } else {
10575 vha->state |= FC_STATE_ONLINE;
10576 }
10577 TASK_DAEMON_LOCK(ha);
10578 vha->task_daemon_flags |= FC_STATE_CHANGE;
10579 TASK_DAEMON_UNLOCK(ha);
10580 }
10581 }
10582 }
10583
10584 ql_awaken_task_daemon(ha, NULL, 0, 0);
10585
10586 /* Restart device queues that may have been stopped. */
10587 ql_restart_queues(ha);
10588
10589 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10590 }
10591
/*
 * ql_fca_handle_to_state
 *	Verifies that the handle is a valid adapter state pointer; the
 *	check is only performed when QL_DEBUG_ROUTINES is defined.
 *
 * Input:
 *	fca_handle = pointer to state structure.
 *
 * Returns:
 *	adapter state pointer (the handle is returned unchanged).
 *
 * Context:
 *	Kernel context.
 */
10605 static ql_adapter_state_t *
10606 ql_fca_handle_to_state(opaque_t fca_handle)
10607 {
10608 #ifdef QL_DEBUG_ROUTINES
10609 ql_link_t *link;
10610 ql_adapter_state_t *ha = NULL;
10611 ql_adapter_state_t *vha = NULL;
10612
10613 for (link = ql_hba.first; link != NULL; link = link->next) {
10614 ha = link->base_address;
10615 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10616 if ((opaque_t)vha == fca_handle) {
10617 ha = vha;
10618 break;
10619 }
10620 }
10621 if ((opaque_t)ha == fca_handle) {
10622 break;
10623 } else {
10624 ha = NULL;
10625 }
10626 }
10627
10628 if (ha == NULL) {
10629 /*EMPTY*/
10630 QL_PRINT_2(CE_CONT, "failed\n");
10631 }
10632
10633 #endif /* QL_DEBUG_ROUTINES */
10634
10635 return ((ql_adapter_state_t *)fca_handle);
10636 }
10637
10638 /*
10639 * ql_d_id_to_queue
10640 * Locate device queue that matches destination ID.
10641 *
10642 * Input:
10643 * ha = adapter state pointer.
10644 * d_id = destination ID
10645 *
10646 * Returns:
10647 * NULL = failure
10648 *
10649 * Context:
10650 * Interrupt or Kernel context, no mailbox commands allowed.
10651 */
10652 ql_tgt_t *
10653 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10654 {
10655 uint16_t index;
10656 ql_tgt_t *tq;
10657 ql_link_t *link;
10658
10659 /* Get head queue index. */
10660 index = ql_alpa_to_index[d_id.b.al_pa];
10661
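	/*
	 * Device queues are hashed on the low byte (AL_PA) of the
	 * 24-bit destination ID; entries that collide on one head are
	 * resolved by the full d_id compare below.
	 */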
10662 for (link = ha->dev[index].first; link != NULL; link = link->next) {
10663 tq = link->base_address;
10664 if (tq->d_id.b24 == d_id.b24 &&
10665 VALID_DEVICE_ID(ha, tq->loop_id)) {
10666 return (tq);
10667 }
10668 }
10669
10670 return (NULL);
10671 }
10672
10673 /*
10674 * ql_loop_id_to_queue
10675 * Locate device queue that matches loop ID.
10676 *
10677 * Input:
10678 * ha: adapter state pointer.
 *	loop_id: device loop ID.
10680 *
10681 * Returns:
10682 * NULL = failure
10683 *
10684 * Context:
10685 * Interrupt or Kernel context, no mailbox commands allowed.
10686 */
10687 ql_tgt_t *
10688 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10689 {
10690 uint16_t index;
10691 ql_tgt_t *tq;
10692 ql_link_t *link;
10693
10694 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10695 for (link = ha->dev[index].first; link != NULL;
10696 link = link->next) {
10697 tq = link->base_address;
10698 if (tq->loop_id == loop_id) {
10699 return (tq);
10700 }
10701 }
10702 }
10703
10704 return (NULL);
10705 }
10706
10707 /*
10708 * ql_kstat_update
10709 * Updates kernel statistics.
10710 *
10711 * Input:
10712 * ksp - driver kernel statistics structure pointer.
 *	rw - KSTAT_READ or KSTAT_WRITE.
10714 *
10715 * Returns:
10716 * 0 or EACCES
10717 *
10718 * Context:
10719 * Kernel context.
10720 */
10721 /* ARGSUSED */
10722 static int
10723 ql_kstat_update(kstat_t *ksp, int rw)
10724 {
10725 int rval;
10726
10727 QL_PRINT_3(CE_CONT, "started\n");
10728
10729 if (rw == KSTAT_WRITE) {
10730 rval = EACCES;
10731 } else {
10732 rval = 0;
10733 }
10734
10735 if (rval != 0) {
10736 /*EMPTY*/
10737 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10738 } else {
10739 /*EMPTY*/
10740 QL_PRINT_3(CE_CONT, "done\n");
10741 }
10742 return (rval);
10743 }
10744
10745 /*
10746 * ql_load_flash
10747 * Loads flash.
10748 *
10749 * Input:
10750 * ha: adapter state pointer.
10751 * dp: data pointer.
10752 * size: data length.
10753 *
10754 * Returns:
10755 * ql local function return status code.
10756 *
10757 * Context:
10758 * Kernel context.
10759 */
10760 int
10761 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10762 {
10763 uint32_t cnt;
10764 int rval;
10765 uint32_t size_to_offset;
10766 uint32_t size_to_compare;
10767 int erase_all;
10768
10769 if (CFG_IST(ha, CFG_CTRL_24258081)) {
10770 return (ql_24xx_load_flash(ha, dp, size, 0));
10771 }
10772
10773 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10774
10775 size_to_compare = 0x20000;
10776 size_to_offset = 0;
10777 erase_all = 0;
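	/*
	 * The default layout assumes a 128KB (0x20000) part.  SBUS
	 * cards carry a 512KB part that is flashed either whole
	 * (0x80000) or as one of two 256KB halves, with the
	 * ql_flash_sbus_fpga tunable steering the image into the
	 * upper half.
	 */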
10778 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10779 if (size == 0x80000) {
10780 /* Request to flash the entire chip. */
10781 size_to_compare = 0x80000;
10782 erase_all = 1;
10783 } else {
10784 size_to_compare = 0x40000;
10785 if (ql_flash_sbus_fpga) {
10786 size_to_offset = 0x40000;
10787 }
10788 }
10789 }
10790 if (size > size_to_compare) {
10791 rval = QL_FUNCTION_PARAMETER_ERROR;
10792 EL(ha, "failed=%xh\n", rval);
10793 return (rval);
10794 }
10795
10796 GLOBAL_HW_LOCK();
10797
10798 /* Enable Flash Read/Write. */
10799 ql_flash_enable(ha);
10800
10801 /* Erase flash prior to write. */
10802 rval = ql_erase_flash(ha, erase_all);
10803
10804 if (rval == QL_SUCCESS) {
10805 /* Write data to flash. */
10806 for (cnt = 0; cnt < size; cnt++) {
10807 /* Allow other system activity. */
10808 if (cnt % 0x1000 == 0) {
10809 ql_delay(ha, 10000);
10810 }
10811 rval = ql_program_flash_address(ha,
10812 cnt + size_to_offset, *dp++);
10813 if (rval != QL_SUCCESS) {
10814 break;
10815 }
10816 }
10817 }
10818
10819 ql_flash_disable(ha);
10820
10821 GLOBAL_HW_UNLOCK();
10822
10823 if (rval != QL_SUCCESS) {
10824 EL(ha, "failed=%xh\n", rval);
10825 } else {
10826 /*EMPTY*/
10827 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10828 }
10829 return (rval);
10830 }
10831
10832 /*
10833 * ql_program_flash_address
10834 * Program flash address.
10835 *
10836 * Input:
10837 * ha = adapter state pointer.
10838 * addr = flash byte address.
10839 * data = data to be written to flash.
10840 *
10841 * Returns:
10842 * ql local function return status code.
10843 *
10844 * Context:
10845 * Kernel context.
10846 */
10847 static int
10848 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10849 {
10850 int rval;
10851
10852 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10853
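	/*
	 * The paired 0xaa/0x55 writes below are the standard JEDEC
	 * unlock cycles and 0xa0 is the byte-program command.  The
	 * SBUS path appears to rely on the unlock-bypass mode entered
	 * in ql_flash_enable(), so only the program cycle is issued
	 * there.
	 */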
10854 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10855 ql_write_flash_byte(ha, 0x5555, 0xa0);
10856 ql_write_flash_byte(ha, addr, data);
10857 } else {
10858 /* Write Program Command Sequence */
10859 ql_write_flash_byte(ha, 0x5555, 0xaa);
10860 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10861 ql_write_flash_byte(ha, 0x5555, 0xa0);
10862 ql_write_flash_byte(ha, addr, data);
10863 }
10864
10865 /* Wait for write to complete. */
10866 rval = ql_poll_flash(ha, addr, data);
10867
10868 if (rval != QL_SUCCESS) {
10869 EL(ha, "failed=%xh\n", rval);
10870 } else {
10871 /*EMPTY*/
10872 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10873 }
10874 return (rval);
10875 }
10876
/*
 * ql_erase_flash
 *	Erases the flash chip; on SBUS cards a partial erase saves and
 *	restores the region that is not being updated.
 *
 * Input:
 *	ha = adapter state pointer.
 *	erase_all = non-zero to erase the entire chip.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
10890 int
10891 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10892 {
10893 int rval;
10894 uint32_t erase_delay = 2000000;
10895 uint32_t sStartAddr;
10896 uint32_t ssize;
10897 uint32_t cnt;
10898 uint8_t *bfp;
10899 uint8_t *tmp;
10900
10901 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10902
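	/*
	 * Only a full chip erase command is issued below, so when just
	 * the FCode or FPGA region of an SBUS card is being updated,
	 * the region not being rewritten is read out first and
	 * reprogrammed after the erase completes.
	 */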
10903 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10904
10905 if (ql_flash_sbus_fpga == 1) {
10906 ssize = QL_SBUS_FCODE_SIZE;
10907 sStartAddr = QL_FCODE_OFFSET;
10908 } else {
10909 ssize = QL_FPGA_SIZE;
10910 sStartAddr = QL_FPGA_OFFSET;
10911 }
10912
10913 erase_delay = 20000000;
10914
10915 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10916
		/* Save the section of flash not being updated to a buffer. */
10918 tmp = bfp;
10919 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10920 /* Allow other system activity. */
10921 if (cnt % 0x1000 == 0) {
10922 ql_delay(ha, 10000);
10923 }
10924 *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10925 }
10926 }
10927
10928 /* Chip Erase Command Sequence */
10929 ql_write_flash_byte(ha, 0x5555, 0xaa);
10930 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10931 ql_write_flash_byte(ha, 0x5555, 0x80);
10932 ql_write_flash_byte(ha, 0x5555, 0xaa);
10933 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10934 ql_write_flash_byte(ha, 0x5555, 0x10);
10935
10936 ql_delay(ha, erase_delay);
10937
10938 /* Wait for erase to complete. */
10939 rval = ql_poll_flash(ha, 0, 0x80);
10940
10941 if (rval != QL_SUCCESS) {
10942 EL(ha, "failed=%xh\n", rval);
		/* The save buffer only exists for partial SBUS updates. */
		if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
			kmem_free(bfp, ssize);
		}
10946 return (rval);
10947 }
10948
	/* Restore the section of flash saved off above. */
	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10952 tmp = bfp;
10953 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10954 /* Allow other system activity. */
10955 if (cnt % 0x1000 == 0) {
10956 ql_delay(ha, 10000);
10957 }
10958 rval = ql_program_flash_address(ha, cnt, *tmp++);
10959 if (rval != QL_SUCCESS) {
10960 break;
10961 }
10962 }
10963
10964 kmem_free(bfp, ssize);
10965 }
10966
10967 if (rval != QL_SUCCESS) {
10968 EL(ha, "failed=%xh\n", rval);
10969 } else {
10970 /*EMPTY*/
10971 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10972 }
10973 return (rval);
10974 }
10975
10976 /*
10977 * ql_poll_flash
10978 * Polls flash for completion.
10979 *
10980 * Input:
10981 * ha = adapter state pointer.
10982 * addr = flash byte address.
10983 * data = data to be polled.
10984 *
10985 * Returns:
10986 * ql local function return status code.
10987 *
10988 * Context:
10989 * Kernel context.
10990 */
10991 int
10992 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10993 {
10994 uint8_t flash_data;
10995 uint32_t cnt;
10996 int rval = QL_FUNCTION_FAILED;
10997
10998 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10999
11000 poll_data = (uint8_t)(poll_data & BIT_7);
11001
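	/*
	 * JEDEC style data polling: while a program/erase operation is
	 * in progress the device returns the complement of the written
	 * data on DQ7 (BIT_7); once DQ7 matches, the operation is
	 * complete.  DQ5 (BIT_5) set means the device exceeded its
	 * internal time limit, so only a couple more reads are allowed.
	 */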
11002 /* Wait for 30 seconds for command to finish. */
11003 for (cnt = 30000000; cnt; cnt--) {
11004 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11005
11006 if ((flash_data & BIT_7) == poll_data) {
11007 rval = QL_SUCCESS;
11008 break;
11009 }
11010 if (flash_data & BIT_5 && cnt > 2) {
11011 cnt = 2;
11012 }
11013 drv_usecwait(1);
11014 }
11015
11016 if (rval != QL_SUCCESS) {
11017 EL(ha, "failed=%xh\n", rval);
11018 } else {
11019 /*EMPTY*/
11020 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11021 }
11022 return (rval);
11023 }
11024
11025 /*
11026 * ql_flash_enable
11027 * Setup flash for reading/writing.
11028 *
11029 * Input:
11030 * ha = adapter state pointer.
11031 *
11032 * Context:
11033 * Kernel context.
11034 */
11035 void
11036 ql_flash_enable(ql_adapter_state_t *ha)
11037 {
11038 uint16_t data;
11039
11040 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11041
11042 /* Enable Flash Read/Write. */
11043 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11044 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11045 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11046 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
11047 ddi_put16(ha->sbus_fpga_dev_handle,
11048 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11049 /* Read reset command sequence */
11050 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11051 ql_write_flash_byte(ha, 0x555, 0x55);
11052 ql_write_flash_byte(ha, 0xaaa, 0x20);
11053 ql_write_flash_byte(ha, 0x555, 0xf0);
11054 } else {
11055 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11056 ISP_FLASH_ENABLE);
11057 WRT16_IO_REG(ha, ctrl_status, data);
11058
11059 /* Read/Reset Command Sequence */
11060 ql_write_flash_byte(ha, 0x5555, 0xaa);
11061 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11062 ql_write_flash_byte(ha, 0x5555, 0xf0);
11063 }
11064 (void) ql_read_flash_byte(ha, 0);
11065
11066 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11067 }
11068
11069 /*
11070 * ql_flash_disable
11071 * Disable flash and allow RISC to run.
11072 *
11073 * Input:
11074 * ha = adapter state pointer.
11075 *
11076 * Context:
11077 * Kernel context.
11078 */
11079 void
11080 ql_flash_disable(ql_adapter_state_t *ha)
11081 {
11082 uint16_t data;
11083
11084 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11085
11086 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11087 /*
11088 * Lock the flash back up.
11089 */
11090 ql_write_flash_byte(ha, 0x555, 0x90);
11091 ql_write_flash_byte(ha, 0x555, 0x0);
11092
11093 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11094 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11095 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11096 ddi_put16(ha->sbus_fpga_dev_handle,
11097 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11098 } else {
11099 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11100 ~ISP_FLASH_ENABLE);
11101 WRT16_IO_REG(ha, ctrl_status, data);
11102 }
11103
11104 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11105 }
11106
11107 /*
11108 * ql_write_flash_byte
11109 * Write byte to flash.
11110 *
11111 * Input:
11112 * ha = adapter state pointer.
11113 * addr = flash byte address.
11114 * data = data to be written.
11115 *
11116 * Context:
11117 * Kernel context.
11118 */
11119 void
11120 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11121 {
11122 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11123 ddi_put16(ha->sbus_fpga_dev_handle,
11124 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11125 LSW(addr));
11126 ddi_put16(ha->sbus_fpga_dev_handle,
11127 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11128 MSW(addr));
11129 ddi_put16(ha->sbus_fpga_dev_handle,
11130 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11131 (uint16_t)data);
11132 } else {
11133 uint16_t bank_select;
11134
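		/*
		 * The legacy flash interface only decodes 16 address
		 * bits; byte address bit 16 is driven through the
		 * ISP_FLASH_64K_BANK bit of ctrl_status (2300/6322
		 * parts carry additional high bits in the 0xf0 field).
		 */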
11135 /* Setup bit 16 of flash address. */
11136 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11137
11138 if (CFG_IST(ha, CFG_CTRL_6322)) {
11139 bank_select = (uint16_t)(bank_select & ~0xf0);
11140 bank_select = (uint16_t)(bank_select |
11141 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11142 WRT16_IO_REG(ha, ctrl_status, bank_select);
11143 } else {
11144 if (addr & BIT_16 && !(bank_select &
11145 ISP_FLASH_64K_BANK)) {
11146 bank_select = (uint16_t)(bank_select |
11147 ISP_FLASH_64K_BANK);
11148 WRT16_IO_REG(ha, ctrl_status, bank_select);
11149 } else if (!(addr & BIT_16) && bank_select &
11150 ISP_FLASH_64K_BANK) {
11151 bank_select = (uint16_t)(bank_select &
11152 ~ISP_FLASH_64K_BANK);
11153 WRT16_IO_REG(ha, ctrl_status, bank_select);
11154 }
11155 }
11156
11157 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11158 WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11159 WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11160 } else {
11161 WRT16_IOMAP_REG(ha, flash_address, addr);
11162 WRT16_IOMAP_REG(ha, flash_data, data);
11163 }
11164 }
11165 }
11166
11167 /*
11168 * ql_read_flash_byte
 *	Reads a byte from flash; the chip is accessed a word at a time.
11170 *
11171 * Input:
11172 * ha = adapter state pointer.
11173 * addr = flash byte address.
11174 *
11175 * Returns:
11176 * byte from flash.
11177 *
11178 * Context:
11179 * Kernel context.
11180 */
11181 uint8_t
11182 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11183 {
11184 uint8_t data;
11185
11186 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11187 ddi_put16(ha->sbus_fpga_dev_handle,
11188 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11189 LSW(addr));
11190 ddi_put16(ha->sbus_fpga_dev_handle,
11191 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11192 MSW(addr));
11193 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11194 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11195 } else {
11196 uint16_t bank_select;
11197
11198 /* Setup bit 16 of flash address. */
11199 bank_select = RD16_IO_REG(ha, ctrl_status);
11200 if (CFG_IST(ha, CFG_CTRL_6322)) {
11201 bank_select = (uint16_t)(bank_select & ~0xf0);
11202 bank_select = (uint16_t)(bank_select |
11203 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11204 WRT16_IO_REG(ha, ctrl_status, bank_select);
11205 } else {
11206 if (addr & BIT_16 &&
11207 !(bank_select & ISP_FLASH_64K_BANK)) {
11208 bank_select = (uint16_t)(bank_select |
11209 ISP_FLASH_64K_BANK);
11210 WRT16_IO_REG(ha, ctrl_status, bank_select);
11211 } else if (!(addr & BIT_16) &&
11212 bank_select & ISP_FLASH_64K_BANK) {
11213 bank_select = (uint16_t)(bank_select &
11214 ~ISP_FLASH_64K_BANK);
11215 WRT16_IO_REG(ha, ctrl_status, bank_select);
11216 }
11217 }
11218
11219 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11220 WRT16_IO_REG(ha, flash_address, addr);
11221 data = (uint8_t)RD16_IO_REG(ha, flash_data);
11222 } else {
11223 WRT16_IOMAP_REG(ha, flash_address, addr);
11224 data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11225 }
11226 }
11227
11228 return (data);
11229 }
11230
11231 /*
11232 * ql_24xx_flash_id
11233 * Get flash IDs.
11234 *
11235 * Input:
11236 * ha: adapter state pointer.
11237 *
11238 * Returns:
11239 * ql local function return status code.
11240 *
11241 * Context:
11242 * Kernel context.
11243 */
11244 int
11245 ql_24xx_flash_id(ql_adapter_state_t *vha)
11246 {
11247 int rval;
11248 uint32_t fdata = 0;
11249 ql_adapter_state_t *ha = vha->pha;
11250 ql_xioctl_t *xp = ha->xioctl;
11251
11252 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11253
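	/*
	 * The low byte of the configuration-space address selects the
	 * flash opcode; 0xab (read electronic signature) and 0x9f
	 * (JEDEC read ID) are assumed here to return the manufacturer,
	 * device ID and size packed into fdata.  When the part does
	 * not answer, an Atmel 1MB part is assumed.
	 */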
11254 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11255
11256 if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11257 fdata = 0;
11258 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11259 (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11260 }
11261
11262 if (rval != QL_SUCCESS) {
11263 EL(ha, "24xx read_flash failed=%xh\n", rval);
11264 } else if (fdata != 0) {
11265 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11266 xp->fdesc.flash_id = MSB(LSW(fdata));
11267 xp->fdesc.flash_len = LSB(MSW(fdata));
11268 } else {
11269 xp->fdesc.flash_manuf = ATMEL_FLASH;
11270 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11271 xp->fdesc.flash_len = 0;
11272 }
11273
11274 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11275
11276 return (rval);
11277 }
11278
11279 /*
11280 * ql_24xx_load_flash
11281 * Loads flash.
11282 *
11283 * Input:
11284 * ha = adapter state pointer.
11285 * dp = data pointer.
11286 * size = data length in bytes.
11287 * faddr = 32bit word flash byte address.
11288 *
11289 * Returns:
11290 * ql local function return status code.
11291 *
11292 * Context:
11293 * Kernel context.
11294 */
11295 int
11296 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11297 uint32_t faddr)
11298 {
11299 int rval;
11300 uint32_t cnt, rest_addr, fdata, wc;
11301 dma_mem_t dmabuf = {0};
11302 ql_adapter_state_t *ha = vha->pha;
11303 ql_xioctl_t *xp = ha->xioctl;
11304
11305 QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11306 ha->instance, faddr, size);
11307
	/* The start address must be 32-bit word aligned. */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect flash address alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}
11313
11314 /* Allocate DMA buffer */
11315 if (CFG_IST(ha, CFG_CTRL_2581)) {
11316 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11317 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11318 QL_SUCCESS) {
11319 EL(ha, "dma alloc failed, rval=%xh\n", rval);
11320 return (rval);
11321 }
11322 }
11323
11324 GLOBAL_HW_LOCK();
11325
11326 /* Enable flash write */
11327 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11328 GLOBAL_HW_UNLOCK();
11329 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11330 ql_free_phys(ha, &dmabuf);
11331 return (rval);
11332 }
11333
11334 /* setup mask of address range within a sector */
11335 rest_addr = (xp->fdesc.block_size - 1) >> 2;
11336
11337 faddr = faddr >> 2; /* flash gets 32 bit words */
11338
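	/*
	 * rest_addr masks the 32-bit-word offset within an erase
	 * sector; whenever (faddr & rest_addr) == 0 the write has
	 * reached a sector boundary and that sector is erased before
	 * programming resumes.
	 */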
11339 /*
11340 * Write data to flash.
11341 */
11342 cnt = 0;
11343 size = (size + 3) >> 2; /* Round up & convert to dwords */
11344
11345 while (cnt < size) {
11346 /* Beginning of a sector? */
11347 if ((faddr & rest_addr) == 0) {
11348 if (CFG_IST(ha, CFG_CTRL_8021)) {
11349 fdata = ha->flash_data_addr | faddr;
11350 rval = ql_8021_rom_erase(ha, fdata);
11351 if (rval != QL_SUCCESS) {
11352 EL(ha, "8021 erase sector status="
11353 "%xh, start=%xh, end=%xh"
11354 "\n", rval, fdata,
11355 fdata + rest_addr);
11356 break;
11357 }
11358 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11359 fdata = ha->flash_data_addr | faddr;
11360 rval = ql_flash_access(ha,
11361 FAC_ERASE_SECTOR, fdata, fdata +
11362 rest_addr, 0);
11363 if (rval != QL_SUCCESS) {
11364 EL(ha, "erase sector status="
11365 "%xh, start=%xh, end=%xh"
11366 "\n", rval, fdata,
11367 fdata + rest_addr);
11368 break;
11369 }
11370 } else {
11371 fdata = (faddr & ~rest_addr) << 2;
11372 fdata = (fdata & 0xff00) |
11373 (fdata << 16 & 0xff0000) |
11374 (fdata >> 16 & 0xff);
11375
11376 if (rest_addr == 0x1fff) {
11377 /* 32kb sector block erase */
11378 rval = ql_24xx_write_flash(ha,
11379 FLASH_CONF_ADDR | 0x0352,
11380 fdata);
11381 } else {
11382 /* 64kb sector block erase */
11383 rval = ql_24xx_write_flash(ha,
11384 FLASH_CONF_ADDR | 0x03d8,
11385 fdata);
11386 }
11387 if (rval != QL_SUCCESS) {
11388 EL(ha, "Unable to flash sector"
11389 ": address=%xh\n", faddr);
11390 break;
11391 }
11392 }
11393 }
11394
11395 /* Write data */
11396 if (CFG_IST(ha, CFG_CTRL_2581) &&
11397 ((faddr & 0x3f) == 0)) {
11398 /*
11399 * Limit write up to sector boundary.
11400 */
11401 wc = ((~faddr & (rest_addr>>1)) + 1);
11402
11403 if (size - cnt < wc) {
11404 wc = size - cnt;
11405 }
11406
11407 ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11408 (uint8_t *)dmabuf.bp, wc<<2,
11409 DDI_DEV_AUTOINCR);
11410
11411 rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11412 faddr, dmabuf.cookie.dmac_laddress, wc);
11413 if (rval != QL_SUCCESS) {
11414 EL(ha, "unable to dma to flash "
11415 "address=%xh\n", faddr << 2);
11416 break;
11417 }
11418
11419 cnt += wc;
11420 faddr += wc;
11421 dp += wc << 2;
11422 } else {
11423 fdata = *dp++;
11424 fdata |= *dp++ << 8;
11425 fdata |= *dp++ << 16;
11426 fdata |= *dp++ << 24;
11427 rval = ql_24xx_write_flash(ha,
11428 ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    fdata);
				break;
11434 }
11435 cnt++;
11436 faddr++;
11437
11438 /* Allow other system activity. */
11439 if (cnt % 0x1000 == 0) {
11440 ql_delay(ha, 10000);
11441 }
11442 }
11443 }
11444
11445 ql_24xx_protect_flash(ha);
11446
11447 ql_free_phys(ha, &dmabuf);
11448
11449 GLOBAL_HW_UNLOCK();
11450
11451 if (rval != QL_SUCCESS) {
11452 EL(ha, "failed=%xh\n", rval);
11453 } else {
11454 /*EMPTY*/
11455 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11456 }
11457 return (rval);
11458 }
11459
11460 /*
11461 * ql_24xx_read_flash
11462 * Reads a 32bit word from ISP24xx NVRAM/FLASH.
11463 *
11464 * Input:
11465 * ha: adapter state pointer.
11466 * faddr: NVRAM/FLASH address.
11467 * bp: data pointer.
11468 *
11469 * Returns:
11470 * ql local function return status code.
11471 *
11472 * Context:
11473 * Kernel context.
11474 */
11475 int
11476 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11477 {
11478 uint32_t timer;
11479 int rval = QL_SUCCESS;
11480 ql_adapter_state_t *ha = vha->pha;
11481
11482 if (CFG_IST(ha, CFG_CTRL_8021)) {
11483 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11484 EL(ha, "8021 access error\n");
11485 }
11486 return (rval);
11487 }
11488
11489 /* Clear access error flag */
11490 WRT32_IO_REG(ha, ctrl_status,
11491 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11492
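	/*
	 * Writing the address with FLASH_DATA_FLAG clear starts the
	 * read; the controller sets the flag once the data register
	 * holds valid data.
	 */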
11493 WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11494
11495 /* Wait for READ cycle to complete. */
11496 for (timer = 300000; timer; timer--) {
11497 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11498 break;
11499 }
11500 drv_usecwait(10);
11501 }
11502
11503 if (timer == 0) {
11504 EL(ha, "failed, timeout\n");
11505 rval = QL_FUNCTION_TIMEOUT;
11506 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11507 EL(ha, "failed, access error\n");
11508 rval = QL_FUNCTION_FAILED;
11509 }
11510
11511 *bp = RD32_IO_REG(ha, flash_data);
11512
11513 return (rval);
11514 }
11515
11516 /*
11517 * ql_24xx_write_flash
11518 * Writes a 32bit word to ISP24xx NVRAM/FLASH.
11519 *
11520 * Input:
11521 * ha: adapter state pointer.
11522 * addr: NVRAM/FLASH address.
11523 * value: data.
11524 *
11525 * Returns:
11526 * ql local function return status code.
11527 *
11528 * Context:
11529 * Kernel context.
11530 */
11531 int
11532 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11533 {
11534 uint32_t timer, fdata;
11535 int rval = QL_SUCCESS;
11536 ql_adapter_state_t *ha = vha->pha;
11537
11538 if (CFG_IST(ha, CFG_CTRL_8021)) {
11539 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11540 EL(ha, "8021 access error\n");
11541 }
11542 return (rval);
11543 }
11544 /* Clear access error flag */
11545 WRT32_IO_REG(ha, ctrl_status,
11546 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11547
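	/*
	 * Writing the address with FLASH_DATA_FLAG set starts the
	 * write; the controller clears the flag when the cycle is
	 * accepted.  For configuration space writes the flash status
	 * register (conf address 0x005, BIT_0 = write in progress) is
	 * polled as well.
	 */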
11548 WRT32_IO_REG(ha, flash_data, data);
11549 RD32_IO_REG(ha, flash_data); /* PCI Posting. */
11550 WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11551
11552 /* Wait for Write cycle to complete. */
11553 for (timer = 3000000; timer; timer--) {
11554 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11555 /* Check flash write in progress. */
11556 if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11557 (void) ql_24xx_read_flash(ha,
11558 FLASH_CONF_ADDR | 0x005, &fdata);
11559 if (!(fdata & BIT_0)) {
11560 break;
11561 }
11562 } else {
11563 break;
11564 }
11565 }
11566 drv_usecwait(10);
11567 }
11568 if (timer == 0) {
11569 EL(ha, "failed, timeout\n");
11570 rval = QL_FUNCTION_TIMEOUT;
11571 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11572 EL(ha, "access error\n");
11573 rval = QL_FUNCTION_FAILED;
11574 }
11575
11576 return (rval);
}

/*
11579 * ql_24xx_unprotect_flash
11580 * Enable writes
11581 *
11582 * Input:
11583 * ha: adapter state pointer.
11584 *
11585 * Returns:
11586 * ql local function return status code.
11587 *
11588 * Context:
11589 * Kernel context.
11590 */
11591 int
11592 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11593 {
11594 int rval;
11595 uint32_t fdata;
11596 ql_adapter_state_t *ha = vha->pha;
11597 ql_xioctl_t *xp = ha->xioctl;
11598
11599 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11600
11601 if (CFG_IST(ha, CFG_CTRL_8021)) {
11602 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11603 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11604 if (rval != QL_SUCCESS) {
11605 EL(ha, "8021 access error\n");
11606 }
11607 return (rval);
11608 }
11609 if (CFG_IST(ha, CFG_CTRL_81XX)) {
11610 if (ha->task_daemon_flags & FIRMWARE_UP) {
11611 if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11612 0)) != QL_SUCCESS) {
11613 EL(ha, "status=%xh\n", rval);
11614 }
11615 QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11616 ha->instance);
11617 return (rval);
11618 }
11619 } else {
11620 /* Enable flash write. */
11621 WRT32_IO_REG(ha, ctrl_status,
11622 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11623 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11624 }
11625
11626 /*
11627 * Remove block write protection (SST and ST) and
11628 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11629 * Unprotect sectors.
11630 */
11631 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11632 xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11633
11634 if (xp->fdesc.unprotect_sector_cmd != 0) {
11635 for (fdata = 0; fdata < 0x10; fdata++) {
11636 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11637 0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11638 }
11639
11640 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11641 xp->fdesc.unprotect_sector_cmd, 0x00400f);
11642 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11643 xp->fdesc.unprotect_sector_cmd, 0x00600f);
11644 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11645 xp->fdesc.unprotect_sector_cmd, 0x00800f);
11646 }
11647
11648 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11649
11650 return (QL_SUCCESS);
11651 }
11652
11653 /*
11654 * ql_24xx_protect_flash
11655 * Disable writes
11656 *
11657 * Input:
11658 * ha: adapter state pointer.
11659 *
11660 * Context:
11661 * Kernel context.
11662 */
11663 void
11664 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11665 {
11666 int rval;
11667 uint32_t fdata;
11668 ql_adapter_state_t *ha = vha->pha;
11669 ql_xioctl_t *xp = ha->xioctl;
11670
11671 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11672
11673 if (CFG_IST(ha, CFG_CTRL_8021)) {
11674 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11675 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
11676 if (rval != QL_SUCCESS) {
11677 EL(ha, "8021 access error\n");
11678 }
11679 return;
11680 }
11681 if (CFG_IST(ha, CFG_CTRL_81XX)) {
11682 if (ha->task_daemon_flags & FIRMWARE_UP) {
11683 if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11684 0)) != QL_SUCCESS) {
11685 EL(ha, "status=%xh\n", rval);
11686 }
11687 QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11688 ha->instance);
11689 return;
11690 }
11691 } else {
11692 /* Enable flash write. */
11693 WRT32_IO_REG(ha, ctrl_status,
11694 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11695 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11696 }
11697
11698 /*
11699 * Protect sectors.
11700 * Set block write protection (SST and ST) and
11701 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11702 */
11703 if (xp->fdesc.protect_sector_cmd != 0) {
11704 for (fdata = 0; fdata < 0x10; fdata++) {
11705 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11706 0x330 | xp->fdesc.protect_sector_cmd, fdata);
11707 }
11708 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11709 xp->fdesc.protect_sector_cmd, 0x00400f);
11710 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11711 xp->fdesc.protect_sector_cmd, 0x00600f);
11712 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11713 xp->fdesc.protect_sector_cmd, 0x00800f);
11714
11715 /* TODO: ??? */
11716 (void) ql_24xx_write_flash(ha,
11717 FLASH_CONF_ADDR | 0x101, 0x80);
11718 } else {
11719 (void) ql_24xx_write_flash(ha,
11720 FLASH_CONF_ADDR | 0x101, 0x9c);
11721 }
11722
11723 /* Disable flash write. */
11724 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11725 WRT32_IO_REG(ha, ctrl_status,
11726 RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11727 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11728 }
11729
11730 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11731 }
11732
11733 /*
11734 * ql_dump_firmware
11735 * Save RISC code state information.
11736 *
11737 * Input:
11738 * ha = adapter state pointer.
11739 *
11740 * Returns:
11741 * QL local function return status code.
11742 *
11743 * Context:
11744 * Kernel context.
11745 */
11746 static int
11747 ql_dump_firmware(ql_adapter_state_t *vha)
11748 {
11749 int rval;
11750 clock_t timer = drv_usectohz(30000000);
11751 ql_adapter_state_t *ha = vha->pha;
11752
11753 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11754
11755 QL_DUMP_LOCK(ha);
11756
11757 if (ha->ql_dump_state & QL_DUMPING ||
11758 (ha->ql_dump_state & QL_DUMP_VALID &&
11759 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11760 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11761 QL_DUMP_UNLOCK(ha);
11762 return (QL_SUCCESS);
11763 }
11764
11765 QL_DUMP_UNLOCK(ha);
11766
11767 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11768
11769 /*
11770 * Wait for all outstanding commands to complete
11771 */
11772 (void) ql_wait_outstanding(ha);
11773
11774 /* Dump firmware. */
11775 rval = ql_binary_fw_dump(ha, TRUE);
11776
11777 /* Do abort to force restart. */
11778 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11779 EL(ha, "restarting, isp_abort_needed\n");
11780
11781 /* Acquire task daemon lock. */
11782 TASK_DAEMON_LOCK(ha);
11783
11784 /* Wait for suspension to end. */
11785 while (ha->task_daemon_flags & QL_SUSPENDED) {
11786 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11787
11788 /* 30 seconds from now */
11789 if (cv_reltimedwait(&ha->cv_dr_suspended,
11790 &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11791 /*
11792 * The timeout time 'timer' was
11793 * reached without the condition
11794 * being signaled.
11795 */
11796 break;
11797 }
11798 }
11799
11800 /* Release task daemon lock. */
11801 TASK_DAEMON_UNLOCK(ha);
11802
11803 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11804 /*EMPTY*/
11805 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11806 } else {
11807 EL(ha, "failed, rval = %xh\n", rval);
11808 }
11809 return (rval);
11810 }
11811
11812 /*
11813 * ql_binary_fw_dump
11814 * Dumps binary data from firmware.
11815 *
11816 * Input:
11817 * ha = adapter state pointer.
11818 * lock_needed = mailbox lock needed.
11819 *
11820 * Returns:
11821 * ql local function return status code.
11822 *
11823 * Context:
11824 * Interrupt or Kernel context, no mailbox commands allowed.
11825 */
11826 int
11827 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11828 {
11829 clock_t timer;
11830 mbx_cmd_t mc;
11831 mbx_cmd_t *mcp = &mc;
11832 int rval = QL_SUCCESS;
11833 ql_adapter_state_t *ha = vha->pha;
11834
11835 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11836
11837 if (CFG_IST(ha, CFG_CTRL_8021)) {
11838 EL(ha, "8021 not supported\n");
11839 return (QL_NOT_SUPPORTED);
11840 }
11841
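	/*
	 * Dump state machine: QL_DUMPING while a capture is in
	 * progress, QL_DUMP_VALID once a capture completes, and
	 * QL_DUMP_UPLOADED after the dump has been retrieved.  A new
	 * capture is refused while a valid, unretrieved dump exists.
	 */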
11842 QL_DUMP_LOCK(ha);
11843
11844 if (ha->ql_dump_state & QL_DUMPING ||
11845 (ha->ql_dump_state & QL_DUMP_VALID &&
11846 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11847 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11848 QL_DUMP_UNLOCK(ha);
11849 return (QL_DATA_EXISTS);
11850 }
11851
11852 ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11853 ha->ql_dump_state |= QL_DUMPING;
11854
11855 QL_DUMP_UNLOCK(ha);
11856
11857 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11858
11859 /* Insert Time Stamp */
11860 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11861 FTO_INSERT_TIME_STAMP);
11862 if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert time stamp "
			    "failed: %xh\n", rval);
11865 }
11866 }
11867
11868 if (lock_needed == TRUE) {
11869 /* Acquire mailbox register lock. */
11870 MBX_REGISTER_LOCK(ha);
11871 timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11872
11873 /* Check for mailbox available, if not wait for signal. */
11874 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11875 ha->mailbox_flags = (uint8_t)
11876 (ha->mailbox_flags | MBX_WANT_FLG);
11877
			/* Wait up to (mailbox timeout + 2) seconds. */
11879 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11880 timer, TR_CLOCK_TICK) == -1) {
11881 /*
11882 * The timeout time 'timer' was
11883 * reached without the condition
11884 * being signaled.
11885 */
11886
11887 /* Release mailbox register lock. */
11888 MBX_REGISTER_UNLOCK(ha);
11889
11890 EL(ha, "failed, rval = %xh\n",
11891 QL_FUNCTION_TIMEOUT);
11892 return (QL_FUNCTION_TIMEOUT);
11893 }
11894 }
11895
11896 /* Set busy flag. */
11897 ha->mailbox_flags = (uint8_t)
11898 (ha->mailbox_flags | MBX_BUSY_FLG);
11899 mcp->timeout = 120;
11900 ha->mcp = mcp;
11901
11902 /* Release mailbox register lock. */
11903 MBX_REGISTER_UNLOCK(ha);
11904 }
11905
11906 /* Free previous dump buffer. */
11907 if (ha->ql_dump_ptr != NULL) {
11908 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11909 ha->ql_dump_ptr = NULL;
11910 }
11911
11912 if (CFG_IST(ha, CFG_CTRL_2422)) {
11913 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11914 ha->fw_ext_memory_size);
11915 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11916 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11917 ha->fw_ext_memory_size);
11918 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11919 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11920 ha->fw_ext_memory_size);
11921 } else {
11922 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11923 }
11924
11925 if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11926 NULL) {
11927 rval = QL_MEMORY_ALLOC_FAILED;
11928 } else {
11929 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11930 rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11931 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11932 rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11933 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11934 rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11935 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11936 rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11937 } else {
11938 rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11939 }
11940 }
11941
11942 /* Reset ISP chip. */
11943 ql_reset_chip(ha);
11944
11945 QL_DUMP_LOCK(ha);
11946
11947 if (rval != QL_SUCCESS) {
11948 if (ha->ql_dump_ptr != NULL) {
11949 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11950 ha->ql_dump_ptr = NULL;
11951 }
11952 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11953 QL_DUMP_UPLOADED);
11954 EL(ha, "failed, rval = %xh\n", rval);
11955 } else {
11956 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11957 ha->ql_dump_state |= QL_DUMP_VALID;
11958 EL(ha, "done\n");
11959 }
11960
11961 QL_DUMP_UNLOCK(ha);
11962
11963 return (rval);
11964 }
11965
11966 /*
11967 * ql_ascii_fw_dump
11968 * Converts firmware binary dump to ascii.
11969 *
11970 * Input:
11971 * ha = adapter state pointer.
 *	bufp = buffer pointer.
11973 *
11974 * Returns:
11975 * Amount of data buffer used.
11976 *
11977 * Context:
11978 * Kernel context.
11979 */
11980 size_t
11981 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11982 {
11983 uint32_t cnt;
11984 caddr_t bp;
11985 int mbox_cnt;
11986 ql_adapter_state_t *ha = vha->pha;
11987 ql_fw_dump_t *fw = ha->ql_dump_ptr;
11988
11989 if (CFG_IST(ha, CFG_CTRL_2422)) {
11990 return (ql_24xx_ascii_fw_dump(ha, bufp));
11991 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11992 return (ql_2581_ascii_fw_dump(ha, bufp));
11993 }
11994
11995 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11996
11997 if (CFG_IST(ha, CFG_CTRL_2300)) {
11998 (void) sprintf(bufp, "\nISP 2300IP ");
11999 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12000 (void) sprintf(bufp, "\nISP 6322FLX ");
12001 } else {
12002 (void) sprintf(bufp, "\nISP 2200IP ");
12003 }
12004
12005 bp = bufp + strlen(bufp);
12006 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12007 ha->fw_major_version, ha->fw_minor_version,
12008 ha->fw_subminor_version);
12009
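	/*
	 * Each register value below is printed as a fixed-width
	 * 6-character field ("%04x  ") so that bp can be advanced by a
	 * constant instead of calling strlen().
	 */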
12010 (void) strcat(bufp, "\nPBIU Registers:");
12011 bp = bufp + strlen(bufp);
12012 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12013 if (cnt % 8 == 0) {
12014 *bp++ = '\n';
12015 }
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12017 bp = bp + 6;
12018 }
12019
12020 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12021 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12022 "registers:");
12023 bp = bufp + strlen(bufp);
12024 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12025 if (cnt % 8 == 0) {
12026 *bp++ = '\n';
12027 }
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12029 bp = bp + 6;
12030 }
12031 }
12032
12033 (void) strcat(bp, "\n\nMailbox Registers:");
12034 bp = bufp + strlen(bufp);
12035 mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12036 for (cnt = 0; cnt < mbox_cnt; cnt++) {
12037 if (cnt % 8 == 0) {
12038 *bp++ = '\n';
12039 }
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12041 bp = bp + 6;
12042 }
12043
12044 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12045 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12046 bp = bufp + strlen(bufp);
12047 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12048 if (cnt % 8 == 0) {
12049 *bp++ = '\n';
12050 }
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12052 bp = bp + 6;
12053 }
12054 }
12055
12056 (void) strcat(bp, "\n\nDMA Registers:");
12057 bp = bufp + strlen(bufp);
12058 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12059 if (cnt % 8 == 0) {
12060 *bp++ = '\n';
12061 }
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12063 bp = bp + 6;
12064 }
12065
12066 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12067 bp = bufp + strlen(bufp);
12068 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12069 if (cnt % 8 == 0) {
12070 *bp++ = '\n';
12071 }
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
12073 bp = bp + 6;
12074 }
12075
12076 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12077 bp = bufp + strlen(bufp);
12078 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12079 if (cnt % 8 == 0) {
12080 *bp++ = '\n';
12081 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
12083 bp = bp + 6;
12084 }
12085
12086 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12087 bp = bufp + strlen(bufp);
12088 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12089 if (cnt % 8 == 0) {
12090 *bp++ = '\n';
12091 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
12093 bp = bp + 6;
12094 }
12095
12096 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12097 bp = bufp + strlen(bufp);
12098 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12099 if (cnt % 8 == 0) {
12100 *bp++ = '\n';
12101 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
12103 bp = bp + 6;
12104 }
12105
12106 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12107 bp = bufp + strlen(bufp);
12108 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12109 if (cnt % 8 == 0) {
12110 *bp++ = '\n';
12111 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
12113 bp = bp + 6;
12114 }
12115
12116 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12117 bp = bufp + strlen(bufp);
12118 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12119 if (cnt % 8 == 0) {
12120 *bp++ = '\n';
12121 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
12123 bp = bp + 6;
12124 }
12125
12126 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12127 bp = bufp + strlen(bufp);
12128 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12129 if (cnt % 8 == 0) {
12130 *bp++ = '\n';
12131 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
12133 bp = bp + 6;
12134 }
12135
12136 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12137 bp = bufp + strlen(bufp);
12138 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12139 if (cnt % 8 == 0) {
12140 *bp++ = '\n';
12141 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12143 bp = bp + 6;
12144 }
12145
12146 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12147 bp = bufp + strlen(bufp);
12148 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12149 if (cnt % 8 == 0) {
12150 *bp++ = '\n';
12151 }
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12153 bp = bp + 6;
12154 }
12155
12156 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12157 bp = bufp + strlen(bufp);
12158 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12159 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12160 CFG_CTRL_6322)) == 0))) {
12161 break;
12162 }
12163 if (cnt % 8 == 0) {
12164 *bp++ = '\n';
12165 }
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12167 bp = bp + 6;
12168 }
12169
12170 (void) strcat(bp, "\n\nFPM B0 Registers:");
12171 bp = bufp + strlen(bufp);
12172 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12173 if (cnt % 8 == 0) {
12174 *bp++ = '\n';
12175 }
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12177 bp = bp + 6;
12178 }
12179
12180 (void) strcat(bp, "\n\nFPM B1 Registers:");
12181 bp = bufp + strlen(bufp);
12182 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12183 if (cnt % 8 == 0) {
12184 *bp++ = '\n';
12185 }
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12187 bp = bp + 6;
12188 }
12189
12190 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12191 (void) strcat(bp, "\n\nCode RAM Dump:");
12192 bp = bufp + strlen(bufp);
12193 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12194 if (cnt % 8 == 0) {
12195 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12196 bp = bp + 8;
12197 }
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12199 bp = bp + 6;
12200 }
12201
12202 (void) strcat(bp, "\n\nStack RAM Dump:");
12203 bp = bufp + strlen(bufp);
12204 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12205 if (cnt % 8 == 0) {
12206 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12207 bp = bp + 8;
12208 }
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12210 bp = bp + 6;
12211 }
12212
12213 (void) strcat(bp, "\n\nData RAM Dump:");
12214 bp = bufp + strlen(bufp);
12215 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12216 if (cnt % 8 == 0) {
12217 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12218 bp = bp + 8;
12219 }
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
12221 bp = bp + 6;
12222 }
12223 } else {
12224 (void) strcat(bp, "\n\nRISC SRAM:");
12225 bp = bufp + strlen(bufp);
12226 for (cnt = 0; cnt < 0xf000; cnt++) {
12227 if (cnt % 8 == 0) {
12228 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12229 bp = bp + 7;
12230 }
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12232 bp = bp + 6;
12233 }
12234 }
12235
12236 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12237 bp += strlen(bp);
12238
12239 (void) sprintf(bp, "\n\nRequest Queue");
12240 bp += strlen(bp);
12241 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12242 if (cnt % 8 == 0) {
12243 (void) sprintf(bp, "\n%08x: ", cnt);
12244 bp += strlen(bp);
12245 }
12246 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12247 bp += strlen(bp);
12248 }
12249
12250 (void) sprintf(bp, "\n\nResponse Queue");
12251 bp += strlen(bp);
12252 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12253 if (cnt % 8 == 0) {
12254 (void) sprintf(bp, "\n%08x: ", cnt);
12255 bp += strlen(bp);
12256 }
12257 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12258 bp += strlen(bp);
12259 }
12260
12261 (void) sprintf(bp, "\n");
12262
12263 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12264
12265 return (strlen(bufp));
12266 }
12267
12268 /*
12269 * ql_24xx_ascii_fw_dump
12270 * Converts ISP24xx firmware binary dump to ascii.
12271 *
12272 * Input:
12273 * ha = adapter state pointer.
 *	bufp = buffer pointer.
12275 *
12276 * Returns:
12277 * Amount of data buffer used.
12278 *
12279 * Context:
12280 * Kernel context.
12281 */
12282 static size_t
12283 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12284 {
12285 uint32_t cnt;
12286 caddr_t bp = bufp;
12287 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12288
12289 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12290
12291 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12292 ha->fw_major_version, ha->fw_minor_version,
12293 ha->fw_subminor_version, ha->fw_attributes);
12294 bp += strlen(bp);
12295
	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
	bp += strlen(bp);

	(void) sprintf(bp, "\nHost Interface Registers");
	bp += strlen(bp);
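	/*
	 * Register values are fixed-width fields, so bp is advanced
	 * by the field width (9 for "%08x ", 5 for "%04x ") instead
	 * of by strlen().
	 */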
12300 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12301 if (cnt % 8 == 0) {
12302 (void) sprintf(bp++, "\n");
12303 }
12304
12305 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12306 bp += 9;
12307 }
12308
12309 (void) sprintf(bp, "\n\nMailbox Registers");
12310 bp += strlen(bp);
12311 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12312 if (cnt % 16 == 0) {
12313 (void) sprintf(bp++, "\n");
12314 }
12315
12316 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12317 bp += 5;
12318 }
12319
12320 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12321 bp += strlen(bp);
12322 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12323 if (cnt % 8 == 0) {
12324 (void) sprintf(bp++, "\n");
12325 }
12326
12327 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12328 bp += 9;
12329 }
12330
12331 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12332 bp += strlen(bp);
12333 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12334 if (cnt % 8 == 0) {
12335 (void) sprintf(bp++, "\n");
12336 }
12337
12338 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12339 bp += 9;
12340 }
12341
12342 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12343 bp += strlen(bp);
12344 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12345 if (cnt % 8 == 0) {
12346 (void) sprintf(bp++, "\n");
12347 }
12348
12349 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12350 bp += 9;
12351 }
12352
12353 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12354 bp += strlen(bp);
12355 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12356 if (cnt % 8 == 0) {
12357 (void) sprintf(bp++, "\n");
12358 }
12359
12360 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12361 bp += 9;
12362 }
12363
12364 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12365 bp += strlen(bp);
12366 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12367 if (cnt % 8 == 0) {
12368 (void) sprintf(bp++, "\n");
12369 }
12370
12371 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12372 bp += 9;
12373 }
12374
12375 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12376 bp += strlen(bp);
12377 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12378 if (cnt % 8 == 0) {
12379 (void) sprintf(bp++, "\n");
12380 }
12381
12382 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12383 bp += 9;
12384 }
12385
12386 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12387 bp += strlen(bp);
12388 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12389 if (cnt % 8 == 0) {
12390 (void) sprintf(bp++, "\n");
12391 }
12392
12393 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12394 bp += 9;
12395 }
12396
12397 (void) sprintf(bp, "\n\nCommand DMA Registers");
12398 bp += strlen(bp);
12399 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12400 if (cnt % 8 == 0) {
12401 (void) sprintf(bp++, "\n");
12402 }
12403
12404 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12405 bp += 9;
12406 }
12407
12408 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12409 bp += strlen(bp);
12410 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12411 if (cnt % 8 == 0) {
12412 (void) sprintf(bp++, "\n");
12413 }
12414
12415 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12416 bp += 9;
12417 }
12418
12419 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12420 bp += strlen(bp);
12421 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12422 if (cnt % 8 == 0) {
12423 (void) sprintf(bp++, "\n");
12424 }
12425
12426 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12427 bp += 9;
12428 }
12429
12430 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12431 bp += strlen(bp);
12432 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12433 if (cnt % 8 == 0) {
12434 (void) sprintf(bp++, "\n");
12435 }
12436
12437 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12438 bp += 9;
12439 }
12440
12441 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12442 bp += strlen(bp);
12443 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12444 if (cnt % 8 == 0) {
12445 (void) sprintf(bp++, "\n");
12446 }
12447
12448 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12449 bp += 9;
12450 }
12451
12452 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12453 bp += strlen(bp);
12454 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12455 if (cnt % 8 == 0) {
12456 (void) sprintf(bp++, "\n");
12457 }
12458
12459 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12460 bp += 9;
12461 }
12462
12463 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12464 bp += strlen(bp);
12465 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12466 if (cnt % 8 == 0) {
12467 (void) sprintf(bp++, "\n");
12468 }
12469
12470 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12471 bp += 9;
12472 }
12473
12474 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12475 bp += strlen(bp);
12476 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12477 if (cnt % 8 == 0) {
12478 (void) sprintf(bp++, "\n");
12479 }
12480
12481 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12482 bp += 9;
12483 }
12484
12485 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12486 bp += strlen(bp);
12487 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12488 if (cnt % 8 == 0) {
12489 (void) sprintf(bp++, "\n");
12490 }
12491
12492 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12493 bp += 9;
12494 }
12495
12496 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12497 bp += strlen(bp);
12498 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12499 if (cnt % 8 == 0) {
12500 (void) sprintf(bp++, "\n");
12501 }
12502
12503 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12504 bp += 9;
12505 }
12506
12507 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12508 bp += strlen(bp);
12509 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12510 if (cnt % 8 == 0) {
12511 (void) sprintf(bp++, "\n");
12512 }
12513
12514 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12515 bp += 9;
12516 }
12517
12518 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12519 bp += strlen(bp);
12520 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12521 if (cnt % 8 == 0) {
12522 (void) sprintf(bp++, "\n");
12523 }
12524
12525 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12526 bp += 9;
12527 }
12528
12529 (void) sprintf(bp, "\n\nRISC GP Registers");
12530 bp += strlen(bp);
12531 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12532 if (cnt % 8 == 0) {
12533 (void) sprintf(bp++, "\n");
12534 }
12535
12536 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12537 bp += 9;
12538 }
12539
	(void) sprintf(bp, "\n\nShadow Registers");
	bp += strlen(bp);
12542 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12543 if (cnt % 8 == 0) {
12544 (void) sprintf(bp++, "\n");
12545 }
12546
12547 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12548 bp += 9;
12549 }
12550
12551 (void) sprintf(bp, "\n\nLMC Registers");
12552 bp += strlen(bp);
12553 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12554 if (cnt % 8 == 0) {
12555 (void) sprintf(bp++, "\n");
12556 }
12557
12558 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12559 bp += 9;
12560 }
12561
12562 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12563 bp += strlen(bp);
12564 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12565 if (cnt % 8 == 0) {
12566 (void) sprintf(bp++, "\n");
12567 }
12568
12569 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12570 bp += 9;
12571 }
12572
12573 (void) sprintf(bp, "\n\nFB Hardware Registers");
12574 bp += strlen(bp);
12575 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12576 if (cnt % 8 == 0) {
12577 (void) sprintf(bp++, "\n");
12578 }
12579
12580 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12581 bp += 9;
12582 }
12583
12584 (void) sprintf(bp, "\n\nCode RAM");
12585 bp += strlen(bp);
12586 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12587 if (cnt % 8 == 0) {
12588 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12589 bp += 11;
12590 }
12591
12592 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12593 bp += 9;
12594 }
12595
12596 (void) sprintf(bp, "\n\nExternal Memory");
12597 bp += strlen(bp);
12598 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12599 if (cnt % 8 == 0) {
12600 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12601 bp += 11;
12602 }
12603 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12604 bp += 9;
12605 }
12606
12607 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12608 bp += strlen(bp);
12609
12610 (void) sprintf(bp, "\n\nRequest Queue");
12611 bp += strlen(bp);
12612 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12613 if (cnt % 8 == 0) {
12614 (void) sprintf(bp, "\n%08x: ", cnt);
12615 bp += strlen(bp);
12616 }
12617 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12618 bp += strlen(bp);
12619 }
12620
12621 (void) sprintf(bp, "\n\nResponse Queue");
12622 bp += strlen(bp);
12623 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12624 if (cnt % 8 == 0) {
12625 (void) sprintf(bp, "\n%08x: ", cnt);
12626 bp += strlen(bp);
12627 }
12628 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12629 bp += strlen(bp);
12630 }
12631
12632 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12633 (ha->fwexttracebuf.bp != NULL)) {
12634 uint32_t cnt_b = 0;
12635 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12636
12637 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12638 bp += strlen(bp);
12639 /* show data address as a byte address, data as long words */
12640 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12641 cnt_b = cnt * 4;
12642 if (cnt_b % 32 == 0) {
12643 (void) sprintf(bp, "\n%08x: ",
12644 (int)(w64 + cnt_b));
12645 bp += 11;
12646 }
12647 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12648 bp += 9;
12649 }
12650 }
12651
12652 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12653 (ha->fwfcetracebuf.bp != NULL)) {
12654 uint32_t cnt_b = 0;
12655 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12656
12657 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12658 bp += strlen(bp);
12659 /* show data address as a byte address, data as long words */
12660 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12661 cnt_b = cnt * 4;
12662 if (cnt_b % 32 == 0) {
12663 (void) sprintf(bp, "\n%08x: ",
12664 (int)(w64 + cnt_b));
12665 bp += 11;
12666 }
12667 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12668 bp += 9;
12669 }
12670 }
12671
12672 (void) sprintf(bp, "\n\n");
12673 bp += strlen(bp);
12674
12675 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12676
12677 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12678
12679 return (cnt);
12680 }
12681
12682 /*
12683 * ql_2581_ascii_fw_dump
12684 * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12685 *
12686 * Input:
12687 * ha = adapter state pointer.
 *	bufp = buffer pointer.
12689 *
12690 * Returns:
12691 * Amount of data buffer used.
12692 *
12693 * Context:
12694 * Kernel context.
12695 */
12696 static size_t
12697 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12698 {
12699 uint32_t cnt;
12700 uint32_t cnt1;
12701 caddr_t bp = bufp;
12702 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12703
12704 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12705
12706 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12707 ha->fw_major_version, ha->fw_minor_version,
12708 ha->fw_subminor_version, ha->fw_attributes);
12709 bp += strlen(bp);
12710
12711 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12712 bp += strlen(bp);
12713
12714 (void) sprintf(bp, "\nHostRisc Registers");
12715 bp += strlen(bp);
12716 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12717 if (cnt % 8 == 0) {
12718 (void) sprintf(bp++, "\n");
12719 }
12720 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12721 bp += 9;
12722 }
12723
12724 (void) sprintf(bp, "\n\nPCIe Registers");
12725 bp += strlen(bp);
12726 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12727 if (cnt % 8 == 0) {
12728 (void) sprintf(bp++, "\n");
12729 }
12730 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12731 bp += 9;
12732 }
12733
	(void) sprintf(bp, "\n\nHost Interface Registers");
12735 bp += strlen(bp);
12736 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12737 if (cnt % 8 == 0) {
12738 (void) sprintf(bp++, "\n");
12739 }
12740 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12741 bp += 9;
12742 }
12743
	(void) sprintf(bp, "\n\nShadow Registers");
	bp += strlen(bp);
12746 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12747 if (cnt % 8 == 0) {
12748 (void) sprintf(bp++, "\n");
12749 }
12750 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12751 bp += 9;
12752 }
12753
	(void) sprintf(bp, "\n\nRISC IO Register\n%08x", fw->risc_io);
	bp += strlen(bp);
12757
12758 (void) sprintf(bp, "\n\nMailbox Registers");
12759 bp += strlen(bp);
12760 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12761 if (cnt % 16 == 0) {
12762 (void) sprintf(bp++, "\n");
12763 }
12764 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12765 bp += 5;
12766 }
12767
12768 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12769 bp += strlen(bp);
12770 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12771 if (cnt % 8 == 0) {
12772 (void) sprintf(bp++, "\n");
12773 }
12774 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12775 bp += 9;
12776 }
12777
12778 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12779 bp += strlen(bp);
12780 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12781 if (cnt % 8 == 0) {
12782 (void) sprintf(bp++, "\n");
12783 }
12784 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12785 bp += 9;
12786 }
12787
12788 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12789 bp += strlen(bp);
12790 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12791 if (cnt % 8 == 0) {
12792 (void) sprintf(bp++, "\n");
12793 }
12794 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12795 bp += 9;
12796 }
12797
12798 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12799 bp += strlen(bp);
12800 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12801 if (cnt % 8 == 0) {
12802 (void) sprintf(bp++, "\n");
12803 }
12804 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12805 bp += 9;
12806 }
12807
12808 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12809 bp += strlen(bp);
12810 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12811 if (cnt % 8 == 0) {
12812 (void) sprintf(bp++, "\n");
12813 }
12814 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12815 bp += 9;
12816 }
12817
12818 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12819 bp += strlen(bp);
12820 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12821 if (cnt % 8 == 0) {
12822 (void) sprintf(bp++, "\n");
12823 }
12824 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12825 bp += 9;
12826 }
12827
12828 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12829 bp += strlen(bp);
12830 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12831 if (cnt % 8 == 0) {
12832 (void) sprintf(bp++, "\n");
12833 }
12834 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12835 bp += 9;
12836 }
12837
12838 (void) sprintf(bp, "\n\nASEQ GP Registers");
12839 bp += strlen(bp);
12840 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12841 if (cnt % 8 == 0) {
12842 (void) sprintf(bp++, "\n");
12843 }
12844 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12845 bp += 9;
12846 }
12847
12848 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12849 bp += strlen(bp);
12850 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12851 if (cnt % 8 == 0) {
12852 (void) sprintf(bp++, "\n");
12853 }
12854 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12855 bp += 9;
12856 }
12857
12858 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12859 bp += strlen(bp);
12860 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12861 if (cnt % 8 == 0) {
12862 (void) sprintf(bp++, "\n");
12863 }
12864 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12865 bp += 9;
12866 }
12867
12868 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12869 bp += strlen(bp);
12870 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12871 if (cnt % 8 == 0) {
12872 (void) sprintf(bp++, "\n");
12873 }
12874 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12875 bp += 9;
12876 }
12877
12878 (void) sprintf(bp, "\n\nCommand DMA Registers");
12879 bp += strlen(bp);
12880 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12881 if (cnt % 8 == 0) {
12882 (void) sprintf(bp++, "\n");
12883 }
12884 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12885 bp += 9;
12886 }
12887
12888 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12889 bp += strlen(bp);
12890 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12891 if (cnt % 8 == 0) {
12892 (void) sprintf(bp++, "\n");
12893 }
12894 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12895 bp += 9;
12896 }
12897
12898 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12899 bp += strlen(bp);
12900 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12901 if (cnt % 8 == 0) {
12902 (void) sprintf(bp++, "\n");
12903 }
12904 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12905 bp += 9;
12906 }
12907
12908 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12909 bp += strlen(bp);
12910 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12911 if (cnt % 8 == 0) {
12912 (void) sprintf(bp++, "\n");
12913 }
12914 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12915 bp += 9;
12916 }
12917
12918 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12919 bp += strlen(bp);
12920 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12921 if (cnt % 8 == 0) {
12922 (void) sprintf(bp++, "\n");
12923 }
12924 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12925 bp += 9;
12926 }
12927
12928 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12929 bp += strlen(bp);
12930 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12931 if (cnt % 8 == 0) {
12932 (void) sprintf(bp++, "\n");
12933 }
12934 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12935 bp += 9;
12936 }
12937
12938 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12939 bp += strlen(bp);
12940 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12941 if (cnt % 8 == 0) {
12942 (void) sprintf(bp++, "\n");
12943 }
12944 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12945 bp += 9;
12946 }
12947
12948 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12949 bp += strlen(bp);
12950 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12951 if (cnt % 8 == 0) {
12952 (void) sprintf(bp++, "\n");
12953 }
12954 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12955 bp += 9;
12956 }
12957
12958 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12959 bp += strlen(bp);
12960 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12961 if (cnt % 8 == 0) {
12962 (void) sprintf(bp++, "\n");
12963 }
12964 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12965 bp += 9;
12966 }
12967
12968 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12969 bp += strlen(bp);
12970 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12971 if (cnt % 8 == 0) {
12972 (void) sprintf(bp++, "\n");
12973 }
12974 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12975 bp += 9;
12976 }
12977
12978 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12979 bp += strlen(bp);
12980 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12981 if (cnt % 8 == 0) {
12982 (void) sprintf(bp++, "\n");
12983 }
12984 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12985 bp += 9;
12986 }
12987
12988 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12989 bp += strlen(bp);
12990 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12991 if (cnt % 8 == 0) {
12992 (void) sprintf(bp++, "\n");
12993 }
12994 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12995 bp += 9;
12996 }
12997
12998 (void) sprintf(bp, "\n\nRISC GP Registers");
12999 bp += strlen(bp);
13000 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13001 if (cnt % 8 == 0) {
13002 (void) sprintf(bp++, "\n");
13003 }
13004 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13005 bp += 9;
13006 }
13007
13008 (void) sprintf(bp, "\n\nLMC Registers");
13009 bp += strlen(bp);
13010 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13011 if (cnt % 8 == 0) {
13012 (void) sprintf(bp++, "\n");
13013 }
13014 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13015 bp += 9;
13016 }
13017
13018 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13019 bp += strlen(bp);
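	/* The ISP81xx dump carries a different-sized FPM register block. */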
13020 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13021 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13022 (uint32_t)(sizeof (fw->fpm_hdw_reg));
13023 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13024 if (cnt % 8 == 0) {
13025 (void) sprintf(bp++, "\n");
13026 }
13027 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13028 bp += 9;
13029 }
13030
13031 (void) sprintf(bp, "\n\nFB Hardware Registers");
13032 bp += strlen(bp);
13033 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13034 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13035 (uint32_t)(sizeof (fw->fb_hdw_reg));
13036 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13037 if (cnt % 8 == 0) {
13038 (void) sprintf(bp++, "\n");
13039 }
13040 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13041 bp += 9;
13042 }
13043
13044 (void) sprintf(bp, "\n\nCode RAM");
13045 bp += strlen(bp);
13046 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13047 if (cnt % 8 == 0) {
13048 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13049 bp += 11;
13050 }
13051 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13052 bp += 9;
13053 }
13054
13055 (void) sprintf(bp, "\n\nExternal Memory");
13056 bp += strlen(bp);
13057 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13058 if (cnt % 8 == 0) {
13059 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13060 bp += 11;
13061 }
13062 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13063 bp += 9;
13064 }
13065
13066 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13067 bp += strlen(bp);
13068
13069 (void) sprintf(bp, "\n\nRequest Queue");
13070 bp += strlen(bp);
13071 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13072 if (cnt % 8 == 0) {
13073 (void) sprintf(bp, "\n%08x: ", cnt);
13074 bp += strlen(bp);
13075 }
13076 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13077 bp += strlen(bp);
13078 }
13079
13080 (void) sprintf(bp, "\n\nResponse Queue");
13081 bp += strlen(bp);
13082 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13083 if (cnt % 8 == 0) {
13084 (void) sprintf(bp, "\n%08x: ", cnt);
13085 bp += strlen(bp);
13086 }
13087 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13088 bp += strlen(bp);
13089 }
13090
13091 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13092 (ha->fwexttracebuf.bp != NULL)) {
13093 uint32_t cnt_b = 0;
13094 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13095
13096 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13097 bp += strlen(bp);
13098 /* show data address as a byte address, data as long words */
13099 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13100 cnt_b = cnt * 4;
13101 if (cnt_b % 32 == 0) {
13102 (void) sprintf(bp, "\n%08x: ",
13103 (int)(w64 + cnt_b));
13104 bp += 11;
13105 }
13106 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13107 bp += 9;
13108 }
13109 }
13110
13111 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13112 (ha->fwfcetracebuf.bp != NULL)) {
13113 uint32_t cnt_b = 0;
13114 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13115
13116 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13117 bp += strlen(bp);
13118 /* show data address as a byte address, data as long words */
13119 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13120 cnt_b = cnt * 4;
13121 if (cnt_b % 32 == 0) {
13122 (void) sprintf(bp, "\n%08x: ",
13123 (int)(w64 + cnt_b));
13124 bp += 11;
13125 }
13126 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13127 bp += 9;
13128 }
13129 }
13130
13131 (void) sprintf(bp, "\n\n");
13132 bp += strlen(bp);
13133
13134 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13135
13136 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13137
13138 return (cnt);
13139 }
13140
13141 /*
13142 * ql_2200_binary_fw_dump
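 * Retrieves ISP2200 registers and RISC SRAM for a binary firmware dump.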
13143 *
13144 * Input:
13145 * ha: adapter state pointer.
13146 * fw: firmware dump context pointer.
13147 *
13148 * Returns:
13149 * ql local function return status code.
13150 *
13151 * Context:
13152 * Interrupt or Kernel context, no mailbox commands allowed.
13153 */
13154 static int
13155 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13156 {
13157 uint32_t cnt;
13158 uint16_t risc_address;
13159 clock_t timer;
13160 mbx_cmd_t mc;
13161 mbx_cmd_t *mcp = &mc;
13162 int rval = QL_SUCCESS;
13163
13164 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13165
13166 /* Disable ISP interrupts. */
13167 WRT16_IO_REG(ha, ictrl, 0);
13168 ADAPTER_STATE_LOCK(ha);
13169 ha->flags &= ~INTERRUPTS_ENABLED;
13170 ADAPTER_STATE_UNLOCK(ha);
13171
13172 /* Release mailbox registers. */
13173 WRT16_IO_REG(ha, semaphore, 0);
13174
13175 /* Pause RISC. */
13176 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
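	/* Wait up to 30 seconds (30000 x 1ms) for the RISC to pause. */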
13177 timer = 30000;
13178 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13179 if (timer-- != 0) {
13180 drv_usecwait(MILLISEC);
13181 } else {
13182 rval = QL_FUNCTION_TIMEOUT;
13183 break;
13184 }
13185 }
13186
13187 if (rval == QL_SUCCESS) {
13188 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13189 sizeof (fw->pbiu_reg) / 2, 16);
13190
		/* On the 2200, only the first 8 mailbox registers are read. */
13192 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
13193 8, 16);
13194
13195 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
13196 sizeof (fw->dma_reg) / 2, 16);
13197
13198 WRT16_IO_REG(ha, ctrl_status, 0);
13199 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13200 sizeof (fw->risc_hdw_reg) / 2, 16);
13201
13202 WRT16_IO_REG(ha, pcr, 0x2000);
13203 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13204 sizeof (fw->risc_gp0_reg) / 2, 16);
13205
13206 WRT16_IO_REG(ha, pcr, 0x2100);
13207 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13208 sizeof (fw->risc_gp1_reg) / 2, 16);
13209
13210 WRT16_IO_REG(ha, pcr, 0x2200);
13211 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13212 sizeof (fw->risc_gp2_reg) / 2, 16);
13213
13214 WRT16_IO_REG(ha, pcr, 0x2300);
13215 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13216 sizeof (fw->risc_gp3_reg) / 2, 16);
13217
13218 WRT16_IO_REG(ha, pcr, 0x2400);
13219 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13220 sizeof (fw->risc_gp4_reg) / 2, 16);
13221
13222 WRT16_IO_REG(ha, pcr, 0x2500);
13223 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13224 sizeof (fw->risc_gp5_reg) / 2, 16);
13225
13226 WRT16_IO_REG(ha, pcr, 0x2600);
13227 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13228 sizeof (fw->risc_gp6_reg) / 2, 16);
13229
13230 WRT16_IO_REG(ha, pcr, 0x2700);
13231 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13232 sizeof (fw->risc_gp7_reg) / 2, 16);
13233
13234 WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* The 2200 has only 16 frame buffer registers. */
13236 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13237 ha->iobase + 0x80, 16, 16);
13238
13239 WRT16_IO_REG(ha, ctrl_status, 0x20);
13240 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13241 sizeof (fw->fpm_b0_reg) / 2, 16);
13242
13243 WRT16_IO_REG(ha, ctrl_status, 0x30);
13244 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13245 sizeof (fw->fpm_b1_reg) / 2, 16);
13246
13247 /* Select FPM registers. */
13248 WRT16_IO_REG(ha, ctrl_status, 0x20);
13249
13250 /* FPM Soft Reset. */
13251 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13252
13253 /* Select frame buffer registers. */
13254 WRT16_IO_REG(ha, ctrl_status, 0x10);
13255
13256 /* Reset frame buffer FIFOs. */
13257 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13258
13259 /* Select RISC module registers. */
13260 WRT16_IO_REG(ha, ctrl_status, 0);
13261
13262 /* Reset RISC module. */
13263 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13264
13265 /* Reset ISP semaphore. */
13266 WRT16_IO_REG(ha, semaphore, 0);
13267
13268 /* Release RISC module. */
13269 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13270
13271 /* Wait for RISC to recover from reset. */
13272 timer = 30000;
13273 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13274 if (timer-- != 0) {
13275 drv_usecwait(MILLISEC);
13276 } else {
13277 rval = QL_FUNCTION_TIMEOUT;
13278 break;
13279 }
13280 }
13281
13282 /* Disable RISC pause on FPM parity error. */
13283 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13284 }
13285
13286 if (rval == QL_SUCCESS) {
13287 /* Pause RISC. */
13288 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13289 timer = 30000;
13290 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13291 if (timer-- != 0) {
13292 drv_usecwait(MILLISEC);
13293 } else {
13294 rval = QL_FUNCTION_TIMEOUT;
13295 break;
13296 }
13297 }
13298 }
13299
13300 if (rval == QL_SUCCESS) {
13301 /* Set memory configuration and timing. */
13302 WRT16_IO_REG(ha, mctr, 0xf2);
13303
13304 /* Release RISC. */
13305 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13306
13307 /* Get RISC SRAM. */
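		/*
		 * Mailbox commands are not allowed in this context, so
		 * MBC_READ_RAM_WORD is driven by hand: load the command
		 * and word address into the incoming mailboxes, set the
		 * host interrupt, poll for the mailbox semaphore, and
		 * pull the data word from outgoing mailbox 2.
		 */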
13308 risc_address = 0x1000;
13309 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
13310 for (cnt = 0; cnt < 0xf000; cnt++) {
13311 WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
13312 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
13313 for (timer = 6000000; timer != 0; timer--) {
13314 /* Check for pending interrupts. */
13315 if (INTERRUPT_PENDING(ha)) {
13316 if (RD16_IO_REG(ha, semaphore) &
13317 BIT_0) {
13318 WRT16_IO_REG(ha, hccr,
13319 HC_CLR_RISC_INT);
13320 mcp->mb[0] = RD16_IO_REG(ha,
13321 mailbox_out[0]);
13322 fw->risc_ram[cnt] =
13323 RD16_IO_REG(ha,
13324 mailbox_out[2]);
13325 WRT16_IO_REG(ha,
13326 semaphore, 0);
13327 break;
13328 }
13329 WRT16_IO_REG(ha, hccr,
13330 HC_CLR_RISC_INT);
13331 }
13332 drv_usecwait(5);
13333 }
13334
13335 if (timer == 0) {
13336 rval = QL_FUNCTION_TIMEOUT;
13337 } else {
13338 rval = mcp->mb[0];
13339 }
13340
13341 if (rval != QL_SUCCESS) {
13342 break;
13343 }
13344 }
13345 }
13346
13347 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13348
13349 return (rval);
13350 }
13351
13352 /*
13353 * ql_2300_binary_fw_dump
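 * Retrieves ISP2300 registers and RAM for a binary firmware dump.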
13354 *
13355 * Input:
13356 * ha: adapter state pointer.
13357 * fw: firmware dump context pointer.
13358 *
13359 * Returns:
13360 * ql local function return status code.
13361 *
13362 * Context:
13363 * Interrupt or Kernel context, no mailbox commands allowed.
13364 */
13365 static int
13366 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13367 {
13368 clock_t timer;
13369 int rval = QL_SUCCESS;
13370
13371 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13372
13373 /* Disable ISP interrupts. */
13374 WRT16_IO_REG(ha, ictrl, 0);
13375 ADAPTER_STATE_LOCK(ha);
13376 ha->flags &= ~INTERRUPTS_ENABLED;
13377 ADAPTER_STATE_UNLOCK(ha);
13378
13379 /* Release mailbox registers. */
13380 WRT16_IO_REG(ha, semaphore, 0);
13381
13382 /* Pause RISC. */
13383 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13384 timer = 30000;
13385 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13386 if (timer-- != 0) {
13387 drv_usecwait(MILLISEC);
13388 } else {
13389 rval = QL_FUNCTION_TIMEOUT;
13390 break;
13391 }
13392 }
13393
13394 if (rval == QL_SUCCESS) {
13395 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13396 sizeof (fw->pbiu_reg) / 2, 16);
13397
13398 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13399 sizeof (fw->risc_host_reg) / 2, 16);
13400
13401 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13402 sizeof (fw->mailbox_reg) / 2, 16);
13403
13404 WRT16_IO_REG(ha, ctrl_status, 0x40);
13405 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13406 sizeof (fw->resp_dma_reg) / 2, 16);
13407
13408 WRT16_IO_REG(ha, ctrl_status, 0x50);
13409 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13410 sizeof (fw->dma_reg) / 2, 16);
13411
13412 WRT16_IO_REG(ha, ctrl_status, 0);
13413 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13414 sizeof (fw->risc_hdw_reg) / 2, 16);
13415
13416 WRT16_IO_REG(ha, pcr, 0x2000);
13417 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13418 sizeof (fw->risc_gp0_reg) / 2, 16);
13419
13420 WRT16_IO_REG(ha, pcr, 0x2200);
13421 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13422 sizeof (fw->risc_gp1_reg) / 2, 16);
13423
13424 WRT16_IO_REG(ha, pcr, 0x2400);
13425 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13426 sizeof (fw->risc_gp2_reg) / 2, 16);
13427
13428 WRT16_IO_REG(ha, pcr, 0x2600);
13429 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13430 sizeof (fw->risc_gp3_reg) / 2, 16);
13431
13432 WRT16_IO_REG(ha, pcr, 0x2800);
13433 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13434 sizeof (fw->risc_gp4_reg) / 2, 16);
13435
13436 WRT16_IO_REG(ha, pcr, 0x2A00);
13437 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13438 sizeof (fw->risc_gp5_reg) / 2, 16);
13439
13440 WRT16_IO_REG(ha, pcr, 0x2C00);
13441 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13442 sizeof (fw->risc_gp6_reg) / 2, 16);
13443
13444 WRT16_IO_REG(ha, pcr, 0x2E00);
13445 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13446 sizeof (fw->risc_gp7_reg) / 2, 16);
13447
13448 WRT16_IO_REG(ha, ctrl_status, 0x10);
13449 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13450 ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13451
13452 WRT16_IO_REG(ha, ctrl_status, 0x20);
13453 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13454 sizeof (fw->fpm_b0_reg) / 2, 16);
13455
13456 WRT16_IO_REG(ha, ctrl_status, 0x30);
13457 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13458 sizeof (fw->fpm_b1_reg) / 2, 16);
13459
13460 /* Select FPM registers. */
13461 WRT16_IO_REG(ha, ctrl_status, 0x20);
13462
13463 /* FPM Soft Reset. */
13464 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13465
13466 /* Select frame buffer registers. */
13467 WRT16_IO_REG(ha, ctrl_status, 0x10);
13468
13469 /* Reset frame buffer FIFOs. */
13470 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13471
13472 /* Select RISC module registers. */
13473 WRT16_IO_REG(ha, ctrl_status, 0);
13474
13475 /* Reset RISC module. */
13476 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13477
13478 /* Reset ISP semaphore. */
13479 WRT16_IO_REG(ha, semaphore, 0);
13480
13481 /* Release RISC module. */
13482 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13483
13484 /* Wait for RISC to recover from reset. */
13485 timer = 30000;
13486 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13487 if (timer-- != 0) {
13488 drv_usecwait(MILLISEC);
13489 } else {
13490 rval = QL_FUNCTION_TIMEOUT;
13491 break;
13492 }
13493 }
13494
13495 /* Disable RISC pause on FPM parity error. */
13496 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13497 }
13498
13499 /* Get RISC SRAM. */
13500 if (rval == QL_SUCCESS) {
13501 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13502 }
13503 /* Get STACK SRAM. */
13504 if (rval == QL_SUCCESS) {
13505 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13506 }
13507 /* Get DATA SRAM. */
13508 if (rval == QL_SUCCESS) {
13509 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13510 }
13511
13512 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13513
13514 return (rval);
13515 }
13516
13517 /*
13518 * ql_24xx_binary_fw_dump
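 * Retrieves ISP24xx registers and memory for a binary firmware dump.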
13519 *
13520 * Input:
13521 * ha: adapter state pointer.
13522 * fw: firmware dump context pointer.
13523 *
13524 * Returns:
13525 * ql local function return status code.
13526 *
13527 * Context:
13528 * Interrupt or Kernel context, no mailbox commands allowed.
13529 */
13530 static int
13531 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13532 {
13533 uint32_t *reg32;
13534 void *bp;
13535 clock_t timer;
13536 int rval = QL_SUCCESS;
13537
13538 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13539
13540 fw->hccr = RD32_IO_REG(ha, hccr);
13541
13542 /* Pause RISC. */
13543 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13544 /* Disable ISP interrupts. */
13545 WRT16_IO_REG(ha, ictrl, 0);
13546
13547 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
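		/* Poll up to 3 seconds (30000 x 100us) for the pause. */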
13548 for (timer = 30000;
13549 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13550 rval == QL_SUCCESS; timer--) {
13551 if (timer) {
13552 drv_usecwait(100);
13553 } else {
13554 rval = QL_FUNCTION_TIMEOUT;
13555 }
13556 }
13557 }
13558
13559 if (rval == QL_SUCCESS) {
13560 /* Host interface registers. */
13561 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13562 sizeof (fw->host_reg) / 4, 32);
13563
13564 /* Disable ISP interrupts. */
13565 WRT32_IO_REG(ha, ictrl, 0);
13566 RD32_IO_REG(ha, ictrl);
13567 ADAPTER_STATE_LOCK(ha);
13568 ha->flags &= ~INTERRUPTS_ENABLED;
13569 ADAPTER_STATE_UNLOCK(ha);
13570
13571 /* Shadow registers. */
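		/*
		 * Each shadow register is read by writing 0xB0n00000
		 * (n = shadow register number) to the select register at
		 * offset 0xF0 and reading the value back from offset 0xFC.
		 */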
13572
13573 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13574 RD32_IO_REG(ha, io_base_addr);
13575
13576 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13577 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13578 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13579 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13580
13581 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13582 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13583 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13584 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13585
13586 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13587 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13588 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13589 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13590
13591 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13592 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13593 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13594 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13595
13596 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13597 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13598 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13599 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13600
13601 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13602 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13603 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13604 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13605
13606 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13607 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13608 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13609 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13610
13611 /* Mailbox registers. */
13612 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13613 sizeof (fw->mailbox_reg) / 2, 16);
13614
13615 /* Transfer sequence registers. */
13616
13617 /* XSEQ GP */
13618 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13619 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13620 16, 32);
13621 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13622 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13623 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13624 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13625 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13626 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13627 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13628 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13629 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13630 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13631 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13632 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13633 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13634 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13635
13636 /* XSEQ-0 */
13637 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13638 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13639 sizeof (fw->xseq_0_reg) / 4, 32);
13640
13641 /* XSEQ-1 */
13642 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13643 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13644 sizeof (fw->xseq_1_reg) / 4, 32);
13645
13646 /* Receive sequence registers. */
13647
13648 /* RSEQ GP */
13649 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13650 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13651 16, 32);
13652 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13653 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13654 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13655 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13656 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13657 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13658 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13659 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13660 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13661 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13662 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13663 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13664 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13665 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13666
13667 /* RSEQ-0 */
13668 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13669 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13670 sizeof (fw->rseq_0_reg) / 4, 32);
13671
13672 /* RSEQ-1 */
13673 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13674 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13675 sizeof (fw->rseq_1_reg) / 4, 32);
13676
13677 /* RSEQ-2 */
13678 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13679 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13680 sizeof (fw->rseq_2_reg) / 4, 32);
13681
13682 /* Command DMA registers. */
13683
13684 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13685 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13686 sizeof (fw->cmd_dma_reg) / 4, 32);
13687
13688 /* Queues. */
13689
13690 /* RequestQ0 */
13691 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13692 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13693 8, 32);
13694 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13695
13696 /* ResponseQ0 */
13697 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13698 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13699 8, 32);
13700 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13701
13702 /* RequestQ1 */
13703 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13704 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13705 8, 32);
13706 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13707
13708 /* Transmit DMA registers. */
13709
13710 /* XMT0 */
13711 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13712 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13713 16, 32);
13714 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13715 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13716
13717 /* XMT1 */
13718 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13719 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13720 16, 32);
13721 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13722 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13723
13724 /* XMT2 */
13725 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13726 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13727 16, 32);
13728 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13729 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13730
13731 /* XMT3 */
13732 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13733 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13734 16, 32);
13735 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13736 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13737
13738 /* XMT4 */
13739 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13740 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13741 16, 32);
13742 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13743 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13744
13745 /* XMT Common */
13746 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13747 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13748 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13749
13750 /* Receive DMA registers. */
13751
13752 /* RCVThread0 */
13753 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13754 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13755 ha->iobase + 0xC0, 16, 32);
13756 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13757 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13758
13759 /* RCVThread1 */
13760 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13761 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13762 ha->iobase + 0xC0, 16, 32);
13763 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13764 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13765
13766 /* RISC registers. */
13767
13768 /* RISC GP */
13769 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13770 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13771 16, 32);
13772 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13773 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13774 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13775 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13776 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13777 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13778 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13779 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13780 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13781 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13782 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13783 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13784 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13785 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13786
13787 /* Local memory controller registers. */
13788
13789 /* LMC */
13790 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13791 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13792 16, 32);
13793 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13794 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13795 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13796 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13797 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13798 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13799 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13800 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13801 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13803 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13804 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13805
13806 /* Fibre Protocol Module registers. */
13807
13808 /* FPM hardware */
13809 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13810 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13811 16, 32);
13812 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13813 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13814 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13815 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13816 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13817 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13819 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13820 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13821 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13822 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13823 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13824 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13825 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13826 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13827 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13828 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13829 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13830 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13831 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13832 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13833 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13834
13835 /* Frame Buffer registers. */
13836
13837 /* FB hardware */
13838 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13839 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13840 16, 32);
13841 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13842 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13843 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13844 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13846 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13848 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13850 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13851 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13852 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13853 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13854 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13855 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13856 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13860 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861 }
13862
13863 /* Get the request queue */
13864 if (rval == QL_SUCCESS) {
13865 uint32_t cnt;
13866 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
13867
13868 /* Sync DMA buffer. */
13869 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13870 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13871 DDI_DMA_SYNC_FORKERNEL);
13872
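		/* Copy the queue image, converting words to little-endian. */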
13873 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13874 fw->req_q[cnt] = *w32++;
13875 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13876 }
13877 }
13878
13879 /* Get the response queue */
13880 if (rval == QL_SUCCESS) {
13881 uint32_t cnt;
13882 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
13883
13884 /* Sync DMA buffer. */
13885 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13886 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13887 DDI_DMA_SYNC_FORKERNEL);
13888
13889 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13890 fw->rsp_q[cnt] = *w32++;
13891 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13892 }
13893 }
13894
13895 /* Reset RISC. */
13896 ql_reset_chip(ha);
13897
13898 /* Memory. */
13899 if (rval == QL_SUCCESS) {
13900 /* Code RAM. */
13901 rval = ql_read_risc_ram(ha, 0x20000,
13902 sizeof (fw->code_ram) / 4, fw->code_ram);
13903 }
13904 if (rval == QL_SUCCESS) {
13905 /* External Memory. */
13906 rval = ql_read_risc_ram(ha, 0x100000,
13907 ha->fw_ext_memory_size / 4, fw->ext_mem);
13908 }
13909
13910 /* Get the extended trace buffer */
13911 if (rval == QL_SUCCESS) {
13912 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13913 (ha->fwexttracebuf.bp != NULL)) {
13914 uint32_t cnt;
13915 uint32_t *w32 = ha->fwexttracebuf.bp;
13916
13917 /* Sync DMA buffer. */
13918 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13919 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13920
13921 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13922 fw->ext_trace_buf[cnt] = *w32++;
13923 }
13924 }
13925 }
13926
13927 /* Get the FC event trace buffer */
13928 if (rval == QL_SUCCESS) {
13929 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13930 (ha->fwfcetracebuf.bp != NULL)) {
13931 uint32_t cnt;
13932 uint32_t *w32 = ha->fwfcetracebuf.bp;
13933
13934 /* Sync DMA buffer. */
13935 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13936 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13937
13938 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13939 fw->fce_trace_buf[cnt] = *w32++;
13940 }
13941 }
13942 }
13943
13944 if (rval != QL_SUCCESS) {
13945 EL(ha, "failed=%xh\n", rval);
13946 } else {
13947 /*EMPTY*/
13948 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13949 }
13950
13951 return (rval);
13952 }
13953
13954 /*
13955 * ql_25xx_binary_fw_dump
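 * Retrieves ISP25xx registers and memory for a binary firmware dump.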
13956 *
13957 * Input:
13958 * ha: adapter state pointer.
13959 * fw: firmware dump context pointer.
13960 *
13961 * Returns:
13962 * ql local function return status code.
13963 *
13964 * Context:
13965 * Interrupt or Kernel context, no mailbox commands allowed.
13966 */
13967 static int
13968 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13969 {
13970 uint32_t *reg32;
13971 void *bp;
13972 clock_t timer;
13973 int rval = QL_SUCCESS;
13974
13975 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13976
13977 fw->r2h_status = RD32_IO_REG(ha, risc2host);
13978
13979 /* Pause RISC. */
13980 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13981 /* Disable ISP interrupts. */
13982 WRT16_IO_REG(ha, ictrl, 0);
13983
13984 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13985 for (timer = 30000;
13986 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13987 rval == QL_SUCCESS; timer--) {
13988 if (timer) {
13989 drv_usecwait(100);
13990 if (timer % 10000 == 0) {
13991 EL(ha, "risc pause %d\n", timer);
13992 }
13993 } else {
13994 EL(ha, "risc pause timeout\n");
13995 rval = QL_FUNCTION_TIMEOUT;
13996 }
13997 }
13998 }
13999
14000 if (rval == QL_SUCCESS) {
14001
14002 /* Host Interface registers */
14003
14004 /* HostRisc registers. */
14005 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14006 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14007 16, 32);
14008 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14009 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14010
14011 /* PCIe registers. */
14012 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14013 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14014 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14015 3, 32);
14016 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14017 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14018
14019 /* Host interface registers. */
14020 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14021 sizeof (fw->host_reg) / 4, 32);
14022
14023 /* Disable ISP interrupts. */
14024
14025 WRT32_IO_REG(ha, ictrl, 0);
14026 RD32_IO_REG(ha, ictrl);
14027 ADAPTER_STATE_LOCK(ha);
14028 ha->flags &= ~INTERRUPTS_ENABLED;
14029 ADAPTER_STATE_UNLOCK(ha);
14030
14031 /* Shadow registers. */
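		/*
		 * Same select/read protocol as the ISP24xx; the ISP25xx
		 * carries eleven shadow registers (0 through 0xa).
		 */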
14032
14033 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14034 RD32_IO_REG(ha, io_base_addr);
14035
14036 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14037 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14038 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14039 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14040
14041 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14042 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14043 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14044 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14045
14046 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14047 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14048 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14049 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14050
14051 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14052 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14053 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14054 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14055
14056 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14057 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14058 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14059 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14060
14061 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14062 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14063 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14064 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14065
14066 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14067 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14068 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14069 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14070
14071 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14072 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14073 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14074 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14075
14076 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14077 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14078 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14079 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14080
14081 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14082 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14083 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14084 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14085
14086 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14087 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14088 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14089 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14090
14091 /* RISC I/O register. */
14092
14093 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14094 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14095 1, 32);
14096
14097 /* Mailbox registers. */
14098
14099 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14100 sizeof (fw->mailbox_reg) / 2, 16);
14101
14102 /* Transfer sequence registers. */
14103
14104 /* XSEQ GP */
14105 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14106 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14107 16, 32);
14108 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14109 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14110 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14111 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14112 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14113 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14114 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14115 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14116 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14117 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14118 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14119 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14120 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14121 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14122
14123 /* XSEQ-0 */
14124 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14125 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14126 16, 32);
14127 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14128 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14129 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14130 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14131
14132 /* XSEQ-1 */
14133 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14134 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14135 16, 32);
14136
14137 /* Receive sequence registers. */
14138
14139 /* RSEQ GP */
14140 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14141 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14142 16, 32);
14143 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14144 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14145 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14146 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14147 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14148 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14149 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14150 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14151 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14152 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14153 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14154 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14155 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14156 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14157
14158 /* RSEQ-0 */
14159 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14160 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14161 16, 32);
14162 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14163 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14164
14165 /* RSEQ-1 */
14166 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14167 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14168 sizeof (fw->rseq_1_reg) / 4, 32);
14169
14170 /* RSEQ-2 */
14171 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14172 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14173 sizeof (fw->rseq_2_reg) / 4, 32);
14174
14175 /* Auxiliary sequencer registers. */
14176
14177 /* ASEQ GP */
14178 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14179 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14180 16, 32);
14181 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14182 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14183 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14184 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14185 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14186 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14187 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14188 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14189 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14190 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14191 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14192 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14193 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14194 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14195
14196 /* ASEQ-0 */
14197 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14198 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14199 16, 32);
14200 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14201 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14202
14203 /* ASEQ-1 */
14204 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14205 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14206 16, 32);
14207
14208 /* ASEQ-2 */
14209 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14210 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14211 16, 32);
14212
14213 /* Command DMA registers. */
14214
14215 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14216 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14217 sizeof (fw->cmd_dma_reg) / 4, 32);
14218
14219 /* Queues. */
14220
14221 /* RequestQ0 */
14222 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14223 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14224 8, 32);
14225 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14226
14227 /* ResponseQ0 */
14228 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14229 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14230 8, 32);
14231 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14232
14233 /* RequestQ1 */
14234 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14235 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14236 8, 32);
14237 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14238
14239 /* Transmit DMA registers. */
14240
14241 /* XMT0 */
14242 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14243 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14244 16, 32);
14245 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14246 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14247
14248 /* XMT1 */
14249 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14250 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14251 16, 32);
14252 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14253 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14254
14255 /* XMT2 */
14256 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14257 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14258 16, 32);
14259 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14260 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14261
14262 /* XMT3 */
14263 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14264 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14265 16, 32);
14266 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14267 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14268
14269 /* XMT4 */
14270 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14271 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14272 16, 32);
14273 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14274 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14275
14276 /* XMT Common */
14277 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14278 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14279 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14280
14281 /* Receive DMA registers. */
14282
14283 /* RCVThread0 */
14284 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14285 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14286 ha->iobase + 0xC0, 16, 32);
14287 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14288 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14289
14290 /* RCVThread1 */
14291 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14292 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14293 ha->iobase + 0xC0, 16, 32);
14294 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14295 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14296
14297 /* RISC registers. */
14298
14299 /* RISC GP */
14300 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14301 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14302 16, 32);
14303 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14304 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14305 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14306 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14307 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14308 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14309 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14310 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14311 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14312 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14313 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14314 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14315 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14316 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14317
14318 /* Local memory controller (LMC) registers. */
14319
14320 /* LMC */
14321 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14322 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14323 16, 32);
14324 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14325 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14326 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14327 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14328 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14329 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14330 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14331 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14332 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14333 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14334 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14335 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14336 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14337 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14338
14339 /* Fibre Protocol Module registers. */
14340
14341 /* FPM hardware */
14342 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14343 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14344 16, 32);
14345 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14346 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14347 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14348 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14349 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14350 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14351 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14352 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14353 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14354 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14355 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14356 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14357 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14358 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14359 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14360 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14361 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14362 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14363 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14364 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14365 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14366 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14367
14368 /* Frame Buffer registers. */
14369
14370 /* FB hardware */
14371 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14372 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14373 16, 32);
14374 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14375 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14376 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14377 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14378 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14379 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14380 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14381 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14382 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14383 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14384 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14385 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14386 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14387 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14388 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14389 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14390 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14391 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14392 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14393 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14394 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14395 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14396 }
14397
14398 /* Get the request queue */
14399 if (rval == QL_SUCCESS) {
14400 uint32_t cnt;
14401 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14402
14403 /* Sync DMA buffer. */
14404 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14405 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14406 DDI_DMA_SYNC_FORKERNEL);
14407
14408 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14409 fw->req_q[cnt] = *w32++;
14410 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14411 }
14412 }
14413
14414 /* Get the response queue */
14415 if (rval == QL_SUCCESS) {
14416 uint32_t cnt;
14417 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14418
14419 /* Sync DMA buffer. */
14420 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14421 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14422 DDI_DMA_SYNC_FORKERNEL);
14423
14424 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14425 fw->rsp_q[cnt] = *w32++;
14426 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14427 }
14428 }
14429
14430 /* Reset RISC. */
14431
14432 ql_reset_chip(ha);
14433
14434 /* Memory. */
14435
14436 if (rval == QL_SUCCESS) {
14437 /* Code RAM. */
14438 rval = ql_read_risc_ram(ha, 0x20000,
14439 sizeof (fw->code_ram) / 4, fw->code_ram);
14440 }
14441 if (rval == QL_SUCCESS) {
14442 /* External Memory. */
14443 rval = ql_read_risc_ram(ha, 0x100000,
14444 ha->fw_ext_memory_size / 4, fw->ext_mem);
14445 }
14446
14447 /* Get the FC event trace buffer */
14448 if (rval == QL_SUCCESS) {
14449 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14450 (ha->fwfcetracebuf.bp != NULL)) {
14451 uint32_t cnt;
14452 uint32_t *w32 = ha->fwfcetracebuf.bp;
14453
14454 /* Sync DMA buffer. */
14455 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14456 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14457
14458 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14459 fw->fce_trace_buf[cnt] = *w32++;
14460 }
14461 }
14462 }
14463
14464 /* Get the extended trace buffer */
14465 if (rval == QL_SUCCESS) {
14466 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14467 (ha->fwexttracebuf.bp != NULL)) {
14468 uint32_t cnt;
14469 uint32_t *w32 = ha->fwexttracebuf.bp;
14470
14471 /* Sync DMA buffer. */
14472 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14473 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14474
14475 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14476 fw->ext_trace_buf[cnt] = *w32++;
14477 }
14478 }
14479 }
14480
14481 if (rval != QL_SUCCESS) {
14482 EL(ha, "failed=%xh\n", rval);
14483 } else {
14484 /*EMPTY*/
14485 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14486 }
14487
14488 return (rval);
14489 }
14490
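/*
 * The unrolled shadow-register capture above repeats one pattern:
 * write a selector value (0xB0000000 + n * 0x100000) to
 * iobase + 0xF0, then read the shadowed word at iobase + 0xFC.
 * A loop-based sketch of the same sequence (illustrative only;
 * assumes the 11-entry shadow_reg array of the 25xx/81xx dump
 * structures):
 *
 *	uint32_t n, *sel, *dat;
 *
 *	sel = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
 *	dat = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
 *	for (n = 0; n < 11; n++) {
 *		WRT_REG_DWORD(ha, sel, 0xB0000000 + (n << 20));
 *		fw->shadow_reg[n] = RD_REG_DWORD(ha, dat);
 *	}
 */
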
14491 /*
14492 * ql_81xx_binary_fw_dump
14493 *
14494 * Input:
14495 * ha: adapter state pointer.
14496 * fw: firmware dump context pointer.
14497 *
14498 * Returns:
14499 * ql local function return status code.
14500 *
14501 * Context:
14502 * Interrupt or Kernel context, no mailbox commands allowed.
14503 */
14504 static int
14505 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14506 {
14507 uint32_t *reg32;
14508 void *bp;
14509 clock_t timer;
14510 int rval = QL_SUCCESS;
14511
14512 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14513
14514 fw->r2h_status = RD32_IO_REG(ha, risc2host);
14515
14516 /* Pause RISC. */
14517 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14518 /* Disable ISP interrupts. */
14519 WRT16_IO_REG(ha, ictrl, 0);
14520
14521 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14522 for (timer = 30000;
14523 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14524 rval == QL_SUCCESS; timer--) {
14525 if (timer) {
14526 drv_usecwait(100);
14527 if (timer % 10000 == 0) {
14528 EL(ha, "risc pause %d\n", timer);
14529 }
14530 } else {
14531 EL(ha, "risc pause timeout\n");
14532 rval = QL_FUNCTION_TIMEOUT;
14533 }
14534 }
14535 }
14536
14537 if (rval == QL_SUCCESS) {
14538
14539 /* Host Interface registers */
14540
14541 /* HostRisc registers. */
14542 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14543 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14544 16, 32);
14545 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14546 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14547
14548 /* PCIe registers. */
14549 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14550 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14551 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14552 3, 32);
14553 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14554 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14555
14556 /* Host interface registers. */
14557 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14558 sizeof (fw->host_reg) / 4, 32);
14559
14560 /* Disable ISP interrupts. */
14561
14562 WRT32_IO_REG(ha, ictrl, 0);
14563 RD32_IO_REG(ha, ictrl);
14564 ADAPTER_STATE_LOCK(ha);
14565 ha->flags &= ~INTERRUPTS_ENABLED;
14566 ADAPTER_STATE_UNLOCK(ha);
14567
14568 /* Shadow registers. */
14569
14570 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14571 RD32_IO_REG(ha, io_base_addr);
14572
14573 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14574 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14575 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14576 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14577
14578 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14579 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14580 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14581 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14582
14583 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14584 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14585 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14586 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14587
14588 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14589 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14590 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14591 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14592
14593 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14594 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14595 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14596 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14597
14598 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14599 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14600 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14601 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14602
14603 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14604 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14605 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14606 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14607
14608 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14609 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14610 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14611 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14612
14613 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14614 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14615 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14616 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14617
14618 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14619 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14620 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14621 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14622
14623 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14624 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14625 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14626 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14627
14628 /* RISC I/O register. */
14629
14630 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14631 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14632 1, 32);
14633
14634 /* Mailbox registers. */
14635
14636 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14637 sizeof (fw->mailbox_reg) / 2, 16);
14638
14639 /* Transfer sequence registers. */
14640
14641 /* XSEQ GP */
14642 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14643 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14644 16, 32);
14645 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14646 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14647 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14648 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14649 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14650 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14651 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14652 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14653 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14654 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14655 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14656 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14657 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14658 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14659
14660 /* XSEQ-0 */
14661 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14662 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14663 16, 32);
14664 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14665 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14666 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14667 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14668
14669 /* XSEQ-1 */
14670 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14671 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14672 16, 32);
14673
14674 /* Receive sequence registers. */
14675
14676 /* RSEQ GP */
14677 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14678 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14679 16, 32);
14680 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14681 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14682 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14683 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14684 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14685 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14686 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14687 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14688 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14689 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14690 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14691 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14692 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14693 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14694
14695 /* RSEQ-0 */
14696 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14697 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14698 16, 32);
14699 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14700 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14701
14702 /* RSEQ-1 */
14703 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14704 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14705 sizeof (fw->rseq_1_reg) / 4, 32);
14706
14707 /* RSEQ-2 */
14708 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14709 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14710 sizeof (fw->rseq_2_reg) / 4, 32);
14711
14712 /* Auxiliary sequencer registers. */
14713
14714 /* ASEQ GP */
14715 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14716 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14717 16, 32);
14718 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14719 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14720 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14721 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14722 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14723 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14724 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14725 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14726 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14727 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14728 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14729 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14730 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14731 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14732
14733 /* ASEQ-0 */
14734 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14735 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14736 16, 32);
14737 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14738 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14739
14740 /* ASEQ-1 */
14741 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14742 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14743 16, 32);
14744
14745 /* ASEQ-2 */
14746 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14747 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14748 16, 32);
14749
14750 /* Command DMA registers. */
14751
14752 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14753 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14754 sizeof (fw->cmd_dma_reg) / 4, 32);
14755
14756 /* Queues. */
14757
14758 /* RequestQ0 */
14759 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14760 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14761 8, 32);
14762 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14763
14764 /* ResponseQ0 */
14765 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14766 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14767 8, 32);
14768 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14769
14770 /* RequestQ1 */
14771 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14772 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14773 8, 32);
14774 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14775
14776 /* Transmit DMA registers. */
14777
14778 /* XMT0 */
14779 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14780 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14781 16, 32);
14782 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14783 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14784
14785 /* XMT1 */
14786 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14787 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14788 16, 32);
14789 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14790 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14791
14792 /* XMT2 */
14793 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14794 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14795 16, 32);
14796 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14797 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14798
14799 /* XMT3 */
14800 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14801 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14802 16, 32);
14803 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14804 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14805
14806 /* XMT4 */
14807 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14808 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14809 16, 32);
14810 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14811 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14812
14813 /* XMT Common */
14814 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14815 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14816 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14817
14818 /* Receive DMA registers. */
14819
14820 /* RCVThread0 */
14821 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14822 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14823 ha->iobase + 0xC0, 16, 32);
14824 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14825 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14826
14827 /* RCVThread1 */
14828 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14829 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14830 ha->iobase + 0xC0, 16, 32);
14831 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14832 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14833
14834 /* RISC registers. */
14835
14836 /* RISC GP */
14837 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14838 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14839 16, 32);
14840 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14841 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14842 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14843 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14844 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14845 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14846 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14847 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14848 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14849 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14850 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14851 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14852 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14853 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14854
14855 /* Local memory controller (LMC) registers. */
14856
14857 /* LMC */
14858 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14859 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14860 16, 32);
14861 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14862 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14863 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14864 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14865 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14866 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14867 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14868 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14869 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14870 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14871 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14872 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14873 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14874 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14875
14876 /* Fibre Protocol Module registers. */
14877
14878 /* FPM hardware */
14879 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14880 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14881 16, 32);
14882 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14883 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14884 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14885 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14886 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14887 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14888 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14889 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14890 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14891 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14892 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14893 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14894 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14895 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14896 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14897 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14898 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14899 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14900 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14901 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14902 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14903 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14904 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14905 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14906 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14907 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14908
14909 /* Frame Buffer registers. */
14910
14911 /* FB hardware */
14912 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14913 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14914 16, 32);
14915 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14916 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14917 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14918 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14919 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14920 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14921 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14922 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14923 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14924 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14925 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14926 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14927 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14928 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14929 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14930 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14931 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14932 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14933 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14934 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14935 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14936 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14937 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14938 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14939 }
14940
14941 /* Get the request queue */
14942 if (rval == QL_SUCCESS) {
14943 uint32_t cnt;
14944 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14945
14946 /* Sync DMA buffer. */
14947 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14948 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14949 DDI_DMA_SYNC_FORKERNEL);
14950
14951 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14952 fw->req_q[cnt] = *w32++;
14953 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14954 }
14955 }
14956
14957 /* Get the response queue */
14958 if (rval == QL_SUCCESS) {
14959 uint32_t cnt;
14960 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14961
14962 /* Sync DMA buffer. */
14963 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14964 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14965 DDI_DMA_SYNC_FORKERNEL);
14966
14967 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14968 fw->rsp_q[cnt] = *w32++;
14969 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14970 }
14971 }
14972
14973 /* Reset RISC. */
14974
14975 ql_reset_chip(ha);
14976
14977 /* Memory. */
14978
14979 if (rval == QL_SUCCESS) {
14980 /* Code RAM. */
14981 rval = ql_read_risc_ram(ha, 0x20000,
14982 sizeof (fw->code_ram) / 4, fw->code_ram);
14983 }
14984 if (rval == QL_SUCCESS) {
14985 /* External Memory. */
14986 rval = ql_read_risc_ram(ha, 0x100000,
14987 ha->fw_ext_memory_size / 4, fw->ext_mem);
14988 }
14989
14990 /* Get the FC event trace buffer */
14991 if (rval == QL_SUCCESS) {
14992 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14993 (ha->fwfcetracebuf.bp != NULL)) {
14994 uint32_t cnt;
14995 uint32_t *w32 = ha->fwfcetracebuf.bp;
14996
14997 /* Sync DMA buffer. */
14998 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14999 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15000
15001 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15002 fw->fce_trace_buf[cnt] = *w32++;
15003 }
15004 }
15005 }
15006
15007 /* Get the extended trace buffer */
15008 if (rval == QL_SUCCESS) {
15009 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15010 (ha->fwexttracebuf.bp != NULL)) {
15011 uint32_t cnt;
15012 uint32_t *w32 = ha->fwexttracebuf.bp;
15013
15014 /* Sync DMA buffer. */
15015 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15016 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15017
15018 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15019 fw->ext_trace_buf[cnt] = *w32++;
15020 }
15021 }
15022 }
15023
15024 if (rval != QL_SUCCESS) {
15025 EL(ha, "failed=%xh\n", rval);
15026 } else {
15027 /*EMPTY*/
15028 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15029 }
15030
15031 return (rval);
15032 }
15033
15034 /*
15035 * ql_read_risc_ram
15036 * Reads RISC RAM one word at a time.
15037 * RISC interrupts must be disabled when this routine is called.
15038 *
15039 * Input:
15040 * ha: adapter state pointer.
15041 * risc_address: RISC code start address.
15042 * len: Number of words.
15043 * buf: buffer pointer.
15044 *
15045 * Returns:
15046 * ql local function return status code.
15047 *
15048 * Context:
15049 * Interrupt or Kernel context, no mailbox commands allowed.
15050 */
15051 static int
15052 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
15053 void *buf)
15054 {
15055 uint32_t cnt;
15056 uint16_t stat;
15057 clock_t timer;
15058 uint16_t *buf16 = (uint16_t *)buf;
15059 uint32_t *buf32 = (uint32_t *)buf;
15060 int rval = QL_SUCCESS;
15061
15062 for (cnt = 0; cnt < len; cnt++, risc_address++) {
15063 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
15064 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
15065 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
15066 if (CFG_IST(ha, CFG_CTRL_8021)) {
15067 WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
15068 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15069 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
15070 } else {
15071 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
15072 }
15073 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
15074 if (INTERRUPT_PENDING(ha)) {
15075 stat = (uint16_t)
15076 (RD16_IO_REG(ha, risc2host) & 0xff);
15077 if ((stat == 1) || (stat == 0x10)) {
15078 if (CFG_IST(ha, CFG_CTRL_24258081)) {
15079 buf32[cnt] = SHORT_TO_LONG(
15080 RD16_IO_REG(ha,
15081 mailbox_out[2]),
15082 RD16_IO_REG(ha,
15083 mailbox_out[3]));
15084 } else {
15085 buf16[cnt] =
15086 RD16_IO_REG(ha,
15087 mailbox_out[2]);
15088 }
15089
15090 break;
15091 } else if ((stat == 2) || (stat == 0x11)) {
15092 rval = RD16_IO_REG(ha, mailbox_out[0]);
15093 break;
15094 }
15095 if (CFG_IST(ha, CFG_CTRL_8021)) {
15096 ql_8021_clr_hw_intr(ha);
15097 ql_8021_clr_fw_intr(ha);
15098 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15099 WRT32_IO_REG(ha, hccr,
15100 HC24_CLR_RISC_INT);
15101 RD32_IO_REG(ha, hccr);
15102 } else {
15103 WRT16_IO_REG(ha, hccr,
15104 HC_CLR_RISC_INT);
15105 }
15106 }
15107 drv_usecwait(5);
15108 }
15109 if (CFG_IST(ha, CFG_CTRL_8021)) {
15110 ql_8021_clr_hw_intr(ha);
15111 ql_8021_clr_fw_intr(ha);
15112 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15113 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
15114 RD32_IO_REG(ha, hccr);
15115 } else {
15116 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
15117 WRT16_IO_REG(ha, semaphore, 0);
15118 }
15119
15120 if (timer == 0) {
15121 rval = QL_FUNCTION_TIMEOUT;
15122 }
15123 }
15124
15125 return (rval);
15126 }
15127
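/*
 * Usage sketch (illustrative): the binary firmware dump routines
 * above capture code RAM this way, after the RISC has been reset
 * and interrupts disabled:
 *
 *	rval = ql_read_risc_ram(ha, 0x20000,
 *	    sizeof (fw->code_ram) / 4, fw->code_ram);
 *
 * On 24xx and later parts each word is 32 bits, assembled from
 * mailbox_out[2] and mailbox_out[3]; older parts return a 16-bit
 * word in mailbox_out[2].
 */
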
15128 /*
15129 * ql_read_regs
15130 * Reads adapter registers to buffer.
15131 *
15132 * Input:
15133 * ha: adapter state pointer.
15134 * buf: buffer pointer.
15135 * reg: start address.
15136 * count: number of registers.
15137 * wds: register size in bits (8, 16 or 32).
15138 *
* Returns:
* pointer to the buffer location following the last register
* value stored; callers use it to chain successive reads.
*
15139 * Context:
15140 * Interrupt or Kernel context, no mailbox commands allowed.
15141 */
15142 static void *
15143 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15144 uint8_t wds)
15145 {
15146 uint32_t *bp32, *reg32;
15147 uint16_t *bp16, *reg16;
15148 uint8_t *bp8, *reg8;
15149
15150 switch (wds) {
15151 case 32:
15152 bp32 = buf;
15153 reg32 = reg;
15154 while (count--) {
15155 *bp32++ = RD_REG_DWORD(ha, reg32++);
15156 }
15157 return (bp32);
15158 case 16:
15159 bp16 = buf;
15160 reg16 = reg;
15161 while (count--) {
15162 *bp16++ = RD_REG_WORD(ha, reg16++);
15163 }
15164 return (bp16);
15165 case 8:
15166 bp8 = buf;
15167 reg8 = reg;
15168 while (count--) {
15169 *bp8++ = RD_REG_BYTE(ha, reg8++);
15170 }
15171 return (bp8);
15172 default:
15173 EL(ha, "Unknown word size=%d\n", wds);
15174 return (buf);
15175 }
15176 }
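
/*
 * ql_read_regs returns the advanced buffer pointer, which is what
 * lets the dump routines above chain 16-register windows into one
 * flat array, e.g.:
 *
 *	WRT32_IO_REG(ha, io_base_addr, 0x7000);
 *	bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
 *	    16, 32);
 *	WRT32_IO_REG(ha, io_base_addr, 0x7010);
 *	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
 */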
15177
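/*
 * ql_save_config_regs
 *	Saves the adapter's PCI configuration space registers in
 *	the "ql-config-space" devinfo property so that they can be
 *	restored later by ql_restore_config_regs().
 *
 * Input:
 *	dip:	pointer to device information structure.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */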
15178 static int
15179 ql_save_config_regs(dev_info_t *dip)
15180 {
15181 ql_adapter_state_t *ha;
15182 int ret;
15183 ql_config_space_t chs;
15184 caddr_t prop = "ql-config-space";
15185
15186 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15187 if (ha == NULL) {
15188 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15189 ddi_get_instance(dip));
15190 return (DDI_FAILURE);
15191 }
15192
15193 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15194
15195 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15196 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15197 1) {
15198 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15199 return (DDI_SUCCESS);
15200 }
15201
15202 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15203 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15204 PCI_CONF_HEADER);
15205 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15206 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15207 PCI_BCNF_BCNTRL);
15208 }
15209
15210 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15211 PCI_CONF_CACHE_LINESZ);
15212
15213 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15214 PCI_CONF_LATENCY_TIMER);
15215
15216 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15217 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15218 PCI_BCNF_LATENCY_TIMER);
15219 }
15220
15221 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15222 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15223 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15224 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15225 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15226 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15227
15228 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15229 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15230 (uchar_t *)&chs, sizeof (ql_config_space_t));
15231
15232 if (ret != DDI_PROP_SUCCESS) {
15233 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15234 QL_NAME, ddi_get_instance(dip), prop);
15235 return (DDI_FAILURE);
15236 }
15237
15238 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15239
15240 return (DDI_SUCCESS);
15241 }
15242
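/*
 * ql_restore_config_regs
 *	Restores the PCI configuration space registers saved by
 *	ql_save_config_regs() and removes the saved property.
 *
 * Input:
 *	dip:	pointer to device information structure.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */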
15243 static int
15244 ql_restore_config_regs(dev_info_t *dip)
15245 {
15246 ql_adapter_state_t *ha;
15247 uint_t elements;
15248 ql_config_space_t *chs_p;
15249 caddr_t prop = "ql-config-space";
15250
15251 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15252 if (ha == NULL) {
15253 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15254 ddi_get_instance(dip));
15255 return (DDI_FAILURE);
15256 }
15257
15258 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15259
15260 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15261 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
15262 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
15263 (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
15264 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15265 return (DDI_FAILURE);
15266 }
15267
15268 ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
15269
15270 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15271 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15272 chs_p->chs_bridge_control);
15273 }
15274
15275 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15276 chs_p->chs_cache_line_size);
15277
15278 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15279 chs_p->chs_latency_timer);
15280
15281 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15282 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15283 chs_p->chs_sec_latency_timer);
15284 }
15285
15286 ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15287 ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15288 ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15289 ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15290 ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15291 ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15292
15293 ddi_prop_free(chs_p);
15294
15295 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15296 if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15297 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15298 QL_NAME, ddi_get_instance(dip), prop);
15299 }
15300
15301 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15302
15303 return (DDI_SUCCESS);
15304 }
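
/*
 * The save/restore pair is meant to bracket events that can clear
 * PCI configuration space, such as a power-down. A hypothetical
 * caller (illustrative only; the actual call sites live elsewhere
 * in the driver):
 *
 *	if (ql_save_config_regs(dip) == DDI_SUCCESS) {
 *		... power the device down and back up ...
 *		(void) ql_restore_config_regs(dip);
 *	}
 */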
15305
15306 uint8_t
15307 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15308 {
15309 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15310 return (ddi_get8(ha->sbus_config_handle,
15311 (uint8_t *)(ha->sbus_config_base + off)));
15312 }
15313
15314 #ifdef KERNEL_32
15315 return (pci_config_getb(ha->pci_handle, off));
15316 #else
15317 return (pci_config_get8(ha->pci_handle, off));
15318 #endif
15319 }
15320
15321 uint16_t
15322 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15323 {
15324 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15325 return (ddi_get16(ha->sbus_config_handle,
15326 (uint16_t *)(ha->sbus_config_base + off)));
15327 }
15328
15329 #ifdef KERNEL_32
15330 return (pci_config_getw(ha->pci_handle, off));
15331 #else
15332 return (pci_config_get16(ha->pci_handle, off));
15333 #endif
15334 }
15335
15336 uint32_t
15337 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15338 {
15339 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15340 return (ddi_get32(ha->sbus_config_handle,
15341 (uint32_t *)(ha->sbus_config_base + off)));
15342 }
15343
15344 #ifdef KERNEL_32
15345 return (pci_config_getl(ha->pci_handle, off));
15346 #else
15347 return (pci_config_get32(ha->pci_handle, off));
15348 #endif
15349 }
15350
15351 void
15352 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15353 {
15354 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15355 ddi_put8(ha->sbus_config_handle,
15356 (uint8_t *)(ha->sbus_config_base + off), val);
15357 } else {
15358 #ifdef KERNEL_32
15359 pci_config_putb(ha->pci_handle, off, val);
15360 #else
15361 pci_config_put8(ha->pci_handle, off, val);
15362 #endif
15363 }
15364 }
15365
15366 void
15367 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15368 {
15369 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15370 ddi_put16(ha->sbus_config_handle,
15371 (uint16_t *)(ha->sbus_config_base + off), val);
15372 } else {
15373 #ifdef KERNEL_32
15374 pci_config_putw(ha->pci_handle, off, val);
15375 #else
15376 pci_config_put16(ha->pci_handle, off, val);
15377 #endif
15378 }
15379 }
15380
15381 void
15382 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15383 {
15384 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15385 ddi_put32(ha->sbus_config_handle,
15386 (uint32_t *)(ha->sbus_config_base + off), val);
15387 } else {
15388 #ifdef KERNEL_32
15389 pci_config_putl(ha->pci_handle, off, val);
15390 #else
15391 pci_config_put32(ha->pci_handle, off, val);
15392 #endif
15393 }
15394 }
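
/*
 * These accessors hide the SBUS vs. PCI difference from callers.
 * For example, enabling bus mastering in the command register
 * could be written as (illustrative sketch):
 *
 *	uint16_t cmd;
 *
 *	cmd = ql_pci_config_get16(ha, PCI_CONF_COMM);
 *	ql_pci_config_put16(ha, PCI_CONF_COMM,
 *	    (uint16_t)(cmd | PCI_COMM_ME));
 */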
15395
15396 /*
15397 * ql_halt
15398 * Waits for outstanding commands to finish and aborts
15399 * any that do not complete in time.
15400 * When powering down to D3, the adapter is also reset.
15401 *
15402 * Input:
15403 * ha: adapter state pointer.
15404 * pwr: power state.
15405 *
15406 * Context:
15407 * Kernel context.
15408 */
15409 static void
15410 ql_halt(ql_adapter_state_t *ha, int pwr)
15411 {
15412 uint32_t cnt;
15413 ql_tgt_t *tq;
15414 ql_srb_t *sp;
15415 uint16_t index;
15416 ql_link_t *link;
15417
15418 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15419
15420 /* Wait for all commands running to finish. */
15421 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
15422 for (link = ha->dev[index].first; link != NULL;
15423 link = link->next) {
15424 tq = link->base_address;
15425 (void) ql_abort_device(ha, tq, 0);
15426
15427 /* Wait for 30 seconds for commands to finish. */
15428 for (cnt = 3000; cnt != 0; cnt--) {
15429 /* Acquire device queue lock. */
15430 DEVICE_QUEUE_LOCK(tq);
15431 if (tq->outcnt == 0) {
15432 /* Release device queue lock. */
15433 DEVICE_QUEUE_UNLOCK(tq);
15434 break;
15435 } else {
15436 /* Release device queue lock. */
15437 DEVICE_QUEUE_UNLOCK(tq);
15438 ql_delay(ha, 10000);
15439 }
15440 }
15441
15442 /* Finish any commands waiting for more status. */
15443 if (ha->status_srb != NULL) {
15444 sp = ha->status_srb;
15445 ha->status_srb = NULL;
15446 sp->cmd.next = NULL;
15447 ql_done(&sp->cmd);
15448 }
15449
15450 /* Abort commands that did not finish. */
15451 if (cnt == 0) {
15452 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
15453 cnt++) {
15454 if (ha->pending_cmds.first != NULL) {
15455 ql_start_iocb(ha, NULL);
15456 cnt = 1;
15457 }
15458 sp = ha->outstanding_cmds[cnt];
15459 if (sp != NULL &&
15460 sp->lun_queue->target_queue ==
15461 tq) {
15462 (void) ql_abort((opaque_t)ha,
15463 sp->pkt, 0);
15464 }
15465 }
15466 }
15467 }
15468 }
15469
15470 /* Shutdown IP. */
15471 if (ha->flags & IP_INITIALIZED) {
15472 (void) ql_shutdown_ip(ha);
15473 }
15474
15475 /* Stop all timers. */
15476 ADAPTER_STATE_LOCK(ha);
15477 ha->port_retry_timer = 0;
15478 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
15479 ha->watchdog_timer = 0;
15480 ADAPTER_STATE_UNLOCK(ha);
15481
15482 if (pwr == PM_LEVEL_D3) {
15483 ADAPTER_STATE_LOCK(ha);
15484 ha->flags &= ~ONLINE;
15485 ADAPTER_STATE_UNLOCK(ha);
15486
15487 /* Reset ISP chip. */
15488 ql_reset_chip(ha);
15489 }
15490
15491 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15492 }
15493
15494 /*
15495 * ql_get_dma_mem
15496 * Function used to allocate dma memory.
15497 *
15498 * Input:
15499 * ha: adapter state pointer.
15500 * mem: pointer to dma memory object.
15501 * size: size of the request in bytes.
* allocation_type: memory allocation type.
* alignment: required buffer alignment.
15502 *
15503 * Returns:
15504 * ql local function return status code.
15505 *
15506 * Context:
15507 * Kernel context.
15508 */
15509 int
15510 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15511 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15512 {
15513 int rval;
15514
15515 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15516
15517 mem->size = size;
15518 mem->type = allocation_type;
15519 mem->cookie_count = 1;
15520
15521 switch (alignment) {
15522 case QL_DMA_DATA_ALIGN:
15523 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15524 break;
15525 case QL_DMA_RING_ALIGN:
15526 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15527 break;
15528 default:
15529 EL(ha, "failed, unknown alignment type %x\n", alignment);
15530 break;
15531 }
15532
15533 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15534 ql_free_phys(ha, mem);
15535 EL(ha, "failed, alloc_phys=%xh\n", rval);
15536 }
15537
15538 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15539
15540 return (rval);
15541 }
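
/*
 * A typical ring-buffer allocation (illustrative sketch; error
 * handling trimmed):
 *
 *	dma_mem_t mem;
 *
 *	bzero(&mem, sizeof (mem));
 *	if (ql_get_dma_mem(ha, &mem, size, LITTLE_ENDIAN_DMA,
 *	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
 *		return (QL_MEMORY_ALLOC_FAILED);
 *	}
 *	... use mem.bp and mem.cookie, then ql_free_phys(ha, &mem) ...
 */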
15542
15543 /*
15544 * ql_alloc_phys
15545 * Function used to allocate memory and zero it.
15546 * Memory is below 4 GB.
15547 *
15548 * Input:
15549 * ha: adapter state pointer.
15550 * mem: pointer to dma memory object.
15551 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15552 * mem->cookie_count number of segments allowed.
15553 * mem->type memory allocation type.
15554 * mem->size memory size.
15555 * mem->alignment memory alignment.
15556 *
15557 * Returns:
15558 * ql local function return status code.
15559 *
15560 * Context:
15561 * Kernel context.
15562 */
15563 int
15564 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15565 {
15566 size_t rlen;
15567 ddi_dma_attr_t dma_attr;
15568 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
15569
15570 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15571
15572 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15573 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15574
15575 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15576 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15577
15578 /*
15579 * Workaround for SUN XMITS: buffers must start and end on an
15580 * 8 byte boundary or the hardware will overrun the buffer.
15581 * The simple fix is to round the size up to leave room for overrun.
15582 */
15583 if (mem->size & 7) {
15584 mem->size += 8 - (mem->size & 7);
15585 }
15586
15587 mem->flags = DDI_DMA_CONSISTENT;
15588
15589 /*
15590 * Allocate DMA memory for command.
15591 */
15592 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15593 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15594 DDI_SUCCESS) {
15595 EL(ha, "failed, ddi_dma_alloc_handle\n");
15596 mem->dma_handle = NULL;
15597 return (QL_MEMORY_ALLOC_FAILED);
15598 }
15599
15600 switch (mem->type) {
15601 case KERNEL_MEM:
15602 mem->bp = kmem_zalloc(mem->size, sleep);
15603 break;
15604 case BIG_ENDIAN_DMA:
15605 case LITTLE_ENDIAN_DMA:
15606 case NO_SWAP_DMA:
15607 if (mem->type == BIG_ENDIAN_DMA) {
15608 acc_attr.devacc_attr_endian_flags =
15609 DDI_STRUCTURE_BE_ACC;
15610 } else if (mem->type == NO_SWAP_DMA) {
15611 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15612 }
15613 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15614 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15615 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15616 &mem->acc_handle) == DDI_SUCCESS) {
15617 bzero(mem->bp, mem->size);
15618 /* ensure we got what we asked for (32bit) */
15619 if (dma_attr.dma_attr_addr_hi == 0) {
15620 if (mem->cookie.dmac_notused != 0) {
15621 EL(ha, "failed, ddi_dma_mem_alloc "
15622 "returned 64 bit DMA address\n");
15623 ql_free_phys(ha, mem);
15624 return (QL_MEMORY_ALLOC_FAILED);
15625 }
15626 }
15627 } else {
15628 mem->acc_handle = NULL;
15629 mem->bp = NULL;
15630 }
15631 break;
15632 default:
15633 EL(ha, "failed, unknown type=%xh\n", mem->type);
15634 mem->acc_handle = NULL;
15635 mem->bp = NULL;
15636 break;
15637 }
15638
15639 if (mem->bp == NULL) {
15640 EL(ha, "failed, ddi_dma_mem_alloc\n");
15641 ddi_dma_free_handle(&mem->dma_handle);
15642 mem->dma_handle = NULL;
15643 return (QL_MEMORY_ALLOC_FAILED);
15644 }
15645
15646 mem->flags |= DDI_DMA_RDWR;
15647
15648 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15649 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15650 ql_free_phys(ha, mem);
15651 return (QL_MEMORY_ALLOC_FAILED);
15652 }
15653
15654 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15655
15656 return (QL_SUCCESS);
15657 }
15658
15659 /*
15660 * ql_free_phys
15661 * Function used to free physical memory.
15662 *
15663 * Input:
15664 * ha: adapter state pointer.
15665 * mem: pointer to dma memory object.
15666 *
15667 * Context:
15668 * Kernel context.
15669 */
15670 void
15671 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15672 {
15673 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15674
15675 if (mem != NULL && mem->dma_handle != NULL) {
15676 ql_unbind_dma_buffer(ha, mem);
15677 switch (mem->type) {
15678 case KERNEL_MEM:
15679 if (mem->bp != NULL) {
15680 kmem_free(mem->bp, mem->size);
15681 }
15682 break;
15683 case LITTLE_ENDIAN_DMA:
15684 case BIG_ENDIAN_DMA:
15685 case NO_SWAP_DMA:
15686 if (mem->acc_handle != NULL) {
15687 ddi_dma_mem_free(&mem->acc_handle);
15688 mem->acc_handle = NULL;
15689 }
15690 break;
15691 default:
15692 break;
15693 }
15694 mem->bp = NULL;
15695 ddi_dma_free_handle(&mem->dma_handle);
15696 mem->dma_handle = NULL;
15697 }
15698
15699 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15700 }
15701
15702 /*
15703 * ql_alloc_dma_resouce
15704 * Allocates DMA resource for buffer.
15705 *
15706 * Input:
15707 * ha: adapter state pointer.
15708 * mem: pointer to dma memory object.
15709 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15710 * mem->cookie_count number of segments allowed.
15711 * mem->type memory allocation type.
15712 * mem->size memory size.
15713 * mem->bp pointer to memory or struct buf
15714 *
15715 * Returns:
15716 * ql local function return status code.
15717 *
15718 * Context:
15719 * Kernel context.
15720 */
15721 int
15722 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15723 {
15724 ddi_dma_attr_t dma_attr;
15725
15726 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15727
15728 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15729 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15730 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15731
15732 /*
15733 * Allocate DMA handle for command.
15734 */
15735 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15736 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15737 DDI_SUCCESS) {
15738 EL(ha, "failed, ddi_dma_alloc_handle\n");
15739 mem->dma_handle = NULL;
15740 return (QL_MEMORY_ALLOC_FAILED);
15741 }
15742
15743 mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15744
15745 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15746 EL(ha, "failed, bind_dma_buffer\n");
15747 ddi_dma_free_handle(&mem->dma_handle);
15748 mem->dma_handle = NULL;
15749 return (QL_MEMORY_ALLOC_FAILED);
15750 }
15751
15752 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15753
15754 return (QL_SUCCESS);
15755 }
15756
15757 /*
15758 * ql_free_dma_resource
15759 * Frees DMA resources.
15760 *
15761 * Input:
15762 * ha: adapter state pointer.
15763 * mem: pointer to dma memory object.
15764 * mem->dma_handle DMA memory handle.
15765 *
15766 * Context:
15767 * Kernel context.
15768 */
15769 void
15770 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15771 {
15772 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15773
15774 ql_free_phys(ha, mem);
15775
15776 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15777 }
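
/*
 * Usage sketch (illustrative only, not part of the driver): the typical
 * lifecycle of a dma_mem_t object using the helpers above.  It assumes
 * ql_alloc_phys() is the allocation counterpart defined earlier in this
 * file; the hypothetical QL_EXAMPLE guard keeps the sketch out of the
 * build.
 */
#ifdef QL_EXAMPLE
static int
ql_example_dma_lifecycle(ql_adapter_state_t *ha)
{
	dma_mem_t	mem = {0};

	/* Describe the buffer: one 4KB little endian DMA segment. */
	mem.size = 4096;
	mem.type = LITTLE_ENDIAN_DMA;
	mem.cookie_count = 1;

	/* Allocate, map and bind the buffer. */
	if (ql_alloc_phys(ha, &mem, KM_SLEEP) != QL_SUCCESS) {
		return (QL_MEMORY_ALLOC_FAILED);
	}

	/* ... use mem.bp and mem.cookies for I/O here ... */

	/* Unbind the handle and free the memory. */
	ql_free_phys(ha, &mem);

	return (QL_SUCCESS);
}
#endif	/* QL_EXAMPLE */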
15778
15779 /*
15780 * ql_bind_dma_buffer
15781 * Binds DMA buffer.
15782 *
15783 * Input:
15784 * ha: adapter state pointer.
15785 * mem: pointer to dma memory object.
15786 * sleep: KM_SLEEP or KM_NOSLEEP.
15787 * mem->dma_handle DMA memory handle.
15788 * mem->cookie_count number of segments allowed.
15789 * mem->type memory allocation type.
15790 * mem->size memory size.
15791 * mem->bp pointer to memory or struct buf
15792 *
15793 * Returns:
15794 * mem->cookies pointer to list of cookies.
15795 * mem->cookie_count number of cookies.
15796 * status success = DDI_DMA_MAPPED
15797 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15798 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15799 * DDI_DMA_TOOBIG
15800 *
15801 * Context:
15802 * Kernel context.
15803 */
15804 static int
15805 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15806 {
15807 int rval;
15808 ddi_dma_cookie_t *cookiep;
15809 uint32_t cnt = mem->cookie_count;
15810
15811 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15812
15813 if (mem->type == STRUCT_BUF_MEMORY) {
15814 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15815 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15816 DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15817 } else {
15818 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15819 mem->size, mem->flags, (sleep == KM_SLEEP) ?
15820 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15821 &mem->cookie_count);
15822 }
15823
15824 if (rval == DDI_DMA_MAPPED) {
15825 if (mem->cookie_count > cnt) {
15826 (void) ddi_dma_unbind_handle(mem->dma_handle);
15827 EL(ha, "failed, cookie_count %d > %d\n",
15828 mem->cookie_count, cnt);
15829 rval = DDI_DMA_TOOBIG;
15830 } else {
15831 if (mem->cookie_count > 1) {
				if ((mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) != NULL) {
15835 *mem->cookies = mem->cookie;
15836 cookiep = mem->cookies;
15837 for (cnt = 1; cnt < mem->cookie_count;
15838 cnt++) {
15839 ddi_dma_nextcookie(
15840 mem->dma_handle,
15841 ++cookiep);
15842 }
15843 } else {
15844 (void) ddi_dma_unbind_handle(
15845 mem->dma_handle);
15846 EL(ha, "failed, kmem_zalloc\n");
15847 rval = DDI_DMA_NORESOURCES;
15848 }
15849 } else {
15850 /*
15851 * It has been reported that dmac_size at times
15852 * may be incorrect on sparc machines so for
15853 * sparc machines that only have one segment
15854 * use the buffer size instead.
15855 */
15856 mem->cookies = &mem->cookie;
15857 mem->cookies->dmac_size = mem->size;
15858 }
15859 }
15860 }
15861
15862 if (rval != DDI_DMA_MAPPED) {
15863 EL(ha, "failed=%xh\n", rval);
15864 } else {
15865 /*EMPTY*/
15866 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15867 }
15868
15869 return (rval);
15870 }
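
/*
 * Usage sketch (illustrative only, not part of the driver): after a
 * successful ql_bind_dma_buffer(), mem->cookies always points at a
 * valid array (the kmem_zalloc'd list for multiple segments, or
 * &mem->cookie for a single one), so callers can walk the scatter/
 * gather segments uniformly.  QL_EXAMPLE is a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static void
ql_example_walk_cookies(dma_mem_t *mem)
{
	uint32_t	seg;

	/* Each cookie describes one physically contiguous segment. */
	for (seg = 0; seg < mem->cookie_count; seg++) {
		cmn_err(CE_CONT, "seg %u: addr=%llxh, len=%luh\n", seg,
		    (unsigned long long)mem->cookies[seg].dmac_laddress,
		    (ulong_t)mem->cookies[seg].dmac_size);
	}
}
#endif	/* QL_EXAMPLE */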
15871
15872 /*
15873 * ql_unbind_dma_buffer
15874 * Unbinds DMA buffer.
15875 *
15876 * Input:
15877 * ha: adapter state pointer.
15878 * mem: pointer to dma memory object.
15879 * mem->dma_handle DMA memory handle.
15880 * mem->cookies pointer to cookie list.
15881 * mem->cookie_count number of cookies.
15882 *
15883 * Context:
15884 * Kernel context.
15885 */
15886 /* ARGSUSED */
15887 static void
15888 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15889 {
15890 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15891
15892 (void) ddi_dma_unbind_handle(mem->dma_handle);
15893 if (mem->cookie_count > 1) {
15894 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15895 mem->cookie_count);
15896 mem->cookies = NULL;
15897 }
15898 mem->cookie_count = 0;
15899
15900 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15901 }
15902
15903 static int
15904 ql_suspend_adapter(ql_adapter_state_t *ha)
15905 {
15906 clock_t timer = 32 * drv_usectohz(1000000);
15907
15908 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15909
15910 /*
15911 * First we will claim mbox ownership so that no
15912 * thread using mbox hangs when we disable the
15913 * interrupt in the middle of it.
15914 */
15915 MBX_REGISTER_LOCK(ha);
15916
15917 /* Check for mailbox available, if not wait for signal. */
15918 while (ha->mailbox_flags & MBX_BUSY_FLG) {
15919 ha->mailbox_flags = (uint8_t)
15920 (ha->mailbox_flags | MBX_WANT_FLG);
15921
		/* 32 seconds from now */
15923 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
15924 timer, TR_CLOCK_TICK) == -1) {
15925
15926 /* Release mailbox register lock. */
15927 MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox\n");
15929 return (QL_FUNCTION_TIMEOUT);
15930 }
15931 }
15932
15933 /* Set busy flag. */
15934 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
15935 MBX_REGISTER_UNLOCK(ha);
15936
15937 (void) ql_wait_outstanding(ha);
15938
15939 /*
	 * At this point we are sure that there will not be any mailbox
	 * interrupt, so return all of the outstanding commands as well
	 * as the internally queued commands.
15943 */
15944 ql_halt(ha, PM_LEVEL_D0);
15945
15946 if (ha->power_level != PM_LEVEL_D3) {
15947 /* Disable ISP interrupts. */
15948 WRT16_IO_REG(ha, ictrl, 0);
15949 }
15950
15951 ADAPTER_STATE_LOCK(ha);
15952 ha->flags &= ~INTERRUPTS_ENABLED;
15953 ADAPTER_STATE_UNLOCK(ha);
15954
15955 MBX_REGISTER_LOCK(ha);
15956 /* Reset busy status. */
15957 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);
15958
15959 /* If thread is waiting for mailbox go signal it to start. */
15960 if (ha->mailbox_flags & MBX_WANT_FLG) {
15961 ha->mailbox_flags = (uint8_t)
15962 (ha->mailbox_flags & ~MBX_WANT_FLG);
15963 cv_broadcast(&ha->cv_mbx_wait);
15964 }
15965 /* Release mailbox register lock. */
15966 MBX_REGISTER_UNLOCK(ha);
15967
15968 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15969
15970 return (QL_SUCCESS);
15971 }
15972
15973 /*
15974 * ql_add_link_b
15975 * Add link to the end of the chain.
15976 *
15977 * Input:
15978 * head = Head of link list.
15979 * link = link to be added.
15980 * LOCK must be already obtained.
15981 *
15982 * Context:
15983 * Interrupt or Kernel context, no mailbox commands allowed.
15984 */
15985 void
15986 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15987 {
15988 /* at the end there isn't a next */
15989 link->next = NULL;
15990
15991 if ((link->prev = head->last) == NULL) {
15992 head->first = link;
15993 } else {
15994 head->last->next = link;
15995 }
15996
15997 head->last = link;
15998 link->head = head; /* the queue we're on */
15999 }
16000
16001 /*
16002 * ql_add_link_t
16003 * Add link to the beginning of the chain.
16004 *
16005 * Input:
16006 * head = Head of link list.
16007 * link = link to be added.
16008 * LOCK must be already obtained.
16009 *
16010 * Context:
16011 * Interrupt or Kernel context, no mailbox commands allowed.
16012 */
16013 void
16014 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16015 {
16016 link->prev = NULL;
16017
16018 if ((link->next = head->first) == NULL) {
16019 head->last = link;
16020 } else {
16021 head->first->prev = link;
16022 }
16023
16024 head->first = link;
16025 link->head = head; /* the queue we're on */
16026 }
16027
16028 /*
16029 * ql_remove_link
16030 * Remove a link from the chain.
16031 *
16032 * Input:
16033 * head = Head of link list.
16034 * link = link to be removed.
16035 * LOCK must be already obtained.
16036 *
16037 * Context:
16038 * Interrupt or Kernel context, no mailbox commands allowed.
16039 */
16040 void
16041 ql_remove_link(ql_head_t *head, ql_link_t *link)
16042 {
16043 if (link->prev != NULL) {
16044 if ((link->prev->next = link->next) == NULL) {
16045 head->last = link->prev;
16046 } else {
16047 link->next->prev = link->prev;
16048 }
16049 } else if ((head->first = link->next) == NULL) {
16050 head->last = NULL;
16051 } else {
16052 head->first->prev = NULL;
16053 }
16054
16055 /* not on a queue any more */
16056 link->prev = link->next = NULL;
16057 link->head = NULL;
16058 }
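
/*
 * Usage sketch (illustrative only, not part of the driver): together,
 * ql_add_link_b(), ql_add_link_t() and ql_remove_link() implement a
 * doubly linked queue; the caller supplies the locking.  Adding at the
 * tail and removing from the head yields FIFO order.  QL_EXAMPLE is a
 * hypothetical guard and the function name is for illustration only.
 */
#ifdef QL_EXAMPLE
static ql_link_t *
ql_example_fifo(ql_head_t *q, ql_link_t *item)
{
	ql_link_t	*first;

	/* Enqueue at the tail of the queue. */
	ql_add_link_b(q, item);

	/* Later, dequeue from the head (oldest entry first). */
	if ((first = q->first) != NULL) {
		ql_remove_link(q, first);
	}

	return (first);
}
#endif	/* QL_EXAMPLE */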
16059
16060 /*
16061 * ql_chg_endian
 *	Change endianness of a byte array.
16063 *
16064 * Input:
16065 * buf = array pointer.
16066 * size = size of array in bytes.
16067 *
16068 * Context:
16069 * Interrupt or Kernel context, no mailbox commands allowed.
16070 */
16071 void
16072 ql_chg_endian(uint8_t buf[], size_t size)
16073 {
16074 uint8_t byte;
16075 size_t cnt1;
16076 size_t cnt;
16077
16078 cnt1 = size - 1;
16079 for (cnt = 0; cnt < size / 2; cnt++) {
16080 byte = buf[cnt1];
16081 buf[cnt1] = buf[cnt];
16082 buf[cnt] = byte;
16083 cnt1--;
16084 }
16085 }
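
/*
 * Worked example (illustrative only, not part of the driver):
 * ql_chg_endian() reverses a byte array in place, so the 4-byte
 * big endian value 0x11223344 becomes little endian and vice versa.
 * QL_EXAMPLE is a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static void
ql_example_chg_endian(void)
{
	uint8_t	buf[4] = {0x11, 0x22, 0x33, 0x44};

	ql_chg_endian(buf, sizeof (buf));
	/* buf is now {0x44, 0x33, 0x22, 0x11}. */
}
#endif	/* QL_EXAMPLE */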
16086
16087 /*
16088 * ql_bstr_to_dec
16089 * Convert decimal byte string to number.
16090 *
16091 * Input:
16092 * s: byte string pointer.
 *	ans:	integer pointer for number.
 *	size:	number of ASCII bytes.
 *
 * Returns:
 *	success = number of ASCII bytes processed.
16098 *
16099 * Context:
16100 * Kernel/Interrupt context.
16101 */
16102 static int
16103 ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
16104 {
16105 int mul, num, cnt, pos;
16106 char *str;
16107
16108 /* Calculate size of number. */
16109 if (size == 0) {
16110 for (str = s; *str >= '0' && *str <= '9'; str++) {
16111 size++;
16112 }
16113 }
16114
16115 *ans = 0;
16116 for (cnt = 0; *s != '\0' && size; size--, cnt++) {
16117 if (*s >= '0' && *s <= '9') {
16118 num = *s++ - '0';
16119 } else {
16120 break;
16121 }
16122
16123 for (mul = 1, pos = 1; pos < size; pos++) {
16124 mul *= 10;
16125 }
16126 *ans += num * mul;
16127 }
16128
16129 return (cnt);
16130 }
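
/*
 * Worked example (illustrative only, not part of the driver): with
 * size = 0, ql_bstr_to_dec() sizes the number itself, weights each
 * digit by its remaining position and stops at the first non-digit.
 * For the string "123" it stores 123 in *ans and returns 3.
 * QL_EXAMPLE is a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static void
ql_example_bstr_to_dec(void)
{
	uint32_t	ans;
	int		cnt;

	cnt = ql_bstr_to_dec("123", &ans, 0);
	/* cnt == 3, ans == 123 */
}
#endif	/* QL_EXAMPLE */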
16131
16132 /*
16133 * ql_delay
 *	Calls the delay routine if threads are not suspended; otherwise,
 *	busy waits.  Minimum delay = 1 tick = 10ms.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	usecs:	delay time in microseconds.
16139 *
16140 * Context:
16141 * Kernel or Interrupt context, no mailbox commands allowed.
16142 */
16143 void
16144 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16145 {
16146 if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16147 drv_usecwait(usecs);
16148 } else {
16149 delay(drv_usectohz(usecs));
16150 }
16151 }
16152
16153 /*
 * ql_stall_driver
 *	Stalls one or all driver instances; waits up to 30 seconds.
16156 *
16157 * Input:
16158 * ha: adapter state pointer or NULL for all.
16159 * options: BIT_0 --> leave driver stalled on exit if
16160 * failed.
16161 *
16162 * Returns:
16163 * ql local function return status code.
16164 *
16165 * Context:
16166 * Kernel context.
16167 */
16168 int
16169 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16170 {
16171 ql_link_t *link;
	ql_adapter_state_t	*ha2 = NULL;
16173 uint32_t timer;
16174
16175 QL_PRINT_3(CE_CONT, "started\n");
16176
	/* Wait up to 30 seconds for the daemons to stall. */
16178 timer = 3000;
16179 link = ha == NULL ? ql_hba.first : &ha->hba;
16180 while (link != NULL && timer) {
16181 ha2 = link->base_address;
16182
16183 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16184
16185 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16186 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16187 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16188 ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16189 link = ha == NULL ? link->next : NULL;
16190 continue;
16191 }
16192
16193 ql_delay(ha2, 10000);
16194 timer--;
16195 link = ha == NULL ? ql_hba.first : &ha->hba;
16196 }
16197
16198 if (ha2 != NULL && timer == 0) {
16199 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16200 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16201 "unstalled"));
16202 if (options & BIT_0) {
16203 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16204 }
16205 return (QL_FUNCTION_TIMEOUT);
16206 }
16207
16208 QL_PRINT_3(CE_CONT, "done\n");
16209
16210 return (QL_SUCCESS);
16211 }
16212
16213 /*
16214 * ql_restart_driver
16215 * Restarts one or all driver instances.
16216 *
16217 * Input:
16218 * ha: adapter state pointer or NULL for all.
16219 *
16220 * Context:
16221 * Kernel context.
16222 */
16223 void
16224 ql_restart_driver(ql_adapter_state_t *ha)
16225 {
16226 ql_link_t *link;
16227 ql_adapter_state_t *ha2;
16228 uint32_t timer;
16229
16230 QL_PRINT_3(CE_CONT, "started\n");
16231
16232 /* Tell all daemons to unstall. */
16233 link = ha == NULL ? ql_hba.first : &ha->hba;
16234 while (link != NULL) {
16235 ha2 = link->base_address;
16236
16237 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16238
16239 link = ha == NULL ? link->next : NULL;
16240 }
16241
	/* Wait up to 30 seconds for all daemons to unstall. */
16243 timer = 3000;
16244 link = ha == NULL ? ql_hba.first : &ha->hba;
16245 while (link != NULL && timer) {
16246 ha2 = link->base_address;
16247
16248 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16249 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16250 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16251 QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16252 ha2->instance, ha2->vp_index);
16253 ql_restart_queues(ha2);
16254 link = ha == NULL ? link->next : NULL;
16255 continue;
16256 }
16257
16258 QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16259 ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
16260
16261 ql_delay(ha2, 10000);
16262 timer--;
16263 link = ha == NULL ? ql_hba.first : &ha->hba;
16264 }
16265
16266 QL_PRINT_3(CE_CONT, "done\n");
16267 }
16268
16269 /*
16270 * ql_setup_interrupts
16271 * Sets up interrupts based on the HBA's and platform's
16272 * capabilities (e.g., legacy / MSI / FIXED).
16273 *
16274 * Input:
16275 * ha = adapter state pointer.
16276 *
16277 * Returns:
16278 * DDI_SUCCESS or DDI_FAILURE.
16279 *
16280 * Context:
16281 * Kernel context.
16282 */
16283 static int
16284 ql_setup_interrupts(ql_adapter_state_t *ha)
16285 {
16286 int32_t rval = DDI_FAILURE;
16287 int32_t i;
16288 int32_t itypes = 0;
16289
16290 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16291
16292 /*
16293 * The Solaris Advanced Interrupt Functions (aif) are only
16294 * supported on s10U1 or greater.
16295 */
16296 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16297 EL(ha, "interrupt framework is not supported or is "
16298 "disabled, using legacy\n");
16299 return (ql_legacy_intr(ha));
16300 } else if (ql_os_release_level == 10) {
16301 /*
16302 * See if the advanced interrupt functions (aif) are
16303 * in the kernel
16304 */
16305 void *fptr = (void *)&ddi_intr_get_supported_types;
16306
16307 if (fptr == NULL) {
16308 EL(ha, "aif is not supported, using legacy "
16309 "interrupts (rev)\n");
16310 return (ql_legacy_intr(ha));
16311 }
16312 }
16313
16314 /* See what types of interrupts this HBA and platform support */
16315 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16316 DDI_SUCCESS) {
16317 EL(ha, "get supported types failed, rval=%xh, "
16318 "assuming FIXED\n", i);
16319 itypes = DDI_INTR_TYPE_FIXED;
16320 }
16321
16322 EL(ha, "supported types are: %xh\n", itypes);
16323
16324 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16325 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16326 EL(ha, "successful MSI-X setup\n");
16327 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16328 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16329 EL(ha, "successful MSI setup\n");
16330 } else {
16331 rval = ql_setup_fixed(ha);
16332 }
16333
16334 if (rval != DDI_SUCCESS) {
16335 EL(ha, "failed, aif, rval=%xh\n", rval);
16336 } else {
16337 /*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16339 }
16340
16341 return (rval);
16342 }
16343
16344 /*
16345 * ql_setup_msi
16346 * Set up aif MSI interrupts
16347 *
16348 * Input:
16349 * ha = adapter state pointer.
16350 *
16351 * Returns:
16352 * DDI_SUCCESS or DDI_FAILURE.
16353 *
16354 * Context:
16355 * Kernel context.
16356 */
16357 static int
16358 ql_setup_msi(ql_adapter_state_t *ha)
16359 {
16360 int32_t count = 0;
16361 int32_t avail = 0;
16362 int32_t actual = 0;
16363 int32_t msitype = DDI_INTR_TYPE_MSI;
16364 int32_t ret;
16365 ql_ifunc_t itrfun[10] = {{NULL}};
16366
16367 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16368
16369 if (ql_disable_msi != 0) {
16370 EL(ha, "MSI is disabled by user\n");
16371 return (DDI_FAILURE);
16372 }
16373
	/* MSI support is only supported on 24xx HBAs. */
16375 if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
16376 EL(ha, "HBA does not support MSI\n");
16377 return (DDI_FAILURE);
16378 }
16379
16380 /* Get number of MSI interrupts the system supports */
16381 if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16382 DDI_SUCCESS) || count == 0) {
16383 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16384 return (DDI_FAILURE);
16385 }
16386
16387 /* Get number of available MSI interrupts */
16388 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16389 DDI_SUCCESS) || avail == 0) {
16390 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16391 return (DDI_FAILURE);
16392 }
16393
16394 /* MSI requires only 1. */
16395 count = 1;
16396 itrfun[0].ifunc = &ql_isr_aif;
16397
16398 /* Allocate space for interrupt handles */
16399 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16400 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16401
16402 ha->iflags |= IFLG_INTR_MSI;
16403
16404 /* Allocate the interrupts */
16405 if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16406 &actual, 0)) != DDI_SUCCESS || actual < count) {
16407 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16408 "actual=%xh\n", ret, count, actual);
16409 ql_release_intr(ha);
16410 return (DDI_FAILURE);
16411 }
16412
16413 ha->intr_cnt = actual;
16414
16415 /* Get interrupt priority */
16416 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16417 DDI_SUCCESS) {
16418 EL(ha, "failed, get_pri ret=%xh\n", ret);
16419 ql_release_intr(ha);
16420 return (ret);
16421 }
16422
16423 /* Add the interrupt handler */
16424 if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
16425 (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16426 EL(ha, "failed, intr_add ret=%xh\n", ret);
16427 ql_release_intr(ha);
16428 return (ret);
16429 }
16430
16431 /* Setup mutexes */
16432 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16433 EL(ha, "failed, mutex init ret=%xh\n", ret);
16434 ql_release_intr(ha);
16435 return (ret);
16436 }
16437
16438 /* Get the capabilities */
16439 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16440
16441 /* Enable interrupts */
16442 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16443 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16444 DDI_SUCCESS) {
16445 EL(ha, "failed, block enable, ret=%xh\n", ret);
16446 ql_destroy_mutex(ha);
16447 ql_release_intr(ha);
16448 return (ret);
16449 }
16450 } else {
16451 if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16452 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16453 ql_destroy_mutex(ha);
16454 ql_release_intr(ha);
16455 return (ret);
16456 }
16457 }
16458
16459 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16460
16461 return (DDI_SUCCESS);
16462 }
16463
16464 /*
16465 * ql_setup_msix
16466 * Set up aif MSI-X interrupts
16467 *
16468 * Input:
16469 * ha = adapter state pointer.
16470 *
16471 * Returns:
16472 * DDI_SUCCESS or DDI_FAILURE.
16473 *
16474 * Context:
16475 * Kernel context.
16476 */
16477 static int
16478 ql_setup_msix(ql_adapter_state_t *ha)
16479 {
16480 uint16_t hwvect;
16481 int32_t count = 0;
16482 int32_t avail = 0;
16483 int32_t actual = 0;
16484 int32_t msitype = DDI_INTR_TYPE_MSIX;
16485 int32_t ret;
16486 uint32_t i;
16487 ql_ifunc_t itrfun[QL_MSIX_MAXAIF] = {{NULL}};
16488
16489 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16490
16491 if (ql_disable_msix != 0) {
16492 EL(ha, "MSI-X is disabled by user\n");
16493 return (DDI_FAILURE);
16494 }
16495
16496 /*
	 * MSI-X support is only available on 24xx HBAs that have
	 * rev A2 parts (revid = 3) or greater.
16499 */
16500 if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16501 (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16502 (ha->device_id == 0x8021))) {
16503 EL(ha, "HBA does not support MSI-X\n");
16504 return (DDI_FAILURE);
16505 }
16506
16507 if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16508 EL(ha, "HBA does not support MSI-X (revid)\n");
16509 return (DDI_FAILURE);
16510 }
16511
	/* Per HP, these HP-branded HBAs are not supported with MSI-X */
16513 if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16514 ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16515 EL(ha, "HBA does not support MSI-X (subdevid)\n");
16516 return (DDI_FAILURE);
16517 }
16518
16519 /* Get the number of 24xx/25xx MSI-X h/w vectors */
16520 hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16521 ql_pci_config_get16(ha, 0x7e) :
16522 ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16523
16524 EL(ha, "pcie config space hwvect = %d\n", hwvect);
16525
16526 if (hwvect < QL_MSIX_MAXAIF) {
16527 EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16528 QL_MSIX_MAXAIF, hwvect);
16529 return (DDI_FAILURE);
16530 }
16531
16532 /* Get number of MSI-X interrupts the platform h/w supports */
16533 if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16534 DDI_SUCCESS) || count == 0) {
16535 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16536 return (DDI_FAILURE);
16537 }
16538
16539 /* Get number of available system interrupts */
16540 if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16541 DDI_SUCCESS) || avail == 0) {
16542 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16543 return (DDI_FAILURE);
16544 }
16545
16546 /* Fill out the intr table */
16547 count = QL_MSIX_MAXAIF;
16548 itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16549 itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16550
16551 /* Allocate space for interrupt handles */
16552 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16553 if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16554 ha->hsize = 0;
16555 EL(ha, "failed, unable to allocate htable space\n");
16556 return (DDI_FAILURE);
16557 }
16558
16559 ha->iflags |= IFLG_INTR_MSIX;
16560
16561 /* Allocate the interrupts */
16562 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16563 DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16564 actual < QL_MSIX_MAXAIF) {
16565 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16566 "actual=%xh\n", ret, count, actual);
16567 ql_release_intr(ha);
16568 return (DDI_FAILURE);
16569 }
16570
16571 ha->intr_cnt = actual;
16572
16573 /* Get interrupt priority */
16574 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16575 DDI_SUCCESS) {
16576 EL(ha, "failed, get_pri ret=%xh\n", ret);
16577 ql_release_intr(ha);
16578 return (ret);
16579 }
16580
16581 /* Add the interrupt handlers */
16582 for (i = 0; i < actual; i++) {
16583 if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16584 (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16585 EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16586 actual, ret);
16587 ql_release_intr(ha);
16588 return (ret);
16589 }
16590 }
16591
16592 /*
	 * Duplicate the rest of the interrupt vectors;
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
16595 */
16596 #ifdef __sparc
16597 for (i = actual; i < hwvect; i++) {
16598 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16599 &ha->htable[i])) != DDI_SUCCESS) {
16600 EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16601 i, actual, ret);
16602 ql_release_intr(ha);
16603 return (ret);
16604 }
16605 }
16606 #endif
16607
16608 /* Setup mutexes */
16609 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16610 EL(ha, "failed, mutex init ret=%xh\n", ret);
16611 ql_release_intr(ha);
16612 return (ret);
16613 }
16614
16615 /* Get the capabilities */
16616 (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16617
16618 /* Enable interrupts */
16619 if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16620 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16621 DDI_SUCCESS) {
16622 EL(ha, "failed, block enable, ret=%xh\n", ret);
16623 ql_destroy_mutex(ha);
16624 ql_release_intr(ha);
16625 return (ret);
16626 }
16627 } else {
16628 for (i = 0; i < ha->intr_cnt; i++) {
16629 if ((ret = ddi_intr_enable(ha->htable[i])) !=
16630 DDI_SUCCESS) {
16631 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16632 ql_destroy_mutex(ha);
16633 ql_release_intr(ha);
16634 return (ret);
16635 }
16636 }
16637 }
16638
16639 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16640
16641 return (DDI_SUCCESS);
16642 }
16643
16644 /*
16645 * ql_setup_fixed
16646 * Sets up aif FIXED interrupts
16647 *
16648 * Input:
16649 * ha = adapter state pointer.
16650 *
16651 * Returns:
16652 * DDI_SUCCESS or DDI_FAILURE.
16653 *
16654 * Context:
16655 * Kernel context.
16656 */
16657 static int
16658 ql_setup_fixed(ql_adapter_state_t *ha)
16659 {
16660 int32_t count = 0;
16661 int32_t actual = 0;
16662 int32_t ret;
16663 uint32_t i;
16664
16665 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16666
16667 /* Get number of fixed interrupts the system supports */
16668 if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16669 &count)) != DDI_SUCCESS) || count == 0) {
16670 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16671 return (DDI_FAILURE);
16672 }
16673
16674 ha->iflags |= IFLG_INTR_FIXED;
16675
16676 /* Allocate space for interrupt handles */
16677 ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16678 ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16679
16680 /* Allocate the interrupts */
16681 if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16682 0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16683 actual < count) {
16684 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16685 "actual=%xh\n", ret, count, actual);
16686 ql_release_intr(ha);
16687 return (DDI_FAILURE);
16688 }
16689
16690 ha->intr_cnt = actual;
16691
16692 /* Get interrupt priority */
16693 if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16694 DDI_SUCCESS) {
16695 EL(ha, "failed, get_pri ret=%xh\n", ret);
16696 ql_release_intr(ha);
16697 return (ret);
16698 }
16699
16700 /* Add the interrupt handlers */
16701 for (i = 0; i < ha->intr_cnt; i++) {
16702 if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16703 (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16704 EL(ha, "failed, intr_add ret=%xh\n", ret);
16705 ql_release_intr(ha);
16706 return (ret);
16707 }
16708 }
16709
16710 /* Setup mutexes */
16711 if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16712 EL(ha, "failed, mutex init ret=%xh\n", ret);
16713 ql_release_intr(ha);
16714 return (ret);
16715 }
16716
16717 /* Enable interrupts */
16718 for (i = 0; i < ha->intr_cnt; i++) {
16719 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16720 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16721 ql_destroy_mutex(ha);
16722 ql_release_intr(ha);
16723 return (ret);
16724 }
16725 }
16726
	EL(ha, "using FIXED interrupts\n");
16728
16729 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16730
16731 return (DDI_SUCCESS);
16732 }
16733
16734 /*
16735 * ql_disable_intr
16736 * Disables interrupts
16737 *
16738 * Input:
16739 * ha = adapter state pointer.
16740 *
16741 * Returns:
16742 *
16743 * Context:
16744 * Kernel context.
16745 */
16746 static void
16747 ql_disable_intr(ql_adapter_state_t *ha)
16748 {
16749 uint32_t i, rval;
16750
16751 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16752
16753 if (!(ha->iflags & IFLG_INTR_AIF)) {
16754
16755 /* Disable legacy interrupts */
16756 (void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16757
16758 } else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16759 (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16760
16761 /* Remove AIF block interrupts (MSI) */
16762 if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16763 != DDI_SUCCESS) {
16764 EL(ha, "failed intr block disable, rval=%x\n", rval);
16765 }
16766
16767 } else {
16768
16769 /* Remove AIF non-block interrupts (fixed). */
16770 for (i = 0; i < ha->intr_cnt; i++) {
16771 if ((rval = ddi_intr_disable(ha->htable[i])) !=
16772 DDI_SUCCESS) {
16773 EL(ha, "failed intr disable, intr#=%xh, "
16774 "rval=%xh\n", i, rval);
16775 }
16776 }
16777 }
16778
16779 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16780 }
16781
16782 /*
16783 * ql_release_intr
16784 * Releases aif legacy interrupt resources
16785 *
16786 * Input:
16787 * ha = adapter state pointer.
16788 *
16789 * Returns:
16790 *
16791 * Context:
16792 * Kernel context.
16793 */
16794 static void
16795 ql_release_intr(ql_adapter_state_t *ha)
16796 {
16797 int32_t i;
16798
16799 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16800
16801 if (!(ha->iflags & IFLG_INTR_AIF)) {
16802 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16803 return;
16804 }
16805
16806 ha->iflags &= ~(IFLG_INTR_AIF);
16807 if (ha->htable != NULL && ha->hsize > 0) {
16808 i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16809 while (i-- > 0) {
16810 if (ha->htable[i] == 0) {
16811 EL(ha, "htable[%x]=0h\n", i);
16812 continue;
16813 }
16814
16815 (void) ddi_intr_disable(ha->htable[i]);
16816
16817 if (i < ha->intr_cnt) {
16818 (void) ddi_intr_remove_handler(ha->htable[i]);
16819 }
16820
16821 (void) ddi_intr_free(ha->htable[i]);
16822 }
16823
16824 kmem_free(ha->htable, ha->hsize);
16825 ha->htable = NULL;
16826 }
16827
16828 ha->hsize = 0;
16829 ha->intr_cnt = 0;
16830 ha->intr_pri = 0;
16831 ha->intr_cap = 0;
16832
16833 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16834 }
16835
16836 /*
16837 * ql_legacy_intr
16838 * Sets up legacy interrupts.
16839 *
 * NB: Only to be used if AIF (Advanced Interrupt Framework)
 * is NOT in the kernel.
16842 *
16843 * Input:
16844 * ha = adapter state pointer.
16845 *
16846 * Returns:
16847 * DDI_SUCCESS or DDI_FAILURE.
16848 *
16849 * Context:
16850 * Kernel context.
16851 */
16852 static int
16853 ql_legacy_intr(ql_adapter_state_t *ha)
16854 {
16855 int rval = DDI_SUCCESS;
16856
16857 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16858
16859 /* Setup mutexes */
16860 if (ql_init_mutex(ha) != DDI_SUCCESS) {
16861 EL(ha, "failed, mutex init\n");
16862 return (DDI_FAILURE);
16863 }
16864
16865 /* Setup standard/legacy interrupt handler */
16866 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16867 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16868 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16869 QL_NAME, ha->instance);
16870 ql_destroy_mutex(ha);
16871 rval = DDI_FAILURE;
16872 }
16873
16874 if (rval == DDI_SUCCESS) {
16875 ha->iflags |= IFLG_INTR_LEGACY;
16876 EL(ha, "using legacy interrupts\n");
16877 }
16878
16879 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16880
16881 return (rval);
16882 }
16883
16884 /*
16885 * ql_init_mutex
 *	Initializes the driver's mutexes and condition variables.
16887 *
16888 * Input:
16889 * ha = adapter state pointer.
16890 *
16891 * Returns:
16892 * DDI_SUCCESS or DDI_FAILURE.
16893 *
16894 * Context:
16895 * Kernel context.
16896 */
16897 static int
16898 ql_init_mutex(ql_adapter_state_t *ha)
16899 {
16900 int ret;
16901 void *intr;
16902
16903 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16904
16905 if (ha->iflags & IFLG_INTR_AIF) {
16906 intr = (void *)(uintptr_t)ha->intr_pri;
16907 } else {
16908 /* Get iblock cookies to initialize mutexes */
16909 if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16910 &ha->iblock_cookie)) != DDI_SUCCESS) {
16911 EL(ha, "failed, get_iblock: %xh\n", ret);
16912 return (DDI_FAILURE);
16913 }
16914 intr = (void *)ha->iblock_cookie;
16915 }
16916
	/* mutex to protect the adapter state structure. */
16918 mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16919
16920 /* mutex to protect the ISP response ring. */
16921 mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16922
16923 /* mutex to protect the mailbox registers. */
16924 mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16925
16926 /* power management protection */
16927 mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16928
16929 /* Mailbox wait and interrupt conditional variable. */
16930 cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16931 cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16932
16933 /* mutex to protect the ISP request ring. */
16934 mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16935
16936 /* Unsolicited buffer conditional variable. */
16937 cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16938
16939 mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16940 mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16941
16942 /* Suspended conditional variable. */
16943 cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16944
16945 /* mutex to protect task daemon context. */
16946 mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16947
16948 /* Task_daemon thread conditional variable. */
16949 cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16950
16951 /* mutex to protect diag port manage interface */
16952 mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16953
16954 /* mutex to protect per instance f/w dump flags and buffer */
16955 mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16956
16957 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16958
16959 return (DDI_SUCCESS);
16960 }
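
/*
 * Usage sketch (illustrative only, not part of the driver): because
 * ql_init_mutex() initializes these locks with the interrupt priority
 * (aif) or iblock cookie (legacy), they may be acquired from both the
 * ISR and kernel context without risk of a low priority holder being
 * preempted by the interrupt that needs the same lock.  QL_EXAMPLE is
 * a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static void
ql_example_intr_mutex(ql_adapter_state_t *ha)
{
	/* Same lock the interrupt handler takes for the response ring. */
	mutex_enter(&ha->intr_mutex);
	/* ... touch state shared with the interrupt handler ... */
	mutex_exit(&ha->intr_mutex);
}
#endif	/* QL_EXAMPLE */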
16961
16962 /*
16963 * ql_destroy_mutex
 *	Destroys the driver's mutexes and condition variables.
16965 *
16966 * Input:
16967 * ha = adapter state pointer.
16968 *
16969 * Returns:
16970 *
16971 * Context:
16972 * Kernel context.
16973 */
16974 static void
16975 ql_destroy_mutex(ql_adapter_state_t *ha)
16976 {
16977 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16978
16979 mutex_destroy(&ha->dump_mutex);
16980 mutex_destroy(&ha->portmutex);
16981 cv_destroy(&ha->cv_task_daemon);
16982 mutex_destroy(&ha->task_daemon_mutex);
16983 cv_destroy(&ha->cv_dr_suspended);
16984 mutex_destroy(&ha->cache_mutex);
16985 mutex_destroy(&ha->ub_mutex);
16986 cv_destroy(&ha->cv_ub);
16987 mutex_destroy(&ha->req_ring_mutex);
16988 cv_destroy(&ha->cv_mbx_intr);
16989 cv_destroy(&ha->cv_mbx_wait);
16990 mutex_destroy(&ha->pm_mutex);
16991 mutex_destroy(&ha->mbx_mutex);
16992 mutex_destroy(&ha->intr_mutex);
16993 mutex_destroy(&ha->mutex);
16994
16995 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16996 }
16997
16998 /*
16999 * ql_fwmodule_resolve
17000 * Loads and resolves external firmware module and symbols
17001 *
17002 * Input:
17003 * ha: adapter state pointer.
17004 *
17005 * Returns:
17006 * ql local function return status code:
 *		QL_SUCCESS - external f/w module and symbols resolved
17008 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17009 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17010 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17011 * Context:
17012 * Kernel context.
17013 *
17014 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
 * could switch to a tighter scope around the actual download (and add an extra
17016 * ddi_modopen for module opens that occur before root is mounted).
17017 *
17018 */
17019 uint32_t
17020 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17021 {
17022 int8_t module[128];
17023 int8_t fw_version[128];
17024 uint32_t rval = QL_SUCCESS;
17025 caddr_t code, code02;
17026 uint8_t *p_ucfw;
17027 uint16_t *p_usaddr, *p_uslen;
17028 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
17029 uint32_t *p_uiaddr02, *p_uilen02;
17030 struct fw_table *fwt;
17031 extern struct fw_table fw_table[];
17032
17033 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17034
17035 if (ha->fw_module != NULL) {
17036 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17037 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17038 ha->fw_subminor_version);
17039 return (rval);
17040 }
17041
17042 /* make sure the fw_class is in the fw_table of supported classes */
17043 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17044 if (fwt->fw_class == ha->fw_class)
17045 break; /* match */
17046 }
17047 if (fwt->fw_version == NULL) {
17048 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17049 "in driver's fw_table", QL_NAME, ha->instance,
17050 ha->fw_class);
17051 return (QL_FW_NOT_SUPPORTED);
17052 }
17053
17054 /*
17055 * open the module related to the fw_class
17056 */
17057 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17058 ha->fw_class);
17059
17060 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17061 if (ha->fw_module == NULL) {
17062 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17063 QL_NAME, ha->instance, module);
17064 return (QL_FWMODLOAD_FAILED);
17065 }
17066
17067 /*
17068 * resolve the fw module symbols, data types depend on fw_class
17069 */
17070
17071 switch (ha->fw_class) {
17072 case 0x2200:
17073 case 0x2300:
17074 case 0x6322:
17075
17076 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17077 NULL)) == NULL) {
17078 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rc01 symbol\n", module);
17080 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17081 "risc_code_addr01", NULL)) == NULL) {
17082 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rca01 symbol\n", module);
17084 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17085 "risc_code_length01", NULL)) == NULL) {
17086 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17088 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17089 "firmware_version", NULL)) == NULL) {
17090 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s fwver symbol\n", module);
17092 }
17093
17094 if (rval == QL_SUCCESS) {
17095 ha->risc_fw[0].code = code;
17096 ha->risc_fw[0].addr = *p_usaddr;
17097 ha->risc_fw[0].length = *p_uslen;
17098
17099 (void) snprintf(fw_version, sizeof (fw_version),
17100 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17101 }
17102 break;
17103
17104 case 0x2400:
17105 case 0x2500:
17106 case 0x8100:
17107
17108 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17109 NULL)) == NULL) {
17110 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rc01 symbol\n", module);
17112 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17113 "risc_code_addr01", NULL)) == NULL) {
17114 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rca01 symbol\n", module);
17116 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17117 "risc_code_length01", NULL)) == NULL) {
17118 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17120 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17121 "firmware_version", NULL)) == NULL) {
17122 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s fwver symbol\n", module);
17124 }
17125
17126 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17127 NULL)) == NULL) {
17128 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rc02 symbol\n", module);
17130 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17131 "risc_code_addr02", NULL)) == NULL) {
17132 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rca02 symbol\n", module);
17134 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17135 "risc_code_length02", NULL)) == NULL) {
17136 rval = QL_FWSYM_NOT_FOUND;
			EL(ha, "failed, f/w module %s rcl02 symbol\n", module);
17138 }
17139
17140 if (rval == QL_SUCCESS) {
17141 ha->risc_fw[0].code = code;
17142 ha->risc_fw[0].addr = *p_uiaddr;
17143 ha->risc_fw[0].length = *p_uilen;
17144 ha->risc_fw[1].code = code02;
17145 ha->risc_fw[1].addr = *p_uiaddr02;
17146 ha->risc_fw[1].length = *p_uilen02;
17147
17148 (void) snprintf(fw_version, sizeof (fw_version),
17149 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17150 }
17151 break;
17152
17153 default:
17154 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17155 rval = QL_FW_NOT_SUPPORTED;
17156 }
17157
17158 if (rval != QL_SUCCESS) {
17159 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17160 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17161 if (ha->fw_module != NULL) {
17162 (void) ddi_modclose(ha->fw_module);
17163 ha->fw_module = NULL;
17164 }
17165 } else {
17166 /*
17167 * check for firmware version mismatch between module and
17168 * compiled in fw_table version.
17169 */
17170
17171 if (strcmp(fwt->fw_version, fw_version) != 0) {
17172
17173 /*
17174 * If f/w / driver version mismatches then
17175 * return a successful status -- however warn
17176 * the user that this is NOT recommended.
17177 */
17178
17179 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17180 "mismatch for %x: driver-%s module-%s", QL_NAME,
17181 ha->instance, ha->fw_class, fwt->fw_version,
17182 fw_version);
17183
17184 ha->cfg_flags |= CFG_FW_MISMATCH;
17185 } else {
17186 ha->cfg_flags &= ~CFG_FW_MISMATCH;
17187 }
17188 }
17189
17190 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17191
17192 return (rval);
17193 }
17194
17195 /*
17196 * ql_port_state
17197 * Set the state on all adapter ports.
17198 *
17199 * Input:
17200 * ha: parent adapter state pointer.
17201 * state: port state.
17202 * flags: task daemon flags to set.
17203 *
17204 * Context:
17205 * Interrupt or Kernel context, no mailbox commands allowed.
17206 */
17207 void
17208 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17209 {
17210 ql_adapter_state_t *vha;
17211
17212 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17213
17214 TASK_DAEMON_LOCK(ha);
17215 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17216 if (FC_PORT_STATE_MASK(vha->state) != state) {
17217 vha->state = state != FC_STATE_OFFLINE ?
17218 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17219 vha->task_daemon_flags |= flags;
17220 }
17221 }
17222 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17223 TASK_DAEMON_UNLOCK(ha);
17224
17225 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17226 }
17227
17228 /*
17229 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17230 *
17231 * Input: Pointer to the adapter state structure.
17232 * Returns: Success or Failure.
17233 * Context: Kernel context.
17234 */
17235 int
17236 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17237 {
17238 int rval = DDI_SUCCESS;
17239
17240 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17241
17242 ha->el_trace_desc =
17243 (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17244
17245 if (ha->el_trace_desc == NULL) {
17246 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17247 QL_NAME, ha->instance);
17248 rval = DDI_FAILURE;
17249 } else {
17250 ha->el_trace_desc->next = 0;
17251 ha->el_trace_desc->trace_buffer =
17252 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17253
17254 if (ha->el_trace_desc->trace_buffer == NULL) {
17255 cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17256 QL_NAME, ha->instance);
17257 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17258 rval = DDI_FAILURE;
17259 } else {
17260 ha->el_trace_desc->trace_buffer_size =
17261 EL_TRACE_BUF_SIZE;
17262 mutex_init(&ha->el_trace_desc->mutex, NULL,
17263 MUTEX_DRIVER, NULL);
17264 }
17265 }
17266
17267 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17268
17269 return (rval);
17270 }
17271
17272 /*
17273 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17274 *
17275 * Input: Pointer to the adapter state structure.
17276 * Returns: Success or Failure.
17277 * Context: Kernel context.
17278 */
17279 int
17280 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17281 {
17282 int rval = DDI_SUCCESS;
17283
17284 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17285
17286 if (ha->el_trace_desc == NULL) {
17287 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17288 QL_NAME, ha->instance);
17289 rval = DDI_FAILURE;
17290 } else {
17291 if (ha->el_trace_desc->trace_buffer != NULL) {
17292 kmem_free(ha->el_trace_desc->trace_buffer,
17293 ha->el_trace_desc->trace_buffer_size);
17294 }
17295 mutex_destroy(&ha->el_trace_desc->mutex);
17296 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17297 }
17298
17299 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17300
17301 return (rval);
17302 }
17303
17304 /*
17305 * els_cmd_text - Return a pointer to a string describing the command
17306 *
17307 * Input: els_cmd = the els command opcode.
17308 * Returns: pointer to a string.
17309 * Context: Kernel context.
17310 */
17311 char *
17312 els_cmd_text(int els_cmd)
17313 {
17314 cmd_table_t *entry = &els_cmd_tbl[0];
17315
17316 return (cmd_text(entry, els_cmd));
17317 }
17318
17319 /*
17320 * mbx_cmd_text - Return a pointer to a string describing the command
17321 *
17322 * Input: mbx_cmd = the mailbox command opcode.
17323 * Returns: pointer to a string.
17324 * Context: Kernel context.
17325 */
17326 char *
17327 mbx_cmd_text(int mbx_cmd)
17328 {
17329 cmd_table_t *entry = &mbox_cmd_tbl[0];
17330
17331 return (cmd_text(entry, mbx_cmd));
17332 }
17333
17334 /*
17335 * cmd_text Return a pointer to a string describing the command
17336 *
17337 * Input: entry = the command table
17338 * cmd = the command.
17339 * Returns: pointer to a string.
17340 * Context: Kernel context.
17341 */
17342 char *
17343 cmd_text(cmd_table_t *entry, int cmd)
17344 {
17345 for (; entry->cmd != 0; entry++) {
17346 if (entry->cmd == cmd) {
17347 break;
17348 }
17349 }
17350 return (entry->string);
17351 }
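
/*
 * Usage sketch (illustrative only, not part of the driver): the command
 * tables are terminated by an entry whose cmd is 0, so a failed lookup
 * falls through to that sentinel and cmd_text() returns its string
 * instead of NULL; callers can therefore log the result directly.
 * QL_EXAMPLE is a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static void
ql_example_cmd_text(int els_cmd)
{
	/* Decode an ELS opcode into a readable log message. */
	cmn_err(CE_CONT, "els opcode %xh = %s\n", els_cmd,
	    els_cmd_text(els_cmd));
}
#endif	/* QL_EXAMPLE */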
17352
17353 /*
 * ql_els_24xx_iocb - els request indication.
17355 *
17356 * Input: ha = adapter state pointer.
17357 * srb = scsi request block pointer.
17358 * arg = els passthru entry iocb pointer.
17359 * Returns:
17360 * Context: Kernel context.
17361 */
17362 void
17363 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17364 {
17365 els_descriptor_t els_desc;
17366
17367 /* Extract the ELS information */
17368 ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17369
17370 /* Construct the passthru entry */
17371 ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17372
17373 /* Ensure correct endianness */
17374 ql_isp_els_handle_cmd_endian(ha, srb);
17375 }
17376
17377 /*
17378 * ql_fca_isp_els_request - Extract into an els descriptor the info required
17379 * to build an els_passthru iocb from an fc packet.
17380 *
17381 * Input: ha = adapter state pointer.
17382 * pkt = fc packet pointer
17383 * els_desc = els descriptor pointer
17384 * Returns:
17385 * Context: Kernel context.
17386 */
17387 static void
17388 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17389 els_descriptor_t *els_desc)
17390 {
17391 ls_code_t els;
17392
17393 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17394 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17395
17396 els_desc->els = els.ls_code;
17397
17398 els_desc->els_handle = ha->hba_buf.acc_handle;
17399 els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17400 els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* Use the n_port_handle if it is a valid local loop ID, else 0. */
17402 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17403 els_desc->n_port_handle = ha->n_port->n_port_handle;
17404 } else {
17405 els_desc->n_port_handle = 0;
17406 }
17407 els_desc->control_flags = 0;
17408 els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17409 /*
17410 * Transmit DSD. This field defines the Fibre Channel Frame payload
17411 * (without the frame header) in system memory.
17412 */
17413 els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17414 els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17415 els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17416
17417 els_desc->rsp_byte_count = pkt->pkt_rsplen;
17418 /*
17419 * Receive DSD. This field defines the ELS response payload buffer
17420 * for the ISP24xx firmware transferring the received ELS
17421 * response frame to a location in host memory.
17422 */
17423 els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
17424 els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
17425 els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
17426 }
17427
17428 /*
17429 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17430 * using the els descriptor.
17431 *
17432 * Input: ha = adapter state pointer.
17433 * els_desc = els descriptor pointer.
17434 * els_entry = els passthru entry iocb pointer.
17435 * Returns:
17436 * Context: Kernel context.
17437 */
17438 static void
17439 ql_isp_els_request_ctor(els_descriptor_t *els_desc,
17440 els_passthru_entry_t *els_entry)
17441 {
17442 uint32_t *ptr32;
17443
17444 /*
17445 * Construct command packet.
17446 */
17447 ddi_put8(els_desc->els_handle, &els_entry->entry_type,
17448 (uint8_t)ELS_PASSTHRU_TYPE);
17449 ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
17450 els_desc->n_port_handle);
17451 ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
17452 ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
17453 (uint32_t)0);
17454 ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
17455 els_desc->els);
17456 ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
17457 els_desc->d_id.b.al_pa);
17458 ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
17459 els_desc->d_id.b.area);
17460 ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
17461 els_desc->d_id.b.domain);
17462 ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
17463 els_desc->s_id.b.al_pa);
17464 ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
17465 els_desc->s_id.b.area);
17466 ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
17467 els_desc->s_id.b.domain);
17468 ddi_put16(els_desc->els_handle, &els_entry->control_flags,
17469 els_desc->control_flags);
17470 ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
17471 els_desc->rsp_byte_count);
17472 ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
17473 els_desc->cmd_byte_count);
17474 /* Load transmit data segments and count. */
17475 ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
17476 ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
17477 ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
17478 ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
17479 ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
17480 ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
17481 ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
17482 ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
17483 ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
17484 }
17485
17486 /*
17487 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17488 * in host memory.
17489 *
17490 * Input: ha = adapter state pointer.
17491 * srb = scsi request block
17492 * Returns:
17493 * Context: Kernel context.
17494 */
17495 void
17496 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17497 {
17498 ls_code_t els;
17499 fc_packet_t *pkt;
17500 uint8_t *ptr;
17501
17502 pkt = srb->pkt;
17503
17504 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17505 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17506
17507 ptr = (uint8_t *)pkt->pkt_cmd;
17508
17509 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17510 }
17511
17512 /*
17513 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17514 * in host memory.
17515 * Input: ha = adapter state pointer.
17516 * srb = scsi request block
17517 * Returns:
17518 * Context: Kernel context.
17519 */
17520 void
17521 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17522 {
17523 ls_code_t els;
17524 fc_packet_t *pkt;
17525 uint8_t *ptr;
17526
17527 pkt = srb->pkt;
17528
17529 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17530 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17531
17532 ptr = (uint8_t *)pkt->pkt_resp;
17533 BIG_ENDIAN_32(&els);
17534 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17535 }
17536
17537 /*
17538 * ql_isp_els_handle_endian - els requests/responses must be in big endian
17539 * in host memory.
17540 * Input: ha = adapter state pointer.
17541 * ptr = els request/response buffer pointer.
17542 * ls_code = els command code.
17543 * Returns:
17544 * Context: Kernel context.
17545 */
17546 void
17547 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
17548 {
17549 switch (ls_code) {
17550 case LA_ELS_PLOGI: {
17551 BIG_ENDIAN_32(ptr); /* Command Code */
17552 ptr += 4;
17553 BIG_ENDIAN_16(ptr); /* FC-PH version */
17554 ptr += 2;
17555 BIG_ENDIAN_16(ptr); /* b2b credit */
17556 ptr += 2;
17557 BIG_ENDIAN_16(ptr); /* Cmn Feature flags */
17558 ptr += 2;
17559 BIG_ENDIAN_16(ptr); /* Rcv data size */
17560 ptr += 2;
17561 BIG_ENDIAN_16(ptr); /* Concurrent Seq */
17562 ptr += 2;
17563 BIG_ENDIAN_16(ptr); /* Rel offset */
17564 ptr += 2;
17565 BIG_ENDIAN_32(ptr); /* E_D_TOV */
17566 ptr += 4; /* Port Name */
17567 ptr += 8; /* Node Name */
17568 ptr += 8; /* Class 1 */
17569 ptr += 16; /* Class 2 */
17570 ptr += 16; /* Class 3 */
17571 BIG_ENDIAN_16(ptr); /* Service options */
17572 ptr += 2;
17573 BIG_ENDIAN_16(ptr); /* Initiator control */
17574 ptr += 2;
17575 BIG_ENDIAN_16(ptr); /* Recipient Control */
17576 ptr += 2;
17577 BIG_ENDIAN_16(ptr); /* Rcv size */
17578 ptr += 2;
17579 BIG_ENDIAN_16(ptr); /* Concurrent Seq */
17580 ptr += 2;
17581 BIG_ENDIAN_16(ptr); /* N_Port e2e credit */
17582 ptr += 2;
17583 BIG_ENDIAN_16(ptr); /* Open Seq/Exch */
17584 break;
17585 }
17586 case LA_ELS_PRLI: {
17587 BIG_ENDIAN_32(ptr); /* Command Code/Page length */
17588 ptr += 4; /* Type */
17589 ptr += 2;
17590 BIG_ENDIAN_16(ptr); /* Flags */
17591 ptr += 2;
17592 BIG_ENDIAN_32(ptr); /* Originator Process associator */
17593 ptr += 4;
17594 BIG_ENDIAN_32(ptr); /* Responder Process associator */
17595 ptr += 4;
17596 BIG_ENDIAN_32(ptr); /* Flags */
17597 break;
17598 }
17599 default:
17600 EL(ha, "can't handle els code %x\n", ls_code);
17601 break;
17602 }
17603 }
17604
17605 /*
17606 * ql_n_port_plogi
 *	In N_Port-to-N_Port (point-to-point) topology, where an N_Port has
 *	logged in with the firmware because it held the N_Port login
 *	initiative, we send up a PLOGI by proxy, which stimulates the login
 *	procedure to continue.
17610 *
17611 * Input:
17612 * ha = adapter state pointer.
17613 * Returns:
17614 *
17615 * Context:
17616 * Kernel context.
17617 */
17618 static int
17619 ql_n_port_plogi(ql_adapter_state_t *ha)
17620 {
17621 int rval;
	ql_tgt_t	*tq = NULL;
17623 ql_head_t done_q = { NULL, NULL };
17624
17625 rval = QL_SUCCESS;
17626
17627 if (ha->topology & QL_N_PORT) {
17628 /* if we're doing this the n_port_handle must be good */
17629 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17630 tq = ql_loop_id_to_queue(ha,
17631 ha->n_port->n_port_handle);
17632 if (tq != NULL) {
17633 (void) ql_send_plogi(ha, tq, &done_q);
17634 } else {
17635 EL(ha, "n_port_handle = %x, tq = %x\n",
17636 ha->n_port->n_port_handle, tq);
17637 }
17638 } else {
17639 EL(ha, "n_port_handle = %x, tq = %x\n",
17640 ha->n_port->n_port_handle, tq);
17641 }
17642 if (done_q.first != NULL) {
17643 ql_done(done_q.first);
17644 }
17645 }
17646 return (rval);
17647 }
17648
17649 /*
17650 * Compare two WWNs. The NAA is omitted for comparison.
17651 *
 * Note particularly that the indentation used in this
 * function isn't according to Sun recommendations.  It
 * is indented to make reading a bit easier.
17655 *
17656 * Return Values:
17657 * if first == second return 0
17658 * if first > second return 1
17659 * if first < second return -1
17660 */
17661 int
17662 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17663 {
17664 la_wwn_t t1, t2;
17665 int rval;
17666
17667 EL(ha, "WWPN=%08x%08x\n",
17668 BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17669 EL(ha, "WWPN=%08x%08x\n",
17670 BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17671 /*
17672 * Fibre Channel protocol is big endian, so compare
17673 * as big endian values
17674 */
17675 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17676 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17677
17678 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17679 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17680
17681 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17682 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17683 rval = 0;
17684 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17685 rval = 1;
17686 } else {
17687 rval = -1;
17688 }
17689 } else {
17690 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17691 rval = 1;
17692 } else {
17693 rval = -1;
17694 }
17695 }
17696 return (rval);
17697 }
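
/*
 * Usage sketch (illustrative only, not part of the driver): ql_wwn_cmp()
 * follows the usual strcmp()-style contract (0, 1 or -1), so a caller
 * deciding, say, which port wins a point-to-point address negotiation
 * can test the result directly.  QL_EXAMPLE is a hypothetical guard.
 */
#ifdef QL_EXAMPLE
static int
ql_example_local_wwn_wins(ql_adapter_state_t *ha, la_wwn_t *mine,
    la_wwn_t *theirs)
{
	/* Nonzero when the local WWN is numerically greater. */
	return (ql_wwn_cmp(ha, mine, theirs) == 1);
}
#endif	/* QL_EXAMPLE */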
17698
17699 /*
17700 * ql_wait_for_td_stop
17701 * Wait for task daemon to stop running. Internal command timeout
 *	is approximately 30 seconds, so it may help in some corner
 *	cases to wait that long.
17704 *
17705 * Input:
17706 * ha = adapter state pointer.
17707 *
17708 * Returns:
17709 * DDI_SUCCESS or DDI_FAILURE.
17710 *
17711 * Context:
17712 * Kernel context.
17713 */
17715 static int
17716 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17717 {
17718 int rval = DDI_FAILURE;
17719 UINT16 wait_cnt;
17720
17721 for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17722 /* The task daemon clears the stop flag on exit. */
17723 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17724 if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17725 ddi_in_panic()) {
17726 drv_usecwait(10000);
17727 } else {
17728 delay(drv_usectohz(10000));
17729 }
17730 } else {
17731 rval = DDI_SUCCESS;
17732 break;
17733 }
17734 }
17735 return (rval);
17736 }
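
/*
 * Illustration only: the loop above polls at most 3000 times with a
 * 10000us (10ms) pause, i.e. roughly 3000 * 10ms = 30 seconds in the
 * worst case, matching the timeout cited in the function header.  The
 * branch chooses drv_usecwait(), which busy-waits and is safe when the
 * scheduler may not be running (panic or CPR suspend), over
 * delay(drv_usectohz(10000)), which blocks the calling thread and lets
 * other work proceed.
 */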
17737
17738 /*
17739 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17740 *
17741 * Input: Pointer to the adapter state structure.
17742 * Returns: Success or Failure.
17743 * Context: Kernel context.
17744 */
17745 int
17746 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17747 {
17748 int rval = DDI_SUCCESS;
17749
17750 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17751
17752 ha->nvram_cache =
17753 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17754 KM_SLEEP);
17755
17756 if (ha->nvram_cache == NULL) {
17757 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17758 " descriptor", QL_NAME, ha->instance);
17759 rval = DDI_FAILURE;
17760 } else {
17761 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17762 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17763 } else {
17764 ha->nvram_cache->size = sizeof (nvram_t);
17765 }
17766 ha->nvram_cache->cache =
17767 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17768 if (ha->nvram_cache->cache == NULL) {
17769 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17770 QL_NAME, ha->instance);
17771 kmem_free(ha->nvram_cache,
17772 sizeof (nvram_cache_desc_t));
			ha->nvram_cache = NULL;
17774 rval = DDI_FAILURE;
17775 } else {
17776 mutex_init(&ha->nvram_cache->mutex, NULL,
17777 MUTEX_DRIVER, NULL);
17778 ha->nvram_cache->valid = 0;
17779 }
17780 }
17781
17782 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17783
17784 return (rval);
17785 }
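
/*
 * Illustration only, hypothetical call sites: the descriptor built here
 * pairs with ql_nvram_cache_desc_dtor() below, typically bracketing the
 * adapter's lifetime:
 *
 *	if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
 *		... fail the attach ...
 *	}
 *	...
 *	(void) ql_nvram_cache_desc_dtor(ha);
 *
 * Note that kmem_zalloc() with KM_SLEEP does not return NULL, so the
 * NULL checks above are purely defensive.
 */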
17786
17787 /*
17788 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17789 *
17790 * Input: Pointer to the adapter state structure.
17791 * Returns: Success or Failure.
17792 * Context: Kernel context.
17793 */
17794 int
17795 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17796 {
17797 int rval = DDI_SUCCESS;
17798
17799 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17800
17801 if (ha->nvram_cache == NULL) {
17802 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17803 QL_NAME, ha->instance);
17804 rval = DDI_FAILURE;
17805 } else {
17806 if (ha->nvram_cache->cache != NULL) {
17807 kmem_free(ha->nvram_cache->cache,
17808 ha->nvram_cache->size);
17809 }
17810 mutex_destroy(&ha->nvram_cache->mutex);
17811 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17812 }
17813
17814 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17815
17816 return (rval);
17817 }
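
/*
 * Illustration only, using a hypothetical desc pointer: the teardown
 * order above is significant; the mutex must be destroyed before the
 * kmem_free() that releases the memory containing it, the reverse of
 * the ctor's initialization order:
 *
 *	mutex_destroy(&desc->mutex);
 *	kmem_free(desc, sizeof (*desc));
 */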
17818
17819 /*
17820 * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17821 *
17822 * Input: Pointer to the adapter state structure.
17823 * Returns: void
17824 * Context: Kernel context.
17825 */
17826 static void
17827 ql_process_idc_event(ql_adapter_state_t *ha)
17828 {
17829 int rval;
17830
17831 switch (ha->idc_mb[0]) {
17832 case MBA_IDC_NOTIFICATION:
17833 /*
17834 * The informational opcode (idc_mb[2]) can be a
17835 * defined value or the mailbox command being executed
17836 * on another function which stimulated this IDC message.
17837 */
17838 ADAPTER_STATE_LOCK(ha);
17839 switch (ha->idc_mb[2]) {
17840 case IDC_OPC_DRV_START:
17841 if (ha->idc_flash_acc != 0) {
17842 ha->idc_flash_acc--;
17843 if (ha->idc_flash_acc == 0) {
17844 ha->idc_flash_acc_timer = 0;
17845 GLOBAL_HW_UNLOCK();
17846 }
17847 }
17848 if (ha->idc_restart_cnt != 0) {
17849 ha->idc_restart_cnt--;
17850 if (ha->idc_restart_cnt == 0) {
17851 ha->idc_restart_timer = 0;
17852 ADAPTER_STATE_UNLOCK(ha);
17853 TASK_DAEMON_LOCK(ha);
17854 ha->task_daemon_flags &= ~DRIVER_STALL;
17855 TASK_DAEMON_UNLOCK(ha);
17856 ql_restart_queues(ha);
17857 } else {
17858 ADAPTER_STATE_UNLOCK(ha);
17859 }
17860 } else {
17861 ADAPTER_STATE_UNLOCK(ha);
17862 }
17863 break;
17864 case IDC_OPC_FLASH_ACC:
17865 ha->idc_flash_acc_timer = 30;
17866 if (ha->idc_flash_acc == 0) {
17867 GLOBAL_HW_LOCK();
17868 }
17869 ha->idc_flash_acc++;
17870 ADAPTER_STATE_UNLOCK(ha);
17871 break;
17872 case IDC_OPC_RESTART_MPI:
17873 ha->idc_restart_timer = 30;
17874 ha->idc_restart_cnt++;
17875 ADAPTER_STATE_UNLOCK(ha);
17876 TASK_DAEMON_LOCK(ha);
17877 ha->task_daemon_flags |= DRIVER_STALL;
17878 TASK_DAEMON_UNLOCK(ha);
17879 break;
17880 case IDC_OPC_PORT_RESET_MBC:
17881 case IDC_OPC_SET_PORT_CONFIG_MBC:
17882 ha->idc_restart_timer = 30;
17883 ha->idc_restart_cnt++;
17884 ADAPTER_STATE_UNLOCK(ha);
17885 TASK_DAEMON_LOCK(ha);
17886 ha->task_daemon_flags |= DRIVER_STALL;
17887 TASK_DAEMON_UNLOCK(ha);
17888 (void) ql_wait_outstanding(ha);
17889 break;
17890 default:
17891 ADAPTER_STATE_UNLOCK(ha);
17892 EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17893 ha->idc_mb[2]);
17894 break;
17895 }
17896 /*
17897 * If there is a timeout value associated with this IDC
17898 * notification then there is an implied requirement
17899 * that we return an ACK.
17900 */
17901 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17902 rval = ql_idc_ack(ha);
17903 if (rval != QL_SUCCESS) {
17904 EL(ha, "idc_ack status=%xh %xh\n", rval,
17905 ha->idc_mb[2]);
17906 }
17907 }
17908 break;
17909 case MBA_IDC_COMPLETE:
		/*
		 * We don't ACK completions; only these opcodes require
		 * action.
		 */
17913 switch (ha->idc_mb[2]) {
17914 case IDC_OPC_PORT_RESET_MBC:
17915 case IDC_OPC_SET_PORT_CONFIG_MBC:
17916 ADAPTER_STATE_LOCK(ha);
17917 if (ha->idc_restart_cnt != 0) {
17918 ha->idc_restart_cnt--;
17919 if (ha->idc_restart_cnt == 0) {
17920 ha->idc_restart_timer = 0;
17921 ADAPTER_STATE_UNLOCK(ha);
17922 TASK_DAEMON_LOCK(ha);
17923 ha->task_daemon_flags &= ~DRIVER_STALL;
17924 TASK_DAEMON_UNLOCK(ha);
17925 ql_restart_queues(ha);
17926 } else {
17927 ADAPTER_STATE_UNLOCK(ha);
17928 }
17929 } else {
17930 ADAPTER_STATE_UNLOCK(ha);
17931 }
17932 break;
17933 default:
17934 break; /* Don't care... */
17935 }
17936 break;
17937 case MBA_IDC_TIME_EXTENDED:
17938 QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17939 "%xh\n", ha->instance, ha->idc_mb[2]);
17940 break;
	default:
		/* No lock is taken on this path, so none is released. */
		EL(ha, "Inconsistent IDC event=%xh %xh\n", ha->idc_mb[0],
		    ha->idc_mb[2]);
		break;
17946 }
17947 }
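
/*
 * Illustration only: the IDC_OPC_FLASH_ACC/IDC_OPC_DRV_START handling
 * above is a counted-lock pattern; the first grant takes
 * GLOBAL_HW_LOCK() and the release that drops the count to zero frees
 * it.  A minimal sketch with hypothetical names (acc_count, take_lock,
 * release_lock):
 *
 *	if (acc_count++ == 0)
 *		take_lock();
 *	...
 *	if (acc_count != 0 && --acc_count == 0)
 *		release_lock();
 *
 * The 30-second idc_flash_acc_timer set with each grant presumably lets
 * a watchdog elsewhere in the driver recover if the matching release
 * notification never arrives.
 */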