Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright 2010 QLogic Corporation */
23 23
24 24 /*
25 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27 /*
28 28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 29 */
30 30
31 31 #pragma ident "Copyright 2010 QLogic Corporation; ql_api.c"
32 32
33 33 /*
34 34 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
35 35 *
36 36 * ***********************************************************************
37 37 * * **
38 38 * * NOTICE **
39 39 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
40 40 * * ALL RIGHTS RESERVED **
41 41 * * **
42 42 * ***********************************************************************
43 43 *
44 44 */
45 45
46 46 #include <ql_apps.h>
47 47 #include <ql_api.h>
48 48 #include <ql_debug.h>
49 49 #include <ql_init.h>
50 50 #include <ql_iocb.h>
51 51 #include <ql_ioctl.h>
52 52 #include <ql_isr.h>
53 53 #include <ql_mbx.h>
54 54 #include <ql_nx.h>
55 55 #include <ql_xioctl.h>
56 56
57 57 /*
58 58 * Solaris external defines.
59 59 */
60 60 extern pri_t minclsyspri;
61 61 extern pri_t maxclsyspri;
62 62
63 63 /*
64 64 * dev_ops functions prototypes
65 65 */
66 66 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
67 67 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
68 68 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
69 69 static int ql_power(dev_info_t *, int, int);
70 70 static int ql_quiesce(dev_info_t *);
71 71
72 72 /*
73 73 * FCA functions prototypes exported by means of the transport table
74 74 */
75 75 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
76 76 fc_fca_bind_info_t *);
77 77 static void ql_unbind_port(opaque_t);
78 78 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
79 79 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
80 80 static int ql_els_send(opaque_t, fc_packet_t *);
81 81 static int ql_get_cap(opaque_t, char *, void *);
82 82 static int ql_set_cap(opaque_t, char *, void *);
83 83 static int ql_getmap(opaque_t, fc_lilpmap_t *);
84 84 static int ql_transport(opaque_t, fc_packet_t *);
85 85 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
86 86 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
87 87 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
88 88 static int ql_abort(opaque_t, fc_packet_t *, int);
89 89 static int ql_reset(opaque_t, uint32_t);
90 90 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
91 91 static opaque_t ql_get_device(opaque_t, fc_portid_t);
92 92
93 93 /*
94 94 * FCA Driver Support Function Prototypes.
95 95 */
96 96 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
97 97 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
98 98 ql_srb_t *);
99 99 static void ql_task_daemon(void *);
100 100 static void ql_task_thread(ql_adapter_state_t *);
101 101 static void ql_unsol_callback(ql_srb_t *);
102 102 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
103 103 fc_unsol_buf_t *);
104 104 static void ql_timer(void *);
105 105 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
106 106 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
107 107 uint32_t *, uint32_t *);
108 108 static void ql_halt(ql_adapter_state_t *, int);
109 109 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
110 110 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
111 111 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
112 112 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
113 113 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
114 114 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
115 115 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
116 116 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
117 117 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
118 118 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
119 119 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
120 120 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
121 121 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
122 122 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
123 123 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
124 124 static int ql_login_port(ql_adapter_state_t *, port_id_t);
125 125 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
126 126 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
127 127 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
128 128 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
129 129 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 130 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
131 131 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
132 132 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
133 133 ql_srb_t *);
134 134 static int ql_kstat_update(kstat_t *, int);
135 135 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
136 136 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
137 137 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
138 138 static void ql_rst_aen(ql_adapter_state_t *);
139 139 static void ql_restart_queues(ql_adapter_state_t *);
140 140 static void ql_abort_queues(ql_adapter_state_t *);
141 141 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
142 142 static void ql_idle_check(ql_adapter_state_t *);
143 143 static int ql_loop_resync(ql_adapter_state_t *);
144 144 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
145 145 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 146 static int ql_save_config_regs(dev_info_t *);
147 147 static int ql_restore_config_regs(dev_info_t *);
148 148 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
149 149 static int ql_handle_rscn_update(ql_adapter_state_t *);
150 150 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
151 151 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
152 152 static int ql_dump_firmware(ql_adapter_state_t *);
153 153 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
154 154 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
155 155 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 156 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
157 157 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
158 158 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
159 159 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
160 160 void *);
161 161 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
162 162 uint8_t);
163 163 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
164 164 static int ql_suspend_adapter(ql_adapter_state_t *);
165 165 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
166 166 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
167 167 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
168 168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
169 169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
170 170 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
171 171 static int ql_setup_interrupts(ql_adapter_state_t *);
172 172 static int ql_setup_msi(ql_adapter_state_t *);
173 173 static int ql_setup_msix(ql_adapter_state_t *);
174 174 static int ql_setup_fixed(ql_adapter_state_t *);
175 175 static void ql_release_intr(ql_adapter_state_t *);
176 176 static void ql_disable_intr(ql_adapter_state_t *);
177 177 static int ql_legacy_intr(ql_adapter_state_t *);
178 178 static int ql_init_mutex(ql_adapter_state_t *);
179 179 static void ql_destroy_mutex(ql_adapter_state_t *);
180 180 static void ql_iidma(ql_adapter_state_t *);
181 181
182 182 static int ql_n_port_plogi(ql_adapter_state_t *);
183 183 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
184 184 els_descriptor_t *);
185 185 static void ql_isp_els_request_ctor(els_descriptor_t *,
186 186 els_passthru_entry_t *);
187 187 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
188 188 static int ql_wait_for_td_stop(ql_adapter_state_t *);
189 189 static void ql_process_idc_event(ql_adapter_state_t *);
190 190
191 191 /*
192 192 * Global data
193 193 */
194 194 static uint8_t ql_enable_pm = 1;
195 195 static int ql_flash_sbus_fpga = 0;
196 196 uint32_t ql_os_release_level;
197 197 uint32_t ql_disable_aif = 0;
198 198 uint32_t ql_disable_msi = 0;
199 199 uint32_t ql_disable_msix = 0;
200 200 uint32_t ql_enable_ets = 0;
201 201 uint16_t ql_osc_wait_count = 1000;
202 202
203 203 /* Timer routine variables. */
204 204 static timeout_id_t ql_timer_timeout_id = NULL;
205 205 static clock_t ql_timer_ticks;
206 206
207 207 /* Soft state head pointer. */
208 208 void *ql_state = NULL;
209 209
210 210 /* Head adapter link. */
211 211 ql_head_t ql_hba = {
212 212 NULL,
213 213 NULL
214 214 };
215 215
216 216 /* Global hba index */
217 217 uint32_t ql_gfru_hba_index = 1;
218 218
219 219 /*
220 220 * Some IP defines and globals
221 221 */
222 222 uint32_t ql_ip_buffer_count = 128;
223 223 uint32_t ql_ip_low_water = 10;
224 224 uint8_t ql_ip_fast_post_count = 5;
225 225 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
226 226
227 227 /* Device AL_PA to Device Head Queue index array. */
228 228 uint8_t ql_alpa_to_index[] = {
229 229 0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
230 230 0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
231 231 0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
232 232 0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
233 233 0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
234 234 0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
235 235 0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
236 236 0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
237 237 0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
238 238 0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
239 239 0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
240 240 0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
241 241 0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
242 242 0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
243 243 0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
244 244 0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
245 245 0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
246 246 0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
247 247 0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
248 248 0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
249 249 0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
250 250 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
251 251 0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
252 252 0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
253 253 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
254 254 0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
255 255 };
256 256
257 257 /* Device loop_id to ALPA array. */
258 258 static uint8_t ql_index_to_alpa[] = {
259 259 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
260 260 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
261 261 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
262 262 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
263 263 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
264 264 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
265 265 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
266 266 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
267 267 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
268 268 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
269 269 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
270 270 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
271 271 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
272 272 };
273 273
274 274 /* 2200 register offsets */
275 275 static reg_off_t reg_off_2200 = {
276 276 0x00, /* flash_address */
277 277 0x02, /* flash_data */
278 278 0x06, /* ctrl_status */
279 279 0x08, /* ictrl */
280 280 0x0a, /* istatus */
↓ open down ↓ |
280 lines elided |
↑ open up ↑ |
281 281 0x0c, /* semaphore */
282 282 0x0e, /* nvram */
283 283 0x18, /* req_in */
284 284 0x18, /* req_out */
285 285 0x1a, /* resp_in */
286 286 0x1a, /* resp_out */
287 287 0xff, /* risc2host - n/a */
288 288 24, /* Number of mailboxes */
289 289
290 290 /* Mailbox in register offsets 0 - 23 */
291 - 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
292 - 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
293 - 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
294 - /* 2200 does not have mailbox 24-31 - n/a */
295 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
291 + { 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
292 + 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
293 + 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
294 + /* 2200 does not have mailbox 24-31 - n/a */
295 + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
296 296
297 297 /* Mailbox out register offsets 0 - 23 */
298 - 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
299 - 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
300 - 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
301 - /* 2200 does not have mailbox 24-31 - n/a */
302 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
298 + { 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
299 + 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
300 + 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
301 + /* 2200 does not have mailbox 24-31 - n/a */
302 + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
303 303
304 304 0x96, /* fpm_diag_config */
305 305 0xa4, /* pcr */
306 306 0xb0, /* mctr */
307 307 0xb8, /* fb_cmd */
308 308 0xc0, /* hccr */
309 309 0xcc, /* gpiod */
310 310 0xce, /* gpioe */
311 311 0xff, /* host_to_host_sema - n/a */
312 312 0xff, /* pri_req_in - n/a */
313 313 0xff, /* pri_req_out - n/a */
314 314 0xff, /* atio_req_in - n/a */
315 315 0xff, /* atio_req_out - n/a */
316 316 0xff, /* io_base_addr - n/a */
317 317 0xff, /* nx_host_int - n/a */
318 318 0xff /* nx_risc_int - n/a */
319 319 };
320 320
321 321 /* 2300 register offsets */
322 322 static reg_off_t reg_off_2300 = {
323 323 0x00, /* flash_address */
324 324 0x02, /* flash_data */
325 325 0x06, /* ctrl_status */
326 326 0x08, /* ictrl */
327 327 0x0a, /* istatus */
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
328 328 0x0c, /* semaphore */
329 329 0x0e, /* nvram */
330 330 0x10, /* req_in */
331 331 0x12, /* req_out */
332 332 0x14, /* resp_in */
333 333 0x16, /* resp_out */
334 334 0x18, /* risc2host */
335 335 32, /* Number of mailboxes */
336 336
337 337 /* Mailbox in register offsets 0 - 31 */
338 - 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
339 - 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
340 - 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
341 - 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
338 + { 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
339 + 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
340 + 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
341 + 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e },
342 342
343 343 /* Mailbox out register offsets 0 - 31 */
344 - 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
345 - 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
346 - 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
347 - 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
344 + { 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
345 + 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
346 + 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
347 + 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e },
348 348
349 349 0x96, /* fpm_diag_config */
350 350 0xa4, /* pcr */
351 351 0xb0, /* mctr */
352 352 0x80, /* fb_cmd */
353 353 0xc0, /* hccr */
354 354 0xcc, /* gpiod */
355 355 0xce, /* gpioe */
356 356 0x1c, /* host_to_host_sema */
357 357 0xff, /* pri_req_in - n/a */
358 358 0xff, /* pri_req_out - n/a */
359 359 0xff, /* atio_req_in - n/a */
360 360 0xff, /* atio_req_out - n/a */
361 361 0xff, /* io_base_addr - n/a */
362 362 0xff, /* nx_host_int - n/a */
363 363 0xff /* nx_risc_int - n/a */
364 364 };
365 365
366 366 /* 2400/2500 register offsets */
367 367 reg_off_t reg_off_2400_2500 = {
368 368 0x00, /* flash_address */
369 369 0x04, /* flash_data */
370 370 0x08, /* ctrl_status */
371 371 0x0c, /* ictrl */
372 372 0x10, /* istatus */
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
373 373 0xff, /* semaphore - n/a */
374 374 0xff, /* nvram - n/a */
375 375 0x1c, /* req_in */
376 376 0x20, /* req_out */
377 377 0x24, /* resp_in */
378 378 0x28, /* resp_out */
379 379 0x44, /* risc2host */
380 380 32, /* Number of mailboxes */
381 381
382 382 /* Mailbox in register offsets 0 - 31 */
383 - 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
384 - 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
385 - 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
386 - 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
383 + { 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
384 + 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
385 + 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
386 + 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe },
387 387
388 388 /* Mailbox out register offsets 0 - 31 */
389 - 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
390 - 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
391 - 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
392 - 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
389 + { 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
390 + 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
391 + 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
392 + 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe },
393 393
394 394 0xff, /* fpm_diag_config - n/a */
395 395 0xff, /* pcr - n/a */
396 396 0xff, /* mctr - n/a */
397 397 0xff, /* fb_cmd - n/a */
398 398 0x48, /* hccr */
399 399 0x4c, /* gpiod */
400 400 0x50, /* gpioe */
401 401 0xff, /* host_to_host_sema - n/a */
402 402 0x2c, /* pri_req_in */
403 403 0x30, /* pri_req_out */
404 404 0x3c, /* atio_req_in */
405 405 0x40, /* atio_req_out */
406 406 0x54, /* io_base_addr */
407 407 0xff, /* nx_host_int - n/a */
408 408 0xff /* nx_risc_int - n/a */
409 409 };
410 410
411 411 /* P3 register offsets */
412 412 static reg_off_t reg_off_8021 = {
413 413 0x00, /* flash_address */
414 414 0x04, /* flash_data */
415 415 0x08, /* ctrl_status */
416 416 0x0c, /* ictrl */
417 417 0x10, /* istatus */
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
418 418 0xff, /* semaphore - n/a */
419 419 0xff, /* nvram - n/a */
420 420 0xff, /* req_in - n/a */
421 421 0x0, /* req_out */
422 422 0x100, /* resp_in */
423 423 0x200, /* resp_out */
424 424 0x500, /* risc2host */
425 425 32, /* Number of mailboxes */
426 426
427 427 /* Mailbox in register offsets 0 - 31 */
428 - 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
429 - 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
430 - 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
431 - 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
428 + { 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
429 + 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
430 + 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
431 + 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e },
432 432
433 433 /* Mailbox out register offsets 0 - 31 */
434 - 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
435 - 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
436 - 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
437 - 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
434 + { 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
435 + 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
436 + 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
437 + 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e },
438 438
439 439 0xff, /* fpm_diag_config - n/a */
440 440 0xff, /* pcr - n/a */
441 441 0xff, /* mctr - n/a */
442 442 0xff, /* fb_cmd - n/a */
443 443 0x48, /* hccr */
444 444 0x4c, /* gpiod */
445 445 0x50, /* gpioe */
446 446 0xff, /* host_to_host_sema - n/a */
447 447 0x2c, /* pri_req_in */
448 448 0x30, /* pri_req_out */
449 449 0x3c, /* atio_req_in */
450 450 0x40, /* atio_req_out */
451 451 0x54, /* io_base_addr */
452 452 0x380, /* nx_host_int */
453 453 0x504 /* nx_risc_int */
454 454 };
455 455
456 456 /* mutex for protecting variables shared by all instances of the driver */
457 457 kmutex_t ql_global_mutex;
458 458 kmutex_t ql_global_hw_mutex;
459 459 kmutex_t ql_global_el_mutex;
460 460
461 461 /* DMA access attribute structure. */
462 462 static ddi_device_acc_attr_t ql_dev_acc_attr = {
463 463 DDI_DEVICE_ATTR_V0,
464 464 DDI_STRUCTURE_LE_ACC,
465 465 DDI_STRICTORDER_ACC
466 466 };
467 467
468 468 /* I/O DMA attributes structures. */
469 469 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
470 470 DMA_ATTR_V0, /* dma_attr_version */
471 471 QL_DMA_LOW_ADDRESS, /* low DMA address range */
472 472 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
473 473 QL_DMA_XFER_COUNTER, /* DMA counter register */
474 474 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
475 475 QL_DMA_BURSTSIZES, /* DMA burstsizes */
476 476 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
477 477 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
478 478 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
479 479 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
480 480 QL_DMA_GRANULARITY, /* granularity of device */
481 481 QL_DMA_XFER_FLAGS /* DMA transfer flags */
482 482 };
483 483
484 484 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
485 485 DMA_ATTR_V0, /* dma_attr_version */
486 486 QL_DMA_LOW_ADDRESS, /* low DMA address range */
487 487 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */
488 488 QL_DMA_XFER_COUNTER, /* DMA counter register */
489 489 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
490 490 QL_DMA_BURSTSIZES, /* DMA burstsizes */
491 491 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
492 492 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
493 493 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
494 494 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
495 495 QL_DMA_GRANULARITY, /* granularity of device */
496 496 QL_DMA_XFER_FLAGS /* DMA transfer flags */
497 497 };
498 498
499 499 /* Load the default dma attributes */
500 500 static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
501 501 static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
502 502 static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
503 503 static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
504 504 static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
505 505 static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
506 506 static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
507 507 static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
508 508 static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
509 509 static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
510 510 static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
511 511 static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
512 512 static ddi_dma_attr_t ql_32fcp_data_dma_attr;
513 513 static ddi_dma_attr_t ql_64fcp_data_dma_attr;
514 514
515 515 /* Static declarations of cb_ops entry point functions... */
516 516 static struct cb_ops ql_cb_ops = {
517 517 ql_open, /* b/c open */
518 518 ql_close, /* b/c close */
519 519 nodev, /* b strategy */
520 520 nodev, /* b print */
521 521 nodev, /* b dump */
522 522 nodev, /* c read */
523 523 nodev, /* c write */
524 524 ql_ioctl, /* c ioctl */
525 525 nodev, /* c devmap */
526 526 nodev, /* c mmap */
527 527 nodev, /* c segmap */
528 528 nochpoll, /* c poll */
529 529 nodev, /* cb_prop_op */
530 530 NULL, /* streamtab */
531 531 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */
532 532 CB_REV, /* cb_ops revision */
533 533 nodev, /* c aread */
534 534 nodev /* c awrite */
535 535 };
536 536
537 537 /* Static declarations of dev_ops entry point functions... */
538 538 static struct dev_ops ql_devops = {
539 539 DEVO_REV, /* devo_rev */
540 540 0, /* refcnt */
541 541 ql_getinfo, /* getinfo */
542 542 nulldev, /* identify */
543 543 nulldev, /* probe */
544 544 ql_attach, /* attach */
545 545 ql_detach, /* detach */
546 546 nodev, /* reset */
547 547 &ql_cb_ops, /* char/block ops */
548 548 NULL, /* bus operations */
549 549 ql_power, /* power management */
550 550 ql_quiesce /* quiesce device */
551 551 };
552 552
553 553 /* ELS command code to text converter */
554 554 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
555 555 /* Mailbox command code to text converter */
556 556 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
557 557
558 558 char qlc_driver_version[] = QL_VERSION;
559 559
560 560 /*
561 561 * Loadable Driver Interface Structures.
↓ open down ↓ |
114 lines elided |
↑ open up ↑ |
562 562 * Declare and initialize the module configuration section...
563 563 */
564 564 static struct modldrv modldrv = {
565 565 &mod_driverops, /* type of module: driver */
566 566 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */
567 567 &ql_devops /* driver dev_ops */
568 568 };
569 569
570 570 static struct modlinkage modlinkage = {
571 571 MODREV_1,
572 - &modldrv,
573 - NULL
572 + { &modldrv, NULL }
574 573 };
575 574
576 575 /* ************************************************************************ */
577 576 /* Loadable Module Routines. */
578 577 /* ************************************************************************ */
579 578
580 579 /*
581 580 * _init
582 581 * Initializes a loadable module. It is called before any other
583 582 * routine in a loadable module.
584 583 *
585 584 * Returns:
586 585 * 0 = success
587 586 *
588 587 * Context:
589 588 * Kernel context.
590 589 */
591 590 int
592 591 _init(void)
593 592 {
594 593 uint16_t w16;
595 594 int rval = 0;
596 595
597 596 /* Get OS major release level. */
598 597 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
599 598 if (utsname.release[w16] == '.') {
600 599 w16++;
601 600 break;
602 601 }
603 602 }
604 603 if (w16 < sizeof (utsname.release)) {
605 604 (void) ql_bstr_to_dec(&utsname.release[w16],
606 605 &ql_os_release_level, 0);
607 606 } else {
608 607 ql_os_release_level = 0;
609 608 }
610 609 if (ql_os_release_level < 6) {
611 610 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
612 611 QL_NAME, ql_os_release_level);
613 612 rval = EINVAL;
614 613 }
615 614 if (ql_os_release_level == 6) {
616 615 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
617 616 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
618 617 }
619 618
620 619 if (rval == 0) {
621 620 rval = ddi_soft_state_init(&ql_state,
622 621 sizeof (ql_adapter_state_t), 0);
623 622 }
624 623 if (rval == 0) {
625 624 /* allow the FC Transport to tweak the dev_ops */
626 625 fc_fca_init(&ql_devops);
627 626
628 627 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
629 628 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
630 629 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
631 630 rval = mod_install(&modlinkage);
632 631 if (rval != 0) {
633 632 mutex_destroy(&ql_global_hw_mutex);
634 633 mutex_destroy(&ql_global_mutex);
635 634 mutex_destroy(&ql_global_el_mutex);
636 635 ddi_soft_state_fini(&ql_state);
637 636 } else {
638 637 /*EMPTY*/
639 638 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
640 639 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
641 640 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
642 641 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
643 642 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
644 643 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
645 644 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
646 645 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
647 646 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
648 647 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
649 648 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
650 649 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
651 650 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
652 651 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
653 652 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
654 653 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
655 654 QL_FCSM_CMD_SGLLEN;
656 655 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
657 656 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
658 657 QL_FCSM_RSP_SGLLEN;
659 658 ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
660 659 ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
661 660 QL_FCIP_CMD_SGLLEN;
662 661 ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
663 662 ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
664 663 QL_FCIP_RSP_SGLLEN;
665 664 ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
666 665 ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
667 666 QL_FCP_CMD_SGLLEN;
668 667 ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
669 668 ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
670 669 QL_FCP_RSP_SGLLEN;
671 670 }
672 671 }
673 672
674 673 if (rval != 0) {
675 674 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
676 675 QL_NAME);
677 676 }
678 677
679 678 return (rval);
680 679 }
681 680
682 681 /*
683 682 * _fini
684 683 * Prepares a module for unloading. It is called when the system
685 684 * wants to unload a module. If the module determines that it can
686 685 * be unloaded, then _fini() returns the value returned by
687 686 * mod_remove(). Upon successful return from _fini() no other
688 687 * routine in the module will be called before _init() is called.
689 688 *
690 689 * Returns:
691 690 * 0 = success
692 691 *
693 692 * Context:
694 693 * Kernel context.
695 694 */
696 695 int
697 696 _fini(void)
698 697 {
699 698 int rval;
700 699
701 700 rval = mod_remove(&modlinkage);
702 701 if (rval == 0) {
703 702 mutex_destroy(&ql_global_hw_mutex);
704 703 mutex_destroy(&ql_global_mutex);
705 704 mutex_destroy(&ql_global_el_mutex);
706 705 ddi_soft_state_fini(&ql_state);
707 706 }
708 707
709 708 return (rval);
710 709 }
711 710
712 711 /*
713 712 * _info
714 713 * Returns information about loadable module.
715 714 *
716 715 * Input:
717 716 * modinfo = pointer to module information structure.
718 717 *
719 718 * Returns:
720 719 * Value returned by mod_info().
721 720 *
722 721 * Context:
723 722 * Kernel context.
724 723 */
725 724 int
726 725 _info(struct modinfo *modinfop)
727 726 {
728 727 return (mod_info(&modlinkage, modinfop));
729 728 }
730 729
731 730 /* ************************************************************************ */
732 731 /* dev_ops functions */
733 732 /* ************************************************************************ */
734 733
735 734 /*
736 735 * ql_getinfo
737 736 * Returns the pointer associated with arg when cmd is
738 737 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
739 738 * instance number associated with arg when cmd is set
740 739  *	to DDI_INFO_DEVT2INSTANCE.
741 740 *
742 741 * Input:
743 742 * dip = Do not use.
744 743 * cmd = command argument.
745 744 * arg = command specific argument.
746 745 * resultp = pointer to where request information is stored.
747 746 *
748 747 * Returns:
749 748 * DDI_SUCCESS or DDI_FAILURE.
750 749 *
751 750 * Context:
752 751 * Kernel context.
753 752 */
754 753 /* ARGSUSED */
755 754 static int
756 755 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
757 756 {
758 757 ql_adapter_state_t *ha;
759 758 int minor;
760 759 int rval = DDI_FAILURE;
761 760
762 761 minor = (int)(getminor((dev_t)arg));
763 762 ha = ddi_get_soft_state(ql_state, minor);
764 763 if (ha == NULL) {
765 764 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
766 765 getminor((dev_t)arg));
767 766 *resultp = NULL;
768 767 return (rval);
769 768 }
770 769
771 770 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
772 771
773 772 switch (cmd) {
774 773 case DDI_INFO_DEVT2DEVINFO:
775 774 *resultp = ha->dip;
776 775 rval = DDI_SUCCESS;
777 776 break;
778 777 case DDI_INFO_DEVT2INSTANCE:
779 778 *resultp = (void *)(uintptr_t)(ha->instance);
780 779 rval = DDI_SUCCESS;
781 780 break;
782 781 default:
783 782 EL(ha, "failed, unsupported cmd=%d\n", cmd);
784 783 rval = DDI_FAILURE;
785 784 break;
786 785 }
787 786
788 787 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
789 788
790 789 return (rval);
791 790 }
792 791
793 792 /*
794 793 * ql_attach
795 794 * Configure and attach an instance of the driver
796 795 * for a port.
797 796 *
798 797 * Input:
799 798 * dip = pointer to device information structure.
800 799 * cmd = attach type.
801 800 *
802 801 * Returns:
803 802 * DDI_SUCCESS or DDI_FAILURE.
804 803 *
805 804 * Context:
806 805 * Kernel context.
807 806 */
808 807 static int
809 808 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
810 809 {
811 810 off_t regsize;
812 811 uint32_t size;
813 812 int rval, *ptr;
814 813 int instance;
815 814 uint_t progress = 0;
816 815 char *buf;
817 816 ushort_t caps_ptr, cap;
818 817 fc_fca_tran_t *tran;
819 818 ql_adapter_state_t *ha = NULL;
820 819
821 820 static char *pmcomps[] = {
822 821 NULL,
823 822 PM_LEVEL_D3_STR, /* Device OFF */
824 823 PM_LEVEL_D0_STR, /* Device ON */
825 824 };
826 825
827 826 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
828 827 ddi_get_instance(dip), cmd);
829 828
830 829 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
831 830
832 831 switch (cmd) {
833 832 case DDI_ATTACH:
834 833 /* first get the instance */
835 834 instance = ddi_get_instance(dip);
836 835
837 836 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
838 837 QL_NAME, instance, QL_VERSION);
839 838
840 839 /* Correct OS version? */
841 840 if (ql_os_release_level != 11) {
842 841 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
843 842 "11", QL_NAME, instance);
844 843 goto attach_failed;
845 844 }
846 845
847 846 /* Hardware is installed in a DMA-capable slot? */
848 847 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
849 848 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
850 849 instance);
851 850 goto attach_failed;
852 851 }
853 852
854 853 /* No support for high-level interrupts */
855 854 if (ddi_intr_hilevel(dip, 0) != 0) {
856 855 cmn_err(CE_WARN, "%s(%d): High level interrupt"
857 856 " not supported", QL_NAME, instance);
858 857 goto attach_failed;
859 858 }
860 859
861 860 /* Allocate our per-device-instance structure */
862 861 if (ddi_soft_state_zalloc(ql_state,
863 862 instance) != DDI_SUCCESS) {
864 863 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
865 864 QL_NAME, instance);
866 865 goto attach_failed;
867 866 }
868 867 progress |= QL_SOFT_STATE_ALLOCED;
869 868
870 869 ha = ddi_get_soft_state(ql_state, instance);
871 870 if (ha == NULL) {
872 871 cmn_err(CE_WARN, "%s(%d): can't get soft state",
873 872 QL_NAME, instance);
874 873 goto attach_failed;
875 874 }
876 875 ha->dip = dip;
877 876 ha->instance = instance;
878 877 ha->hba.base_address = ha;
879 878 ha->pha = ha;
880 879
881 880 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
882 881 cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
883 882 QL_NAME, instance);
884 883 goto attach_failed;
885 884 }
886 885
887 886 /* Get extended logging and dump flags. */
888 887 ql_common_properties(ha);
889 888
890 889 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
891 890 "sbus") == 0) {
892 891 EL(ha, "%s SBUS card detected", QL_NAME);
893 892 ha->cfg_flags |= CFG_SBUS_CARD;
894 893 }
895 894
896 895 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
897 896 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
898 897
899 898 ha->outstanding_cmds = kmem_zalloc(
900 899 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
901 900 KM_SLEEP);
902 901
903 902 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
904 903 QL_UB_LIMIT, KM_SLEEP);
905 904
906 905 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
907 906 KM_SLEEP);
908 907
909 908 (void) ddi_pathname(dip, buf);
910 909 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
911 910 if (ha->devpath == NULL) {
912 911 EL(ha, "devpath mem alloc failed\n");
913 912 } else {
914 913 (void) strcpy(ha->devpath, buf);
915 914 EL(ha, "devpath is: %s\n", ha->devpath);
916 915 }
917 916
918 917 if (CFG_IST(ha, CFG_SBUS_CARD)) {
919 918 /*
920 919 * For cards where PCI is mapped to sbus e.g. Ivory.
921 920 *
922 921 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
923 922 * : 0x100 - 0x3FF PCI IO space for 2200
924 923 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
925 924 * : 0x100 - 0x3FF PCI IO Space for fpga
926 925 */
927 926 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
928 927 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
929 928 DDI_SUCCESS) {
930 929 cmn_err(CE_WARN, "%s(%d): Unable to map device"
931 930 " registers", QL_NAME, instance);
932 931 goto attach_failed;
933 932 }
934 933 if (ddi_regs_map_setup(dip, 1,
935 934 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
936 935 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
937 936 DDI_SUCCESS) {
938 937 /* We should not fail attach here */
939 938 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
940 939 QL_NAME, instance);
941 940 ha->sbus_fpga_iobase = NULL;
942 941 }
943 942 progress |= QL_REGS_MAPPED;
944 943
945 944 /*
946 945 * We should map config space before adding interrupt
947 946 * So that the chip type (2200 or 2300) can be
948 947 * determined before the interrupt routine gets a
949 948 * chance to execute.
950 949 */
951 950 if (ddi_regs_map_setup(dip, 0,
952 951 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
953 952 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
954 953 DDI_SUCCESS) {
955 954 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
956 955 "config registers", QL_NAME, instance);
957 956 goto attach_failed;
958 957 }
959 958 progress |= QL_CONFIG_SPACE_SETUP;
960 959 } else {
961 960 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
962 961 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
963 962 DDI_PROP_DONTPASS, "reg", &ptr, &size);
964 963 if (rval != DDI_PROP_SUCCESS) {
965 964 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
966 965 "address registers", QL_NAME, instance);
967 966 goto attach_failed;
968 967 } else {
969 968 ha->pci_bus_addr = ptr[0];
970 969 ha->function_number = (uint8_t)
971 970 (ha->pci_bus_addr >> 8 & 7);
972 971 ddi_prop_free(ptr);
973 972 }
974 973
975 974 /*
976 975 * We should map config space before adding interrupt
977 976 * So that the chip type (2200 or 2300) can be
978 977 * determined before the interrupt routine gets a
979 978 * chance to execute.
980 979 */
981 980 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
982 981 DDI_SUCCESS) {
983 982 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
984 983 "config space", QL_NAME, instance);
985 984 goto attach_failed;
986 985 }
987 986 progress |= QL_CONFIG_SPACE_SETUP;
988 987
989 988 /*
990 989 * Setup the ISP2200 registers address mapping to be
991 990 * accessed by this particular driver.
992 991 * 0x0 Configuration Space
993 992 * 0x1 I/O Space
994 993 * 0x2 32-bit Memory Space address
995 994 * 0x3 64-bit Memory Space address
996 995 */
997 996 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
998 997 2 : 1;
999 998 if (ddi_dev_regsize(dip, size, ®size) !=
1000 999 DDI_SUCCESS ||
1001 1000 ddi_regs_map_setup(dip, size, &ha->iobase,
1002 1001 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1003 1002 DDI_SUCCESS) {
1004 1003 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1005 1004 "failed", QL_NAME, instance);
1006 1005 goto attach_failed;
1007 1006 }
1008 1007 progress |= QL_REGS_MAPPED;
1009 1008
1010 1009 /*
1011 1010 * We need I/O space mappings for 23xx HBAs for
1012 1011 * loading flash (FCode). The chip has a bug due to
1013 1012 * which loading flash fails through mem space
1014 1013 * mappings in PCI-X mode.
1015 1014 */
1016 1015 if (size == 1) {
1017 1016 ha->iomap_iobase = ha->iobase;
1018 1017 ha->iomap_dev_handle = ha->dev_handle;
1019 1018 } else {
1020 1019 if (ddi_dev_regsize(dip, 1, ®size) !=
1021 1020 DDI_SUCCESS ||
1022 1021 ddi_regs_map_setup(dip, 1,
1023 1022 &ha->iomap_iobase, 0, regsize,
1024 1023 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1025 1024 DDI_SUCCESS) {
1026 1025 cmn_err(CE_WARN, "%s(%d): regs_map_"
1027 1026 "setup(I/O) failed", QL_NAME,
1028 1027 instance);
1029 1028 goto attach_failed;
1030 1029 }
1031 1030 progress |= QL_IOMAP_IOBASE_MAPPED;
1032 1031 }
1033 1032 }
1034 1033
1035 1034 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1036 1035 PCI_CONF_SUBSYSID);
1037 1036 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1038 1037 PCI_CONF_SUBVENID);
1039 1038 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1040 1039 PCI_CONF_VENID);
1041 1040 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1042 1041 PCI_CONF_DEVID);
1043 1042 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1044 1043 PCI_CONF_REVID);
1045 1044
1046 1045 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1047 1046 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1048 1047 ha->subven_id, ha->subsys_id);
1049 1048
1050 1049 switch (ha->device_id) {
1051 1050 case 0x2300:
1052 1051 case 0x2312:
1053 1052 case 0x2322:
1054 1053 case 0x6312:
1055 1054 case 0x6322:
1056 1055 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1057 1056 ha->flags |= FUNCTION_1;
1058 1057 }
1059 1058 if ((ha->device_id == 0x6322) ||
1060 1059 (ha->device_id == 0x2322)) {
1061 1060 ha->cfg_flags |= CFG_CTRL_6322;
1062 1061 ha->fw_class = 0x6322;
1063 1062 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1064 1063 } else {
1065 1064 ha->cfg_flags |= CFG_CTRL_2300;
1066 1065 ha->fw_class = 0x2300;
1067 1066 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1068 1067 }
1069 1068 ha->reg_off = ®_off_2300;
1070 1069 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1071 1070 goto attach_failed;
1072 1071 }
1073 1072 ha->fcp_cmd = ql_command_iocb;
1074 1073 ha->ip_cmd = ql_ip_iocb;
1075 1074 ha->ms_cmd = ql_ms_iocb;
1076 1075 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1077 1076 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1078 1077 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1079 1078 } else {
1080 1079 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1081 1080 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1082 1081 }
1083 1082 break;
1084 1083
1085 1084 case 0x2200:
1086 1085 ha->cfg_flags |= CFG_CTRL_2200;
1087 1086 ha->reg_off = ®_off_2200;
1088 1087 ha->fw_class = 0x2200;
1089 1088 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1090 1089 goto attach_failed;
1091 1090 }
1092 1091 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1093 1092 ha->fcp_cmd = ql_command_iocb;
1094 1093 ha->ip_cmd = ql_ip_iocb;
1095 1094 ha->ms_cmd = ql_ms_iocb;
1096 1095 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1097 1096 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1098 1097 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1099 1098 } else {
1100 1099 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1101 1100 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1102 1101 }
1103 1102 break;
1104 1103
1105 1104 case 0x2422:
1106 1105 case 0x2432:
1107 1106 case 0x5422:
1108 1107 case 0x5432:
1109 1108 case 0x8432:
1110 1109 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1111 1110 ha->flags |= FUNCTION_1;
1112 1111 }
1113 1112 ha->cfg_flags |= CFG_CTRL_2422;
1114 1113 if (ha->device_id == 0x8432) {
1115 1114 ha->cfg_flags |= CFG_CTRL_MENLO;
1116 1115 } else {
1117 1116 ha->flags |= VP_ENABLED;
1118 1117 }
1119 1118
1120 1119 ha->reg_off = ®_off_2400_2500;
1121 1120 ha->fw_class = 0x2400;
1122 1121 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1123 1122 goto attach_failed;
1124 1123 }
1125 1124 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1126 1125 ha->fcp_cmd = ql_command_24xx_iocb;
1127 1126 ha->ip_cmd = ql_ip_24xx_iocb;
1128 1127 ha->ms_cmd = ql_ms_24xx_iocb;
1129 1128 ha->els_cmd = ql_els_24xx_iocb;
1130 1129 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1131 1130 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1132 1131 break;
1133 1132
1134 1133 case 0x2522:
1135 1134 case 0x2532:
1136 1135 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1137 1136 ha->flags |= FUNCTION_1;
1138 1137 }
1139 1138 ha->cfg_flags |= CFG_CTRL_25XX;
1140 1139 ha->flags |= VP_ENABLED;
1141 1140 ha->fw_class = 0x2500;
1142 1141 ha->reg_off = ®_off_2400_2500;
1143 1142 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1144 1143 goto attach_failed;
1145 1144 }
1146 1145 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1147 1146 ha->fcp_cmd = ql_command_24xx_iocb;
1148 1147 ha->ip_cmd = ql_ip_24xx_iocb;
1149 1148 ha->ms_cmd = ql_ms_24xx_iocb;
1150 1149 ha->els_cmd = ql_els_24xx_iocb;
1151 1150 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1152 1151 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1153 1152 break;
1154 1153
1155 1154 case 0x8001:
1156 1155 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1157 1156 ha->flags |= FUNCTION_1;
1158 1157 }
1159 1158 ha->cfg_flags |= CFG_CTRL_81XX;
1160 1159 ha->flags |= VP_ENABLED;
1161 1160 ha->fw_class = 0x8100;
1162 1161 ha->reg_off = ®_off_2400_2500;
1163 1162 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1164 1163 goto attach_failed;
1165 1164 }
1166 1165 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1167 1166 ha->fcp_cmd = ql_command_24xx_iocb;
1168 1167 ha->ip_cmd = ql_ip_24xx_iocb;
1169 1168 ha->ms_cmd = ql_ms_24xx_iocb;
1170 1169 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1171 1170 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1172 1171 break;
1173 1172
1174 1173 case 0x8021:
1175 1174 if (ha->function_number & BIT_0) {
1176 1175 ha->flags |= FUNCTION_1;
1177 1176 }
1178 1177 ha->cfg_flags |= CFG_CTRL_8021;
1179 1178 ha->reg_off = ®_off_8021;
1180 1179 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1181 1180 ha->fcp_cmd = ql_command_24xx_iocb;
1182 1181 ha->ms_cmd = ql_ms_24xx_iocb;
1183 1182 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1184 1183 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1185 1184
1186 1185 ha->nx_pcibase = ha->iobase;
1187 1186 ha->iobase += 0xBC000 + (ha->function_number << 11);
1188 1187 ha->iomap_iobase += 0xBC000 +
1189 1188 (ha->function_number << 11);
1190 1189
1191 1190 /* map doorbell */
1192 1191 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
1193 1192 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1194 1193 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1195 1194 DDI_SUCCESS) {
1196 1195 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1197 1196 "(doorbell) failed", QL_NAME, instance);
1198 1197 goto attach_failed;
1199 1198 }
1200 1199 progress |= QL_DB_IOBASE_MAPPED;
1201 1200
1202 1201 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1203 1202 (ha->function_number << 12));
1204 1203 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1205 1204 (ha->function_number * 8);
1206 1205
1207 1206 ql_8021_update_crb_int_ptr(ha);
1208 1207 ql_8021_set_drv_active(ha);
1209 1208 break;
1210 1209
1211 1210 default:
1212 1211 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1213 1212 QL_NAME, instance, ha->device_id);
1214 1213 goto attach_failed;
1215 1214 }
1216 1215
1217 1216 /* Setup hba buffer. */
1218 1217
1219 1218 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1220 1219 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1221 1220 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1222 1221 RCVBUF_QUEUE_SIZE);
1223 1222
1224 1223 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1225 1224 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1226 1225 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1227 1226 "alloc failed", QL_NAME, instance);
1228 1227 goto attach_failed;
1229 1228 }
1230 1229 progress |= QL_HBA_BUFFER_SETUP;
1231 1230
1232 1231 /* Setup buffer pointers. */
1233 1232 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1234 1233 REQUEST_Q_BUFFER_OFFSET;
1235 1234 ha->request_ring_bp = (struct cmd_entry *)
1236 1235 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1237 1236
1238 1237 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1239 1238 RESPONSE_Q_BUFFER_OFFSET;
1240 1239 ha->response_ring_bp = (struct sts_entry *)
1241 1240 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1242 1241
1243 1242 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1244 1243 RCVBUF_Q_BUFFER_OFFSET;
1245 1244 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1246 1245 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1247 1246
1248 1247 /* Allocate resource for QLogic IOCTL */
1249 1248 (void) ql_alloc_xioctl_resource(ha);
1250 1249
1251 1250 /* Setup interrupts */
1252 1251 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1253 1252 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1254 1253 "rval=%xh", QL_NAME, instance, rval);
1255 1254 goto attach_failed;
1256 1255 }
1257 1256
1258 1257 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1259 1258
1260 1259 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1261 1260 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1262 1261 QL_NAME, instance);
1263 1262 goto attach_failed;
1264 1263 }
1265 1264
1266 1265 /*
1267 1266 * Allocate an N Port information structure
1268 1267 * for use when in P2P topology.
1269 1268 */
1270 1269 ha->n_port = (ql_n_port_info_t *)
1271 1270 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1272 1271 if (ha->n_port == NULL) {
1273 1272 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1274 1273 QL_NAME, instance);
1275 1274 goto attach_failed;
1276 1275 }
1277 1276
1278 1277 progress |= QL_N_PORT_INFO_CREATED;
1279 1278
1280 1279 /*
1281 1280 * Determine support for Power Management
1282 1281 */
1283 1282 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1284 1283
1285 1284 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1286 1285 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1287 1286 if (cap == PCI_CAP_ID_PM) {
1288 1287 ha->pm_capable = 1;
1289 1288 break;
1290 1289 }
1291 1290 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1292 1291 PCI_CAP_NEXT_PTR);
1293 1292 }
1294 1293
1295 1294 if (ha->pm_capable) {
1296 1295 /*
1297 1296 * Enable PM for 2200 based HBAs only.
1298 1297 */
1299 1298 if (ha->device_id != 0x2200) {
1300 1299 ha->pm_capable = 0;
1301 1300 }
1302 1301 }
1303 1302
1304 1303 if (ha->pm_capable) {
1305 1304 ha->pm_capable = ql_enable_pm;
1306 1305 }
1307 1306
1308 1307 if (ha->pm_capable) {
1309 1308 /*
1310 1309 * Initialize power management bookkeeping;
1311 1310 * components are created idle.
1312 1311 */
1313 1312 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1314 1313 pmcomps[0] = buf;
1315 1314
1316 1315 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1317 1316 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1318 1317 dip, "pm-components", pmcomps,
1319 1318 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1320 1319 DDI_PROP_SUCCESS) {
1321 1320 cmn_err(CE_WARN, "%s(%d): failed to create"
1322 1321 " pm-components property", QL_NAME,
1323 1322 instance);
1324 1323
1325 1324 /* Initialize adapter. */
1326 1325 ha->power_level = PM_LEVEL_D0;
1327 1326 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1328 1327 cmn_err(CE_WARN, "%s(%d): failed to"
1329 1328 " initialize adapter", QL_NAME,
1330 1329 instance);
1331 1330 goto attach_failed;
1332 1331 }
1333 1332 } else {
1334 1333 ha->power_level = PM_LEVEL_D3;
1335 1334 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1336 1335 PM_LEVEL_D0) != DDI_SUCCESS) {
1337 1336 cmn_err(CE_WARN, "%s(%d): failed to"
1338 1337 " raise power or initialize"
1339 1338 " adapter", QL_NAME, instance);
1340 1339 }
1341 1340 }
1342 1341 } else {
1343 1342 /* Initialize adapter. */
1344 1343 ha->power_level = PM_LEVEL_D0;
1345 1344 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1346 1345 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1347 1346 " adapter", QL_NAME, instance);
1348 1347 }
1349 1348 }
1350 1349
1351 1350 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1352 1351 ha->fw_subminor_version == 0) {
1353 1352 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1354 1353 QL_NAME, ha->instance);
1355 1354 } else {
1356 1355 int rval;
1357 1356 char ver_fmt[256];
1358 1357
1359 1358 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1360 1359 "Firmware version %d.%d.%d", ha->fw_major_version,
1361 1360 ha->fw_minor_version, ha->fw_subminor_version);
1362 1361
1363 1362 if (CFG_IST(ha, CFG_CTRL_81XX)) {
1364 1363 rval = (int)snprintf(ver_fmt + rval,
1365 1364 (size_t)sizeof (ver_fmt),
1366 1365 ", MPI fw version %d.%d.%d",
1367 1366 ha->mpi_fw_major_version,
1368 1367 ha->mpi_fw_minor_version,
1369 1368 ha->mpi_fw_subminor_version);
1370 1369
1371 1370 if (ha->subsys_id == 0x17B ||
1372 1371 ha->subsys_id == 0x17D) {
1373 1372 (void) snprintf(ver_fmt + rval,
1374 1373 (size_t)sizeof (ver_fmt),
1375 1374 ", PHY fw version %d.%d.%d",
1376 1375 ha->phy_fw_major_version,
1377 1376 ha->phy_fw_minor_version,
1378 1377 ha->phy_fw_subminor_version);
1379 1378 }
1380 1379 }
1381 1380 cmn_err(CE_NOTE, "!%s(%d): %s",
1382 1381 QL_NAME, ha->instance, ver_fmt);
1383 1382 }
1384 1383
1385 1384 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1386 1385 "controller", KSTAT_TYPE_RAW,
1387 1386 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1388 1387 if (ha->k_stats == NULL) {
1389 1388 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1390 1389 QL_NAME, instance);
1391 1390 goto attach_failed;
1392 1391 }
1393 1392 progress |= QL_KSTAT_CREATED;
1394 1393
1395 1394 ha->adapter_stats->version = 1;
1396 1395 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1397 1396 ha->k_stats->ks_private = ha;
1398 1397 ha->k_stats->ks_update = ql_kstat_update;
1399 1398 ha->k_stats->ks_ndata = 1;
1400 1399 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1401 1400 kstat_install(ha->k_stats);
1402 1401
1403 1402 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1404 1403 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1405 1404 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1406 1405 QL_NAME, instance);
1407 1406 goto attach_failed;
1408 1407 }
1409 1408 progress |= QL_MINOR_NODE_CREATED;
1410 1409
1411 1410 /* Allocate a transport structure for this instance */
1412 1411 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1413 1412 if (tran == NULL) {
1414 1413 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1415 1414 QL_NAME, instance);
1416 1415 goto attach_failed;
1417 1416 }
1418 1417
1419 1418 progress |= QL_FCA_TRAN_ALLOCED;
1420 1419
1421 1420 /* fill in the structure */
1422 1421 tran->fca_numports = 1;
1423 1422 tran->fca_version = FCTL_FCA_MODREV_5;
1424 1423 if (CFG_IST(ha, CFG_CTRL_2422)) {
1425 1424 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1426 1425 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1427 1426 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1428 1427 }
1429 1428 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1430 1429 tran->fca_perm_pwwn.raw_wwn, 8);
1431 1430
1432 1431 EL(ha, "FCA version %d\n", tran->fca_version);
1433 1432
1434 1433 /* Specify the amount of space needed in each packet */
1435 1434 tran->fca_pkt_size = sizeof (ql_srb_t);
1436 1435
1437 1436 /* command limits are usually dictated by hardware */
1438 1437 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1439 1438
1440 1439 /* dmaattr are static, set elsewhere. */
1441 1440 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1442 1441 tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1443 1442 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1444 1443 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1445 1444 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1446 1445 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1447 1446 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1448 1447 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1449 1448 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1450 1449 } else {
1451 1450 tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1452 1451 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1453 1452 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1454 1453 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1455 1454 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1456 1455 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1457 1456 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1458 1457 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1459 1458 }
1460 1459
1461 1460 tran->fca_acc_attr = &ql_dev_acc_attr;
1462 1461 tran->fca_iblock = &(ha->iblock_cookie);
1463 1462
1464 1463 /* the remaining values are simply function vectors */
1465 1464 tran->fca_bind_port = ql_bind_port;
1466 1465 tran->fca_unbind_port = ql_unbind_port;
1467 1466 tran->fca_init_pkt = ql_init_pkt;
1468 1467 tran->fca_un_init_pkt = ql_un_init_pkt;
1469 1468 tran->fca_els_send = ql_els_send;
1470 1469 tran->fca_get_cap = ql_get_cap;
1471 1470 tran->fca_set_cap = ql_set_cap;
1472 1471 tran->fca_getmap = ql_getmap;
1473 1472 tran->fca_transport = ql_transport;
1474 1473 tran->fca_ub_alloc = ql_ub_alloc;
1475 1474 tran->fca_ub_free = ql_ub_free;
1476 1475 tran->fca_ub_release = ql_ub_release;
1477 1476 tran->fca_abort = ql_abort;
1478 1477 tran->fca_reset = ql_reset;
1479 1478 tran->fca_port_manage = ql_port_manage;
1480 1479 tran->fca_get_device = ql_get_device;
1481 1480
1482 1481 /* give it to the FC transport */
1483 1482 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1484 1483 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1485 1484 instance);
1486 1485 goto attach_failed;
1487 1486 }
1488 1487 progress |= QL_FCA_ATTACH_DONE;
1489 1488
1490 1489 /* Stash the structure so it can be freed at detach */
1491 1490 ha->tran = tran;
1492 1491
1493 1492 /* Acquire global state lock. */
1494 1493 GLOBAL_STATE_LOCK();
1495 1494
1496 1495 /* Add adapter structure to link list. */
1497 1496 ql_add_link_b(&ql_hba, &ha->hba);
1498 1497
1499 1498 /* Start one second driver timer. */
1500 1499 if (ql_timer_timeout_id == NULL) {
1501 1500 ql_timer_ticks = drv_usectohz(1000000);
1502 1501 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1503 1502 ql_timer_ticks);
1504 1503 }
1505 1504
1506 1505 /* Release global state lock. */
1507 1506 GLOBAL_STATE_UNLOCK();
1508 1507
1509 1508 /* Determine and populate HBA fru info */
1510 1509 ql_setup_fruinfo(ha);
1511 1510
1512 1511 /* Setup task_daemon thread. */
1513 1512 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1514 1513 0, &p0, TS_RUN, minclsyspri);
1515 1514
1516 1515 progress |= QL_TASK_DAEMON_STARTED;
1517 1516
1518 1517 ddi_report_dev(dip);
1519 1518
1520 1519 /* Disable link reset in panic path */
1521 1520 ha->lip_on_panic = 1;
1522 1521
1523 1522 rval = DDI_SUCCESS;
1524 1523 break;
1525 1524
1526 1525 attach_failed:
1527 1526 if (progress & QL_FCA_ATTACH_DONE) {
1528 1527 (void) fc_fca_detach(dip);
1529 1528 progress &= ~QL_FCA_ATTACH_DONE;
1530 1529 }
1531 1530
1532 1531 if (progress & QL_FCA_TRAN_ALLOCED) {
1533 1532 kmem_free(tran, sizeof (fc_fca_tran_t));
1534 1533 progress &= ~QL_FCA_TRAN_ALLOCED;
1535 1534 }
1536 1535
1537 1536 if (progress & QL_MINOR_NODE_CREATED) {
1538 1537 ddi_remove_minor_node(dip, "devctl");
1539 1538 progress &= ~QL_MINOR_NODE_CREATED;
1540 1539 }
1541 1540
1542 1541 if (progress & QL_KSTAT_CREATED) {
1543 1542 kstat_delete(ha->k_stats);
1544 1543 progress &= ~QL_KSTAT_CREATED;
1545 1544 }
1546 1545
1547 1546 if (progress & QL_N_PORT_INFO_CREATED) {
1548 1547 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1549 1548 progress &= ~QL_N_PORT_INFO_CREATED;
1550 1549 }
1551 1550
1552 1551 if (progress & QL_TASK_DAEMON_STARTED) {
1553 1552 TASK_DAEMON_LOCK(ha);
1554 1553
1555 1554 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1556 1555
1557 1556 cv_signal(&ha->cv_task_daemon);
1558 1557
1559 1558 /* Release task daemon lock. */
1560 1559 TASK_DAEMON_UNLOCK(ha);
1561 1560
1562 1561 /* Wait for for task daemon to stop running. */
1563 1562 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1564 1563 ql_delay(ha, 10000);
1565 1564 }
1566 1565 progress &= ~QL_TASK_DAEMON_STARTED;
1567 1566 }
1568 1567
1569 1568 if (progress & QL_DB_IOBASE_MAPPED) {
1570 1569 ql_8021_clr_drv_active(ha);
1571 1570 ddi_regs_map_free(&ha->db_dev_handle);
1572 1571 progress &= ~QL_DB_IOBASE_MAPPED;
1573 1572 }
1574 1573 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1575 1574 ddi_regs_map_free(&ha->iomap_dev_handle);
1576 1575 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1577 1576 }
1578 1577
1579 1578 if (progress & QL_CONFIG_SPACE_SETUP) {
1580 1579 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1581 1580 ddi_regs_map_free(&ha->sbus_config_handle);
1582 1581 } else {
1583 1582 pci_config_teardown(&ha->pci_handle);
1584 1583 }
1585 1584 progress &= ~QL_CONFIG_SPACE_SETUP;
1586 1585 }
1587 1586
1588 1587 if (progress & QL_INTR_ADDED) {
1589 1588 ql_disable_intr(ha);
1590 1589 ql_release_intr(ha);
1591 1590 progress &= ~QL_INTR_ADDED;
1592 1591 }
1593 1592
1594 1593 if (progress & QL_MUTEX_CV_INITED) {
1595 1594 ql_destroy_mutex(ha);
1596 1595 progress &= ~QL_MUTEX_CV_INITED;
1597 1596 }
1598 1597
1599 1598 if (progress & QL_HBA_BUFFER_SETUP) {
1600 1599 ql_free_phys(ha, &ha->hba_buf);
1601 1600 progress &= ~QL_HBA_BUFFER_SETUP;
1602 1601 }
1603 1602
1604 1603 if (progress & QL_REGS_MAPPED) {
1605 1604 ddi_regs_map_free(&ha->dev_handle);
1606 1605 if (ha->sbus_fpga_iobase != NULL) {
1607 1606 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1608 1607 }
1609 1608 progress &= ~QL_REGS_MAPPED;
1610 1609 }
1611 1610
1612 1611 if (progress & QL_SOFT_STATE_ALLOCED) {
1613 1612
1614 1613 ql_fcache_rel(ha->fcache);
1615 1614
1616 1615 kmem_free(ha->adapter_stats,
1617 1616 sizeof (*ha->adapter_stats));
1618 1617
1619 1618 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1620 1619 QL_UB_LIMIT);
1621 1620
1622 1621 kmem_free(ha->outstanding_cmds,
1623 1622 sizeof (*ha->outstanding_cmds) *
1624 1623 MAX_OUTSTANDING_COMMANDS);
1625 1624
1626 1625 if (ha->devpath != NULL) {
1627 1626 kmem_free(ha->devpath,
1628 1627 strlen(ha->devpath) + 1);
1629 1628 }
1630 1629
1631 1630 kmem_free(ha->dev, sizeof (*ha->dev) *
1632 1631 DEVICE_HEAD_LIST_SIZE);
1633 1632
1634 1633 if (ha->xioctl != NULL) {
1635 1634 ql_free_xioctl_resource(ha);
1636 1635 }
1637 1636
1638 1637 if (ha->fw_module != NULL) {
1639 1638 (void) ddi_modclose(ha->fw_module);
1640 1639 }
1641 1640 (void) ql_el_trace_desc_dtor(ha);
1642 1641 (void) ql_nvram_cache_desc_dtor(ha);
1643 1642
1644 1643 ddi_soft_state_free(ql_state, instance);
1645 1644 progress &= ~QL_SOFT_STATE_ALLOCED;
1646 1645 }
1647 1646
1648 1647 ddi_prop_remove_all(dip);
1649 1648 rval = DDI_FAILURE;
1650 1649 break;
1651 1650
1652 1651 case DDI_RESUME:
1653 1652 rval = DDI_FAILURE;
1654 1653
1655 1654 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1656 1655 if (ha == NULL) {
1657 1656 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1658 1657 QL_NAME, instance);
1659 1658 break;
1660 1659 }
1661 1660
1662 1661 ha->power_level = PM_LEVEL_D3;
1663 1662 if (ha->pm_capable) {
1664 1663 /*
1665 1664 * Get ql_power to do power on initialization
1666 1665 */
1667 1666 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1668 1667 PM_LEVEL_D0) != DDI_SUCCESS) {
1669 1668 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1670 1669 " power", QL_NAME, instance);
1671 1670 }
1672 1671 }
1673 1672
1674 1673 /*
1675 1674 * There is a bug in DR that prevents PM framework
1676 1675 * from calling ql_power.
1677 1676 */
1678 1677 if (ha->power_level == PM_LEVEL_D3) {
1679 1678 ha->power_level = PM_LEVEL_D0;
1680 1679
1681 1680 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1682 1681 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1683 1682 " adapter", QL_NAME, instance);
1684 1683 }
1685 1684
1686 1685 /* Wake up task_daemon. */
1687 1686 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1688 1687 0);
1689 1688 }
1690 1689
1691 1690 /* Acquire global state lock. */
1692 1691 GLOBAL_STATE_LOCK();
1693 1692
1694 1693 /* Restart driver timer. */
1695 1694 if (ql_timer_timeout_id == NULL) {
1696 1695 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1697 1696 ql_timer_ticks);
1698 1697 }
1699 1698
1700 1699 /* Release global state lock. */
1701 1700 GLOBAL_STATE_UNLOCK();
1702 1701
1703 1702 /* Wake up command start routine. */
1704 1703 ADAPTER_STATE_LOCK(ha);
1705 1704 ha->flags &= ~ADAPTER_SUSPENDED;
1706 1705 ADAPTER_STATE_UNLOCK(ha);
1707 1706
1708 1707 /*
1709 1708 * Transport doesn't make FC discovery in polled
1710 1709 * mode; So we need the daemon thread's services
1711 1710 * right here.
1712 1711 */
1713 1712 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1714 1713
1715 1714 rval = DDI_SUCCESS;
1716 1715
1717 1716 /* Restart IP if it was running. */
1718 1717 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1719 1718 (void) ql_initialize_ip(ha);
1720 1719 ql_isp_rcvbuf(ha);
1721 1720 }
1722 1721 break;
1723 1722
1724 1723 default:
1725 1724 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1726 1725 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1727 1726 rval = DDI_FAILURE;
1728 1727 break;
1729 1728 }
1730 1729
1731 1730 kmem_free(buf, MAXPATHLEN);
1732 1731
1733 1732 if (rval != DDI_SUCCESS) {
1734 1733 /*EMPTY*/
1735 1734 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1736 1735 ddi_get_instance(dip), rval);
1737 1736 } else {
1738 1737 /*EMPTY*/
1739 1738 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1740 1739 }
1741 1740
1742 1741 return (rval);
1743 1742 }
1744 1743
1745 1744 /*
1746 1745 * ql_detach
1747 1746 * Used to remove all the states associated with a given
1748 1747 * instances of a device node prior to the removal of that
1749 1748 * instance from the system.
1750 1749 *
1751 1750 * Input:
1752 1751 * dip = pointer to device information structure.
1753 1752 * cmd = type of detach.
1754 1753 *
1755 1754 * Returns:
1756 1755 * DDI_SUCCESS or DDI_FAILURE.
1757 1756 *
1758 1757 * Context:
1759 1758 * Kernel context.
1760 1759 */
1761 1760 static int
1762 1761 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1763 1762 {
1764 1763 ql_adapter_state_t *ha, *vha;
1765 1764 ql_tgt_t *tq;
1766 1765 int delay_cnt;
1767 1766 uint16_t index;
1768 1767 ql_link_t *link;
1769 1768 char *buf;
1770 1769 timeout_id_t timer_id = NULL;
1771 1770 int suspend, rval = DDI_SUCCESS;
1772 1771
1773 1772 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1774 1773 if (ha == NULL) {
1775 1774 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1776 1775 ddi_get_instance(dip));
1777 1776 return (DDI_FAILURE);
1778 1777 }
1779 1778
1780 1779 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1781 1780
1782 1781 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1783 1782
1784 1783 switch (cmd) {
1785 1784 case DDI_DETACH:
1786 1785 ADAPTER_STATE_LOCK(ha);
1787 1786 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1788 1787 ADAPTER_STATE_UNLOCK(ha);
1789 1788
1790 1789 TASK_DAEMON_LOCK(ha);
1791 1790
1792 1791 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1793 1792 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1794 1793 cv_signal(&ha->cv_task_daemon);
1795 1794
1796 1795 TASK_DAEMON_UNLOCK(ha);
1797 1796
1798 1797 (void) ql_wait_for_td_stop(ha);
1799 1798
1800 1799 TASK_DAEMON_LOCK(ha);
1801 1800 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1802 1801 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1803 1802 EL(ha, "failed, could not stop task daemon\n");
1804 1803 }
1805 1804 }
1806 1805 TASK_DAEMON_UNLOCK(ha);
1807 1806
1808 1807 GLOBAL_STATE_LOCK();
1809 1808
1810 1809 /* Disable driver timer if no adapters. */
1811 1810 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1812 1811 ql_hba.last == &ha->hba) {
1813 1812 timer_id = ql_timer_timeout_id;
1814 1813 ql_timer_timeout_id = NULL;
1815 1814 }
1816 1815 ql_remove_link(&ql_hba, &ha->hba);
1817 1816
1818 1817 GLOBAL_STATE_UNLOCK();
1819 1818
1820 1819 if (timer_id) {
1821 1820 (void) untimeout(timer_id);
1822 1821 }
1823 1822
1824 1823 if (ha->pm_capable) {
1825 1824 if (pm_lower_power(dip, QL_POWER_COMPONENT,
1826 1825 PM_LEVEL_D3) != DDI_SUCCESS) {
1827 1826 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1828 1827 " power", QL_NAME, ha->instance);
1829 1828 }
1830 1829 }
1831 1830
1832 1831 /*
1833 1832 * If pm_lower_power shutdown the adapter, there
1834 1833 * isn't much else to do
1835 1834 */
1836 1835 if (ha->power_level != PM_LEVEL_D3) {
1837 1836 ql_halt(ha, PM_LEVEL_D3);
1838 1837 }
1839 1838
1840 1839 /* Remove virtual ports. */
1841 1840 while ((vha = ha->vp_next) != NULL) {
1842 1841 ql_vport_destroy(vha);
1843 1842 }
1844 1843
1845 1844 /* Free target queues. */
1846 1845 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1847 1846 link = ha->dev[index].first;
1848 1847 while (link != NULL) {
1849 1848 tq = link->base_address;
1850 1849 link = link->next;
1851 1850 ql_dev_free(ha, tq);
1852 1851 }
1853 1852 }
1854 1853
1855 1854 /*
1856 1855 * Free unsolicited buffers.
1857 1856 * If we are here then there are no ULPs still
1858 1857 * alive that wish to talk to ql so free up
1859 1858 * any SRB_IP_UB_UNUSED buffers that are
1860 1859 * lingering around
1861 1860 */
1862 1861 QL_UB_LOCK(ha);
1863 1862 for (index = 0; index < QL_UB_LIMIT; index++) {
1864 1863 fc_unsol_buf_t *ubp = ha->ub_array[index];
1865 1864
1866 1865 if (ubp != NULL) {
1867 1866 ql_srb_t *sp = ubp->ub_fca_private;
1868 1867
1869 1868 sp->flags |= SRB_UB_FREE_REQUESTED;
1870 1869
1871 1870 while (!(sp->flags & SRB_UB_IN_FCA) ||
1872 1871 (sp->flags & (SRB_UB_CALLBACK |
1873 1872 SRB_UB_ACQUIRED))) {
1874 1873 QL_UB_UNLOCK(ha);
1875 1874 delay(drv_usectohz(100000));
1876 1875 QL_UB_LOCK(ha);
1877 1876 }
1878 1877 ha->ub_array[index] = NULL;
1879 1878
1880 1879 QL_UB_UNLOCK(ha);
1881 1880 ql_free_unsolicited_buffer(ha, ubp);
1882 1881 QL_UB_LOCK(ha);
1883 1882 }
1884 1883 }
1885 1884 QL_UB_UNLOCK(ha);
1886 1885
1887 1886 /* Free any saved RISC code. */
1888 1887 if (ha->risc_code != NULL) {
1889 1888 kmem_free(ha->risc_code, ha->risc_code_size);
1890 1889 ha->risc_code = NULL;
1891 1890 ha->risc_code_size = 0;
1892 1891 }
1893 1892
1894 1893 if (ha->fw_module != NULL) {
1895 1894 (void) ddi_modclose(ha->fw_module);
1896 1895 ha->fw_module = NULL;
1897 1896 }
1898 1897
1899 1898 /* Free resources. */
1900 1899 ddi_prop_remove_all(dip);
1901 1900 (void) fc_fca_detach(dip);
1902 1901 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1903 1902 ddi_remove_minor_node(dip, "devctl");
1904 1903 if (ha->k_stats != NULL) {
1905 1904 kstat_delete(ha->k_stats);
1906 1905 }
1907 1906
1908 1907 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1909 1908 ddi_regs_map_free(&ha->sbus_config_handle);
1910 1909 } else {
1911 1910 if (CFG_IST(ha, CFG_CTRL_8021)) {
1912 1911 ql_8021_clr_drv_active(ha);
1913 1912 ddi_regs_map_free(&ha->db_dev_handle);
1914 1913 }
1915 1914 if (ha->iomap_dev_handle != ha->dev_handle) {
1916 1915 ddi_regs_map_free(&ha->iomap_dev_handle);
1917 1916 }
1918 1917 pci_config_teardown(&ha->pci_handle);
1919 1918 }
1920 1919
1921 1920 ql_disable_intr(ha);
1922 1921 ql_release_intr(ha);
1923 1922
1924 1923 ql_free_xioctl_resource(ha);
1925 1924
1926 1925 ql_destroy_mutex(ha);
1927 1926
1928 1927 ql_free_phys(ha, &ha->hba_buf);
1929 1928 ql_free_phys(ha, &ha->fwexttracebuf);
1930 1929 ql_free_phys(ha, &ha->fwfcetracebuf);
1931 1930
1932 1931 ddi_regs_map_free(&ha->dev_handle);
1933 1932 if (ha->sbus_fpga_iobase != NULL) {
1934 1933 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1935 1934 }
1936 1935
1937 1936 ql_fcache_rel(ha->fcache);
1938 1937 if (ha->vcache != NULL) {
1939 1938 kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1940 1939 }
1941 1940
1942 1941 if (ha->pi_attrs != NULL) {
1943 1942 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1944 1943 }
1945 1944
1946 1945 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1947 1946
1948 1947 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1949 1948
1950 1949 kmem_free(ha->outstanding_cmds,
1951 1950 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1952 1951
1953 1952 if (ha->n_port != NULL) {
1954 1953 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1955 1954 }
1956 1955
1957 1956 if (ha->devpath != NULL) {
1958 1957 kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1959 1958 }
1960 1959
1961 1960 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1962 1961
1963 1962 EL(ha, "detached\n");
1964 1963
1965 1964 ddi_soft_state_free(ql_state, (int)ha->instance);
1966 1965
1967 1966 break;
1968 1967
1969 1968 case DDI_SUSPEND:
1970 1969 ADAPTER_STATE_LOCK(ha);
1971 1970
1972 1971 delay_cnt = 0;
1973 1972 ha->flags |= ADAPTER_SUSPENDED;
1974 1973 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1975 1974 ADAPTER_STATE_UNLOCK(ha);
1976 1975 delay(drv_usectohz(1000000));
1977 1976 ADAPTER_STATE_LOCK(ha);
1978 1977 }
1979 1978 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1980 1979 ha->flags &= ~ADAPTER_SUSPENDED;
1981 1980 ADAPTER_STATE_UNLOCK(ha);
1982 1981 rval = DDI_FAILURE;
1983 1982 cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1984 1983 " busy %xh flags %xh", QL_NAME, ha->instance,
1985 1984 ha->busy, ha->flags);
1986 1985 break;
1987 1986 }
1988 1987
1989 1988 ADAPTER_STATE_UNLOCK(ha);
1990 1989
1991 1990 if (ha->flags & IP_INITIALIZED) {
1992 1991 (void) ql_shutdown_ip(ha);
1993 1992 }
1994 1993
1995 1994 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1996 1995 ADAPTER_STATE_LOCK(ha);
1997 1996 ha->flags &= ~ADAPTER_SUSPENDED;
1998 1997 ADAPTER_STATE_UNLOCK(ha);
1999 1998 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2000 1999 QL_NAME, ha->instance, suspend);
2001 2000
2002 2001 /* Restart IP if it was running. */
2003 2002 if (ha->flags & IP_ENABLED &&
2004 2003 !(ha->flags & IP_INITIALIZED)) {
2005 2004 (void) ql_initialize_ip(ha);
2006 2005 ql_isp_rcvbuf(ha);
2007 2006 }
2008 2007 rval = DDI_FAILURE;
2009 2008 break;
2010 2009 }
2011 2010
2012 2011 /* Acquire global state lock. */
2013 2012 GLOBAL_STATE_LOCK();
2014 2013
2015 2014 /* Disable driver timer if last adapter. */
2016 2015 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2017 2016 ql_hba.last == &ha->hba) {
2018 2017 timer_id = ql_timer_timeout_id;
2019 2018 ql_timer_timeout_id = NULL;
2020 2019 }
2021 2020 GLOBAL_STATE_UNLOCK();
2022 2021
2023 2022 if (timer_id) {
2024 2023 (void) untimeout(timer_id);
2025 2024 }
2026 2025
2027 2026 EL(ha, "suspended\n");
2028 2027
2029 2028 break;
2030 2029
2031 2030 default:
2032 2031 rval = DDI_FAILURE;
2033 2032 break;
2034 2033 }
2035 2034
2036 2035 kmem_free(buf, MAXPATHLEN);
2037 2036
2038 2037 if (rval != DDI_SUCCESS) {
2039 2038 if (ha != NULL) {
2040 2039 EL(ha, "failed, rval = %xh\n", rval);
2041 2040 } else {
2042 2041 /*EMPTY*/
2043 2042 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2044 2043 ddi_get_instance(dip), rval);
2045 2044 }
2046 2045 } else {
2047 2046 /*EMPTY*/
2048 2047 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2049 2048 }
2050 2049
2051 2050 return (rval);
2052 2051 }
2053 2052
2054 2053
2055 2054 /*
2056 2055 * ql_power
2057 2056 * Power a device attached to the system.
2058 2057 *
2059 2058 * Input:
2060 2059 * dip = pointer to device information structure.
2061 2060 * component = device.
2062 2061 * level = power level.
2063 2062 *
2064 2063 * Returns:
2065 2064 * DDI_SUCCESS or DDI_FAILURE.
2066 2065 *
2067 2066 * Context:
2068 2067 * Kernel context.
2069 2068 */
2070 2069 /* ARGSUSED */
2071 2070 static int
2072 2071 ql_power(dev_info_t *dip, int component, int level)
2073 2072 {
2074 2073 int rval = DDI_FAILURE;
2075 2074 off_t csr;
2076 2075 uint8_t saved_pm_val;
2077 2076 ql_adapter_state_t *ha;
2078 2077 char *buf;
2079 2078 char *path;
2080 2079
2081 2080 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2082 2081 if (ha == NULL || ha->pm_capable == 0) {
2083 2082 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2084 2083 ddi_get_instance(dip));
2085 2084 return (rval);
2086 2085 }
2087 2086
2088 2087 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2089 2088
2090 2089 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2091 2090 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092 2091
2093 2092 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2094 2093 level != PM_LEVEL_D3)) {
2095 2094 EL(ha, "invalid, component=%xh or level=%xh\n",
2096 2095 component, level);
2097 2096 return (rval);
2098 2097 }
2099 2098
2100 2099 GLOBAL_HW_LOCK();
2101 2100 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2102 2101 GLOBAL_HW_UNLOCK();
2103 2102
2104 2103 (void) snprintf(buf, sizeof (buf),
2105 2104 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2106 2105 ddi_pathname(dip, path));
2107 2106
2108 2107 switch (level) {
2109 2108 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2110 2109
2111 2110 QL_PM_LOCK(ha);
2112 2111 if (ha->power_level == PM_LEVEL_D0) {
2113 2112 QL_PM_UNLOCK(ha);
2114 2113 rval = DDI_SUCCESS;
2115 2114 break;
2116 2115 }
2117 2116
2118 2117 /*
2119 2118 * Enable interrupts now
2120 2119 */
2121 2120 saved_pm_val = ha->power_level;
2122 2121 ha->power_level = PM_LEVEL_D0;
2123 2122 QL_PM_UNLOCK(ha);
2124 2123
2125 2124 GLOBAL_HW_LOCK();
2126 2125
2127 2126 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2128 2127
2129 2128 /*
2130 2129 * Delay after reset, for chip to recover.
2131 2130 * Otherwise causes system PANIC
2132 2131 */
2133 2132 drv_usecwait(200000);
2134 2133
2135 2134 GLOBAL_HW_UNLOCK();
2136 2135
2137 2136 if (ha->config_saved) {
2138 2137 ha->config_saved = 0;
2139 2138 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2140 2139 QL_PM_LOCK(ha);
2141 2140 ha->power_level = saved_pm_val;
2142 2141 QL_PM_UNLOCK(ha);
2143 2142 cmn_err(CE_WARN, "%s failed to restore "
2144 2143 "config regs", buf);
2145 2144 break;
2146 2145 }
2147 2146 }
2148 2147
2149 2148 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2150 2149 cmn_err(CE_WARN, "%s adapter initialization failed",
2151 2150 buf);
2152 2151 }
2153 2152
2154 2153 /* Wake up task_daemon. */
2155 2154 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2156 2155 TASK_DAEMON_SLEEPING_FLG, 0);
2157 2156
2158 2157 /* Restart IP if it was running. */
2159 2158 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2160 2159 (void) ql_initialize_ip(ha);
2161 2160 ql_isp_rcvbuf(ha);
2162 2161 }
2163 2162
2164 2163 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2165 2164 ha->instance, QL_NAME);
2166 2165
2167 2166 rval = DDI_SUCCESS;
2168 2167 break;
2169 2168
2170 2169 case PM_LEVEL_D3: /* power down to D3 state - off */
2171 2170
2172 2171 QL_PM_LOCK(ha);
2173 2172
2174 2173 if (ha->busy || ((ha->task_daemon_flags &
2175 2174 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2176 2175 QL_PM_UNLOCK(ha);
2177 2176 break;
2178 2177 }
2179 2178
2180 2179 if (ha->power_level == PM_LEVEL_D3) {
2181 2180 rval = DDI_SUCCESS;
2182 2181 QL_PM_UNLOCK(ha);
2183 2182 break;
2184 2183 }
2185 2184 QL_PM_UNLOCK(ha);
2186 2185
2187 2186 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2188 2187 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2189 2188 " config regs", QL_NAME, ha->instance, buf);
2190 2189 break;
2191 2190 }
2192 2191 ha->config_saved = 1;
2193 2192
2194 2193 /*
2195 2194 * Don't enable interrupts. Running mailbox commands with
2196 2195 * interrupts enabled could cause hangs since pm_run_scan()
2197 2196 * runs out of a callout thread and on single cpu systems
2198 2197 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2199 2198 * would not get to run.
2200 2199 */
2201 2200 TASK_DAEMON_LOCK(ha);
2202 2201 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2203 2202 TASK_DAEMON_UNLOCK(ha);
2204 2203
2205 2204 ql_halt(ha, PM_LEVEL_D3);
2206 2205
2207 2206 /*
2208 2207 * Setup ql_intr to ignore interrupts from here on.
2209 2208 */
2210 2209 QL_PM_LOCK(ha);
2211 2210 ha->power_level = PM_LEVEL_D3;
2212 2211 QL_PM_UNLOCK(ha);
2213 2212
2214 2213 /*
2215 2214 * Wait for ISR to complete.
2216 2215 */
2217 2216 INTR_LOCK(ha);
2218 2217 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2219 2218 INTR_UNLOCK(ha);
2220 2219
2221 2220 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2222 2221 ha->instance, QL_NAME);
2223 2222
2224 2223 rval = DDI_SUCCESS;
2225 2224 break;
2226 2225 }
2227 2226
2228 2227 kmem_free(buf, MAXPATHLEN);
2229 2228 kmem_free(path, MAXPATHLEN);
2230 2229
2231 2230 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2232 2231
2233 2232 return (rval);
2234 2233 }
2235 2234
2236 2235 /*
2237 2236 * ql_quiesce
2238 2237 * quiesce a device attached to the system.
2239 2238 *
2240 2239 * Input:
2241 2240 * dip = pointer to device information structure.
2242 2241 *
2243 2242 * Returns:
2244 2243 * DDI_SUCCESS
2245 2244 *
2246 2245 * Context:
2247 2246 * Kernel context.
2248 2247 */
2249 2248 static int
2250 2249 ql_quiesce(dev_info_t *dip)
2251 2250 {
2252 2251 ql_adapter_state_t *ha;
2253 2252 uint32_t timer;
2254 2253 uint32_t stat;
2255 2254
2256 2255 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2257 2256 if (ha == NULL) {
2258 2257 /* Oh well.... */
2259 2258 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2260 2259 ddi_get_instance(dip));
2261 2260 return (DDI_SUCCESS);
2262 2261 }
2263 2262
2264 2263 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2265 2264
2266 2265 if (CFG_IST(ha, CFG_CTRL_8021)) {
2267 2266 (void) ql_stop_firmware(ha);
2268 2267 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
2269 2268 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2270 2269 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2271 2270 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2272 2271 for (timer = 0; timer < 30000; timer++) {
2273 2272 stat = RD32_IO_REG(ha, risc2host);
2274 2273 if (stat & BIT_15) {
2275 2274 if ((stat & 0xff) < 0x12) {
2276 2275 WRT32_IO_REG(ha, hccr,
2277 2276 HC24_CLR_RISC_INT);
2278 2277 break;
2279 2278 }
2280 2279 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2281 2280 }
2282 2281 drv_usecwait(100);
2283 2282 }
2284 2283 /* Reset the chip. */
2285 2284 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2286 2285 MWB_4096_BYTES);
2287 2286 drv_usecwait(100);
2288 2287
2289 2288 } else {
2290 2289 /* Disable ISP interrupts. */
2291 2290 WRT16_IO_REG(ha, ictrl, 0);
2292 2291 /* Select RISC module registers. */
2293 2292 WRT16_IO_REG(ha, ctrl_status, 0);
2294 2293 /* Reset ISP semaphore. */
2295 2294 WRT16_IO_REG(ha, semaphore, 0);
2296 2295 /* Reset RISC module. */
2297 2296 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2298 2297 /* Release RISC module. */
2299 2298 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2300 2299 }
2301 2300
2302 2301 ql_disable_intr(ha);
2303 2302
2304 2303 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2305 2304
2306 2305 return (DDI_SUCCESS);
2307 2306 }
2308 2307
2309 2308 /* ************************************************************************ */
2310 2309 /* Fibre Channel Adapter (FCA) Transport Functions. */
2311 2310 /* ************************************************************************ */
2312 2311
2313 2312 /*
2314 2313 * ql_bind_port
2315 2314 * Handling port binding. The FC Transport attempts to bind an FCA port
2316 2315 * when it is ready to start transactions on the port. The FC Transport
2317 2316 * will call the fca_bind_port() function specified in the fca_transport
2318 2317 * structure it receives. The FCA must fill in the port_info structure
2319 2318 * passed in the call and also stash the information for future calls.
2320 2319 *
2321 2320 * Input:
2322 2321 * dip = pointer to FCA information structure.
2323 2322 * port_info = pointer to port information structure.
2324 2323 * bind_info = pointer to bind information structure.
2325 2324 *
2326 2325 * Returns:
2327 2326 * NULL = failure
2328 2327 *
2329 2328 * Context:
2330 2329 * Kernel context.
2331 2330 */
2332 2331 static opaque_t
2333 2332 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2334 2333 fc_fca_bind_info_t *bind_info)
2335 2334 {
2336 2335 ql_adapter_state_t *ha, *vha;
2337 2336 opaque_t fca_handle = NULL;
2338 2337 port_id_t d_id;
2339 2338 int port_npiv = bind_info->port_npiv;
2340 2339 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2341 2340 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2342 2341
2343 2342 /* get state info based on the dip */
2344 2343 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2345 2344 if (ha == NULL) {
2346 2345 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2347 2346 ddi_get_instance(dip));
2348 2347 return (NULL);
2349 2348 }
2350 2349 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2351 2350
2352 2351 /* Verify port number is supported. */
2353 2352 if (port_npiv != 0) {
2354 2353 if (!(ha->flags & VP_ENABLED)) {
2355 2354 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2356 2355 ha->instance);
2357 2356 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2358 2357 return (NULL);
2359 2358 }
2360 2359 if (!(ha->flags & POINT_TO_POINT)) {
2361 2360 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2362 2361 ha->instance);
2363 2362 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2364 2363 return (NULL);
2365 2364 }
2366 2365 if (!(ha->flags & FDISC_ENABLED)) {
2367 2366 QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2368 2367 "FDISC\n", ha->instance);
2369 2368 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2370 2369 return (NULL);
2371 2370 }
2372 2371 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2373 2372 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2374 2373 QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2375 2374 "FC_OUTOFBOUNDS\n", ha->instance);
2376 2375 port_info->pi_error = FC_OUTOFBOUNDS;
2377 2376 return (NULL);
2378 2377 }
2379 2378 } else if (bind_info->port_num != 0) {
2380 2379 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2381 2380 "supported\n", ha->instance, bind_info->port_num);
2382 2381 port_info->pi_error = FC_OUTOFBOUNDS;
2383 2382 return (NULL);
2384 2383 }
2385 2384
2386 2385 /* Locate port context. */
2387 2386 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2388 2387 if (vha->vp_index == bind_info->port_num) {
2389 2388 break;
2390 2389 }
2391 2390 }
2392 2391
2393 2392 /* If virtual port does not exist. */
2394 2393 if (vha == NULL) {
2395 2394 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2396 2395 }
2397 2396
2398 2397 /* make sure this port isn't already bound */
2399 2398 if (vha->flags & FCA_BOUND) {
2400 2399 port_info->pi_error = FC_ALREADY;
2401 2400 } else {
2402 2401 if (vha->vp_index != 0) {
2403 2402 bcopy(port_nwwn,
2404 2403 vha->loginparams.node_ww_name.raw_wwn, 8);
2405 2404 bcopy(port_pwwn,
2406 2405 vha->loginparams.nport_ww_name.raw_wwn, 8);
2407 2406 }
2408 2407 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2409 2408 if (ql_vport_enable(vha) != QL_SUCCESS) {
2410 2409 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2411 2410 "virtual port=%d\n", ha->instance,
2412 2411 vha->vp_index);
2413 2412 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2414 2413 return (NULL);
2415 2414 }
2416 2415 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2417 2416 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2418 2417 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2419 2418 QL_NAME, ha->instance, vha->vp_index,
2420 2419 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2421 2420 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2422 2421 port_pwwn[6], port_pwwn[7],
2423 2422 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2424 2423 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2425 2424 port_nwwn[6], port_nwwn[7]);
2426 2425 }
2427 2426
2428 2427 /* stash the bind_info supplied by the FC Transport */
2429 2428 vha->bind_info.port_handle = bind_info->port_handle;
2430 2429 vha->bind_info.port_statec_cb =
2431 2430 bind_info->port_statec_cb;
2432 2431 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2433 2432
2434 2433 /* Set port's source ID. */
2435 2434 port_info->pi_s_id.port_id = vha->d_id.b24;
2436 2435
2437 2436 /* copy out the default login parameters */
2438 2437 bcopy((void *)&vha->loginparams,
2439 2438 (void *)&port_info->pi_login_params,
2440 2439 sizeof (la_els_logi_t));
2441 2440
2442 2441 /* Set port's hard address if enabled. */
2443 2442 port_info->pi_hard_addr.hard_addr = 0;
2444 2443 if (bind_info->port_num == 0) {
2445 2444 d_id.b24 = ha->d_id.b24;
2446 2445 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2447 2446 if (ha->init_ctrl_blk.cb24.
2448 2447 firmware_options_1[0] & BIT_0) {
2449 2448 d_id.b.al_pa = ql_index_to_alpa[ha->
2450 2449 init_ctrl_blk.cb24.
2451 2450 hard_address[0]];
2452 2451 port_info->pi_hard_addr.hard_addr =
2453 2452 d_id.b24;
2454 2453 }
2455 2454 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2456 2455 BIT_0) {
2457 2456 d_id.b.al_pa = ql_index_to_alpa[ha->
2458 2457 init_ctrl_blk.cb.hard_address[0]];
2459 2458 port_info->pi_hard_addr.hard_addr = d_id.b24;
2460 2459 }
2461 2460
2462 2461 /* Set the node id data */
2463 2462 if (ql_get_rnid_params(ha,
2464 2463 sizeof (port_info->pi_rnid_params.params),
2465 2464 (caddr_t)&port_info->pi_rnid_params.params) ==
2466 2465 QL_SUCCESS) {
2467 2466 port_info->pi_rnid_params.status = FC_SUCCESS;
2468 2467 } else {
2469 2468 port_info->pi_rnid_params.status = FC_FAILURE;
2470 2469 }
2471 2470
2472 2471 /* Populate T11 FC-HBA details */
2473 2472 ql_populate_hba_fru_details(ha, port_info);
2474 2473 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2475 2474 KM_SLEEP);
2476 2475 if (ha->pi_attrs != NULL) {
2477 2476 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2478 2477 sizeof (fca_port_attrs_t));
2479 2478 }
2480 2479 } else {
2481 2480 port_info->pi_rnid_params.status = FC_FAILURE;
2482 2481 if (ha->pi_attrs != NULL) {
2483 2482 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2484 2483 sizeof (fca_port_attrs_t));
2485 2484 }
2486 2485 }
2487 2486
2488 2487 /* Generate handle for this FCA. */
2489 2488 fca_handle = (opaque_t)vha;
2490 2489
2491 2490 ADAPTER_STATE_LOCK(ha);
2492 2491 vha->flags |= FCA_BOUND;
2493 2492 ADAPTER_STATE_UNLOCK(ha);
2494 2493 /* Set port's current state. */
2495 2494 port_info->pi_port_state = vha->state;
2496 2495 }
2497 2496
2498 2497 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2499 2498 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2500 2499 port_info->pi_port_state, port_info->pi_s_id.port_id);
2501 2500
2502 2501 return (fca_handle);
2503 2502 }
2504 2503
2505 2504 /*
2506 2505 * ql_unbind_port
2507 2506 * To unbind a Fibre Channel Adapter from an FC Port driver.
2508 2507 *
2509 2508 * Input:
2510 2509 * fca_handle = handle setup by ql_bind_port().
2511 2510 *
2512 2511 * Context:
2513 2512 * Kernel context.
2514 2513 */
2515 2514 static void
2516 2515 ql_unbind_port(opaque_t fca_handle)
2517 2516 {
2518 2517 ql_adapter_state_t *ha;
2519 2518 ql_tgt_t *tq;
2520 2519 uint32_t flgs;
2521 2520
2522 2521 ha = ql_fca_handle_to_state(fca_handle);
2523 2522 if (ha == NULL) {
2524 2523 /*EMPTY*/
2525 2524 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2526 2525 (void *)fca_handle);
2527 2526 } else {
2528 2527 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2529 2528 ha->vp_index);
2530 2529
2531 2530 if (!(ha->flags & FCA_BOUND)) {
2532 2531 /*EMPTY*/
2533 2532 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2534 2533 ha->instance, ha->vp_index);
2535 2534 } else {
2536 2535 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2537 2536 if ((tq = ql_loop_id_to_queue(ha,
2538 2537 FL_PORT_24XX_HDL)) != NULL) {
2539 2538 (void) ql_logout_fabric_port(ha, tq);
2540 2539 }
2541 2540 (void) ql_vport_control(ha, (uint8_t)
2542 2541 (CFG_IST(ha, CFG_CTRL_2425) ?
2543 2542 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2544 2543 flgs = FCA_BOUND | VP_ENABLED;
2545 2544 } else {
2546 2545 flgs = FCA_BOUND;
2547 2546 }
2548 2547 ADAPTER_STATE_LOCK(ha);
2549 2548 ha->flags &= ~flgs;
2550 2549 ADAPTER_STATE_UNLOCK(ha);
2551 2550 }
2552 2551
2553 2552 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2554 2553 ha->vp_index);
2555 2554 }
2556 2555 }
2557 2556
2558 2557 /*
2559 2558 * ql_init_pkt
2560 2559 * Initialize FCA portion of packet.
2561 2560 *
2562 2561 * Input:
2563 2562 * fca_handle = handle setup by ql_bind_port().
2564 2563 * pkt = pointer to fc_packet.
2565 2564 *
2566 2565 * Returns:
2567 2566 * FC_SUCCESS - the packet has successfully been initialized.
2568 2567 * FC_UNBOUND - the fca_handle specified is not bound.
2569 2568 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2570 2569 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2571 2570 *
2572 2571 * Context:
2573 2572 * Kernel context.
2574 2573 */
2575 2574 /* ARGSUSED */
2576 2575 static int
2577 2576 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2578 2577 {
2579 2578 ql_adapter_state_t *ha;
2580 2579 ql_srb_t *sp;
2581 2580 int rval = FC_SUCCESS;
2582 2581
2583 2582 ha = ql_fca_handle_to_state(fca_handle);
2584 2583 if (ha == NULL) {
2585 2584 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2586 2585 (void *)fca_handle);
2587 2586 return (FC_UNBOUND);
2588 2587 }
2589 2588 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2590 2589
2591 2590 sp = (ql_srb_t *)pkt->pkt_fca_private;
2592 2591 sp->flags = 0;
2593 2592
2594 2593 /* init cmd links */
2595 2594 sp->cmd.base_address = sp;
2596 2595 sp->cmd.prev = NULL;
2597 2596 sp->cmd.next = NULL;
2598 2597 sp->cmd.head = NULL;
2599 2598
2600 2599 /* init watchdog links */
2601 2600 sp->wdg.base_address = sp;
2602 2601 sp->wdg.prev = NULL;
2603 2602 sp->wdg.next = NULL;
2604 2603 sp->wdg.head = NULL;
2605 2604 sp->pkt = pkt;
2606 2605 sp->ha = ha;
2607 2606 sp->magic_number = QL_FCA_BRAND;
2608 2607 sp->sg_dma.dma_handle = NULL;
2609 2608 #ifndef __sparc
2610 2609 if (CFG_IST(ha, CFG_CTRL_8021)) {
2611 2610 /* Setup DMA for scatter gather list. */
2612 2611 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2613 2612 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2614 2613 sp->sg_dma.cookie_count = 1;
2615 2614 sp->sg_dma.alignment = 64;
2616 2615 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2617 2616 rval = FC_NOMEM;
2618 2617 }
2619 2618 }
2620 2619 #endif /* __sparc */
2621 2620
2622 2621 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2623 2622
2624 2623 return (rval);
2625 2624 }
2626 2625
2627 2626 /*
2628 2627 * ql_un_init_pkt
2629 2628 * Release all local resources bound to packet.
2630 2629 *
2631 2630 * Input:
2632 2631 * fca_handle = handle setup by ql_bind_port().
2633 2632 * pkt = pointer to fc_packet.
2634 2633 *
2635 2634 * Returns:
2636 2635 * FC_SUCCESS - the packet has successfully been invalidated.
2637 2636 * FC_UNBOUND - the fca_handle specified is not bound.
2638 2637 * FC_BADPACKET - the packet has not been initialized or has
2639 2638 * already been freed by this FCA.
2640 2639 *
2641 2640 * Context:
2642 2641 * Kernel context.
2643 2642 */
2644 2643 static int
2645 2644 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2646 2645 {
2647 2646 ql_adapter_state_t *ha;
2648 2647 int rval;
2649 2648 ql_srb_t *sp;
2650 2649
2651 2650 ha = ql_fca_handle_to_state(fca_handle);
2652 2651 if (ha == NULL) {
2653 2652 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2654 2653 (void *)fca_handle);
2655 2654 return (FC_UNBOUND);
2656 2655 }
2657 2656 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2658 2657
2659 2658 sp = (ql_srb_t *)pkt->pkt_fca_private;
2660 2659
2661 2660 if (sp->magic_number != QL_FCA_BRAND) {
2662 2661 EL(ha, "failed, FC_BADPACKET\n");
2663 2662 rval = FC_BADPACKET;
2664 2663 } else {
2665 2664 sp->magic_number = NULL;
2666 2665 ql_free_phys(ha, &sp->sg_dma);
2667 2666 rval = FC_SUCCESS;
2668 2667 }
2669 2668
2670 2669 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2671 2670
2672 2671 return (rval);
2673 2672 }
2674 2673
2675 2674 /*
2676 2675 * ql_els_send
2677 2676 * Issue a extended link service request.
2678 2677 *
2679 2678 * Input:
2680 2679 * fca_handle = handle setup by ql_bind_port().
2681 2680 * pkt = pointer to fc_packet.
2682 2681 *
2683 2682 * Returns:
2684 2683 * FC_SUCCESS - the command was successful.
2685 2684 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2686 2685 * FC_ELS_PREJECT - the command was rejected by an N-port.
2687 2686 * FC_TRANSPORT_ERROR - a transport error occurred.
2688 2687 * FC_UNBOUND - the fca_handle specified is not bound.
2689 2688 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2690 2689 *
2691 2690 * Context:
2692 2691 * Kernel context.
2693 2692 */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Wait for suspension to end.  While suspended (e.g. DR in
	 * progress) block up to 30 seconds per wakeup on the parent
	 * adapter's cv; on timeout fail the packet as busy so the
	 * transport retries.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * Setup response header: start from a copy of the command
	 * header, then overwrite the addressing/control fields below.
	 */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear stale per-packet state, then mark this SRB as an ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/*
		 * NOTE(review): this log line prints "LA_ELS_RJT" for
		 * LA_ELS_ACC as well — misleading but log-only; left
		 * unchanged to preserve runtime output.
		 */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2872 2871
2873 2872 /*
2874 2873 * ql_get_cap
2875 2874 * Export FCA hardware and software capabilities.
2876 2875 *
2877 2876 * Input:
2878 2877 * fca_handle = handle setup by ql_bind_port().
2879 2878 * cap = pointer to the capabilities string.
2880 2879 * ptr = buffer pointer for return capability.
2881 2880 *
2882 2881 * Returns:
2883 2882 * FC_CAP_ERROR - no such capability
2884 2883 * FC_CAP_FOUND - the capability was returned and cannot be set
2885 2884 * FC_CAP_SETTABLE - the capability was returned and can be set
2886 2885 * FC_UNBOUND - the fca_handle specified is not bound.
2887 2886 *
2888 2887 * Context:
2889 2888 * Kernel context.
2890 2889 */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* 8-byte world-wide name straight out of login params. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		/* Caller's buffer must hold a full la_els_logi_t. */
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		/* Non-sparc builds never find a Psycho parent: stream. */
		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			/* Walk up the devinfo tree looking for pcipsy. */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/*
		 * Max frame length lives in the init control block;
		 * 24xx-family chips use the cb24 layout, older chips cb.
		 */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2971 2970
2972 2971 /*
2973 2972 * ql_set_cap
2974 2973 * Allow the FC Transport to set FCA capabilities if possible.
2975 2974 *
2976 2975 * Input:
2977 2976 * fca_handle = handle setup by ql_bind_port().
2978 2977 * cap = pointer to the capabilities string.
2979 2978 * ptr = buffer pointer for capability.
2980 2979 *
2981 2980 * Returns:
2982 2981 * FC_CAP_ERROR - no such capability
2983 2982 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2984 2983 * FC_CAP_SETTABLE - the capability was successfully set.
2985 2984 * FC_UNBOUND - the fca_handle specified is not bound.
2986 2985 *
2987 2986 * Context:
2988 2987 * Kernel context.
2989 2988 */
2990 2989 /* ARGSUSED */
2991 2990 static int
2992 2991 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2993 2992 {
2994 2993 ql_adapter_state_t *ha;
2995 2994 int rval;
2996 2995
2997 2996 ha = ql_fca_handle_to_state(fca_handle);
2998 2997 if (ha == NULL) {
2999 2998 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3000 2999 (void *)fca_handle);
3001 3000 return (FC_UNBOUND);
3002 3001 }
3003 3002 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3004 3003
3005 3004 if (strcmp(cap, FC_NODE_WWN) == 0) {
3006 3005 rval = FC_CAP_FOUND;
3007 3006 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3008 3007 rval = FC_CAP_FOUND;
3009 3008 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3010 3009 rval = FC_CAP_FOUND;
3011 3010 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3012 3011 rval = FC_CAP_FOUND;
3013 3012 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3014 3013 rval = FC_CAP_FOUND;
3015 3014 } else {
3016 3015 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3017 3016 rval = FC_CAP_ERROR;
3018 3017 }
3019 3018
3020 3019 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3021 3020
3022 3021 return (rval);
3023 3022 }
3024 3023
3025 3024 /*
3026 3025 * ql_getmap
3027 3026 * Request of Arbitrated Loop (AL-PA) map.
3028 3027 *
3029 3028 * Input:
3030 3029 * fca_handle = handle setup by ql_bind_port().
3031 3030 * mapbuf= buffer pointer for map.
3032 3031 *
3033 3032 * Returns:
3034 3033 * FC_OLDPORT - the specified port is not operating in loop mode.
3035 3034 * FC_OFFLINE - the specified port is not online.
3036 3035 * FC_NOMAP - there is no loop map available for this port.
3037 3036 * FC_UNBOUND - the fca_handle specified is not bound.
3038 3037 * FC_SUCCESS - a valid map has been placed in mapbuf.
3039 3038 *
3040 3039 * Context:
3041 3040 * Kernel context.
3042 3041 */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/*
	 * Wait for suspension to end: block up to 30 seconds per
	 * wakeup on the parent adapter's cv before giving up.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* lilp_length and the ALPA list are filled in by the firmware. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait for few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3111 3110
3112 3111 /*
3113 3112 * ql_transport
3114 3113 * Issue an I/O request. Handles all regular requests.
3115 3114 *
3116 3115 * Input:
3117 3116 * fca_handle = handle setup by ql_bind_port().
3118 3117 * pkt = pointer to fc_packet.
3119 3118 *
3120 3119 * Returns:
3121 3120 * FC_SUCCESS - the packet was accepted for transport.
3122 3121 * FC_TRANSPORT_ERROR - a transport error occurred.
3123 3122 * FC_BADPACKET - the packet to be transported had not been
3124 3123 * initialized by this FCA.
3125 3124 * FC_UNBOUND - the fca_handle specified is not bound.
3126 3125 *
3127 3126 * Context:
3128 3127 * Kernel context.
3129 3128 */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif

	/* Reset SRB flags (clear all per-transport state bits). */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-build the response header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on routing control; rval stays FC_TRANSPORT_ERROR */
	/* for any r_ctl/type pairing not handled below. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3217 3216
3218 3217 /*
3219 3218 * ql_ub_alloc
3220 3219 * Allocate buffers for unsolicited exchanges.
3221 3220 *
3222 3221 * Input:
3223 3222 * fca_handle = handle setup by ql_bind_port().
3224 3223 * tokens = token array for each buffer.
3225 3224 * size = size of each buffer.
3226 3225 * count = pointer to number of buffers.
3227 3226 * type = the FC-4 type the buffers are reserved for.
3228 3227 * 1 = Extended Link Services, 5 = LLC/SNAP
3229 3228 *
3230 3229 * Returns:
3231 3230 * FC_FAILURE - buffers could not be allocated.
3232 3231 * FC_TOOMANY - the FCA could not allocate the requested
3233 3232 * number of buffers.
3234 3233 * FC_SUCCESS - unsolicited buffers were allocated.
3235 3234 * FC_UNBOUND - the fca_handle specified is not bound.
3236 3235 *
3237 3236 * Context:
3238 3237 * Kernel context.
3239 3238 */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse while powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/*
	 * Acquire adapter state lock.
	 * NOTE(review): held across KM_SLEEP allocations below —
	 * presumably acceptable for this lock's usage; verify it is
	 * never taken from interrupt context.
	 */
	ADAPTER_STATE_LOCK(ha);

	/* Check the count. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 * NOTE(review): kmem_zalloc(KM_SLEEP) never returns
		 * NULL, so these NULL checks are defensive dead code.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/* IP buffers need DMA memory; */
					/* endianness differs per platform. */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* ELS and others: plain kernel heap. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer: enable/refresh IP receive path if anything landed. */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/* First IP ub_alloc: program the IP init */
				/* control block, then bring up IP f/w. */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3461 3460
3462 3461 /*
3463 3462 * ql_ub_free
3464 3463 * Free unsolicited buffers.
3465 3464 *
3466 3465 * Input:
3467 3466 * fca_handle = handle setup by ql_bind_port().
3468 3467 * count = number of buffers.
3469 3468 * tokens = token array for each buffer.
3470 3469 *
3471 3470 * Returns:
3472 3471 * FC_SUCCESS - the requested buffers have been freed.
3473 3472 * FC_UNBOUND - the fca_handle specified is not bound.
3474 3473 * FC_UB_BADTOKEN - an invalid token was encountered.
3475 3474 * No buffers have been released.
3476 3475 *
3477 3476 * Context:
3478 3477 * Kernel context.
3479 3478 */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.  Buffers are freed one-by-one as
	 * each token validates; a bad token stops the loop, so earlier
	 * tokens in the array may already have been released.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll until the buffer is back in the FCA and not in
		 * callback/acquired state; both locks are dropped for
		 * each 100ms sleep so the owner can return it.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3561 3560
3562 3561 /*
3563 3562 * ql_ub_release
3564 3563 * Release unsolicited buffers from FC Transport
3565 3564 * to FCA for future use.
3566 3565 *
3567 3566 * Input:
3568 3567 * fca_handle = handle setup by ql_bind_port().
3569 3568 * count = number of buffers.
3570 3569 * tokens = token array for each buffer.
3571 3570 *
3572 3571 * Returns:
3573 3572 * FC_SUCCESS - the requested buffers have been released.
3574 3573 * FC_UNBOUND - the fca_handle specified is not bound.
3575 3574 * FC_UB_BADTOKEN - an invalid token was encountered.
3576 3575 * No buffers have been released.
3577 3576 *
3578 3577 * Context:
3579 3578 * Kernel context.
3580 3579 */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: validate every token before touching anything,
	 * so a bad token releases no buffers at all (API contract).
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Second pass: mark each buffer back in the FCA. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t *ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3671 3670
/*
 * ql_abort
 *	Abort a packet.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	pkt = pointer to fc_packet.
 *	flags = KM_SLEEP flag.
 *
 * Returns:
 *	FC_SUCCESS - the packet has successfully aborted.
 *	FC_ABORTED - the packet has successfully aborted.
 *	FC_ABORTING - the packet is being aborted.
 *	FC_ABORT_FAILED - the packet could not be aborted.
 *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
 *	to abort the packet.
 *	FC_BADEXCHANGE - no packet found.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_OFFLINE - the loop is down; nothing to abort.
 *	FC_FAILURE - the packet already completed in the ISP.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	/* Physical adapter owns the pending queue and request ring. */
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue or loop down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Command never reached the ISP; it is still on either
		 * the adapter's pending queue or the LUN's device queue.
		 * Find it, unlink it, and complete it as CS_ABORTED.
		 */
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			/* Not on either queue: nothing matched the packet. */
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Command already finished in the ISP; too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is in the ISP. If its entry is still on the
		 * request ring, invalidate it there; then ask the
		 * firmware to abort the command.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		/* handle == 0 means no outstanding firmware handle. */
		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		/* Firmware abort; result intentionally ignored. */
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3839 3838
/*
 * ql_reset
 *	Reset link or hardware.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	cmd = reset type command.
 *
 * Returns:
 *	FC_SUCCESS - reset has successfully finished.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_FAILURE - reset failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only valid on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump the firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			/*
			 * Virtual ports cannot dump firmware; a loop reset
			 * (if the loop is up) is the closest equivalent.
			 */
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/*
			 * Notify the bound transport of the impending
			 * reset so it releases its unsolicited buffers.
			 */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Tell the task daemon to propagate the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3968 3967
3969 3968 /*
3970 3969 * ql_port_manage
3971 3970 * Perform port management or diagnostics.
3972 3971 *
3973 3972 * Input:
3974 3973 * fca_handle = handle setup by ql_bind_port().
3975 3974 * cmd = pointer to command structure.
3976 3975 *
3977 3976 * Returns:
3978 3977 * FC_SUCCESS - the request completed successfully.
3979 3978 * FC_FAILURE - the request did not complete successfully.
3980 3979 * FC_UNBOUND - the fca_handle specified is not bound.
3981 3980 *
3982 3981 * Context:
3983 3982 * Kernel context.
3984 3983 */
3985 3984 static int
3986 3985 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3987 3986 {
3988 3987 clock_t timer;
3989 3988 uint16_t index;
3990 3989 uint32_t *bp;
3991 3990 port_id_t d_id;
3992 3991 ql_link_t *link;
3993 3992 ql_adapter_state_t *ha, *pha;
3994 3993 ql_tgt_t *tq;
3995 3994 dma_mem_t buffer_xmt, buffer_rcv;
3996 3995 size_t length;
3997 3996 uint32_t cnt;
3998 3997 char buf[80];
3999 3998 lbp_t *lb;
4000 3999 ql_mbx_data_t mr;
4001 4000 app_mbx_cmd_t *mcp;
4002 4001 int i0;
4003 4002 uint8_t *bptr;
4004 4003 int rval2, rval = FC_SUCCESS;
4005 4004 uint32_t opcode;
4006 4005 uint32_t set_flags = 0;
4007 4006
4008 4007 ha = ql_fca_handle_to_state(fca_handle);
4009 4008 if (ha == NULL) {
4010 4009 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4011 4010 (void *)fca_handle);
4012 4011 return (FC_UNBOUND);
4013 4012 }
4014 4013 pha = ha->pha;
4015 4014
4016 4015 QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4017 4016 cmd->pm_cmd_code);
4018 4017
4019 4018 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4020 4019
4021 4020 /*
4022 4021 * Wait for all outstanding commands to complete
4023 4022 */
4024 4023 index = (uint16_t)ql_wait_outstanding(ha);
4025 4024
4026 4025 if (index != MAX_OUTSTANDING_COMMANDS) {
4027 4026 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4028 4027 ql_restart_queues(ha);
4029 4028 EL(ha, "failed, FC_TRAN_BUSY\n");
4030 4029 return (FC_TRAN_BUSY);
4031 4030 }
4032 4031
4033 4032 switch (cmd->pm_cmd_code) {
4034 4033 case FC_PORT_BYPASS:
4035 4034 d_id.b24 = *cmd->pm_cmd_buf;
4036 4035 tq = ql_d_id_to_queue(ha, d_id);
4037 4036 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4038 4037 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4039 4038 rval = FC_FAILURE;
4040 4039 }
4041 4040 break;
4042 4041 case FC_PORT_UNBYPASS:
4043 4042 d_id.b24 = *cmd->pm_cmd_buf;
4044 4043 tq = ql_d_id_to_queue(ha, d_id);
4045 4044 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4046 4045 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4047 4046 rval = FC_FAILURE;
4048 4047 }
4049 4048 break;
4050 4049 case FC_PORT_GET_FW_REV:
4051 4050 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4052 4051 pha->fw_minor_version, pha->fw_subminor_version);
4053 4052 length = strlen(buf) + 1;
4054 4053 if (cmd->pm_data_len < length) {
4055 4054 cmd->pm_data_len = length;
4056 4055 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4057 4056 rval = FC_FAILURE;
4058 4057 } else {
4059 4058 (void) strcpy(cmd->pm_data_buf, buf);
4060 4059 }
4061 4060 break;
4062 4061
4063 4062 case FC_PORT_GET_FCODE_REV: {
4064 4063 caddr_t fcode_ver_buf = NULL;
4065 4064
4066 4065 i0 = 0;
4067 4066 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4068 4067 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4069 4068 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4070 4069 (caddr_t)&fcode_ver_buf, &i0);
4071 4070 length = (uint_t)i0;
4072 4071
4073 4072 if (rval2 != DDI_PROP_SUCCESS) {
4074 4073 EL(ha, "failed, getting version = %xh\n", rval2);
4075 4074 length = 20;
4076 4075 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4077 4076 if (fcode_ver_buf != NULL) {
4078 4077 (void) sprintf(fcode_ver_buf,
4079 4078 "NO FCODE FOUND");
4080 4079 }
4081 4080 }
4082 4081
4083 4082 if (cmd->pm_data_len < length) {
4084 4083 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4085 4084 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4086 4085 cmd->pm_data_len = length;
4087 4086 rval = FC_FAILURE;
4088 4087 } else if (fcode_ver_buf != NULL) {
4089 4088 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4090 4089 length);
4091 4090 }
4092 4091
4093 4092 if (fcode_ver_buf != NULL) {
4094 4093 kmem_free(fcode_ver_buf, length);
4095 4094 }
4096 4095 break;
4097 4096 }
4098 4097
4099 4098 case FC_PORT_GET_DUMP:
4100 4099 QL_DUMP_LOCK(pha);
4101 4100 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4102 4101 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4103 4102 "length=%lxh\n", cmd->pm_data_len);
4104 4103 cmd->pm_data_len = pha->risc_dump_size;
4105 4104 rval = FC_FAILURE;
4106 4105 } else if (pha->ql_dump_state & QL_DUMPING) {
4107 4106 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4108 4107 rval = FC_TRAN_BUSY;
4109 4108 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4110 4109 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4111 4110 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4112 4111 } else {
4113 4112 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4114 4113 rval = FC_FAILURE;
4115 4114 }
4116 4115 QL_DUMP_UNLOCK(pha);
4117 4116 break;
4118 4117 case FC_PORT_FORCE_DUMP:
4119 4118 PORTMANAGE_LOCK(ha);
4120 4119 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4121 4120 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4122 4121 rval = FC_FAILURE;
4123 4122 }
4124 4123 PORTMANAGE_UNLOCK(ha);
4125 4124 break;
4126 4125 case FC_PORT_DOWNLOAD_FW:
4127 4126 PORTMANAGE_LOCK(ha);
4128 4127 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4129 4128 if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4130 4129 (uint32_t)cmd->pm_data_len,
4131 4130 ha->flash_fw_addr << 2) != QL_SUCCESS) {
4132 4131 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4133 4132 rval = FC_FAILURE;
4134 4133 }
4135 4134 ql_reset_chip(ha);
4136 4135 set_flags |= ISP_ABORT_NEEDED;
4137 4136 } else {
4138 4137 /* Save copy of the firmware. */
4139 4138 if (pha->risc_code != NULL) {
4140 4139 kmem_free(pha->risc_code, pha->risc_code_size);
4141 4140 pha->risc_code = NULL;
4142 4141 pha->risc_code_size = 0;
4143 4142 }
4144 4143
4145 4144 pha->risc_code = kmem_alloc(cmd->pm_data_len,
4146 4145 KM_SLEEP);
4147 4146 if (pha->risc_code != NULL) {
4148 4147 pha->risc_code_size =
4149 4148 (uint32_t)cmd->pm_data_len;
4150 4149 bcopy(cmd->pm_data_buf, pha->risc_code,
4151 4150 cmd->pm_data_len);
4152 4151
4153 4152 /* Do abort to force reload. */
4154 4153 ql_reset_chip(ha);
4155 4154 if (ql_abort_isp(ha) != QL_SUCCESS) {
4156 4155 kmem_free(pha->risc_code,
4157 4156 pha->risc_code_size);
4158 4157 pha->risc_code = NULL;
4159 4158 pha->risc_code_size = 0;
4160 4159 ql_reset_chip(ha);
4161 4160 (void) ql_abort_isp(ha);
4162 4161 EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4163 4162 " FC_FAILURE\n");
4164 4163 rval = FC_FAILURE;
4165 4164 }
4166 4165 }
4167 4166 }
4168 4167 PORTMANAGE_UNLOCK(ha);
4169 4168 break;
4170 4169 case FC_PORT_GET_DUMP_SIZE:
4171 4170 bp = (uint32_t *)cmd->pm_data_buf;
4172 4171 *bp = pha->risc_dump_size;
4173 4172 break;
4174 4173 case FC_PORT_DIAG:
4175 4174 /*
4176 4175 * Prevents concurrent diags
4177 4176 */
4178 4177 PORTMANAGE_LOCK(ha);
4179 4178
4180 4179 /* Wait for suspension to end. */
4181 4180 for (timer = 0; timer < 3000 &&
4182 4181 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4183 4182 ql_delay(ha, 10000);
4184 4183 }
4185 4184
4186 4185 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4187 4186 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4188 4187 rval = FC_TRAN_BUSY;
4189 4188 PORTMANAGE_UNLOCK(ha);
4190 4189 break;
4191 4190 }
4192 4191
4193 4192 switch (cmd->pm_cmd_flags) {
4194 4193 case QL_DIAG_EXEFMW:
4195 4194 if (ql_start_firmware(ha) != QL_SUCCESS) {
4196 4195 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4197 4196 rval = FC_FAILURE;
4198 4197 }
4199 4198 break;
4200 4199 case QL_DIAG_CHKCMDQUE:
4201 4200 for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4202 4201 i0++) {
4203 4202 cnt += (pha->outstanding_cmds[i0] != NULL);
4204 4203 }
4205 4204 if (cnt != 0) {
4206 4205 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4207 4206 "FC_FAILURE\n");
4208 4207 rval = FC_FAILURE;
4209 4208 }
4210 4209 break;
4211 4210 case QL_DIAG_FMWCHKSUM:
4212 4211 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4213 4212 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4214 4213 "FC_FAILURE\n");
4215 4214 rval = FC_FAILURE;
4216 4215 }
4217 4216 break;
4218 4217 case QL_DIAG_SLFTST:
4219 4218 if (ql_online_selftest(ha) != QL_SUCCESS) {
4220 4219 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4221 4220 rval = FC_FAILURE;
4222 4221 }
4223 4222 ql_reset_chip(ha);
4224 4223 set_flags |= ISP_ABORT_NEEDED;
4225 4224 break;
4226 4225 case QL_DIAG_REVLVL:
4227 4226 if (cmd->pm_stat_len <
4228 4227 sizeof (ql_adapter_revlvl_t)) {
4229 4228 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4230 4229 "slen=%lxh, rlvllen=%lxh\n",
4231 4230 cmd->pm_stat_len,
4232 4231 sizeof (ql_adapter_revlvl_t));
4233 4232 rval = FC_NOMEM;
4234 4233 } else {
4235 4234 bcopy((void *)&(pha->adapter_stats->revlvl),
4236 4235 cmd->pm_stat_buf,
4237 4236 (size_t)cmd->pm_stat_len);
4238 4237 cmd->pm_stat_len =
4239 4238 sizeof (ql_adapter_revlvl_t);
4240 4239 }
4241 4240 break;
4242 4241 case QL_DIAG_LPBMBX:
4243 4242
4244 4243 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4245 4244 EL(ha, "failed, QL_DIAG_LPBMBX "
4246 4245 "FC_INVALID_REQUEST, pmlen=%lxh, "
4247 4246 "reqd=%lxh\n", cmd->pm_data_len,
4248 4247 sizeof (struct app_mbx_cmd));
4249 4248 rval = FC_INVALID_REQUEST;
4250 4249 break;
4251 4250 }
4252 4251 /*
4253 4252 * Don't do the wrap test on a 2200 when the
4254 4253 * firmware is running.
4255 4254 */
4256 4255 if (!CFG_IST(ha, CFG_CTRL_2200)) {
4257 4256 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4258 4257 mr.mb[1] = mcp->mb[1];
4259 4258 mr.mb[2] = mcp->mb[2];
4260 4259 mr.mb[3] = mcp->mb[3];
4261 4260 mr.mb[4] = mcp->mb[4];
4262 4261 mr.mb[5] = mcp->mb[5];
4263 4262 mr.mb[6] = mcp->mb[6];
4264 4263 mr.mb[7] = mcp->mb[7];
4265 4264
4266 4265 bcopy(&mr.mb[0], &mr.mb[10],
4267 4266 sizeof (uint16_t) * 8);
4268 4267
4269 4268 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4270 4269 EL(ha, "failed, QL_DIAG_LPBMBX "
4271 4270 "FC_FAILURE\n");
4272 4271 rval = FC_FAILURE;
4273 4272 break;
4274 4273 } else {
4275 4274 for (i0 = 1; i0 < 8; i0++) {
4276 4275 if (mr.mb[i0] !=
4277 4276 mr.mb[i0 + 10]) {
4278 4277 EL(ha, "failed, "
4279 4278 "QL_DIAG_LPBMBX "
4280 4279 "FC_FAILURE-2\n");
4281 4280 rval = FC_FAILURE;
4282 4281 break;
4283 4282 }
4284 4283 }
4285 4284 }
4286 4285
4287 4286 if (rval == FC_FAILURE) {
4288 4287 (void) ql_flash_errlog(ha,
4289 4288 FLASH_ERRLOG_ISP_ERR, 0,
4290 4289 RD16_IO_REG(ha, hccr),
4291 4290 RD16_IO_REG(ha, istatus));
4292 4291 set_flags |= ISP_ABORT_NEEDED;
4293 4292 }
4294 4293 }
4295 4294 break;
4296 4295 case QL_DIAG_LPBDTA:
4297 4296 /*
4298 4297 * For loopback data, we receive the
4299 4298 * data back in pm_stat_buf. This provides
4300 4299 * the user an opportunity to compare the
4301 4300 * transmitted and received data.
4302 4301 *
4303 4302 * NB: lb->options are:
4304 4303 * 0 --> Ten bit loopback
4305 4304 * 1 --> One bit loopback
4306 4305 * 2 --> External loopback
4307 4306 */
4308 4307 if (cmd->pm_data_len > 65536) {
4309 4308 rval = FC_TOOMANY;
4310 4309 EL(ha, "failed, QL_DIAG_LPBDTA "
4311 4310 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4312 4311 break;
4313 4312 }
4314 4313 if (ql_get_dma_mem(ha, &buffer_xmt,
4315 4314 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4316 4315 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4317 4316 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4318 4317 rval = FC_NOMEM;
4319 4318 break;
4320 4319 }
4321 4320 if (ql_get_dma_mem(ha, &buffer_rcv,
4322 4321 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4323 4322 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4324 4323 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4325 4324 rval = FC_NOMEM;
4326 4325 break;
4327 4326 }
4328 4327 ddi_rep_put8(buffer_xmt.acc_handle,
4329 4328 (uint8_t *)cmd->pm_data_buf,
4330 4329 (uint8_t *)buffer_xmt.bp,
4331 4330 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4332 4331
4333 4332 /* 22xx's adapter must be in loop mode for test. */
4334 4333 if (CFG_IST(ha, CFG_CTRL_2200)) {
4335 4334 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4336 4335 if (ha->flags & POINT_TO_POINT ||
4337 4336 (ha->task_daemon_flags & LOOP_DOWN &&
4338 4337 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4339 4338 cnt = *bptr;
4340 4339 *bptr = (uint8_t)
4341 4340 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4342 4341 (void) ql_abort_isp(ha);
4343 4342 *bptr = (uint8_t)cnt;
4344 4343 }
4345 4344 }
4346 4345
4347 4346 /* Shutdown IP. */
4348 4347 if (pha->flags & IP_INITIALIZED) {
4349 4348 (void) ql_shutdown_ip(pha);
4350 4349 }
4351 4350
4352 4351 lb = (lbp_t *)cmd->pm_cmd_buf;
4353 4352 lb->transfer_count =
4354 4353 (uint32_t)cmd->pm_data_len;
4355 4354 lb->transfer_segment_count = 0;
4356 4355 lb->receive_segment_count = 0;
4357 4356 lb->transfer_data_address =
4358 4357 buffer_xmt.cookie.dmac_address;
4359 4358 lb->receive_data_address =
4360 4359 buffer_rcv.cookie.dmac_address;
4361 4360
4362 4361 if (ql_loop_back(ha, 0, lb,
4363 4362 buffer_xmt.cookie.dmac_notused,
4364 4363 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4365 4364 bzero((void *)cmd->pm_stat_buf,
4366 4365 cmd->pm_stat_len);
4367 4366 ddi_rep_get8(buffer_rcv.acc_handle,
4368 4367 (uint8_t *)cmd->pm_stat_buf,
4369 4368 (uint8_t *)buffer_rcv.bp,
4370 4369 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4371 4370 rval = FC_SUCCESS;
4372 4371 } else {
4373 4372 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4374 4373 rval = FC_FAILURE;
4375 4374 }
4376 4375
4377 4376 ql_free_phys(ha, &buffer_xmt);
4378 4377 ql_free_phys(ha, &buffer_rcv);
4379 4378
4380 4379 /* Needed to recover the f/w */
4381 4380 set_flags |= ISP_ABORT_NEEDED;
4382 4381
4383 4382 /* Restart IP if it was shutdown. */
4384 4383 if (pha->flags & IP_ENABLED &&
4385 4384 !(pha->flags & IP_INITIALIZED)) {
4386 4385 (void) ql_initialize_ip(pha);
4387 4386 ql_isp_rcvbuf(pha);
4388 4387 }
4389 4388
4390 4389 break;
4391 4390 case QL_DIAG_ECHO: {
4392 4391 /*
4393 4392 * issue an echo command with a user supplied
4394 4393 * data pattern and destination address
4395 4394 */
4396 4395 echo_t echo; /* temp echo struct */
4397 4396
4398 4397 /* Setup echo cmd & adjust for platform */
4399 4398 opcode = QL_ECHO_CMD;
4400 4399 BIG_ENDIAN_32(&opcode);
4401 4400
4402 4401 /*
4403 4402 * due to limitations in the ql
4404 4403 * firmaware the echo data field is
4405 4404 * limited to 220
4406 4405 */
4407 4406 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4408 4407 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4409 4408 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4410 4409 "cmdl1=%lxh, statl2=%lxh\n",
4411 4410 cmd->pm_cmd_len, cmd->pm_stat_len);
4412 4411 rval = FC_TOOMANY;
4413 4412 break;
4414 4413 }
4415 4414
4416 4415 /*
4417 4416 * the input data buffer has the user
4418 4417 * supplied data pattern. The "echoed"
4419 4418 * data will be DMAed into the output
4420 4419 * data buffer. Therefore the length
4421 4420 * of the output buffer must be equal
4422 4421 * to or greater then the input buffer
4423 4422 * length
4424 4423 */
4425 4424 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4426 4425 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4427 4426 " cmdl1=%lxh, statl2=%lxh\n",
4428 4427 cmd->pm_cmd_len, cmd->pm_stat_len);
4429 4428 rval = FC_TOOMANY;
4430 4429 break;
4431 4430 }
4432 4431 /* add four bytes for the opcode */
4433 4432 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4434 4433
4435 4434 /*
4436 4435 * are we 32 or 64 bit addressed???
4437 4436 * We need to get the appropriate
4438 4437 * DMA and set the command options;
4439 4438 * 64 bit (bit 6) or 32 bit
4440 4439 * (no bit 6) addressing.
4441 4440 * while we are at it lets ask for
4442 4441 * real echo (bit 15)
4443 4442 */
4444 4443 echo.options = BIT_15;
4445 4444 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4446 4445 !(CFG_IST(ha, CFG_CTRL_8081))) {
4447 4446 echo.options = (uint16_t)
4448 4447 (echo.options | BIT_6);
4449 4448 }
4450 4449
4451 4450 /*
4452 4451 * Set up the DMA mappings for the
4453 4452 * output and input data buffers.
4454 4453 * First the output buffer
4455 4454 */
4456 4455 if (ql_get_dma_mem(ha, &buffer_xmt,
4457 4456 (uint32_t)(cmd->pm_data_len + 4),
4458 4457 LITTLE_ENDIAN_DMA,
4459 4458 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4460 4459 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4461 4460 rval = FC_NOMEM;
4462 4461 break;
4463 4462 }
4464 4463 echo.transfer_data_address = buffer_xmt.cookie;
4465 4464
4466 4465 /* Next the input buffer */
4467 4466 if (ql_get_dma_mem(ha, &buffer_rcv,
4468 4467 (uint32_t)(cmd->pm_data_len + 4),
4469 4468 LITTLE_ENDIAN_DMA,
4470 4469 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4471 4470 /*
4472 4471 * since we could not allocate
4473 4472 * DMA space for the input
4474 4473 * buffer we need to clean up
4475 4474 * by freeing the DMA space
4476 4475 * we allocated for the output
4477 4476 * buffer
4478 4477 */
4479 4478 ql_free_phys(ha, &buffer_xmt);
4480 4479 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4481 4480 rval = FC_NOMEM;
4482 4481 break;
4483 4482 }
4484 4483 echo.receive_data_address = buffer_rcv.cookie;
4485 4484
4486 4485 /*
4487 4486 * copy the 4 byte ECHO op code to the
4488 4487 * allocated DMA space
4489 4488 */
4490 4489 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4491 4490 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4492 4491
4493 4492 /*
4494 4493 * copy the user supplied data to the
4495 4494 * allocated DMA space
4496 4495 */
4497 4496 ddi_rep_put8(buffer_xmt.acc_handle,
4498 4497 (uint8_t *)cmd->pm_cmd_buf,
4499 4498 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4500 4499 DDI_DEV_AUTOINCR);
4501 4500
4502 4501 /* Shutdown IP. */
4503 4502 if (pha->flags & IP_INITIALIZED) {
4504 4503 (void) ql_shutdown_ip(pha);
4505 4504 }
4506 4505
4507 4506 /* send the echo */
4508 4507 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4509 4508 ddi_rep_put8(buffer_rcv.acc_handle,
4510 4509 (uint8_t *)buffer_rcv.bp + 4,
4511 4510 (uint8_t *)cmd->pm_stat_buf,
4512 4511 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4513 4512 } else {
4514 4513 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4515 4514 rval = FC_FAILURE;
4516 4515 }
4517 4516
4518 4517 /* Restart IP if it was shutdown. */
4519 4518 if (pha->flags & IP_ENABLED &&
4520 4519 !(pha->flags & IP_INITIALIZED)) {
4521 4520 (void) ql_initialize_ip(pha);
4522 4521 ql_isp_rcvbuf(pha);
4523 4522 }
4524 4523 /* free up our DMA buffers */
4525 4524 ql_free_phys(ha, &buffer_xmt);
4526 4525 ql_free_phys(ha, &buffer_rcv);
4527 4526 break;
4528 4527 }
4529 4528 default:
4530 4529 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4531 4530 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4532 4531 rval = FC_INVALID_REQUEST;
4533 4532 break;
4534 4533 }
4535 4534 PORTMANAGE_UNLOCK(ha);
4536 4535 break;
4537 4536 case FC_PORT_LINK_STATE:
4538 4537 /* Check for name equal to null. */
4539 4538 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4540 4539 index++) {
4541 4540 if (cmd->pm_cmd_buf[index] != 0) {
4542 4541 break;
4543 4542 }
4544 4543 }
4545 4544
4546 4545 /* If name not null. */
4547 4546 if (index < 8 && cmd->pm_cmd_len >= 8) {
4548 4547 /* Locate device queue. */
4549 4548 tq = NULL;
4550 4549 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4551 4550 tq == NULL; index++) {
4552 4551 for (link = ha->dev[index].first; link != NULL;
4553 4552 link = link->next) {
4554 4553 tq = link->base_address;
4555 4554
4556 4555 if (bcmp((void *)&tq->port_name[0],
4557 4556 (void *)cmd->pm_cmd_buf, 8) == 0) {
4558 4557 break;
4559 4558 } else {
4560 4559 tq = NULL;
4561 4560 }
4562 4561 }
4563 4562 }
4564 4563
4565 4564 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4566 4565 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4567 4566 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4568 4567 } else {
4569 4568 cnt = FC_PORT_SPEED_MASK(ha->state) |
4570 4569 FC_STATE_OFFLINE;
4571 4570 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4572 4571 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4573 4572 }
4574 4573 } else {
4575 4574 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4576 4575 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4577 4576 }
4578 4577 break;
4579 4578 case FC_PORT_INITIALIZE:
4580 4579 if (cmd->pm_cmd_len >= 8) {
4581 4580 tq = NULL;
4582 4581 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4583 4582 tq == NULL; index++) {
4584 4583 for (link = ha->dev[index].first; link != NULL;
4585 4584 link = link->next) {
4586 4585 tq = link->base_address;
4587 4586
4588 4587 if (bcmp((void *)&tq->port_name[0],
4589 4588 (void *)cmd->pm_cmd_buf, 8) == 0) {
4590 4589 if (!VALID_DEVICE_ID(ha,
4591 4590 tq->loop_id)) {
4592 4591 tq = NULL;
4593 4592 }
4594 4593 break;
4595 4594 } else {
4596 4595 tq = NULL;
4597 4596 }
4598 4597 }
4599 4598 }
4600 4599
4601 4600 if (tq == NULL || ql_target_reset(ha, tq,
4602 4601 ha->loop_reset_delay) != QL_SUCCESS) {
4603 4602 EL(ha, "failed, FC_PORT_INITIALIZE "
4604 4603 "FC_FAILURE\n");
4605 4604 rval = FC_FAILURE;
4606 4605 }
4607 4606 } else {
4608 4607 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4609 4608 "clen=%lxh\n", cmd->pm_cmd_len);
4610 4609
4611 4610 rval = FC_FAILURE;
4612 4611 }
4613 4612 break;
4614 4613 case FC_PORT_RLS:
4615 4614 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4616 4615 EL(ha, "failed, buffer size passed: %lxh, "
4617 4616 "req: %lxh\n", cmd->pm_data_len,
4618 4617 (sizeof (fc_rls_acc_t)));
4619 4618 rval = FC_FAILURE;
4620 4619 } else if (LOOP_NOT_READY(pha)) {
4621 4620 EL(ha, "loop NOT ready\n");
4622 4621 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4623 4622 } else if (ql_get_link_status(ha, ha->loop_id,
4624 4623 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4625 4624 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4626 4625 rval = FC_FAILURE;
4627 4626 #ifdef _BIG_ENDIAN
4628 4627 } else {
4629 4628 fc_rls_acc_t *rls;
4630 4629
4631 4630 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4632 4631 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4633 4632 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4634 4633 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4635 4634 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4636 4635 #endif /* _BIG_ENDIAN */
4637 4636 }
4638 4637 break;
4639 4638 case FC_PORT_GET_NODE_ID:
4640 4639 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4641 4640 cmd->pm_data_buf) != QL_SUCCESS) {
4642 4641 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4643 4642 rval = FC_FAILURE;
4644 4643 }
4645 4644 break;
4646 4645 case FC_PORT_SET_NODE_ID:
4647 4646 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4648 4647 cmd->pm_data_buf) != QL_SUCCESS) {
4649 4648 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4650 4649 rval = FC_FAILURE;
4651 4650 }
4652 4651 break;
4653 4652 case FC_PORT_DOWNLOAD_FCODE:
4654 4653 PORTMANAGE_LOCK(ha);
4655 4654 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4656 4655 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4657 4656 (uint32_t)cmd->pm_data_len);
4658 4657 } else {
4659 4658 if (cmd->pm_data_buf[0] == 4 &&
4660 4659 cmd->pm_data_buf[8] == 0 &&
4661 4660 cmd->pm_data_buf[9] == 0x10 &&
4662 4661 cmd->pm_data_buf[10] == 0 &&
4663 4662 cmd->pm_data_buf[11] == 0) {
4664 4663 rval = ql_24xx_load_flash(ha,
4665 4664 (uint8_t *)cmd->pm_data_buf,
4666 4665 (uint32_t)cmd->pm_data_len,
4667 4666 ha->flash_fw_addr << 2);
4668 4667 } else {
4669 4668 rval = ql_24xx_load_flash(ha,
4670 4669 (uint8_t *)cmd->pm_data_buf,
4671 4670 (uint32_t)cmd->pm_data_len, 0);
4672 4671 }
4673 4672 }
4674 4673
4675 4674 if (rval != QL_SUCCESS) {
4676 4675 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4677 4676 rval = FC_FAILURE;
4678 4677 } else {
4679 4678 rval = FC_SUCCESS;
4680 4679 }
4681 4680 ql_reset_chip(ha);
4682 4681 set_flags |= ISP_ABORT_NEEDED;
4683 4682 PORTMANAGE_UNLOCK(ha);
4684 4683 break;
4685 4684 default:
4686 4685 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4687 4686 rval = FC_BADCMD;
4688 4687 break;
4689 4688 }
4690 4689
4691 4690 /* Wait for suspension to end. */
4692 4691 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4693 4692 timer = 0;
4694 4693
4695 4694 while (timer++ < 3000 &&
4696 4695 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4697 4696 ql_delay(ha, 10000);
4698 4697 }
4699 4698
4700 4699 ql_restart_queues(ha);
4701 4700
4702 4701 if (rval != FC_SUCCESS) {
4703 4702 EL(ha, "failed, rval = %xh\n", rval);
4704 4703 } else {
4705 4704 /*EMPTY*/
4706 4705 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4707 4706 }
4708 4707
4709 4708 return (rval);
4710 4709 }
4711 4710
/*
 * ql_get_device
 *	Returns the device queue pointer (opaque device handle) for a D_ID.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	d_id = 24 bit port ID of the device.
 *
 * Returns:
 *	device queue pointer, NULL = failure (bad handle or unknown D_ID).
 *
 * Context:
 *	Kernel context.
 */
4712 4711 static opaque_t
4713 4712 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4714 4713 {
4715 4714 	port_id_t		id;
4716 4715 	ql_adapter_state_t	*ha;
4717 4716 	ql_tgt_t		*tq;
4718 4717
	/* Normalize the caller-supplied port ID; reserved bits must be zero. */
4719 4718 	id.r.rsvd_1 = 0;
4720 4719 	id.b24 = d_id.port_id;
4721 4720
	/* Verify the handle was issued by this FCA instance. */
4722 4721 	ha = ql_fca_handle_to_state(fca_handle);
4723 4722 	if (ha == NULL) {
4724 4723 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4725 4724 		    (void *)fca_handle);
4726 4725 		return (NULL);
4727 4726 	}
4728 4727 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4729 4728
4730 4729 	tq = ql_d_id_to_queue(ha, id);
4731 4730
4732 4731 	if (tq == NULL) {
4733 4732 		EL(ha, "failed, tq=NULL\n");
4734 4733 	} else {
4735 4734 		/*EMPTY*/
4736 4735 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4737 4736 	}
4738 4737 	return (tq);
4739 4738 }
4740 4739
4741 4740 /* ************************************************************************ */
4742 4741 /* FCA Driver Local Support Functions. */
4743 4742 /* ************************************************************************ */
4744 4743
4745 4744 /*
4746 4745 * ql_cmd_setup
4747 4746 * Verifies proper command.
4748 4747 *
4749 4748 * Input:
4750 4749 * fca_handle = handle setup by ql_bind_port().
4751 4750 * pkt = pointer to fc_packet.
4752 4751 * rval = pointer for return value.
4753 4752 *
4754 4753 * Returns:
4755 4754 * Adapter state pointer, NULL = failure.
4756 4755 *
4757 4756 * Context:
4758 4757 * Kernel context.
4759 4758 */
4760 4759 static ql_adapter_state_t *
4761 4760 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4762 4761 {
4763 4762 	ql_adapter_state_t	*ha, *pha;
4764 4763 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
4765 4764 	ql_tgt_t		*tq;
4766 4765 	port_id_t		d_id;
4767 4766
4768 4767 	pkt->pkt_resp_resid = 0;
4769 4768 	pkt->pkt_data_resid = 0;
4770 4769
4771 4770 	/* check that the handle is assigned by this FCA */
4772 4771 	ha = ql_fca_handle_to_state(fca_handle);
4773 4772 	if (ha == NULL) {
4774 4773 		*rval = FC_UNBOUND;
4775 4774 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4776 4775 		    (void *)fca_handle);
4777 4776 		return (NULL);
4778 4777 	}
4779 4778 	pha = ha->pha;
4780 4779
4781 4780 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4782 4781
	/* During panic/crash dump, skip all further gating and accept. */
4783 4782 	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4784 4783 		return (ha);
4785 4784 	}
4786 4785
	/* Reject when the (physical) adapter is not online. */
4787 4786 	if (!(pha->flags & ONLINE)) {
4788 4787 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
4789 4788 		pkt->pkt_reason = FC_REASON_HW_ERROR;
4790 4789 		*rval = FC_TRANSPORT_ERROR;
4791 4790 		EL(ha, "failed, not online hf=%xh\n", pha->flags);
4792 4791 		return (NULL);
4793 4792 	}
4794 4793
4795 4794 	/* Exit on loop down. */
4796 4795 	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4797 4796 	    pha->task_daemon_flags & LOOP_DOWN &&
4798 4797 	    pha->loop_down_timer <= pha->loop_down_abort_time) {
4799 4798 		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4800 4799 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4801 4800 		*rval = FC_OFFLINE;
4802 4801 		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4803 4802 		return (NULL);
4804 4803 	}
4805 4804
	/*
	 * For FCP commands, resolve the cached target queue (refreshing it
	 * from the D_ID when stale) and fail fast if the target is busy
	 * with RSCN processing or pending authentication.
	 */
4806 4805 	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4807 4806 	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4808 4807 		tq = (ql_tgt_t *)pkt->pkt_fca_device;
4809 4808 		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4810 4809 			d_id.r.rsvd_1 = 0;
4811 4810 			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4812 4811 			tq = ql_d_id_to_queue(ha, d_id);
4813 4812
4814 4813 			pkt->pkt_fca_device = (opaque_t)tq;
4815 4814 		}
4816 4815
4817 4816 		if (tq != NULL) {
4818 4817 			DEVICE_QUEUE_LOCK(tq);
4819 4818 			if (tq->flags & (TQF_RSCN_RCVD |
4820 4819 			    TQF_NEED_AUTHENTICATION)) {
4821 4820 				*rval = FC_DEVICE_BUSY;
4822 4821 				DEVICE_QUEUE_UNLOCK(tq);
4823 4822 				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4824 4823 				    tq->flags, tq->d_id.b24);
4825 4824 				return (NULL);
4826 4825 			}
4827 4826 			DEVICE_QUEUE_UNLOCK(tq);
4828 4827 		}
4829 4828 	}
4830 4829
4831 4830 	/*
4832 4831 	 * Check DMA pointers.
4833 4832 	 */
4834 4833 	*rval = DDI_SUCCESS;
4835 4834 	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4836 4835 		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4837 4836 		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4838 4837 		if (*rval == DDI_SUCCESS) {
4839 4838 			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4840 4839 		}
4841 4840 	}
4842 4841
4843 4842 	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4844 4843 	    pkt->pkt_rsplen != 0) {
4845 4844 		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4846 4845 		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4847 4846 		if (*rval == DDI_SUCCESS) {
4848 4847 			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4849 4848 		}
4850 4849 	}
4851 4850
4852 4851 	/*
4853 4852 	 * Minimum branch conditional; Change it with care.
4854 4853 	 */
	/*
	 * The bitwise '&' (no short-circuit) below is deliberate: each
	 * operand is a side-effect-free comparison, and evaluating all of
	 * them avoids extra conditional branches.
	 */
4855 4854 	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4856 4855 	    (pkt->pkt_datalen != 0)) != 0) {
4857 4856 		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4858 4857 		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4859 4858 		if (*rval == DDI_SUCCESS) {
4860 4859 			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4861 4860 		}
4862 4861 	}
4863 4862
4864 4863 	if (*rval != DDI_SUCCESS) {
4865 4864 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
4866 4865 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
4867 4866
4868 4867 		/* Do command callback. */
4869 4868 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4870 4869 			ql_awaken_task_daemon(ha, sp, 0, 0);
4871 4870 		}
4872 4871 		*rval = FC_BADPACKET;
4873 4872 		EL(ha, "failed, bad DMA pointers\n");
4874 4873 		return (NULL);
4875 4874 	}
4876 4875
	/* Verify the srb in pkt_fca_private carries this driver's brand. */
4877 4876 	if (sp->magic_number != QL_FCA_BRAND) {
4878 4877 		*rval = FC_BADPACKET;
4879 4878 		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4880 4879 		return (NULL);
4881 4880 	}
4882 4881 	*rval = FC_SUCCESS;
4883 4882
4884 4883 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4885 4884
4886 4885 	return (ha);
4887 4886 }
4888 4887
4889 4888 /*
4890 4889 * ql_els_plogi
4891 4890  * Issue an extended link service port login request.
4892 4891 *
4893 4892 * Input:
4894 4893 * ha = adapter state pointer.
4895 4894 * pkt = pointer to fc_packet.
4896 4895 *
4897 4896 * Returns:
4898 4897 * FC_SUCCESS - the packet was accepted for transport.
4899 4898 * FC_TRANSPORT_ERROR - a transport error occurred.
4900 4899 *
4901 4900 * Context:
4902 4901 * Kernel context.
4903 4902 */
4904 4903 static int
4905 4904 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4906 4905 {
4907 4906 	ql_tgt_t		*tq = NULL;
4908 4907 	port_id_t		d_id;
4909 4908 	la_els_logi_t		acc;
4910 4909 	class_svc_param_t	*class3_param;
4911 4910 	int			ret;
4912 4911 	int			rval = FC_SUCCESS;
4913 4912
4914 4913 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4915 4914 	    pkt->pkt_cmd_fhdr.d_id);
4916 4915
4917 4916 	TASK_DAEMON_LOCK(ha);
4918 4917 	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4919 4918 		TASK_DAEMON_UNLOCK(ha);
4920 4919 		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4921 4920 		return (FC_OFFLINE);
4922 4921 	}
4923 4922 	TASK_DAEMON_UNLOCK(ha);
4924 4923
4925 4924 	bzero(&acc, sizeof (acc));
4926 4925 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4927 4926
4928 4927 	ret = QL_SUCCESS;
4929 4928
4930 4929 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4931 4930 		/*
		 * In p2p topology the port sends a PLOGI after determining
		 * it has the N_Port login initiative.
4934 4933 		 */
4935 4934 		ret = ql_p2p_plogi(ha, pkt);
4936 4935 	}
	/* QL_CONSUMED: the request was queued as an ELS passthru iocb. */
4937 4936 	if (ret == QL_CONSUMED) {
4938 4937 		return (ret);
4939 4938 	}
4940 4939
	/* Log the port in; retry once if the firmware reports loop ID used. */
4941 4940 	switch (ret = ql_login_port(ha, d_id)) {
4942 4941 	case QL_SUCCESS:
4943 4942 		tq = ql_d_id_to_queue(ha, d_id);
4944 4943 		break;
4945 4944
4946 4945 	case QL_LOOP_ID_USED:
4947 4946 		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4948 4947 			tq = ql_d_id_to_queue(ha, d_id);
4949 4948 		}
4950 4949 		break;
4951 4950
4952 4951 	default:
4953 4952 		break;
4954 4953 	}
4955 4954
4956 4955 	if (ret != QL_SUCCESS) {
4957 4956 		/*
4958 4957 		 * Invalidate this entry so as to seek a fresh loop ID
4959 4958 		 * in case firmware reassigns it to something else
4960 4959 		 */
4961 4960 		tq = ql_d_id_to_queue(ha, d_id);
4962 4961 		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4963 4962 			tq->loop_id = PORT_NO_LOOP_ID;
4964 4963 		}
4965 4964 	} else if (tq) {
		/* Refresh the port database (sends an ADISC via firmware). */
4966 4965 		(void) ql_get_port_database(ha, tq, PDF_ADISC);
4967 4966 	}
4968 4967
	/* Login succeeded and the port database shows the port logged in. */
4969 4968 	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4970 4969 	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4971 4970
4972 4971 		/* Build ACC. */
4973 4972 		acc.ls_code.ls_code = LA_ELS_ACC;
4974 4973 		acc.common_service.fcph_version = 0x2006;
4975 4974 		acc.common_service.cmn_features = 0x8800;
4976 4975 		acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
4977 4976 		acc.common_service.conc_sequences = 0xff;
4978 4977 		acc.common_service.relative_offset = 0x03;
4979 4978 		acc.common_service.e_d_tov = 0x7d0;
4980 4979
4981 4980 		bcopy((void *)&tq->port_name[0],
4982 4981 		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4983 4982 		bcopy((void *)&tq->node_name[0],
4984 4983 		    (void *)&acc.node_ww_name.raw_wwn[0], 8);
4985 4984
4986 4985 		class3_param = (class_svc_param_t *)&acc.class_3;
4987 4986 		class3_param->class_valid_svc_opt = 0x8000;
4988 4987 		class3_param->recipient_ctl = tq->class3_recipient_ctl;
4989 4988 		class3_param->rcv_data_size = tq->class3_rcv_data_size;
4990 4989 		class3_param->conc_sequences = tq->class3_conc_sequences;
4991 4990 		class3_param->open_sequences_per_exch =
4992 4991 		    tq->class3_open_sequences_per_exch;
4993 4992
4994 4993 		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
4995 4994 			acc.ls_code.ls_code = LA_ELS_RJT;
4996 4995 			pkt->pkt_state = FC_PKT_TRAN_BSY;
4997 4996 			pkt->pkt_reason = FC_REASON_XCHG_BSY;
4998 4997 			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
4999 4998 			rval = FC_TRAN_BUSY;
5000 4999 		} else {
5001 5000 			DEVICE_QUEUE_LOCK(tq);
5002 5001 			tq->logout_sent = 0;
5003 5002 			tq->flags &= ~TQF_NEED_AUTHENTICATION;
5004 5003 			if (CFG_IST(ha, CFG_CTRL_242581)) {
5005 5004 				tq->flags |= TQF_IIDMA_NEEDED;
5006 5005 			}
5007 5006 			DEVICE_QUEUE_UNLOCK(tq);
5008 5007
5009 5008 			if (CFG_IST(ha, CFG_CTRL_242581)) {
5010 5009 				TASK_DAEMON_LOCK(ha);
5011 5010 				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5012 5011 				TASK_DAEMON_UNLOCK(ha);
5013 5012 			}
5014 5013
5015 5014 			pkt->pkt_state = FC_PKT_SUCCESS;
5016 5015 		}
5017 5016 	} else {
5018 5017 		/* Build RJT. */
5019 5018 		acc.ls_code.ls_code = LA_ELS_RJT;
5020 5019
		/* Map the firmware status to an FCA packet state/reason. */
5021 5020 		switch (ret) {
5022 5021 		case QL_FUNCTION_TIMEOUT:
5023 5022 			pkt->pkt_state = FC_PKT_TIMEOUT;
5024 5023 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5025 5024 			break;
5026 5025
5027 5026 		case QL_MEMORY_ALLOC_FAILED:
5028 5027 			pkt->pkt_state = FC_PKT_LOCAL_BSY;
5029 5028 			pkt->pkt_reason = FC_REASON_NOMEM;
5030 5029 			rval = FC_TRAN_BUSY;
5031 5030 			break;
5032 5031
5033 5032 		case QL_FABRIC_NOT_INITIALIZED:
5034 5033 			pkt->pkt_state = FC_PKT_FABRIC_BSY;
5035 5034 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5036 5035 			rval = FC_TRAN_BUSY;
5037 5036 			break;
5038 5037
5039 5038 		default:
5040 5039 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5041 5040 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5042 5041 			break;
5043 5042 		}
5044 5043
5045 5044 		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
5046 5045 		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
5047 5046 		    pkt->pkt_reason, ret, rval);
5048 5047 	}
5049 5048
5050 5049 	if (tq != NULL) {
5051 5050 		DEVICE_QUEUE_LOCK(tq);
5052 5051 		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5053 5052 		if (rval == FC_TRAN_BUSY) {
5054 5053 			if (tq->d_id.b24 != BROADCAST_ADDR) {
5055 5054 				tq->flags |= TQF_NEED_AUTHENTICATION;
5056 5055 			}
5057 5056 		}
5058 5057 		DEVICE_QUEUE_UNLOCK(tq);
5059 5058 	}
5060 5059
	/* Copy the ACC/RJT payload into the caller's response buffer. */
5061 5060 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5062 5061 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5063 5062
5064 5063 	if (rval != FC_SUCCESS) {
5065 5064 		EL(ha, "failed, rval = %xh\n", rval);
5066 5065 	} else {
5067 5066 		/*EMPTY*/
5068 5067 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5069 5068 	}
5070 5069 	return (rval);
5071 5070 }
5072 5071
5073 5072 /*
5074 5073 * ql_p2p_plogi
5075 5074 * Start an extended link service port login request using
5076 5075 * an ELS Passthru iocb.
5077 5076 *
5078 5077 * Input:
5079 5078 * ha = adapter state pointer.
5080 5079 * pkt = pointer to fc_packet.
5081 5080 *
5082 5081 * Returns:
5083 5082  *	QL_CONSUMED - the iocb was queued for transport.
5084 5083 *
5085 5084 * Context:
5086 5085 * Kernel context.
5087 5086 */
5088 5087 static int
5089 5088 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5090 5089 {
5091 5090 	uint16_t	id;
	/* Scratch target entry used only to probe the firmware database. */
5092 5091 	ql_tgt_t	tmp;
5093 5092 	ql_tgt_t	*tq = &tmp;
5094 5093 	int		rval;
5095 5094 	port_id_t	d_id;
5096 5095 	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;
5097 5096
5098 5097 	tq->d_id.b.al_pa = 0;
5099 5098 	tq->d_id.b.area = 0;
5100 5099 	tq->d_id.b.domain = 0;
5101 5100
5102 5101 	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
5106 5105 	 */
5107 5106 	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5108 5107 		tq->loop_id = id;
5109 5108 		rval = ql_get_port_database(ha, tq, PDF_NONE);
5110 5109 		EL(ha, "rval=%xh\n", rval);
5111 5110 		/* check all the ones not logged in for possible use */
5112 5111 		if (rval == QL_NOT_LOGGED_IN) {
5113 5112 			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5114 5113 				ha->n_port->n_port_handle = tq->loop_id;
5115 5114 				EL(ha, "n_port_handle =%xh, master state=%x\n",
5116 5115 				    tq->loop_id, tq->master_state);
5117 5116 				break;
5118 5117 			}
5119 5118 			/*
5120 5119 			 * Use a 'port unavailable' entry only
5121 5120 			 * if we used it before.
5122 5121 			 */
5123 5122 			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5124 5123 				/* if the port_id matches, reuse it */
5125 5124 				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5126 5125 					EL(ha, "n_port_handle =%xh,"
5127 5126 					    "master state=%xh\n",
5128 5127 					    tq->loop_id, tq->master_state);
5129 5128 					break;
5130 5129 				} else if (tq->loop_id ==
5131 5130 				    ha->n_port->n_port_handle) {
					/*
					 * Skip past this handle; bump it by
					 * two via a temporary to avoid a
					 * lint error.
					 */
5133 5132 					uint16_t *hndl;
5134 5133 					uint16_t val;
5135 5134
5136 5135 					hndl = &ha->n_port->n_port_handle;
5137 5136 					val = *hndl;
5138 5137 					val++;
5139 5138 					val++;
5140 5139 					*hndl = val;
5141 5140 				}
5142 5141 				EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5143 5142 				    "master state=%x\n", rval, id, tq->loop_id,
5144 5143 				    tq->master_state);
5145 5144 			}
5146 5145
5147 5146 		}
5148 5147 		if (rval == QL_SUCCESS) {
			/* A non-initiator entry already logged in: use it. */
5149 5148 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5150 5149 				ha->n_port->n_port_handle = tq->loop_id;
5151 5150 				EL(ha, "n_port_handle =%xh, master state=%x\n",
5152 5151 				    tq->loop_id, tq->master_state);
5153 5152 				break;
5154 5153 			}
5155 5154 			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5156 5155 			    "master state=%x\n", rval, id, tq->loop_id,
5157 5156 			    tq->master_state);
5158 5157 		}
5159 5158 	}
	/* Flush the PLOGI payload to memory before handing it to hardware. */
5160 5159 	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5161 5160
5162 5161 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5163 5162 	tq = ql_d_id_to_queue(ha, d_id);
	/*
	 * NOTE(review): ql_d_id_to_queue() can return NULL here; confirm
	 * that ql_timeout_insert() tolerates tq == NULL.
	 */
5164 5163 	ql_timeout_insert(ha, tq, sp);
5165 5164 	ql_start_iocb(ha, sp);
5166 5165
	/* The iocb was queued; completion is reported asynchronously. */
5167 5166 	return (QL_CONSUMED);
5168 5167 }
5169 5168
5170 5169
5171 5170 /*
5172 5171 * ql_els_flogi
5173 5172  * Issue an extended link service fabric login request.
5174 5173 *
5175 5174 * Input:
5176 5175 * ha = adapter state pointer.
5177 5176 * pkt = pointer to fc_packet.
5178 5177 *
5179 5178 * Returns:
5180 5179 * FC_SUCCESS - the packet was accepted for transport.
5181 5180 * FC_TRANSPORT_ERROR - a transport error occurred.
5182 5181 *
5183 5182 * Context:
5184 5183 * Kernel context.
5185 5184 */
5186 5185 static int
5187 5186 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5188 5187 {
5189 5188 	ql_tgt_t		*tq = NULL;
5190 5189 	port_id_t		d_id;
5191 5190 	la_els_logi_t		acc;
5192 5191 	class_svc_param_t	*class3_param;
5193 5192 	int			rval = FC_SUCCESS;
	/* Nonzero when accepting without a port database entry (p2p). */
5194 5193 	int			accept = 0;
5195 5194
5196 5195 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5197 5196 	    pkt->pkt_cmd_fhdr.d_id);
5198 5197
5199 5198 	bzero(&acc, sizeof (acc));
5200 5199 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5201 5200
5202 5201 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5203 5202 		/*
5204 5203 		 * d_id of zero in a FLOGI accept response in a point to point
5205 5204 		 * topology triggers evaluation of N Port login initiative.
5206 5205 		 */
5207 5206 		pkt->pkt_resp_fhdr.d_id = 0;
5208 5207 		/*
5209 5208 		 * An N_Port already logged in with the firmware
5210 5209 		 * will have the only database entry.
5211 5210 		 */
5212 5211 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5213 5212 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5214 5213 		}
5215 5214
5216 5215 		if (tq != NULL) {
5217 5216 			/*
5218 5217 			 * If the target port has initiative send
5219 5218 			 * up a PLOGI about the new device.
5220 5219 			 */
5221 5220 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5222 5221 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5223 5222 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5224 5223 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5225 5224 				ha->send_plogi_timer = 3;
5226 5225 			} else {
5227 5226 				ha->send_plogi_timer = 0;
5228 5227 			}
5229 5228 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5230 5229 		} else {
5231 5230 			/*
5232 5231 			 * An N_Port not logged in with the firmware will not
5233 5232 			 * have a database entry.  We accept anyway and rely
5234 5233 			 * on a PLOGI from the upper layers to set the d_id
5235 5234 			 * and s_id.
5236 5235 			 */
5237 5236 			accept = 1;
5238 5237 		}
5239 5238 	} else {
5240 5239 		tq = ql_d_id_to_queue(ha, d_id);
5241 5240 	}
	/*
	 * 'accept' is an int flag; compare against 0, not the null pointer
	 * constant NULL (int/pointer comparison flagged by -W cleanups).
	 */
5242 5241 	if ((tq != NULL) || (accept != 0)) {
5243 5242 		/* Build ACC. */
5244 5243 		pkt->pkt_state = FC_PKT_SUCCESS;
5245 5244 		class3_param = (class_svc_param_t *)&acc.class_3;
5246 5245
5247 5246 		acc.ls_code.ls_code = LA_ELS_ACC;
5248 5247 		acc.common_service.fcph_version = 0x2006;
5249 5248 		if (ha->topology & QL_N_PORT) {
5250 5249 			/* clear F_Port indicator */
5251 5250 			acc.common_service.cmn_features = 0x0800;
5252 5251 		} else {
5253 5252 			acc.common_service.cmn_features = 0x1b00;
5254 5253 		}
		/* rx_bufsize comes from the chip-specific init control block. */
5255 5254 		CFG_IST(ha, CFG_CTRL_24258081) ?
5256 5255 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5257 5256 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5258 5257 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5259 5258 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5260 5259 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5261 5260 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5262 5261 		acc.common_service.conc_sequences = 0xff;
5263 5262 		acc.common_service.relative_offset = 0x03;
5264 5263 		acc.common_service.e_d_tov = 0x7d0;
5265 5264 		if (accept) {
5266 5265 			/* Use the saved N_Port WWNN and WWPN */
5267 5266 			if (ha->n_port != NULL) {
5268 5267 				bcopy((void *)&ha->n_port->port_name[0],
5269 5268 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5270 5269 				bcopy((void *)&ha->n_port->node_name[0],
5271 5270 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5272 5271 				/* mark service options invalid */
5273 5272 				class3_param->class_valid_svc_opt = 0x0800;
5274 5273 			} else {
5275 5274 				EL(ha, "ha->n_port is NULL\n");
5276 5275 				/* Build RJT. */
5277 5276 				acc.ls_code.ls_code = LA_ELS_RJT;
5278 5277
5279 5278 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5280 5279 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5281 5280 			}
5282 5281 		} else {
5283 5282 			bcopy((void *)&tq->port_name[0],
5284 5283 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5285 5284 			bcopy((void *)&tq->node_name[0],
5286 5285 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5287 5286
5288 5287 			class3_param = (class_svc_param_t *)&acc.class_3;
5289 5288 			class3_param->class_valid_svc_opt = 0x8800;
5290 5289 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5291 5290 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5292 5291 			class3_param->conc_sequences =
5293 5292 			    tq->class3_conc_sequences;
5294 5293 			class3_param->open_sequences_per_exch =
5295 5294 			    tq->class3_open_sequences_per_exch;
5296 5295 		}
5297 5296 	} else {
5298 5297 		/* Build RJT. */
5299 5298 		acc.ls_code.ls_code = LA_ELS_RJT;
5300 5299
5301 5300 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5302 5301 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5303 5302 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5304 5303 	}
5305 5304
	/* Copy the ACC/RJT payload into the caller's response buffer. */
5306 5305 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5307 5306 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5308 5307
5309 5308 	if (rval != FC_SUCCESS) {
5310 5309 		EL(ha, "failed, rval = %xh\n", rval);
5311 5310 	} else {
5312 5311 		/*EMPTY*/
5313 5312 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5314 5313 	}
5315 5314 	return (rval);
5316 5315 }
5317 5316
5318 5317 /*
5319 5318 * ql_els_logo
5320 5319  * Issue an extended link service logout request.
5321 5320 *
5322 5321 * Input:
5323 5322 * ha = adapter state pointer.
5324 5323 * pkt = pointer to fc_packet.
5325 5324 *
5326 5325 * Returns:
5327 5326 * FC_SUCCESS - the packet was accepted for transport.
5328 5327 * FC_TRANSPORT_ERROR - a transport error occurred.
5329 5328 *
5330 5329 * Context:
5331 5330 * Kernel context.
5332 5331 */
5333 5332 static int
5334 5333 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5335 5334 {
5336 5335 	port_id_t	d_id;
5337 5336 	ql_tgt_t	*tq;
5338 5337 	la_els_logo_t	acc;
5339 5338 	int		rval = FC_SUCCESS;
5340 5339
5341 5340 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5342 5341 	    pkt->pkt_cmd_fhdr.d_id);
5343 5342
5344 5343 	bzero(&acc, sizeof (acc));
5345 5344 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5346 5345
5347 5346 	tq = ql_d_id_to_queue(ha, d_id);
5348 5347 	if (tq) {
5349 5348 		DEVICE_QUEUE_LOCK(tq);
		/* Nothing to drain or log out for the broadcast address. */
5350 5349 		if (tq->d_id.b24 == BROADCAST_ADDR) {
5351 5350 			DEVICE_QUEUE_UNLOCK(tq);
5352 5351 			return (FC_SUCCESS);
5353 5352 		}
5354 5353
5355 5354 		tq->flags |= TQF_NEED_AUTHENTICATION;
5356 5355
		/*
		 * Abort outstanding commands and poll until the target's
		 * outstanding count drops to zero.  The queue lock is
		 * dropped around the abort/delay and retaken to re-check.
		 */
5357 5356 		do {
5358 5357 			DEVICE_QUEUE_UNLOCK(tq);
5359 5358 			(void) ql_abort_device(ha, tq, 1);
5360 5359
5361 5360 			/*
5362 5361 			 * Wait for commands to drain in F/W (doesn't
5363 5362 			 * take more than a few milliseconds)
5364 5363 			 */
5365 5364 			ql_delay(ha, 10000);
5366 5365
5367 5366 			DEVICE_QUEUE_LOCK(tq);
5368 5367 		} while (tq->outcnt);
5369 5368
5370 5369 		DEVICE_QUEUE_UNLOCK(tq);
5371 5370 	}
5372 5371
5373 5372 	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5374 5373 		/* Build ACC. */
5375 5374 		acc.ls_code.ls_code = LA_ELS_ACC;
5376 5375
5377 5376 		pkt->pkt_state = FC_PKT_SUCCESS;
5378 5377 	} else {
5379 5378 		/* Build RJT. */
5380 5379 		acc.ls_code.ls_code = LA_ELS_RJT;
5381 5380
5382 5381 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5383 5382 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5384 5383 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5385 5384 	}
5386 5385
	/* Copy the ACC/RJT payload into the caller's response buffer. */
5387 5386 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5388 5387 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5389 5388
5390 5389 	if (rval != FC_SUCCESS) {
5391 5390 		EL(ha, "failed, rval = %xh\n", rval);
5392 5391 	} else {
5393 5392 		/*EMPTY*/
5394 5393 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5395 5394 	}
5396 5395 	return (rval);
5397 5396 }
5398 5397
5399 5398 /*
5400 5399 * ql_els_prli
5401 5400  * Issue an extended link service process login request.
5402 5401 *
5403 5402 * Input:
5404 5403 * ha = adapter state pointer.
5405 5404 * pkt = pointer to fc_packet.
5406 5405 *
5407 5406 * Returns:
5408 5407 * FC_SUCCESS - the packet was accepted for transport.
5409 5408 * FC_TRANSPORT_ERROR - a transport error occurred.
5410 5409 *
5411 5410 * Context:
5412 5411 * Kernel context.
5413 5412 */
5414 5413 static int
5415 5414 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5416 5415 {
5417 5416 	ql_tgt_t		*tq;
5418 5417 	port_id_t		d_id;
5419 5418 	la_els_prli_t		acc;
5420 5419 	prli_svc_param_t	*param;
5421 5420 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
5422 5421 	int			rval = FC_SUCCESS;
5423 5422
5424 5423 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5425 5424 	    pkt->pkt_cmd_fhdr.d_id);
5426 5425
5427 5426 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5428 5427
5429 5428 	tq = ql_d_id_to_queue(ha, d_id);
5430 5429 	if (tq != NULL) {
5431 5430 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5432 5431
		/*
		 * In N_Port (p2p) topology with the PLOGI completed, queue
		 * the PRLI to the firmware as an iocb.  Note that
		 * QL_CONSUMED (not an FC_* code) is returned to the caller
		 * in that case; completion is reported asynchronously.
		 */
5433 5432 		if ((ha->topology & QL_N_PORT) &&
5434 5433 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5435 5434 			ql_timeout_insert(ha, tq, sp);
5436 5435 			ql_start_iocb(ha, sp);
5437 5436 			rval = QL_CONSUMED;
5438 5437 		} else {
5439 5438 			/* Build ACC. */
5440 5439 			bzero(&acc, sizeof (acc));
5441 5440 			acc.ls_code = LA_ELS_ACC;
5442 5441 			acc.page_length = 0x10;
5443 5442 			acc.payload_length = tq->prli_payload_length;
5444 5443
5445 5444 			param = (prli_svc_param_t *)&acc.service_params[0];
5446 5445 			param->type = 0x08;
5447 5446 			param->rsvd = 0x00;
5448 5447 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5449 5448 			param->process_flags = tq->prli_svc_param_word_3;
5450 5449
5451 5450 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5452 5451 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5453 5452 			    DDI_DEV_AUTOINCR);
5454 5453
5455 5454 			pkt->pkt_state = FC_PKT_SUCCESS;
5456 5455 		}
5457 5456 	} else {
5458 5457 		la_els_rjt_t rjt;
5459 5458
5460 5459 		/* Build RJT. */
5461 5460 		bzero(&rjt, sizeof (rjt));
5462 5461 		rjt.ls_code.ls_code = LA_ELS_RJT;
5463 5462
5464 5463 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5465 5464 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5466 5465
5467 5466 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5468 5467 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5469 5468 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5470 5469 	}
5471 5470
5472 5471 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5473 5472 		EL(ha, "failed, rval = %xh\n", rval);
5474 5473 	} else {
5475 5474 		/*EMPTY*/
5476 5475 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5477 5476 	}
5478 5477 	return (rval);
5479 5478 }
5480 5479
5481 5480 /*
5482 5481 * ql_els_prlo
5483 5482  * Issue an extended link service process logout request.
5484 5483 *
5485 5484 * Input:
5486 5485 * ha = adapter state pointer.
5487 5486 * pkt = pointer to fc_packet.
5488 5487 *
5489 5488 * Returns:
5490 5489 * FC_SUCCESS - the packet was accepted for transport.
5491 5490 * FC_TRANSPORT_ERROR - a transport error occurred.
5492 5491 *
5493 5492 * Context:
5494 5493 * Kernel context.
5495 5494 */
5496 5495 /* ARGSUSED */
5497 5496 static int
5498 5497 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5499 5498 {
5500 5499 	la_els_prli_t	acc;
5501 5500 	int		rval = FC_SUCCESS;
5502 5501
5503 5502 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5504 5503 	    pkt->pkt_cmd_fhdr.d_id);
5505 5504
5506 5505 	/* Build ACC. */
	/* The ACC is the incoming PRLO payload echoed back, ... */
5507 5506 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5508 5507 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5509 5508
	/* ... with the LS code changed to ACC and one parameter updated. */
	/*
	 * NOTE(review): service_params[2] = 1 presumably sets the PRLO
	 * response code field -- confirm against FC-LS.
	 */
5510 5509 	acc.ls_code = LA_ELS_ACC;
5511 5510 	acc.service_params[2] = 1;
5512 5511
5513 5512 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5514 5513 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5515 5514
5516 5515 	pkt->pkt_state = FC_PKT_SUCCESS;
5517 5516
	/* rval is never changed above; the else branch always runs. */
5518 5517 	if (rval != FC_SUCCESS) {
5519 5518 		EL(ha, "failed, rval = %xh\n", rval);
5520 5519 	} else {
5521 5520 		/*EMPTY*/
5522 5521 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5523 5522 	}
5524 5523 	return (rval);
5525 5524 }
5526 5525
5527 5526 /*
5528 5527 * ql_els_adisc
5529 5528  * Issue an extended link service address discovery request.
5530 5529 *
5531 5530 * Input:
5532 5531 * ha = adapter state pointer.
5533 5532 * pkt = pointer to fc_packet.
5534 5533 *
5535 5534 * Returns:
5536 5535 * FC_SUCCESS - the packet was accepted for transport.
5537 5536 * FC_TRANSPORT_ERROR - a transport error occurred.
5538 5537 *
5539 5538 * Context:
5540 5539 * Kernel context.
5541 5540 */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */

	/* Locate the target queue matching the destination port ID. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Target is known but its loop ID is invalid: try to refresh
	 * the loop ID from the firmware's device ID list.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the number of entries returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no usable loop ID: give up on this target. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		/*
		 * ADISC succeeded: clear the authentication-needed flag
		 * and, if the target negotiated retries (PRLI word 3),
		 * restart any LUN queues with pending commands.
		 */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * ql_next() appears to release the
					 * device queue lock; it is retaken
					 * here before the next iteration.
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		/* Report the target's WWPN/WWNN in the ACC payload. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT into the caller's response DMA buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5659 5658
5660 5659 /*
5661 5660 * ql_els_linit
 *	Issue an extended link service loop initialize request.
5663 5662 *
5664 5663 * Input:
5665 5664 * ha = adapter state pointer.
5666 5665 * pkt = pointer to fc_packet.
5667 5666 *
5668 5667 * Returns:
5669 5668 * FC_SUCCESS - the packet was accepted for transport.
5670 5669 * FC_TRANSPORT_ERROR - a transport error occurred.
5671 5670 *
5672 5671 * Context:
5673 5672 * Kernel context.
5674 5673 */
5675 5674 static int
5676 5675 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5677 5676 {
5678 5677 ddi_dma_cookie_t *cp;
5679 5678 uint32_t cnt;
5680 5679 conv_num_t n;
5681 5680 port_id_t d_id;
5682 5681 int rval = FC_SUCCESS;
5683 5682
5684 5683 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5685 5684
5686 5685 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5687 5686 if (ha->topology & QL_SNS_CONNECTION) {
5688 5687 fc_linit_req_t els;
5689 5688 lfa_cmd_t lfa;
5690 5689
5691 5690 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5692 5691 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5693 5692
5694 5693 /* Setup LFA mailbox command data. */
5695 5694 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5696 5695
5697 5696 lfa.resp_buffer_length[0] = 4;
5698 5697
5699 5698 cp = pkt->pkt_resp_cookie;
5700 5699 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5701 5700 n.size64 = (uint64_t)cp->dmac_laddress;
5702 5701 LITTLE_ENDIAN_64(&n.size64);
5703 5702 } else {
5704 5703 n.size32[0] = LSD(cp->dmac_laddress);
5705 5704 LITTLE_ENDIAN_32(&n.size32[0]);
5706 5705 n.size32[1] = MSD(cp->dmac_laddress);
5707 5706 LITTLE_ENDIAN_32(&n.size32[1]);
5708 5707 }
5709 5708
5710 5709 /* Set buffer address. */
5711 5710 for (cnt = 0; cnt < 8; cnt++) {
5712 5711 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5713 5712 }
5714 5713
5715 5714 lfa.subcommand_length[0] = 4;
5716 5715 n.size32[0] = d_id.b24;
5717 5716 LITTLE_ENDIAN_32(&n.size32[0]);
5718 5717 lfa.addr[0] = n.size8[0];
5719 5718 lfa.addr[1] = n.size8[1];
5720 5719 lfa.addr[2] = n.size8[2];
5721 5720 lfa.subcommand[1] = 0x70;
5722 5721 lfa.payload[2] = els.func;
5723 5722 lfa.payload[4] = els.lip_b3;
5724 5723 lfa.payload[5] = els.lip_b4;
5725 5724
5726 5725 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5727 5726 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5728 5727 } else {
5729 5728 pkt->pkt_state = FC_PKT_SUCCESS;
5730 5729 }
5731 5730 } else {
5732 5731 fc_linit_resp_t rjt;
5733 5732
5734 5733 /* Build RJT. */
5735 5734 bzero(&rjt, sizeof (rjt));
5736 5735 rjt.ls_code.ls_code = LA_ELS_RJT;
5737 5736
5738 5737 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5739 5738 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5740 5739
5741 5740 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5742 5741 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5743 5742 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5744 5743 }
5745 5744
5746 5745 if (rval != FC_SUCCESS) {
5747 5746 EL(ha, "failed, rval = %xh\n", rval);
5748 5747 } else {
5749 5748 /*EMPTY*/
5750 5749 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5751 5750 }
5752 5751 return (rval);
5753 5752 }
5754 5753
5755 5754 /*
5756 5755 * ql_els_lpc
 *	Issue an extended link service loop control request.
5758 5757 *
5759 5758 * Input:
5760 5759 * ha = adapter state pointer.
5761 5760 * pkt = pointer to fc_packet.
5762 5761 *
5763 5762 * Returns:
5764 5763 * FC_SUCCESS - the packet was accepted for transport.
5765 5764 * FC_TRANSPORT_ERROR - a transport error occurred.
5766 5765 *
5767 5766 * Context:
5768 5767 * Kernel context.
5769 5768 */
5770 5769 static int
5771 5770 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5772 5771 {
5773 5772 ddi_dma_cookie_t *cp;
5774 5773 uint32_t cnt;
5775 5774 conv_num_t n;
5776 5775 port_id_t d_id;
5777 5776 int rval = FC_SUCCESS;
5778 5777
5779 5778 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5780 5779
5781 5780 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5782 5781 if (ha->topology & QL_SNS_CONNECTION) {
5783 5782 ql_lpc_t els;
5784 5783 lfa_cmd_t lfa;
5785 5784
5786 5785 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5787 5786 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5788 5787
5789 5788 /* Setup LFA mailbox command data. */
5790 5789 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5791 5790
5792 5791 lfa.resp_buffer_length[0] = 4;
5793 5792
5794 5793 cp = pkt->pkt_resp_cookie;
5795 5794 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5796 5795 n.size64 = (uint64_t)(cp->dmac_laddress);
5797 5796 LITTLE_ENDIAN_64(&n.size64);
5798 5797 } else {
5799 5798 n.size32[0] = cp->dmac_address;
5800 5799 LITTLE_ENDIAN_32(&n.size32[0]);
5801 5800 n.size32[1] = 0;
5802 5801 }
5803 5802
5804 5803 /* Set buffer address. */
5805 5804 for (cnt = 0; cnt < 8; cnt++) {
5806 5805 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5807 5806 }
5808 5807
5809 5808 lfa.subcommand_length[0] = 20;
5810 5809 n.size32[0] = d_id.b24;
5811 5810 LITTLE_ENDIAN_32(&n.size32[0]);
5812 5811 lfa.addr[0] = n.size8[0];
5813 5812 lfa.addr[1] = n.size8[1];
5814 5813 lfa.addr[2] = n.size8[2];
5815 5814 lfa.subcommand[1] = 0x71;
5816 5815 lfa.payload[4] = els.port_control;
5817 5816 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5818 5817
5819 5818 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5820 5819 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5821 5820 } else {
5822 5821 pkt->pkt_state = FC_PKT_SUCCESS;
5823 5822 }
5824 5823 } else {
5825 5824 ql_lpc_resp_t rjt;
5826 5825
5827 5826 /* Build RJT. */
5828 5827 bzero(&rjt, sizeof (rjt));
5829 5828 rjt.ls_code.ls_code = LA_ELS_RJT;
5830 5829
5831 5830 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5832 5831 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5833 5832
5834 5833 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5835 5834 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5836 5835 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5837 5836 }
5838 5837
5839 5838 if (rval != FC_SUCCESS) {
5840 5839 EL(ha, "failed, rval = %xh\n", rval);
5841 5840 } else {
5842 5841 /*EMPTY*/
5843 5842 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5844 5843 }
5845 5844 return (rval);
5846 5845 }
5847 5846
5848 5847 /*
5849 5848 * ql_els_lsts
 *	Issue an extended link service loop status request.
5851 5850 *
5852 5851 * Input:
5853 5852 * ha = adapter state pointer.
5854 5853 * pkt = pointer to fc_packet.
5855 5854 *
5856 5855 * Returns:
5857 5856 * FC_SUCCESS - the packet was accepted for transport.
5858 5857 * FC_TRANSPORT_ERROR - a transport error occurred.
5859 5858 *
5860 5859 * Context:
5861 5860 * Kernel context.
5862 5861 */
5863 5862 static int
5864 5863 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5865 5864 {
5866 5865 ddi_dma_cookie_t *cp;
5867 5866 uint32_t cnt;
5868 5867 conv_num_t n;
5869 5868 port_id_t d_id;
5870 5869 int rval = FC_SUCCESS;
5871 5870
5872 5871 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5873 5872
5874 5873 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5875 5874 if (ha->topology & QL_SNS_CONNECTION) {
5876 5875 fc_lsts_req_t els;
5877 5876 lfa_cmd_t lfa;
5878 5877
5879 5878 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5880 5879 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5881 5880
5882 5881 /* Setup LFA mailbox command data. */
5883 5882 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5884 5883
5885 5884 lfa.resp_buffer_length[0] = 84;
5886 5885
5887 5886 cp = pkt->pkt_resp_cookie;
5888 5887 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5889 5888 n.size64 = cp->dmac_laddress;
5890 5889 LITTLE_ENDIAN_64(&n.size64);
5891 5890 } else {
5892 5891 n.size32[0] = cp->dmac_address;
5893 5892 LITTLE_ENDIAN_32(&n.size32[0]);
5894 5893 n.size32[1] = 0;
5895 5894 }
5896 5895
5897 5896 /* Set buffer address. */
5898 5897 for (cnt = 0; cnt < 8; cnt++) {
5899 5898 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5900 5899 }
5901 5900
5902 5901 lfa.subcommand_length[0] = 2;
5903 5902 n.size32[0] = d_id.b24;
5904 5903 LITTLE_ENDIAN_32(&n.size32[0]);
5905 5904 lfa.addr[0] = n.size8[0];
5906 5905 lfa.addr[1] = n.size8[1];
5907 5906 lfa.addr[2] = n.size8[2];
5908 5907 lfa.subcommand[1] = 0x72;
5909 5908
5910 5909 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5911 5910 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5912 5911 } else {
5913 5912 pkt->pkt_state = FC_PKT_SUCCESS;
5914 5913 }
5915 5914 } else {
5916 5915 fc_lsts_resp_t rjt;
5917 5916
5918 5917 /* Build RJT. */
5919 5918 bzero(&rjt, sizeof (rjt));
5920 5919 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5921 5920
5922 5921 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5923 5922 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5924 5923
5925 5924 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5926 5925 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5927 5926 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5928 5927 }
5929 5928
5930 5929 if (rval != FC_SUCCESS) {
5931 5930 EL(ha, "failed=%xh\n", rval);
5932 5931 } else {
5933 5932 /*EMPTY*/
5934 5933 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5935 5934 }
5936 5935 return (rval);
5937 5936 }
5938 5937
5939 5938 /*
5940 5939 * ql_els_scr
 *	Issue an extended link service state change registration request.
5942 5941 *
5943 5942 * Input:
5944 5943 * ha = adapter state pointer.
5945 5944 * pkt = pointer to fc_packet.
5946 5945 *
5947 5946 * Returns:
5948 5947 * FC_SUCCESS - the packet was accepted for transport.
5949 5948 * FC_TRANSPORT_ERROR - a transport error occurred.
5950 5949 *
5951 5950 * Context:
5952 5951 * Kernel context.
5953 5952 */
5954 5953 static int
5955 5954 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5956 5955 {
5957 5956 fc_scr_resp_t acc;
5958 5957 int rval = FC_SUCCESS;
5959 5958
5960 5959 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5961 5960
5962 5961 bzero(&acc, sizeof (acc));
5963 5962 if (ha->topology & QL_SNS_CONNECTION) {
5964 5963 fc_scr_req_t els;
5965 5964
5966 5965 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5967 5966 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5968 5967
5969 5968 if (ql_send_change_request(ha, els.scr_func) ==
5970 5969 QL_SUCCESS) {
5971 5970 /* Build ACC. */
5972 5971 acc.scr_acc = LA_ELS_ACC;
5973 5972
5974 5973 pkt->pkt_state = FC_PKT_SUCCESS;
5975 5974 } else {
5976 5975 /* Build RJT. */
5977 5976 acc.scr_acc = LA_ELS_RJT;
5978 5977
5979 5978 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5980 5979 pkt->pkt_reason = FC_REASON_HW_ERROR;
5981 5980 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5982 5981 }
5983 5982 } else {
5984 5983 /* Build RJT. */
5985 5984 acc.scr_acc = LA_ELS_RJT;
5986 5985
5987 5986 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5988 5987 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5989 5988 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5990 5989 }
5991 5990
5992 5991 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5993 5992 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5994 5993
5995 5994 if (rval != FC_SUCCESS) {
5996 5995 EL(ha, "failed, rval = %xh\n", rval);
5997 5996 } else {
5998 5997 /*EMPTY*/
5999 5998 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6000 5999 }
6001 6000 return (rval);
6002 6001 }
6003 6002
6004 6003 /*
6005 6004 * ql_els_rscn
6006 6005 * Issue a extended link service register state
6007 6006 * change notification request.
6008 6007 *
6009 6008 * Input:
6010 6009 * ha = adapter state pointer.
6011 6010 * pkt = pointer to fc_packet.
6012 6011 *
6013 6012 * Returns:
6014 6013 * FC_SUCCESS - the packet was accepted for transport.
6015 6014 * FC_TRANSPORT_ERROR - a transport error occurred.
6016 6015 *
6017 6016 * Context:
6018 6017 * Kernel context.
6019 6018 */
6020 6019 static int
6021 6020 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6022 6021 {
6023 6022 ql_rscn_resp_t acc;
6024 6023 int rval = FC_SUCCESS;
6025 6024
6026 6025 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6027 6026
6028 6027 bzero(&acc, sizeof (acc));
6029 6028 if (ha->topology & QL_SNS_CONNECTION) {
6030 6029 /* Build ACC. */
6031 6030 acc.scr_acc = LA_ELS_ACC;
6032 6031
6033 6032 pkt->pkt_state = FC_PKT_SUCCESS;
6034 6033 } else {
6035 6034 /* Build RJT. */
6036 6035 acc.scr_acc = LA_ELS_RJT;
6037 6036
6038 6037 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6039 6038 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6040 6039 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6041 6040 }
6042 6041
6043 6042 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6044 6043 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6045 6044
6046 6045 if (rval != FC_SUCCESS) {
6047 6046 EL(ha, "failed, rval = %xh\n", rval);
6048 6047 } else {
6049 6048 /*EMPTY*/
6050 6049 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6051 6050 }
6052 6051 return (rval);
6053 6052 }
6054 6053
6055 6054 /*
6056 6055 * ql_els_farp_req
6057 6056 * Issue FC Address Resolution Protocol (FARP)
6058 6057 * extended link service request.
6059 6058 *
6060 6059 * Note: not supported.
6061 6060 *
6062 6061 * Input:
6063 6062 * ha = adapter state pointer.
6064 6063 * pkt = pointer to fc_packet.
6065 6064 *
6066 6065 * Returns:
6067 6066 * FC_SUCCESS - the packet was accepted for transport.
6068 6067 * FC_TRANSPORT_ERROR - a transport error occurred.
6069 6068 *
6070 6069 * Context:
6071 6070 * Kernel context.
6072 6071 */
6073 6072 static int
6074 6073 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6075 6074 {
6076 6075 ql_acc_rjt_t acc;
6077 6076 int rval = FC_SUCCESS;
6078 6077
6079 6078 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6080 6079
6081 6080 bzero(&acc, sizeof (acc));
6082 6081
6083 6082 /* Build ACC. */
6084 6083 acc.ls_code.ls_code = LA_ELS_ACC;
6085 6084
6086 6085 pkt->pkt_state = FC_PKT_SUCCESS;
6087 6086
6088 6087 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6089 6088 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6090 6089
6091 6090 if (rval != FC_SUCCESS) {
6092 6091 EL(ha, "failed, rval = %xh\n", rval);
6093 6092 } else {
6094 6093 /*EMPTY*/
6095 6094 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6096 6095 }
6097 6096 return (rval);
6098 6097 }
6099 6098
6100 6099 /*
6101 6100 * ql_els_farp_reply
6102 6101 * Issue FC Address Resolution Protocol (FARP)
6103 6102 * extended link service reply.
6104 6103 *
6105 6104 * Note: not supported.
6106 6105 *
6107 6106 * Input:
6108 6107 * ha = adapter state pointer.
6109 6108 * pkt = pointer to fc_packet.
6110 6109 *
6111 6110 * Returns:
6112 6111 * FC_SUCCESS - the packet was accepted for transport.
6113 6112 * FC_TRANSPORT_ERROR - a transport error occurred.
6114 6113 *
6115 6114 * Context:
6116 6115 * Kernel context.
6117 6116 */
6118 6117 /* ARGSUSED */
6119 6118 static int
6120 6119 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6121 6120 {
6122 6121 ql_acc_rjt_t acc;
6123 6122 int rval = FC_SUCCESS;
6124 6123
6125 6124 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6126 6125
6127 6126 bzero(&acc, sizeof (acc));
6128 6127
6129 6128 /* Build ACC. */
6130 6129 acc.ls_code.ls_code = LA_ELS_ACC;
6131 6130
6132 6131 pkt->pkt_state = FC_PKT_SUCCESS;
6133 6132
6134 6133 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6135 6134 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6136 6135
6137 6136 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6138 6137
6139 6138 return (rval);
6140 6139 }
6141 6140
/*
 * ql_els_rnid
 *	Process an extended link service Request Node
 *	Identification Data (RNID) request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the RNID data was returned in the response buffer.
 *	FC_FAILURE - unknown/invalid target or the ELS pass-through failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue matching the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Unknown/invalid target or the ELS pass-through failed. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): copies req_len bytes starting at acc.hdr; assumes
	 * la_els_rnid_acc_t provides at least FCIO_RNID_MAX_DATA_LEN +
	 * sizeof (fc_rnid_hdr_t) bytes after ls_code -- confirm against
	 * the structure definition.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6205 6204
/*
 * ql_els_rls
 *	Process an extended link service Read Link error Status
 *	block (RLS) request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the link error status block was returned.
 *	FC_FAILURE - unknown/invalid target or the status read failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue matching the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Unknown/invalid target or the status read failed. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/*
	 * Convert the counters to host byte order (LITTLE_ENDIAN_32 is
	 * a no-op on little-endian hosts).
	 */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build the ACC from the converted link error status block. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6275 6274
/*
 * ql_busy_plogi
 *	Waits for a target's outstanding commands to drain and verifies
 *	that no callback-queue work for the target is still pending
 *	before a synchronous PLOGI is allowed to proceed.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - commands or callbacks still pending; the
 *	    transport is expected to retry the PLOGI later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying between retries. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				/* Retries exhausted; log and give up. */
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	/* Scan the task daemon callback queue for work aimed at this port. */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer: match on the frame source ID. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			/* Normal command: match on the destination ID. */
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6359 6358
6360 6359 /*
6361 6360 * ql_login_port
6362 6361 * Logs in a device if not already logged in.
6363 6362 *
6364 6363 * Input:
6365 6364 * ha = adapter state pointer.
6366 6365 * d_id = 24 bit port ID.
6367 6366 * DEVICE_QUEUE_LOCK must be released.
6368 6367 *
6369 6368 * Returns:
6370 6369 * QL local function return status code.
6371 6370 *
6372 6371 * Context:
6373 6372 * Kernel context.
6374 6373 */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq, *tq2;
	uint16_t	loop_id, first_loop_id, last_loop_id;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/* Non-fabric device marked lost: reuse its previous loop ID. */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* Well-known name server address uses a fixed handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		if (!(CFG_IST(ha, CFG_CTRL_8021))) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_SUCCESS) {
				tq->loop_id = loop_id;
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha, tq, PDF_NONE);
			}
		} else {
			/*
			 * NOTE(review): for CFG_CTRL_8021 adapters no
			 * fabric login is issued; the topology is just
			 * marked as fabric-attached.
			 */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				/* Already logged in under this ID: OK. */
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		/*
		 * Walk candidate loop IDs (at most the size of the valid
		 * range), trying a fabric login with each free one.
		 */
		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the candidate back into the valid range. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/*
			 * vha != NULL means some port (on any vport)
			 * already owns this loop ID.
			 */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				/* index = 1 makes this the last iteration. */
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				/* No IDs left; stop after this iteration. */
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				/* Unexpected error; stop after this pass. */
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		/* Unknown device and no fabric to query. */
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6569 6568
6570 6569 /*
6571 6570 * ql_login_fabric_port
6572 6571 * Issue login fabric port mailbox command.
6573 6572 *
6574 6573 * Input:
6575 6574 * ha: adapter state pointer.
6576 6575 * tq: target queue pointer.
6577 6576 * loop_id: FC Loop ID.
6578 6577 *
6579 6578 * Returns:
6580 6579 * ql local function return status code.
6581 6580 *
6582 6581 * Context:
6583 6582 * Kernel context.
6584 6583 */
6585 6584 static int
6586 6585 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6587 6586 {
6588 6587 int rval;
6589 6588 int index;
6590 6589 int retry = 0;
6591 6590 port_id_t d_id;
6592 6591 ql_tgt_t *newq;
6593 6592 ql_mbx_data_t mr;
6594 6593
6595 6594 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6596 6595 tq->d_id.b24);
6597 6596
6598 6597 /*
6599 6598 * QL_PARAMETER_ERROR also means the firmware is
6600 6599 * not able to allocate PCB entry due to resource
6601 6600 * issues, or collision.
6602 6601 */
6603 6602 do {
6604 6603 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6605 6604 if ((rval == QL_PARAMETER_ERROR) ||
6606 6605 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6607 6606 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6608 6607 retry++;
6609 6608 drv_usecwait(10 * MILLISEC);
6610 6609 } else {
6611 6610 break;
6612 6611 }
6613 6612 } while (retry < 5);
6614 6613
6615 6614 switch (rval) {
6616 6615 case QL_SUCCESS:
6617 6616 tq->loop_id = loop_id;
6618 6617 break;
6619 6618
6620 6619 case QL_PORT_ID_USED:
6621 6620 /*
6622 6621 * This Loop ID should NOT be in use in drivers
6623 6622 */
6624 6623 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6625 6624
6626 6625 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6627 6626 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6628 6627 "dup loop_id=%xh, d_id=%xh", ha->instance,
6629 6628 newq->loop_id, newq->d_id.b24);
6630 6629 ql_send_logo(ha, newq, NULL);
6631 6630 }
6632 6631
6633 6632 tq->loop_id = mr.mb[1];
6634 6633 break;
6635 6634
6636 6635 case QL_LOOP_ID_USED:
6637 6636 d_id.b.al_pa = LSB(mr.mb[2]);
6638 6637 d_id.b.area = MSB(mr.mb[2]);
6639 6638 d_id.b.domain = LSB(mr.mb[1]);
6640 6639
6641 6640 newq = ql_d_id_to_queue(ha, d_id);
6642 6641 if (newq && (newq->loop_id != loop_id)) {
6643 6642 /*
6644 6643 * This should NEVER ever happen; but this
6645 6644 * code is needed to bail out when the worst
6646 6645 * case happens - or as used to happen before
6647 6646 */
6648 6647 QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6649 6648 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6650 6649 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6651 6650 ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6652 6651 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6653 6652 newq->d_id.b24, loop_id);
6654 6653
6655 6654 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6656 6655 ADAPTER_STATE_LOCK(ha);
6657 6656
6658 6657 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6659 6658 ql_add_link_b(&ha->dev[index], &newq->device);
6660 6659
6661 6660 newq->d_id.b24 = d_id.b24;
6662 6661
6663 6662 index = ql_alpa_to_index[d_id.b.al_pa];
6664 6663 ql_add_link_b(&ha->dev[index], &newq->device);
6665 6664
6666 6665 ADAPTER_STATE_UNLOCK(ha);
6667 6666 }
6668 6667
6669 6668 (void) ql_get_port_database(ha, newq, PDF_NONE);
6670 6669
6671 6670 }
6672 6671
6673 6672 /*
6674 6673 * Invalidate the loop ID for the
6675 6674 * us to obtain a new one.
6676 6675 */
6677 6676 tq->loop_id = PORT_NO_LOOP_ID;
6678 6677 break;
6679 6678
6680 6679 case QL_ALL_IDS_IN_USE:
6681 6680 rval = QL_FUNCTION_FAILED;
6682 6681 EL(ha, "no loop id's available\n");
6683 6682 break;
6684 6683
6685 6684 default:
6686 6685 if (rval == QL_COMMAND_ERROR) {
6687 6686 switch (mr.mb[1]) {
6688 6687 case 2:
6689 6688 case 3:
6690 6689 rval = QL_MEMORY_ALLOC_FAILED;
6691 6690 break;
6692 6691
6693 6692 case 4:
6694 6693 rval = QL_FUNCTION_TIMEOUT;
6695 6694 break;
6696 6695 case 7:
6697 6696 rval = QL_FABRIC_NOT_INITIALIZED;
6698 6697 break;
6699 6698 default:
6700 6699 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6701 6700 break;
6702 6701 }
6703 6702 } else {
6704 6703 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6705 6704 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6706 6705 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6707 6706 }
6708 6707 break;
6709 6708 }
6710 6709
6711 6710 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6712 6711 rval != QL_LOOP_ID_USED) {
6713 6712 EL(ha, "failed=%xh\n", rval);
6714 6713 } else {
6715 6714 /*EMPTY*/
6716 6715 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6717 6716 }
6718 6717 return (rval);
6719 6718 }
6720 6719
6721 6720 /*
6722 6721 * ql_logout_port
6723 6722 * Logs out a device if possible.
6724 6723 *
6725 6724 * Input:
6726 6725 * ha: adapter state pointer.
6727 6726 * d_id: 24 bit port ID.
6728 6727 *
6729 6728 * Returns:
6730 6729 * QL local function return status code.
6731 6730 *
6732 6731 * Context:
6733 6732 * Kernel context.
6734 6733 */
6735 6734 static int
6736 6735 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6737 6736 {
6738 6737 ql_link_t *link;
6739 6738 ql_tgt_t *tq;
6740 6739 uint16_t index;
6741 6740
6742 6741 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6743 6742
6744 6743 /* Get head queue index. */
6745 6744 index = ql_alpa_to_index[d_id.b.al_pa];
6746 6745
6747 6746 /* Get device queue. */
6748 6747 tq = NULL;
6749 6748 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6750 6749 tq = link->base_address;
6751 6750 if (tq->d_id.b24 == d_id.b24) {
6752 6751 break;
6753 6752 } else {
6754 6753 tq = NULL;
6755 6754 }
6756 6755 }
6757 6756
6758 6757 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6759 6758 (void) ql_logout_fabric_port(ha, tq);
6760 6759 tq->loop_id = PORT_NO_LOOP_ID;
6761 6760 }
6762 6761
6763 6762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6764 6763
6765 6764 return (QL_SUCCESS);
6766 6765 }
6767 6766
6768 6767 /*
6769 6768 * ql_dev_init
6770 6769 * Initialize/allocate device queue.
6771 6770 *
6772 6771 * Input:
6773 6772 * ha: adapter state pointer.
6774 6773 * d_id: device destination ID
6775 6774 * loop_id: device loop ID
6776 6775 * ADAPTER_STATE_LOCK must be already obtained.
6777 6776 *
6778 6777 * Returns:
6779 6778 * NULL = failure
6780 6779 *
6781 6780 * Context:
6782 6781 * Kernel context.
6783 6782 */
6784 6783 ql_tgt_t *
6785 6784 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6786 6785 {
6787 6786 ql_link_t *link;
6788 6787 uint16_t index;
6789 6788 ql_tgt_t *tq;
6790 6789
6791 6790 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6792 6791 ha->instance, d_id.b24, loop_id);
6793 6792
6794 6793 index = ql_alpa_to_index[d_id.b.al_pa];
6795 6794
6796 6795 /* If device queue exists, set proper loop ID. */
6797 6796 tq = NULL;
6798 6797 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6799 6798 tq = link->base_address;
6800 6799 if (tq->d_id.b24 == d_id.b24) {
6801 6800 tq->loop_id = loop_id;
6802 6801
6803 6802 /* Reset port down retry count. */
6804 6803 tq->port_down_retry_count = ha->port_down_retry_count;
6805 6804 tq->qfull_retry_count = ha->qfull_retry_count;
6806 6805
6807 6806 break;
6808 6807 } else {
6809 6808 tq = NULL;
6810 6809 }
6811 6810 }
6812 6811
6813 6812 /* If device does not have queue. */
6814 6813 if (tq == NULL) {
6815 6814 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6816 6815 if (tq != NULL) {
6817 6816 /*
6818 6817 * mutex to protect the device queue,
6819 6818 * does not block interrupts.
6820 6819 */
6821 6820 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6822 6821 (ha->iflags & IFLG_INTR_AIF) ?
6823 6822 (void *)(uintptr_t)ha->intr_pri :
6824 6823 (void *)(uintptr_t)ha->iblock_cookie);
6825 6824
6826 6825 tq->d_id.b24 = d_id.b24;
6827 6826 tq->loop_id = loop_id;
6828 6827 tq->device.base_address = tq;
6829 6828 tq->iidma_rate = IIDMA_RATE_INIT;
6830 6829
6831 6830 /* Reset port down retry count. */
6832 6831 tq->port_down_retry_count = ha->port_down_retry_count;
6833 6832 tq->qfull_retry_count = ha->qfull_retry_count;
6834 6833
6835 6834 /* Add device to device queue. */
6836 6835 ql_add_link_b(&ha->dev[index], &tq->device);
6837 6836 }
6838 6837 }
6839 6838
6840 6839 if (tq == NULL) {
6841 6840 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6842 6841 } else {
6843 6842 /*EMPTY*/
6844 6843 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6845 6844 }
6846 6845 return (tq);
6847 6846 }
6848 6847
/*
 * ql_dev_free
 *	Remove queue from device list and frees resources used by queue.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	ADAPTER_STATE_LOCK must be already obtained.
 *
 * Context:
 *	Kernel context.
 */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN queue still has commands queued. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Free only when the target has no outstanding commands. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				/* Unlink the target from the device list. */
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Tear down and free every LUN queue;
				 * 'link' is reused to walk the LUN list,
				 * so the outer loop ends via break below.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6904 6903
6905 6904 /*
6906 6905 * ql_lun_queue
6907 6906 * Allocate LUN queue if does not exists.
6908 6907 *
6909 6908 * Input:
6910 6909 * ha: adapter state pointer.
6911 6910 * tq: target queue.
6912 6911 * lun: LUN number.
6913 6912 *
6914 6913 * Returns:
6915 6914 * NULL = failure
6916 6915 *
6917 6916 * Context:
6918 6917 * Kernel context.
6919 6918 */
6920 6919 static ql_lun_t *
6921 6920 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6922 6921 {
6923 6922 ql_lun_t *lq;
6924 6923 ql_link_t *link;
6925 6924
6926 6925 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6927 6926
6928 6927 /* Fast path. */
6929 6928 if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6930 6929 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6931 6930 return (tq->last_lun_queue);
6932 6931 }
6933 6932
6934 6933 if (lun >= MAX_LUNS) {
6935 6934 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6936 6935 return (NULL);
6937 6936 }
6938 6937 /* If device queue exists, set proper loop ID. */
6939 6938 lq = NULL;
6940 6939 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6941 6940 lq = link->base_address;
6942 6941 if (lq->lun_no == lun) {
6943 6942 QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6944 6943 tq->last_lun_queue = lq;
6945 6944 return (lq);
6946 6945 }
6947 6946 }
6948 6947
6949 6948 /* If queue does exist. */
6950 6949 lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6951 6950
6952 6951 /* Initialize LUN queue. */
6953 6952 if (lq != NULL) {
6954 6953 lq->link.base_address = lq;
6955 6954
6956 6955 lq->lun_no = lun;
6957 6956 lq->target_queue = tq;
6958 6957
6959 6958 DEVICE_QUEUE_LOCK(tq);
6960 6959 ql_add_link_b(&tq->lun_queues, &lq->link);
6961 6960 DEVICE_QUEUE_UNLOCK(tq);
6962 6961 tq->last_lun_queue = lq;
6963 6962 }
6964 6963
6965 6964 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6966 6965
6967 6966 return (lq);
6968 6967 }
6969 6968
/*
 * ql_fcp_scsi_cmd
 *	Process fibre channel (FCP) SCSI protocol commands.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	sp = srb pointer.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Prefer the cached device queue; fall back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* The LUN number is carried in the FCP entity address field. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Account the I/O for the xioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			sp->req_cnt = 1;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
				    (!CFG_IST(ha, CFG_CTRL_8021) ||
				    sp->sg_dma.dma_handle == NULL)) {
					uint32_t	cnt;

					/*
					 * DMA cookies beyond the command
					 * IOCB's capacity spill into
					 * continuation IOCBs.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				}
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No device/LUN queue: reject locally, no connection. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7077 7076
7078 7077 /*
7079 7078 * ql_task_mgmt
7080 7079 * Task management function processor.
7081 7080 *
7082 7081 * Input:
7083 7082 * ha: adapter state pointer.
7084 7083 * tq: target queue pointer.
7085 7084 * pkt: pointer to fc_packet.
7086 7085 * sp: SRB pointer.
7087 7086 *
7088 7087 * Context:
7089 7088 * Kernel context.
7090 7089 */
7091 7090 static void
7092 7091 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7093 7092 ql_srb_t *sp)
7094 7093 {
7095 7094 fcp_rsp_t *fcpr;
7096 7095 struct fcp_rsp_info *rsp;
7097 7096 uint16_t lun;
7098 7097
7099 7098 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7100 7099
7101 7100 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7102 7101 rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7103 7102
7104 7103 bzero(fcpr, pkt->pkt_rsplen);
7105 7104
7106 7105 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7107 7106 fcpr->fcp_response_len = 8;
7108 7107 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7109 7108 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7110 7109
7111 7110 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7112 7111 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7113 7112 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7114 7113 }
7115 7114 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7116 7115 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7117 7116 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7118 7117 }
7119 7118 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7120 7119 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7121 7120 QL_SUCCESS) {
7122 7121 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7123 7122 }
7124 7123 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7125 7124 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7126 7125 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7127 7126 }
7128 7127 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7129 7128 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7130 7129 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7131 7130 }
7132 7131 } else {
7133 7132 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7134 7133 }
7135 7134
7136 7135 pkt->pkt_state = FC_PKT_SUCCESS;
7137 7136
7138 7137 /* Do command callback. */
7139 7138 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7140 7139 ql_awaken_task_daemon(ha, sp, 0, 0);
7141 7140 }
7142 7141
7143 7142 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7144 7143 }
7145 7144
7146 7145 /*
7147 7146 * ql_fcp_ip_cmd
7148 7147 * Process fibre channel (FCP) Internet (IP) protocols commands.
7149 7148 *
7150 7149 * Input:
7151 7150 * ha: adapter state pointer.
7152 7151 * pkt: pointer to fc_packet.
7153 7152 * sp: SRB pointer.
7154 7153 *
7155 7154 * Returns:
7156 7155 * FC_SUCCESS - the packet was accepted for transport.
7157 7156 * FC_TRANSPORT_ERROR - a transport error occurred.
7158 7157 *
7159 7158 * Context:
7160 7159 * Kernel context.
7161 7160 */
7162 7161 static int
7163 7162 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7164 7163 {
7165 7164 port_id_t d_id;
7166 7165 ql_tgt_t *tq;
7167 7166
7168 7167 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7169 7168
7170 7169 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7171 7170 if (tq == NULL) {
7172 7171 d_id.r.rsvd_1 = 0;
7173 7172 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7174 7173 tq = ql_d_id_to_queue(ha, d_id);
7175 7174 }
7176 7175
7177 7176 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7178 7177 /*
7179 7178 * IP data is bound to pkt_cmd_dma
7180 7179 */
7181 7180 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7182 7181 0, 0, DDI_DMA_SYNC_FORDEV);
7183 7182
7184 7183 /* Setup IOCB count. */
7185 7184 sp->iocb = ha->ip_cmd;
7186 7185 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7187 7186 uint32_t cnt;
7188 7187
7189 7188 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7190 7189 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7191 7190 if (cnt % ha->cmd_cont_segs) {
7192 7191 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7193 7192 } else {
7194 7193 sp->req_cnt++;
7195 7194 }
7196 7195 } else {
7197 7196 sp->req_cnt = 1;
7198 7197 }
7199 7198 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7200 7199
7201 7200 return (ql_start_cmd(ha, tq, pkt, sp));
7202 7201 } else {
7203 7202 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7204 7203 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7205 7204
7206 7205 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7207 7206 ql_awaken_task_daemon(ha, sp, 0, 0);
7208 7207 }
7209 7208
7210 7209 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7211 7210
7212 7211 return (FC_SUCCESS);
7213 7212 }
7214 7213
/*
 * ql_fc_services
 *	Process fibre channel services (name server).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pkt:	pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No device queue: fall through to the RJT path. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			/* Extra cookies spill into continuation IOCBs. */
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		/* Command handed to the ISP: skip the RJT/callback tail. */
		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7333 7332
/*
 * ql_cthdr_endian
 *	Change endianess of ct passthrough header and payload.
 *
 * Input:
 *	acc_handle:	DMA buffer access handle.
 *	ct_hdr:		Pointer to header.
 *	restore:	Restore first flag.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Pull the CT header out of the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * When restoring, byte-swap the header copy FIRST so that the
	 * command-type fields below are examined in host order.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Swap payload fields whose layout depends on the NS command. */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			/* Single 32-bit port ID at payload start. */
			BIG_ENDIAN_32(bp);
			break;
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			/* Port ID followed by a second 32-bit word. */
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		case NS_GNN_IP:
		case NS_GIPA_IP:
			/* 16-byte IP address at payload start. */
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIP_NN:
			/* 8-byte node name, then 16-byte IP address. */
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIPA_NN:
			/* 8-byte node name, then 64-bit IPA. */
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/*
	 * When converting to wire order, swap the header copy AFTER the
	 * payload handling above (the mirror of the restore case).
	 */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7417 7416
/*
 * ql_start_cmd
 *	Finishes starting fibre channel protocol (FCP) command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	pkt:	pointer to fc_packet.
 *	sp:	SRB pointer.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* First panic-path command triggers an ISP reset. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int res;

			/* Timed out: abort, and dequeue if abort failed. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7589 7588
7590 7589 /*
7591 7590 * ql_poll_cmd
7592 7591 * Polls commands for completion.
7593 7592 *
7594 7593 * Input:
7595 7594 * ha = adapter state pointer.
7596 7595 * sp = SRB command pointer.
7597 7596 * poll_wait = poll wait time in seconds.
7598 7597 *
7599 7598 * Returns:
7600 7599 * QL local function return status code.
7601 7600 *
7602 7601 * Context:
7603 7602 * Kernel context.
7604 7603 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	/*
	 * Busy-wait for the SRB to complete (SRB_POLL cleared by the
	 * completion path), manually servicing interrupts and the task
	 * daemon when normal interrupt delivery cannot be relied upon
	 * (interrupts disabled, adapter idle too long, or panic dump).
	 */
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): counts down in 10ms steps, but the initializer is
	 * poll_wait * 100 rather than * 1000 — effective timeout appears to
	 * be poll_wait/10 seconds. Matches upstream; confirm caller units
	 * before changing.
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	while (sp->flags & SRB_POLL) {

		/*
		 * Only hand-crank the hardware when interrupts cannot be
		 * expected to fire: they are disabled, the adapter has been
		 * idle >= 15 ticks, or we are in panic context.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Service any pending interrupt by hand. */
			if (INTERRUPT_PENDING(ha)) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			/*
			 * ql_task_thread() expects TASK_DAEMON_LOCK held;
			 * TASK_THREAD_CALLED marks the reentrant call so the
			 * daemon proper does not run concurrently.
			 */
			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of budget for another 10ms tick: give up. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7675 7674
7676 7675 /*
7677 7676 * ql_next
7678 7677 * Retrieve and process next job in the device queue.
7679 7678 *
7680 7679 * Input:
7681 7680 * ha: adapter state pointer.
7682 7681 * lq: LUN queue pointer.
7683 7682 * DEVICE_QUEUE_LOCK must be already obtained.
7684 7683 *
7685 7684 * Output:
7686 7685 * Releases DEVICE_QUEUE_LOCK upon exit.
7687 7686 *
7688 7687 * Context:
7689 7688 * Interrupt or Kernel context, no mailbox commands allowed.
7690 7689 */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	/*
	 * Drain the LUN's device queue into the ISP request ring until a
	 * gating condition stops submission.
	 *
	 * Lock contract (asymmetric — see header comment): caller enters
	 * holding DEVICE_QUEUE_LOCK; this function ALWAYS releases it
	 * before returning.
	 */
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* In panic context, do not start new work; just drop the lock. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Track per-target outstanding count (paired with ql_done). */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7767 7766
7768 7767 /*
7769 7768 * ql_done
7770 7769 * Process completed commands.
7771 7770 *
7772 7771 * Input:
7773 7772 * link: first command link in chain.
7774 7773 *
7775 7774 * Context:
7776 7775 * Interrupt or Kernel context, no mailbox commands allowed.
7777 7776 */
void
ql_done(ql_link_t *link)
{
	/*
	 * Walk a chain of completed SRBs: unsolicited-buffer completions are
	 * handed to the task daemon; normal commands have their queue
	 * accounting unwound, ISP completion status mapped to FCA
	 * pkt_state/pkt_reason, and the packet completion callback invoked
	 * (directly, deferred, or suppressed for polled commands).
	 */
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	for (; link != NULL; link = next_link) {
		/* Save successor first: sp may be re-queued or completed. */
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion path. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				/* Drop lock: ql_isp_rcvbuf takes it itself. */
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}


			/*
			 * Alter aborted status for fast timeout feature.
			 * Clearing SRB_RETRY here deliberately disqualifies
			 * the command from the re-queue test below, so it
			 * completes with CS_TIMEOUT instead of retrying.
			 */
			if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
			    (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				EL(ha, "fast abort modify change\n");
				sp->flags &= ~(SRB_RETRY);
				sp->pkt->pkt_reason = CS_TIMEOUT;
			}

			/* Place request back on top of target command queue */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_24258081) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Re-queue at head; ql_next drops the lock. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				/*
				 * Either path below releases the device
				 * queue lock (ql_next does it internally).
				 */
				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required. */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					/*
					 * NOTE(review): FC_PKT_TRAN_ERROR
					 * looks like a pkt_state constant
					 * being assigned to pkt_reason —
					 * verify against fc_packet.h whether
					 * an FC_REASON_* value was intended.
					 */
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller in ql_poll_cmd sees this. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
8022 8021
8023 8022 /*
8024 8023 * ql_awaken_task_daemon
8025 8024 * Adds command completion callback to callback queue and/or
8026 8025 * awakens task daemon thread.
8027 8026 *
8028 8027 * Input:
8029 8028 * ha: adapter state pointer.
8030 8029 * sp: srb pointer.
8031 8030 * set_flags: task daemon flags to set.
8032 8031 * reset_flags: task daemon flags to reset.
8033 8032 *
8034 8033 * Context:
8035 8034 * Interrupt or Kernel context, no mailbox commands allowed.
8036 8035 */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	/*
	 * Update the task daemon flag word and either queue sp's completion
	 * callback for the daemon, or run it inline when the daemon is
	 * suspended.
	 */
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is already active. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		if (sp != NULL) {
			/* Drop lock before calling out of the driver. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * No callback to run: process pending task work
			 * inline, but never from interrupt context and
			 * never reentrantly (TASK_THREAD_CALLED guard).
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Daemon is running: hand the callback to it. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8090 8089
8091 8090 /*
8092 8091 * ql_task_daemon
 * Thread that is awakened by the driver when background
 * work needs to be done.
8095 8094 *
8096 8095 * Input:
8097 8096 * arg = adapter state pointer.
8098 8097 *
8099 8098 * Context:
8100 8099 * Kernel context.
8101 8100 */
static void
ql_task_daemon(void *arg)
{
	/*
	 * Task daemon thread body: loop running ql_task_thread(), then
	 * sleep CPR-safely on cv_task_daemon until awakened or signalled.
	 * TASK_DAEMON_LOCK doubles as the mutex for both the CPR callback
	 * and the condition variable.
	 */
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with CPR (suspend/resume) so the thread parks safely. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases task_daemon_mutex for us. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
8159 8158
8160 8159 /*
8161 8160 * ql_task_thread
8162 8161 * Thread run by daemon.
8163 8162 *
8164 8163 * Input:
8165 8164 * ha = adapter state pointer.
8166 8165 * TASK_DAEMON_LOCK must be acquired prior to call.
8167 8166 *
8168 8167 * Context:
8169 8168 * Kernel context.
8170 8169 */
8171 8170 static void
8172 8171 ql_task_thread(ql_adapter_state_t *ha)
8173 8172 {
8174 8173 int loop_again;
8175 8174 ql_srb_t *sp;
8176 8175 ql_head_t *head;
8177 8176 ql_link_t *link;
8178 8177 caddr_t msg;
8179 8178 ql_adapter_state_t *vha;
8180 8179
8181 8180 do {
8182 8181 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8183 8182 ha->instance, ha->task_daemon_flags);
8184 8183
8185 8184 loop_again = FALSE;
8186 8185
8187 8186 QL_PM_LOCK(ha);
8188 8187 if (ha->power_level != PM_LEVEL_D0) {
8189 8188 QL_PM_UNLOCK(ha);
8190 8189 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8191 8190 break;
8192 8191 }
8193 8192 QL_PM_UNLOCK(ha);
8194 8193
8195 8194 /* IDC event. */
8196 8195 if (ha->task_daemon_flags & IDC_EVENT) {
8197 8196 ha->task_daemon_flags &= ~IDC_EVENT;
8198 8197 TASK_DAEMON_UNLOCK(ha);
8199 8198 ql_process_idc_event(ha);
8200 8199 TASK_DAEMON_LOCK(ha);
8201 8200 loop_again = TRUE;
8202 8201 }
8203 8202
8204 8203 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8205 8204 (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8206 8205 (ha->flags & ONLINE) == 0) {
8207 8206 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8208 8207 break;
8209 8208 }
8210 8209 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8211 8210
8212 8211 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8213 8212 TASK_DAEMON_UNLOCK(ha);
8214 8213 if (ha->log_parity_pause == B_TRUE) {
8215 8214 (void) ql_flash_errlog(ha,
8216 8215 FLASH_ERRLOG_PARITY_ERR, 0,
8217 8216 MSW(ha->parity_stat_err),
8218 8217 LSW(ha->parity_stat_err));
8219 8218 ha->log_parity_pause = B_FALSE;
8220 8219 }
8221 8220 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8222 8221 TASK_DAEMON_LOCK(ha);
8223 8222 loop_again = TRUE;
8224 8223 }
8225 8224
8226 8225 /* Idle Check. */
8227 8226 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8228 8227 ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8229 8228 if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8230 8229 TASK_DAEMON_UNLOCK(ha);
8231 8230 ql_idle_check(ha);
8232 8231 TASK_DAEMON_LOCK(ha);
8233 8232 loop_again = TRUE;
8234 8233 }
8235 8234 }
8236 8235
8237 8236 /* Crystal+ port#0 bypass transition */
8238 8237 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8239 8238 ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8240 8239 TASK_DAEMON_UNLOCK(ha);
8241 8240 (void) ql_initiate_lip(ha);
8242 8241 TASK_DAEMON_LOCK(ha);
8243 8242 loop_again = TRUE;
8244 8243 }
8245 8244
8246 8245 /* Abort queues needed. */
8247 8246 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8248 8247 ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8249 8248 TASK_DAEMON_UNLOCK(ha);
8250 8249 ql_abort_queues(ha);
8251 8250 TASK_DAEMON_LOCK(ha);
8252 8251 }
8253 8252
8254 8253 /* Not suspended, awaken waiting routines. */
8255 8254 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8256 8255 ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8257 8256 ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8258 8257 cv_broadcast(&ha->cv_dr_suspended);
8259 8258 loop_again = TRUE;
8260 8259 }
8261 8260
8262 8261 /* Handle RSCN changes. */
8263 8262 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8264 8263 if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8265 8264 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8266 8265 TASK_DAEMON_UNLOCK(ha);
8267 8266 (void) ql_handle_rscn_update(vha);
8268 8267 TASK_DAEMON_LOCK(ha);
8269 8268 loop_again = TRUE;
8270 8269 }
8271 8270 }
8272 8271
8273 8272 /* Handle state changes. */
8274 8273 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8275 8274 if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8276 8275 !(ha->task_daemon_flags &
8277 8276 TASK_DAEMON_POWERING_DOWN)) {
8278 8277 /* Report state change. */
8279 8278 EL(vha, "state change = %xh\n", vha->state);
8280 8279 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8281 8280
8282 8281 if (vha->task_daemon_flags &
8283 8282 COMMAND_WAIT_NEEDED) {
8284 8283 vha->task_daemon_flags &=
8285 8284 ~COMMAND_WAIT_NEEDED;
8286 8285 if (!(ha->task_daemon_flags &
8287 8286 COMMAND_WAIT_ACTIVE)) {
8288 8287 ha->task_daemon_flags |=
8289 8288 COMMAND_WAIT_ACTIVE;
8290 8289 TASK_DAEMON_UNLOCK(ha);
8291 8290 ql_cmd_wait(ha);
8292 8291 TASK_DAEMON_LOCK(ha);
8293 8292 ha->task_daemon_flags &=
8294 8293 ~COMMAND_WAIT_ACTIVE;
8295 8294 }
8296 8295 }
8297 8296
8298 8297 msg = NULL;
8299 8298 if (FC_PORT_STATE_MASK(vha->state) ==
8300 8299 FC_STATE_OFFLINE) {
8301 8300 if (vha->task_daemon_flags &
8302 8301 STATE_ONLINE) {
8303 8302 if (ha->topology &
8304 8303 QL_LOOP_CONNECTION) {
8305 8304 msg = "Loop OFFLINE";
8306 8305 } else {
8307 8306 msg = "Link OFFLINE";
8308 8307 }
8309 8308 }
8310 8309 vha->task_daemon_flags &=
8311 8310 ~STATE_ONLINE;
8312 8311 } else if (FC_PORT_STATE_MASK(vha->state) ==
8313 8312 FC_STATE_LOOP) {
8314 8313 if (!(vha->task_daemon_flags &
8315 8314 STATE_ONLINE)) {
8316 8315 msg = "Loop ONLINE";
8317 8316 }
8318 8317 vha->task_daemon_flags |= STATE_ONLINE;
8319 8318 } else if (FC_PORT_STATE_MASK(vha->state) ==
8320 8319 FC_STATE_ONLINE) {
8321 8320 if (!(vha->task_daemon_flags &
8322 8321 STATE_ONLINE)) {
8323 8322 msg = "Link ONLINE";
8324 8323 }
8325 8324 vha->task_daemon_flags |= STATE_ONLINE;
8326 8325 } else {
8327 8326 msg = "Unknown Link state";
8328 8327 }
8329 8328
8330 8329 if (msg != NULL) {
8331 8330 cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8332 8331 "%s", QL_NAME, ha->instance,
8333 8332 vha->vp_index, msg);
8334 8333 }
8335 8334
8336 8335 if (vha->flags & FCA_BOUND) {
8337 8336 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8338 8337 "cb state=%xh\n", ha->instance,
8339 8338 vha->vp_index, vha->state);
8340 8339 TASK_DAEMON_UNLOCK(ha);
8341 8340 (vha->bind_info.port_statec_cb)
8342 8341 (vha->bind_info.port_handle,
8343 8342 vha->state);
8344 8343 TASK_DAEMON_LOCK(ha);
8345 8344 }
8346 8345 loop_again = TRUE;
8347 8346 }
8348 8347 }
8349 8348
8350 8349 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8351 8350 !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8352 8351 EL(ha, "processing LIP reset\n");
8353 8352 ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8354 8353 TASK_DAEMON_UNLOCK(ha);
8355 8354 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8356 8355 if (vha->flags & FCA_BOUND) {
8357 8356 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8358 8357 "cb reset\n", ha->instance,
8359 8358 vha->vp_index);
8360 8359 (vha->bind_info.port_statec_cb)
8361 8360 (vha->bind_info.port_handle,
8362 8361 FC_STATE_TARGET_PORT_RESET);
8363 8362 }
8364 8363 }
8365 8364 TASK_DAEMON_LOCK(ha);
8366 8365 loop_again = TRUE;
8367 8366 }
8368 8367
8369 8368 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8370 8369 FIRMWARE_UP)) {
8371 8370 /*
8372 8371 * The firmware needs more unsolicited
8373 8372 * buffers. We cannot allocate any new
8374 8373 * buffers unless the ULP module requests
8375 8374 * for new buffers. All we can do here is
8376 8375 * to give received buffers from the pool
8377 8376 * that is already allocated
8378 8377 */
8379 8378 ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8380 8379 TASK_DAEMON_UNLOCK(ha);
8381 8380 ql_isp_rcvbuf(ha);
8382 8381 TASK_DAEMON_LOCK(ha);
8383 8382 loop_again = TRUE;
8384 8383 }
8385 8384
8386 8385 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8387 8386 TASK_DAEMON_UNLOCK(ha);
8388 8387 (void) ql_abort_isp(ha);
8389 8388 TASK_DAEMON_LOCK(ha);
8390 8389 loop_again = TRUE;
8391 8390 }
8392 8391
8393 8392 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8394 8393 COMMAND_WAIT_NEEDED))) {
8395 8394 if (QL_IS_SET(ha->task_daemon_flags,
8396 8395 RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8397 8396 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8398 8397 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8399 8398 ha->task_daemon_flags |= RESET_ACTIVE;
8400 8399 TASK_DAEMON_UNLOCK(ha);
8401 8400 for (vha = ha; vha != NULL;
8402 8401 vha = vha->vp_next) {
8403 8402 ql_rst_aen(vha);
8404 8403 }
8405 8404 TASK_DAEMON_LOCK(ha);
8406 8405 ha->task_daemon_flags &= ~RESET_ACTIVE;
8407 8406 loop_again = TRUE;
8408 8407 }
8409 8408 }
8410 8409
8411 8410 if (QL_IS_SET(ha->task_daemon_flags,
8412 8411 LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8413 8412 if (!(ha->task_daemon_flags &
8414 8413 LOOP_RESYNC_ACTIVE)) {
8415 8414 ha->task_daemon_flags |=
8416 8415 LOOP_RESYNC_ACTIVE;
8417 8416 TASK_DAEMON_UNLOCK(ha);
8418 8417 (void) ql_loop_resync(ha);
8419 8418 TASK_DAEMON_LOCK(ha);
8420 8419 loop_again = TRUE;
8421 8420 }
8422 8421 }
8423 8422 }
8424 8423
8425 8424 /* Port retry needed. */
8426 8425 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8427 8426 ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8428 8427 ADAPTER_STATE_LOCK(ha);
8429 8428 ha->port_retry_timer = 0;
8430 8429 ADAPTER_STATE_UNLOCK(ha);
8431 8430
8432 8431 TASK_DAEMON_UNLOCK(ha);
8433 8432 ql_restart_queues(ha);
8434 8433 TASK_DAEMON_LOCK(ha);
8435 8434 loop_again = B_TRUE;
8436 8435 }
8437 8436
8438 8437 /* iiDMA setting needed? */
8439 8438 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8440 8439 ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8441 8440
8442 8441 TASK_DAEMON_UNLOCK(ha);
8443 8442 ql_iidma(ha);
8444 8443 TASK_DAEMON_LOCK(ha);
8445 8444 loop_again = B_TRUE;
8446 8445 }
8447 8446
8448 8447 if (ha->task_daemon_flags & SEND_PLOGI) {
8449 8448 ha->task_daemon_flags &= ~SEND_PLOGI;
8450 8449 TASK_DAEMON_UNLOCK(ha);
8451 8450 (void) ql_n_port_plogi(ha);
8452 8451 TASK_DAEMON_LOCK(ha);
8453 8452 }
8454 8453
8455 8454 head = &ha->callback_queue;
8456 8455 if (head->first != NULL) {
8457 8456 sp = head->first->base_address;
8458 8457 link = &sp->cmd;
8459 8458
8460 8459 /* Dequeue command. */
8461 8460 ql_remove_link(head, link);
8462 8461
8463 8462 /* Release task daemon lock. */
8464 8463 TASK_DAEMON_UNLOCK(ha);
8465 8464
8466 8465 /* Do callback. */
8467 8466 if (sp->flags & SRB_UB_CALLBACK) {
8468 8467 ql_unsol_callback(sp);
8469 8468 } else {
8470 8469 (*sp->pkt->pkt_comp)(sp->pkt);
8471 8470 }
8472 8471
8473 8472 /* Acquire task daemon lock. */
8474 8473 TASK_DAEMON_LOCK(ha);
8475 8474
8476 8475 loop_again = TRUE;
8477 8476 }
8478 8477
8479 8478 } while (loop_again);
8480 8479 }
8481 8480
8482 8481 /*
8483 8482 * ql_idle_check
8484 8483 * Test for adapter is alive and well.
8485 8484 *
8486 8485 * Input:
8487 8486 * ha: adapter state pointer.
8488 8487 *
8489 8488 * Context:
8490 8489 * Kernel context.
8491 8490 */
8492 8491 static void
8493 8492 ql_idle_check(ql_adapter_state_t *ha)
8494 8493 {
8495 8494 ddi_devstate_t state;
8496 8495 int rval;
8497 8496 ql_mbx_data_t mr;
8498 8497
8499 8498 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8500 8499
8501 8500 /* Firmware Ready Test. */
8502 8501 rval = ql_get_firmware_state(ha, &mr);
8503 8502 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8504 8503 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8505 8504 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8506 8505 state = ddi_get_devstate(ha->dip);
8507 8506 if (state == DDI_DEVSTATE_UP) {
8508 8507 /*EMPTY*/
8509 8508 ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8510 8509 DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8511 8510 }
8512 8511 TASK_DAEMON_LOCK(ha);
8513 8512 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8514 8513 EL(ha, "fstate_ready, isp_abort_needed\n");
8515 8514 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8516 8515 }
8517 8516 TASK_DAEMON_UNLOCK(ha);
8518 8517 }
8519 8518
8520 8519 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8521 8520 }
8522 8521
8523 8522 /*
8524 8523 * ql_unsol_callback
8525 8524 * Handle unsolicited buffer callbacks.
8526 8525 *
8527 8526 * Input:
8528 8527 * ha = adapter state pointer.
8529 8528 * sp = srb pointer.
8530 8529 *
8531 8530 * Context:
8532 8531 * Kernel context.
8533 8532 */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	/*
	 * Deliver an unsolicited buffer (RSCN, ELS LOGO, FCP, SNAP) to the
	 * FC transport, or defer/discard it depending on driver state.
	 * Note the QL_UB_LOCK is dropped and reacquired around the RSCN and
	 * LOGO processing call-outs.
	 */
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	/* First payload byte carries the ELS command code. */
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/* Buffer being freed or adapter powering down: return it to FCA. */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-ID list starts 4 bytes into the RSCN payload. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Re-queue for a later retry by the task daemon. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* sendup==0 from LOGO processing also means defer/re-queue. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	/* Mark the buffer as handed up to the transport. */
	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* IP-over-FC (SNAP) payloads live in DMA memory: sync for CPU. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the unsolicited buffer to the transport's callback. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8635 8634
8636 8635 /*
8637 8636 * ql_send_logo
8638 8637 *
8639 8638 * Input:
8640 8639 * ha: adapter state pointer.
8641 8640 * tq: target queue pointer.
8642 8641 * done_q: done queue pointer.
8643 8642 *
8644 8643 * Context:
8645 8644 * Interrupt or Kernel context, no mailbox commands allowed.
8646 8645 */
8647 8646 void
8648 8647 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8649 8648 {
8650 8649 fc_unsol_buf_t *ubp;
8651 8650 ql_srb_t *sp;
8652 8651 la_els_logo_t *payload;
8653 8652 ql_adapter_state_t *ha = vha->pha;
8654 8653
8655 8654 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8656 8655 tq->d_id.b24);
8657 8656
8658 8657 if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8659 8658 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8660 8659 return;
8661 8660 }
8662 8661
8663 8662 if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8664 8663 tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8665 8664
8666 8665 /* Locate a buffer to use. */
8667 8666 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8668 8667 if (ubp == NULL) {
8669 8668 EL(vha, "Failed, get_unsolicited_buffer\n");
8670 8669 return;
8671 8670 }
8672 8671
8673 8672 DEVICE_QUEUE_LOCK(tq);
8674 8673 tq->flags |= TQF_NEED_AUTHENTICATION;
8675 8674 tq->logout_sent++;
8676 8675 DEVICE_QUEUE_UNLOCK(tq);
8677 8676
8678 8677 EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8679 8678
8680 8679 sp = ubp->ub_fca_private;
8681 8680
8682 8681 /* Set header. */
8683 8682 ubp->ub_frame.d_id = vha->d_id.b24;
8684 8683 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8685 8684 ubp->ub_frame.s_id = tq->d_id.b24;
8686 8685 ubp->ub_frame.rsvd = 0;
8687 8686 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8688 8687 F_CTL_SEQ_INITIATIVE;
8689 8688 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8690 8689 ubp->ub_frame.seq_cnt = 0;
8691 8690 ubp->ub_frame.df_ctl = 0;
8692 8691 ubp->ub_frame.seq_id = 0;
8693 8692 ubp->ub_frame.rx_id = 0xffff;
8694 8693 ubp->ub_frame.ox_id = 0xffff;
8695 8694
8696 8695 /* set payload. */
8697 8696 payload = (la_els_logo_t *)ubp->ub_buffer;
8698 8697 bzero(payload, sizeof (la_els_logo_t));
8699 8698 /* Make sure ls_code in payload is always big endian */
8700 8699 ubp->ub_buffer[0] = LA_ELS_LOGO;
8701 8700 ubp->ub_buffer[1] = 0;
8702 8701 ubp->ub_buffer[2] = 0;
8703 8702 ubp->ub_buffer[3] = 0;
8704 8703 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8705 8704 &payload->nport_ww_name.raw_wwn[0], 8);
8706 8705 payload->nport_id.port_id = tq->d_id.b24;
8707 8706
8708 8707 QL_UB_LOCK(ha);
8709 8708 sp->flags |= SRB_UB_CALLBACK;
8710 8709 QL_UB_UNLOCK(ha);
8711 8710 if (tq->lun_queues.first != NULL) {
8712 8711 sp->lun_queue = (tq->lun_queues.first)->base_address;
8713 8712 } else {
8714 8713 sp->lun_queue = ql_lun_queue(vha, tq, 0);
8715 8714 }
8716 8715 if (done_q) {
8717 8716 ql_add_link_b(done_q, &sp->cmd);
8718 8717 } else {
8719 8718 ql_awaken_task_daemon(ha, sp, 0, 0);
8720 8719 }
8721 8720 }
8722 8721
8723 8722 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8724 8723 }
8725 8724
8726 8725 static int
8727 8726 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8728 8727 {
8729 8728 port_id_t d_id;
8730 8729 ql_srb_t *sp;
8731 8730 ql_link_t *link;
8732 8731 int sendup = 1;
8733 8732
8734 8733 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8735 8734
8736 8735 DEVICE_QUEUE_LOCK(tq);
8737 8736 if (tq->outcnt) {
8738 8737 DEVICE_QUEUE_UNLOCK(tq);
8739 8738 sendup = 0;
8740 8739 (void) ql_abort_device(ha, tq, 1);
8741 8740 ql_delay(ha, 10000);
8742 8741 } else {
8743 8742 DEVICE_QUEUE_UNLOCK(tq);
8744 8743 TASK_DAEMON_LOCK(ha);
8745 8744
8746 8745 for (link = ha->pha->callback_queue.first; link != NULL;
8747 8746 link = link->next) {
8748 8747 sp = link->base_address;
8749 8748 if (sp->flags & SRB_UB_CALLBACK) {
8750 8749 continue;
8751 8750 }
8752 8751 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8753 8752
8754 8753 if (tq->d_id.b24 == d_id.b24) {
8755 8754 sendup = 0;
8756 8755 break;
8757 8756 }
8758 8757 }
8759 8758
8760 8759 TASK_DAEMON_UNLOCK(ha);
8761 8760 }
8762 8761
8763 8762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8764 8763
8765 8764 return (sendup);
8766 8765 }
8767 8766
8768 8767 static int
8769 8768 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8770 8769 {
8771 8770 fc_unsol_buf_t *ubp;
8772 8771 ql_srb_t *sp;
8773 8772 la_els_logi_t *payload;
8774 8773 class_svc_param_t *class3_param;
8775 8774
8776 8775 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8777 8776
8778 8777 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8779 8778 LOOP_DOWN)) {
8780 8779 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8781 8780 return (QL_FUNCTION_FAILED);
8782 8781 }
8783 8782
8784 8783 /* Locate a buffer to use. */
8785 8784 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8786 8785 if (ubp == NULL) {
8787 8786 EL(ha, "Failed\n");
8788 8787 return (QL_FUNCTION_FAILED);
8789 8788 }
8790 8789
8791 8790 QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8792 8791 ha->instance, tq->d_id.b24);
8793 8792
8794 8793 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8795 8794
8796 8795 sp = ubp->ub_fca_private;
8797 8796
8798 8797 /* Set header. */
8799 8798 ubp->ub_frame.d_id = ha->d_id.b24;
8800 8799 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8801 8800 ubp->ub_frame.s_id = tq->d_id.b24;
8802 8801 ubp->ub_frame.rsvd = 0;
8803 8802 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8804 8803 F_CTL_SEQ_INITIATIVE;
8805 8804 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8806 8805 ubp->ub_frame.seq_cnt = 0;
8807 8806 ubp->ub_frame.df_ctl = 0;
8808 8807 ubp->ub_frame.seq_id = 0;
8809 8808 ubp->ub_frame.rx_id = 0xffff;
8810 8809 ubp->ub_frame.ox_id = 0xffff;
8811 8810
8812 8811 /* set payload. */
8813 8812 payload = (la_els_logi_t *)ubp->ub_buffer;
8814 8813 bzero(payload, sizeof (payload));
8815 8814
8816 8815 payload->ls_code.ls_code = LA_ELS_PLOGI;
8817 8816 payload->common_service.fcph_version = 0x2006;
8818 8817 payload->common_service.cmn_features = 0x8800;
8819 8818
8820 8819 CFG_IST(ha, CFG_CTRL_24258081) ?
8821 8820 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8822 8821 ha->init_ctrl_blk.cb24.max_frame_length[0],
8823 8822 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8824 8823 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8825 8824 ha->init_ctrl_blk.cb.max_frame_length[0],
8826 8825 ha->init_ctrl_blk.cb.max_frame_length[1]));
8827 8826
8828 8827 payload->common_service.conc_sequences = 0xff;
8829 8828 payload->common_service.relative_offset = 0x03;
8830 8829 payload->common_service.e_d_tov = 0x7d0;
8831 8830
8832 8831 bcopy((void *)&tq->port_name[0],
8833 8832 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8834 8833
8835 8834 bcopy((void *)&tq->node_name[0],
8836 8835 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8837 8836
8838 8837 class3_param = (class_svc_param_t *)&payload->class_3;
8839 8838 class3_param->class_valid_svc_opt = 0x8000;
8840 8839 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8841 8840 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8842 8841 class3_param->conc_sequences = tq->class3_conc_sequences;
8843 8842 class3_param->open_sequences_per_exch =
8844 8843 tq->class3_open_sequences_per_exch;
8845 8844
8846 8845 QL_UB_LOCK(ha);
8847 8846 sp->flags |= SRB_UB_CALLBACK;
8848 8847 QL_UB_UNLOCK(ha);
8849 8848
8850 8849 ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8851 8850
8852 8851 if (done_q) {
8853 8852 ql_add_link_b(done_q, &sp->cmd);
8854 8853 } else {
8855 8854 ql_awaken_task_daemon(ha, sp, 0, 0);
8856 8855 }
8857 8856
8858 8857 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8859 8858
8860 8859 return (QL_SUCCESS);
8861 8860 }
8862 8861
8863 8862 /*
8864 8863 * Abort outstanding commands in the Firmware, clear internally
8865 8864 * queued commands in the driver, Synchronize the target with
8866 8865 * the Firmware
8867 8866 */
8868 8867 int
8869 8868 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8870 8869 {
8871 8870 ql_link_t *link, *link2;
8872 8871 ql_lun_t *lq;
8873 8872 int rval = QL_SUCCESS;
8874 8873 ql_srb_t *sp;
8875 8874 ql_head_t done_q = { NULL, NULL };
8876 8875
8877 8876 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8878 8877
8879 8878 /*
8880 8879 * First clear, internally queued commands
8881 8880 */
8882 8881 DEVICE_QUEUE_LOCK(tq);
8883 8882 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8884 8883 lq = link->base_address;
8885 8884
8886 8885 link2 = lq->cmd.first;
8887 8886 while (link2 != NULL) {
8888 8887 sp = link2->base_address;
8889 8888 link2 = link2->next;
8890 8889
8891 8890 if (sp->flags & SRB_ABORT) {
8892 8891 continue;
8893 8892 }
8894 8893
8895 8894 /* Remove srb from device command queue. */
8896 8895 ql_remove_link(&lq->cmd, &sp->cmd);
8897 8896 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8898 8897
8899 8898 /* Set ending status. */
8900 8899 sp->pkt->pkt_reason = CS_ABORTED;
8901 8900
8902 8901 /* Call done routine to handle completions. */
8903 8902 ql_add_link_b(&done_q, &sp->cmd);
8904 8903 }
8905 8904 }
8906 8905 DEVICE_QUEUE_UNLOCK(tq);
8907 8906
8908 8907 if (done_q.first != NULL) {
8909 8908 ql_done(done_q.first);
8910 8909 }
8911 8910
8912 8911 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8913 8912 rval = ql_abort_target(ha, tq, 0);
8914 8913 }
8915 8914
8916 8915 if (rval != QL_SUCCESS) {
8917 8916 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8918 8917 } else {
8919 8918 /*EMPTY*/
8920 8919 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8921 8920 ha->vp_index);
8922 8921 }
8923 8922
8924 8923 return (rval);
8925 8924 }
8926 8925
8927 8926 /*
8928 8927 * ql_rcv_rscn_els
8929 8928 * Processes received RSCN extended link service.
8930 8929 *
8931 8930 * Input:
8932 8931 * ha: adapter state pointer.
8933 8932 * mb: array containing input mailbox registers.
8934 8933 * done_q: done queue pointer.
8935 8934 *
8936 8935 * Context:
8937 8936 * Interrupt or Kernel context, no mailbox commands allowed.
8938 8937 */
8939 8938 void
8940 8939 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8941 8940 {
8942 8941 fc_unsol_buf_t *ubp;
8943 8942 ql_srb_t *sp;
8944 8943 fc_rscn_t *rn;
8945 8944 fc_affected_id_t *af;
8946 8945 port_id_t d_id;
8947 8946
8948 8947 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8949 8948
8950 8949 /* Locate a buffer to use. */
8951 8950 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8952 8951 if (ubp != NULL) {
8953 8952 sp = ubp->ub_fca_private;
8954 8953
8955 8954 /* Set header. */
8956 8955 ubp->ub_frame.d_id = ha->d_id.b24;
8957 8956 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8958 8957 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8959 8958 ubp->ub_frame.rsvd = 0;
8960 8959 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8961 8960 F_CTL_SEQ_INITIATIVE;
8962 8961 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8963 8962 ubp->ub_frame.seq_cnt = 0;
8964 8963 ubp->ub_frame.df_ctl = 0;
8965 8964 ubp->ub_frame.seq_id = 0;
8966 8965 ubp->ub_frame.rx_id = 0xffff;
8967 8966 ubp->ub_frame.ox_id = 0xffff;
8968 8967
8969 8968 /* set payload. */
8970 8969 rn = (fc_rscn_t *)ubp->ub_buffer;
8971 8970 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8972 8971
8973 8972 rn->rscn_code = LA_ELS_RSCN;
8974 8973 rn->rscn_len = 4;
8975 8974 rn->rscn_payload_len = 8;
8976 8975 d_id.b.al_pa = LSB(mb[2]);
8977 8976 d_id.b.area = MSB(mb[2]);
8978 8977 d_id.b.domain = LSB(mb[1]);
8979 8978 af->aff_d_id = d_id.b24;
8980 8979 af->aff_format = MSB(mb[1]);
8981 8980
8982 8981 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8983 8982 af->aff_d_id);
8984 8983
8985 8984 ql_update_rscn(ha, af);
8986 8985
8987 8986 QL_UB_LOCK(ha);
8988 8987 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8989 8988 QL_UB_UNLOCK(ha);
8990 8989 ql_add_link_b(done_q, &sp->cmd);
8991 8990 }
8992 8991
8993 8992 if (ubp == NULL) {
8994 8993 EL(ha, "Failed, get_unsolicited_buffer\n");
8995 8994 } else {
8996 8995 /*EMPTY*/
8997 8996 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8998 8997 }
8999 8998 }
9000 8999
9001 9000 /*
9002 9001 * ql_update_rscn
9003 9002 * Update devices from received RSCN.
9004 9003 *
9005 9004 * Input:
9006 9005 * ha: adapter state pointer.
9007 9006 * af: pointer to RSCN data.
9008 9007 *
9009 9008 * Context:
9010 9009 * Interrupt or Kernel context, no mailbox commands allowed.
9011 9010 */
9012 9011 static void
9013 9012 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9014 9013 {
9015 9014 ql_link_t *link;
9016 9015 uint16_t index;
9017 9016 ql_tgt_t *tq;
9018 9017
9019 9018 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9020 9019
9021 9020 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9022 9021 port_id_t d_id;
9023 9022
9024 9023 d_id.r.rsvd_1 = 0;
9025 9024 d_id.b24 = af->aff_d_id;
9026 9025
9027 9026 tq = ql_d_id_to_queue(ha, d_id);
9028 9027 if (tq) {
9029 9028 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9030 9029 DEVICE_QUEUE_LOCK(tq);
9031 9030 tq->flags |= TQF_RSCN_RCVD;
9032 9031 DEVICE_QUEUE_UNLOCK(tq);
9033 9032 }
9034 9033 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9035 9034 ha->instance);
9036 9035
9037 9036 return;
9038 9037 }
9039 9038
9040 9039 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9041 9040 for (link = ha->dev[index].first; link != NULL;
9042 9041 link = link->next) {
9043 9042 tq = link->base_address;
9044 9043
9045 9044 switch (af->aff_format) {
9046 9045 case FC_RSCN_FABRIC_ADDRESS:
9047 9046 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9048 9047 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9049 9048 tq->d_id.b24);
9050 9049 DEVICE_QUEUE_LOCK(tq);
9051 9050 tq->flags |= TQF_RSCN_RCVD;
9052 9051 DEVICE_QUEUE_UNLOCK(tq);
9053 9052 }
9054 9053 break;
9055 9054
9056 9055 case FC_RSCN_AREA_ADDRESS:
9057 9056 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9058 9057 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9059 9058 tq->d_id.b24);
9060 9059 DEVICE_QUEUE_LOCK(tq);
9061 9060 tq->flags |= TQF_RSCN_RCVD;
9062 9061 DEVICE_QUEUE_UNLOCK(tq);
9063 9062 }
9064 9063 break;
9065 9064
9066 9065 case FC_RSCN_DOMAIN_ADDRESS:
9067 9066 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9068 9067 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9069 9068 tq->d_id.b24);
9070 9069 DEVICE_QUEUE_LOCK(tq);
9071 9070 tq->flags |= TQF_RSCN_RCVD;
9072 9071 DEVICE_QUEUE_UNLOCK(tq);
9073 9072 }
9074 9073 break;
9075 9074
9076 9075 default:
9077 9076 break;
9078 9077 }
9079 9078 }
9080 9079 }
9081 9080 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9082 9081 }
9083 9082
9084 9083 /*
9085 9084 * ql_process_rscn
9086 9085 *
9087 9086 * Input:
9088 9087 * ha: adapter state pointer.
9089 9088 * af: RSCN payload pointer.
9090 9089 *
9091 9090 * Context:
9092 9091 * Kernel context.
9093 9092 */
9094 9093 static int
9095 9094 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9096 9095 {
9097 9096 int sendit;
9098 9097 int sendup = 1;
9099 9098 ql_link_t *link;
9100 9099 uint16_t index;
9101 9100 ql_tgt_t *tq;
9102 9101
9103 9102 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9104 9103
9105 9104 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9106 9105 port_id_t d_id;
9107 9106
9108 9107 d_id.r.rsvd_1 = 0;
9109 9108 d_id.b24 = af->aff_d_id;
9110 9109
9111 9110 tq = ql_d_id_to_queue(ha, d_id);
9112 9111 if (tq) {
9113 9112 sendup = ql_process_rscn_for_device(ha, tq);
9114 9113 }
9115 9114
9116 9115 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9117 9116
9118 9117 return (sendup);
9119 9118 }
9120 9119
9121 9120 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9122 9121 for (link = ha->dev[index].first; link != NULL;
9123 9122 link = link->next) {
9124 9123
9125 9124 tq = link->base_address;
9126 9125 if (tq == NULL) {
9127 9126 continue;
9128 9127 }
9129 9128
9130 9129 switch (af->aff_format) {
9131 9130 case FC_RSCN_FABRIC_ADDRESS:
9132 9131 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9133 9132 sendit = ql_process_rscn_for_device(
9134 9133 ha, tq);
9135 9134 if (sendup) {
9136 9135 sendup = sendit;
9137 9136 }
9138 9137 }
9139 9138 break;
9140 9139
9141 9140 case FC_RSCN_AREA_ADDRESS:
9142 9141 if ((tq->d_id.b24 & 0xffff00) ==
9143 9142 af->aff_d_id) {
9144 9143 sendit = ql_process_rscn_for_device(
9145 9144 ha, tq);
9146 9145
9147 9146 if (sendup) {
9148 9147 sendup = sendit;
9149 9148 }
9150 9149 }
9151 9150 break;
9152 9151
9153 9152 case FC_RSCN_DOMAIN_ADDRESS:
9154 9153 if ((tq->d_id.b24 & 0xff0000) ==
9155 9154 af->aff_d_id) {
9156 9155 sendit = ql_process_rscn_for_device(
9157 9156 ha, tq);
9158 9157
9159 9158 if (sendup) {
9160 9159 sendup = sendit;
9161 9160 }
9162 9161 }
9163 9162 break;
9164 9163
9165 9164 default:
9166 9165 break;
9167 9166 }
9168 9167 }
9169 9168 }
9170 9169
9171 9170 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9172 9171
9173 9172 return (sendup);
9174 9173 }
9175 9174
9176 9175 /*
9177 9176 * ql_process_rscn_for_device
9178 9177 *
9179 9178 * Input:
9180 9179 * ha: adapter state pointer.
9181 9180 * tq: target queue pointer.
9182 9181 *
9183 9182 * Context:
9184 9183 * Kernel context.
9185 9184 */
9186 9185 static int
9187 9186 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9188 9187 {
9189 9188 int sendup = 1;
9190 9189
9191 9190 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9192 9191
9193 9192 DEVICE_QUEUE_LOCK(tq);
9194 9193
9195 9194 /*
9196 9195 * Let FCP-2 compliant devices continue I/Os
9197 9196 * with their low level recoveries.
9198 9197 */
9199 9198 if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9200 9199 (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9201 9200 /*
9202 9201 * Cause ADISC to go out
9203 9202 */
9204 9203 DEVICE_QUEUE_UNLOCK(tq);
9205 9204
9206 9205 (void) ql_get_port_database(ha, tq, PDF_NONE);
9207 9206
9208 9207 DEVICE_QUEUE_LOCK(tq);
9209 9208 tq->flags &= ~TQF_RSCN_RCVD;
9210 9209
9211 9210 } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9212 9211 if (tq->d_id.b24 != BROADCAST_ADDR) {
9213 9212 tq->flags |= TQF_NEED_AUTHENTICATION;
9214 9213 }
9215 9214
9216 9215 DEVICE_QUEUE_UNLOCK(tq);
9217 9216
9218 9217 (void) ql_abort_device(ha, tq, 1);
9219 9218
9220 9219 DEVICE_QUEUE_LOCK(tq);
9221 9220
9222 9221 if (tq->outcnt) {
9223 9222 sendup = 0;
9224 9223 } else {
9225 9224 tq->flags &= ~TQF_RSCN_RCVD;
9226 9225 }
9227 9226 } else {
9228 9227 tq->flags &= ~TQF_RSCN_RCVD;
9229 9228 }
9230 9229
9231 9230 if (sendup) {
9232 9231 if (tq->d_id.b24 != BROADCAST_ADDR) {
9233 9232 tq->flags |= TQF_NEED_AUTHENTICATION;
9234 9233 }
9235 9234 }
9236 9235
9237 9236 DEVICE_QUEUE_UNLOCK(tq);
9238 9237
9239 9238 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9240 9239
9241 9240 return (sendup);
9242 9241 }
9243 9242
/*
 * ql_handle_rscn_update
 *	Rescans the firmware's device id list after an RSCN, creates
 *	device queues for newly appeared devices, and emulates a PLOGI
 *	to the transport for each valid new device.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context (allocates with KM_SLEEP, issues mailbox
 *	commands via ql_get_id_list/ql_get_port_database).
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int		rval;
	ql_tgt_t	*tq;
	uint16_t	index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t	list_size;
	port_id_t	d_id;
	ql_mbx_data_t	mr;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP allocations do not fail; kept for safety. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Skip devices we already know about. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Drop the lock around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9333 9332
9334 9333 /*
9335 9334 * ql_free_unsolicited_buffer
9336 9335 * Frees allocated buffer.
9337 9336 *
9338 9337 * Input:
9339 9338 * ha = adapter state pointer.
9340 9339 * index = buffer array index.
9341 9340 * ADAPTER_STATE_LOCK must be already obtained.
9342 9341 *
9343 9342 * Context:
9344 9343 * Kernel context.
9345 9344 */
9346 9345 static void
9347 9346 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9348 9347 {
9349 9348 ql_srb_t *sp;
9350 9349 int status;
9351 9350
9352 9351 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9353 9352
9354 9353 sp = ubp->ub_fca_private;
9355 9354 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9356 9355 /* Disconnect IP from system buffers. */
9357 9356 if (ha->flags & IP_INITIALIZED) {
9358 9357 ADAPTER_STATE_UNLOCK(ha);
9359 9358 status = ql_shutdown_ip(ha);
9360 9359 ADAPTER_STATE_LOCK(ha);
9361 9360 if (status != QL_SUCCESS) {
9362 9361 cmn_err(CE_WARN,
9363 9362 "!Qlogic %s(%d): Failed to shutdown IP",
9364 9363 QL_NAME, ha->instance);
9365 9364 return;
9366 9365 }
9367 9366
9368 9367 ha->flags &= ~IP_ENABLED;
9369 9368 }
9370 9369
9371 9370 ql_free_phys(ha, &sp->ub_buffer);
9372 9371 } else {
9373 9372 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9374 9373 }
9375 9374
9376 9375 kmem_free(sp, sizeof (ql_srb_t));
9377 9376 kmem_free(ubp, sizeof (fc_unsol_buf_t));
9378 9377
9379 9378 if (ha->ub_allocated != 0) {
9380 9379 ha->ub_allocated--;
9381 9380 }
9382 9381
9383 9382 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9384 9383 }
9385 9384
9386 9385 /*
9387 9386 * ql_get_unsolicited_buffer
9388 9387 * Locates a free unsolicited buffer.
9389 9388 *
9390 9389 * Input:
9391 9390 * ha = adapter state pointer.
9392 9391 * type = buffer type.
9393 9392 *
9394 9393 * Returns:
9395 9394 * Unsolicited buffer pointer.
9396 9395 *
9397 9396 * Context:
9398 9397 * Interrupt or Kernel context, no mailbox commands allowed.
9399 9398 */
9400 9399 fc_unsol_buf_t *
9401 9400 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9402 9401 {
9403 9402 fc_unsol_buf_t *ubp;
9404 9403 ql_srb_t *sp;
9405 9404 uint16_t index;
9406 9405
9407 9406 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9408 9407
9409 9408 /* Locate a buffer to use. */
9410 9409 ubp = NULL;
9411 9410
9412 9411 QL_UB_LOCK(ha);
9413 9412 for (index = 0; index < QL_UB_LIMIT; index++) {
9414 9413 ubp = ha->ub_array[index];
9415 9414 if (ubp != NULL) {
9416 9415 sp = ubp->ub_fca_private;
9417 9416 if ((sp->ub_type == type) &&
9418 9417 (sp->flags & SRB_UB_IN_FCA) &&
9419 9418 (!(sp->flags & (SRB_UB_CALLBACK |
9420 9419 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9421 9420 sp->flags |= SRB_UB_ACQUIRED;
9422 9421 ubp->ub_resp_flags = 0;
9423 9422 break;
9424 9423 }
9425 9424 ubp = NULL;
9426 9425 }
9427 9426 }
9428 9427 QL_UB_UNLOCK(ha);
9429 9428
9430 9429 if (ubp) {
9431 9430 ubp->ub_resp_token = NULL;
9432 9431 ubp->ub_class = FC_TRAN_CLASS3;
9433 9432 }
9434 9433
9435 9434 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9436 9435
9437 9436 return (ubp);
9438 9437 }
9439 9438
9440 9439 /*
9441 9440 * ql_ub_frame_hdr
9442 9441 * Processes received unsolicited buffers from ISP.
9443 9442 *
9444 9443 * Input:
9445 9444 * ha: adapter state pointer.
9446 9445 * tq: target queue pointer.
9447 9446 * index: unsolicited buffer array index.
9448 9447 * done_q: done queue pointer.
9449 9448 *
9450 9449 * Returns:
9451 9450 * ql local function return status code.
9452 9451 *
9453 9452 * Context:
9454 9453 * Interrupt or Kernel context, no mailbox commands allowed.
9455 9454 */
9456 9455 int
9457 9456 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9458 9457 ql_head_t *done_q)
9459 9458 {
9460 9459 fc_unsol_buf_t *ubp;
9461 9460 ql_srb_t *sp;
9462 9461 uint16_t loop_id;
9463 9462 int rval = QL_FUNCTION_FAILED;
9464 9463
9465 9464 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9466 9465
9467 9466 QL_UB_LOCK(ha);
9468 9467 if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9469 9468 EL(ha, "Invalid buffer index=%xh\n", index);
9470 9469 QL_UB_UNLOCK(ha);
9471 9470 return (rval);
9472 9471 }
9473 9472
9474 9473 sp = ubp->ub_fca_private;
9475 9474 if (sp->flags & SRB_UB_FREE_REQUESTED) {
9476 9475 EL(ha, "buffer freed index=%xh\n", index);
9477 9476 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9478 9477 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9479 9478
9480 9479 sp->flags |= SRB_UB_IN_FCA;
9481 9480
9482 9481 QL_UB_UNLOCK(ha);
9483 9482 return (rval);
9484 9483 }
9485 9484
9486 9485 if ((sp->handle == index) &&
9487 9486 (sp->flags & SRB_UB_IN_ISP) &&
9488 9487 (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9489 9488 (!(sp->flags & SRB_UB_ACQUIRED))) {
9490 9489 /* set broadcast D_ID */
9491 9490 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9492 9491 BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9493 9492 if (tq->ub_loop_id == loop_id) {
9494 9493 if (ha->topology & QL_FL_PORT) {
9495 9494 ubp->ub_frame.d_id = 0x000000;
9496 9495 } else {
9497 9496 ubp->ub_frame.d_id = 0xffffff;
9498 9497 }
9499 9498 } else {
9500 9499 ubp->ub_frame.d_id = ha->d_id.b24;
9501 9500 }
9502 9501 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9503 9502 ubp->ub_frame.rsvd = 0;
9504 9503 ubp->ub_frame.s_id = tq->d_id.b24;
9505 9504 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9506 9505 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9507 9506 ubp->ub_frame.df_ctl = 0;
9508 9507 ubp->ub_frame.seq_id = tq->ub_seq_id;
9509 9508 ubp->ub_frame.rx_id = 0xffff;
9510 9509 ubp->ub_frame.ox_id = 0xffff;
9511 9510 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9512 9511 sp->ub_size : tq->ub_sequence_length;
9513 9512 ubp->ub_frame.ro = tq->ub_frame_ro;
9514 9513
9515 9514 tq->ub_sequence_length = (uint16_t)
9516 9515 (tq->ub_sequence_length - ubp->ub_bufsize);
9517 9516 tq->ub_frame_ro += ubp->ub_bufsize;
9518 9517 tq->ub_seq_cnt++;
9519 9518
9520 9519 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9521 9520 if (tq->ub_seq_cnt == 1) {
9522 9521 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9523 9522 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9524 9523 } else {
9525 9524 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9526 9525 F_CTL_END_SEQ;
9527 9526 }
9528 9527 tq->ub_total_seg_cnt = 0;
9529 9528 } else if (tq->ub_seq_cnt == 1) {
9530 9529 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9531 9530 F_CTL_FIRST_SEQ;
9532 9531 ubp->ub_frame.df_ctl = 0x20;
9533 9532 }
9534 9533
9535 9534 QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9536 9535 ha->instance, ubp->ub_frame.d_id);
9537 9536 QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9538 9537 ha->instance, ubp->ub_frame.s_id);
9539 9538 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9540 9539 ha->instance, ubp->ub_frame.seq_cnt);
9541 9540 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9542 9541 ha->instance, ubp->ub_frame.seq_id);
9543 9542 QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9544 9543 ha->instance, ubp->ub_frame.ro);
9545 9544 QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9546 9545 ha->instance, ubp->ub_frame.f_ctl);
9547 9546 QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9548 9547 ha->instance, ubp->ub_bufsize);
9549 9548 QL_DUMP_3(ubp->ub_buffer, 8,
9550 9549 ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9551 9550
9552 9551 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9553 9552 ql_add_link_b(done_q, &sp->cmd);
9554 9553 rval = QL_SUCCESS;
9555 9554 } else {
9556 9555 if (sp->handle != index) {
9557 9556 EL(ha, "Bad index=%xh, expect=%xh\n", index,
9558 9557 sp->handle);
9559 9558 }
9560 9559 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9561 9560 EL(ha, "buffer was already in driver, index=%xh\n",
9562 9561 index);
9563 9562 }
9564 9563 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
9565 9564 EL(ha, "buffer was not an IP buffer, index=%xh\n",
9566 9565 index);
9567 9566 }
9568 9567 if (sp->flags & SRB_UB_ACQUIRED) {
9569 9568 EL(ha, "buffer was being used by driver, index=%xh\n",
9570 9569 index);
9571 9570 }
9572 9571 }
9573 9572 QL_UB_UNLOCK(ha);
9574 9573
9575 9574 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9576 9575
9577 9576 return (rval);
9578 9577 }
9579 9578
9580 9579 /*
9581 9580 * ql_timer
9582 9581 * One second timer function.
9583 9582 *
9584 9583 * Input:
9585 9584 * ql_hba.first = first link in adapter list.
9586 9585 *
9587 9586 * Context:
9588 9587 * Interrupt context, no mailbox commands allowed.
9589 9588 */
9590 9589 static void
9591 9590 ql_timer(void *arg)
9592 9591 {
9593 9592 ql_link_t *link;
9594 9593 uint32_t set_flags;
9595 9594 uint32_t reset_flags;
9596 9595 ql_adapter_state_t *ha = NULL, *vha;
9597 9596
9598 9597 QL_PRINT_6(CE_CONT, "started\n");
9599 9598
9600 9599 /* Acquire global state lock. */
9601 9600 GLOBAL_STATE_LOCK();
9602 9601 if (ql_timer_timeout_id == NULL) {
9603 9602 /* Release global state lock. */
9604 9603 GLOBAL_STATE_UNLOCK();
9605 9604 return;
9606 9605 }
9607 9606
9608 9607 for (link = ql_hba.first; link != NULL; link = link->next) {
9609 9608 ha = link->base_address;
9610 9609
9611 9610 /* Skip adapter if suspended of stalled. */
9612 9611 ADAPTER_STATE_LOCK(ha);
9613 9612 if (ha->flags & ADAPTER_SUSPENDED ||
9614 9613 ha->task_daemon_flags & DRIVER_STALL) {
9615 9614 ADAPTER_STATE_UNLOCK(ha);
9616 9615 continue;
9617 9616 }
9618 9617 ha->flags |= ADAPTER_TIMER_BUSY;
9619 9618 ADAPTER_STATE_UNLOCK(ha);
9620 9619
9621 9620 QL_PM_LOCK(ha);
9622 9621 if (ha->power_level != PM_LEVEL_D0) {
9623 9622 QL_PM_UNLOCK(ha);
9624 9623
9625 9624 ADAPTER_STATE_LOCK(ha);
9626 9625 ha->flags &= ~ADAPTER_TIMER_BUSY;
9627 9626 ADAPTER_STATE_UNLOCK(ha);
9628 9627 continue;
9629 9628 }
9630 9629 ha->busy++;
9631 9630 QL_PM_UNLOCK(ha);
9632 9631
9633 9632 set_flags = 0;
9634 9633 reset_flags = 0;
9635 9634
9636 9635 /* Port retry timer handler. */
9637 9636 if (LOOP_READY(ha)) {
9638 9637 ADAPTER_STATE_LOCK(ha);
9639 9638 if (ha->port_retry_timer != 0) {
9640 9639 ha->port_retry_timer--;
9641 9640 if (ha->port_retry_timer == 0) {
9642 9641 set_flags |= PORT_RETRY_NEEDED;
9643 9642 }
9644 9643 }
9645 9644 ADAPTER_STATE_UNLOCK(ha);
9646 9645 }
9647 9646
9648 9647 /* Loop down timer handler. */
9649 9648 if (LOOP_RECONFIGURE(ha) == 0) {
9650 9649 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9651 9650 ha->loop_down_timer--;
9652 9651 /*
9653 9652 * give the firmware loop down dump flag
9654 9653 * a chance to work.
9655 9654 */
9656 9655 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9657 9656 if (CFG_IST(ha,
9658 9657 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9659 9658 (void) ql_binary_fw_dump(ha,
9660 9659 TRUE);
9661 9660 }
9662 9661 EL(ha, "loop_down_reset, "
9663 9662 "isp_abort_needed\n");
9664 9663 set_flags |= ISP_ABORT_NEEDED;
9665 9664 }
9666 9665 }
9667 9666 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9668 9667 /* Command abort time handler. */
9669 9668 if (ha->loop_down_timer ==
9670 9669 ha->loop_down_abort_time) {
9671 9670 ADAPTER_STATE_LOCK(ha);
9672 9671 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9673 9672 ADAPTER_STATE_UNLOCK(ha);
9674 9673 set_flags |= ABORT_QUEUES_NEEDED;
9675 9674 EL(ha, "loop_down_abort_time, "
9676 9675 "abort_queues_needed\n");
9677 9676 }
9678 9677
9679 9678 /* Watchdog timer handler. */
9680 9679 if (ha->watchdog_timer == 0) {
9681 9680 ha->watchdog_timer = WATCHDOG_TIME;
9682 9681 } else if (LOOP_READY(ha)) {
9683 9682 ha->watchdog_timer--;
9684 9683 if (ha->watchdog_timer == 0) {
9685 9684 for (vha = ha; vha != NULL;
9686 9685 vha = vha->vp_next) {
9687 9686 ql_watchdog(vha,
9688 9687 &set_flags,
9689 9688 &reset_flags);
9690 9689 }
9691 9690 ha->watchdog_timer =
9692 9691 WATCHDOG_TIME;
9693 9692 }
9694 9693 }
9695 9694 }
9696 9695 }
9697 9696
9698 9697 /* Idle timer handler. */
9699 9698 if (!DRIVER_SUSPENDED(ha)) {
9700 9699 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9701 9700 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9702 9701 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9703 9702 #endif
9704 9703 ha->idle_timer = 0;
9705 9704 }
9706 9705 if (ha->send_plogi_timer != NULL) {
9707 9706 ha->send_plogi_timer--;
9708 9707 if (ha->send_plogi_timer == NULL) {
9709 9708 set_flags |= SEND_PLOGI;
9710 9709 }
9711 9710 }
9712 9711 }
9713 9712 ADAPTER_STATE_LOCK(ha);
9714 9713 if (ha->idc_restart_timer != 0) {
9715 9714 ha->idc_restart_timer--;
9716 9715 if (ha->idc_restart_timer == 0) {
9717 9716 ha->idc_restart_cnt = 0;
9718 9717 reset_flags |= DRIVER_STALL;
9719 9718 }
9720 9719 }
9721 9720 if (ha->idc_flash_acc_timer != 0) {
9722 9721 ha->idc_flash_acc_timer--;
9723 9722 if (ha->idc_flash_acc_timer == 0 &&
9724 9723 ha->idc_flash_acc != 0) {
9725 9724 ha->idc_flash_acc = 1;
9726 9725 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9727 9726 ha->idc_mb[1] = 0;
9728 9727 ha->idc_mb[2] = IDC_OPC_DRV_START;
9729 9728 set_flags |= IDC_EVENT;
9730 9729 }
9731 9730 }
9732 9731 ADAPTER_STATE_UNLOCK(ha);
9733 9732
9734 9733 if (set_flags != 0 || reset_flags != 0) {
9735 9734 ql_awaken_task_daemon(ha, NULL, set_flags,
9736 9735 reset_flags);
9737 9736 }
9738 9737
9739 9738 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9740 9739 ql_blink_led(ha);
9741 9740 }
9742 9741
9743 9742 /* Update the IO stats */
9744 9743 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9745 9744 ha->xioctl->IOInputMByteCnt +=
9746 9745 (ha->xioctl->IOInputByteCnt / 0x100000);
9747 9746 ha->xioctl->IOInputByteCnt %= 0x100000;
9748 9747 }
9749 9748
9750 9749 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9751 9750 ha->xioctl->IOOutputMByteCnt +=
9752 9751 (ha->xioctl->IOOutputByteCnt / 0x100000);
9753 9752 ha->xioctl->IOOutputByteCnt %= 0x100000;
9754 9753 }
9755 9754
9756 9755 if (CFG_IST(ha, CFG_CTRL_8021)) {
9757 9756 (void) ql_8021_idc_handler(ha);
9758 9757 }
9759 9758
9760 9759 ADAPTER_STATE_LOCK(ha);
9761 9760 ha->flags &= ~ADAPTER_TIMER_BUSY;
9762 9761 ADAPTER_STATE_UNLOCK(ha);
9763 9762
9764 9763 QL_PM_LOCK(ha);
9765 9764 ha->busy--;
9766 9765 QL_PM_UNLOCK(ha);
9767 9766 }
9768 9767
9769 9768 /* Restart timer, if not being stopped. */
9770 9769 if (ql_timer_timeout_id != NULL) {
9771 9770 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9772 9771 }
9773 9772
9774 9773 /* Release global state lock. */
9775 9774 GLOBAL_STATE_UNLOCK();
9776 9775
9777 9776 QL_PRINT_6(CE_CONT, "done\n");
9778 9777 }
9779 9778
9780 9779 /*
9781 9780 * ql_timeout_insert
9782 9781 * Function used to insert a command block onto the
9783 9782 * watchdog timer queue.
9784 9783 *
9785 9784 * Note: Must insure that pkt_time is not zero
9786 9785 * before calling ql_timeout_insert.
9787 9786 *
9788 9787 * Input:
9789 9788 * ha: adapter state pointer.
9790 9789 * tq: target queue pointer.
9791 9790 * sp: SRB pointer.
9792 9791 * DEVICE_QUEUE_LOCK must be already obtained.
9793 9792 *
9794 9793 * Context:
9795 9794 * Kernel context.
9796 9795 */
9797 9796 /* ARGSUSED */
9798 9797 static void
9799 9798 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9800 9799 {
9801 9800 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9802 9801
9803 9802 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9804 9803 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9805 9804 /*
9806 9805 * The WATCHDOG_TIME must be rounded up + 1. As an example,
9807 9806 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9808 9807 * will expire in the next watchdog call, which could be in
9809 9808 * 1 microsecond.
9810 9809 *
9811 9810 */
9812 9811 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9813 9812 WATCHDOG_TIME;
9814 9813 /*
9815 9814 * Added an additional 10 to account for the
9816 9815 * firmware timer drift which can occur with
9817 9816 * very long timeout values.
9818 9817 */
9819 9818 sp->wdg_q_time += 10;
9820 9819
9821 9820 /*
9822 9821 * Add 6 more to insure watchdog does not timeout at the same
9823 9822 * time as ISP RISC code timeout.
9824 9823 */
9825 9824 sp->wdg_q_time += 6;
9826 9825
9827 9826 /* Save initial time for resetting watchdog time. */
9828 9827 sp->init_wdg_q_time = sp->wdg_q_time;
9829 9828
9830 9829 /* Insert command onto watchdog queue. */
9831 9830 ql_add_link_b(&tq->wdg, &sp->wdg);
9832 9831
9833 9832 sp->flags |= SRB_WATCHDOG_ENABLED;
9834 9833 } else {
9835 9834 sp->isp_timeout = 0;
9836 9835 sp->wdg_q_time = 0;
9837 9836 sp->init_wdg_q_time = 0;
9838 9837 }
9839 9838
9840 9839 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9841 9840 }
9842 9841
/*
 * ql_watchdog
 *	Timeout handler that runs in interrupt context. The
 *	ql_adapter_state_t * argument is the parameter set up when the
 *	timeout was initialized (state structure pointer).
 *	Function used to update timeout values and if timeout
 *	has occurred command will be aborted.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt context, no mailbox commands allowed.
 */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock; in interrupt
			 * context we must not block, so a busy queue is
			 * simply skipped until the next watchdog tick.
			 * next_device = NULL terminates this bucket's scan.
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			/* Capture successor while holding the lock. */
			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off watchdog. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						/*
						 * Command was already handed
						 * to the ISP; abandon the
						 * entire scan by nulling the
						 * cursors and forcing the
						 * outer loop index past its
						 * bound.
						 */
						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/*
			 * Release device queue lock (tq is NULL when the
			 * scan was abandoned above and the lock is already
			 * dropped).
			 */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9958 9957
/*
 * ql_cmd_timeout
 *	Command timeout handler.  Completes the timed-out SRB with
 *	CS_TIMEOUT via one of three paths: (1) command never reached the
 *	ISP, (2) 8021-type controller with per-command abort, (3) all
 *	other controllers, which require a full ISP abort.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	tq:		target queue pointer.
 *	sp:		SRB pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt context, no mailbox commands allowed.
 *	Called with DEVICE_QUEUE_LOCK held; each path drops it around the
 *	completion work and re-acquires it before returning.
 */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command timed out while still queued inside the driver. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);
	} else if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* 8021: abort just this command rather than the whole ISP. */
		int		rval;
		uint32_t	index;

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		if (sp->handle) {
			ha->pha->timeout_cnt++;
			index = sp->handle & OSC_INDEX_MASK;
			/*
			 * Invalidate the ring entry and clear the
			 * outstanding slot before issuing the abort.
			 */
			if (ha->pha->outstanding_cmds[index] == sp) {
				sp->request_ring_ptr->entry_type =
				    INVALID_ENTRY_TYPE;
				sp->request_ring_ptr->entry_count = 0;
				ha->pha->outstanding_cmds[index] = 0;
			}
			INTR_UNLOCK(ha);

			rval = ql_abort_command(ha, sp);
			/*
			 * Escalate to a full ISP abort when the per-command
			 * abort itself fails or too many timeouts have
			 * accumulated.
			 */
			if (rval == QL_FUNCTION_TIMEOUT ||
			    rval == QL_LOCK_TIMEOUT ||
			    rval == QL_FUNCTION_PARAMETER_ERROR ||
			    ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
				*set_flags |= ISP_ABORT_NEEDED;
				EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
				    "needed\n", rval, ha->pha->timeout_cnt);
			}

			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		} else {
			INTR_UNLOCK(ha);
		}

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);

	} else {
		/*
		 * Command is in the ISP on a non-8021 controller: the only
		 * recovery is a full ISP abort/reset.
		 */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10088 10087
10089 10088 /*
10090 10089 * ql_rst_aen
10091 10090 * Processes asynchronous reset.
10092 10091 *
10093 10092 * Input:
10094 10093 * ha = adapter state pointer.
10095 10094 *
10096 10095 * Context:
10097 10096 * Kernel context.
10098 10097 */
10099 10098 static void
10100 10099 ql_rst_aen(ql_adapter_state_t *ha)
10101 10100 {
10102 10101 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10103 10102
10104 10103 /* Issue marker command. */
10105 10104 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10106 10105
10107 10106 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10108 10107 }
10109 10108
10110 10109 /*
10111 10110 * ql_cmd_wait
10112 10111 * Stall driver until all outstanding commands are returned.
10113 10112 *
10114 10113 * Input:
10115 10114 * ha = adapter state pointer.
10116 10115 *
10117 10116 * Context:
10118 10117 * Kernel context.
10119 10118 */
10120 10119 void
10121 10120 ql_cmd_wait(ql_adapter_state_t *ha)
10122 10121 {
10123 10122 uint16_t index;
10124 10123 ql_link_t *link;
10125 10124 ql_tgt_t *tq;
10126 10125 ql_adapter_state_t *vha;
10127 10126
10128 10127 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10129 10128
10130 10129 /* Wait for all outstanding commands to be returned. */
10131 10130 (void) ql_wait_outstanding(ha);
10132 10131
10133 10132 /*
10134 10133 * clear out internally queued commands
10135 10134 */
10136 10135 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10137 10136 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10138 10137 for (link = vha->dev[index].first; link != NULL;
10139 10138 link = link->next) {
10140 10139 tq = link->base_address;
10141 10140 if (tq &&
10142 10141 (!(tq->prli_svc_param_word_3 &
10143 10142 PRLI_W3_RETRY))) {
10144 10143 (void) ql_abort_device(vha, tq, 0);
10145 10144 }
10146 10145 }
10147 10146 }
10148 10147 }
10149 10148
10150 10149 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10151 10150 }
10152 10151
/*
 * ql_wait_outstanding
 *	Wait for all outstanding commands to complete.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	index - the index for ql_srb into outstanding_cmds.
 *
 * Context:
 *	Kernel context.
 */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Total delay budget in units of 10ms ql_delay calls. */
	count = ql_osc_wait_count;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Keep feeding queued IOCBs while waiting; resetting
		 * index to 1 restarts the scan from the first slot.
		 */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/* Slots marked SRB_COMMAND_TIMEOUT are not waited on. */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/*
				 * index = 0 plus the loop increment
				 * rescans from slot 1.
				 */
				index = 0;
			} else {
				/* Budget exhausted; give up on this slot. */
				EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
				    (void *)sp, index, sp->handle);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
10197 10196
/*
 * ql_restart_queues
 *	Restart device queues.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every target on the physical port and all virtual ports. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Kick every LUN that has queued commands. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						ql_next(vha, lq);
						/*
						 * NOTE(review): ql_next
						 * appears to release the
						 * device queue lock; it is
						 * re-acquired here — confirm.
						 */
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10249 10248
/*
 * ql_iidma
 *	Setup iiDMA parameters to firmware
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA only applies to 24xx/25xx/81xx controllers. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only process targets flagged for an update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip fabric-service handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/*
			 * Get the iiDMA persistent data, keyed by the
			 * target's 8-byte port name ("iidma-rate-<WWPN>").
			 */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* Property not configured. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/*
						 * 8Gb only valid on 25xx;
						 * fall back to 4Gb elsewhere.
						 */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_8081))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10356 10355
/*
 * ql_abort_queues
 *	Abort all commands on device queues.  Commands in the ISP's
 *	outstanding array are completed with CS_PORT_UNAVAILABLE first,
 *	then each target's internal queues are flushed.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Drain any pending IOCBs first; index = 1 restarts the
		 * scan after the lock was dropped.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Drop the INTR lock across the completion call. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Flush driver-internal queues on physical and virtual ports. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10438 10437
/*
 * ql_abort_device_queues
 *	Abort all commands queued on a single target's LUN queues,
 *	completing each with CS_PORT_UNAVAILABLE.
 *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Commands already being aborted are left alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Drop the lock across the completion call. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The list may have changed while unlocked, so
			 * restart from the head of this LUN's queue.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10497 10496
10498 10497 /*
10499 10498 * ql_loop_resync
10500 10499 * Resync with fibre channel devices.
10501 10500 *
10502 10501 * Input:
10503 10502 * ha = adapter state pointer.
10504 10503 * DEVICE_QUEUE_LOCK must be released.
10505 10504 *
10506 10505 * Returns:
10507 10506 * ql local function return status code.
10508 10507 *
10509 10508 * Context:
10510 10509 * Kernel context.
10511 10510 */
10512 10511 static int
10513 10512 ql_loop_resync(ql_adapter_state_t *ha)
10514 10513 {
10515 10514 int rval;
10516 10515
10517 10516 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10518 10517
10519 10518 if (ha->flags & IP_INITIALIZED) {
10520 10519 (void) ql_shutdown_ip(ha);
10521 10520 }
10522 10521
10523 10522 rval = ql_fw_ready(ha, 10);
10524 10523
10525 10524 TASK_DAEMON_LOCK(ha);
10526 10525 ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10527 10526 TASK_DAEMON_UNLOCK(ha);
10528 10527
10529 10528 /* Set loop online, if it really is. */
10530 10529 if (rval == QL_SUCCESS) {
10531 10530 ql_loop_online(ha);
10532 10531 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10533 10532 } else {
10534 10533 EL(ha, "failed, rval = %xh\n", rval);
10535 10534 }
10536 10535
10537 10536 return (rval);
10538 10537 }
10539 10538
10540 10539 /*
10541 10540 * ql_loop_online
10542 10541 * Set loop online status if it really is online.
10543 10542 *
10544 10543 * Input:
10545 10544 * ha = adapter state pointer.
10546 10545 * DEVICE_QUEUE_LOCK must be released.
10547 10546 *
10548 10547 * Context:
10549 10548 * Kernel context.
10550 10549 */
10551 10550 void
10552 10551 ql_loop_online(ql_adapter_state_t *ha)
10553 10552 {
10554 10553 ql_adapter_state_t *vha;
10555 10554
10556 10555 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10557 10556
10558 10557 /* Inform the FC Transport that the hardware is online. */
10559 10558 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10560 10559 if (!(vha->task_daemon_flags &
10561 10560 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10562 10561 /* Restart IP if it was shutdown. */
10563 10562 if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10564 10563 !(vha->flags & IP_INITIALIZED)) {
10565 10564 (void) ql_initialize_ip(vha);
10566 10565 ql_isp_rcvbuf(vha);
10567 10566 }
10568 10567
10569 10568 if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10570 10569 FC_PORT_STATE_MASK(vha->state) !=
10571 10570 FC_STATE_ONLINE) {
10572 10571 vha->state = FC_PORT_SPEED_MASK(vha->state);
10573 10572 if (vha->topology & QL_LOOP_CONNECTION) {
10574 10573 vha->state |= FC_STATE_LOOP;
10575 10574 } else {
10576 10575 vha->state |= FC_STATE_ONLINE;
10577 10576 }
10578 10577 TASK_DAEMON_LOCK(ha);
10579 10578 vha->task_daemon_flags |= FC_STATE_CHANGE;
10580 10579 TASK_DAEMON_UNLOCK(ha);
10581 10580 }
10582 10581 }
10583 10582 }
10584 10583
10585 10584 ql_awaken_task_daemon(ha, NULL, 0, 0);
10586 10585
10587 10586 /* Restart device queues that may have been stopped. */
10588 10587 ql_restart_queues(ha);
10589 10588
10590 10589 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10591 10590 }
10592 10591
10593 10592 /*
10594 10593 * ql_fca_handle_to_state
10595 10594 * Verifies handle to be correct.
10596 10595 *
10597 10596 * Input:
10598 10597 * fca_handle = pointer to state structure.
10599 10598 *
10600 10599 * Returns:
10601 10600 * NULL = failure
10602 10601 *
10603 10602 * Context:
10604 10603 * Kernel context.
10605 10604 */
10606 10605 static ql_adapter_state_t *
10607 10606 ql_fca_handle_to_state(opaque_t fca_handle)
10608 10607 {
10609 10608 #ifdef QL_DEBUG_ROUTINES
10610 10609 ql_link_t *link;
10611 10610 ql_adapter_state_t *ha = NULL;
10612 10611 ql_adapter_state_t *vha = NULL;
10613 10612
10614 10613 for (link = ql_hba.first; link != NULL; link = link->next) {
10615 10614 ha = link->base_address;
10616 10615 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10617 10616 if ((opaque_t)vha == fca_handle) {
10618 10617 ha = vha;
10619 10618 break;
10620 10619 }
10621 10620 }
10622 10621 if ((opaque_t)ha == fca_handle) {
10623 10622 break;
10624 10623 } else {
10625 10624 ha = NULL;
10626 10625 }
10627 10626 }
10628 10627
10629 10628 if (ha == NULL) {
10630 10629 /*EMPTY*/
10631 10630 QL_PRINT_2(CE_CONT, "failed\n");
10632 10631 }
10633 10632
10634 10633 #endif /* QL_DEBUG_ROUTINES */
10635 10634
10636 10635 return ((ql_adapter_state_t *)fca_handle);
10637 10636 }
10638 10637
10639 10638 /*
10640 10639 * ql_d_id_to_queue
10641 10640 * Locate device queue that matches destination ID.
10642 10641 *
10643 10642 * Input:
10644 10643 * ha = adapter state pointer.
10645 10644 * d_id = destination ID
10646 10645 *
10647 10646 * Returns:
10648 10647 * NULL = failure
10649 10648 *
10650 10649 * Context:
10651 10650 * Interrupt or Kernel context, no mailbox commands allowed.
10652 10651 */
10653 10652 ql_tgt_t *
10654 10653 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10655 10654 {
10656 10655 uint16_t index;
10657 10656 ql_tgt_t *tq;
10658 10657 ql_link_t *link;
10659 10658
10660 10659 /* Get head queue index. */
10661 10660 index = ql_alpa_to_index[d_id.b.al_pa];
10662 10661
10663 10662 for (link = ha->dev[index].first; link != NULL; link = link->next) {
10664 10663 tq = link->base_address;
10665 10664 if (tq->d_id.b24 == d_id.b24 &&
10666 10665 VALID_DEVICE_ID(ha, tq->loop_id)) {
10667 10666 return (tq);
10668 10667 }
10669 10668 }
10670 10669
10671 10670 return (NULL);
10672 10671 }
10673 10672
10674 10673 /*
10675 10674 * ql_loop_id_to_queue
10676 10675 * Locate device queue that matches loop ID.
10677 10676 *
10678 10677 * Input:
10679 10678 * ha: adapter state pointer.
10680 10679 * loop_id: destination ID
10681 10680 *
10682 10681 * Returns:
10683 10682 * NULL = failure
10684 10683 *
10685 10684 * Context:
10686 10685 * Interrupt or Kernel context, no mailbox commands allowed.
10687 10686 */
10688 10687 ql_tgt_t *
10689 10688 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10690 10689 {
10691 10690 uint16_t index;
10692 10691 ql_tgt_t *tq;
10693 10692 ql_link_t *link;
10694 10693
10695 10694 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10696 10695 for (link = ha->dev[index].first; link != NULL;
10697 10696 link = link->next) {
10698 10697 tq = link->base_address;
10699 10698 if (tq->loop_id == loop_id) {
10700 10699 return (tq);
10701 10700 }
10702 10701 }
10703 10702 }
10704 10703
10705 10704 return (NULL);
10706 10705 }
10707 10706
10708 10707 /*
10709 10708 * ql_kstat_update
10710 10709 * Updates kernel statistics.
10711 10710 *
10712 10711 * Input:
10713 10712 * ksp - driver kernel statistics structure pointer.
10714 10713 * rw - function to perform
10715 10714 *
10716 10715 * Returns:
10717 10716 * 0 or EACCES
10718 10717 *
10719 10718 * Context:
10720 10719 * Kernel context.
10721 10720 */
10722 10721 /* ARGSUSED */
10723 10722 static int
10724 10723 ql_kstat_update(kstat_t *ksp, int rw)
10725 10724 {
10726 10725 int rval;
10727 10726
10728 10727 QL_PRINT_3(CE_CONT, "started\n");
10729 10728
10730 10729 if (rw == KSTAT_WRITE) {
10731 10730 rval = EACCES;
10732 10731 } else {
10733 10732 rval = 0;
10734 10733 }
10735 10734
10736 10735 if (rval != 0) {
10737 10736 /*EMPTY*/
10738 10737 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10739 10738 } else {
10740 10739 /*EMPTY*/
10741 10740 QL_PRINT_3(CE_CONT, "done\n");
10742 10741 }
10743 10742 return (rval);
10744 10743 }
10745 10744
10746 10745 /*
10747 10746 * ql_load_flash
10748 10747 * Loads flash.
10749 10748 *
10750 10749 * Input:
10751 10750 * ha: adapter state pointer.
10752 10751 * dp: data pointer.
10753 10752 * size: data length.
10754 10753 *
10755 10754 * Returns:
10756 10755 * ql local function return status code.
10757 10756 *
10758 10757 * Context:
10759 10758 * Kernel context.
10760 10759 */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* Newer parts (per CFG_CTRL_24258081 mask) use the 24xx path. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default limit: a 128KB (0x20000) image, written at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * Half-chip update: limit to 256KB, and write at
			 * offset 0x40000 when updating the FPGA image.
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	/* Reject images larger than the selected region. */
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash, one byte at a time. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always re-lock the flash, even on a failed program. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10832 10831
10833 10832 /*
10834 10833 * ql_program_flash_address
10835 10834 * Program flash address.
10836 10835 *
10837 10836 * Input:
10838 10837 * ha = adapter state pointer.
10839 10838 * addr = flash byte address.
10840 10839 * data = data to be written to flash.
10841 10840 *
10842 10841 * Returns:
10843 10842 * ql local function return status code.
10844 10843 *
10845 10844 * Context:
10846 10845 * Kernel context.
10847 10846 */
10848 10847 static int
10849 10848 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10850 10849 {
10851 10850 int rval;
10852 10851
10853 10852 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10854 10853
10855 10854 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10856 10855 ql_write_flash_byte(ha, 0x5555, 0xa0);
10857 10856 ql_write_flash_byte(ha, addr, data);
10858 10857 } else {
10859 10858 /* Write Program Command Sequence */
10860 10859 ql_write_flash_byte(ha, 0x5555, 0xaa);
10861 10860 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10862 10861 ql_write_flash_byte(ha, 0x5555, 0xa0);
10863 10862 ql_write_flash_byte(ha, addr, data);
10864 10863 }
10865 10864
10866 10865 /* Wait for write to complete. */
10867 10866 rval = ql_poll_flash(ha, addr, data);
10868 10867
10869 10868 if (rval != QL_SUCCESS) {
10870 10869 EL(ha, "failed=%xh\n", rval);
10871 10870 } else {
10872 10871 /*EMPTY*/
10873 10872 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10874 10873 }
10875 10874 return (rval);
10876 10875 }
10877 10876
10878 10877 /*
10879 10878 * ql_erase_flash
10880 10879 * Erases entire flash.
10881 10880 *
10882 10881 * Input:
10883 10882 * ha = adapter state pointer.
10884 10883 *
10885 10884 * Returns:
10886 10885 * ql local function return status code.
10887 10886 *
10888 10887 * Context:
10889 10888 * Kernel context.
10890 10889 */
10891 10890 int
10892 10891 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10893 10892 {
10894 10893 int rval;
10895 10894 uint32_t erase_delay = 2000000;
10896 10895 uint32_t sStartAddr;
10897 10896 uint32_t ssize;
10898 10897 uint32_t cnt;
10899 10898 uint8_t *bfp;
10900 10899 uint8_t *tmp;
10901 10900
10902 10901 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10903 10902
10904 10903 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10905 10904
10906 10905 if (ql_flash_sbus_fpga == 1) {
10907 10906 ssize = QL_SBUS_FCODE_SIZE;
10908 10907 sStartAddr = QL_FCODE_OFFSET;
10909 10908 } else {
10910 10909 ssize = QL_FPGA_SIZE;
10911 10910 sStartAddr = QL_FPGA_OFFSET;
10912 10911 }
10913 10912
10914 10913 erase_delay = 20000000;
10915 10914
10916 10915 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10917 10916
10918 10917 /* Save the section of flash we're not updating to buffer */
10919 10918 tmp = bfp;
10920 10919 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10921 10920 /* Allow other system activity. */
10922 10921 if (cnt % 0x1000 == 0) {
10923 10922 ql_delay(ha, 10000);
10924 10923 }
10925 10924 *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10926 10925 }
10927 10926 }
10928 10927
10929 10928 /* Chip Erase Command Sequence */
10930 10929 ql_write_flash_byte(ha, 0x5555, 0xaa);
10931 10930 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10932 10931 ql_write_flash_byte(ha, 0x5555, 0x80);
10933 10932 ql_write_flash_byte(ha, 0x5555, 0xaa);
10934 10933 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10935 10934 ql_write_flash_byte(ha, 0x5555, 0x10);
10936 10935
10937 10936 ql_delay(ha, erase_delay);
10938 10937
10939 10938 /* Wait for erase to complete. */
10940 10939 rval = ql_poll_flash(ha, 0, 0x80);
10941 10940
10942 10941 if (rval != QL_SUCCESS) {
10943 10942 EL(ha, "failed=%xh\n", rval);
10944 10943 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10945 10944 kmem_free(bfp, ssize);
10946 10945 }
10947 10946 return (rval);
10948 10947 }
10949 10948
10950 10949 /* restore the section we saved in the buffer */
10951 10950 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10952 10951 /* Restore the section we saved off */
10953 10952 tmp = bfp;
10954 10953 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10955 10954 /* Allow other system activity. */
10956 10955 if (cnt % 0x1000 == 0) {
10957 10956 ql_delay(ha, 10000);
10958 10957 }
10959 10958 rval = ql_program_flash_address(ha, cnt, *tmp++);
10960 10959 if (rval != QL_SUCCESS) {
10961 10960 break;
10962 10961 }
10963 10962 }
10964 10963
10965 10964 kmem_free(bfp, ssize);
10966 10965 }
10967 10966
10968 10967 if (rval != QL_SUCCESS) {
10969 10968 EL(ha, "failed=%xh\n", rval);
10970 10969 } else {
10971 10970 /*EMPTY*/
10972 10971 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10973 10972 }
10974 10973 return (rval);
10975 10974 }
10976 10975
10977 10976 /*
10978 10977 * ql_poll_flash
10979 10978 * Polls flash for completion.
10980 10979 *
10981 10980 * Input:
10982 10981 * ha = adapter state pointer.
10983 10982 * addr = flash byte address.
10984 10983 * data = data to be polled.
10985 10984 *
10986 10985 * Returns:
10987 10986 * ql local function return status code.
10988 10987 *
10989 10988 * Context:
10990 10989 * Kernel context.
10991 10990 */
10992 10991 int
10993 10992 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10994 10993 {
10995 10994 uint8_t flash_data;
10996 10995 uint32_t cnt;
10997 10996 int rval = QL_FUNCTION_FAILED;
10998 10997
10999 10998 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11000 10999
11001 11000 poll_data = (uint8_t)(poll_data & BIT_7);
11002 11001
11003 11002 /* Wait for 30 seconds for command to finish. */
11004 11003 for (cnt = 30000000; cnt; cnt--) {
11005 11004 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11006 11005
11007 11006 if ((flash_data & BIT_7) == poll_data) {
11008 11007 rval = QL_SUCCESS;
11009 11008 break;
11010 11009 }
11011 11010 if (flash_data & BIT_5 && cnt > 2) {
11012 11011 cnt = 2;
11013 11012 }
11014 11013 drv_usecwait(1);
11015 11014 }
11016 11015
11017 11016 if (rval != QL_SUCCESS) {
11018 11017 EL(ha, "failed=%xh\n", rval);
11019 11018 } else {
11020 11019 /*EMPTY*/
11021 11020 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11022 11021 }
11023 11022 return (rval);
11024 11023 }
11025 11024
11026 11025 /*
11027 11026 * ql_flash_enable
11028 11027 * Setup flash for reading/writing.
11029 11028 *
11030 11029 * Input:
11031 11030 * ha = adapter state pointer.
11032 11031 *
11033 11032 * Context:
11034 11033 * Kernel context.
11035 11034 */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the ISP flash-enable bit in ctrl_status. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/*
	 * Dummy read, result discarded — presumably completes the
	 * reset cycle on the part; confirm against the flash datasheet.
	 */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11069 11068
11070 11069 /*
11071 11070 * ql_flash_disable
11072 11071 * Disable flash and allow RISC to run.
11073 11072 *
11074 11073 * Input:
11075 11074 * ha = adapter state pointer.
11076 11075 *
11077 11076 * Context:
11078 11077 * Kernel context.
11079 11078 */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the ISP flash-enable bit so the RISC can run. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11107 11106
11108 11107 /*
11109 11108 * ql_write_flash_byte
11110 11109 * Write byte to flash.
11111 11110 *
11112 11111 * Input:
11113 11112 * ha = adapter state pointer.
11114 11113 * addr = flash byte address.
11115 11114 * data = data to be written.
11116 11115 *
11117 11116 * Context:
11118 11117 * Kernel context.
11119 11118 */
11120 11119 void
11121 11120 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11122 11121 {
11123 11122 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11124 11123 ddi_put16(ha->sbus_fpga_dev_handle,
11125 11124 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11126 11125 LSW(addr));
11127 11126 ddi_put16(ha->sbus_fpga_dev_handle,
11128 11127 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11129 11128 MSW(addr));
11130 11129 ddi_put16(ha->sbus_fpga_dev_handle,
11131 11130 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11132 11131 (uint16_t)data);
11133 11132 } else {
11134 11133 uint16_t bank_select;
11135 11134
11136 11135 /* Setup bit 16 of flash address. */
11137 11136 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11138 11137
11139 11138 if (CFG_IST(ha, CFG_CTRL_6322)) {
11140 11139 bank_select = (uint16_t)(bank_select & ~0xf0);
11141 11140 bank_select = (uint16_t)(bank_select |
11142 11141 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11143 11142 WRT16_IO_REG(ha, ctrl_status, bank_select);
11144 11143 } else {
11145 11144 if (addr & BIT_16 && !(bank_select &
11146 11145 ISP_FLASH_64K_BANK)) {
11147 11146 bank_select = (uint16_t)(bank_select |
11148 11147 ISP_FLASH_64K_BANK);
11149 11148 WRT16_IO_REG(ha, ctrl_status, bank_select);
11150 11149 } else if (!(addr & BIT_16) && bank_select &
11151 11150 ISP_FLASH_64K_BANK) {
11152 11151 bank_select = (uint16_t)(bank_select &
11153 11152 ~ISP_FLASH_64K_BANK);
11154 11153 WRT16_IO_REG(ha, ctrl_status, bank_select);
11155 11154 }
11156 11155 }
11157 11156
11158 11157 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11159 11158 WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11160 11159 WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11161 11160 } else {
11162 11161 WRT16_IOMAP_REG(ha, flash_address, addr);
11163 11162 WRT16_IOMAP_REG(ha, flash_data, data);
11164 11163 }
11165 11164 }
11166 11165 }
11167 11166
11168 11167 /*
11169 11168 * ql_read_flash_byte
11170 11169 * Reads byte from flash, but must read a word from chip.
11171 11170 *
11172 11171 * Input:
11173 11172 * ha = adapter state pointer.
11174 11173 * addr = flash byte address.
11175 11174 *
11176 11175 * Returns:
11177 11176 * byte from flash.
11178 11177 *
11179 11178 * Context:
11180 11179 * Kernel context.
11181 11180 */
11182 11181 uint8_t
11183 11182 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11184 11183 {
11185 11184 uint8_t data;
11186 11185
11187 11186 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11188 11187 ddi_put16(ha->sbus_fpga_dev_handle,
11189 11188 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11190 11189 LSW(addr));
11191 11190 ddi_put16(ha->sbus_fpga_dev_handle,
11192 11191 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11193 11192 MSW(addr));
11194 11193 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11195 11194 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11196 11195 } else {
11197 11196 uint16_t bank_select;
11198 11197
11199 11198 /* Setup bit 16 of flash address. */
11200 11199 bank_select = RD16_IO_REG(ha, ctrl_status);
11201 11200 if (CFG_IST(ha, CFG_CTRL_6322)) {
11202 11201 bank_select = (uint16_t)(bank_select & ~0xf0);
11203 11202 bank_select = (uint16_t)(bank_select |
11204 11203 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11205 11204 WRT16_IO_REG(ha, ctrl_status, bank_select);
11206 11205 } else {
11207 11206 if (addr & BIT_16 &&
11208 11207 !(bank_select & ISP_FLASH_64K_BANK)) {
11209 11208 bank_select = (uint16_t)(bank_select |
11210 11209 ISP_FLASH_64K_BANK);
11211 11210 WRT16_IO_REG(ha, ctrl_status, bank_select);
11212 11211 } else if (!(addr & BIT_16) &&
11213 11212 bank_select & ISP_FLASH_64K_BANK) {
11214 11213 bank_select = (uint16_t)(bank_select &
11215 11214 ~ISP_FLASH_64K_BANK);
11216 11215 WRT16_IO_REG(ha, ctrl_status, bank_select);
11217 11216 }
11218 11217 }
11219 11218
11220 11219 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11221 11220 WRT16_IO_REG(ha, flash_address, addr);
11222 11221 data = (uint8_t)RD16_IO_REG(ha, flash_data);
11223 11222 } else {
11224 11223 WRT16_IOMAP_REG(ha, flash_address, addr);
11225 11224 data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11226 11225 }
11227 11226 }
11228 11227
11229 11228 return (data);
11230 11229 }
11231 11230
11232 11231 /*
11233 11232 * ql_24xx_flash_id
11234 11233 * Get flash IDs.
11235 11234 *
11236 11235 * Input:
11237 11236 * ha: adapter state pointer.
11238 11237 *
11239 11238 * Returns:
11240 11239 * ql local function return status code.
11241 11240 *
11242 11241 * Context:
11243 11242 * Kernel context.
11244 11243 */
11245 11244 int
11246 11245 ql_24xx_flash_id(ql_adapter_state_t *vha)
11247 11246 {
11248 11247 int rval;
11249 11248 uint32_t fdata = 0;
11250 11249 ql_adapter_state_t *ha = vha->pha;
11251 11250 ql_xioctl_t *xp = ha->xioctl;
11252 11251
11253 11252 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11254 11253
11255 11254 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11256 11255
11257 11256 if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11258 11257 fdata = 0;
11259 11258 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11260 11259 (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11261 11260 }
11262 11261
11263 11262 if (rval != QL_SUCCESS) {
11264 11263 EL(ha, "24xx read_flash failed=%xh\n", rval);
11265 11264 } else if (fdata != 0) {
11266 11265 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11267 11266 xp->fdesc.flash_id = MSB(LSW(fdata));
11268 11267 xp->fdesc.flash_len = LSB(MSW(fdata));
11269 11268 } else {
11270 11269 xp->fdesc.flash_manuf = ATMEL_FLASH;
11271 11270 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11272 11271 xp->fdesc.flash_len = 0;
11273 11272 }
11274 11273
11275 11274 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11276 11275
11277 11276 return (rval);
11278 11277 }
11279 11278
11280 11279 /*
11281 11280 * ql_24xx_load_flash
11282 11281 * Loads flash.
11283 11282 *
11284 11283 * Input:
11285 11284 * ha = adapter state pointer.
11286 11285 * dp = data pointer.
11287 11286 * size = data length in bytes.
11288 11287 * faddr = 32bit word flash byte address.
11289 11288 *
11290 11289 * Returns:
11291 11290 * ql local function return status code.
11292 11291 *
11293 11292 * Context:
11294 11293 * Kernel context.
11295 11294 */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer (25xx/81xx burst-write path only). */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		/*
		 * NOTE(review): dmabuf is only allocated for 25xx/81xx;
		 * this relies on ql_free_phys() tolerating the
		 * zero-initialized descriptor — confirm.
		 */
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? Erase it before writing. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_8021)) {
				/* 8021: dedicated ROM-erase interface. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_8021_rom_erase(ha, fdata);
				if (rval != QL_SUCCESS) {
					EL(ha, "8021 erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: firmware-assisted sector erase. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/*
				 * Byte-swap the sector's byte address into
				 * the layout the erase command expects.
				 */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			/* Stage the burst into the DMA buffer... */
			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			/* ...then have the ISP move it to flash. */
			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Slow path: assemble and program one dword. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Always restore write protection, even after a failure. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11460 11459
11461 11460 /*
11462 11461 * ql_24xx_read_flash
11463 11462 * Reads a 32bit word from ISP24xx NVRAM/FLASH.
11464 11463 *
11465 11464 * Input:
11466 11465 * ha: adapter state pointer.
11467 11466 * faddr: NVRAM/FLASH address.
11468 11467 * bp: data pointer.
11469 11468 *
11470 11469 * Returns:
11471 11470 * ql local function return status code.
11472 11471 *
11473 11472 * Context:
11474 11473 * Kernel context.
11475 11474 */
11476 11475 int
11477 11476 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11478 11477 {
11479 11478 uint32_t timer;
11480 11479 int rval = QL_SUCCESS;
11481 11480 ql_adapter_state_t *ha = vha->pha;
11482 11481
11483 11482 if (CFG_IST(ha, CFG_CTRL_8021)) {
11484 11483 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11485 11484 EL(ha, "8021 access error\n");
11486 11485 }
11487 11486 return (rval);
11488 11487 }
11489 11488
11490 11489 /* Clear access error flag */
11491 11490 WRT32_IO_REG(ha, ctrl_status,
11492 11491 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11493 11492
11494 11493 WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11495 11494
11496 11495 /* Wait for READ cycle to complete. */
11497 11496 for (timer = 300000; timer; timer--) {
11498 11497 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11499 11498 break;
11500 11499 }
11501 11500 drv_usecwait(10);
11502 11501 }
11503 11502
11504 11503 if (timer == 0) {
11505 11504 EL(ha, "failed, timeout\n");
11506 11505 rval = QL_FUNCTION_TIMEOUT;
11507 11506 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11508 11507 EL(ha, "failed, access error\n");
11509 11508 rval = QL_FUNCTION_FAILED;
11510 11509 }
11511 11510
11512 11511 *bp = RD32_IO_REG(ha, flash_data);
11513 11512
11514 11513 return (rval);
11515 11514 }
11516 11515
11517 11516 /*
11518 11517 * ql_24xx_write_flash
11519 11518 * Writes a 32bit word to ISP24xx NVRAM/FLASH.
11520 11519 *
11521 11520 * Input:
11522 11521 * ha: adapter state pointer.
11523 11522 * addr: NVRAM/FLASH address.
11524 11523 * value: data.
11525 11524 *
11526 11525 * Returns:
11527 11526 * ql local function return status code.
11528 11527 *
11529 11528 * Context:
11530 11529 * Kernel context.
11531 11530 */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t	timer, fdata;
	int		rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* ISP8021 has its own ROM access interface. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Latch data first, then the address with the write strobe set. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * Config-space write: also poll offset
				 * 0x005 until BIT_0 clears — presumably
				 * the flash status busy bit; confirm
				 * against the part's datasheet.
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11579 11578 /*
11580 11579 * ql_24xx_unprotect_flash
11581 11580 * Enable writes
11582 11581 *
11583 11582 * Input:
11584 11583 * ha: adapter state pointer.
11585 11584 *
11586 11585 * Returns:
11587 11586 * ql local function return status code.
11588 11587 *
11589 11588 * Context:
11590 11589 * Kernel context.
11591 11590 */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * NOTE(review): the status-register write is issued twice
		 * with the same value and only the second result is
		 * checked — looks intentional, but confirm against the
		 * 8021 programming notes.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware running, use its flash-access service. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the first 16 sector addresses... */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/*
		 * ...plus three specific higher addresses; presumably
		 * part-specific protected regions — confirm against the
		 * flash datasheet.
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
11653 11652
11654 11653 /*
11655 11654 * ql_24xx_protect_flash
11656 11655 * Disable writes
11657 11656 *
11658 11657 * Input:
11659 11658 * ha: adapter state pointer.
11660 11659 *
11661 11660 * Context:
11662 11661 * Kernel context.
11663 11662 */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int rval;
	uint32_t fdata;
	ql_adapter_state_t *ha = vha->pha;	/* always act on the physical HBA */
	ql_xioctl_t *xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * ISP8021: protection is controlled through the ROM status
	 * register. The first wrsr (write_enable_bits) presumably unlocks
	 * the status register and the second applies the write-disable
	 * bits -- confirm against the 8021 ROM interface.
	 */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/*
		 * With firmware running on 81xx, write-protect goes through
		 * the Flash Access Control (FAC) mailbox command.
		 */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write so protection bits can be written. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect the first 16 sectors individually. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/*
		 * No per-sector protect command: fall back to setting
		 * protection via the status register write.
		 */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write (81xx leaves this to the FAC path). */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11733 11732
11734 11733 /*
11735 11734 * ql_dump_firmware
11736 11735 * Save RISC code state information.
11737 11736 *
11738 11737 * Input:
11739 11738 * ha = adapter state pointer.
11740 11739 *
11741 11740 * Returns:
11742 11741 * QL local function return status code.
11743 11742 *
11744 11743 * Context:
11745 11744 * Kernel context.
11746 11745 */
11747 11746 static int
11748 11747 ql_dump_firmware(ql_adapter_state_t *vha)
11749 11748 {
11750 11749 int rval;
11751 11750 clock_t timer = drv_usectohz(30000000);
11752 11751 ql_adapter_state_t *ha = vha->pha;
11753 11752
11754 11753 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11755 11754
11756 11755 QL_DUMP_LOCK(ha);
11757 11756
11758 11757 if (ha->ql_dump_state & QL_DUMPING ||
11759 11758 (ha->ql_dump_state & QL_DUMP_VALID &&
11760 11759 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11761 11760 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11762 11761 QL_DUMP_UNLOCK(ha);
11763 11762 return (QL_SUCCESS);
11764 11763 }
11765 11764
11766 11765 QL_DUMP_UNLOCK(ha);
11767 11766
11768 11767 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11769 11768
11770 11769 /*
11771 11770 * Wait for all outstanding commands to complete
11772 11771 */
11773 11772 (void) ql_wait_outstanding(ha);
11774 11773
11775 11774 /* Dump firmware. */
11776 11775 rval = ql_binary_fw_dump(ha, TRUE);
11777 11776
11778 11777 /* Do abort to force restart. */
11779 11778 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11780 11779 EL(ha, "restarting, isp_abort_needed\n");
11781 11780
11782 11781 /* Acquire task daemon lock. */
11783 11782 TASK_DAEMON_LOCK(ha);
11784 11783
11785 11784 /* Wait for suspension to end. */
11786 11785 while (ha->task_daemon_flags & QL_SUSPENDED) {
11787 11786 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11788 11787
11789 11788 /* 30 seconds from now */
11790 11789 if (cv_reltimedwait(&ha->cv_dr_suspended,
11791 11790 &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11792 11791 /*
11793 11792 * The timeout time 'timer' was
11794 11793 * reached without the condition
11795 11794 * being signaled.
11796 11795 */
11797 11796 break;
11798 11797 }
11799 11798 }
11800 11799
11801 11800 /* Release task daemon lock. */
11802 11801 TASK_DAEMON_UNLOCK(ha);
11803 11802
11804 11803 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11805 11804 /*EMPTY*/
11806 11805 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11807 11806 } else {
11808 11807 EL(ha, "failed, rval = %xh\n", rval);
11809 11808 }
11810 11809 return (rval);
11811 11810 }
11812 11811
11813 11812 /*
11814 11813 * ql_binary_fw_dump
11815 11814 * Dumps binary data from firmware.
11816 11815 *
11817 11816 * Input:
11818 11817 * ha = adapter state pointer.
11819 11818 * lock_needed = mailbox lock needed.
11820 11819 *
11821 11820 * Returns:
11822 11821 * ql local function return status code.
11823 11822 *
11824 11823 * Context:
11825 11824 * Interrupt or Kernel context, no mailbox commands allowed.
11826 11825 */
11827 11826 int
11828 11827 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11829 11828 {
11830 11829 clock_t timer;
11831 11830 mbx_cmd_t mc;
11832 11831 mbx_cmd_t *mcp = &mc;
11833 11832 int rval = QL_SUCCESS;
11834 11833 ql_adapter_state_t *ha = vha->pha;
11835 11834
11836 11835 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11837 11836
11838 11837 if (CFG_IST(ha, CFG_CTRL_8021)) {
11839 11838 EL(ha, "8021 not supported\n");
11840 11839 return (QL_NOT_SUPPORTED);
11841 11840 }
11842 11841
11843 11842 QL_DUMP_LOCK(ha);
11844 11843
11845 11844 if (ha->ql_dump_state & QL_DUMPING ||
11846 11845 (ha->ql_dump_state & QL_DUMP_VALID &&
11847 11846 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11848 11847 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11849 11848 QL_DUMP_UNLOCK(ha);
11850 11849 return (QL_DATA_EXISTS);
11851 11850 }
11852 11851
11853 11852 ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11854 11853 ha->ql_dump_state |= QL_DUMPING;
11855 11854
11856 11855 QL_DUMP_UNLOCK(ha);
11857 11856
11858 11857 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11859 11858
11860 11859 /* Insert Time Stamp */
11861 11860 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11862 11861 FTO_INSERT_TIME_STAMP);
11863 11862 if (rval != QL_SUCCESS) {
11864 11863 EL(ha, "f/w extended trace insert"
11865 11864 "time stamp failed: %xh\n", rval);
11866 11865 }
11867 11866 }
11868 11867
11869 11868 if (lock_needed == TRUE) {
11870 11869 /* Acquire mailbox register lock. */
11871 11870 MBX_REGISTER_LOCK(ha);
11872 11871 timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11873 11872
11874 11873 /* Check for mailbox available, if not wait for signal. */
11875 11874 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11876 11875 ha->mailbox_flags = (uint8_t)
11877 11876 (ha->mailbox_flags | MBX_WANT_FLG);
11878 11877
11879 11878 /* 30 seconds from now */
11880 11879 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11881 11880 timer, TR_CLOCK_TICK) == -1) {
11882 11881 /*
11883 11882 * The timeout time 'timer' was
11884 11883 * reached without the condition
11885 11884 * being signaled.
11886 11885 */
11887 11886
11888 11887 /* Release mailbox register lock. */
11889 11888 MBX_REGISTER_UNLOCK(ha);
11890 11889
11891 11890 EL(ha, "failed, rval = %xh\n",
11892 11891 QL_FUNCTION_TIMEOUT);
11893 11892 return (QL_FUNCTION_TIMEOUT);
11894 11893 }
11895 11894 }
11896 11895
11897 11896 /* Set busy flag. */
11898 11897 ha->mailbox_flags = (uint8_t)
11899 11898 (ha->mailbox_flags | MBX_BUSY_FLG);
11900 11899 mcp->timeout = 120;
11901 11900 ha->mcp = mcp;
11902 11901
11903 11902 /* Release mailbox register lock. */
11904 11903 MBX_REGISTER_UNLOCK(ha);
11905 11904 }
11906 11905
11907 11906 /* Free previous dump buffer. */
11908 11907 if (ha->ql_dump_ptr != NULL) {
11909 11908 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11910 11909 ha->ql_dump_ptr = NULL;
11911 11910 }
11912 11911
11913 11912 if (CFG_IST(ha, CFG_CTRL_2422)) {
11914 11913 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11915 11914 ha->fw_ext_memory_size);
11916 11915 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11917 11916 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11918 11917 ha->fw_ext_memory_size);
11919 11918 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11920 11919 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11921 11920 ha->fw_ext_memory_size);
11922 11921 } else {
11923 11922 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11924 11923 }
11925 11924
11926 11925 if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11927 11926 NULL) {
11928 11927 rval = QL_MEMORY_ALLOC_FAILED;
11929 11928 } else {
11930 11929 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11931 11930 rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11932 11931 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11933 11932 rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11934 11933 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11935 11934 rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11936 11935 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11937 11936 rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11938 11937 } else {
11939 11938 rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11940 11939 }
11941 11940 }
11942 11941
11943 11942 /* Reset ISP chip. */
11944 11943 ql_reset_chip(ha);
11945 11944
11946 11945 QL_DUMP_LOCK(ha);
11947 11946
11948 11947 if (rval != QL_SUCCESS) {
11949 11948 if (ha->ql_dump_ptr != NULL) {
11950 11949 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11951 11950 ha->ql_dump_ptr = NULL;
11952 11951 }
11953 11952 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11954 11953 QL_DUMP_UPLOADED);
11955 11954 EL(ha, "failed, rval = %xh\n", rval);
11956 11955 } else {
11957 11956 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11958 11957 ha->ql_dump_state |= QL_DUMP_VALID;
11959 11958 EL(ha, "done\n");
11960 11959 }
11961 11960
11962 11961 QL_DUMP_UNLOCK(ha);
11963 11962
11964 11963 return (rval);
11965 11964 }
11966 11965
11967 11966 /*
11968 11967 * ql_ascii_fw_dump
11969 11968 * Converts firmware binary dump to ascii.
11970 11969 *
11971 11970 * Input:
11972 11971 * ha = adapter state pointer.
 *	bufp = buffer pointer.
11974 11973 *
11975 11974 * Returns:
11976 11975 * Amount of data buffer used.
11977 11976 *
11978 11977 * Context:
11979 11978 * Kernel context.
11980 11979 */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t cnt;
	caddr_t bp;
	int mbox_cnt;
	ql_adapter_state_t *ha = vha->pha;
	ql_fw_dump_t *fw = ha->ql_dump_ptr;	/* 2200/2300/6322 dump layout */

	/* 24xx and 25xx/81xx binary dumps have their own formatters. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		return (ql_2581_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Title line identifies the chip type. */
	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/*
	 * Each register section below prints 8 values per line.
	 * NOTE(review): bp is advanced by a fixed 6 after each "%04x "
	 * conversion, one byte more than the 5 characters that format
	 * string produces as shown -- presumably the literal originally
	 * carried two trailing spaces and was whitespace-collapsed in
	 * this rendering; verify against the master sources before
	 * touching any of these literals or increments.
	 */
	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	/* ReqQ/RspQ/Risc2Host status registers exist only on 2300/6322. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/* 2300/6322 have 16 mailbox registers, 2200 has 8. */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	/* RISC general-purpose register banks GP0 through GP7. */
	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		/* Chips other than 2300/6322 only dump 16 entries here. */
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	/*
	 * RAM layout differs by chip: 2300/6322 split code/stack/data
	 * regions (addresses printed as offsets from their bases),
	 * 2200 has a single RISC SRAM region starting at 0x1000.
	 */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
			bp = bp + 6;
		}
	} else {
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	bp += strlen(bp);

	/* Append the raw request and response queues, 8 words per line. */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (strlen(bufp));
}
12268 12267
12269 12268 /*
12270 12269 * ql_24xx_ascii_fw_dump
12271 12270 * Converts ISP24xx firmware binary dump to ascii.
12272 12271 *
12273 12272 * Input:
12274 12273 * ha = adapter state pointer.
 *	bufp = buffer pointer.
12276 12275 *
12277 12276 * Returns:
12278 12277 * Amount of data buffer used.
12279 12278 *
12280 12279 * Context:
12281 12280 * Kernel context.
12282 12281 */
12283 12282 static size_t
12284 12283 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12285 12284 {
12286 12285 uint32_t cnt;
12287 12286 caddr_t bp = bufp;
12288 12287 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12289 12288
12290 12289 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12291 12290
12292 12291 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12293 12292 ha->fw_major_version, ha->fw_minor_version,
12294 12293 ha->fw_subminor_version, ha->fw_attributes);
12295 12294 bp += strlen(bp);
12296 12295
12297 12296 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12298 12297
12299 12298 (void) strcat(bp, "\nHost Interface Registers");
12300 12299 bp += strlen(bp);
12301 12300 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12302 12301 if (cnt % 8 == 0) {
12303 12302 (void) sprintf(bp++, "\n");
12304 12303 }
12305 12304
12306 12305 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12307 12306 bp += 9;
12308 12307 }
12309 12308
12310 12309 (void) sprintf(bp, "\n\nMailbox Registers");
12311 12310 bp += strlen(bp);
12312 12311 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12313 12312 if (cnt % 16 == 0) {
12314 12313 (void) sprintf(bp++, "\n");
12315 12314 }
12316 12315
12317 12316 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12318 12317 bp += 5;
12319 12318 }
12320 12319
12321 12320 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12322 12321 bp += strlen(bp);
12323 12322 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12324 12323 if (cnt % 8 == 0) {
12325 12324 (void) sprintf(bp++, "\n");
12326 12325 }
12327 12326
12328 12327 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12329 12328 bp += 9;
12330 12329 }
12331 12330
12332 12331 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12333 12332 bp += strlen(bp);
12334 12333 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12335 12334 if (cnt % 8 == 0) {
12336 12335 (void) sprintf(bp++, "\n");
12337 12336 }
12338 12337
12339 12338 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12340 12339 bp += 9;
12341 12340 }
12342 12341
12343 12342 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12344 12343 bp += strlen(bp);
12345 12344 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12346 12345 if (cnt % 8 == 0) {
12347 12346 (void) sprintf(bp++, "\n");
12348 12347 }
12349 12348
12350 12349 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12351 12350 bp += 9;
12352 12351 }
12353 12352
12354 12353 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12355 12354 bp += strlen(bp);
12356 12355 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12357 12356 if (cnt % 8 == 0) {
12358 12357 (void) sprintf(bp++, "\n");
12359 12358 }
12360 12359
12361 12360 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12362 12361 bp += 9;
12363 12362 }
12364 12363
12365 12364 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12366 12365 bp += strlen(bp);
12367 12366 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12368 12367 if (cnt % 8 == 0) {
12369 12368 (void) sprintf(bp++, "\n");
12370 12369 }
12371 12370
12372 12371 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12373 12372 bp += 9;
12374 12373 }
12375 12374
12376 12375 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12377 12376 bp += strlen(bp);
12378 12377 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12379 12378 if (cnt % 8 == 0) {
12380 12379 (void) sprintf(bp++, "\n");
12381 12380 }
12382 12381
12383 12382 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12384 12383 bp += 9;
12385 12384 }
12386 12385
12387 12386 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12388 12387 bp += strlen(bp);
12389 12388 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12390 12389 if (cnt % 8 == 0) {
12391 12390 (void) sprintf(bp++, "\n");
12392 12391 }
12393 12392
12394 12393 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12395 12394 bp += 9;
12396 12395 }
12397 12396
12398 12397 (void) sprintf(bp, "\n\nCommand DMA Registers");
12399 12398 bp += strlen(bp);
12400 12399 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12401 12400 if (cnt % 8 == 0) {
12402 12401 (void) sprintf(bp++, "\n");
12403 12402 }
12404 12403
12405 12404 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12406 12405 bp += 9;
12407 12406 }
12408 12407
12409 12408 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12410 12409 bp += strlen(bp);
12411 12410 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12412 12411 if (cnt % 8 == 0) {
12413 12412 (void) sprintf(bp++, "\n");
12414 12413 }
12415 12414
12416 12415 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12417 12416 bp += 9;
12418 12417 }
12419 12418
12420 12419 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12421 12420 bp += strlen(bp);
12422 12421 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12423 12422 if (cnt % 8 == 0) {
12424 12423 (void) sprintf(bp++, "\n");
12425 12424 }
12426 12425
12427 12426 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12428 12427 bp += 9;
12429 12428 }
12430 12429
12431 12430 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12432 12431 bp += strlen(bp);
12433 12432 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12434 12433 if (cnt % 8 == 0) {
12435 12434 (void) sprintf(bp++, "\n");
12436 12435 }
12437 12436
12438 12437 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12439 12438 bp += 9;
12440 12439 }
12441 12440
12442 12441 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12443 12442 bp += strlen(bp);
12444 12443 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12445 12444 if (cnt % 8 == 0) {
12446 12445 (void) sprintf(bp++, "\n");
12447 12446 }
12448 12447
12449 12448 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12450 12449 bp += 9;
12451 12450 }
12452 12451
12453 12452 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12454 12453 bp += strlen(bp);
12455 12454 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12456 12455 if (cnt % 8 == 0) {
12457 12456 (void) sprintf(bp++, "\n");
12458 12457 }
12459 12458
12460 12459 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12461 12460 bp += 9;
12462 12461 }
12463 12462
12464 12463 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12465 12464 bp += strlen(bp);
12466 12465 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12467 12466 if (cnt % 8 == 0) {
12468 12467 (void) sprintf(bp++, "\n");
12469 12468 }
12470 12469
12471 12470 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12472 12471 bp += 9;
12473 12472 }
12474 12473
12475 12474 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12476 12475 bp += strlen(bp);
12477 12476 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12478 12477 if (cnt % 8 == 0) {
12479 12478 (void) sprintf(bp++, "\n");
12480 12479 }
12481 12480
12482 12481 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12483 12482 bp += 9;
12484 12483 }
12485 12484
12486 12485 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12487 12486 bp += strlen(bp);
12488 12487 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12489 12488 if (cnt % 8 == 0) {
12490 12489 (void) sprintf(bp++, "\n");
12491 12490 }
12492 12491
12493 12492 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12494 12493 bp += 9;
12495 12494 }
12496 12495
12497 12496 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12498 12497 bp += strlen(bp);
12499 12498 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12500 12499 if (cnt % 8 == 0) {
12501 12500 (void) sprintf(bp++, "\n");
12502 12501 }
12503 12502
12504 12503 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12505 12504 bp += 9;
12506 12505 }
12507 12506
12508 12507 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12509 12508 bp += strlen(bp);
12510 12509 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12511 12510 if (cnt % 8 == 0) {
12512 12511 (void) sprintf(bp++, "\n");
12513 12512 }
12514 12513
12515 12514 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12516 12515 bp += 9;
12517 12516 }
12518 12517
12519 12518 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12520 12519 bp += strlen(bp);
12521 12520 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12522 12521 if (cnt % 8 == 0) {
12523 12522 (void) sprintf(bp++, "\n");
12524 12523 }
12525 12524
12526 12525 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12527 12526 bp += 9;
12528 12527 }
12529 12528
12530 12529 (void) sprintf(bp, "\n\nRISC GP Registers");
12531 12530 bp += strlen(bp);
12532 12531 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12533 12532 if (cnt % 8 == 0) {
12534 12533 (void) sprintf(bp++, "\n");
12535 12534 }
12536 12535
12537 12536 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12538 12537 bp += 9;
12539 12538 }
12540 12539
12541 12540 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12542 12541 bp += strlen(bp);
12543 12542 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12544 12543 if (cnt % 8 == 0) {
12545 12544 (void) sprintf(bp++, "\n");
12546 12545 }
12547 12546
12548 12547 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12549 12548 bp += 9;
12550 12549 }
12551 12550
12552 12551 (void) sprintf(bp, "\n\nLMC Registers");
12553 12552 bp += strlen(bp);
12554 12553 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12555 12554 if (cnt % 8 == 0) {
12556 12555 (void) sprintf(bp++, "\n");
12557 12556 }
12558 12557
12559 12558 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12560 12559 bp += 9;
12561 12560 }
12562 12561
12563 12562 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12564 12563 bp += strlen(bp);
12565 12564 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12566 12565 if (cnt % 8 == 0) {
12567 12566 (void) sprintf(bp++, "\n");
12568 12567 }
12569 12568
12570 12569 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12571 12570 bp += 9;
12572 12571 }
12573 12572
12574 12573 (void) sprintf(bp, "\n\nFB Hardware Registers");
12575 12574 bp += strlen(bp);
12576 12575 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12577 12576 if (cnt % 8 == 0) {
12578 12577 (void) sprintf(bp++, "\n");
12579 12578 }
12580 12579
12581 12580 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12582 12581 bp += 9;
12583 12582 }
12584 12583
12585 12584 (void) sprintf(bp, "\n\nCode RAM");
12586 12585 bp += strlen(bp);
12587 12586 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12588 12587 if (cnt % 8 == 0) {
12589 12588 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12590 12589 bp += 11;
12591 12590 }
12592 12591
12593 12592 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12594 12593 bp += 9;
12595 12594 }
12596 12595
12597 12596 (void) sprintf(bp, "\n\nExternal Memory");
12598 12597 bp += strlen(bp);
12599 12598 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12600 12599 if (cnt % 8 == 0) {
12601 12600 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12602 12601 bp += 11;
12603 12602 }
12604 12603 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12605 12604 bp += 9;
12606 12605 }
12607 12606
12608 12607 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12609 12608 bp += strlen(bp);
12610 12609
12611 12610 (void) sprintf(bp, "\n\nRequest Queue");
12612 12611 bp += strlen(bp);
12613 12612 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12614 12613 if (cnt % 8 == 0) {
12615 12614 (void) sprintf(bp, "\n%08x: ", cnt);
12616 12615 bp += strlen(bp);
12617 12616 }
12618 12617 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12619 12618 bp += strlen(bp);
12620 12619 }
12621 12620
12622 12621 (void) sprintf(bp, "\n\nResponse Queue");
12623 12622 bp += strlen(bp);
12624 12623 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12625 12624 if (cnt % 8 == 0) {
12626 12625 (void) sprintf(bp, "\n%08x: ", cnt);
12627 12626 bp += strlen(bp);
12628 12627 }
12629 12628 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12630 12629 bp += strlen(bp);
12631 12630 }
12632 12631
12633 12632 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12634 12633 (ha->fwexttracebuf.bp != NULL)) {
12635 12634 uint32_t cnt_b = 0;
12636 12635 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12637 12636
12638 12637 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12639 12638 bp += strlen(bp);
12640 12639 /* show data address as a byte address, data as long words */
12641 12640 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12642 12641 cnt_b = cnt * 4;
12643 12642 if (cnt_b % 32 == 0) {
12644 12643 (void) sprintf(bp, "\n%08x: ",
12645 12644 (int)(w64 + cnt_b));
12646 12645 bp += 11;
12647 12646 }
12648 12647 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12649 12648 bp += 9;
12650 12649 }
12651 12650 }
12652 12651
12653 12652 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12654 12653 (ha->fwfcetracebuf.bp != NULL)) {
12655 12654 uint32_t cnt_b = 0;
12656 12655 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12657 12656
12658 12657 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12659 12658 bp += strlen(bp);
12660 12659 /* show data address as a byte address, data as long words */
12661 12660 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12662 12661 cnt_b = cnt * 4;
12663 12662 if (cnt_b % 32 == 0) {
12664 12663 (void) sprintf(bp, "\n%08x: ",
12665 12664 (int)(w64 + cnt_b));
12666 12665 bp += 11;
12667 12666 }
12668 12667 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12669 12668 bp += 9;
12670 12669 }
12671 12670 }
12672 12671
12673 12672 (void) sprintf(bp, "\n\n");
12674 12673 bp += strlen(bp);
12675 12674
12676 12675 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12677 12676
12678 12677 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12679 12678
12680 12679 return (cnt);
12681 12680 }
12682 12681
12683 12682 /*
12684 12683 * ql_2581_ascii_fw_dump
12685 12684 * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12686 12685 *
12687 12686 * Input:
12688 12687 * ha = adapter state pointer.
12689 12688 * bptr = buffer pointer.
12690 12689 *
12691 12690 * Returns:
12692 12691 * Amount of data buffer used.
12693 12692 *
12694 12693 * Context:
12695 12694 * Kernel context.
12696 12695 */
12697 12696 static size_t
12698 12697 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12699 12698 {
12700 12699 uint32_t cnt;
12701 12700 uint32_t cnt1;
12702 12701 caddr_t bp = bufp;
12703 12702 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12704 12703
12705 12704 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12706 12705
12707 12706 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12708 12707 ha->fw_major_version, ha->fw_minor_version,
12709 12708 ha->fw_subminor_version, ha->fw_attributes);
12710 12709 bp += strlen(bp);
12711 12710
12712 12711 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12713 12712 bp += strlen(bp);
12714 12713
12715 12714 (void) sprintf(bp, "\nHostRisc Registers");
12716 12715 bp += strlen(bp);
12717 12716 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12718 12717 if (cnt % 8 == 0) {
12719 12718 (void) sprintf(bp++, "\n");
12720 12719 }
12721 12720 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12722 12721 bp += 9;
12723 12722 }
12724 12723
12725 12724 (void) sprintf(bp, "\n\nPCIe Registers");
12726 12725 bp += strlen(bp);
12727 12726 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12728 12727 if (cnt % 8 == 0) {
12729 12728 (void) sprintf(bp++, "\n");
12730 12729 }
12731 12730 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12732 12731 bp += 9;
12733 12732 }
12734 12733
12735 12734 (void) strcat(bp, "\n\nHost Interface Registers");
12736 12735 bp += strlen(bp);
12737 12736 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12738 12737 if (cnt % 8 == 0) {
12739 12738 (void) sprintf(bp++, "\n");
12740 12739 }
12741 12740 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12742 12741 bp += 9;
12743 12742 }
12744 12743
12745 12744 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12746 12745 bp += strlen(bp);
12747 12746 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12748 12747 if (cnt % 8 == 0) {
12749 12748 (void) sprintf(bp++, "\n");
12750 12749 }
12751 12750 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12752 12751 bp += 9;
12753 12752 }
12754 12753
12755 12754 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12756 12755 fw->risc_io);
12757 12756 bp += strlen(bp);
12758 12757
12759 12758 (void) sprintf(bp, "\n\nMailbox Registers");
12760 12759 bp += strlen(bp);
12761 12760 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12762 12761 if (cnt % 16 == 0) {
12763 12762 (void) sprintf(bp++, "\n");
12764 12763 }
12765 12764 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12766 12765 bp += 5;
12767 12766 }
12768 12767
12769 12768 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12770 12769 bp += strlen(bp);
12771 12770 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12772 12771 if (cnt % 8 == 0) {
12773 12772 (void) sprintf(bp++, "\n");
12774 12773 }
12775 12774 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12776 12775 bp += 9;
12777 12776 }
12778 12777
12779 12778 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12780 12779 bp += strlen(bp);
12781 12780 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12782 12781 if (cnt % 8 == 0) {
12783 12782 (void) sprintf(bp++, "\n");
12784 12783 }
12785 12784 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12786 12785 bp += 9;
12787 12786 }
12788 12787
12789 12788 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12790 12789 bp += strlen(bp);
12791 12790 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12792 12791 if (cnt % 8 == 0) {
12793 12792 (void) sprintf(bp++, "\n");
12794 12793 }
12795 12794 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12796 12795 bp += 9;
12797 12796 }
12798 12797
12799 12798 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12800 12799 bp += strlen(bp);
12801 12800 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12802 12801 if (cnt % 8 == 0) {
12803 12802 (void) sprintf(bp++, "\n");
12804 12803 }
12805 12804 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12806 12805 bp += 9;
12807 12806 }
12808 12807
12809 12808 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12810 12809 bp += strlen(bp);
12811 12810 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12812 12811 if (cnt % 8 == 0) {
12813 12812 (void) sprintf(bp++, "\n");
12814 12813 }
12815 12814 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12816 12815 bp += 9;
12817 12816 }
12818 12817
12819 12818 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12820 12819 bp += strlen(bp);
12821 12820 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12822 12821 if (cnt % 8 == 0) {
12823 12822 (void) sprintf(bp++, "\n");
12824 12823 }
12825 12824 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12826 12825 bp += 9;
12827 12826 }
12828 12827
12829 12828 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12830 12829 bp += strlen(bp);
12831 12830 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12832 12831 if (cnt % 8 == 0) {
12833 12832 (void) sprintf(bp++, "\n");
12834 12833 }
12835 12834 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12836 12835 bp += 9;
12837 12836 }
12838 12837
12839 12838 (void) sprintf(bp, "\n\nASEQ GP Registers");
12840 12839 bp += strlen(bp);
12841 12840 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12842 12841 if (cnt % 8 == 0) {
12843 12842 (void) sprintf(bp++, "\n");
12844 12843 }
12845 12844 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12846 12845 bp += 9;
12847 12846 }
12848 12847
12849 12848 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12850 12849 bp += strlen(bp);
12851 12850 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12852 12851 if (cnt % 8 == 0) {
12853 12852 (void) sprintf(bp++, "\n");
12854 12853 }
12855 12854 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12856 12855 bp += 9;
12857 12856 }
12858 12857
12859 12858 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12860 12859 bp += strlen(bp);
12861 12860 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12862 12861 if (cnt % 8 == 0) {
12863 12862 (void) sprintf(bp++, "\n");
12864 12863 }
12865 12864 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12866 12865 bp += 9;
12867 12866 }
12868 12867
12869 12868 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12870 12869 bp += strlen(bp);
12871 12870 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12872 12871 if (cnt % 8 == 0) {
12873 12872 (void) sprintf(bp++, "\n");
12874 12873 }
12875 12874 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12876 12875 bp += 9;
12877 12876 }
12878 12877
12879 12878 (void) sprintf(bp, "\n\nCommand DMA Registers");
12880 12879 bp += strlen(bp);
12881 12880 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12882 12881 if (cnt % 8 == 0) {
12883 12882 (void) sprintf(bp++, "\n");
12884 12883 }
12885 12884 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12886 12885 bp += 9;
12887 12886 }
12888 12887
12889 12888 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12890 12889 bp += strlen(bp);
12891 12890 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12892 12891 if (cnt % 8 == 0) {
12893 12892 (void) sprintf(bp++, "\n");
12894 12893 }
12895 12894 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12896 12895 bp += 9;
12897 12896 }
12898 12897
12899 12898 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12900 12899 bp += strlen(bp);
12901 12900 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12902 12901 if (cnt % 8 == 0) {
12903 12902 (void) sprintf(bp++, "\n");
12904 12903 }
12905 12904 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12906 12905 bp += 9;
12907 12906 }
12908 12907
12909 12908 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12910 12909 bp += strlen(bp);
12911 12910 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12912 12911 if (cnt % 8 == 0) {
12913 12912 (void) sprintf(bp++, "\n");
12914 12913 }
12915 12914 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12916 12915 bp += 9;
12917 12916 }
12918 12917
12919 12918 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12920 12919 bp += strlen(bp);
12921 12920 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12922 12921 if (cnt % 8 == 0) {
12923 12922 (void) sprintf(bp++, "\n");
12924 12923 }
12925 12924 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12926 12925 bp += 9;
12927 12926 }
12928 12927
12929 12928 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12930 12929 bp += strlen(bp);
12931 12930 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12932 12931 if (cnt % 8 == 0) {
12933 12932 (void) sprintf(bp++, "\n");
12934 12933 }
12935 12934 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12936 12935 bp += 9;
12937 12936 }
12938 12937
12939 12938 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12940 12939 bp += strlen(bp);
12941 12940 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12942 12941 if (cnt % 8 == 0) {
12943 12942 (void) sprintf(bp++, "\n");
12944 12943 }
12945 12944 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12946 12945 bp += 9;
12947 12946 }
12948 12947
12949 12948 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12950 12949 bp += strlen(bp);
12951 12950 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12952 12951 if (cnt % 8 == 0) {
12953 12952 (void) sprintf(bp++, "\n");
12954 12953 }
12955 12954 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12956 12955 bp += 9;
12957 12956 }
12958 12957
12959 12958 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12960 12959 bp += strlen(bp);
12961 12960 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12962 12961 if (cnt % 8 == 0) {
12963 12962 (void) sprintf(bp++, "\n");
12964 12963 }
12965 12964 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12966 12965 bp += 9;
12967 12966 }
12968 12967
12969 12968 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12970 12969 bp += strlen(bp);
12971 12970 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12972 12971 if (cnt % 8 == 0) {
12973 12972 (void) sprintf(bp++, "\n");
12974 12973 }
12975 12974 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12976 12975 bp += 9;
12977 12976 }
12978 12977
12979 12978 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12980 12979 bp += strlen(bp);
12981 12980 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12982 12981 if (cnt % 8 == 0) {
12983 12982 (void) sprintf(bp++, "\n");
12984 12983 }
12985 12984 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12986 12985 bp += 9;
12987 12986 }
12988 12987
12989 12988 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12990 12989 bp += strlen(bp);
12991 12990 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12992 12991 if (cnt % 8 == 0) {
12993 12992 (void) sprintf(bp++, "\n");
12994 12993 }
12995 12994 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12996 12995 bp += 9;
12997 12996 }
12998 12997
12999 12998 (void) sprintf(bp, "\n\nRISC GP Registers");
13000 12999 bp += strlen(bp);
13001 13000 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13002 13001 if (cnt % 8 == 0) {
13003 13002 (void) sprintf(bp++, "\n");
13004 13003 }
13005 13004 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13006 13005 bp += 9;
13007 13006 }
13008 13007
13009 13008 (void) sprintf(bp, "\n\nLMC Registers");
13010 13009 bp += strlen(bp);
13011 13010 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13012 13011 if (cnt % 8 == 0) {
13013 13012 (void) sprintf(bp++, "\n");
13014 13013 }
13015 13014 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13016 13015 bp += 9;
13017 13016 }
13018 13017
13019 13018 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13020 13019 bp += strlen(bp);
13021 13020 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13022 13021 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13023 13022 (uint32_t)(sizeof (fw->fpm_hdw_reg));
13024 13023 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13025 13024 if (cnt % 8 == 0) {
13026 13025 (void) sprintf(bp++, "\n");
13027 13026 }
13028 13027 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13029 13028 bp += 9;
13030 13029 }
13031 13030
13032 13031 (void) sprintf(bp, "\n\nFB Hardware Registers");
13033 13032 bp += strlen(bp);
13034 13033 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13035 13034 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13036 13035 (uint32_t)(sizeof (fw->fb_hdw_reg));
13037 13036 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13038 13037 if (cnt % 8 == 0) {
13039 13038 (void) sprintf(bp++, "\n");
13040 13039 }
13041 13040 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13042 13041 bp += 9;
13043 13042 }
13044 13043
13045 13044 (void) sprintf(bp, "\n\nCode RAM");
13046 13045 bp += strlen(bp);
13047 13046 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13048 13047 if (cnt % 8 == 0) {
13049 13048 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13050 13049 bp += 11;
13051 13050 }
13052 13051 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13053 13052 bp += 9;
13054 13053 }
13055 13054
13056 13055 (void) sprintf(bp, "\n\nExternal Memory");
13057 13056 bp += strlen(bp);
13058 13057 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13059 13058 if (cnt % 8 == 0) {
13060 13059 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13061 13060 bp += 11;
13062 13061 }
13063 13062 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13064 13063 bp += 9;
13065 13064 }
13066 13065
13067 13066 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13068 13067 bp += strlen(bp);
13069 13068
13070 13069 (void) sprintf(bp, "\n\nRequest Queue");
13071 13070 bp += strlen(bp);
13072 13071 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13073 13072 if (cnt % 8 == 0) {
13074 13073 (void) sprintf(bp, "\n%08x: ", cnt);
13075 13074 bp += strlen(bp);
13076 13075 }
13077 13076 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13078 13077 bp += strlen(bp);
13079 13078 }
13080 13079
13081 13080 (void) sprintf(bp, "\n\nResponse Queue");
13082 13081 bp += strlen(bp);
13083 13082 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13084 13083 if (cnt % 8 == 0) {
13085 13084 (void) sprintf(bp, "\n%08x: ", cnt);
13086 13085 bp += strlen(bp);
13087 13086 }
13088 13087 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13089 13088 bp += strlen(bp);
13090 13089 }
13091 13090
13092 13091 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13093 13092 (ha->fwexttracebuf.bp != NULL)) {
13094 13093 uint32_t cnt_b = 0;
13095 13094 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13096 13095
13097 13096 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13098 13097 bp += strlen(bp);
13099 13098 /* show data address as a byte address, data as long words */
13100 13099 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13101 13100 cnt_b = cnt * 4;
13102 13101 if (cnt_b % 32 == 0) {
13103 13102 (void) sprintf(bp, "\n%08x: ",
13104 13103 (int)(w64 + cnt_b));
13105 13104 bp += 11;
13106 13105 }
13107 13106 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13108 13107 bp += 9;
13109 13108 }
13110 13109 }
13111 13110
13112 13111 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13113 13112 (ha->fwfcetracebuf.bp != NULL)) {
13114 13113 uint32_t cnt_b = 0;
13115 13114 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13116 13115
13117 13116 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13118 13117 bp += strlen(bp);
13119 13118 /* show data address as a byte address, data as long words */
13120 13119 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13121 13120 cnt_b = cnt * 4;
13122 13121 if (cnt_b % 32 == 0) {
13123 13122 (void) sprintf(bp, "\n%08x: ",
13124 13123 (int)(w64 + cnt_b));
13125 13124 bp += 11;
13126 13125 }
13127 13126 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13128 13127 bp += 9;
13129 13128 }
13130 13129 }
13131 13130
13132 13131 (void) sprintf(bp, "\n\n");
13133 13132 bp += strlen(bp);
13134 13133
13135 13134 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13136 13135
13137 13136 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13138 13137
13139 13138 return (cnt);
13140 13139 }
13141 13140
13142 13141 /*
13143 13142 * ql_2200_binary_fw_dump
13144 13143 *
13145 13144 * Input:
13146 13145 * ha: adapter state pointer.
13147 13146 * fw: firmware dump context pointer.
13148 13147 *
13149 13148 * Returns:
13150 13149 * ql local function return status code.
13151 13150 *
13152 13151 * Context:
13153 13152 * Interrupt or Kernel context, no mailbox commands allowed.
13154 13153 */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t	timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms (30s) for the RISC to acknowledge. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* PBIU registers at the base of I/O space. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		/* ctrl_status selects the register bank mapped at 0x80. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* pcr selects which RISC GP register bank is visible. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* mailbox_out[0] reads MBS_BUSY until firmware is up. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Get RISC SRAM. */
		/*
		 * Read words 0x1000-0xffff one at a time via the
		 * MBC_READ_RAM_WORD mailbox command: write the address
		 * to mailbox_in[1], raise the host interrupt, then poll
		 * for the RISC's completion (semaphore BIT_0); status
		 * comes back in mailbox_out[0] and the data word in
		 * mailbox_out[2].
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll up to 6000000 x 5us (30s) per word. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Not ours; ack and keep waiting. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13352 13351
13353 13352 /*
13354 13353 * ql_2300_binary_fw_dump
13355 13354 *
13356 13355 * Input:
13357 13356 * ha: adapter state pointer.
13358 13357 * fw: firmware dump context pointer.
13359 13358 *
13360 13359 * Returns:
13361 13360 * ql local function return status code.
13362 13361 *
13363 13362 * Context:
13364 13363 * Interrupt or Kernel context, no mailbox commands allowed.
13365 13364 */
13366 13365 static int
13367 13366 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13368 13367 {
13369 13368 clock_t timer;
13370 13369 int rval = QL_SUCCESS;
13371 13370
13372 13371 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13373 13372
13374 13373 /* Disable ISP interrupts. */
13375 13374 WRT16_IO_REG(ha, ictrl, 0);
13376 13375 ADAPTER_STATE_LOCK(ha);
13377 13376 ha->flags &= ~INTERRUPTS_ENABLED;
13378 13377 ADAPTER_STATE_UNLOCK(ha);
13379 13378
13380 13379 /* Release mailbox registers. */
13381 13380 WRT16_IO_REG(ha, semaphore, 0);
13382 13381
13383 13382 /* Pause RISC. */
13384 13383 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13385 13384 timer = 30000;
13386 13385 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13387 13386 if (timer-- != 0) {
13388 13387 drv_usecwait(MILLISEC);
13389 13388 } else {
13390 13389 rval = QL_FUNCTION_TIMEOUT;
13391 13390 break;
13392 13391 }
13393 13392 }
13394 13393
13395 13394 if (rval == QL_SUCCESS) {
13396 13395 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13397 13396 sizeof (fw->pbiu_reg) / 2, 16);
13398 13397
13399 13398 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13400 13399 sizeof (fw->risc_host_reg) / 2, 16);
13401 13400
13402 13401 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13403 13402 sizeof (fw->mailbox_reg) / 2, 16);
13404 13403
13405 13404 WRT16_IO_REG(ha, ctrl_status, 0x40);
13406 13405 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13407 13406 sizeof (fw->resp_dma_reg) / 2, 16);
13408 13407
13409 13408 WRT16_IO_REG(ha, ctrl_status, 0x50);
13410 13409 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13411 13410 sizeof (fw->dma_reg) / 2, 16);
13412 13411
13413 13412 WRT16_IO_REG(ha, ctrl_status, 0);
13414 13413 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13415 13414 sizeof (fw->risc_hdw_reg) / 2, 16);
13416 13415
13417 13416 WRT16_IO_REG(ha, pcr, 0x2000);
13418 13417 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13419 13418 sizeof (fw->risc_gp0_reg) / 2, 16);
13420 13419
13421 13420 WRT16_IO_REG(ha, pcr, 0x2200);
13422 13421 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13423 13422 sizeof (fw->risc_gp1_reg) / 2, 16);
13424 13423
13425 13424 WRT16_IO_REG(ha, pcr, 0x2400);
13426 13425 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13427 13426 sizeof (fw->risc_gp2_reg) / 2, 16);
13428 13427
13429 13428 WRT16_IO_REG(ha, pcr, 0x2600);
13430 13429 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13431 13430 sizeof (fw->risc_gp3_reg) / 2, 16);
13432 13431
13433 13432 WRT16_IO_REG(ha, pcr, 0x2800);
13434 13433 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13435 13434 sizeof (fw->risc_gp4_reg) / 2, 16);
13436 13435
13437 13436 WRT16_IO_REG(ha, pcr, 0x2A00);
13438 13437 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13439 13438 sizeof (fw->risc_gp5_reg) / 2, 16);
13440 13439
13441 13440 WRT16_IO_REG(ha, pcr, 0x2C00);
13442 13441 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13443 13442 sizeof (fw->risc_gp6_reg) / 2, 16);
13444 13443
13445 13444 WRT16_IO_REG(ha, pcr, 0x2E00);
13446 13445 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13447 13446 sizeof (fw->risc_gp7_reg) / 2, 16);
13448 13447
13449 13448 WRT16_IO_REG(ha, ctrl_status, 0x10);
13450 13449 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13451 13450 ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13452 13451
13453 13452 WRT16_IO_REG(ha, ctrl_status, 0x20);
13454 13453 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13455 13454 sizeof (fw->fpm_b0_reg) / 2, 16);
13456 13455
13457 13456 WRT16_IO_REG(ha, ctrl_status, 0x30);
13458 13457 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13459 13458 sizeof (fw->fpm_b1_reg) / 2, 16);
13460 13459
13461 13460 /* Select FPM registers. */
13462 13461 WRT16_IO_REG(ha, ctrl_status, 0x20);
13463 13462
13464 13463 /* FPM Soft Reset. */
13465 13464 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13466 13465
13467 13466 /* Select frame buffer registers. */
13468 13467 WRT16_IO_REG(ha, ctrl_status, 0x10);
13469 13468
13470 13469 /* Reset frame buffer FIFOs. */
13471 13470 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13472 13471
13473 13472 /* Select RISC module registers. */
13474 13473 WRT16_IO_REG(ha, ctrl_status, 0);
13475 13474
13476 13475 /* Reset RISC module. */
13477 13476 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13478 13477
13479 13478 /* Reset ISP semaphore. */
13480 13479 WRT16_IO_REG(ha, semaphore, 0);
13481 13480
13482 13481 /* Release RISC module. */
13483 13482 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13484 13483
13485 13484 /* Wait for RISC to recover from reset. */
13486 13485 timer = 30000;
13487 13486 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13488 13487 if (timer-- != 0) {
13489 13488 drv_usecwait(MILLISEC);
13490 13489 } else {
13491 13490 rval = QL_FUNCTION_TIMEOUT;
13492 13491 break;
13493 13492 }
13494 13493 }
13495 13494
13496 13495 /* Disable RISC pause on FPM parity error. */
13497 13496 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13498 13497 }
13499 13498
13500 13499 /* Get RISC SRAM. */
13501 13500 if (rval == QL_SUCCESS) {
13502 13501 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13503 13502 }
13504 13503 /* Get STACK SRAM. */
13505 13504 if (rval == QL_SUCCESS) {
13506 13505 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13507 13506 }
13508 13507 /* Get DATA SRAM. */
13509 13508 if (rval == QL_SUCCESS) {
13510 13509 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13511 13510 }
13512 13511
13513 13512 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13514 13513
13515 13514 return (rval);
13516 13515 }
13517 13516
13518 13517 /*
13519 13518 * ql_24xx_binary_fw_dump
13520 13519 *
13521 13520 * Input:
13522 13521 * ha: adapter state pointer.
13523 13522 * fw: firmware dump context pointer.
13524 13523 *
13525 13524 * Returns:
13526 13525 * ql local function return status code.
13527 13526 *
13528 13527 * Context:
13529 13528 * Interrupt or Kernel context, no mailbox commands allowed.
13530 13529 */
13531 13530 static int
13532 13531 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13533 13532 {
13534 13533 uint32_t *reg32;
13535 13534 void *bp;
13536 13535 clock_t timer;
13537 13536 int rval = QL_SUCCESS;
13538 13537
13539 13538 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13540 13539
            /* Snapshot the host command/control register before pausing. */
13541 13540 fw->hccr = RD32_IO_REG(ha, hccr);
13542 13541
13543 13542 /* Pause RISC. */
13544 13543 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13545 13544 /* Disable ISP interrupts. */
13546 13545 WRT16_IO_REG(ha, ictrl, 0);
13547 13546
            /* Poll for pause-acknowledge: 30000 x 100us = ~3s budget. */
13548 13547 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13549 13548 for (timer = 30000;
13550 13549 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13551 13550 rval == QL_SUCCESS; timer--) {
13552 13551 if (timer) {
13553 13552 drv_usecwait(100);
13554 13553 } else {
13555 13554 rval = QL_FUNCTION_TIMEOUT;
13556 13555 }
13557 13556 }
13558 13557 }
13559 13558
13560 13559 if (rval == QL_SUCCESS) {
13561 13560 /* Host interface registers. */
13562 13561 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13563 13562 sizeof (fw->host_reg) / 4, 32);
13564 13563
13565 13564 /* Disable ISP interrupts. */
13566 13565 WRT32_IO_REG(ha, ictrl, 0);
13567 13566 RD32_IO_REG(ha, ictrl);
13568 13567 ADAPTER_STATE_LOCK(ha);
13569 13568 ha->flags &= ~INTERRUPTS_ENABLED;
13570 13569 ADAPTER_STATE_UNLOCK(ha);
13571 13570
13572 13571 /* Shadow registers. */
13573 13572
            /*
             * Each shadow value is read through a selector/data pair:
             * write the bank selector to iobase+0xF0, then read the
             * value back at iobase+0xFC.
             */
13574 13573 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13575 13574 RD32_IO_REG(ha, io_base_addr);
13576 13575
13577 13576 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13578 13577 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13579 13578 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13580 13579 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13581 13580
13582 13581 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13583 13582 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13584 13583 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13585 13584 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13586 13585
13587 13586 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13588 13587 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13589 13588 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13590 13589 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13591 13590
13592 13591 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13593 13592 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13594 13593 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13595 13594 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13596 13595
13597 13596 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13598 13597 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13599 13598 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13600 13599 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13601 13600
13602 13601 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13603 13602 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13604 13603 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13605 13604 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13606 13605
13607 13606 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13608 13607 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13609 13608 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13610 13609 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13611 13610
13612 13611 /* Mailbox registers. */
13613 13612 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13614 13613 sizeof (fw->mailbox_reg) / 2, 16);
13615 13614
13616 13615 /* Transfer sequence registers. */
13617 13616
            /*
             * For each banked dump below: io_base_addr selects a
             * register window; ql_read_regs copies it out and returns
             * the advanced destination pointer (bp) so consecutive
             * banks land back-to-back in the dump buffer.
             */
13618 13617 /* XSEQ GP */
13619 13618 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13620 13619 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13621 13620 16, 32);
13622 13621 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13623 13622 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13624 13623 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13625 13624 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13626 13625 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13627 13626 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13628 13627 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13629 13628 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13630 13629 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13631 13630 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13632 13631 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13633 13632 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13634 13633 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13635 13634 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13636 13635
13637 13636 /* XSEQ-0 */
13638 13637 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13639 13638 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13640 13639 sizeof (fw->xseq_0_reg) / 4, 32);
13641 13640
13642 13641 /* XSEQ-1 */
13643 13642 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13644 13643 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13645 13644 sizeof (fw->xseq_1_reg) / 4, 32);
13646 13645
13647 13646 /* Receive sequence registers. */
13648 13647
13649 13648 /* RSEQ GP */
13650 13649 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13651 13650 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13652 13651 16, 32);
13653 13652 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13654 13653 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13655 13654 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13656 13655 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13657 13656 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13658 13657 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13659 13658 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13660 13659 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13661 13660 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13662 13661 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13663 13662 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13664 13663 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13665 13664 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13666 13665 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13667 13666
13668 13667 /* RSEQ-0 */
13669 13668 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13670 13669 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13671 13670 sizeof (fw->rseq_0_reg) / 4, 32);
13672 13671
13673 13672 /* RSEQ-1 */
13674 13673 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13675 13674 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13676 13675 sizeof (fw->rseq_1_reg) / 4, 32);
13677 13676
13678 13677 /* RSEQ-2 */
13679 13678 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13680 13679 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13681 13680 sizeof (fw->rseq_2_reg) / 4, 32);
13682 13681
13683 13682 /* Command DMA registers. */
13684 13683
13685 13684 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13686 13685 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13687 13686 sizeof (fw->cmd_dma_reg) / 4, 32);
13688 13687
13689 13688 /* Queues. */
13690 13689
            /* Queue banks: 8 regs at 0xC0, then 7 more at 0xE4. */
13691 13690 /* RequestQ0 */
13692 13691 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13693 13692 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13694 13693 8, 32);
13695 13694 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13696 13695
13697 13696 /* ResponseQ0 */
13698 13697 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13699 13698 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13700 13699 8, 32);
13701 13700 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13702 13701
13703 13702 /* RequestQ1 */
13704 13703 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13705 13704 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13706 13705 8, 32);
13707 13706 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13708 13707
13709 13708 /* Transmit DMA registers. */
13710 13709
13711 13710 /* XMT0 */
13712 13711 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13713 13712 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13714 13713 16, 32);
13715 13714 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13716 13715 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13717 13716
13718 13717 /* XMT1 */
13719 13718 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13720 13719 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13721 13720 16, 32);
13722 13721 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13723 13722 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13724 13723
13725 13724 /* XMT2 */
13726 13725 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13727 13726 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13728 13727 16, 32);
13729 13728 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13730 13729 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13731 13730
13732 13731 /* XMT3 */
13733 13732 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13734 13733 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13735 13734 16, 32);
13736 13735 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13737 13736 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13738 13737
13739 13738 /* XMT4 */
13740 13739 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13741 13740 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13742 13741 16, 32);
13743 13742 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13744 13743 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13745 13744
13746 13745 /* XMT Common */
13747 13746 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13748 13747 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13749 13748 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13750 13749
13751 13750 /* Receive DMA registers. */
13752 13751
13753 13752 /* RCVThread0 */
13754 13753 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13755 13754 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13756 13755 ha->iobase + 0xC0, 16, 32);
13757 13756 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13758 13757 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13759 13758
13760 13759 /* RCVThread1 */
13761 13760 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13762 13761 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13763 13762 ha->iobase + 0xC0, 16, 32);
13764 13763 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13765 13764 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13766 13765
13767 13766 /* RISC registers. */
13768 13767
13769 13768 /* RISC GP */
13770 13769 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13771 13770 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13772 13771 16, 32);
13773 13772 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13774 13773 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13775 13774 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13776 13775 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13777 13776 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13778 13777 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13779 13778 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13780 13779 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13781 13780 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13782 13781 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13783 13782 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13784 13783 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13785 13784 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13786 13785 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13787 13786
13788 13787 /* Local memory controller registers. */
13789 13788
13790 13789 /* LMC */
13791 13790 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13792 13791 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13793 13792 16, 32);
13794 13793 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13795 13794 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13796 13795 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13797 13796 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13798 13797 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13799 13798 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13800 13799 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13801 13800 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13802 13801 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13803 13802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13804 13803 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13805 13804 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13806 13805
13807 13806 /* Fibre Protocol Module registers. */
13808 13807
13809 13808 /* FPM hardware */
13810 13809 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13811 13810 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13812 13811 16, 32);
13813 13812 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13814 13813 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13815 13814 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13816 13815 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13817 13816 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13818 13817 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13819 13818 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13820 13819 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13821 13820 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13822 13821 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13823 13822 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13824 13823 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13825 13824 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13826 13825 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13827 13826 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13828 13827 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13829 13828 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13830 13829 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13831 13830 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13832 13831 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13833 13832 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13834 13833 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13835 13834
13836 13835 /* Frame Buffer registers. */
13837 13836
13838 13837 /* FB hardware */
13839 13838 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13840 13839 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13841 13840 16, 32);
13842 13841 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13843 13842 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13844 13843 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13845 13844 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13846 13845 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13847 13846 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13848 13847 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13849 13848 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13850 13849 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13851 13850 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13852 13851 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13853 13852 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13854 13853 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13855 13854 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13856 13855 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13857 13856 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13858 13857 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13859 13858 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13860 13859 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13861 13860 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13862 13861 }
13863 13862
13864 13863 /* Get the request queue */
13865 13864 if (rval == QL_SUCCESS) {
13866 13865 uint32_t cnt;
13867 13866 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
13868 13867
13869 13868 /* Sync DMA buffer. */
13870 13869 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13871 13870 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13872 13871 DDI_DMA_SYNC_FORKERNEL);
13873 13872
            /*
             * Copy the ring word-by-word; LITTLE_ENDIAN_32() presumably
             * byte-swaps on big-endian hosts so the dump is stored
             * little-endian -- NOTE(review): confirm against macro def.
             */
13874 13873 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13875 13874 fw->req_q[cnt] = *w32++;
13876 13875 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13877 13876 }
13878 13877 }
13879 13878
13880 13879 /* Get the response queue */
13881 13880 if (rval == QL_SUCCESS) {
13882 13881 uint32_t cnt;
13883 13882 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
13884 13883
13885 13884 /* Sync DMA buffer. */
13886 13885 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13887 13886 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13888 13887 DDI_DMA_SYNC_FORKERNEL);
13889 13888
13890 13889 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13891 13890 fw->rsp_q[cnt] = *w32++;
13892 13891 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13893 13892 }
13894 13893 }
13895 13894
            /*
             * Reset RISC. Note this happens unconditionally, even when an
             * earlier step already failed (rval is not checked here).
             */
13896 13895 /* Reset RISC. */
13897 13896 ql_reset_chip(ha);
13898 13897
13899 13898 /* Memory. */
13900 13899 if (rval == QL_SUCCESS) {
13901 13900 /* Code RAM. */
13902 13901 rval = ql_read_risc_ram(ha, 0x20000,
13903 13902 sizeof (fw->code_ram) / 4, fw->code_ram);
13904 13903 }
13905 13904 if (rval == QL_SUCCESS) {
13906 13905 /* External Memory. */
13907 13906 rval = ql_read_risc_ram(ha, 0x100000,
13908 13907 ha->fw_ext_memory_size / 4, fw->ext_mem);
13909 13908 }
13910 13909
13911 13910 /* Get the extended trace buffer */
13912 13911 if (rval == QL_SUCCESS) {
13913 13912 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13914 13913 (ha->fwexttracebuf.bp != NULL)) {
13915 13914 uint32_t cnt;
13916 13915 uint32_t *w32 = ha->fwexttracebuf.bp;
13917 13916
13918 13917 /* Sync DMA buffer. */
13919 13918 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13920 13919 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13921 13920
13922 13921 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13923 13922 fw->ext_trace_buf[cnt] = *w32++;
13924 13923 }
13925 13924 }
13926 13925 }
13927 13926
13928 13927 /* Get the FC event trace buffer */
13929 13928 if (rval == QL_SUCCESS) {
13930 13929 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13931 13930 (ha->fwfcetracebuf.bp != NULL)) {
13932 13931 uint32_t cnt;
13933 13932 uint32_t *w32 = ha->fwfcetracebuf.bp;
13934 13933
13935 13934 /* Sync DMA buffer. */
13936 13935 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13937 13936 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13938 13937
13939 13938 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13940 13939 fw->fce_trace_buf[cnt] = *w32++;
13941 13940 }
13942 13941 }
13943 13942 }
13944 13943
13945 13944 if (rval != QL_SUCCESS) {
13946 13945 EL(ha, "failed=%xh\n", rval);
13947 13946 } else {
13948 13947 /*EMPTY*/
13949 13948 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13950 13949 }
13951 13950
13952 13951 return (rval);
13953 13952 }
13954 13953
13955 13954 /*
13956 13955 * ql_25xx_binary_fw_dump
13957 13956 *
13958 13957 * Input:
13959 13958 * ha: adapter state pointer.
13960 13959 * fw: firmware dump context pointer.
13961 13960 *
13962 13961 * Returns:
13963 13962 * ql local function return status code.
13964 13963 *
13965 13964 * Context:
13966 13965 * Interrupt or Kernel context, no mailbox commands allowed.
13967 13966 */
13968 13967 static int
13969 13968 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13970 13969 {
13971 13970 uint32_t *reg32;
13972 13971 void *bp;
13973 13972 clock_t timer;
13974 13973 int rval = QL_SUCCESS;
13975 13974
13976 13975 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13977 13976
13978 13977 fw->r2h_status = RD32_IO_REG(ha, risc2host);
13979 13978
13980 13979 /* Pause RISC. */
13981 13980 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13982 13981 /* Disable ISP interrupts. */
13983 13982 WRT16_IO_REG(ha, ictrl, 0);
13984 13983
13985 13984 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13986 13985 for (timer = 30000;
13987 13986 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13988 13987 rval == QL_SUCCESS; timer--) {
13989 13988 if (timer) {
13990 13989 drv_usecwait(100);
13991 13990 if (timer % 10000 == 0) {
13992 13991 EL(ha, "risc pause %d\n", timer);
13993 13992 }
13994 13993 } else {
13995 13994 EL(ha, "risc pause timeout\n");
13996 13995 rval = QL_FUNCTION_TIMEOUT;
13997 13996 }
13998 13997 }
13999 13998 }
14000 13999
14001 14000 if (rval == QL_SUCCESS) {
14002 14001
14003 14002 /* Host Interface registers */
14004 14003
14005 14004 /* HostRisc registers. */
14006 14005 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14007 14006 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14008 14007 16, 32);
14009 14008 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14010 14009 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14011 14010
14012 14011 /* PCIe registers. */
14013 14012 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14014 14013 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14015 14014 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14016 14015 3, 32);
14017 14016 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14018 14017 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14019 14018
14020 14019 /* Host interface registers. */
14021 14020 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14022 14021 sizeof (fw->host_reg) / 4, 32);
14023 14022
14024 14023 /* Disable ISP interrupts. */
14025 14024
14026 14025 WRT32_IO_REG(ha, ictrl, 0);
14027 14026 RD32_IO_REG(ha, ictrl);
14028 14027 ADAPTER_STATE_LOCK(ha);
14029 14028 ha->flags &= ~INTERRUPTS_ENABLED;
14030 14029 ADAPTER_STATE_UNLOCK(ha);
14031 14030
14032 14031 /* Shadow registers. */
14033 14032
14034 14033 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14035 14034 RD32_IO_REG(ha, io_base_addr);
14036 14035
14037 14036 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14038 14037 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14039 14038 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14040 14039 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14041 14040
14042 14041 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14043 14042 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14044 14043 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14045 14044 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14046 14045
14047 14046 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14048 14047 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14049 14048 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14050 14049 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14051 14050
14052 14051 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14053 14052 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14054 14053 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14055 14054 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14056 14055
14057 14056 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14058 14057 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14059 14058 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14060 14059 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14061 14060
14062 14061 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14063 14062 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14064 14063 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14065 14064 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14066 14065
14067 14066 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14068 14067 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14069 14068 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14070 14069 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14071 14070
14072 14071 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14073 14072 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14074 14073 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14075 14074 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14076 14075
14077 14076 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14078 14077 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14079 14078 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14080 14079 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14081 14080
14082 14081 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14083 14082 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14084 14083 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14085 14084 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14086 14085
14087 14086 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14088 14087 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14089 14088 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14090 14089 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14091 14090
14092 14091 /* RISC I/O register. */
14093 14092
14094 14093 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14095 14094 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14096 14095 1, 32);
14097 14096
14098 14097 /* Mailbox registers. */
14099 14098
14100 14099 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14101 14100 sizeof (fw->mailbox_reg) / 2, 16);
14102 14101
14103 14102 /* Transfer sequence registers. */
14104 14103
14105 14104 /* XSEQ GP */
14106 14105 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14107 14106 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14108 14107 16, 32);
14109 14108 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14110 14109 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14111 14110 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14112 14111 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14113 14112 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14114 14113 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14115 14114 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14116 14115 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117 14116 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14118 14117 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119 14118 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14120 14119 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14121 14120 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14122 14121 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14123 14122
14124 14123 /* XSEQ-0 */
14125 14124 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14126 14125 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14127 14126 16, 32);
14128 14127 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14129 14128 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14130 14129 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14131 14130 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14132 14131
14133 14132 /* XSEQ-1 */
14134 14133 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14135 14134 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14136 14135 16, 32);
14137 14136
14138 14137 /* Receive sequence registers. */
14139 14138
14140 14139 /* RSEQ GP */
14141 14140 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14142 14141 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14143 14142 16, 32);
14144 14143 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14145 14144 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14146 14145 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14147 14146 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14148 14147 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14149 14148 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14150 14149 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14151 14150 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14152 14151 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14153 14152 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14154 14153 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14155 14154 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14156 14155 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14157 14156 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14158 14157
14159 14158 /* RSEQ-0 */
14160 14159 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14161 14160 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14162 14161 16, 32);
14163 14162 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14164 14163 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14165 14164
14166 14165 /* RSEQ-1 */
14167 14166 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14168 14167 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14169 14168 sizeof (fw->rseq_1_reg) / 4, 32);
14170 14169
14171 14170 /* RSEQ-2 */
14172 14171 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14173 14172 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14174 14173 sizeof (fw->rseq_2_reg) / 4, 32);
14175 14174
14176 14175 /* Auxiliary sequencer registers. */
14177 14176
14178 14177 /* ASEQ GP */
14179 14178 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14180 14179 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14181 14180 16, 32);
14182 14181 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14183 14182 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14184 14183 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14185 14184 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14186 14185 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14187 14186 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14188 14187 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14189 14188 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14190 14189 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14191 14190 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14192 14191 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14193 14192 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14194 14193 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14195 14194 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14196 14195
14197 14196 /* ASEQ-0 */
14198 14197 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14199 14198 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14200 14199 16, 32);
14201 14200 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14202 14201 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14203 14202
14204 14203 /* ASEQ-1 */
14205 14204 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14206 14205 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14207 14206 16, 32);
14208 14207
14209 14208 /* ASEQ-2 */
14210 14209 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14211 14210 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14212 14211 16, 32);
14213 14212
14214 14213 /* Command DMA registers. */
14215 14214
14216 14215 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14217 14216 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14218 14217 sizeof (fw->cmd_dma_reg) / 4, 32);
14219 14218
14220 14219 /* Queues. */
14221 14220
14222 14221 /* RequestQ0 */
14223 14222 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14224 14223 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14225 14224 8, 32);
14226 14225 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14227 14226
14228 14227 /* ResponseQ0 */
14229 14228 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14230 14229 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14231 14230 8, 32);
14232 14231 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14233 14232
14234 14233 /* RequestQ1 */
14235 14234 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14236 14235 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14237 14236 8, 32);
14238 14237 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14239 14238
14240 14239 /* Transmit DMA registers. */
14241 14240
14242 14241 /* XMT0 */
14243 14242 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14244 14243 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14245 14244 16, 32);
14246 14245 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14247 14246 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14248 14247
14249 14248 /* XMT1 */
14250 14249 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14251 14250 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14252 14251 16, 32);
14253 14252 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14254 14253 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14255 14254
14256 14255 /* XMT2 */
14257 14256 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14258 14257 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14259 14258 16, 32);
14260 14259 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14261 14260 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14262 14261
14263 14262 /* XMT3 */
14264 14263 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14265 14264 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14266 14265 16, 32);
14267 14266 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14268 14267 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14269 14268
14270 14269 /* XMT4 */
14271 14270 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14272 14271 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14273 14272 16, 32);
14274 14273 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14275 14274 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14276 14275
14277 14276 /* XMT Common */
14278 14277 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14279 14278 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14280 14279 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14281 14280
14282 14281 /* Receive DMA registers. */
14283 14282
14284 14283 /* RCVThread0 */
14285 14284 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14286 14285 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14287 14286 ha->iobase + 0xC0, 16, 32);
14288 14287 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14289 14288 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14290 14289
14291 14290 /* RCVThread1 */
14292 14291 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14293 14292 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14294 14293 ha->iobase + 0xC0, 16, 32);
14295 14294 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14296 14295 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14297 14296
14298 14297 /* RISC registers. */
14299 14298
14300 14299 /* RISC GP */
14301 14300 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14302 14301 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14303 14302 16, 32);
14304 14303 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14305 14304 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14306 14305 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14307 14306 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14308 14307 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14309 14308 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14310 14309 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14311 14310 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14312 14311 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14313 14312 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14314 14313 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14315 14314 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14316 14315 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14317 14316 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14318 14317
14319 14318 /* Local memory controller (LMC) registers. */
14320 14319
14321 14320 /* LMC */
14322 14321 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14323 14322 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14324 14323 16, 32);
14325 14324 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14326 14325 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14327 14326 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14328 14327 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14329 14328 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14330 14329 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14331 14330 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14332 14331 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14333 14332 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14334 14333 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14335 14334 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14336 14335 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14337 14336 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14338 14337 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14339 14338
14340 14339 /* Fibre Protocol Module registers. */
14341 14340
14342 14341 /* FPM hardware */
14343 14342 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14344 14343 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14345 14344 16, 32);
14346 14345 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14347 14346 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14348 14347 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14349 14348 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14350 14349 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14351 14350 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14352 14351 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14353 14352 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14354 14353 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14355 14354 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14356 14355 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14357 14356 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14358 14357 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14359 14358 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14360 14359 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14361 14360 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14362 14361 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14363 14362 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14364 14363 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14365 14364 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14366 14365 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14367 14366 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14368 14367
14369 14368 /* Frame Buffer registers. */
14370 14369
14371 14370 /* FB hardware */
14372 14371 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14373 14372 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14374 14373 16, 32);
14375 14374 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14376 14375 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14377 14376 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14378 14377 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14379 14378 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14380 14379 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14381 14380 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14382 14381 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14383 14382 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14384 14383 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14385 14384 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14386 14385 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14387 14386 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14388 14387 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14389 14388 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14390 14389 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14391 14390 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14392 14391 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14393 14392 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14394 14393 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14395 14394 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14396 14395 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14397 14396 }
14398 14397
14399 14398 /* Get the request queue */
14400 14399 if (rval == QL_SUCCESS) {
14401 14400 uint32_t cnt;
14402 14401 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14403 14402
14404 14403 /* Sync DMA buffer. */
14405 14404 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14406 14405 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14407 14406 DDI_DMA_SYNC_FORKERNEL);
14408 14407
14409 14408 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14410 14409 fw->req_q[cnt] = *w32++;
14411 14410 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14412 14411 }
14413 14412 }
14414 14413
14415 14414 	/* Get the response queue */
14416 14415 if (rval == QL_SUCCESS) {
14417 14416 uint32_t cnt;
14418 14417 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14419 14418
14420 14419 /* Sync DMA buffer. */
14421 14420 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14422 14421 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14423 14422 DDI_DMA_SYNC_FORKERNEL);
14424 14423
14425 14424 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14426 14425 fw->rsp_q[cnt] = *w32++;
14427 14426 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14428 14427 }
14429 14428 }
14430 14429
14431 14430 /* Reset RISC. */
14432 14431
14433 14432 ql_reset_chip(ha);
14434 14433
14435 14434 /* Memory. */
14436 14435
14437 14436 if (rval == QL_SUCCESS) {
14438 14437 /* Code RAM. */
14439 14438 rval = ql_read_risc_ram(ha, 0x20000,
14440 14439 sizeof (fw->code_ram) / 4, fw->code_ram);
14441 14440 }
14442 14441 if (rval == QL_SUCCESS) {
14443 14442 /* External Memory. */
14444 14443 rval = ql_read_risc_ram(ha, 0x100000,
14445 14444 ha->fw_ext_memory_size / 4, fw->ext_mem);
14446 14445 }
14447 14446
14448 14447 /* Get the FC event trace buffer */
14449 14448 if (rval == QL_SUCCESS) {
14450 14449 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14451 14450 (ha->fwfcetracebuf.bp != NULL)) {
14452 14451 uint32_t cnt;
14453 14452 uint32_t *w32 = ha->fwfcetracebuf.bp;
14454 14453
14455 14454 /* Sync DMA buffer. */
14456 14455 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14457 14456 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14458 14457
14459 14458 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14460 14459 fw->fce_trace_buf[cnt] = *w32++;
14461 14460 }
14462 14461 }
14463 14462 }
14464 14463
14465 14464 /* Get the extended trace buffer */
14466 14465 if (rval == QL_SUCCESS) {
14467 14466 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14468 14467 (ha->fwexttracebuf.bp != NULL)) {
14469 14468 uint32_t cnt;
14470 14469 uint32_t *w32 = ha->fwexttracebuf.bp;
14471 14470
14472 14471 /* Sync DMA buffer. */
14473 14472 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14474 14473 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14475 14474
14476 14475 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14477 14476 fw->ext_trace_buf[cnt] = *w32++;
14478 14477 }
14479 14478 }
14480 14479 }
14481 14480
14482 14481 if (rval != QL_SUCCESS) {
14483 14482 EL(ha, "failed=%xh\n", rval);
14484 14483 } else {
14485 14484 /*EMPTY*/
14486 14485 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14487 14486 }
14488 14487
14489 14488 return (rval);
14490 14489 }
14491 14490
14492 14491 /*
14493 14492 * ql_81xx_binary_fw_dump
14494 14493 *
14495 14494 * Input:
14496 14495 * ha: adapter state pointer.
14497 14496 * fw: firmware dump context pointer.
14498 14497 *
14499 14498 * Returns:
14500 14499 * ql local function return status code.
14501 14500 *
14502 14501 * Context:
14503 14502 * Interrupt or Kernel context, no mailbox commands allowed.
14504 14503 */
14505 14504 static int
14506 14505 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14507 14506 {
14508 14507 uint32_t *reg32;
14509 14508 void *bp;
14510 14509 clock_t timer;
14511 14510 int rval = QL_SUCCESS;
14512 14511
14513 14512 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14514 14513
14515 14514 fw->r2h_status = RD32_IO_REG(ha, risc2host);
14516 14515
14517 14516 /* Pause RISC. */
14518 14517 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14519 14518 /* Disable ISP interrupts. */
14520 14519 WRT16_IO_REG(ha, ictrl, 0);
14521 14520
14522 14521 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14523 14522 for (timer = 30000;
14524 14523 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14525 14524 rval == QL_SUCCESS; timer--) {
14526 14525 if (timer) {
14527 14526 drv_usecwait(100);
14528 14527 if (timer % 10000 == 0) {
14529 14528 EL(ha, "risc pause %d\n", timer);
14530 14529 }
14531 14530 } else {
14532 14531 EL(ha, "risc pause timeout\n");
14533 14532 rval = QL_FUNCTION_TIMEOUT;
14534 14533 }
14535 14534 }
14536 14535 }
14537 14536
14538 14537 if (rval == QL_SUCCESS) {
14539 14538
14540 14539 /* Host Interface registers */
14541 14540
14542 14541 /* HostRisc registers. */
14543 14542 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14544 14543 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14545 14544 16, 32);
14546 14545 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14547 14546 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14548 14547
14549 14548 /* PCIe registers. */
14550 14549 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14551 14550 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14552 14551 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14553 14552 3, 32);
14554 14553 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14555 14554 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14556 14555
14557 14556 /* Host interface registers. */
14558 14557 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14559 14558 sizeof (fw->host_reg) / 4, 32);
14560 14559
14561 14560 /* Disable ISP interrupts. */
14562 14561
14563 14562 WRT32_IO_REG(ha, ictrl, 0);
14564 14563 RD32_IO_REG(ha, ictrl);
14565 14564 ADAPTER_STATE_LOCK(ha);
14566 14565 ha->flags &= ~INTERRUPTS_ENABLED;
14567 14566 ADAPTER_STATE_UNLOCK(ha);
14568 14567
14569 14568 /* Shadow registers. */
14570 14569
14571 14570 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14572 14571 RD32_IO_REG(ha, io_base_addr);
14573 14572
14574 14573 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14575 14574 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14576 14575 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14577 14576 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14578 14577
14579 14578 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14580 14579 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14581 14580 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14582 14581 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14583 14582
14584 14583 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14585 14584 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14586 14585 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14587 14586 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14588 14587
14589 14588 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14590 14589 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14591 14590 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14592 14591 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14593 14592
14594 14593 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14595 14594 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14596 14595 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14597 14596 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14598 14597
14599 14598 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14600 14599 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14601 14600 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14602 14601 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14603 14602
14604 14603 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14605 14604 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14606 14605 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14607 14606 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14608 14607
14609 14608 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14610 14609 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14611 14610 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14612 14611 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14613 14612
14614 14613 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14615 14614 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14616 14615 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14617 14616 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14618 14617
14619 14618 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14620 14619 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14621 14620 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14622 14621 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14623 14622
14624 14623 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14625 14624 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14626 14625 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14627 14626 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14628 14627
14629 14628 /* RISC I/O register. */
14630 14629
14631 14630 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14632 14631 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14633 14632 1, 32);
14634 14633
14635 14634 /* Mailbox registers. */
14636 14635
14637 14636 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14638 14637 sizeof (fw->mailbox_reg) / 2, 16);
14639 14638
14640 14639 /* Transfer sequence registers. */
14641 14640
14642 14641 /* XSEQ GP */
14643 14642 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14644 14643 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14645 14644 16, 32);
14646 14645 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14647 14646 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14648 14647 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14649 14648 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14650 14649 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14651 14650 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14652 14651 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14653 14652 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654 14653 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14655 14654 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14656 14655 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14657 14656 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14658 14657 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14659 14658 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14660 14659
14661 14660 /* XSEQ-0 */
14662 14661 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14663 14662 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14664 14663 16, 32);
14665 14664 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14666 14665 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14667 14666 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14668 14667 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14669 14668
14670 14669 /* XSEQ-1 */
14671 14670 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14672 14671 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14673 14672 16, 32);
14674 14673
14675 14674 /* Receive sequence registers. */
14676 14675
14677 14676 /* RSEQ GP */
14678 14677 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14679 14678 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14680 14679 16, 32);
14681 14680 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14682 14681 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14683 14682 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14684 14683 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14685 14684 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14686 14685 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14687 14686 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14688 14687 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14689 14688 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14690 14689 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14691 14690 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14692 14691 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14693 14692 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14694 14693 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14695 14694
14696 14695 /* RSEQ-0 */
14697 14696 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14698 14697 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14699 14698 16, 32);
14700 14699 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14701 14700 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14702 14701
14703 14702 /* RSEQ-1 */
14704 14703 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14705 14704 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14706 14705 sizeof (fw->rseq_1_reg) / 4, 32);
14707 14706
14708 14707 /* RSEQ-2 */
14709 14708 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14710 14709 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14711 14710 sizeof (fw->rseq_2_reg) / 4, 32);
14712 14711
14713 14712 /* Auxiliary sequencer registers. */
14714 14713
14715 14714 /* ASEQ GP */
14716 14715 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14717 14716 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14718 14717 16, 32);
14719 14718 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14720 14719 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14721 14720 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14722 14721 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14723 14722 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14724 14723 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14725 14724 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14726 14725 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14727 14726 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14728 14727 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14729 14728 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14730 14729 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14731 14730 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14732 14731 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14733 14732
14734 14733 /* ASEQ-0 */
14735 14734 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14736 14735 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14737 14736 16, 32);
14738 14737 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14739 14738 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14740 14739
14741 14740 /* ASEQ-1 */
14742 14741 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14743 14742 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14744 14743 16, 32);
14745 14744
14746 14745 /* ASEQ-2 */
14747 14746 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14748 14747 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14749 14748 16, 32);
14750 14749
14751 14750 /* Command DMA registers. */
14752 14751
14753 14752 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14754 14753 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14755 14754 sizeof (fw->cmd_dma_reg) / 4, 32);
14756 14755
14757 14756 /* Queues. */
14758 14757
14759 14758 /* RequestQ0 */
14760 14759 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14761 14760 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14762 14761 8, 32);
14763 14762 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14764 14763
14765 14764 /* ResponseQ0 */
14766 14765 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14767 14766 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14768 14767 8, 32);
14769 14768 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14770 14769
14771 14770 /* RequestQ1 */
14772 14771 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14773 14772 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14774 14773 8, 32);
14775 14774 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14776 14775
14777 14776 /* Transmit DMA registers. */
14778 14777
14779 14778 /* XMT0 */
14780 14779 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14781 14780 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14782 14781 16, 32);
14783 14782 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14784 14783 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14785 14784
14786 14785 /* XMT1 */
14787 14786 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14788 14787 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14789 14788 16, 32);
14790 14789 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14791 14790 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14792 14791
14793 14792 /* XMT2 */
14794 14793 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14795 14794 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14796 14795 16, 32);
14797 14796 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14798 14797 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14799 14798
14800 14799 /* XMT3 */
14801 14800 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14802 14801 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14803 14802 16, 32);
14804 14803 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14805 14804 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14806 14805
14807 14806 /* XMT4 */
14808 14807 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14809 14808 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14810 14809 16, 32);
14811 14810 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14812 14811 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14813 14812
14814 14813 /* XMT Common */
14815 14814 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14816 14815 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14817 14816 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14818 14817
14819 14818 /* Receive DMA registers. */
14820 14819
14821 14820 /* RCVThread0 */
14822 14821 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14823 14822 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14824 14823 ha->iobase + 0xC0, 16, 32);
14825 14824 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14826 14825 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14827 14826
14828 14827 /* RCVThread1 */
14829 14828 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14830 14829 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14831 14830 ha->iobase + 0xC0, 16, 32);
14832 14831 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14833 14832 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14834 14833
14835 14834 /* RISC registers. */
14836 14835
14837 14836 /* RISC GP */
14838 14837 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14839 14838 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14840 14839 16, 32);
14841 14840 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14842 14841 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14843 14842 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14844 14843 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14845 14844 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14846 14845 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14847 14846 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14848 14847 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14849 14848 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14850 14849 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14851 14850 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14852 14851 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14853 14852 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14854 14853 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14855 14854
14856 14855 /* Local memory controller (LMC) registers. */
14857 14856
14858 14857 /* LMC */
14859 14858 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14860 14859 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14861 14860 16, 32);
14862 14861 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14863 14862 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14864 14863 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14865 14864 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14866 14865 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14867 14866 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14868 14867 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14869 14868 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14870 14869 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14871 14870 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14872 14871 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14873 14872 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14874 14873 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14875 14874 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14876 14875
14877 14876 /* Fibre Protocol Module registers. */
14878 14877
14879 14878 /* FPM hardware */
14880 14879 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14881 14880 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14882 14881 16, 32);
14883 14882 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14884 14883 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14885 14884 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14886 14885 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14887 14886 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14888 14887 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14889 14888 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14890 14889 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14891 14890 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14892 14891 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14893 14892 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14894 14893 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14895 14894 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14896 14895 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14897 14896 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14898 14897 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14899 14898 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14900 14899 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14901 14900 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14902 14901 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14903 14902 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14904 14903 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14905 14904 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14906 14905 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14907 14906 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14908 14907 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14909 14908
14910 14909 /* Frame Buffer registers. */
14911 14910
14912 14911 /* FB hardware */
14913 14912 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14914 14913 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14915 14914 16, 32);
14916 14915 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14917 14916 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14918 14917 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14919 14918 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14920 14919 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14921 14920 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14922 14921 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14923 14922 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14924 14923 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14925 14924 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14926 14925 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14927 14926 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14928 14927 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14929 14928 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14930 14929 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14931 14930 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14932 14931 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14933 14932 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14934 14933 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14935 14934 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14936 14935 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14937 14936 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14938 14937 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14939 14938 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14940 14939 }
14941 14940
14942 14941 /* Get the request queue */
14943 14942 if (rval == QL_SUCCESS) {
14944 14943 uint32_t cnt;
14945 14944 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14946 14945
14947 14946 /* Sync DMA buffer. */
14948 14947 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14949 14948 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14950 14949 DDI_DMA_SYNC_FORKERNEL);
14951 14950
14952 14951 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14953 14952 fw->req_q[cnt] = *w32++;
14954 14953 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14955 14954 }
14956 14955 }
14957 14956
14958 14957 /* Get the response queue */
14959 14958 if (rval == QL_SUCCESS) {
14960 14959 uint32_t cnt;
14961 14960 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14962 14961
14963 14962 /* Sync DMA buffer. */
14964 14963 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14965 14964 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14966 14965 DDI_DMA_SYNC_FORKERNEL);
14967 14966
14968 14967 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14969 14968 fw->rsp_q[cnt] = *w32++;
14970 14969 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14971 14970 }
14972 14971 }
14973 14972
14974 14973 /* Reset RISC. */
14975 14974
14976 14975 ql_reset_chip(ha);
14977 14976
14978 14977 /* Memory. */
14979 14978
14980 14979 if (rval == QL_SUCCESS) {
14981 14980 /* Code RAM. */
14982 14981 rval = ql_read_risc_ram(ha, 0x20000,
14983 14982 sizeof (fw->code_ram) / 4, fw->code_ram);
14984 14983 }
14985 14984 if (rval == QL_SUCCESS) {
14986 14985 /* External Memory. */
14987 14986 rval = ql_read_risc_ram(ha, 0x100000,
14988 14987 ha->fw_ext_memory_size / 4, fw->ext_mem);
14989 14988 }
14990 14989
14991 14990 /* Get the FC event trace buffer */
14992 14991 if (rval == QL_SUCCESS) {
14993 14992 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14994 14993 (ha->fwfcetracebuf.bp != NULL)) {
14995 14994 uint32_t cnt;
14996 14995 uint32_t *w32 = ha->fwfcetracebuf.bp;
14997 14996
14998 14997 /* Sync DMA buffer. */
14999 14998 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15000 14999 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15001 15000
15002 15001 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15003 15002 fw->fce_trace_buf[cnt] = *w32++;
15004 15003 }
15005 15004 }
15006 15005 }
15007 15006
15008 15007 /* Get the extended trace buffer */
15009 15008 if (rval == QL_SUCCESS) {
15010 15009 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15011 15010 (ha->fwexttracebuf.bp != NULL)) {
15012 15011 uint32_t cnt;
15013 15012 uint32_t *w32 = ha->fwexttracebuf.bp;
15014 15013
15015 15014 /* Sync DMA buffer. */
15016 15015 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15017 15016 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15018 15017
15019 15018 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15020 15019 fw->ext_trace_buf[cnt] = *w32++;
15021 15020 }
15022 15021 }
15023 15022 }
15024 15023
15025 15024 if (rval != QL_SUCCESS) {
15026 15025 EL(ha, "failed=%xh\n", rval);
15027 15026 } else {
15028 15027 /*EMPTY*/
15029 15028 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15030 15029 }
15031 15030
15032 15031 return (rval);
15033 15032 }
15034 15033
15035 15034 /*
15036 15035 * ql_read_risc_ram
15037 15036 * Reads RISC RAM one word at a time.
15038 15037 * Risc interrupts must be disabled when this routine is called.
15039 15038 *
15040 15039 * Input:
15041 15040 * ha: adapter state pointer.
15042 15041 * risc_address: RISC code start address.
15043 15042 * len: Number of words.
15044 15043 * buf: buffer pointer.
15045 15044 *
15046 15045 * Returns:
15047 15046 * ql local function return status code.
15048 15047 *
15049 15048 * Context:
15050 15049 * Interrupt or Kernel context, no mailbox commands allowed.
15051 15050 */
15052 15051 static int
15053 15052 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
15054 15053 void *buf)
15055 15054 {
15056 15055 uint32_t cnt;
15057 15056 uint16_t stat;
15058 15057 clock_t timer;
15059 15058 uint16_t *buf16 = (uint16_t *)buf;
15060 15059 uint32_t *buf32 = (uint32_t *)buf;
15061 15060 int rval = QL_SUCCESS;
15062 15061
15063 15062 for (cnt = 0; cnt < len; cnt++, risc_address++) {
15064 15063 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
15065 15064 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
15066 15065 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
15067 15066 if (CFG_IST(ha, CFG_CTRL_8021)) {
15068 15067 WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
15069 15068 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15070 15069 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
15071 15070 } else {
15072 15071 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
15073 15072 }
15074 15073 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
15075 15074 if (INTERRUPT_PENDING(ha)) {
15076 15075 stat = (uint16_t)
15077 15076 (RD16_IO_REG(ha, risc2host) & 0xff);
15078 15077 if ((stat == 1) || (stat == 0x10)) {
15079 15078 if (CFG_IST(ha, CFG_CTRL_24258081)) {
15080 15079 buf32[cnt] = SHORT_TO_LONG(
15081 15080 RD16_IO_REG(ha,
15082 15081 mailbox_out[2]),
15083 15082 RD16_IO_REG(ha,
15084 15083 mailbox_out[3]));
15085 15084 } else {
15086 15085 buf16[cnt] =
15087 15086 RD16_IO_REG(ha,
15088 15087 mailbox_out[2]);
15089 15088 }
15090 15089
15091 15090 break;
15092 15091 } else if ((stat == 2) || (stat == 0x11)) {
15093 15092 rval = RD16_IO_REG(ha, mailbox_out[0]);
15094 15093 break;
15095 15094 }
15096 15095 if (CFG_IST(ha, CFG_CTRL_8021)) {
15097 15096 ql_8021_clr_hw_intr(ha);
15098 15097 ql_8021_clr_fw_intr(ha);
15099 15098 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15100 15099 WRT32_IO_REG(ha, hccr,
15101 15100 HC24_CLR_RISC_INT);
15102 15101 RD32_IO_REG(ha, hccr);
15103 15102 } else {
15104 15103 WRT16_IO_REG(ha, hccr,
15105 15104 HC_CLR_RISC_INT);
15106 15105 }
15107 15106 }
15108 15107 drv_usecwait(5);
15109 15108 }
15110 15109 if (CFG_IST(ha, CFG_CTRL_8021)) {
15111 15110 ql_8021_clr_hw_intr(ha);
15112 15111 ql_8021_clr_fw_intr(ha);
15113 15112 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15114 15113 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
15115 15114 RD32_IO_REG(ha, hccr);
15116 15115 } else {
15117 15116 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
15118 15117 WRT16_IO_REG(ha, semaphore, 0);
15119 15118 }
15120 15119
15121 15120 if (timer == 0) {
15122 15121 rval = QL_FUNCTION_TIMEOUT;
15123 15122 }
15124 15123 }
15125 15124
15126 15125 return (rval);
15127 15126 }
15128 15127
15129 15128 /*
15130 15129 * ql_read_regs
15131 15130 * Reads adapter registers to buffer.
15132 15131 *
15133 15132 * Input:
15134 15133 * ha: adapter state pointer.
15135 15134 * buf: buffer pointer.
15136 15135 * reg: start address.
15137 15136 * count: number of registers.
15138 15137 * wds: register size.
15139 15138 *
15140 15139 * Context:
15141 15140 * Interrupt or Kernel context, no mailbox commands allowed.
15142 15141 */
15143 15142 static void *
15144 15143 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15145 15144 uint8_t wds)
15146 15145 {
15147 15146 uint32_t *bp32, *reg32;
15148 15147 uint16_t *bp16, *reg16;
15149 15148 uint8_t *bp8, *reg8;
15150 15149
15151 15150 switch (wds) {
15152 15151 case 32:
15153 15152 bp32 = buf;
15154 15153 reg32 = reg;
15155 15154 while (count--) {
15156 15155 *bp32++ = RD_REG_DWORD(ha, reg32++);
15157 15156 }
15158 15157 return (bp32);
15159 15158 case 16:
15160 15159 bp16 = buf;
15161 15160 reg16 = reg;
15162 15161 while (count--) {
15163 15162 *bp16++ = RD_REG_WORD(ha, reg16++);
15164 15163 }
15165 15164 return (bp16);
15166 15165 case 8:
15167 15166 bp8 = buf;
15168 15167 reg8 = reg;
15169 15168 while (count--) {
15170 15169 *bp8++ = RD_REG_BYTE(ha, reg8++);
15171 15170 }
15172 15171 return (bp8);
15173 15172 default:
15174 15173 EL(ha, "Unknown word size=%d\n", wds);
15175 15174 return (buf);
15176 15175 }
15177 15176 }
15178 15177
15179 15178 static int
15180 15179 ql_save_config_regs(dev_info_t *dip)
15181 15180 {
15182 15181 ql_adapter_state_t *ha;
15183 15182 int ret;
15184 15183 ql_config_space_t chs;
15185 15184 caddr_t prop = "ql-config-space";
15186 15185
15187 15186 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15188 15187 if (ha == NULL) {
15189 15188 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15190 15189 ddi_get_instance(dip));
15191 15190 return (DDI_FAILURE);
15192 15191 }
15193 15192
15194 15193 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15195 15194
15196 15195 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15197 15196 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15198 15197 1) {
15199 15198 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15200 15199 return (DDI_SUCCESS);
15201 15200 }
15202 15201
15203 15202 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15204 15203 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15205 15204 PCI_CONF_HEADER);
15206 15205 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15207 15206 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15208 15207 PCI_BCNF_BCNTRL);
15209 15208 }
15210 15209
15211 15210 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15212 15211 PCI_CONF_CACHE_LINESZ);
15213 15212
15214 15213 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15215 15214 PCI_CONF_LATENCY_TIMER);
15216 15215
15217 15216 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15218 15217 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15219 15218 PCI_BCNF_LATENCY_TIMER);
15220 15219 }
15221 15220
15222 15221 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15223 15222 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15224 15223 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15225 15224 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15226 15225 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15227 15226 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15228 15227
15229 15228 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15230 15229 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15231 15230 (uchar_t *)&chs, sizeof (ql_config_space_t));
15232 15231
15233 15232 if (ret != DDI_PROP_SUCCESS) {
15234 15233 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15235 15234 QL_NAME, ddi_get_instance(dip), prop);
15236 15235 return (DDI_FAILURE);
15237 15236 }
15238 15237
15239 15238 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15240 15239
15241 15240 return (DDI_SUCCESS);
15242 15241 }
15243 15242
15244 15243 static int
15245 15244 ql_restore_config_regs(dev_info_t *dip)
15246 15245 {
15247 15246 ql_adapter_state_t *ha;
15248 15247 uint_t elements;
15249 15248 ql_config_space_t *chs_p;
15250 15249 caddr_t prop = "ql-config-space";
15251 15250
15252 15251 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15253 15252 if (ha == NULL) {
15254 15253 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15255 15254 ddi_get_instance(dip));
15256 15255 return (DDI_FAILURE);
15257 15256 }
15258 15257
15259 15258 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15260 15259
15261 15260 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15262 15261 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
15263 15262 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
15264 15263 (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
15265 15264 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15266 15265 return (DDI_FAILURE);
15267 15266 }
15268 15267
15269 15268 ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
15270 15269
15271 15270 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15272 15271 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15273 15272 chs_p->chs_bridge_control);
15274 15273 }
15275 15274
15276 15275 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15277 15276 chs_p->chs_cache_line_size);
15278 15277
15279 15278 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15280 15279 chs_p->chs_latency_timer);
15281 15280
15282 15281 if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15283 15282 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15284 15283 chs_p->chs_sec_latency_timer);
15285 15284 }
15286 15285
15287 15286 ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15288 15287 ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15289 15288 ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15290 15289 ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15291 15290 ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15292 15291 ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15293 15292
15294 15293 ddi_prop_free(chs_p);
15295 15294
15296 15295 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15297 15296 if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15298 15297 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15299 15298 QL_NAME, ddi_get_instance(dip), prop);
15300 15299 }
15301 15300
15302 15301 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15303 15302
15304 15303 return (DDI_SUCCESS);
15305 15304 }
15306 15305
15307 15306 uint8_t
15308 15307 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15309 15308 {
15310 15309 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15311 15310 return (ddi_get8(ha->sbus_config_handle,
15312 15311 (uint8_t *)(ha->sbus_config_base + off)));
15313 15312 }
15314 15313
15315 15314 #ifdef KERNEL_32
15316 15315 return (pci_config_getb(ha->pci_handle, off));
15317 15316 #else
15318 15317 return (pci_config_get8(ha->pci_handle, off));
15319 15318 #endif
15320 15319 }
15321 15320
15322 15321 uint16_t
15323 15322 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15324 15323 {
15325 15324 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15326 15325 return (ddi_get16(ha->sbus_config_handle,
15327 15326 (uint16_t *)(ha->sbus_config_base + off)));
15328 15327 }
15329 15328
15330 15329 #ifdef KERNEL_32
15331 15330 return (pci_config_getw(ha->pci_handle, off));
15332 15331 #else
15333 15332 return (pci_config_get16(ha->pci_handle, off));
15334 15333 #endif
15335 15334 }
15336 15335
15337 15336 uint32_t
15338 15337 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15339 15338 {
15340 15339 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15341 15340 return (ddi_get32(ha->sbus_config_handle,
15342 15341 (uint32_t *)(ha->sbus_config_base + off)));
15343 15342 }
15344 15343
15345 15344 #ifdef KERNEL_32
15346 15345 return (pci_config_getl(ha->pci_handle, off));
15347 15346 #else
15348 15347 return (pci_config_get32(ha->pci_handle, off));
15349 15348 #endif
15350 15349 }
15351 15350
15352 15351 void
15353 15352 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15354 15353 {
15355 15354 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15356 15355 ddi_put8(ha->sbus_config_handle,
15357 15356 (uint8_t *)(ha->sbus_config_base + off), val);
15358 15357 } else {
15359 15358 #ifdef KERNEL_32
15360 15359 pci_config_putb(ha->pci_handle, off, val);
15361 15360 #else
15362 15361 pci_config_put8(ha->pci_handle, off, val);
15363 15362 #endif
15364 15363 }
15365 15364 }
15366 15365
15367 15366 void
15368 15367 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15369 15368 {
15370 15369 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15371 15370 ddi_put16(ha->sbus_config_handle,
15372 15371 (uint16_t *)(ha->sbus_config_base + off), val);
15373 15372 } else {
15374 15373 #ifdef KERNEL_32
15375 15374 pci_config_putw(ha->pci_handle, off, val);
15376 15375 #else
15377 15376 pci_config_put16(ha->pci_handle, off, val);
15378 15377 #endif
15379 15378 }
15380 15379 }
15381 15380
15382 15381 void
15383 15382 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15384 15383 {
15385 15384 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15386 15385 ddi_put32(ha->sbus_config_handle,
15387 15386 (uint32_t *)(ha->sbus_config_base + off), val);
15388 15387 } else {
15389 15388 #ifdef KERNEL_32
15390 15389 pci_config_putl(ha->pci_handle, off, val);
15391 15390 #else
15392 15391 pci_config_put32(ha->pci_handle, off, val);
15393 15392 #endif
15394 15393 }
15395 15394 }
15396 15395
15397 15396 /*
15398 15397 * ql_halt
15399 15398 * Waits for commands that are running to finish and
15400 15399 * if they do not, commands are aborted.
15401 15400 * Finally the adapter is reset.
15402 15401 *
15403 15402 * Input:
15404 15403 * ha: adapter state pointer.
15405 15404 * pwr: power state.
15406 15405 *
15407 15406 * Context:
15408 15407 * Kernel context.
15409 15408 */
15410 15409 static void
15411 15410 ql_halt(ql_adapter_state_t *ha, int pwr)
15412 15411 {
15413 15412 uint32_t cnt;
15414 15413 ql_tgt_t *tq;
15415 15414 ql_srb_t *sp;
15416 15415 uint16_t index;
15417 15416 ql_link_t *link;
15418 15417
15419 15418 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15420 15419
15421 15420 /* Wait for all commands running to finish. */
15422 15421 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
15423 15422 for (link = ha->dev[index].first; link != NULL;
15424 15423 link = link->next) {
15425 15424 tq = link->base_address;
15426 15425 (void) ql_abort_device(ha, tq, 0);
15427 15426
15428 15427 /* Wait for 30 seconds for commands to finish. */
15429 15428 for (cnt = 3000; cnt != 0; cnt--) {
15430 15429 /* Acquire device queue lock. */
15431 15430 DEVICE_QUEUE_LOCK(tq);
15432 15431 if (tq->outcnt == 0) {
15433 15432 /* Release device queue lock. */
15434 15433 DEVICE_QUEUE_UNLOCK(tq);
15435 15434 break;
15436 15435 } else {
15437 15436 /* Release device queue lock. */
15438 15437 DEVICE_QUEUE_UNLOCK(tq);
15439 15438 ql_delay(ha, 10000);
15440 15439 }
15441 15440 }
15442 15441
15443 15442 /* Finish any commands waiting for more status. */
15444 15443 if (ha->status_srb != NULL) {
15445 15444 sp = ha->status_srb;
15446 15445 ha->status_srb = NULL;
15447 15446 sp->cmd.next = NULL;
15448 15447 ql_done(&sp->cmd);
15449 15448 }
15450 15449
15451 15450 /* Abort commands that did not finish. */
15452 15451 if (cnt == 0) {
15453 15452 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
15454 15453 cnt++) {
15455 15454 if (ha->pending_cmds.first != NULL) {
15456 15455 ql_start_iocb(ha, NULL);
15457 15456 cnt = 1;
15458 15457 }
15459 15458 sp = ha->outstanding_cmds[cnt];
15460 15459 if (sp != NULL &&
15461 15460 sp->lun_queue->target_queue ==
15462 15461 tq) {
15463 15462 (void) ql_abort((opaque_t)ha,
15464 15463 sp->pkt, 0);
15465 15464 }
15466 15465 }
15467 15466 }
15468 15467 }
15469 15468 }
15470 15469
15471 15470 /* Shutdown IP. */
15472 15471 if (ha->flags & IP_INITIALIZED) {
15473 15472 (void) ql_shutdown_ip(ha);
15474 15473 }
15475 15474
15476 15475 /* Stop all timers. */
15477 15476 ADAPTER_STATE_LOCK(ha);
15478 15477 ha->port_retry_timer = 0;
15479 15478 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
15480 15479 ha->watchdog_timer = 0;
15481 15480 ADAPTER_STATE_UNLOCK(ha);
15482 15481
15483 15482 if (pwr == PM_LEVEL_D3) {
15484 15483 ADAPTER_STATE_LOCK(ha);
15485 15484 ha->flags &= ~ONLINE;
15486 15485 ADAPTER_STATE_UNLOCK(ha);
15487 15486
15488 15487 /* Reset ISP chip. */
15489 15488 ql_reset_chip(ha);
15490 15489 }
15491 15490
15492 15491 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15493 15492 }
15494 15493
15495 15494 /*
15496 15495 * ql_get_dma_mem
15497 15496 * Function used to allocate dma memory.
15498 15497 *
15499 15498 * Input:
15500 15499 * ha: adapter state pointer.
15501 15500 * mem: pointer to dma memory object.
15502 15501 * size: size of the request in bytes
15503 15502 *
15504 15503 * Returns:
15505 15504 * qn local function return status code.
15506 15505 *
15507 15506 * Context:
15508 15507 * Kernel context.
15509 15508 */
15510 15509 int
15511 15510 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15512 15511 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15513 15512 {
15514 15513 int rval;
15515 15514
15516 15515 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15517 15516
15518 15517 mem->size = size;
15519 15518 mem->type = allocation_type;
15520 15519 mem->cookie_count = 1;
15521 15520
15522 15521 switch (alignment) {
15523 15522 case QL_DMA_DATA_ALIGN:
15524 15523 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15525 15524 break;
15526 15525 case QL_DMA_RING_ALIGN:
15527 15526 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15528 15527 break;
15529 15528 default:
15530 15529 EL(ha, "failed, unknown alignment type %x\n", alignment);
15531 15530 break;
15532 15531 }
15533 15532
15534 15533 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15535 15534 ql_free_phys(ha, mem);
15536 15535 EL(ha, "failed, alloc_phys=%xh\n", rval);
15537 15536 }
15538 15537
15539 15538 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15540 15539
15541 15540 return (rval);
15542 15541 }
15543 15542
15544 15543 /*
15545 15544 * ql_alloc_phys
15546 15545 * Function used to allocate memory and zero it.
15547 15546 * Memory is below 4 GB.
15548 15547 *
15549 15548 * Input:
15550 15549 * ha: adapter state pointer.
15551 15550 * mem: pointer to dma memory object.
15552 15551 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15553 15552 * mem->cookie_count number of segments allowed.
15554 15553 * mem->type memory allocation type.
15555 15554 * mem->size memory size.
15556 15555 * mem->alignment memory alignment.
15557 15556 *
15558 15557 * Returns:
15559 15558 * qn local function return status code.
15560 15559 *
15561 15560 * Context:
15562 15561 * Kernel context.
15563 15562 */
15564 15563 int
15565 15564 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15566 15565 {
15567 15566 size_t rlen;
15568 15567 ddi_dma_attr_t dma_attr;
15569 15568 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
15570 15569
15571 15570 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15572 15571
15573 15572 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15574 15573 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15575 15574
15576 15575 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15577 15576 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15578 15577
15579 15578 /*
15580 15579 * Workaround for SUN XMITS buffer must end and start on 8 byte
15581 15580 * boundary. Else, hardware will overrun the buffer. Simple fix is
15582 15581 * to make sure buffer has enough room for overrun.
15583 15582 */
15584 15583 if (mem->size & 7) {
15585 15584 mem->size += 8 - (mem->size & 7);
15586 15585 }
15587 15586
15588 15587 mem->flags = DDI_DMA_CONSISTENT;
15589 15588
15590 15589 /*
15591 15590 * Allocate DMA memory for command.
15592 15591 */
15593 15592 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15594 15593 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15595 15594 DDI_SUCCESS) {
15596 15595 EL(ha, "failed, ddi_dma_alloc_handle\n");
15597 15596 mem->dma_handle = NULL;
15598 15597 return (QL_MEMORY_ALLOC_FAILED);
15599 15598 }
15600 15599
15601 15600 switch (mem->type) {
15602 15601 case KERNEL_MEM:
15603 15602 mem->bp = kmem_zalloc(mem->size, sleep);
15604 15603 break;
15605 15604 case BIG_ENDIAN_DMA:
15606 15605 case LITTLE_ENDIAN_DMA:
15607 15606 case NO_SWAP_DMA:
15608 15607 if (mem->type == BIG_ENDIAN_DMA) {
15609 15608 acc_attr.devacc_attr_endian_flags =
15610 15609 DDI_STRUCTURE_BE_ACC;
15611 15610 } else if (mem->type == NO_SWAP_DMA) {
15612 15611 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15613 15612 }
15614 15613 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15615 15614 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15616 15615 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15617 15616 &mem->acc_handle) == DDI_SUCCESS) {
15618 15617 bzero(mem->bp, mem->size);
15619 15618 /* ensure we got what we asked for (32bit) */
15620 15619 if (dma_attr.dma_attr_addr_hi == NULL) {
15621 15620 if (mem->cookie.dmac_notused != NULL) {
15622 15621 EL(ha, "failed, ddi_dma_mem_alloc "
15623 15622 "returned 64 bit DMA address\n");
15624 15623 ql_free_phys(ha, mem);
15625 15624 return (QL_MEMORY_ALLOC_FAILED);
15626 15625 }
15627 15626 }
15628 15627 } else {
15629 15628 mem->acc_handle = NULL;
15630 15629 mem->bp = NULL;
15631 15630 }
15632 15631 break;
15633 15632 default:
15634 15633 EL(ha, "failed, unknown type=%xh\n", mem->type);
15635 15634 mem->acc_handle = NULL;
15636 15635 mem->bp = NULL;
15637 15636 break;
15638 15637 }
15639 15638
15640 15639 if (mem->bp == NULL) {
15641 15640 EL(ha, "failed, ddi_dma_mem_alloc\n");
15642 15641 ddi_dma_free_handle(&mem->dma_handle);
15643 15642 mem->dma_handle = NULL;
15644 15643 return (QL_MEMORY_ALLOC_FAILED);
15645 15644 }
15646 15645
15647 15646 mem->flags |= DDI_DMA_RDWR;
15648 15647
15649 15648 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15650 15649 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15651 15650 ql_free_phys(ha, mem);
15652 15651 return (QL_MEMORY_ALLOC_FAILED);
15653 15652 }
15654 15653
15655 15654 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15656 15655
15657 15656 return (QL_SUCCESS);
15658 15657 }
15659 15658
15660 15659 /*
15661 15660 * ql_free_phys
15662 15661 * Function used to free physical memory.
15663 15662 *
15664 15663 * Input:
15665 15664 * ha: adapter state pointer.
15666 15665 * mem: pointer to dma memory object.
15667 15666 *
15668 15667 * Context:
15669 15668 * Kernel context.
15670 15669 */
15671 15670 void
15672 15671 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15673 15672 {
15674 15673 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15675 15674
15676 15675 if (mem != NULL && mem->dma_handle != NULL) {
15677 15676 ql_unbind_dma_buffer(ha, mem);
15678 15677 switch (mem->type) {
15679 15678 case KERNEL_MEM:
15680 15679 if (mem->bp != NULL) {
15681 15680 kmem_free(mem->bp, mem->size);
15682 15681 }
15683 15682 break;
15684 15683 case LITTLE_ENDIAN_DMA:
15685 15684 case BIG_ENDIAN_DMA:
15686 15685 case NO_SWAP_DMA:
15687 15686 if (mem->acc_handle != NULL) {
15688 15687 ddi_dma_mem_free(&mem->acc_handle);
15689 15688 mem->acc_handle = NULL;
15690 15689 }
15691 15690 break;
15692 15691 default:
15693 15692 break;
15694 15693 }
15695 15694 mem->bp = NULL;
15696 15695 ddi_dma_free_handle(&mem->dma_handle);
15697 15696 mem->dma_handle = NULL;
15698 15697 }
15699 15698
15700 15699 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15701 15700 }
15702 15701
15703 15702 /*
15704 15703 * ql_alloc_dma_resouce.
15705 15704 * Allocates DMA resource for buffer.
15706 15705 *
15707 15706 * Input:
15708 15707 * ha: adapter state pointer.
15709 15708 * mem: pointer to dma memory object.
15710 15709 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15711 15710 * mem->cookie_count number of segments allowed.
15712 15711 * mem->type memory allocation type.
15713 15712 * mem->size memory size.
15714 15713 * mem->bp pointer to memory or struct buf
15715 15714 *
15716 15715 * Returns:
15717 15716 * qn local function return status code.
15718 15717 *
15719 15718 * Context:
15720 15719 * Kernel context.
15721 15720 */
15722 15721 int
15723 15722 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15724 15723 {
15725 15724 ddi_dma_attr_t dma_attr;
15726 15725
15727 15726 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15728 15727
15729 15728 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15730 15729 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15731 15730 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15732 15731
15733 15732 /*
15734 15733 * Allocate DMA handle for command.
15735 15734 */
15736 15735 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15737 15736 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15738 15737 DDI_SUCCESS) {
15739 15738 EL(ha, "failed, ddi_dma_alloc_handle\n");
15740 15739 mem->dma_handle = NULL;
15741 15740 return (QL_MEMORY_ALLOC_FAILED);
15742 15741 }
15743 15742
15744 15743 mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15745 15744
15746 15745 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15747 15746 EL(ha, "failed, bind_dma_buffer\n");
15748 15747 ddi_dma_free_handle(&mem->dma_handle);
15749 15748 mem->dma_handle = NULL;
15750 15749 return (QL_MEMORY_ALLOC_FAILED);
15751 15750 }
15752 15751
15753 15752 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15754 15753
15755 15754 return (QL_SUCCESS);
15756 15755 }
15757 15756
15758 15757 /*
15759 15758 * ql_free_dma_resource
15760 15759 * Frees DMA resources.
15761 15760 *
15762 15761 * Input:
15763 15762 * ha: adapter state pointer.
15764 15763 * mem: pointer to dma memory object.
15765 15764 * mem->dma_handle DMA memory handle.
15766 15765 *
15767 15766 * Context:
15768 15767 * Kernel context.
15769 15768 */
15770 15769 void
15771 15770 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15772 15771 {
15773 15772 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15774 15773
15775 15774 ql_free_phys(ha, mem);
15776 15775
15777 15776 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15778 15777 }
15779 15778
15780 15779 /*
15781 15780 * ql_bind_dma_buffer
15782 15781 * Binds DMA buffer.
15783 15782 *
15784 15783 * Input:
15785 15784 * ha: adapter state pointer.
15786 15785 * mem: pointer to dma memory object.
15787 15786 * sleep: KM_SLEEP or KM_NOSLEEP.
15788 15787 * mem->dma_handle DMA memory handle.
15789 15788 * mem->cookie_count number of segments allowed.
15790 15789 * mem->type memory allocation type.
15791 15790 * mem->size memory size.
15792 15791 * mem->bp pointer to memory or struct buf
15793 15792 *
15794 15793 * Returns:
15795 15794 * mem->cookies pointer to list of cookies.
15796 15795 * mem->cookie_count number of cookies.
15797 15796 * status success = DDI_DMA_MAPPED
15798 15797 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15799 15798 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15800 15799 * DDI_DMA_TOOBIG
15801 15800 *
15802 15801 * Context:
15803 15802 * Kernel context.
15804 15803 */
15805 15804 static int
15806 15805 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15807 15806 {
15808 15807 int rval;
15809 15808 ddi_dma_cookie_t *cookiep;
15810 15809 uint32_t cnt = mem->cookie_count;
15811 15810
15812 15811 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15813 15812
15814 15813 if (mem->type == STRUCT_BUF_MEMORY) {
15815 15814 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15816 15815 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15817 15816 DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15818 15817 } else {
15819 15818 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15820 15819 mem->size, mem->flags, (sleep == KM_SLEEP) ?
15821 15820 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15822 15821 &mem->cookie_count);
15823 15822 }
15824 15823
15825 15824 if (rval == DDI_DMA_MAPPED) {
15826 15825 if (mem->cookie_count > cnt) {
15827 15826 (void) ddi_dma_unbind_handle(mem->dma_handle);
15828 15827 EL(ha, "failed, cookie_count %d > %d\n",
15829 15828 mem->cookie_count, cnt);
15830 15829 rval = DDI_DMA_TOOBIG;
15831 15830 } else {
15832 15831 if (mem->cookie_count > 1) {
15833 15832 if (mem->cookies = kmem_zalloc(
15834 15833 sizeof (ddi_dma_cookie_t) *
15835 15834 mem->cookie_count, sleep)) {
15836 15835 *mem->cookies = mem->cookie;
15837 15836 cookiep = mem->cookies;
15838 15837 for (cnt = 1; cnt < mem->cookie_count;
15839 15838 cnt++) {
15840 15839 ddi_dma_nextcookie(
15841 15840 mem->dma_handle,
15842 15841 ++cookiep);
15843 15842 }
15844 15843 } else {
15845 15844 (void) ddi_dma_unbind_handle(
15846 15845 mem->dma_handle);
15847 15846 EL(ha, "failed, kmem_zalloc\n");
15848 15847 rval = DDI_DMA_NORESOURCES;
15849 15848 }
15850 15849 } else {
15851 15850 /*
15852 15851 * It has been reported that dmac_size at times
15853 15852 * may be incorrect on sparc machines so for
15854 15853 * sparc machines that only have one segment
15855 15854 * use the buffer size instead.
15856 15855 */
15857 15856 mem->cookies = &mem->cookie;
15858 15857 mem->cookies->dmac_size = mem->size;
15859 15858 }
15860 15859 }
15861 15860 }
15862 15861
15863 15862 if (rval != DDI_DMA_MAPPED) {
15864 15863 EL(ha, "failed=%xh\n", rval);
15865 15864 } else {
15866 15865 /*EMPTY*/
15867 15866 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15868 15867 }
15869 15868
15870 15869 return (rval);
15871 15870 }
15872 15871
15873 15872 /*
15874 15873 * ql_unbind_dma_buffer
15875 15874 * Unbinds DMA buffer.
15876 15875 *
15877 15876 * Input:
15878 15877 * ha: adapter state pointer.
15879 15878 * mem: pointer to dma memory object.
15880 15879 * mem->dma_handle DMA memory handle.
15881 15880 * mem->cookies pointer to cookie list.
15882 15881 * mem->cookie_count number of cookies.
15883 15882 *
15884 15883 * Context:
15885 15884 * Kernel context.
15886 15885 */
15887 15886 /* ARGSUSED */
15888 15887 static void
15889 15888 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15890 15889 {
15891 15890 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15892 15891
15893 15892 (void) ddi_dma_unbind_handle(mem->dma_handle);
15894 15893 if (mem->cookie_count > 1) {
15895 15894 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15896 15895 mem->cookie_count);
15897 15896 mem->cookies = NULL;
15898 15897 }
15899 15898 mem->cookie_count = 0;
15900 15899
15901 15900 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15902 15901 }
15903 15902
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: claims mailbox ownership, drains
 *	outstanding commands, halts the ISP, and disables interrupts.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS, or QL_FUNCTION_TIMEOUT if mailbox ownership could not
 *	be obtained within the timeout.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* Mailbox-wait timeout: 32 seconds, expressed in clock ticks. */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* Wait up to 32 seconds (timer) for the mailbox to free. */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag so no other thread can issue mailbox commands. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Drain all outstanding commands before halting. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15973 15972
15974 15973 /*
15975 15974 * ql_add_link_b
15976 15975 * Add link to the end of the chain.
15977 15976 *
15978 15977 * Input:
15979 15978 * head = Head of link list.
15980 15979 * link = link to be added.
15981 15980 * LOCK must be already obtained.
15982 15981 *
15983 15982 * Context:
15984 15983 * Interrupt or Kernel context, no mailbox commands allowed.
15985 15984 */
15986 15985 void
15987 15986 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15988 15987 {
15989 15988 /* at the end there isn't a next */
15990 15989 link->next = NULL;
15991 15990
15992 15991 if ((link->prev = head->last) == NULL) {
15993 15992 head->first = link;
15994 15993 } else {
15995 15994 head->last->next = link;
15996 15995 }
15997 15996
15998 15997 head->last = link;
15999 15998 link->head = head; /* the queue we're on */
16000 15999 }
16001 16000
16002 16001 /*
16003 16002 * ql_add_link_t
16004 16003 * Add link to the beginning of the chain.
16005 16004 *
16006 16005 * Input:
16007 16006 * head = Head of link list.
16008 16007 * link = link to be added.
16009 16008 * LOCK must be already obtained.
16010 16009 *
16011 16010 * Context:
16012 16011 * Interrupt or Kernel context, no mailbox commands allowed.
16013 16012 */
16014 16013 void
16015 16014 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16016 16015 {
16017 16016 link->prev = NULL;
16018 16017
16019 16018 if ((link->next = head->first) == NULL) {
16020 16019 head->last = link;
16021 16020 } else {
16022 16021 head->first->prev = link;
16023 16022 }
16024 16023
16025 16024 head->first = link;
16026 16025 link->head = head; /* the queue we're on */
16027 16026 }
16028 16027
16029 16028 /*
16030 16029 * ql_remove_link
16031 16030 * Remove a link from the chain.
16032 16031 *
16033 16032 * Input:
16034 16033 * head = Head of link list.
16035 16034 * link = link to be removed.
16036 16035 * LOCK must be already obtained.
16037 16036 *
16038 16037 * Context:
16039 16038 * Interrupt or Kernel context, no mailbox commands allowed.
16040 16039 */
16041 16040 void
16042 16041 ql_remove_link(ql_head_t *head, ql_link_t *link)
16043 16042 {
16044 16043 if (link->prev != NULL) {
16045 16044 if ((link->prev->next = link->next) == NULL) {
16046 16045 head->last = link->prev;
16047 16046 } else {
16048 16047 link->next->prev = link->prev;
16049 16048 }
16050 16049 } else if ((head->first = link->next) == NULL) {
16051 16050 head->last = NULL;
16052 16051 } else {
16053 16052 head->first->prev = NULL;
16054 16053 }
16055 16054
16056 16055 /* not on a queue any more */
16057 16056 link->prev = link->next = NULL;
16058 16057 link->head = NULL;
16059 16058 }
16060 16059
/*
 * ql_chg_endian
 *	Change endianness of byte array (reverses the bytes in place).
 *
 * Input:
 *	buf = array pointer.
 *	size = size of array in bytes.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo;
	size_t	hi;
	uint8_t	tmp;

	if (size == 0) {
		return;
	}

	/* Walk the two ends toward the middle, swapping as we go. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
16087 16086
/*
 * ql_bstr_to_dec
 *	Convert decimal byte string to number.
 *
 * Input:
 *	s:	byte string pointer.
 *	ans:	integer pointer for number.
 *	size:	number of ascii bytes, or 0 to derive it from the string.
 *
 * Returns:
 *	success = number of ascii bytes processed.
 *
 * Context:
 *	Kernel/Interrupt context.
 */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	uint32_t	len = size;
	int		processed = 0;

	/* When no length is given, count the leading decimal digits. */
	if (len == 0) {
		char	*p;

		for (p = s; *p >= '0' && *p <= '9'; p++) {
			len++;
		}
	}

	*ans = 0;
	while (*s != '\0' && len != 0) {
		int	digit;
		int	place;
		int	weight;

		if (*s < '0' || *s > '9') {
			/* Non-digit terminates the conversion. */
			break;
		}
		digit = *s++ - '0';

		/* weight = 10^(len - 1): positional value of this digit. */
		for (weight = 1, place = 1; place < (int)len; place++) {
			weight *= 10;
		}
		*ans += digit * weight;

		len--;
		processed++;
	}

	return (processed);
}
16132 16131
16133 16132 /*
16134 16133 * ql_delay
16135 16134 * Calls delay routine if threads are not suspended, otherwise, busy waits
16136 16135 * Minimum = 1 tick = 10ms
16137 16136 *
16138 16137 * Input:
16139 16138 * dly = delay time in microseconds.
16140 16139 *
16141 16140 * Context:
16142 16141 * Kernel or Interrupt context, no mailbox commands allowed.
16143 16142 */
16144 16143 void
16145 16144 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16146 16145 {
16147 16146 if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16148 16147 drv_usecwait(usecs);
16149 16148 } else {
16150 16149 delay(drv_usectohz(usecs));
16151 16150 }
16152 16151 }
16153 16152
16154 16153 /*
16155 16154 * ql_stall_drv
16156 16155 * Stalls one or all driver instances, waits for 30 seconds.
16157 16156 *
16158 16157 * Input:
16159 16158 * ha: adapter state pointer or NULL for all.
16160 16159 * options: BIT_0 --> leave driver stalled on exit if
16161 16160 * failed.
16162 16161 *
16163 16162 * Returns:
16164 16163 * ql local function return status code.
16165 16164 *
16166 16165 * Context:
16167 16166 * Kernel context.
16168 16167 */
16169 16168 int
16170 16169 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16171 16170 {
16172 16171 ql_link_t *link;
16173 16172 ql_adapter_state_t *ha2;
16174 16173 uint32_t timer;
16175 16174
16176 16175 QL_PRINT_3(CE_CONT, "started\n");
16177 16176
16178 16177 /* Wait for 30 seconds for daemons unstall. */
16179 16178 timer = 3000;
16180 16179 link = ha == NULL ? ql_hba.first : &ha->hba;
16181 16180 while (link != NULL && timer) {
16182 16181 ha2 = link->base_address;
16183 16182
16184 16183 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16185 16184
16186 16185 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16187 16186 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16188 16187 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16189 16188 ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16190 16189 link = ha == NULL ? link->next : NULL;
16191 16190 continue;
16192 16191 }
16193 16192
16194 16193 ql_delay(ha2, 10000);
16195 16194 timer--;
16196 16195 link = ha == NULL ? ql_hba.first : &ha->hba;
16197 16196 }
16198 16197
16199 16198 if (ha2 != NULL && timer == 0) {
16200 16199 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16201 16200 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16202 16201 "unstalled"));
16203 16202 if (options & BIT_0) {
16204 16203 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16205 16204 }
16206 16205 return (QL_FUNCTION_TIMEOUT);
16207 16206 }
16208 16207
16209 16208 QL_PRINT_3(CE_CONT, "done\n");
16210 16209
16211 16210 return (QL_SUCCESS);
16212 16211 }
16213 16212
/*
 * ql_restart_driver
 *	Restarts one or all driver instances.
 *
 * Input:
 *	ha:	adapter state pointer or NULL for all.
 *
 * Context:
 *	Kernel context.
 */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall (3000 x 10ms). */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * Instance has restarted when its daemon is gone, stopping,
		 * or no longer flagged as stalled.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		ql_delay(ha2, 10000);
		timer--;
		/* Restart the scan from the beginning of the list. */
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
16269 16268
16270 16269 /*
16271 16270 * ql_setup_interrupts
16272 16271 * Sets up interrupts based on the HBA's and platform's
16273 16272 * capabilities (e.g., legacy / MSI / FIXED).
16274 16273 *
16275 16274 * Input:
16276 16275 * ha = adapter state pointer.
16277 16276 *
16278 16277 * Returns:
16279 16278 * DDI_SUCCESS or DDI_FAILURE.
16280 16279 *
16281 16280 * Context:
16282 16281 * Kernel context.
16283 16282 */
16284 16283 static int
16285 16284 ql_setup_interrupts(ql_adapter_state_t *ha)
16286 16285 {
16287 16286 int32_t rval = DDI_FAILURE;
16288 16287 int32_t i;
16289 16288 int32_t itypes = 0;
16290 16289
16291 16290 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16292 16291
16293 16292 /*
16294 16293 * The Solaris Advanced Interrupt Functions (aif) are only
16295 16294 * supported on s10U1 or greater.
16296 16295 */
16297 16296 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16298 16297 EL(ha, "interrupt framework is not supported or is "
16299 16298 "disabled, using legacy\n");
16300 16299 return (ql_legacy_intr(ha));
16301 16300 } else if (ql_os_release_level == 10) {
16302 16301 /*
16303 16302 * See if the advanced interrupt functions (aif) are
16304 16303 * in the kernel
16305 16304 */
16306 16305 void *fptr = (void *)&ddi_intr_get_supported_types;
16307 16306
16308 16307 if (fptr == NULL) {
16309 16308 EL(ha, "aif is not supported, using legacy "
16310 16309 "interrupts (rev)\n");
16311 16310 return (ql_legacy_intr(ha));
16312 16311 }
16313 16312 }
16314 16313
16315 16314 /* See what types of interrupts this HBA and platform support */
16316 16315 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16317 16316 DDI_SUCCESS) {
16318 16317 EL(ha, "get supported types failed, rval=%xh, "
16319 16318 "assuming FIXED\n", i);
16320 16319 itypes = DDI_INTR_TYPE_FIXED;
16321 16320 }
16322 16321
16323 16322 EL(ha, "supported types are: %xh\n", itypes);
16324 16323
16325 16324 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16326 16325 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16327 16326 EL(ha, "successful MSI-X setup\n");
16328 16327 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16329 16328 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16330 16329 EL(ha, "successful MSI setup\n");
16331 16330 } else {
16332 16331 rval = ql_setup_fixed(ha);
16333 16332 }
16334 16333
16335 16334 if (rval != DDI_SUCCESS) {
16336 16335 EL(ha, "failed, aif, rval=%xh\n", rval);
16337 16336 } else {
16338 16337 /*EMPTY*/
16339 16338 QL_PRINT_3(CE_CONT, "(%d): done\n");
16340 16339 }
16341 16340
16342 16341 return (rval);
16343 16342 }
16344 16343
/*
 * ql_setup_msi
 *	Set up aif MSI interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {{NULL}};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1. */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Flag MSI mode before alloc so ql_release_intr cleans up on error. */
	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes (needs intr_pri, so must follow get_pri). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16464 16463
/*
 * ql_setup_msix
 *	Set up aif MSI-X interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {{NULL}};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
	    (ha->device_id == 0x8021))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from the
	 * MSI-X capability's table-size field in PCI config space.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table: one vector for AIF, one for resp queue. */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles (one per h/w vector). */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Flag MSI-X mode before alloc so ql_release_intr cleans up. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (needs intr_pri, so must follow get_pri). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16644 16643
/*
 * ql_setup_fixed
 *	Sets up aif FIXED interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Flag FIXED mode before alloc so ql_release_intr cleans up. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts (STRICT: all-or-nothing). */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes (needs intr_pri, so must follow get_pri). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16734 16733
/*
 * ql_disable_intr
 *	Disables interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			/* Log and continue; nothing more we can do here. */
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed). */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16782 16781
/*
 * ql_release_intr
 *	Releases aif legacy interrupt resources
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to release when AIF was never set up. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk the handle table backwards, freeing each entry. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				/* Slot was never allocated; skip it. */
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Handlers were only added for the first intr_cnt
			 * entries; the rest (sparc dup'd vectors) have none.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16836 16835
16837 16836 /*
16838 16837 * ql_legacy_intr
16839 16838 * Sets up legacy interrupts.
16840 16839 *
16841 16840 * NB: Only to be used if AIF (Advanced Interrupt Framework)
16842 16841 * is NOT in the kernel.
16843 16842 *
16844 16843 * Input:
16845 16844 * ha = adapter state pointer.
16846 16845 *
16847 16846 * Returns:
16848 16847 * DDI_SUCCESS or DDI_FAILURE.
16849 16848 *
16850 16849 * Context:
16851 16850 * Kernel context.
16852 16851 */
16853 16852 static int
16854 16853 ql_legacy_intr(ql_adapter_state_t *ha)
16855 16854 {
16856 16855 int rval = DDI_SUCCESS;
16857 16856
16858 16857 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16859 16858
16860 16859 /* Setup mutexes */
16861 16860 if (ql_init_mutex(ha) != DDI_SUCCESS) {
16862 16861 EL(ha, "failed, mutex init\n");
16863 16862 return (DDI_FAILURE);
16864 16863 }
16865 16864
16866 16865 /* Setup standard/legacy interrupt handler */
16867 16866 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16868 16867 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16869 16868 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16870 16869 QL_NAME, ha->instance);
16871 16870 ql_destroy_mutex(ha);
16872 16871 rval = DDI_FAILURE;
16873 16872 }
16874 16873
16875 16874 if (rval == DDI_SUCCESS) {
16876 16875 ha->iflags |= IFLG_INTR_LEGACY;
16877 16876 EL(ha, "using legacy interrupts\n");
16878 16877 }
16879 16878
16880 16879 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16881 16880
16882 16881 return (rval);
16883 16882 }
16884 16883
16885 16884 /*
16886 16885 * ql_init_mutex
16887 16886 * Initializes mutex's
16888 16887 *
16889 16888 * Input:
16890 16889 * ha = adapter state pointer.
16891 16890 *
16892 16891 * Returns:
16893 16892 * DDI_SUCCESS or DDI_FAILURE.
16894 16893 *
16895 16894 * Context:
16896 16895 * Kernel context.
16897 16896 */
16898 16897 static int
16899 16898 ql_init_mutex(ql_adapter_state_t *ha)
16900 16899 {
16901 16900 int ret;
16902 16901 void *intr;
16903 16902
16904 16903 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16905 16904
16906 16905 if (ha->iflags & IFLG_INTR_AIF) {
16907 16906 intr = (void *)(uintptr_t)ha->intr_pri;
16908 16907 } else {
16909 16908 /* Get iblock cookies to initialize mutexes */
16910 16909 if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16911 16910 &ha->iblock_cookie)) != DDI_SUCCESS) {
16912 16911 EL(ha, "failed, get_iblock: %xh\n", ret);
16913 16912 return (DDI_FAILURE);
16914 16913 }
16915 16914 intr = (void *)ha->iblock_cookie;
16916 16915 }
16917 16916
16918 16917 /* mutexes to protect the adapter state structure. */
16919 16918 mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16920 16919
16921 16920 /* mutex to protect the ISP response ring. */
16922 16921 mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16923 16922
16924 16923 /* mutex to protect the mailbox registers. */
16925 16924 mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16926 16925
16927 16926 /* power management protection */
16928 16927 mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16929 16928
16930 16929 /* Mailbox wait and interrupt conditional variable. */
16931 16930 cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16932 16931 cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16933 16932
16934 16933 /* mutex to protect the ISP request ring. */
16935 16934 mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16936 16935
16937 16936 /* Unsolicited buffer conditional variable. */
16938 16937 cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16939 16938
16940 16939 mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16941 16940 mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16942 16941
16943 16942 /* Suspended conditional variable. */
16944 16943 cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16945 16944
16946 16945 /* mutex to protect task daemon context. */
16947 16946 mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16948 16947
16949 16948 /* Task_daemon thread conditional variable. */
16950 16949 cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16951 16950
16952 16951 /* mutex to protect diag port manage interface */
16953 16952 mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16954 16953
16955 16954 /* mutex to protect per instance f/w dump flags and buffer */
16956 16955 mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16957 16956
16958 16957 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16959 16958
16960 16959 return (DDI_SUCCESS);
16961 16960 }
16962 16961
16963 16962 /*
16964 16963 * ql_destroy_mutex
16965 16964 * Destroys mutex's
16966 16965 *
16967 16966 * Input:
16968 16967 * ha = adapter state pointer.
16969 16968 *
16970 16969 * Returns:
16971 16970 *
16972 16971 * Context:
16973 16972 * Kernel context.
16974 16973 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Tear down in the exact reverse order of ql_init_mutex() so
	 * this function stays a mirror of construction.
	 */
	mutex_destroy(&ha->dump_mutex);		/* f/w dump flags/buffer */
	mutex_destroy(&ha->portmutex);		/* diag port manage i/f */
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);	/* task daemon context */
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);		/* unsolicited buffers */
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);	/* ISP request ring */
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);		/* power management */
	mutex_destroy(&ha->mbx_mutex);		/* mailbox registers */
	mutex_destroy(&ha->intr_mutex);		/* ISP response ring */
	mutex_destroy(&ha->mutex);		/* adapter state */

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16998 16997
16999 16998 /*
17000 16999 * ql_fwmodule_resolve
17001 17000 * Loads and resolves external firmware module and symbols
17002 17001 *
17003 17002 * Input:
17004 17003 * ha: adapter state pointer.
17005 17004 *
17006 17005 * Returns:
17007 17006 * ql local function return status code:
17008 17007 * QL_SUCCESS - external f/w module module and symbols resolved
17009 17008 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17010 17009 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17011 17010 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17012 17011 * Context:
17013 17012 * Kernel context.
17014 17013 *
17015 17014 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
17016 17015 * could switch to a tighter scope around actual download (and add an extra
17017 17016 * ddi_modopen for module opens that occur before root is mounted).
17018 17017 *
17019 17018 */
17020 17019 uint32_t
17021 17020 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17022 17021 {
17023 17022 int8_t module[128];
17024 17023 int8_t fw_version[128];
17025 17024 uint32_t rval = QL_SUCCESS;
17026 17025 caddr_t code, code02;
17027 17026 uint8_t *p_ucfw;
17028 17027 uint16_t *p_usaddr, *p_uslen;
17029 17028 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
17030 17029 uint32_t *p_uiaddr02, *p_uilen02;
17031 17030 struct fw_table *fwt;
17032 17031 extern struct fw_table fw_table[];
17033 17032
17034 17033 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17035 17034
17036 17035 if (ha->fw_module != NULL) {
17037 17036 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17038 17037 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17039 17038 ha->fw_subminor_version);
17040 17039 return (rval);
17041 17040 }
17042 17041
17043 17042 /* make sure the fw_class is in the fw_table of supported classes */
17044 17043 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17045 17044 if (fwt->fw_class == ha->fw_class)
17046 17045 break; /* match */
17047 17046 }
17048 17047 if (fwt->fw_version == NULL) {
17049 17048 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17050 17049 "in driver's fw_table", QL_NAME, ha->instance,
17051 17050 ha->fw_class);
17052 17051 return (QL_FW_NOT_SUPPORTED);
17053 17052 }
17054 17053
17055 17054 /*
17056 17055 * open the module related to the fw_class
17057 17056 */
17058 17057 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17059 17058 ha->fw_class);
17060 17059
17061 17060 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17062 17061 if (ha->fw_module == NULL) {
17063 17062 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17064 17063 QL_NAME, ha->instance, module);
17065 17064 return (QL_FWMODLOAD_FAILED);
17066 17065 }
17067 17066
17068 17067 /*
17069 17068 * resolve the fw module symbols, data types depend on fw_class
17070 17069 */
17071 17070
17072 17071 switch (ha->fw_class) {
17073 17072 case 0x2200:
17074 17073 case 0x2300:
17075 17074 case 0x6322:
17076 17075
17077 17076 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17078 17077 NULL)) == NULL) {
17079 17078 rval = QL_FWSYM_NOT_FOUND;
17080 17079 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17081 17080 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17082 17081 "risc_code_addr01", NULL)) == NULL) {
17083 17082 rval = QL_FWSYM_NOT_FOUND;
17084 17083 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17085 17084 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17086 17085 "risc_code_length01", NULL)) == NULL) {
17087 17086 rval = QL_FWSYM_NOT_FOUND;
17088 17087 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17089 17088 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17090 17089 "firmware_version", NULL)) == NULL) {
17091 17090 rval = QL_FWSYM_NOT_FOUND;
17092 17091 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17093 17092 }
17094 17093
17095 17094 if (rval == QL_SUCCESS) {
17096 17095 ha->risc_fw[0].code = code;
17097 17096 ha->risc_fw[0].addr = *p_usaddr;
17098 17097 ha->risc_fw[0].length = *p_uslen;
17099 17098
17100 17099 (void) snprintf(fw_version, sizeof (fw_version),
17101 17100 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17102 17101 }
17103 17102 break;
17104 17103
17105 17104 case 0x2400:
17106 17105 case 0x2500:
17107 17106 case 0x8100:
17108 17107
17109 17108 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17110 17109 NULL)) == NULL) {
17111 17110 rval = QL_FWSYM_NOT_FOUND;
17112 17111 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17113 17112 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17114 17113 "risc_code_addr01", NULL)) == NULL) {
17115 17114 rval = QL_FWSYM_NOT_FOUND;
17116 17115 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17117 17116 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17118 17117 "risc_code_length01", NULL)) == NULL) {
17119 17118 rval = QL_FWSYM_NOT_FOUND;
17120 17119 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17121 17120 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17122 17121 "firmware_version", NULL)) == NULL) {
17123 17122 rval = QL_FWSYM_NOT_FOUND;
17124 17123 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17125 17124 }
17126 17125
17127 17126 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17128 17127 NULL)) == NULL) {
17129 17128 rval = QL_FWSYM_NOT_FOUND;
17130 17129 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17131 17130 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17132 17131 "risc_code_addr02", NULL)) == NULL) {
17133 17132 rval = QL_FWSYM_NOT_FOUND;
17134 17133 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17135 17134 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17136 17135 "risc_code_length02", NULL)) == NULL) {
17137 17136 rval = QL_FWSYM_NOT_FOUND;
17138 17137 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17139 17138 }
17140 17139
17141 17140 if (rval == QL_SUCCESS) {
17142 17141 ha->risc_fw[0].code = code;
17143 17142 ha->risc_fw[0].addr = *p_uiaddr;
17144 17143 ha->risc_fw[0].length = *p_uilen;
17145 17144 ha->risc_fw[1].code = code02;
17146 17145 ha->risc_fw[1].addr = *p_uiaddr02;
17147 17146 ha->risc_fw[1].length = *p_uilen02;
17148 17147
17149 17148 (void) snprintf(fw_version, sizeof (fw_version),
17150 17149 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17151 17150 }
17152 17151 break;
17153 17152
17154 17153 default:
17155 17154 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17156 17155 rval = QL_FW_NOT_SUPPORTED;
17157 17156 }
17158 17157
17159 17158 if (rval != QL_SUCCESS) {
17160 17159 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17161 17160 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17162 17161 if (ha->fw_module != NULL) {
17163 17162 (void) ddi_modclose(ha->fw_module);
17164 17163 ha->fw_module = NULL;
17165 17164 }
17166 17165 } else {
17167 17166 /*
17168 17167 * check for firmware version mismatch between module and
17169 17168 * compiled in fw_table version.
17170 17169 */
17171 17170
17172 17171 if (strcmp(fwt->fw_version, fw_version) != 0) {
17173 17172
17174 17173 /*
17175 17174 * If f/w / driver version mismatches then
17176 17175 * return a successful status -- however warn
17177 17176 * the user that this is NOT recommended.
17178 17177 */
17179 17178
17180 17179 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17181 17180 "mismatch for %x: driver-%s module-%s", QL_NAME,
17182 17181 ha->instance, ha->fw_class, fwt->fw_version,
17183 17182 fw_version);
17184 17183
17185 17184 ha->cfg_flags |= CFG_FW_MISMATCH;
17186 17185 } else {
17187 17186 ha->cfg_flags &= ~CFG_FW_MISMATCH;
17188 17187 }
17189 17188 }
17190 17189
17191 17190 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17192 17191
17193 17192 return (rval);
17194 17193 }
17195 17194
17196 17195 /*
17197 17196 * ql_port_state
17198 17197 * Set the state on all adapter ports.
17199 17198 *
17200 17199 * Input:
17201 17200 * ha: parent adapter state pointer.
17202 17201 * state: port state.
17203 17202 * flags: task daemon flags to set.
17204 17203 *
17205 17204 * Context:
17206 17205 * Interrupt or Kernel context, no mailbox commands allowed.
17207 17206 */
17208 17207 void
17209 17208 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17210 17209 {
17211 17210 ql_adapter_state_t *vha;
17212 17211
17213 17212 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17214 17213
17215 17214 TASK_DAEMON_LOCK(ha);
17216 17215 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17217 17216 if (FC_PORT_STATE_MASK(vha->state) != state) {
17218 17217 vha->state = state != FC_STATE_OFFLINE ?
17219 17218 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17220 17219 vha->task_daemon_flags |= flags;
17221 17220 }
17222 17221 }
17223 17222 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17224 17223 TASK_DAEMON_UNLOCK(ha);
17225 17224
17226 17225 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17227 17226 }
17228 17227
17229 17228 /*
17230 17229 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17231 17230 *
17232 17231 * Input: Pointer to the adapter state structure.
17233 17232 * Returns: Success or Failure.
17234 17233 * Context: Kernel context.
17235 17234 */
17236 17235 int
17237 17236 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17238 17237 {
17239 17238 int rval = DDI_SUCCESS;
17240 17239
17241 17240 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17242 17241
17243 17242 ha->el_trace_desc =
17244 17243 (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17245 17244
17246 17245 if (ha->el_trace_desc == NULL) {
17247 17246 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17248 17247 QL_NAME, ha->instance);
17249 17248 rval = DDI_FAILURE;
17250 17249 } else {
17251 17250 ha->el_trace_desc->next = 0;
17252 17251 ha->el_trace_desc->trace_buffer =
17253 17252 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17254 17253
17255 17254 if (ha->el_trace_desc->trace_buffer == NULL) {
17256 17255 cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17257 17256 QL_NAME, ha->instance);
17258 17257 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17259 17258 rval = DDI_FAILURE;
17260 17259 } else {
17261 17260 ha->el_trace_desc->trace_buffer_size =
17262 17261 EL_TRACE_BUF_SIZE;
17263 17262 mutex_init(&ha->el_trace_desc->mutex, NULL,
17264 17263 MUTEX_DRIVER, NULL);
17265 17264 }
17266 17265 }
17267 17266
17268 17267 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17269 17268
17270 17269 return (rval);
17271 17270 }
17272 17271
17273 17272 /*
17274 17273 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17275 17274 *
17276 17275 * Input: Pointer to the adapter state structure.
17277 17276 * Returns: Success or Failure.
17278 17277 * Context: Kernel context.
17279 17278 */
17280 17279 int
17281 17280 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17282 17281 {
17283 17282 int rval = DDI_SUCCESS;
17284 17283
17285 17284 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17286 17285
17287 17286 if (ha->el_trace_desc == NULL) {
17288 17287 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17289 17288 QL_NAME, ha->instance);
17290 17289 rval = DDI_FAILURE;
17291 17290 } else {
17292 17291 if (ha->el_trace_desc->trace_buffer != NULL) {
17293 17292 kmem_free(ha->el_trace_desc->trace_buffer,
17294 17293 ha->el_trace_desc->trace_buffer_size);
17295 17294 }
17296 17295 mutex_destroy(&ha->el_trace_desc->mutex);
17297 17296 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17298 17297 }
17299 17298
17300 17299 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17301 17300
17302 17301 return (rval);
17303 17302 }
17304 17303
17305 17304 /*
17306 17305 * els_cmd_text - Return a pointer to a string describing the command
17307 17306 *
17308 17307 * Input: els_cmd = the els command opcode.
17309 17308 * Returns: pointer to a string.
17310 17309 * Context: Kernel context.
17311 17310 */
17312 17311 char *
17313 17312 els_cmd_text(int els_cmd)
17314 17313 {
17315 17314 cmd_table_t *entry = &els_cmd_tbl[0];
17316 17315
17317 17316 return (cmd_text(entry, els_cmd));
17318 17317 }
17319 17318
17320 17319 /*
17321 17320 * mbx_cmd_text - Return a pointer to a string describing the command
17322 17321 *
17323 17322 * Input: mbx_cmd = the mailbox command opcode.
17324 17323 * Returns: pointer to a string.
17325 17324 * Context: Kernel context.
17326 17325 */
17327 17326 char *
17328 17327 mbx_cmd_text(int mbx_cmd)
17329 17328 {
17330 17329 cmd_table_t *entry = &mbox_cmd_tbl[0];
17331 17330
17332 17331 return (cmd_text(entry, mbx_cmd));
17333 17332 }
17334 17333
17335 17334 /*
17336 17335 * cmd_text Return a pointer to a string describing the command
17337 17336 *
17338 17337 * Input: entry = the command table
17339 17338 * cmd = the command.
17340 17339 * Returns: pointer to a string.
17341 17340 * Context: Kernel context.
17342 17341 */
17343 17342 char *
17344 17343 cmd_text(cmd_table_t *entry, int cmd)
17345 17344 {
17346 17345 for (; entry->cmd != 0; entry++) {
17347 17346 if (entry->cmd == cmd) {
17348 17347 break;
17349 17348 }
17350 17349 }
17351 17350 return (entry->string);
17352 17351 }
17353 17352
17354 17353 /*
17355 17354 * ql_els_24xx_mbox_cmd_iocb - els request indication.
17356 17355 *
17357 17356 * Input: ha = adapter state pointer.
17358 17357 * srb = scsi request block pointer.
17359 17358 * arg = els passthru entry iocb pointer.
17360 17359 * Returns:
17361 17360 * Context: Kernel context.
17362 17361 */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t	els_desc;

	/* Extract the ELS information from the fc packet into els_desc. */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry in the caller-supplied IOCB (arg). */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Ensure the command payload is in big endian (wire) order. */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
17377 17376
17378 17377 /*
17379 17378 * ql_fca_isp_els_request - Extract into an els descriptor the info required
17380 17379 * to build an els_passthru iocb from an fc packet.
17381 17380 *
17382 17381 * Input: ha = adapter state pointer.
17383 17382 * pkt = fc packet pointer
17384 17383 * els_desc = els descriptor pointer
17385 17384 * Returns:
17386 17385 * Context: Kernel context.
17387 17386 */
17388 17387 static void
17389 17388 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17390 17389 els_descriptor_t *els_desc)
17391 17390 {
17392 17391 ls_code_t els;
17393 17392
17394 17393 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17395 17394 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17396 17395
17397 17396 els_desc->els = els.ls_code;
17398 17397
17399 17398 els_desc->els_handle = ha->hba_buf.acc_handle;
17400 17399 els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17401 17400 els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
17402 17401 /* if n_port_handle is not < 0x7d use 0 */
17403 17402 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17404 17403 els_desc->n_port_handle = ha->n_port->n_port_handle;
17405 17404 } else {
17406 17405 els_desc->n_port_handle = 0;
17407 17406 }
17408 17407 els_desc->control_flags = 0;
17409 17408 els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17410 17409 /*
17411 17410 * Transmit DSD. This field defines the Fibre Channel Frame payload
17412 17411 * (without the frame header) in system memory.
17413 17412 */
17414 17413 els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17415 17414 els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17416 17415 els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17417 17416
17418 17417 els_desc->rsp_byte_count = pkt->pkt_rsplen;
17419 17418 /*
17420 17419 * Receive DSD. This field defines the ELS response payload buffer
17421 17420 * for the ISP24xx firmware transferring the received ELS
17422 17421 * response frame to a location in host memory.
17423 17422 */
17424 17423 els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
17425 17424 els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
17426 17425 els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
17427 17426 }
17428 17427
17429 17428 /*
17430 17429 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17431 17430 * using the els descriptor.
17432 17431 *
17433 17432 * Input: ha = adapter state pointer.
17434 17433 * els_desc = els descriptor pointer.
17435 17434 * els_entry = els passthru entry iocb pointer.
17436 17435 * Returns:
17437 17436 * Context: Kernel context.
17438 17437 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.
	 *
	 * All stores go through ddi_putX() with els_handle so the entry
	 * is written in the DMA access-handle's byte order.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination port id, one byte field per 8-bit slice. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	/* Source port id, same layout as above. */
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/*
	 * Load transmit data segments and count.
	 *
	 * NOTE(review): ptr32 starts at xmt_dseg_0_address and keeps
	 * advancing straight into the receive DSD words below -- this
	 * assumes the xmt and rcv DSD fields are laid out contiguously
	 * in els_passthru_entry_t; confirm against ql_iocb.h if the
	 * structure ever changes.
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	/* Receive data segment and count. */
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17486 17485
17487 17486 /*
17488 17487 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17489 17488 * in host memory.
17490 17489 *
17491 17490 * Input: ha = adapter state pointer.
17492 17491 * srb = scsi request block
17493 17492 * Returns:
17494 17493 * Context: Kernel context.
17495 17494 */
17496 17495 void
17497 17496 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17498 17497 {
17499 17498 ls_code_t els;
17500 17499 fc_packet_t *pkt;
17501 17500 uint8_t *ptr;
17502 17501
17503 17502 pkt = srb->pkt;
17504 17503
17505 17504 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17506 17505 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17507 17506
17508 17507 ptr = (uint8_t *)pkt->pkt_cmd;
17509 17508
17510 17509 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17511 17510 }
17512 17511
17513 17512 /*
17514 17513 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17515 17514 * in host memory.
17516 17515 * Input: ha = adapter state pointer.
17517 17516 * srb = scsi request block
17518 17517 * Returns:
17519 17518 * Context: Kernel context.
17520 17519 */
17521 17520 void
17522 17521 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17523 17522 {
17524 17523 ls_code_t els;
17525 17524 fc_packet_t *pkt;
17526 17525 uint8_t *ptr;
17527 17526
17528 17527 pkt = srb->pkt;
17529 17528
17530 17529 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17531 17530 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17532 17531
17533 17532 ptr = (uint8_t *)pkt->pkt_resp;
17534 17533 BIG_ENDIAN_32(&els);
17535 17534 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17536 17535 }
17537 17536
17538 17537 /*
17539 17538 * ql_isp_els_handle_endian - els requests/responses must be in big endian
17540 17539 * in host memory.
17541 17540 * Input: ha = adapter state pointer.
17542 17541 * ptr = els request/response buffer pointer.
17543 17542 * ls_code = els command code.
17544 17543 * Returns:
17545 17544 * Context: Kernel context.
17546 17545 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field by field, byte-swapping each multi-byte
	 * field in place with the BIG_ENDIAN_16/32 macros and advancing
	 * ptr by the field width.  The per-line comments name the field
	 * at the CURRENT ptr position; a bare `ptr += n` skips a field
	 * that needs no swapping (byte arrays such as names/classes).
	 * Offsets follow the standard PLOGI/PRLI payload layouts --
	 * confirm against FC-LS if a new ls_code is ever added here.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Unknown ELS: leave the payload untouched, log it. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17605 17604
17606 17605 /*
17607 17606 * ql_n_port_plogi
17608 17607 * In N port 2 N port topology where an N Port has logged in with the
17609 17608 * firmware because it has the N_Port login initiative, we send up
17610 17609 * a plogi by proxy which stimulates the login procedure to continue.
17611 17610 *
17612 17611 * Input:
17613 17612 * ha = adapter state pointer.
17614 17613 * Returns:
17615 17614 *
17616 17615 * Context:
17617 17616 * Kernel context.
17618 17617 */
17619 17618 static int
17620 17619 ql_n_port_plogi(ql_adapter_state_t *ha)
17621 17620 {
17622 17621 int rval;
17623 17622 ql_tgt_t *tq;
17624 17623 ql_head_t done_q = { NULL, NULL };
17625 17624
17626 17625 rval = QL_SUCCESS;
17627 17626
17628 17627 if (ha->topology & QL_N_PORT) {
17629 17628 /* if we're doing this the n_port_handle must be good */
17630 17629 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17631 17630 tq = ql_loop_id_to_queue(ha,
17632 17631 ha->n_port->n_port_handle);
17633 17632 if (tq != NULL) {
17634 17633 (void) ql_send_plogi(ha, tq, &done_q);
17635 17634 } else {
17636 17635 EL(ha, "n_port_handle = %x, tq = %x\n",
17637 17636 ha->n_port->n_port_handle, tq);
17638 17637 }
17639 17638 } else {
17640 17639 EL(ha, "n_port_handle = %x, tq = %x\n",
17641 17640 ha->n_port->n_port_handle, tq);
17642 17641 }
17643 17642 if (done_q.first != NULL) {
17644 17643 ql_done(done_q.first);
17645 17644 }
17646 17645 }
17647 17646 return (rval);
17648 17647 }
17649 17648
17650 17649 /*
17651 17650 * Compare two WWNs. The NAA is omitted for comparison.
17652 17651 *
17653 17652 * Note particularly that the indentation used in this
17654 17653 * function isn't according to Sun recommendations. It
17655 17654 * is indented to make reading a bit easy.
17656 17655 *
17657 17656 * Return Values:
17658 17657 * if first == second return 0
17659 17658 * if first > second return 1
17660 17659 * if first < second return -1
17661 17660 */
17662 17661 int
17663 17662 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17664 17663 {
17665 17664 la_wwn_t t1, t2;
17666 17665 int rval;
17667 17666
17668 17667 EL(ha, "WWPN=%08x%08x\n",
17669 17668 BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17670 17669 EL(ha, "WWPN=%08x%08x\n",
17671 17670 BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17672 17671 /*
17673 17672 * Fibre Channel protocol is big endian, so compare
17674 17673 * as big endian values
17675 17674 */
17676 17675 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17677 17676 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17678 17677
17679 17678 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17680 17679 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17681 17680
17682 17681 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17683 17682 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17684 17683 rval = 0;
17685 17684 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17686 17685 rval = 1;
17687 17686 } else {
17688 17687 rval = -1;
17689 17688 }
17690 17689 } else {
17691 17690 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17692 17691 rval = 1;
17693 17692 } else {
17694 17693 rval = -1;
17695 17694 }
17696 17695 }
17697 17696 return (rval);
17698 17697 }
17699 17698
17700 17699 /*
17701 17700 * ql_wait_for_td_stop
17702 17701 * Wait for task daemon to stop running. Internal command timeout
17703 17702 * is approximately 30 seconds, so it may help in some corner
17704 17703 * cases to wait that long
17705 17704 *
17706 17705 * Input:
17707 17706 * ha = adapter state pointer.
17708 17707 *
17709 17708 * Returns:
17710 17709 * DDI_SUCCESS or DDI_FAILURE.
17711 17710 *
17712 17711 * Context:
17713 17712 * Kernel context.
17714 17713 */
17715 17714
17716 17715 static int
17717 17716 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17718 17717 {
17719 17718 int rval = DDI_FAILURE;
17720 17719 UINT16 wait_cnt;
17721 17720
17722 17721 for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17723 17722 /* The task daemon clears the stop flag on exit. */
17724 17723 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17725 17724 if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17726 17725 ddi_in_panic()) {
17727 17726 drv_usecwait(10000);
17728 17727 } else {
17729 17728 delay(drv_usectohz(10000));
17730 17729 }
17731 17730 } else {
17732 17731 rval = DDI_SUCCESS;
17733 17732 break;
17734 17733 }
17735 17734 }
17736 17735 return (rval);
17737 17736 }
17738 17737
17739 17738 /*
17740 17739 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17741 17740 *
17742 17741 * Input: Pointer to the adapter state structure.
17743 17742 * Returns: Success or Failure.
17744 17743 * Context: Kernel context.
17745 17744 */
17746 17745 int
17747 17746 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17748 17747 {
17749 17748 int rval = DDI_SUCCESS;
17750 17749
17751 17750 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17752 17751
17753 17752 ha->nvram_cache =
17754 17753 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17755 17754 KM_SLEEP);
17756 17755
17757 17756 if (ha->nvram_cache == NULL) {
17758 17757 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17759 17758 " descriptor", QL_NAME, ha->instance);
17760 17759 rval = DDI_FAILURE;
17761 17760 } else {
17762 17761 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17763 17762 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17764 17763 } else {
17765 17764 ha->nvram_cache->size = sizeof (nvram_t);
17766 17765 }
17767 17766 ha->nvram_cache->cache =
17768 17767 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17769 17768 if (ha->nvram_cache->cache == NULL) {
17770 17769 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17771 17770 QL_NAME, ha->instance);
17772 17771 kmem_free(ha->nvram_cache,
17773 17772 sizeof (nvram_cache_desc_t));
17774 17773 ha->nvram_cache = 0;
17775 17774 rval = DDI_FAILURE;
17776 17775 } else {
17777 17776 mutex_init(&ha->nvram_cache->mutex, NULL,
17778 17777 MUTEX_DRIVER, NULL);
17779 17778 ha->nvram_cache->valid = 0;
17780 17779 }
17781 17780 }
17782 17781
17783 17782 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17784 17783
17785 17784 return (rval);
17786 17785 }
17787 17786
17788 17787 /*
17789 17788 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17790 17789 *
17791 17790 * Input: Pointer to the adapter state structure.
17792 17791 * Returns: Success or Failure.
17793 17792 * Context: Kernel context.
17794 17793 */
17795 17794 int
17796 17795 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17797 17796 {
17798 17797 int rval = DDI_SUCCESS;
17799 17798
17800 17799 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17801 17800
17802 17801 if (ha->nvram_cache == NULL) {
17803 17802 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17804 17803 QL_NAME, ha->instance);
17805 17804 rval = DDI_FAILURE;
17806 17805 } else {
17807 17806 if (ha->nvram_cache->cache != NULL) {
17808 17807 kmem_free(ha->nvram_cache->cache,
17809 17808 ha->nvram_cache->size);
17810 17809 }
17811 17810 mutex_destroy(&ha->nvram_cache->mutex);
17812 17811 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17813 17812 }
17814 17813
17815 17814 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17816 17815
17817 17816 return (rval);
17818 17817 }
17819 17818
17820 17819 /*
17821 17820 * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17822 17821 *
17823 17822 * Input: Pointer to the adapter state structure.
17824 17823 * Returns: void
17825 17824 * Context: Kernel context.
17826 17825 */
static void
ql_process_idc_event(ql_adapter_state_t *ha)
{
	int	rval;

	/* idc_mb[0] holds the async event code delivered by the firmware. */
	switch (ha->idc_mb[0]) {
	case MBA_IDC_NOTIFICATION:
		/*
		 * The informational opcode (idc_mb[2]) can be a
		 * defined value or the mailbox command being executed
		 * on another function which stimulated this IDC message.
		 */
		ADAPTER_STATE_LOCK(ha);
		switch (ha->idc_mb[2]) {
		case IDC_OPC_DRV_START:
			/*
			 * A peer function finished its flash access;
			 * drop our hold on the global hardware lock
			 * when the last outstanding access completes.
			 */
			if (ha->idc_flash_acc != 0) {
				ha->idc_flash_acc--;
				if (ha->idc_flash_acc == 0) {
					ha->idc_flash_acc_timer = 0;
					GLOBAL_HW_UNLOCK();
				}
			}
			/*
			 * If a restart was pending, un-stall the driver
			 * once the last restart reference is released.
			 * Note the careful lock ordering: the adapter
			 * state lock is dropped before taking the task
			 * daemon lock.
			 */
			if (ha->idc_restart_cnt != 0) {
				ha->idc_restart_cnt--;
				if (ha->idc_restart_cnt == 0) {
					ha->idc_restart_timer = 0;
					ADAPTER_STATE_UNLOCK(ha);
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~DRIVER_STALL;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
				} else {
					ADAPTER_STATE_UNLOCK(ha);
				}
			} else {
				ADAPTER_STATE_UNLOCK(ha);
			}
			break;
		case IDC_OPC_FLASH_ACC:
			/*
			 * Peer is starting flash access: arm a 30-tick
			 * watchdog and take the global hardware lock on
			 * the first reference.
			 */
			ha->idc_flash_acc_timer = 30;
			if (ha->idc_flash_acc == 0) {
				GLOBAL_HW_LOCK();
			}
			ha->idc_flash_acc++;
			ADAPTER_STATE_UNLOCK(ha);
			break;
		case IDC_OPC_RESTART_MPI:
			/* MPI restart in progress: stall new I/O. */
			ha->idc_restart_timer = 30;
			ha->idc_restart_cnt++;
			ADAPTER_STATE_UNLOCK(ha);
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			break;
		case IDC_OPC_PORT_RESET_MBC:
		case IDC_OPC_SET_PORT_CONFIG_MBC:
			/*
			 * Port reset/reconfig: stall the driver and also
			 * drain all outstanding commands before the peer
			 * proceeds.
			 */
			ha->idc_restart_timer = 30;
			ha->idc_restart_cnt++;
			ADAPTER_STATE_UNLOCK(ha);
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_wait_outstanding(ha);
			break;
		default:
			ADAPTER_STATE_UNLOCK(ha);
			EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
			    ha->idc_mb[2]);
			break;
		}
		/*
		 * If there is a timeout value associated with this IDC
		 * notification then there is an implied requirement
		 * that we return an ACK.
		 */
		if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
			rval = ql_idc_ack(ha);
			if (rval != QL_SUCCESS) {
				EL(ha, "idc_ack status=%xh %xh\n", rval,
				    ha->idc_mb[2]);
			}
		}
		break;
	case MBA_IDC_COMPLETE:
		/*
		 * We don't ACK completions, only these require action.
		 */
		switch (ha->idc_mb[2]) {
		case IDC_OPC_PORT_RESET_MBC:
		case IDC_OPC_SET_PORT_CONFIG_MBC:
			/*
			 * Reset/reconfig finished: release one restart
			 * reference and un-stall when the count hits zero.
			 */
			ADAPTER_STATE_LOCK(ha);
			if (ha->idc_restart_cnt != 0) {
				ha->idc_restart_cnt--;
				if (ha->idc_restart_cnt == 0) {
					ha->idc_restart_timer = 0;
					ADAPTER_STATE_UNLOCK(ha);
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~DRIVER_STALL;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
				} else {
					ADAPTER_STATE_UNLOCK(ha);
				}
			} else {
				ADAPTER_STATE_UNLOCK(ha);
			}
			break;
		default:
			break; /* Don't care... */
		}
		break;
	case MBA_IDC_TIME_EXTENDED:
		QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
		    "%xh\n", ha->instance, ha->idc_mb[2]);
		break;
	default:
		EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
		    ha->idc_mb[2]);
		/*
		 * NOTE(review): this unlock has no visible matching
		 * ADAPTER_STATE_LOCK on this path — the lock is only
		 * taken inside the MBA_IDC_NOTIFICATION/COMPLETE cases.
		 * Verify whether the caller holds the adapter state lock
		 * here; if not, this is an unlock of an unheld mutex.
		 */
		ADAPTER_STATE_UNLOCK(ha);
		break;
	}
}
↓ open down ↓ |
1450 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX