fixup .text where possible
7127 remove -Wno-missing-braces from Makefile.uts
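Review note: the two hunks in this file (the physical-port WWPN initializer and the emlxs_modlinkage initializer) add inner braces so the code compiles cleanly once -Wmissing-braces is in effect, which is what removing -Wno-missing-braces from Makefile.uts does. The warning fires whenever a nested aggregate (an array or struct inside another struct) is initialized without its own brace pair. Below is a minimal sketch of the pattern, using hypothetical stand-in types rather than the real Leadville or modctl definitions.

	/*
	 * Hypothetical stand-ins for the aggregates touched in this diff;
	 * these are not the real Leadville or modctl definitions.
	 */
	#include <stdio.h>

	struct wwn {
		unsigned char raw[8];	/* nested array, like the WWPN bytes */
	};

	struct linkage {
		int	rev;
		void	*linkp[2];	/* nested array, like ml_linkage */
	};

	static int dummy;

	/* These forms draw -Wmissing-braces: the inner array has no braces. */
	/* struct wwn     w_warn = { 0, 0, 0, 0, 0, 0, 0, 0 }; */
	/* struct linkage l_warn = { 1, &dummy, NULL }; */

	/* Fully braced equivalents compile cleanly, as in the hunks below. */
	struct wwn	w_clean = { { 0, 0, 0, 0, 0, 0, 0, 0 } };
	struct linkage	l_clean = { 1, { &dummy, NULL } };

	int
	main(void)
	{
		(void) printf("%d %p\n", w_clean.raw[0], l_clean.linkp[0]);
		return (0);
	}

Compiled with -Wmissing-braces (enabled by -Wall in gcc), the commented-out forms produce "missing braces around initializer"; the fully braced forms below them are the shape both hunks switch to.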
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #define DEF_ICFG 1
28 28
29 29 #include <emlxs.h>
30 30 #include <emlxs_version.h>
31 31
32 32
33 33 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
34 34 char emlxs_revision[] = EMLXS_REVISION;
35 35 char emlxs_version[] = EMLXS_VERSION;
36 36 char emlxs_name[] = EMLXS_NAME;
37 37 char emlxs_label[] = EMLXS_LABEL;
38 38
39 39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 41
42 42 #ifdef MENLO_SUPPORT
43 43 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 44 #endif /* MENLO_SUPPORT */
45 45
46 46 static void emlxs_fca_attach(emlxs_hba_t *hba);
47 47 static void emlxs_fca_detach(emlxs_hba_t *hba);
48 48 static void emlxs_drv_banner(emlxs_hba_t *hba);
49 49
50 50 static int32_t emlxs_get_props(emlxs_hba_t *hba);
51 51 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
52 52 uint32_t *pkt_flags);
53 53 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
54 54 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
55 55 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
56 56 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
57 57 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
58 58 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
59 59 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
60 60 static uint32_t emlxs_add_instance(int32_t ddiinst);
61 61 static void emlxs_iodone(emlxs_buf_t *sbp);
62 62 static int emlxs_pm_lower_power(dev_info_t *dip);
63 63 static int emlxs_pm_raise_power(dev_info_t *dip);
64 64 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
65 65 uint32_t failed);
66 66 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
67 67 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
68 68 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
69 69 uint32_t args, uint32_t *arg);
70 70
71 71 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
72 72 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
73 73 #endif /* EMLXS_MODREV3 && EMLXS_MODREV4 */
74 74
75 75 static void emlxs_mode_init_masks(emlxs_hba_t *hba);
76 76
77 77
78 78 extern int
79 79 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
80 80 extern int
81 81 emlxs_select_msiid(emlxs_hba_t *hba);
82 82 extern void
83 83 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
84 84
85 85 /*
86 86 * Driver Entry Routines.
87 87 */
88 88 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
89 89 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
90 90 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
91 91 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
92 92 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
93 93 cred_t *, int32_t *);
94 94 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
95 95
96 96
97 97 /*
98 98 * FC_AL Transport Functions.
99 99 */
100 100 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
101 101 fc_fca_bind_info_t *);
102 102 static void emlxs_fca_unbind_port(opaque_t);
103 103 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
104 104 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
105 105 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
106 106 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
107 107 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
108 108 uint32_t *, uint32_t);
109 109 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
110 110
111 111 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
112 112 static int32_t emlxs_fca_notify(opaque_t, uint32_t);
113 113 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
114 114
115 115 /*
116 116 * Driver Internal Functions.
117 117 */
118 118
119 119 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
120 120 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
121 121 #ifdef EMLXS_I386
122 122 #ifdef S11
123 123 static int32_t emlxs_quiesce(dev_info_t *);
124 124 #endif /* S11 */
125 125 #endif /* EMLXS_I386 */
126 126 static int32_t emlxs_hba_resume(dev_info_t *);
127 127 static int32_t emlxs_hba_suspend(dev_info_t *);
128 128 static int32_t emlxs_hba_detach(dev_info_t *);
129 129 static int32_t emlxs_hba_attach(dev_info_t *);
130 130 static void emlxs_lock_destroy(emlxs_hba_t *);
131 131 static void emlxs_lock_init(emlxs_hba_t *);
132 132
133 133 char *emlxs_pm_components[] = {
134 134 "NAME=" DRIVER_NAME "000",
135 135 "0=Device D3 State",
136 136 "1=Device D0 State"
137 137 };
138 138
139 139
140 140 /*
141 141 * Default emlx dma limits
142 142 */
143 143 ddi_dma_lim_t emlxs_dma_lim = {
144 144 (uint32_t)0, /* dlim_addr_lo */
145 145 (uint32_t)0xffffffff, /* dlim_addr_hi */
146 146 (uint_t)0x00ffffff, /* dlim_cntr_max */
147 147 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */
148 148 1, /* dlim_minxfer */
149 149 0x00ffffff /* dlim_dmaspeed */
150 150 };
151 151
152 152 /*
153 153 * Be careful when using these attributes; the defaults listed below are
154 154 * (almost) the most general case, permitting allocation in almost any
155 155 * way supported by the LightPulse family. The sole exception is the
156 156 * alignment specified as requiring memory allocation on a 4-byte boundary;
157 157 * the Lightpulse can DMA memory on any byte boundary.
158 158 *
159 159 * The LightPulse family currently is limited to 16M transfers;
160 160 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
161 161 */
162 162 ddi_dma_attr_t emlxs_dma_attr = {
163 163 DMA_ATTR_V0, /* dma_attr_version */
164 164 (uint64_t)0, /* dma_attr_addr_lo */
165 165 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
166 166 (uint64_t)0x00ffffff, /* dma_attr_count_max */
167 167 1, /* dma_attr_align */
168 168 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
169 169 1, /* dma_attr_minxfer */
170 170 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
171 171 (uint64_t)0xffffffff, /* dma_attr_seg */
172 172 1, /* dma_attr_sgllen */
173 173 1, /* dma_attr_granular */
174 174 0 /* dma_attr_flags */
175 175 };
176 176
177 177 ddi_dma_attr_t emlxs_dma_attr_ro = {
178 178 DMA_ATTR_V0, /* dma_attr_version */
179 179 (uint64_t)0, /* dma_attr_addr_lo */
180 180 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
181 181 (uint64_t)0x00ffffff, /* dma_attr_count_max */
182 182 1, /* dma_attr_align */
183 183 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
184 184 1, /* dma_attr_minxfer */
185 185 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
186 186 (uint64_t)0xffffffff, /* dma_attr_seg */
187 187 1, /* dma_attr_sgllen */
188 188 1, /* dma_attr_granular */
189 189 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
190 190 };
191 191
192 192 ddi_dma_attr_t emlxs_dma_attr_1sg = {
193 193 DMA_ATTR_V0, /* dma_attr_version */
194 194 (uint64_t)0, /* dma_attr_addr_lo */
195 195 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
196 196 (uint64_t)0x00ffffff, /* dma_attr_count_max */
197 197 1, /* dma_attr_align */
198 198 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
199 199 1, /* dma_attr_minxfer */
200 200 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
201 201 (uint64_t)0xffffffff, /* dma_attr_seg */
202 202 1, /* dma_attr_sgllen */
203 203 1, /* dma_attr_granular */
204 204 0 /* dma_attr_flags */
205 205 };
206 206
207 207 #if (EMLXS_MODREV >= EMLXS_MODREV3)
208 208 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
209 209 DMA_ATTR_V0, /* dma_attr_version */
210 210 (uint64_t)0, /* dma_attr_addr_lo */
211 211 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
212 212 (uint64_t)0x00ffffff, /* dma_attr_count_max */
213 213 1, /* dma_attr_align */
214 214 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
215 215 1, /* dma_attr_minxfer */
216 216 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
217 217 (uint64_t)0xffffffff, /* dma_attr_seg */
218 218 1, /* dma_attr_sgllen */
219 219 1, /* dma_attr_granular */
220 220 0 /* dma_attr_flags */
221 221 };
222 222 #endif /* >= EMLXS_MODREV3 */
223 223
224 224 /*
225 225 * DDI access attributes for device
226 226 */
227 227 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
228 228 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
229 229 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */
230 230 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
231 231 DDI_DEFAULT_ACC /* devacc_attr_access */
232 232 };
233 233
234 234 /*
235 235 * DDI access attributes for data
236 236 */
237 237 ddi_device_acc_attr_t emlxs_data_acc_attr = {
238 238 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
239 239 DDI_NEVERSWAP_ACC, /* don't swap for Data */
240 240 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
241 241 DDI_DEFAULT_ACC /* devacc_attr_access */
242 242 };
243 243
244 244 /*
245 245 * Fill in the FC Transport structure,
246 246 * as defined in the Fibre Channel Transport Programmming Guide.
247 247 */
248 248 #if (EMLXS_MODREV == EMLXS_MODREV5)
249 249 static fc_fca_tran_t emlxs_fca_tran = {
250 250 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */
251 251 MAX_VPORTS, /* fca numerb of ports */
252 252 sizeof (emlxs_buf_t), /* fca pkt size */
253 253 2048, /* fca cmd max */
254 254 &emlxs_dma_lim, /* fca dma limits */
255 255 0, /* fca iblock, to be filled in later */
256 256 &emlxs_dma_attr, /* fca dma attributes */
257 257 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
258 258 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
259 259 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
260 260 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
261 261 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
262 262 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
263 263 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
264 264 &emlxs_data_acc_attr, /* fca access atributes */
265 265 0, /* fca_num_npivports */
266 - {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */
266 + {{0, 0, 0, 0, 0, 0, 0, 0}}, /* Physical port WWPN */
267 267 emlxs_fca_bind_port,
268 268 emlxs_fca_unbind_port,
269 269 emlxs_fca_pkt_init,
270 270 emlxs_fca_pkt_uninit,
271 271 emlxs_fca_transport,
272 272 emlxs_fca_get_cap,
273 273 emlxs_fca_set_cap,
274 274 emlxs_fca_get_map,
275 275 emlxs_fca_transport,
276 276 emlxs_fca_ub_alloc,
277 277 emlxs_fca_ub_free,
278 278 emlxs_fca_ub_release,
279 279 emlxs_fca_pkt_abort,
280 280 emlxs_fca_reset,
281 281 emlxs_fca_port_manage,
282 282 emlxs_fca_get_device,
283 283 emlxs_fca_notify
284 284 };
285 285 #endif /* EMLXS_MODREV5 */
286 286
287 287
288 288 #if (EMLXS_MODREV == EMLXS_MODREV4)
289 289 static fc_fca_tran_t emlxs_fca_tran = {
290 290 FCTL_FCA_MODREV_4, /* fca_version */
291 291 MAX_VPORTS, /* fca numerb of ports */
292 292 sizeof (emlxs_buf_t), /* fca pkt size */
293 293 2048, /* fca cmd max */
294 294 &emlxs_dma_lim, /* fca dma limits */
295 295 0, /* fca iblock, to be filled in later */
296 296 &emlxs_dma_attr, /* fca dma attributes */
297 297 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
298 298 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
299 299 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
300 300 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
301 301 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
302 302 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
303 303 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
304 304 &emlxs_data_acc_attr, /* fca access atributes */
305 305 emlxs_fca_bind_port,
306 306 emlxs_fca_unbind_port,
307 307 emlxs_fca_pkt_init,
308 308 emlxs_fca_pkt_uninit,
309 309 emlxs_fca_transport,
310 310 emlxs_fca_get_cap,
311 311 emlxs_fca_set_cap,
312 312 emlxs_fca_get_map,
313 313 emlxs_fca_transport,
314 314 emlxs_fca_ub_alloc,
315 315 emlxs_fca_ub_free,
316 316 emlxs_fca_ub_release,
317 317 emlxs_fca_pkt_abort,
318 318 emlxs_fca_reset,
319 319 emlxs_fca_port_manage,
320 320 emlxs_fca_get_device,
321 321 emlxs_fca_notify
322 322 };
323 323 #endif /* EMLXS_MODEREV4 */
324 324
325 325
326 326 #if (EMLXS_MODREV == EMLXS_MODREV3)
327 327 static fc_fca_tran_t emlxs_fca_tran = {
328 328 FCTL_FCA_MODREV_3, /* fca_version */
329 329 MAX_VPORTS, /* fca numerb of ports */
330 330 sizeof (emlxs_buf_t), /* fca pkt size */
331 331 2048, /* fca cmd max */
332 332 &emlxs_dma_lim, /* fca dma limits */
333 333 0, /* fca iblock, to be filled in later */
334 334 &emlxs_dma_attr, /* fca dma attributes */
335 335 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
336 336 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
337 337 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
338 338 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
339 339 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
340 340 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
341 341 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
342 342 &emlxs_data_acc_attr, /* fca access atributes */
343 343 emlxs_fca_bind_port,
344 344 emlxs_fca_unbind_port,
345 345 emlxs_fca_pkt_init,
346 346 emlxs_fca_pkt_uninit,
347 347 emlxs_fca_transport,
348 348 emlxs_fca_get_cap,
349 349 emlxs_fca_set_cap,
350 350 emlxs_fca_get_map,
351 351 emlxs_fca_transport,
352 352 emlxs_fca_ub_alloc,
353 353 emlxs_fca_ub_free,
354 354 emlxs_fca_ub_release,
355 355 emlxs_fca_pkt_abort,
356 356 emlxs_fca_reset,
357 357 emlxs_fca_port_manage,
358 358 emlxs_fca_get_device,
359 359 emlxs_fca_notify
360 360 };
361 361 #endif /* EMLXS_MODREV3 */
362 362
363 363
364 364 #if (EMLXS_MODREV == EMLXS_MODREV2)
365 365 static fc_fca_tran_t emlxs_fca_tran = {
366 366 FCTL_FCA_MODREV_2, /* fca_version */
367 367 MAX_VPORTS, /* number of ports */
368 368 sizeof (emlxs_buf_t), /* pkt size */
369 369 2048, /* max cmds */
370 370 &emlxs_dma_lim, /* DMA limits */
371 371 0, /* iblock, to be filled in later */
372 372 &emlxs_dma_attr, /* dma attributes */
373 373 &emlxs_data_acc_attr, /* access atributes */
374 374 emlxs_fca_bind_port,
375 375 emlxs_fca_unbind_port,
376 376 emlxs_fca_pkt_init,
377 377 emlxs_fca_pkt_uninit,
378 378 emlxs_fca_transport,
379 379 emlxs_fca_get_cap,
380 380 emlxs_fca_set_cap,
381 381 emlxs_fca_get_map,
382 382 emlxs_fca_transport,
383 383 emlxs_fca_ub_alloc,
384 384 emlxs_fca_ub_free,
385 385 emlxs_fca_ub_release,
386 386 emlxs_fca_pkt_abort,
387 387 emlxs_fca_reset,
388 388 emlxs_fca_port_manage,
389 389 emlxs_fca_get_device,
390 390 emlxs_fca_notify
391 391 };
392 392 #endif /* EMLXS_MODREV2 */
393 393
394 394
395 395 /*
396 396 * state pointer which the implementation uses as a place to
397 397 * hang a set of per-driver structures;
398 398 *
399 399 */
400 400 void *emlxs_soft_state = NULL;
401 401
402 402 /*
403 403 * Driver Global variables.
404 404 */
405 405 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */
406 406
407 407 emlxs_device_t emlxs_device;
408 408
409 409 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */
410 410 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */
411 411 uint32_t emlxs_instance_flag = 0; /* uses emlxs_device.lock */
412 412 #define EMLXS_FW_SHOW 0x00000001
413 413
414 414
415 415 /*
416 416 * CB ops vector. Used for administration only.
417 417 */
418 418 static struct cb_ops emlxs_cb_ops = {
419 419 emlxs_open, /* cb_open */
420 420 emlxs_close, /* cb_close */
421 421 nodev, /* cb_strategy */
422 422 nodev, /* cb_print */
423 423 nodev, /* cb_dump */
424 424 nodev, /* cb_read */
425 425 nodev, /* cb_write */
426 426 emlxs_ioctl, /* cb_ioctl */
427 427 nodev, /* cb_devmap */
428 428 nodev, /* cb_mmap */
429 429 nodev, /* cb_segmap */
430 430 nochpoll, /* cb_chpoll */
431 431 ddi_prop_op, /* cb_prop_op */
432 432 0, /* cb_stream */
433 433 #ifdef _LP64
434 434 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
435 435 #else
436 436 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
437 437 #endif
438 438 CB_REV, /* rev */
439 439 nodev, /* cb_aread */
440 440 nodev /* cb_awrite */
441 441 };
442 442
443 443 static struct dev_ops emlxs_ops = {
444 444 DEVO_REV, /* rev */
445 445 0, /* refcnt */
446 446 emlxs_info, /* getinfo */
447 447 nulldev, /* identify */
448 448 nulldev, /* probe */
449 449 emlxs_attach, /* attach */
450 450 emlxs_detach, /* detach */
451 451 nodev, /* reset */
452 452 &emlxs_cb_ops, /* devo_cb_ops */
453 453 NULL, /* devo_bus_ops */
454 454 emlxs_power, /* power ops */
455 455 #ifdef EMLXS_I386
456 456 #ifdef S11
457 457 emlxs_quiesce, /* quiesce */
458 458 #endif /* S11 */
459 459 #endif /* EMLXS_I386 */
460 460 };
461 461
462 462 #include <sys/modctl.h>
463 463 extern struct mod_ops mod_driverops;
464 464
465 465 #ifdef SAN_DIAG_SUPPORT
466 466 extern kmutex_t emlxs_sd_bucket_mutex;
467 467 extern sd_bucket_info_t emlxs_sd_bucket;
468 468 #endif /* SAN_DIAG_SUPPORT */
469 469
470 470 /*
471 471 * Module linkage information for the kernel.
472 472 */
473 473 static struct modldrv emlxs_modldrv = {
474 474 &mod_driverops, /* module type - driver */
475 475 emlxs_name, /* module name */
476 476 &emlxs_ops, /* driver ops */
477 477 };
478 478
479 479
480 480 /*
481 481 * Driver module linkage structure
482 482 */
483 483 static struct modlinkage emlxs_modlinkage = {
484 - MODREV_1, /* ml_rev - must be MODREV_1 */
485 - &emlxs_modldrv, /* ml_linkage */
486 - NULL /* end of driver linkage */
484 + MODREV_1, /* ml_rev - must be MODREV_1 */
485 + { &emlxs_modldrv, /* ml_linkage */
486 + NULL }
487 487 };
488 488
489 489
490 490 /* We only need to add entries for non-default return codes. */
491 491 /* Entries do not need to be in order. */
492 492 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
493 493 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */
494 494
495 495 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
496 496 /* {f/w code, pkt_state, pkt_reason, */
497 497 /* pkt_expln, pkt_action} */
498 498
499 499 /* 0x00 - Do not remove */
500 500 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
501 501 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
502 502
503 503 /* 0x01 - Do not remove */
504 504 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
505 505 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
506 506
507 507 /* 0x02 */
508 508 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
509 509 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
510 510
511 511 /*
512 512 * This is a default entry.
513 513 * The real codes are written dynamically in emlxs_els.c
514 514 */
515 515 /* 0x09 */
516 516 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
517 517 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
518 518
519 519 /* Special error code */
520 520 /* 0x10 */
521 521 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
522 522 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
523 523
524 524 /* Special error code */
525 525 /* 0x11 */
526 526 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
527 527 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
528 528
529 529 /* Special error code */
530 530 /* 0x12 */
531 531 {IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
532 532 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
533 533
534 534 /* CLASS 2 only */
535 535 /* 0x04 */
536 536 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
537 537 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
538 538
539 539 /* CLASS 2 only */
540 540 /* 0x05 */
541 541 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
542 542 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
543 543
544 544 /* CLASS 2 only */
545 545 /* 0x06 */
546 546 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
547 547 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
548 548
549 549 /* CLASS 2 only */
550 550 /* 0x07 */
551 551 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
552 552 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
553 553 };
554 554
555 555 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
556 556
557 557
558 558 /* We only need to add entries for non-default return codes. */
559 559 /* Entries do not need to be in order. */
560 560 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
561 561 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */
562 562
563 563 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
564 564 /* {f/w code, pkt_state, pkt_reason, */
565 565 /* pkt_expln, pkt_action} */
566 566
567 567 /* 0x01 */
568 568 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
569 569 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
570 570
571 571 /* 0x02 */
572 572 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
573 573 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
574 574
575 575 /* 0x04 */
576 576 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
577 577 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
578 578
579 579 /* 0x05 */
580 580 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
581 581 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
582 582
583 583 /* 0x06 */
584 584 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
585 585 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
586 586
587 587 /* 0x07 */
588 588 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
589 589 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
590 590
591 591 /* 0x08 */
592 592 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
593 593 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
594 594
595 595 /* 0x0B */
596 596 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
597 597 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
598 598
599 599 /* 0x0D */
600 600 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
601 601 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
602 602
603 603 /* 0x0E */
604 604 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
605 605 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
606 606
607 607 /* 0x0F */
608 608 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
609 609 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
610 610
611 611 /* 0x11 */
612 612 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
613 613 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
614 614
615 615 /* 0x13 */
616 616 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
617 617 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
618 618
619 619 /* 0x14 */
620 620 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
621 621 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
622 622
623 623 /* 0x15 */
624 624 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
625 625 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
626 626
627 627 /* 0x16 */
628 628 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
629 629 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
630 630
631 631 /* 0x17 */
632 632 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
633 633 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
634 634
635 635 /* 0x18 */
636 636 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
637 637 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
638 638
639 639 /* 0x1A */
640 640 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
641 641 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
642 642
643 643 /* 0x21 */
644 644 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
645 645 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
646 646
647 647 /* Occurs at link down */
648 648 /* 0x28 */
649 649 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
650 650 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
651 651
652 652 /* 0xF0 */
653 653 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
654 654 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
655 655 };
656 656
657 657 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
658 658
659 659
660 660
661 661 emlxs_table_t emlxs_error_table[] = {
662 662 {IOERR_SUCCESS, "No error."},
663 663 {IOERR_MISSING_CONTINUE, "Missing continue."},
664 664 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
665 665 {IOERR_INTERNAL_ERROR, "Internal error."},
666 666 {IOERR_INVALID_RPI, "Invalid RPI."},
667 667 {IOERR_NO_XRI, "No XRI."},
668 668 {IOERR_ILLEGAL_COMMAND, "Illegal command."},
669 669 {IOERR_XCHG_DROPPED, "Exchange dropped."},
670 670 {IOERR_ILLEGAL_FIELD, "Illegal field."},
671 671 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
672 672 {IOERR_TX_DMA_FAILED, "TX DMA failed."},
673 673 {IOERR_RX_DMA_FAILED, "RX DMA failed."},
674 674 {IOERR_ILLEGAL_FRAME, "Illegal frame."},
675 675 {IOERR_NO_RESOURCES, "No resources."},
676 676 {IOERR_ILLEGAL_LENGTH, "Illegal length."},
677 677 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
678 678 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."},
679 679 {IOERR_ABORT_REQUESTED, "Abort requested."},
680 680 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
681 681 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
682 682 {IOERR_RING_RESET, "Ring reset."},
683 683 {IOERR_LINK_DOWN, "Link down."},
684 684 {IOERR_CORRUPTED_DATA, "Corrupted data."},
685 685 {IOERR_CORRUPTED_RPI, "Corrupted RPI."},
686 686 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
687 687 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
688 688 {IOERR_DUP_FRAME, "Duplicate frame."},
689 689 {IOERR_LINK_CONTROL_FRAME, "Link control frame."},
690 690 {IOERR_BAD_HOST_ADDRESS, "Bad host address."},
691 691 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
692 692 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
693 693 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
694 694 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
695 695 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
696 696 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
697 697 {IOERR_XRIBUF_MISSING, "XRI buffer missing"},
698 698 {IOERR_ROFFSET_INVAL, "Relative offset invalid."},
699 699 {IOERR_ROFFSET_MISSING, "Relative offset missing."},
700 700 {IOERR_INSUF_BUFFER, "Buffer too small."},
701 701 {IOERR_MISSING_SI, "ELS frame missing SI"},
702 702 {IOERR_MISSING_ES, "Exhausted burst without ES"},
703 703 {IOERR_INCOMP_XFER, "Transfer incomplete."},
704 704 {IOERR_ABORT_TIMEOUT, "Abort timeout."}
705 705
706 706 }; /* emlxs_error_table */
707 707
708 708
709 709 emlxs_table_t emlxs_state_table[] = {
710 710 {IOSTAT_SUCCESS, "Success."},
711 711 {IOSTAT_FCP_RSP_ERROR, "FCP response error."},
712 712 {IOSTAT_REMOTE_STOP, "Remote stop."},
713 713 {IOSTAT_LOCAL_REJECT, "Local reject."},
714 714 {IOSTAT_NPORT_RJT, "NPort reject."},
715 715 {IOSTAT_FABRIC_RJT, "Fabric reject."},
716 716 {IOSTAT_NPORT_BSY, "Nport busy."},
717 717 {IOSTAT_FABRIC_BSY, "Fabric busy."},
718 718 {IOSTAT_INTERMED_RSP, "Intermediate response."},
719 719 {IOSTAT_LS_RJT, "LS reject."},
720 720 {IOSTAT_CMD_REJECT, "Cmd reject."},
721 721 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
722 722 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
723 723 {IOSTAT_DATA_UNDERRUN, "Data underrun."},
724 724 {IOSTAT_DATA_OVERRUN, "Data overrun."},
725 725 {IOSTAT_RSP_INVALID, "Response Invalid."},
726 726
727 727 }; /* emlxs_state_table */
728 728
729 729
730 730 #ifdef MENLO_SUPPORT
731 731 emlxs_table_t emlxs_menlo_cmd_table[] = {
732 732 {MENLO_CMD_INITIALIZE, "MENLO_INIT"},
733 733 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
734 734 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
735 735 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
736 736 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
737 737 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
738 738
739 739 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
740 740 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
741 741 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
742 742 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
743 743 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
744 744 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
745 745 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
746 746 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
747 747 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
748 748
749 749 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
750 750 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
751 751 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
752 752
753 753 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
754 754 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
755 755
756 756 {MENLO_CMD_RESET, "MENLO_RESET"},
757 757 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
758 758
759 759 }; /* emlxs_menlo_cmd_table */
760 760
761 761 emlxs_table_t emlxs_menlo_rsp_table[] = {
762 762 {MENLO_RSP_SUCCESS, "SUCCESS"},
763 763 {MENLO_ERR_FAILED, "FAILED"},
764 764 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
765 765 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
766 766 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
767 767 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
768 768 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
769 769 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
770 770 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
771 771 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
772 772 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
773 773 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
774 774 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
775 775 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
776 776 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
777 777 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
778 778 {MENLO_ERR_BUSY, "BUSY"},
779 779
780 780 }; /* emlxs_menlo_rsp_table */
781 781
782 782 #endif /* MENLO_SUPPORT */
783 783
784 784
785 785 emlxs_table_t emlxs_mscmd_table[] = {
786 786 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
787 787 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
788 788 {MS_GTIN, "MS_GTIN"},
789 789 {MS_GIEL, "MS_GIEL"},
790 790 {MS_GIET, "MS_GIET"},
791 791 {MS_GDID, "MS_GDID"},
792 792 {MS_GMID, "MS_GMID"},
793 793 {MS_GFN, "MS_GFN"},
794 794 {MS_GIELN, "MS_GIELN"},
795 795 {MS_GMAL, "MS_GMAL"},
796 796 {MS_GIEIL, "MS_GIEIL"},
797 797 {MS_GPL, "MS_GPL"},
798 798 {MS_GPT, "MS_GPT"},
799 799 {MS_GPPN, "MS_GPPN"},
800 800 {MS_GAPNL, "MS_GAPNL"},
801 801 {MS_GPS, "MS_GPS"},
802 802 {MS_GPSC, "MS_GPSC"},
803 803 {MS_GATIN, "MS_GATIN"},
804 804 {MS_GSES, "MS_GSES"},
805 805 {MS_GPLNL, "MS_GPLNL"},
806 806 {MS_GPLT, "MS_GPLT"},
807 807 {MS_GPLML, "MS_GPLML"},
808 808 {MS_GPAB, "MS_GPAB"},
809 809 {MS_GNPL, "MS_GNPL"},
810 810 {MS_GPNL, "MS_GPNL"},
811 811 {MS_GPFCP, "MS_GPFCP"},
812 812 {MS_GPLI, "MS_GPLI"},
813 813 {MS_GNID, "MS_GNID"},
814 814 {MS_RIELN, "MS_RIELN"},
815 815 {MS_RPL, "MS_RPL"},
816 816 {MS_RPLN, "MS_RPLN"},
817 817 {MS_RPLT, "MS_RPLT"},
818 818 {MS_RPLM, "MS_RPLM"},
819 819 {MS_RPAB, "MS_RPAB"},
820 820 {MS_RPFCP, "MS_RPFCP"},
821 821 {MS_RPLI, "MS_RPLI"},
822 822 {MS_DPL, "MS_DPL"},
823 823 {MS_DPLN, "MS_DPLN"},
824 824 {MS_DPLM, "MS_DPLM"},
825 825 {MS_DPLML, "MS_DPLML"},
826 826 {MS_DPLI, "MS_DPLI"},
827 827 {MS_DPAB, "MS_DPAB"},
828 828 {MS_DPALL, "MS_DPALL"}
829 829
830 830 }; /* emlxs_mscmd_table */
831 831
832 832
833 833 emlxs_table_t emlxs_ctcmd_table[] = {
834 834 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
835 835 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
836 836 {SLI_CTNS_GA_NXT, "GA_NXT"},
837 837 {SLI_CTNS_GPN_ID, "GPN_ID"},
838 838 {SLI_CTNS_GNN_ID, "GNN_ID"},
839 839 {SLI_CTNS_GCS_ID, "GCS_ID"},
840 840 {SLI_CTNS_GFT_ID, "GFT_ID"},
841 841 {SLI_CTNS_GSPN_ID, "GSPN_ID"},
842 842 {SLI_CTNS_GPT_ID, "GPT_ID"},
843 843 {SLI_CTNS_GID_PN, "GID_PN"},
844 844 {SLI_CTNS_GID_NN, "GID_NN"},
845 845 {SLI_CTNS_GIP_NN, "GIP_NN"},
846 846 {SLI_CTNS_GIPA_NN, "GIPA_NN"},
847 847 {SLI_CTNS_GSNN_NN, "GSNN_NN"},
848 848 {SLI_CTNS_GNN_IP, "GNN_IP"},
849 849 {SLI_CTNS_GIPA_IP, "GIPA_IP"},
850 850 {SLI_CTNS_GID_FT, "GID_FT"},
851 851 {SLI_CTNS_GID_PT, "GID_PT"},
852 852 {SLI_CTNS_RPN_ID, "RPN_ID"},
853 853 {SLI_CTNS_RNN_ID, "RNN_ID"},
854 854 {SLI_CTNS_RCS_ID, "RCS_ID"},
855 855 {SLI_CTNS_RFT_ID, "RFT_ID"},
856 856 {SLI_CTNS_RSPN_ID, "RSPN_ID"},
857 857 {SLI_CTNS_RPT_ID, "RPT_ID"},
858 858 {SLI_CTNS_RIP_NN, "RIP_NN"},
859 859 {SLI_CTNS_RIPA_NN, "RIPA_NN"},
860 860 {SLI_CTNS_RSNN_NN, "RSNN_NN"},
861 861 {SLI_CTNS_DA_ID, "DA_ID"},
862 862 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
863 863
864 864 }; /* emlxs_ctcmd_table */
865 865
866 866
867 867
868 868 emlxs_table_t emlxs_rmcmd_table[] = {
869 869 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
870 870 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
871 871 {CT_OP_GSAT, "RM_GSAT"},
872 872 {CT_OP_GHAT, "RM_GHAT"},
873 873 {CT_OP_GPAT, "RM_GPAT"},
874 874 {CT_OP_GDAT, "RM_GDAT"},
875 875 {CT_OP_GPST, "RM_GPST"},
876 876 {CT_OP_GDP, "RM_GDP"},
877 877 {CT_OP_GDPG, "RM_GDPG"},
878 878 {CT_OP_GEPS, "RM_GEPS"},
879 879 {CT_OP_GLAT, "RM_GLAT"},
880 880 {CT_OP_SSAT, "RM_SSAT"},
881 881 {CT_OP_SHAT, "RM_SHAT"},
882 882 {CT_OP_SPAT, "RM_SPAT"},
883 883 {CT_OP_SDAT, "RM_SDAT"},
884 884 {CT_OP_SDP, "RM_SDP"},
885 885 {CT_OP_SBBS, "RM_SBBS"},
886 886 {CT_OP_RPST, "RM_RPST"},
887 887 {CT_OP_VFW, "RM_VFW"},
888 888 {CT_OP_DFW, "RM_DFW"},
889 889 {CT_OP_RES, "RM_RES"},
890 890 {CT_OP_RHD, "RM_RHD"},
891 891 {CT_OP_UFW, "RM_UFW"},
892 892 {CT_OP_RDP, "RM_RDP"},
893 893 {CT_OP_GHDR, "RM_GHDR"},
894 894 {CT_OP_CHD, "RM_CHD"},
895 895 {CT_OP_SSR, "RM_SSR"},
896 896 {CT_OP_RSAT, "RM_RSAT"},
897 897 {CT_OP_WSAT, "RM_WSAT"},
898 898 {CT_OP_RSAH, "RM_RSAH"},
899 899 {CT_OP_WSAH, "RM_WSAH"},
900 900 {CT_OP_RACT, "RM_RACT"},
901 901 {CT_OP_WACT, "RM_WACT"},
902 902 {CT_OP_RKT, "RM_RKT"},
903 903 {CT_OP_WKT, "RM_WKT"},
904 904 {CT_OP_SSC, "RM_SSC"},
905 905 {CT_OP_QHBA, "RM_QHBA"},
906 906 {CT_OP_GST, "RM_GST"},
907 907 {CT_OP_GFTM, "RM_GFTM"},
908 908 {CT_OP_SRL, "RM_SRL"},
909 909 {CT_OP_SI, "RM_SI"},
910 910 {CT_OP_SRC, "RM_SRC"},
911 911 {CT_OP_GPB, "RM_GPB"},
912 912 {CT_OP_SPB, "RM_SPB"},
913 913 {CT_OP_RPB, "RM_RPB"},
914 914 {CT_OP_RAPB, "RM_RAPB"},
915 915 {CT_OP_GBC, "RM_GBC"},
916 916 {CT_OP_GBS, "RM_GBS"},
917 917 {CT_OP_SBS, "RM_SBS"},
918 918 {CT_OP_GANI, "RM_GANI"},
919 919 {CT_OP_GRV, "RM_GRV"},
920 920 {CT_OP_GAPBS, "RM_GAPBS"},
921 921 {CT_OP_APBC, "RM_APBC"},
922 922 {CT_OP_GDT, "RM_GDT"},
923 923 {CT_OP_GDLMI, "RM_GDLMI"},
924 924 {CT_OP_GANA, "RM_GANA"},
925 925 {CT_OP_GDLV, "RM_GDLV"},
926 926 {CT_OP_GWUP, "RM_GWUP"},
927 927 {CT_OP_GLM, "RM_GLM"},
928 928 {CT_OP_GABS, "RM_GABS"},
929 929 {CT_OP_SABS, "RM_SABS"},
930 930 {CT_OP_RPR, "RM_RPR"},
931 931 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
932 932
933 933 }; /* emlxs_rmcmd_table */
934 934
935 935
936 936 emlxs_table_t emlxs_elscmd_table[] = {
937 937 {ELS_CMD_ACC, "ACC"},
938 938 {ELS_CMD_LS_RJT, "LS_RJT"},
939 939 {ELS_CMD_PLOGI, "PLOGI"},
940 940 {ELS_CMD_FLOGI, "FLOGI"},
941 941 {ELS_CMD_LOGO, "LOGO"},
942 942 {ELS_CMD_ABTX, "ABTX"},
943 943 {ELS_CMD_RCS, "RCS"},
944 944 {ELS_CMD_RES, "RES"},
945 945 {ELS_CMD_RSS, "RSS"},
946 946 {ELS_CMD_RSI, "RSI"},
947 947 {ELS_CMD_ESTS, "ESTS"},
948 948 {ELS_CMD_ESTC, "ESTC"},
949 949 {ELS_CMD_ADVC, "ADVC"},
950 950 {ELS_CMD_RTV, "RTV"},
951 951 {ELS_CMD_RLS, "RLS"},
952 952 {ELS_CMD_ECHO, "ECHO"},
953 953 {ELS_CMD_TEST, "TEST"},
954 954 {ELS_CMD_RRQ, "RRQ"},
955 955 {ELS_CMD_REC, "REC"},
956 956 {ELS_CMD_PRLI, "PRLI"},
957 957 {ELS_CMD_PRLO, "PRLO"},
958 958 {ELS_CMD_SCN, "SCN"},
959 959 {ELS_CMD_TPLS, "TPLS"},
960 960 {ELS_CMD_GPRLO, "GPRLO"},
961 961 {ELS_CMD_GAID, "GAID"},
962 962 {ELS_CMD_FACT, "FACT"},
963 963 {ELS_CMD_FDACT, "FDACT"},
964 964 {ELS_CMD_NACT, "NACT"},
965 965 {ELS_CMD_NDACT, "NDACT"},
966 966 {ELS_CMD_QoSR, "QoSR"},
967 967 {ELS_CMD_RVCS, "RVCS"},
968 968 {ELS_CMD_PDISC, "PDISC"},
969 969 {ELS_CMD_FDISC, "FDISC"},
970 970 {ELS_CMD_ADISC, "ADISC"},
971 971 {ELS_CMD_FARP, "FARP"},
972 972 {ELS_CMD_FARPR, "FARPR"},
973 973 {ELS_CMD_FAN, "FAN"},
974 974 {ELS_CMD_RSCN, "RSCN"},
975 975 {ELS_CMD_SCR, "SCR"},
976 976 {ELS_CMD_LINIT, "LINIT"},
977 977 {ELS_CMD_RNID, "RNID"},
978 978 {ELS_CMD_AUTH, "AUTH"}
979 979
980 980 }; /* emlxs_elscmd_table */
981 981
982 982
983 983 emlxs_table_t emlxs_mode_table[] = {
984 984 {MODE_NONE, "NONE"},
985 985 {MODE_INITIATOR, "INITIATOR"},
986 986 {MODE_TARGET, "TARGET"},
987 987 {MODE_ALL, "INITIATOR | TARGET"}
988 988 }; /* emlxs_mode_table */
989 989
990 990 /*
991 991 *
992 992 * Device Driver Entry Routines
993 993 *
994 994 */
995 995
996 996 #ifdef MODSYM_SUPPORT
997 997 static void emlxs_fca_modclose();
998 998 static int emlxs_fca_modopen();
999 999 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */
1000 1000
1001 1001 static int
1002 1002 emlxs_fca_modopen()
1003 1003 {
1004 1004 int err;
1005 1005
1006 1006 if (emlxs_modsym.mod_fctl) {
1007 1007 return (0);
1008 1008 }
1009 1009
1010 1010 /* Leadville (fctl) */
1011 1011 err = 0;
1012 1012 emlxs_modsym.mod_fctl =
1013 1013 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1014 1014 if (!emlxs_modsym.mod_fctl) {
1015 1015 cmn_err(CE_WARN,
1016 1016 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1017 1017 DRIVER_NAME, err);
1018 1018
1019 1019 goto failed;
1020 1020 }
1021 1021
1022 1022 err = 0;
1023 1023 /* Check if the fctl fc_fca_attach is present */
1024 1024 emlxs_modsym.fc_fca_attach =
1025 1025 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1026 1026 &err);
1027 1027 if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1028 1028 cmn_err(CE_WARN,
1029 1029 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1030 1030 goto failed;
1031 1031 }
1032 1032
1033 1033 err = 0;
1034 1034 /* Check if the fctl fc_fca_detach is present */
1035 1035 emlxs_modsym.fc_fca_detach =
1036 1036 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1037 1037 &err);
1038 1038 if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1039 1039 cmn_err(CE_WARN,
1040 1040 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1041 1041 goto failed;
1042 1042 }
1043 1043
1044 1044 err = 0;
1045 1045 /* Check if the fctl fc_fca_init is present */
1046 1046 emlxs_modsym.fc_fca_init =
1047 1047 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1048 1048 if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1049 1049 cmn_err(CE_WARN,
1050 1050 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1051 1051 goto failed;
1052 1052 }
1053 1053
1054 1054 return (0);
1055 1055
1056 1056 failed:
1057 1057
1058 1058 emlxs_fca_modclose();
1059 1059
1060 1060 return (1);
1061 1061
1062 1062
1063 1063 } /* emlxs_fca_modopen() */
1064 1064
1065 1065
1066 1066 static void
1067 1067 emlxs_fca_modclose()
1068 1068 {
1069 1069 if (emlxs_modsym.mod_fctl) {
1070 1070 (void) ddi_modclose(emlxs_modsym.mod_fctl);
1071 1071 emlxs_modsym.mod_fctl = 0;
1072 1072 }
1073 1073
1074 1074 emlxs_modsym.fc_fca_attach = NULL;
1075 1075 emlxs_modsym.fc_fca_detach = NULL;
1076 1076 emlxs_modsym.fc_fca_init = NULL;
1077 1077
1078 1078 return;
1079 1079
1080 1080 } /* emlxs_fca_modclose() */
1081 1081
1082 1082 #endif /* MODSYM_SUPPORT */
1083 1083
1084 1084
1085 1085
1086 1086 /*
1087 1087 * Global driver initialization, called once when driver is loaded
1088 1088 */
1089 1089 int
1090 1090 _init(void)
1091 1091 {
1092 1092 int ret;
1093 1093
1094 1094 /*
1095 1095 * First init call for this driver,
1096 1096 * so initialize the emlxs_dev_ctl structure.
1097 1097 */
1098 1098 bzero(&emlxs_device, sizeof (emlxs_device));
1099 1099
1100 1100 #ifdef MODSYM_SUPPORT
1101 1101 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1102 1102 #endif /* MODSYM_SUPPORT */
1103 1103
1104 1104 mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1105 1105
1106 1106 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1107 1107 emlxs_device.drv_timestamp = ddi_get_time();
1108 1108
1109 1109 for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1110 1110 emlxs_instance[ret] = (uint32_t)-1;
1111 1111 }
1112 1112
1113 1113 /*
1114 1114 * Provide for one ddiinst of the emlxs_dev_ctl structure
1115 1115 * for each possible board in the system.
1116 1116 */
1117 1117 if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1118 1118 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1119 1119 cmn_err(CE_WARN,
1120 1120 "?%s: _init: ddi_soft_state_init failed. rval=%x",
1121 1121 DRIVER_NAME, ret);
1122 1122
1123 1123 return (ret);
1124 1124 }
1125 1125
1126 1126 #ifdef MODSYM_SUPPORT
1127 1127 /* Open SFS */
1128 1128 (void) emlxs_fca_modopen();
1129 1129 #endif /* MODSYM_SUPPORT */
1130 1130
1131 1131 /* Setup devops for SFS */
1132 1132 MODSYM(fc_fca_init)(&emlxs_ops);
1133 1133
1134 1134 if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1135 1135 (void) ddi_soft_state_fini(&emlxs_soft_state);
1136 1136 #ifdef MODSYM_SUPPORT
1137 1137 /* Close SFS */
1138 1138 emlxs_fca_modclose();
1139 1139 #endif /* MODSYM_SUPPORT */
1140 1140
1141 1141 return (ret);
1142 1142 }
1143 1143
1144 1144 #ifdef SAN_DIAG_SUPPORT
1145 1145 mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1146 1146 #endif /* SAN_DIAG_SUPPORT */
1147 1147
1148 1148 return (ret);
1149 1149
1150 1150 } /* _init() */
1151 1151
1152 1152
1153 1153 /*
1154 1154 * Called when driver is unloaded.
1155 1155 */
1156 1156 int
1157 1157 _fini(void)
1158 1158 {
1159 1159 int ret;
1160 1160
1161 1161 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1162 1162 return (ret);
1163 1163 }
1164 1164 #ifdef MODSYM_SUPPORT
1165 1165 /* Close SFS */
1166 1166 emlxs_fca_modclose();
1167 1167 #endif /* MODSYM_SUPPORT */
1168 1168
1169 1169 /*
1170 1170 * Destroy the soft state structure
1171 1171 */
1172 1172 (void) ddi_soft_state_fini(&emlxs_soft_state);
1173 1173
1174 1174 /* Destroy the global device lock */
1175 1175 mutex_destroy(&emlxs_device.lock);
1176 1176
1177 1177 #ifdef SAN_DIAG_SUPPORT
1178 1178 mutex_destroy(&emlxs_sd_bucket_mutex);
1179 1179 #endif /* SAN_DIAG_SUPPORT */
1180 1180
1181 1181 return (ret);
1182 1182
1183 1183 } /* _fini() */
1184 1184
1185 1185
1186 1186
1187 1187 int
1188 1188 _info(struct modinfo *modinfop)
1189 1189 {
1190 1190
1191 1191 return (mod_info(&emlxs_modlinkage, modinfop));
1192 1192
1193 1193 } /* _info() */
1194 1194
1195 1195
1196 1196 /*
1197 1197 * Attach an ddiinst of an emlx host adapter.
1198 1198 * Allocate data structures, initialize the adapter and we're ready to fly.
1199 1199 */
1200 1200 static int
1201 1201 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1202 1202 {
1203 1203 emlxs_hba_t *hba;
1204 1204 int ddiinst;
1205 1205 int emlxinst;
1206 1206 int rval;
1207 1207
1208 1208 switch (cmd) {
1209 1209 case DDI_ATTACH:
1210 1210 /* If successful this will set EMLXS_PM_IN_ATTACH */
1211 1211 rval = emlxs_hba_attach(dip);
1212 1212 break;
1213 1213
1214 1214 case DDI_RESUME:
1215 1215 /* This will resume the driver */
1216 1216 rval = emlxs_hba_resume(dip);
1217 1217 break;
1218 1218
1219 1219 default:
1220 1220 rval = DDI_FAILURE;
1221 1221 }
1222 1222
1223 1223 if (rval == DDI_SUCCESS) {
1224 1224 ddiinst = ddi_get_instance(dip);
1225 1225 emlxinst = emlxs_get_instance(ddiinst);
1226 1226 hba = emlxs_device.hba[emlxinst];
1227 1227
1228 1228 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1229 1229
1230 1230 /* Enable driver dump feature */
1231 1231 mutex_enter(&EMLXS_PORT_LOCK);
1232 1232 hba->flag |= FC_DUMP_SAFE;
1233 1233 mutex_exit(&EMLXS_PORT_LOCK);
1234 1234 }
1235 1235 }
1236 1236
1237 1237 return (rval);
1238 1238
1239 1239 } /* emlxs_attach() */
1240 1240
1241 1241
1242 1242 /*
1243 1243 * Detach/prepare driver to unload (see detach(9E)).
1244 1244 */
1245 1245 static int
1246 1246 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1247 1247 {
1248 1248 emlxs_hba_t *hba;
1249 1249 emlxs_port_t *port;
1250 1250 int ddiinst;
1251 1251 int emlxinst;
1252 1252 int rval;
1253 1253
1254 1254 ddiinst = ddi_get_instance(dip);
1255 1255 emlxinst = emlxs_get_instance(ddiinst);
1256 1256 hba = emlxs_device.hba[emlxinst];
1257 1257
1258 1258 if (hba == NULL) {
1259 1259 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1260 1260
1261 1261 return (DDI_FAILURE);
1262 1262 }
1263 1263
1264 1264 if (hba == (emlxs_hba_t *)-1) {
1265 1265 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1266 1266 DRIVER_NAME);
1267 1267
1268 1268 return (DDI_FAILURE);
1269 1269 }
1270 1270
1271 1271 port = &PPORT;
1272 1272 rval = DDI_SUCCESS;
1273 1273
1274 1274 /* Check driver dump */
1275 1275 mutex_enter(&EMLXS_PORT_LOCK);
1276 1276
1277 1277 if (hba->flag & FC_DUMP_ACTIVE) {
1278 1278 mutex_exit(&EMLXS_PORT_LOCK);
1279 1279
1280 1280 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1281 1281 "detach: Driver busy. Driver dump active.");
1282 1282
1283 1283 return (DDI_FAILURE);
1284 1284 }
1285 1285
1286 1286 #ifdef SFCT_SUPPORT
1287 1287 if ((port->flag & EMLXS_TGT_BOUND) &&
1288 1288 ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1289 1289 (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1290 1290 mutex_exit(&EMLXS_PORT_LOCK);
1291 1291
1292 1292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1293 1293 "detach: Driver busy. Target mode active.");
1294 1294
1295 1295 return (DDI_FAILURE);
1296 1296 }
1297 1297 #endif /* SFCT_SUPPORT */
1298 1298
1299 1299 if (port->flag & EMLXS_INI_BOUND) {
1300 1300 mutex_exit(&EMLXS_PORT_LOCK);
1301 1301
1302 1302 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1303 1303 "detach: Driver busy. Initiator mode active.");
1304 1304
1305 1305 return (DDI_FAILURE);
1306 1306 }
1307 1307
1308 1308 hba->flag &= ~FC_DUMP_SAFE;
1309 1309
1310 1310 mutex_exit(&EMLXS_PORT_LOCK);
1311 1311
1312 1312 switch (cmd) {
1313 1313 case DDI_DETACH:
1314 1314
1315 1315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1316 1316 "DDI_DETACH");
1317 1317
1318 1318 rval = emlxs_hba_detach(dip);
1319 1319
1320 1320 if (rval != DDI_SUCCESS) {
1321 1321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1322 1322 "Unable to detach.");
1323 1323 }
1324 1324 break;
1325 1325
1326 1326 case DDI_SUSPEND:
1327 1327
1328 1328 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1329 1329 "DDI_SUSPEND");
1330 1330
1331 1331 /* Suspend the driver */
1332 1332 rval = emlxs_hba_suspend(dip);
1333 1333
1334 1334 if (rval != DDI_SUCCESS) {
1335 1335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1336 1336 "Unable to suspend driver.");
1337 1337 }
1338 1338 break;
1339 1339
1340 1340 default:
1341 1341 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1342 1342 DRIVER_NAME, cmd);
1343 1343 rval = DDI_FAILURE;
1344 1344 }
1345 1345
1346 1346 if (rval == DDI_FAILURE) {
1347 1347 /* Re-Enable driver dump feature */
1348 1348 mutex_enter(&EMLXS_PORT_LOCK);
1349 1349 hba->flag |= FC_DUMP_SAFE;
1350 1350 mutex_exit(&EMLXS_PORT_LOCK);
1351 1351 }
1352 1352
1353 1353 return (rval);
1354 1354
1355 1355 } /* emlxs_detach() */
1356 1356
1357 1357
1358 1358 /* EMLXS_PORT_LOCK must be held when calling this */
1359 1359 extern void
1360 1360 emlxs_port_init(emlxs_port_t *port)
1361 1361 {
1362 1362 emlxs_hba_t *hba = HBA;
1363 1363
1364 1364 /* Initialize the base node */
1365 1365 bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1366 1366 port->node_base.nlp_Rpi = 0;
1367 1367 port->node_base.nlp_DID = 0xffffff;
1368 1368 port->node_base.nlp_list_next = NULL;
1369 1369 port->node_base.nlp_list_prev = NULL;
1370 1370 port->node_base.nlp_active = 1;
1371 1371 port->node_base.nlp_base = 1;
1372 1372 port->node_count = 0;
1373 1373
1374 1374 if (!(port->flag & EMLXS_PORT_ENABLED)) {
1375 1375 uint8_t dummy_wwn[8] =
1376 1376 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1377 1377
1378 1378 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1379 1379 sizeof (NAME_TYPE));
1380 1380 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1381 1381 sizeof (NAME_TYPE));
1382 1382 }
1383 1383
1384 1384 if (!(port->flag & EMLXS_PORT_CONFIG)) {
1385 1385 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1386 1386 (sizeof (port->snn)-1));
1387 1387 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
1388 1388 (sizeof (port->spn)-1));
1389 1389 }
1390 1390
1391 1391 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1392 1392 sizeof (SERV_PARM));
1393 1393 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1394 1394 sizeof (NAME_TYPE));
1395 1395 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1396 1396 sizeof (NAME_TYPE));
1397 1397
1398 1398 return;
1399 1399
1400 1400 } /* emlxs_port_init() */
1401 1401
1402 1402
1403 1403 void
1404 1404 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1405 1405 {
1406 1406 uint16_t reg;
1407 1407
1408 1408 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1409 1409 return;
1410 1410 }
1411 1411
1412 1412 /* Turn off the Correctable Error Reporting */
1413 1413 /* (the Device Control Register, bit 0). */
1414 1414 reg = ddi_get16(hba->pci_acc_handle,
1415 1415 (uint16_t *)(hba->pci_addr +
1416 1416 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1417 1417 PCIE_DEVCTL));
1418 1418
1419 1419 reg &= ~1;
1420 1420
1421 1421 (void) ddi_put16(hba->pci_acc_handle,
1422 1422 (uint16_t *)(hba->pci_addr +
1423 1423 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1424 1424 PCIE_DEVCTL),
1425 1425 reg);
1426 1426
1427 1427 return;
1428 1428
1429 1429 } /* emlxs_disable_pcie_ce_err() */
1430 1430
1431 1431
1432 1432 /*
1433 1433 * emlxs_fca_bind_port
1434 1434 *
1435 1435 * Arguments:
1436 1436 *
1437 1437 * dip: the dev_info pointer for the ddiinst
1438 1438 * port_info: pointer to info handed back to the transport
1439 1439 * bind_info: pointer to info from the transport
1440 1440 *
1441 1441 * Return values: a port handle for this port, NULL for failure
1442 1442 *
1443 1443 */
1444 1444 static opaque_t
1445 1445 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1446 1446 fc_fca_bind_info_t *bind_info)
1447 1447 {
1448 1448 emlxs_hba_t *hba;
1449 1449 emlxs_port_t *port;
1450 1450 emlxs_port_t *pport;
1451 1451 emlxs_port_t *vport;
1452 1452 int ddiinst;
1453 1453 emlxs_vpd_t *vpd;
1454 1454 emlxs_config_t *cfg;
1455 1455 char *dptr;
1456 1456 char buffer[16];
1457 1457 uint32_t length;
1458 1458 uint32_t len;
1459 1459 char topology[32];
1460 1460 char linkspeed[32];
1461 1461 uint32_t linkstate;
1462 1462
1463 1463 ddiinst = ddi_get_instance(dip);
1464 1464 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1465 1465 port = &PPORT;
1466 1466 pport = &PPORT;
1467 1467
1468 1468 ddiinst = hba->ddiinst;
1469 1469 vpd = &VPD;
1470 1470 cfg = &CFG;
1471 1471
1472 1472 mutex_enter(&EMLXS_PORT_LOCK);
1473 1473
1474 1474 if (bind_info->port_num > 0) {
1475 1475 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1476 1476 if (!(hba->flag & FC_NPIV_ENABLED) ||
1477 1477 !(bind_info->port_npiv) ||
1478 1478 (bind_info->port_num > hba->vpi_max))
1479 1479 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1480 1480 if (!(hba->flag & FC_NPIV_ENABLED) ||
1481 1481 (bind_info->port_num > hba->vpi_high))
1482 1482 #endif
1483 1483 {
1484 1484 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1485 1485 "fca_bind_port: Port %d not supported.",
1486 1486 bind_info->port_num);
1487 1487
1488 1488 mutex_exit(&EMLXS_PORT_LOCK);
1489 1489
1490 1490 port_info->pi_error = FC_OUTOFBOUNDS;
1491 1491 return (NULL);
1492 1492 }
1493 1493 }
1494 1494
1495 1495 /* Get true port pointer */
1496 1496 port = &VPORT(bind_info->port_num);
1497 1497
1498 1498 /* Make sure the port is not already bound to the transport */
1499 1499 if (port->flag & EMLXS_INI_BOUND) {
1500 1500
1501 1501 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1502 1502 "fca_bind_port: Port %d already bound. flag=%x",
1503 1503 bind_info->port_num, port->flag);
1504 1504
1505 1505 mutex_exit(&EMLXS_PORT_LOCK);
1506 1506
1507 1507 port_info->pi_error = FC_ALREADY;
1508 1508 return (NULL);
1509 1509 }
1510 1510
1511 1511 if (!(pport->flag & EMLXS_INI_ENABLED)) {
1512 1512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1513 1513 "fca_bind_port: Physical port does not support "
1514 1514 "initiator mode.");
1515 1515
1516 1516 mutex_exit(&EMLXS_PORT_LOCK);
1517 1517
1518 1518 port_info->pi_error = FC_OUTOFBOUNDS;
1519 1519 return (NULL);
1520 1520 }
1521 1521
1522 1522 /* Make sure port enable flag is set */
1523 1523 /* Just in case fca_port_unbind is called just prior to fca_port_bind */
1524 1524 /* without a driver attach or resume operation */
1525 1525 port->flag |= EMLXS_PORT_ENABLED;
1526 1526
1527 1527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1528 1528 "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1529 1529 bind_info->port_num, port_info, bind_info);
1530 1530
1531 1531 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1532 1532 if (bind_info->port_npiv) {
1533 1533 /* Leadville is telling us about a new virtual port */
1534 1534 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1535 1535 sizeof (NAME_TYPE));
1536 1536 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1537 1537 sizeof (NAME_TYPE));
1538 1538 if (port->snn[0] == 0) {
1539 1539 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1540 1540 (sizeof (port->snn)-1));
1541 1541
1542 1542 }
1543 1543
1544 1544 if (port->spn[0] == 0) {
1545 1545 (void) snprintf((caddr_t)port->spn,
1546 1546 (sizeof (port->spn)-1), "%s VPort-%d",
1547 1547 (caddr_t)hba->spn, port->vpi);
1548 1548 }
1549 1549 port->flag |= EMLXS_PORT_CONFIG;
1550 1550 }
1551 1551 #endif /* >= EMLXS_MODREV5 */
1552 1552
1553 1553 /*
1554 1554 * Restricted login should apply both physical and
1555 1555 * virtual ports.
1556 1556 */
1557 1557 if (cfg[CFG_VPORT_RESTRICTED].current) {
1558 1558 port->flag |= EMLXS_PORT_RESTRICTED;
1559 1559 }
1560 1560
1561 1561 /* Perform generic port initialization */
1562 1562 emlxs_port_init(port);
1563 1563
1564 1564 /* Perform SFS specific initialization */
1565 1565 port->ulp_handle = bind_info->port_handle;
1566 1566 port->ulp_statec_cb = bind_info->port_statec_cb;
1567 1567 port->ulp_unsol_cb = bind_info->port_unsol_cb;
1568 1568
1569 1569 /* Set the bound flag */
1570 1570 port->flag |= EMLXS_INI_BOUND;
1571 1571 hba->num_of_ports++;
1572 1572
1573 1573 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1574 1574 mutex_exit(&EMLXS_PORT_LOCK);
1575 1575 (void) emlxs_vpi_port_bind_notify(port);
1576 1576 mutex_enter(&EMLXS_PORT_LOCK);
1577 1577
1578 1578 linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE)?
1579 1579 FC_LINK_UP:FC_LINK_DOWN;
1580 1580 } else {
1581 1581 linkstate = hba->state;
1582 1582 }
1583 1583
1584 1584 /* Update the port info structure */
1585 1585
1586 1586 /* Set the topology and state */
1587 1587 if (port->mode == MODE_TARGET) {
1588 1588 port_info->pi_port_state = FC_STATE_OFFLINE;
1589 1589 port_info->pi_topology = FC_TOP_UNKNOWN;
1590 1590 } else if ((linkstate < FC_LINK_UP) ||
1591 1591 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
1592 1592 !(hba->flag & FC_NPIV_SUPPORTED)))) {
1593 1593 port_info->pi_port_state = FC_STATE_OFFLINE;
1594 1594 port_info->pi_topology = FC_TOP_UNKNOWN;
1595 1595 }
1596 1596 #ifdef MENLO_SUPPORT
1597 1597 else if (hba->flag & FC_MENLO_MODE) {
1598 1598 port_info->pi_port_state = FC_STATE_OFFLINE;
1599 1599 port_info->pi_topology = FC_TOP_UNKNOWN;
1600 1600 }
1601 1601 #endif /* MENLO_SUPPORT */
1602 1602 else {
1603 1603 /* Check for loop topology */
1604 1604 if (hba->topology == TOPOLOGY_LOOP) {
1605 1605 port_info->pi_port_state = FC_STATE_LOOP;
1606 1606 (void) strlcpy(topology, ", loop", sizeof (topology));
1607 1607
1608 1608 if (hba->flag & FC_FABRIC_ATTACHED) {
1609 1609 port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1610 1610 } else {
1611 1611 port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1612 1612 }
1613 1613 } else {
1614 1614 port_info->pi_topology = FC_TOP_FABRIC;
1615 1615 port_info->pi_port_state = FC_STATE_ONLINE;
1616 1616 (void) strlcpy(topology, ", fabric", sizeof (topology));
1617 1617 }
1618 1618
1619 1619 /* Set the link speed */
1620 1620 switch (hba->linkspeed) {
1621 1621 case 0:
1622 1622 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1623 1623 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1624 1624 break;
1625 1625
1626 1626 case LA_1GHZ_LINK:
1627 1627 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1628 1628 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1629 1629 break;
1630 1630 case LA_2GHZ_LINK:
1631 1631 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1632 1632 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1633 1633 break;
1634 1634 case LA_4GHZ_LINK:
1635 1635 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1636 1636 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1637 1637 break;
1638 1638 case LA_8GHZ_LINK:
1639 1639 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1640 1640 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1641 1641 break;
1642 1642 case LA_10GHZ_LINK:
1643 1643 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1644 1644 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1645 1645 break;
1646 1646 case LA_16GHZ_LINK:
1647 1647 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1648 1648 port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
1649 1649 break;
1650 1650 default:
1651 1651 (void) snprintf(linkspeed, sizeof (linkspeed),
1652 1652 "unknown(0x%x)", hba->linkspeed);
1653 1653 break;
1654 1654 }
1655 1655
1656 1656 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
1657 1657 /* Adjusting port context for link up messages */
1658 1658 vport = port;
1659 1659 port = &PPORT;
1660 1660 if (vport->vpi == 0) {
1661 1661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1662 1662 "%s%s, initiator",
1663 1663 linkspeed, topology);
1664 1664 } else if (!(hba->flag & FC_NPIV_LINKUP)) {
1665 1665 hba->flag |= FC_NPIV_LINKUP;
1666 1666 EMLXS_MSGF(EMLXS_CONTEXT,
1667 1667 &emlxs_npiv_link_up_msg,
1668 1668 "%s%s, initiator", linkspeed, topology);
1669 1669 }
1670 1670 port = vport;
1671 1671 }
1672 1672 }
1673 1673
1674 1674 /* PCIE Correctable Error Reporting workaround */
1675 1675 if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1676 1676 (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1677 1677 (bind_info->port_num == 0)) {
1678 1678 emlxs_disable_pcie_ce_err(hba);
1679 1679 }
1680 1680
1681 1681 /* Save initial state */
1682 1682 port->ulp_statec = port_info->pi_port_state;
1683 1683
1684 1684 /*
1685 1685 * The transport needs a copy of the common service parameters
1686 1686 * for this port. The transport can get any updates through
1687 1687 * the getcap entry point.
1688 1688 */
1689 1689 bcopy((void *) &port->sparam,
1690 1690 (void *) &port_info->pi_login_params.common_service,
1691 1691 sizeof (SERV_PARM));
1692 1692
1693 1693 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1694 1694 /* Swap the service parameters for ULP */
1695 1695 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1696 1696 common_service);
1697 1697 #endif /* EMLXS_MODREV2X */
1698 1698
1699 1699 port_info->pi_login_params.common_service.btob_credit = 0xffff;
1700 1700
1701 1701 bcopy((void *) &port->wwnn,
1702 1702 (void *) &port_info->pi_login_params.node_ww_name,
1703 1703 sizeof (NAME_TYPE));
1704 1704
1705 1705 bcopy((void *) &port->wwpn,
1706 1706 (void *) &port_info->pi_login_params.nport_ww_name,
1707 1707 sizeof (NAME_TYPE));
1708 1708
1709 1709 /*
1710 1710 * We need to turn off CLASS2 support.
1711 1711 * Otherwise, FC transport will use CLASS2 as default class
1712 1712 * and never try with CLASS3.
1713 1713 */
1714 1714 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1715 1715 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1716 1716 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1717 1717 port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1718 1718 }
1719 1719
1720 1720 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1721 1721 port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1722 1722 }
1723 1723 #else /* EMLXS_SPARC or EMLXS_MODREV2X */
1724 1724 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1725 1725 port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1726 1726 }
1727 1727
1728 1728 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1729 1729 port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1730 1730 }
1731 1731 #endif /* >= EMLXS_MODREV3X */
1732 1732 #endif /* >= EMLXS_MODREV3 */
1733 1733
1734 1734
1735 1735 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1736 1736 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1737 1737 port_info->pi_login_params.class_1.data[0] &= ~0x80;
1738 1738 }
1739 1739
1740 1740 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1741 1741 port_info->pi_login_params.class_2.data[0] &= ~0x80;
1742 1742 }
1743 1743 #endif /* <= EMLXS_MODREV2 */
1744 1744
1745 1745 /* Additional parameters */
1746 1746 port_info->pi_s_id.port_id = port->did;
1747 1747 port_info->pi_s_id.priv_lilp_posit = 0;
1748 1748 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1749 1749
1750 1750 /* Initialize the RNID parameters */
1751 1751 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1752 1752
1753 1753 (void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
1754 1754 (sizeof (port_info->pi_rnid_params.params.global_id)-1),
1755 1755 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1756 1756 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1757 1757 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1758 1758 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1759 1759
1760 1760 port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1761 1761 port_info->pi_rnid_params.params.port_id = port->did;
1762 1762 port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1763 1763
1764 1764 /* Initialize the port attributes */
1765 1765 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1766 1766
1767 1767 (void) strncpy(port_info->pi_attrs.manufacturer, "Emulex",
1768 1768 (sizeof (port_info->pi_attrs.manufacturer)-1));
1769 1769
1770 1770 port_info->pi_rnid_params.status = FC_SUCCESS;
1771 1771
1772 1772 (void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
1773 1773 (sizeof (port_info->pi_attrs.serial_number)-1));
1774 1774
1775 1775 (void) snprintf(port_info->pi_attrs.firmware_version,
1776 1776 (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
1777 1777 vpd->fw_version, vpd->fw_label);
1778 1778
1779 1779 #ifdef EMLXS_I386
1780 1780 (void) snprintf(port_info->pi_attrs.option_rom_version,
1781 1781 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1782 1782 "Boot:%s", vpd->boot_version);
1783 1783 #else /* EMLXS_SPARC */
1784 1784 (void) snprintf(port_info->pi_attrs.option_rom_version,
1785 1785 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1786 1786 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1787 1787 #endif /* EMLXS_I386 */
1788 1788
1789 1789 (void) snprintf(port_info->pi_attrs.driver_version,
1790 1790 (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
1791 1791 emlxs_version, emlxs_revision);
1792 1792
1793 1793 (void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
1794 1794 (sizeof (port_info->pi_attrs.driver_name)-1));
1795 1795
1796 1796 port_info->pi_attrs.vendor_specific_id =
1797 1797 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1798 1798
1799 1799 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1800 1800
1801 1801 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1802 1802
1803 1803 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1804 1804 port_info->pi_rnid_params.params.num_attached = 0;
1805 1805
1806 1806 if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
1807 1807 uint8_t byte;
1808 1808 uint8_t *wwpn;
1809 1809 uint32_t i;
1810 1810 uint32_t j;
1811 1811
1812 1812 /* Copy the WWPN as a string into the local buffer */
1813 1813 wwpn = (uint8_t *)&hba->wwpn;
1814 1814 for (i = 0; i < 16; i++) {
1815 1815 byte = *wwpn++;
1816 1816 j = ((byte & 0xf0) >> 4);
1817 1817 if (j <= 9) {
1818 1818 buffer[i] =
1819 1819 (char)((uint8_t)'0' + (uint8_t)j);
1820 1820 } else {
1821 1821 buffer[i] =
1822 1822 (char)((uint8_t)'A' + (uint8_t)(j -
1823 1823 10));
1824 1824 }
1825 1825
1826 1826 i++;
1827 1827 j = (byte & 0xf);
1828 1828 if (j <= 9) {
1829 1829 buffer[i] =
1830 1830 (char)((uint8_t)'0' + (uint8_t)j);
1831 1831 } else {
1832 1832 buffer[i] =
1833 1833 (char)((uint8_t)'A' + (uint8_t)(j -
1834 1834 10));
1835 1835 }
1836 1836 }
1837 1837
1838 1838 port_info->pi_attrs.hba_fru_details.port_index = 0;
1839 1839 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1840 1840
1841 1841 } else if (hba->flag & FC_NPIV_ENABLED) {
1842 1842 uint8_t byte;
1843 1843 uint8_t *wwpn;
1844 1844 uint32_t i;
1845 1845 uint32_t j;
1846 1846
1847 1847 /* Copy the WWPN as a string into the local buffer */
1848 1848 wwpn = (uint8_t *)&hba->wwpn;
1849 1849 for (i = 0; i < 16; i++) {
1850 1850 byte = *wwpn++;
1851 1851 j = ((byte & 0xf0) >> 4);
1852 1852 if (j <= 9) {
1853 1853 buffer[i] =
1854 1854 (char)((uint8_t)'0' + (uint8_t)j);
1855 1855 } else {
1856 1856 buffer[i] =
1857 1857 (char)((uint8_t)'A' + (uint8_t)(j -
1858 1858 10));
1859 1859 }
1860 1860
1861 1861 i++;
1862 1862 j = (byte & 0xf);
1863 1863 if (j <= 9) {
1864 1864 buffer[i] =
1865 1865 (char)((uint8_t)'0' + (uint8_t)j);
1866 1866 } else {
1867 1867 buffer[i] =
1868 1868 (char)((uint8_t)'A' + (uint8_t)(j -
1869 1869 10));
1870 1870 }
1871 1871 }
1872 1872
1873 1873 port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1874 1874 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1875 1875
1876 1876 } else {
1877 1878 		/* Copy the serial number string (rightmost 16 chars) */
1878 1878 		/* into the right-justified local buffer */
1879 1879 bzero(buffer, sizeof (buffer));
1880 1880 length = strlen(vpd->serial_num);
1881 1881 len = (length > 16) ? 16 : length;
1882 1882 bcopy(&vpd->serial_num[(length - len)],
1883 1883 &buffer[(sizeof (buffer) - len)], len);
1884 1884
1885 1885 port_info->pi_attrs.hba_fru_details.port_index =
1886 1886 vpd->port_index;
1887 1887 }
1888 1888
1889 1889 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1890 1890 dptr[0] = buffer[0];
1891 1891 dptr[1] = buffer[1];
1892 1892 dptr[2] = buffer[2];
1893 1893 dptr[3] = buffer[3];
1894 1894 dptr[4] = buffer[4];
1895 1895 dptr[5] = buffer[5];
1896 1896 dptr[6] = buffer[6];
1897 1897 dptr[7] = buffer[7];
1898 1898 port_info->pi_attrs.hba_fru_details.high =
1899 1899 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1900 1900
1901 1901 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1902 1902 dptr[0] = buffer[8];
1903 1903 dptr[1] = buffer[9];
1904 1904 dptr[2] = buffer[10];
1905 1905 dptr[3] = buffer[11];
1906 1906 dptr[4] = buffer[12];
1907 1907 dptr[5] = buffer[13];
1908 1908 dptr[6] = buffer[14];
1909 1909 dptr[7] = buffer[15];
1910 1910 port_info->pi_attrs.hba_fru_details.low =
1911 1911 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1912 1912
1913 1913 #endif /* >= EMLXS_MODREV3 */
1914 1914
1915 1915 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1916 1916 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1917 1917 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1918 1918 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1919 1919 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1920 1920 #endif /* >= EMLXS_MODREV4 */
1921 1921
1922 1922 (void) snprintf(port_info->pi_attrs.hardware_version,
1923 1923 (sizeof (port_info->pi_attrs.hardware_version)-1),
1924 1924 "%x", vpd->biuRev);
1925 1925
1926 1926 /* Set the hba speed limit */
1927 1927 if (vpd->link_speed & LMT_16GB_CAPABLE) {
1928 1928 port_info->pi_attrs.supported_speed |=
1929 1929 FC_HBA_PORTSPEED_16GBIT;
1930 1930 }
1931 1931 if (vpd->link_speed & LMT_10GB_CAPABLE) {
1932 1932 port_info->pi_attrs.supported_speed |=
1933 1933 FC_HBA_PORTSPEED_10GBIT;
1934 1934 }
1935 1935 if (vpd->link_speed & LMT_8GB_CAPABLE) {
1936 1936 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1937 1937 }
1938 1938 if (vpd->link_speed & LMT_4GB_CAPABLE) {
1939 1939 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1940 1940 }
1941 1941 if (vpd->link_speed & LMT_2GB_CAPABLE) {
1942 1942 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1943 1943 }
1944 1944 if (vpd->link_speed & LMT_1GB_CAPABLE) {
1945 1945 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1946 1946 }
1947 1947
1948 1948 /* Set the hba model info */
1949 1949 (void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
1950 1950 (sizeof (port_info->pi_attrs.model)-1));
1951 1951 (void) strncpy(port_info->pi_attrs.model_description,
1952 1952 hba->model_info.model_desc,
1953 1953 (sizeof (port_info->pi_attrs.model_description)-1));
1954 1954
1955 1955
1956 1956 /* Log information */
1957 1957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1958 1958 "Bind info: port_num = %d", bind_info->port_num);
1959 1959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 1960 "Bind info: port_handle = %p", bind_info->port_handle);
1961 1961
1962 1962 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1963 1963 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1964 1964 "Bind info: port_npiv = %d", bind_info->port_npiv);
1965 1965 #endif /* >= EMLXS_MODREV5 */
1966 1966
1967 1967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1968 1968 "Port info: pi_topology = %x", port_info->pi_topology);
1969 1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1970 1970 "Port info: pi_error = %x", port_info->pi_error);
1971 1971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 1972 "Port info: pi_port_state = %x", port_info->pi_port_state);
1973 1973
1974 1974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 1975 "Port info: port_id = %x", port_info->pi_s_id.port_id);
1976 1976 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1977 1977 "Port info: priv_lilp_posit = %x",
1978 1978 port_info->pi_s_id.priv_lilp_posit);
1979 1979
1980 1980 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 1981 "Port info: hard_addr = %x",
1982 1982 port_info->pi_hard_addr.hard_addr);
1983 1983
1984 1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 1985 "Port info: rnid.status = %x",
1986 1986 port_info->pi_rnid_params.status);
1987 1987 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 1988 "Port info: rnid.global_id = %16s",
1989 1989 port_info->pi_rnid_params.params.global_id);
1990 1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 1991 "Port info: rnid.unit_type = %x",
1992 1992 port_info->pi_rnid_params.params.unit_type);
1993 1993 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1994 1994 "Port info: rnid.port_id = %x",
1995 1995 port_info->pi_rnid_params.params.port_id);
1996 1996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1997 1997 "Port info: rnid.num_attached = %x",
1998 1998 port_info->pi_rnid_params.params.num_attached);
1999 1999 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2000 2000 "Port info: rnid.ip_version = %x",
2001 2001 port_info->pi_rnid_params.params.ip_version);
2002 2002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2003 2003 "Port info: rnid.udp_port = %x",
2004 2004 port_info->pi_rnid_params.params.udp_port);
2005 2005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2006 2006 "Port info: rnid.ip_addr = %16s",
2007 2007 port_info->pi_rnid_params.params.ip_addr);
2008 2008 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2009 2009 "Port info: rnid.spec_id_resv = %x",
2010 2010 port_info->pi_rnid_params.params.specific_id_resv);
2011 2011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2012 2012 "Port info: rnid.topo_flags = %x",
2013 2013 port_info->pi_rnid_params.params.topo_flags);
2014 2014
2015 2015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2016 2016 "Port info: manufacturer = %s",
2017 2017 port_info->pi_attrs.manufacturer);
2018 2018 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2019 2019 "Port info: serial_num = %s",
2020 2020 port_info->pi_attrs.serial_number);
2021 2021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 2022 "Port info: model = %s", port_info->pi_attrs.model);
2023 2023 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2024 2024 "Port info: model_description = %s",
2025 2025 port_info->pi_attrs.model_description);
2026 2026 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2027 2027 "Port info: hardware_version = %s",
2028 2028 port_info->pi_attrs.hardware_version);
2029 2029 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2030 2030 "Port info: driver_version = %s",
2031 2031 port_info->pi_attrs.driver_version);
2032 2032 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2033 2033 "Port info: option_rom_version = %s",
2034 2034 port_info->pi_attrs.option_rom_version);
2035 2035 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2036 2036 "Port info: firmware_version = %s",
2037 2037 port_info->pi_attrs.firmware_version);
2038 2038 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2039 2039 "Port info: driver_name = %s",
2040 2040 port_info->pi_attrs.driver_name);
2041 2041 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2042 2042 "Port info: vendor_specific_id = %x",
2043 2043 port_info->pi_attrs.vendor_specific_id);
2044 2044 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2045 2045 "Port info: supported_cos = %x",
2046 2046 port_info->pi_attrs.supported_cos);
2047 2047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2048 2048 "Port info: supported_speed = %x",
2049 2049 port_info->pi_attrs.supported_speed);
2050 2050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2051 2051 "Port info: max_frame_size = %x",
2052 2052 port_info->pi_attrs.max_frame_size);
2053 2053
2054 2054 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2055 2055 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2056 2056 "Port info: fru_port_index = %x",
2057 2057 port_info->pi_attrs.hba_fru_details.port_index);
2058 2058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2059 2059 "Port info: fru_high = %llx",
2060 2060 port_info->pi_attrs.hba_fru_details.high);
2061 2061 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2062 2062 "Port info: fru_low = %llx",
2063 2063 port_info->pi_attrs.hba_fru_details.low);
2064 2064 #endif /* >= EMLXS_MODREV3 */
2065 2065
2066 2066 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2067 2067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2068 2068 "Port info: sym_node_name = %s",
2069 2069 port_info->pi_attrs.sym_node_name);
2070 2070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2071 2071 "Port info: sym_port_name = %s",
2072 2072 port_info->pi_attrs.sym_port_name);
2073 2073 #endif /* >= EMLXS_MODREV4 */
2074 2074
2075 2075 mutex_exit(&EMLXS_PORT_LOCK);
2076 2076
2077 2077 #ifdef SFCT_SUPPORT
2078 2078 if (port->flag & EMLXS_TGT_ENABLED) {
2079 2079 emlxs_fct_bind_port(port);
2080 2080 }
2081 2081 #endif /* SFCT_SUPPORT */
2082 2082
2083 2083 return ((opaque_t)port);
2084 2084
2085 2085 } /* emlxs_fca_bind_port() */
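
/*
 * Editorial sketch (not from the driver source): a minimal userland
 * illustration of the FRU-detail packing performed in
 * emlxs_fca_bind_port() above -- eight WWPN bytes become sixteen hex
 * characters (or the serial number is right-justified into the same
 * 16-character buffer), and the first and last eight characters are
 * packed into the hba_fru_details.high/low words.  The lookup-table
 * conversion is an equivalent, compact form of the '0'/'A' arithmetic
 * used above, and LE_SWAP64() is omitted.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	const char hex[] = "0123456789ABCDEF";
	char buffer[16];
	uint64_t high = 0, low = 0;
	int i;

	/* Two hex characters per WWPN byte: 8 bytes -> 16 characters */
	for (i = 0; i < 8; i++) {
		buffer[2 * i] = hex[(wwpn[i] & 0xf0) >> 4];
		buffer[2 * i + 1] = hex[wwpn[i] & 0x0f];
	}

	/* Characters 0-7 feed .high, characters 8-15 feed .low */
	(void) memcpy(&high, &buffer[0], 8);
	(void) memcpy(&low, &buffer[8], 8);

	(void) printf("buffer=%.16s high=%016llx low=%016llx\n", buffer,
	    (unsigned long long)high, (unsigned long long)low);
	return (0);
}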
2086 2086
2087 2087
2088 2088 static void
2089 2089 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2090 2090 {
2091 2091 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2092 2092 emlxs_hba_t *hba = HBA;
2093 2093
2094 2094 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2095 2095 "fca_unbind_port: port=%p", port);
2096 2096
2097 2097 if (!(port->flag & EMLXS_PORT_BOUND)) {
2098 2098 return;
2099 2099 }
2100 2100
2101 2101 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2102 2102 (void) emlxs_vpi_port_unbind_notify(port, 1);
2103 2103 }
2104 2104
2105 2105 /* Destroy & flush all port nodes, if they exist */
2106 2106 if (port->node_count) {
2107 2107 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2108 2108 }
2109 2109
2110 2110 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2111 2111 if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2112 2112 (hba->flag & FC_NPIV_ENABLED) &&
2113 2113 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
2114 2114 (void) emlxs_mb_unreg_vpi(port);
2115 2115 }
2116 2116 #endif
2117 2117
2118 2118 mutex_enter(&EMLXS_PORT_LOCK);
2119 2119 if (port->flag & EMLXS_INI_BOUND) {
2120 2120 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2121 2121 port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
2122 2122 #endif
2123 2123 port->flag &= ~EMLXS_INI_BOUND;
2124 2124 hba->num_of_ports--;
2125 2125
2126 2126 /* Wait until ulp callback interface is idle */
2127 2127 while (port->ulp_busy) {
2128 2128 mutex_exit(&EMLXS_PORT_LOCK);
2129 2129 delay(drv_usectohz(500000));
2130 2130 mutex_enter(&EMLXS_PORT_LOCK);
2131 2131 }
2132 2132
2133 2133 port->ulp_handle = 0;
2134 2134 port->ulp_statec = FC_STATE_OFFLINE;
2135 2135 port->ulp_statec_cb = NULL;
2136 2136 port->ulp_unsol_cb = NULL;
2137 2137 }
2138 2138 mutex_exit(&EMLXS_PORT_LOCK);
2139 2139
2140 2140 #ifdef SFCT_SUPPORT
2141 2141 /* Check if port was target bound */
2142 2142 if (port->flag & EMLXS_TGT_BOUND) {
2143 2143 emlxs_fct_unbind_port(port);
2144 2144 }
2145 2145 #endif /* SFCT_SUPPORT */
2146 2146
2147 2147 return;
2148 2148
2149 2149 } /* emlxs_fca_unbind_port() */
2150 2150
2151 2151
2152 2152 /*ARGSUSED*/
2153 2153 extern int
2154 2154 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2155 2155 {
2156 2156 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2157 2157 emlxs_hba_t *hba = HBA;
2158 2158 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2159 2159
2160 2160 if (!sbp) {
2161 2161 return (FC_FAILURE);
2162 2162 }
2163 2163 bzero((void *)sbp, sizeof (emlxs_buf_t));
2164 2164
2165 2165 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2166 2166 sbp->pkt_flags =
2167 2167 PACKET_VALID | PACKET_ULP_OWNED;
2168 2168 sbp->port = port;
2169 2169 sbp->pkt = pkt;
2170 2170 sbp->iocbq.sbp = sbp;
2171 2171
2172 2172 return (FC_SUCCESS);
2173 2173
2174 2174 } /* emlxs_fca_pkt_init() */
2175 2175
2176 2176
2177 2177
2178 2178 static void
2179 2179 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2180 2180 {
2181 2181 emlxs_hba_t *hba = HBA;
2182 2182 emlxs_config_t *cfg = &CFG;
2183 2183 fc_packet_t *pkt = PRIV2PKT(sbp);
2184 2184
2185 2185 mutex_enter(&sbp->mtx);
2186 2186
2187 2187 /* Reinitialize */
2188 2188 sbp->pkt = pkt;
2189 2189 sbp->port = port;
2190 2190 sbp->bmp = NULL;
2191 2191 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2192 2192 sbp->iotag = 0;
2193 2193 sbp->ticks = 0;
2194 2194 sbp->abort_attempts = 0;
2195 2195 sbp->fpkt = NULL;
2196 2196 sbp->flush_count = 0;
2197 2197 sbp->next = NULL;
2198 2198
2199 2199 if (port->mode == MODE_INITIATOR) {
2200 2200 sbp->node = NULL;
2201 2201 sbp->did = 0;
2202 2202 sbp->lun = EMLXS_LUN_NONE;
2203 2203 sbp->class = 0;
2204 2204 sbp->channel = NULL;
2205 2205 }
2206 2206
2207 2207 bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2208 2208 sbp->iocbq.sbp = sbp;
2209 2209
2210 2210 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2211 2211 ddi_in_panic()) {
2212 2212 sbp->pkt_flags |= PACKET_POLLED;
2213 2213 }
2214 2214
2215 2215 /* Prepare the fc packet */
2216 2216 pkt->pkt_state = FC_PKT_SUCCESS;
2217 2217 pkt->pkt_reason = 0;
2218 2218 pkt->pkt_action = 0;
2219 2219 pkt->pkt_expln = 0;
2220 2220 pkt->pkt_data_resid = 0;
2221 2221 pkt->pkt_resp_resid = 0;
2222 2222
2223 2223 	/* Make sure all pkts have a proper timeout */
2224 2224 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2225 2225 /* This disables all IOCB on chip timeouts */
2226 2226 pkt->pkt_timeout = 0x80000000;
2227 2227 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2228 2228 pkt->pkt_timeout = 60;
2229 2229 }
2230 2230
2231 2231 /* Clear the response buffer */
2232 2232 if (pkt->pkt_rsplen) {
2233 2233 bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2234 2234 }
2235 2235
2236 2236 mutex_exit(&sbp->mtx);
2237 2237
2238 2238 return;
2239 2239
2240 2240 } /* emlxs_initialize_pkt() */
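
/*
 * Editorial sketch (not from the driver source): the packet timeout
 * normalization applied in emlxs_initialize_pkt() above, pulled into a
 * small helper.  "timeout_enable" stands in for
 * cfg[CFG_TIMEOUT_ENABLE].current; 0x80000000 is the value the driver
 * uses to effectively disable on-chip IOCB timeouts.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
normalize_timeout(uint32_t pkt_timeout, int timeout_enable)
{
	if (!timeout_enable)
		return (0x80000000);	/* disable on-chip timeouts */

	if (pkt_timeout == 0 || pkt_timeout == 0xffffffff)
		return (60);		/* default to a 60 second timeout */

	return (pkt_timeout);
}

int
main(void)
{
	(void) printf("%x %u %u\n", normalize_timeout(30, 0),
	    normalize_timeout(0, 1), normalize_timeout(45, 1));
	return (0);
}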
2241 2241
2242 2242
2243 2243
2244 2244 /*
2245 2245 * We may not need this routine
2246 2246 */
2247 2247 /*ARGSUSED*/
2248 2248 extern int
2249 2249 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2250 2250 {
2251 2251 emlxs_buf_t *sbp = PKT2PRIV(pkt);
2252 2252
2253 2253 if (!sbp) {
2254 2254 return (FC_FAILURE);
2255 2255 }
2256 2256
2257 2257 if (!(sbp->pkt_flags & PACKET_VALID)) {
2258 2258 return (FC_FAILURE);
2259 2259 }
2260 2260 sbp->pkt_flags &= ~PACKET_VALID;
2261 2261 mutex_destroy(&sbp->mtx);
2262 2262
2263 2263 return (FC_SUCCESS);
2264 2264
2265 2265 } /* emlxs_fca_pkt_uninit() */
2266 2266
2267 2267
2268 2268 static int
2269 2269 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2270 2270 {
2271 2271 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2272 2272 emlxs_hba_t *hba = HBA;
2273 2273 int32_t rval;
2274 2274 emlxs_config_t *cfg = &CFG;
2275 2275
2276 2276 if (!(port->flag & EMLXS_INI_BOUND)) {
2277 2277 return (FC_CAP_ERROR);
2278 2278 }
2279 2279
2280 2280 if (strcmp(cap, FC_NODE_WWN) == 0) {
2281 2281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2282 2282 "fca_get_cap: FC_NODE_WWN");
2283 2283
2284 2284 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2285 2285 rval = FC_CAP_FOUND;
2286 2286
2287 2287 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2288 2288 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2289 2289 "fca_get_cap: FC_LOGIN_PARAMS");
2290 2290
2291 2291 /*
2292 2292 * We need to turn off CLASS2 support.
2293 2293 * Otherwise, FC transport will use CLASS2 as default class
2294 2294 * and never try with CLASS3.
2295 2295 */
2296 2296 hba->sparam.cls2.classValid = 0;
2297 2297
2298 2298 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2299 2299
2300 2300 rval = FC_CAP_FOUND;
2301 2301
2302 2302 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2303 2303 int32_t *num_bufs;
2304 2304
2305 2305 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2306 2306 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2307 2307 cfg[CFG_UB_BUFS].current);
2308 2308
2309 2309 num_bufs = (int32_t *)ptr;
2310 2310
2311 2311 /* We multiply by MAX_VPORTS because ULP uses a */
2312 2312 /* formula to calculate ub bufs from this */
2313 2313 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2314 2314
2315 2315 rval = FC_CAP_FOUND;
2316 2316
2317 2317 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2318 2318 int32_t *size;
2319 2319
2320 2320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2321 2321 "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2322 2322
2323 2323 size = (int32_t *)ptr;
2324 2324 *size = -1;
2325 2325 rval = FC_CAP_FOUND;
2326 2326
2327 2327 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2328 2328 fc_reset_action_t *action;
2329 2329
2330 2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2331 2331 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2332 2332
2333 2333 action = (fc_reset_action_t *)ptr;
2334 2334 *action = FC_RESET_RETURN_ALL;
2335 2335 rval = FC_CAP_FOUND;
2336 2336
2337 2337 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2338 2338 fc_dma_behavior_t *behavior;
2339 2339
2340 2340 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2341 2341 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2342 2342
2343 2343 behavior = (fc_dma_behavior_t *)ptr;
2344 2344 *behavior = FC_ALLOW_STREAMING;
2345 2345 rval = FC_CAP_FOUND;
2346 2346
2347 2347 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2348 2348 fc_fcp_dma_t *fcp_dma;
2349 2349
2350 2350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2351 2351 "fca_get_cap: FC_CAP_FCP_DMA");
2352 2352
2353 2353 fcp_dma = (fc_fcp_dma_t *)ptr;
2354 2354 *fcp_dma = FC_DVMA_SPACE;
2355 2355 rval = FC_CAP_FOUND;
2356 2356
2357 2357 } else {
2358 2358 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2359 2359 "fca_get_cap: Unknown capability. [%s]", cap);
2360 2360
2361 2361 rval = FC_CAP_ERROR;
2362 2362
2363 2363 }
2364 2364
2365 2365 return (rval);
2366 2366
2367 2367 } /* emlxs_fca_get_cap() */
2368 2368
2369 2369
2370 2370
2371 2371 static int
2372 2372 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2373 2373 {
2374 2374 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2375 2375
2376 2376 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2377 2377 "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2378 2378
2379 2379 return (FC_CAP_ERROR);
2380 2380
2381 2381 } /* emlxs_fca_set_cap() */
2382 2382
2383 2383
2384 2384 static opaque_t
2385 2385 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2386 2386 {
2387 2387 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2388 2388
2389 2389 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2390 2390 "fca_get_device: did=%x", d_id.port_id);
2391 2391
2392 2392 return (NULL);
2393 2393
2394 2394 } /* emlxs_fca_get_device() */
2395 2395
2396 2396
2397 2397 static int32_t
2398 2398 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2399 2399 {
2400 2400 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2401 2401
2402 2402 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2403 2403 cmd);
2404 2404
2405 2405 return (FC_SUCCESS);
2406 2406
2407 2407 } /* emlxs_fca_notify */
2408 2408
2409 2409
2410 2410
2411 2411 static int
2412 2412 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2413 2413 {
2414 2414 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2415 2415 emlxs_hba_t *hba = HBA;
2416 2416 uint32_t lilp_length;
2417 2417
2418 2418 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2419 2419 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2420 2420 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2421 2421 port->alpa_map[3], port->alpa_map[4]);
2422 2422
2423 2423 if (!(port->flag & EMLXS_INI_BOUND)) {
2424 2424 return (FC_NOMAP);
2425 2425 }
2426 2426
2427 2427 if (hba->topology != TOPOLOGY_LOOP) {
2428 2428 return (FC_NOMAP);
2429 2429 }
2430 2430
2431 2431 /* Check if alpa map is available */
2432 2432 if (port->alpa_map[0] != 0) {
2433 2433 mapbuf->lilp_magic = MAGIC_LILP;
2434 2434 } else { /* No LILP map available */
2435 2435
2436 2436 /* Set lilp_magic to MAGIC_LISA and this will */
2437 2437 /* trigger an ALPA scan in ULP */
2438 2438 mapbuf->lilp_magic = MAGIC_LISA;
2439 2439 }
2440 2440
2441 2441 mapbuf->lilp_myalpa = port->did;
2442 2442
2443 2443 /* The first byte of the alpa_map is the lilp map length */
2444 2444 /* Add one to include the lilp length byte itself */
2445 2445 lilp_length = (uint32_t)port->alpa_map[0] + 1;
2446 2446
2447 2447 /* Make sure the max transfer is 128 bytes */
2448 2448 if (lilp_length > 128) {
2449 2449 lilp_length = 128;
2450 2450 }
2451 2451
2452 2452 /* We start copying from the lilp_length field */
2453 2453 /* in order to get a word aligned address */
2454 2454 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2455 2455 lilp_length);
2456 2456
2457 2457 return (FC_SUCCESS);
2458 2458
2459 2459 } /* emlxs_fca_get_map() */
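
/*
 * Editorial sketch (not from the driver source): the length-prefixed
 * AL_PA map handling in emlxs_fca_get_map() above.  Byte 0 of alpa_map
 * holds the number of loop entries, so count + 1 bytes (capped at 128)
 * are copied starting at the map's length field.  struct lilp_sketch
 * is a simplified stand-in for the real fc_lilpmap_t.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct lilp_sketch {
	uint8_t lilp_length;
	uint8_t lilp_alpalist[127];
};

int
main(void)
{
	uint8_t alpa_map[128] = { 3, 0x01, 0x02, 0xe8 };  /* 3 loop ports */
	struct lilp_sketch map;
	uint32_t lilp_length = (uint32_t)alpa_map[0] + 1;
	uint32_t i;

	if (lilp_length > 128)
		lilp_length = 128;	/* max transfer is 128 bytes */

	(void) memset(&map, 0, sizeof (map));
	(void) memcpy(&map.lilp_length, alpa_map, lilp_length);

	(void) printf("entries=%u:", map.lilp_length);
	for (i = 0; i < map.lilp_length; i++)
		(void) printf(" %02x", map.lilp_alpalist[i]);
	(void) printf("\n");
	return (0);
}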
2460 2460
2461 2461
2462 2462
2463 2463 extern int
2464 2464 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2465 2465 {
2466 2466 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2467 2467 emlxs_hba_t *hba = HBA;
2468 2468 emlxs_buf_t *sbp;
2469 2469 uint32_t rval;
2470 2470 uint32_t pkt_flags;
2471 2471
2472 2472 /* Validate packet */
2473 2473 sbp = PKT2PRIV(pkt);
2474 2474
2475 2475 /* Make sure adapter is online */
2476 2476 if (!(hba->flag & FC_ONLINE_MODE) &&
2477 2477 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2478 2478 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2479 2479 "Adapter offline.");
2480 2480
2481 2481 rval = (hba->flag & FC_ONLINING_MODE) ?
2482 2482 FC_TRAN_BUSY : FC_OFFLINE;
2483 2483 return (rval);
2484 2484 }
2485 2485
2486 2486 /* Make sure ULP was told that the port was online */
2487 2487 if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2488 2488 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2489 2489 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2490 2490 "Port offline.");
2491 2491
2492 2492 return (FC_OFFLINE);
2493 2493 }
2494 2494
2495 2495 if (sbp->port != port) {
2496 2496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2497 2497 "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2498 2498 sbp->port, sbp->pkt_flags);
2499 2499 return (FC_BADPACKET);
2500 2500 }
2501 2501
2502 2502 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2503 2503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2504 2504 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2505 2505 sbp->port, sbp->pkt_flags);
2506 2506 return (FC_BADPACKET);
2507 2507 }
2508 2508
2509 2509 #ifdef SFCT_SUPPORT
2510 2510 if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
2511 2511 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2512 2512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2513 2513 "Packet blocked. Target mode.");
2514 2514 return (FC_TRANSPORT_ERROR);
2515 2515 }
2516 2516 #endif /* SFCT_SUPPORT */
2517 2517
2518 2518 #ifdef IDLE_TIMER
2519 2519 emlxs_pm_busy_component(hba);
2520 2520 #endif /* IDLE_TIMER */
2521 2521
2522 2522 /* Prepare the packet for transport */
2523 2523 emlxs_initialize_pkt(port, sbp);
2524 2524
2525 2525 /* Save a copy of the pkt flags. */
2526 2526 /* We will check the polling flag later */
2527 2527 pkt_flags = sbp->pkt_flags;
2528 2528
2529 2529 /* Send the packet */
2530 2530 switch (pkt->pkt_tran_type) {
2531 2531 case FC_PKT_FCP_READ:
2532 2532 case FC_PKT_FCP_WRITE:
2533 2533 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2534 2534 break;
2535 2535
2536 2536 case FC_PKT_IP_WRITE:
2537 2537 case FC_PKT_BROADCAST:
2538 2538 rval = emlxs_send_ip(port, sbp);
2539 2539 break;
2540 2540
2541 2541 case FC_PKT_EXCHANGE:
2542 2542 switch (pkt->pkt_cmd_fhdr.type) {
2543 2543 case FC_TYPE_SCSI_FCP:
2544 2544 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2545 2545 break;
2546 2546
2547 2547 case FC_TYPE_FC_SERVICES:
2548 2548 rval = emlxs_send_ct(port, sbp);
2549 2549 break;
2550 2550
2551 2551 #ifdef MENLO_SUPPORT
2552 2552 case EMLXS_MENLO_TYPE:
2553 2553 rval = emlxs_send_menlo(port, sbp);
2554 2554 break;
2555 2555 #endif /* MENLO_SUPPORT */
2556 2556
2557 2557 default:
2558 2558 rval = emlxs_send_els(port, sbp);
2559 2559 }
2560 2560 break;
2561 2561
2562 2562 case FC_PKT_OUTBOUND:
2563 2563 switch (pkt->pkt_cmd_fhdr.type) {
2564 2564 #ifdef SFCT_SUPPORT
2565 2565 case FC_TYPE_SCSI_FCP:
2566 2566 rval = emlxs_send_fct_status(port, sbp);
2567 2567 break;
2568 2568
2569 2569 case FC_TYPE_BASIC_LS:
2570 2570 rval = emlxs_send_fct_abort(port, sbp);
2571 2571 break;
2572 2572 #endif /* SFCT_SUPPORT */
2573 2573
2574 2574 case FC_TYPE_FC_SERVICES:
2575 2575 rval = emlxs_send_ct_rsp(port, sbp);
2576 2576 break;
2577 2577 #ifdef MENLO_SUPPORT
2578 2578 case EMLXS_MENLO_TYPE:
2579 2579 rval = emlxs_send_menlo(port, sbp);
2580 2580 break;
2581 2581 #endif /* MENLO_SUPPORT */
2582 2582
2583 2583 default:
2584 2584 rval = emlxs_send_els_rsp(port, sbp);
2585 2585 }
2586 2586 break;
2587 2587
2588 2588 default:
2589 2589 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2590 2590 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2591 2591 rval = FC_TRANSPORT_ERROR;
2592 2592 break;
2593 2593 }
2594 2594
2595 2595 /* Check if send was not successful */
2596 2596 if (rval != FC_SUCCESS) {
2597 2597 /* Return packet to ULP */
2598 2598 mutex_enter(&sbp->mtx);
2599 2599 sbp->pkt_flags |= PACKET_ULP_OWNED;
2600 2600 mutex_exit(&sbp->mtx);
2601 2601
2602 2602 return (rval);
2603 2603 }
2604 2604
2605 2605 /* Check if this packet should be polled for completion before */
2606 2606 /* returning. This check must be done with a saved copy of the */
2607 2607 /* pkt_flags because the packet itself could already be freed from */
2608 2608 /* memory if it was not polled. */
2609 2609 if (pkt_flags & PACKET_POLLED) {
2610 2610 emlxs_poll(port, sbp);
2611 2611 }
2612 2612
2613 2613 return (FC_SUCCESS);
2614 2614
2615 2615 } /* emlxs_fca_transport() */
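
/*
 * Editorial sketch (not from the driver source): why
 * emlxs_fca_transport() above checks a saved copy of pkt_flags.  For a
 * non-polled packet the completion callback may fire -- and the packet
 * may be freed -- as soon as the send succeeds, so only the local copy
 * taken before the send is safe to inspect afterwards.  struct
 * buf_sketch and send_and_complete() are stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define	PACKET_POLLED	0x1

struct buf_sketch {
	uint32_t pkt_flags;
};

/* Pretend the transport completed the IO and returned the buffer. */
static void
send_and_complete(struct buf_sketch *sbp)
{
	free(sbp);
}

int
main(void)
{
	struct buf_sketch *sbp = calloc(1, sizeof (struct buf_sketch));
	uint32_t pkt_flags;

	sbp->pkt_flags = 0;	/* interrupt driven, not polled */

	/* Save the flags first; sbp may be gone after the send. */
	pkt_flags = sbp->pkt_flags;
	send_and_complete(sbp);

	if (pkt_flags & PACKET_POLLED)
		(void) printf("would poll for completion here\n");
	else
		(void) printf("completion arrives via callback\n");
	return (0);
}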
2616 2616
2617 2617
2618 2618
2619 2619 static void
2620 2620 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2621 2621 {
2622 2622 emlxs_hba_t *hba = HBA;
2623 2623 fc_packet_t *pkt = PRIV2PKT(sbp);
2624 2624 clock_t timeout;
2625 2625 clock_t time;
2626 2626 CHANNEL *cp;
2627 2627 int in_panic = 0;
2628 2628
2629 2629 mutex_enter(&EMLXS_PORT_LOCK);
2630 2630 hba->io_poll_count++;
2631 2631 mutex_exit(&EMLXS_PORT_LOCK);
2632 2632
2633 2633 /* Check for panic situation */
2634 2634 cp = (CHANNEL *)sbp->channel;
2635 2635
2636 2636 if (ddi_in_panic()) {
2637 2637 in_panic = 1;
2638 2638 /*
2639 2639 * In panic situations there will be one thread with
2640 2640 * no interrrupts (hard or soft) and no timers
2641 2641 		 * no interrupts (hard or soft) and no timers
2642 2642
2643 2643 /*
2644 2644 * We must manually poll everything in this thread
2645 2645 * to keep the driver going.
2646 2646 */
2647 2647
2648 2648 /* Keep polling the chip until our IO is completed */
2649 2649 /* Driver's timer will not function during panics. */
2650 2650 /* Therefore, timer checks must be performed manually. */
2651 2651 (void) drv_getparm(LBOLT, &time);
2652 2652 timeout = time + drv_usectohz(1000000);
2653 2653 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2654 2654 EMLXS_SLI_POLL_INTR(hba);
2655 2655 (void) drv_getparm(LBOLT, &time);
2656 2656
2657 2657 /* Trigger timer checks periodically */
2658 2658 if (time >= timeout) {
2659 2659 emlxs_timer_checks(hba);
2660 2660 timeout = time + drv_usectohz(1000000);
2661 2661 }
2662 2662 }
2663 2663 } else {
2664 2664 /* Wait for IO completion */
2665 2665 /* The driver's timer will detect */
2666 2666 /* any timeout and abort the I/O. */
2667 2667 mutex_enter(&EMLXS_PKT_LOCK);
2668 2668 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2669 2669 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2670 2670 }
2671 2671 mutex_exit(&EMLXS_PKT_LOCK);
2672 2672 }
2673 2673
2674 2674 /* Check for fcp reset pkt */
2675 2675 if (sbp->pkt_flags & PACKET_FCP_RESET) {
2676 2676 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2677 2677 /* Flush the IO's on the chipq */
2678 2678 (void) emlxs_chipq_node_flush(port,
2679 2679 &hba->chan[hba->channel_fcp],
2680 2680 sbp->node, sbp);
2681 2681 } else {
2682 2682 /* Flush the IO's on the chipq for this lun */
2683 2683 (void) emlxs_chipq_lun_flush(port,
2684 2684 sbp->node, sbp->lun, sbp);
2685 2685 }
2686 2686
2687 2687 if (sbp->flush_count == 0) {
2688 2688 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2689 2689 goto done;
2690 2690 }
2691 2691
2692 2692 /* Set the timeout so the flush has time to complete */
2693 2693 timeout = emlxs_timeout(hba, 60);
2694 2694 (void) drv_getparm(LBOLT, &time);
2695 2695 while ((time < timeout) && sbp->flush_count > 0) {
2696 2696 delay(drv_usectohz(500000));
2697 2697 (void) drv_getparm(LBOLT, &time);
2698 2698 }
2699 2699
2700 2700 if (sbp->flush_count == 0) {
2701 2701 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2702 2702 goto done;
2703 2703 }
2704 2704
2705 2705 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2706 2706 "sbp=%p flush_count=%d. Waiting...", sbp,
2707 2707 sbp->flush_count);
2708 2708
2709 2709 /* Let's try this one more time */
2710 2710
2711 2711 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2712 2712 /* Flush the IO's on the chipq */
2713 2713 (void) emlxs_chipq_node_flush(port,
2714 2714 &hba->chan[hba->channel_fcp],
2715 2715 sbp->node, sbp);
2716 2716 } else {
2717 2717 /* Flush the IO's on the chipq for this lun */
2718 2718 (void) emlxs_chipq_lun_flush(port,
2719 2719 sbp->node, sbp->lun, sbp);
2720 2720 }
2721 2721
2722 2722 /* Reset the timeout so the flush has time to complete */
2723 2723 timeout = emlxs_timeout(hba, 60);
2724 2724 (void) drv_getparm(LBOLT, &time);
2725 2725 while ((time < timeout) && sbp->flush_count > 0) {
2726 2726 delay(drv_usectohz(500000));
2727 2727 (void) drv_getparm(LBOLT, &time);
2728 2728 }
2729 2729
2730 2730 if (sbp->flush_count == 0) {
2731 2731 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2732 2732 goto done;
2733 2733 }
2734 2734
2735 2735 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2736 2736 "sbp=%p flush_count=%d. Resetting link.", sbp,
2737 2737 sbp->flush_count);
2738 2738
2739 2739 /* Let's first try to reset the link */
2740 2740 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
2741 2741
2742 2742 if (sbp->flush_count == 0) {
2743 2743 goto done;
2744 2744 }
2745 2745
2746 2746 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2747 2747 "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2748 2748 sbp->flush_count);
2749 2749
2750 2750 /* If that doesn't work, reset the adapter */
2751 2751 (void) emlxs_reset(port, FC_FCA_RESET);
2752 2752
2753 2753 if (sbp->flush_count != 0) {
2754 2754 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2755 2755 "sbp=%p flush_count=%d. Giving up.", sbp,
2756 2756 sbp->flush_count);
2757 2757 }
2758 2758
2759 2759 }
2760 2760 /* PACKET_FCP_RESET */
2761 2761 done:
2762 2762
2763 2763 /* Packet has been declared completed and is now ready to be returned */
2764 2764
2765 2765 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2766 2766 emlxs_unswap_pkt(sbp);
2767 2767 #endif /* EMLXS_MODREV2X */
2768 2768
2769 2769 mutex_enter(&sbp->mtx);
2770 2770 sbp->pkt_flags |= PACKET_ULP_OWNED;
2771 2771 mutex_exit(&sbp->mtx);
2772 2772
2773 2773 mutex_enter(&EMLXS_PORT_LOCK);
2774 2774 hba->io_poll_count--;
2775 2775 mutex_exit(&EMLXS_PORT_LOCK);
2776 2776
2777 2777 #ifdef FMA_SUPPORT
2778 2778 if (!in_panic) {
2779 2779 emlxs_check_dma(hba, sbp);
2780 2780 }
2781 2781 #endif
2782 2782
2783 2783 /* Make ULP completion callback if required */
2784 2784 if (pkt->pkt_comp) {
2785 2785 cp->ulpCmplCmd++;
2786 2786 (*pkt->pkt_comp) (pkt);
2787 2787 }
2788 2788
2789 2789 #ifdef FMA_SUPPORT
2790 2790 if (hba->flag & FC_DMA_CHECK_ERROR) {
2791 2791 emlxs_thread_spawn(hba, emlxs_restart_thread,
2792 2792 NULL, NULL);
2793 2793 }
2794 2794 #endif
2795 2795
2796 2796 return;
2797 2797
2798 2798 } /* emlxs_poll() */
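
/*
 * Editorial sketch (not from the driver source): the panic-time polling
 * pattern used by emlxs_poll() above.  With no interrupts or timers
 * available, the hardware is polled and the driver's timer checks are
 * triggered by hand about once a second.  poll_intr(), timer_checks()
 * and the "completed" flag are stand-ins for EMLXS_SLI_POLL_INTR(),
 * emlxs_timer_checks() and the PACKET_COMPLETED test; time(2) stands
 * in for drv_getparm(LBOLT, ...).
 */
#include <stdio.h>
#include <time.h>

static int completed;
static time_t io_done_at;

/* Stand-in for EMLXS_SLI_POLL_INTR(): the IO "completes" later. */
static void
poll_intr(void)
{
	if (time(NULL) >= io_done_at)
		completed = 1;
}

/* Stand-in for emlxs_timer_checks(). */
static void
timer_checks(void)
{
	(void) printf("manual timer checks\n");
}

int
main(void)
{
	time_t deadline = time(NULL) + 1;

	io_done_at = time(NULL) + 3;	/* simulate a 3 second IO */

	while (!completed) {
		poll_intr();

		/* Trigger timer checks periodically */
		if (time(NULL) >= deadline) {
			timer_checks();
			deadline = time(NULL) + 1;
		}
	}
	(void) printf("IO completed\n");
	return (0);
}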
2799 2799
2800 2800
2801 2801 static int
2802 2802 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2803 2803 uint32_t *count, uint32_t type)
2804 2804 {
2805 2805 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2806 2806 emlxs_hba_t *hba = HBA;
2807 2807 char *err = NULL;
2808 2808 emlxs_unsol_buf_t *pool = NULL;
2809 2809 emlxs_unsol_buf_t *new_pool = NULL;
2810 2810 emlxs_config_t *cfg = &CFG;
2811 2811 int32_t i;
2812 2812 int result;
2813 2813 uint32_t free_resv;
2814 2814 uint32_t free;
2815 2815 fc_unsol_buf_t *ubp;
2816 2816 emlxs_ub_priv_t *ub_priv;
2817 2817 int rc;
2818 2818
2819 2819 if (!(port->flag & EMLXS_INI_ENABLED)) {
2820 2820 if (tokens && count) {
2821 2821 bzero(tokens, (sizeof (uint64_t) * (*count)));
2822 2822 }
2823 2823 return (FC_SUCCESS);
2824 2824 }
2825 2825
2826 2826 if (!(port->flag & EMLXS_INI_BOUND)) {
2827 2827 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2828 2828 "fca_ub_alloc failed: Port not bound! size=%x count=%d "
2829 2829 "type=%x", size, *count, type);
2830 2830
2831 2831 return (FC_FAILURE);
2832 2832 }
2833 2833
2834 2834 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2835 2835 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2836 2836
2837 2837 if (count && (*count > EMLXS_MAX_UBUFS)) {
2838 2838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2839 2839 		    "fca_ub_alloc failed: Too many unsolicited buffers "
2840 2840 "requested. count=%x", *count);
2841 2841
2842 2842 return (FC_FAILURE);
2843 2843
2844 2844 }
2845 2845
2846 2846 if (tokens == NULL) {
2847 2847 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2848 2848 "fca_ub_alloc failed: Token array is NULL.");
2849 2849
2850 2850 return (FC_FAILURE);
2851 2851 }
2852 2852
2853 2853 /* Clear the token array */
2854 2854 bzero(tokens, (sizeof (uint64_t) * (*count)));
2855 2855
2856 2856 free_resv = 0;
2857 2857 free = *count;
2858 2858 switch (type) {
2859 2859 case FC_TYPE_BASIC_LS:
2860 2860 err = "BASIC_LS";
2861 2861 break;
2862 2862 case FC_TYPE_EXTENDED_LS:
2863 2863 err = "EXTENDED_LS";
2864 2864 free = *count / 2; /* Hold 50% for normal use */
2865 2865 free_resv = *count - free; /* Reserve 50% for RSCN use */
2866 2866 break;
2867 2867 case FC_TYPE_IS8802:
2868 2868 err = "IS8802";
2869 2869 break;
2870 2870 case FC_TYPE_IS8802_SNAP:
2871 2871 err = "IS8802_SNAP";
2872 2872
2873 2873 if (cfg[CFG_NETWORK_ON].current == 0) {
2874 2874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2875 2875 "fca_ub_alloc failed: IP support is disabled.");
2876 2876
2877 2877 return (FC_FAILURE);
2878 2878 }
2879 2879 break;
2880 2880 case FC_TYPE_SCSI_FCP:
2881 2881 err = "SCSI_FCP";
2882 2882 break;
2883 2883 case FC_TYPE_SCSI_GPP:
2884 2884 err = "SCSI_GPP";
2885 2885 break;
2886 2886 case FC_TYPE_HIPP_FP:
2887 2887 err = "HIPP_FP";
2888 2888 break;
2889 2889 case FC_TYPE_IPI3_MASTER:
2890 2890 err = "IPI3_MASTER";
2891 2891 break;
2892 2892 case FC_TYPE_IPI3_SLAVE:
2893 2893 err = "IPI3_SLAVE";
2894 2894 break;
2895 2895 case FC_TYPE_IPI3_PEER:
2896 2896 err = "IPI3_PEER";
2897 2897 break;
2898 2898 case FC_TYPE_FC_SERVICES:
2899 2899 err = "FC_SERVICES";
2900 2900 break;
2901 2901 }
2902 2902
2903 2903 mutex_enter(&EMLXS_UB_LOCK);
2904 2904
2905 2905 /*
2906 2906 * Walk through the list of the unsolicited buffers
2907 2907 	 * for this ddi instance of emlxs.
2908 2908 */
2909 2909
2910 2910 pool = port->ub_pool;
2911 2911
2912 2912 /*
2913 2913 	 * The emlxs_fca_ub_alloc() can be called more than once with different
2914 2914 	 * sizes. We reject the call if a pool of the same
2915 2915 	 * size already exists for the same FC-4 type.
2916 2916 */
2917 2917 while (pool) {
2918 2918 if ((pool->pool_type == type) &&
2919 2919 (pool->pool_buf_size == size)) {
2920 2920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2921 2921 "fca_ub_alloc failed: Unsolicited buffer pool "
2922 2922 "for %s of size 0x%x bytes already exists.",
2923 2923 err, size);
2924 2924
2925 2925 result = FC_FAILURE;
2926 2926 goto fail;
2927 2927 }
2928 2928
2929 2929 pool = pool->pool_next;
2930 2930 }
2931 2931
2932 2932 mutex_exit(&EMLXS_UB_LOCK);
2933 2933
2934 2934 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2935 2935 KM_SLEEP);
2936 2936
2937 2937 new_pool->pool_next = NULL;
2938 2938 new_pool->pool_type = type;
2939 2939 new_pool->pool_buf_size = size;
2940 2940 new_pool->pool_nentries = *count;
2941 2941 new_pool->pool_available = new_pool->pool_nentries;
2942 2942 new_pool->pool_free = free;
2943 2943 new_pool->pool_free_resv = free_resv;
2944 2944 new_pool->fc_ubufs =
2945 2945 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2946 2946
2947 2947 new_pool->pool_first_token = port->ub_count;
2948 2948 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2949 2949
2950 2950 for (i = 0; i < new_pool->pool_nentries; i++) {
2951 2951 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2952 2952 ubp->ub_port_handle = port->ulp_handle;
2953 2953 ubp->ub_token = (uint64_t)((unsigned long)ubp);
2954 2954 ubp->ub_bufsize = size;
2955 2955 ubp->ub_class = FC_TRAN_CLASS3;
2956 2956 ubp->ub_port_private = NULL;
2957 2957 ubp->ub_fca_private =
2958 2958 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2959 2959 KM_SLEEP);
2960 2960
2961 2961 /*
2962 2962 * Initialize emlxs_ub_priv_t
2963 2963 */
2964 2964 ub_priv = ubp->ub_fca_private;
2965 2965 ub_priv->ubp = ubp;
2966 2966 ub_priv->port = port;
2967 2967 ub_priv->flags = EMLXS_UB_FREE;
2968 2968 ub_priv->available = 1;
2969 2969 ub_priv->pool = new_pool;
2970 2970 ub_priv->time = 0;
2971 2971 ub_priv->timeout = 0;
2972 2972 ub_priv->token = port->ub_count;
2973 2973 ub_priv->cmd = 0;
2974 2974
2975 2975 /* Allocate the actual buffer */
2976 2976 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2977 2977
2978 2978
2979 2979 tokens[i] = (uint64_t)((unsigned long)ubp);
2980 2980 port->ub_count++;
2981 2981 }
2982 2982
2983 2983 mutex_enter(&EMLXS_UB_LOCK);
2984 2984
2985 2985 /* Add the pool to the top of the pool list */
2986 2986 new_pool->pool_prev = NULL;
2987 2987 new_pool->pool_next = port->ub_pool;
2988 2988
2989 2989 if (port->ub_pool) {
2990 2990 port->ub_pool->pool_prev = new_pool;
2991 2991 }
2992 2992 port->ub_pool = new_pool;
2993 2993
2994 2994 /* Set the post counts */
2995 2995 if (type == FC_TYPE_IS8802_SNAP) {
2996 2996 MAILBOXQ *mbox;
2997 2997
2998 2998 port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2999 2999
3000 3000 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
3001 3001 MEM_MBOX))) {
3002 3002 emlxs_mb_config_farp(hba, mbox);
3003 3003 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
3004 3004 mbox, MBX_NOWAIT, 0);
3005 3005 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3006 3006 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
3007 3007 }
3008 3008 }
3009 3009 port->flag |= EMLXS_PORT_IP_UP;
3010 3010 } else if (type == FC_TYPE_EXTENDED_LS) {
3011 3011 port->ub_post[hba->channel_els] += new_pool->pool_nentries;
3012 3012 } else if (type == FC_TYPE_FC_SERVICES) {
3013 3013 port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
3014 3014 }
3015 3015
3016 3016 mutex_exit(&EMLXS_UB_LOCK);
3017 3017
3018 3018 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3019 3019 "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3020 3020 *count, err, size);
3021 3021
3022 3022 return (FC_SUCCESS);
3023 3023
3024 3024 fail:
3025 3025
3026 3026 /* Clean the pool */
3027 3027 for (i = 0; tokens[i] != NULL; i++) {
3028 3028 /* Get the buffer object */
3029 3029 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3030 3030 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3031 3031
3032 3032 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3033 3033 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3034 3034 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3035 3035
3036 3036 /* Free the actual buffer */
3037 3037 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3038 3038
3039 3039 /* Free the private area of the buffer object */
3040 3040 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3041 3041
3042 3042 tokens[i] = 0;
3043 3043 port->ub_count--;
3044 3044 }
3045 3045
3046 3046 if (new_pool) {
3047 3047 /* Free the array of buffer objects in the pool */
3048 3048 kmem_free((caddr_t)new_pool->fc_ubufs,
3049 3049 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3050 3050
3051 3051 /* Free the pool object */
3052 3052 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3053 3053 }
3054 3054
3055 3055 mutex_exit(&EMLXS_UB_LOCK);
3056 3056
3057 3057 return (result);
3058 3058
3059 3059 } /* emlxs_fca_ub_alloc() */
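
/*
 * Editorial sketch (not from the driver source): the token scheme used
 * by the unsolicited-buffer routines above.  A token handed to ULP is
 * simply the buffer pointer widened to 64 bits, so ub_release/ub_free
 * recover the buffer by casting the token back.  struct ub_sketch is a
 * stand-in for fc_unsol_buf_t.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct ub_sketch {
	uint64_t ub_token;
	uint32_t ub_bufsize;
};

int
main(void)
{
	struct ub_sketch *ubp = calloc(1, sizeof (struct ub_sketch));
	uint64_t token;
	struct ub_sketch *found;

	ubp->ub_bufsize = 4096;
	ubp->ub_token = (uint64_t)((unsigned long)ubp);

	token = ubp->ub_token;		/* handed out to ULP */
	found = (struct ub_sketch *)((unsigned long)token);  /* handed back */

	(void) printf("round trip ok=%d size=%u\n",
	    (found == ubp), found->ub_bufsize);
	free(ubp);
	return (0);
}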
3060 3060
3061 3061
3062 3062 static void
3063 3063 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3064 3064 {
3065 3065 emlxs_hba_t *hba = HBA;
3066 3066 emlxs_ub_priv_t *ub_priv;
3067 3067 fc_packet_t *pkt;
3068 3068 ELS_PKT *els;
3069 3069 uint32_t sid;
3070 3070
3071 3071 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3072 3072
3073 3073 if (hba->state <= FC_LINK_DOWN) {
3074 3074 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3075 3075 return;
3076 3076 }
3077 3077
3078 3078 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3079 3079 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3080 3080 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3081 3081 return;
3082 3082 }
3083 3083
3084 3084 sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3085 3085
3086 3086 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3087 3087 "%s dropped: sid=%x. Rejecting.",
3088 3088 emlxs_elscmd_xlate(ub_priv->cmd), sid);
3089 3089
3090 3090 pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3091 3091 pkt->pkt_timeout = (2 * hba->fc_ratov);
3092 3092
3093 3093 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3094 3094 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3095 3095 pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3096 3096 }
3097 3097
3098 3098 /* Build the fc header */
3099 3099 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3100 3100 pkt->pkt_cmd_fhdr.r_ctl =
3101 3101 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3102 3102 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3103 3103 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3104 3104 pkt->pkt_cmd_fhdr.f_ctl =
3105 3105 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3106 3106 pkt->pkt_cmd_fhdr.seq_id = 0;
3107 3107 pkt->pkt_cmd_fhdr.df_ctl = 0;
3108 3108 pkt->pkt_cmd_fhdr.seq_cnt = 0;
3109 3109 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3110 3110 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3111 3111 pkt->pkt_cmd_fhdr.ro = 0;
3112 3112
3113 3113 /* Build the command */
3114 3114 els = (ELS_PKT *) pkt->pkt_cmd;
3115 3115 els->elsCode = 0x01;
3116 3116 els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3117 3117 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3118 3118 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3119 3119 els->un.lsRjt.un.b.vendorUnique = 0x02;
3120 3120
3121 3121 /* Send the pkt later in another thread */
3122 3122 (void) emlxs_pkt_send(pkt, 0);
3123 3123
3124 3124 return;
3125 3125
3126 3126 } /* emlxs_ub_els_reject() */
3127 3127
3128 3128 extern int
3129 3129 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3130 3130 uint64_t tokens[])
3131 3131 {
3132 3132 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3133 3133 emlxs_hba_t *hba = HBA;
3134 3134 fc_unsol_buf_t *ubp;
3135 3135 emlxs_ub_priv_t *ub_priv;
3136 3136 uint32_t i;
3137 3137 uint32_t time;
3138 3138 emlxs_unsol_buf_t *pool;
3139 3139
3140 3140 if (count == 0) {
3141 3141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3142 3142 "fca_ub_release: Nothing to do. count=%d", count);
3143 3143
3144 3144 return (FC_SUCCESS);
3145 3145 }
3146 3146
3147 3147 if (!(port->flag & EMLXS_INI_BOUND)) {
3148 3148 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3149 3149 "fca_ub_release failed: Port not bound. count=%d "
3150 3150 "token[0]=%p",
3151 3151 count, tokens[0]);
3152 3152
3153 3153 return (FC_UNBOUND);
3154 3154 }
3155 3155
3156 3156 mutex_enter(&EMLXS_UB_LOCK);
3157 3157
3158 3158 if (!port->ub_pool) {
3159 3159 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3160 3160 "fca_ub_release failed: No pools! count=%d token[0]=%p",
3161 3161 count, tokens[0]);
3162 3162
3163 3163 mutex_exit(&EMLXS_UB_LOCK);
3164 3164 return (FC_UB_BADTOKEN);
3165 3165 }
3166 3166
3167 3167 for (i = 0; i < count; i++) {
3168 3168 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3169 3169
3170 3170 if (!ubp) {
3171 3171 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3172 3172 "fca_ub_release failed: count=%d tokens[%d]=0",
3173 3173 count, i);
3174 3174
3175 3175 mutex_exit(&EMLXS_UB_LOCK);
3176 3176 return (FC_UB_BADTOKEN);
3177 3177 }
3178 3178
3179 3179 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3180 3180
3181 3181 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3182 3182 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3183 3183 "fca_ub_release failed: Dead buffer found. ubp=%p",
3184 3184 ubp);
3185 3185
3186 3186 mutex_exit(&EMLXS_UB_LOCK);
3187 3187 return (FC_UB_BADTOKEN);
3188 3188 }
3189 3189
3190 3190 if (ub_priv->flags == EMLXS_UB_FREE) {
3191 3191 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3192 3192 "fca_ub_release: Buffer already free! ubp=%p "
3193 3193 "token=%x",
3194 3194 ubp, ub_priv->token);
3195 3195
3196 3196 continue;
3197 3197 }
3198 3198
3199 3199 /* Check for dropped els buffer */
3200 3200 /* ULP will do this sometimes without sending a reply */
3201 3201 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3202 3202 !(ub_priv->flags & EMLXS_UB_REPLY)) {
3203 3203 emlxs_ub_els_reject(port, ubp);
3204 3204 }
3205 3205
3206 3206 /* Mark the buffer free */
3207 3207 ub_priv->flags = EMLXS_UB_FREE;
3208 3208 bzero(ubp->ub_buffer, ubp->ub_bufsize);
3209 3209
3210 3210 time = hba->timer_tics - ub_priv->time;
3211 3211 ub_priv->time = 0;
3212 3212 ub_priv->timeout = 0;
3213 3213
3214 3214 pool = ub_priv->pool;
3215 3215
3216 3216 if (ub_priv->flags & EMLXS_UB_RESV) {
3217 3217 pool->pool_free_resv++;
3218 3218 } else {
3219 3219 pool->pool_free++;
3220 3220 }
3221 3221
3222 3222 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3223 3223 "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3224 3224 "(%d,%d,%d,%d)",
3225 3225 ubp, ub_priv->token, time, ub_priv->available,
3226 3226 pool->pool_nentries, pool->pool_available,
3227 3227 pool->pool_free, pool->pool_free_resv);
3228 3228
3229 3229 /* Check if pool can be destroyed now */
3230 3230 if ((pool->pool_available == 0) &&
3231 3231 (pool->pool_free + pool->pool_free_resv ==
3232 3232 pool->pool_nentries)) {
3233 3233 emlxs_ub_destroy(port, pool);
3234 3234 }
3235 3235 }
3236 3236
3237 3237 mutex_exit(&EMLXS_UB_LOCK);
3238 3238
3239 3239 return (FC_SUCCESS);
3240 3240
3241 3241 } /* emlxs_fca_ub_release() */
3242 3242
3243 3243
3244 3244 static int
3245 3245 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3246 3246 {
3247 3247 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3248 3248 emlxs_unsol_buf_t *pool;
3249 3249 fc_unsol_buf_t *ubp;
3250 3250 emlxs_ub_priv_t *ub_priv;
3251 3251 uint32_t i;
3252 3252
3253 3253 if (!(port->flag & EMLXS_INI_ENABLED)) {
3254 3254 return (FC_SUCCESS);
3255 3255 }
3256 3256
3257 3257 if (count == 0) {
3258 3258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3259 3259 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3260 3260 tokens[0]);
3261 3261
3262 3262 return (FC_SUCCESS);
3263 3263 }
3264 3264
3265 3265 if (!(port->flag & EMLXS_INI_BOUND)) {
3266 3266 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3267 3267 "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3268 3268 tokens[0]);
3269 3269
3270 3270 return (FC_SUCCESS);
3271 3271 }
3272 3272
3273 3273 mutex_enter(&EMLXS_UB_LOCK);
3274 3274
3275 3275 if (!port->ub_pool) {
3276 3276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3277 3277 "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3278 3278 tokens[0]);
3279 3279
3280 3280 mutex_exit(&EMLXS_UB_LOCK);
3281 3281 return (FC_UB_BADTOKEN);
3282 3282 }
3283 3283
3284 3284 /* Process buffer list */
3285 3285 for (i = 0; i < count; i++) {
3286 3286 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3287 3287
3288 3288 if (!ubp) {
3289 3289 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3290 3290 "fca_ub_free failed: count=%d tokens[%d]=0", count,
3291 3291 i);
3292 3292
3293 3293 mutex_exit(&EMLXS_UB_LOCK);
3294 3294 return (FC_UB_BADTOKEN);
3295 3295 }
3296 3296
3297 3297 /* Mark buffer unavailable */
3298 3298 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3299 3299
3300 3300 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3301 3301 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3302 3302 "fca_ub_free failed: Dead buffer found. ubp=%p",
3303 3303 ubp);
3304 3304
3305 3305 mutex_exit(&EMLXS_UB_LOCK);
3306 3306 return (FC_UB_BADTOKEN);
3307 3307 }
3308 3308
3309 3309 ub_priv->available = 0;
3310 3310
3311 3311 /* Mark one less buffer available in the parent pool */
3312 3312 pool = ub_priv->pool;
3313 3313
3314 3314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3315 3315 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3316 3316 ub_priv->token, pool->pool_nentries,
3317 3317 pool->pool_available - 1, pool->pool_free,
3318 3318 pool->pool_free_resv);
3319 3319
3320 3320 if (pool->pool_available) {
3321 3321 pool->pool_available--;
3322 3322
3323 3323 /* Check if pool can be destroyed */
3324 3324 if ((pool->pool_available == 0) &&
3325 3325 (pool->pool_free + pool->pool_free_resv ==
3326 3326 pool->pool_nentries)) {
3327 3327 emlxs_ub_destroy(port, pool);
3328 3328 }
3329 3329 }
3330 3330 }
3331 3331
3332 3332 mutex_exit(&EMLXS_UB_LOCK);
3333 3333
3334 3334 return (FC_SUCCESS);
3335 3335
3336 3336 } /* emlxs_fca_ub_free() */
3337 3337
3338 3338
3339 3339 /* EMLXS_UB_LOCK must be held when calling this routine */
3340 3340 extern void
3341 3341 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3342 3342 {
3343 3343 emlxs_hba_t *hba = HBA;
3344 3344 emlxs_unsol_buf_t *next;
3345 3345 emlxs_unsol_buf_t *prev;
3346 3346 fc_unsol_buf_t *ubp;
3347 3347 uint32_t i;
3348 3348
3349 3349 /* Remove the pool object from the pool list */
3350 3350 next = pool->pool_next;
3351 3351 prev = pool->pool_prev;
3352 3352
3353 3353 if (port->ub_pool == pool) {
3354 3354 port->ub_pool = next;
3355 3355 }
3356 3356
3357 3357 if (prev) {
3358 3358 prev->pool_next = next;
3359 3359 }
3360 3360
3361 3361 if (next) {
3362 3362 next->pool_prev = prev;
3363 3363 }
3364 3364
3365 3365 pool->pool_prev = NULL;
3366 3366 pool->pool_next = NULL;
3367 3367
3368 3368 /* Clear the post counts */
3369 3369 switch (pool->pool_type) {
3370 3370 case FC_TYPE_IS8802_SNAP:
3371 3371 port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3372 3372 break;
3373 3373
3374 3374 case FC_TYPE_EXTENDED_LS:
3375 3375 port->ub_post[hba->channel_els] -= pool->pool_nentries;
3376 3376 break;
3377 3377
3378 3378 case FC_TYPE_FC_SERVICES:
3379 3379 port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3380 3380 break;
3381 3381 }
3382 3382
3383 3383 /* Now free the pool memory */
3384 3384 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3385 3385 "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3386 3386 pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3387 3387
3388 3388 /* Process the array of buffer objects in the pool */
3389 3389 for (i = 0; i < pool->pool_nentries; i++) {
3390 3390 /* Get the buffer object */
3391 3391 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3392 3392
3393 3393 /* Free the memory the buffer object represents */
3394 3394 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3395 3395
3396 3396 /* Free the private area of the buffer object */
3397 3397 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3398 3398 }
3399 3399
3400 3400 /* Free the array of buffer objects in the pool */
3401 3401 kmem_free((caddr_t)pool->fc_ubufs,
3402 3402 (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3403 3403
3404 3404 /* Free the pool object */
3405 3405 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3406 3406
3407 3407 return;
3408 3408
3409 3409 } /* emlxs_ub_destroy() */
3410 3410
3411 3411
3412 3412 /*ARGSUSED*/
3413 3413 extern int
3414 3414 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3415 3415 {
3416 3416 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3417 3417 emlxs_hba_t *hba = HBA;
3418 3418 emlxs_config_t *cfg = &CFG;
3419 3419
3420 3420 emlxs_buf_t *sbp;
3421 3421 NODELIST *nlp;
3422 3422 NODELIST *prev_nlp;
3423 3423 uint8_t channelno;
3424 3424 CHANNEL *cp;
3425 3425 clock_t pkt_timeout;
3426 3426 clock_t timer;
3427 3427 clock_t time;
3428 3428 int32_t pkt_ret;
3429 3429 IOCBQ *iocbq;
3430 3430 IOCBQ *next;
3431 3431 IOCBQ *prev;
3432 3432 uint32_t found;
3433 3433 uint32_t pass = 0;
3434 3434
3435 3435 sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3436 3436 iocbq = &sbp->iocbq;
3437 3437 nlp = (NODELIST *)sbp->node;
3438 3438 cp = (CHANNEL *)sbp->channel;
3439 3439 channelno = (cp) ? cp->channelno : 0;
3440 3440
3441 3441 if (!(port->flag & EMLXS_INI_BOUND)) {
3442 3442 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3443 3443 "Port not bound.");
3444 3444 return (FC_UNBOUND);
3445 3445 }
3446 3446
3447 3447 if (!(hba->flag & FC_ONLINE_MODE)) {
3448 3448 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3449 3449 "Adapter offline.");
3450 3450 return (FC_OFFLINE);
3451 3451 }
3452 3452
3453 3453 /* ULP requires the aborted pkt to be completed */
3454 3454 /* back to ULP before returning from this call. */
3455 3455 /* SUN knows of problems with this call so they suggested that we */
3456 3456 	/* always return FC_FAILURE for this call, until it is worked out. */
3457 3457
3458 3458 /* Check if pkt is no good */
3459 3459 if (!(sbp->pkt_flags & PACKET_VALID) ||
3460 3460 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3461 3461 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3462 3462 "Bad sbp. flags=%x", sbp->pkt_flags);
3463 3463 return (FC_FAILURE);
3464 3464 }
3465 3465
3466 3466 /* Tag this now */
3467 3467 /* This will prevent any thread except ours from completing it */
3468 3468 mutex_enter(&sbp->mtx);
3469 3469
3470 3470 /* Check again if we still own this */
3471 3471 if (!(sbp->pkt_flags & PACKET_VALID) ||
3472 3472 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3473 3473 mutex_exit(&sbp->mtx);
3474 3474 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3475 3475 "Bad sbp. flags=%x", sbp->pkt_flags);
3476 3476 return (FC_FAILURE);
3477 3477 }
3478 3478
3479 3479 /* Check if pkt is a real polled command */
3480 3480 if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3481 3481 (sbp->pkt_flags & PACKET_POLLED)) {
3482 3482 mutex_exit(&sbp->mtx);
3483 3483
3484 3484 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3485 3485 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3486 3486 sbp->pkt_flags);
3487 3487 return (FC_FAILURE);
3488 3488 }
3489 3489
3490 3490 sbp->pkt_flags |= PACKET_POLLED;
3491 3491 sbp->pkt_flags |= PACKET_IN_ABORT;
3492 3492
3493 3493 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3494 3494 PACKET_IN_TIMEOUT)) {
3495 3495 mutex_exit(&sbp->mtx);
3496 3496
3497 3497 /* Do nothing, pkt already on its way out */
3498 3498 goto done;
3499 3499 }
3500 3500
3501 3501 mutex_exit(&sbp->mtx);
3502 3502
3503 3503 begin:
3504 3504 pass++;
3505 3505
3506 3506 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3507 3507
3508 3508 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3509 3509 /* Find it on the queue */
3510 3510 found = 0;
3511 3511 if (iocbq->flag & IOCB_PRIORITY) {
3512 3512 /* Search the priority queue */
3513 3513 prev = NULL;
3514 3514 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3515 3515
3516 3516 while (next) {
3517 3517 if (next == iocbq) {
3518 3518 /* Remove it */
3519 3519 if (prev) {
3520 3520 prev->next = iocbq->next;
3521 3521 }
3522 3522
3523 3523 if (nlp->nlp_ptx[channelno].q_last ==
3524 3524 (void *)iocbq) {
3525 3525 nlp->nlp_ptx[channelno].q_last =
3526 3526 (void *)prev;
3527 3527 }
3528 3528
3529 3529 if (nlp->nlp_ptx[channelno].q_first ==
3530 3530 (void *)iocbq) {
3531 3531 nlp->nlp_ptx[channelno].
3532 3532 q_first =
3533 3533 (void *)iocbq->next;
3534 3534 }
3535 3535
3536 3536 nlp->nlp_ptx[channelno].q_cnt--;
3537 3537 iocbq->next = NULL;
3538 3538 found = 1;
3539 3539 break;
3540 3540 }
3541 3541
3542 3542 prev = next;
3543 3543 next = next->next;
3544 3544 }
3545 3545 } else {
3546 3546 /* Search the normal queue */
3547 3547 prev = NULL;
3548 3548 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3549 3549
3550 3550 while (next) {
3551 3551 if (next == iocbq) {
3552 3552 /* Remove it */
3553 3553 if (prev) {
3554 3554 prev->next = iocbq->next;
3555 3555 }
3556 3556
3557 3557 if (nlp->nlp_tx[channelno].q_last ==
3558 3558 (void *)iocbq) {
3559 3559 nlp->nlp_tx[channelno].q_last =
3560 3560 (void *)prev;
3561 3561 }
3562 3562
3563 3563 if (nlp->nlp_tx[channelno].q_first ==
3564 3564 (void *)iocbq) {
3565 3565 nlp->nlp_tx[channelno].q_first =
3566 3566 (void *)iocbq->next;
3567 3567 }
3568 3568
3569 3569 nlp->nlp_tx[channelno].q_cnt--;
3570 3570 iocbq->next = NULL;
3571 3571 found = 1;
3572 3572 break;
3573 3573 }
3574 3574
3575 3575 prev = next;
3576 3576 next = (IOCBQ *) next->next;
3577 3577 }
3578 3578 }
3579 3579
3580 3580 if (!found) {
3581 3581 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3582 3582 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3583 3583 "I/O not found in driver. sbp=%p flags=%x", sbp,
3584 3584 sbp->pkt_flags);
3585 3585 goto done;
3586 3586 }
3587 3587
3588 3588 /* Check if node still needs servicing */
3589 3589 if ((nlp->nlp_ptx[channelno].q_first) ||
3590 3590 (nlp->nlp_tx[channelno].q_first &&
3591 3591 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3592 3592
3593 3593 /*
3594 3594 * If this is the base node,
3595 3595 * then don't shift the pointers
3596 3596 */
3597 3597 /* We want to drain the base node before moving on */
3598 3598 if (!nlp->nlp_base) {
3599 3599 /* Just shift channel queue */
3600 3600 /* pointers to next node */
3601 3601 cp->nodeq.q_last = (void *) nlp;
3602 3602 cp->nodeq.q_first = nlp->nlp_next[channelno];
3603 3603 }
3604 3604 } else {
3605 3605 /* Remove node from channel queue */
3606 3606
3607 3607 /* If this is the only node on list */
3608 3608 if (cp->nodeq.q_first == (void *)nlp &&
3609 3609 cp->nodeq.q_last == (void *)nlp) {
3610 3610 cp->nodeq.q_last = NULL;
3611 3611 cp->nodeq.q_first = NULL;
3612 3612 cp->nodeq.q_cnt = 0;
3613 3613 } else if (cp->nodeq.q_first == (void *)nlp) {
3614 3614 cp->nodeq.q_first = nlp->nlp_next[channelno];
3615 3615 ((NODELIST *) cp->nodeq.q_last)->
3616 3616 nlp_next[channelno] = cp->nodeq.q_first;
3617 3617 cp->nodeq.q_cnt--;
3618 3618 } else {
3619 3619 /*
3620 3620 			 * This is a little more difficult: find the
3621 3621 * previous node in the circular channel queue
3622 3622 */
3623 3623 prev_nlp = nlp;
3624 3624 while (prev_nlp->nlp_next[channelno] != nlp) {
3625 3625 prev_nlp = prev_nlp->
3626 3626 nlp_next[channelno];
3627 3627 }
3628 3628
3629 3629 prev_nlp->nlp_next[channelno] =
3630 3630 nlp->nlp_next[channelno];
3631 3631
3632 3632 if (cp->nodeq.q_last == (void *)nlp) {
3633 3633 cp->nodeq.q_last = (void *)prev_nlp;
3634 3634 }
3635 3635 cp->nodeq.q_cnt--;
3636 3636
3637 3637 }
3638 3638
3639 3639 /* Clear node */
3640 3640 nlp->nlp_next[channelno] = NULL;
3641 3641 }
3642 3642
3643 3643 /* Free the ULPIOTAG and the bmp */
3644 3644 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3645 3645 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3646 3646 } else {
3647 3647 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3648 3648 }
3649 3649
3650 3650
3651 3651 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3652 3652
3653 3653 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3654 3654 IOERR_ABORT_REQUESTED, 1);
3655 3655
3656 3656 goto done;
3657 3657 }
3658 3658
3659 3659 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3660 3660
3661 3661
3662 3662 /* Check the chip queue */
3663 3663 mutex_enter(&EMLXS_FCTAB_LOCK);
3664 3664
3665 3665 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3666 3666 !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3667 3667 (sbp == hba->fc_table[sbp->iotag])) {
3668 3668
3669 3669 /* Create the abort IOCB */
3670 3670 if (hba->state >= FC_LINK_UP) {
3671 3671 iocbq =
3672 3672 emlxs_create_abort_xri_cn(port, sbp->node,
3673 3673 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3674 3674
3675 3675 mutex_enter(&sbp->mtx);
3676 3676 sbp->pkt_flags |= PACKET_XRI_CLOSED;
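			/* Allow up to 4 * R_A_TOV (plus slack) for the ABTS */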
3677 3677 sbp->ticks =
3678 3678 hba->timer_tics + (4 * hba->fc_ratov) + 10;
3679 3679 sbp->abort_attempts++;
3680 3680 mutex_exit(&sbp->mtx);
3681 3681 } else {
3682 3682 iocbq =
3683 3683 emlxs_create_close_xri_cn(port, sbp->node,
3684 3684 sbp->iotag, cp);
3685 3685
3686 3686 mutex_enter(&sbp->mtx);
3687 3687 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3688 3688 sbp->ticks = hba->timer_tics + 30;
3689 3689 sbp->abort_attempts++;
3690 3690 mutex_exit(&sbp->mtx);
3691 3691 }
3692 3692
3693 3693 mutex_exit(&EMLXS_FCTAB_LOCK);
3694 3694
3695 3695 /* Send this iocbq */
3696 3696 if (iocbq) {
3697 3697 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3698 3698 iocbq = NULL;
3699 3699 }
3700 3700
3701 3701 goto done;
3702 3702 }
3703 3703
3704 3704 mutex_exit(&EMLXS_FCTAB_LOCK);
3705 3705
3706 3706 /* Pkt was not on any queues */
3707 3707
3708 3708 /* Check again if we still own this */
3709 3709 if (!(sbp->pkt_flags & PACKET_VALID) ||
3710 3710 (sbp->pkt_flags &
3711 3711 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3712 3712 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3713 3713 goto done;
3714 3714 }
3715 3715
3716 3716 if (!sleep) {
3717 3717 return (FC_FAILURE);
3718 3718 }
3719 3719
3720 3720 /* Apparently the pkt was not found. Let's delay and try again */
3721 3721 if (pass < 5) {
3722 3722 delay(drv_usectohz(5000000)); /* 5 seconds */
3723 3723
3724 3724 /* Check again if we still own this */
3725 3725 if (!(sbp->pkt_flags & PACKET_VALID) ||
3726 3726 (sbp->pkt_flags &
3727 3727 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3728 3728 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3729 3729 goto done;
3730 3730 }
3731 3731
3732 3732 goto begin;
3733 3733 }
3734 3734
3735 3735 force_it:
3736 3736
3737 3737 /* Force the completion now */
3738 3738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3739 3739 "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);
3740 3740
3741 3741 /* Now complete it */
3742 3742 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3743 3743 1);
3744 3744
3745 3745 done:
3746 3746
3747 3747 /* Now wait for the pkt to complete */
3748 3748 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3749 3749 /* Set thread timeout */
3750 3750 pkt_timeout = emlxs_timeout(hba, 30);
3751 3751
3752 3752 /* Check for panic situation */
3753 3753 if (ddi_in_panic()) {
3754 3754
3755 3755 /*
3756 3756 * In panic situations there will be one thread with no
3757 3757 			 * interrupts (hard or soft) and no timers
3758 3758 */
3759 3759
3760 3760 /*
3761 3761 * We must manually poll everything in this thread
3762 3762 * to keep the driver going.
3763 3763 */
3764 3764
3765 3765 /* Keep polling the chip until our IO is completed */
3766 3766 (void) drv_getparm(LBOLT, &time);
3767 3767 timer = time + drv_usectohz(1000000);
3768 3768 while ((time < pkt_timeout) &&
3769 3769 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3770 3770 EMLXS_SLI_POLL_INTR(hba);
3771 3771 (void) drv_getparm(LBOLT, &time);
3772 3772
3773 3773 /* Trigger timer checks periodically */
3774 3774 if (time >= timer) {
3775 3775 emlxs_timer_checks(hba);
3776 3776 timer = time + drv_usectohz(1000000);
3777 3777 }
3778 3778 }
3779 3779 } else {
3780 3780 /* Wait for IO completion or pkt_timeout */
3781 3781 mutex_enter(&EMLXS_PKT_LOCK);
3782 3782 pkt_ret = 0;
3783 3783 while ((pkt_ret != -1) &&
3784 3784 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3785 3785 pkt_ret =
3786 3786 cv_timedwait(&EMLXS_PKT_CV,
3787 3787 &EMLXS_PKT_LOCK, pkt_timeout);
3788 3788 }
3789 3789 mutex_exit(&EMLXS_PKT_LOCK);
3790 3790 }
3791 3791
3792 3792 		/* Check if pkt_timeout occurred. This is not good. */
3793 3793 /* Something happened to our IO. */
3794 3794 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3795 3795 /* Force the completion now */
3796 3796 goto force_it;
3797 3797 }
3798 3798 }
3799 3799 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3800 3800 emlxs_unswap_pkt(sbp);
3801 3801 #endif /* EMLXS_MODREV2X */
3802 3802
3803 3803 /* Check again if we still own this */
3804 3804 if ((sbp->pkt_flags & PACKET_VALID) &&
3805 3805 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3806 3806 mutex_enter(&sbp->mtx);
3807 3807 if ((sbp->pkt_flags & PACKET_VALID) &&
3808 3808 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3809 3809 sbp->pkt_flags |= PACKET_ULP_OWNED;
3810 3810 }
3811 3811 mutex_exit(&sbp->mtx);
3812 3812 }
3813 3813
3814 3814 #ifdef ULP_PATCH5
3815 3815 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3816 3816 return (FC_FAILURE);
3817 3817 }
3818 3818 #endif /* ULP_PATCH5 */
3819 3819
3820 3820 return (FC_SUCCESS);
3821 3821
3822 3822 } /* emlxs_fca_pkt_abort() */
3823 3823
3824 3824
3825 3825 static void
3826 3826 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3827 3827 {
3828 3828 emlxs_port_t *port = &PPORT;
3829 3829 fc_packet_t *pkt;
3830 3830 emlxs_buf_t *sbp;
3831 3831 uint32_t i;
3832 3832 uint32_t flg;
3833 3833 uint32_t rc;
3834 3834 uint32_t txcnt;
3835 3835 uint32_t chipcnt;
3836 3836
3837 3837 txcnt = 0;
3838 3838 chipcnt = 0;
3839 3839
3840 3840 mutex_enter(&EMLXS_FCTAB_LOCK);
3841 3841 for (i = 0; i < hba->max_iotag; i++) {
3842 3842 sbp = hba->fc_table[i];
3843 3843 if (sbp == NULL || sbp == STALE_PACKET) {
3844 3844 continue;
3845 3845 }
3846 3846 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3847 3847 pkt = PRIV2PKT(sbp);
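		/*
		 * Drop the table lock across the abort call;
		 * emlxs_fca_pkt_abort takes EMLXS_FCTAB_LOCK itself.
		 */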
3848 3848 mutex_exit(&EMLXS_FCTAB_LOCK);
3849 3849 rc = emlxs_fca_pkt_abort(port, pkt, 0);
3850 3850 if (rc == FC_SUCCESS) {
3851 3851 if (flg) {
3852 3852 chipcnt++;
3853 3853 } else {
3854 3854 txcnt++;
3855 3855 }
3856 3856 }
3857 3857 mutex_enter(&EMLXS_FCTAB_LOCK);
3858 3858 }
3859 3859 mutex_exit(&EMLXS_FCTAB_LOCK);
3860 3860 *tx = txcnt;
3861 3861 *chip = chipcnt;
3862 3862 } /* emlxs_abort_all() */
3863 3863
3864 3864
3865 3865 extern int32_t
3866 3866 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3867 3867 {
3868 3868 emlxs_hba_t *hba = HBA;
3869 3869 int rval;
3870 3870 int i = 0;
3871 3871 int ret;
3872 3872 clock_t timeout;
3873 3873
3874 3874 switch (cmd) {
3875 3875 case FC_FCA_LINK_RESET:
3876 3876
3877 3877 mutex_enter(&EMLXS_PORT_LOCK);
3878 3878 if (!(hba->flag & FC_ONLINE_MODE) ||
3879 3879 (hba->state <= FC_LINK_DOWN)) {
3880 3880 mutex_exit(&EMLXS_PORT_LOCK);
3881 3881 return (FC_SUCCESS);
3882 3882 }
3883 3883
3884 3884 if (hba->reset_state &
3885 3885 (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
3886 3886 mutex_exit(&EMLXS_PORT_LOCK);
3887 3887 return (FC_FAILURE);
3888 3888 }
3889 3889
3890 3890 hba->reset_state |= FC_LINK_RESET_INP;
3891 3891 hba->reset_request |= FC_LINK_RESET;
3892 3892 mutex_exit(&EMLXS_PORT_LOCK);
3893 3893
3894 3894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3895 3895 "Resetting Link.");
3896 3896
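		/*
		 * Set linkup_wait_flag before resetting the link so the
		 * waiter below can block on EMLXS_LINKUP_CV until the link
		 * comes back up or the 60 second timeout expires.
		 */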
3897 3897 mutex_enter(&EMLXS_LINKUP_LOCK);
3898 3898 hba->linkup_wait_flag = TRUE;
3899 3899 mutex_exit(&EMLXS_LINKUP_LOCK);
3900 3900
3901 3901 if (emlxs_reset_link(hba, 1, 1)) {
3902 3902 mutex_enter(&EMLXS_LINKUP_LOCK);
3903 3903 hba->linkup_wait_flag = FALSE;
3904 3904 mutex_exit(&EMLXS_LINKUP_LOCK);
3905 3905
3906 3906 mutex_enter(&EMLXS_PORT_LOCK);
3907 3907 hba->reset_state &= ~FC_LINK_RESET_INP;
3908 3908 hba->reset_request &= ~FC_LINK_RESET;
3909 3909 mutex_exit(&EMLXS_PORT_LOCK);
3910 3910
3911 3911 return (FC_FAILURE);
3912 3912 }
3913 3913
3914 3914 mutex_enter(&EMLXS_LINKUP_LOCK);
3915 3915 timeout = emlxs_timeout(hba, 60);
3916 3916 ret = 0;
3917 3917 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3918 3918 ret =
3919 3919 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3920 3920 timeout);
3921 3921 }
3922 3922
3923 3923 hba->linkup_wait_flag = FALSE;
3924 3924 mutex_exit(&EMLXS_LINKUP_LOCK);
3925 3925
3926 3926 mutex_enter(&EMLXS_PORT_LOCK);
3927 3927 hba->reset_state &= ~FC_LINK_RESET_INP;
3928 3928 hba->reset_request &= ~FC_LINK_RESET;
3929 3929 mutex_exit(&EMLXS_PORT_LOCK);
3930 3930
3931 3931 if (ret == -1) {
3932 3932 return (FC_FAILURE);
3933 3933 }
3934 3934
3935 3935 return (FC_SUCCESS);
3936 3936
3937 3937 case FC_FCA_CORE:
3938 3938 #ifdef DUMP_SUPPORT
3939 3939 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3940 3940 "Dumping Core.");
3941 3941
3942 3942 /* Schedule a USER dump */
3943 3943 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3944 3944
3945 3945 /* Wait for dump to complete */
3946 3946 emlxs_dump_wait(hba);
3947 3947
3948 3948 return (FC_SUCCESS);
3949 3949 #endif /* DUMP_SUPPORT */
3950 3950
3951 3951 case FC_FCA_RESET:
3952 3952 case FC_FCA_RESET_CORE:
3953 3953
3954 3954 mutex_enter(&EMLXS_PORT_LOCK);
3955 3955 if (hba->reset_state & FC_PORT_RESET_INP) {
3956 3956 mutex_exit(&EMLXS_PORT_LOCK);
3957 3957 return (FC_FAILURE);
3958 3958 }
3959 3959
3960 3960 hba->reset_state |= FC_PORT_RESET_INP;
3961 3961 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
3962 3962
3963 3963 /* wait for any pending link resets to complete */
3964 3964 while ((hba->reset_state & FC_LINK_RESET_INP) &&
3965 3965 (i++ < 1000)) {
3966 3966 mutex_exit(&EMLXS_PORT_LOCK);
3967 3967 delay(drv_usectohz(1000));
3968 3968 mutex_enter(&EMLXS_PORT_LOCK);
3969 3969 }
3970 3970
3971 3971 if (hba->reset_state & FC_LINK_RESET_INP) {
3972 3972 hba->reset_state &= ~FC_PORT_RESET_INP;
3973 3973 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3974 3974 mutex_exit(&EMLXS_PORT_LOCK);
3975 3975 return (FC_FAILURE);
3976 3976 }
3977 3977 mutex_exit(&EMLXS_PORT_LOCK);
3978 3978
3979 3979 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3980 3980 "Resetting Adapter.");
3981 3981
3982 3982 rval = FC_SUCCESS;
3983 3983
3984 3984 if (emlxs_offline(hba, 0) == 0) {
3985 3985 (void) emlxs_online(hba);
3986 3986 } else {
3987 3987 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3988 3988 "Adapter reset failed. Device busy.");
3989 3989
3990 3990 rval = FC_DEVICE_BUSY;
3991 3991 }
3992 3992
3993 3993 mutex_enter(&EMLXS_PORT_LOCK);
3994 3994 hba->reset_state &= ~FC_PORT_RESET_INP;
3995 3995 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3996 3996 mutex_exit(&EMLXS_PORT_LOCK);
3997 3997
3998 3998 return (rval);
3999 3999
4000 4000 case EMLXS_DFC_RESET_ALL:
4001 4001 case EMLXS_DFC_RESET_ALL_FORCE_DUMP:
4002 4002
4003 4003 mutex_enter(&EMLXS_PORT_LOCK);
4004 4004 if (hba->reset_state & FC_PORT_RESET_INP) {
4005 4005 mutex_exit(&EMLXS_PORT_LOCK);
4006 4006 return (FC_FAILURE);
4007 4007 }
4008 4008
4009 4009 hba->reset_state |= FC_PORT_RESET_INP;
4010 4010 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
4011 4011
4012 4012 /* wait for any pending link resets to complete */
4013 4013 while ((hba->reset_state & FC_LINK_RESET_INP) &&
4014 4014 (i++ < 1000)) {
4015 4015 mutex_exit(&EMLXS_PORT_LOCK);
4016 4016 delay(drv_usectohz(1000));
4017 4017 mutex_enter(&EMLXS_PORT_LOCK);
4018 4018 }
4019 4019
4020 4020 if (hba->reset_state & FC_LINK_RESET_INP) {
4021 4021 hba->reset_state &= ~FC_PORT_RESET_INP;
4022 4022 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4023 4023 mutex_exit(&EMLXS_PORT_LOCK);
4024 4024 return (FC_FAILURE);
4025 4025 }
4026 4026 mutex_exit(&EMLXS_PORT_LOCK);
4027 4027
4028 4028 rval = FC_SUCCESS;
4029 4029
4030 4030 if (cmd == EMLXS_DFC_RESET_ALL) {
4031 4031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4032 4032 "Resetting Adapter (All Firmware Reset).");
4033 4033
4034 4034 emlxs_sli4_hba_reset_all(hba, 0);
4035 4035 } else {
4036 4036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4037 4037 "Resetting Adapter "
4038 4038 "(All Firmware Reset, Force Dump).");
4039 4039
4040 4040 emlxs_sli4_hba_reset_all(hba, 1);
4041 4041 }
4042 4042
4043 4043 mutex_enter(&EMLXS_PORT_LOCK);
4044 4044 hba->reset_state &= ~FC_PORT_RESET_INP;
4045 4045 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4046 4046 mutex_exit(&EMLXS_PORT_LOCK);
4047 4047
4048 4048 /* Wait for the timer thread to detect the error condition */
4049 4049 delay(drv_usectohz(1000000));
4050 4050
4051 4051 /* Wait for the HBA to re-initialize */
4052 4052 i = 0;
4053 4053 mutex_enter(&EMLXS_PORT_LOCK);
4054 4054 while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
4055 4055 mutex_exit(&EMLXS_PORT_LOCK);
4056 4056 delay(drv_usectohz(1000000));
4057 4057 mutex_enter(&EMLXS_PORT_LOCK);
4058 4058 }
4059 4059
4060 4060 if (!(hba->flag & FC_ONLINE_MODE)) {
4061 4061 rval = FC_FAILURE;
4062 4062 }
4063 4063
4064 4064 mutex_exit(&EMLXS_PORT_LOCK);
4065 4065
4066 4066 return (rval);
4067 4067
4068 4068 default:
4069 4069 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4070 4070 "reset: Unknown command. cmd=%x", cmd);
4071 4071
4072 4072 break;
4073 4073 }
4074 4074
4075 4075 return (FC_FAILURE);
4076 4076
4077 4077 } /* emlxs_reset() */
4078 4078
4079 4079
4080 4080 extern int32_t
4081 4081 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4082 4082 {
4083 4083 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4084 4084 emlxs_hba_t *hba = HBA;
4085 4085 int32_t rval;
4086 4086
4087 4087 if (port->mode != MODE_INITIATOR) {
4088 4088 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4089 4089 "fca_reset failed. Port is not in initiator mode.");
4090 4090
4091 4091 return (FC_FAILURE);
4092 4092 }
4093 4093
4094 4094 if (!(port->flag & EMLXS_INI_BOUND)) {
4095 4095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4096 4096 "fca_reset: Port not bound.");
4097 4097
4098 4098 return (FC_UNBOUND);
4099 4099 }
4100 4100
4101 4101 switch (cmd) {
4102 4102 case FC_FCA_LINK_RESET:
4103 4103 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4104 4104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4105 4105 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4106 4106 cmd = FC_FCA_RESET;
4107 4107 } else {
4108 4108 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4109 4109 "fca_reset: FC_FCA_LINK_RESET");
4110 4110 }
4111 4111 break;
4112 4112
4113 4113 case FC_FCA_CORE:
4114 4114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4115 4115 "fca_reset: FC_FCA_CORE");
4116 4116 break;
4117 4117
4118 4118 case FC_FCA_RESET:
4119 4119 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4120 4120 "fca_reset: FC_FCA_RESET");
4121 4121 break;
4122 4122
4123 4123 case FC_FCA_RESET_CORE:
4124 4124 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4125 4125 "fca_reset: FC_FCA_RESET_CORE");
4126 4126 break;
4127 4127
4128 4128 default:
4129 4129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4130 4130 "fca_reset: Unknown command. cmd=%x", cmd);
4131 4131 return (FC_FAILURE);
4132 4132 }
4133 4133
4134 4134 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4135 4135 hba->fw_flag |= FW_UPDATE_KERNEL;
4136 4136 }
4137 4137
4138 4138 rval = emlxs_reset(port, cmd);
4139 4139
4140 4140 return (rval);
4141 4141
4142 4142 } /* emlxs_fca_reset() */
4143 4143
4144 4144
4145 4145 extern int
4146 4146 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4147 4147 {
4148 4148 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4149 4149 emlxs_hba_t *hba = HBA;
4150 4150 int32_t ret;
4151 4151 emlxs_vpd_t *vpd = &VPD;
4152 4152
4153 4153 ret = FC_SUCCESS;
4154 4154
4155 4155 #ifdef IDLE_TIMER
4156 4156 emlxs_pm_busy_component(hba);
4157 4157 #endif /* IDLE_TIMER */
4158 4158
4159 4159 switch (pm->pm_cmd_code) {
4160 4160
4161 4161 case FC_PORT_GET_FW_REV:
4162 4162 {
4163 4163 char buffer[128];
4164 4164
4165 4165 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4166 4166 "fca_port_manage: FC_PORT_GET_FW_REV");
4167 4167
4168 4168 (void) snprintf(buffer, (sizeof (buffer)-1),
4169 4169 "%s %s", hba->model_info.model,
4170 4170 vpd->fw_version);
4171 4171 bzero(pm->pm_data_buf, pm->pm_data_len);
4172 4172
4173 4173 if (pm->pm_data_len < strlen(buffer) + 1) {
4174 4174 ret = FC_NOMEM;
4175 4175
4176 4176 break;
4177 4177 }
4178 4178
4179 4179 (void) strncpy(pm->pm_data_buf, buffer,
4180 4180 (pm->pm_data_len-1));
4181 4181 break;
4182 4182 }
4183 4183
4184 4184 case FC_PORT_GET_FCODE_REV:
4185 4185 {
4186 4186 char buffer[128];
4187 4187
4188 4188 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4189 4189 "fca_port_manage: FC_PORT_GET_FCODE_REV");
4190 4190
4191 4191 /* Force update here just to be sure */
4192 4192 emlxs_get_fcode_version(hba);
4193 4193
4194 4194 (void) snprintf(buffer, (sizeof (buffer)-1),
4195 4195 "%s %s", hba->model_info.model,
4196 4196 vpd->fcode_version);
4197 4197 bzero(pm->pm_data_buf, pm->pm_data_len);
4198 4198
4199 4199 if (pm->pm_data_len < strlen(buffer) + 1) {
4200 4200 ret = FC_NOMEM;
4201 4201 break;
4202 4202 }
4203 4203
4204 4204 (void) strncpy(pm->pm_data_buf, buffer,
4205 4205 (pm->pm_data_len-1));
4206 4206 break;
4207 4207 }
4208 4208
4209 4209 case FC_PORT_GET_DUMP_SIZE:
4210 4210 {
4211 4211 #ifdef DUMP_SUPPORT
4212 4212 uint32_t dump_size = 0;
4213 4213
4214 4214 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4215 4215 "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4216 4216
4217 4217 if (pm->pm_data_len < sizeof (uint32_t)) {
4218 4218 ret = FC_NOMEM;
4219 4219 break;
4220 4220 }
4221 4221
4222 4222 (void) emlxs_get_dump(hba, NULL, &dump_size);
4223 4223
4224 4224 *((uint32_t *)pm->pm_data_buf) = dump_size;
4225 4225
4226 4226 #else
4227 4227 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4228 4228 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4229 4229
4230 4230 #endif /* DUMP_SUPPORT */
4231 4231
4232 4232 break;
4233 4233 }
4234 4234
4235 4235 case FC_PORT_GET_DUMP:
4236 4236 {
4237 4237 #ifdef DUMP_SUPPORT
4238 4238 uint32_t dump_size = 0;
4239 4239
4240 4240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4241 4241 "fca_port_manage: FC_PORT_GET_DUMP");
4242 4242
4243 4243 (void) emlxs_get_dump(hba, NULL, &dump_size);
4244 4244
4245 4245 if (pm->pm_data_len < dump_size) {
4246 4246 ret = FC_NOMEM;
4247 4247 break;
4248 4248 }
4249 4249
4250 4250 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4251 4251 (uint32_t *)&dump_size);
4252 4252 #else
4253 4253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4254 4254 "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4255 4255
4256 4256 #endif /* DUMP_SUPPORT */
4257 4257
4258 4258 break;
4259 4259 }
4260 4260
4261 4261 case FC_PORT_FORCE_DUMP:
4262 4262 {
4263 4263 #ifdef DUMP_SUPPORT
4264 4264 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4265 4265 "fca_port_manage: FC_PORT_FORCE_DUMP");
4266 4266
4267 4267 /* Schedule a USER dump */
4268 4268 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4269 4269
4270 4270 /* Wait for dump to complete */
4271 4271 emlxs_dump_wait(hba);
4272 4272 #else
4273 4273 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4274 4274 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4275 4275
4276 4276 #endif /* DUMP_SUPPORT */
4277 4277 break;
4278 4278 }
4279 4279
4280 4280 case FC_PORT_LINK_STATE:
4281 4281 {
4282 4282 uint32_t *link_state;
4283 4283
4284 4284 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4285 4285 "fca_port_manage: FC_PORT_LINK_STATE");
4286 4286
4287 4287 if (pm->pm_stat_len != sizeof (*link_state)) {
4288 4288 ret = FC_NOMEM;
4289 4289 break;
4290 4290 }
4291 4291
4292 4292 if (pm->pm_cmd_buf != NULL) {
4293 4293 /*
4294 4294 * Can't look beyond the FCA port.
4295 4295 */
4296 4296 ret = FC_INVALID_REQUEST;
4297 4297 break;
4298 4298 }
4299 4299
4300 4300 link_state = (uint32_t *)pm->pm_stat_buf;
4301 4301
4302 4302 /* Set the state */
4303 4303 if (hba->state >= FC_LINK_UP) {
4304 4304 /* Check for loop topology */
4305 4305 if (hba->topology == TOPOLOGY_LOOP) {
4306 4306 *link_state = FC_STATE_LOOP;
4307 4307 } else {
4308 4308 *link_state = FC_STATE_ONLINE;
4309 4309 }
4310 4310
4311 4311 /* Set the link speed */
4312 4312 switch (hba->linkspeed) {
4313 4313 case LA_2GHZ_LINK:
4314 4314 *link_state |= FC_STATE_2GBIT_SPEED;
4315 4315 break;
4316 4316 case LA_4GHZ_LINK:
4317 4317 *link_state |= FC_STATE_4GBIT_SPEED;
4318 4318 break;
4319 4319 case LA_8GHZ_LINK:
4320 4320 *link_state |= FC_STATE_8GBIT_SPEED;
4321 4321 break;
4322 4322 case LA_10GHZ_LINK:
4323 4323 *link_state |= FC_STATE_10GBIT_SPEED;
4324 4324 break;
4325 4325 case LA_16GHZ_LINK:
4326 4326 *link_state |= FC_STATE_16GBIT_SPEED;
4327 4327 break;
4328 4328 case LA_1GHZ_LINK:
4329 4329 default:
4330 4330 *link_state |= FC_STATE_1GBIT_SPEED;
4331 4331 break;
4332 4332 }
4333 4333 } else {
4334 4334 *link_state = FC_STATE_OFFLINE;
4335 4335 }
4336 4336
4337 4337 break;
4338 4338 }
4339 4339
4340 4340
4341 4341 case FC_PORT_ERR_STATS:
4342 4342 case FC_PORT_RLS:
4343 4343 {
4344 4344 MAILBOXQ *mbq;
4345 4345 MAILBOX *mb;
4346 4346 fc_rls_acc_t *bp;
4347 4347
4348 4348 if (!(hba->flag & FC_ONLINE_MODE)) {
4349 4349 return (FC_OFFLINE);
4350 4350 }
4351 4351 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4352 4352 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4353 4353
4354 4354 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4355 4355 ret = FC_NOMEM;
4356 4356 break;
4357 4357 }
4358 4358
4359 4359 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4360 4360 MEM_MBOX)) == 0) {
4361 4361 ret = FC_NOMEM;
4362 4362 break;
4363 4363 }
4364 4364 mb = (MAILBOX *)mbq;
4365 4365
4366 4366 emlxs_mb_read_lnk_stat(hba, mbq);
4367 4367 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4368 4368 != MBX_SUCCESS) {
4369 4369 ret = FC_PBUSY;
4370 4370 } else {
4371 4371 bp = (fc_rls_acc_t *)pm->pm_data_buf;
4372 4372
4373 4373 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4374 4374 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4375 4375 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4376 4376 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4377 4377 bp->rls_invalid_word =
4378 4378 mb->un.varRdLnk.invalidXmitWord;
4379 4379 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4380 4380 }
4381 4381
4382 4382 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4383 4383 break;
4384 4384 }
4385 4385
4386 4386 case FC_PORT_DOWNLOAD_FW:
4387 4387 if (!(hba->flag & FC_ONLINE_MODE)) {
4388 4388 return (FC_OFFLINE);
4389 4389 }
4390 4390 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4391 4391 "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4392 4392 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4393 4393 pm->pm_data_len, 1);
4394 4394 break;
4395 4395
4396 4396 case FC_PORT_DOWNLOAD_FCODE:
4397 4397 if (!(hba->flag & FC_ONLINE_MODE)) {
4398 4398 return (FC_OFFLINE);
4399 4399 }
4400 4400 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4401 4401 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4402 4402 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4403 4403 pm->pm_data_len, 1);
4404 4404 break;
4405 4405
4406 4406 case FC_PORT_DIAG:
4407 4407 {
4408 4408 uint32_t errno = 0;
4409 4409 uint32_t did = 0;
4410 4410 uint32_t pattern = 0;
4411 4411
4412 4412 switch (pm->pm_cmd_flags) {
4413 4413 case EMLXS_DIAG_BIU:
4414 4414
4415 4415 if (!(hba->flag & FC_ONLINE_MODE)) {
4416 4416 return (FC_OFFLINE);
4417 4417 }
4418 4418 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4419 4419 "fca_port_manage: DIAG_BIU");
4420 4420
4421 4421 if (pm->pm_data_len) {
4422 4422 pattern = *((uint32_t *)pm->pm_data_buf);
4423 4423 }
4424 4424
4425 4425 errno = emlxs_diag_biu_run(hba, pattern);
4426 4426
4427 4427 if (pm->pm_stat_len == sizeof (errno)) {
4428 4428 *(int *)pm->pm_stat_buf = errno;
4429 4429 }
4430 4430
4431 4431 break;
4432 4432
4433 4433
4434 4434 case EMLXS_DIAG_POST:
4435 4435
4436 4436 if (!(hba->flag & FC_ONLINE_MODE)) {
4437 4437 return (FC_OFFLINE);
4438 4438 }
4439 4439 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4440 4440 "fca_port_manage: DIAG_POST");
4441 4441
4442 4442 errno = emlxs_diag_post_run(hba);
4443 4443
4444 4444 if (pm->pm_stat_len == sizeof (errno)) {
4445 4445 *(int *)pm->pm_stat_buf = errno;
4446 4446 }
4447 4447
4448 4448 break;
4449 4449
4450 4450
4451 4451 case EMLXS_DIAG_ECHO:
4452 4452
4453 4453 if (!(hba->flag & FC_ONLINE_MODE)) {
4454 4454 return (FC_OFFLINE);
4455 4455 }
4456 4456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4457 4457 "fca_port_manage: DIAG_ECHO");
4458 4458
4459 4459 if (pm->pm_cmd_len != sizeof (uint32_t)) {
4460 4460 ret = FC_INVALID_REQUEST;
4461 4461 break;
4462 4462 }
4463 4463
4464 4464 did = *((uint32_t *)pm->pm_cmd_buf);
4465 4465
4466 4466 if (pm->pm_data_len) {
4467 4467 pattern = *((uint32_t *)pm->pm_data_buf);
4468 4468 }
4469 4469
4470 4470 errno = emlxs_diag_echo_run(port, did, pattern);
4471 4471
4472 4472 if (pm->pm_stat_len == sizeof (errno)) {
4473 4473 *(int *)pm->pm_stat_buf = errno;
4474 4474 }
4475 4475
4476 4476 break;
4477 4477
4478 4478
4479 4479 case EMLXS_PARM_GET_NUM:
4480 4480 {
4481 4481 uint32_t *num;
4482 4482 emlxs_config_t *cfg;
4483 4483 uint32_t i;
4484 4484 uint32_t count;
4485 4485 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4486 4486 "fca_port_manage: PARM_GET_NUM");
4487 4487
4488 4488 if (pm->pm_stat_len < sizeof (uint32_t)) {
4489 4489 ret = FC_NOMEM;
4490 4490 break;
4491 4491 }
4492 4492
4493 4493 num = (uint32_t *)pm->pm_stat_buf;
4494 4494 count = 0;
4495 4495 cfg = &CFG;
4496 4496 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4497 4497 if (!(cfg->flags & PARM_HIDDEN)) {
4498 4498 count++;
4499 4499 }
4500 4500
4501 4501 }
4502 4502
4503 4503 *num = count;
4504 4504
4505 4505 break;
4506 4506 }
4507 4507
4508 4508 case EMLXS_PARM_GET_LIST:
4509 4509 {
4510 4510 emlxs_parm_t *parm;
4511 4511 emlxs_config_t *cfg;
4512 4512 uint32_t i;
4513 4513 uint32_t max_count;
4514 4514
4515 4515 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4516 4516 "fca_port_manage: PARM_GET_LIST");
4517 4517
4518 4518 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4519 4519 ret = FC_NOMEM;
4520 4520 break;
4521 4521 }
4522 4522
4523 4523 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4524 4524
4525 4525 parm = (emlxs_parm_t *)pm->pm_stat_buf;
4526 4526 cfg = &CFG;
4527 4527 for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4528 4528 cfg++) {
4529 4529 if (!(cfg->flags & PARM_HIDDEN)) {
4530 4530 (void) strncpy(parm->label, cfg->string,
4531 4531 (sizeof (parm->label)-1));
4532 4532 parm->min = cfg->low;
4533 4533 parm->max = cfg->hi;
4534 4534 parm->def = cfg->def;
4535 4535 parm->current = cfg->current;
4536 4536 parm->flags = cfg->flags;
4537 4537 (void) strncpy(parm->help, cfg->help,
4538 4538 (sizeof (parm->help)-1));
4539 4539 parm++;
4540 4540 max_count--;
4541 4541 }
4542 4542 }
4543 4543
4544 4544 break;
4545 4545 }
4546 4546
4547 4547 case EMLXS_PARM_GET:
4548 4548 {
4549 4549 emlxs_parm_t *parm_in;
4550 4550 emlxs_parm_t *parm_out;
4551 4551 emlxs_config_t *cfg;
4552 4552 uint32_t i;
4553 4553 uint32_t len;
4554 4554
4555 4555 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4556 4556 EMLXS_MSGF(EMLXS_CONTEXT,
4557 4557 &emlxs_sfs_debug_msg,
4558 4558 "fca_port_manage: PARM_GET. "
4559 4559 "inbuf too small.");
4560 4560
4561 4561 ret = FC_BADCMD;
4562 4562 break;
4563 4563 }
4564 4564
4565 4565 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4566 4566 EMLXS_MSGF(EMLXS_CONTEXT,
4567 4567 &emlxs_sfs_debug_msg,
4568 4568 "fca_port_manage: PARM_GET. "
4569 4569 "outbuf too small");
4570 4570
4571 4571 ret = FC_BADCMD;
4572 4572 break;
4573 4573 }
4574 4574
4575 4575 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4576 4576 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4577 4577 len = strlen(parm_in->label);
4578 4578 cfg = &CFG;
4579 4579 ret = FC_BADOBJECT;
4580 4580
4581 4581 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4582 4582 "fca_port_manage: PARM_GET: %s=0x%x,%d",
4583 4583 parm_in->label, parm_in->current,
4584 4584 parm_in->current);
4585 4585
4586 4586 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4587 4587 if (len == strlen(cfg->string) &&
4588 4588 (strcmp(parm_in->label,
4589 4589 cfg->string) == 0)) {
4590 4590 (void) strncpy(parm_out->label,
4591 4591 cfg->string,
4592 4592 (sizeof (parm_out->label)-1));
4593 4593 parm_out->min = cfg->low;
4594 4594 parm_out->max = cfg->hi;
4595 4595 parm_out->def = cfg->def;
4596 4596 parm_out->current = cfg->current;
4597 4597 parm_out->flags = cfg->flags;
4598 4598 (void) strncpy(parm_out->help,
4599 4599 cfg->help,
4600 4600 (sizeof (parm_out->help)-1));
4601 4601
4602 4602 ret = FC_SUCCESS;
4603 4603 break;
4604 4604 }
4605 4605 }
4606 4606
4607 4607 break;
4608 4608 }
4609 4609
4610 4610 case EMLXS_PARM_SET:
4611 4611 {
4612 4612 emlxs_parm_t *parm_in;
4613 4613 emlxs_parm_t *parm_out;
4614 4614 emlxs_config_t *cfg;
4615 4615 uint32_t i;
4616 4616 uint32_t len;
4617 4617
4618 4618 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4619 4619 EMLXS_MSGF(EMLXS_CONTEXT,
4620 4620 &emlxs_sfs_debug_msg,
4621 4621 "fca_port_manage: PARM_GET. "
4622 4622 "inbuf too small.");
4623 4623
4624 4624 ret = FC_BADCMD;
4625 4625 break;
4626 4626 }
4627 4627
4628 4628 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4629 4629 EMLXS_MSGF(EMLXS_CONTEXT,
4630 4630 &emlxs_sfs_debug_msg,
4631 4631 "fca_port_manage: PARM_GET. "
4632 4632 "outbuf too small");
4633 4633 ret = FC_BADCMD;
4634 4634 break;
4635 4635 }
4636 4636
4637 4637 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4638 4638 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4639 4639 len = strlen(parm_in->label);
4640 4640 cfg = &CFG;
4641 4641 ret = FC_BADOBJECT;
4642 4642
4643 4643 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4644 4644 "fca_port_manage: PARM_SET: %s=0x%x,%d",
4645 4645 parm_in->label, parm_in->current,
4646 4646 parm_in->current);
4647 4647
4648 4648 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4649 4649 /* Find matching parameter string */
4650 4650 if (len == strlen(cfg->string) &&
4651 4651 (strcmp(parm_in->label,
4652 4652 cfg->string) == 0)) {
4653 4653 /* Attempt to update parameter */
4654 4654 if (emlxs_set_parm(hba, i,
4655 4655 parm_in->current) == FC_SUCCESS) {
4656 4656 (void) strncpy(parm_out->label,
4657 4657 cfg->string,
4658 4658 (sizeof (parm_out->label)-
4659 4659 1));
4660 4660 parm_out->min = cfg->low;
4661 4661 parm_out->max = cfg->hi;
4662 4662 parm_out->def = cfg->def;
4663 4663 parm_out->current =
4664 4664 cfg->current;
4665 4665 parm_out->flags = cfg->flags;
4666 4666 (void) strncpy(parm_out->help,
4667 4667 cfg->help,
4668 4668 (sizeof (parm_out->help)-
4669 4669 1));
4670 4670
4671 4671 ret = FC_SUCCESS;
4672 4672 }
4673 4673
4674 4674 break;
4675 4675 }
4676 4676 }
4677 4677
4678 4678 break;
4679 4679 }
4680 4680
4681 4681 case EMLXS_LOG_GET:
4682 4682 {
4683 4683 emlxs_log_req_t *req;
4684 4684 emlxs_log_resp_t *resp;
4685 4685 uint32_t len;
4686 4686
4687 4687 /* Check command size */
4688 4688 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4689 4689 ret = FC_BADCMD;
4690 4690 break;
4691 4691 }
4692 4692
4693 4693 /* Get the request */
4694 4694 req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4695 4695
4696 4696 /* Calculate the response length from the request */
4697 4697 len = sizeof (emlxs_log_resp_t) +
4698 4698 (req->count * MAX_LOG_MSG_LENGTH);
4699 4699
4700 4700 /* Check the response buffer length */
4701 4701 if (pm->pm_stat_len < len) {
4702 4702 ret = FC_BADCMD;
4703 4703 break;
4704 4704 }
4705 4705
4706 4706 /* Get the response pointer */
4707 4707 resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4708 4708
4709 4709 			/* Get the requested log entries */
4710 4710 (void) emlxs_msg_log_get(hba, req, resp);
4711 4711
4712 4712 ret = FC_SUCCESS;
4713 4713 break;
4714 4714 }
4715 4715
4716 4716 case EMLXS_GET_BOOT_REV:
4717 4717 {
4718 4718 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4719 4719 "fca_port_manage: GET_BOOT_REV");
4720 4720
4721 4721 if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4722 4722 ret = FC_NOMEM;
4723 4723 break;
4724 4724 }
4725 4725
4726 4726 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4727 4727 (void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
4728 4728 "%s %s", hba->model_info.model, vpd->boot_version);
4729 4729
4730 4730 break;
4731 4731 }
4732 4732
4733 4733 case EMLXS_DOWNLOAD_BOOT:
4734 4734 if (!(hba->flag & FC_ONLINE_MODE)) {
4735 4735 return (FC_OFFLINE);
4736 4736 }
4737 4737 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4738 4738 "fca_port_manage: DOWNLOAD_BOOT");
4739 4739
4740 4740 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4741 4741 pm->pm_data_len, 1);
4742 4742 break;
4743 4743
4744 4744 case EMLXS_DOWNLOAD_CFL:
4745 4745 {
4746 4746 uint32_t *buffer;
4747 4747 uint32_t region;
4748 4748 uint32_t length;
4749 4749
4750 4750 if (!(hba->flag & FC_ONLINE_MODE)) {
4751 4751 return (FC_OFFLINE);
4752 4752 }
4753 4753
4754 4754 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4755 4755 "fca_port_manage: DOWNLOAD_CFL");
4756 4756
4757 4757 /* Extract the region number from the first word. */
4758 4758 buffer = (uint32_t *)pm->pm_data_buf;
4759 4759 region = *buffer++;
4760 4760
4761 4761 /* Adjust the image length for the header word */
4762 4762 length = pm->pm_data_len - 4;
4763 4763
4764 4764 ret =
4765 4765 emlxs_cfl_download(hba, region, (caddr_t)buffer,
4766 4766 length);
4767 4767 break;
4768 4768 }
4769 4769
4770 4770 case EMLXS_VPD_GET:
4771 4771 {
4772 4772 emlxs_vpd_desc_t *vpd_out;
4773 4773
4774 4774 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4775 4775 "fca_port_manage: VPD_GET");
4776 4776
4777 4777 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4778 4778 ret = FC_BADCMD;
4779 4779 break;
4780 4780 }
4781 4781
4782 4782 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4783 4783 bzero(vpd_out, pm->pm_stat_len);
4784 4784
4785 4785 (void) strncpy(vpd_out->id, vpd->id,
4786 4786 (sizeof (vpd_out->id)-1));
4787 4787 (void) strncpy(vpd_out->part_num, vpd->part_num,
4788 4788 (sizeof (vpd_out->part_num)-1));
4789 4789 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4790 4790 (sizeof (vpd_out->eng_change)-1));
4791 4791 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4792 4792 (sizeof (vpd_out->manufacturer)-1));
4793 4793 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4794 4794 (sizeof (vpd_out->serial_num)-1));
4795 4795 (void) strncpy(vpd_out->model, vpd->model,
4796 4796 (sizeof (vpd_out->model)-1));
4797 4797 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4798 4798 (sizeof (vpd_out->model_desc)-1));
4799 4799 (void) strncpy(vpd_out->port_num, vpd->port_num,
4800 4800 (sizeof (vpd_out->port_num)-1));
4801 4801 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4802 4802 (sizeof (vpd_out->prog_types)-1));
4803 4803
4804 4804 ret = FC_SUCCESS;
4805 4805
4806 4806 break;
4807 4807 }
4808 4808
4809 4809 case EMLXS_VPD_GET_V2:
4810 4810 {
4811 4811 emlxs_vpd_desc_v2_t *vpd_out;
4812 4812
4813 4813 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4814 4814 "fca_port_manage: VPD_GET_V2");
4815 4815
4816 4816 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4817 4817 ret = FC_BADCMD;
4818 4818 break;
4819 4819 }
4820 4820
4821 4821 vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4822 4822 bzero(vpd_out, pm->pm_stat_len);
4823 4823
4824 4824 (void) strncpy(vpd_out->id, vpd->id,
4825 4825 (sizeof (vpd_out->id)-1));
4826 4826 (void) strncpy(vpd_out->part_num, vpd->part_num,
4827 4827 (sizeof (vpd_out->part_num)-1));
4828 4828 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4829 4829 (sizeof (vpd_out->eng_change)-1));
4830 4830 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4831 4831 (sizeof (vpd_out->manufacturer)-1));
4832 4832 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4833 4833 (sizeof (vpd_out->serial_num)-1));
4834 4834 (void) strncpy(vpd_out->model, vpd->model,
4835 4835 (sizeof (vpd_out->model)-1));
4836 4836 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4837 4837 (sizeof (vpd_out->model_desc)-1));
4838 4838 (void) strncpy(vpd_out->port_num, vpd->port_num,
4839 4839 (sizeof (vpd_out->port_num)-1));
4840 4840 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4841 4841 (sizeof (vpd_out->prog_types)-1));
4842 4842
4843 4843 ret = FC_SUCCESS;
4844 4844
4845 4845 break;
4846 4846 }
4847 4847
4848 4848 case EMLXS_PHY_GET:
4849 4849 {
4850 4850 emlxs_phy_desc_t *phy_out;
4851 4851 MAILBOXQ *mbq;
4852 4852 MAILBOX4 *mb;
4853 4853 IOCTL_COMMON_GET_PHY_DETAILS *phy;
4854 4854 mbox_req_hdr_t *hdr_req;
4855 4855
4856 4856 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4857 4857 "fca_port_manage: EMLXS_PHY_GET");
4858 4858
4859 4859 if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4860 4860 ret = FC_BADCMD;
4861 4861 break;
4862 4862 }
4863 4863
4864 4864 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4865 4865 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4866 4866 "Invalid sli_mode. mode=%d", hba->sli_mode);
4867 4867 ret = FC_BADCMD;
4868 4868 break;
4869 4869 }
4870 4870
4871 4871 phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4872 4872 bzero(phy_out, sizeof (emlxs_phy_desc_t));
4873 4873
4874 4874 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4875 4875 MEM_MBOX)) == 0) {
4876 4876 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4877 4877 "Unable to allocate mailbox buffer.");
4878 4878 ret = FC_NOMEM;
4879 4879 break;
4880 4880 }
4881 4881
4882 4882 mb = (MAILBOX4*)mbq;
4883 4883
4884 4884 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
4885 4885
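			/*
			 * Build an embedded SLI_CONFIG mailbox carrying a
			 * COMMON_OPCODE_GET_PHY_DETAILS request.
			 */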
4886 4886 mb->un.varSLIConfig.be.embedded = 1;
4887 4887 mbq->mbox_cmpl = NULL;
4888 4888
4889 4889 mb->mbxCommand = MBX_SLI_CONFIG;
4890 4890 mb->mbxOwner = OWN_HOST;
4891 4891
4892 4892 hdr_req = (mbox_req_hdr_t *)
4893 4893 &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4894 4894 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4895 4895 hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4896 4896 hdr_req->timeout = 0;
4897 4897 hdr_req->req_length =
4898 4898 sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4899 4899
4900 4900 phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4901 4901
4902 4902 /* Send read request */
4903 4903 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4904 4904 MBX_SUCCESS) {
4905 4905 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4906 4906 "Unable to get PHY details. status=%x",
4907 4907 mb->mbxStatus);
4908 4908
4909 4909 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4910 4910
4911 4911 ret = FC_FAILURE;
4912 4912 break;
4913 4913 }
4914 4914
4915 4915 phy_out->phy_type = phy->params.response.phy_type;
4916 4916 phy_out->interface_type =
4917 4917 phy->params.response.interface_type;
4918 4918 phy_out->misc_params = phy->params.response.misc_params;
4919 4919 phy_out->rsvd[0] = phy->params.response.rsvd[0];
4920 4920 phy_out->rsvd[1] = phy->params.response.rsvd[1];
4921 4921 phy_out->rsvd[2] = phy->params.response.rsvd[2];
4922 4922 phy_out->rsvd[3] = phy->params.response.rsvd[3];
4923 4923
4924 4924 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4925 4925
4926 4926 ret = FC_SUCCESS;
4927 4927 break;
4928 4928 }
4929 4929
4930 4930 #ifdef NODE_THROTTLE_SUPPORT
4931 4931 case EMLXS_SET_THROTTLE:
4932 4932 {
4933 4933 emlxs_node_t *node;
4934 4934 uint32_t scope = 0;
4935 4935 uint32_t i;
4936 4936 char buf1[32];
4937 4937 emlxs_throttle_desc_t *desc;
4938 4938
4939 4939 if ((pm->pm_data_buf == NULL) ||
4940 4940 (pm->pm_data_len !=
4941 4941 sizeof (emlxs_throttle_desc_t))) {
4942 4942 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4943 4943 "fca_port_manage: EMLXS_SET_THROTTLE: "
4944 4944 "Descriptor buffer not valid. %d",
4945 4945 pm->pm_data_len);
4946 4946 ret = FC_BADCMD;
4947 4947 break;
4948 4948 }
4949 4949
4950 4950 if ((pm->pm_cmd_buf != NULL) &&
4951 4951 (pm->pm_cmd_len == sizeof (uint32_t))) {
4952 4952 scope = *(uint32_t *)pm->pm_cmd_buf;
4953 4953 }
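			/* scope: 0=WWPN (default), 1=all nodes, 2=FCP targets */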
4954 4954
4955 4955 desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4956 4956 desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4957 4957
4958 4958 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4959 4959 "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4960 4960 "depth=%d",
4961 4961 scope, desc->throttle);
4962 4962
4963 4963 rw_enter(&port->node_rwlock, RW_WRITER);
4964 4964 switch (scope) {
4965 4965 case 1: /* all */
4966 4966 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4967 4967 node = port->node_table[i];
4968 4968 while (node != NULL) {
4969 4969 node->io_throttle = desc->throttle;
4970 4970
4971 4971 EMLXS_MSGF(EMLXS_CONTEXT,
4972 4972 &emlxs_sfs_debug_msg,
4973 4973 "EMLXS_SET_THROTTLE: wwpn=%s "
4974 4974 "depth=%d",
4975 4975 emlxs_wwn_xlate(buf1, sizeof (buf1),
4976 4976 (uint8_t *)&node->nlp_portname),
4977 4977 node->io_throttle);
4978 4978
4979 4979 node = (NODELIST *)node->nlp_list_next;
4980 4980 }
4981 4981 }
4982 4982 break;
4983 4983
4984 4984 case 2: /* FCP */
4985 4985 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4986 4986 node = port->node_table[i];
4987 4987 while (node != NULL) {
4988 4988 if (!(node->nlp_fcp_info &
4989 4989 NLP_FCP_TGT_DEVICE)) {
4990 4990 node = (NODELIST *)
4991 4991 node->nlp_list_next;
4992 4992 continue;
4993 4993 }
4994 4994
4995 4995 node->io_throttle = desc->throttle;
4996 4996
4997 4997 EMLXS_MSGF(EMLXS_CONTEXT,
4998 4998 &emlxs_sfs_debug_msg,
4999 4999 "EMLXS_SET_THROTTLE: wwpn=%s "
5000 5000 "depth=%d",
5001 5001 emlxs_wwn_xlate(buf1, sizeof (buf1),
5002 5002 (uint8_t *)&node->nlp_portname),
5003 5003 node->io_throttle);
5004 5004
5005 5005 node = (NODELIST *)node->nlp_list_next;
5006 5006 }
5007 5007 }
5008 5008 break;
5009 5009
5010 5010 case 0: /* WWPN */
5011 5011 default:
5012 5012 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5013 5013 node = port->node_table[i];
5014 5014 while (node != NULL) {
5015 5015 if (bcmp((caddr_t)&node->nlp_portname,
5016 5016 desc->wwpn, 8)) {
5017 5017 node = (NODELIST *)
5018 5018 node->nlp_list_next;
5019 5019 continue;
5020 5020 }
5021 5021
5022 5022 node->io_throttle = desc->throttle;
5023 5023
5024 5024 EMLXS_MSGF(EMLXS_CONTEXT,
5025 5025 &emlxs_sfs_debug_msg,
5026 5026 "EMLXS_SET_THROTTLE: wwpn=%s "
5027 5027 "depth=%d",
5028 5028 emlxs_wwn_xlate(buf1, sizeof (buf1),
5029 5029 (uint8_t *)&node->nlp_portname),
5030 5030 node->io_throttle);
5031 5031
5032 5032 goto set_throttle_done;
5033 5033 }
5034 5034 }
5035 5035 set_throttle_done:
5036 5036 break;
5037 5037 }
5038 5038
5039 5039 rw_exit(&port->node_rwlock);
5040 5040 ret = FC_SUCCESS;
5041 5041
5042 5042 break;
5043 5043 }
5044 5044
5045 5045 case EMLXS_GET_THROTTLE:
5046 5046 {
5047 5047 emlxs_node_t *node;
5048 5048 uint32_t i;
5049 5049 uint32_t j;
5050 5050 char buf1[32];
5051 5051 uint32_t count;
5052 5052 emlxs_throttle_desc_t *desc;
5053 5053
5054 5054 if (pm->pm_stat_len == sizeof (uint32_t)) {
5055 5055 count = emlxs_nport_count(port);
5056 5056 *(uint32_t *)pm->pm_stat_buf = count;
5057 5057
5058 5058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5059 5059 "fca_port_manage: EMLXS_GET_THROTTLE: "
5060 5060 "count=%d",
5061 5061 count);
5062 5062
5063 5063 ret = FC_SUCCESS;
5064 5064 break;
5065 5065 }
5066 5066
5067 5067 if ((pm->pm_stat_buf == NULL) ||
5068 5068 (pm->pm_stat_len <
5069 5069 sizeof (emlxs_throttle_desc_t))) {
5070 5070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5071 5071 "fca_port_manage: EMLXS_GET_THROTTLE: "
5072 5072 "Descriptor buffer too small. %d",
5073 5073 			    pm->pm_stat_len);
5074 5074 ret = FC_BADCMD;
5075 5075 break;
5076 5076 }
5077 5077
5078 5078 count = pm->pm_stat_len /
5079 5079 sizeof (emlxs_throttle_desc_t);
5080 5080 desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5081 5081
5082 5082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5083 5083 "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5084 5084 count);
5085 5085
5086 5086 rw_enter(&port->node_rwlock, RW_READER);
5087 5087 j = 0;
5088 5088 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5089 5089 node = port->node_table[i];
5090 5090 while (node != NULL) {
5091 5091 if ((node->nlp_DID & 0xFFF000) ==
5092 5092 0xFFF000) {
5093 5093 node = (NODELIST *)
5094 5094 node->nlp_list_next;
5095 5095 continue;
5096 5096 }
5097 5097
5098 5098 bcopy((uint8_t *)&node->nlp_portname,
5099 5099 desc[j].wwpn, 8);
5100 5100 desc[j].throttle = node->io_throttle;
5101 5101
5102 5102 EMLXS_MSGF(EMLXS_CONTEXT,
5103 5103 &emlxs_sfs_debug_msg,
5104 5104 "EMLXS_GET_THROTTLE: wwpn=%s "
5105 5105 "depth=%d",
5106 5106 emlxs_wwn_xlate(buf1, sizeof (buf1),
5107 5107 desc[j].wwpn),
5108 5108 desc[j].throttle);
5109 5109
5110 5110 j++;
5111 5111 if (j >= count) {
5112 5112 goto get_throttle_done;
5113 5113 }
5114 5114
5115 5115 node = (NODELIST *)node->nlp_list_next;
5116 5116 }
5117 5117 }
5118 5118 get_throttle_done:
5119 5119 rw_exit(&port->node_rwlock);
5120 5120 ret = FC_SUCCESS;
5121 5121
5122 5122 break;
5123 5123 }
5124 5124 #endif /* NODE_THROTTLE_SUPPORT */
5125 5125
5126 5126 case EMLXS_GET_FCIO_REV:
5127 5127 {
5128 5128 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5129 5129 "fca_port_manage: GET_FCIO_REV");
5130 5130
5131 5131 if (pm->pm_stat_len < sizeof (uint32_t)) {
5132 5132 ret = FC_NOMEM;
5133 5133 break;
5134 5134 }
5135 5135
5136 5136 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5137 5137 *(uint32_t *)pm->pm_stat_buf = FCIO_REV;
5138 5138
5139 5139 break;
5140 5140 }
5141 5141
5142 5142 case EMLXS_GET_DFC_REV:
5143 5143 {
5144 5144 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5145 5145 "fca_port_manage: GET_DFC_REV");
5146 5146
5147 5147 if (pm->pm_stat_len < sizeof (uint32_t)) {
5148 5148 ret = FC_NOMEM;
5149 5149 break;
5150 5150 }
5151 5151
5152 5152 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5153 5153 *(uint32_t *)pm->pm_stat_buf = DFC_REV;
5154 5154
5155 5155 break;
5156 5156 }
5157 5157
5158 5158 case EMLXS_SET_BOOT_STATE:
5159 5159 case EMLXS_SET_BOOT_STATE_old:
5160 5160 {
5161 5161 uint32_t state;
5162 5162
5163 5163 if (!(hba->flag & FC_ONLINE_MODE)) {
5164 5164 return (FC_OFFLINE);
5165 5165 }
5166 5166 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5167 5167 EMLXS_MSGF(EMLXS_CONTEXT,
5168 5168 &emlxs_sfs_debug_msg,
5169 5169 "fca_port_manage: SET_BOOT_STATE");
5170 5170 ret = FC_BADCMD;
5171 5171 break;
5172 5172 }
5173 5173
5174 5174 state = *(uint32_t *)pm->pm_cmd_buf;
5175 5175
5176 5176 if (state == 0) {
5177 5177 EMLXS_MSGF(EMLXS_CONTEXT,
5178 5178 &emlxs_sfs_debug_msg,
5179 5179 "fca_port_manage: SET_BOOT_STATE: "
5180 5180 "Disable");
5181 5181 ret = emlxs_boot_code_disable(hba);
5182 5182 } else {
5183 5183 EMLXS_MSGF(EMLXS_CONTEXT,
5184 5184 &emlxs_sfs_debug_msg,
5185 5185 "fca_port_manage: SET_BOOT_STATE: "
5186 5186 "Enable");
5187 5187 ret = emlxs_boot_code_enable(hba);
5188 5188 }
5189 5189
5190 5190 break;
5191 5191 }
5192 5192
5193 5193 case EMLXS_GET_BOOT_STATE:
5194 5194 case EMLXS_GET_BOOT_STATE_old:
5195 5195 {
5196 5196 if (!(hba->flag & FC_ONLINE_MODE)) {
5197 5197 return (FC_OFFLINE);
5198 5198 }
5199 5199 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5200 5200 "fca_port_manage: GET_BOOT_STATE");
5201 5201
5202 5202 if (pm->pm_stat_len < sizeof (uint32_t)) {
5203 5203 ret = FC_NOMEM;
5204 5204 break;
5205 5205 }
5206 5206 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5207 5207
5208 5208 ret = emlxs_boot_code_state(hba);
5209 5209
5210 5210 if (ret == FC_SUCCESS) {
5211 5211 *(uint32_t *)pm->pm_stat_buf = 1;
5212 5212 ret = FC_SUCCESS;
5213 5213 } else if (ret == FC_FAILURE) {
5214 5214 ret = FC_SUCCESS;
5215 5215 }
5216 5216
5217 5217 break;
5218 5218 }
5219 5219
5220 5220 case EMLXS_HW_ERROR_TEST:
5221 5221 {
5222 5222 /*
5223 5223 * This command is used for simulating HW ERROR
5224 5224 * on SLI4 only.
5225 5225 */
5226 5226 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5227 5227 ret = FC_INVALID_REQUEST;
5228 5228 break;
5229 5229 }
5230 5230 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5231 5231 break;
5232 5232 }
5233 5233
5234 5234 case EMLXS_MB_TIMEOUT_TEST:
5235 5235 {
5236 5236 if (!(hba->flag & FC_ONLINE_MODE)) {
5237 5237 return (FC_OFFLINE);
5238 5238 }
5239 5239
5240 5240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5241 5241 "fca_port_manage: MB_TIMEOUT_TEST");
5242 5242
5243 5243 /* Trigger a mailbox timeout */
5244 5244 hba->mbox_timer = hba->timer_tics;
5245 5245
5246 5246 break;
5247 5247 }
5248 5248
5249 5249 case EMLXS_TEST_CODE:
5250 5250 {
5251 5251 uint32_t *cmd;
5252 5252
5253 5253 if (!(hba->flag & FC_ONLINE_MODE)) {
5254 5254 return (FC_OFFLINE);
5255 5255 }
5256 5256
5257 5257 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5258 5258 "fca_port_manage: TEST_CODE");
5259 5259
5260 5260 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5261 5261 EMLXS_MSGF(EMLXS_CONTEXT,
5262 5262 &emlxs_sfs_debug_msg,
5263 5263 "fca_port_manage: TEST_CODE. "
5264 5264 "inbuf too small.");
5265 5265
5266 5266 ret = FC_BADCMD;
5267 5267 break;
5268 5268 }
5269 5269
5270 5270 cmd = (uint32_t *)pm->pm_cmd_buf;
5271 5271
5272 5272 ret = emlxs_test(hba, cmd[0],
5273 5273 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5274 5274
5275 5275 break;
5276 5276 }
5277 5277
5278 5278 case EMLXS_BAR_IO:
5279 5279 {
5280 5280 uint32_t *cmd;
5281 5281 uint32_t *datap;
5282 5282 FCIO_Q_STAT_t *qp;
5283 5283 clock_t time;
5284 5284 uint32_t offset;
5285 5285 caddr_t addr;
5286 5286 uint32_t i;
5287 5287 uint32_t tx_cnt;
5288 5288 uint32_t chip_cnt;
5289 5289
5290 5290 cmd = (uint32_t *)pm->pm_cmd_buf;
5291 5291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5292 5292 "fca_port_manage: BAR_IO %x %x %x",
5293 5293 cmd[0], cmd[1], cmd[2]);
5294 5294
5295 5295 offset = cmd[1];
5296 5296
5297 5297 ret = FC_SUCCESS;
5298 5298
5299 5299 switch (cmd[0]) {
5300 5300 case 2: /* bar1read */
5301 5301 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5302 5302 return (FC_BADCMD);
5303 5303 }
5304 5304
5305 5305 /* Registers in this range are invalid */
5306 5306 if ((offset >= 0x4C00) && (offset < 0x5000)) {
5307 5307 return (FC_BADCMD);
5308 5308 }
5309 5309 if ((offset >= 0x5800) || (offset & 0x3)) {
5310 5310 return (FC_BADCMD);
5311 5311 }
5312 5312 datap = (uint32_t *)pm->pm_stat_buf;
5313 5313
5314 5314 for (i = 0; i < pm->pm_stat_len;
5315 5315 i += sizeof (uint32_t)) {
5316 5316 if ((offset >= 0x4C00) &&
5317 5317 (offset < 0x5000)) {
5318 5318 pm->pm_stat_len = i;
5319 5319 break;
5320 5320 }
5321 5321 if (offset >= 0x5800) {
5322 5322 pm->pm_stat_len = i;
5323 5323 break;
5324 5324 }
5325 5325 addr = hba->sli.sli4.bar1_addr + offset;
5326 5326 *datap = READ_BAR1_REG(hba, addr);
5327 5327 datap++;
5328 5328 offset += sizeof (uint32_t);
5329 5329 }
5330 5330 #ifdef FMA_SUPPORT
5331 5331 /* Access handle validation */
5332 5332 EMLXS_CHK_ACC_HANDLE(hba,
5333 5333 hba->sli.sli4.bar1_acc_handle);
5334 5334 #endif /* FMA_SUPPORT */
5335 5335 break;
5336 5336 case 3: /* bar2read */
5337 5337 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5338 5338 return (FC_BADCMD);
5339 5339 }
5340 5340 if ((offset >= 0x1000) || (offset & 0x3)) {
5341 5341 return (FC_BADCMD);
5342 5342 }
5343 5343 datap = (uint32_t *)pm->pm_stat_buf;
5344 5344
5345 5345 for (i = 0; i < pm->pm_stat_len;
5346 5346 i += sizeof (uint32_t)) {
5347 5347 *datap = READ_BAR2_REG(hba,
5348 5348 hba->sli.sli4.bar2_addr + offset);
5349 5349 datap++;
5350 5350 offset += sizeof (uint32_t);
5351 5351 }
5352 5352 #ifdef FMA_SUPPORT
5353 5353 /* Access handle validation */
5354 5354 EMLXS_CHK_ACC_HANDLE(hba,
5355 5355 hba->sli.sli4.bar2_acc_handle);
5356 5356 #endif /* FMA_SUPPORT */
5357 5357 break;
5358 5358 case 4: /* bar1write */
5359 5359 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5360 5360 return (FC_BADCMD);
5361 5361 }
5362 5362 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5363 5363 offset, cmd[2]);
5364 5364 #ifdef FMA_SUPPORT
5365 5365 /* Access handle validation */
5366 5366 EMLXS_CHK_ACC_HANDLE(hba,
5367 5367 hba->sli.sli4.bar1_acc_handle);
5368 5368 #endif /* FMA_SUPPORT */
5369 5369 break;
5370 5370 case 5: /* bar2write */
5371 5371 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5372 5372 return (FC_BADCMD);
5373 5373 }
5374 5374 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5375 5375 offset, cmd[2]);
5376 5376 #ifdef FMA_SUPPORT
5377 5377 /* Access handle validation */
5378 5378 EMLXS_CHK_ACC_HANDLE(hba,
5379 5379 hba->sli.sli4.bar2_acc_handle);
5380 5380 #endif /* FMA_SUPPORT */
5381 5381 break;
5382 5382 case 6: /* dumpbsmbox */
5383 5383 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5384 5384 return (FC_BADCMD);
5385 5385 }
5386 5386 if (offset != 0) {
5387 5387 return (FC_BADCMD);
5388 5388 }
5389 5389
5390 5390 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5391 5391 (caddr_t)pm->pm_stat_buf, 256);
5392 5392 break;
5393 5393 case 7: /* pciread */
5394 5394 if ((offset >= 0x200) || (offset & 0x3)) {
5395 5395 return (FC_BADCMD);
5396 5396 }
5397 5397 datap = (uint32_t *)pm->pm_stat_buf;
5398 5398 for (i = 0; i < pm->pm_stat_len;
5399 5399 i += sizeof (uint32_t)) {
5400 5400 *datap = ddi_get32(hba->pci_acc_handle,
5401 5401 (uint32_t *)(hba->pci_addr +
5402 5402 offset));
5403 5403 datap++;
5404 5404 offset += sizeof (uint32_t);
5405 5405 }
5406 5406 #ifdef FMA_SUPPORT
5407 5407 /* Access handle validation */
5408 5408 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5409 5409 #endif /* FMA_SUPPORT */
5410 5410 break;
5411 5411 case 8: /* abortall */
5412 5412 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5413 5413 return (FC_BADCMD);
5414 5414 }
5415 5415 emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5416 5416 datap = (uint32_t *)pm->pm_stat_buf;
5417 5417 *datap++ = tx_cnt;
5418 5418 *datap = chip_cnt;
5419 5419 break;
5420 5420 case 9: /* get_q_info */
5421 5421 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5422 5422 return (FC_BADCMD);
5423 5423 }
5424 5424 qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5425 5425 for (i = 0; i < FCIO_MAX_EQS; i++) {
5426 5426 addr = hba->sli.sli4.eq[i].addr.virt;
5427 5427 qp->eq[i].host_index =
5428 5428 hba->sli.sli4.eq[i].host_index;
5429 5429 qp->eq[i].max_index =
5430 5430 hba->sli.sli4.eq[i].max_index;
5431 5431 qp->eq[i].qid =
5432 5432 hba->sli.sli4.eq[i].qid;
5433 5433 qp->eq[i].msix_vector =
5434 5434 hba->sli.sli4.eq[i].msix_vector;
5435 5435 qp->eq[i].phys =
5436 5436 hba->sli.sli4.eq[i].addr.phys;
5437 5437 qp->eq[i].virt = PADDR_LO(
5438 5438 (uintptr_t)addr);
5439 5439 qp->eq[i].virt_hi = PADDR_HI(
5440 5440 (uintptr_t)addr);
5441 5441 qp->eq[i].max_proc =
5442 5442 hba->sli.sli4.eq[i].max_proc;
5443 5443 qp->eq[i].isr_count =
5444 5444 hba->sli.sli4.eq[i].isr_count;
5445 5445 qp->eq[i].num_proc =
5446 5446 hba->sli.sli4.eq[i].num_proc;
5447 5447 }
5448 5448 for (i = 0; i < FCIO_MAX_CQS; i++) {
5449 5449 addr = hba->sli.sli4.cq[i].addr.virt;
5450 5450 qp->cq[i].host_index =
5451 5451 hba->sli.sli4.cq[i].host_index;
5452 5452 qp->cq[i].max_index =
5453 5453 hba->sli.sli4.cq[i].max_index;
5454 5454 qp->cq[i].qid =
5455 5455 hba->sli.sli4.cq[i].qid;
5456 5456 qp->cq[i].eqid =
5457 5457 hba->sli.sli4.cq[i].eqid;
5458 5458 qp->cq[i].type =
5459 5459 hba->sli.sli4.cq[i].type;
5460 5460 qp->cq[i].phys =
5461 5461 hba->sli.sli4.cq[i].addr.phys;
5462 5462 qp->cq[i].virt = PADDR_LO(
5463 5463 (uintptr_t)addr);
5464 5464 qp->cq[i].virt_hi = PADDR_HI(
5465 5465 (uintptr_t)addr);
5466 5466 qp->cq[i].max_proc =
5467 5467 hba->sli.sli4.cq[i].max_proc;
5468 5468 qp->cq[i].isr_count =
5469 5469 hba->sli.sli4.cq[i].isr_count;
5470 5470 qp->cq[i].num_proc =
5471 5471 hba->sli.sli4.cq[i].num_proc;
5472 5472 }
5473 5473 for (i = 0; i < FCIO_MAX_WQS; i++) {
5474 5474 addr = hba->sli.sli4.wq[i].addr.virt;
5475 5475 qp->wq[i].host_index =
5476 5476 hba->sli.sli4.wq[i].host_index;
5477 5477 qp->wq[i].max_index =
5478 5478 hba->sli.sli4.wq[i].max_index;
5479 5479 qp->wq[i].port_index =
5480 5480 hba->sli.sli4.wq[i].port_index;
5481 5481 qp->wq[i].release_depth =
5482 5482 hba->sli.sli4.wq[i].release_depth;
5483 5483 qp->wq[i].qid =
5484 5484 hba->sli.sli4.wq[i].qid;
5485 5485 qp->wq[i].cqid =
5486 5486 hba->sli.sli4.wq[i].cqid;
5487 5487 qp->wq[i].phys =
5488 5488 hba->sli.sli4.wq[i].addr.phys;
5489 5489 qp->wq[i].virt = PADDR_LO(
5490 5490 (uintptr_t)addr);
5491 5491 qp->wq[i].virt_hi = PADDR_HI(
5492 5492 (uintptr_t)addr);
5493 5493 qp->wq[i].num_proc =
5494 5494 hba->sli.sli4.wq[i].num_proc;
5495 5495 qp->wq[i].num_busy =
5496 5496 hba->sli.sli4.wq[i].num_busy;
5497 5497 }
5498 5498 for (i = 0; i < FCIO_MAX_RQS; i++) {
5499 5499 addr = hba->sli.sli4.rq[i].addr.virt;
5500 5500 qp->rq[i].qid =
5501 5501 hba->sli.sli4.rq[i].qid;
5502 5502 qp->rq[i].cqid =
5503 5503 hba->sli.sli4.rq[i].cqid;
5504 5504 qp->rq[i].host_index =
5505 5505 hba->sli.sli4.rq[i].host_index;
5506 5506 qp->rq[i].max_index =
5507 5507 hba->sli.sli4.rq[i].max_index;
5508 5508 qp->rq[i].phys =
5509 5509 hba->sli.sli4.rq[i].addr.phys;
5510 5510 qp->rq[i].virt = PADDR_LO(
5511 5511 (uintptr_t)addr);
5512 5512 qp->rq[i].virt_hi = PADDR_HI(
5513 5513 (uintptr_t)addr);
5514 5514 qp->rq[i].num_proc =
5515 5515 hba->sli.sli4.rq[i].num_proc;
5516 5516 }
5517 5517 qp->que_start_timer =
5518 5518 hba->sli.sli4.que_stat_timer;
5519 5519 (void) drv_getparm(LBOLT, &time);
5520 5520 qp->que_current_timer = (uint32_t)time;
5521 5521 qp->intr_count = hba->intr_count;
5522 5522 break;
5523 5523 case 10: /* zero_q_stat */
5524 5524 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5525 5525 return (FC_BADCMD);
5526 5526 }
5527 5527 emlxs_sli4_zero_queue_stat(hba);
5528 5528 break;
5529 5529 default:
5530 5530 ret = FC_BADCMD;
5531 5531 break;
5532 5532 }
5533 5533 break;
5534 5534 }
5535 5535
5536 5536 default:
5537 5537
5538 5538 ret = FC_INVALID_REQUEST;
5539 5539 break;
5540 5540 }
5541 5541
5542 5542 break;
5543 5543
5544 5544 }
5545 5545
5546 5546 case FC_PORT_INITIALIZE:
5547 5547 if (!(hba->flag & FC_ONLINE_MODE)) {
5548 5548 return (FC_OFFLINE);
5549 5549 }
5550 5550 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5551 5551 "fca_port_manage: FC_PORT_INITIALIZE");
5552 5552 break;
5553 5553
5554 5554 case FC_PORT_LOOPBACK:
5555 5555 if (!(hba->flag & FC_ONLINE_MODE)) {
5556 5556 return (FC_OFFLINE);
5557 5557 }
5558 5558 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5559 5559 "fca_port_manage: FC_PORT_LOOPBACK");
5560 5560 break;
5561 5561
5562 5562 case FC_PORT_BYPASS:
5563 5563 if (!(hba->flag & FC_ONLINE_MODE)) {
5564 5564 return (FC_OFFLINE);
5565 5565 }
5566 5566 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5567 5567 "fca_port_manage: FC_PORT_BYPASS");
5568 5568 ret = FC_INVALID_REQUEST;
5569 5569 break;
5570 5570
5571 5571 case FC_PORT_UNBYPASS:
5572 5572 if (!(hba->flag & FC_ONLINE_MODE)) {
5573 5573 return (FC_OFFLINE);
5574 5574 }
5575 5575 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5576 5576 "fca_port_manage: FC_PORT_UNBYPASS");
5577 5577 ret = FC_INVALID_REQUEST;
5578 5578 break;
5579 5579
5580 5580 case FC_PORT_GET_NODE_ID:
5581 5581 {
5582 5582 fc_rnid_t *rnid;
5583 5583
5584 5584 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5585 5585 "fca_port_manage: FC_PORT_GET_NODE_ID");
5586 5586
5587 5587 bzero(pm->pm_data_buf, pm->pm_data_len);
5588 5588
5589 5589 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5590 5590 ret = FC_NOMEM;
5591 5591 break;
5592 5592 }
5593 5593
5594 5594 rnid = (fc_rnid_t *)pm->pm_data_buf;
5595 5595
5596 5596 (void) snprintf((char *)rnid->global_id,
5597 5597 (sizeof (rnid->global_id)-1),
5598 5598 "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5599 5599 hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5600 5600 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5601 5601 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5602 5602 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5603 5603
5604 5604 rnid->unit_type = RNID_HBA;
5605 5605 rnid->port_id = port->did;
5606 5606 rnid->ip_version = RNID_IPV4;
5607 5607
5608 5608 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5609 5609 "GET_NODE_ID: wwpn: %s", rnid->global_id);
5610 5610 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5611 5611 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5612 5612 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5613 5613 "GET_NODE_ID: port_id: 0x%x", rnid->port_id);
5614 5614 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5615 5615 "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5616 5616 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5617 5617 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5618 5618 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5619 5619 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5620 5620 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5621 5621 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5622 5622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5623 5623 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5624 5624 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5625 5625 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5626 5626
5627 5627 ret = FC_SUCCESS;
5628 5628 break;
5629 5629 }
5630 5630
5631 5631 case FC_PORT_SET_NODE_ID:
5632 5632 {
5633 5633 fc_rnid_t *rnid;
5634 5634
5635 5635 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5636 5636 "fca_port_manage: FC_PORT_SET_NODE_ID");
5637 5637
5638 5638 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5639 5639 ret = FC_NOMEM;
5640 5640 break;
5641 5641 }
5642 5642
5643 5643 rnid = (fc_rnid_t *)pm->pm_data_buf;
5644 5644
5645 5645 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5646 5646 "SET_NODE_ID: wwpn: %s", rnid->global_id);
5647 5647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5648 5648 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5649 5649 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5650 5650 "SET_NODE_ID: port_id: 0x%x", rnid->port_id);
5651 5651 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5652 5652 "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5653 5653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5654 5654 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5655 5655 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5656 5656 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5657 5657 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5658 5658 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5659 5659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5660 5660 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5661 5661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5662 5662 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5663 5663
5664 5664 ret = FC_SUCCESS;
5665 5665 break;
5666 5666 }
5667 5667
5668 5668 #ifdef S11
5669 5669 case FC_PORT_GET_P2P_INFO:
5670 5670 {
5671 5671 fc_fca_p2p_info_t *p2p_info;
5672 5672 NODELIST *ndlp;
5673 5673
5674 5674 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5675 5675 "fca_port_manage: FC_PORT_GET_P2P_INFO");
5676 5676
5677 5677 bzero(pm->pm_data_buf, pm->pm_data_len);
5678 5678
5679 5679 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5680 5680 ret = FC_NOMEM;
5681 5681 break;
5682 5682 }
5683 5683
5684 5684 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5685 5685
5686 5686 if (hba->state >= FC_LINK_UP) {
5687 5687 if ((hba->topology == TOPOLOGY_PT_PT) &&
5688 5688 (hba->flag & FC_PT_TO_PT)) {
5689 5689 p2p_info->fca_d_id = port->did;
5690 5690 p2p_info->d_id = port->rdid;
5691 5691
5692 5692 ndlp = emlxs_node_find_did(port,
5693 5693 port->rdid, 1);
5694 5694
5695 5695 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5696 5696 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5697 5697 "d_id: 0x%x, ndlp: 0x%p", port->did,
5698 5698 port->rdid, ndlp);
5699 5699 if (ndlp) {
5700 5700 bcopy(&ndlp->nlp_portname,
5701 5701 (caddr_t)&p2p_info->pwwn,
5702 5702 sizeof (la_wwn_t));
5703 5703 bcopy(&ndlp->nlp_nodename,
5704 5704 (caddr_t)&p2p_info->nwwn,
5705 5705 sizeof (la_wwn_t));
5706 5706
5707 5707 ret = FC_SUCCESS;
5708 5708 break;
5709 5709
5710 5710 }
5711 5711 }
5712 5712 }
5713 5713
5714 5714 ret = FC_FAILURE;
5715 5715 break;
5716 5716 }
5717 5717 #endif /* S11 */
5718 5718
5719 5719 default:
5720 5720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5721 5721 "fca_port_manage: code=%x", pm->pm_cmd_code);
5722 5722 ret = FC_INVALID_REQUEST;
5723 5723 break;
5724 5724
5725 5725 }
5726 5726
5727 5727 return (ret);
5728 5728
5729 5729 } /* emlxs_fca_port_manage() */
5730 5730
5731 5731
5732 5732 /*ARGSUSED*/
5733 5733 static uint32_t
5734 5734 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5735 5735 uint32_t *arg)
5736 5736 {
5737 5737 uint32_t rval = 0;
5738 5738 emlxs_port_t *port = &PPORT;
5739 5739
5740 5740 switch (test_code) {
5741 5741 #ifdef TEST_SUPPORT
5742 5742 case 1: /* SCSI underrun */
5743 5743 {
5744 5744 hba->underrun_counter = (args)? arg[0]:1;
5745 5745 break;
5746 5746 }
5747 5747 #endif /* TEST_SUPPORT */
5748 5748
5749 5749 default:
5750 5750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5751 5751 "test: Unsupported test code. (0x%x)", test_code);
5752 5752 rval = FC_INVALID_REQUEST;
5753 5753 }
5754 5754
5755 5755 return (rval);
5756 5756
5757 5757 } /* emlxs_test() */
5758 5758
5759 5759
5760 5760 /*
5761 5761 * Given the device number, return the devinfo pointer or the ddiinst number.
5762 5762 * Note: this routine must succeed for DDI_INFO_DEVT2INSTANCE even
5763 5763 * before attach.
5764 5764 *
5765 5765 * Translate "dev_t" to a pointer to the associated "dev_info_t".
5766 5766 */
5767 5767 /*ARGSUSED*/
5768 5768 static int
5769 5769 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5770 5770 {
5771 5771 emlxs_hba_t *hba;
5772 5772 int32_t ddiinst;
5773 5773
5774 5774 ddiinst = getminor((dev_t)arg);
5775 5775
5776 5776 switch (infocmd) {
5777 5777 case DDI_INFO_DEVT2DEVINFO:
5778 5778 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5779 5779 if (hba)
5780 5780 *result = hba->dip;
5781 5781 else
5782 5782 *result = NULL;
5783 5783 break;
5784 5784
5785 5785 case DDI_INFO_DEVT2INSTANCE:
5786 5786 *result = (void *)((unsigned long)ddiinst);
5787 5787 break;
5788 5788
5789 5789 default:
5790 5790 return (DDI_FAILURE);
5791 5791 }
5792 5792
5793 5793 return (DDI_SUCCESS);
5794 5794
5795 5795 } /* emlxs_info() */
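
/*
 * Illustrative sketch (not part of this driver): a getinfo(9E) routine
 * such as emlxs_info() is normally wired into the driver's dev_ops
 * table, which is why DDI_INFO_DEVT2INSTANCE must work even before
 * attach completes. The field values below are hypothetical
 * placeholders; the driver's real table is defined elsewhere.
 */
#if 0
static struct dev_ops emlxs_sketch_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	emlxs_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	emlxs_attach,		/* devo_attach (hypothetical name) */
	emlxs_detach,		/* devo_detach (hypothetical name) */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops (real driver supplies one) */
	NULL,			/* devo_bus_ops */
	emlxs_power,		/* devo_power */
	ddi_quiesce_not_needed,	/* devo_quiesce (or a driver routine) */
};
#endif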
5796 5796
5797 5797
5798 5798 static int32_t
5799 5799 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5800 5800 {
5801 5801 emlxs_hba_t *hba;
5802 5802 emlxs_port_t *port;
5803 5803 int32_t ddiinst;
5804 5804 int rval = DDI_SUCCESS;
5805 5805
5806 5806 ddiinst = ddi_get_instance(dip);
5807 5807 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5808 5808 port = &PPORT;
5809 5809
5810 5810 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5811 5811 "fca_power: comp=%x level=%x", comp, level);
5812 5812
5813 5813 if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5814 5814 return (DDI_FAILURE);
5815 5815 }
5816 5816
5817 5817 mutex_enter(&EMLXS_PM_LOCK);
5818 5818
5819 5819 /* If we are already at the proper level then return success */
5820 5820 if (hba->pm_level == level) {
5821 5821 mutex_exit(&EMLXS_PM_LOCK);
5822 5822 return (DDI_SUCCESS);
5823 5823 }
5824 5824
5825 5825 switch (level) {
5826 5826 case EMLXS_PM_ADAPTER_UP:
5827 5827
5828 5828 /*
5829 5829 * If we are already in emlxs_attach,
5830 5830 * let emlxs_hba_attach take care of things
5831 5831 */
5832 5832 if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5833 5833 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5834 5834 break;
5835 5835 }
5836 5836
5837 5837 /* Check if adapter is suspended */
5838 5838 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5839 5839 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5840 5840
5841 5841 /* Try to resume the port */
5842 5842 rval = emlxs_hba_resume(dip);
5843 5843
5844 5844 if (rval != DDI_SUCCESS) {
5845 5845 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5846 5846 }
5847 5847 break;
5848 5848 }
5849 5849
5850 5850 /* Set adapter up */
5851 5851 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5852 5852 break;
5853 5853
5854 5854 case EMLXS_PM_ADAPTER_DOWN:
5855 5855
5856 5856
5857 5857 /*
5858 5858 * If we are already in emlxs_detach,
5859 5859 * let emlxs_hba_detach take care of things
5860 5860 */
5861 5861 if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5862 5862 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5863 5863 break;
5864 5864 }
5865 5865
5866 5866 /* Check if adapter is not suspended */
5867 5867 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5868 5868 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5869 5869
5870 5870 /* Try to suspend the port */
5871 5871 rval = emlxs_hba_suspend(dip);
5872 5872
5873 5873 if (rval != DDI_SUCCESS) {
5874 5874 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5875 5875 }
5876 5876
5877 5877 break;
5878 5878 }
5879 5879
5880 5880 /* Set adapter down */
5881 5881 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5882 5882 break;
5883 5883
5884 5884 default:
5885 5885 rval = DDI_FAILURE;
5886 5886 break;
5887 5887
5888 5888 }
5889 5889
5890 5890 mutex_exit(&EMLXS_PM_LOCK);
5891 5891
5892 5892 return (rval);
5893 5893
5894 5894 } /* emlxs_power() */
5895 5895
5896 5896
5897 5897 #ifdef EMLXS_I386
5898 5898 #ifdef S11
5899 5899 /*
5900 5900 * quiesce(9E) entry point.
5901 5901 *
5902 5902 * This function is called when the system is single-threaded at high PIL
5903 5903 * with preemption disabled. Therefore, this function must not block.
5904 5904 *
5905 5905 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5906 5906 * DDI_FAILURE indicates an error condition and should almost never happen.
5907 5907 */
5908 5908 static int
5909 5909 emlxs_quiesce(dev_info_t *dip)
5910 5910 {
5911 5911 emlxs_hba_t *hba;
5912 5912 emlxs_port_t *port;
5913 5913 int32_t ddiinst;
5914 5914 int rval = DDI_SUCCESS;
5915 5915
5916 5916 ddiinst = ddi_get_instance(dip);
5917 5917 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5918 5918 port = &PPORT;
5919 5919
5920 5920 if (hba == NULL || port == NULL) {
5921 5921 return (DDI_FAILURE);
5922 5922 }
5923 5923
5924 5924 /* The fourth arg 1 indicates the call is from quiesce */
5925 5925 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5926 5926 return (rval);
5927 5927 } else {
5928 5928 return (DDI_FAILURE);
5929 5929 }
5930 5930
5931 5931 } /* emlxs_quiesce */
5932 5932 #endif /* S11 */
5933 5933 #endif /* EMLXS_I386 */
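
/*
 * Illustrative sketch (not part of this driver): quiesce(9E) is reached
 * through the devo_quiesce field of dev_ops. Because it runs at high PIL
 * with preemption disabled, it must not take mutexes or sleep. Drivers
 * that have no DMA or interrupt state to stop can instead supply the
 * stock ddi_quiesce_not_needed(9F) helper; the assignment below is a
 * hypothetical fragment only.
 */
#if 0
	sketch_dev_ops.devo_quiesce = ddi_quiesce_not_needed;
#endif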
5934 5934
5935 5935
5936 5936 static int
5937 5937 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5938 5938 {
5939 5939 emlxs_hba_t *hba;
5940 5940 emlxs_port_t *port;
5941 5941 int ddiinst;
5942 5942
5943 5943 ddiinst = getminor(*dev_p);
5944 5944 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5945 5945
5946 5946 if (hba == NULL) {
5947 5947 return (ENXIO);
5948 5948 }
5949 5949
5950 5950 port = &PPORT;
5951 5951
5952 5952 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5953 5953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5954 5954 "open failed: Driver suspended.");
5955 5955 return (ENXIO);
5956 5956 }
5957 5957
5958 5958 if (otype != OTYP_CHR) {
5959 5959 return (EINVAL);
5960 5960 }
5961 5961
5962 5962 if (drv_priv(cred_p)) {
5963 5963 return (EPERM);
5964 5964 }
5965 5965
5966 5966 mutex_enter(&EMLXS_IOCTL_LOCK);
5967 5967
5968 5968 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5969 5969 mutex_exit(&EMLXS_IOCTL_LOCK);
5970 5970 return (EBUSY);
5971 5971 }
5972 5972
5973 5973 if (flag & FEXCL) {
5974 5974 if (hba->ioctl_flags & EMLXS_OPEN) {
5975 5975 mutex_exit(&EMLXS_IOCTL_LOCK);
5976 5976 return (EBUSY);
5977 5977 }
5978 5978
5979 5979 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5980 5980 }
5981 5981
5982 5982 hba->ioctl_flags |= EMLXS_OPEN;
5983 5983
5984 5984 mutex_exit(&EMLXS_IOCTL_LOCK);
5985 5985
5986 5986 return (0);
5987 5987
5988 5988 } /* emlxs_open() */
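
/*
 * Illustrative sketch (not part of this driver): the FEXCL handling above
 * corresponds to an exclusive open from userland. The device path below
 * is hypothetical; the driver's "devctl" minor node appears under
 * /devices on the real system.
 */
#if 0
	int fd = open("/devices/pci@0,0/fibre-channel@0:devctl",
	    O_RDWR | O_EXCL);
	if (fd == -1 && errno == EBUSY) {
		/* another consumer already holds the node open */
	}
#endif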
5989 5989
5990 5990
5991 5991 /*ARGSUSED*/
5992 5992 static int
5993 5993 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5994 5994 {
5995 5995 emlxs_hba_t *hba;
5996 5996 int ddiinst;
5997 5997
5998 5998 ddiinst = getminor(dev);
5999 5999 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6000 6000
6001 6001 if (hba == NULL) {
6002 6002 return (ENXIO);
6003 6003 }
6004 6004
6005 6005 if (otype != OTYP_CHR) {
6006 6006 return (EINVAL);
6007 6007 }
6008 6008
6009 6009 mutex_enter(&EMLXS_IOCTL_LOCK);
6010 6010
6011 6011 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6012 6012 mutex_exit(&EMLXS_IOCTL_LOCK);
6013 6013 return (ENODEV);
6014 6014 }
6015 6015
6016 6016 hba->ioctl_flags &= ~EMLXS_OPEN;
6017 6017 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6018 6018
6019 6019 mutex_exit(&EMLXS_IOCTL_LOCK);
6020 6020
6021 6021 return (0);
6022 6022
6023 6023 } /* emlxs_close() */
6024 6024
6025 6025
6026 6026 /*ARGSUSED*/
6027 6027 static int
6028 6028 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6029 6029 cred_t *cred_p, int32_t *rval_p)
6030 6030 {
6031 6031 emlxs_hba_t *hba;
6032 6032 emlxs_port_t *port;
6033 6033 int rval = 0; /* return code */
6034 6034 int ddiinst;
6035 6035
6036 6036 ddiinst = getminor(dev);
6037 6037 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6038 6038
6039 6039 if (hba == NULL) {
6040 6040 return (ENXIO);
6041 6041 }
6042 6042
6043 6043 port = &PPORT;
6044 6044
6045 6045 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6046 6046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6047 6047 "ioctl failed: Driver suspended.");
6048 6048
6049 6049 return (ENXIO);
6050 6050 }
6051 6051
6052 6052 mutex_enter(&EMLXS_IOCTL_LOCK);
6053 6053 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6054 6054 mutex_exit(&EMLXS_IOCTL_LOCK);
6055 6055 return (ENXIO);
6056 6056 }
6057 6057 mutex_exit(&EMLXS_IOCTL_LOCK);
6058 6058
6059 6059 #ifdef IDLE_TIMER
6060 6060 emlxs_pm_busy_component(hba);
6061 6061 #endif /* IDLE_TIMER */
6062 6062
6063 6063 switch (cmd) {
6064 6064 case EMLXS_DFC_COMMAND:
6065 6065 rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6066 6066 break;
6067 6067
6068 6068 default:
6069 6069 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6070 6070 "ioctl: Invalid command received. cmd=%x", cmd);
6071 6071 rval = EINVAL;
6072 6072 }
6073 6073
6074 6074 done:
6075 6075 return (rval);
6076 6076
6077 6077 } /* emlxs_ioctl() */
6078 6078
6079 6079
6080 6080
6081 6081 /*
6082 6082 *
6083 6083 * Device Driver Common Routines
6084 6084 *
6085 6085 */
6086 6086
6087 6087 /* EMLXS_PM_LOCK must be held for this call */
6088 6088 static int
6089 6089 emlxs_hba_resume(dev_info_t *dip)
6090 6090 {
6091 6091 emlxs_hba_t *hba;
6092 6092 emlxs_port_t *port;
6093 6093 int ddiinst;
6094 6094
6095 6095 ddiinst = ddi_get_instance(dip);
6096 6096 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6097 6097 port = &PPORT;
6098 6098
6099 6099 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6100 6100
6101 6101 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6102 6102 return (DDI_SUCCESS);
6103 6103 }
6104 6104
6105 6105 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6106 6106
6107 6107 /* Re-enable the physical port on this HBA */
6108 6108 port->flag |= EMLXS_PORT_ENABLED;
6109 6109
6110 6110 /* Take the adapter online */
6111 6111 if (emlxs_power_up(hba)) {
6112 6112 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6113 6113 "Unable to take adapter online.");
6114 6114
6115 6115 hba->pm_state |= EMLXS_PM_SUSPENDED;
6116 6116
6117 6117 return (DDI_FAILURE);
6118 6118 }
6119 6119
6120 6120 return (DDI_SUCCESS);
6121 6121
6122 6122 } /* emlxs_hba_resume() */
6123 6123
6124 6124
6125 6125 /* EMLXS_PM_LOCK must be held for this call */
6126 6126 static int
6127 6127 emlxs_hba_suspend(dev_info_t *dip)
6128 6128 {
6129 6129 emlxs_hba_t *hba;
6130 6130 emlxs_port_t *port;
6131 6131 int ddiinst;
6132 6132
6133 6133 ddiinst = ddi_get_instance(dip);
6134 6134 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6135 6135 port = &PPORT;
6136 6136
6137 6137 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6138 6138
6139 6139 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6140 6140 return (DDI_SUCCESS);
6141 6141 }
6142 6142
6143 6143 hba->pm_state |= EMLXS_PM_SUSPENDED;
6144 6144
6145 6145 /* Take the adapter offline */
6146 6146 if (emlxs_power_down(hba)) {
6147 6147 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6148 6148
6149 6149 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6150 6150 "Unable to take adapter offline.");
6151 6151
6152 6152 return (DDI_FAILURE);
6153 6153 }
6154 6154
6155 6155 return (DDI_SUCCESS);
6156 6156
6157 6157 } /* emlxs_hba_suspend() */
6158 6158
6159 6159
6160 6160
6161 6161 static void
6162 6162 emlxs_lock_init(emlxs_hba_t *hba)
6163 6163 {
6164 6164 emlxs_port_t *port = &PPORT;
6165 6165 uint32_t i;
6166 6166
6167 6167 /* Initialize the power management */
6168 6168 mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6169 6169 DDI_INTR_PRI(hba->intr_arg));
6170 6170
6171 6171 mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6172 6172 DDI_INTR_PRI(hba->intr_arg));
6173 6173
6174 6174 cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6175 6175
6176 6176 mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6177 6177 DDI_INTR_PRI(hba->intr_arg));
6178 6178
6179 6179 mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6180 6180 DDI_INTR_PRI(hba->intr_arg));
6181 6181
6182 6182 cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6183 6183
6184 6184 mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6185 6185 DDI_INTR_PRI(hba->intr_arg));
6186 6186
6187 6187 cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6188 6188
6189 6189 mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6190 6190 DDI_INTR_PRI(hba->intr_arg));
6191 6191
6192 6192 for (i = 0; i < MAX_RINGS; i++) {
6193 6193 mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6194 6194 DDI_INTR_PRI(hba->intr_arg));
6195 6195 }
6196 6196
6197 6197
6198 6198 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6199 6199 mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6200 6200 DDI_INTR_PRI(hba->intr_arg));
6201 6201 }
6202 6202
6203 6203 mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6204 6204 DDI_INTR_PRI(hba->intr_arg));
6205 6205
6206 6206 mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6207 6207 DDI_INTR_PRI(hba->intr_arg));
6208 6208
6209 6209 mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6210 6210 DDI_INTR_PRI(hba->intr_arg));
6211 6211
6212 6212 mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6213 6213 DDI_INTR_PRI(hba->intr_arg));
6214 6214
6215 6215 mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6216 6216 DDI_INTR_PRI(hba->intr_arg));
6217 6217
6218 6218 #ifdef DUMP_SUPPORT
6219 6219 mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6220 6220 DDI_INTR_PRI(hba->intr_arg));
6221 6221 #endif /* DUMP_SUPPORT */
6222 6222
6223 6223 mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6224 6224 DDI_INTR_PRI(hba->intr_arg));
6225 6225
6226 6226 /* Create per port locks */
6227 6227 for (i = 0; i < MAX_VPORTS; i++) {
6228 6228 port = &VPORT(i);
6229 6229
6230 6230 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6231 6231
6232 6232 if (i == 0) {
6233 6233 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6234 6234 DDI_INTR_PRI(hba->intr_arg));
6235 6235
6236 6236 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6237 6237
6238 6238 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6239 6239 DDI_INTR_PRI(hba->intr_arg));
6240 6240 } else {
6241 6241 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6242 6242 DDI_INTR_PRI(hba->intr_arg));
6243 6243
6244 6244 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6245 6245
6246 6246 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6247 6247 DDI_INTR_PRI(hba->intr_arg));
6248 6248 }
6249 6249 }
6250 6250
6251 6251 return;
6252 6252
6253 6253 } /* emlxs_lock_init() */
6254 6254
6255 6255
6256 6256
6257 6257 static void
6258 6258 emlxs_lock_destroy(emlxs_hba_t *hba)
6259 6259 {
6260 6260 emlxs_port_t *port = &PPORT;
6261 6261 uint32_t i;
6262 6262
6263 6263 mutex_destroy(&EMLXS_TIMER_LOCK);
6264 6264 cv_destroy(&hba->timer_lock_cv);
6265 6265
6266 6266 mutex_destroy(&EMLXS_PORT_LOCK);
6267 6267
6268 6268 cv_destroy(&EMLXS_MBOX_CV);
6269 6269 cv_destroy(&EMLXS_LINKUP_CV);
6270 6270
6271 6271 mutex_destroy(&EMLXS_LINKUP_LOCK);
6272 6272 mutex_destroy(&EMLXS_MBOX_LOCK);
6273 6273
6274 6274 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6275 6275
6276 6276 for (i = 0; i < MAX_RINGS; i++) {
6277 6277 mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6278 6278 }
6279 6279
6280 6280 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6281 6281 mutex_destroy(&EMLXS_QUE_LOCK(i));
6282 6282 }
6283 6283
6284 6284 mutex_destroy(&EMLXS_MSIID_LOCK);
6285 6285
6286 6286 mutex_destroy(&EMLXS_FCTAB_LOCK);
6287 6287 mutex_destroy(&EMLXS_MEMGET_LOCK);
6288 6288 mutex_destroy(&EMLXS_MEMPUT_LOCK);
6289 6289 mutex_destroy(&EMLXS_IOCTL_LOCK);
6290 6290 mutex_destroy(&EMLXS_SPAWN_LOCK);
6291 6291 mutex_destroy(&EMLXS_PM_LOCK);
6292 6292
6293 6293 #ifdef DUMP_SUPPORT
6294 6294 mutex_destroy(&EMLXS_DUMP_LOCK);
6295 6295 #endif /* DUMP_SUPPORT */
6296 6296
6297 6297 /* Destroy per port locks */
6298 6298 for (i = 0; i < MAX_VPORTS; i++) {
6299 6299 port = &VPORT(i);
6300 6300 rw_destroy(&port->node_rwlock);
6301 6301 mutex_destroy(&EMLXS_PKT_LOCK);
6302 6302 cv_destroy(&EMLXS_PKT_CV);
6303 6303 mutex_destroy(&EMLXS_UB_LOCK);
6304 6304 }
6305 6305
6306 6306 return;
6307 6307
6308 6308 } /* emlxs_lock_destroy() */
6309 6309
6310 6310
6311 6311 /* init_flag values */
6312 6312 #define ATTACH_SOFT_STATE 0x00000001
6313 6313 #define ATTACH_FCA_TRAN 0x00000002
6314 6314 #define ATTACH_HBA 0x00000004
6315 6315 #define ATTACH_LOG 0x00000008
6316 6316 #define ATTACH_MAP_BUS 0x00000010
6317 6317 #define ATTACH_INTR_INIT 0x00000020
6318 6318 #define ATTACH_PROP 0x00000040
6319 6319 #define ATTACH_LOCK 0x00000080
6320 6320 #define ATTACH_THREAD 0x00000100
6321 6321 #define ATTACH_INTR_ADD 0x00000200
6322 6322 #define ATTACH_ONLINE 0x00000400
6323 6323 #define ATTACH_NODE 0x00000800
6324 6324 #define ATTACH_FCT 0x00001000
6325 6325 #define ATTACH_FCA 0x00002000
6326 6326 #define ATTACH_KSTAT 0x00004000
6327 6327 #define ATTACH_DHCHAP 0x00008000
6328 6328 #define ATTACH_FM 0x00010000
6329 6329 #define ATTACH_MAP_SLI 0x00020000
6330 6330 #define ATTACH_SPAWN 0x00040000
6331 6331 #define ATTACH_EVENTS 0x00080000
6332 6332
6333 6333 static void
6334 6334 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
6335 6335 {
6336 6336 emlxs_hba_t *hba = NULL;
6337 6337 int ddiinst;
6338 6338
6339 6339 ddiinst = ddi_get_instance(dip);
6340 6340
6341 6341 if (init_flag & ATTACH_HBA) {
6342 6342 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6343 6343
6344 6344 if (init_flag & ATTACH_SPAWN) {
6345 6345 emlxs_thread_spawn_destroy(hba);
6346 6346 }
6347 6347
6348 6348 if (init_flag & ATTACH_EVENTS) {
6349 6349 (void) emlxs_event_queue_destroy(hba);
6350 6350 }
6351 6351
6352 6352 if (init_flag & ATTACH_ONLINE) {
6353 6353 (void) emlxs_offline(hba, 1);
6354 6354 }
6355 6355
6356 6356 if (init_flag & ATTACH_INTR_ADD) {
6357 6357 (void) EMLXS_INTR_REMOVE(hba);
6358 6358 }
6359 6359 #ifdef SFCT_SUPPORT
6360 6360 if (init_flag & ATTACH_FCT) {
6361 6361 emlxs_fct_detach(hba);
6362 6362 emlxs_fct_modclose();
6363 6363 }
6364 6364 #endif /* SFCT_SUPPORT */
6365 6365
6366 6366 #ifdef DHCHAP_SUPPORT
6367 6367 if (init_flag & ATTACH_DHCHAP) {
6368 6368 emlxs_dhc_detach(hba);
6369 6369 }
6370 6370 #endif /* DHCHAP_SUPPORT */
6371 6371
6372 6372 if (init_flag & ATTACH_KSTAT) {
6373 6373 kstat_delete(hba->kstat);
6374 6374 }
6375 6375
6376 6376 if (init_flag & ATTACH_FCA) {
6377 6377 emlxs_fca_detach(hba);
6378 6378 }
6379 6379
6380 6380 if (init_flag & ATTACH_NODE) {
6381 6381 (void) ddi_remove_minor_node(hba->dip, "devctl");
6382 6382 }
6383 6383
6384 6384 if (init_flag & ATTACH_THREAD) {
6385 6385 emlxs_thread_destroy(&hba->iodone_thread);
6386 6386 }
6387 6387
6388 6388 if (init_flag & ATTACH_PROP) {
6389 6389 (void) ddi_prop_remove_all(hba->dip);
6390 6390 }
6391 6391
6392 6392 if (init_flag & ATTACH_LOCK) {
6393 6393 emlxs_lock_destroy(hba);
6394 6394 }
6395 6395
6396 6396 if (init_flag & ATTACH_INTR_INIT) {
6397 6397 (void) EMLXS_INTR_UNINIT(hba);
6398 6398 }
6399 6399
6400 6400 if (init_flag & ATTACH_MAP_BUS) {
6401 6401 emlxs_unmap_bus(hba);
6402 6402 }
6403 6403
6404 6404 if (init_flag & ATTACH_MAP_SLI) {
6405 6405 EMLXS_SLI_UNMAP_HDW(hba);
6406 6406 }
6407 6407
6408 6408 #ifdef FMA_SUPPORT
6409 6409 if (init_flag & ATTACH_FM) {
6410 6410 emlxs_fm_fini(hba);
6411 6411 }
6412 6412 #endif /* FMA_SUPPORT */
6413 6413
6414 6414 if (init_flag & ATTACH_LOG) {
6415 6415 emlxs_msg_log_destroy(hba);
6416 6416 }
6417 6417
6418 6418 if (init_flag & ATTACH_FCA_TRAN) {
6419 6419 (void) ddi_set_driver_private(hba->dip, NULL);
6420 6420 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
6421 6421 hba->fca_tran = NULL;
6422 6422 }
6423 6423
6424 6424 if (init_flag & ATTACH_HBA) {
6425 6425 emlxs_device.log[hba->emlxinst] = 0;
6426 6426 emlxs_device.hba[hba->emlxinst] =
6427 6427 (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
6428 6428 #ifdef DUMP_SUPPORT
6429 6429 emlxs_device.dump_txtfile[hba->emlxinst] = 0;
6430 6430 emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
6431 6431 emlxs_device.dump_ceefile[hba->emlxinst] = 0;
6432 6432 #endif /* DUMP_SUPPORT */
6433 6433
6434 6434 }
6435 6435 }
6436 6436
6437 6437 if (init_flag & ATTACH_SOFT_STATE) {
6438 6438 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
6439 6439 }
6440 6440
6441 6441 return;
6442 6442
6443 6443 } /* emlxs_driver_remove() */
6444 6444
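/*
 * Illustrative sketch (not part of this driver): emlxs_driver_remove() is
 * driven by the ATTACH_* bits accumulated during attach, so a failure at
 * any stage unwinds exactly what was set up. The caller's shape is
 * roughly the following, as emlxs_hba_attach() does later in this file:
 */
#if 0
	uint32_t init_flag = 0;

	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS)
		return (DDI_FAILURE);
	init_flag |= ATTACH_SOFT_STATE;

	/* ... each successful stage ORs in its ATTACH_* bit ... */

failed:
	emlxs_driver_remove(dip, init_flag, 1);	/* tear down what was done */
	return (DDI_FAILURE);
#endif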
6445 6445
6446 6446 /* This determines whether initiator (FCA) mode can be enabled */
6447 6447 static uint32_t
6448 6448 emlxs_fca_init(emlxs_hba_t *hba)
6449 6449 {
6450 6450 emlxs_port_t *port = &PPORT;
6451 6451
6452 6452 /* Check if SFS present */
6453 6453 if (((void *)MODSYM(fc_fca_init) == NULL) ||
6454 6454 ((void *)MODSYM(fc_fca_attach) == NULL)) {
6455 6455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6456 6456 "SFS not present.");
6457 6457 return (1);
6458 6458 }
6459 6459
6460 6460 /* Check if our SFS driver interface matches the current SFS stack */
6461 6461 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6462 6462 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6463 6463 "SFS/FCA version mismatch. FCA=0x%x",
6464 6464 hba->fca_tran->fca_version);
6465 6465 return (1);
6466 6466 }
6467 6467
6468 6468 return (0);
6469 6469
6470 6470 } /* emlxs_fca_init() */
6471 6471
6472 6472
6473 6473 /* This determines which ports will be initiator or target mode */
6474 6474 static void
6475 6475 emlxs_mode_init(emlxs_hba_t *hba)
6476 6476 {
6477 6477 emlxs_port_t *port = &PPORT;
6478 6478 emlxs_config_t *cfg = &CFG;
6479 6479 emlxs_port_t *vport;
6480 6480 uint32_t i;
6481 6481 uint32_t mode_mask;
6482 6482
6483 6483 /* Initialize mode masks */
6484 6484 (void) emlxs_mode_init_masks(hba);
6485 6485
6486 6486 if (!(port->mode_mask & MODE_INITIATOR)) {
6487 6487 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6488 6488 "Initiator mode not enabled.");
6489 6489
6490 6490 #ifdef SFCT_SUPPORT
6491 6491 /* Disable dynamic target mode */
6492 6492 cfg[CFG_DTM_ENABLE].current = 0;
6493 6493 #endif /* SFCT_SUPPORT */
6494 6494
6495 6495 goto done1;
6496 6496 }
6497 6497
6498 6498 /* Try to initialize fca interface */
6499 6499 if (emlxs_fca_init(hba) != 0) {
6500 6500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6501 6501 "Initiator mode disabled.");
6502 6502
6503 6503 /* Disable initiator mode */
6504 6504 port->mode_mask &= ~MODE_INITIATOR;
6505 6505
6506 6506 #ifdef SFCT_SUPPORT
6507 6507 /* Disable dynamic target mode */
6508 6508 cfg[CFG_DTM_ENABLE].current = 0;
6509 6509 #endif /* SFCT_SUPPORT */
6510 6510
6511 6511 goto done1;
6512 6512 }
6513 6513
6514 6514 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6515 6515 "Initiator mode enabled.");
6516 6516
6517 6517 done1:
6518 6518
6519 6519 #ifdef SFCT_SUPPORT
6520 6520 if (!(port->mode_mask & MODE_TARGET)) {
6521 6521 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6522 6522 "Target mode not enabled.");
6523 6523
6524 6524 /* Disable target modes */
6525 6525 cfg[CFG_DTM_ENABLE].current = 0;
6526 6526 cfg[CFG_TARGET_MODE].current = 0;
6527 6527
6528 6528 goto done2;
6529 6529 }
6530 6530
6531 6531 /* Try to open the COMSTAR module */
6532 6532 if (emlxs_fct_modopen() != 0) {
6533 6533 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6534 6534 "Target mode disabled.");
6535 6535
6536 6536 /* Disable target modes */
6537 6537 port->mode_mask &= ~MODE_TARGET;
6538 6538 cfg[CFG_DTM_ENABLE].current = 0;
6539 6539 cfg[CFG_TARGET_MODE].current = 0;
6540 6540
6541 6541 goto done2;
6542 6542 }
6543 6543
6544 6544 /* Try to initialize fct interface */
6545 6545 if (emlxs_fct_init(hba) != 0) {
6546 6546 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6547 6547 "Target mode disabled.");
6548 6548
6549 6549 /* Disable target modes */
6550 6550 port->mode_mask &= ~MODE_TARGET;
6551 6551 cfg[CFG_DTM_ENABLE].current = 0;
6552 6552 cfg[CFG_TARGET_MODE].current = 0;
6553 6553
6554 6554 goto done2;
6555 6555 }
6556 6556
6557 6557 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6558 6558 "Target mode enabled.");
6559 6559
6560 6560 done2:
6561 6561 /* Adjust target mode parameter flags */
6562 6562 if (cfg[CFG_DTM_ENABLE].current) {
6563 6563 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6564 6564 "Dynamic target mode enabled.");
6565 6565
6566 6566 cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
6567 6567 } else {
6568 6568 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6569 6569 "Dynamic target mode disabled.");
6570 6570
6571 6571 cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
6572 6572 }
6573 6573 #endif /* SFCT_SUPPORT */
6574 6574
6575 6575 /* Now set port flags */
6576 6576 mutex_enter(&EMLXS_PORT_LOCK);
6577 6577
6578 6578 /* Set flags for physical port */
6579 6579 if (port->mode_mask & MODE_INITIATOR) {
6580 6580 port->flag |= EMLXS_INI_ENABLED;
6581 6581 } else {
6582 6582 port->flag &= ~EMLXS_INI_ENABLED;
6583 6583 }
6584 6584
6585 6585 if (port->mode_mask & MODE_TARGET) {
6586 6586 port->flag |= EMLXS_TGT_ENABLED;
6587 6587 } else {
6588 6588 port->flag &= ~EMLXS_TGT_ENABLED;
6589 6589 }
6590 6590
6591 6591 for (i = 1; i < MAX_VPORTS; i++) {
6592 6592 vport = &VPORT(i);
6593 6593
6594 6594 /* Restrict the vport mask to bits allowed by the physical port */
6595 6595 mode_mask = vport->mode_mask & port->mode_mask;
6596 6596
6597 6597 /* Set flags for this virtual port */
6598 6598 if (mode_mask & MODE_INITIATOR) {
6599 6599 vport->flag |= EMLXS_INI_ENABLED;
6600 6600 } else {
6601 6601 vport->flag &= ~EMLXS_INI_ENABLED;
6602 6602 }
6603 6603
6604 6604 if (mode_mask & MODE_TARGET) {
6605 6605 vport->flag |= EMLXS_TGT_ENABLED;
6606 6606 } else {
6607 6607 vport->flag &= ~EMLXS_TGT_ENABLED;
6608 6608 }
6609 6609 }
6610 6610
6611 6611 /* Set initial driver mode */
6612 6612 emlxs_mode_set(hba);
6613 6613
6614 6614 mutex_exit(&EMLXS_PORT_LOCK);
6615 6615
6616 6616 /* Recheck possible mode dependent parameters */
6617 6617 /* in case conditions have changed. */
6618 6618 if (port->mode != MODE_NONE) {
6619 6619 for (i = 0; i < NUM_CFG_PARAM; i++) {
6620 6620 cfg = &hba->config[i];
6621 6621 cfg->current = emlxs_check_parm(hba, i, cfg->current);
6622 6622 }
6623 6623 }
6624 6624
6625 6625 return;
6626 6626
6627 6627 } /* emlxs_mode_init() */
6628 6628
6629 6629
6630 6630 /* This must be called while holding the EMLXS_PORT_LOCK */
6631 6631 extern void
6632 6632 emlxs_mode_set(emlxs_hba_t *hba)
6633 6633 {
6634 6634 emlxs_port_t *port = &PPORT;
6635 6635 #ifdef SFCT_SUPPORT
6636 6636 emlxs_config_t *cfg = &CFG;
6637 6637 #endif /* SFCT_SUPPORT */
6638 6638 emlxs_port_t *vport;
6639 6639 uint32_t i;
6640 6640 uint32_t cfg_tgt_mode = 0;
6641 6641
6642 6642 /* mutex_enter(&EMLXS_PORT_LOCK); */
6643 6643
6644 6644 #ifdef SFCT_SUPPORT
6645 6645 cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
6646 6646 #endif /* SFCT_SUPPORT */
6647 6647
6648 6648 /* Initiator mode requested */
6649 6649 if (!cfg_tgt_mode) {
6650 6650 for (i = 0; i < MAX_VPORTS; i++) {
6651 6651 vport = &VPORT(i);
6652 6652 vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
6653 6653 MODE_INITIATOR:MODE_NONE;
6654 6654 }
6655 6655 #ifdef SFCT_SUPPORT
6656 6656 /* Target mode requested */
6657 6657 } else {
6658 6658 for (i = 0; i < MAX_VPORTS; i++) {
6659 6659 vport = &VPORT(i);
6660 6660 vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
6661 6661 MODE_TARGET:MODE_NONE;
6662 6662 }
6663 6663 #endif /* SFCT_SUPPORT */
6664 6664 }
6665 6665
6666 6666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6667 6667 "MODE: %s", emlxs_mode_xlate(port->mode));
6668 6668
6669 6669 /* mutex_exit(&EMLXS_PORT_LOCK); */
6670 6670
6671 6671 return;
6672 6672
6673 6673 } /* emlxs_mode_set() */
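
/*
 * Illustrative sketch (not part of this driver): emlxs_mode_set() assumes
 * the caller already owns EMLXS_PORT_LOCK, as emlxs_mode_init() above
 * demonstrates:
 */
#if 0
	mutex_enter(&EMLXS_PORT_LOCK);
	/* ... adjust port/vport flags ... */
	emlxs_mode_set(hba);
	mutex_exit(&EMLXS_PORT_LOCK);
#endif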
6674 6674
6675 6675
6676 6676 static void
6677 6677 emlxs_mode_init_masks(emlxs_hba_t *hba)
6678 6678 {
6679 6679 emlxs_port_t *port = &PPORT;
6680 6680 emlxs_port_t *vport;
6681 6681 uint32_t i;
6682 6682
6683 6683 #ifdef SFCT_SUPPORT
6684 6684 emlxs_config_t *cfg = &CFG;
6685 6685 uint32_t vport_mode_mask;
6686 6686 uint32_t cfg_vport_mode_mask;
6687 6687 uint32_t mode_mask;
6688 6688 char string[256];
6689 6689
6690 6690 port->mode_mask = 0;
6691 6691
6692 6692 if (!cfg[CFG_TARGET_MODE].current ||
6693 6693 cfg[CFG_DTM_ENABLE].current) {
6694 6694 port->mode_mask |= MODE_INITIATOR;
6695 6695 }
6696 6696
6697 6697 if (cfg[CFG_TARGET_MODE].current ||
6698 6698 cfg[CFG_DTM_ENABLE].current) {
6699 6699 port->mode_mask |= MODE_TARGET;
6700 6700 }
6701 6701
6702 6702 /* Physical port mask has only allowable bits */
6703 6703 vport_mode_mask = port->mode_mask;
6704 6704 cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;
6705 6705
6706 6706 /* Check dynamic target mode value for virtual ports */
6707 6707 if (cfg[CFG_DTM_ENABLE].current == 0) {
6708 6708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6709 6709 "%s = 0: Virtual target ports are not supported.",
6710 6710 cfg[CFG_DTM_ENABLE].string);
6711 6711
6712 6712 vport_mode_mask &= ~MODE_TARGET;
6713 6713 }
6714 6714
6715 6715 cfg_vport_mode_mask &= vport_mode_mask;
6716 6716
6717 6717 if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
6718 6718 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6719 6719 "%s: Changing 0x%x --> 0x%x",
6720 6720 cfg[CFG_VPORT_MODE_MASK].string,
6721 6721 cfg[CFG_VPORT_MODE_MASK].current,
6722 6722 cfg_vport_mode_mask);
6723 6723
6724 6724 cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
6725 6725 }
6726 6726
6727 6727 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6728 6728 "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));
6729 6729
6730 6730 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6731 6731 "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));
6732 6732
6733 6733 for (i = 1; i < MAX_VPORTS; i++) {
6734 6734 vport = &VPORT(i);
6735 6735
6736 6736 (void) snprintf(string, sizeof (string),
6737 6737 "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);
6738 6738
6739 6739 mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6740 6740 (void *)hba->dip, DDI_PROP_DONTPASS, string,
6741 6741 cfg_vport_mode_mask);
6742 6742
6743 6743 vport->mode_mask = mode_mask & vport_mode_mask;
6744 6744
6745 6745 if (vport->mode_mask != cfg_vport_mode_mask) {
6746 6746 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6747 6747 "vport%d-mode-mask: %s",
6748 6748 i, emlxs_mode_xlate(vport->mode_mask));
6749 6749 }
6750 6750 }
6751 6751 #else
6752 6752 port->mode_mask = MODE_INITIATOR;
6753 6753 for (i = 1; i < MAX_VPORTS; i++) {
6754 6754 vport = &VPORT(i);
6755 6755 vport->mode_mask = MODE_INITIATOR;
6756 6756 }
6757 6757 #endif /* SFCT_SUPPORT */
6758 6758
6759 6759 return;
6760 6760
6761 6761 } /* emlxs_mode_init_masks() */
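
/*
 * Illustrative note (not part of this driver): the per-vport mask above is
 * read from a driver property named "<drv><inst>-vport<N>-mode-mask", so a
 * driver.conf entry such as the following would restrict vport 1 of
 * instance 0. The "emlxs" prefix and the numeric value (assumed here to be
 * the MODE_INITIATOR bit) are assumptions for illustration only.
 *
 *	emlxs0-vport1-mode-mask=1;
 */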
6762 6762
6763 6763
6764 6764 static void
6765 6765 emlxs_fca_attach(emlxs_hba_t *hba)
6766 6766 {
6767 6767 emlxs_port_t *port;
6768 6768 uint32_t i;
6769 6769
6770 6770 /* Update our transport structure */
6771 6771 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
6772 6772 hba->fca_tran->fca_cmd_max = hba->io_throttle;
6773 6773
6774 6774 for (i = 0; i < MAX_VPORTS; i++) {
6775 6775 port = &VPORT(i);
6776 6776 port->ub_count = EMLXS_UB_TOKEN_OFFSET;
6777 6777 port->ub_pool = NULL;
6778 6778 }
6779 6779
6780 6780 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6781 6781 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6782 6782 sizeof (NAME_TYPE));
6783 6783 #endif /* >= EMLXS_MODREV5 */
6784 6784
6785 6785 return;
6786 6786
6787 6787 } /* emlxs_fca_attach() */
6788 6788
6789 6789
6790 6790 static void
6791 6791 emlxs_fca_detach(emlxs_hba_t *hba)
6792 6792 {
6793 6793 emlxs_port_t *port = &PPORT;
6794 6794 uint32_t i;
6795 6795 emlxs_port_t *vport;
6796 6796
6797 6797 if (!(port->flag & EMLXS_INI_ENABLED)) {
6798 6798 return;
6799 6799 }
6800 6800
6801 6801 if ((void *)MODSYM(fc_fca_detach) != NULL) {
6802 6802 MODSYM(fc_fca_detach)(hba->dip);
6803 6803 }
6804 6804
6805 6805 /* Disable INI mode for all ports */
6806 6806 for (i = 0; i < MAX_VPORTS; i++) {
6807 6807 vport = &VPORT(i);
6808 6808 vport->flag &= ~EMLXS_INI_ENABLED;
6809 6809 }
6810 6810
6811 6811 return;
6812 6812
6813 6813 } /* emlxs_fca_detach() */
6814 6814
6815 6815
6816 6816 static void
6817 6817 emlxs_drv_banner(emlxs_hba_t *hba)
6818 6818 {
6819 6819 emlxs_port_t *port = &PPORT;
6820 6820 uint32_t i;
6821 6821 char sli_mode[16];
6822 6822 char msi_mode[16];
6823 6823 char npiv_mode[16];
6824 6824 emlxs_vpd_t *vpd = &VPD;
6825 6825 uint8_t *wwpn;
6826 6826 uint8_t *wwnn;
6827 6827 uint32_t fw_show = 0;
6828 6828
6829 6829 /* Display firmware library one time for all driver instances */
6830 6830 mutex_enter(&emlxs_device.lock);
6831 6831 if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
6832 6832 emlxs_instance_flag |= EMLXS_FW_SHOW;
6833 6833 fw_show = 1;
6834 6834 }
6835 6835 mutex_exit(&emlxs_device.lock);
6836 6836
6837 6837 if (fw_show) {
6838 6838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
6839 6839 emlxs_copyright);
6840 6840 emlxs_fw_show(hba);
6841 6841 }
6842 6842
6843 6843 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6844 6844 emlxs_revision);
6845 6845
6846 6846 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6847 6847 "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6848 6848 hba->model_info.device_id, hba->model_info.ssdid,
6849 6849 hba->model_info.id);
6850 6850
6851 6851 #ifdef EMLXS_I386
6852 6852
6853 6853 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6854 6854 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6855 6855 vpd->boot_version);
6856 6856
6857 6857 #else /* EMLXS_SPARC */
6858 6858
6859 6859 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6860 6860 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6861 6861 vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6862 6862
6863 6863 #endif /* EMLXS_I386 */
6864 6864
6865 6865 if (hba->sli_mode > 3) {
6866 6866 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
6867 6867 hba->sli_mode,
6868 6868 ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6869 6869 } else {
6870 6870 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
6871 6871 hba->sli_mode);
6872 6872 }
6873 6873
6874 6874 (void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));
6875 6875
6876 6876 #ifdef MSI_SUPPORT
6877 6877 if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6878 6878 switch (hba->intr_type) {
6879 6879 case DDI_INTR_TYPE_FIXED:
6880 6880 (void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
6881 6881 break;
6882 6882
6883 6883 case DDI_INTR_TYPE_MSI:
6884 6884 (void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
6885 6885 hba->intr_count);
6886 6886 break;
6887 6887
6888 6888 case DDI_INTR_TYPE_MSIX:
6889 6889 (void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
6890 6890 hba->intr_count);
6891 6891 break;
6892 6892 }
6893 6893 }
6894 6894 #endif /* MSI_SUPPORT */
6895 6895
6896 6896 (void) strlcpy(npiv_mode, "", sizeof (npiv_mode));
6897 6897
6898 6898 if (hba->flag & FC_NPIV_ENABLED) {
6899 6899 (void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
6900 6900 hba->vpi_max+1);
6901 6901 } else {
6902 6902 (void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
6903 6903 }
6904 6904
6905 6905 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
6906 6906 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
6907 6907 sli_mode, msi_mode, npiv_mode,
6908 6908 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6909 6909 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
6910 6910 ((SLI4_FCOE_MODE)? " FCoE":" FC"));
6911 6911 } else {
6912 6912 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6913 6913 sli_mode, msi_mode, npiv_mode,
6914 6914 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6915 6915 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
6916 6916 }
6917 6917
6918 6918 wwpn = (uint8_t *)&hba->wwpn;
6919 6919 wwnn = (uint8_t *)&hba->wwnn;
6920 6920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6921 6921 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6922 6922 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6923 6923 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6924 6924 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6925 6925 wwnn[6], wwnn[7]);
6926 6926
6927 6927 for (i = 0; i < MAX_VPORTS; i++) {
6928 6928 port = &VPORT(i);
6929 6929
6930 6930 if (!(port->flag & EMLXS_PORT_CONFIG)) {
6931 6931 continue;
6932 6932 }
6933 6933
6934 6934 wwpn = (uint8_t *)&port->wwpn;
6935 6935 wwnn = (uint8_t *)&port->wwnn;
6936 6936
6937 6937 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6938 6938 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6939 6939 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6940 6940 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6941 6941 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6942 6942 wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6943 6943 }
6944 6944
6945 6945 /*
6946 6946 * Announce the device: ddi_report_dev() prints a banner at boot time,
6947 6947 * announcing the device pointed to by dip.
6948 6948 */
6949 6949 (void) ddi_report_dev(hba->dip);
6950 6950
6951 6951 return;
6952 6952
6953 6953 } /* emlxs_drv_banner() */
6954 6954
6955 6955
6956 6956 extern void
6957 6957 emlxs_get_fcode_version(emlxs_hba_t *hba)
6958 6958 {
6959 6959 emlxs_vpd_t *vpd = &VPD;
6960 6960 char *prop_str;
6961 6961 int status;
6962 6962
6963 6963 /* Setup fcode version property */
6964 6964 prop_str = NULL;
6965 6965 status =
6966 6966 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6967 6967 "fcode-version", (char **)&prop_str);
6968 6968
6969 6969 if (status == DDI_PROP_SUCCESS) {
6970 6970 bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6971 6971 (void) ddi_prop_free((void *)prop_str);
6972 6972 } else {
6973 6973 (void) strncpy(vpd->fcode_version, "none",
6974 6974 (sizeof (vpd->fcode_version)-1));
6975 6975 }
6976 6976
6977 6977 return;
6978 6978
6979 6979 } /* emlxs_get_fcode_version() */
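
Editor's note: the lookup above follows the usual DDI pattern of looking up a string property, copying it while the DDI-owned string is valid, freeing that string, and falling back to a default when the property is absent. A minimal standalone sketch of that pattern; the property name "my-version" and the helper name are hypothetical, not part of this file:

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/systm.h>

/* Sketch only: bounded copy of an optional string property with a default. */
static void
example_lookup_version(dev_info_t *dip, char *buf, size_t buflen)
{
	char *prop_str = NULL;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "my-version", &prop_str) == DDI_PROP_SUCCESS) {
		(void) strlcpy(buf, prop_str, buflen);	/* bounded, NUL terminated */
		ddi_prop_free(prop_str);		/* release DDI-owned string */
	} else {
		(void) strlcpy(buf, "none", buflen);	/* property not present */
	}
}
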
6980 6980
6981 6981
6982 6982 static int
6983 6983 emlxs_hba_attach(dev_info_t *dip)
6984 6984 {
6985 6985 emlxs_hba_t *hba;
6986 6986 emlxs_port_t *port;
6987 6987 emlxs_config_t *cfg;
6988 6988 char *prop_str;
6989 6989 int ddiinst;
6990 6990 int32_t emlxinst;
6991 6991 int status;
6992 6992 uint32_t rval;
6993 6993 uint32_t init_flag = 0;
6994 6994 char local_pm_components[32];
6995 6995 uint32_t i;
6996 6996
6997 6997 ddiinst = ddi_get_instance(dip);
6998 6998 emlxinst = emlxs_add_instance(ddiinst);
6999 6999
7000 7000 if (emlxinst >= MAX_FC_BRDS) {
7001 7001 cmn_err(CE_WARN,
7002 7002 "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
7003 7003 "inst=%x", DRIVER_NAME, ddiinst);
7004 7004 return (DDI_FAILURE);
7005 7005 }
7006 7006
7007 7007 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
7008 7008 return (DDI_FAILURE);
7009 7009 }
7010 7010
7011 7011 if (emlxs_device.hba[emlxinst]) {
7012 7012 return (DDI_SUCCESS);
7013 7013 }
7014 7014
7015 7015 /* An adapter can accidentally be plugged into a slave-only PCI slot */
7016 7016 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
7017 7017 cmn_err(CE_WARN,
7018 7018 "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
7019 7019 DRIVER_NAME, ddiinst);
7020 7020 return (DDI_FAILURE);
7021 7021 }
7022 7022
7023 7023 /* Allocate emlxs_dev_ctl structure. */
7024 7024 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
7025 7025 cmn_err(CE_WARN,
7026 7026 "?%s%d: fca_hba_attach failed. Unable to allocate soft "
7027 7027 "state.", DRIVER_NAME, ddiinst);
7028 7028 return (DDI_FAILURE);
7029 7029 }
7030 7030 init_flag |= ATTACH_SOFT_STATE;
7031 7031
7032 7032 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
7033 7033 ddiinst)) == NULL) {
7034 7034 cmn_err(CE_WARN,
7035 7035 "?%s%d: fca_hba_attach failed. Unable to get soft state.",
7036 7036 DRIVER_NAME, ddiinst);
7037 7037 goto failed;
7038 7038 }
7039 7039 bzero((char *)hba, sizeof (emlxs_hba_t));
7040 7040
7041 7041 emlxs_device.hba[emlxinst] = hba;
7042 7042 emlxs_device.log[emlxinst] = &hba->log;
7043 7043
7044 7044 #ifdef DUMP_SUPPORT
7045 7045 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
7046 7046 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
7047 7047 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
7048 7048 #endif /* DUMP_SUPPORT */
7049 7049
7050 7050 hba->dip = dip;
7051 7051 hba->emlxinst = emlxinst;
7052 7052 hba->ddiinst = ddiinst;
7053 7053
7054 7054 init_flag |= ATTACH_HBA;
7055 7055
7056 7056 /* Enable the physical port on this HBA */
7057 7057 port = &PPORT;
7058 7058 port->hba = hba;
7059 7059 port->vpi = 0;
7060 7060 port->flag |= EMLXS_PORT_ENABLED;
7061 7061
7062 7062 /* Allocate a transport structure */
7063 7063 hba->fca_tran =
7064 7064 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
7065 7065 if (hba->fca_tran == NULL) {
7066 7066 cmn_err(CE_WARN,
7067 7067 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
7068 7068 "memory.", DRIVER_NAME, ddiinst);
7069 7069 goto failed;
7070 7070 }
7071 7071 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
7072 7072 sizeof (fc_fca_tran_t));
7073 7073
7074 7074 /*
7075 7075 * Copy the global ddi_dma_attr to the local hba fields
7076 7076 */
7077 7077 bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
7078 7078 sizeof (ddi_dma_attr_t));
7079 7079 bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
7080 7080 sizeof (ddi_dma_attr_t));
7081 7081 bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
7082 7082 sizeof (ddi_dma_attr_t));
7083 7083 bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
7084 7084 (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
7085 7085
7086 7086 /* Reset the fca_tran dma_attr fields to the per-hba copies */
7087 7087 hba->fca_tran->fca_dma_attr = &hba->dma_attr;
7088 7088 hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
7089 7089 hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
7090 7090 hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
7091 7091 hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
7092 7092 hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
7093 7093 hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
7094 7094 hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
7095 7095
7096 7096 /* Set the transport structure pointer in our dip */
7097 7097 /* SFS may panic if we are in target only mode */
7098 7098 /* We will update the transport structure later */
7099 7099 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
7100 7100 init_flag |= ATTACH_FCA_TRAN;
7101 7101
7102 7102 /* Perform driver integrity check */
7103 7103 rval = emlxs_integrity_check(hba);
7104 7104 if (rval) {
7105 7105 cmn_err(CE_WARN,
7106 7106 "?%s%d: fca_hba_attach failed. Driver integrity check "
7107 7107 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
7108 7108 goto failed;
7109 7109 }
7110 7110
7111 7111 cfg = &CFG;
7112 7112
7113 7113 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
7114 7114 #ifdef MSI_SUPPORT
7115 7115 if ((void *)&ddi_intr_get_supported_types != NULL) {
7116 7116 hba->intr_flags |= EMLXS_MSI_ENABLED;
7117 7117 }
7118 7118 #endif /* MSI_SUPPORT */
7119 7119
7120 7120
7121 7121 /* Create the msg log file */
7122 7122 if (emlxs_msg_log_create(hba) == 0) {
7123 7123 cmn_err(CE_WARN,
7124 7124 "?%s%d: fca_hba_attach failed. Unable to create message "
7125 7125 "log", DRIVER_NAME, ddiinst);
7126 7126 goto failed;
7127 7127
7128 7128 }
7129 7129 init_flag |= ATTACH_LOG;
7130 7130
7131 7131 /* We can begin to use EMLXS_MSGF from this point on */
7132 7132
7133 7133 /*
7134 7134 * Find the I/O bus type. If it is not an SBUS card,
7135 7135 * then it is a PCI card. Default is PCI_FC (0).
7136 7136 */
7137 7137 prop_str = NULL;
7138 7138 status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
7139 7139 (dev_info_t *)dip, 0, "name", (char **)&prop_str);
7140 7140
7141 7141 if (status == DDI_PROP_SUCCESS) {
7142 7142 if (strncmp(prop_str, "lpfs", 4) == 0) {
7143 7143 hba->bus_type = SBUS_FC;
7144 7144 }
7145 7145
7146 7146 (void) ddi_prop_free((void *)prop_str);
7147 7147 }
7148 7148
7149 7149 /*
7150 7150 * Copy DDS from the config method and update configuration parameters
7151 7151 */
7152 7152 (void) emlxs_get_props(hba);
7153 7153
7154 7154 #ifdef FMA_SUPPORT
7155 7155 hba->fm_caps = cfg[CFG_FM_CAPS].current;
7156 7156
7157 7157 emlxs_fm_init(hba);
7158 7158
7159 7159 init_flag |= ATTACH_FM;
7160 7160 #endif /* FMA_SUPPORT */
7161 7161
7162 7162 if (emlxs_map_bus(hba)) {
7163 7163 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7164 7164 "Unable to map memory");
7165 7165 goto failed;
7166 7166
7167 7167 }
7168 7168 init_flag |= ATTACH_MAP_BUS;
7169 7169
7170 7170 /* Attempt to identify the adapter */
7171 7171 rval = emlxs_init_adapter_info(hba);
7172 7172
7173 7173 if (rval == 0) {
7174 7174 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7175 7175 "Unable to get adapter info. Id:%d Device id:0x%x "
7176 7176 "Model:%s", hba->model_info.id,
7177 7177 hba->model_info.device_id, hba->model_info.model);
7178 7178 goto failed;
7179 7179 }
7180 7180 #define FILTER_ORACLE_BRANDED
7181 7181 #ifdef FILTER_ORACLE_BRANDED
7182 7182
7183 7183 /* Oracle branded adapters are not supported in this driver */
7184 7184 if (hba->model_info.flags & EMLXS_ORACLE_BRANDED) {
7185 7185 hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
7186 7186 }
7187 7187 #endif /* FILTER_ORACLE_BRANDED */
7188 7188
7189 7189 /* Check if adapter is not supported */
7190 7190 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
7191 7191 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7192 7192 "Unsupported adapter found. Id:%d Device id:0x%x "
7193 7193 "SSDID:0x%x Model:%s", hba->model_info.id,
7194 7194 hba->model_info.device_id,
7195 7195 hba->model_info.ssdid, hba->model_info.model);
7196 7196 goto failed;
7197 7197 }
7198 7198
7199 7199 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
7200 7200 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
7201 7201
7202 7202 #ifdef EMLXS_I386
7203 7203 /*
7204 7204 * TigerShark has a 64K limit for SG element size.
7205 7205 * Do this for x86 alone. For SPARC, the driver
7206 7206 * breaks up the single SGE later on.
7207 7207 */
7208 7208 hba->dma_attr_ro.dma_attr_count_max = 0xffff;
7209 7209
7210 7210 i = cfg[CFG_MAX_XFER_SIZE].current;
7211 7211 /* Update SGL size based on max_xfer_size */
7212 7212 if (i > 516096) {
7213 7213 /* 516096 = (((2048 / 16) - 2) * 4096) */
7214 7214 hba->sli.sli4.mem_sgl_size = 4096;
7215 7215 } else if (i > 253952) {
7216 7216 /* 253952 = (((1024 / 16) - 2) * 4096) */
7217 7217 hba->sli.sli4.mem_sgl_size = 2048;
7218 7218 } else {
7219 7219 hba->sli.sli4.mem_sgl_size = 1024;
7220 7220 }
7221 7221 #endif /* EMLXS_I386 */
7222 7222
7223 7223 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
7224 7224 } else {
7225 7225 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
7226 7226
7227 7227 #ifdef EMLXS_I386
7228 7228 i = cfg[CFG_MAX_XFER_SIZE].current;
7229 7229 /* Update BPL size based on max_xfer_size */
7230 7230 if (i > 688128) {
7231 7231 /* 688128 = (((2048 / 12) - 2) * 4096) */
7232 7232 hba->sli.sli3.mem_bpl_size = 4096;
7233 7233 } else if (i > 339968) {
7234 7234 /* 339968 = (((1024 / 12) - 2) * 4096) */
7235 7235 hba->sli.sli3.mem_bpl_size = 2048;
7236 7236 } else {
7237 7237 hba->sli.sli3.mem_bpl_size = 1024;
7238 7238 }
7239 7239 #endif /* EMLXS_I386 */
7240 7240
7241 7241 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
7242 7242 }
7243 7243
7244 7244 /* Update dma_attr_sgllen based on true SGL length */
7245 7245 hba->dma_attr.dma_attr_sgllen = i;
7246 7246 hba->dma_attr_ro.dma_attr_sgllen = i;
7247 7247 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
7248 7248
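
Editor's worked expansion (not text from the webrev): the byte thresholds in the two blocks above come straight from the formulas quoted in the comments, max_xfer = ((sgl_size / entry_size) - 2) * 4096, where the divisor is the per-entry size implied by the quoted expressions (16 bytes for an SLI4 SGE, 12 bytes for an SLI3 BPL entry), two entries are reserved, and each remaining entry maps one 4KB page:

    capacity of a 2048-byte SLI4 SGL:  2048/16 - 2 = 126 entries, 126 * 4096 = 516096 bytes
    capacity of a 1024-byte SLI4 SGL:  1024/16 - 2 =  62 entries,  62 * 4096 = 253952 bytes
    capacity of a 2048-byte SLI3 BPL:  2048/12 - 2 = 168 entries, 168 * 4096 = 688128 bytes
    capacity of a 1024-byte SLI3 BPL:  1024/12 - 2 =  83 entries,  83 * 4096 = 339968 bytes

So each comparison picks the smallest SGL/BPL buffer whose capacity still covers cfg[CFG_MAX_XFER_SIZE].
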
7249 7249 if (EMLXS_SLI_MAP_HDW(hba)) {
7250 7250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7251 7251 "Unable to map memory");
7252 7252 goto failed;
7253 7253
7254 7254 }
7255 7255 init_flag |= ATTACH_MAP_SLI;
7256 7256
7257 7257 /* Initialize the interrupts. But don't add them yet */
7258 7258 status = EMLXS_INTR_INIT(hba, 0);
7259 7259 if (status != DDI_SUCCESS) {
7260 7260 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7261 7261 "Unable to initialize interrupt(s).");
7262 7262 goto failed;
7263 7263
7264 7264 }
7265 7265 init_flag |= ATTACH_INTR_INIT;
7266 7266
7267 7267 /* Initialize LOCKs */
7268 7268 emlxs_msg_lock_reinit(hba);
7269 7269 emlxs_lock_init(hba);
7270 7270 init_flag |= ATTACH_LOCK;
7271 7271
7272 7272 /* Create the event queue */
7273 7273 if (emlxs_event_queue_create(hba) == 0) {
7274 7274 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7275 7275 "Unable to create event queue");
7276 7276
7277 7277 goto failed;
7278 7278
7279 7279 }
7280 7280 init_flag |= ATTACH_EVENTS;
7281 7281
7282 7282 /* Initialize the power management */
7283 7283 mutex_enter(&EMLXS_PM_LOCK);
7284 7284 hba->pm_state = EMLXS_PM_IN_ATTACH;
7285 7285 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
7286 7286 hba->pm_busy = 0;
7287 7287 #ifdef IDLE_TIMER
7288 7288 hba->pm_active = 1;
7289 7289 hba->pm_idle_timer = 0;
7290 7290 #endif /* IDLE_TIMER */
7291 7291 mutex_exit(&EMLXS_PM_LOCK);
7292 7292
7293 7293 /* Set the pm component name */
7294 7294 (void) snprintf(local_pm_components, sizeof (local_pm_components),
7295 7295 "NAME=%s%d", DRIVER_NAME, ddiinst);
7296 7296 emlxs_pm_components[0] = local_pm_components;
7297 7297
7298 7298 /* Check if power management support is enabled */
7299 7299 if (cfg[CFG_PM_SUPPORT].current) {
7300 7300 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
7301 7301 "pm-components", emlxs_pm_components,
7302 7302 sizeof (emlxs_pm_components) /
7303 7303 sizeof (emlxs_pm_components[0])) !=
7304 7304 DDI_PROP_SUCCESS) {
7305 7305 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7306 7306 "Unable to create pm components.");
7307 7307 goto failed;
7308 7308 }
7309 7309 }
7310 7310
7311 7311 /* Needed for suspend and resume support */
7312 7312 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
7313 7313 "needs-suspend-resume");
7314 7314 init_flag |= ATTACH_PROP;
7315 7315
7316 7316 emlxs_thread_spawn_create(hba);
7317 7317 init_flag |= ATTACH_SPAWN;
7318 7318
7319 7319 emlxs_thread_create(hba, &hba->iodone_thread);
7320 7320
7321 7321 init_flag |= ATTACH_THREAD;
7322 7322
7323 7323 retry:
7324 7324 /* Setup initiator / target ports */
7325 7325 emlxs_mode_init(hba);
7326 7326
7327 7327 /* If driver did not attach to either stack, */
7328 7328 /* then driver attach fails */
7329 7329 if (port->mode == MODE_NONE) {
7330 7330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7331 7331 "Driver interfaces not enabled.");
7332 7332 goto failed;
7333 7333 }
7334 7334
7335 7335 /*
7336 7336 * Initialize HBA
7337 7337 */
7338 7338
7339 7339 /* Set initial state */
7340 7340 mutex_enter(&EMLXS_PORT_LOCK);
7341 7341 hba->flag |= FC_OFFLINE_MODE;
7342 7342 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
7343 7343 mutex_exit(&EMLXS_PORT_LOCK);
7344 7344
7345 7345 if (status = emlxs_online(hba)) {
7346 7346 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7347 7347 "Unable to initialize adapter.");
7348 7348
7349 7349 if (status == EAGAIN) {
7350 7350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7351 7351 "Retrying adapter initialization ...");
7352 7352 goto retry;
7353 7353 }
7354 7354 goto failed;
7355 7355 }
7356 7356 init_flag |= ATTACH_ONLINE;
7357 7357
7358 7358 /* This is to ensure that the model property is properly set */
7359 7359 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
7360 7360 hba->model_info.model);
7361 7361
7362 7362 /* Create the device node. */
7363 7363 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
7364 7364 DDI_FAILURE) {
7365 7365 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7366 7366 "Unable to create device node.");
7367 7367 goto failed;
7368 7368 }
7369 7369 init_flag |= ATTACH_NODE;
7370 7370
7371 7371 /* Attach initiator now */
7372 7372 /* This must come after emlxs_online() */
7373 7373 emlxs_fca_attach(hba);
7374 7374 init_flag |= ATTACH_FCA;
7375 7375
7376 7376 /* Initialize kstat information */
7377 7377 hba->kstat = kstat_create(DRIVER_NAME,
7378 7378 ddiinst, "statistics", "controller",
7379 7379 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
7380 7380 KSTAT_FLAG_VIRTUAL);
7381 7381
7382 7382 if (hba->kstat == NULL) {
7383 7383 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
7384 7384 "kstat_create failed.");
7385 7385 } else {
7386 7386 hba->kstat->ks_data = (void *)&hba->stats;
7387 7387 kstat_install(hba->kstat);
7388 7388 init_flag |= ATTACH_KSTAT;
7389 7389 }
7390 7390
7391 7391 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
7392 7392 /* Setup virtual port properties */
7393 7393 emlxs_read_vport_prop(hba);
7394 7394 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
7395 7395
7396 7396
7397 7397 #ifdef DHCHAP_SUPPORT
7398 7398 emlxs_dhc_attach(hba);
7399 7399 init_flag |= ATTACH_DHCHAP;
7400 7400 #endif /* DHCHAP_SUPPORT */
7401 7401
7402 7402 /* Display the driver banner now */
7403 7403 emlxs_drv_banner(hba);
7404 7404
7405 7405 /* Raise the power level */
7406 7406
7407 7407 /*
7408 7408 * This will not execute emlxs_hba_resume because
7409 7409 * EMLXS_PM_IN_ATTACH is set
7410 7410 */
7411 7411 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
7412 7412 /* Set power up anyway. This should not happen! */
7413 7413 mutex_enter(&EMLXS_PM_LOCK);
7414 7414 hba->pm_level = EMLXS_PM_ADAPTER_UP;
7415 7415 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7416 7416 mutex_exit(&EMLXS_PM_LOCK);
7417 7417 } else {
7418 7418 mutex_enter(&EMLXS_PM_LOCK);
7419 7419 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7420 7420 mutex_exit(&EMLXS_PM_LOCK);
7421 7421 }
7422 7422
7423 7423 #ifdef SFCT_SUPPORT
7424 7424 if (port->flag & EMLXS_TGT_ENABLED) {
7425 7425 /* Do this last */
7426 7426 emlxs_fct_attach(hba);
7427 7427 init_flag |= ATTACH_FCT;
7428 7428 }
7429 7429 #endif /* SFCT_SUPPORT */
7430 7430
7431 7431 return (DDI_SUCCESS);
7432 7432
7433 7433 failed:
7434 7434
7435 7435 emlxs_driver_remove(dip, init_flag, 1);
7436 7436
7437 7437 return (DDI_FAILURE);
7438 7438
7439 7439 } /* emlxs_hba_attach() */
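
A pattern note on the function above: each successfully completed step ORs an ATTACH_* bit into init_flag, and the single failed: label hands the accumulated mask to emlxs_driver_remove(), which undoes only the recorded steps. A generic, self-contained sketch of the same idiom; all FLAG_* values and my_*() helpers are hypothetical, not the driver's own:

#include <sys/types.h>

#define	FLAG_STATE	0x0001
#define	FLAG_INTR	0x0002
#define	FLAG_NODE	0x0004

extern int my_alloc_state(void);
extern int my_add_intr(void);
extern int my_create_node(void);
extern void my_free_state(void);
extern void my_remove_intr(void);
extern void my_remove_node(void);

static int
example_attach(void)
{
	uint32_t flags = 0;

	if (my_alloc_state() != 0)
		goto failed;
	flags |= FLAG_STATE;

	if (my_add_intr() != 0)
		goto failed;
	flags |= FLAG_INTR;

	if (my_create_node() != 0)
		goto failed;
	flags |= FLAG_NODE;

	return (0);

failed:
	/* Tear down in reverse order, but only the steps recorded in flags */
	if (flags & FLAG_NODE)
		my_remove_node();
	if (flags & FLAG_INTR)
		my_remove_intr();
	if (flags & FLAG_STATE)
		my_free_state();
	return (-1);
}
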
7440 7440
7441 7441
7442 7442 static int
7443 7443 emlxs_hba_detach(dev_info_t *dip)
7444 7444 {
7445 7445 emlxs_hba_t *hba;
7446 7446 emlxs_port_t *port;
7447 7447 int ddiinst;
7448 7448 int count;
7449 7449 uint32_t init_flag = (uint32_t)-1;
7450 7450
7451 7451 ddiinst = ddi_get_instance(dip);
7452 7452 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7453 7453 port = &PPORT;
7454 7454
7455 7455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7456 7456
7457 7457 mutex_enter(&EMLXS_PM_LOCK);
7458 7458 hba->pm_state |= EMLXS_PM_IN_DETACH;
7459 7459 mutex_exit(&EMLXS_PM_LOCK);
7460 7460
7461 7461 /* Lower the power level */
7462 7462 /*
7463 7463 * This will not suspend the driver since the
7464 7464 * EMLXS_PM_IN_DETACH has been set
7465 7465 */
7466 7466 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7467 7467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7468 7468 "Unable to lower power.");
7469 7469
7470 7470 mutex_enter(&EMLXS_PM_LOCK);
7471 7471 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7472 7472 mutex_exit(&EMLXS_PM_LOCK);
7473 7473
7474 7474 return (DDI_FAILURE);
7475 7475 }
7476 7476
7477 7477 /* Take the adapter offline first, if not already */
7478 7478 if (emlxs_offline(hba, 1) != 0) {
7479 7479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7480 7480 "Unable to take adapter offline.");
7481 7481
7482 7482 mutex_enter(&EMLXS_PM_LOCK);
7483 7483 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7484 7484 mutex_exit(&EMLXS_PM_LOCK);
7485 7485
7486 7486 (void) emlxs_pm_raise_power(dip);
7487 7487
7488 7488 return (DDI_FAILURE);
7489 7489 }
7490 7490 /* Check ub buffer pools */
7491 7491 if (port->ub_pool) {
7492 7492 mutex_enter(&EMLXS_UB_LOCK);
7493 7493
7494 7494 /* Wait up to 10 seconds for all ub pools to be freed */
7495 7495 count = 10 * 2;
7496 7496 while (port->ub_pool && count) {
7497 7497 mutex_exit(&EMLXS_UB_LOCK);
7498 7498 delay(drv_usectohz(500000)); /* half second wait */
7499 7499 count--;
7500 7500 mutex_enter(&EMLXS_UB_LOCK);
7501 7501 }
7502 7502
7503 7503 if (port->ub_pool) {
7504 7504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7505 7505 "fca_unbind_port: Unsolicited buffers still "
7506 7506 "active. port=%p. Destroying...", port);
7507 7507
7508 7508 /* Destroy all pools */
7509 7509 while (port->ub_pool) {
7510 7510 emlxs_ub_destroy(port, port->ub_pool);
7511 7511 }
7512 7512 }
7513 7513
7514 7514 mutex_exit(&EMLXS_UB_LOCK);
7515 7515 }
7516 7516 init_flag &= ~ATTACH_ONLINE;
7517 7517
7518 7518 /* Remove the driver instance */
7519 7519 emlxs_driver_remove(dip, init_flag, 0);
7520 7520
7521 7521 return (DDI_SUCCESS);
7522 7522
7523 7523 } /* emlxs_hba_detach() */
7524 7524
7525 7525
7526 7526 extern int
7527 7527 emlxs_map_bus(emlxs_hba_t *hba)
7528 7528 {
7529 7529 emlxs_port_t *port = &PPORT;
7530 7530 dev_info_t *dip;
7531 7531 ddi_device_acc_attr_t dev_attr;
7532 7532 int status;
7533 7533
7534 7534 dip = (dev_info_t *)hba->dip;
7535 7535 dev_attr = emlxs_dev_acc_attr;
7536 7536
7537 7537 if (hba->bus_type == SBUS_FC) {
7538 7538 if (hba->pci_acc_handle == 0) {
7539 7539 status = ddi_regs_map_setup(dip,
7540 7540 SBUS_DFLY_PCI_CFG_RINDEX,
7541 7541 (caddr_t *)&hba->pci_addr,
7542 7542 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7543 7543 if (status != DDI_SUCCESS) {
7544 7544 EMLXS_MSGF(EMLXS_CONTEXT,
7545 7545 &emlxs_attach_failed_msg,
7546 7546 "(SBUS) ddi_regs_map_setup PCI failed. "
7547 7547 "status=%x", status);
7548 7548 goto failed;
7549 7549 }
7550 7550 }
7551 7551
7552 7552 if (hba->sbus_pci_handle == 0) {
7553 7553 status = ddi_regs_map_setup(dip,
7554 7554 SBUS_TITAN_PCI_CFG_RINDEX,
7555 7555 (caddr_t *)&hba->sbus_pci_addr,
7556 7556 0, 0, &dev_attr, &hba->sbus_pci_handle);
7557 7557 if (status != DDI_SUCCESS) {
7558 7558 EMLXS_MSGF(EMLXS_CONTEXT,
7559 7559 &emlxs_attach_failed_msg,
7560 7560 "(SBUS) ddi_regs_map_setup TITAN PCI "
7561 7561 "failed. status=%x", status);
7562 7562 goto failed;
7563 7563 }
7564 7564 }
7565 7565
7566 7566 } else { /* ****** PCI ****** */
7567 7567
7568 7568 if (hba->pci_acc_handle == 0) {
7569 7569 status = ddi_regs_map_setup(dip,
7570 7570 PCI_CFG_RINDEX,
7571 7571 (caddr_t *)&hba->pci_addr,
7572 7572 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7573 7573 if (status != DDI_SUCCESS) {
7574 7574 EMLXS_MSGF(EMLXS_CONTEXT,
7575 7575 &emlxs_attach_failed_msg,
7576 7576 "(PCI) ddi_regs_map_setup PCI failed. "
7577 7577 "status=%x", status);
7578 7578 goto failed;
7579 7579 }
7580 7580 }
7581 7581 #ifdef EMLXS_I386
7582 7582 /* Set up the PCI configuration space */
7583 7583 (void) ddi_put16(hba->pci_acc_handle,
7584 7584 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7585 7585 CMD_CFG_VALUE | CMD_IO_ENBL);
7586 7586
7587 7587 #ifdef FMA_SUPPORT
7588 7588 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7589 7589 != DDI_FM_OK) {
7590 7590 EMLXS_MSGF(EMLXS_CONTEXT,
7591 7591 &emlxs_invalid_access_handle_msg, NULL);
7592 7592 goto failed;
7593 7593 }
7594 7594 #endif /* FMA_SUPPORT */
7595 7595
7596 7596 #endif /* EMLXS_I386 */
7597 7597
7598 7598 }
7599 7599 return (0);
7600 7600
7601 7601 failed:
7602 7602
7603 7603 emlxs_unmap_bus(hba);
7604 7604 return (ENOMEM);
7605 7605
7606 7606 } /* emlxs_map_bus() */
7607 7607
7608 7608
7609 7609 extern void
7610 7610 emlxs_unmap_bus(emlxs_hba_t *hba)
7611 7611 {
7612 7612 if (hba->pci_acc_handle) {
7613 7613 (void) ddi_regs_map_free(&hba->pci_acc_handle);
7614 7614 hba->pci_acc_handle = 0;
7615 7615 }
7616 7616
7617 7617 if (hba->sbus_pci_handle) {
7618 7618 (void) ddi_regs_map_free(&hba->sbus_pci_handle);
7619 7619 hba->sbus_pci_handle = 0;
7620 7620 }
7621 7621
7622 7622 return;
7623 7623
7624 7624 } /* emlxs_unmap_bus() */
7625 7625
7626 7626
7627 7627 static int
7628 7628 emlxs_get_props(emlxs_hba_t *hba)
7629 7629 {
7630 7630 emlxs_config_t *cfg;
7631 7631 uint32_t i;
7632 7632 char string[256];
7633 7633 uint32_t new_value;
7634 7634
7635 7635 /* Initialize each parameter */
7636 7636 for (i = 0; i < NUM_CFG_PARAM; i++) {
7637 7637 cfg = &hba->config[i];
7638 7638
7639 7639 /* Ensure strings are terminated */
7640 7640 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7641 7641 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;
7642 7642
7643 7643 /* Set the current value to the default value */
7644 7644 new_value = cfg->def;
7645 7645
7646 7646 /* First check for the global setting */
7647 7647 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7648 7648 (void *)hba->dip, DDI_PROP_DONTPASS,
7649 7649 cfg->string, new_value);
7650 7650
7651 7651 /* Now check for the per adapter ddiinst setting */
7652 7652 (void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7653 7653 hba->ddiinst, cfg->string);
7654 7654
7655 7655 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7656 7656 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7657 7657
7658 7658 /* Now check the parameter */
7659 7659 cfg->current = emlxs_check_parm(hba, i, new_value);
7660 7660 }
7661 7661
7662 7662 return (0);
7663 7663
7664 7664 } /* emlxs_get_props() */
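
emlxs_get_props() reads each parameter twice: first under the bare property name (global to all instances), then under the "%s%d-%s" instance-qualified name, so a per-adapter value overrides the global one and both pass through emlxs_check_parm(). A hypothetical driver.conf fragment illustrating that precedence; the parameter name and values are only an example, the shipped emlxs.conf lists the supported names:

# Applies to every emlxs instance
link-speed=0;

# Overrides the global value for instance 1 only
emlxs1-link-speed=8;
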
7665 7665
7666 7666
7667 7667 extern uint32_t
7668 7668 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7669 7669 {
7670 7670 emlxs_port_t *port = &PPORT;
7671 7671 uint32_t i;
7672 7672 emlxs_config_t *cfg;
7673 7673 emlxs_vpd_t *vpd = &VPD;
7674 7674
7675 7675 if (index >= NUM_CFG_PARAM) {
7676 7676 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7677 7677 "check_parm failed. Invalid index = %d", index);
7678 7678
7679 7679 return (new_value);
7680 7680 }
7681 7681
7682 7682 cfg = &hba->config[index];
7683 7683
7684 7684 if (new_value > cfg->hi) {
7685 7685 new_value = cfg->def;
7686 7686 } else if (new_value < cfg->low) {
7687 7687 new_value = cfg->def;
7688 7688 }
7689 7689
7690 7690 /* Perform additional checks */
7691 7691 switch (index) {
7692 7692 #ifdef SFCT_SUPPORT
7693 7693 case CFG_NPIV_ENABLE:
7694 7694 if (hba->config[CFG_TARGET_MODE].current &&
7695 7695 hba->config[CFG_DTM_ENABLE].current == 0) {
7696 7696 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7697 7697 "enable-npiv: Not supported in pure target mode. "
7698 7698 "Disabling.");
7699 7699
7700 7700 new_value = 0;
7701 7701 }
7702 7702 break;
7703 7703 #endif /* SFCT_SUPPORT */
7704 7704
7705 7705
7706 7706 case CFG_NUM_NODES:
7707 7707 switch (new_value) {
7708 7708 case 1:
7709 7709 case 2:
7710 7710 /* Must have at least 3 if not 0 */
7711 7711 return (3);
7712 7712
7713 7713 default:
7714 7714 break;
7715 7715 }
7716 7716 break;
7717 7717
7718 7718 case CFG_FW_CHECK:
7719 7719 /* The 0x2 bit implies the 0x1 bit will also be set */
7720 7720 if (new_value & 0x2) {
7721 7721 new_value |= 0x1;
7722 7722 }
7723 7723
7724 7724 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
7725 7725 if (!(new_value & 0x3) && (new_value & 0x4)) {
7726 7726 new_value &= ~0x4;
7727 7727 }
7728 7728 break;
7729 7729
7730 7730 case CFG_LINK_SPEED:
7731 7731 if ((new_value > 8) &&
7732 7732 (hba->config[CFG_TOPOLOGY].current == 4)) {
7733 7733 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7734 7734 "link-speed: %dGb not supported in loop topology. "
7735 7735 "Switching to auto detect.",
7736 7736 new_value);
7737 7737
7738 7738 new_value = 0;
7739 7739 break;
7740 7740 }
7741 7741
7742 7742 if (vpd->link_speed) {
7743 7743 switch (new_value) {
7744 7744 case 0:
7745 7745 break;
7746 7746
7747 7747 case 1:
7748 7748 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
7749 7749 new_value = 0;
7750 7750
7751 7751 EMLXS_MSGF(EMLXS_CONTEXT,
7752 7752 &emlxs_init_msg,
7753 7753 "link-speed: 1Gb not supported "
7754 7754 "by adapter. Switching to auto "
7755 7755 "detect.");
7756 7756 }
7757 7757 break;
7758 7758
7759 7759 case 2:
7760 7760 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7761 7761 new_value = 0;
7762 7762
7763 7763 EMLXS_MSGF(EMLXS_CONTEXT,
7764 7764 &emlxs_init_msg,
7765 7765 "link-speed: 2Gb not supported "
7766 7766 "by adapter. Switching to auto "
7767 7767 "detect.");
7768 7768 }
7769 7769 break;
7770 7770
7771 7771 case 4:
7772 7772 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7773 7773 new_value = 0;
7774 7774
7775 7775 EMLXS_MSGF(EMLXS_CONTEXT,
7776 7776 &emlxs_init_msg,
7777 7777 "link-speed: 4Gb not supported "
7778 7778 "by adapter. Switching to auto "
7779 7779 "detect.");
7780 7780 }
7781 7781 break;
7782 7782
7783 7783 case 8:
7784 7784 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7785 7785 new_value = 0;
7786 7786
7787 7787 EMLXS_MSGF(EMLXS_CONTEXT,
7788 7788 &emlxs_init_msg,
7789 7789 "link-speed: 8Gb not supported "
7790 7790 "by adapter. Switching to auto "
7791 7791 "detect.");
7792 7792 }
7793 7793 break;
7794 7794
7795 7795 case 16:
7796 7796 if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
7797 7797 new_value = 0;
7798 7798
7799 7799 EMLXS_MSGF(EMLXS_CONTEXT,
7800 7800 &emlxs_init_msg,
7801 7801 "link-speed: 16Gb not supported "
7802 7802 "by adapter. Switching to auto "
7803 7803 "detect.");
7804 7804 }
7805 7805 break;
7806 7806
7807 7807 default:
7808 7808 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7809 7809 "link-speed: Invalid value=%d provided. "
7810 7810 "Switching to auto detect.",
7811 7811 new_value);
7812 7812
7813 7813 new_value = 0;
7814 7814 }
7815 7815 } else { /* Perform basic validity check */
7816 7816
7817 7817 /* Perform additional check on link speed */
7818 7818 switch (new_value) {
7819 7819 case 0:
7820 7820 case 1:
7821 7821 case 2:
7822 7822 case 4:
7823 7823 case 8:
7824 7824 case 16:
7825 7825 /* link-speed is a valid choice */
7826 7826 break;
7827 7827
7828 7828 default:
7829 7829 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7830 7830 "link-speed: Invalid value=%d provided. "
7831 7831 "Switching to auto detect.",
7832 7832 new_value);
7833 7833
7834 7834 new_value = 0;
7835 7835 }
7836 7836 }
7837 7837 break;
7838 7838
7839 7839 case CFG_TOPOLOGY:
7840 7840 if ((new_value == 4) &&
7841 7841 (hba->config[CFG_LINK_SPEED].current > 8)) {
7842 7842 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7843 7843 "topology: Loop topology not supported "
7844 7844 "with link speeds greater than 8Gb. "
7845 7845 "Switching to auto detect.");
7846 7846
7847 7847 new_value = 0;
7848 7848 break;
7849 7849 }
7850 7850
7851 7851 /* Perform additional check on topology */
7852 7852 switch (new_value) {
7853 7853 case 0:
7854 7854 case 2:
7855 7855 case 4:
7856 7856 case 6:
7857 7857 /* topology is a valid choice */
7858 7858 break;
7859 7859
7860 7860 default:
7861 7861 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7862 7862 "topology: Invalid value=%d provided. "
7863 7863 "Switching to auto detect.",
7864 7864 new_value);
7865 7865
7866 7866 new_value = 0;
7867 7867 break;
7868 7868 }
7869 7869 break;
7870 7870
7871 7871 #ifdef DHCHAP_SUPPORT
7872 7872 case CFG_AUTH_TYPE:
7873 7873 {
7874 7874 uint32_t shift;
7875 7875 uint32_t mask;
7876 7876
7877 7877 /* Perform additional check on auth type */
7878 7878 shift = 12;
7879 7879 mask = 0xF000;
7880 7880 for (i = 0; i < 4; i++) {
7881 7881 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7882 7882 return (cfg->def);
7883 7883 }
7884 7884
7885 7885 shift -= 4;
7886 7886 mask >>= 4;
7887 7887 }
7888 7888 break;
7889 7889 }
7890 7890
7891 7891 case CFG_AUTH_HASH:
7892 7892 {
7893 7893 uint32_t shift;
7894 7894 uint32_t mask;
7895 7895
7896 7896 /* Perform additional check on auth hash */
7897 7897 shift = 12;
7898 7898 mask = 0xF000;
7899 7899 for (i = 0; i < 4; i++) {
7900 7900 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7901 7901 return (cfg->def);
7902 7902 }
7903 7903
7904 7904 shift -= 4;
7905 7905 mask >>= 4;
7906 7906 }
7907 7907 break;
7908 7908 }
7909 7909
7910 7910 case CFG_AUTH_GROUP:
7911 7911 {
7912 7912 uint32_t shift;
7913 7913 uint32_t mask;
7914 7914
7915 7915 /* Perform additional check on auth group */
7916 7916 shift = 28;
7917 7917 mask = 0xF0000000;
7918 7918 for (i = 0; i < 8; i++) {
7919 7919 if (((new_value & mask) >> shift) >
7920 7920 DFC_AUTH_GROUP_MAX) {
7921 7921 return (cfg->def);
7922 7922 }
7923 7923
7924 7924 shift -= 4;
7925 7925 mask >>= 4;
7926 7926 }
7927 7927 break;
7928 7928 }
7929 7929
7930 7930 case CFG_AUTH_INTERVAL:
7931 7931 if (new_value < 10) {
7932 7932 return (10);
7933 7933 }
7934 7934 break;
7935 7935
7936 7936
7937 7937 #endif /* DHCHAP_SUPPORT */
7938 7938
7939 7939 } /* switch */
7940 7940
7941 7941 return (new_value);
7942 7942
7943 7943 } /* emlxs_check_parm() */
7944 7944
7945 7945
7946 7946 extern uint32_t
7947 7947 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7948 7948 {
7949 7949 emlxs_port_t *port = &PPORT;
7950 7950 emlxs_port_t *vport;
7951 7951 uint32_t vpi;
7952 7952 emlxs_config_t *cfg;
7953 7953 uint32_t old_value;
7954 7954
7955 7955 if (index >= NUM_CFG_PARAM) {
7956 7956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7957 7957 "set_parm failed. Invalid index = %d", index);
7958 7958
7959 7959 return ((uint32_t)FC_FAILURE);
7960 7960 }
7961 7961
7962 7962 cfg = &hba->config[index];
7963 7963
7964 7964 if (!(cfg->flags & PARM_DYNAMIC)) {
7965 7965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7966 7966 "set_parm failed. %s is not dynamic.", cfg->string);
7967 7967
7968 7968 return ((uint32_t)FC_FAILURE);
7969 7969 }
7970 7970
7971 7971 /* Check new value */
7972 7972 old_value = new_value;
7973 7973 new_value = emlxs_check_parm(hba, index, new_value);
7974 7974
7975 7975 if (old_value != new_value) {
7976 7976 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7977 7977 "set_parm: %s invalid. 0x%x --> 0x%x",
7978 7978 cfg->string, old_value, new_value);
7979 7979 }
7980 7980
7981 7981 /* Return now if no actual change */
7982 7982 if (new_value == cfg->current) {
7983 7983 return (FC_SUCCESS);
7984 7984 }
7985 7985
7986 7986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7987 7987 "set_parm: %s changing. 0x%x --> 0x%x",
7988 7988 cfg->string, cfg->current, new_value);
7989 7989
7990 7990 old_value = cfg->current;
7991 7991 cfg->current = new_value;
7992 7992
7993 7993 /* React to change if needed */
7994 7994 switch (index) {
7995 7995
7996 7996 case CFG_PCI_MAX_READ:
7997 7997 /* Update MXR */
7998 7998 emlxs_pcix_mxr_update(hba, 1);
7999 7999 break;
8000 8000
8001 8001 #ifdef SFCT_SUPPORT
8002 8002 case CFG_TARGET_MODE:
8003 8003 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8004 8004 break;
8005 8005 #endif /* SFCT_SUPPORT */
8006 8006
8007 8007 case CFG_SLI_MODE:
8008 8008 /* Check SLI mode */
8009 8009 if ((hba->sli_mode == 3) && (new_value == 2)) {
8010 8010 /* All vports must be disabled first */
8011 8011 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8012 8012 vport = &VPORT(vpi);
8013 8013
8014 8014 if (vport->flag & EMLXS_PORT_ENABLED) {
8015 8015 /* Reset current value */
8016 8016 cfg->current = old_value;
8017 8017
8018 8018 EMLXS_MSGF(EMLXS_CONTEXT,
8019 8019 &emlxs_sfs_debug_msg,
8020 8020 "set_parm failed. %s: vpi=%d "
8021 8021 "still enabled. Value restored to "
8022 8022 "0x%x.", cfg->string, vpi,
8023 8023 old_value);
8024 8024
8025 8025 return (2);
8026 8026 }
8027 8027 }
8028 8028 }
8029 8029
8030 8030 if ((hba->sli_mode >= 4) && (new_value < 4)) {
8031 8031 /*
8032 8032 * Do not allow setting SLI 2 or 3 if the HBA supports SLI4
8033 8033 */
8034 8034 cfg->current = old_value;
8035 8035 return ((uint32_t)FC_FAILURE);
8036 8036 }
8037 8037
8038 8038 break;
8039 8039
8040 8040 case CFG_NPIV_ENABLE:
8041 8041 /* Check if NPIV is being disabled */
8042 8042 if ((old_value == 1) && (new_value == 0)) {
8043 8043 /* All vports must be disabled first */
8044 8044 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8045 8045 vport = &VPORT(vpi);
8046 8046
8047 8047 if (vport->flag & EMLXS_PORT_ENABLED) {
8048 8048 /* Reset current value */
8049 8049 cfg->current = old_value;
8050 8050
8051 8051 EMLXS_MSGF(EMLXS_CONTEXT,
8052 8052 &emlxs_sfs_debug_msg,
8053 8053 "set_parm failed. %s: vpi=%d "
8054 8054 "still enabled. Value restored to "
8055 8055 "0x%x.", cfg->string, vpi,
8056 8056 old_value);
8057 8057
8058 8058 return (2);
8059 8059 }
8060 8060 }
8061 8061 }
8062 8062
8063 8063 /* Trigger adapter reset */
8064 8064 /* (void) emlxs_reset(port, FC_FCA_RESET); */
8065 8065
8066 8066 break;
8067 8067
8068 8068
8069 8069 case CFG_VPORT_RESTRICTED:
8070 8070 for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
8071 8071 vport = &VPORT(vpi);
8072 8072
8073 8073 if (!(vport->flag & EMLXS_PORT_CONFIG)) {
8074 8074 continue;
8075 8075 }
8076 8076
8077 8077 if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
8078 8078 continue;
8079 8079 }
8080 8080
8081 8081 if (new_value) {
8082 8082 vport->flag |= EMLXS_PORT_RESTRICTED;
8083 8083 } else {
8084 8084 vport->flag &= ~EMLXS_PORT_RESTRICTED;
8085 8085 }
8086 8086 }
8087 8087
8088 8088 break;
8089 8089
8090 8090 #ifdef DHCHAP_SUPPORT
8091 8091 case CFG_AUTH_ENABLE:
8092 8092 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8093 8093 break;
8094 8094
8095 8095 case CFG_AUTH_TMO:
8096 8096 hba->auth_cfg.authentication_timeout = cfg->current;
8097 8097 break;
8098 8098
8099 8099 case CFG_AUTH_MODE:
8100 8100 hba->auth_cfg.authentication_mode = cfg->current;
8101 8101 break;
8102 8102
8103 8103 case CFG_AUTH_BIDIR:
8104 8104 hba->auth_cfg.bidirectional = cfg->current;
8105 8105 break;
8106 8106
8107 8107 case CFG_AUTH_TYPE:
8108 8108 hba->auth_cfg.authentication_type_priority[0] =
8109 8109 (cfg->current & 0xF000) >> 12;
8110 8110 hba->auth_cfg.authentication_type_priority[1] =
8111 8111 (cfg->current & 0x0F00) >> 8;
8112 8112 hba->auth_cfg.authentication_type_priority[2] =
8113 8113 (cfg->current & 0x00F0) >> 4;
8114 8114 hba->auth_cfg.authentication_type_priority[3] =
8115 8115 (cfg->current & 0x000F);
8116 8116 break;
8117 8117
8118 8118 case CFG_AUTH_HASH:
8119 8119 hba->auth_cfg.hash_priority[0] =
8120 8120 (cfg->current & 0xF000) >> 12;
8121 8121 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
8122 8122 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
8123 8123 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
8124 8124 break;
8125 8125
8126 8126 case CFG_AUTH_GROUP:
8127 8127 hba->auth_cfg.dh_group_priority[0] =
8128 8128 (cfg->current & 0xF0000000) >> 28;
8129 8129 hba->auth_cfg.dh_group_priority[1] =
8130 8130 (cfg->current & 0x0F000000) >> 24;
8131 8131 hba->auth_cfg.dh_group_priority[2] =
8132 8132 (cfg->current & 0x00F00000) >> 20;
8133 8133 hba->auth_cfg.dh_group_priority[3] =
8134 8134 (cfg->current & 0x000F0000) >> 16;
8135 8135 hba->auth_cfg.dh_group_priority[4] =
8136 8136 (cfg->current & 0x0000F000) >> 12;
8137 8137 hba->auth_cfg.dh_group_priority[5] =
8138 8138 (cfg->current & 0x00000F00) >> 8;
8139 8139 hba->auth_cfg.dh_group_priority[6] =
8140 8140 (cfg->current & 0x000000F0) >> 4;
8141 8141 hba->auth_cfg.dh_group_priority[7] =
8142 8142 (cfg->current & 0x0000000F);
8143 8143 break;
8144 8144
8145 8145 case CFG_AUTH_INTERVAL:
8146 8146 hba->auth_cfg.reauthenticate_time_interval = cfg->current;
8147 8147 break;
8148 8148 #endif /* DHCHAP_SUPPORT */
8149 8149
8150 8150 }
8151 8151
8152 8152 return (FC_SUCCESS);
8153 8153
8154 8154 } /* emlxs_set_parm() */
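
For the nibble-packed auth parameters above (CFG_AUTH_TYPE and CFG_AUTH_HASH carry four 4-bit priority slots in a 16-bit value, CFG_AUTH_GROUP carries eight slots in 32 bits), a small standalone decode may help. The input value 0x1230 and the program itself are only an illustration of the same shift/mask walk used in the cases above:

#include <stdio.h>

int
main(void)
{
	unsigned int packed = 0x1230;	/* example 4-slot packed value */
	unsigned int prio[4];
	int i;

	for (i = 0; i < 4; i++) {
		/* slot 0 is the high nibble, slot 3 the low nibble */
		prio[i] = (packed >> (12 - (4 * i))) & 0xF;
		(void) printf("priority[%d] = %u\n", i, prio[i]);
	}
	/* prints 1, 2, 3, 0 for 0x1230 */
	return (0);
}
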
8155 8155
8156 8156
8157 8157 /*
8158 8158 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
8159 8159 *
8160 8160 * The buf_info->flags field describes the memory operation requested.
8161 8161 *
8162 8162 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
8163 8163 * Virtual address is supplied in buf_info->virt
8164 8164 * DMA mapping flag is in buf_info->align
8165 8165 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8166 8166 * The mapped physical address is returned buf_info->phys
8167 8167 *
8168 8168 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
8169 8169 * if FC_MBUF_DMA is set the memory is also mapped for DMA
8170 8170 * The byte alignment of the memory request is supplied in buf_info->align
8171 8171 * The byte size of the memory request is supplied in buf_info->size
8172 8172 * The virtual address is returned buf_info->virt
8173 8173 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
8174 8174 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA) */
8175 8175 extern uint8_t *
8176 8176 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8177 8177 {
8178 8178 emlxs_port_t *port = &PPORT;
8179 8179 ddi_dma_attr_t dma_attr;
8180 8180 ddi_device_acc_attr_t dev_attr;
8181 8181 uint_t cookie_count;
8182 8182 size_t dma_reallen;
8183 8183 ddi_dma_cookie_t dma_cookie;
8184 8184 uint_t dma_flag;
8185 8185 int status;
8186 8186
8187 8187 dma_attr = hba->dma_attr_1sg;
8188 8188 dev_attr = emlxs_data_acc_attr;
8189 8189
8190 8190 if (buf_info->flags & FC_MBUF_SNGLSG) {
8191 8191 dma_attr.dma_attr_sgllen = 1;
8192 8192 }
8193 8193
8194 8194 if (buf_info->flags & FC_MBUF_DMA32) {
8195 8195 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
8196 8196 }
8197 8197
8198 8198 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8199 8199
8200 8200 if (buf_info->virt == NULL) {
8201 8201 goto done;
8202 8202 }
8203 8203
8204 8204 /*
8205 8205 * Allocate the DMA handle for this DMA object
8206 8206 */
8207 8207 status = ddi_dma_alloc_handle((void *)hba->dip,
8208 8208 &dma_attr, DDI_DMA_DONTWAIT,
8209 8209 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
8210 8210 if (status != DDI_SUCCESS) {
8211 8211 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8212 8212 "ddi_dma_alloc_handle failed: size=%x align=%x "
8213 8213 "flags=%x", buf_info->size, buf_info->align,
8214 8214 buf_info->flags);
8215 8215
8216 8216 buf_info->phys = 0;
8217 8217 buf_info->dma_handle = 0;
8218 8218 goto done;
8219 8219 }
8220 8220
8221 8221 switch (buf_info->align) {
8222 8222 case DMA_READ_WRITE:
8223 8223 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
8224 8224 break;
8225 8225 case DMA_READ_ONLY:
8226 8226 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
8227 8227 break;
8228 8228 case DMA_WRITE_ONLY:
8229 8229 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
8230 8230 break;
8231 8231 default:
8232 8232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8233 8233 "Invalid DMA flag");
8234 8234 (void) ddi_dma_free_handle(
8235 8235 (ddi_dma_handle_t *)&buf_info->dma_handle);
8236 8236 buf_info->phys = 0;
8237 8237 buf_info->dma_handle = 0;
8238 8238 return ((uint8_t *)buf_info->virt);
8239 8239 }
8240 8240
8241 8241 /* Map this page of memory */
8242 8242 status = ddi_dma_addr_bind_handle(
8243 8243 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8244 8244 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8245 8245 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
8246 8246 &cookie_count);
8247 8247
8248 8248 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8249 8249 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8250 8250 "ddi_dma_addr_bind_handle failed: status=%x "
8251 8251 "count=%x flags=%x", status, cookie_count,
8252 8252 buf_info->flags);
8253 8253
8254 8254 (void) ddi_dma_free_handle(
8255 8255 (ddi_dma_handle_t *)&buf_info->dma_handle);
8256 8256 buf_info->phys = 0;
8257 8257 buf_info->dma_handle = 0;
8258 8258 goto done;
8259 8259 }
8260 8260
8261 8261 if (hba->bus_type == SBUS_FC) {
8262 8262
8263 8263 int32_t burstsizes_limit = 0xff;
8264 8264 int32_t ret_burst;
8265 8265
8266 8266 ret_burst = ddi_dma_burstsizes(
8267 8267 buf_info->dma_handle) & burstsizes_limit;
8268 8268 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8269 8269 ret_burst) == DDI_FAILURE) {
8270 8270 EMLXS_MSGF(EMLXS_CONTEXT,
8271 8271 &emlxs_mem_alloc_failed_msg,
8272 8272 "ddi_dma_set_sbus64 failed.");
8273 8273 }
8274 8274 }
8275 8275
8276 8276 /* Save Physical address */
8277 8277 buf_info->phys = dma_cookie.dmac_laddress;
8278 8278
8279 8279 /*
8280 8280 * Just to be sure, let's add this
8281 8281 */
8282 8282 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8283 8283 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8284 8284
8285 8285 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
8286 8286
8287 8287 dma_attr.dma_attr_align = buf_info->align;
8288 8288
8289 8289 /*
8290 8290 * Allocate the DMA handle for this DMA object
8291 8291 */
8292 8292 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
8293 8293 DDI_DMA_DONTWAIT, NULL,
8294 8294 (ddi_dma_handle_t *)&buf_info->dma_handle);
8295 8295 if (status != DDI_SUCCESS) {
8296 8296 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8297 8297 "ddi_dma_alloc_handle failed: size=%x align=%x "
8298 8298 "flags=%x", buf_info->size, buf_info->align,
8299 8299 buf_info->flags);
8300 8300
8301 8301 buf_info->virt = NULL;
8302 8302 buf_info->phys = 0;
8303 8303 buf_info->data_handle = 0;
8304 8304 buf_info->dma_handle = 0;
8305 8305 goto done;
8306 8306 }
8307 8307
8308 8308 status = ddi_dma_mem_alloc(
8309 8309 (ddi_dma_handle_t)buf_info->dma_handle,
8310 8310 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
8311 8311 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
8312 8312 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
8313 8313
8314 8314 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
8315 8315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8316 8316 "ddi_dma_mem_alloc failed: size=%x align=%x "
8317 8317 "flags=%x", buf_info->size, buf_info->align,
8318 8318 buf_info->flags);
8319 8319
8320 8320 (void) ddi_dma_free_handle(
8321 8321 (ddi_dma_handle_t *)&buf_info->dma_handle);
8322 8322
8323 8323 buf_info->virt = NULL;
8324 8324 buf_info->phys = 0;
8325 8325 buf_info->data_handle = 0;
8326 8326 buf_info->dma_handle = 0;
8327 8327 goto done;
8328 8328 }
8329 8329
8330 8330 /* Map this page of memory */
8331 8331 status = ddi_dma_addr_bind_handle(
8332 8332 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8333 8333 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8334 8334 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
8335 8335 &dma_cookie, &cookie_count);
8336 8336
8337 8337 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8338 8338 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8339 8339 "ddi_dma_addr_bind_handle failed: status=%x "
8340 8340 "count=%d size=%x align=%x flags=%x", status,
8341 8341 cookie_count, buf_info->size, buf_info->align,
8342 8342 buf_info->flags);
8343 8343
8344 8344 (void) ddi_dma_mem_free(
8345 8345 (ddi_acc_handle_t *)&buf_info->data_handle);
8346 8346 (void) ddi_dma_free_handle(
8347 8347 (ddi_dma_handle_t *)&buf_info->dma_handle);
8348 8348
8349 8349 buf_info->virt = NULL;
8350 8350 buf_info->phys = 0;
8351 8351 buf_info->dma_handle = 0;
8352 8352 buf_info->data_handle = 0;
8353 8353 goto done;
8354 8354 }
8355 8355
8356 8356 if (hba->bus_type == SBUS_FC) {
8357 8357 int32_t burstsizes_limit = 0xff;
8358 8358 int32_t ret_burst;
8359 8359
8360 8360 ret_burst =
8361 8361 ddi_dma_burstsizes(buf_info->
8362 8362 dma_handle) & burstsizes_limit;
8363 8363 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8364 8364 ret_burst) == DDI_FAILURE) {
8365 8365 EMLXS_MSGF(EMLXS_CONTEXT,
8366 8366 &emlxs_mem_alloc_failed_msg,
8367 8367 "ddi_dma_set_sbus64 failed.");
8368 8368 }
8369 8369 }
8370 8370
8371 8371 /* Save Physical address */
8372 8372 buf_info->phys = dma_cookie.dmac_laddress;
8373 8373
8374 8374 /* Just to be sure, let's add this */
8375 8375 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8376 8376 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8377 8377
8378 8378 } else { /* allocate virtual memory */
8379 8379
8380 8380 buf_info->virt =
8381 8381 kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
8382 8382 buf_info->phys = 0;
8383 8383 buf_info->data_handle = 0;
8384 8384 buf_info->dma_handle = 0;
8385 8385
8386 8386 if (buf_info->virt == (uint32_t *)0) {
8387 8387 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8388 8388 "size=%x flags=%x", buf_info->size,
8389 8389 buf_info->flags);
8390 8390 }
8391 8391
8392 8392 }
8393 8393
8394 8394 done:
8395 8395
8396 8396 return ((uint8_t *)buf_info->virt);
8397 8397
8398 8398 } /* emlxs_mem_alloc() */
8399 8399
8400 8400
8401 8401
8402 8402 /*
8403 8403 * emlxs_mem_free:
8404 8404 *
8405 8405 * OS specific routine for memory de-allocation / unmapping
8406 8406 *
8407 8407 * The buf_info->flags field describes the memory operation requested.
8408 8408 *
8409 8409 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
8410 8410 * for DMA, but not freed. The mapped physical address to be unmapped is in
8411 8411 * buf_info->phys
8412 8412 *
8413 8413 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8414 8414 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8415 8415 * buf_info->phys. The virtual address to be freed is in buf_info->virt
8416 8416 */
8417 8417 /*ARGSUSED*/
8418 8418 extern void
8419 8419 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8420 8420 {
8421 8421 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8422 8422
8423 8423 if (buf_info->dma_handle) {
8424 8424 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8425 8425 (void) ddi_dma_free_handle(
8426 8426 (ddi_dma_handle_t *)&buf_info->dma_handle);
8427 8427 buf_info->dma_handle = NULL;
8428 8428 }
8429 8429
8430 8430 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
8431 8431
8432 8432 if (buf_info->dma_handle) {
8433 8433 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8434 8434 (void) ddi_dma_mem_free(
8435 8435 (ddi_acc_handle_t *)&buf_info->data_handle);
8436 8436 (void) ddi_dma_free_handle(
8437 8437 (ddi_dma_handle_t *)&buf_info->dma_handle);
8438 8438 buf_info->dma_handle = NULL;
8439 8439 buf_info->data_handle = NULL;
8440 8440 }
8441 8441
8442 8442 } else { /* free virtual memory */
8443 8443
8444 8444 if (buf_info->virt) {
8445 8445 kmem_free(buf_info->virt, (size_t)buf_info->size);
8446 8446 buf_info->virt = NULL;
8447 8447 }
8448 8448 }
8449 8449
8450 8450 } /* emlxs_mem_free() */
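
Putting the two routines above together, a caller using the FC_MBUF_DMA path fills an MBUF_INFO, checks the returned virtual address, and later frees with the same flags. A sketch of one possible caller, assuming the emlxs headers are in scope; the helper name is hypothetical and error handling is trimmed:

/* Sketch only: allocate and release a DMA-able scratch buffer. */
static void
example_dma_scratch(emlxs_hba_t *hba)
{
	MBUF_INFO buf;

	bzero(&buf, sizeof (MBUF_INFO));
	buf.size = 4096;		/* requested byte size */
	buf.align = 4096;		/* requested byte alignment */
	buf.flags = FC_MBUF_DMA;	/* allocate and map for DMA */

	if (emlxs_mem_alloc(hba, &buf) == NULL) {
		return;		/* addresses and handles were zeroed on failure */
	}

	/* buf.virt is the kernel virtual address, buf.phys the DMA address */

	buf.flags = FC_MBUF_DMA;	/* the free path keys off the same flag */
	emlxs_mem_free(hba, &buf);
}
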
8451 8451
8452 8452
8453 8453 static int
8454 8454 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8455 8455 {
8456 8456 int channel;
8457 8457 int msi_id;
8458 8458
8459 8459
8460 8460 /* IO to an FCP2 device or a device reset always uses the fcp channel */
8461 8461 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8462 8462 return (hba->channel_fcp);
8463 8463 }
8464 8464
8465 8465
8466 8466 msi_id = emlxs_select_msiid(hba);
8467 8467 channel = emlxs_msiid_to_chan(hba, msi_id);
8468 8468
8469 8469
8470 8470
8471 8471 /* If channel is closed, then try fcp channel */
8472 8472 if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8473 8473 channel = hba->channel_fcp;
8474 8474 }
8475 8475 return (channel);
8476 8476
8477 8477 } /* emlxs_select_fcp_channel() */
8478 8478
8479 8479
8480 8480 static int32_t
8481 8481 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
8482 8482 {
8483 8483 emlxs_hba_t *hba = HBA;
8484 8484 fc_packet_t *pkt;
8485 8485 emlxs_config_t *cfg;
8486 8486 MAILBOXQ *mbq;
8487 8487 MAILBOX *mb;
8488 8488 uint32_t rc;
8489 8489
8490 8490 /*
8491 8491 * This routine provides an alternative target reset processing
8492 8492 * method. Instead of sending an actual target reset to the
8493 8493 * NPort, we will first unreg the login to that NPort. This
8494 8494 * will cause all the outstanding IOs to quickly complete with
8495 8495 * a NO RPI local error. Next we will force the ULP to relogin
8496 8496 * to the NPort by sending an RSCN (for that NPort) to the
8497 8497 * upper layer. This method should result in a fast target
8498 8498 * reset, as far as IOs completing; however, since an actual
8499 8499 * target reset is not sent to the NPort, it is not 100%
8500 8500 * compatible. Things like reservations will not be broken.
8501 8501 * By default this option is DISABLED, and it is only enabled
8502 8502 * through a hidden configuration parameter (fast-tgt-reset).
8503 8503 */
8504 8504 rc = FC_TRAN_BUSY;
8505 8505 pkt = PRIV2PKT(sbp);
8506 8506 cfg = &CFG;
8507 8507
8508 8508 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
8509 8509 /* issue the mbox cmd to the sli */
8510 8510 mb = (MAILBOX *) mbq->mbox;
8511 8511 bzero((void *) mb, MAILBOX_CMD_BSIZE);
8512 8512 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
8513 8513 #ifdef SLI3_SUPPORT
8514 8514 mb->un.varUnregLogin.vpi = port->vpi;
8515 8515 #endif /* SLI3_SUPPORT */
8516 8516 mb->mbxCommand = MBX_UNREG_LOGIN;
8517 8517 mb->mbxOwner = OWN_HOST;
8518 8518
8519 8519 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8520 8520 "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
8521 8521 cfg[CFG_FAST_TGT_RESET_TMR].current);
8522 8522
8523 8523 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
8524 8524 == MBX_SUCCESS) {
8525 8525
8526 8526 ndlp->nlp_Rpi = 0;
8527 8527
8528 8528 mutex_enter(&sbp->mtx);
8529 8529 sbp->node = (void *)ndlp;
8530 8530 sbp->did = ndlp->nlp_DID;
8531 8531 mutex_exit(&sbp->mtx);
8532 8532
8533 8533 if (pkt->pkt_rsplen) {
8534 8534 bzero((uint8_t *)pkt->pkt_resp,
8535 8535 pkt->pkt_rsplen);
8536 8536 }
8537 8537 if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
8538 8538 ndlp->nlp_force_rscn = hba->timer_tics +
8539 8539 cfg[CFG_FAST_TGT_RESET_TMR].current;
8540 8540 }
8541 8541
8542 8542 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
8543 8543 }
8544 8544
8545 8545 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
8546 8546 rc = FC_SUCCESS;
8547 8547 }
8548 8548 return (rc);
8549 8549 } /* emlxs_fast_target_reset() */
8550 8550
8551 8551 static int32_t
8552 8552 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
8553 8553 {
8554 8554 emlxs_hba_t *hba = HBA;
8555 8555 fc_packet_t *pkt;
8556 8556 emlxs_config_t *cfg;
8557 8557 IOCBQ *iocbq;
8558 8558 IOCB *iocb;
8559 8559 CHANNEL *cp;
8560 8560 NODELIST *ndlp;
8561 8561 char *cmd;
8562 8562 uint16_t lun;
8563 8563 FCP_CMND *fcp_cmd;
8564 8564 uint32_t did;
8565 8565 uint32_t reset = 0;
8566 8566 int channel;
8567 8567 int32_t rval;
8568 8568
8569 8569 pkt = PRIV2PKT(sbp);
8570 8570 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8571 8571
8572 8572 /* Find target node object */
8573 8573 ndlp = emlxs_node_find_did(port, did, 1);
8574 8574
8575 8575 if (!ndlp || !ndlp->nlp_active) {
8576 8576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8577 8577 "Node not found. did=%x", did);
8578 8578
8579 8579 return (FC_BADPACKET);
8580 8580 }
8581 8581
8582 8582 /* When the fcp channel is closed we stop accepting any FCP cmd */
8583 8583 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8584 8584 return (FC_TRAN_BUSY);
8585 8585 }
8586 8586
8587 8587 /* Snoop for target or lun reset first */
8588 8588 /* We always use FCP channel to send out target/lun reset fcp cmds */
8589 8589 /* interrupt affinity only applies to non tgt lun reset fcp cmd */
8590 8590 /* Interrupt affinity only applies to non target/lun reset fcp cmds */
8591 8591 cmd = (char *)pkt->pkt_cmd;
8592 8592 lun = *((uint16_t *)cmd);
8593 8593 lun = LE_SWAP16(lun);
8594 8594
8595 8595 iocbq = &sbp->iocbq;
8596 8596 iocb = &iocbq->iocb;
8597 8597 iocbq->node = (void *) ndlp;
8598 8598
8599 8599 /* Check for target reset */
8600 8600 if (cmd[10] & 0x20) {
8601 8601 /* prepare iocb */
8602 8602 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8603 8603 hba->channel_fcp)) != FC_SUCCESS) {
8604 8604
8605 8605 if (rval == 0xff) {
8606 8606 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8607 8607 0, 1);
8608 8608 rval = FC_SUCCESS;
8609 8609 }
8610 8610
8611 8611 return (rval);
8612 8612 }
8613 8613
8614 8614 mutex_enter(&sbp->mtx);
8615 8615 sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
8616 8616 sbp->pkt_flags |= PACKET_POLLED;
8617 8617 *pkt_flags = sbp->pkt_flags;
8618 8618 mutex_exit(&sbp->mtx);
8619 8619
8620 8620 #ifdef SAN_DIAG_SUPPORT
8621 8621 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
8622 8622 (HBA_WWN *)&ndlp->nlp_portname, -1);
8623 8623 #endif /* SAN_DIAG_SUPPORT */
8624 8624
8625 8625 iocbq->flag |= IOCB_PRIORITY;
8626 8626
8627 8627 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8628 8628 "Target Reset: did=%x", did);
8629 8629
8630 8630 cfg = &CFG;
8631 8631 if (cfg[CFG_FAST_TGT_RESET].current) {
8632 8632 if (emlxs_fast_target_reset(port, sbp, ndlp) ==
8633 8633 FC_SUCCESS) {
8634 8634 return (FC_SUCCESS);
8635 8635 }
8636 8636 }
8637 8637
8638 8638 /* Close the node for any further normal IO */
8639 8639 emlxs_node_close(port, ndlp, hba->channel_fcp,
8640 8640 pkt->pkt_timeout);
8641 8641
8642 8642 /* Flush the IO's on the tx queues */
8643 8643 (void) emlxs_tx_node_flush(port, ndlp,
8644 8644 &hba->chan[hba->channel_fcp], 0, sbp);
8645 8645
8646 8646 /* This is the target reset fcp cmd */
8647 8647 reset = 1;
8648 8648 }
8649 8649
8650 8650 /* Check for lun reset */
8651 8651 else if (cmd[10] & 0x10) {
8652 8652 /* prepare iocb */
8653 8653 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8654 8654 hba->channel_fcp)) != FC_SUCCESS) {
8655 8655
8656 8656 if (rval == 0xff) {
8657 8657 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8658 8658 0, 1);
8659 8659 rval = FC_SUCCESS;
8660 8660 }
8661 8661
8662 8662 return (rval);
8663 8663 }
8664 8664
8665 8665 mutex_enter(&sbp->mtx);
8666 8666 sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
8667 8667 sbp->pkt_flags |= PACKET_POLLED;
8668 8668 *pkt_flags = sbp->pkt_flags;
8669 8669 mutex_exit(&sbp->mtx);
8670 8670
8671 8671 #ifdef SAN_DIAG_SUPPORT
8672 8672 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
8673 8673 (HBA_WWN *)&ndlp->nlp_portname, lun);
8674 8674 #endif /* SAN_DIAG_SUPPORT */
8675 8675
8676 8676 iocbq->flag |= IOCB_PRIORITY;
8677 8677
8678 8678 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8679 8679 "LUN Reset: did=%x lun=%02x LUN=%02x%02x", did, lun,
8680 8680 cmd[0], cmd[1]);
8681 8681
8682 8682 /* Flush the IO's on the tx queues for this lun */
8683 8683 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
8684 8684
8685 8685 /* This is the lun reset fcp cmd */
8686 8686 reset = 1;
8687 8687 }
8688 8688
8689 8689 channel = emlxs_select_fcp_channel(hba, ndlp, reset);
8690 8690
8691 8691 #ifdef SAN_DIAG_SUPPORT
8692 8692 sbp->sd_start_time = gethrtime();
8693 8693 #endif /* SAN_DIAG_SUPPORT */
8694 8694
8695 8695 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8696 8696 emlxs_swap_fcp_pkt(sbp);
8697 8697 #endif /* EMLXS_MODREV2X */
8698 8698
8699 8699 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
8700 8700
8701 8701 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
8702 8702 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
8703 8703 }
8704 8704
8705 8705 if (reset == 0) {
8706 8706 /*
8707 8707 * tgt lun reset fcp cmd has been prepared
8708 8708 * separately in the beginning
8709 8709 */
8710 8710 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8711 8711 channel)) != FC_SUCCESS) {
8712 8712
8713 8713 if (rval == 0xff) {
8714 8714 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8715 8715 0, 1);
8716 8716 rval = FC_SUCCESS;
8717 8717 }
8718 8718
8719 8719 return (rval);
8720 8720 }
8721 8721 }
8722 8722
8723 8723 cp = &hba->chan[channel];
8724 8724 cp->ulpSendCmd++;
8725 8725
8726 8726 	/* Initialize sbp */
8727 8727 mutex_enter(&sbp->mtx);
8728 8728 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8729 8729 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8730 8730 sbp->node = (void *)ndlp;
8731 8731 sbp->lun = lun;
8732 8732 sbp->class = iocb->ULPCLASS;
8733 8733 sbp->did = ndlp->nlp_DID;
8734 8734 mutex_exit(&sbp->mtx);
8735 8735
8736 8736 if (pkt->pkt_cmdlen) {
8737 8737 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8738 8738 DDI_DMA_SYNC_FORDEV);
8739 8739 }
8740 8740
8741 8741 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
8742 8742 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
8743 8743 DDI_DMA_SYNC_FORDEV);
8744 8744 }
8745 8745
8746 8746 HBASTATS.FcpIssued++;
8747 8747
8748 8748 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8749 8749 return (FC_SUCCESS);
8750 8750
8751 8751 } /* emlxs_send_fcp_cmd() */
8752 8752
8753 8753
8754 8754
8755 8755
8756 8756 /*
8757 8757  * This setup has to work for INTX, MSI, and MSIX.
8758 8758  * For INTX, intr_count is always 1.
8759 8759  * For MSI, intr_count is always 2 by default.
8760 8760  * For MSIX, intr_count is currently configurable (1, 2, 4, or 8).
8761 8761 */
8762 8762 extern int
8763 8763 emlxs_select_msiid(emlxs_hba_t *hba)
8764 8764 {
8765 8765 int msiid = 0;
8766 8766
8767 8767 /* We use round-robin */
8768 8768 mutex_enter(&EMLXS_MSIID_LOCK);
8769 8769 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8770 8770 msiid = hba->last_msiid;
8771 8771 hba->last_msiid ++;
8772 8772 if (hba->last_msiid >= hba->intr_count) {
8773 8773 hba->last_msiid = 0;
8774 8774 }
8775 8775 } else {
8776 8776 /* This should work for INTX and MSI also */
8777 8777 /* For SLI3 the chan_count is always 4 */
8778 8778 /* For SLI3 the msiid is limited to chan_count */
8779 8779 msiid = hba->last_msiid;
8780 8780 hba->last_msiid ++;
8781 8781 if (hba->intr_count > hba->chan_count) {
8782 8782 if (hba->last_msiid >= hba->chan_count) {
8783 8783 hba->last_msiid = 0;
8784 8784 }
8785 8785 } else {
8786 8786 if (hba->last_msiid >= hba->intr_count) {
8787 8787 hba->last_msiid = 0;
8788 8788 }
8789 8789 }
8790 8790 }
8791 8791 mutex_exit(&EMLXS_MSIID_LOCK);
8792 8792
8793 8793 return (msiid);
8794 8794 } /* emlxs_select_msiid */
8795 8795
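The round-robin selection above can be summarized independently of the driver structures: the returned id simply cycles through the usable vector range, which is intr_count in SLI4 mode and min(intr_count, chan_count) otherwise. A minimal standalone sketch of that policy follows; the helper name and parameters are illustrative only, not emlxs symbols, and the real routine performs this under EMLXS_MSIID_LOCK.

static int
rr_next_msiid(int *last_msiid, int intr_count, int chan_count, int is_sli4)
{
	/* SLI4 may use every vector; SLI3/INTX/MSI is capped at chan_count */
	int limit = is_sli4 ? intr_count :
	    ((intr_count < chan_count) ? intr_count : chan_count);
	int msiid = *last_msiid;

	*last_msiid = (*last_msiid + 1) % limit;	/* wrap within range */
	return (msiid);
}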
8796 8796
8797 8797 /*
8798 8798  * A channel has an association with an MSI id.
8799 8799  * One MSI id can be associated with multiple channels.
8800 8800 */
8801 8801 extern int
8802 8802 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8803 8803 {
8804 8804 emlxs_config_t *cfg = &CFG;
8805 8805 EQ_DESC_t *eqp;
8806 8806 int chan;
8807 8807 int num_wq;
8808 8808
8809 8809 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8810 8810 /* For SLI4 round robin all WQs associated with the msi_id */
8811 8811 eqp = &hba->sli.sli4.eq[msi_id];
8812 8812
8813 8813 mutex_enter(&eqp->lastwq_lock);
8814 8814 chan = eqp->lastwq;
8815 8815 eqp->lastwq++;
8816 8816 num_wq = cfg[CFG_NUM_WQ].current;
8817 8817 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8818 8818 eqp->lastwq -= num_wq;
8819 8819 }
8820 8820 mutex_exit(&eqp->lastwq_lock);
8821 8821
8822 8822 return (chan);
8823 8823 } else {
8824 8824 /* This is for SLI3 mode */
8825 8825 return (hba->msi2chan[msi_id]);
8826 8826 }
8827 8827
8828 8828 } /* emlxs_msiid_to_chan */
8829 8829
8830 8830
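For the SLI4 branch above, the channels (WQs) served by one EQ/MSI vector occupy the contiguous window [msi_id * num_wq, (msi_id + 1) * num_wq), and lastwq round-robins inside that window. A small sketch of just that wrap logic; the name is illustrative, and it assumes lastwq starts inside the window, as it does in the driver.

static int
rr_next_chan(int *lastwq, int msi_id, int num_wq)
{
	int chan = *lastwq;	/* current WQ/channel for this vector */

	(*lastwq)++;
	if (*lastwq >= ((msi_id + 1) * num_wq)) {
		*lastwq -= num_wq;	/* wrap to the start of the window */
	}
	return (chan);
}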
8831 8831 #ifdef SFCT_SUPPORT
8832 8832 static int32_t
8833 8833 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8834 8834 {
8835 8835 emlxs_hba_t *hba = HBA;
8836 8836 IOCBQ *iocbq;
8837 8837 IOCB *iocb;
8838 8838 NODELIST *ndlp;
8839 8839 CHANNEL *cp;
8840 8840 uint32_t did;
8841 8841
8842 8842 did = sbp->did;
8843 8843 ndlp = sbp->node;
8844 8844 cp = (CHANNEL *)sbp->channel;
8845 8845
8846 8846 iocbq = &sbp->iocbq;
8847 8847 iocb = &iocbq->iocb;
8848 8848
8849 8849 /* Make sure node is still active */
8850 8850 if (!ndlp->nlp_active) {
8851 8851 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8852 8852 "*Node not found. did=%x", did);
8853 8853
8854 8854 return (FC_BADPACKET);
8855 8855 }
8856 8856
8857 8857 /* If gate is closed */
8858 8858 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8859 8859 return (FC_TRAN_BUSY);
8860 8860 }
8861 8861
8862 8862 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8863 8863 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8864 8864 IOERR_SUCCESS) {
8865 8865 return (FC_TRAN_BUSY);
8866 8866 }
8867 8867
8868 8868 HBASTATS.FcpIssued++;
8869 8869
8870 8870 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8871 8871
8872 8872 return (FC_SUCCESS);
8873 8873
8874 8874 } /* emlxs_send_fct_status() */
8875 8875
8876 8876
8877 8877 static int32_t
8878 8878 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8879 8879 {
8880 8880 emlxs_hba_t *hba = HBA;
8881 8881 IOCBQ *iocbq;
8882 8882 IOCB *iocb;
8883 8883 NODELIST *ndlp;
8884 8884 CHANNEL *cp;
8885 8885 uint32_t did;
8886 8886
8887 8887 did = sbp->did;
8888 8888 ndlp = sbp->node;
8889 8889 cp = (CHANNEL *)sbp->channel;
8890 8890
8891 8891 iocbq = &sbp->iocbq;
8892 8892 iocb = &iocbq->iocb;
8893 8893
8894 8894 /* Make sure node is still active */
8895 8895 if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8896 8896 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8897 8897 "*Node not found. did=%x", did);
8898 8898
8899 8899 return (FC_BADPACKET);
8900 8900 }
8901 8901
8902 8902 /* If gate is closed */
8903 8903 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8904 8904 return (FC_TRAN_BUSY);
8905 8905 }
8906 8906
8907 8907 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8908 8908 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8909 8909 IOERR_SUCCESS) {
8910 8910 return (FC_TRAN_BUSY);
8911 8911 }
8912 8912
8913 8913 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8914 8914
8915 8915 return (FC_SUCCESS);
8916 8916
8917 8917 } /* emlxs_send_fct_abort() */
8918 8918
8919 8919 #endif /* SFCT_SUPPORT */
8920 8920
8921 8921
8922 8922 static int32_t
8923 8923 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8924 8924 {
8925 8925 emlxs_hba_t *hba = HBA;
8926 8926 fc_packet_t *pkt;
8927 8927 IOCBQ *iocbq;
8928 8928 IOCB *iocb;
8929 8929 CHANNEL *cp;
8930 8930 uint32_t i;
8931 8931 NODELIST *ndlp;
8932 8932 uint32_t did;
8933 8933 int32_t rval;
8934 8934
8935 8935 pkt = PRIV2PKT(sbp);
8936 8936 cp = &hba->chan[hba->channel_ip];
8937 8937 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8938 8938
8939 8939 /* Check if node exists */
8940 8940 /* Broadcast did is always a success */
8941 8941 ndlp = emlxs_node_find_did(port, did, 1);
8942 8942
8943 8943 if (!ndlp || !ndlp->nlp_active) {
8944 8944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8945 8945 "Node not found. did=0x%x", did);
8946 8946
8947 8947 return (FC_BADPACKET);
8948 8948 }
8949 8949
8950 8950 /* Check if gate is temporarily closed */
8951 8951 if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8952 8952 return (FC_TRAN_BUSY);
8953 8953 }
8954 8954
8955 8955 /* Check if an exchange has been created */
8956 8956 if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8957 8957 /* No exchange. Try creating one */
8958 8958 (void) emlxs_create_xri(port, cp, ndlp);
8959 8959
8960 8960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8961 8961 "Adapter Busy. Exchange not found. did=0x%x", did);
8962 8962
8963 8963 return (FC_TRAN_BUSY);
8964 8964 }
8965 8965
8966 8966 /* ULP PATCH: pkt_cmdlen was found to be set to zero */
8967 8967 /* on BROADCAST commands */
8968 8968 if (pkt->pkt_cmdlen == 0) {
8969 8969 /* Set the pkt_cmdlen to the cookie size */
8970 8970 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8971 8971 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8972 8972 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8973 8973 }
8974 8974 #else
8975 8975 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8976 8976 #endif /* >= EMLXS_MODREV3 */
8977 8977
8978 8978 }
8979 8979
8980 8980 iocbq = &sbp->iocbq;
8981 8981 iocb = &iocbq->iocb;
8982 8982
8983 8983 iocbq->node = (void *)ndlp;
8984 8984 if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8985 8985
8986 8986 if (rval == 0xff) {
8987 8987 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8988 8988 rval = FC_SUCCESS;
8989 8989 }
8990 8990
8991 8991 return (rval);
8992 8992 }
8993 8993
8994 8994 cp->ulpSendCmd++;
8995 8995
8996 8996 	/* Initialize sbp */
8997 8997 mutex_enter(&sbp->mtx);
8998 8998 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8999 8999 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9000 9000 sbp->node = (void *)ndlp;
9001 9001 sbp->lun = EMLXS_LUN_NONE;
9002 9002 sbp->class = iocb->ULPCLASS;
9003 9003 sbp->did = did;
9004 9004 mutex_exit(&sbp->mtx);
9005 9005
9006 9006 if (pkt->pkt_cmdlen) {
9007 9007 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9008 9008 DDI_DMA_SYNC_FORDEV);
9009 9009 }
9010 9010
9011 9011 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9012 9012
9013 9013 return (FC_SUCCESS);
9014 9014
9015 9015 } /* emlxs_send_ip() */
9016 9016
9017 9017
9018 9018 static int32_t
9019 9019 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9020 9020 {
9021 9021 emlxs_hba_t *hba = HBA;
9022 9022 emlxs_port_t *vport;
9023 9023 fc_packet_t *pkt;
9024 9024 IOCBQ *iocbq;
9025 9025 CHANNEL *cp;
9026 9026 SERV_PARM *sp;
9027 9027 uint32_t cmd;
9028 9028 int i;
9029 9029 ELS_PKT *els_pkt;
9030 9030 NODELIST *ndlp;
9031 9031 uint32_t did;
9032 9032 char fcsp_msg[32];
9033 9033 int rc;
9034 9034 int32_t rval;
9035 9035 emlxs_config_t *cfg = &CFG;
9036 9036
9037 9037 fcsp_msg[0] = 0;
9038 9038 pkt = PRIV2PKT(sbp);
9039 9039 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9040 9040 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9041 9041
9042 9042 iocbq = &sbp->iocbq;
9043 9043
9044 9044 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9045 9045 emlxs_swap_els_pkt(sbp);
9046 9046 #endif /* EMLXS_MODREV2X */
9047 9047
9048 9048 cmd = *((uint32_t *)pkt->pkt_cmd);
9049 9049 cmd &= ELS_CMD_MASK;
9050 9050
9051 9051 /* Point of no return, except for ADISC & PLOGI */
9052 9052
9053 9053 /* Check node */
9054 9054 switch (cmd) {
9055 9055 case ELS_CMD_FLOGI:
9056 9056 case ELS_CMD_FDISC:
9057 9057 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9058 9058
9059 9059 if (emlxs_vpi_logi_notify(port, sbp)) {
9060 9060 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9061 9061 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9062 9062 emlxs_unswap_pkt(sbp);
9063 9063 #endif /* EMLXS_MODREV2X */
9064 9064 return (FC_FAILURE);
9065 9065 }
9066 9066 } else {
9067 9067 /*
9068 9068 * If FLOGI is already complete, then we
9069 9069 * should not be receiving another FLOGI.
9070 9070 * Reset the link to recover.
9071 9071 */
9072 9072 if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9073 9073 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9074 9074 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9075 9075 emlxs_unswap_pkt(sbp);
9076 9076 #endif /* EMLXS_MODREV2X */
9077 9077
9078 9078 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
9079 9079 return (FC_FAILURE);
9080 9080 }
9081 9081
9082 9082 if (port->vpi > 0) {
9083 9083 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9084 9084 }
9085 9085 }
9086 9086
9087 9087 /* Command may have been changed */
9088 9088 cmd = *((uint32_t *)pkt->pkt_cmd);
9089 9089 cmd &= ELS_CMD_MASK;
9090 9090
9091 9091 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9092 9092 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9093 9093 }
9094 9094
9095 9095 ndlp = NULL;
9096 9096
9097 9097 /* We will process these cmds at the bottom of this routine */
9098 9098 break;
9099 9099
9100 9100 case ELS_CMD_PLOGI:
9101 9101 		/* Make sure we don't log into ourselves */
9102 9102 for (i = 0; i < MAX_VPORTS; i++) {
9103 9103 vport = &VPORT(i);
9104 9104
9105 9105 if (!(vport->flag & EMLXS_INI_BOUND)) {
9106 9106 continue;
9107 9107 }
9108 9108
9109 9109 if (did == vport->did) {
9110 9110 pkt->pkt_state = FC_PKT_NPORT_RJT;
9111 9111
9112 9112 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9113 9113 emlxs_unswap_pkt(sbp);
9114 9114 #endif /* EMLXS_MODREV2X */
9115 9115
9116 9116 return (FC_FAILURE);
9117 9117 }
9118 9118 }
9119 9119
9120 9120 ndlp = NULL;
9121 9121
9122 9122 if (hba->flag & FC_PT_TO_PT) {
9123 9123 MAILBOXQ *mbox;
9124 9124
9125 9125 /* ULP bug fix */
9126 9126 if (pkt->pkt_cmd_fhdr.s_id == 0) {
9127 9127 pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9128 9128 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9129 9129 "PLOGI: P2P Fix. sid=0-->%x did=%x",
9130 9130 pkt->pkt_cmd_fhdr.s_id,
9131 9131 pkt->pkt_cmd_fhdr.d_id);
9132 9132 }
9133 9133
9134 9134 mutex_enter(&EMLXS_PORT_LOCK);
9135 9135 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9136 9136 port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9137 9137 mutex_exit(&EMLXS_PORT_LOCK);
9138 9138
9139 9139 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9140 9140 /* Update our service parms */
9141 9141 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9142 9142 MEM_MBOX))) {
9143 9143 emlxs_mb_config_link(hba, mbox);
9144 9144
9145 9145 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9146 9146 mbox, MBX_NOWAIT, 0);
9147 9147 if ((rc != MBX_BUSY) &&
9148 9148 (rc != MBX_SUCCESS)) {
9149 9149 emlxs_mem_put(hba, MEM_MBOX,
9150 9150 (void *)mbox);
9151 9151 }
9152 9152 }
9153 9153 }
9154 9154 }
9155 9155
9156 9156 /* We will process these cmds at the bottom of this routine */
9157 9157 break;
9158 9158
9159 9159 default:
9160 9160 ndlp = emlxs_node_find_did(port, did, 1);
9161 9161
9162 9162 /* If an ADISC is being sent and we have no node, */
9163 9163 /* then we must fail the ADISC now */
9164 9164 if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9165 9165 (port->mode == MODE_INITIATOR)) {
9166 9166
9167 9167 /* Build the LS_RJT response */
9168 9168 els_pkt = (ELS_PKT *)pkt->pkt_resp;
9169 9169 els_pkt->elsCode = 0x01;
9170 9170 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9171 9171 els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9172 9172 LSRJT_LOGICAL_ERR;
9173 9173 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9174 9174 LSEXP_NOTHING_MORE;
9175 9175 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9176 9176
9177 9177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9178 9178 "ADISC Rejected. Node not found. did=0x%x", did);
9179 9179
9180 9180 if (sbp->channel == NULL) {
9181 9181 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9182 9182 sbp->channel =
9183 9183 &hba->chan[hba->channel_els];
9184 9184 } else {
9185 9185 sbp->channel =
9186 9186 &hba->chan[FC_ELS_RING];
9187 9187 }
9188 9188 }
9189 9189
9190 9190 /* Return this as rejected by the target */
9191 9191 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9192 9192
9193 9193 return (FC_SUCCESS);
9194 9194 }
9195 9195 }
9196 9196
9197 9197 	/* DID == BCAST_DID is a special case to indicate that */
9198 9198 /* RPI is being passed in seq_id field */
9199 9199 /* This is used by emlxs_send_logo() for target mode */
9200 9200
9201 9201 	/* Initialize iocbq */
9202 9202 iocbq->node = (void *)ndlp;
9203 9203 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9204 9204
9205 9205 if (rval == 0xff) {
9206 9206 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9207 9207 rval = FC_SUCCESS;
9208 9208 }
9209 9209
9210 9210 return (rval);
9211 9211 }
9212 9212
9213 9213 cp = &hba->chan[hba->channel_els];
9214 9214 cp->ulpSendCmd++;
9215 9215 sp = (SERV_PARM *)&els_pkt->un.logi;
9216 9216
9217 9217 /* Check cmd */
9218 9218 switch (cmd) {
9219 9219 case ELS_CMD_PRLI:
9220 9220 /*
9221 9221 * if our firmware version is 3.20 or later,
9222 9222 * set the following bits for FC-TAPE support.
9223 9223 */
9224 9224 if ((port->mode == MODE_INITIATOR) &&
9225 9225 (hba->vpd.feaLevelHigh >= 0x02) &&
9226 9226 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9227 9227 els_pkt->un.prli.ConfmComplAllowed = 1;
9228 9228 els_pkt->un.prli.Retry = 1;
9229 9229 els_pkt->un.prli.TaskRetryIdReq = 1;
9230 9230 } else {
9231 9231 els_pkt->un.prli.ConfmComplAllowed = 0;
9232 9232 els_pkt->un.prli.Retry = 0;
9233 9233 els_pkt->un.prli.TaskRetryIdReq = 0;
9234 9234 }
9235 9235
9236 9236 break;
9237 9237
9238 9238 /* This is a patch for the ULP stack. */
9239 9239
9240 9240 /*
9241 9241 * ULP only reads our service parameters once during bind_port,
9242 9242 * but the service parameters change due to topology.
9243 9243 */
9244 9244 case ELS_CMD_FLOGI:
9245 9245 case ELS_CMD_FDISC:
9246 9246 case ELS_CMD_PLOGI:
9247 9247 case ELS_CMD_PDISC:
9248 9248 /* Copy latest service parameters to payload */
9249 9249 bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9250 9250
9251 9251 if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9252 9252
9253 9253 /* Clear support for virtual fabrics */
9254 9254 /* randomOffset bit controls this for FLOGI */
9255 9255 sp->cmn.randomOffset = 0;
9256 9256
9257 9257 /* Set R_A_TOV to current value */
9258 9258 sp->cmn.w2.r_a_tov =
9259 9259 LE_SWAP32((hba->fc_ratov * 1000));
9260 9260 }
9261 9261
9262 9262 if ((hba->flag & FC_NPIV_ENABLED) &&
9263 9263 (hba->flag & FC_NPIV_SUPPORTED) &&
9264 9264 (cmd == ELS_CMD_PLOGI)) {
9265 9265 emlxs_vvl_fmt_t *vvl;
9266 9266
9267 9267 sp->VALID_VENDOR_VERSION = 1;
9268 9268 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9269 9269 vvl->un0.w0.oui = 0x0000C9;
9270 9270 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9271 9271 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
9272 9272 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9273 9273 }
9274 9274
9275 9275 #ifdef DHCHAP_SUPPORT
9276 9276 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9277 9277 #endif /* DHCHAP_SUPPORT */
9278 9278
9279 9279 break;
9280 9280 }
9281 9281
9282 9282 /* Initialize the sbp */
9283 9283 mutex_enter(&sbp->mtx);
9284 9284 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9285 9285 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9286 9286 sbp->node = (void *)ndlp;
9287 9287 sbp->lun = EMLXS_LUN_NONE;
9288 9288 sbp->did = did;
9289 9289 mutex_exit(&sbp->mtx);
9290 9290
9291 9291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9292 9292 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9293 9293
9294 9294 if (pkt->pkt_cmdlen) {
9295 9295 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9296 9296 DDI_DMA_SYNC_FORDEV);
9297 9297 }
9298 9298
9299 9299 /* Check node */
9300 9300 switch (cmd) {
9301 9301 case ELS_CMD_FLOGI:
9302 9302 case ELS_CMD_FDISC:
9303 9303 if (port->mode == MODE_INITIATOR) {
9304 9304 /* Make sure fabric node is destroyed */
9305 9305 /* It should already have been destroyed at link down */
9306 9306 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9307 9307 ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9308 9308 if (ndlp) {
9309 9309 if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9310 9310 NULL, NULL, iocbq) == 0) {
9311 9311 /* Deferring iocb tx until */
9312 9312 /* completion of unreg */
9313 9313 return (FC_SUCCESS);
9314 9314 }
9315 9315 }
9316 9316 }
9317 9317 }
9318 9318 break;
9319 9319
9320 9320 case ELS_CMD_PLOGI:
9321 9321
9322 9322 ndlp = emlxs_node_find_did(port, did, 1);
9323 9323
9324 9324 if (ndlp && ndlp->nlp_active) {
9325 9325 /* Close the node for any further normal IO */
9326 9326 emlxs_node_close(port, ndlp, hba->channel_fcp,
9327 9327 pkt->pkt_timeout + 10);
9328 9328 emlxs_node_close(port, ndlp, hba->channel_ip,
9329 9329 pkt->pkt_timeout + 10);
9330 9330
9331 9331 /* Flush tx queues */
9332 9332 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9333 9333
9334 9334 /* Flush chip queues */
9335 9335 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9336 9336 }
9337 9337
9338 9338 break;
9339 9339
9340 9340 case ELS_CMD_PRLI:
9341 9341
9342 9342 ndlp = emlxs_node_find_did(port, did, 1);
9343 9343
9344 9344 if (ndlp && ndlp->nlp_active) {
9345 9345 /*
9346 9346 * Close the node for any further FCP IO;
9347 9347 * Flush all outstanding I/O only if
9348 9348 * "Establish Image Pair" bit is set.
9349 9349 */
9350 9350 emlxs_node_close(port, ndlp, hba->channel_fcp,
9351 9351 pkt->pkt_timeout + 10);
9352 9352
9353 9353 if (els_pkt->un.prli.estabImagePair) {
9354 9354 /* Flush tx queues */
9355 9355 (void) emlxs_tx_node_flush(port, ndlp,
9356 9356 &hba->chan[hba->channel_fcp], 0, 0);
9357 9357
9358 9358 /* Flush chip queues */
9359 9359 (void) emlxs_chipq_node_flush(port,
9360 9360 &hba->chan[hba->channel_fcp], ndlp, 0);
9361 9361 }
9362 9362 }
9363 9363
9364 9364 break;
9365 9365
9366 9366 }
9367 9367
9368 9368 HBASTATS.ElsCmdIssued++;
9369 9369
9370 9370 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9371 9371
9372 9372 return (FC_SUCCESS);
9373 9373
9374 9374 } /* emlxs_send_els() */
9375 9375
9376 9376
9377 9377
9378 9378
9379 9379 static int32_t
9380 9380 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9381 9381 {
9382 9382 emlxs_hba_t *hba = HBA;
9383 9383 emlxs_config_t *cfg = &CFG;
9384 9384 fc_packet_t *pkt;
9385 9385 IOCBQ *iocbq;
9386 9386 IOCB *iocb;
9387 9387 NODELIST *ndlp;
9388 9388 CHANNEL *cp;
9389 9389 int i;
9390 9390 uint32_t cmd;
9391 9391 uint32_t ucmd;
9392 9392 ELS_PKT *els_pkt;
9393 9393 fc_unsol_buf_t *ubp;
9394 9394 emlxs_ub_priv_t *ub_priv;
9395 9395 uint32_t did;
9396 9396 char fcsp_msg[32];
9397 9397 uint8_t *ub_buffer;
9398 9398 int32_t rval;
9399 9399
9400 9400 fcsp_msg[0] = 0;
9401 9401 pkt = PRIV2PKT(sbp);
9402 9402 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9403 9403 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9404 9404
9405 9405 iocbq = &sbp->iocbq;
9406 9406 iocb = &iocbq->iocb;
9407 9407
9408 9408 /* Acquire the unsolicited command this pkt is replying to */
9409 9409 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
9410 9410 /* This is for auto replies when no ub's are used */
9411 9411 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
9412 9412 ubp = NULL;
9413 9413 ub_priv = NULL;
9414 9414 ub_buffer = NULL;
9415 9415
9416 9416 #ifdef SFCT_SUPPORT
9417 9417 if (sbp->fct_cmd) {
9418 9418 fct_els_t *els =
9419 9419 (fct_els_t *)sbp->fct_cmd->cmd_specific;
9420 9420 ub_buffer = (uint8_t *)els->els_req_payload;
9421 9421 }
9422 9422 #endif /* SFCT_SUPPORT */
9423 9423
9424 9424 } else {
9425 9425 /* Find the ub buffer that goes with this reply */
9426 9426 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
9427 9427 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
9428 9428 "ELS reply: Invalid oxid=%x",
9429 9429 pkt->pkt_cmd_fhdr.ox_id);
9430 9430 return (FC_BADPACKET);
9431 9431 }
9432 9432
9433 9433 ub_buffer = (uint8_t *)ubp->ub_buffer;
9434 9434 ub_priv = ubp->ub_fca_private;
9435 9435 ucmd = ub_priv->cmd;
9436 9436
9437 9437 ub_priv->flags |= EMLXS_UB_REPLY;
9438 9438
9439 9439 /* Reset oxid to ELS command */
9440 9440 /* We do this because the ub is only valid */
9441 9441 /* until we return from this thread */
9442 9442 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
9443 9443 }
9444 9444
9445 9445 /* Save the result */
9446 9446 sbp->ucmd = ucmd;
9447 9447
9448 9448 if (sbp->channel == NULL) {
9449 9449 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9450 9450 sbp->channel = &hba->chan[hba->channel_els];
9451 9451 } else {
9452 9452 sbp->channel = &hba->chan[FC_ELS_RING];
9453 9453 }
9454 9454 }
9455 9455
9456 9456 /* Check for interceptions */
9457 9457 switch (ucmd) {
9458 9458
9459 9459 #ifdef ULP_PATCH2
9460 9460 case ELS_CMD_LOGO:
9461 9461 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
9462 9462 break;
9463 9463 }
9464 9464
9465 9465 /* Check if this was generated by ULP and not us */
9466 9466 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9467 9467
9468 9468 /*
9469 9469 * Since we replied to this already,
9470 9470 * we won't need to send this now
9471 9471 */
9472 9472 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9473 9473
9474 9474 return (FC_SUCCESS);
9475 9475 }
9476 9476
9477 9477 break;
9478 9478 #endif /* ULP_PATCH2 */
9479 9479
9480 9480 #ifdef ULP_PATCH3
9481 9481 case ELS_CMD_PRLI:
9482 9482 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
9483 9483 break;
9484 9484 }
9485 9485
9486 9486 /* Check if this was generated by ULP and not us */
9487 9487 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9488 9488
9489 9489 /*
9490 9490 * Since we replied to this already,
9491 9491 * we won't need to send this now
9492 9492 */
9493 9493 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9494 9494
9495 9495 return (FC_SUCCESS);
9496 9496 }
9497 9497
9498 9498 break;
9499 9499 #endif /* ULP_PATCH3 */
9500 9500
9501 9501
9502 9502 #ifdef ULP_PATCH4
9503 9503 case ELS_CMD_PRLO:
9504 9504 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
9505 9505 break;
9506 9506 }
9507 9507
9508 9508 /* Check if this was generated by ULP and not us */
9509 9509 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9510 9510 /*
9511 9511 * Since we replied to this already,
9512 9512 * we won't need to send this now
9513 9513 */
9514 9514 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9515 9515
9516 9516 return (FC_SUCCESS);
9517 9517 }
9518 9518
9519 9519 break;
9520 9520 #endif /* ULP_PATCH4 */
9521 9521
9522 9522 #ifdef ULP_PATCH6
9523 9523 case ELS_CMD_RSCN:
9524 9524 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
9525 9525 break;
9526 9526 }
9527 9527
9528 9528 /* Check if this RSCN was generated by us */
9529 9529 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9530 9530 cmd = *((uint32_t *)pkt->pkt_cmd);
9531 9531 cmd = LE_SWAP32(cmd);
9532 9532 cmd &= ELS_CMD_MASK;
9533 9533
9534 9534 /*
9535 9535 * If ULP is accepting this,
9536 9536 * then close affected node
9537 9537 */
9538 9538 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9539 9539 cmd == ELS_CMD_ACC) {
9540 9540 fc_rscn_t *rscn;
9541 9541 uint32_t count;
9542 9542 uint32_t *lp;
9543 9543
9544 9544 /*
9545 9545 * Only the Leadville code path will
9546 9546 * come thru here. The RSCN data is NOT
9547 9547 * swapped properly for the Comstar code
9548 9548 * path.
9549 9549 */
9550 9550 lp = (uint32_t *)ub_buffer;
9551 9551 rscn = (fc_rscn_t *)lp++;
9552 9552 count =
9553 9553 ((rscn->rscn_payload_len - 4) / 4);
9554 9554
9555 9555 /* Close affected ports */
9556 9556 for (i = 0; i < count; i++, lp++) {
9557 9557 (void) emlxs_port_offline(port,
9558 9558 *lp);
9559 9559 }
9560 9560 }
9561 9561
9562 9562 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9563 9563 "RSCN %s: did=%x oxid=%x rxid=%x. "
9564 9564 "Intercepted.", emlxs_elscmd_xlate(cmd),
9565 9565 did, pkt->pkt_cmd_fhdr.ox_id,
9566 9566 pkt->pkt_cmd_fhdr.rx_id);
9567 9567
9568 9568 /*
9569 9569 * Since we generated this RSCN,
9570 9570 * we won't need to send this reply
9571 9571 */
9572 9572 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9573 9573
9574 9574 return (FC_SUCCESS);
9575 9575 }
9576 9576
9577 9577 break;
9578 9578 #endif /* ULP_PATCH6 */
9579 9579
9580 9580 case ELS_CMD_PLOGI:
9581 9581 /* Check if this PLOGI was generated by us */
9582 9582 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9583 9583 cmd = *((uint32_t *)pkt->pkt_cmd);
9584 9584 cmd = LE_SWAP32(cmd);
9585 9585 cmd &= ELS_CMD_MASK;
9586 9586
9587 9587 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9588 9588 "PLOGI %s: did=%x oxid=%x rxid=%x. "
9589 9589 "Intercepted.", emlxs_elscmd_xlate(cmd),
9590 9590 did, pkt->pkt_cmd_fhdr.ox_id,
9591 9591 pkt->pkt_cmd_fhdr.rx_id);
9592 9592
9593 9593 /*
9594 9594 * Since we generated this PLOGI,
9595 9595 * we won't need to send this reply
9596 9596 */
9597 9597 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9598 9598
9599 9599 return (FC_SUCCESS);
9600 9600 }
9601 9601
9602 9602 break;
9603 9603 }
9604 9604
9605 9605 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9606 9606 emlxs_swap_els_pkt(sbp);
9607 9607 #endif /* EMLXS_MODREV2X */
9608 9608
9609 9609
9610 9610 cmd = *((uint32_t *)pkt->pkt_cmd);
9611 9611 cmd &= ELS_CMD_MASK;
9612 9612
9613 9613 /* Check if modifications are needed */
9614 9614 switch (ucmd) {
9615 9615 case (ELS_CMD_PRLI):
9616 9616
9617 9617 if (cmd == ELS_CMD_ACC) {
9618 9618 /* This is a patch for the ULP stack. */
9619 9619 /* ULP does not keep track of FCP2 support */
9620 9620 if ((port->mode == MODE_INITIATOR) &&
9621 9621 (hba->vpd.feaLevelHigh >= 0x02) &&
9622 9622 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9623 9623 els_pkt->un.prli.ConfmComplAllowed = 1;
9624 9624 els_pkt->un.prli.Retry = 1;
9625 9625 els_pkt->un.prli.TaskRetryIdReq = 1;
9626 9626 } else {
9627 9627 els_pkt->un.prli.ConfmComplAllowed = 0;
9628 9628 els_pkt->un.prli.Retry = 0;
9629 9629 els_pkt->un.prli.TaskRetryIdReq = 0;
9630 9630 }
9631 9631 }
9632 9632
9633 9633 break;
9634 9634
9635 9635 case ELS_CMD_FLOGI:
9636 9636 case ELS_CMD_FDISC:
9637 9637 if (cmd == ELS_CMD_ACC) {
9638 9638 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9639 9639
9640 9640 /* This is a patch for the ULP stack. */
9641 9641
9642 9642 /*
9643 9643 * ULP only reads our service parameters
9644 9644 * once during bind_port, but the service
9645 9645 * parameters change due to topology.
9646 9646 */
9647 9647
9648 9648 /* Copy latest service parameters to payload */
9649 9649 bcopy((void *)&port->sparam,
9650 9650 (void *)sp, sizeof (SERV_PARM));
9651 9651
9652 9652 /* We are in pt-to-pt mode. Set R_A_TOV to default */
9653 9653 sp->cmn.w2.r_a_tov =
9654 9654 LE_SWAP32((FF_DEF_RATOV * 1000));
9655 9655
9656 9656 /* Clear support for virtual fabrics */
9657 9657 /* randomOffset bit controls this for FLOGI */
9658 9658 sp->cmn.randomOffset = 0;
9659 9659 #ifdef DHCHAP_SUPPORT
9660 9660 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9661 9661 #endif /* DHCHAP_SUPPORT */
9662 9662 }
9663 9663 break;
9664 9664
9665 9665 case ELS_CMD_PLOGI:
9666 9666 case ELS_CMD_PDISC:
9667 9667 if (cmd == ELS_CMD_ACC) {
9668 9668 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9669 9669
9670 9670 /* This is a patch for the ULP stack. */
9671 9671
9672 9672 /*
9673 9673 * ULP only reads our service parameters
9674 9674 * once during bind_port, but the service
9675 9675 * parameters change due to topology.
9676 9676 */
9677 9677
9678 9678 /* Copy latest service parameters to payload */
9679 9679 bcopy((void *)&port->sparam,
9680 9680 (void *)sp, sizeof (SERV_PARM));
9681 9681
9682 9682 #ifdef DHCHAP_SUPPORT
9683 9683 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9684 9684 #endif /* DHCHAP_SUPPORT */
9685 9685 }
9686 9686 break;
9687 9687
9688 9688 }
9689 9689
9690 9690 	/* Initialize iocbq */
9691 9691 iocbq->node = (void *)NULL;
9692 9692 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9693 9693
9694 9694 if (rval == 0xff) {
9695 9695 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9696 9696 rval = FC_SUCCESS;
9697 9697 }
9698 9698
9699 9699 return (rval);
9700 9700 }
9701 9701
9702 9702 cp = &hba->chan[hba->channel_els];
9703 9703 cp->ulpSendCmd++;
9704 9704
9705 9705 	/* Initialize sbp */
9706 9706 mutex_enter(&sbp->mtx);
9707 9707 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9708 9708 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9709 9709 sbp->node = (void *) NULL;
9710 9710 sbp->lun = EMLXS_LUN_NONE;
9711 9711 sbp->class = iocb->ULPCLASS;
9712 9712 sbp->did = did;
9713 9713 mutex_exit(&sbp->mtx);
9714 9714
9715 9715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9716 9716 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
9717 9717 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
9718 9718 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
9719 9719
9720 9720 /* Process nodes */
9721 9721 switch (ucmd) {
9722 9722 case ELS_CMD_RSCN:
9723 9723 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9724 9724 cmd == ELS_CMD_ACC) {
9725 9725 fc_rscn_t *rscn;
9726 9726 uint32_t count;
9727 9727 uint32_t *lp = NULL;
9728 9728
9729 9729 /*
9730 9730 * Only the Leadville code path will come thru
9731 9731 * here. The RSCN data is NOT swapped properly
9732 9732 * for the Comstar code path.
9733 9733 */
9734 9734 lp = (uint32_t *)ub_buffer;
9735 9735 rscn = (fc_rscn_t *)lp++;
9736 9736 count = ((rscn->rscn_payload_len - 4) / 4);
9737 9737
9738 9738 /* Close affected ports */
9739 9739 for (i = 0; i < count; i++, lp++) {
9740 9740 (void) emlxs_port_offline(port, *lp);
9741 9741 }
9742 9742 }
9743 9743 break;
9744 9744
9745 9745 case ELS_CMD_PLOGI:
9746 9746 if (cmd == ELS_CMD_ACC) {
9747 9747 ndlp = emlxs_node_find_did(port, did, 1);
9748 9748
9749 9749 if (ndlp && ndlp->nlp_active) {
9750 9750 /* Close the node for any further normal IO */
9751 9751 emlxs_node_close(port, ndlp, hba->channel_fcp,
9752 9752 pkt->pkt_timeout + 10);
9753 9753 emlxs_node_close(port, ndlp, hba->channel_ip,
9754 9754 pkt->pkt_timeout + 10);
9755 9755
9756 9756 /* Flush tx queue */
9757 9757 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9758 9758
9759 9759 /* Flush chip queue */
9760 9760 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9761 9761 }
9762 9762 }
9763 9763 break;
9764 9764
9765 9765 case ELS_CMD_PRLI:
9766 9766 if (cmd == ELS_CMD_ACC) {
9767 9767 ndlp = emlxs_node_find_did(port, did, 1);
9768 9768
9769 9769 if (ndlp && ndlp->nlp_active) {
9770 9770 /* Close the node for any further normal IO */
9771 9771 emlxs_node_close(port, ndlp, hba->channel_fcp,
9772 9772 pkt->pkt_timeout + 10);
9773 9773
9774 9774 /* Flush tx queues */
9775 9775 (void) emlxs_tx_node_flush(port, ndlp,
9776 9776 &hba->chan[hba->channel_fcp], 0, 0);
9777 9777
9778 9778 /* Flush chip queues */
9779 9779 (void) emlxs_chipq_node_flush(port,
9780 9780 &hba->chan[hba->channel_fcp], ndlp, 0);
9781 9781 }
9782 9782 }
9783 9783 break;
9784 9784
9785 9785 case ELS_CMD_PRLO:
9786 9786 if (cmd == ELS_CMD_ACC) {
9787 9787 ndlp = emlxs_node_find_did(port, did, 1);
9788 9788
9789 9789 if (ndlp && ndlp->nlp_active) {
9790 9790 /* Close the node for any further normal IO */
9791 9791 emlxs_node_close(port, ndlp,
9792 9792 hba->channel_fcp, 60);
9793 9793
9794 9794 /* Flush tx queues */
9795 9795 (void) emlxs_tx_node_flush(port, ndlp,
9796 9796 &hba->chan[hba->channel_fcp], 0, 0);
9797 9797
9798 9798 /* Flush chip queues */
9799 9799 (void) emlxs_chipq_node_flush(port,
9800 9800 &hba->chan[hba->channel_fcp], ndlp, 0);
9801 9801 }
9802 9802 }
9803 9803
9804 9804 break;
9805 9805
9806 9806 case ELS_CMD_LOGO:
9807 9807 if (cmd == ELS_CMD_ACC) {
9808 9808 ndlp = emlxs_node_find_did(port, did, 1);
9809 9809
9810 9810 if (ndlp && ndlp->nlp_active) {
9811 9811 /* Close the node for any further normal IO */
9812 9812 emlxs_node_close(port, ndlp,
9813 9813 hba->channel_fcp, 60);
9814 9814 emlxs_node_close(port, ndlp,
9815 9815 hba->channel_ip, 60);
9816 9816
9817 9817 /* Flush tx queues */
9818 9818 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9819 9819
9820 9820 /* Flush chip queues */
9821 9821 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9822 9822 }
9823 9823 }
9824 9824
9825 9825 break;
9826 9826 }
9827 9827
9828 9828 if (pkt->pkt_cmdlen) {
9829 9829 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9830 9830 DDI_DMA_SYNC_FORDEV);
9831 9831 }
9832 9832
9833 9833 HBASTATS.ElsRspIssued++;
9834 9834
9835 9835 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9836 9836
9837 9837 return (FC_SUCCESS);
9838 9838
9839 9839 } /* emlxs_send_els_rsp() */
9840 9840
9841 9841
9842 9842 #ifdef MENLO_SUPPORT
9843 9843 static int32_t
9844 9844 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9845 9845 {
9846 9846 emlxs_hba_t *hba = HBA;
9847 9847 fc_packet_t *pkt;
9848 9848 IOCBQ *iocbq;
9849 9849 IOCB *iocb;
9850 9850 CHANNEL *cp;
9851 9851 NODELIST *ndlp;
9852 9852 uint32_t did;
9853 9853 uint32_t *lp;
9854 9854 int32_t rval;
9855 9855
9856 9856 pkt = PRIV2PKT(sbp);
9857 9857 did = EMLXS_MENLO_DID;
9858 9858 lp = (uint32_t *)pkt->pkt_cmd;
9859 9859
9860 9860 iocbq = &sbp->iocbq;
9861 9861 iocb = &iocbq->iocb;
9862 9862
9863 9863 ndlp = emlxs_node_find_did(port, did, 1);
9864 9864
9865 9865 if (!ndlp || !ndlp->nlp_active) {
9866 9866 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9867 9867 "Node not found. did=0x%x", did);
9868 9868
9869 9869 return (FC_BADPACKET);
9870 9870 }
9871 9871
9872 9872 iocbq->node = (void *) ndlp;
9873 9873 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9874 9874
9875 9875 if (rval == 0xff) {
9876 9876 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9877 9877 rval = FC_SUCCESS;
9878 9878 }
9879 9879
9880 9880 return (rval);
9881 9881 }
9882 9882
9883 9883 cp = &hba->chan[hba->channel_ct];
9884 9884 cp->ulpSendCmd++;
9885 9885
9886 9886 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9887 9887 /* Cmd phase */
9888 9888
9889 9889 		/* Initialize iocb */
9890 9890 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9891 9891 iocb->ULPCONTEXT = 0;
9892 9892 iocb->ULPPU = 3;
9893 9893
9894 9894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9895 9895 "%s: [%08x,%08x,%08x,%08x]",
9896 9896 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9897 9897 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9898 9898
9899 9899 } else { /* FC_PKT_OUTBOUND */
9900 9900
9901 9901 /* MENLO_CMD_FW_DOWNLOAD Data Phase */
9902 9902 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9903 9903
9904 9904 		/* Initialize iocb */
9905 9905 iocb->un.genreq64.param = 0;
9906 9906 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9907 9907 iocb->ULPPU = 1;
9908 9908
9909 9909 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9910 9910 "%s: Data: rxid=0x%x size=%d",
9911 9911 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9912 9912 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9913 9913 }
9914 9914
9915 9915 	/* Initialize sbp */
9916 9916 mutex_enter(&sbp->mtx);
9917 9917 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9918 9918 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9919 9919 sbp->node = (void *) ndlp;
9920 9920 sbp->lun = EMLXS_LUN_NONE;
9921 9921 sbp->class = iocb->ULPCLASS;
9922 9922 sbp->did = did;
9923 9923 mutex_exit(&sbp->mtx);
9924 9924
9925 9925 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9926 9926 DDI_DMA_SYNC_FORDEV);
9927 9927
9928 9928 HBASTATS.CtCmdIssued++;
9929 9929
9930 9930 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9931 9931
9932 9932 return (FC_SUCCESS);
9933 9933
9934 9934 } /* emlxs_send_menlo() */
9935 9935 #endif /* MENLO_SUPPORT */
9936 9936
9937 9937
9938 9938 static int32_t
9939 9939 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9940 9940 {
9941 9941 emlxs_hba_t *hba = HBA;
9942 9942 fc_packet_t *pkt;
9943 9943 IOCBQ *iocbq;
9944 9944 IOCB *iocb;
9945 9945 NODELIST *ndlp;
9946 9946 uint32_t did;
9947 9947 CHANNEL *cp;
9948 9948 int32_t rval;
9949 9949
9950 9950 pkt = PRIV2PKT(sbp);
9951 9951 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9952 9952
9953 9953 iocbq = &sbp->iocbq;
9954 9954 iocb = &iocbq->iocb;
9955 9955
9956 9956 ndlp = emlxs_node_find_did(port, did, 1);
9957 9957
9958 9958 if (!ndlp || !ndlp->nlp_active) {
9959 9959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9960 9960 "Node not found. did=0x%x", did);
9961 9961
9962 9962 return (FC_BADPACKET);
9963 9963 }
9964 9964
9965 9965 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9966 9966 emlxs_swap_ct_pkt(sbp);
9967 9967 #endif /* EMLXS_MODREV2X */
9968 9968
9969 9969 iocbq->node = (void *)ndlp;
9970 9970 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9971 9971
9972 9972 if (rval == 0xff) {
9973 9973 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9974 9974 rval = FC_SUCCESS;
9975 9975 }
9976 9976
9977 9977 return (rval);
9978 9978 }
9979 9979
9980 9980 cp = &hba->chan[hba->channel_ct];
9981 9981 cp->ulpSendCmd++;
9982 9982
9983 9983 	/* Initialize sbp */
9984 9984 mutex_enter(&sbp->mtx);
9985 9985 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9986 9986 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9987 9987 sbp->node = (void *)ndlp;
9988 9988 sbp->lun = EMLXS_LUN_NONE;
9989 9989 sbp->class = iocb->ULPCLASS;
9990 9990 sbp->did = did;
9991 9991 mutex_exit(&sbp->mtx);
9992 9992
9993 9993 if (did == NAMESERVER_DID) {
9994 9994 SLI_CT_REQUEST *CtCmd;
9995 9995 uint32_t *lp0;
9996 9996
9997 9997 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9998 9998 lp0 = (uint32_t *)pkt->pkt_cmd;
9999 9999
10000 10000 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10001 10001 "%s: did=%x [%08x,%08x]",
10002 10002 emlxs_ctcmd_xlate(
10003 10003 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10004 10004 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10005 10005
10006 10006 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
10007 10007 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
10008 10008 }
10009 10009
10010 10010 } else if (did == FDMI_DID) {
10011 10011 SLI_CT_REQUEST *CtCmd;
10012 10012 uint32_t *lp0;
10013 10013
10014 10014 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10015 10015 lp0 = (uint32_t *)pkt->pkt_cmd;
10016 10016
10017 10017 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10018 10018 "%s: did=%x [%08x,%08x]",
10019 10019 emlxs_mscmd_xlate(
10020 10020 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10021 10021 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10022 10022 } else {
10023 10023 SLI_CT_REQUEST *CtCmd;
10024 10024 uint32_t *lp0;
10025 10025
10026 10026 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10027 10027 lp0 = (uint32_t *)pkt->pkt_cmd;
10028 10028
10029 10029 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10030 10030 "%s: did=%x [%08x,%08x]",
10031 10031 emlxs_rmcmd_xlate(
10032 10032 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10033 10033 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10034 10034 }
10035 10035
10036 10036 if (pkt->pkt_cmdlen) {
10037 10037 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10038 10038 DDI_DMA_SYNC_FORDEV);
10039 10039 }
10040 10040
10041 10041 HBASTATS.CtCmdIssued++;
10042 10042
10043 10043 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10044 10044
10045 10045 return (FC_SUCCESS);
10046 10046
10047 10047 } /* emlxs_send_ct() */
10048 10048
10049 10049
10050 10050 static int32_t
10051 10051 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
10052 10052 {
10053 10053 emlxs_hba_t *hba = HBA;
10054 10054 fc_packet_t *pkt;
10055 10055 CHANNEL *cp;
10056 10056 IOCBQ *iocbq;
10057 10057 IOCB *iocb;
10058 10058 uint32_t *cmd;
10059 10059 SLI_CT_REQUEST *CtCmd;
10060 10060 int32_t rval;
10061 10061
10062 10062 pkt = PRIV2PKT(sbp);
10063 10063 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10064 10064 cmd = (uint32_t *)pkt->pkt_cmd;
10065 10065
10066 10066 iocbq = &sbp->iocbq;
10067 10067 iocb = &iocbq->iocb;
10068 10068
10069 10069 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10070 10070 emlxs_swap_ct_pkt(sbp);
10071 10071 #endif /* EMLXS_MODREV2X */
10072 10072
10073 10073 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
10074 10074
10075 10075 if (rval == 0xff) {
10076 10076 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
10077 10077 rval = FC_SUCCESS;
10078 10078 }
10079 10079
10080 10080 return (rval);
10081 10081 }
10082 10082
10083 10083 cp = &hba->chan[hba->channel_ct];
10084 10084 cp->ulpSendCmd++;
10085 10085
10086 10086 	/* Initialize sbp */
10087 10087 mutex_enter(&sbp->mtx);
10088 10088 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10089 10089 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10090 10090 sbp->node = NULL;
10091 10091 sbp->lun = EMLXS_LUN_NONE;
10092 10092 sbp->class = iocb->ULPCLASS;
10093 10093 mutex_exit(&sbp->mtx);
10094 10094
10095 10095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
10096 10096 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
10097 10097 emlxs_rmcmd_xlate(LE_SWAP16(
10098 10098 CtCmd->CommandResponse.bits.CmdRsp)),
10099 10099 CtCmd->ReasonCode, CtCmd->Explanation,
10100 10100 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
10101 10101 pkt->pkt_cmd_fhdr.rx_id);
10102 10102
10103 10103 if (pkt->pkt_cmdlen) {
10104 10104 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10105 10105 DDI_DMA_SYNC_FORDEV);
10106 10106 }
10107 10107
10108 10108 HBASTATS.CtRspIssued++;
10109 10109
10110 10110 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10111 10111
10112 10112 return (FC_SUCCESS);
10113 10113
10114 10114 } /* emlxs_send_ct_rsp() */
10115 10115
10116 10116
10117 10117 /*
10118 10118 * emlxs_get_instance()
10119 10119  * Given a DDI instance (ddiinst), return the Fibre Channel (emlx) instance.
10120 10120 */
10121 10121 extern uint32_t
10122 10122 emlxs_get_instance(int32_t ddiinst)
10123 10123 {
10124 10124 uint32_t i;
10125 10125 uint32_t inst;
10126 10126
10127 10127 mutex_enter(&emlxs_device.lock);
10128 10128
10129 10129 inst = MAX_FC_BRDS;
10130 10130 for (i = 0; i < emlxs_instance_count; i++) {
10131 10131 if (emlxs_instance[i] == ddiinst) {
10132 10132 inst = i;
10133 10133 break;
10134 10134 }
10135 10135 }
10136 10136
10137 10137 mutex_exit(&emlxs_device.lock);
10138 10138
10139 10139 return (inst);
10140 10140
10141 10141 } /* emlxs_get_instance() */
10142 10142
10143 10143
10144 10144 /*
10145 10145 * emlxs_add_instance()
10146 10146  * Given a DDI instance (ddiinst), create a Fibre Channel (emlx) instance.
10147 10147  * emlx instances are assigned in the order emlxs_attach() is called, starting at 0.
10148 10148 */
10149 10149 static uint32_t
10150 10150 emlxs_add_instance(int32_t ddiinst)
10151 10151 {
10152 10152 uint32_t i;
10153 10153
10154 10154 mutex_enter(&emlxs_device.lock);
10155 10155
10156 10156 /* First see if the ddiinst already exists */
10157 10157 for (i = 0; i < emlxs_instance_count; i++) {
10158 10158 if (emlxs_instance[i] == ddiinst) {
10159 10159 break;
10160 10160 }
10161 10161 }
10162 10162
10163 10163 /* If it doesn't already exist, add it */
10164 10164 if (i >= emlxs_instance_count) {
10165 10165 if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10166 10166 emlxs_instance[i] = ddiinst;
10167 10167 emlxs_instance_count++;
10168 10168 emlxs_device.hba_count = emlxs_instance_count;
10169 10169 }
10170 10170 }
10171 10171
10172 10172 mutex_exit(&emlxs_device.lock);
10173 10173
10174 10174 return (i);
10175 10175
10176 10176 } /* emlxs_add_instance() */
10177 10177
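emlxs_get_instance() and emlxs_add_instance() together maintain a small append-only table: emlxs_instance[emlx_inst] == ddiinst, so the emlx instance is just the index at which a DDI instance was first recorded. A compact model of the add/lookup behavior is sketched below; the names and MODEL_MAX_BRDS are illustrative stand-ins, and the real routines do all of this under emlxs_device.lock.

#define	MODEL_MAX_BRDS	64		/* stands in for MAX_FC_BRDS */

static int		model_instance[MODEL_MAX_BRDS];
static unsigned int	model_count;

/* Return the emlx index for ddiinst, appending it on first sight */
static unsigned int
model_add_instance(int ddiinst)
{
	unsigned int i;

	for (i = 0; i < model_count; i++) {
		if (model_instance[i] == ddiinst) {
			return (i);		/* already mapped */
		}
	}
	if (model_count < MODEL_MAX_BRDS) {
		model_instance[model_count] = ddiinst;
		return (model_count++);		/* index where it was added */
	}
	return (MODEL_MAX_BRDS);		/* table full */
}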
10178 10178
10179 10179 /*ARGSUSED*/
10180 10180 extern void
10181 10181 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10182 10182 uint32_t doneq)
10183 10183 {
10184 10184 emlxs_hba_t *hba;
10185 10185 emlxs_port_t *port;
10186 10186 emlxs_buf_t *fpkt;
10187 10187
10188 10188 port = sbp->port;
10189 10189
10190 10190 if (!port) {
10191 10191 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
10192 10192 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
10193 10193
10194 10194 return;
10195 10195 }
10196 10196
10197 10197 hba = HBA;
10198 10198
10199 10199 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
10200 10200 (sbp->iotag)) {
10201 10201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
10202 10202 "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
10203 10203 "xri_flags=%x",
10204 10204 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
10205 10205
10206 10206 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
10207 10207 }
10208 10208
10209 10209 mutex_enter(&sbp->mtx);
10210 10210
10211 10211 /* Check for error conditions */
10212 10212 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
10213 10213 PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
10214 10214 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
10215 10215 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10216 10216 EMLXS_MSGF(EMLXS_CONTEXT,
10217 10217 &emlxs_pkt_completion_error_msg,
10218 10218 "Packet already returned. sbp=%p flags=%x", sbp,
10219 10219 sbp->pkt_flags);
10220 10220 }
10221 10221
10222 10222 else if (sbp->pkt_flags & PACKET_COMPLETED) {
10223 10223 EMLXS_MSGF(EMLXS_CONTEXT,
10224 10224 &emlxs_pkt_completion_error_msg,
10225 10225 "Packet already completed. sbp=%p flags=%x", sbp,
10226 10226 sbp->pkt_flags);
10227 10227 }
10228 10228
10229 10229 else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
10230 10230 EMLXS_MSGF(EMLXS_CONTEXT,
10231 10231 &emlxs_pkt_completion_error_msg,
10232 10232 "Pkt already on done queue. sbp=%p flags=%x", sbp,
10233 10233 sbp->pkt_flags);
10234 10234 }
10235 10235
10236 10236 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
10237 10237 EMLXS_MSGF(EMLXS_CONTEXT,
10238 10238 &emlxs_pkt_completion_error_msg,
10239 10239 "Packet already in completion. sbp=%p flags=%x",
10240 10240 sbp, sbp->pkt_flags);
10241 10241 }
10242 10242
10243 10243 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
10244 10244 EMLXS_MSGF(EMLXS_CONTEXT,
10245 10245 &emlxs_pkt_completion_error_msg,
10246 10246 "Packet still on chip queue. sbp=%p flags=%x",
10247 10247 sbp, sbp->pkt_flags);
10248 10248 }
10249 10249
10250 10250 else if (sbp->pkt_flags & PACKET_IN_TXQ) {
10251 10251 EMLXS_MSGF(EMLXS_CONTEXT,
10252 10252 &emlxs_pkt_completion_error_msg,
10253 10253 "Packet still on tx queue. sbp=%p flags=%x", sbp,
10254 10254 sbp->pkt_flags);
10255 10255 }
10256 10256
10257 10257 mutex_exit(&sbp->mtx);
10258 10258 return;
10259 10259 }
10260 10260
10261 10261 /* Packet is now in completion */
10262 10262 sbp->pkt_flags |= PACKET_IN_COMPLETION;
10263 10263
10264 10264 /* Set the state if not already set */
10265 10265 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10266 10266 emlxs_set_pkt_state(sbp, iostat, localstat, 0);
10267 10267 }
10268 10268
10269 10269 /* Check for parent flush packet */
10270 10270 /* If pkt has a parent flush packet then adjust its count now */
10271 10271 fpkt = sbp->fpkt;
10272 10272 if (fpkt) {
10273 10273 /*
10274 10274 * We will try to NULL sbp->fpkt inside the
10275 10275 * fpkt's mutex if possible
10276 10276 */
10277 10277
10278 10278 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
10279 10279 mutex_enter(&fpkt->mtx);
10280 10280 if (fpkt->flush_count) {
10281 10281 fpkt->flush_count--;
10282 10282 }
10283 10283 sbp->fpkt = NULL;
10284 10284 mutex_exit(&fpkt->mtx);
10285 10285 } else { /* fpkt has been returned already */
10286 10286
10287 10287 sbp->fpkt = NULL;
10288 10288 }
10289 10289 }
10290 10290
10291 10291 /* If pkt is polled, then wake up sleeping thread */
10292 10292 if (sbp->pkt_flags & PACKET_POLLED) {
10293 10293 /* Don't set the PACKET_ULP_OWNED flag here */
10294 10294 /* because the polling thread will do it */
10295 10295 sbp->pkt_flags |= PACKET_COMPLETED;
10296 10296 mutex_exit(&sbp->mtx);
10297 10297
10298 10298 /* Wake up sleeping thread */
10299 10299 mutex_enter(&EMLXS_PKT_LOCK);
10300 10300 cv_broadcast(&EMLXS_PKT_CV);
10301 10301 mutex_exit(&EMLXS_PKT_LOCK);
10302 10302 }
10303 10303
10304 10304 /* If packet was generated by our driver, */
10305 10305 /* then complete it immediately */
10306 10306 else if (sbp->pkt_flags & PACKET_ALLOCATED) {
10307 10307 mutex_exit(&sbp->mtx);
10308 10308
10309 10309 emlxs_iodone(sbp);
10310 10310 }
10311 10311
10312 10312 /* Put the pkt on the done queue for callback */
10313 10313 /* completion in another thread */
10314 10314 else {
10315 10315 sbp->pkt_flags |= PACKET_IN_DONEQ;
10316 10316 sbp->next = NULL;
10317 10317 mutex_exit(&sbp->mtx);
10318 10318
10319 10319 /* Put pkt on doneq, so I/O's will be completed in order */
10320 10320 mutex_enter(&EMLXS_PORT_LOCK);
10321 10321 if (hba->iodone_tail == NULL) {
10322 10322 hba->iodone_list = sbp;
10323 10323 hba->iodone_count = 1;
10324 10324 } else {
10325 10325 hba->iodone_tail->next = sbp;
10326 10326 hba->iodone_count++;
10327 10327 }
10328 10328 hba->iodone_tail = sbp;
10329 10329 mutex_exit(&EMLXS_PORT_LOCK);
10330 10330
10331 10331 /* Trigger a thread to service the doneq */
10332 10332 emlxs_thread_trigger1(&hba->iodone_thread,
10333 10333 emlxs_iodone_server);
10334 10334 }
10335 10335
10336 10336 return;
10337 10337
10338 10338 } /* emlxs_pkt_complete() */
10339 10339
10340 10340
10341 10341 #ifdef SAN_DIAG_SUPPORT
10342 10342 /*
10343 10343  * This routine is called with EMLXS_PORT_LOCK held, so the counters can be
10344 10344  * incremented normally; atomic operations are not required.
10345 10345 */
10346 10346 extern void
10347 10347 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10348 10348 {
10349 10349 emlxs_port_t *vport;
10350 10350 fc_packet_t *pkt;
10351 10351 uint32_t did;
10352 10352 hrtime_t t;
10353 10353 hrtime_t delta_time;
10354 10354 int i;
10355 10355 NODELIST *ndlp;
10356 10356
10357 10357 vport = sbp->port;
10358 10358
10359 10359 if ((emlxs_sd_bucket.search_type == 0) ||
10360 10360 (vport->sd_io_latency_state != SD_COLLECTING)) {
10361 10361 return;
10362 10362 }
10363 10363
10364 10364 /* Compute the iolatency time in microseconds */
10365 10365 t = gethrtime();
10366 10366 delta_time = t - sbp->sd_start_time;
10367 10367 pkt = PRIV2PKT(sbp);
10368 10368 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10369 10369 ndlp = emlxs_node_find_did(vport, did, 1);
10370 10370
10371 10371 if (!ndlp) {
10372 10372 return;
10373 10373 }
10374 10374
10375 10375 if (delta_time >=
10376 10376 emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10377 10377 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10378 10378 count++;
10379 10379 } else if (delta_time <= emlxs_sd_bucket.values[0]) {
10380 10380 ndlp->sd_dev_bucket[0].count++;
10381 10381 } else {
10382 10382 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10383 10383 if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10384 10384 (delta_time <= emlxs_sd_bucket.values[i])) {
10385 10385 ndlp->sd_dev_bucket[i].count++;
10386 10386 break;
10387 10387 }
10388 10388 }
10389 10389 }
10390 10390
10391 10391 return;
10392 10392
10393 10393 } /* emlxs_update_sd_bucket() */
10394 10394 #endif /* SAN_DIAG_SUPPORT */
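The bucket selection above is a linear scan over the sorted emlxs_sd_bucket.values[] boundaries: a sample falls into bucket i when values[i-1] < delta <= values[i], clamped at both ends of the histogram. A standalone sketch of that mapping (hypothetical helper; hrtime_t is the Solaris high-resolution time type used by the driver):

/* Return the histogram bucket index for one latency sample */
static int
latency_bucket(hrtime_t delta, const hrtime_t *values, int nbuckets)
{
	int i;

	if (delta >= values[nbuckets - 1]) {
		return (nbuckets - 1);		/* clamp at the top */
	}
	if (delta <= values[0]) {
		return (0);			/* clamp at the bottom */
	}
	for (i = 1; i < nbuckets; i++) {
		if (delta <= values[i]) {	/* values[i - 1] < delta here */
			return (i);
		}
	}
	return (nbuckets - 1);			/* not reached */
}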
10395 10395
10396 10396 /*ARGSUSED*/
10397 10397 static void
10398 10398 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
10399 10399 {
10400 10400 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
10401 10401 emlxs_buf_t *sbp;
10402 10402
10403 10403 mutex_enter(&EMLXS_PORT_LOCK);
10404 10404
10405 10405 /* Remove one pkt from the doneq head and complete it */
10406 10406 while ((sbp = hba->iodone_list) != NULL) {
10407 10407 if ((hba->iodone_list = sbp->next) == NULL) {
10408 10408 hba->iodone_tail = NULL;
10409 10409 hba->iodone_count = 0;
10410 10410 } else {
10411 10411 hba->iodone_count--;
10412 10412 }
10413 10413
10414 10414 mutex_exit(&EMLXS_PORT_LOCK);
10415 10415
10416 10416 /* Prepare the pkt for completion */
10417 10417 mutex_enter(&sbp->mtx);
10418 10418 sbp->next = NULL;
10419 10419 sbp->pkt_flags &= ~PACKET_IN_DONEQ;
10420 10420 mutex_exit(&sbp->mtx);
10421 10421
10422 10422 /* Complete the IO now */
10423 10423 emlxs_iodone(sbp);
10424 10424
10425 10425 /* Reacquire lock and check if more work is to be done */
10426 10426 mutex_enter(&EMLXS_PORT_LOCK);
10427 10427 }
10428 10428
10429 10429 mutex_exit(&EMLXS_PORT_LOCK);
10430 10430
10431 10431 #ifdef FMA_SUPPORT
10432 10432 if (hba->flag & FC_DMA_CHECK_ERROR) {
10433 10433 emlxs_thread_spawn(hba, emlxs_restart_thread,
10434 10434 NULL, NULL);
10435 10435 }
10436 10436 #endif /* FMA_SUPPORT */
10437 10437
10438 10438 return;
10439 10439
10440 10440 } /* End emlxs_iodone_server */
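emlxs_pkt_complete() and emlxs_iodone_server() form a simple producer/consumer pair around the doneq: producers append to a tail-pointered singly linked list under EMLXS_PORT_LOCK and trigger the iodone thread, while the server pops one entry at a time and drops the lock before running the (potentially slow) completion path. A minimal sketch of the list discipline only; the struct and helper names are hypothetical and locking is left to the caller, as in the driver.

struct done_pkt {
	struct done_pkt	*next;
};

/* Producer side: append at the tail (caller holds the queue lock) */
static void
doneq_append(struct done_pkt **head, struct done_pkt **tail,
    struct done_pkt *pkt)
{
	pkt->next = NULL;
	if (*tail == NULL) {
		*head = pkt;		/* queue was empty */
	} else {
		(*tail)->next = pkt;
	}
	*tail = pkt;
}

/* Consumer side: detach one entry from the head (caller holds the lock) */
static struct done_pkt *
doneq_pop(struct done_pkt **head, struct done_pkt **tail)
{
	struct done_pkt *pkt = *head;

	if (pkt != NULL && (*head = pkt->next) == NULL) {
		*tail = NULL;		/* queue is now empty */
	}
	return (pkt);
}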
10441 10441
10442 10442
10443 10443 static void
10444 10444 emlxs_iodone(emlxs_buf_t *sbp)
10445 10445 {
10446 10446 #ifdef FMA_SUPPORT
10447 10447 emlxs_port_t *port = sbp->port;
10448 10448 emlxs_hba_t *hba = port->hba;
10449 10449 #endif /* FMA_SUPPORT */
10450 10450
10451 10451 fc_packet_t *pkt;
10452 10452 CHANNEL *cp;
10453 10453
10454 10454 pkt = PRIV2PKT(sbp);
10455 10455
10456 10456 /* Check one more time that the pkt has not already been returned */
10457 10457 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10458 10458 return;
10459 10459 }
10460 10460
10461 10461 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10462 10462 emlxs_unswap_pkt(sbp);
10463 10463 #endif /* EMLXS_MODREV2X */
10464 10464
10465 10465 mutex_enter(&sbp->mtx);
10466 10466 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
10467 10467 mutex_exit(&sbp->mtx);
10468 10468
10469 10469 if (pkt->pkt_comp) {
10470 10470 #ifdef FMA_SUPPORT
10471 10471 emlxs_check_dma(hba, sbp);
10472 10472 #endif /* FMA_SUPPORT */
10473 10473
10474 10474 if (sbp->channel) {
10475 10475 cp = (CHANNEL *)sbp->channel;
10476 10476 cp->ulpCmplCmd++;
10477 10477 }
10478 10478
10479 10479 (*pkt->pkt_comp) (pkt);
10480 10480 }
10481 10481
10482 10482 return;
10483 10483
10484 10484 } /* emlxs_iodone() */
10485 10485
10486 10486
10487 10487
10488 10488 extern fc_unsol_buf_t *
10489 10489 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10490 10490 {
10491 10491 emlxs_unsol_buf_t *pool;
10492 10492 fc_unsol_buf_t *ubp;
10493 10493 emlxs_ub_priv_t *ub_priv;
10494 10494
10495 10495 /* Check if this is a valid ub token */
10496 10496 if (token < EMLXS_UB_TOKEN_OFFSET) {
10497 10497 return (NULL);
10498 10498 }
10499 10499
10500 10500 mutex_enter(&EMLXS_UB_LOCK);
10501 10501
10502 10502 pool = port->ub_pool;
10503 10503 while (pool) {
10504 10504 /* Find a pool with the proper token range */
10505 10505 if (token >= pool->pool_first_token &&
10506 10506 token <= pool->pool_last_token) {
10507 10507 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10508 10508 pool->pool_first_token)];
10509 10509 ub_priv = ubp->ub_fca_private;
10510 10510
10511 10511 if (ub_priv->token != token) {
10512 10512 EMLXS_MSGF(EMLXS_CONTEXT,
10513 10513 &emlxs_sfs_debug_msg,
10514 10514 				    "ub_find: Invalid token. buffer=%p token=%x "
10515 10515 				    "ub_token=%x", ubp, token, ub_priv->token);
10516 10516
10517 10517 ubp = NULL;
10518 10518 }
10519 10519
10520 10520 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10521 10521 EMLXS_MSGF(EMLXS_CONTEXT,
10522 10522 &emlxs_sfs_debug_msg,
10523 10523 "ub_find: Buffer not in use. buffer=%p "
10524 10524 "token=%x", ubp, token);
10525 10525
10526 10526 ubp = NULL;
10527 10527 }
10528 10528
10529 10529 mutex_exit(&EMLXS_UB_LOCK);
10530 10530
10531 10531 return (ubp);
10532 10532 }
10533 10533
10534 10534 pool = pool->pool_next;
10535 10535 }
10536 10536
10537 10537 mutex_exit(&EMLXS_UB_LOCK);
10538 10538
10539 10539 return (NULL);
10540 10540
10541 10541 } /* emlxs_ub_find() */
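/*
 * Illustrative note (hypothetical values, not from the original source): the
 * token-to-buffer mapping above is a plain offset into the pool's fc_ubufs[]
 * array. Assuming a pool with pool_first_token = 0x1000, token 0x1005 would
 * resolve to fc_ubufs[5]; the ub_priv->token cross-check then guards against
 * a stale or mismatched token before the buffer is handed back.
 */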
10542 10542
10543 10543
10544 10544
10545 10545 extern fc_unsol_buf_t *
10546 10546 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
10547 10547 uint32_t reserve)
10548 10548 {
10549 10549 emlxs_hba_t *hba = HBA;
10550 10550 emlxs_unsol_buf_t *pool;
10551 10551 fc_unsol_buf_t *ubp;
10552 10552 emlxs_ub_priv_t *ub_priv;
10553 10553 uint32_t i;
10554 10554 uint32_t resv_flag;
10555 10555 uint32_t pool_free;
10556 10556 uint32_t pool_free_resv;
10557 10557
10558 10558 mutex_enter(&EMLXS_UB_LOCK);
10559 10559
10560 10560 pool = port->ub_pool;
10561 10561 while (pool) {
10562 10562 /* Find a pool of the appropriate type and size */
10563 10563 if ((pool->pool_available == 0) ||
10564 10564 (pool->pool_type != type) ||
10565 10565 (pool->pool_buf_size < size)) {
10566 10566 goto next_pool;
10567 10567 }
10568 10568
10569 10569
10570 10570 /* Adjust free counts based on availability */
10571 10571 /* The free reserve count gets first priority */
10572 10572 pool_free_resv =
10573 10573 min(pool->pool_free_resv, pool->pool_available);
10574 10574 pool_free =
10575 10575 min(pool->pool_free,
10576 10576 (pool->pool_available - pool_free_resv));
10577 10577
10578 10578 /* Initialize reserve flag */
10579 10579 resv_flag = reserve;
10580 10580
10581 10581 if (resv_flag) {
10582 10582 if (pool_free_resv == 0) {
10583 10583 if (pool_free == 0) {
10584 10584 goto next_pool;
10585 10585 }
10586 10586 resv_flag = 0;
10587 10587 }
10588 10588 } else if (pool_free == 0) {
10589 10589 goto next_pool;
10590 10590 }
10591 10591
10592 10592 /* Find next available free buffer in this pool */
10593 10593 for (i = 0; i < pool->pool_nentries; i++) {
10594 10594 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
10595 10595 ub_priv = ubp->ub_fca_private;
10596 10596
10597 10597 if (!ub_priv->available ||
10598 10598 ub_priv->flags != EMLXS_UB_FREE) {
10599 10599 continue;
10600 10600 }
10601 10601
10602 10602 ub_priv->time = hba->timer_tics;
10603 10603
10604 10604 /* Timeout in 5 minutes */
10605 10605 ub_priv->timeout = (5 * 60);
10606 10606
10607 10607 ub_priv->flags = EMLXS_UB_IN_USE;
10608 10608
10609 10609 /* Alloc the buffer from the pool */
10610 10610 if (resv_flag) {
10611 10611 ub_priv->flags |= EMLXS_UB_RESV;
10612 10612 pool->pool_free_resv--;
10613 10613 } else {
10614 10614 pool->pool_free--;
10615 10615 }
10616 10616
10617 10617 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
10618 10618 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
10619 10619 ub_priv->token, pool->pool_nentries,
10620 10620 pool->pool_available, pool->pool_free,
10621 10621 pool->pool_free_resv);
10622 10622
10623 10623 mutex_exit(&EMLXS_UB_LOCK);
10624 10624
10625 10625 return (ubp);
10626 10626 }
10627 10627 next_pool:
10628 10628
10629 10629 pool = pool->pool_next;
10630 10630 }
10631 10631
10632 10632 mutex_exit(&EMLXS_UB_LOCK);
10633 10633
10634 10634 return (NULL);
10635 10635
10636 10636 } /* emlxs_ub_get() */
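/*
 * Illustrative note (hypothetical counts, not from the original source): the
 * clamping above gives the reserve count first claim on whatever is still
 * available. With pool_available = 4, pool_free = 10 and pool_free_resv = 3,
 * pool_free_resv clamps to min(3, 4) = 3 and pool_free clamps to
 * min(10, 4 - 3) = 1, so a non-reserve request can take at most one buffer
 * while three remain held back for reserve callers.
 */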
10637 10637
10638 10638
10639 10639
10640 10640 extern void
10641 10641 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10642 10642 uint32_t lock)
10643 10643 {
10644 10644 fc_packet_t *pkt;
10645 10645 fcp_rsp_t *fcp_rsp;
10646 10646 uint32_t i;
10647 10647 emlxs_xlat_err_t *tptr;
10648 10648 emlxs_xlat_err_t *entry;
10649 10649
10650 10650
10651 10651 pkt = PRIV2PKT(sbp);
10652 10652
10653 10653 /* Warning: Some FCT sbp's don't have */
10654 10654 /* fc_packet objects, so just return */
10655 10655 if (!pkt) {
10656 10656 return;
10657 10657 }
10658 10658
10659 10659 if (lock) {
10660 10660 mutex_enter(&sbp->mtx);
10661 10661 }
10662 10662
10663 10663 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10664 10664 sbp->pkt_flags |= PACKET_STATE_VALID;
10665 10665
10666 10666 /* Perform table lookup */
10667 10667 entry = NULL;
10668 10668 if (iostat != IOSTAT_LOCAL_REJECT) {
10669 10669 tptr = emlxs_iostat_tbl;
10670 10670 for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
10671 10671 if (iostat == tptr->emlxs_status) {
10672 10672 entry = tptr;
10673 10673 break;
10674 10674 }
10675 10675 }
10676 10676 } else { /* iostat == IOSTAT_LOCAL_REJECT */
10677 10677
10678 10678 tptr = emlxs_ioerr_tbl;
10679 10679 for (i = 0; i < IOERR_MAX; i++, tptr++) {
10680 10680 if (localstat == tptr->emlxs_status) {
10681 10681 entry = tptr;
10682 10682 break;
10683 10683 }
10684 10684 }
10685 10685 }
10686 10686
10687 10687 if (entry) {
10688 10688 pkt->pkt_state = entry->pkt_state;
10689 10689 pkt->pkt_reason = entry->pkt_reason;
10690 10690 pkt->pkt_expln = entry->pkt_expln;
10691 10691 pkt->pkt_action = entry->pkt_action;
10692 10692 } else {
10693 10693 /* Set defaults */
10694 10694 pkt->pkt_state = FC_PKT_TRAN_ERROR;
10695 10695 pkt->pkt_reason = FC_REASON_ABORTED;
10696 10696 pkt->pkt_expln = FC_EXPLN_NONE;
10697 10697 pkt->pkt_action = FC_ACTION_RETRYABLE;
10698 10698 }
10699 10699
10700 10700
10701 10701 /* Set the residual counts and response frame */
10702 10702 /* Check if response frame was received from the chip */
10703 10703 /* If so, then the residual counts will already be set */
10704 10704 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
10705 10705 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
10706 10706 /* We have to create the response frame */
10707 10707 if (iostat == IOSTAT_SUCCESS) {
10708 10708 pkt->pkt_resp_resid = 0;
10709 10709 pkt->pkt_data_resid = 0;
10710 10710
10711 10711 if ((pkt->pkt_cmd_fhdr.type ==
10712 10712 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
10713 10713 pkt->pkt_resp) {
10714 10714 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
10715 10715
10716 10716 fcp_rsp->fcp_u.fcp_status.
10717 10717 rsp_len_set = 1;
10718 10718 fcp_rsp->fcp_response_len = 8;
10719 10719 }
10720 10720 } else {
10721 10721 /* Otherwise assume no data */
10722 10722 /* and no response received */
10723 10723 pkt->pkt_data_resid = pkt->pkt_datalen;
10724 10724 pkt->pkt_resp_resid = pkt->pkt_rsplen;
10725 10725 }
10726 10726 }
10727 10727 }
10728 10728
10729 10729 if (lock) {
10730 10730 mutex_exit(&sbp->mtx);
10731 10731 }
10732 10732
10733 10733 return;
10734 10734
10735 10735 } /* emlxs_set_pkt_state() */
10736 10736
10737 10737
10738 10738 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10739 10739
10740 10740 extern void
10741 10741 emlxs_swap_service_params(SERV_PARM *sp)
10742 10742 {
10743 10743 uint16_t *p;
10744 10744 int size;
10745 10745 int i;
10746 10746
10747 10747 size = (sizeof (CSP) - 4) / 2;
10748 10748 p = (uint16_t *)&sp->cmn;
10749 10749 for (i = 0; i < size; i++) {
10750 10750 p[i] = LE_SWAP16(p[i]);
10751 10751 }
10752 10752 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10753 10753
10754 10754 size = sizeof (CLASS_PARMS) / 2;
10755 10755 p = (uint16_t *)&sp->cls1;
10756 10756 for (i = 0; i < size; i++, p++) {
10757 10757 *p = LE_SWAP16(*p);
10758 10758 }
10759 10759
10760 10760 size = sizeof (CLASS_PARMS) / 2;
10761 10761 p = (uint16_t *)&sp->cls2;
10762 10762 for (i = 0; i < size; i++, p++) {
10763 10763 *p = LE_SWAP16(*p);
10764 10764 }
10765 10765
10766 10766 size = sizeof (CLASS_PARMS) / 2;
10767 10767 p = (uint16_t *)&sp->cls3;
10768 10768 for (i = 0; i < size; i++, p++) {
10769 10769 *p = LE_SWAP16(*p);
10770 10770 }
10771 10771
10772 10772 size = sizeof (CLASS_PARMS) / 2;
10773 10773 p = (uint16_t *)&sp->cls4;
10774 10774 for (i = 0; i < size; i++, p++) {
10775 10775 *p = LE_SWAP16(*p);
10776 10776 }
10777 10777
10778 10778 return;
10779 10779
10780 10780 } /* emlxs_swap_service_params() */
10781 10781
10782 10782 extern void
10783 10783 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10784 10784 {
10785 10785 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10786 10786 emlxs_swap_fcp_pkt(sbp);
10787 10787 }
10788 10788
10789 10789 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10790 10790 emlxs_swap_els_pkt(sbp);
10791 10791 }
10792 10792
10793 10793 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10794 10794 emlxs_swap_ct_pkt(sbp);
10795 10795 }
10796 10796
10797 10797 } /* emlxs_unswap_pkt() */
10798 10798
10799 10799
10800 10800 extern void
10801 10801 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10802 10802 {
10803 10803 fc_packet_t *pkt;
10804 10804 FCP_CMND *cmd;
10805 10805 fcp_rsp_t *rsp;
10806 10806 uint16_t *lunp;
10807 10807 uint32_t i;
10808 10808
10809 10809 mutex_enter(&sbp->mtx);
10810 10810
10811 10811 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10812 10812 mutex_exit(&sbp->mtx);
10813 10813 return;
10814 10814 }
10815 10815
10816 10816 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10817 10817 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10818 10818 } else {
10819 10819 sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10820 10820 }
10821 10821
10822 10822 mutex_exit(&sbp->mtx);
10823 10823
10824 10824 pkt = PRIV2PKT(sbp);
10825 10825
10826 10826 cmd = (FCP_CMND *)pkt->pkt_cmd;
10827 10827 rsp = (pkt->pkt_rsplen &&
10828 10828 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10829 10829 (fcp_rsp_t *)pkt->pkt_resp : NULL;
10830 10830
10831 10831 /* The size of data buffer needs to be swapped. */
10832 10832 cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10833 10833
10834 10834 /*
10835 10835 * Swap first 2 words of FCP CMND payload.
10836 10836 */
10837 10837 lunp = (uint16_t *)&cmd->fcpLunMsl;
10838 10838 for (i = 0; i < 4; i++) {
10839 10839 lunp[i] = LE_SWAP16(lunp[i]);
10840 10840 }
10841 10841
10842 10842 if (rsp) {
10843 10843 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10844 10844 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10845 10845 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10846 10846 }
10847 10847
10848 10848 return;
10849 10849
10850 10850 } /* emlxs_swap_fcp_pkt() */
10851 10851
10852 10852
10853 10853 extern void
10854 10854 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10855 10855 {
10856 10856 fc_packet_t *pkt;
10857 10857 uint32_t *cmd;
10858 10858 uint32_t *rsp;
10859 10859 uint32_t command;
10860 10860 uint16_t *c;
10861 10861 uint32_t i;
10862 10862 uint32_t swapped;
10863 10863
10864 10864 mutex_enter(&sbp->mtx);
10865 10865
10866 10866 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10867 10867 mutex_exit(&sbp->mtx);
10868 10868 return;
10869 10869 }
10870 10870
10871 10871 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10872 10872 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10873 10873 swapped = 1;
10874 10874 } else {
10875 10875 sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10876 10876 swapped = 0;
10877 10877 }
10878 10878
10879 10879 mutex_exit(&sbp->mtx);
10880 10880
10881 10881 pkt = PRIV2PKT(sbp);
10882 10882
10883 10883 cmd = (uint32_t *)pkt->pkt_cmd;
10884 10884 rsp = (pkt->pkt_rsplen &&
10885 10885 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10886 10886 (uint32_t *)pkt->pkt_resp : NULL;
10887 10887
10888 10888 if (!swapped) {
10889 10889 cmd[0] = LE_SWAP32(cmd[0]);
10890 10890 command = cmd[0] & ELS_CMD_MASK;
10891 10891 } else {
10892 10892 command = cmd[0] & ELS_CMD_MASK;
10893 10893 cmd[0] = LE_SWAP32(cmd[0]);
10894 10894 }
10895 10895
10896 10896 if (rsp) {
10897 10897 rsp[0] = LE_SWAP32(rsp[0]);
10898 10898 }
10899 10899
10900 10900 switch (command) {
10901 10901 case ELS_CMD_ACC:
10902 10902 if (sbp->ucmd == ELS_CMD_ADISC) {
10903 10903 /* Hard address of originator */
10904 10904 cmd[1] = LE_SWAP32(cmd[1]);
10905 10905
10906 10906 /* N_Port ID of originator */
10907 10907 cmd[6] = LE_SWAP32(cmd[6]);
10908 10908 }
10909 10909 break;
10910 10910
10911 10911 case ELS_CMD_PLOGI:
10912 10912 case ELS_CMD_FLOGI:
10913 10913 case ELS_CMD_FDISC:
10914 10914 if (rsp) {
10915 10915 emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
10916 10916 }
10917 10917 break;
10918 10918
10919 10919 case ELS_CMD_LOGO:
10920 10920 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */
10921 10921 break;
10922 10922
10923 10923 case ELS_CMD_RLS:
10924 10924 cmd[1] = LE_SWAP32(cmd[1]);
10925 10925
10926 10926 if (rsp) {
10927 10927 for (i = 0; i < 6; i++) {
10928 10928 rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10929 10929 }
10930 10930 }
10931 10931 break;
10932 10932
10933 10933 case ELS_CMD_ADISC:
10934 10934 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */
10935 10935 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */
10936 10936 break;
10937 10937
10938 10938 case ELS_CMD_PRLI:
10939 10939 c = (uint16_t *)&cmd[1];
10940 10940 c[1] = LE_SWAP16(c[1]);
10941 10941
10942 10942 cmd[4] = LE_SWAP32(cmd[4]);
10943 10943
10944 10944 if (rsp) {
10945 10945 rsp[4] = LE_SWAP32(rsp[4]);
10946 10946 }
10947 10947 break;
10948 10948
10949 10949 case ELS_CMD_SCR:
10950 10950 cmd[1] = LE_SWAP32(cmd[1]);
10951 10951 break;
10952 10952
10953 10953 case ELS_CMD_LINIT:
10954 10954 if (rsp) {
10955 10955 rsp[1] = LE_SWAP32(rsp[1]);
10956 10956 }
10957 10957 break;
10958 10958
10959 10959 default:
10960 10960 break;
10961 10961 }
10962 10962
10963 10963 return;
10964 10964
10965 10965 } /* emlxs_swap_els_pkt() */
10966 10966
10967 10967
10968 10968 extern void
10969 10969 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10970 10970 {
10971 10971 fc_packet_t *pkt;
10972 10972 uint32_t *cmd;
10973 10973 uint32_t *rsp;
10974 10974 uint32_t command;
10975 10975 uint32_t i;
10976 10976 uint32_t swapped;
10977 10977
10978 10978 mutex_enter(&sbp->mtx);
10979 10979
10980 10980 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10981 10981 mutex_exit(&sbp->mtx);
10982 10982 return;
10983 10983 }
10984 10984
10985 10985 if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10986 10986 sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10987 10987 swapped = 1;
10988 10988 } else {
10989 10989 sbp->pkt_flags |= PACKET_CT_SWAPPED;
10990 10990 swapped = 0;
10991 10991 }
10992 10992
10993 10993 mutex_exit(&sbp->mtx);
10994 10994
10995 10995 pkt = PRIV2PKT(sbp);
10996 10996
10997 10997 cmd = (uint32_t *)pkt->pkt_cmd;
10998 10998 rsp = (pkt->pkt_rsplen &&
10999 10999 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
11000 11000 (uint32_t *)pkt->pkt_resp : NULL;
11001 11001
11002 11002 if (!swapped) {
11003 11003 cmd[0] = 0x01000000;
11004 11004 command = cmd[2];
11005 11005 }
11006 11006
11007 11007 cmd[0] = LE_SWAP32(cmd[0]);
11008 11008 cmd[1] = LE_SWAP32(cmd[1]);
11009 11009 cmd[2] = LE_SWAP32(cmd[2]);
11010 11010 cmd[3] = LE_SWAP32(cmd[3]);
11011 11011
11012 11012 if (swapped) {
11013 11013 command = cmd[2];
11014 11014 }
11015 11015
11016 11016 switch ((command >> 16)) {
11017 11017 case SLI_CTNS_GA_NXT:
11018 11018 cmd[4] = LE_SWAP32(cmd[4]);
11019 11019 break;
11020 11020
11021 11021 case SLI_CTNS_GPN_ID:
11022 11022 case SLI_CTNS_GNN_ID:
11023 11023 case SLI_CTNS_RPN_ID:
11024 11024 case SLI_CTNS_RNN_ID:
11025 11025 case SLI_CTNS_RSPN_ID:
11026 11026 cmd[4] = LE_SWAP32(cmd[4]);
11027 11027 break;
11028 11028
11029 11029 case SLI_CTNS_RCS_ID:
11030 11030 case SLI_CTNS_RPT_ID:
11031 11031 cmd[4] = LE_SWAP32(cmd[4]);
11032 11032 cmd[5] = LE_SWAP32(cmd[5]);
11033 11033 break;
11034 11034
11035 11035 case SLI_CTNS_RFT_ID:
11036 11036 cmd[4] = LE_SWAP32(cmd[4]);
11037 11037
11038 11038 /* Swap FC4 types */
11039 11039 for (i = 0; i < 8; i++) {
11040 11040 cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
11041 11041 }
11042 11042 break;
11043 11043
11044 11044 case SLI_CTNS_GFT_ID:
11045 11045 if (rsp) {
11046 11046 /* Swap FC4 types */
11047 11047 for (i = 0; i < 8; i++) {
11048 11048 rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
11049 11049 }
11050 11050 }
11051 11051 break;
11052 11052
11053 11053 case SLI_CTNS_GCS_ID:
11054 11054 case SLI_CTNS_GSPN_ID:
11055 11055 case SLI_CTNS_GSNN_NN:
11056 11056 case SLI_CTNS_GIP_NN:
11057 11057 case SLI_CTNS_GIPA_NN:
11058 11058
11059 11059 case SLI_CTNS_GPT_ID:
11060 11060 case SLI_CTNS_GID_NN:
11061 11061 case SLI_CTNS_GNN_IP:
11062 11062 case SLI_CTNS_GIPA_IP:
11063 11063 case SLI_CTNS_GID_FT:
11064 11064 case SLI_CTNS_GID_PT:
11065 11065 case SLI_CTNS_GID_PN:
11066 11066 case SLI_CTNS_RIP_NN:
11067 11067 case SLI_CTNS_RIPA_NN:
11068 11068 case SLI_CTNS_RSNN_NN:
11069 11069 case SLI_CTNS_DA_ID:
11070 11070 case SLI_CT_RESPONSE_FS_RJT:
11071 11071 case SLI_CT_RESPONSE_FS_ACC:
11072 11072
11073 11073 default:
11074 11074 break;
11075 11075 }
11076 11076 return;
11077 11077
11078 11078 } /* emlxs_swap_ct_pkt() */
11079 11079
11080 11080
11081 11081 extern void
11082 11082 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
11083 11083 {
11084 11084 emlxs_ub_priv_t *ub_priv;
11085 11085 fc_rscn_t *rscn;
11086 11086 uint32_t count;
11087 11087 uint32_t i;
11088 11088 uint32_t *lp;
11089 11089 la_els_logi_t *logi;
11090 11090
11091 11091 ub_priv = ubp->ub_fca_private;
11092 11092
11093 11093 switch (ub_priv->cmd) {
11094 11094 case ELS_CMD_RSCN:
11095 11095 rscn = (fc_rscn_t *)ubp->ub_buffer;
11096 11096
11097 11097 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
11098 11098
11099 11099 count = ((rscn->rscn_payload_len - 4) / 4);
11100 11100 lp = (uint32_t *)ubp->ub_buffer + 1;
11101 11101 for (i = 0; i < count; i++, lp++) {
11102 11102 *lp = LE_SWAP32(*lp);
11103 11103 }
11104 11104
11105 11105 break;
11106 11106
11107 11107 case ELS_CMD_FLOGI:
11108 11108 case ELS_CMD_PLOGI:
11109 11109 case ELS_CMD_FDISC:
11110 11110 case ELS_CMD_PDISC:
11111 11111 logi = (la_els_logi_t *)ubp->ub_buffer;
11112 11112 emlxs_swap_service_params(
11113 11113 (SERV_PARM *)&logi->common_service);
11114 11114 break;
11115 11115
11116 11116 /* ULP handles this */
11117 11117 case ELS_CMD_LOGO:
11118 11118 case ELS_CMD_PRLI:
11119 11119 case ELS_CMD_PRLO:
11120 11120 case ELS_CMD_ADISC:
11121 11121 default:
11122 11122 break;
11123 11123 }
11124 11124
11125 11125 return;
11126 11126
11127 11127 } /* emlxs_swap_els_ub() */
11128 11128
11129 11129
11130 11130 #endif /* EMLXS_MODREV2X */
11131 11131
11132 11132
11133 11133 extern char *
11134 11134 emlxs_mode_xlate(uint32_t mode)
11135 11135 {
11136 11136 static char buffer[32];
11137 11137 uint32_t i;
11138 11138 uint32_t count;
11139 11139
11140 11140 count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11141 11141 for (i = 0; i < count; i++) {
11142 11142 if (mode == emlxs_mode_table[i].code) {
11143 11143 return (emlxs_mode_table[i].string);
11144 11144 }
11145 11145 }
11146 11146
11147 11147 (void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11148 11148 return (buffer);
11149 11149
11150 11150 } /* emlxs_mode_xlate() */
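/*
 * Illustrative note (observation, not from the original source): this routine
 * and the other *_xlate() helpers below fall back to formatting the unknown
 * code into a function-local static buffer, so the returned string is shared
 * across callers and may be overwritten by a subsequent or concurrent lookup;
 * a caller that needs to keep the text should copy it out promptly.
 */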
11151 11151
11152 11152
11153 11153 extern char *
11154 11154 emlxs_elscmd_xlate(uint32_t elscmd)
11155 11155 {
11156 11156 static char buffer[32];
11157 11157 uint32_t i;
11158 11158 uint32_t count;
11159 11159
11160 11160 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11161 11161 for (i = 0; i < count; i++) {
11162 11162 if (elscmd == emlxs_elscmd_table[i].code) {
11163 11163 return (emlxs_elscmd_table[i].string);
11164 11164 }
11165 11165 }
11166 11166
11167 11167 (void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11168 11168 return (buffer);
11169 11169
11170 11170 } /* emlxs_elscmd_xlate() */
11171 11171
11172 11172
11173 11173 extern char *
11174 11174 emlxs_ctcmd_xlate(uint32_t ctcmd)
11175 11175 {
11176 11176 static char buffer[32];
11177 11177 uint32_t i;
11178 11178 uint32_t count;
11179 11179
11180 11180 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11181 11181 for (i = 0; i < count; i++) {
11182 11182 if (ctcmd == emlxs_ctcmd_table[i].code) {
11183 11183 return (emlxs_ctcmd_table[i].string);
11184 11184 }
11185 11185 }
11186 11186
11187 11187 (void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11188 11188 return (buffer);
11189 11189
11190 11190 } /* emlxs_ctcmd_xlate() */
11191 11191
11192 11192
11193 11193 #ifdef MENLO_SUPPORT
11194 11194 extern char *
11195 11195 emlxs_menlo_cmd_xlate(uint32_t cmd)
11196 11196 {
11197 11197 static char buffer[32];
11198 11198 uint32_t i;
11199 11199 uint32_t count;
11200 11200
11201 11201 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11202 11202 for (i = 0; i < count; i++) {
11203 11203 if (cmd == emlxs_menlo_cmd_table[i].code) {
11204 11204 return (emlxs_menlo_cmd_table[i].string);
11205 11205 }
11206 11206 }
11207 11207
11208 11208 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11209 11209 return (buffer);
11210 11210
11211 11211 } /* emlxs_menlo_cmd_xlate() */
11212 11212
11213 11213 extern char *
11214 11214 emlxs_menlo_rsp_xlate(uint32_t rsp)
11215 11215 {
11216 11216 static char buffer[32];
11217 11217 uint32_t i;
11218 11218 uint32_t count;
11219 11219
11220 11220 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11221 11221 for (i = 0; i < count; i++) {
11222 11222 if (rsp == emlxs_menlo_rsp_table[i].code) {
11223 11223 return (emlxs_menlo_rsp_table[i].string);
11224 11224 }
11225 11225 }
11226 11226
11227 11227 (void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11228 11228 return (buffer);
11229 11229
11230 11230 } /* emlxs_menlo_rsp_xlate() */
11231 11231
11232 11232 #endif /* MENLO_SUPPORT */
11233 11233
11234 11234
11235 11235 extern char *
11236 11236 emlxs_rmcmd_xlate(uint32_t rmcmd)
11237 11237 {
11238 11238 static char buffer[32];
11239 11239 uint32_t i;
11240 11240 uint32_t count;
11241 11241
11242 11242 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11243 11243 for (i = 0; i < count; i++) {
11244 11244 if (rmcmd == emlxs_rmcmd_table[i].code) {
11245 11245 return (emlxs_rmcmd_table[i].string);
11246 11246 }
11247 11247 }
11248 11248
11249 11249 (void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11250 11250 return (buffer);
11251 11251
11252 11252 } /* emlxs_rmcmd_xlate() */
11253 11253
11254 11254
11255 11255
11256 11256 extern char *
11257 11257 emlxs_mscmd_xlate(uint16_t mscmd)
11258 11258 {
11259 11259 static char buffer[32];
11260 11260 uint32_t i;
11261 11261 uint32_t count;
11262 11262
11263 11263 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11264 11264 for (i = 0; i < count; i++) {
11265 11265 if (mscmd == emlxs_mscmd_table[i].code) {
11266 11266 return (emlxs_mscmd_table[i].string);
11267 11267 }
11268 11268 }
11269 11269
11270 11270 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11271 11271 return (buffer);
11272 11272
11273 11273 } /* emlxs_mscmd_xlate() */
11274 11274
11275 11275
11276 11276 extern char *
11277 11277 emlxs_state_xlate(uint8_t state)
11278 11278 {
11279 11279 static char buffer[32];
11280 11280 uint32_t i;
11281 11281 uint32_t count;
11282 11282
11283 11283 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11284 11284 for (i = 0; i < count; i++) {
11285 11285 if (state == emlxs_state_table[i].code) {
11286 11286 return (emlxs_state_table[i].string);
11287 11287 }
11288 11288 }
11289 11289
11290 11290 (void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11291 11291 return (buffer);
11292 11292
11293 11293 } /* emlxs_state_xlate() */
11294 11294
11295 11295
11296 11296 extern char *
11297 11297 emlxs_error_xlate(uint8_t errno)
11298 11298 {
11299 11299 static char buffer[32];
11300 11300 uint32_t i;
11301 11301 uint32_t count;
11302 11302
11303 11303 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11304 11304 for (i = 0; i < count; i++) {
11305 11305 if (errno == emlxs_error_table[i].code) {
11306 11306 return (emlxs_error_table[i].string);
11307 11307 }
11308 11308 }
11309 11309
11310 11310 (void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11311 11311 return (buffer);
11312 11312
11313 11313 } /* emlxs_error_xlate() */
11314 11314
11315 11315
11316 11316 static int
11317 11317 emlxs_pm_lower_power(dev_info_t *dip)
11318 11318 {
11319 11319 int ddiinst;
11320 11320 int emlxinst;
11321 11321 emlxs_config_t *cfg;
11322 11322 int32_t rval;
11323 11323 emlxs_hba_t *hba;
11324 11324
11325 11325 ddiinst = ddi_get_instance(dip);
11326 11326 emlxinst = emlxs_get_instance(ddiinst);
11327 11327 hba = emlxs_device.hba[emlxinst];
11328 11328 cfg = &CFG;
11329 11329
11330 11330 rval = DDI_SUCCESS;
11331 11331
11332 11332 /* Lower the power level */
11333 11333 if (cfg[CFG_PM_SUPPORT].current) {
11334 11334 rval =
11335 11335 pm_lower_power(dip, EMLXS_PM_ADAPTER,
11336 11336 EMLXS_PM_ADAPTER_DOWN);
11337 11337 } else {
11338 11338 /* We do not have kernel support of power management enabled */
11339 11339 /* therefore, call our power management routine directly */
11340 11340 rval =
11341 11341 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11342 11342 }
11343 11343
11344 11344 return (rval);
11345 11345
11346 11346 } /* emlxs_pm_lower_power() */
11347 11347
11348 11348
11349 11349 static int
11350 11350 emlxs_pm_raise_power(dev_info_t *dip)
11351 11351 {
11352 11352 int ddiinst;
11353 11353 int emlxinst;
11354 11354 emlxs_config_t *cfg;
11355 11355 int32_t rval;
11356 11356 emlxs_hba_t *hba;
11357 11357
11358 11358 ddiinst = ddi_get_instance(dip);
11359 11359 emlxinst = emlxs_get_instance(ddiinst);
11360 11360 hba = emlxs_device.hba[emlxinst];
11361 11361 cfg = &CFG;
11362 11362
11363 11363 /* Raise the power level */
11364 11364 if (cfg[CFG_PM_SUPPORT].current) {
11365 11365 rval =
11366 11366 pm_raise_power(dip, EMLXS_PM_ADAPTER,
11367 11367 EMLXS_PM_ADAPTER_UP);
11368 11368 } else {
11369 11369 /* We do not have kernel support of power management enabled */
11370 11370 /* therefore, call our power management routine directly */
11371 11371 rval =
11372 11372 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11373 11373 }
11374 11374
11375 11375 return (rval);
11376 11376
11377 11377 } /* emlxs_pm_raise_power() */
11378 11378
11379 11379
11380 11380 #ifdef IDLE_TIMER
11381 11381
11382 11382 extern int
11383 11383 emlxs_pm_busy_component(emlxs_hba_t *hba)
11384 11384 {
11385 11385 emlxs_config_t *cfg = &CFG;
11386 11386 int rval;
11387 11387
11388 11388 hba->pm_active = 1;
11389 11389
11390 11390 if (hba->pm_busy) {
11391 11391 return (DDI_SUCCESS);
11392 11392 }
11393 11393
11394 11394 mutex_enter(&EMLXS_PM_LOCK);
11395 11395
11396 11396 if (hba->pm_busy) {
11397 11397 mutex_exit(&EMLXS_PM_LOCK);
11398 11398 return (DDI_SUCCESS);
11399 11399 }
11400 11400 hba->pm_busy = 1;
11401 11401
11402 11402 mutex_exit(&EMLXS_PM_LOCK);
11403 11403
11404 11404 /* Attempt to notify system that we are busy */
11405 11405 if (cfg[CFG_PM_SUPPORT].current) {
11406 11406 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11407 11407 "pm_busy_component.");
11408 11408
11409 11409 rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
11410 11410
11411 11411 if (rval != DDI_SUCCESS) {
11412 11412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11413 11413 "pm_busy_component failed. ret=%d", rval);
11414 11414
11415 11415 /* If this attempt failed then clear our flags */
11416 11416 mutex_enter(&EMLXS_PM_LOCK);
11417 11417 hba->pm_busy = 0;
11418 11418 mutex_exit(&EMLXS_PM_LOCK);
11419 11419
11420 11420 return (rval);
11421 11421 }
11422 11422 }
11423 11423
11424 11424 return (DDI_SUCCESS);
11425 11425
11426 11426 } /* emlxs_pm_busy_component() */
11427 11427
11428 11428
11429 11429 extern int
11430 11430 emlxs_pm_idle_component(emlxs_hba_t *hba)
11431 11431 {
11432 11432 emlxs_config_t *cfg = &CFG;
11433 11433 int rval;
11434 11434
11435 11435 if (!hba->pm_busy) {
11436 11436 return (DDI_SUCCESS);
11437 11437 }
11438 11438
11439 11439 mutex_enter(&EMLXS_PM_LOCK);
11440 11440
11441 11441 if (!hba->pm_busy) {
11442 11442 mutex_exit(&EMLXS_PM_LOCK);
11443 11443 return (DDI_SUCCESS);
11444 11444 }
11445 11445 hba->pm_busy = 0;
11446 11446
11447 11447 mutex_exit(&EMLXS_PM_LOCK);
11448 11448
11449 11449 if (cfg[CFG_PM_SUPPORT].current) {
11450 11450 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11451 11451 "pm_idle_component.");
11452 11452
11453 11453 rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
11454 11454
11455 11455 if (rval != DDI_SUCCESS) {
11456 11456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11457 11457 "pm_idle_component failed. ret=%d", rval);
11458 11458
11459 11459 /* If this attempt failed then */
11460 11460 /* reset our flags for another attempt */
11461 11461 mutex_enter(&EMLXS_PM_LOCK);
11462 11462 hba->pm_busy = 1;
11463 11463 mutex_exit(&EMLXS_PM_LOCK);
11464 11464
11465 11465 return (rval);
11466 11466 }
11467 11467 }
11468 11468
11469 11469 return (DDI_SUCCESS);
11470 11470
11471 11471 } /* emlxs_pm_idle_component() */
11472 11472
11473 11473
11474 11474 extern void
11475 11475 emlxs_pm_idle_timer(emlxs_hba_t *hba)
11476 11476 {
11477 11477 emlxs_config_t *cfg = &CFG;
11478 11478
11479 11479 if (hba->pm_active) {
11480 11480 /* Clear active flag and reset idle timer */
11481 11481 mutex_enter(&EMLXS_PM_LOCK);
11482 11482 hba->pm_active = 0;
11483 11483 hba->pm_idle_timer =
11484 11484 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11485 11485 mutex_exit(&EMLXS_PM_LOCK);
11486 11486 }
11487 11487
11488 11488 /* Check for idle timeout */
11489 11489 else if (hba->timer_tics >= hba->pm_idle_timer) {
11490 11490 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
11491 11491 mutex_enter(&EMLXS_PM_LOCK);
11492 11492 hba->pm_idle_timer =
11493 11493 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11494 11494 mutex_exit(&EMLXS_PM_LOCK);
11495 11495 }
11496 11496 }
11497 11497
11498 11498 return;
11499 11499
11500 11500 } /* emlxs_pm_idle_timer() */
11501 11501
11502 11502 #endif /* IDLE_TIMER */
11503 11503
11504 11504
11505 11505 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
11506 11506 static void
11507 11507 emlxs_read_vport_prop(emlxs_hba_t *hba)
11508 11508 {
11509 11509 emlxs_port_t *port = &PPORT;
11510 11510 emlxs_config_t *cfg = &CFG;
11511 11511 char **arrayp;
11512 11512 uint8_t *s;
11513 11513 uint8_t *np;
11514 11514 NAME_TYPE pwwpn;
11515 11515 NAME_TYPE wwnn;
11516 11516 NAME_TYPE wwpn;
11517 11517 uint32_t vpi;
11518 11518 uint32_t cnt;
11519 11519 uint32_t rval;
11520 11520 uint32_t i;
11521 11521 uint32_t j;
11522 11522 uint32_t c1;
11523 11523 uint32_t sum;
11524 11524 uint32_t errors;
11525 11525 char buffer[64];
11526 11526
11527 11527 /* Check for the per adapter vport setting */
11528 11528 (void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
11529 11529 hba->ddiinst);
11530 11530 cnt = 0;
11531 11531 arrayp = NULL;
11532 11532 rval =
11533 11533 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11534 11534 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
11535 11535
11536 11536 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11537 11537 /* Check for the global vport setting */
11538 11538 cnt = 0;
11539 11539 arrayp = NULL;
11540 11540 rval =
11541 11541 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11542 11542 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
11543 11543 }
11544 11544
11545 11545 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11546 11546 return;
11547 11547 }
11548 11548
11549 11549 for (i = 0; i < cnt; i++) {
11550 11550 errors = 0;
11551 11551 s = (uint8_t *)arrayp[i];
11552 11552
11553 11553 if (!s) {
11554 11554 break;
11555 11555 }
11556 11556
11557 11557 np = (uint8_t *)&pwwpn;
11558 11558 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11559 11559 c1 = *s++;
11560 11560 if ((c1 >= '0') && (c1 <= '9')) {
11561 11561 sum = ((c1 - '0') << 4);
11562 11562 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11563 11563 sum = ((c1 - 'a' + 10) << 4);
11564 11564 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11565 11565 sum = ((c1 - 'A' + 10) << 4);
11566 11566 } else {
11567 11567 EMLXS_MSGF(EMLXS_CONTEXT,
11568 11568 &emlxs_attach_debug_msg,
11569 11569 "Config error: Invalid PWWPN found. "
11570 11570 "entry=%d byte=%d hi_nibble=%c",
11571 11571 i, j, c1);
11572 11572 errors++;
11573 11573 }
11574 11574
11575 11575 c1 = *s++;
11576 11576 if ((c1 >= '0') && (c1 <= '9')) {
11577 11577 sum |= (c1 - '0');
11578 11578 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11579 11579 sum |= (c1 - 'a' + 10);
11580 11580 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11581 11581 sum |= (c1 - 'A' + 10);
11582 11582 } else {
11583 11583 EMLXS_MSGF(EMLXS_CONTEXT,
11584 11584 &emlxs_attach_debug_msg,
11585 11585 "Config error: Invalid PWWPN found. "
11586 11586 "entry=%d byte=%d lo_nibble=%c",
11587 11587 i, j, c1);
11588 11588 errors++;
11589 11589 }
11590 11590
11591 11591 *np++ = (uint8_t)sum;
11592 11592 }
11593 11593
11594 11594 if (*s++ != ':') {
11595 11595 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11596 11596 "Config error: Invalid delimiter after PWWPN. "
11597 11597 "entry=%d", i);
11598 11598 goto out;
11599 11599 }
11600 11600
11601 11601 np = (uint8_t *)&wwnn;
11602 11602 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11603 11603 c1 = *s++;
11604 11604 if ((c1 >= '0') && (c1 <= '9')) {
11605 11605 sum = ((c1 - '0') << 4);
11606 11606 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11607 11607 sum = ((c1 - 'a' + 10) << 4);
11608 11608 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11609 11609 sum = ((c1 - 'A' + 10) << 4);
11610 11610 } else {
11611 11611 EMLXS_MSGF(EMLXS_CONTEXT,
11612 11612 &emlxs_attach_debug_msg,
11613 11613 "Config error: Invalid WWNN found. "
11614 11614 "entry=%d byte=%d hi_nibble=%c",
11615 11615 i, j, c1);
11616 11616 errors++;
11617 11617 }
11618 11618
11619 11619 c1 = *s++;
11620 11620 if ((c1 >= '0') && (c1 <= '9')) {
11621 11621 sum |= (c1 - '0');
11622 11622 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11623 11623 sum |= (c1 - 'a' + 10);
11624 11624 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11625 11625 sum |= (c1 - 'A' + 10);
11626 11626 } else {
11627 11627 EMLXS_MSGF(EMLXS_CONTEXT,
11628 11628 &emlxs_attach_debug_msg,
11629 11629 "Config error: Invalid WWNN found. "
11630 11630 "entry=%d byte=%d lo_nibble=%c",
11631 11631 i, j, c1);
11632 11632 errors++;
11633 11633 }
11634 11634
11635 11635 *np++ = (uint8_t)sum;
11636 11636 }
11637 11637
11638 11638 if (*s++ != ':') {
11639 11639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11640 11640 "Config error: Invalid delimiter after WWNN. "
11641 11641 "entry=%d", i);
11642 11642 goto out;
11643 11643 }
11644 11644
11645 11645 np = (uint8_t *)&wwpn;
11646 11646 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11647 11647 c1 = *s++;
11648 11648 if ((c1 >= '0') && (c1 <= '9')) {
11649 11649 sum = ((c1 - '0') << 4);
11650 11650 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11651 11651 sum = ((c1 - 'a' + 10) << 4);
11652 11652 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11653 11653 sum = ((c1 - 'A' + 10) << 4);
11654 11654 } else {
11655 11655 EMLXS_MSGF(EMLXS_CONTEXT,
11656 11656 &emlxs_attach_debug_msg,
11657 11657 "Config error: Invalid WWPN found. "
11658 11658 "entry=%d byte=%d hi_nibble=%c",
11659 11659 i, j, c1);
11660 11660
11661 11661 errors++;
11662 11662 }
11663 11663
11664 11664 c1 = *s++;
11665 11665 if ((c1 >= '0') && (c1 <= '9')) {
11666 11666 sum |= (c1 - '0');
11667 11667 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11668 11668 sum |= (c1 - 'a' + 10);
11669 11669 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11670 11670 sum |= (c1 - 'A' + 10);
11671 11671 } else {
11672 11672 EMLXS_MSGF(EMLXS_CONTEXT,
11673 11673 &emlxs_attach_debug_msg,
11674 11674 "Config error: Invalid WWPN found. "
11675 11675 "entry=%d byte=%d lo_nibble=%c",
11676 11676 i, j, c1);
11677 11677
11678 11678 errors++;
11679 11679 }
11680 11680
11681 11681 *np++ = (uint8_t)sum;
11682 11682 }
11683 11683
11684 11684 if (*s++ != ':') {
11685 11685 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11686 11686 "Config error: Invalid delimiter after WWPN. "
11687 11687 "entry=%d", i);
11688 11688
11689 11689 goto out;
11690 11690 }
11691 11691
11692 11692 sum = 0;
11693 11693 do {
11694 11694 c1 = *s++;
11695 11695 if ((c1 < '0') || (c1 > '9')) {
11696 11696 EMLXS_MSGF(EMLXS_CONTEXT,
11697 11697 &emlxs_attach_debug_msg,
11698 11698 "Config error: Invalid VPI found. "
11699 11699 "entry=%d c=%c vpi=%d", i, c1, sum);
11700 11700
11701 11701 goto out;
11702 11702 }
11703 11703
11704 11704 sum = (sum * 10) + (c1 - '0');
11705 11705
11706 11706 } while (*s != 0);
11707 11707
11708 11708 vpi = sum;
11709 11709
11710 11710 if (errors) {
11711 11711 continue;
11712 11712 }
11713 11713
11714 11714 /* Entry has been read */
11715 11715
11716 11716 /* Check if the physical port wwpn */
11717 11717 /* matches our physical port wwpn */
11718 11718 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
11719 11719 continue;
11720 11720 }
11721 11721
11722 11722 /* Check vpi range */
11723 11723 if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
11724 11724 continue;
11725 11725 }
11726 11726
11727 11727 /* Check if port has already been configured */
11728 11728 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
11729 11729 continue;
11730 11730 }
11731 11731
11732 11732 /* Set the highest configured vpi */
11733 11733 if (vpi > hba->vpi_high) {
11734 11734 hba->vpi_high = vpi;
11735 11735 }
11736 11736
11737 11737 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
11738 11738 sizeof (NAME_TYPE));
11739 11739 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
11740 11740 sizeof (NAME_TYPE));
11741 11741
11742 11742 if (hba->port[vpi].snn[0] == 0) {
11743 11743 (void) strncpy((caddr_t)hba->port[vpi].snn,
11744 11744 (caddr_t)hba->snn,
11745 11745 (sizeof (hba->port[vpi].snn)-1));
11746 11746 }
11747 11747
11748 11748 if (hba->port[vpi].spn[0] == 0) {
11749 11749 (void) snprintf((caddr_t)hba->port[vpi].spn,
11750 11750 sizeof (hba->port[vpi].spn),
11751 11751 "%s VPort-%d",
11752 11752 (caddr_t)hba->spn, vpi);
11753 11753 }
11754 11754
11755 11755 hba->port[vpi].flag |=
11756 11756 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
11757 11757
11758 11758 if (cfg[CFG_VPORT_RESTRICTED].current) {
11759 11759 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
11760 11760 }
11761 11761 }
11762 11762
11763 11763 out:
11764 11764
11765 11765 (void) ddi_prop_free((void *) arrayp);
11766 11766 return;
11767 11767
11768 11768 } /* emlxs_read_vport_prop() */
11769 11769 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
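/*
 * Illustrative note (hypothetical example, not from the original source):
 * emlxs_read_vport_prop() parses each vport property entry as four
 * colon-separated fields, <physical-port WWPN>:<vport WWNN>:<vport WWPN>:<vpi>,
 * with each WWN given as 16 hex digits and the VPI as a decimal number.
 * A driver.conf entry using example WWNs might look like:
 *
 *     emlxs0-vport="10000000c9aabb01:20000000c9aabb02:10000000c9aabb02:1";
 */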
11770 11770
11771 11771
11772 11772 extern char *
11773 11773 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
11774 11774 {
11775 11775 (void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
11776 11776 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11777 11777 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11778 11778
11779 11779 return (buffer);
11780 11780
11781 11781 } /* emlxs_wwn_xlate() */
11782 11782
11783 11783
11784 11784 extern int32_t
11785 11785 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
11786 11786 {
11787 11787 uint32_t i;
11788 11788
11789 11789 for (i = 0; i < 8; i++, wwn1++, wwn2++) {
11790 11790 if (*wwn1 > *wwn2) {
11791 11791 return (1);
11792 11792 }
11793 11793 if (*wwn1 < *wwn2) {
11794 11794 return (-1);
11795 11795 }
11796 11796 }
11797 11797
11798 11798 return (0);
11799 11799
11800 11800 } /* emlxs_wwn_cmp() */
11801 11801
11802 11802
11803 11803 /* This is called at port online and offline */
11804 11804 extern void
11805 11805 emlxs_ub_flush(emlxs_port_t *port)
11806 11806 {
11807 11807 emlxs_hba_t *hba = HBA;
11808 11808 fc_unsol_buf_t *ubp;
11809 11809 emlxs_ub_priv_t *ub_priv;
11810 11810 emlxs_ub_priv_t *next;
11811 11811
11812 11812 /* Return if nothing to do */
11813 11813 if (!port->ub_wait_head) {
11814 11814 return;
11815 11815 }
11816 11816
11817 11817 mutex_enter(&EMLXS_PORT_LOCK);
11818 11818 ub_priv = port->ub_wait_head;
11819 11819 port->ub_wait_head = NULL;
11820 11820 port->ub_wait_tail = NULL;
11821 11821 mutex_exit(&EMLXS_PORT_LOCK);
11822 11822
11823 11823 while (ub_priv) {
11824 11824 next = ub_priv->next;
11825 11825 ubp = ub_priv->ubp;
11826 11826
11827 11827 /* Check if ULP is online and we have a callback function */
11828 11828 if (port->ulp_statec != FC_STATE_OFFLINE) {
11829 11829 /* Send ULP the ub buffer */
11830 11830 emlxs_ulp_unsol_cb(port, ubp);
11831 11831 } else { /* Drop the buffer */
11832 11832 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11833 11833 }
11834 11834
11835 11835 ub_priv = next;
11836 11836
11837 11837 } /* while () */
11838 11838
11839 11839 return;
11840 11840
11841 11841 } /* emlxs_ub_flush() */
11842 11842
11843 11843
11844 11844 extern void
11845 11845 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11846 11846 {
11847 11847 emlxs_hba_t *hba = HBA;
11848 11848 emlxs_ub_priv_t *ub_priv;
11849 11849
11850 11850 ub_priv = ubp->ub_fca_private;
11851 11851
11852 11852 /* Check if ULP is online */
11853 11853 if (port->ulp_statec != FC_STATE_OFFLINE) {
11854 11854 emlxs_ulp_unsol_cb(port, ubp);
11855 11855
11856 11856 } else { /* ULP offline */
11857 11857
11858 11858 if (hba->state >= FC_LINK_UP) {
11859 11859 /* Add buffer to queue tail */
11860 11860 mutex_enter(&EMLXS_PORT_LOCK);
11861 11861
11862 11862 if (port->ub_wait_tail) {
11863 11863 port->ub_wait_tail->next = ub_priv;
11864 11864 }
11865 11865 port->ub_wait_tail = ub_priv;
11866 11866
11867 11867 if (!port->ub_wait_head) {
11868 11868 port->ub_wait_head = ub_priv;
11869 11869 }
11870 11870
11871 11871 mutex_exit(&EMLXS_PORT_LOCK);
11872 11872 } else {
11873 11873 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11874 11874 }
11875 11875 }
11876 11876
11877 11877 return;
11878 11878
11879 11879 } /* emlxs_ub_callback() */
11880 11880
11881 11881
11882 11882 extern void
11883 11883 emlxs_fca_link_up(emlxs_port_t *port)
11884 11884 {
11885 11885 emlxs_ulp_statec_cb(port, port->ulp_statec);
11886 11886 return;
11887 11887
11888 11888 } /* emlxs_fca_link_up() */
11889 11889
11890 11890
11891 11891 extern void
11892 11892 emlxs_fca_link_down(emlxs_port_t *port)
11893 11893 {
11894 11894 emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11895 11895 return;
11896 11896
11897 11897 } /* emlxs_fca_link_down() */
11898 11898
11899 11899
11900 11900 static uint32_t
11901 11901 emlxs_integrity_check(emlxs_hba_t *hba)
11902 11902 {
11903 11903 uint32_t size;
11904 11904 uint32_t errors = 0;
11905 11905 int ddiinst = hba->ddiinst;
11906 11906
11907 11907 size = 16;
11908 11908 if (sizeof (ULP_BDL) != size) {
11909 11909 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
11910 11910 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11911 11911
11912 11912 errors++;
11913 11913 }
11914 11914 size = 8;
11915 11915 if (sizeof (ULP_BDE) != size) {
11916 11916 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
11917 11917 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11918 11918
11919 11919 errors++;
11920 11920 }
11921 11921 size = 12;
11922 11922 if (sizeof (ULP_BDE64) != size) {
11923 11923 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
11924 11924 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11925 11925
11926 11926 errors++;
11927 11927 }
11928 11928 size = 16;
11929 11929 if (sizeof (HBQE_t) != size) {
11930 11930 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
11931 11931 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11932 11932
11933 11933 errors++;
11934 11934 }
11935 11935 size = 8;
11936 11936 if (sizeof (HGP) != size) {
11937 11937 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
11938 11938 DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11939 11939
11940 11940 errors++;
11941 11941 }
11942 11942 if (sizeof (PGP) != size) {
11943 11943 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
11944 11944 DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11945 11945
11946 11946 errors++;
11947 11947 }
11948 11948 size = 4;
11949 11949 if (sizeof (WORD5) != size) {
11950 11950 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
11951 11951 DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11952 11952
11953 11953 errors++;
11954 11954 }
11955 11955 size = 124;
11956 11956 if (sizeof (MAILVARIANTS) != size) {
11957 11957 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
11958 11958 "%d != 124", DRIVER_NAME, ddiinst,
11959 11959 (int)sizeof (MAILVARIANTS));
11960 11960
11961 11961 errors++;
11962 11962 }
11963 11963 size = 128;
11964 11964 if (sizeof (SLI1_DESC) != size) {
11965 11965 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
11966 11966 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11967 11967
11968 11968 errors++;
11969 11969 }
11970 11970 if (sizeof (SLI2_DESC) != size) {
11971 11971 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
11972 11972 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11973 11973
11974 11974 errors++;
11975 11975 }
11976 11976 size = MBOX_SIZE;
11977 11977 if (sizeof (MAILBOX) != size) {
11978 11978 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
11979 11979 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11980 11980
11981 11981 errors++;
11982 11982 }
11983 11983 size = PCB_SIZE;
11984 11984 if (sizeof (PCB) != size) {
11985 11985 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
11986 11986 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11987 11987
11988 11988 errors++;
11989 11989 }
11990 11990 size = 260;
11991 11991 if (sizeof (ATTRIBUTE_ENTRY) != size) {
11992 11992 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
11993 11993 "%d != 260", DRIVER_NAME, ddiinst,
11994 11994 (int)sizeof (ATTRIBUTE_ENTRY));
11995 11995
11996 11996 errors++;
11997 11997 }
11998 11998 size = SLI_SLIM1_SIZE;
11999 11999 if (sizeof (SLIM1) != size) {
12000 12000 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
12001 12001 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12002 12002
12003 12003 errors++;
12004 12004 }
12005 12005 size = SLI3_IOCB_CMD_SIZE;
12006 12006 if (sizeof (IOCB) != size) {
12007 12007 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
12008 12008 DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12009 12009 SLI3_IOCB_CMD_SIZE);
12010 12010
12011 12011 errors++;
12012 12012 }
12013 12013
12014 12014 size = SLI_SLIM2_SIZE;
12015 12015 if (sizeof (SLIM2) != size) {
12016 12016 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
12017 12017 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12018 12018 SLI_SLIM2_SIZE);
12019 12019
12020 12020 errors++;
12021 12021 }
12022 12022 return (errors);
12023 12023
12024 12024 } /* emlxs_integrity_check() */
12025 12025
12026 12026
12027 12027 #ifdef FMA_SUPPORT
12028 12028 /*
12029 12029 * FMA support
12030 12030 */
12031 12031
12032 12032 extern void
12033 12033 emlxs_fm_init(emlxs_hba_t *hba)
12034 12034 {
12035 12035 ddi_iblock_cookie_t iblk;
12036 12036
12037 12037 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12038 12038 return;
12039 12039 }
12040 12040
12041 12041 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12042 12042 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12043 12043 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12044 12044 }
12045 12045
12046 12046 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12047 12047 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12048 12048 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
12049 12049 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
12050 12050 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
12051 12051 } else {
12052 12052 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12053 12053 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12054 12054 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12055 12055 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12056 12056 }
12057 12057
12058 12058 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
12059 12059
12060 12060 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12061 12061 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12062 12062 pci_ereport_setup(hba->dip);
12063 12063 }
12064 12064
12065 12065 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12066 12066 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
12067 12067 (void *)hba);
12068 12068 }
12069 12069
12070 12070 } /* emlxs_fm_init() */
12071 12071
12072 12072
12073 12073 extern void
12074 12074 emlxs_fm_fini(emlxs_hba_t *hba)
12075 12075 {
12076 12076 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12077 12077 return;
12078 12078 }
12079 12079
12080 12080 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12081 12081 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12082 12082 pci_ereport_teardown(hba->dip);
12083 12083 }
12084 12084
12085 12085 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12086 12086 ddi_fm_handler_unregister(hba->dip);
12087 12087 }
12088 12088
12089 12089 (void) ddi_fm_fini(hba->dip);
12090 12090
12091 12091 } /* emlxs_fm_fini() */
12092 12092
12093 12093
12094 12094 extern int
12095 12095 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
12096 12096 {
12097 12097 ddi_fm_error_t err;
12098 12098
12099 12099 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12100 12100 return (DDI_FM_OK);
12101 12101 }
12102 12102
12103 12103 /* Some S10 versions do not define the ahi_err structure */
12104 12104 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
12105 12105 return (DDI_FM_OK);
12106 12106 }
12107 12107
12108 12108 err.fme_status = DDI_FM_OK;
12109 12109 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
12110 12110
12111 12111 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */
12112 12112 if ((void *)&ddi_fm_acc_err_clear != NULL) {
12113 12113 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
12114 12114 }
12115 12115
12116 12116 return (err.fme_status);
12117 12117
12118 12118 } /* emlxs_fm_check_acc_handle() */
12119 12119
12120 12120
12121 12121 extern int
12122 12122 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12123 12123 {
12124 12124 ddi_fm_error_t err;
12125 12125
12126 12126 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12127 12127 return (DDI_FM_OK);
12128 12128 }
12129 12129
12130 12130 err.fme_status = DDI_FM_OK;
12131 12131 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12132 12132
12133 12133 return (err.fme_status);
12134 12134
12135 12135 } /* emlxs_fm_check_dma_handle() */
12136 12136
12137 12137
12138 12138 extern void
12139 12139 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12140 12140 {
12141 12141 uint64_t ena;
12142 12142 char buf[FM_MAX_CLASS];
12143 12143
12144 12144 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12145 12145 return;
12146 12146 }
12147 12147
12148 12148 if (detail == NULL) {
12149 12149 return;
12150 12150 }
12151 12151
12152 12152 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12153 12153 ena = fm_ena_generate(0, FM_ENA_FMT1);
12154 12154
12155 12155 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12156 12156 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12157 12157
12158 12158 } /* emlxs_fm_ereport() */
12159 12159
12160 12160
12161 12161 extern void
12162 12162 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12163 12163 {
12164 12164 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12165 12165 return;
12166 12166 }
12167 12167
12168 12168 if (impact == 0) {
12169 12169 return;
12170 12170 }
12171 12171
12172 12172 if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12173 12173 (impact == DDI_SERVICE_DEGRADED)) {
12174 12174 impact = DDI_SERVICE_UNAFFECTED;
12175 12175 }
12176 12176
12177 12177 ddi_fm_service_impact(hba->dip, impact);
12178 12178
12179 12179 return;
12180 12180
12181 12181 } /* emlxs_fm_service_impact() */
12182 12182
12183 12183
12184 12184 /*
12185 12185 * The I/O fault service error handling callback function
12186 12186 */
12187 12187 /*ARGSUSED*/
12188 12188 extern int
12189 12189 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12190 12190 const void *impl_data)
12191 12191 {
12192 12192 /*
12193 12193 * as the driver can always deal with an error
12194 12194 * in any dma or access handle, we can just return
12195 12195 * the fme_status value.
12196 12196 */
12197 12197 pci_ereport_post(dip, err, NULL);
12198 12198 return (err->fme_status);
12199 12199
12200 12200 } /* emlxs_fm_error_cb() */
12201 12201
12202 12202 extern void
12203 12203 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12204 12204 {
12205 12205 emlxs_port_t *port = sbp->port;
12206 12206 fc_packet_t *pkt = PRIV2PKT(sbp);
12207 12207
12208 12208 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12209 12209 if (emlxs_fm_check_dma_handle(hba,
12210 12210 hba->sli.sli4.slim2.dma_handle)
12211 12211 != DDI_FM_OK) {
12212 12212 EMLXS_MSGF(EMLXS_CONTEXT,
12213 12213 &emlxs_invalid_dma_handle_msg,
12214 12214 "slim2: hdl=%p",
12215 12215 hba->sli.sli4.slim2.dma_handle);
12216 12216
12217 12217 mutex_enter(&EMLXS_PORT_LOCK);
12218 12218 hba->flag |= FC_DMA_CHECK_ERROR;
12219 12219 mutex_exit(&EMLXS_PORT_LOCK);
12220 12220 }
12221 12221 } else {
12222 12222 if (emlxs_fm_check_dma_handle(hba,
12223 12223 hba->sli.sli3.slim2.dma_handle)
12224 12224 != DDI_FM_OK) {
12225 12225 EMLXS_MSGF(EMLXS_CONTEXT,
12226 12226 &emlxs_invalid_dma_handle_msg,
12227 12227 "slim2: hdl=%p",
12228 12228 hba->sli.sli3.slim2.dma_handle);
12229 12229
12230 12230 mutex_enter(&EMLXS_PORT_LOCK);
12231 12231 hba->flag |= FC_DMA_CHECK_ERROR;
12232 12232 mutex_exit(&EMLXS_PORT_LOCK);
12233 12233 }
12234 12234 }
12235 12235
12236 12236 if (hba->flag & FC_DMA_CHECK_ERROR) {
12237 12237 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12238 12238 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12239 12239 pkt->pkt_expln = FC_EXPLN_NONE;
12240 12240 pkt->pkt_action = FC_ACTION_RETRYABLE;
12241 12241 return;
12242 12242 }
12243 12243
12244 12244 if (pkt->pkt_cmdlen) {
12245 12245 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12246 12246 != DDI_FM_OK) {
12247 12247 EMLXS_MSGF(EMLXS_CONTEXT,
12248 12248 &emlxs_invalid_dma_handle_msg,
12249 12249 "pkt_cmd_dma: hdl=%p",
12250 12250 pkt->pkt_cmd_dma);
12251 12251
12252 12252 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12253 12253 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12254 12254 pkt->pkt_expln = FC_EXPLN_NONE;
12255 12255 pkt->pkt_action = FC_ACTION_RETRYABLE;
12256 12256
12257 12257 return;
12258 12258 }
12259 12259 }
12260 12260
12261 12261 if (pkt->pkt_rsplen) {
12262 12262 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12263 12263 != DDI_FM_OK) {
12264 12264 EMLXS_MSGF(EMLXS_CONTEXT,
12265 12265 &emlxs_invalid_dma_handle_msg,
12266 12266 "pkt_resp_dma: hdl=%p",
12267 12267 pkt->pkt_resp_dma);
12268 12268
12269 12269 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12270 12270 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12271 12271 pkt->pkt_expln = FC_EXPLN_NONE;
12272 12272 pkt->pkt_action = FC_ACTION_RETRYABLE;
12273 12273
12274 12274 return;
12275 12275 }
12276 12276 }
12277 12277
12278 12278 if (pkt->pkt_datalen) {
12279 12279 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12280 12280 != DDI_FM_OK) {
12281 12281 EMLXS_MSGF(EMLXS_CONTEXT,
12282 12282 &emlxs_invalid_dma_handle_msg,
12283 12283 "pkt_data_dma: hdl=%p",
12284 12284 pkt->pkt_data_dma);
12285 12285
12286 12286 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12287 12287 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12288 12288 pkt->pkt_expln = FC_EXPLN_NONE;
12289 12289 pkt->pkt_action = FC_ACTION_RETRYABLE;
12290 12290
12291 12291 return;
12292 12292 }
12293 12293 }
12294 12294
12295 12295 return;
12296 12296
12297 12297 } /* emlxs_check_dma() */
12298 12298 #endif /* FMA_SUPPORT */
12299 12299
12300 12300
12301 12301 extern void
12302 12302 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
12303 12303 {
12304 12304 uint32_t word;
12305 12305 uint32_t *wptr;
12306 12306 uint32_t i;
12307 12307
12308 12308 VERIFY((size % 4) == 0);
12309 12309
12310 12310 wptr = (uint32_t *)buffer;
12311 12311
12312 12312 for (i = 0; i < size / 4; i++) {
12313 12313 word = *wptr;
12314 12314 *wptr++ = SWAP32(word);
12315 12315 }
12316 12316
12317 12317 return;
12318 12318
12319 12319 } /* emlxs_swap32_buffer() */
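/*
 * Illustrative note (assumption, not from the original source): taking SWAP32
 * to be the usual 32-bit byte swap, a word holding 0x11223344 becomes
 * 0x44332211. Both this routine and emlxs_swap32_bcopy() below require the
 * length to be a multiple of four, which their VERIFY() assertions enforce.
 */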
12320 12320
12321 12321
12322 12322 extern void
12323 12323 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
12324 12324 {
12325 12325 uint32_t word;
12326 12326 uint32_t *sptr;
12327 12327 uint32_t *dptr;
12328 12328 uint32_t i;
12329 12329
12330 12330 VERIFY((size % 4) == 0);
12331 12331
12332 12332 sptr = (uint32_t *)src;
12333 12333 dptr = (uint32_t *)dst;
12334 12334
12335 12335 for (i = 0; i < size / 4; i++) {
12336 12336 word = *sptr++;
12337 12337 *dptr++ = SWAP32(word);
12338 12338 }
12339 12339
12340 12340 return;
12341 12341
12342 12342 } /* emlxs_swap32_bcopy() */
12343 12343
12344 12344
12345 12345 extern char *
12346 12346 emlxs_strtoupper(char *str)
12347 12347 {
12348 12348 char *cptr = str;
12349 12349
12350 12350 while (*cptr) {
12351 12351 if ((*cptr >= 'a') && (*cptr <= 'z')) {
12352 12352 *cptr -= ('a' - 'A');
12353 12353 }
12354 12354 cptr++;
12355 12355 }
12356 12356
12357 12357 return (str);
12358 12358
12359 12359 } /* emlxs_strtoupper() */
12360 12360
12361 12361
12362 12362 extern void
12363 12363 emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
12364 12364 {
12365 12365 emlxs_hba_t *hba = HBA;
12366 12366
12367 12367 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12368 12368
12369 12369 mutex_enter(&EMLXS_PORT_LOCK);
12370 12370 if (!(port->flag & EMLXS_INI_BOUND)) {
12371 12371 mutex_exit(&EMLXS_PORT_LOCK);
12372 12372 return;
12373 12373 }
12374 12374 port->ulp_busy++;
12375 12375 mutex_exit(&EMLXS_PORT_LOCK);
12376 12376
12377 12377 port->ulp_statec_cb(port->ulp_handle, statec);
12378 12378
12379 12379 mutex_enter(&EMLXS_PORT_LOCK);
12380 12380 port->ulp_busy--;
12381 12381 mutex_exit(&EMLXS_PORT_LOCK);
12382 12382
12383 12383 return;
12384 12384
12385 12385 } /* emlxs_ulp_statec_cb() */
12386 12386
12387 12387
12388 12388 extern void
12389 12389 emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
12390 12390 {
12391 12391 emlxs_hba_t *hba = HBA;
12392 12392
12393 12393 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12394 12394
12395 12395 mutex_enter(&EMLXS_PORT_LOCK);
12396 12396 if (!(port->flag & EMLXS_INI_BOUND)) {
12397 12397 mutex_exit(&EMLXS_PORT_LOCK);
12398 12398 return;
12399 12399 }
12400 12400 port->ulp_busy++;
12401 12401 mutex_exit(&EMLXS_PORT_LOCK);
12402 12402
12403 12403 port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);
12404 12404
12405 12405 mutex_enter(&EMLXS_PORT_LOCK);
12406 12406 port->ulp_busy--;
12407 12407 mutex_exit(&EMLXS_PORT_LOCK);
12408 12408
12409 12409 return;
12410 12410
12411 12411 } /* emlxs_ulp_unsol_cb() */
11915 lines elided